Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * lock.c
4 : * POSTGRES primary lock mechanism
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/lock.c
12 : *
13 : * NOTES
14 : * A lock table is a shared memory hash table. When
15 : * a process tries to acquire a lock of a type that conflicts
16 : * with existing locks, it is put to sleep using the routines
17 : * in storage/lmgr/proc.c.
18 : *
19 : * For the most part, this code should be invoked via lmgr.c
20 : * or another lock-management module, not directly.
21 : *
22 : * Interface:
23 : *
24 : * LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : * LockCheckConflicts(), GrantLock()
27 : *
28 : *-------------------------------------------------------------------------
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 :
35 : #include "access/transam.h"
36 : #include "access/twophase.h"
37 : #include "access/twophase_rmgr.h"
38 : #include "access/xlog.h"
39 : #include "access/xlogutils.h"
40 : #include "miscadmin.h"
41 : #include "pg_trace.h"
42 : #include "storage/lmgr.h"
43 : #include "storage/proc.h"
44 : #include "storage/procarray.h"
45 : #include "storage/spin.h"
46 : #include "storage/standby.h"
47 : #include "utils/memutils.h"
48 : #include "utils/ps_status.h"
49 : #include "utils/resowner.h"
50 :
51 :
52 : /* GUC variables */
53 : int max_locks_per_xact; /* used to set the lock table size */
54 : bool log_lock_failure = false;
55 :
56 : #define NLOCKENTS() \
57 : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
58 :
59 :
60 : /*
61 : * Data structures defining the semantics of the standard lock methods.
62 : *
63 : * The conflict table defines the semantics of the various lock modes.
64 : */
65 : static const LOCKMASK LockConflicts[] = {
66 : 0,
67 :
68 : /* AccessShareLock */
69 : LOCKBIT_ON(AccessExclusiveLock),
70 :
71 : /* RowShareLock */
72 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
73 :
74 : /* RowExclusiveLock */
75 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
76 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
77 :
78 : /* ShareUpdateExclusiveLock */
79 : LOCKBIT_ON(ShareUpdateExclusiveLock) |
80 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
81 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
82 :
83 : /* ShareLock */
84 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
85 : LOCKBIT_ON(ShareRowExclusiveLock) |
86 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
87 :
88 : /* ShareRowExclusiveLock */
89 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
90 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
91 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
92 :
93 : /* ExclusiveLock */
94 : LOCKBIT_ON(RowShareLock) |
95 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
96 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
97 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
98 :
99 : /* AccessExclusiveLock */
100 : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
101 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
102 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
103 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
104 :
105 : };
106 :
107 : /* Names of lock modes, for debug printouts */
108 : static const char *const lock_mode_names[] =
109 : {
110 : "INVALID",
111 : "AccessShareLock",
112 : "RowShareLock",
113 : "RowExclusiveLock",
114 : "ShareUpdateExclusiveLock",
115 : "ShareLock",
116 : "ShareRowExclusiveLock",
117 : "ExclusiveLock",
118 : "AccessExclusiveLock"
119 : };
120 :
121 : #ifndef LOCK_DEBUG
122 : static bool Dummy_trace = false;
123 : #endif
124 :
125 : static const LockMethodData default_lockmethod = {
126 : MaxLockMode,
127 : LockConflicts,
128 : lock_mode_names,
129 : #ifdef LOCK_DEBUG
130 : &Trace_locks
131 : #else
132 : &Dummy_trace
133 : #endif
134 : };
135 :
136 : static const LockMethodData user_lockmethod = {
137 : MaxLockMode,
138 : LockConflicts,
139 : lock_mode_names,
140 : #ifdef LOCK_DEBUG
141 : &Trace_userlocks
142 : #else
143 : &Dummy_trace
144 : #endif
145 : };
146 :
147 : /*
148 : * map from lock method id to the lock table data structures
149 : */
150 : static const LockMethod LockMethods[] = {
151 : NULL,
152 : &default_lockmethod,
153 : &user_lockmethod
154 : };
155 :
156 :
157 : /* Record that's written to 2PC state file when a lock is persisted */
158 : typedef struct TwoPhaseLockRecord
159 : {
160 : LOCKTAG locktag;
161 : LOCKMODE lockmode;
162 : } TwoPhaseLockRecord;
163 :
164 :
165 : /*
166 : * Count of the number of fast path lock slots we believe to be used. This
167 : * might be higher than the real number if another backend has transferred
168 : * our locks to the primary lock table, but it can never be lower than the
169 : * real value, since only we can acquire locks on our own behalf.
170 : *
171 : * XXX Allocate a static array of the maximum size. We could use a pointer
172 : * and then allocate just the right size to save a couple kB, but then we
173 : * would have to initialize that, while for the static array that happens
174 : * automatically. Doesn't seem worth the extra complexity.
175 : */
176 : static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
177 :
178 : /*
179 : * Flag to indicate if the relation extension lock is held by this backend.
180 : * This flag is used to ensure that while holding the relation extension lock
181 : * we don't try to acquire a heavyweight lock on any other object. This
182 : * restriction implies that the relation extension lock won't ever participate
183 : * in the deadlock cycle because we can never wait for any other heavyweight
184 : * lock after acquiring this lock.
185 : *
186 : * Such a restriction is okay for relation extension locks as unlike other
187 : * heavyweight locks these are not held till the transaction end. These are
188 : * taken for a short duration to extend a particular relation and then
189 : * released.
190 : */
191 : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
192 :
193 : /*
194 : * Number of fast-path locks per backend - size of the arrays in PGPROC.
195 : * This is set only once during start, before initializing shared memory,
196 : * and remains constant after that.
197 : *
198 : * We set the limit based on max_locks_per_transaction GUC, because that's
199 : * the best information about expected number of locks per backend we have.
200 : * See InitializeFastPathLocks() for details.
201 : */
202 : int FastPathLockGroupsPerBackend = 0;
203 :
204 : /*
205 : * Macros to calculate the fast-path group and index for a relation.
206 : *
207 : * The formula is a simple hash function, designed to spread the OIDs a bit,
208 : * so that even contiguous values end up in different groups. In most cases
209 : * there will be gaps anyway, but the multiplication should help a bit.
210 : *
211 : * The selected constant (49157) is a prime not too close to 2^k, and it's
212 : * small enough to not cause overflows (in 64-bit).
213 : *
214 : * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
215 : * InitializeFastPathLocks().
216 : */
217 : #define FAST_PATH_REL_GROUP(rel) \
218 : (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
219 :
220 : /*
221 : * Given the group/slot indexes, calculate the slot index in the whole array
222 : * of fast-path lock slots.
223 : */
224 : #define FAST_PATH_SLOT(group, index) \
225 : (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
226 : AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
227 : ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
228 :
229 : /*
230 : * Given a slot index (into the whole per-backend array), calculated using
231 : * the FAST_PATH_SLOT macro, split it into group and index (in the group).
232 : */
233 : #define FAST_PATH_GROUP(index) \
234 : (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
235 : ((index) / FP_LOCK_SLOTS_PER_GROUP))
236 : #define FAST_PATH_INDEX(index) \
237 : (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
238 : ((index) % FP_LOCK_SLOTS_PER_GROUP))
239 :
240 : /* Macros for manipulating proc->fpLockBits */
241 : #define FAST_PATH_BITS_PER_SLOT 3
242 : #define FAST_PATH_LOCKNUMBER_OFFSET 1
243 : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
244 : #define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
245 : #define FAST_PATH_GET_BITS(proc, n) \
246 : ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
247 : #define FAST_PATH_BIT_POSITION(n, l) \
248 : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
249 : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
250 : AssertMacro((n) < FastPathLockSlotsPerBackend()), \
251 : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
252 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
253 : FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
254 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
255 : FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
256 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
257 : (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
258 :
259 : /*
260 : * The fast-path lock mechanism is concerned only with relation locks on
261 : * unshared relations by backends bound to a database. The fast-path
262 : * mechanism exists mostly to accelerate acquisition and release of locks
263 : * that rarely conflict. Because ShareUpdateExclusiveLock is
264 : * self-conflicting, it can't use the fast-path mechanism; but it also does
265 : * not conflict with any of the locks that do, so we can ignore it completely.
266 : */
267 : #define EligibleForRelationFastPath(locktag, mode) \
268 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
269 : (locktag)->locktag_type == LOCKTAG_RELATION && \
270 : (locktag)->locktag_field1 == MyDatabaseId && \
271 : MyDatabaseId != InvalidOid && \
272 : (mode) < ShareUpdateExclusiveLock)
273 : #define ConflictsWithRelationFastPath(locktag, mode) \
274 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
275 : (locktag)->locktag_type == LOCKTAG_RELATION && \
276 : (locktag)->locktag_field1 != InvalidOid && \
277 : (mode) > ShareUpdateExclusiveLock)
278 :
279 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
280 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
281 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
282 : const LOCKTAG *locktag, uint32 hashcode);
283 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
284 :
285 : /*
286 : * To make the fast-path lock mechanism work, we must have some way of
287 : * preventing the use of the fast-path when a conflicting lock might be present.
288 : * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
289 : * and maintain an integer count of the number of "strong" lockers
290 : * in each partition. When any "strong" lockers are present (which is
291 : * hopefully not very often), the fast-path mechanism can't be used, and we
292 : * must fall back to the slower method of pushing matching locks directly
293 : * into the main lock tables.
294 : *
295 : * The deadlock detector does not know anything about the fast path mechanism,
296 : * so any locks that might be involved in a deadlock must be transferred from
297 : * the fast-path queues to the main lock table.
298 : */
299 :
300 : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
301 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
302 : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
303 : #define FastPathStrongLockHashPartition(hashcode) \
304 : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
305 :
306 : typedef struct
307 : {
308 : slock_t mutex;
309 : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
310 : } FastPathStrongRelationLockData;
311 :
312 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
313 :
314 :
315 : /*
316 : * Pointers to hash tables containing lock state
317 : *
318 : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
319 : * shared memory; LockMethodLocalHash is local to each backend.
320 : */
321 : static HTAB *LockMethodLockHash;
322 : static HTAB *LockMethodProcLockHash;
323 : static HTAB *LockMethodLocalHash;
324 :
325 :
326 : /* private state for error cleanup */
327 : static LOCALLOCK *StrongLockInProgress;
328 : static LOCALLOCK *awaitedLock;
329 : static ResourceOwner awaitedOwner;
330 :
331 :
332 : #ifdef LOCK_DEBUG
333 :
334 : /*------
335 : * The following configuration options are available for lock debugging:
336 : *
337 : * TRACE_LOCKS -- give a bunch of output what's going on in this file
338 : * TRACE_USERLOCKS -- same but for user locks
339 : * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
340 : * (use to avoid output on system tables)
341 : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
342 : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
343 : *
344 : * Furthermore, but in storage/lmgr/lwlock.c:
345 : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
346 : *
347 : * Define LOCK_DEBUG at compile time to get all these enabled.
348 : * --------
349 : */
350 :
351 : int Trace_lock_oidmin = FirstNormalObjectId;
352 : bool Trace_locks = false;
353 : bool Trace_userlocks = false;
354 : int Trace_lock_table = 0;
355 : bool Debug_deadlocks = false;
356 :
357 :
358 : inline static bool
359 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
360 : {
361 : return
362 : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
363 : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
364 : || (Trace_lock_table &&
365 : (tag->locktag_field2 == Trace_lock_table));
366 : }
367 :
368 :
369 : inline static void
370 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
371 : {
372 : if (LOCK_DEBUG_ENABLED(&lock->tag))
373 : elog(LOG,
374 : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
375 : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
376 : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
377 : where, lock,
378 : lock->tag.locktag_field1, lock->tag.locktag_field2,
379 : lock->tag.locktag_field3, lock->tag.locktag_field4,
380 : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
381 : lock->grantMask,
382 : lock->requested[1], lock->requested[2], lock->requested[3],
383 : lock->requested[4], lock->requested[5], lock->requested[6],
384 : lock->requested[7], lock->nRequested,
385 : lock->granted[1], lock->granted[2], lock->granted[3],
386 : lock->granted[4], lock->granted[5], lock->granted[6],
387 : lock->granted[7], lock->nGranted,
388 : dclist_count(&lock->waitProcs),
389 : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
390 : }
391 :
392 :
393 : inline static void
394 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
395 : {
396 : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
397 : elog(LOG,
398 : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
399 : where, proclockP, proclockP->tag.myLock,
400 : PROCLOCK_LOCKMETHOD(*(proclockP)),
401 : proclockP->tag.myProc, (int) proclockP->holdMask);
402 : }
403 : #else /* not LOCK_DEBUG */
404 :
405 : #define LOCK_PRINT(where, lock, type) ((void) 0)
406 : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
407 : #endif /* not LOCK_DEBUG */
408 :
409 :
410 : static uint32 proclock_hash(const void *key, Size keysize);
411 : static void RemoveLocalLock(LOCALLOCK *locallock);
412 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
413 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
414 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
415 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
416 : static void FinishStrongLockAcquire(void);
417 : static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
418 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
419 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
420 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
421 : PROCLOCK *proclock, LockMethod lockMethodTable);
422 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
423 : LockMethod lockMethodTable, uint32 hashcode,
424 : bool wakeupNeeded);
425 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
426 : LOCKTAG *locktag, LOCKMODE lockmode,
427 : bool decrement_strong_lock_count);
428 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
429 : BlockedProcsData *data);
430 :
431 :
432 : /*
433 : * Initialize the lock manager's shmem data structures.
434 : *
435 : * This is called from CreateSharedMemoryAndSemaphores(), which see for more
436 : * comments. In the normal postmaster case, the shared hash tables are
437 : * created here, and backends inherit pointers to them via fork(). In the
438 : * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
439 : * the already existing shared hash tables. In either case, each backend must
440 : * also call InitLockManagerAccess() to create the locallock hash table.
441 : */
442 : void
443 2100 : LockManagerShmemInit(void)
444 : {
445 : HASHCTL info;
446 : long init_table_size,
447 : max_table_size;
448 : bool found;
449 :
450 : /*
451 : * Compute init/max size to request for lock hashtables. Note these
452 : * calculations must agree with LockManagerShmemSize!
453 : */
454 2100 : max_table_size = NLOCKENTS();
455 2100 : init_table_size = max_table_size / 2;
456 :
457 : /*
458 : * Allocate hash table for LOCK structs. This stores per-locked-object
459 : * information.
460 : */
461 2100 : info.keysize = sizeof(LOCKTAG);
462 2100 : info.entrysize = sizeof(LOCK);
463 2100 : info.num_partitions = NUM_LOCK_PARTITIONS;
464 :
465 2100 : LockMethodLockHash = ShmemInitHash("LOCK hash",
466 : init_table_size,
467 : max_table_size,
468 : &info,
469 : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
470 :
471 : /* Assume an average of 2 holders per lock */
472 2100 : max_table_size *= 2;
473 2100 : init_table_size *= 2;
474 :
475 : /*
476 : * Allocate hash table for PROCLOCK structs. This stores
477 : * per-lock-per-holder information.
478 : */
479 2100 : info.keysize = sizeof(PROCLOCKTAG);
480 2100 : info.entrysize = sizeof(PROCLOCK);
481 2100 : info.hash = proclock_hash;
482 2100 : info.num_partitions = NUM_LOCK_PARTITIONS;
483 :
484 2100 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
485 : init_table_size,
486 : max_table_size,
487 : &info,
488 : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
489 :
490 : /*
491 : * Allocate fast-path structures.
492 : */
493 2100 : FastPathStrongRelationLocks =
494 2100 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
495 : sizeof(FastPathStrongRelationLockData), &found);
496 2100 : if (!found)
497 2100 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
498 2100 : }
499 :
500 : /*
501 : * Initialize the lock manager's backend-private data structures.
502 : */
503 : void
504 43370 : InitLockManagerAccess(void)
505 : {
506 : /*
507 : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
508 : * counts and resource owner information.
509 : */
510 : HASHCTL info;
511 :
512 43370 : info.keysize = sizeof(LOCALLOCKTAG);
513 43370 : info.entrysize = sizeof(LOCALLOCK);
514 :
515 43370 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
516 : 16,
517 : &info,
518 : HASH_ELEM | HASH_BLOBS);
519 43370 : }
520 :
521 :
522 : /*
523 : * Fetch the lock method table associated with a given lock
524 : */
525 : LockMethod
526 198 : GetLocksMethodTable(const LOCK *lock)
527 : {
528 198 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
529 :
530 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
531 198 : return LockMethods[lockmethodid];
532 : }
533 :
534 : /*
535 : * Fetch the lock method table associated with a given locktag
536 : */
537 : LockMethod
538 2254 : GetLockTagsMethodTable(const LOCKTAG *locktag)
539 : {
540 2254 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
541 :
542 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
543 2254 : return LockMethods[lockmethodid];
544 : }
545 :
546 :
547 : /*
548 : * Compute the hash code associated with a LOCKTAG.
549 : *
550 : * To avoid unnecessary recomputations of the hash code, we try to do this
551 : * just once per function, and then pass it around as needed. Aside from
552 : * passing the hashcode to hash_search_with_hash_value(), we can extract
553 : * the lock partition number from the hashcode.
554 : */
555 : uint32
556 36983180 : LockTagHashCode(const LOCKTAG *locktag)
557 : {
558 36983180 : return get_hash_value(LockMethodLockHash, locktag);
559 : }
560 :
561 : /*
562 : * Compute the hash code associated with a PROCLOCKTAG.
563 : *
564 : * Because we want to use just one set of partition locks for both the
565 : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
566 : * fall into the same partition number as their associated LOCKs.
567 : * dynahash.c expects the partition number to be the low-order bits of
568 : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
569 : * same low-order bits as the associated LOCKTAG's hash code. We achieve
570 : * this with this specialized hash function.
571 : */
572 : static uint32
573 1298 : proclock_hash(const void *key, Size keysize)
574 : {
575 1298 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
576 : uint32 lockhash;
577 : Datum procptr;
578 :
579 : Assert(keysize == sizeof(PROCLOCKTAG));
580 :
581 : /* Look into the associated LOCK object, and compute its hash code */
582 1298 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
583 :
584 : /*
585 : * To make the hash code also depend on the PGPROC, we xor the proc
586 : * struct's address into the hash code, left-shifted so that the
587 : * partition-number bits don't change. Since this is only a hash, we
588 : * don't care if we lose high-order bits of the address; use an
589 : * intermediate variable to suppress cast-pointer-to-int warnings.
590 : */
591 1298 : procptr = PointerGetDatum(proclocktag->myProc);
592 1298 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
593 :
594 1298 : return lockhash;
595 : }
596 :
597 : /*
598 : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
599 : * for its underlying LOCK.
600 : *
601 : * We use this just to avoid redundant calls of LockTagHashCode().
602 : */
603 : static inline uint32
604 8852788 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
605 : {
606 8852788 : uint32 lockhash = hashcode;
607 : Datum procptr;
608 :
609 : /*
610 : * This must match proclock_hash()!
611 : */
612 8852788 : procptr = PointerGetDatum(proclocktag->myProc);
613 8852788 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
614 :
615 8852788 : return lockhash;
616 : }
617 :
618 : /*
619 : * Given two lock modes, return whether they would conflict.
620 : */
621 : bool
622 488 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
623 : {
624 488 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
625 :
626 488 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
627 284 : return true;
628 :
629 204 : return false;
630 : }
631 :
632 : /*
633 : * LockHeldByMe -- test whether lock 'locktag' is held by the current
634 : * transaction
635 : *
636 : * Returns true if current transaction holds a lock on 'tag' of mode
637 : * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
638 : * ("Stronger" is defined as "numerically higher", which is a bit
639 : * semantically dubious but is OK for the purposes we use this for.)
640 : */
641 : bool
642 0 : LockHeldByMe(const LOCKTAG *locktag,
643 : LOCKMODE lockmode, bool orstronger)
644 : {
645 : LOCALLOCKTAG localtag;
646 : LOCALLOCK *locallock;
647 :
648 : /*
649 : * See if there is a LOCALLOCK entry for this lock and lockmode
650 : */
651 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
652 0 : localtag.lock = *locktag;
653 0 : localtag.mode = lockmode;
654 :
655 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
656 : &localtag,
657 : HASH_FIND, NULL);
658 :
659 0 : if (locallock && locallock->nLocks > 0)
660 0 : return true;
661 :
662 0 : if (orstronger)
663 : {
664 : LOCKMODE slockmode;
665 :
666 0 : for (slockmode = lockmode + 1;
667 : slockmode <= MaxLockMode;
668 0 : slockmode++)
669 : {
670 0 : if (LockHeldByMe(locktag, slockmode, false))
671 0 : return true;
672 : }
673 : }
674 :
675 0 : return false;
676 : }
677 :
678 : #ifdef USE_ASSERT_CHECKING
679 : /*
680 : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
681 : * evaluate assertions based on all locks held.
682 : */
683 : HTAB *
684 : GetLockMethodLocalHash(void)
685 : {
686 : return LockMethodLocalHash;
687 : }
688 : #endif
689 :
690 : /*
691 : * LockHasWaiters -- look up 'locktag' and check if releasing this
692 : * lock would wake up other processes waiting for it.
693 : */
694 : bool
695 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
696 : {
697 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
698 : LockMethod lockMethodTable;
699 : LOCALLOCKTAG localtag;
700 : LOCALLOCK *locallock;
701 : LOCK *lock;
702 : PROCLOCK *proclock;
703 : LWLock *partitionLock;
704 0 : bool hasWaiters = false;
705 :
706 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
707 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
708 0 : lockMethodTable = LockMethods[lockmethodid];
709 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
710 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
711 :
712 : #ifdef LOCK_DEBUG
713 : if (LOCK_DEBUG_ENABLED(locktag))
714 : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
715 : locktag->locktag_field1, locktag->locktag_field2,
716 : lockMethodTable->lockModeNames[lockmode]);
717 : #endif
718 :
719 : /*
720 : * Find the LOCALLOCK entry for this lock and lockmode
721 : */
722 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
723 0 : localtag.lock = *locktag;
724 0 : localtag.mode = lockmode;
725 :
726 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
727 : &localtag,
728 : HASH_FIND, NULL);
729 :
730 : /*
731 : * let the caller print its own error message, too. Do not ereport(ERROR).
732 : */
733 0 : if (!locallock || locallock->nLocks <= 0)
734 : {
735 0 : elog(WARNING, "you don't own a lock of type %s",
736 : lockMethodTable->lockModeNames[lockmode]);
737 0 : return false;
738 : }
739 :
740 : /*
741 : * Check the shared lock table.
742 : */
743 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
744 :
745 0 : LWLockAcquire(partitionLock, LW_SHARED);
746 :
747 : /*
748 : * We don't need to re-find the lock or proclock, since we kept their
749 : * addresses in the locallock table, and they couldn't have been removed
750 : * while we were holding a lock on them.
751 : */
752 0 : lock = locallock->lock;
753 : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
754 0 : proclock = locallock->proclock;
755 : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
756 :
757 : /*
758 : * Double-check that we are actually holding a lock of the type we want to
759 : * release.
760 : */
761 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
762 : {
763 : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
764 0 : LWLockRelease(partitionLock);
765 0 : elog(WARNING, "you don't own a lock of type %s",
766 : lockMethodTable->lockModeNames[lockmode]);
767 0 : RemoveLocalLock(locallock);
768 0 : return false;
769 : }
770 :
771 : /*
772 : * Do the checking.
773 : */
774 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
775 0 : hasWaiters = true;
776 :
777 0 : LWLockRelease(partitionLock);
778 :
779 0 : return hasWaiters;
780 : }
781 :
782 : /*
783 : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
784 : * set lock if/when no conflicts.
785 : *
786 : * Inputs:
787 : * locktag: unique identifier for the lockable object
788 : * lockmode: lock mode to acquire
789 : * sessionLock: if true, acquire lock for session not current transaction
790 : * dontWait: if true, don't wait to acquire lock
791 : *
792 : * Returns one of:
793 : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
794 : * LOCKACQUIRE_OK lock successfully acquired
795 : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
796 : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
797 : *
798 : * In the normal case where dontWait=false and the caller doesn't need to
799 : * distinguish a freshly acquired lock from one already taken earlier in
800 : * this same transaction, there is no need to examine the return value.
801 : *
802 : * Side Effects: The lock is acquired and recorded in lock tables.
803 : *
804 : * NOTE: if we wait for the lock, there is no way to abort the wait
805 : * short of aborting the transaction.
806 : */
807 : LockAcquireResult
808 1549622 : LockAcquire(const LOCKTAG *locktag,
809 : LOCKMODE lockmode,
810 : bool sessionLock,
811 : bool dontWait)
812 : {
813 1549622 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
814 : true, NULL, false);
815 : }
816 :
817 : /*
818 : * LockAcquireExtended - allows us to specify additional options
819 : *
820 : * reportMemoryError specifies whether a lock request that fills the lock
821 : * table should generate an ERROR or not. Passing "false" allows the caller
822 : * to attempt to recover from lock-table-full situations, perhaps by forcibly
823 : * canceling other lock holders and then retrying. Note, however, that the
824 : * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
825 : * in combination with dontWait = true, as the cause of failure couldn't be
826 : * distinguished.
827 : *
828 : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
829 : * table entry if a lock is successfully acquired, or NULL if not.
830 : *
831 : * logLockFailure indicates whether to log details when a lock acquisition
832 : * fails with dontWait = true.
833 : */
834 : LockAcquireResult
835 40159436 : LockAcquireExtended(const LOCKTAG *locktag,
836 : LOCKMODE lockmode,
837 : bool sessionLock,
838 : bool dontWait,
839 : bool reportMemoryError,
840 : LOCALLOCK **locallockp,
841 : bool logLockFailure)
842 : {
843 40159436 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
844 : LockMethod lockMethodTable;
845 : LOCALLOCKTAG localtag;
846 : LOCALLOCK *locallock;
847 : LOCK *lock;
848 : PROCLOCK *proclock;
849 : bool found;
850 : ResourceOwner owner;
851 : uint32 hashcode;
852 : LWLock *partitionLock;
853 : bool found_conflict;
854 : ProcWaitStatus waitResult;
855 40159436 : bool log_lock = false;
856 :
857 40159436 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
858 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
859 40159436 : lockMethodTable = LockMethods[lockmethodid];
860 40159436 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
861 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
862 :
863 40159436 : if (RecoveryInProgress() && !InRecovery &&
864 566648 : (locktag->locktag_type == LOCKTAG_OBJECT ||
865 566648 : locktag->locktag_type == LOCKTAG_RELATION) &&
866 : lockmode > RowExclusiveLock)
867 0 : ereport(ERROR,
868 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
869 : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
870 : lockMethodTable->lockModeNames[lockmode]),
871 : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
872 :
873 : #ifdef LOCK_DEBUG
874 : if (LOCK_DEBUG_ENABLED(locktag))
875 : elog(LOG, "LockAcquire: lock [%u,%u] %s",
876 : locktag->locktag_field1, locktag->locktag_field2,
877 : lockMethodTable->lockModeNames[lockmode]);
878 : #endif
879 :
880 : /* Identify owner for lock */
881 40159436 : if (sessionLock)
882 171624 : owner = NULL;
883 : else
884 39987812 : owner = CurrentResourceOwner;
885 :
886 : /*
887 : * Find or create a LOCALLOCK entry for this lock and lockmode
888 : */
889 40159436 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
890 40159436 : localtag.lock = *locktag;
891 40159436 : localtag.mode = lockmode;
892 :
893 40159436 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
894 : &localtag,
895 : HASH_ENTER, &found);
896 :
897 : /*
898 : * if it's a new locallock object, initialize it
899 : */
900 40159436 : if (!found)
901 : {
902 35831872 : locallock->lock = NULL;
903 35831872 : locallock->proclock = NULL;
904 35831872 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
905 35831872 : locallock->nLocks = 0;
906 35831872 : locallock->holdsStrongLockCount = false;
907 35831872 : locallock->lockCleared = false;
908 35831872 : locallock->numLockOwners = 0;
909 35831872 : locallock->maxLockOwners = 8;
910 35831872 : locallock->lockOwners = NULL; /* in case next line fails */
911 35831872 : locallock->lockOwners = (LOCALLOCKOWNER *)
912 35831872 : MemoryContextAlloc(TopMemoryContext,
913 35831872 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
914 : }
915 : else
916 : {
917 : /* Make sure there will be room to remember the lock */
918 4327564 : if (locallock->numLockOwners >= locallock->maxLockOwners)
919 : {
920 38 : int newsize = locallock->maxLockOwners * 2;
921 :
922 38 : locallock->lockOwners = (LOCALLOCKOWNER *)
923 38 : repalloc(locallock->lockOwners,
924 : newsize * sizeof(LOCALLOCKOWNER));
925 38 : locallock->maxLockOwners = newsize;
926 : }
927 : }
928 40159436 : hashcode = locallock->hashcode;
929 :
930 40159436 : if (locallockp)
931 38609642 : *locallockp = locallock;
932 :
933 : /*
934 : * If we already hold the lock, we can just increase the count locally.
935 : *
936 : * If lockCleared is already set, caller need not worry about absorbing
937 : * sinval messages related to the lock's object.
938 : */
939 40159436 : if (locallock->nLocks > 0)
940 : {
941 4327564 : GrantLockLocal(locallock, owner);
942 4327564 : if (locallock->lockCleared)
943 4175088 : return LOCKACQUIRE_ALREADY_CLEAR;
944 : else
945 152476 : return LOCKACQUIRE_ALREADY_HELD;
946 : }
947 :
948 : /*
949 : * We don't acquire any other heavyweight lock while holding the relation
950 : * extension lock. We do allow to acquire the same relation extension
951 : * lock more than once but that case won't reach here.
952 : */
953 : Assert(!IsRelationExtensionLockHeld);
954 :
955 : /*
956 : * Prepare to emit a WAL record if acquisition of this lock needs to be
957 : * replayed in a standby server.
958 : *
959 : * Here we prepare to log; after lock is acquired we'll issue log record.
960 : * This arrangement simplifies error recovery in case the preparation step
961 : * fails.
962 : *
963 : * Only AccessExclusiveLocks can conflict with lock types that read-only
964 : * transactions can acquire in a standby server. Make sure this definition
965 : * matches the one in GetRunningTransactionLocks().
966 : */
967 35831872 : if (lockmode >= AccessExclusiveLock &&
968 468624 : locktag->locktag_type == LOCKTAG_RELATION &&
969 318810 : !RecoveryInProgress() &&
970 270672 : XLogStandbyInfoActive())
971 : {
972 200616 : LogAccessExclusiveLockPrepare();
973 200616 : log_lock = true;
974 : }
975 :
976 : /*
977 : * Attempt to take lock via fast path, if eligible. But if we remember
978 : * having filled up the fast path array, we don't attempt to make any
979 : * further use of it until we release some locks. It's possible that some
980 : * other backend has transferred some of those locks to the shared hash
981 : * table, leaving space free, but it's not worth acquiring the LWLock just
982 : * to check. It's also possible that we're acquiring a second or third
983 : * lock type on a relation we have already locked using the fast-path, but
984 : * for now we don't worry about that case either.
985 : */
986 35831872 : if (EligibleForRelationFastPath(locktag, lockmode) &&
987 32195944 : FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP)
988 : {
989 31739774 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
990 : bool acquired;
991 :
992 : /*
993 : * LWLockAcquire acts as a memory sequencing point, so it's safe to
994 : * assume that any strong locker whose increment to
995 : * FastPathStrongRelationLocks->counts becomes visible after we test
996 : * it has yet to begin to transfer fast-path locks.
997 : */
998 31739774 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
999 31739774 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
1000 542288 : acquired = false;
1001 : else
1002 31197486 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
1003 : lockmode);
1004 31739774 : LWLockRelease(&MyProc->fpInfoLock);
1005 31739774 : if (acquired)
1006 : {
1007 : /*
1008 : * The locallock might contain stale pointers to some old shared
1009 : * objects; we MUST reset these to null before considering the
1010 : * lock to be acquired via fast-path.
1011 : */
1012 31197486 : locallock->lock = NULL;
1013 31197486 : locallock->proclock = NULL;
1014 31197486 : GrantLockLocal(locallock, owner);
1015 31197486 : return LOCKACQUIRE_OK;
1016 : }
1017 : }
1018 :
1019 : /*
1020 : * If this lock could potentially have been taken via the fast-path by
1021 : * some other backend, we must (temporarily) disable further use of the
1022 : * fast-path for this lock tag, and migrate any locks already taken via
1023 : * this method to the main lock table.
1024 : */
1025 4634386 : if (ConflictsWithRelationFastPath(locktag, lockmode))
1026 : {
1027 378800 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1028 :
1029 378800 : BeginStrongLockAcquire(locallock, fasthashcode);
1030 378800 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1031 : hashcode))
1032 : {
1033 0 : AbortStrongLockAcquire();
1034 0 : if (locallock->nLocks == 0)
1035 0 : RemoveLocalLock(locallock);
1036 0 : if (locallockp)
1037 0 : *locallockp = NULL;
1038 0 : if (reportMemoryError)
1039 0 : ereport(ERROR,
1040 : (errcode(ERRCODE_OUT_OF_MEMORY),
1041 : errmsg("out of shared memory"),
1042 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1043 : else
1044 0 : return LOCKACQUIRE_NOT_AVAIL;
1045 : }
1046 : }
1047 :
1048 : /*
1049 : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1050 : * take it via the fast-path, either, so we've got to mess with the shared
1051 : * lock table.
1052 : */
1053 4634386 : partitionLock = LockHashPartitionLock(hashcode);
1054 :
1055 4634386 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1056 :
1057 : /*
1058 : * Find or create lock and proclock entries with this tag
1059 : *
1060 : * Note: if the locallock object already existed, it might have a pointer
1061 : * to the lock already ... but we should not assume that that pointer is
1062 : * valid, since a lock object with zero hold and request counts can go
1063 : * away anytime. So we have to use SetupLockInTable() to recompute the
1064 : * lock and proclock pointers, even if they're already set.
1065 : */
1066 4634386 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1067 : hashcode, lockmode);
1068 4634386 : if (!proclock)
1069 : {
1070 0 : AbortStrongLockAcquire();
1071 0 : LWLockRelease(partitionLock);
1072 0 : if (locallock->nLocks == 0)
1073 0 : RemoveLocalLock(locallock);
1074 0 : if (locallockp)
1075 0 : *locallockp = NULL;
1076 0 : if (reportMemoryError)
1077 0 : ereport(ERROR,
1078 : (errcode(ERRCODE_OUT_OF_MEMORY),
1079 : errmsg("out of shared memory"),
1080 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1081 : else
1082 0 : return LOCKACQUIRE_NOT_AVAIL;
1083 : }
1084 4634386 : locallock->proclock = proclock;
1085 4634386 : lock = proclock->tag.myLock;
1086 4634386 : locallock->lock = lock;
1087 :
1088 : /*
1089 : * If lock requested conflicts with locks requested by waiters, must join
1090 : * wait queue. Otherwise, check for conflict with already-held locks.
1091 : * (That's last because most complex check.)
1092 : */
1093 4634386 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1094 354 : found_conflict = true;
1095 : else
1096 4634032 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1097 : lock, proclock);
1098 :
1099 4634386 : if (!found_conflict)
1100 : {
1101 : /* No conflict with held or previously requested locks */
1102 4630346 : GrantLock(lock, proclock, lockmode);
1103 4630346 : waitResult = PROC_WAIT_STATUS_OK;
1104 : }
1105 : else
1106 : {
1107 : /*
1108 : * Join the lock's wait queue. We call this even in the dontWait
1109 : * case, because JoinWaitQueue() may discover that we can acquire the
1110 : * lock immediately after all.
1111 : */
1112 4040 : waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1113 : }
1114 :
1115 4634386 : if (waitResult == PROC_WAIT_STATUS_ERROR)
1116 : {
1117 : /*
1118 : * We're not getting the lock because a deadlock was detected already
1119 : * while trying to join the wait queue, or because we would have to
1120 : * wait but the caller requested no blocking.
1121 : *
1122 : * Undo the changes to shared entries before releasing the partition
1123 : * lock.
1124 : */
1125 1498 : AbortStrongLockAcquire();
1126 :
1127 1498 : if (proclock->holdMask == 0)
1128 : {
1129 : uint32 proclock_hashcode;
1130 :
1131 1092 : proclock_hashcode = ProcLockHashCode(&proclock->tag,
1132 : hashcode);
1133 1092 : dlist_delete(&proclock->lockLink);
1134 1092 : dlist_delete(&proclock->procLink);
1135 1092 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1136 1092 : &(proclock->tag),
1137 : proclock_hashcode,
1138 : HASH_REMOVE,
1139 : NULL))
1140 0 : elog(PANIC, "proclock table corrupted");
1141 : }
1142 : else
1143 : PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1144 1498 : lock->nRequested--;
1145 1498 : lock->requested[lockmode]--;
1146 : LOCK_PRINT("LockAcquire: did not join wait queue",
1147 : lock, lockmode);
1148 : Assert((lock->nRequested > 0) &&
1149 : (lock->requested[lockmode] >= 0));
1150 : Assert(lock->nGranted <= lock->nRequested);
1151 1498 : LWLockRelease(partitionLock);
1152 1498 : if (locallock->nLocks == 0)
1153 1498 : RemoveLocalLock(locallock);
1154 :
1155 1498 : if (dontWait)
1156 : {
1157 : /*
1158 : * Log lock holders and waiters as a detail log message if
1159 : * logLockFailure = true and lock acquisition fails with dontWait
1160 : * = true
1161 : */
1162 1496 : if (logLockFailure)
1163 : {
1164 : StringInfoData buf,
1165 : lock_waiters_sbuf,
1166 : lock_holders_sbuf;
1167 : const char *modename;
1168 0 : int lockHoldersNum = 0;
1169 :
1170 0 : initStringInfo(&buf);
1171 0 : initStringInfo(&lock_waiters_sbuf);
1172 0 : initStringInfo(&lock_holders_sbuf);
1173 :
1174 0 : DescribeLockTag(&buf, &locallock->tag.lock);
1175 0 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1176 : lockmode);
1177 :
1178 : /* Gather a list of all lock holders and waiters */
1179 0 : LWLockAcquire(partitionLock, LW_SHARED);
1180 0 : GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1181 : &lock_waiters_sbuf, &lockHoldersNum);
1182 0 : LWLockRelease(partitionLock);
1183 :
1184 0 : ereport(LOG,
1185 : (errmsg("process %d could not obtain %s on %s",
1186 : MyProcPid, modename, buf.data),
1187 : errdetail_log_plural(
1188 : "Process holding the lock: %s, Wait queue: %s.",
1189 : "Processes holding the lock: %s, Wait queue: %s.",
1190 : lockHoldersNum,
1191 : lock_holders_sbuf.data,
1192 : lock_waiters_sbuf.data)));
1193 :
1194 0 : pfree(buf.data);
1195 0 : pfree(lock_holders_sbuf.data);
1196 0 : pfree(lock_waiters_sbuf.data);
1197 : }
1198 1496 : if (locallockp)
1199 444 : *locallockp = NULL;
1200 1496 : return LOCKACQUIRE_NOT_AVAIL;
1201 : }
1202 : else
1203 : {
1204 2 : DeadLockReport();
1205 : /* DeadLockReport() will not return */
1206 : }
1207 : }
1208 :
1209 : /*
1210 : * We are now in the lock queue, or the lock was already granted. If
1211 : * queued, go to sleep.
1212 : */
1213 4632888 : if (waitResult == PROC_WAIT_STATUS_WAITING)
1214 : {
1215 : Assert(!dontWait);
1216 : PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1217 : LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1218 2532 : LWLockRelease(partitionLock);
1219 :
1220 2532 : waitResult = WaitOnLock(locallock, owner);
1221 :
1222 : /*
1223 : * NOTE: do not do any material change of state between here and
1224 : * return. All required changes in locktable state must have been
1225 : * done when the lock was granted to us --- see notes in WaitOnLock.
1226 : */
1227 :
1228 2448 : if (waitResult == PROC_WAIT_STATUS_ERROR)
1229 : {
1230 : /*
1231 : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1232 : * now.
1233 : */
1234 : Assert(!dontWait);
1235 10 : DeadLockReport();
1236 : /* DeadLockReport() will not return */
1237 : }
1238 : }
1239 : else
1240 4630356 : LWLockRelease(partitionLock);
1241 : Assert(waitResult == PROC_WAIT_STATUS_OK);
1242 :
1243 : /* The lock was granted to us. Update the local lock entry accordingly */
1244 : Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1245 4632794 : GrantLockLocal(locallock, owner);
1246 :
1247 : /*
1248 : * Lock state is fully up-to-date now; if we error out after this, no
1249 : * special error cleanup is required.
1250 : */
1251 4632794 : FinishStrongLockAcquire();
1252 :
1253 : /*
1254 : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1255 : * standby server.
1256 : */
1257 4632794 : if (log_lock)
1258 : {
1259 : /*
1260 : * Decode the locktag back to the original values, to avoid sending
1261 : * lots of empty bytes with every message. See lock.h to check how a
1262 : * locktag is defined for LOCKTAG_RELATION
1263 : */
1264 200190 : LogAccessExclusiveLock(locktag->locktag_field1,
1265 : locktag->locktag_field2);
1266 : }
1267 :
1268 4632794 : return LOCKACQUIRE_OK;
1269 : }
1270 :
1271 : /*
1272 : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1273 : * request.
1274 : *
1275 : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1276 : * for lack of shared memory.
1277 : *
1278 : * The appropriate partition lock must be held at entry, and will be
1279 : * held at exit.
1280 : */
1281 : static PROCLOCK *
1282 4637658 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1283 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1284 : {
1285 : LOCK *lock;
1286 : PROCLOCK *proclock;
1287 : PROCLOCKTAG proclocktag;
1288 : uint32 proclock_hashcode;
1289 : bool found;
1290 :
1291 : /*
1292 : * Find or create a lock with this tag.
1293 : */
1294 4637658 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1295 : locktag,
1296 : hashcode,
1297 : HASH_ENTER_NULL,
1298 : &found);
1299 4637658 : if (!lock)
1300 0 : return NULL;
1301 :
1302 : /*
1303 : * if it's a new lock object, initialize it
1304 : */
1305 4637658 : if (!found)
1306 : {
1307 4174784 : lock->grantMask = 0;
1308 4174784 : lock->waitMask = 0;
1309 4174784 : dlist_init(&lock->procLocks);
1310 4174784 : dclist_init(&lock->waitProcs);
1311 4174784 : lock->nRequested = 0;
1312 4174784 : lock->nGranted = 0;
1313 25048704 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1314 4174784 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1315 : LOCK_PRINT("LockAcquire: new", lock, lockmode);
1316 : }
1317 : else
1318 : {
1319 : LOCK_PRINT("LockAcquire: found", lock, lockmode);
1320 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1321 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1322 : Assert(lock->nGranted <= lock->nRequested);
1323 : }
1324 :
1325 : /*
1326 : * Create the hash key for the proclock table.
1327 : */
1328 4637658 : proclocktag.myLock = lock;
1329 4637658 : proclocktag.myProc = proc;
1330 :
1331 4637658 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1332 :
1333 : /*
1334 : * Find or create a proclock entry with this tag
1335 : */
1336 4637658 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1337 : &proclocktag,
1338 : proclock_hashcode,
1339 : HASH_ENTER_NULL,
1340 : &found);
1341 4637658 : if (!proclock)
1342 : {
1343 : /* Oops, not enough shmem for the proclock */
1344 0 : if (lock->nRequested == 0)
1345 : {
1346 : /*
1347 : * There are no other requestors of this lock, so garbage-collect
1348 : * the lock object. We *must* do this to avoid a permanent leak
1349 : * of shared memory, because there won't be anything to cause
1350 : * anyone to release the lock object later.
1351 : */
1352 : Assert(dlist_is_empty(&(lock->procLocks)));
1353 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1354 0 : &(lock->tag),
1355 : hashcode,
1356 : HASH_REMOVE,
1357 : NULL))
1358 0 : elog(PANIC, "lock table corrupted");
1359 : }
1360 0 : return NULL;
1361 : }
1362 :
1363 : /*
1364 : * If new, initialize the new entry
1365 : */
1366 4637658 : if (!found)
1367 : {
1368 4210800 : uint32 partition = LockHashPartition(hashcode);
1369 :
1370 : /*
1371 : * It might seem unsafe to access proclock->groupLeader without a
1372 : * lock, but it's not really. Either we are initializing a proclock
1373 : * on our own behalf, in which case our group leader isn't changing
1374 : * because the group leader for a process can only ever be changed by
1375 : * the process itself; or else we are transferring a fast-path lock to
1376 : * the main lock table, in which case that process can't change its
1377 : * lock group leader without first releasing all of its locks (and in
1378 : * particular the one we are currently transferring).
1379 : */
1380 8421600 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
1381 4210800 : proc->lockGroupLeader : proc;
1382 4210800 : proclock->holdMask = 0;
1383 4210800 : proclock->releaseMask = 0;
1384 : /* Add proclock to appropriate lists */
1385 4210800 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1386 4210800 : dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1387 : PROCLOCK_PRINT("LockAcquire: new", proclock);
1388 : }
1389 : else
1390 : {
1391 : PROCLOCK_PRINT("LockAcquire: found", proclock);
1392 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
1393 :
1394 : #ifdef CHECK_DEADLOCK_RISK
1395 :
1396 : /*
1397 : * Issue warning if we already hold a lower-level lock on this object
1398 : * and do not hold a lock of the requested level or higher. This
1399 : * indicates a deadlock-prone coding practice (eg, we'd have a
1400 : * deadlock if another backend were following the same code path at
1401 : * about the same time).
1402 : *
1403 : * This is not enabled by default, because it may generate log entries
1404 : * about user-level coding practices that are in fact safe in context.
1405 : * It can be enabled to help find system-level problems.
1406 : *
1407 : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1408 : * better to use a table. For now, though, this works.
1409 : */
1410 : {
1411 : int i;
1412 :
1413 : for (i = lockMethodTable->numLockModes; i > 0; i--)
1414 : {
1415 : if (proclock->holdMask & LOCKBIT_ON(i))
1416 : {
1417 : if (i >= (int) lockmode)
1418 : break; /* safe: we have a lock >= req level */
1419 : elog(LOG, "deadlock risk: raising lock level"
1420 : " from %s to %s on object %u/%u/%u",
1421 : lockMethodTable->lockModeNames[i],
1422 : lockMethodTable->lockModeNames[lockmode],
1423 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1424 : lock->tag.locktag_field3);
1425 : break;
1426 : }
1427 : }
1428 : }
1429 : #endif /* CHECK_DEADLOCK_RISK */
1430 : }
1431 :
1432 : /*
1433 : * lock->nRequested and lock->requested[] count the total number of
1434 : * requests, whether granted or waiting, so increment those immediately.
1435 : * The other counts don't increment till we get the lock.
1436 : */
1437 4637658 : lock->nRequested++;
1438 4637658 : lock->requested[lockmode]++;
1439 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1440 :
1441 : /*
1442 : * We shouldn't already hold the desired lock; else locallock table is
1443 : * broken.
1444 : */
1445 4637658 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
1446 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
1447 : lockMethodTable->lockModeNames[lockmode],
1448 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1449 : lock->tag.locktag_field3);
1450 :
1451 4637658 : return proclock;
1452 : }
1453 :
1454 : /*
1455 : * Check and set/reset the flag that we hold the relation extension lock.
1456 : *
1457 : * It is callers responsibility that this function is called after
1458 : * acquiring/releasing the relation extension lock.
1459 : *
1460 : * Pass acquired as true if lock is acquired, false otherwise.
1461 : */
1462 : static inline void
1463 72893958 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1464 : {
1465 : #ifdef USE_ASSERT_CHECKING
1466 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1467 : IsRelationExtensionLockHeld = acquired;
1468 : #endif
1469 72893958 : }
1470 :
1471 : /*
1472 : * Subroutine to free a locallock entry
1473 : */
1474 : static void
1475 35831872 : RemoveLocalLock(LOCALLOCK *locallock)
1476 : {
1477 : int i;
1478 :
1479 36013538 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1480 : {
1481 181666 : if (locallock->lockOwners[i].owner != NULL)
1482 181590 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1483 : }
1484 35831872 : locallock->numLockOwners = 0;
1485 35831872 : if (locallock->lockOwners != NULL)
1486 35831872 : pfree(locallock->lockOwners);
1487 35831872 : locallock->lockOwners = NULL;
1488 :
1489 35831872 : if (locallock->holdsStrongLockCount)
1490 : {
1491 : uint32 fasthashcode;
1492 :
1493 378244 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1494 :
1495 378244 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1496 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1497 378244 : FastPathStrongRelationLocks->count[fasthashcode]--;
1498 378244 : locallock->holdsStrongLockCount = false;
1499 378244 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1500 : }
1501 :
1502 35831872 : if (!hash_search(LockMethodLocalHash,
1503 35831872 : &(locallock->tag),
1504 : HASH_REMOVE, NULL))
1505 0 : elog(WARNING, "locallock table corrupted");
1506 :
1507 : /*
1508 : * Indicate that the lock is released for certain types of locks
1509 : */
1510 35831872 : CheckAndSetLockHeld(locallock, false);
1511 35831872 : }
1512 :
1513 : /*
1514 : * LockCheckConflicts -- test whether requested lock conflicts
1515 : * with those already granted
1516 : *
1517 : * Returns true if conflict, false if no conflict.
1518 : *
1519 : * NOTES:
1520 : * Here's what makes this complicated: one process's locks don't
1521 : * conflict with one another, no matter what purpose they are held for
1522 : * (eg, session and transaction locks do not conflict). Nor do the locks
1523 : * of one process in a lock group conflict with those of another process in
1524 : * the same group. So, we must subtract off these locks when determining
1525 : * whether the requested new lock conflicts with those already held.
1526 : */
1527 : bool
1528 4636868 : LockCheckConflicts(LockMethod lockMethodTable,
1529 : LOCKMODE lockmode,
1530 : LOCK *lock,
1531 : PROCLOCK *proclock)
1532 : {
1533 4636868 : int numLockModes = lockMethodTable->numLockModes;
1534 : LOCKMASK myLocks;
1535 4636868 : int conflictMask = lockMethodTable->conflictTab[lockmode];
1536 : int conflictsRemaining[MAX_LOCKMODES];
1537 4636868 : int totalConflictsRemaining = 0;
1538 : dlist_iter proclock_iter;
1539 : int i;
1540 :
1541 : /*
1542 : * first check for global conflicts: If no locks conflict with my request,
1543 : * then I get the lock.
1544 : *
1545 : * Checking for conflict: lock->grantMask represents the types of
1546 : * currently held locks. conflictTable[lockmode] has a bit set for each
1547 : * type of lock that conflicts with request. Bitwise compare tells if
1548 : * there is a conflict.
1549 : */
1550 4636868 : if (!(conflictMask & lock->grantMask))
1551 : {
1552 : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1553 4442308 : return false;
1554 : }
1555 :
1556 : /*
1557 : * Rats. Something conflicts. But it could still be my own lock, or a
1558 : * lock held by another member of my locking group. First, figure out how
1559 : * many conflicts remain after subtracting out any locks I hold myself.
1560 : */
1561 194560 : myLocks = proclock->holdMask;
1562 1751040 : for (i = 1; i <= numLockModes; i++)
1563 : {
1564 1556480 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
1565 : {
1566 832648 : conflictsRemaining[i] = 0;
1567 832648 : continue;
1568 : }
1569 723832 : conflictsRemaining[i] = lock->granted[i];
1570 723832 : if (myLocks & LOCKBIT_ON(i))
1571 211432 : --conflictsRemaining[i];
1572 723832 : totalConflictsRemaining += conflictsRemaining[i];
1573 : }
1574 :
1575 : /* If no conflicts remain, we get the lock. */
1576 194560 : if (totalConflictsRemaining == 0)
1577 : {
1578 : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1579 189538 : return false;
1580 : }
1581 :
1582 : /* If no group locking, it's definitely a conflict. */
1583 5022 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1584 : {
1585 : Assert(proclock->tag.myProc == MyProc);
1586 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1587 : proclock);
1588 3678 : return true;
1589 : }
1590 :
1591 : /*
1592 : * The relation extension lock conflict even between the group members.
1593 : */
1594 1344 : if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1595 : {
1596 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1597 : proclock);
1598 14 : return true;
1599 : }
1600 :
1601 : /*
1602 : * Locks held in conflicting modes by members of our own lock group are
1603 : * not real conflicts; we can subtract those out and see if we still have
1604 : * a conflict. This is O(N) in the number of processes holding or
1605 : * awaiting locks on this object. We could improve that by making the
1606 : * shared memory state more complex (and larger) but it doesn't seem worth
1607 : * it.
1608 : */
1609 2398 : dlist_foreach(proclock_iter, &lock->procLocks)
1610 : {
1611 2042 : PROCLOCK *otherproclock =
1612 2042 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1613 :
1614 2042 : if (proclock != otherproclock &&
1615 1686 : proclock->groupLeader == otherproclock->groupLeader &&
1616 978 : (otherproclock->holdMask & conflictMask) != 0)
1617 : {
1618 974 : int intersectMask = otherproclock->holdMask & conflictMask;
1619 :
1620 8766 : for (i = 1; i <= numLockModes; i++)
1621 : {
1622 7792 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
1623 : {
1624 994 : if (conflictsRemaining[i] <= 0)
1625 0 : elog(PANIC, "proclocks held do not match lock");
1626 994 : conflictsRemaining[i]--;
1627 994 : totalConflictsRemaining--;
1628 : }
1629 : }
1630 :
1631 974 : if (totalConflictsRemaining == 0)
1632 : {
1633 : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1634 : proclock);
1635 974 : return false;
1636 : }
1637 : }
1638 : }
1639 :
1640 : /* Nope, it's a real conflict. */
1641 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1642 356 : return true;
1643 : }
1644 :
1645 : /*
1646 : * GrantLock -- update the lock and proclock data structures to show
1647 : * the lock request has been granted.
1648 : *
1649 : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1650 : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1651 : *
1652 : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1653 : * table entry; but since we may be awaking some other process, we can't do
1654 : * that here; it's done by GrantLockLocal, instead.
1655 : */
1656 : void
1657 4636272 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1658 : {
1659 4636272 : lock->nGranted++;
1660 4636272 : lock->granted[lockmode]++;
1661 4636272 : lock->grantMask |= LOCKBIT_ON(lockmode);
1662 4636272 : if (lock->granted[lockmode] == lock->requested[lockmode])
1663 4635712 : lock->waitMask &= LOCKBIT_OFF(lockmode);
1664 4636272 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1665 : LOCK_PRINT("GrantLock", lock, lockmode);
1666 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1667 : Assert(lock->nGranted <= lock->nRequested);
1668 4636272 : }
1669 :
1670 : /*
1671 : * UnGrantLock -- opposite of GrantLock.
1672 : *
1673 : * Updates the lock and proclock data structures to show that the lock
1674 : * is no longer held nor requested by the current holder.
1675 : *
1676 : * Returns true if there were any waiters waiting on the lock that
1677 : * should now be woken up with ProcLockWakeup.
1678 : */
1679 : static bool
1680 4636116 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1681 : PROCLOCK *proclock, LockMethod lockMethodTable)
1682 : {
1683 4636116 : bool wakeupNeeded = false;
1684 :
1685 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1686 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1687 : Assert(lock->nGranted <= lock->nRequested);
1688 :
1689 : /*
1690 : * fix the general lock stats
1691 : */
1692 4636116 : lock->nRequested--;
1693 4636116 : lock->requested[lockmode]--;
1694 4636116 : lock->nGranted--;
1695 4636116 : lock->granted[lockmode]--;
1696 :
1697 4636116 : if (lock->granted[lockmode] == 0)
1698 : {
1699 : /* change the conflict mask. No more of this lock type. */
1700 4609296 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1701 : }
1702 :
1703 : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1704 :
1705 : /*
1706 : * We need only run ProcLockWakeup if the released lock conflicts with at
1707 : * least one of the lock types requested by waiter(s). Otherwise whatever
1708 : * conflict made them wait must still exist. NOTE: before MVCC, we could
1709 : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1710 : * not true anymore, because the remaining granted locks might belong to
1711 : * some waiter, who could now be awakened because he doesn't conflict with
1712 : * his own locks.
1713 : */
1714 4636116 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1715 2424 : wakeupNeeded = true;
1716 :
1717 : /*
1718 : * Now fix the per-proclock state.
1719 : */
1720 4636116 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1721 : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1722 :
1723 4636116 : return wakeupNeeded;
1724 : }
1725 :
1726 : /*
1727 : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1728 : * proclock and lock objects if possible, and call ProcLockWakeup if there
1729 : * are remaining requests and the caller says it's OK. (Normally, this
1730 : * should be called after UnGrantLock, and wakeupNeeded is the result from
1731 : * UnGrantLock.)
1732 : *
1733 : * The appropriate partition lock must be held at entry, and will be
1734 : * held at exit.
1735 : */
1736 : static void
1737 4562202 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1738 : LockMethod lockMethodTable, uint32 hashcode,
1739 : bool wakeupNeeded)
1740 : {
1741 : /*
1742 : * If this was my last hold on this lock, delete my entry in the proclock
1743 : * table.
1744 : */
1745 4562202 : if (proclock->holdMask == 0)
1746 : {
1747 : uint32 proclock_hashcode;
1748 :
1749 : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1750 4209750 : dlist_delete(&proclock->lockLink);
1751 4209750 : dlist_delete(&proclock->procLink);
1752 4209750 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1753 4209750 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1754 4209750 : &(proclock->tag),
1755 : proclock_hashcode,
1756 : HASH_REMOVE,
1757 : NULL))
1758 0 : elog(PANIC, "proclock table corrupted");
1759 : }
1760 :
1761 4562202 : if (lock->nRequested == 0)
1762 : {
1763 : /*
1764 : * The caller just released the last lock, so garbage-collect the lock
1765 : * object.
1766 : */
1767 : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1768 : Assert(dlist_is_empty(&lock->procLocks));
1769 4174796 : if (!hash_search_with_hash_value(LockMethodLockHash,
1770 4174796 : &(lock->tag),
1771 : hashcode,
1772 : HASH_REMOVE,
1773 : NULL))
1774 0 : elog(PANIC, "lock table corrupted");
1775 : }
1776 387406 : else if (wakeupNeeded)
1777 : {
1778 : /* There are waiters on this lock, so wake them up. */
1779 2506 : ProcLockWakeup(lockMethodTable, lock);
1780 : }
1781 4562202 : }
1782 :
1783 : /*
1784 : * GrantLockLocal -- update the locallock data structures to show
1785 : * the lock request has been granted.
1786 : *
1787 : * We expect that LockAcquire made sure there is room to add a new
1788 : * ResourceOwner entry.
1789 : */
1790 : static void
1791 40157844 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1792 : {
1793 40157844 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1794 : int i;
1795 :
1796 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1797 : /* Count the total */
1798 40157844 : locallock->nLocks++;
1799 : /* Count the per-owner lock */
1800 41820076 : for (i = 0; i < locallock->numLockOwners; i++)
1801 : {
1802 4757990 : if (lockOwners[i].owner == owner)
1803 : {
1804 3095758 : lockOwners[i].nLocks++;
1805 3095758 : return;
1806 : }
1807 : }
1808 37062086 : lockOwners[i].owner = owner;
1809 37062086 : lockOwners[i].nLocks = 1;
1810 37062086 : locallock->numLockOwners++;
1811 37062086 : if (owner != NULL)
1812 36891540 : ResourceOwnerRememberLock(owner, locallock);
1813 :
1814 : /* Indicate that the lock is acquired for certain types of locks. */
1815 37062086 : CheckAndSetLockHeld(locallock, true);
1816 : }
1817 :
1818 : /*
1819 : * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1820 : * and arrange for error cleanup if it fails
1821 : */
1822 : static void
1823 378800 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1824 : {
1825 : Assert(StrongLockInProgress == NULL);
1826 : Assert(locallock->holdsStrongLockCount == false);
1827 :
1828 : /*
1829 : * Adding to a memory location is not atomic, so we take a spinlock to
1830 : * ensure we don't collide with someone else trying to bump the count at
1831 : * the same time.
1832 : *
1833 : * XXX: It might be worth considering using an atomic fetch-and-add
1834 : * instruction here, on architectures where that is supported.
1835 : */
1836 :
1837 378800 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1838 378800 : FastPathStrongRelationLocks->count[fasthashcode]++;
1839 378800 : locallock->holdsStrongLockCount = true;
1840 378800 : StrongLockInProgress = locallock;
1841 378800 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1842 378800 : }
1843 :
1844 : /*
1845 : * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1846 : * acquisition once it's no longer needed
1847 : */
1848 : static void
1849 4632794 : FinishStrongLockAcquire(void)
1850 : {
1851 4632794 : StrongLockInProgress = NULL;
1852 4632794 : }
1853 :
1854 : /*
1855 : * AbortStrongLockAcquire - undo strong lock state changes performed by
1856 : * BeginStrongLockAcquire.
1857 : */
1858 : void
1859 909522 : AbortStrongLockAcquire(void)
1860 : {
1861 : uint32 fasthashcode;
1862 909522 : LOCALLOCK *locallock = StrongLockInProgress;
1863 :
1864 909522 : if (locallock == NULL)
1865 909096 : return;
1866 :
1867 426 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1868 : Assert(locallock->holdsStrongLockCount == true);
1869 426 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1870 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1871 426 : FastPathStrongRelationLocks->count[fasthashcode]--;
1872 426 : locallock->holdsStrongLockCount = false;
1873 426 : StrongLockInProgress = NULL;
1874 426 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1875 : }
1876 :
1877 : /*
1878 : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1879 : * WaitOnLock on.
1880 : *
1881 : * proc.c needs this for the case where we are booted off the lock by
1882 : * timeout, but discover that someone granted us the lock anyway.
1883 : *
1884 : * We could just export GrantLockLocal, but that would require including
1885 : * resowner.h in lock.h, which creates circularity.
1886 : */
1887 : void
1888 0 : GrantAwaitedLock(void)
1889 : {
1890 0 : GrantLockLocal(awaitedLock, awaitedOwner);
1891 0 : }
1892 :
1893 : /*
1894 : * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1895 : */
1896 : LOCALLOCK *
1897 908042 : GetAwaitedLock(void)
1898 : {
1899 908042 : return awaitedLock;
1900 : }
1901 :
1902 : /*
1903 : * ResetAwaitedLock -- Forget that we are waiting on a lock.
1904 : */
1905 : void
1906 84 : ResetAwaitedLock(void)
1907 : {
1908 84 : awaitedLock = NULL;
1909 84 : }
1910 :
1911 : /*
1912 : * MarkLockClear -- mark an acquired lock as "clear"
1913 : *
1914 : * This means that we know we have absorbed all sinval messages that other
1915 : * sessions generated before we acquired this lock, and so we can confidently
1916 : * assume we know about any catalog changes protected by this lock.
1917 : */
1918 : void
1919 34555172 : MarkLockClear(LOCALLOCK *locallock)
1920 : {
1921 : Assert(locallock->nLocks > 0);
1922 34555172 : locallock->lockCleared = true;
1923 34555172 : }
1924 :
1925 : /*
1926 : * WaitOnLock -- wait to acquire a lock
1927 : *
1928 : * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1929 : */
1930 : static ProcWaitStatus
1931 2532 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1932 : {
1933 : ProcWaitStatus result;
1934 :
1935 : TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1936 : locallock->tag.lock.locktag_field2,
1937 : locallock->tag.lock.locktag_field3,
1938 : locallock->tag.lock.locktag_field4,
1939 : locallock->tag.lock.locktag_type,
1940 : locallock->tag.mode);
1941 :
1942 : /* adjust the process title to indicate that it's waiting */
1943 2532 : set_ps_display_suffix("waiting");
1944 :
1945 : /*
1946 : * Record the fact that we are waiting for a lock, so that
1947 : * LockErrorCleanup will clean up if cancel/die happens.
1948 : */
1949 2532 : awaitedLock = locallock;
1950 2532 : awaitedOwner = owner;
1951 :
1952 : /*
1953 : * NOTE: Think not to put any shared-state cleanup after the call to
1954 : * ProcSleep, in either the normal or failure path. The lock state must
1955 : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1956 : * waiting for the lock. This is necessary because of the possibility
1957 : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1958 : * grants us the lock, but before we've noticed it. Hence, after granting,
1959 : * the locktable state must fully reflect the fact that we own the lock;
1960 : * we can't do additional work on return.
1961 : *
1962 : * We can and do use a PG_TRY block to try to clean up after failure, but
1963 : * this still has a major limitation: elog(FATAL) can occur while waiting
1964 : * (eg, a "die" interrupt), and then control won't come back here. So all
1965 : * cleanup of essential state should happen in LockErrorCleanup, not here.
1966 : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1967 : * is unimportant if the process exits.
1968 : */
1969 2532 : PG_TRY();
1970 : {
1971 2532 : result = ProcSleep(locallock);
1972 : }
1973 72 : PG_CATCH();
1974 : {
1975 : /* In this path, awaitedLock remains set until LockErrorCleanup */
1976 :
1977 : /* reset ps display to remove the suffix */
1978 72 : set_ps_display_remove_suffix();
1979 :
1980 : /* and propagate the error */
1981 72 : PG_RE_THROW();
1982 : }
1983 2448 : PG_END_TRY();
1984 :
1985 : /*
1986 : * We no longer want LockErrorCleanup to do anything.
1987 : */
1988 2448 : awaitedLock = NULL;
1989 :
1990 : /* reset ps display to remove the suffix */
1991 2448 : set_ps_display_remove_suffix();
1992 :
1993 : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
1994 : locallock->tag.lock.locktag_field2,
1995 : locallock->tag.lock.locktag_field3,
1996 : locallock->tag.lock.locktag_field4,
1997 : locallock->tag.lock.locktag_type,
1998 : locallock->tag.mode);
1999 :
2000 2448 : return result;
2001 : }
2002 :
2003 : /*
2004 : * Remove a proc from the wait-queue it is on (caller must know it is on one).
2005 : * This is only used when the proc has failed to get the lock, so we set its
2006 : * waitStatus to PROC_WAIT_STATUS_ERROR.
2007 : *
2008 : * Appropriate partition lock must be held by caller. Also, caller is
2009 : * responsible for signaling the proc if needed.
2010 : *
2011 : * NB: this does not clean up any locallock object that may exist for the lock.
2012 : */
2013 : void
2014 94 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
2015 : {
2016 94 : LOCK *waitLock = proc->waitLock;
2017 94 : PROCLOCK *proclock = proc->waitProcLock;
2018 94 : LOCKMODE lockmode = proc->waitLockMode;
2019 94 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
2020 :
2021 : /* Make sure proc is waiting */
2022 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
2023 : Assert(proc->links.next != NULL);
2024 : Assert(waitLock);
2025 : Assert(!dclist_is_empty(&waitLock->waitProcs));
2026 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
2027 :
2028 : /* Remove proc from lock's wait queue */
2029 94 : dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2030 :
2031 : /* Undo increments of request counts by waiting process */
2032 : Assert(waitLock->nRequested > 0);
2033 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
2034 94 : waitLock->nRequested--;
2035 : Assert(waitLock->requested[lockmode] > 0);
2036 94 : waitLock->requested[lockmode]--;
2037 : /* don't forget to clear waitMask bit if appropriate */
2038 94 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2039 92 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2040 :
2041 : /* Clean up the proc's own state, and pass it the ok/fail signal */
2042 94 : proc->waitLock = NULL;
2043 94 : proc->waitProcLock = NULL;
2044 94 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
2045 :
2046 : /*
2047 : * Delete the proclock immediately if it represents no already-held locks.
2048 : * (This must happen now because if the owner of the lock decides to
2049 : * release it, and the requested/granted counts then go to zero,
2050 : * LockRelease expects there to be no remaining proclocks.) Then see if
2051 : * any other waiters for the lock can be woken up now.
2052 : */
2053 94 : CleanUpLock(waitLock, proclock,
2054 : LockMethods[lockmethodid], hashcode,
2055 : true);
2056 94 : }
2057 :
2058 : /*
2059 : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2060 : * Release a session lock if 'sessionLock' is true, else release a
2061 : * regular transaction lock.
2062 : *
2063 : * Side Effects: find any waiting processes that are now wakable,
2064 : * grant them their requested locks and awaken them.
2065 : * (We have to grant the lock here to avoid a race between
2066 : * the waking process and any new process to
2067 : * come along and request the lock.)
2068 : */
2069 : bool
2070 35811270 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2071 : {
2072 35811270 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2073 : LockMethod lockMethodTable;
2074 : LOCALLOCKTAG localtag;
2075 : LOCALLOCK *locallock;
2076 : LOCK *lock;
2077 : PROCLOCK *proclock;
2078 : LWLock *partitionLock;
2079 : bool wakeupNeeded;
2080 :
2081 35811270 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2082 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2083 35811270 : lockMethodTable = LockMethods[lockmethodid];
2084 35811270 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2085 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2086 :
2087 : #ifdef LOCK_DEBUG
2088 : if (LOCK_DEBUG_ENABLED(locktag))
2089 : elog(LOG, "LockRelease: lock [%u,%u] %s",
2090 : locktag->locktag_field1, locktag->locktag_field2,
2091 : lockMethodTable->lockModeNames[lockmode]);
2092 : #endif
2093 :
2094 : /*
2095 : * Find the LOCALLOCK entry for this lock and lockmode
2096 : */
2097 35811270 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2098 35811270 : localtag.lock = *locktag;
2099 35811270 : localtag.mode = lockmode;
2100 :
2101 35811270 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2102 : &localtag,
2103 : HASH_FIND, NULL);
2104 :
2105 : /*
2106 : * let the caller print its own error message, too. Do not ereport(ERROR).
2107 : */
2108 35811270 : if (!locallock || locallock->nLocks <= 0)
2109 : {
2110 26 : elog(WARNING, "you don't own a lock of type %s",
2111 : lockMethodTable->lockModeNames[lockmode]);
2112 26 : return false;
2113 : }
2114 :
2115 : /*
2116 : * Decrease the count for the resource owner.
2117 : */
2118 : {
2119 35811244 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2120 : ResourceOwner owner;
2121 : int i;
2122 :
2123 : /* Identify owner for lock */
2124 35811244 : if (sessionLock)
2125 170530 : owner = NULL;
2126 : else
2127 35640714 : owner = CurrentResourceOwner;
2128 :
2129 35812940 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2130 : {
2131 35812916 : if (lockOwners[i].owner == owner)
2132 : {
2133 : Assert(lockOwners[i].nLocks > 0);
2134 35811220 : if (--lockOwners[i].nLocks == 0)
2135 : {
2136 34486782 : if (owner != NULL)
2137 34316312 : ResourceOwnerForgetLock(owner, locallock);
2138 : /* compact out unused slot */
2139 34486782 : locallock->numLockOwners--;
2140 34486782 : if (i < locallock->numLockOwners)
2141 126 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2142 : }
2143 35811220 : break;
2144 : }
2145 : }
2146 35811244 : if (i < 0)
2147 : {
2148 : /* don't release a lock belonging to another owner */
2149 24 : elog(WARNING, "you don't own a lock of type %s",
2150 : lockMethodTable->lockModeNames[lockmode]);
2151 24 : return false;
2152 : }
2153 : }
2154 :
2155 : /*
2156 : * Decrease the total local count. If we're still holding the lock, we're
2157 : * done.
2158 : */
2159 35811220 : locallock->nLocks--;
2160 :
2161 35811220 : if (locallock->nLocks > 0)
2162 2158556 : return true;
2163 :
2164 : /*
2165 : * At this point we can no longer suppose we are clear of invalidation
2166 : * messages related to this lock. Although we'll delete the LOCALLOCK
2167 : * object before any intentional return from this routine, it seems worth
2168 : * the trouble to explicitly reset lockCleared right now, just in case
2169 : * some error prevents us from deleting the LOCALLOCK.
2170 : */
2171 33652664 : locallock->lockCleared = false;
2172 :
2173 : /* Attempt fast release of any lock eligible for the fast path. */
2174 33652664 : if (EligibleForRelationFastPath(locktag, lockmode) &&
2175 31022270 : FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2176 : {
2177 : bool released;
2178 :
2179 : /*
2180 : * We might not find the lock here, even if we originally entered it
2181 : * here. Another backend may have moved it to the main table.
2182 : */
2183 30574576 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2184 30574576 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2185 : lockmode);
2186 30574576 : LWLockRelease(&MyProc->fpInfoLock);
2187 30574576 : if (released)
2188 : {
2189 30104164 : RemoveLocalLock(locallock);
2190 30104164 : return true;
2191 : }
2192 : }
2193 :
2194 : /*
2195 : * Otherwise we've got to mess with the shared lock table.
2196 : */
2197 3548500 : partitionLock = LockHashPartitionLock(locallock->hashcode);
2198 :
2199 3548500 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2200 :
2201 : /*
2202 : * Normally, we don't need to re-find the lock or proclock, since we kept
2203 : * their addresses in the locallock table, and they couldn't have been
2204 : * removed while we were holding a lock on them. But it's possible that
2205 : * the lock was taken fast-path and has since been moved to the main hash
2206 : * table by another backend, in which case we will need to look up the
2207 : * objects here. We assume the lock field is NULL if so.
2208 : */
2209 3548500 : lock = locallock->lock;
2210 3548500 : if (!lock)
2211 : {
2212 : PROCLOCKTAG proclocktag;
2213 :
2214 : Assert(EligibleForRelationFastPath(locktag, lockmode));
2215 12 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2216 : locktag,
2217 : locallock->hashcode,
2218 : HASH_FIND,
2219 : NULL);
2220 12 : if (!lock)
2221 0 : elog(ERROR, "failed to re-find shared lock object");
2222 12 : locallock->lock = lock;
2223 :
2224 12 : proclocktag.myLock = lock;
2225 12 : proclocktag.myProc = MyProc;
2226 12 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2227 : &proclocktag,
2228 : HASH_FIND,
2229 : NULL);
2230 12 : if (!locallock->proclock)
2231 0 : elog(ERROR, "failed to re-find shared proclock object");
2232 : }
2233 : LOCK_PRINT("LockRelease: found", lock, lockmode);
2234 3548500 : proclock = locallock->proclock;
2235 : PROCLOCK_PRINT("LockRelease: found", proclock);
2236 :
2237 : /*
2238 : * Double-check that we are actually holding a lock of the type we want to
2239 : * release.
2240 : */
2241 3548500 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2242 : {
2243 : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2244 0 : LWLockRelease(partitionLock);
2245 0 : elog(WARNING, "you don't own a lock of type %s",
2246 : lockMethodTable->lockModeNames[lockmode]);
2247 0 : RemoveLocalLock(locallock);
2248 0 : return false;
2249 : }
2250 :
2251 : /*
2252 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2253 : */
2254 3548500 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2255 :
2256 3548500 : CleanUpLock(lock, proclock,
2257 : lockMethodTable, locallock->hashcode,
2258 : wakeupNeeded);
2259 :
2260 3548500 : LWLockRelease(partitionLock);
2261 :
2262 3548500 : RemoveLocalLock(locallock);
2263 3548500 : return true;
2264 : }
2265 :
2266 : /*
2267 : * LockReleaseAll -- Release all locks of the specified lock method that
2268 : * are held by the current process.
2269 : *
2270 : * Well, not necessarily *all* locks. The available behaviors are:
2271 : * allLocks == true: release all locks including session locks.
2272 : * allLocks == false: release all non-session locks.
2273 : */
2274 : void
2275 1732960 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2276 : {
2277 : HASH_SEQ_STATUS status;
2278 : LockMethod lockMethodTable;
2279 : int i,
2280 : numLockModes;
2281 : LOCALLOCK *locallock;
2282 : LOCK *lock;
2283 : int partition;
2284 1732960 : bool have_fast_path_lwlock = false;
2285 :
2286 1732960 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2287 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2288 1732960 : lockMethodTable = LockMethods[lockmethodid];
2289 :
2290 : #ifdef LOCK_DEBUG
2291 : if (*(lockMethodTable->trace_flag))
2292 : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2293 : #endif
2294 :
2295 : /*
2296 : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2297 : * the only way that the lock we hold on our own VXID can ever get
2298 : * released: it is always and only released when a toplevel transaction
2299 : * ends.
2300 : */
2301 1732960 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2302 849252 : VirtualXactLockTableCleanup();
2303 :
2304 1732960 : numLockModes = lockMethodTable->numLockModes;
2305 :
2306 : /*
2307 : * First we run through the locallock table and get rid of unwanted
2308 : * entries, then we scan the process's proclocks and get rid of those. We
2309 : * do this separately because we may have multiple locallock entries
2310 : * pointing to the same proclock, and we daren't end up with any dangling
2311 : * pointers. Fast-path locks are cleaned up during the locallock table
2312 : * scan, though.
2313 : */
2314 1732960 : hash_seq_init(&status, LockMethodLocalHash);
2315 :
2316 4204472 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2317 : {
2318 : /*
2319 : * If the LOCALLOCK entry is unused, something must've gone wrong
2320 : * while trying to acquire this lock. Just forget the local entry.
2321 : */
2322 2471512 : if (locallock->nLocks == 0)
2323 : {
2324 94 : RemoveLocalLock(locallock);
2325 94 : continue;
2326 : }
2327 :
2328 : /* Ignore items that are not of the lockmethod to be removed */
2329 2471418 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2330 147884 : continue;
2331 :
2332 : /*
2333 : * If we are asked to release all locks, we can just zap the entry.
2334 : * Otherwise, must scan to see if there are session locks. We assume
2335 : * there is at most one lockOwners entry for session locks.
2336 : */
2337 2323534 : if (!allLocks)
2338 : {
2339 2144682 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2340 :
2341 : /* If session lock is above array position 0, move it down to 0 */
2342 4416560 : for (i = 0; i < locallock->numLockOwners; i++)
2343 : {
2344 2271878 : if (lockOwners[i].owner == NULL)
2345 147298 : lockOwners[0] = lockOwners[i];
2346 : else
2347 2124580 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2348 : }
2349 :
2350 2144682 : if (locallock->numLockOwners > 0 &&
2351 2144682 : lockOwners[0].owner == NULL &&
2352 147298 : lockOwners[0].nLocks > 0)
2353 : {
2354 : /* Fix the locallock to show just the session locks */
2355 147298 : locallock->nLocks = lockOwners[0].nLocks;
2356 147298 : locallock->numLockOwners = 1;
2357 : /* We aren't deleting this locallock, so done */
2358 147298 : continue;
2359 : }
2360 : else
2361 1997384 : locallock->numLockOwners = 0;
2362 : }
2363 :
2364 : #ifdef USE_ASSERT_CHECKING
2365 :
2366 : /*
2367 : * Tuple locks are currently held only for short durations within a
2368 : * transaction. Check that we didn't forget to release one.
2369 : */
2370 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2371 : elog(WARNING, "tuple lock held at commit");
2372 : #endif
2373 :
2374 : /*
2375 : * If the lock or proclock pointers are NULL, this lock was taken via
2376 : * the relation fast-path (and is not known to have been transferred).
2377 : */
2378 2176236 : if (locallock->proclock == NULL || locallock->lock == NULL)
2379 : {
2380 1092712 : LOCKMODE lockmode = locallock->tag.mode;
2381 : Oid relid;
2382 :
2383 : /* Verify that a fast-path lock is what we've got. */
2384 1092712 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2385 0 : elog(PANIC, "locallock table corrupted");
2386 :
2387 : /*
2388 : * If we don't currently hold the LWLock that protects our
2389 : * fast-path data structures, we must acquire it before attempting
2390 : * to release the lock via the fast-path. We will continue to
2391 : * hold the LWLock until we're done scanning the locallock table,
2392 : * unless we hit a transferred fast-path lock. (XXX is this
2393 : * really such a good idea? There could be a lot of entries ...)
2394 : */
2395 1092712 : if (!have_fast_path_lwlock)
2396 : {
2397 368316 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2398 368316 : have_fast_path_lwlock = true;
2399 : }
2400 :
2401 : /* Attempt fast-path release. */
2402 1092712 : relid = locallock->tag.lock.locktag_field2;
2403 1092712 : if (FastPathUnGrantRelationLock(relid, lockmode))
2404 : {
2405 1090566 : RemoveLocalLock(locallock);
2406 1090566 : continue;
2407 : }
2408 :
2409 : /*
2410 : * Our lock, originally taken via the fast path, has been
2411 : * transferred to the main lock table. That's going to require
2412 : * some extra work, so release our fast-path lock before starting.
2413 : */
2414 2146 : LWLockRelease(&MyProc->fpInfoLock);
2415 2146 : have_fast_path_lwlock = false;
2416 :
2417 : /*
2418 : * Now dump the lock. We haven't got a pointer to the LOCK or
2419 : * PROCLOCK in this case, so we have to handle this a bit
2420 : * differently than a normal lock release. Unfortunately, this
2421 : * requires an extra LWLock acquire-and-release cycle on the
2422 : * partitionLock, but hopefully it shouldn't happen often.
2423 : */
2424 2146 : LockRefindAndRelease(lockMethodTable, MyProc,
2425 : &locallock->tag.lock, lockmode, false);
2426 2146 : RemoveLocalLock(locallock);
2427 2146 : continue;
2428 : }
2429 :
2430 : /* Mark the proclock to show we need to release this lockmode */
2431 1083524 : if (locallock->nLocks > 0)
2432 1083524 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2433 :
2434 : /* And remove the locallock hashtable entry */
2435 1083524 : RemoveLocalLock(locallock);
2436 : }
2437 :
2438 : /* Done with the fast-path data structures */
2439 1732960 : if (have_fast_path_lwlock)
2440 366170 : LWLockRelease(&MyProc->fpInfoLock);
2441 :
2442 : /*
2443 : * Now, scan each lock partition separately.
2444 : */
2445 29460320 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2446 : {
2447 : LWLock *partitionLock;
2448 27727360 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2449 : dlist_mutable_iter proclock_iter;
2450 :
2451 27727360 : partitionLock = LockHashPartitionLockByIndex(partition);
2452 :
2453 : /*
2454 : * If the proclock list for this partition is empty, we can skip
2455 : * acquiring the partition lock. This optimization is trickier than
2456 : * it looks, because another backend could be in process of adding
2457 : * something to our proclock list due to promoting one of our
2458 : * fast-path locks. However, any such lock must be one that we
2459 : * decided not to delete above, so it's okay to skip it again now;
2460 : * we'd just decide not to delete it again. We must, however, be
2461 : * careful to re-fetch the list header once we've acquired the
2462 : * partition lock, to be sure we have a valid, up-to-date pointer.
2463 : * (There is probably no significant risk if pointer fetch/store is
2464 : * atomic, but we don't wish to assume that.)
2465 : *
2466 : * XXX This argument assumes that the locallock table correctly
2467 : * represents all of our fast-path locks. While allLocks mode
2468 : * guarantees to clean up all of our normal locks regardless of the
2469 : * locallock situation, we lose that guarantee for fast-path locks.
2470 : * This is not ideal.
2471 : */
2472 27727360 : if (dlist_is_empty(procLocks))
2473 26661324 : continue; /* needn't examine this partition */
2474 :
2475 1066036 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2476 :
2477 2369344 : dlist_foreach_modify(proclock_iter, procLocks)
2478 : {
2479 1303308 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2480 1303308 : bool wakeupNeeded = false;
2481 :
2482 : Assert(proclock->tag.myProc == MyProc);
2483 :
2484 1303308 : lock = proclock->tag.myLock;
2485 :
2486 : /* Ignore items that are not of the lockmethod to be removed */
2487 1303308 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2488 147878 : continue;
2489 :
2490 : /*
2491 : * In allLocks mode, force release of all locks even if locallock
2492 : * table had problems
2493 : */
2494 1155430 : if (allLocks)
2495 100944 : proclock->releaseMask = proclock->holdMask;
2496 : else
2497 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2498 :
2499 : /*
2500 : * Ignore items that have nothing to be released, unless they have
2501 : * holdMask == 0 and are therefore recyclable
2502 : */
2503 1155430 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2504 145914 : continue;
2505 :
2506 : PROCLOCK_PRINT("LockReleaseAll", proclock);
2507 : LOCK_PRINT("LockReleaseAll", lock, 0);
2508 : Assert(lock->nRequested >= 0);
2509 : Assert(lock->nGranted >= 0);
2510 : Assert(lock->nGranted <= lock->nRequested);
2511 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2512 :
2513 : /*
2514 : * Release the previously-marked lock modes
2515 : */
2516 9085644 : for (i = 1; i <= numLockModes; i++)
2517 : {
2518 8076128 : if (proclock->releaseMask & LOCKBIT_ON(i))
2519 1083524 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2520 : lockMethodTable);
2521 : }
2522 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2523 : Assert(lock->nGranted <= lock->nRequested);
2524 : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2525 :
2526 1009516 : proclock->releaseMask = 0;
2527 :
2528 : /* CleanUpLock will wake up waiters if needed. */
2529 1009516 : CleanUpLock(lock, proclock,
2530 : lockMethodTable,
2531 1009516 : LockTagHashCode(&lock->tag),
2532 : wakeupNeeded);
2533 : } /* loop over PROCLOCKs within this partition */
2534 :
2535 1066036 : LWLockRelease(partitionLock);
2536 : } /* loop over partitions */
2537 :
2538 : #ifdef LOCK_DEBUG
2539 : if (*(lockMethodTable->trace_flag))
2540 : elog(LOG, "LockReleaseAll done");
2541 : #endif
2542 1732960 : }
2543 :
2544 : /*
2545 : * LockReleaseSession -- Release all session locks of the specified lock method
2546 : * that are held by the current process.
2547 : */
2548 : void
2549 238 : LockReleaseSession(LOCKMETHODID lockmethodid)
2550 : {
2551 : HASH_SEQ_STATUS status;
2552 : LOCALLOCK *locallock;
2553 :
2554 238 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2555 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2556 :
2557 238 : hash_seq_init(&status, LockMethodLocalHash);
2558 :
2559 452 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2560 : {
2561 : /* Ignore items that are not of the specified lock method */
2562 214 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2563 20 : continue;
2564 :
2565 194 : ReleaseLockIfHeld(locallock, true);
2566 : }
2567 238 : }
2568 :
2569 : /*
2570 : * LockReleaseCurrentOwner
2571 : * Release all locks belonging to CurrentResourceOwner
2572 : *
2573 : * If the caller knows what those locks are, it can pass them as an array.
2574 : * That speeds up the call significantly, when a lot of locks are held.
2575 : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2576 : * table to find them.
2577 : */
2578 : void
2579 10640 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2580 : {
2581 10640 : if (locallocks == NULL)
2582 : {
2583 : HASH_SEQ_STATUS status;
2584 : LOCALLOCK *locallock;
2585 :
2586 8 : hash_seq_init(&status, LockMethodLocalHash);
2587 :
2588 544 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2589 536 : ReleaseLockIfHeld(locallock, false);
2590 : }
2591 : else
2592 : {
2593 : int i;
2594 :
2595 15702 : for (i = nlocks - 1; i >= 0; i--)
2596 5070 : ReleaseLockIfHeld(locallocks[i], false);
2597 : }
2598 10640 : }
2599 :
2600 : /*
2601 : * ReleaseLockIfHeld
2602 : * Release any session-level locks on this lockable object if sessionLock
2603 : * is true; else, release any locks held by CurrentResourceOwner.
2604 : *
2605 : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2606 : * locks), but without refactoring LockRelease() we cannot support releasing
2607 : * locks belonging to resource owners other than CurrentResourceOwner.
2608 : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2609 : * do a hashtable lookup of the locallock, too. However, currently this
2610 : * function isn't used heavily enough to justify refactoring for its
2611 : * convenience.
2612 : */
2613 : static void
2614 5800 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2615 : {
2616 : ResourceOwner owner;
2617 : LOCALLOCKOWNER *lockOwners;
2618 : int i;
2619 :
2620 : /* Identify owner for lock (must match LockRelease!) */
2621 5800 : if (sessionLock)
2622 194 : owner = NULL;
2623 : else
2624 5606 : owner = CurrentResourceOwner;
2625 :
2626 : /* Scan to see if there are any locks belonging to the target owner */
2627 5800 : lockOwners = locallock->lockOwners;
2628 6186 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2629 : {
2630 5800 : if (lockOwners[i].owner == owner)
2631 : {
2632 : Assert(lockOwners[i].nLocks > 0);
2633 5414 : if (lockOwners[i].nLocks < locallock->nLocks)
2634 : {
2635 : /*
2636 : * We will still hold this lock after forgetting this
2637 : * ResourceOwner.
2638 : */
2639 1396 : locallock->nLocks -= lockOwners[i].nLocks;
2640 : /* compact out unused slot */
2641 1396 : locallock->numLockOwners--;
2642 1396 : if (owner != NULL)
2643 1396 : ResourceOwnerForgetLock(owner, locallock);
2644 1396 : if (i < locallock->numLockOwners)
2645 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2646 : }
2647 : else
2648 : {
2649 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2650 : /* We want to call LockRelease just once */
2651 4018 : lockOwners[i].nLocks = 1;
2652 4018 : locallock->nLocks = 1;
2653 4018 : if (!LockRelease(&locallock->tag.lock,
2654 : locallock->tag.mode,
2655 : sessionLock))
2656 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2657 : }
2658 5414 : break;
2659 : }
2660 : }
2661 5800 : }
2662 :
2663 : /*
2664 : * LockReassignCurrentOwner
2665 : * Reassign all locks belonging to CurrentResourceOwner to belong
2666 : * to its parent resource owner.
2667 : *
2668 : * If the caller knows what those locks are, it can pass them as an array.
2669 : * That speeds up the call significantly, when a lot of locks are held
2670 : * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2671 : * and we'll traverse through our hash table to find them.
2672 : */
2673 : void
2674 731262 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2675 : {
2676 731262 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2677 :
2678 : Assert(parent != NULL);
2679 :
2680 731262 : if (locallocks == NULL)
2681 : {
2682 : HASH_SEQ_STATUS status;
2683 : LOCALLOCK *locallock;
2684 :
2685 7018 : hash_seq_init(&status, LockMethodLocalHash);
2686 :
2687 210674 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2688 203656 : LockReassignOwner(locallock, parent);
2689 : }
2690 : else
2691 : {
2692 : int i;
2693 :
2694 1552966 : for (i = nlocks - 1; i >= 0; i--)
2695 828722 : LockReassignOwner(locallocks[i], parent);
2696 : }
2697 731262 : }
2698 :
2699 : /*
2700 : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2701 : * CurrentResourceOwner to its parent.
2702 : */
2703 : static void
2704 1032378 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2705 : {
2706 : LOCALLOCKOWNER *lockOwners;
2707 : int i;
2708 1032378 : int ic = -1;
2709 1032378 : int ip = -1;
2710 :
2711 : /*
2712 : * Scan to see if there are any locks belonging to current owner or its
2713 : * parent
2714 : */
2715 1032378 : lockOwners = locallock->lockOwners;
2716 2418184 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2717 : {
2718 1385806 : if (lockOwners[i].owner == CurrentResourceOwner)
2719 1001888 : ic = i;
2720 383918 : else if (lockOwners[i].owner == parent)
2721 298090 : ip = i;
2722 : }
2723 :
2724 1032378 : if (ic < 0)
2725 30490 : return; /* no current locks */
2726 :
2727 1001888 : if (ip < 0)
2728 : {
2729 : /* Parent has no slot, so just give it the child's slot */
2730 734226 : lockOwners[ic].owner = parent;
2731 734226 : ResourceOwnerRememberLock(parent, locallock);
2732 : }
2733 : else
2734 : {
2735 : /* Merge child's count with parent's */
2736 267662 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2737 : /* compact out unused slot */
2738 267662 : locallock->numLockOwners--;
2739 267662 : if (ic < locallock->numLockOwners)
2740 1640 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2741 : }
2742 1001888 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2743 : }
2744 :
2745 : /*
2746 : * FastPathGrantRelationLock
2747 : * Grant lock using per-backend fast-path array, if there is space.
2748 : */
2749 : static bool
2750 31197486 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2751 : {
2752 : uint32 i;
2753 31197486 : uint32 unused_slot = FastPathLockSlotsPerBackend();
2754 :
2755 : /* fast-path group the lock belongs to */
2756 31197486 : uint32 group = FAST_PATH_REL_GROUP(relid);
2757 :
2758 : /* Scan for existing entry for this relid, remembering empty slot. */
2759 529170230 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2760 : {
2761 : /* index into the whole per-backend array */
2762 498816978 : uint32 f = FAST_PATH_SLOT(group, i);
2763 :
2764 498816978 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2765 481388888 : unused_slot = f;
2766 17428090 : else if (MyProc->fpRelId[f] == relid)
2767 : {
2768 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2769 844234 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2770 844234 : return true;
2771 : }
2772 : }
2773 :
2774 : /* If no existing entry, use any empty slot. */
2775 30353252 : if (unused_slot < FastPathLockSlotsPerBackend())
2776 : {
2777 30353252 : MyProc->fpRelId[unused_slot] = relid;
2778 30353252 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2779 30353252 : ++FastPathLocalUseCounts[group];
2780 30353252 : return true;
2781 : }
2782 :
2783 : /* No existing entry, and no empty slot. */
2784 0 : return false;
2785 : }
2786 :
2787 : /*
2788 : * FastPathUnGrantRelationLock
2789 : * Release fast-path lock, if present. Update backend-private local
2790 : * use count, while we're at it.
2791 : */
2792 : static bool
2793 31667288 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2794 : {
2795 : uint32 i;
2796 31667288 : bool result = false;
2797 :
2798 : /* fast-path group the lock belongs to */
2799 31667288 : uint32 group = FAST_PATH_REL_GROUP(relid);
2800 :
2801 31667288 : FastPathLocalUseCounts[group] = 0;
2802 538343896 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2803 : {
2804 : /* index into the whole per-backend array */
2805 506676608 : uint32 f = FAST_PATH_SLOT(group, i);
2806 :
2807 506676608 : if (MyProc->fpRelId[f] == relid
2808 43029960 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2809 : {
2810 : Assert(!result);
2811 31194730 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2812 31194730 : result = true;
2813 : /* we continue iterating so as to update FastPathLocalUseCount */
2814 : }
2815 506676608 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2816 24391750 : ++FastPathLocalUseCounts[group];
2817 : }
2818 31667288 : return result;
2819 : }
2820 :
2821 : /*
2822 : * FastPathTransferRelationLocks
2823 : * Transfer locks matching the given lock tag from per-backend fast-path
2824 : * arrays to the shared hash table.
2825 : *
2826 : * Returns true if successful, false if ran out of shared memory.
2827 : */
2828 : static bool
2829 378800 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2830 : uint32 hashcode)
2831 : {
2832 378800 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
2833 378800 : Oid relid = locktag->locktag_field2;
2834 : uint32 i;
2835 :
2836 : /* fast-path group the lock belongs to */
2837 378800 : uint32 group = FAST_PATH_REL_GROUP(relid);
2838 :
2839 : /*
2840 : * Every PGPROC that can potentially hold a fast-path lock is present in
2841 : * ProcGlobal->allProcs. Prepared transactions are not, but any
2842 : * outstanding fast-path locks held by prepared transactions are
2843 : * transferred to the main lock table.
2844 : */
2845 55471324 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2846 : {
2847 55092524 : PGPROC *proc = &ProcGlobal->allProcs[i];
2848 : uint32 j;
2849 :
2850 55092524 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2851 :
2852 : /*
2853 : * If the target backend isn't referencing the same database as the
2854 : * lock, then we needn't examine the individual relation IDs at all;
2855 : * none of them can be relevant.
2856 : *
2857 : * proc->databaseId is set at backend startup time and never changes
2858 : * thereafter, so it might be safe to perform this test before
2859 : * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2860 : * assume that if the target backend holds any fast-path locks, it
2861 : * must have performed a memory-fencing operation (in particular, an
2862 : * LWLock acquisition) since setting proc->databaseId. However, it's
2863 : * less clear that our backend is certain to have performed a memory
2864 : * fencing operation since the other backend set proc->databaseId. So
2865 : * for now, we test it after acquiring the LWLock just to be safe.
2866 : *
2867 : * Also skip groups without any registered fast-path locks.
2868 : */
2869 55092524 : if (proc->databaseId != locktag->locktag_field1 ||
2870 18549644 : proc->fpLockBits[group] == 0)
2871 : {
2872 54782546 : LWLockRelease(&proc->fpInfoLock);
2873 54782546 : continue;
2874 : }
2875 :
2876 5267430 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2877 : {
2878 : uint32 lockmode;
2879 :
2880 : /* index into the whole per-backend array */
2881 4959514 : uint32 f = FAST_PATH_SLOT(group, j);
2882 :
2883 : /* Look for an allocated slot matching the given relid. */
2884 4959514 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2885 4957452 : continue;
2886 :
2887 : /* Find or create lock object. */
2888 2062 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2889 8248 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2890 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2891 6186 : ++lockmode)
2892 : {
2893 : PROCLOCK *proclock;
2894 :
2895 6186 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2896 4012 : continue;
2897 2174 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2898 : hashcode, lockmode);
2899 2174 : if (!proclock)
2900 : {
2901 0 : LWLockRelease(partitionLock);
2902 0 : LWLockRelease(&proc->fpInfoLock);
2903 0 : return false;
2904 : }
2905 2174 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2906 2174 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2907 : }
2908 2062 : LWLockRelease(partitionLock);
2909 :
2910 : /* No need to examine remaining slots. */
2911 2062 : break;
2912 : }
2913 309978 : LWLockRelease(&proc->fpInfoLock);
2914 : }
2915 378800 : return true;
2916 : }
2917 :
2918 : /*
2919 : * FastPathGetRelationLockEntry
2920 : * Return the PROCLOCK for a lock originally taken via the fast-path,
2921 : * transferring it to the primary lock table if necessary.
2922 : *
2923 : * Note: caller takes care of updating the locallock object.
2924 : */
2925 : static PROCLOCK *
2926 598 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2927 : {
2928 598 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2929 598 : LOCKTAG *locktag = &locallock->tag.lock;
2930 598 : PROCLOCK *proclock = NULL;
2931 598 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2932 598 : Oid relid = locktag->locktag_field2;
2933 : uint32 i,
2934 : group;
2935 :
2936 : /* fast-path group the lock belongs to */
2937 598 : group = FAST_PATH_REL_GROUP(relid);
2938 :
2939 598 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2940 :
2941 9572 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2942 : {
2943 : uint32 lockmode;
2944 :
2945 : /* index into the whole per-backend array */
2946 9556 : uint32 f = FAST_PATH_SLOT(group, i);
2947 :
2948 : /* Look for an allocated slot matching the given relid. */
2949 9556 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2950 8974 : continue;
2951 :
2952 : /* If we don't have a lock of the given mode, forget it! */
2953 582 : lockmode = locallock->tag.mode;
2954 582 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2955 0 : break;
2956 :
2957 : /* Find or create lock object. */
2958 582 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2959 :
2960 582 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2961 : locallock->hashcode, lockmode);
2962 582 : if (!proclock)
2963 : {
2964 0 : LWLockRelease(partitionLock);
2965 0 : LWLockRelease(&MyProc->fpInfoLock);
2966 0 : ereport(ERROR,
2967 : (errcode(ERRCODE_OUT_OF_MEMORY),
2968 : errmsg("out of shared memory"),
2969 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
2970 : }
2971 582 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2972 582 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2973 :
2974 582 : LWLockRelease(partitionLock);
2975 :
2976 : /* No need to examine remaining slots. */
2977 582 : break;
2978 : }
2979 :
2980 598 : LWLockRelease(&MyProc->fpInfoLock);
2981 :
2982 : /* Lock may have already been transferred by some other backend. */
2983 598 : if (proclock == NULL)
2984 : {
2985 : LOCK *lock;
2986 : PROCLOCKTAG proclocktag;
2987 : uint32 proclock_hashcode;
2988 :
2989 16 : LWLockAcquire(partitionLock, LW_SHARED);
2990 :
2991 16 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2992 : locktag,
2993 : locallock->hashcode,
2994 : HASH_FIND,
2995 : NULL);
2996 16 : if (!lock)
2997 0 : elog(ERROR, "failed to re-find shared lock object");
2998 :
2999 16 : proclocktag.myLock = lock;
3000 16 : proclocktag.myProc = MyProc;
3001 :
3002 16 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3003 : proclock = (PROCLOCK *)
3004 16 : hash_search_with_hash_value(LockMethodProcLockHash,
3005 : &proclocktag,
3006 : proclock_hashcode,
3007 : HASH_FIND,
3008 : NULL);
3009 16 : if (!proclock)
3010 0 : elog(ERROR, "failed to re-find shared proclock object");
3011 16 : LWLockRelease(partitionLock);
3012 : }
3013 :
3014 598 : return proclock;
3015 : }
3016 :
3017 : /*
3018 : * GetLockConflicts
3019 : * Get an array of VirtualTransactionIds of xacts currently holding locks
3020 : * that would conflict with the specified lock/lockmode.
3021 : * xacts merely awaiting such a lock are NOT reported.
3022 : *
3023 : * The result array is palloc'd and is terminated with an invalid VXID.
3024 : * *countp, if not null, is updated to the number of items set.
3025 : *
3026 : * Of course, the result could be out of date by the time it's returned, so
3027 : * use of this function has to be thought about carefully. Similarly, a
3028 : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3029 : * lock it holds. Existing callers don't care about a locker after that
3030 : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3031 : * pg_xact updates and before releasing locks.
3032 : *
3033 : * Note we never include the current xact's vxid in the result array,
3034 : * since an xact never blocks itself.
3035 : */
3036 : VirtualTransactionId *
3037 2684 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3038 : {
3039 : static VirtualTransactionId *vxids;
3040 2684 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3041 : LockMethod lockMethodTable;
3042 : LOCK *lock;
3043 : LOCKMASK conflictMask;
3044 : dlist_iter proclock_iter;
3045 : PROCLOCK *proclock;
3046 : uint32 hashcode;
3047 : LWLock *partitionLock;
3048 2684 : int count = 0;
3049 2684 : int fast_count = 0;
3050 :
3051 2684 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3052 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3053 2684 : lockMethodTable = LockMethods[lockmethodid];
3054 2684 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
3055 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
3056 :
3057 : /*
3058 : * Allocate memory to store results, and fill with InvalidVXID. We only
3059 : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3060 : * InHotStandby allocate once in TopMemoryContext.
3061 : */
3062 2684 : if (InHotStandby)
3063 : {
3064 8 : if (vxids == NULL)
3065 2 : vxids = (VirtualTransactionId *)
3066 2 : MemoryContextAlloc(TopMemoryContext,
3067 : sizeof(VirtualTransactionId) *
3068 2 : (MaxBackends + max_prepared_xacts + 1));
3069 : }
3070 : else
3071 2676 : vxids = (VirtualTransactionId *)
3072 2676 : palloc0(sizeof(VirtualTransactionId) *
3073 2676 : (MaxBackends + max_prepared_xacts + 1));
3074 :
3075 : /* Compute hash code and partition lock, and look up conflicting modes. */
3076 2684 : hashcode = LockTagHashCode(locktag);
3077 2684 : partitionLock = LockHashPartitionLock(hashcode);
3078 2684 : conflictMask = lockMethodTable->conflictTab[lockmode];
3079 :
3080 : /*
3081 : * Fast path locks might not have been entered in the primary lock table.
3082 : * If the lock we're dealing with could conflict with such a lock, we must
3083 : * examine each backend's fast-path array for conflicts.
3084 : */
3085 2684 : if (ConflictsWithRelationFastPath(locktag, lockmode))
3086 : {
3087 : int i;
3088 2684 : Oid relid = locktag->locktag_field2;
3089 : VirtualTransactionId vxid;
3090 :
3091 : /* fast-path group the lock belongs to */
3092 2684 : uint32 group = FAST_PATH_REL_GROUP(relid);
3093 :
3094 : /*
3095 : * Iterate over relevant PGPROCs. Anything held by a prepared
3096 : * transaction will have been transferred to the primary lock table,
3097 : * so we need not worry about those. This is all a bit fuzzy, because
3098 : * new locks could be taken after we've visited a particular
3099 : * partition, but the callers had better be prepared to deal with that
3100 : * anyway, since the locks could equally well be taken between the
3101 : * time we return the value and the time the caller does something
3102 : * with it.
3103 : */
3104 420132 : for (i = 0; i < ProcGlobal->allProcCount; i++)
3105 : {
3106 417448 : PGPROC *proc = &ProcGlobal->allProcs[i];
3107 : uint32 j;
3108 :
3109 : /* A backend never blocks itself */
3110 417448 : if (proc == MyProc)
3111 2684 : continue;
3112 :
3113 414764 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3114 :
3115 : /*
3116 : * If the target backend isn't referencing the same database as
3117 : * the lock, then we needn't examine the individual relation IDs
3118 : * at all; none of them can be relevant.
3119 : *
3120 : * See FastPathTransferRelationLocks() for discussion of why we do
3121 : * this test after acquiring the lock.
3122 : *
3123 : * Also skip groups without any registered fast-path locks.
3124 : */
3125 414764 : if (proc->databaseId != locktag->locktag_field1 ||
3126 173446 : proc->fpLockBits[group] == 0)
3127 : {
3128 413892 : LWLockRelease(&proc->fpInfoLock);
3129 413892 : continue;
3130 : }
3131 :
3132 14344 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3133 : {
3134 : uint32 lockmask;
3135 :
3136 : /* index into the whole per-backend array */
3137 13928 : uint32 f = FAST_PATH_SLOT(group, j);
3138 :
3139 : /* Look for an allocated slot matching the given relid. */
3140 13928 : if (relid != proc->fpRelId[f])
3141 13472 : continue;
3142 456 : lockmask = FAST_PATH_GET_BITS(proc, f);
3143 456 : if (!lockmask)
3144 0 : continue;
3145 456 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3146 :
3147 : /*
3148 : * There can only be one entry per relation, so if we found it
3149 : * and it doesn't conflict, we can skip the rest of the slots.
3150 : */
3151 456 : if ((lockmask & conflictMask) == 0)
3152 10 : break;
3153 :
3154 : /* Conflict! */
3155 446 : GET_VXID_FROM_PGPROC(vxid, *proc);
3156 :
3157 446 : if (VirtualTransactionIdIsValid(vxid))
3158 444 : vxids[count++] = vxid;
3159 : /* else, xact already committed or aborted */
3160 :
3161 : /* No need to examine remaining slots. */
3162 446 : break;
3163 : }
3164 :
3165 872 : LWLockRelease(&proc->fpInfoLock);
3166 : }
3167 : }
3168 :
3169 : /* Remember how many fast-path conflicts we found. */
3170 2684 : fast_count = count;
3171 :
3172 : /*
3173 : * Look up the lock object matching the tag.
3174 : */
3175 2684 : LWLockAcquire(partitionLock, LW_SHARED);
3176 :
3177 2684 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3178 : locktag,
3179 : hashcode,
3180 : HASH_FIND,
3181 : NULL);
3182 2684 : if (!lock)
3183 : {
3184 : /*
3185 : * If the lock object doesn't exist, there is nothing holding a lock
3186 : * on this lockable object.
3187 : */
3188 140 : LWLockRelease(partitionLock);
3189 140 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3190 140 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3191 140 : if (countp)
3192 0 : *countp = count;
3193 140 : return vxids;
3194 : }
3195 :
3196 : /*
3197 : * Examine each existing holder (or awaiter) of the lock.
3198 : */
3199 5118 : dlist_foreach(proclock_iter, &lock->procLocks)
3200 : {
3201 2574 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3202 :
3203 2574 : if (conflictMask & proclock->holdMask)
3204 : {
3205 2566 : PGPROC *proc = proclock->tag.myProc;
3206 :
3207 : /* A backend never blocks itself */
3208 2566 : if (proc != MyProc)
3209 : {
3210 : VirtualTransactionId vxid;
3211 :
3212 30 : GET_VXID_FROM_PGPROC(vxid, *proc);
3213 :
3214 30 : if (VirtualTransactionIdIsValid(vxid))
3215 : {
3216 : int i;
3217 :
3218 : /* Avoid duplicate entries. */
3219 52 : for (i = 0; i < fast_count; ++i)
3220 22 : if (VirtualTransactionIdEquals(vxids[i], vxid))
3221 0 : break;
3222 30 : if (i >= fast_count)
3223 30 : vxids[count++] = vxid;
3224 : }
3225 : /* else, xact already committed or aborted */
3226 : }
3227 : }
3228 : }
3229 :
3230 2544 : LWLockRelease(partitionLock);
3231 :
3232 2544 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3233 0 : elog(PANIC, "too many conflicting locks found");
3234 :
3235 2544 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3236 2544 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3237 2544 : if (countp)
3238 2538 : *countp = count;
3239 2544 : return vxids;
3240 : }
3241 :
3242 : /*
3243 : * Find a lock in the shared lock table and release it. It is the caller's
3244 : * responsibility to verify that this is a sane thing to do. (For example, it
3245 : * would be bad to release a lock here if there might still be a LOCALLOCK
3246 : * object with pointers to it.)
3247 : *
3248 : * We currently use this in two situations: first, to release locks held by
3249 : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3250 : * to release locks taken via the fast-path, transferred to the main hash
3251 : * table, and then released (see LockReleaseAll).
3252 : */
3253 : static void
3254 4092 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3255 : LOCKTAG *locktag, LOCKMODE lockmode,
3256 : bool decrement_strong_lock_count)
3257 : {
3258 : LOCK *lock;
3259 : PROCLOCK *proclock;
3260 : PROCLOCKTAG proclocktag;
3261 : uint32 hashcode;
3262 : uint32 proclock_hashcode;
3263 : LWLock *partitionLock;
3264 : bool wakeupNeeded;
3265 :
3266 4092 : hashcode = LockTagHashCode(locktag);
3267 4092 : partitionLock = LockHashPartitionLock(hashcode);
3268 :
3269 4092 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3270 :
3271 : /*
3272 : * Re-find the lock object (it had better be there).
3273 : */
3274 4092 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3275 : locktag,
3276 : hashcode,
3277 : HASH_FIND,
3278 : NULL);
3279 4092 : if (!lock)
3280 0 : elog(PANIC, "failed to re-find shared lock object");
3281 :
3282 : /*
3283 : * Re-find the proclock object (ditto).
3284 : */
3285 4092 : proclocktag.myLock = lock;
3286 4092 : proclocktag.myProc = proc;
3287 :
3288 4092 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3289 :
3290 4092 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3291 : &proclocktag,
3292 : proclock_hashcode,
3293 : HASH_FIND,
3294 : NULL);
3295 4092 : if (!proclock)
3296 0 : elog(PANIC, "failed to re-find shared proclock object");
3297 :
3298 : /*
3299 : * Double-check that we are actually holding a lock of the type we want to
3300 : * release.
3301 : */
3302 4092 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3303 : {
3304 : PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3305 0 : LWLockRelease(partitionLock);
3306 0 : elog(WARNING, "you don't own a lock of type %s",
3307 : lockMethodTable->lockModeNames[lockmode]);
3308 0 : return;
3309 : }
3310 :
3311 : /*
3312 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3313 : */
3314 4092 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3315 :
3316 4092 : CleanUpLock(lock, proclock,
3317 : lockMethodTable, hashcode,
3318 : wakeupNeeded);
3319 :
3320 4092 : LWLockRelease(partitionLock);
3321 :
3322 : /*
3323 : * Decrement strong lock count. This logic is needed only for 2PC.
3324 : */
3325 4092 : if (decrement_strong_lock_count
3326 1430 : && ConflictsWithRelationFastPath(locktag, lockmode))
3327 : {
3328 142 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3329 :
3330 142 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3331 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3332 142 : FastPathStrongRelationLocks->count[fasthashcode]--;
3333 142 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3334 : }
3335 : }
3336 :
3337 : /*
3338 : * CheckForSessionAndXactLocks
3339 : * Check to see if transaction holds both session-level and xact-level
3340 : * locks on the same object; if so, throw an error.
3341 : *
3342 : * If we have both session- and transaction-level locks on the same object,
3343 : * PREPARE TRANSACTION must fail. This should never happen with regular
3344 : * locks, since we only take those at session level in some special operations
3345 : * like VACUUM. It's possible to hit this with advisory locks, though.
3346 : *
3347 : * It would be nice if we could keep the session hold and give away the
3348 : * transactional hold to the prepared xact. However, that would require two
3349 : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3350 : * available when it comes time for PostPrepare_Locks to do the deed.
3351 : * So for now, we error out while we can still do so safely.
3352 : *
3353 : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3354 : * we can't implement this check by examining LOCALLOCK entries in isolation.
3355 : * We must build a transient hashtable that is indexed by locktag only.
3356 : */
3357 : static void
3358 580 : CheckForSessionAndXactLocks(void)
3359 : {
3360 : typedef struct
3361 : {
3362 : LOCKTAG lock; /* identifies the lockable object */
3363 : bool sessLock; /* is any lockmode held at session level? */
3364 : bool xactLock; /* is any lockmode held at xact level? */
3365 : } PerLockTagEntry;
3366 :
3367 : HASHCTL hash_ctl;
3368 : HTAB *lockhtab;
3369 : HASH_SEQ_STATUS status;
3370 : LOCALLOCK *locallock;
3371 :
3372 : /* Create a local hash table keyed by LOCKTAG only */
3373 580 : hash_ctl.keysize = sizeof(LOCKTAG);
3374 580 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3375 580 : hash_ctl.hcxt = CurrentMemoryContext;
3376 :
3377 580 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3378 : 256, /* arbitrary initial size */
3379 : &hash_ctl,
3380 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3381 :
3382 : /* Scan local lock table to find entries for each LOCKTAG */
3383 580 : hash_seq_init(&status, LockMethodLocalHash);
3384 :
3385 1982 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3386 : {
3387 1406 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3388 : PerLockTagEntry *hentry;
3389 : bool found;
3390 : int i;
3391 :
3392 : /*
3393 : * Ignore VXID locks. We don't want those to be held by prepared
3394 : * transactions, since they aren't meaningful after a restart.
3395 : */
3396 1406 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3397 0 : continue;
3398 :
3399 : /* Ignore it if we don't actually hold the lock */
3400 1406 : if (locallock->nLocks <= 0)
3401 0 : continue;
3402 :
3403 : /* Otherwise, find or make an entry in lockhtab */
3404 1406 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
3405 1406 : &locallock->tag.lock,
3406 : HASH_ENTER, &found);
3407 1406 : if (!found) /* initialize, if newly created */
3408 1308 : hentry->sessLock = hentry->xactLock = false;
3409 :
3410 : /* Scan to see if we hold lock at session or xact level or both */
3411 2812 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3412 : {
3413 1406 : if (lockOwners[i].owner == NULL)
3414 18 : hentry->sessLock = true;
3415 : else
3416 1388 : hentry->xactLock = true;
3417 : }
3418 :
3419 : /*
3420 : * We can throw error immediately when we see both types of locks; no
3421 : * need to wait around to see if there are more violations.
3422 : */
3423 1406 : if (hentry->sessLock && hentry->xactLock)
3424 4 : ereport(ERROR,
3425 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3426 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3427 : }
3428 :
3429 : /* Success, so clean up */
3430 576 : hash_destroy(lockhtab);
3431 576 : }
3432 :
3433 : /*
3434 : * AtPrepare_Locks
3435 : * Do the preparatory work for a PREPARE: make 2PC state file records
3436 : * for all locks currently held.
3437 : *
3438 : * Session-level locks are ignored, as are VXID locks.
3439 : *
3440 : * For the most part, we don't need to touch shared memory for this ---
3441 : * all the necessary state information is in the locallock table.
3442 : * Fast-path locks are an exception, however: we move any such locks to
3443 : * the main table before allowing PREPARE TRANSACTION to succeed.
3444 : */
3445 : void
3446 580 : AtPrepare_Locks(void)
3447 : {
3448 : HASH_SEQ_STATUS status;
3449 : LOCALLOCK *locallock;
3450 :
3451 : /* First, verify there aren't locks of both xact and session level */
3452 580 : CheckForSessionAndXactLocks();
3453 :
3454 : /* Now do the per-locallock cleanup work */
3455 576 : hash_seq_init(&status, LockMethodLocalHash);
3456 :
3457 1970 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3458 : {
3459 : TwoPhaseLockRecord record;
3460 1394 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3461 : bool haveSessionLock;
3462 : bool haveXactLock;
3463 : int i;
3464 :
3465 : /*
3466 : * Ignore VXID locks. We don't want those to be held by prepared
3467 : * transactions, since they aren't meaningful after a restart.
3468 : */
3469 1394 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3470 14 : continue;
3471 :
3472 : /* Ignore it if we don't actually hold the lock */
3473 1394 : if (locallock->nLocks <= 0)
3474 0 : continue;
3475 :
3476 : /* Scan to see whether we hold it at session or transaction level */
3477 1394 : haveSessionLock = haveXactLock = false;
3478 2788 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3479 : {
3480 1394 : if (lockOwners[i].owner == NULL)
3481 14 : haveSessionLock = true;
3482 : else
3483 1380 : haveXactLock = true;
3484 : }
3485 :
3486 : /* Ignore it if we have only session lock */
3487 1394 : if (!haveXactLock)
3488 14 : continue;
3489 :
3490 : /* This can't happen, because we already checked it */
3491 1380 : if (haveSessionLock)
3492 0 : ereport(ERROR,
3493 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3494 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3495 :
3496 : /*
3497 : * If the local lock was taken via the fast-path, we need to move it
3498 : * to the primary lock table, or just get a pointer to the existing
3499 : * primary lock table entry if by chance it's already been
3500 : * transferred.
3501 : */
3502 1380 : if (locallock->proclock == NULL)
3503 : {
3504 598 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
3505 598 : locallock->lock = locallock->proclock->tag.myLock;
3506 : }
3507 :
3508 : /*
3509 : * Arrange to not release any strong lock count held by this lock
3510 : * entry. We must retain the count until the prepared transaction is
3511 : * committed or rolled back.
3512 : */
3513 1380 : locallock->holdsStrongLockCount = false;
3514 :
3515 : /*
3516 : * Create a 2PC record.
3517 : */
3518 1380 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3519 1380 : record.lockmode = locallock->tag.mode;
3520 :
3521 1380 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3522 : &record, sizeof(TwoPhaseLockRecord));
3523 : }
3524 576 : }
3525 :
3526 : /*
3527 : * PostPrepare_Locks
3528 : * Clean up after successful PREPARE
3529 : *
3530 : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3531 : * that's now associated with the prepared transaction, and we want to
3532 : * clean out the corresponding entries in the LOCALLOCK table.
3533 : *
3534 : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3535 : * pointers in the transaction's resource owner. This is OK at the
3536 : * moment since resowner.c doesn't try to free locks retail at a toplevel
3537 : * transaction commit or abort. We could alternatively zero out nLocks
3538 : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3539 : * but that probably costs more cycles.
3540 : */
3541 : void
3542 576 : PostPrepare_Locks(TransactionId xid)
3543 : {
3544 576 : PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3545 : HASH_SEQ_STATUS status;
3546 : LOCALLOCK *locallock;
3547 : LOCK *lock;
3548 : PROCLOCK *proclock;
3549 : PROCLOCKTAG proclocktag;
3550 : int partition;
3551 :
3552 : /* Can't prepare a lock group follower. */
3553 : Assert(MyProc->lockGroupLeader == NULL ||
3554 : MyProc->lockGroupLeader == MyProc);
3555 :
3556 : /* This is a critical section: any error means big trouble */
3557 576 : START_CRIT_SECTION();
3558 :
3559 : /*
3560 : * First we run through the locallock table and get rid of unwanted
3561 : * entries, then we scan the process's proclocks and transfer them to the
3562 : * target proc.
3563 : *
3564 : * We do this separately because we may have multiple locallock entries
3565 : * pointing to the same proclock, and we daren't end up with any dangling
3566 : * pointers.
3567 : */
3568 576 : hash_seq_init(&status, LockMethodLocalHash);
3569 :
3570 1970 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3571 : {
3572 1394 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3573 : bool haveSessionLock;
3574 : bool haveXactLock;
3575 : int i;
3576 :
3577 1394 : if (locallock->proclock == NULL || locallock->lock == NULL)
3578 : {
3579 : /*
3580 : * We must've run out of shared memory while trying to set up this
3581 : * lock. Just forget the local entry.
3582 : */
3583 : Assert(locallock->nLocks == 0);
3584 0 : RemoveLocalLock(locallock);
3585 0 : continue;
3586 : }
3587 :
3588 : /* Ignore VXID locks */
3589 1394 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3590 0 : continue;
3591 :
3592 : /* Scan to see whether we hold it at session or transaction level */
3593 1394 : haveSessionLock = haveXactLock = false;
3594 2788 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3595 : {
3596 1394 : if (lockOwners[i].owner == NULL)
3597 14 : haveSessionLock = true;
3598 : else
3599 1380 : haveXactLock = true;
3600 : }
3601 :
3602 : /* Ignore it if we have only session lock */
3603 1394 : if (!haveXactLock)
3604 14 : continue;
3605 :
3606 : /* This can't happen, because we already checked it */
3607 1380 : if (haveSessionLock)
3608 0 : ereport(PANIC,
3609 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3610 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3611 :
3612 : /* Mark the proclock to show we need to release this lockmode */
3613 1380 : if (locallock->nLocks > 0)
3614 1380 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3615 :
3616 : /* And remove the locallock hashtable entry */
3617 1380 : RemoveLocalLock(locallock);
3618 : }
3619 :
3620 : /*
3621 : * Now, scan each lock partition separately.
3622 : */
3623 9792 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3624 : {
3625 : LWLock *partitionLock;
3626 9216 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3627 : dlist_mutable_iter proclock_iter;
3628 :
3629 9216 : partitionLock = LockHashPartitionLockByIndex(partition);
3630 :
3631 : /*
3632 : * If the proclock list for this partition is empty, we can skip
3633 : * acquiring the partition lock. This optimization is safer than the
3634 : * situation in LockReleaseAll, because we got rid of any fast-path
3635 : * locks during AtPrepare_Locks, so there cannot be any case where
3636 : * another backend is adding something to our lists now. For safety,
3637 : * though, we code this the same way as in LockReleaseAll.
3638 : */
3639 9216 : if (dlist_is_empty(procLocks))
3640 7934 : continue; /* needn't examine this partition */
3641 :
3642 1282 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3643 :
3644 2658 : dlist_foreach_modify(proclock_iter, procLocks)
3645 : {
3646 1376 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3647 :
3648 : Assert(proclock->tag.myProc == MyProc);
3649 :
3650 1376 : lock = proclock->tag.myLock;
3651 :
3652 : /* Ignore VXID locks */
3653 1376 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3654 76 : continue;
3655 :
3656 : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3657 : LOCK_PRINT("PostPrepare_Locks", lock, 0);
3658 : Assert(lock->nRequested >= 0);
3659 : Assert(lock->nGranted >= 0);
3660 : Assert(lock->nGranted <= lock->nRequested);
3661 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3662 :
3663 : /* Ignore it if nothing to release (must be a session lock) */
3664 1300 : if (proclock->releaseMask == 0)
3665 14 : continue;
3666 :
3667 : /* Else we should be releasing all locks */
3668 1286 : if (proclock->releaseMask != proclock->holdMask)
3669 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3670 :
3671 : /*
3672 : * We cannot simply modify proclock->tag.myProc to reassign
3673 : * ownership of the lock, because that's part of the hash key and
3674 : * the proclock would then be in the wrong hash chain. Instead
3675 : * use hash_update_hash_key. (We used to create a new hash entry,
3676 : * but that risks out-of-memory failure if other processes are
3677 : * busy making proclocks too.) We must unlink the proclock from
3678 : * our procLink chain and put it into the new proc's chain, too.
3679 : *
3680 : * Note: the updated proclock hash key will still belong to the
3681 : * same hash partition, cf proclock_hash(). So the partition lock
3682 : * we already hold is sufficient for this.
3683 : */
3684 1286 : dlist_delete(&proclock->procLink);
3685 :
3686 : /*
3687 : * Create the new hash key for the proclock.
3688 : */
3689 1286 : proclocktag.myLock = lock;
3690 1286 : proclocktag.myProc = newproc;
3691 :
3692 : /*
3693 : * Update groupLeader pointer to point to the new proc. (We'd
3694 : * better not be a member of somebody else's lock group!)
3695 : */
3696 : Assert(proclock->groupLeader == proclock->tag.myProc);
3697 1286 : proclock->groupLeader = newproc;
3698 :
3699 : /*
3700 : * Update the proclock. We should not find any existing entry for
3701 : * the same hash key, since there can be only one entry for any
3702 : * given lock with my own proc.
3703 : */
3704 1286 : if (!hash_update_hash_key(LockMethodProcLockHash,
3705 : proclock,
3706 : &proclocktag))
3707 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3708 :
3709 : /* Re-link into the new proc's proclock list */
3710 1286 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3711 :
3712 : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3713 : } /* loop over PROCLOCKs within this partition */
3714 :
3715 1282 : LWLockRelease(partitionLock);
3716 : } /* loop over partitions */
3717 :
3718 576 : END_CRIT_SECTION();
3719 576 : }
3720 :
3721 :
3722 : /*
3723 : * Estimate shared-memory space used for lock tables
3724 : */
3725 : Size
3726 3906 : LockManagerShmemSize(void)
3727 : {
3728 3906 : Size size = 0;
3729 : long max_table_size;
3730 :
3731 : /* lock hash table */
3732 3906 : max_table_size = NLOCKENTS();
3733 3906 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3734 :
3735 : /* proclock hash table */
3736 3906 : max_table_size *= 2;
3737 3906 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3738 :
3739 : /*
3740 : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3741 : */
3742 3906 : size = add_size(size, size / 10);
3743 :
3744 3906 : return size;
3745 : }
3746 :
3747 : /*
3748 : * GetLockStatusData - Return a summary of the lock manager's internal
3749 : * status, for use in a user-level reporting function.
3750 : *
3751 : * The return data consists of an array of LockInstanceData objects,
3752 : * which are a lightly abstracted version of the PROCLOCK data structures,
3753 : * i.e. there is one entry for each unique lock and interested PGPROC.
3754 : * It is the caller's responsibility to match up related items (such as
3755 : * references to the same lockable object or PGPROC) if wanted.
3756 : *
3757 : * The design goal is to hold the LWLocks for as short a time as possible;
3758 : * thus, this function simply makes a copy of the necessary data and releases
3759 : * the locks, allowing the caller to contemplate and format the data for as
3760 : * long as it pleases.
3761 : */
3762 : LockData *
3763 438 : GetLockStatusData(void)
3764 : {
3765 : LockData *data;
3766 : PROCLOCK *proclock;
3767 : HASH_SEQ_STATUS seqstat;
3768 : int els;
3769 : int el;
3770 : int i;
3771 :
3772 438 : data = (LockData *) palloc(sizeof(LockData));
3773 :
3774 : /* Guess how much space we'll need. */
3775 438 : els = MaxBackends;
3776 438 : el = 0;
3777 438 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3778 :
3779 : /*
3780 : * First, we iterate through the per-backend fast-path arrays, locking
3781 : * them one at a time. This might produce an inconsistent picture of the
3782 : * system state, but taking all of those LWLocks at the same time seems
3783 : * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3784 : * matter too much, because none of these locks can be involved in lock
3785 : * conflicts anyway - anything that might must be present in the main lock
3786 : * table. (For the same reason, we don't sweat about making leaderPid
3787 : * completely valid. We cannot safely dereference another backend's
3788 : * lockGroupLeader field without holding all lock partition locks, and
3789 : * it's not worth that.)
3790 : */
3791 63036 : for (i = 0; i < ProcGlobal->allProcCount; ++i)
3792 : {
3793 62598 : PGPROC *proc = &ProcGlobal->allProcs[i];
3794 :
3795 : /* Skip backends with pid=0, as they don't hold fast-path locks */
3796 62598 : if (proc->pid == 0)
3797 57064 : continue;
3798 :
3799 5534 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3800 :
3801 27670 : for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3802 : {
3803 : /* Skip groups without registered fast-path locks */
3804 22136 : if (proc->fpLockBits[g] == 0)
3805 18702 : continue;
3806 :
3807 58378 : for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3808 : {
3809 : LockInstanceData *instance;
3810 54944 : uint32 f = FAST_PATH_SLOT(g, j);
3811 54944 : uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3812 :
3813 : /* Skip unallocated slots */
3814 54944 : if (!lockbits)
3815 48986 : continue;
3816 :
3817 5958 : if (el >= els)
3818 : {
3819 20 : els += MaxBackends;
3820 20 : data->locks = (LockInstanceData *)
3821 20 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3822 : }
3823 :
3824 5958 : instance = &data->locks[el];
3825 5958 : SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3826 : proc->fpRelId[f]);
3827 5958 : instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3828 5958 : instance->waitLockMode = NoLock;
3829 5958 : instance->vxid.procNumber = proc->vxid.procNumber;
3830 5958 : instance->vxid.localTransactionId = proc->vxid.lxid;
3831 5958 : instance->pid = proc->pid;
3832 5958 : instance->leaderPid = proc->pid;
3833 5958 : instance->fastpath = true;
3834 :
3835 : /*
3836 : * Successfully taking fast path lock means there were no
3837 : * conflicting locks.
3838 : */
3839 5958 : instance->waitStart = 0;
3840 :
3841 5958 : el++;
3842 : }
3843 : }
3844 :
3845 5534 : if (proc->fpVXIDLock)
3846 : {
3847 : VirtualTransactionId vxid;
3848 : LockInstanceData *instance;
3849 :
3850 1658 : if (el >= els)
3851 : {
3852 4 : els += MaxBackends;
3853 4 : data->locks = (LockInstanceData *)
3854 4 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3855 : }
3856 :
3857 1658 : vxid.procNumber = proc->vxid.procNumber;
3858 1658 : vxid.localTransactionId = proc->fpLocalTransactionId;
3859 :
3860 1658 : instance = &data->locks[el];
3861 1658 : SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3862 1658 : instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3863 1658 : instance->waitLockMode = NoLock;
3864 1658 : instance->vxid.procNumber = proc->vxid.procNumber;
3865 1658 : instance->vxid.localTransactionId = proc->vxid.lxid;
3866 1658 : instance->pid = proc->pid;
3867 1658 : instance->leaderPid = proc->pid;
3868 1658 : instance->fastpath = true;
3869 1658 : instance->waitStart = 0;
3870 :
3871 1658 : el++;
3872 : }
3873 :
3874 5534 : LWLockRelease(&proc->fpInfoLock);
3875 : }
3876 :
3877 : /*
3878 : * Next, acquire lock on the entire shared lock data structure. We do
3879 : * this so that, at least for locks in the primary lock table, the state
3880 : * will be self-consistent.
3881 : *
3882 : * Since this is a read-only operation, we take shared instead of
3883 : * exclusive lock. There's not a whole lot of point to this, because all
3884 : * the normal operations require exclusive lock, but it doesn't hurt
3885 : * anything either. It will at least allow two backends to do
3886 : * GetLockStatusData in parallel.
3887 : *
3888 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3889 : */
3890 7446 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3891 7008 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3892 :
3893 : /* Now we can safely count the number of proclocks */
3894 438 : data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3895 438 : if (data->nelements > els)
3896 : {
3897 48 : els = data->nelements;
3898 48 : data->locks = (LockInstanceData *)
3899 48 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3900 : }
3901 :
3902 : /* Now scan the tables to copy the data */
3903 438 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3904 :
3905 4086 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3906 : {
3907 3648 : PGPROC *proc = proclock->tag.myProc;
3908 3648 : LOCK *lock = proclock->tag.myLock;
3909 3648 : LockInstanceData *instance = &data->locks[el];
3910 :
3911 3648 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3912 3648 : instance->holdMask = proclock->holdMask;
3913 3648 : if (proc->waitLock == proclock->tag.myLock)
3914 18 : instance->waitLockMode = proc->waitLockMode;
3915 : else
3916 3630 : instance->waitLockMode = NoLock;
3917 3648 : instance->vxid.procNumber = proc->vxid.procNumber;
3918 3648 : instance->vxid.localTransactionId = proc->vxid.lxid;
3919 3648 : instance->pid = proc->pid;
3920 3648 : instance->leaderPid = proclock->groupLeader->pid;
3921 3648 : instance->fastpath = false;
3922 3648 : instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3923 :
3924 3648 : el++;
3925 : }
3926 :
3927 : /*
3928 : * And release locks. We do this in reverse order for two reasons: (1)
3929 : * Anyone else who needs more than one of the locks will be trying to lock
3930 : * them in increasing order; we don't want to release the other process
3931 : * until it can get all the locks it needs. (2) This avoids O(N^2)
3932 : * behavior inside LWLockRelease.
3933 : */
3934 7446 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3935 7008 : LWLockRelease(LockHashPartitionLockByIndex(i));
3936 :
3937 : Assert(el == data->nelements);
3938 :
3939 438 : return data;
3940 : }
3941 :
3942 : /*
3943 : * GetBlockerStatusData - Return a summary of the lock manager's state
3944 : * concerning locks that are blocking the specified PID or any member of
3945 : * the PID's lock group, for use in a user-level reporting function.
3946 : *
3947 : * For each PID within the lock group that is awaiting some heavyweight lock,
3948 : * the return data includes an array of LockInstanceData objects, which are
3949 : * the same data structure used by GetLockStatusData; but unlike that function,
3950 : * this one reports only the PROCLOCKs associated with the lock that that PID
3951 : * is blocked on. (Hence, all the locktags should be the same for any one
3952 : * blocked PID.) In addition, we return an array of the PIDs of those backends
3953 : * that are ahead of the blocked PID in the lock's wait queue. These can be
3954 : * compared with the PIDs in the LockInstanceData objects to determine which
3955 : * waiters are ahead of or behind the blocked PID in the queue.
3956 : *
3957 : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3958 : * waiting on any heavyweight lock, return empty arrays.
3959 : *
3960 : * The design goal is to hold the LWLocks for as short a time as possible;
3961 : * thus, this function simply makes a copy of the necessary data and releases
3962 : * the locks, allowing the caller to contemplate and format the data for as
3963 : * long as it pleases.
3964 : */
3965 : BlockedProcsData *
3966 3276 : GetBlockerStatusData(int blocked_pid)
3967 : {
3968 : BlockedProcsData *data;
3969 : PGPROC *proc;
3970 : int i;
3971 :
3972 3276 : data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3973 :
3974 : /*
3975 : * Guess how much space we'll need, and preallocate. Most of the time
3976 : * this will avoid needing to do repalloc while holding the LWLocks. (We
3977 : * assume, but check with an Assert, that MaxBackends is enough entries
3978 : * for the procs[] array; the other two could need enlargement, though.)
3979 : */
3980 3276 : data->nprocs = data->nlocks = data->npids = 0;
3981 3276 : data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3982 3276 : data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3983 3276 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3984 3276 : data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3985 :
3986 : /*
3987 : * In order to search the ProcArray for blocked_pid and assume that that
3988 : * entry won't immediately disappear under us, we must hold ProcArrayLock.
3989 : * In addition, to examine the lock grouping fields of any other backend,
3990 : * we must hold all the hash partition locks. (Only one of those locks is
3991 : * actually relevant for any one lock group, but we can't know which one
3992 : * ahead of time.) It's fairly annoying to hold all those locks
3993 : * throughout this, but it's no worse than GetLockStatusData(), and it
3994 : * does have the advantage that we're guaranteed to return a
3995 : * self-consistent instantaneous state.
3996 : */
3997 3276 : LWLockAcquire(ProcArrayLock, LW_SHARED);
3998 :
3999 3276 : proc = BackendPidGetProcWithLock(blocked_pid);
4000 :
4001 : /* Nothing to do if it's gone */
4002 3276 : if (proc != NULL)
4003 : {
4004 : /*
4005 : * Acquire lock on the entire shared lock data structure. See notes
4006 : * in GetLockStatusData().
4007 : */
4008 55692 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4009 52416 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4010 :
4011 3276 : if (proc->lockGroupLeader == NULL)
4012 : {
4013 : /* Easy case, proc is not a lock group member */
4014 2798 : GetSingleProcBlockerStatusData(proc, data);
4015 : }
4016 : else
4017 : {
4018 : /* Examine all procs in proc's lock group */
4019 : dlist_iter iter;
4020 :
4021 1034 : dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
4022 : {
4023 : PGPROC *memberProc;
4024 :
4025 556 : memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4026 556 : GetSingleProcBlockerStatusData(memberProc, data);
4027 : }
4028 : }
4029 :
4030 : /*
4031 : * And release locks. See notes in GetLockStatusData().
4032 : */
4033 55692 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4034 52416 : LWLockRelease(LockHashPartitionLockByIndex(i));
4035 :
4036 : Assert(data->nprocs <= data->maxprocs);
4037 : }
4038 :
4039 3276 : LWLockRelease(ProcArrayLock);
4040 :
4041 3276 : return data;
4042 : }
4043 :
4044 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4045 : static void
4046 3354 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
4047 : {
4048 3354 : LOCK *theLock = blocked_proc->waitLock;
4049 : BlockedProcData *bproc;
4050 : dlist_iter proclock_iter;
4051 : dlist_iter proc_iter;
4052 : dclist_head *waitQueue;
4053 : int queue_size;
4054 :
4055 : /* Nothing to do if this proc is not blocked */
4056 3354 : if (theLock == NULL)
4057 1100 : return;
4058 :
4059 : /* Set up a procs[] element */
4060 2254 : bproc = &data->procs[data->nprocs++];
4061 2254 : bproc->pid = blocked_proc->pid;
4062 2254 : bproc->first_lock = data->nlocks;
4063 2254 : bproc->first_waiter = data->npids;
4064 :
4065 : /*
4066 : * We may ignore the proc's fast-path arrays, since nothing in those could
4067 : * be related to a contended lock.
4068 : */
4069 :
4070 : /* Collect all PROCLOCKs associated with theLock */
4071 6848 : dlist_foreach(proclock_iter, &theLock->procLocks)
4072 : {
4073 4594 : PROCLOCK *proclock =
4074 4594 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4075 4594 : PGPROC *proc = proclock->tag.myProc;
4076 4594 : LOCK *lock = proclock->tag.myLock;
4077 : LockInstanceData *instance;
4078 :
4079 4594 : if (data->nlocks >= data->maxlocks)
4080 : {
4081 0 : data->maxlocks += MaxBackends;
4082 0 : data->locks = (LockInstanceData *)
4083 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4084 : }
4085 :
4086 4594 : instance = &data->locks[data->nlocks];
4087 4594 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4088 4594 : instance->holdMask = proclock->holdMask;
4089 4594 : if (proc->waitLock == lock)
4090 2330 : instance->waitLockMode = proc->waitLockMode;
4091 : else
4092 2264 : instance->waitLockMode = NoLock;
4093 4594 : instance->vxid.procNumber = proc->vxid.procNumber;
4094 4594 : instance->vxid.localTransactionId = proc->vxid.lxid;
4095 4594 : instance->pid = proc->pid;
4096 4594 : instance->leaderPid = proclock->groupLeader->pid;
4097 4594 : instance->fastpath = false;
4098 4594 : data->nlocks++;
4099 : }
4100 :
4101 : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4102 2254 : waitQueue = &(theLock->waitProcs);
4103 2254 : queue_size = dclist_count(waitQueue);
4104 :
4105 2254 : if (queue_size > data->maxpids - data->npids)
4106 : {
4107 0 : data->maxpids = Max(data->maxpids + MaxBackends,
4108 : data->npids + queue_size);
4109 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
4110 0 : sizeof(int) * data->maxpids);
4111 : }
4112 :
4113 : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4114 2290 : dclist_foreach(proc_iter, waitQueue)
4115 : {
4116 2290 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4117 :
4118 2290 : if (queued_proc == blocked_proc)
4119 2254 : break;
4120 36 : data->waiter_pids[data->npids++] = queued_proc->pid;
4121 36 : queued_proc = (PGPROC *) queued_proc->links.next;
4122 : }
4123 :
4124 2254 : bproc->num_locks = data->nlocks - bproc->first_lock;
4125 2254 : bproc->num_waiters = data->npids - bproc->first_waiter;
4126 : }
4127 :
4128 : /*
4129 : * Returns a list of currently held AccessExclusiveLocks, for use by
4130 : * LogStandbySnapshot(). The result is a palloc'd array,
4131 : * with the number of elements returned into *nlocks.
4132 : *
4133 : * XXX This currently takes a lock on all partitions of the lock table,
4134 : * but it's possible to do better. By reference counting locks and storing
4135 : * the value in the ProcArray entry for each backend we could tell if any
4136 : * locks need recording without having to acquire the partition locks and
4137 : * scan the lock table. Whether that's worth the additional overhead
4138 : * is pretty dubious though.
4139 : */
4140 : xl_standby_lock *
4141 2662 : GetRunningTransactionLocks(int *nlocks)
4142 : {
4143 : xl_standby_lock *accessExclusiveLocks;
4144 : PROCLOCK *proclock;
4145 : HASH_SEQ_STATUS seqstat;
4146 : int i;
4147 : int index;
4148 : int els;
4149 :
4150 : /*
4151 : * Acquire lock on the entire shared lock data structure.
4152 : *
4153 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4154 : */
4155 45254 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4156 42592 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4157 :
4158 : /* Now we can safely count the number of proclocks */
4159 2662 : els = hash_get_num_entries(LockMethodProcLockHash);
4160 :
4161 : /*
4162 : * Allocating enough space for all locks in the lock table is overkill,
4163 : * but it's more convenient and faster than having to enlarge the array.
4164 : */
4165 2662 : accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4166 :
4167 : /* Now scan the tables to copy the data */
4168 2662 : hash_seq_init(&seqstat, LockMethodProcLockHash);
4169 :
4170 : /*
4171 : * If lock is a currently granted AccessExclusiveLock then it will have
4172 : * just one proclock holder, so locks are never accessed twice in this
4173 : * particular case. Don't copy this code for use elsewhere because in the
4174 : * general case this will give you duplicate locks when looking at
4175 : * non-exclusive lock types.
4176 : */
4177 2662 : index = 0;
4178 12072 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4179 : {
4180 : /* make sure this definition matches the one used in LockAcquire */
4181 9410 : if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4182 5586 : proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4183 : {
4184 3252 : PGPROC *proc = proclock->tag.myProc;
4185 3252 : LOCK *lock = proclock->tag.myLock;
4186 3252 : TransactionId xid = proc->xid;
4187 :
4188 : /*
4189 : * Don't record locks for transactions if we know they have
4190 : * already issued their WAL record for commit but not yet released
4191 : * lock. It is still possible that we see locks held by already
4192 : * complete transactions, if they haven't yet zeroed their xids.
4193 : */
4194 3252 : if (!TransactionIdIsValid(xid))
4195 8 : continue;
4196 :
4197 3244 : accessExclusiveLocks[index].xid = xid;
4198 3244 : accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4199 3244 : accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4200 :
4201 3244 : index++;
4202 : }
4203 : }
4204 :
4205 : Assert(index <= els);
4206 :
4207 : /*
4208 : * And release locks. We do this in reverse order for two reasons: (1)
4209 : * Anyone else who needs more than one of the locks will be trying to lock
4210 : * them in increasing order; we don't want to release the other process
4211 : * until it can get all the locks it needs. (2) This avoids O(N^2)
4212 : * behavior inside LWLockRelease.
4213 : */
4214 45254 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4215 42592 : LWLockRelease(LockHashPartitionLockByIndex(i));
4216 :
4217 2662 : *nlocks = index;
4218 2662 : return accessExclusiveLocks;
4219 : }
4220 :
4221 : /* Provide the textual name of any lock mode */
4222 : const char *
4223 11652 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4224 : {
4225 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4226 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4227 11652 : return LockMethods[lockmethodid]->lockModeNames[mode];
4228 : }
4229 :
4230 : #ifdef LOCK_DEBUG
4231 : /*
4232 : * Dump all locks in the given proc's myProcLocks lists.
4233 : *
4234 : * Caller is responsible for having acquired appropriate LWLocks.
4235 : */
4236 : void
4237 : DumpLocks(PGPROC *proc)
4238 : {
4239 : int i;
4240 :
4241 : if (proc == NULL)
4242 : return;
4243 :
4244 : if (proc->waitLock)
4245 : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4246 :
4247 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4248 : {
4249 : dlist_head *procLocks = &proc->myProcLocks[i];
4250 : dlist_iter iter;
4251 :
4252 : dlist_foreach(iter, procLocks)
4253 : {
4254 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4255 : LOCK *lock = proclock->tag.myLock;
4256 :
4257 : Assert(proclock->tag.myProc == proc);
4258 : PROCLOCK_PRINT("DumpLocks", proclock);
4259 : LOCK_PRINT("DumpLocks", lock, 0);
4260 : }
4261 : }
4262 : }
4263 :
4264 : /*
4265 : * Dump all lmgr locks.
4266 : *
4267 : * Caller is responsible for having acquired appropriate LWLocks.
4268 : */
4269 : void
4270 : DumpAllLocks(void)
4271 : {
4272 : PGPROC *proc;
4273 : PROCLOCK *proclock;
4274 : LOCK *lock;
4275 : HASH_SEQ_STATUS status;
4276 :
4277 : proc = MyProc;
4278 :
4279 : if (proc && proc->waitLock)
4280 : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4281 :
4282 : hash_seq_init(&status, LockMethodProcLockHash);
4283 :
4284 : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4285 : {
4286 : PROCLOCK_PRINT("DumpAllLocks", proclock);
4287 :
4288 : lock = proclock->tag.myLock;
4289 : if (lock)
4290 : LOCK_PRINT("DumpAllLocks", lock, 0);
4291 : else
4292 : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4293 : }
4294 : }
4295 : #endif /* LOCK_DEBUG */
4296 :
4297 : /*
4298 : * LOCK 2PC resource manager's routines
4299 : */
4300 :
4301 : /*
4302 : * Re-acquire a lock belonging to a transaction that was prepared.
4303 : *
4304 : * Because this function is run at db startup, re-acquiring the locks should
4305 : * never conflict with running transactions because there are none. We
4306 : * assume that the lock state represented by the stored 2PC files is legal.
4307 : *
4308 : * When switching from Hot Standby mode to normal operation, the locks will
4309 : * be already held by the startup process. The locks are acquired for the new
4310 : * procs without checking for conflicts, so we don't get a conflict between the
4311 : * startup process and the dummy procs, even though we will momentarily have
4312 : * a situation where two procs are holding the same AccessExclusiveLock,
4313 : * which isn't normally possible because the conflict. If we're in standby
4314 : * mode, but a recovery snapshot hasn't been established yet, it's possible
4315 : * that some but not all of the locks are already held by the startup process.
4316 : *
4317 : * This approach is simple, but also a bit dangerous, because if there isn't
4318 : * enough shared memory to acquire the locks, an error will be thrown, which
4319 : * is promoted to FATAL and recovery will abort, bringing down postmaster.
4320 : * A safer approach would be to transfer the locks like we do in
4321 : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4322 : * read-only backends to use up all the shared lock memory anyway, so that
4323 : * replaying the WAL record that needs to acquire a lock will throw an error
4324 : * and PANIC anyway.
4325 : */
4326 : void
4327 180 : lock_twophase_recover(TransactionId xid, uint16 info,
4328 : void *recdata, uint32 len)
4329 : {
4330 180 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4331 180 : PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4332 : LOCKTAG *locktag;
4333 : LOCKMODE lockmode;
4334 : LOCKMETHODID lockmethodid;
4335 : LOCK *lock;
4336 : PROCLOCK *proclock;
4337 : PROCLOCKTAG proclocktag;
4338 : bool found;
4339 : uint32 hashcode;
4340 : uint32 proclock_hashcode;
4341 : int partition;
4342 : LWLock *partitionLock;
4343 : LockMethod lockMethodTable;
4344 :
4345 : Assert(len == sizeof(TwoPhaseLockRecord));
4346 180 : locktag = &rec->locktag;
4347 180 : lockmode = rec->lockmode;
4348 180 : lockmethodid = locktag->locktag_lockmethodid;
4349 :
4350 180 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4351 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4352 180 : lockMethodTable = LockMethods[lockmethodid];
4353 :
4354 180 : hashcode = LockTagHashCode(locktag);
4355 180 : partition = LockHashPartition(hashcode);
4356 180 : partitionLock = LockHashPartitionLock(hashcode);
4357 :
4358 180 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4359 :
4360 : /*
4361 : * Find or create a lock with this tag.
4362 : */
4363 180 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4364 : locktag,
4365 : hashcode,
4366 : HASH_ENTER_NULL,
4367 : &found);
4368 180 : if (!lock)
4369 : {
4370 0 : LWLockRelease(partitionLock);
4371 0 : ereport(ERROR,
4372 : (errcode(ERRCODE_OUT_OF_MEMORY),
4373 : errmsg("out of shared memory"),
4374 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4375 : }
4376 :
4377 : /*
4378 : * if it's a new lock object, initialize it
4379 : */
4380 180 : if (!found)
4381 : {
4382 156 : lock->grantMask = 0;
4383 156 : lock->waitMask = 0;
4384 156 : dlist_init(&lock->procLocks);
4385 156 : dclist_init(&lock->waitProcs);
4386 156 : lock->nRequested = 0;
4387 156 : lock->nGranted = 0;
4388 936 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4389 156 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4390 : LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4391 : }
4392 : else
4393 : {
4394 : LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4395 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4396 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4397 : Assert(lock->nGranted <= lock->nRequested);
4398 : }
4399 :
4400 : /*
4401 : * Create the hash key for the proclock table.
4402 : */
4403 180 : proclocktag.myLock = lock;
4404 180 : proclocktag.myProc = proc;
4405 :
4406 180 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4407 :
4408 : /*
4409 : * Find or create a proclock entry with this tag
4410 : */
4411 180 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4412 : &proclocktag,
4413 : proclock_hashcode,
4414 : HASH_ENTER_NULL,
4415 : &found);
4416 180 : if (!proclock)
4417 : {
4418 : /* Oops, not enough shmem for the proclock */
4419 0 : if (lock->nRequested == 0)
4420 : {
4421 : /*
4422 : * There are no other requestors of this lock, so garbage-collect
4423 : * the lock object. We *must* do this to avoid a permanent leak
4424 : * of shared memory, because there won't be anything to cause
4425 : * anyone to release the lock object later.
4426 : */
4427 : Assert(dlist_is_empty(&lock->procLocks));
4428 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
4429 0 : &(lock->tag),
4430 : hashcode,
4431 : HASH_REMOVE,
4432 : NULL))
4433 0 : elog(PANIC, "lock table corrupted");
4434 : }
4435 0 : LWLockRelease(partitionLock);
4436 0 : ereport(ERROR,
4437 : (errcode(ERRCODE_OUT_OF_MEMORY),
4438 : errmsg("out of shared memory"),
4439 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4440 : }
4441 :
4442 : /*
4443 : * If new, initialize the new entry
4444 : */
4445 180 : if (!found)
4446 : {
4447 : Assert(proc->lockGroupLeader == NULL);
4448 164 : proclock->groupLeader = proc;
4449 164 : proclock->holdMask = 0;
4450 164 : proclock->releaseMask = 0;
4451 : /* Add proclock to appropriate lists */
4452 164 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4453 164 : dlist_push_tail(&proc->myProcLocks[partition],
4454 : &proclock->procLink);
4455 : PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4456 : }
4457 : else
4458 : {
4459 : PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4460 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
4461 : }
4462 :
4463 : /*
4464 : * lock->nRequested and lock->requested[] count the total number of
4465 : * requests, whether granted or waiting, so increment those immediately.
4466 : */
4467 180 : lock->nRequested++;
4468 180 : lock->requested[lockmode]++;
4469 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4470 :
4471 : /*
4472 : * We shouldn't already hold the desired lock.
4473 : */
4474 180 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
4475 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
4476 : lockMethodTable->lockModeNames[lockmode],
4477 : lock->tag.locktag_field1, lock->tag.locktag_field2,
4478 : lock->tag.locktag_field3);
4479 :
4480 : /*
4481 : * We ignore any possible conflicts and just grant ourselves the lock. Not
4482 : * only because we don't bother, but also to avoid deadlocks when
4483 : * switching from standby to normal mode. See function comment.
4484 : */
4485 180 : GrantLock(lock, proclock, lockmode);
4486 :
4487 : /*
4488 : * Bump strong lock count, to make sure any fast-path lock requests won't
4489 : * be granted without consulting the primary lock table.
4490 : */
4491 180 : if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4492 : {
4493 36 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4494 :
4495 36 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4496 36 : FastPathStrongRelationLocks->count[fasthashcode]++;
4497 36 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4498 : }
4499 :
4500 180 : LWLockRelease(partitionLock);
4501 180 : }
4502 :
4503 : /*
4504 : * Re-acquire a lock belonging to a transaction that was prepared, when
4505 : * starting up into hot standby mode.
4506 : */
4507 : void
4508 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4509 : void *recdata, uint32 len)
4510 : {
4511 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4512 : LOCKTAG *locktag;
4513 : LOCKMODE lockmode;
4514 : LOCKMETHODID lockmethodid;
4515 :
4516 : Assert(len == sizeof(TwoPhaseLockRecord));
4517 0 : locktag = &rec->locktag;
4518 0 : lockmode = rec->lockmode;
4519 0 : lockmethodid = locktag->locktag_lockmethodid;
4520 :
4521 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4522 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4523 :
4524 0 : if (lockmode == AccessExclusiveLock &&
4525 0 : locktag->locktag_type == LOCKTAG_RELATION)
4526 : {
4527 0 : StandbyAcquireAccessExclusiveLock(xid,
4528 : locktag->locktag_field1 /* dboid */ ,
4529 : locktag->locktag_field2 /* reloid */ );
4530 : }
4531 0 : }
4532 :
4533 :
4534 : /*
4535 : * 2PC processing routine for COMMIT PREPARED case.
4536 : *
4537 : * Find and release the lock indicated by the 2PC record.
4538 : */
4539 : void
4540 1430 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4541 : void *recdata, uint32 len)
4542 : {
4543 1430 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4544 1430 : PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4545 : LOCKTAG *locktag;
4546 : LOCKMETHODID lockmethodid;
4547 : LockMethod lockMethodTable;
4548 :
4549 : Assert(len == sizeof(TwoPhaseLockRecord));
4550 1430 : locktag = &rec->locktag;
4551 1430 : lockmethodid = locktag->locktag_lockmethodid;
4552 :
4553 1430 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4554 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4555 1430 : lockMethodTable = LockMethods[lockmethodid];
4556 :
4557 1430 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4558 1430 : }
4559 :
4560 : /*
4561 : * 2PC processing routine for ROLLBACK PREPARED case.
4562 : *
4563 : * This is actually just the same as the COMMIT case.
4564 : */
4565 : void
4566 252 : lock_twophase_postabort(TransactionId xid, uint16 info,
4567 : void *recdata, uint32 len)
4568 : {
4569 252 : lock_twophase_postcommit(xid, info, recdata, len);
4570 252 : }
4571 :
4572 : /*
4573 : * VirtualXactLockTableInsert
4574 : *
4575 : * Take vxid lock via the fast-path. There can't be any pre-existing
4576 : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4577 : *
4578 : * Since MyProc->fpLocalTransactionId will normally contain the same data
4579 : * as MyProc->vxid.lxid, you might wonder if we really need both. The
4580 : * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4581 : * examined by procarray.c, while fpLocalTransactionId is protected by
4582 : * fpInfoLock and is used only by the locking subsystem. Doing it this
4583 : * way makes it easier to verify that there are no funny race conditions.
4584 : *
4585 : * We don't bother recording this lock in the local lock table, since it's
4586 : * only ever released at the end of a transaction. Instead,
4587 : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4588 : */
4589 : void
4590 848618 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4591 : {
4592 : Assert(VirtualTransactionIdIsValid(vxid));
4593 :
4594 848618 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4595 :
4596 : Assert(MyProc->vxid.procNumber == vxid.procNumber);
4597 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4598 : Assert(MyProc->fpVXIDLock == false);
4599 :
4600 848618 : MyProc->fpVXIDLock = true;
4601 848618 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4602 :
4603 848618 : LWLockRelease(&MyProc->fpInfoLock);
4604 848618 : }
4605 :
4606 : /*
4607 : * VirtualXactLockTableCleanup
4608 : *
4609 : * Check whether a VXID lock has been materialized; if so, release it,
4610 : * unblocking waiters.
4611 : */
4612 : void
4613 849460 : VirtualXactLockTableCleanup(void)
4614 : {
4615 : bool fastpath;
4616 : LocalTransactionId lxid;
4617 :
4618 : Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4619 :
4620 : /*
4621 : * Clean up shared memory state.
4622 : */
4623 849460 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4624 :
4625 849460 : fastpath = MyProc->fpVXIDLock;
4626 849460 : lxid = MyProc->fpLocalTransactionId;
4627 849460 : MyProc->fpVXIDLock = false;
4628 849460 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4629 :
4630 849460 : LWLockRelease(&MyProc->fpInfoLock);
4631 :
4632 : /*
4633 : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4634 : * that means someone transferred the lock to the main lock table.
4635 : */
4636 849460 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4637 : {
4638 : VirtualTransactionId vxid;
4639 : LOCKTAG locktag;
4640 :
4641 516 : vxid.procNumber = MyProcNumber;
4642 516 : vxid.localTransactionId = lxid;
4643 516 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4644 :
4645 516 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4646 : &locktag, ExclusiveLock, false);
4647 : }
4648 849460 : }
4649 :
4650 : /*
4651 : * XactLockForVirtualXact
4652 : *
4653 : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4654 : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4655 : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4656 : * prepared, committed, or aborted.
4657 : *
4658 : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4659 : * known as "vxid" before its PREPARE TRANSACTION.
4660 : */
4661 : static bool
4662 592 : XactLockForVirtualXact(VirtualTransactionId vxid,
4663 : TransactionId xid, bool wait)
4664 : {
4665 592 : bool more = false;
4666 :
4667 : /* There is no point to wait for 2PCs if you have no 2PCs. */
4668 592 : if (max_prepared_xacts == 0)
4669 198 : return true;
4670 :
4671 : do
4672 : {
4673 : LockAcquireResult lar;
4674 : LOCKTAG tag;
4675 :
4676 : /* Clear state from previous iterations. */
4677 394 : if (more)
4678 : {
4679 0 : xid = InvalidTransactionId;
4680 0 : more = false;
4681 : }
4682 :
4683 : /* If we have no xid, try to find one. */
4684 394 : if (!TransactionIdIsValid(xid))
4685 178 : xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4686 394 : if (!TransactionIdIsValid(xid))
4687 : {
4688 : Assert(!more);
4689 154 : return true;
4690 : }
4691 :
4692 : /* Check or wait for XID completion. */
4693 240 : SET_LOCKTAG_TRANSACTION(tag, xid);
4694 240 : lar = LockAcquire(&tag, ShareLock, false, !wait);
4695 240 : if (lar == LOCKACQUIRE_NOT_AVAIL)
4696 0 : return false;
4697 240 : LockRelease(&tag, ShareLock, false);
4698 240 : } while (more);
4699 :
4700 240 : return true;
4701 : }
4702 :
4703 : /*
4704 : * VirtualXactLock
4705 : *
4706 : * If wait = true, wait as long as the given VXID or any XID acquired by the
4707 : * same transaction is still running. Then, return true.
4708 : *
4709 : * If wait = false, just check whether that VXID or one of those XIDs is still
4710 : * running, and return true or false.
4711 : */
4712 : bool
4713 672 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4714 : {
4715 : LOCKTAG tag;
4716 : PGPROC *proc;
4717 672 : TransactionId xid = InvalidTransactionId;
4718 :
4719 : Assert(VirtualTransactionIdIsValid(vxid));
4720 :
4721 672 : if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4722 : /* no vxid lock; localTransactionId is a normal, locked XID */
4723 2 : return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4724 :
4725 670 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4726 :
4727 : /*
4728 : * If a lock table entry must be made, this is the PGPROC on whose behalf
4729 : * it must be done. Note that the transaction might end or the PGPROC
4730 : * might be reassigned to a new backend before we get around to examining
4731 : * it, but it doesn't matter. If we find upon examination that the
4732 : * relevant lxid is no longer running here, that's enough to prove that
4733 : * it's no longer running anywhere.
4734 : */
4735 670 : proc = ProcNumberGetProc(vxid.procNumber);
4736 670 : if (proc == NULL)
4737 8 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4738 :
4739 : /*
4740 : * We must acquire this lock before checking the procNumber and lxid
4741 : * against the ones we're waiting for. The target backend will only set
4742 : * or clear lxid while holding this lock.
4743 : */
4744 662 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4745 :
4746 662 : if (proc->vxid.procNumber != vxid.procNumber
4747 662 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4748 : {
4749 : /* VXID ended */
4750 106 : LWLockRelease(&proc->fpInfoLock);
4751 106 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4752 : }
4753 :
4754 : /*
4755 : * If we aren't asked to wait, there's no need to set up a lock table
4756 : * entry. The transaction is still in progress, so just return false.
4757 : */
4758 556 : if (!wait)
4759 : {
4760 30 : LWLockRelease(&proc->fpInfoLock);
4761 30 : return false;
4762 : }
4763 :
4764 : /*
4765 : * OK, we're going to need to sleep on the VXID. But first, we must set
4766 : * up the primary lock table entry, if needed (ie, convert the proc's
4767 : * fast-path lock on its VXID to a regular lock).
4768 : */
4769 526 : if (proc->fpVXIDLock)
4770 : {
4771 : PROCLOCK *proclock;
4772 : uint32 hashcode;
4773 : LWLock *partitionLock;
4774 :
4775 516 : hashcode = LockTagHashCode(&tag);
4776 :
4777 516 : partitionLock = LockHashPartitionLock(hashcode);
4778 516 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4779 :
4780 516 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4781 : &tag, hashcode, ExclusiveLock);
4782 516 : if (!proclock)
4783 : {
4784 0 : LWLockRelease(partitionLock);
4785 0 : LWLockRelease(&proc->fpInfoLock);
4786 0 : ereport(ERROR,
4787 : (errcode(ERRCODE_OUT_OF_MEMORY),
4788 : errmsg("out of shared memory"),
4789 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4790 : }
4791 516 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4792 :
4793 516 : LWLockRelease(partitionLock);
4794 :
4795 516 : proc->fpVXIDLock = false;
4796 : }
4797 :
4798 : /*
4799 : * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4800 : * search. The proc might have assigned this XID but not yet locked it,
4801 : * in which case the proc will lock this XID before releasing the VXID.
4802 : * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4803 : * so we won't save an XID of a different VXID. It doesn't matter whether
4804 : * we save this before or after setting up the primary lock table entry.
4805 : */
4806 526 : xid = proc->xid;
4807 :
4808 : /* Done with proc->fpLockBits */
4809 526 : LWLockRelease(&proc->fpInfoLock);
4810 :
4811 : /* Time to wait. */
4812 526 : (void) LockAcquire(&tag, ShareLock, false, false);
4813 :
4814 476 : LockRelease(&tag, ShareLock, false);
4815 476 : return XactLockForVirtualXact(vxid, xid, wait);
4816 : }
4817 :
4818 : /*
4819 : * LockWaiterCount
4820 : *
4821 : * Find the number of lock requester on this locktag
4822 : */
4823 : int
4824 133012 : LockWaiterCount(const LOCKTAG *locktag)
4825 : {
4826 133012 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4827 : LOCK *lock;
4828 : bool found;
4829 : uint32 hashcode;
4830 : LWLock *partitionLock;
4831 133012 : int waiters = 0;
4832 :
4833 133012 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4834 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4835 :
4836 133012 : hashcode = LockTagHashCode(locktag);
4837 133012 : partitionLock = LockHashPartitionLock(hashcode);
4838 133012 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4839 :
4840 133012 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4841 : locktag,
4842 : hashcode,
4843 : HASH_FIND,
4844 : &found);
4845 133012 : if (found)
4846 : {
4847 : Assert(lock != NULL);
4848 30 : waiters = lock->nRequested;
4849 : }
4850 133012 : LWLockRelease(partitionLock);
4851 :
4852 133012 : return waiters;
4853 : }
|