Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * lock.c
4 : * POSTGRES primary lock mechanism
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/lock.c
12 : *
13 : * NOTES
14 : * A lock table is a shared memory hash table. When
15 : * a process tries to acquire a lock of a type that conflicts
16 : * with existing locks, it is put to sleep using the routines
17 : * in storage/lmgr/proc.c.
18 : *
19 : * For the most part, this code should be invoked via lmgr.c
20 : * or another lock-management module, not directly.
21 : *
22 : * Interface:
23 : *
24 : * LockManagerShmemInit(), InitLockManagerAccess(), GetLocksMethodTable(),
25 : * GetLockTagsMethodTable(), LockAcquire(), LockRelease(), LockReleaseAll(),
26 : * LockCheckConflicts(), GrantLock()
27 : *
28 : *-------------------------------------------------------------------------
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 :
35 : #include "access/transam.h"
36 : #include "access/twophase.h"
37 : #include "access/twophase_rmgr.h"
38 : #include "access/xlog.h"
39 : #include "access/xlogutils.h"
40 : #include "miscadmin.h"
41 : #include "pg_trace.h"
42 : #include "storage/proc.h"
43 : #include "storage/procarray.h"
44 : #include "storage/spin.h"
45 : #include "storage/standby.h"
46 : #include "utils/memutils.h"
47 : #include "utils/ps_status.h"
48 : #include "utils/resowner.h"
49 :
50 :
51 : /* This configuration variable is used to set the lock table size */
52 : int max_locks_per_xact; /* set by guc.c */
53 :
54 : #define NLOCKENTS() \
55 : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
56 :
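/*
 * Worked example, as a sketch (assuming the default max_locks_per_xact = 64,
 * with MaxBackends = 100 and max_prepared_xacts = 0):
 * NLOCKENTS() = 64 * (100 + 0) = 6400 shared lock-table entries.
 */
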
57 :
58 : /*
59 : * Data structures defining the semantics of the standard lock methods.
60 : *
61 : * The conflict table defines the semantics of the various lock modes.
62 : */
63 : static const LOCKMASK LockConflicts[] = {
64 : 0,
65 :
66 : /* AccessShareLock */
67 : LOCKBIT_ON(AccessExclusiveLock),
68 :
69 : /* RowShareLock */
70 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
71 :
72 : /* RowExclusiveLock */
73 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
74 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
75 :
76 : /* ShareUpdateExclusiveLock */
77 : LOCKBIT_ON(ShareUpdateExclusiveLock) |
78 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
79 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
80 :
81 : /* ShareLock */
82 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
83 : LOCKBIT_ON(ShareRowExclusiveLock) |
84 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
85 :
86 : /* ShareRowExclusiveLock */
87 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
88 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
89 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
90 :
91 : /* ExclusiveLock */
92 : LOCKBIT_ON(RowShareLock) |
93 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
94 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
95 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
96 :
97 : /* AccessExclusiveLock */
98 : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
99 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
100 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
101 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
102 :
103 : };
104 :
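/*
 * A minimal sketch (not compiled into this file; the guard macro is
 * hypothetical) of how the conflict table is consulted: the requested mode's
 * row is ANDed against a bitmask of held modes, exactly as
 * LockCheckConflicts() does against lock->grantMask below.
 */
#ifdef LOCK_CONFLICT_TABLE_EXAMPLE
static bool
modes_conflict_example(void)
{
	/* RowExclusiveLock's row has the ShareLock bit set, so this is true */
	return (LockConflicts[RowExclusiveLock] & LOCKBIT_ON(ShareLock)) != 0;
}
#endif
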
105 : /* Names of lock modes, for debug printouts */
106 : static const char *const lock_mode_names[] =
107 : {
108 : "INVALID",
109 : "AccessShareLock",
110 : "RowShareLock",
111 : "RowExclusiveLock",
112 : "ShareUpdateExclusiveLock",
113 : "ShareLock",
114 : "ShareRowExclusiveLock",
115 : "ExclusiveLock",
116 : "AccessExclusiveLock"
117 : };
118 :
119 : #ifndef LOCK_DEBUG
120 : static bool Dummy_trace = false;
121 : #endif
122 :
123 : static const LockMethodData default_lockmethod = {
124 : MaxLockMode,
125 : LockConflicts,
126 : lock_mode_names,
127 : #ifdef LOCK_DEBUG
128 : &Trace_locks
129 : #else
130 : &Dummy_trace
131 : #endif
132 : };
133 :
134 : static const LockMethodData user_lockmethod = {
135 : MaxLockMode,
136 : LockConflicts,
137 : lock_mode_names,
138 : #ifdef LOCK_DEBUG
139 : &Trace_userlocks
140 : #else
141 : &Dummy_trace
142 : #endif
143 : };
144 :
145 : /*
146 : * map from lock method id to the lock table data structures
147 : */
148 : static const LockMethod LockMethods[] = {
149 : NULL,
150 : &default_lockmethod,
151 : &user_lockmethod
152 : };
153 :
154 :
155 : /* Record that's written to 2PC state file when a lock is persisted */
156 : typedef struct TwoPhaseLockRecord
157 : {
158 : LOCKTAG locktag;
159 : LOCKMODE lockmode;
160 : } TwoPhaseLockRecord;
161 :
162 :
163 : /*
164 : * Count of the number of fast path lock slots we believe to be used. This
165 : * might be higher than the real number if another backend has transferred
166 : * our locks to the primary lock table, but it can never be lower than the
167 : * real value, since only we can acquire locks on our own behalf.
168 : *
169 : * XXX Allocate a static array of the maximum size. We could use a pointer
170 : * and then allocate just the right size to save a couple kB, but then we
171 : * would have to initialize that, while for the static array that happens
172 : * automatically. Doesn't seem worth the extra complexity.
173 : */
174 : static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
175 :
176 : /*
177 : * Flag to indicate if the relation extension lock is held by this backend.
178 : * This flag is used to ensure that while holding the relation extension lock
179 : * we don't try to acquire a heavyweight lock on any other object. This
180 : * restriction implies that the relation extension lock won't ever participate
181 : * in the deadlock cycle because we can never wait for any other heavyweight
182 : * lock after acquiring this lock.
183 : *
184 : * Such a restriction is okay for relation extension locks as unlike other
185 : * heavyweight locks these are not held till the transaction end. These are
186 : * taken for a short duration to extend a particular relation and then
187 : * released.
188 : */
189 : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
190 :
191 : /*
192 : * Number of fast-path locks per backend - size of the arrays in PGPROC.
193 : * This is set only once during start, before initializing shared memory,
194 : * and remains constant after that.
195 : *
196 : * We set the limit based on max_locks_per_transaction GUC, because that's
197 : * the best information about expected number of locks per backend we have.
198 : * See InitializeFastPathLocks() for details.
199 : */
200 : int FastPathLockGroupsPerBackend = 0;
201 :
202 : /*
203 : * Macros to calculate the fast-path group and index for a relation.
204 : *
205 : * The formula is a simple hash function, designed to spread the OIDs a bit,
206 : * so that even contiguous values end up in different groups. In most cases
207 : * there will be gaps anyway, but the multiplication should help a bit.
208 : *
209 : * The selected constant (49157) is a prime not too close to 2^k, and it's
210 : * small enough to not cause overflows (in 64-bit).
211 : */
212 : #define FAST_PATH_REL_GROUP(rel) \
213 : (((uint64) (rel) * 49157) % FastPathLockGroupsPerBackend)
214 :
215 : /*
216 : * Given the group/slot indexes, calculate the slot index in the whole array
217 : * of fast-path lock slots.
218 : */
219 : #define FAST_PATH_SLOT(group, index) \
220 : (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
221 : AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
222 : ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
223 :
224 : /*
225 : * Given a slot index (into the whole per-backend array), calculated using
226 : * the FAST_PATH_SLOT macro, split it into group and index (in the group).
227 : */
228 : #define FAST_PATH_GROUP(index) \
229 : (AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \
230 : ((index) / FP_LOCK_SLOTS_PER_GROUP))
231 : #define FAST_PATH_INDEX(index) \
232 : (AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \
233 : ((index) % FP_LOCK_SLOTS_PER_GROUP))
234 :
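/*
 * How the three macros above fit together, as a sketch (assuming
 * FP_LOCK_SLOTS_PER_GROUP is 16): slot 21 decomposes into group 1, index 5,
 * and the mapping round-trips:
 *
 *	Assert(FAST_PATH_SLOT(FAST_PATH_GROUP(21), FAST_PATH_INDEX(21)) == 21);
 */
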
235 : /* Macros for manipulating proc->fpLockBits */
236 : #define FAST_PATH_BITS_PER_SLOT 3
237 : #define FAST_PATH_LOCKNUMBER_OFFSET 1
238 : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
239 : #define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
240 : #define FAST_PATH_GET_BITS(proc, n) \
241 : ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
242 : #define FAST_PATH_BIT_POSITION(n, l) \
243 : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
244 : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
245 : AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
246 : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
247 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
248 : FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
249 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
250 : FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
251 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
252 : (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
253 :
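/*
 * Bit-layout sketch for the macros above: each slot owns
 * FAST_PATH_BITS_PER_SLOT = 3 adjacent bits of its group's fpLockBits word,
 * one bit per mode from AccessShareLock (1) through RowExclusiveLock (3).
 * For a slot whose in-group index is 5, recording RowExclusiveLock sets bit
 * (3 - 1) + 3 * 5 = 17 of that group's word; FAST_PATH_CHECK_LOCKMODE tests
 * the same bit, and FAST_PATH_CLEAR_LOCKMODE clears it.
 */
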
254 : /*
255 : * The fast-path lock mechanism is concerned only with relation locks on
256 : * unshared relations by backends bound to a database. The fast-path
257 : * mechanism exists mostly to accelerate acquisition and release of locks
258 : * that rarely conflict. Because ShareUpdateExclusiveLock is
259 : * self-conflicting, it can't use the fast-path mechanism; but it also does
260 : * not conflict with any of the locks that do, so we can ignore it completely.
261 : */
262 : #define EligibleForRelationFastPath(locktag, mode) \
263 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
264 : (locktag)->locktag_type == LOCKTAG_RELATION && \
265 : (locktag)->locktag_field1 == MyDatabaseId && \
266 : MyDatabaseId != InvalidOid && \
267 : (mode) < ShareUpdateExclusiveLock)
268 : #define ConflictsWithRelationFastPath(locktag, mode) \
269 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
270 : (locktag)->locktag_type == LOCKTAG_RELATION && \
271 : (locktag)->locktag_field1 != InvalidOid && \
272 : (mode) > ShareUpdateExclusiveLock)
273 :
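/*
 * Examples of how the two macros above classify requests (a sketch): an
 * AccessShareLock or RowExclusiveLock on an ordinary relation in the current
 * database is EligibleForRelationFastPath(); ShareLock or any stronger mode
 * on a relation ConflictsWithRelationFastPath() and must take the strong-lock
 * path; ShareUpdateExclusiveLock satisfies neither macro, by design.
 */
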
274 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
275 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
276 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
277 : const LOCKTAG *locktag, uint32 hashcode);
278 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
279 :
280 : /*
281 : * To make the fast-path lock mechanism work, we must have some way of
282 : * preventing the use of the fast-path when a conflicting lock might be present.
283 : * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
284 : * and maintain an integer count of the number of "strong" lockers
285 : * in each partition. When any "strong" lockers are present (which is
286 : * hopefully not very often), the fast-path mechanism can't be used, and we
287 : * must fall back to the slower method of pushing matching locks directly
288 : * into the main lock tables.
289 : *
290 : * The deadlock detector does not know anything about the fast path mechanism,
291 : * so any locks that might be involved in a deadlock must be transferred from
292 : * the fast-path queues to the main lock table.
293 : */
294 :
295 : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
296 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
297 : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
298 : #define FastPathStrongLockHashPartition(hashcode) \
299 : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
300 :
301 : typedef struct
302 : {
303 : slock_t mutex;
304 : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
305 : } FastPathStrongRelationLockData;
306 :
307 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
308 :
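/*
 * Bookkeeping sketch (the real increment lives in BeginStrongLockAcquire(),
 * which is outside this excerpt): a "strong" locker bumps its partition's
 * counter under the spinlock,
 *
 *	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
 *	FastPathStrongRelationLocks->count[fasthashcode]++;
 *	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
 *
 * and weak lockers skip the fast path whenever count[fasthashcode] != 0, as
 * LockAcquireExtended() does below.  RemoveLocalLock() shows the matching
 * decrement.
 */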
309 :
310 : /*
311 : * Pointers to hash tables containing lock state
312 : *
313 : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
314 : * shared memory; LockMethodLocalHash is local to each backend.
315 : */
316 : static HTAB *LockMethodLockHash;
317 : static HTAB *LockMethodProcLockHash;
318 : static HTAB *LockMethodLocalHash;
319 :
320 :
321 : /* private state for error cleanup */
322 : static LOCALLOCK *StrongLockInProgress;
323 : static LOCALLOCK *awaitedLock;
324 : static ResourceOwner awaitedOwner;
325 :
326 :
327 : #ifdef LOCK_DEBUG
328 :
329 : /*------
330 : * The following configuration options are available for lock debugging:
331 : *
332 : * TRACE_LOCKS -- give a bunch of output about what's going on in this file
333 : * TRACE_USERLOCKS -- same but for user locks
334 : * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this OID
335 : * (use to avoid output on system tables)
336 : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
337 : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
338 : *
339 : * Furthermore, but in storage/lmgr/lwlock.c:
340 : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
341 : *
342 : * Define LOCK_DEBUG at compile time to get all these enabled.
343 : * --------
344 : */
345 :
346 : int Trace_lock_oidmin = FirstNormalObjectId;
347 : bool Trace_locks = false;
348 : bool Trace_userlocks = false;
349 : int Trace_lock_table = 0;
350 : bool Debug_deadlocks = false;
351 :
352 :
353 : inline static bool
354 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
355 : {
356 : return
357 : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
358 : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
359 : || (Trace_lock_table &&
360 : (tag->locktag_field2 == Trace_lock_table));
361 : }
362 :
363 :
364 : inline static void
365 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
366 : {
367 : if (LOCK_DEBUG_ENABLED(&lock->tag))
368 : elog(LOG,
369 : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
370 : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
371 : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
372 : where, lock,
373 : lock->tag.locktag_field1, lock->tag.locktag_field2,
374 : lock->tag.locktag_field3, lock->tag.locktag_field4,
375 : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
376 : lock->grantMask,
377 : lock->requested[1], lock->requested[2], lock->requested[3],
378 : lock->requested[4], lock->requested[5], lock->requested[6],
379 : lock->requested[7], lock->nRequested,
380 : lock->granted[1], lock->granted[2], lock->granted[3],
381 : lock->granted[4], lock->granted[5], lock->granted[6],
382 : lock->granted[7], lock->nGranted,
383 : dclist_count(&lock->waitProcs),
384 : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
385 : }
386 :
387 :
388 : inline static void
389 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
390 : {
391 : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
392 : elog(LOG,
393 : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
394 : where, proclockP, proclockP->tag.myLock,
395 : PROCLOCK_LOCKMETHOD(*(proclockP)),
396 : proclockP->tag.myProc, (int) proclockP->holdMask);
397 : }
398 : #else /* not LOCK_DEBUG */
399 :
400 : #define LOCK_PRINT(where, lock, type) ((void) 0)
401 : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
402 : #endif /* not LOCK_DEBUG */
403 :
404 :
405 : static uint32 proclock_hash(const void *key, Size keysize);
406 : static void RemoveLocalLock(LOCALLOCK *locallock);
407 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
408 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
409 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
410 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
411 : static void FinishStrongLockAcquire(void);
412 : static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
413 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
414 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
415 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
416 : PROCLOCK *proclock, LockMethod lockMethodTable);
417 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
418 : LockMethod lockMethodTable, uint32 hashcode,
419 : bool wakeupNeeded);
420 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
421 : LOCKTAG *locktag, LOCKMODE lockmode,
422 : bool decrement_strong_lock_count);
423 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
424 : BlockedProcsData *data);
425 :
426 :
427 : /*
428 : * Initialize the lock manager's shmem data structures.
429 : *
430 : * This is called from CreateSharedMemoryAndSemaphores(), which see for more
431 : * comments. In the normal postmaster case, the shared hash tables are
432 : * created here, and backends inherit pointers to them via fork(). In the
433 : * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
434 : * the already existing shared hash tables. In either case, each backend must
435 : * also call InitLockManagerAccess() to create the locallock hash table.
436 : */
437 : void
438 1902 : LockManagerShmemInit(void)
439 : {
440 : HASHCTL info;
441 : long init_table_size,
442 : max_table_size;
443 : bool found;
444 :
445 : /*
446 : * Compute init/max size to request for lock hashtables. Note these
447 : * calculations must agree with LockManagerShmemSize!
448 : */
449 1902 : max_table_size = NLOCKENTS();
450 1902 : init_table_size = max_table_size / 2;
451 :
452 : /*
453 : * Allocate hash table for LOCK structs. This stores per-locked-object
454 : * information.
455 : */
456 1902 : info.keysize = sizeof(LOCKTAG);
457 1902 : info.entrysize = sizeof(LOCK);
458 1902 : info.num_partitions = NUM_LOCK_PARTITIONS;
459 :
460 1902 : LockMethodLockHash = ShmemInitHash("LOCK hash",
461 : init_table_size,
462 : max_table_size,
463 : &info,
464 : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
465 :
466 : /* Assume an average of 2 holders per lock */
467 1902 : max_table_size *= 2;
468 1902 : init_table_size *= 2;
469 :
470 : /*
471 : * Allocate hash table for PROCLOCK structs. This stores
472 : * per-lock-per-holder information.
473 : */
474 1902 : info.keysize = sizeof(PROCLOCKTAG);
475 1902 : info.entrysize = sizeof(PROCLOCK);
476 1902 : info.hash = proclock_hash;
477 1902 : info.num_partitions = NUM_LOCK_PARTITIONS;
478 :
479 1902 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
480 : init_table_size,
481 : max_table_size,
482 : &info,
483 : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
484 :
485 : /*
486 : * Allocate fast-path structures.
487 : */
488 1902 : FastPathStrongRelationLocks =
489 1902 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
490 : sizeof(FastPathStrongRelationLockData), &found);
491 1902 : if (!found)
492 1902 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
493 1902 : }
494 :
495 : /*
496 : * Initialize the lock manager's backend-private data structures.
497 : */
498 : void
499 32858 : InitLockManagerAccess(void)
500 : {
501 : /*
502 : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
503 : * counts and resource owner information.
504 : */
505 : HASHCTL info;
506 :
507 32858 : info.keysize = sizeof(LOCALLOCKTAG);
508 32858 : info.entrysize = sizeof(LOCALLOCK);
509 :
510 32858 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
511 : 16,
512 : &info,
513 : HASH_ELEM | HASH_BLOBS);
514 32858 : }
515 :
516 :
517 : /*
518 : * Fetch the lock method table associated with a given lock
519 : */
520 : LockMethod
521 198 : GetLocksMethodTable(const LOCK *lock)
522 : {
523 198 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
524 :
525 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
526 198 : return LockMethods[lockmethodid];
527 : }
528 :
529 : /*
530 : * Fetch the lock method table associated with a given locktag
531 : */
532 : LockMethod
533 2256 : GetLockTagsMethodTable(const LOCKTAG *locktag)
534 : {
535 2256 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
536 :
537 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
538 2256 : return LockMethods[lockmethodid];
539 : }
540 :
541 :
542 : /*
543 : * Compute the hash code associated with a LOCKTAG.
544 : *
545 : * To avoid unnecessary recomputations of the hash code, we try to do this
546 : * just once per function, and then pass it around as needed. Aside from
547 : * passing the hashcode to hash_search_with_hash_value(), we can extract
548 : * the lock partition number from the hashcode.
549 : */
550 : uint32
551 31877228 : LockTagHashCode(const LOCKTAG *locktag)
552 : {
553 31877228 : return get_hash_value(LockMethodLockHash, (const void *) locktag);
554 : }
555 :
556 : /*
557 : * Compute the hash code associated with a PROCLOCKTAG.
558 : *
559 : * Because we want to use just one set of partition locks for both the
560 : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
561 : * fall into the same partition number as their associated LOCKs.
562 : * dynahash.c expects the partition number to be the low-order bits of
563 : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
564 : * same low-order bits as the associated LOCKTAG's hash code. We achieve
565 : * this with this specialized hash function.
566 : */
567 : static uint32
568 1610 : proclock_hash(const void *key, Size keysize)
569 : {
570 1610 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
571 : uint32 lockhash;
572 : Datum procptr;
573 :
574 : Assert(keysize == sizeof(PROCLOCKTAG));
575 :
576 : /* Look into the associated LOCK object, and compute its hash code */
577 1610 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
578 :
579 : /*
580 : * To make the hash code also depend on the PGPROC, we xor the proc
581 : * struct's address into the hash code, left-shifted so that the
582 : * partition-number bits don't change. Since this is only a hash, we
583 : * don't care if we lose high-order bits of the address; use an
584 : * intermediate variable to suppress cast-pointer-to-int warnings.
585 : */
586 1610 : procptr = PointerGetDatum(proclocktag->myProc);
587 1610 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
588 :
589 1610 : return lockhash;
590 : }
591 :
592 : /*
593 : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
594 : * for its underlying LOCK.
595 : *
596 : * We use this just to avoid redundant calls of LockTagHashCode().
597 : */
598 : static inline uint32
599 7519952 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
600 : {
601 7519952 : uint32 lockhash = hashcode;
602 : Datum procptr;
603 :
604 : /*
605 : * This must match proclock_hash()!
606 : */
607 7519952 : procptr = PointerGetDatum(proclocktag->myProc);
608 7519952 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
609 :
610 7519952 : return lockhash;
611 : }
612 :
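/*
 * The invariant the two functions above preserve, as a sketch: the XOR term
 * is shifted left by LOG2_NUM_LOCK_PARTITIONS, so it cannot disturb the
 * partition-number bits, and therefore
 *
 *	LockHashPartition(proclock_hash(&proclocktag, sizeof(PROCLOCKTAG))) ==
 *		LockHashPartition(LockTagHashCode(&proclocktag.myLock->tag))
 *
 * which is what lets LOCK and PROCLOCK entries share one set of partition
 * locks.
 */
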
613 : /*
614 : * Given two lock modes, return whether they would conflict.
615 : */
616 : bool
617 488 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
618 : {
619 488 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
620 :
621 488 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
622 284 : return true;
623 :
624 204 : return false;
625 : }
626 :
627 : /*
628 : * LockHeldByMe -- test whether lock 'locktag' is held by the current
629 : * transaction
630 : *
631 : * Returns true if current transaction holds a lock on 'tag' of mode
632 : * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
633 : * ("Stronger" is defined as "numerically higher", which is a bit
634 : * semantically dubious but is OK for the purposes we use this for.)
635 : */
636 : bool
637 0 : LockHeldByMe(const LOCKTAG *locktag,
638 : LOCKMODE lockmode, bool orstronger)
639 : {
640 : LOCALLOCKTAG localtag;
641 : LOCALLOCK *locallock;
642 :
643 : /*
644 : * See if there is a LOCALLOCK entry for this lock and lockmode
645 : */
646 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
647 0 : localtag.lock = *locktag;
648 0 : localtag.mode = lockmode;
649 :
650 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
651 : &localtag,
652 : HASH_FIND, NULL);
653 :
654 0 : if (locallock && locallock->nLocks > 0)
655 0 : return true;
656 :
657 0 : if (orstronger)
658 : {
659 : LOCKMODE slockmode;
660 :
661 0 : for (slockmode = lockmode + 1;
662 : slockmode <= MaxLockMode;
663 0 : slockmode++)
664 : {
665 0 : if (LockHeldByMe(locktag, slockmode, false))
666 0 : return true;
667 : }
668 : }
669 :
670 0 : return false;
671 : }
672 :
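/*
 * Typical use is in assertions; a sketch for a relation the caller has
 * already locked (relid being that relation's OID):
 *
 *	LOCKTAG		tag;
 *
 *	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *	Assert(LockHeldByMe(&tag, AccessShareLock, true));
 */
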
673 : #ifdef USE_ASSERT_CHECKING
674 : /*
675 : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
676 : * evaluate assertions based on all locks held.
677 : */
678 : HTAB *
679 : GetLockMethodLocalHash(void)
680 : {
681 : return LockMethodLocalHash;
682 : }
683 : #endif
684 :
685 : /*
686 : * LockHasWaiters -- look up 'locktag' and check if releasing this
687 : * lock would wake up other processes waiting for it.
688 : */
689 : bool
690 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
691 : {
692 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
693 : LockMethod lockMethodTable;
694 : LOCALLOCKTAG localtag;
695 : LOCALLOCK *locallock;
696 : LOCK *lock;
697 : PROCLOCK *proclock;
698 : LWLock *partitionLock;
699 0 : bool hasWaiters = false;
700 :
701 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
702 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
703 0 : lockMethodTable = LockMethods[lockmethodid];
704 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
705 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
706 :
707 : #ifdef LOCK_DEBUG
708 : if (LOCK_DEBUG_ENABLED(locktag))
709 : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
710 : locktag->locktag_field1, locktag->locktag_field2,
711 : lockMethodTable->lockModeNames[lockmode]);
712 : #endif
713 :
714 : /*
715 : * Find the LOCALLOCK entry for this lock and lockmode
716 : */
717 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
718 0 : localtag.lock = *locktag;
719 0 : localtag.mode = lockmode;
720 :
721 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
722 : &localtag,
723 : HASH_FIND, NULL);
724 :
725 : /*
726 : * Let the caller print its own error message, too; do not ereport(ERROR).
727 : */
728 0 : if (!locallock || locallock->nLocks <= 0)
729 : {
730 0 : elog(WARNING, "you don't own a lock of type %s",
731 : lockMethodTable->lockModeNames[lockmode]);
732 0 : return false;
733 : }
734 :
735 : /*
736 : * Check the shared lock table.
737 : */
738 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
739 :
740 0 : LWLockAcquire(partitionLock, LW_SHARED);
741 :
742 : /*
743 : * We don't need to re-find the lock or proclock, since we kept their
744 : * addresses in the locallock table, and they couldn't have been removed
745 : * while we were holding a lock on them.
746 : */
747 0 : lock = locallock->lock;
748 : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
749 0 : proclock = locallock->proclock;
750 : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
751 :
752 : /*
753 : * Double-check that we are actually holding a lock of the type we want to
754 : * release.
755 : */
756 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
757 : {
758 : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
759 0 : LWLockRelease(partitionLock);
760 0 : elog(WARNING, "you don't own a lock of type %s",
761 : lockMethodTable->lockModeNames[lockmode]);
762 0 : RemoveLocalLock(locallock);
763 0 : return false;
764 : }
765 :
766 : /*
767 : * Do the checking.
768 : */
769 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
770 0 : hasWaiters = true;
771 :
772 0 : LWLockRelease(partitionLock);
773 :
774 0 : return hasWaiters;
775 : }
776 :
777 : /*
778 : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
779 : * set lock if/when no conflicts.
780 : *
781 : * Inputs:
782 : * locktag: unique identifier for the lockable object
783 : * lockmode: lock mode to acquire
784 : * sessionLock: if true, acquire lock for session not current transaction
785 : * dontWait: if true, don't wait to acquire lock
786 : *
787 : * Returns one of:
788 : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
789 : * LOCKACQUIRE_OK lock successfully acquired
790 : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
791 : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
792 : *
793 : * In the normal case where dontWait=false and the caller doesn't need to
794 : * distinguish a freshly acquired lock from one already taken earlier in
795 : * this same transaction, there is no need to examine the return value.
796 : *
797 : * Side Effects: The lock is acquired and recorded in lock tables.
798 : *
799 : * NOTE: if we wait for the lock, there is no way to abort the wait
800 : * short of aborting the transaction.
801 : */
802 : LockAcquireResult
803 1386284 : LockAcquire(const LOCKTAG *locktag,
804 : LOCKMODE lockmode,
805 : bool sessionLock,
806 : bool dontWait)
807 : {
808 1386284 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
809 : true, NULL);
810 : }
811 :
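/*
 * Minimal usage sketch.  Real callers normally go through lmgr.c wrappers
 * such as LockRelationOid() rather than building a LOCKTAG by hand:
 *
 *	LOCKTAG		tag;
 *
 *	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *	(void) LockAcquire(&tag, AccessShareLock, false, false);
 */
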
812 : /*
813 : * LockAcquireExtended - allows us to specify additional options
814 : *
815 : * reportMemoryError specifies whether a lock request that fills the lock
816 : * table should generate an ERROR or not. Passing "false" allows the caller
817 : * to attempt to recover from lock-table-full situations, perhaps by forcibly
818 : * canceling other lock holders and then retrying. Note, however, that the
819 : * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
820 : * in combination with dontWait = true, as the cause of failure couldn't be
821 : * distinguished.
822 : *
823 : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
824 : * table entry if a lock is successfully acquired, or NULL if not.
825 : */
826 : LockAcquireResult
827 34416938 : LockAcquireExtended(const LOCKTAG *locktag,
828 : LOCKMODE lockmode,
829 : bool sessionLock,
830 : bool dontWait,
831 : bool reportMemoryError,
832 : LOCALLOCK **locallockp)
833 : {
834 34416938 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
835 : LockMethod lockMethodTable;
836 : LOCALLOCKTAG localtag;
837 : LOCALLOCK *locallock;
838 : LOCK *lock;
839 : PROCLOCK *proclock;
840 : bool found;
841 : ResourceOwner owner;
842 : uint32 hashcode;
843 : LWLock *partitionLock;
844 : bool found_conflict;
845 : ProcWaitStatus waitResult;
846 34416938 : bool log_lock = false;
847 :
848 34416938 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
849 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
850 34416938 : lockMethodTable = LockMethods[lockmethodid];
851 34416938 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
852 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
853 :
854 34416938 : if (RecoveryInProgress() && !InRecovery &&
855 530724 : (locktag->locktag_type == LOCKTAG_OBJECT ||
856 530724 : locktag->locktag_type == LOCKTAG_RELATION) &&
857 : lockmode > RowExclusiveLock)
858 0 : ereport(ERROR,
859 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
860 : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
861 : lockMethodTable->lockModeNames[lockmode]),
862 : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
863 :
864 : #ifdef LOCK_DEBUG
865 : if (LOCK_DEBUG_ENABLED(locktag))
866 : elog(LOG, "LockAcquire: lock [%u,%u] %s",
867 : locktag->locktag_field1, locktag->locktag_field2,
868 : lockMethodTable->lockModeNames[lockmode]);
869 : #endif
870 :
871 : /* Identify owner for lock */
872 34416938 : if (sessionLock)
873 147132 : owner = NULL;
874 : else
875 34269806 : owner = CurrentResourceOwner;
876 :
877 : /*
878 : * Find or create a LOCALLOCK entry for this lock and lockmode
879 : */
880 34416938 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
881 34416938 : localtag.lock = *locktag;
882 34416938 : localtag.mode = lockmode;
883 :
884 34416938 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
885 : &localtag,
886 : HASH_ENTER, &found);
887 :
888 : /*
889 : * if it's a new locallock object, initialize it
890 : */
891 34416938 : if (!found)
892 : {
893 30857322 : locallock->lock = NULL;
894 30857322 : locallock->proclock = NULL;
895 30857322 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
896 30857322 : locallock->nLocks = 0;
897 30857322 : locallock->holdsStrongLockCount = false;
898 30857322 : locallock->lockCleared = false;
899 30857322 : locallock->numLockOwners = 0;
900 30857322 : locallock->maxLockOwners = 8;
901 30857322 : locallock->lockOwners = NULL; /* in case next line fails */
902 30857322 : locallock->lockOwners = (LOCALLOCKOWNER *)
903 30857322 : MemoryContextAlloc(TopMemoryContext,
904 30857322 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
905 : }
906 : else
907 : {
908 : /* Make sure there will be room to remember the lock */
909 3559616 : if (locallock->numLockOwners >= locallock->maxLockOwners)
910 : {
911 38 : int newsize = locallock->maxLockOwners * 2;
912 :
913 38 : locallock->lockOwners = (LOCALLOCKOWNER *)
914 38 : repalloc(locallock->lockOwners,
915 : newsize * sizeof(LOCALLOCKOWNER));
916 38 : locallock->maxLockOwners = newsize;
917 : }
918 : }
919 34416938 : hashcode = locallock->hashcode;
920 :
921 34416938 : if (locallockp)
922 33030654 : *locallockp = locallock;
923 :
924 : /*
925 : * If we already hold the lock, we can just increase the count locally.
926 : *
927 : * If lockCleared is already set, caller need not worry about absorbing
928 : * sinval messages related to the lock's object.
929 : */
930 34416938 : if (locallock->nLocks > 0)
931 : {
932 3559616 : GrantLockLocal(locallock, owner);
933 3559616 : if (locallock->lockCleared)
934 3417106 : return LOCKACQUIRE_ALREADY_CLEAR;
935 : else
936 142510 : return LOCKACQUIRE_ALREADY_HELD;
937 : }
938 :
939 : /*
940 : * We don't acquire any other heavyweight lock while holding the relation
941 : * extension lock. We do allow acquiring the same relation extension
942 : * lock more than once, but that case won't reach here.
943 : */
944 : Assert(!IsRelationExtensionLockHeld);
945 :
946 : /*
947 : * Prepare to emit a WAL record if acquisition of this lock needs to be
948 : * replayed in a standby server.
949 : *
950 : * Here we prepare to log; after the lock is acquired we'll issue the log record.
951 : * This arrangement simplifies error recovery in case the preparation step
952 : * fails.
953 : *
954 : * Only AccessExclusiveLocks can conflict with lock types that read-only
955 : * transactions can acquire in a standby server. Make sure this definition
956 : * matches the one in GetRunningTransactionLocks().
957 : */
958 30857322 : if (lockmode >= AccessExclusiveLock &&
959 433572 : locktag->locktag_type == LOCKTAG_RELATION &&
960 292676 : !RecoveryInProgress() &&
961 246800 : XLogStandbyInfoActive())
962 : {
963 189034 : LogAccessExclusiveLockPrepare();
964 189034 : log_lock = true;
965 : }
966 :
967 : /*
968 : * Attempt to take lock via fast path, if eligible. But if we remember
969 : * having filled up the fast path array, we don't attempt to make any
970 : * further use of it until we release some locks. It's possible that some
971 : * other backend has transferred some of those locks to the shared hash
972 : * table, leaving space free, but it's not worth acquiring the LWLock just
973 : * to check. It's also possible that we're acquiring a second or third
974 : * lock type on a relation we have already locked using the fast-path, but
975 : * for now we don't worry about that case either.
976 : */
977 30857322 : if (EligibleForRelationFastPath(locktag, lockmode) &&
978 27670000 : FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP)
979 : {
980 27398326 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
981 : bool acquired;
982 :
983 : /*
984 : * LWLockAcquire acts as a memory sequencing point, so it's safe to
985 : * assume that any strong locker whose increment to
986 : * FastPathStrongRelationLocks->count becomes visible after we test
987 : * it has yet to begin to transfer fast-path locks.
988 : */
989 27398326 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
990 27398326 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
991 489844 : acquired = false;
992 : else
993 26908482 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
994 : lockmode);
995 27398326 : LWLockRelease(&MyProc->fpInfoLock);
996 27398326 : if (acquired)
997 : {
998 : /*
999 : * The locallock might contain stale pointers to some old shared
1000 : * objects; we MUST reset these to null before considering the
1001 : * lock to be acquired via fast-path.
1002 : */
1003 26908482 : locallock->lock = NULL;
1004 26908482 : locallock->proclock = NULL;
1005 26908482 : GrantLockLocal(locallock, owner);
1006 26908482 : return LOCKACQUIRE_OK;
1007 : }
1008 : }
1009 :
1010 : /*
1011 : * If this lock could potentially have been taken via the fast-path by
1012 : * some other backend, we must (temporarily) disable further use of the
1013 : * fast-path for this lock tag, and migrate any locks already taken via
1014 : * this method to the main lock table.
1015 : */
1016 3948840 : if (ConflictsWithRelationFastPath(locktag, lockmode))
1017 : {
1018 347416 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1019 :
1020 347416 : BeginStrongLockAcquire(locallock, fasthashcode);
1021 347416 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1022 : hashcode))
1023 : {
1024 0 : AbortStrongLockAcquire();
1025 0 : if (locallock->nLocks == 0)
1026 0 : RemoveLocalLock(locallock);
1027 0 : if (locallockp)
1028 0 : *locallockp = NULL;
1029 0 : if (reportMemoryError)
1030 0 : ereport(ERROR,
1031 : (errcode(ERRCODE_OUT_OF_MEMORY),
1032 : errmsg("out of shared memory"),
1033 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1034 : else
1035 0 : return LOCKACQUIRE_NOT_AVAIL;
1036 : }
1037 : }
1038 :
1039 : /*
1040 : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1041 : * take it via the fast-path, either, so we've got to mess with the shared
1042 : * lock table.
1043 : */
1044 3948840 : partitionLock = LockHashPartitionLock(hashcode);
1045 :
1046 3948840 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1047 :
1048 : /*
1049 : * Find or create lock and proclock entries with this tag
1050 : *
1051 : * Note: if the locallock object already existed, it might have a pointer
1052 : * to the lock already ... but we should not assume that that pointer is
1053 : * valid, since a lock object with zero hold and request counts can go
1054 : * away anytime. So we have to use SetupLockInTable() to recompute the
1055 : * lock and proclock pointers, even if they're already set.
1056 : */
1057 3948840 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1058 : hashcode, lockmode);
1059 3948840 : if (!proclock)
1060 : {
1061 0 : AbortStrongLockAcquire();
1062 0 : LWLockRelease(partitionLock);
1063 0 : if (locallock->nLocks == 0)
1064 0 : RemoveLocalLock(locallock);
1065 0 : if (locallockp)
1066 0 : *locallockp = NULL;
1067 0 : if (reportMemoryError)
1068 0 : ereport(ERROR,
1069 : (errcode(ERRCODE_OUT_OF_MEMORY),
1070 : errmsg("out of shared memory"),
1071 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1072 : else
1073 0 : return LOCKACQUIRE_NOT_AVAIL;
1074 : }
1075 3948840 : locallock->proclock = proclock;
1076 3948840 : lock = proclock->tag.myLock;
1077 3948840 : locallock->lock = lock;
1078 :
1079 : /*
1080 : * If lock requested conflicts with locks requested by waiters, must join
1081 : * wait queue. Otherwise, check for conflict with already-held locks.
1082 : * (That's last because most complex check.)
1083 : */
1084 3948840 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1085 276 : found_conflict = true;
1086 : else
1087 3948564 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1088 : lock, proclock);
1089 :
1090 3948840 : if (!found_conflict)
1091 : {
1092 : /* No conflict with held or previously requested locks */
1093 3945182 : GrantLock(lock, proclock, lockmode);
1094 3945182 : waitResult = PROC_WAIT_STATUS_OK;
1095 : }
1096 : else
1097 : {
1098 : /*
1099 : * Join the lock's wait queue. We call this even in the dontWait
1100 : * case, because JoinWaitQueue() may discover that we can acquire the
1101 : * lock immediately after all.
1102 : */
1103 3658 : waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1104 : }
1105 :
1106 3948840 : if (waitResult == PROC_WAIT_STATUS_ERROR)
1107 : {
1108 : /*
1109 : * We're not getting the lock because a deadlock was detected already
1110 : * while trying to join the wait queue, or because we would have to
1111 : * wait but the caller requested no blocking.
1112 : *
1113 : * Undo the changes to shared entries before releasing the partition
1114 : * lock.
1115 : */
1116 1332 : AbortStrongLockAcquire();
1117 :
1118 1332 : if (proclock->holdMask == 0)
1119 : {
1120 : uint32 proclock_hashcode;
1121 :
1122 926 : proclock_hashcode = ProcLockHashCode(&proclock->tag,
1123 : hashcode);
1124 926 : dlist_delete(&proclock->lockLink);
1125 926 : dlist_delete(&proclock->procLink);
1126 926 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1127 926 : &(proclock->tag),
1128 : proclock_hashcode,
1129 : HASH_REMOVE,
1130 : NULL))
1131 0 : elog(PANIC, "proclock table corrupted");
1132 : }
1133 : else
1134 : PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1135 1332 : lock->nRequested--;
1136 1332 : lock->requested[lockmode]--;
1137 : LOCK_PRINT("LockAcquire: did not join wait queue",
1138 : lock, lockmode);
1139 : Assert((lock->nRequested > 0) &&
1140 : (lock->requested[lockmode] >= 0));
1141 : Assert(lock->nGranted <= lock->nRequested);
1142 1332 : LWLockRelease(partitionLock);
1143 1332 : if (locallock->nLocks == 0)
1144 1332 : RemoveLocalLock(locallock);
1145 :
1146 1332 : if (dontWait)
1147 : {
1148 1330 : if (locallockp)
1149 440 : *locallockp = NULL;
1150 1330 : return LOCKACQUIRE_NOT_AVAIL;
1151 : }
1152 : else
1153 : {
1154 2 : DeadLockReport();
1155 : /* DeadLockReport() will not return */
1156 : }
1157 : }
1158 :
1159 : /*
1160 : * We are now in the lock queue, or the lock was already granted. If
1161 : * queued, go to sleep.
1162 : */
1163 3947508 : if (waitResult == PROC_WAIT_STATUS_WAITING)
1164 : {
1165 : Assert(!dontWait);
1166 : PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1167 : LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1168 2316 : LWLockRelease(partitionLock);
1169 :
1170 2316 : waitResult = WaitOnLock(locallock, owner);
1171 :
1172 : /*
1173 : * NOTE: do not do any material change of state between here and
1174 : * return. All required changes in locktable state must have been
1175 : * done when the lock was granted to us --- see notes in WaitOnLock.
1176 : */
1177 :
1178 2232 : if (waitResult == PROC_WAIT_STATUS_ERROR)
1179 : {
1180 : /*
1181 : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1182 : * now.
1183 : */
1184 : Assert(!dontWait);
1185 10 : DeadLockReport();
1186 : /* DeadLockReport() will not return */
1187 : }
1188 : }
1189 : else
1190 3945192 : LWLockRelease(partitionLock);
1191 : Assert(waitResult == PROC_WAIT_STATUS_OK);
1192 :
1193 : /* The lock was granted to us. Update the local lock entry accordingly */
1194 : Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1195 3947414 : GrantLockLocal(locallock, owner);
1196 :
1197 : /*
1198 : * Lock state is fully up-to-date now; if we error out after this, no
1199 : * special error cleanup is required.
1200 : */
1201 3947414 : FinishStrongLockAcquire();
1202 :
1203 : /*
1204 : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1205 : * standby server.
1206 : */
1207 3947414 : if (log_lock)
1208 : {
1209 : /*
1210 : * Decode the locktag back to the original values, to avoid sending
1211 : * lots of empty bytes with every message. See lock.h to check how a
1212 : * locktag is defined for LOCKTAG_RELATION
1213 : */
1214 188608 : LogAccessExclusiveLock(locktag->locktag_field1,
1215 : locktag->locktag_field2);
1216 : }
1217 :
1218 3947414 : return LOCKACQUIRE_OK;
1219 : }
1220 :
1221 : /*
1222 : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1223 : * request.
1224 : *
1225 : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1226 : * for lack of shared memory.
1227 : *
1228 : * The appropriate partition lock must be held at entry, and will be
1229 : * held at exit.
1230 : */
1231 : static PROCLOCK *
1232 3952122 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1233 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1234 : {
1235 : LOCK *lock;
1236 : PROCLOCK *proclock;
1237 : PROCLOCKTAG proclocktag;
1238 : uint32 proclock_hashcode;
1239 : bool found;
1240 :
1241 : /*
1242 : * Find or create a lock with this tag.
1243 : */
1244 3952122 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1245 : locktag,
1246 : hashcode,
1247 : HASH_ENTER_NULL,
1248 : &found);
1249 3952122 : if (!lock)
1250 0 : return NULL;
1251 :
1252 : /*
1253 : * if it's a new lock object, initialize it
1254 : */
1255 3952122 : if (!found)
1256 : {
1257 3533336 : lock->grantMask = 0;
1258 3533336 : lock->waitMask = 0;
1259 3533336 : dlist_init(&lock->procLocks);
1260 3533336 : dclist_init(&lock->waitProcs);
1261 3533336 : lock->nRequested = 0;
1262 3533336 : lock->nGranted = 0;
1263 21200016 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1264 3533336 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1265 : LOCK_PRINT("LockAcquire: new", lock, lockmode);
1266 : }
1267 : else
1268 : {
1269 : LOCK_PRINT("LockAcquire: found", lock, lockmode);
1270 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1271 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1272 : Assert(lock->nGranted <= lock->nRequested);
1273 : }
1274 :
1275 : /*
1276 : * Create the hash key for the proclock table.
1277 : */
1278 3952122 : proclocktag.myLock = lock;
1279 3952122 : proclocktag.myProc = proc;
1280 :
1281 3952122 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1282 :
1283 : /*
1284 : * Find or create a proclock entry with this tag
1285 : */
1286 3952122 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1287 : &proclocktag,
1288 : proclock_hashcode,
1289 : HASH_ENTER_NULL,
1290 : &found);
1291 3952122 : if (!proclock)
1292 : {
1293 : /* Oops, not enough shmem for the proclock */
1294 0 : if (lock->nRequested == 0)
1295 : {
1296 : /*
1297 : * There are no other requestors of this lock, so garbage-collect
1298 : * the lock object. We *must* do this to avoid a permanent leak
1299 : * of shared memory, because there won't be anything to cause
1300 : * anyone to release the lock object later.
1301 : */
1302 : Assert(dlist_is_empty(&(lock->procLocks)));
1303 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1304 0 : &(lock->tag),
1305 : hashcode,
1306 : HASH_REMOVE,
1307 : NULL))
1308 0 : elog(PANIC, "lock table corrupted");
1309 : }
1310 0 : return NULL;
1311 : }
1312 :
1313 : /*
1314 : * If new, initialize the new entry
1315 : */
1316 3952122 : if (!found)
1317 : {
1318 3563328 : uint32 partition = LockHashPartition(hashcode);
1319 :
1320 : /*
1321 : * It might seem unsafe to access proclock->groupLeader without a
1322 : * lock, but it's not really. Either we are initializing a proclock
1323 : * on our own behalf, in which case our group leader isn't changing
1324 : * because the group leader for a process can only ever be changed by
1325 : * the process itself; or else we are transferring a fast-path lock to
1326 : * the main lock table, in which case that process can't change its
1327 : * lock group leader without first releasing all of its locks (and in
1328 : * particular the one we are currently transferring).
1329 : */
1330 7126656 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
1331 3563328 : proc->lockGroupLeader : proc;
1332 3563328 : proclock->holdMask = 0;
1333 3563328 : proclock->releaseMask = 0;
1334 : /* Add proclock to appropriate lists */
1335 3563328 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1336 3563328 : dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1337 : PROCLOCK_PRINT("LockAcquire: new", proclock);
1338 : }
1339 : else
1340 : {
1341 : PROCLOCK_PRINT("LockAcquire: found", proclock);
1342 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
1343 :
1344 : #ifdef CHECK_DEADLOCK_RISK
1345 :
1346 : /*
1347 : * Issue warning if we already hold a lower-level lock on this object
1348 : * and do not hold a lock of the requested level or higher. This
1349 : * indicates a deadlock-prone coding practice (eg, we'd have a
1350 : * deadlock if another backend were following the same code path at
1351 : * about the same time).
1352 : *
1353 : * This is not enabled by default, because it may generate log entries
1354 : * about user-level coding practices that are in fact safe in context.
1355 : * It can be enabled to help find system-level problems.
1356 : *
1357 : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1358 : * better to use a table. For now, though, this works.
1359 : */
1360 : {
1361 : int i;
1362 :
1363 : for (i = lockMethodTable->numLockModes; i > 0; i--)
1364 : {
1365 : if (proclock->holdMask & LOCKBIT_ON(i))
1366 : {
1367 : if (i >= (int) lockmode)
1368 : break; /* safe: we have a lock >= req level */
1369 : elog(LOG, "deadlock risk: raising lock level"
1370 : " from %s to %s on object %u/%u/%u",
1371 : lockMethodTable->lockModeNames[i],
1372 : lockMethodTable->lockModeNames[lockmode],
1373 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1374 : lock->tag.locktag_field3);
1375 : break;
1376 : }
1377 : }
1378 : }
1379 : #endif /* CHECK_DEADLOCK_RISK */
1380 : }
1381 :
1382 : /*
1383 : * lock->nRequested and lock->requested[] count the total number of
1384 : * requests, whether granted or waiting, so increment those immediately.
1385 : * The other counts don't increment till we get the lock.
1386 : */
1387 3952122 : lock->nRequested++;
1388 3952122 : lock->requested[lockmode]++;
1389 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1390 :
1391 : /*
1392 : * We shouldn't already hold the desired lock; else locallock table is
1393 : * broken.
1394 : */
1395 3952122 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
1396 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
1397 : lockMethodTable->lockModeNames[lockmode],
1398 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1399 : lock->tag.locktag_field3);
1400 :
1401 3952122 : return proclock;
1402 : }
1403 :
1404 : /*
1405 : * Check and set/reset the flag that we hold the relation extension lock.
1406 : *
1407 : * It is the caller's responsibility to call this function after
1408 : * acquiring or releasing the relation extension lock.
1409 : *
1410 : * Pass acquired as true if the lock was acquired, false otherwise.
1411 : */
1412 : static inline void
1413 62635596 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1414 : {
1415 : #ifdef USE_ASSERT_CHECKING
1416 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1417 : IsRelationExtensionLockHeld = acquired;
1418 : #endif
1419 62635596 : }
1420 :
1421 : /*
1422 : * Subroutine to free a locallock entry
1423 : */
1424 : static void
1425 30857322 : RemoveLocalLock(LOCALLOCK *locallock)
1426 : {
1427 : int i;
1428 :
1429 31000348 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1430 : {
1431 143026 : if (locallock->lockOwners[i].owner != NULL)
1432 142950 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1433 : }
1434 30857322 : locallock->numLockOwners = 0;
1435 30857322 : if (locallock->lockOwners != NULL)
1436 30857322 : pfree(locallock->lockOwners);
1437 30857322 : locallock->lockOwners = NULL;
1438 :
1439 30857322 : if (locallock->holdsStrongLockCount)
1440 : {
1441 : uint32 fasthashcode;
1442 :
1443 346860 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1444 :
1445 346860 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1446 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1447 346860 : FastPathStrongRelationLocks->count[fasthashcode]--;
1448 346860 : locallock->holdsStrongLockCount = false;
1449 346860 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1450 : }
1451 :
1452 30857322 : if (!hash_search(LockMethodLocalHash,
1453 30857322 : &(locallock->tag),
1454 : HASH_REMOVE, NULL))
1455 0 : elog(WARNING, "locallock table corrupted");
1456 :
1457 : /*
1458 : * Indicate that the lock is released for certain types of locks
1459 : */
1460 30857322 : CheckAndSetLockHeld(locallock, false);
1461 30857322 : }
1462 :
1463 : /*
1464 : * LockCheckConflicts -- test whether requested lock conflicts
1465 : * with those already granted
1466 : *
1467 : * Returns true if conflict, false if no conflict.
1468 : *
1469 : * NOTES:
1470 : * Here's what makes this complicated: one process's locks don't
1471 : * conflict with one another, no matter what purpose they are held for
1472 : * (eg, session and transaction locks do not conflict). Nor do the locks
1473 : * of one process in a lock group conflict with those of another process in
1474 : * the same group. So, we must subtract off these locks when determining
1475 : * whether the requested new lock conflicts with those already held.
1476 : */
1477 : bool
1478 3951118 : LockCheckConflicts(LockMethod lockMethodTable,
1479 : LOCKMODE lockmode,
1480 : LOCK *lock,
1481 : PROCLOCK *proclock)
1482 : {
1483 3951118 : int numLockModes = lockMethodTable->numLockModes;
1484 : LOCKMASK myLocks;
1485 3951118 : int conflictMask = lockMethodTable->conflictTab[lockmode];
1486 : int conflictsRemaining[MAX_LOCKMODES];
1487 3951118 : int totalConflictsRemaining = 0;
1488 : dlist_iter proclock_iter;
1489 : int i;
1490 :
1491 : /*
1492 : * first check for global conflicts: If no locks conflict with my request,
1493 : * then I get the lock.
1494 : *
1495 : * Checking for conflict: lock->grantMask represents the types of
1496 : * currently held locks. conflictTable[lockmode] has a bit set for each
1497 : * type of lock that conflicts with request. Bitwise compare tells if
1498 : * there is a conflict.
1499 : */
1500 3951118 : if (!(conflictMask & lock->grantMask))
1501 : {
1502 : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1503 3772846 : return false;
1504 : }
1505 :
1506 : /*
1507 : * Rats. Something conflicts. But it could still be my own lock, or a
1508 : * lock held by another member of my locking group. First, figure out how
1509 : * many conflicts remain after subtracting out any locks I hold myself.
1510 : */
1511 178272 : myLocks = proclock->holdMask;
1512 1604448 : for (i = 1; i <= numLockModes; i++)
1513 : {
1514 1426176 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
1515 : {
1516 765168 : conflictsRemaining[i] = 0;
1517 765168 : continue;
1518 : }
1519 661008 : conflictsRemaining[i] = lock->granted[i];
1520 661008 : if (myLocks & LOCKBIT_ON(i))
1521 193122 : --conflictsRemaining[i];
1522 661008 : totalConflictsRemaining += conflictsRemaining[i];
1523 : }
1524 :
1525 : /* If no conflicts remain, we get the lock. */
1526 178272 : if (totalConflictsRemaining == 0)
1527 : {
1528 : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1529 173628 : return false;
1530 : }
1531 :
1532 : /* If no group locking, it's definitely a conflict. */
1533 4644 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1534 : {
1535 : Assert(proclock->tag.myProc == MyProc);
1536 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1537 : proclock);
1538 3368 : return true;
1539 : }
1540 :
1541 : /*
1542 : * The relation extension lock conflicts even between members of the same lock group.
1543 : */
1544 1276 : if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1545 : {
1546 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1547 : proclock);
1548 60 : return true;
1549 : }
1550 :
1551 : /*
1552 : * Locks held in conflicting modes by members of our own lock group are
1553 : * not real conflicts; we can subtract those out and see if we still have
1554 : * a conflict. This is O(N) in the number of processes holding or
1555 : * awaiting locks on this object. We could improve that by making the
1556 : * shared memory state more complex (and larger) but it doesn't seem worth
1557 : * it.
1558 : */
1559 2046 : dlist_foreach(proclock_iter, &lock->procLocks)
1560 : {
1561 1788 : PROCLOCK *otherproclock =
1562 1788 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1563 :
1564 1788 : if (proclock != otherproclock &&
1565 1530 : proclock->groupLeader == otherproclock->groupLeader &&
1566 962 : (otherproclock->holdMask & conflictMask) != 0)
1567 : {
1568 958 : int intersectMask = otherproclock->holdMask & conflictMask;
1569 :
1570 8622 : for (i = 1; i <= numLockModes; i++)
1571 : {
1572 7664 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
1573 : {
1574 978 : if (conflictsRemaining[i] <= 0)
1575 0 : elog(PANIC, "proclocks held do not match lock");
1576 978 : conflictsRemaining[i]--;
1577 978 : totalConflictsRemaining--;
1578 : }
1579 : }
1580 :
1581 958 : if (totalConflictsRemaining == 0)
1582 : {
1583 : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1584 : proclock);
1585 958 : return false;
1586 : }
1587 : }
1588 : }
1589 :
1590 : /* Nope, it's a real conflict. */
1591 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1592 258 : return true;
1593 : }
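/*
 * Illustrative sketch (not part of lock.c): the two passes above reduce to
 * a mask intersection and a per-mode count with the holder's own modes
 * subtracted.  Standalone demo with hypothetical demo_* names; the mode
 * numbering and masks are simplified:
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LOCKBIT(mode)	(1U << (mode))

/* Pass 1: does any currently granted mode conflict with the request? */
static bool
demo_mask_conflict(uint32_t grantMask, uint32_t conflictMask)
{
	return (conflictMask & grantMask) != 0;
}

/* Pass 2: per-mode conflict counts, minus one for each mode held by me. */
static int
demo_conflicts_remaining(const int *granted, uint32_t myHoldMask,
						 uint32_t conflictMask, int numModes)
{
	int			total = 0;

	for (int i = 1; i <= numModes; i++)
	{
		if ((conflictMask & DEMO_LOCKBIT(i)) == 0)
			continue;
		total += granted[i] - ((myHoldMask & DEMO_LOCKBIT(i)) ? 1 : 0);
	}
	return total;
}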
1594 :
1595 : /*
1596 : * GrantLock -- update the lock and proclock data structures to show
1597 : * the lock request has been granted.
1598 : *
1599 : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1600 : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1601 : *
1602 : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1603 : * table entry; but since we may be awaking some other process, we can't do
1604 : * that here; it's done by GrantLockLocal, instead.
1605 : */
1606 : void
1607 3950890 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1608 : {
1609 3950890 : lock->nGranted++;
1610 3950890 : lock->granted[lockmode]++;
1611 3950890 : lock->grantMask |= LOCKBIT_ON(lockmode);
1612 3950890 : if (lock->granted[lockmode] == lock->requested[lockmode])
1613 3950374 : lock->waitMask &= LOCKBIT_OFF(lockmode);
1614 3950890 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1615 : LOCK_PRINT("GrantLock", lock, lockmode);
1616 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1617 : Assert(lock->nGranted <= lock->nRequested);
1618 3950890 : }
1619 :
1620 : /*
1621 : * UnGrantLock -- opposite of GrantLock.
1622 : *
1623 : * Updates the lock and proclock data structures to show that the lock
1624 : * is no longer held nor requested by the current holder.
1625 : *
1626 : * Returns true if there were any waiters waiting on the lock that
1627 : * should now be woken up with ProcLockWakeup.
1628 : */
1629 : static bool
1630 3950750 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1631 : PROCLOCK *proclock, LockMethod lockMethodTable)
1632 : {
1633 3950750 : bool wakeupNeeded = false;
1634 :
1635 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1636 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1637 : Assert(lock->nGranted <= lock->nRequested);
1638 :
1639 : /*
1640 : * fix the general lock stats
1641 : */
1642 3950750 : lock->nRequested--;
1643 3950750 : lock->requested[lockmode]--;
1644 3950750 : lock->nGranted--;
1645 3950750 : lock->granted[lockmode]--;
1646 :
1647 3950750 : if (lock->granted[lockmode] == 0)
1648 : {
1649 : /* change the conflict mask. No more of this lock type. */
1650 3929722 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1651 : }
1652 :
1653 : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1654 :
1655 : /*
1656 : * We need only run ProcLockWakeup if the released lock conflicts with at
1657 : * least one of the lock types requested by waiter(s). Otherwise whatever
1658 : * conflict made them wait must still exist. NOTE: before MVCC, we could
1659 : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1660 : * not true anymore, because the remaining granted locks might belong to
1661 : * some waiter, who could now be awakened because he doesn't conflict with
1662 : * his own locks.
1663 : */
1664 3950750 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1665 2190 : wakeupNeeded = true;
1666 :
1667 : /*
1668 : * Now fix the per-proclock state.
1669 : */
1670 3950750 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1671 : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1672 :
1673 3950750 : return wakeupNeeded;
1674 : }
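/*
 * Illustrative sketch (not part of lock.c): the wakeup decision above is a
 * single mask test -- waiters need waking only if the released mode
 * conflicts with some mode being waited for.  demo_* is hypothetical:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
demo_wakeup_needed(uint32_t conflictRowForReleasedMode, uint32_t waitMask)
{
	/* nonzero intersection: some waiter might now be grantable */
	return (conflictRowForReleasedMode & waitMask) != 0;
}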
1675 :
1676 : /*
1677 : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1678 : * proclock and lock objects if possible, and call ProcLockWakeup if there
1679 : * are remaining requests and the caller says it's OK. (Normally, this
1680 : * should be called after UnGrantLock, and wakeupNeeded is the result from
1681 : * UnGrantLock.)
1682 : *
1683 : * The appropriate partition lock must be held at entry, and will be
1684 : * held at exit.
1685 : */
1686 : static void
1687 3884846 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1688 : LockMethod lockMethodTable, uint32 hashcode,
1689 : bool wakeupNeeded)
1690 : {
1691 : /*
1692 : * If this was my last hold on this lock, delete my entry in the proclock
1693 : * table.
1694 : */
1695 3884846 : if (proclock->holdMask == 0)
1696 : {
1697 : uint32 proclock_hashcode;
1698 :
1699 : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1700 3562446 : dlist_delete(&proclock->lockLink);
1701 3562446 : dlist_delete(&proclock->procLink);
1702 3562446 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1703 3562446 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1704 3562446 : &(proclock->tag),
1705 : proclock_hashcode,
1706 : HASH_REMOVE,
1707 : NULL))
1708 0 : elog(PANIC, "proclock table corrupted");
1709 : }
1710 :
1711 3884846 : if (lock->nRequested == 0)
1712 : {
1713 : /*
1714 : * The caller just released the last lock, so garbage-collect the lock
1715 : * object.
1716 : */
1717 : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1718 : Assert(dlist_is_empty(&lock->procLocks));
1719 3533362 : if (!hash_search_with_hash_value(LockMethodLockHash,
1720 3533362 : &(lock->tag),
1721 : hashcode,
1722 : HASH_REMOVE,
1723 : NULL))
1724 0 : elog(PANIC, "lock table corrupted");
1725 : }
1726 351484 : else if (wakeupNeeded)
1727 : {
1728 : /* There are waiters on this lock, so wake them up. */
1729 2276 : ProcLockWakeup(lockMethodTable, lock);
1730 : }
1731 3884846 : }
1732 :
1733 : /*
1734 : * GrantLockLocal -- update the locallock data structures to show
1735 : * the lock request has been granted.
1736 : *
1737 : * We expect that LockAcquire made sure there is room to add a new
1738 : * ResourceOwner entry.
1739 : */
1740 : static void
1741 34415516 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1742 : {
1743 34415516 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1744 : int i;
1745 :
1746 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1747 : /* Count the total */
1748 34415516 : locallock->nLocks++;
1749 : /* Count the per-owner lock */
1750 35672548 : for (i = 0; i < locallock->numLockOwners; i++)
1751 : {
1752 3894274 : if (lockOwners[i].owner == owner)
1753 : {
1754 2637242 : lockOwners[i].nLocks++;
1755 2637242 : return;
1756 : }
1757 : }
1758 31778274 : lockOwners[i].owner = owner;
1759 31778274 : lockOwners[i].nLocks = 1;
1760 31778274 : locallock->numLockOwners++;
1761 31778274 : if (owner != NULL)
1762 31632056 : ResourceOwnerRememberLock(owner, locallock);
1763 :
1764 : /* Indicate that the lock is acquired for certain types of locks. */
1765 31778274 : CheckAndSetLockHeld(locallock, true);
1766 : }
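/*
 * Illustrative sketch (not part of lock.c) of the per-owner bookkeeping
 * above: each LOCALLOCK keeps a small array of (owner, count) pairs,
 * bumping an existing entry or appending a new one.  Hypothetical demo_*
 * types; like the real code, it assumes the caller checked there is room:
 */
#include <stdint.h>

typedef struct
{
	void	   *owner;			/* NULL means a session-level hold */
	int64_t		nLocks;
} DemoLockOwner;

static void
demo_grant_local(DemoLockOwner *owners, int *numOwners, void *owner)
{
	for (int i = 0; i < *numOwners; i++)
	{
		if (owners[i].owner == owner)
		{
			owners[i].nLocks++;	/* existing entry: just bump the count */
			return;
		}
	}
	owners[*numOwners].owner = owner;	/* new owner: append a slot */
	owners[*numOwners].nLocks = 1;
	(*numOwners)++;
}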
1767 :
1768 : /*
1769 : * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1770 : * and arrange for error cleanup if it fails
1771 : */
1772 : static void
1773 347416 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1774 : {
1775 : Assert(StrongLockInProgress == NULL);
1776 : Assert(locallock->holdsStrongLockCount == false);
1777 :
1778 : /*
1779 : * Adding to a memory location is not atomic, so we take a spinlock to
1780 : * ensure we don't collide with someone else trying to bump the count at
1781 : * the same time.
1782 : *
1783 : * XXX: It might be worth considering using an atomic fetch-and-add
1784 : * instruction here, on architectures where that is supported.
1785 : */
1786 :
1787 347416 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1788 347416 : FastPathStrongRelationLocks->count[fasthashcode]++;
1789 347416 : locallock->holdsStrongLockCount = true;
1790 347416 : StrongLockInProgress = locallock;
1791 347416 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1792 347416 : }
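/*
 * A hypothetical sketch of the XXX above, using the pg_atomic_* API from
 * port/atomics.h: the bump could in principle be a single fetch-and-add.
 * This assumes count[] were redeclared as pg_atomic_uint32, which it is
 * not today:
 */
#include "postgres.h"
#include "port/atomics.h"

static inline void
demo_bump_strong_count(pg_atomic_uint32 *counter)
{
	/* replaces the SpinLockAcquire/SpinLockRelease pair around the ++ */
	pg_atomic_fetch_add_u32(counter, 1);
}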
1793 :
1794 : /*
1795 : * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1796 : * acquisition once it's no longer needed
1797 : */
1798 : static void
1799 3947414 : FinishStrongLockAcquire(void)
1800 : {
1801 3947414 : StrongLockInProgress = NULL;
1802 3947414 : }
1803 :
1804 : /*
1805 : * AbortStrongLockAcquire - undo strong lock state changes performed by
1806 : * BeginStrongLockAcquire.
1807 : */
1808 : void
1809 798796 : AbortStrongLockAcquire(void)
1810 : {
1811 : uint32 fasthashcode;
1812 798796 : LOCALLOCK *locallock = StrongLockInProgress;
1813 :
1814 798796 : if (locallock == NULL)
1815 798370 : return;
1816 :
1817 426 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1818 : Assert(locallock->holdsStrongLockCount == true);
1819 426 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1820 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1821 426 : FastPathStrongRelationLocks->count[fasthashcode]--;
1822 426 : locallock->holdsStrongLockCount = false;
1823 426 : StrongLockInProgress = NULL;
1824 426 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1825 : }
1826 :
1827 : /*
1828 : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1829 : * WaitOnLock on.
1830 : *
1831 : * proc.c needs this for the case where we are booted off the lock by
1832 : * timeout, but discover that someone granted us the lock anyway.
1833 : *
1834 : * We could just export GrantLockLocal, but that would require including
1835 : * resowner.h in lock.h, which creates circularity.
1836 : */
1837 : void
1838 4 : GrantAwaitedLock(void)
1839 : {
1840 4 : GrantLockLocal(awaitedLock, awaitedOwner);
1841 4 : }
1842 :
1843 : /*
1844 : * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1845 : */
1846 : LOCALLOCK *
1847 797480 : GetAwaitedLock(void)
1848 : {
1849 797480 : return awaitedLock;
1850 : }
1851 :
1852 : /*
1853 : * MarkLockClear -- mark an acquired lock as "clear"
1854 : *
1855 : * This means that we know we have absorbed all sinval messages that other
1856 : * sessions generated before we acquired this lock, and so we can confidently
1857 : * assume we know about any catalog changes protected by this lock.
1858 : */
1859 : void
1860 29712094 : MarkLockClear(LOCALLOCK *locallock)
1861 : {
1862 : Assert(locallock->nLocks > 0);
1863 29712094 : locallock->lockCleared = true;
1864 29712094 : }
1865 :
1866 : /*
1867 : * WaitOnLock -- wait to acquire a lock
1868 : *
1869 : * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1870 : */
1871 : static ProcWaitStatus
1872 2316 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1873 : {
1874 : ProcWaitStatus result;
1875 :
1876 : TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1877 : locallock->tag.lock.locktag_field2,
1878 : locallock->tag.lock.locktag_field3,
1879 : locallock->tag.lock.locktag_field4,
1880 : locallock->tag.lock.locktag_type,
1881 : locallock->tag.mode);
1882 :
1883 : /* adjust the process title to indicate that it's waiting */
1884 2316 : set_ps_display_suffix("waiting");
1885 :
1886 : /*
1887 : * Record the fact that we are waiting for a lock, so that
1888 : * LockErrorCleanup will clean up if cancel/die happens.
1889 : */
1890 2316 : awaitedLock = locallock;
1891 2316 : awaitedOwner = owner;
1892 :
1893 : /*
1894 : * NOTE: Think not to put any shared-state cleanup after the call to
1895 : * ProcSleep, in either the normal or failure path. The lock state must
1896 : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1897 : * waiting for the lock. This is necessary because of the possibility
1898 : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1899 : * grants us the lock, but before we've noticed it. Hence, after granting,
1900 : * the locktable state must fully reflect the fact that we own the lock;
1901 : * we can't do additional work on return.
1902 : *
1903 : * We can and do use a PG_TRY block to try to clean up after failure, but
1904 : * this still has a major limitation: elog(FATAL) can occur while waiting
1905 : * (eg, a "die" interrupt), and then control won't come back here. So all
1906 : * cleanup of essential state should happen in LockErrorCleanup, not here.
1907 : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1908 : * is unimportant if the process exits.
1909 : */
1910 2316 : PG_TRY();
1911 : {
1912 2316 : result = ProcSleep(locallock);
1913 : }
1914 74 : PG_CATCH();
1915 : {
1916 : /* In this path, awaitedLock remains set until LockErrorCleanup */
1917 :
1918 : /* reset ps display to remove the suffix */
1919 74 : set_ps_display_remove_suffix();
1920 :
1921 : /* and propagate the error */
1922 74 : PG_RE_THROW();
1923 : }
1924 2232 : PG_END_TRY();
1925 :
1926 : /*
1927 : * We no longer want LockErrorCleanup to do anything.
1928 : */
1929 2232 : awaitedLock = NULL;
1930 :
1931 : /* reset ps display to remove the suffix */
1932 2232 : set_ps_display_remove_suffix();
1933 :
1934 : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
1935 : locallock->tag.lock.locktag_field2,
1936 : locallock->tag.lock.locktag_field3,
1937 : locallock->tag.lock.locktag_field4,
1938 : locallock->tag.lock.locktag_type,
1939 : locallock->tag.mode);
1940 :
1941 2232 : return result;
1942 : }
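/*
 * Illustrative sketch (not part of lock.c) of the cleanup discipline
 * above: essential state must be recoverable by LockErrorCleanup, so
 * PG_CATCH may only undo cosmetic state (elog(FATAL) bypasses it
 * entirely).  A minimal, hypothetical example:
 */
#include "postgres.h"
#include "utils/ps_status.h"

static void
demo_wait_with_cosmetic_cleanup(void)
{
	set_ps_display_suffix("waiting");

	PG_TRY();
	{
		/* ... sleep until the lock is granted; cancel/die may throw ... */
	}
	PG_CATCH();
	{
		/* only unimportant state here; a FATAL exit would skip this */
		set_ps_display_remove_suffix();
		PG_RE_THROW();
	}
	PG_END_TRY();

	set_ps_display_remove_suffix();
}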
1943 :
1944 : /*
1945 : * Remove a proc from the wait-queue it is on (caller must know it is on one).
1946 : * This is only used when the proc has failed to get the lock, so we set its
1947 : * waitStatus to PROC_WAIT_STATUS_ERROR.
1948 : *
1949 : * Appropriate partition lock must be held by caller. Also, caller is
1950 : * responsible for signaling the proc if needed.
1951 : *
1952 : * NB: this does not clean up any locallock object that may exist for the lock.
1953 : */
1954 : void
1955 92 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1956 : {
1957 92 : LOCK *waitLock = proc->waitLock;
1958 92 : PROCLOCK *proclock = proc->waitProcLock;
1959 92 : LOCKMODE lockmode = proc->waitLockMode;
1960 92 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1961 :
1962 : /* Make sure proc is waiting */
1963 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1964 : Assert(proc->links.next != NULL);
1965 : Assert(waitLock);
1966 : Assert(!dclist_is_empty(&waitLock->waitProcs));
1967 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1968 :
1969 : /* Remove proc from lock's wait queue */
1970 92 : dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1971 :
1972 : /* Undo increments of request counts by waiting process */
1973 : Assert(waitLock->nRequested > 0);
1974 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
1975 92 : waitLock->nRequested--;
1976 : Assert(waitLock->requested[lockmode] > 0);
1977 92 : waitLock->requested[lockmode]--;
1978 : /* don't forget to clear waitMask bit if appropriate */
1979 92 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1980 92 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1981 :
1982 : /* Clean up the proc's own state, and pass it the ok/fail signal */
1983 92 : proc->waitLock = NULL;
1984 92 : proc->waitProcLock = NULL;
1985 92 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1986 :
1987 : /*
1988 : * Delete the proclock immediately if it represents no already-held locks.
1989 : * (This must happen now because if the owner of the lock decides to
1990 : * release it, and the requested/granted counts then go to zero,
1991 : * LockRelease expects there to be no remaining proclocks.) Then see if
1992 : * any other waiters for the lock can be woken up now.
1993 : */
1994 92 : CleanUpLock(waitLock, proclock,
1995 : LockMethods[lockmethodid], hashcode,
1996 : true);
1997 92 : }
1998 :
1999 : /*
2000 : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2001 : * Release a session lock if 'sessionLock' is true, else release a
2002 : * regular transaction lock.
2003 : *
2004 : * Side Effects: find any waiting processes that are now wakable,
2005 : * grant them their requested locks and awaken them.
2006 : * (We have to grant the lock here to avoid a race between
2007 : * the waking process and any new process that comes
2008 : * along and requests the lock.)
2009 : */
2010 : bool
2011 30533190 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2012 : {
2013 30533190 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2014 : LockMethod lockMethodTable;
2015 : LOCALLOCKTAG localtag;
2016 : LOCALLOCK *locallock;
2017 : LOCK *lock;
2018 : PROCLOCK *proclock;
2019 : LWLock *partitionLock;
2020 : bool wakeupNeeded;
2021 :
2022 30533190 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2023 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2024 30533190 : lockMethodTable = LockMethods[lockmethodid];
2025 30533190 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2026 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2027 :
2028 : #ifdef LOCK_DEBUG
2029 : if (LOCK_DEBUG_ENABLED(locktag))
2030 : elog(LOG, "LockRelease: lock [%u,%u] %s",
2031 : locktag->locktag_field1, locktag->locktag_field2,
2032 : lockMethodTable->lockModeNames[lockmode]);
2033 : #endif
2034 :
2035 : /*
2036 : * Find the LOCALLOCK entry for this lock and lockmode
2037 : */
2038 30533190 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2039 30533190 : localtag.lock = *locktag;
2040 30533190 : localtag.mode = lockmode;
2041 :
2042 30533190 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2043 : &localtag,
2044 : HASH_FIND, NULL);
2045 :
2046 : /*
2047 : * let the caller print its own error message, too. Do not ereport(ERROR).
2048 : */
2049 30533190 : if (!locallock || locallock->nLocks <= 0)
2050 : {
2051 26 : elog(WARNING, "you don't own a lock of type %s",
2052 : lockMethodTable->lockModeNames[lockmode]);
2053 26 : return false;
2054 : }
2055 :
2056 : /*
2057 : * Decrease the count for the resource owner.
2058 : */
2059 : {
2060 30533164 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2061 : ResourceOwner owner;
2062 : int i;
2063 :
2064 : /* Identify owner for lock */
2065 30533164 : if (sessionLock)
2066 146198 : owner = NULL;
2067 : else
2068 30386966 : owner = CurrentResourceOwner;
2069 :
2070 30534854 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2071 : {
2072 30534830 : if (lockOwners[i].owner == owner)
2073 : {
2074 : Assert(lockOwners[i].nLocks > 0);
2075 30533140 : if (--lockOwners[i].nLocks == 0)
2076 : {
2077 29521724 : if (owner != NULL)
2078 29375582 : ResourceOwnerForgetLock(owner, locallock);
2079 : /* compact out unused slot */
2080 29521724 : locallock->numLockOwners--;
2081 29521724 : if (i < locallock->numLockOwners)
2082 86 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2083 : }
2084 30533140 : break;
2085 : }
2086 : }
2087 30533164 : if (i < 0)
2088 : {
2089 : /* don't release a lock belonging to another owner */
2090 24 : elog(WARNING, "you don't own a lock of type %s",
2091 : lockMethodTable->lockModeNames[lockmode]);
2092 24 : return false;
2093 : }
2094 : }
2095 :
2096 : /*
2097 : * Decrease the total local count. If we're still holding the lock, we're
2098 : * done.
2099 : */
2100 30533140 : locallock->nLocks--;
2101 :
2102 30533140 : if (locallock->nLocks > 0)
2103 1602830 : return true;
2104 :
2105 : /*
2106 : * At this point we can no longer suppose we are clear of invalidation
2107 : * messages related to this lock. Although we'll delete the LOCALLOCK
2108 : * object before any intentional return from this routine, it seems worth
2109 : * the trouble to explicitly reset lockCleared right now, just in case
2110 : * some error prevents us from deleting the LOCALLOCK.
2111 : */
2112 28930310 : locallock->lockCleared = false;
2113 :
2114 : /* Attempt fast release of any lock eligible for the fast path. */
2115 28930310 : if (EligibleForRelationFastPath(locktag, lockmode) &&
2116 26645426 : FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2117 : {
2118 : bool released;
2119 :
2120 : /*
2121 : * We might not find the lock here, even if we originally entered it
2122 : * here. Another backend may have moved it to the main table.
2123 : */
2124 26236512 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2125 26236512 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2126 : lockmode);
2127 26236512 : LWLockRelease(&MyProc->fpInfoLock);
2128 26236512 : if (released)
2129 : {
2130 25940718 : RemoveLocalLock(locallock);
2131 25940718 : return true;
2132 : }
2133 : }
2134 :
2135 : /*
2136 : * Otherwise we've got to mess with the shared lock table.
2137 : */
2138 2989592 : partitionLock = LockHashPartitionLock(locallock->hashcode);
2139 :
2140 2989592 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2141 :
2142 : /*
2143 : * Normally, we don't need to re-find the lock or proclock, since we kept
2144 : * their addresses in the locallock table, and they couldn't have been
2145 : * removed while we were holding a lock on them. But it's possible that
2146 : * the lock was taken fast-path and has since been moved to the main hash
2147 : * table by another backend, in which case we will need to look up the
2148 : * objects here. We assume the lock field is NULL if so.
2149 : */
2150 2989592 : lock = locallock->lock;
2151 2989592 : if (!lock)
2152 : {
2153 : PROCLOCKTAG proclocktag;
2154 :
2155 : Assert(EligibleForRelationFastPath(locktag, lockmode));
2156 8 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2157 : locktag,
2158 : locallock->hashcode,
2159 : HASH_FIND,
2160 : NULL);
2161 8 : if (!lock)
2162 0 : elog(ERROR, "failed to re-find shared lock object");
2163 8 : locallock->lock = lock;
2164 :
2165 8 : proclocktag.myLock = lock;
2166 8 : proclocktag.myProc = MyProc;
2167 8 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2168 : &proclocktag,
2169 : HASH_FIND,
2170 : NULL);
2171 8 : if (!locallock->proclock)
2172 0 : elog(ERROR, "failed to re-find shared proclock object");
2173 : }
2174 : LOCK_PRINT("LockRelease: found", lock, lockmode);
2175 2989592 : proclock = locallock->proclock;
2176 : PROCLOCK_PRINT("LockRelease: found", proclock);
2177 :
2178 : /*
2179 : * Double-check that we are actually holding a lock of the type we want to
2180 : * release.
2181 : */
2182 2989592 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2183 : {
2184 : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2185 0 : LWLockRelease(partitionLock);
2186 0 : elog(WARNING, "you don't own a lock of type %s",
2187 : lockMethodTable->lockModeNames[lockmode]);
2188 0 : RemoveLocalLock(locallock);
2189 0 : return false;
2190 : }
2191 :
2192 : /*
2193 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2194 : */
2195 2989592 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2196 :
2197 2989592 : CleanUpLock(lock, proclock,
2198 : lockMethodTable, locallock->hashcode,
2199 : wakeupNeeded);
2200 :
2201 2989592 : LWLockRelease(partitionLock);
2202 :
2203 2989592 : RemoveLocalLock(locallock);
2204 2989592 : return true;
2205 : }
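/*
 * Illustrative sketch (not part of lock.c) of the caller-side pairing,
 * normally reached via lmgr.c wrappers such as LockRelationOid.  It
 * assumes a backend context where MyDatabaseId is valid; demo_* is
 * hypothetical:
 */
#include "postgres.h"
#include "miscadmin.h"
#include "storage/lock.h"

static void
demo_lock_then_release(Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

	(void) LockAcquire(&tag, AccessShareLock, false, false);
	/* ... work with the relation ... */
	if (!LockRelease(&tag, AccessShareLock, false))
		elog(WARNING, "lock was not held");
}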
2206 :
2207 : /*
2208 : * LockReleaseAll -- Release all locks of the specified lock method that
2209 : * are held by the current process.
2210 : *
2211 : * Well, not necessarily *all* locks. The available behaviors are:
2212 : * allLocks == true: release all locks including session locks.
2213 : * allLocks == false: release all non-session locks.
2214 : */
2215 : void
2216 1510190 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2217 : {
2218 : HASH_SEQ_STATUS status;
2219 : LockMethod lockMethodTable;
2220 : int i,
2221 : numLockModes;
2222 : LOCALLOCK *locallock;
2223 : LOCK *lock;
2224 : int partition;
2225 1510190 : bool have_fast_path_lwlock = false;
2226 :
2227 1510190 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2228 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2229 1510190 : lockMethodTable = LockMethods[lockmethodid];
2230 :
2231 : #ifdef LOCK_DEBUG
2232 : if (*(lockMethodTable->trace_flag))
2233 : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2234 : #endif
2235 :
2236 : /*
2237 : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2238 : * the only way that the lock we hold on our own VXID can ever get
2239 : * released: it is always and only released when a toplevel transaction
2240 : * ends.
2241 : */
2242 1510190 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2243 741278 : VirtualXactLockTableCleanup();
2244 :
2245 1510190 : numLockModes = lockMethodTable->numLockModes;
2246 :
2247 : /*
2248 : * First we run through the locallock table and get rid of unwanted
2249 : * entries, then we scan the process's proclocks and get rid of those. We
2250 : * do this separately because we may have multiple locallock entries
2251 : * pointing to the same proclock, and we daren't end up with any dangling
2252 : * pointers. Fast-path locks are cleaned up during the locallock table
2253 : * scan, though.
2254 : */
2255 1510190 : hash_seq_init(&status, LockMethodLocalHash);
2256 :
2257 3680070 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2258 : {
2259 : /*
2260 : * If the LOCALLOCK entry is unused, something must've gone wrong
2261 : * while trying to acquire this lock. Just forget the local entry.
2262 : */
2263 2169880 : if (locallock->nLocks == 0)
2264 : {
2265 92 : RemoveLocalLock(locallock);
2266 92 : continue;
2267 : }
2268 :
2269 : /* Ignore items that are not of the lockmethod to be removed */
2270 2169788 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2271 123218 : continue;
2272 :
2273 : /*
2274 : * If we are asked to release all locks, we can just zap the entry.
2275 : * Otherwise, must scan to see if there are session locks. We assume
2276 : * there is at most one lockOwners entry for session locks.
2277 : */
2278 2046570 : if (!allLocks)
2279 : {
2280 1906712 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2281 :
2282 : /* If session lock is above array position 0, move it down to 0 */
2283 3918320 : for (i = 0; i < locallock->numLockOwners; i++)
2284 : {
2285 2011608 : if (lockOwners[i].owner == NULL)
2286 122678 : lockOwners[0] = lockOwners[i];
2287 : else
2288 1888930 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2289 : }
2290 :
2291 1906712 : if (locallock->numLockOwners > 0 &&
2292 1906712 : lockOwners[0].owner == NULL &&
2293 122678 : lockOwners[0].nLocks > 0)
2294 : {
2295 : /* Fix the locallock to show just the session locks */
2296 122678 : locallock->nLocks = lockOwners[0].nLocks;
2297 122678 : locallock->numLockOwners = 1;
2298 : /* We aren't deleting this locallock, so done */
2299 122678 : continue;
2300 : }
2301 : else
2302 1784034 : locallock->numLockOwners = 0;
2303 : }
2304 :
2305 : #ifdef USE_ASSERT_CHECKING
2306 :
2307 : /*
2308 : * Tuple locks are currently held only for short durations within a
2309 : * transaction. Check that we didn't forget to release one.
2310 : */
2311 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2312 : elog(WARNING, "tuple lock held at commit");
2313 : #endif
2314 :
2315 : /*
2316 : * If the lock or proclock pointers are NULL, this lock was taken via
2317 : * the relation fast-path (and is not known to have been transferred).
2318 : */
2319 1923892 : if (locallock->proclock == NULL || locallock->lock == NULL)
2320 : {
2321 967000 : LOCKMODE lockmode = locallock->tag.mode;
2322 : Oid relid;
2323 :
2324 : /* Verify that a fast-path lock is what we've got. */
2325 967000 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2326 0 : elog(PANIC, "locallock table corrupted");
2327 :
2328 : /*
2329 : * If we don't currently hold the LWLock that protects our
2330 : * fast-path data structures, we must acquire it before attempting
2331 : * to release the lock via the fast-path. We will continue to
2332 : * hold the LWLock until we're done scanning the locallock table,
2333 : * unless we hit a transferred fast-path lock. (XXX is this
2334 : * really such a good idea? There could be a lot of entries ...)
2335 : */
2336 967000 : if (!have_fast_path_lwlock)
2337 : {
2338 326430 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2339 326430 : have_fast_path_lwlock = true;
2340 : }
2341 :
2342 : /* Attempt fast-path release. */
2343 967000 : relid = locallock->tag.lock.locktag_field2;
2344 967000 : if (FastPathUnGrantRelationLock(relid, lockmode))
2345 : {
2346 964936 : RemoveLocalLock(locallock);
2347 964936 : continue;
2348 : }
2349 :
2350 : /*
2351 : * Our lock, originally taken via the fast path, has been
2352 : * transferred to the main lock table. That's going to require
2353 : * some extra work, so release our fast-path lock before starting.
2354 : */
2355 2064 : LWLockRelease(&MyProc->fpInfoLock);
2356 2064 : have_fast_path_lwlock = false;
2357 :
2358 : /*
2359 : * Now dump the lock. We haven't got a pointer to the LOCK or
2360 : * PROCLOCK in this case, so we have to handle this a bit
2361 : * differently than a normal lock release. Unfortunately, this
2362 : * requires an extra LWLock acquire-and-release cycle on the
2363 : * partitionLock, but hopefully it shouldn't happen often.
2364 : */
2365 2064 : LockRefindAndRelease(lockMethodTable, MyProc,
2366 : &locallock->tag.lock, lockmode, false);
2367 2064 : RemoveLocalLock(locallock);
2368 2064 : continue;
2369 : }
2370 :
2371 : /* Mark the proclock to show we need to release this lockmode */
2372 956892 : if (locallock->nLocks > 0)
2373 956892 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2374 :
2375 : /* And remove the locallock hashtable entry */
2376 956892 : RemoveLocalLock(locallock);
2377 : }
2378 :
2379 : /* Done with the fast-path data structures */
2380 1510190 : if (have_fast_path_lwlock)
2381 324366 : LWLockRelease(&MyProc->fpInfoLock);
2382 :
2383 : /*
2384 : * Now, scan each lock partition separately.
2385 : */
2386 25673230 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2387 : {
2388 : LWLock *partitionLock;
2389 24163040 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2390 : dlist_mutable_iter proclock_iter;
2391 :
2392 24163040 : partitionLock = LockHashPartitionLockByIndex(partition);
2393 :
2394 : /*
2395 : * If the proclock list for this partition is empty, we can skip
2396 : * acquiring the partition lock. This optimization is trickier than
2397 : * it looks, because another backend could be in the process of adding
2398 : * something to our proclock list due to promoting one of our
2399 : * fast-path locks. However, any such lock must be one that we
2400 : * decided not to delete above, so it's okay to skip it again now;
2401 : * we'd just decide not to delete it again. We must, however, be
2402 : * careful to re-fetch the list header once we've acquired the
2403 : * partition lock, to be sure we have a valid, up-to-date pointer.
2404 : * (There is probably no significant risk if pointer fetch/store is
2405 : * atomic, but we don't wish to assume that.)
2406 : *
2407 : * XXX This argument assumes that the locallock table correctly
2408 : * represents all of our fast-path locks. While allLocks mode
2409 : * guarantees to clean up all of our normal locks regardless of the
2410 : * locallock situation, we lose that guarantee for fast-path locks.
2411 : * This is not ideal.
2412 : */
2413 24163040 : if (dlist_is_empty(procLocks))
2414 23228786 : continue; /* needn't examine this partition */
2415 :
2416 934254 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2417 :
2418 2069688 : dlist_foreach_modify(proclock_iter, procLocks)
2419 : {
2420 1135434 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2421 1135434 : bool wakeupNeeded = false;
2422 :
2423 : Assert(proclock->tag.myProc == MyProc);
2424 :
2425 1135434 : lock = proclock->tag.myLock;
2426 :
2427 : /* Ignore items that are not of the lockmethod to be removed */
2428 1135434 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2429 123212 : continue;
2430 :
2431 : /*
2432 : * In allLocks mode, force release of all locks even if locallock
2433 : * table had problems
2434 : */
2435 1012222 : if (allLocks)
2436 75024 : proclock->releaseMask = proclock->holdMask;
2437 : else
2438 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2439 :
2440 : /*
2441 : * Ignore items that have nothing to be released, unless they have
2442 : * holdMask == 0 and are therefore recyclable
2443 : */
2444 1012222 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2445 121326 : continue;
2446 :
2447 : PROCLOCK_PRINT("LockReleaseAll", proclock);
2448 : LOCK_PRINT("LockReleaseAll", lock, 0);
2449 : Assert(lock->nRequested >= 0);
2450 : Assert(lock->nGranted >= 0);
2451 : Assert(lock->nGranted <= lock->nRequested);
2452 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2453 :
2454 : /*
2455 : * Release the previously-marked lock modes
2456 : */
2457 8018064 : for (i = 1; i <= numLockModes; i++)
2458 : {
2459 7127168 : if (proclock->releaseMask & LOCKBIT_ON(i))
2460 956892 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2461 : lockMethodTable);
2462 : }
2463 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2464 : Assert(lock->nGranted <= lock->nRequested);
2465 : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2466 :
2467 890896 : proclock->releaseMask = 0;
2468 :
2469 : /* CleanUpLock will wake up waiters if needed. */
2470 890896 : CleanUpLock(lock, proclock,
2471 : lockMethodTable,
2472 890896 : LockTagHashCode(&lock->tag),
2473 : wakeupNeeded);
2474 : } /* loop over PROCLOCKs within this partition */
2475 :
2476 934254 : LWLockRelease(partitionLock);
2477 : } /* loop over partitions */
2478 :
2479 : #ifdef LOCK_DEBUG
2480 : if (*(lockMethodTable->trace_flag))
2481 : elog(LOG, "LockReleaseAll done");
2482 : #endif
2483 1510190 : }
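/*
 * Illustrative sketch (not part of lock.c) of the two-pass shape above,
 * with hypothetical demo_* types: pass 1 walks the private table and only
 * marks what to release; pass 2 walks the shared per-partition lists and
 * actually releases, so no dangling pointers are followed.
 */
#include <stdint.h>

typedef struct DemoProcLock
{
	uint32_t	holdMask;
	uint32_t	releaseMask;
} DemoProcLock;

/* pass 1: mark (called once per locallock entry) */
static void
demo_mark_for_release(DemoProcLock *proclock, int mode)
{
	proclock->releaseMask |= (1U << mode);
}

/* pass 2: sweep (called once per proclock, under the partition lock) */
static void
demo_sweep(DemoProcLock *proclock, int numModes)
{
	for (int i = 1; i <= numModes; i++)
		if (proclock->releaseMask & (1U << i))
			proclock->holdMask &= ~(1U << i);	/* actually release */
	proclock->releaseMask = 0;
}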
2484 :
2485 : /*
2486 : * LockReleaseSession -- Release all session locks of the specified lock method
2487 : * that are held by the current process.
2488 : */
2489 : void
2490 238 : LockReleaseSession(LOCKMETHODID lockmethodid)
2491 : {
2492 : HASH_SEQ_STATUS status;
2493 : LOCALLOCK *locallock;
2494 :
2495 238 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2496 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2497 :
2498 238 : hash_seq_init(&status, LockMethodLocalHash);
2499 :
2500 452 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2501 : {
2502 : /* Ignore items that are not of the specified lock method */
2503 214 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2504 20 : continue;
2505 :
2506 194 : ReleaseLockIfHeld(locallock, true);
2507 : }
2508 238 : }
2509 :
2510 : /*
2511 : * LockReleaseCurrentOwner
2512 : * Release all locks belonging to CurrentResourceOwner
2513 : *
2514 : * If the caller knows what those locks are, it can pass them as an array.
2515 : * That speeds up the call significantly, when a lot of locks are held.
2516 : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2517 : * table to find them.
2518 : */
2519 : void
2520 9524 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2521 : {
2522 9524 : if (locallocks == NULL)
2523 : {
2524 : HASH_SEQ_STATUS status;
2525 : LOCALLOCK *locallock;
2526 :
2527 8 : hash_seq_init(&status, LockMethodLocalHash);
2528 :
2529 544 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2530 536 : ReleaseLockIfHeld(locallock, false);
2531 : }
2532 : else
2533 : {
2534 : int i;
2535 :
2536 14562 : for (i = nlocks - 1; i >= 0; i--)
2537 5046 : ReleaseLockIfHeld(locallocks[i], false);
2538 : }
2539 9524 : }
2540 :
2541 : /*
2542 : * ReleaseLockIfHeld
2543 : * Release any session-level locks on this lockable object if sessionLock
2544 : * is true; else, release any locks held by CurrentResourceOwner.
2545 : *
2546 : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2547 : * locks), but without refactoring LockRelease() we cannot support releasing
2548 : * locks belonging to resource owners other than CurrentResourceOwner.
2549 : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2550 : * do a hashtable lookup of the locallock, too. However, currently this
2551 : * function isn't used heavily enough to justify refactoring for its
2552 : * convenience.
2553 : */
2554 : static void
2555 5776 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2556 : {
2557 : ResourceOwner owner;
2558 : LOCALLOCKOWNER *lockOwners;
2559 : int i;
2560 :
2561 : /* Identify owner for lock (must match LockRelease!) */
2562 5776 : if (sessionLock)
2563 194 : owner = NULL;
2564 : else
2565 5582 : owner = CurrentResourceOwner;
2566 :
2567 : /* Scan to see if there are any locks belonging to the target owner */
2568 5776 : lockOwners = locallock->lockOwners;
2569 6162 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2570 : {
2571 5776 : if (lockOwners[i].owner == owner)
2572 : {
2573 : Assert(lockOwners[i].nLocks > 0);
2574 5390 : if (lockOwners[i].nLocks < locallock->nLocks)
2575 : {
2576 : /*
2577 : * We will still hold this lock after forgetting this
2578 : * ResourceOwner.
2579 : */
2580 1372 : locallock->nLocks -= lockOwners[i].nLocks;
2581 : /* compact out unused slot */
2582 1372 : locallock->numLockOwners--;
2583 1372 : if (owner != NULL)
2584 1372 : ResourceOwnerForgetLock(owner, locallock);
2585 1372 : if (i < locallock->numLockOwners)
2586 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2587 : }
2588 : else
2589 : {
2590 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2591 : /* We want to call LockRelease just once */
2592 4018 : lockOwners[i].nLocks = 1;
2593 4018 : locallock->nLocks = 1;
2594 4018 : if (!LockRelease(&locallock->tag.lock,
2595 : locallock->tag.mode,
2596 : sessionLock))
2597 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2598 : }
2599 5390 : break;
2600 : }
2601 : }
2602 5776 : }
2603 :
2604 : /*
2605 : * LockReassignCurrentOwner
2606 : * Reassign all locks belonging to CurrentResourceOwner to belong
2607 : * to its parent resource owner.
2608 : *
2609 : * If the caller knows what those locks are, it can pass them as an array.
2610 : * That speeds up the call significantly, when a lot of locks are held
2611 : * (e.g. pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2612 : * and we'll traverse through our hash table to find them.
2613 : */
2614 : void
2615 642822 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2616 : {
2617 642822 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2618 :
2619 : Assert(parent != NULL);
2620 :
2621 642822 : if (locallocks == NULL)
2622 : {
2623 : HASH_SEQ_STATUS status;
2624 : LOCALLOCK *locallock;
2625 :
2626 6700 : hash_seq_init(&status, LockMethodLocalHash);
2627 :
2628 195486 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2629 188786 : LockReassignOwner(locallock, parent);
2630 : }
2631 : else
2632 : {
2633 : int i;
2634 :
2635 1363020 : for (i = nlocks - 1; i >= 0; i--)
2636 726898 : LockReassignOwner(locallocks[i], parent);
2637 : }
2638 642822 : }
2639 :
2640 : /*
2641 : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2642 : * CurrentResourceOwner to its parent.
2643 : */
2644 : static void
2645 915684 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2646 : {
2647 : LOCALLOCKOWNER *lockOwners;
2648 : int i;
2649 915684 : int ic = -1;
2650 915684 : int ip = -1;
2651 :
2652 : /*
2653 : * Scan to see if there are any locks belonging to current owner or its
2654 : * parent
2655 : */
2656 915684 : lockOwners = locallock->lockOwners;
2657 2138234 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2658 : {
2659 1222550 : if (lockOwners[i].owner == CurrentResourceOwner)
2660 884542 : ic = i;
2661 338008 : else if (lockOwners[i].owner == parent)
2662 254302 : ip = i;
2663 : }
2664 :
2665 915684 : if (ic < 0)
2666 31142 : return; /* no current locks */
2667 :
2668 884542 : if (ip < 0)
2669 : {
2670 : /* Parent has no slot, so just give it the child's slot */
2671 661320 : lockOwners[ic].owner = parent;
2672 661320 : ResourceOwnerRememberLock(parent, locallock);
2673 : }
2674 : else
2675 : {
2676 : /* Merge child's count with parent's */
2677 223222 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2678 : /* compact out unused slot */
2679 223222 : locallock->numLockOwners--;
2680 223222 : if (ic < locallock->numLockOwners)
2681 1418 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2682 : }
2683 884542 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2684 : }
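/*
 * Illustrative sketch (not part of lock.c) of the reassignment step above,
 * isolated with hypothetical demo_* types: either hand the child's slot to
 * the parent, or fold the child's count into the parent's slot and compact
 * the array.
 */
#include <stdint.h>

typedef struct
{
	void	   *owner;
	int64_t		nLocks;
} DemoOwnerSlot;

static void
demo_reassign_owner(DemoOwnerSlot *owners, int *numOwners,
					int ic, int ip, void *parent)
{
	if (ip < 0)
		owners[ic].owner = parent;	/* parent had no slot: take the child's */
	else
	{
		owners[ip].nLocks += owners[ic].nLocks; /* merge the counts */
		(*numOwners)--;
		if (ic < *numOwners)
			owners[ic] = owners[*numOwners];	/* compact out the hole */
	}
}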
2685 :
2686 : /*
2687 : * FastPathGrantRelationLock
2688 : * Grant lock using per-backend fast-path array, if there is space.
2689 : */
2690 : static bool
2691 26908482 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2692 : {
2693 : uint32 i;
2694 26908482 : uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2695 :
2696 : /* fast-path group the lock belongs to */
2697 26908482 : uint32 group = FAST_PATH_REL_GROUP(relid);
2698 :
2699 : /* Scan for existing entry for this relid, remembering empty slot. */
2700 456426950 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2701 : {
2702 : /* index into the whole per-backend array */
2703 430252176 : uint32 f = FAST_PATH_SLOT(group, i);
2704 :
2705 430252176 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2706 415691618 : unused_slot = f;
2707 14560558 : else if (MyProc->fpRelId[f] == relid)
2708 : {
2709 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2710 733708 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2711 733708 : return true;
2712 : }
2713 : }
2714 :
2715 : /* If no existing entry, use any empty slot. */
2716 26174774 : if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2717 : {
2718 26174774 : MyProc->fpRelId[unused_slot] = relid;
2719 26174774 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2720 26174774 : ++FastPathLocalUseCounts[group];
2721 26174774 : return true;
2722 : }
2723 :
2724 : /* No existing entry, and no empty slot. */
2725 0 : return false;
2726 : }
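/*
 * Illustrative sketch (not part of lock.c): the scan-and-remember-an-
 * empty-slot pattern above, in isolation.  Hypothetical sizes and demo_*
 * names, not the real fpRelId layout:
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_SLOTS 16

typedef struct
{
	uint32_t	relid[DEMO_SLOTS];
	uint8_t		bits[DEMO_SLOTS];	/* zero means the slot is free */
} DemoFastPath;

static bool
demo_fast_path_grant(DemoFastPath *fp, uint32_t relid, uint8_t modebit)
{
	int			unused = -1;

	for (int i = 0; i < DEMO_SLOTS; i++)
	{
		if (fp->bits[i] == 0)
			unused = i;			/* remember a free slot as we go */
		else if (fp->relid[i] == relid)
		{
			fp->bits[i] |= modebit; /* existing entry: just add the mode */
			return true;
		}
	}
	if (unused >= 0)
	{
		fp->relid[unused] = relid;
		fp->bits[unused] = modebit;
		return true;
	}
	return false;				/* full: caller falls back to the shared table */
}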
2727 :
2728 : /*
2729 : * FastPathUnGrantRelationLock
2730 : * Release fast-path lock, if present. Update backend-private local
2731 : * use count, while we're at it.
2732 : */
2733 : static bool
2734 27203512 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2735 : {
2736 : uint32 i;
2737 27203512 : bool result = false;
2738 :
2739 : /* fast-path group the lock belongs to */
2740 27203512 : uint32 group = FAST_PATH_REL_GROUP(relid);
2741 :
2742 27203512 : FastPathLocalUseCounts[group] = 0;
2743 462459704 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2744 : {
2745 : /* index into the whole per-backend array */
2746 435256192 : uint32 f = FAST_PATH_SLOT(group, i);
2747 :
2748 435256192 : if (MyProc->fpRelId[f] == relid
2749 37315608 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2750 : {
2751 : Assert(!result);
2752 26905654 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2753 26905654 : result = true;
2754 : /* we continue iterating so as to update FastPathLocalUseCount */
2755 : /* we continue iterating so as to update FastPathLocalUseCounts */
2756 435256192 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2757 18762216 : ++FastPathLocalUseCounts[group];
2758 : }
2759 27203512 : return result;
2760 : }
2761 :
2762 : /*
2763 : * FastPathTransferRelationLocks
2764 : * Transfer locks matching the given lock tag from per-backend fast-path
2765 : * arrays to the shared hash table.
2766 : *
2767 : * Returns true if successful, false if ran out of shared memory.
2768 : */
2769 : static bool
2770 347416 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2771 : uint32 hashcode)
2772 : {
2773 347416 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
2774 347416 : Oid relid = locktag->locktag_field2;
2775 : uint32 i;
2776 :
2777 : /*
2778 : * Every PGPROC that can potentially hold a fast-path lock is present in
2779 : * ProcGlobal->allProcs. Prepared transactions are not, but any
2780 : * outstanding fast-path locks held by prepared transactions are
2781 : * transferred to the main lock table.
2782 : */
2783 36021050 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2784 : {
2785 35673634 : PGPROC *proc = &ProcGlobal->allProcs[i];
2786 : uint32 j,
2787 : group;
2788 :
2789 35673634 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2790 :
2791 : /*
2792 : * If the target backend isn't referencing the same database as the
2793 : * lock, then we needn't examine the individual relation IDs at all;
2794 : * none of them can be relevant.
2795 : *
2796 : * proc->databaseId is set at backend startup time and never changes
2797 : * thereafter, so it might be safe to perform this test before
2798 : * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2799 : * assume that if the target backend holds any fast-path locks, it
2800 : * must have performed a memory-fencing operation (in particular, an
2801 : * LWLock acquisition) since setting proc->databaseId. However, it's
2802 : * less clear that our backend is certain to have performed a memory
2803 : * fencing operation since the other backend set proc->databaseId. So
2804 : * for now, we test it after acquiring the LWLock just to be safe.
2805 : */
2806 35673634 : if (proc->databaseId != locktag->locktag_field1)
2807 : {
2808 17984102 : LWLockRelease(&proc->fpInfoLock);
2809 17984102 : continue;
2810 : }
2811 :
2812 : /* fast-path group the lock belongs to */
2813 17689532 : group = FAST_PATH_REL_GROUP(relid);
2814 :
2815 300719920 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2816 : {
2817 : uint32 lockmode;
2818 :
2819 : /* index into the whole per-backend array */
2820 283032364 : uint32 f = FAST_PATH_SLOT(group, j);
2821 :
2822 : /* Look for an allocated slot matching the given relid. */
2823 283032364 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2824 283030388 : continue;
2825 :
2826 : /* Find or create lock object. */
2827 1976 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2828 7904 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2829 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2830 5928 : ++lockmode)
2831 : {
2832 : PROCLOCK *proclock;
2833 :
2834 5928 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2835 3840 : continue;
2836 2088 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2837 : hashcode, lockmode);
2838 2088 : if (!proclock)
2839 : {
2840 0 : LWLockRelease(partitionLock);
2841 0 : LWLockRelease(&proc->fpInfoLock);
2842 0 : return false;
2843 : }
2844 2088 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2845 2088 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2846 : }
2847 1976 : LWLockRelease(partitionLock);
2848 :
2849 : /* No need to examine remaining slots. */
2850 1976 : break;
2851 : }
2852 17689532 : LWLockRelease(&proc->fpInfoLock);
2853 : }
2854 347416 : return true;
2855 : }
2856 :
2857 : /*
2858 : * FastPathGetRelationLockEntry
2859 : * Return the PROCLOCK for a lock originally taken via the fast-path,
2860 : * transferring it to the primary lock table if necessary.
2861 : *
2862 : * Note: caller takes care of updating the locallock object.
2863 : */
2864 : static PROCLOCK *
2865 756 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2866 : {
2867 756 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2868 756 : LOCKTAG *locktag = &locallock->tag.lock;
2869 756 : PROCLOCK *proclock = NULL;
2870 756 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2871 756 : Oid relid = locktag->locktag_field2;
2872 : uint32 i,
2873 : group;
2874 :
2875 : /* fast-path group the lock belongs to */
2876 756 : group = FAST_PATH_REL_GROUP(relid);
2877 :
2878 756 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2879 :
2880 12100 : for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2881 : {
2882 : uint32 lockmode;
2883 :
2884 : /* index into the whole per-backend array */
2885 12084 : uint32 f = FAST_PATH_SLOT(group, i);
2886 :
2887 : /* Look for an allocated slot matching the given relid. */
2888 12084 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2889 11344 : continue;
2890 :
2891 : /* If we don't have a lock of the given mode, forget it! */
2892 740 : lockmode = locallock->tag.mode;
2893 740 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2894 0 : break;
2895 :
2896 : /* Find or create lock object. */
2897 740 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2898 :
2899 740 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2900 : locallock->hashcode, lockmode);
2901 740 : if (!proclock)
2902 : {
2903 0 : LWLockRelease(partitionLock);
2904 0 : LWLockRelease(&MyProc->fpInfoLock);
2905 0 : ereport(ERROR,
2906 : (errcode(ERRCODE_OUT_OF_MEMORY),
2907 : errmsg("out of shared memory"),
2908 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
2909 : }
2910 740 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2911 740 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2912 :
2913 740 : LWLockRelease(partitionLock);
2914 :
2915 : /* No need to examine remaining slots. */
2916 740 : break;
2917 : }
2918 :
2919 756 : LWLockRelease(&MyProc->fpInfoLock);
2920 :
2921 : /* Lock may have already been transferred by some other backend. */
2922 756 : if (proclock == NULL)
2923 : {
2924 : LOCK *lock;
2925 : PROCLOCKTAG proclocktag;
2926 : uint32 proclock_hashcode;
2927 :
2928 16 : LWLockAcquire(partitionLock, LW_SHARED);
2929 :
2930 16 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2931 : locktag,
2932 : locallock->hashcode,
2933 : HASH_FIND,
2934 : NULL);
2935 16 : if (!lock)
2936 0 : elog(ERROR, "failed to re-find shared lock object");
2937 :
2938 16 : proclocktag.myLock = lock;
2939 16 : proclocktag.myProc = MyProc;
2940 :
2941 16 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2942 : proclock = (PROCLOCK *)
2943 16 : hash_search_with_hash_value(LockMethodProcLockHash,
2944 : &proclocktag,
2945 : proclock_hashcode,
2946 : HASH_FIND,
2947 : NULL);
2948 16 : if (!proclock)
2949 0 : elog(ERROR, "failed to re-find shared proclock object");
2950 16 : LWLockRelease(partitionLock);
2951 : }
2952 :
2953 756 : return proclock;
2954 : }
2955 :
2956 : /*
2957 : * GetLockConflicts
2958 : * Get an array of VirtualTransactionIds of xacts currently holding locks
2959 : * that would conflict with the specified lock/lockmode.
2960 : * xacts merely awaiting such a lock are NOT reported.
2961 : *
2962 : * The result array is palloc'd and is terminated with an invalid VXID.
2963 : * *countp, if not null, is updated to the number of items set.
2964 : *
2965 : * Of course, the result could be out of date by the time it's returned, so
2966 : * use of this function has to be thought about carefully. Similarly, a
2967 : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
2968 : * lock it holds. Existing callers don't care about a locker after that
2969 : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
2970 : * pg_xact updates and before releasing locks.
2971 : *
2972 : * Note we never include the current xact's vxid in the result array,
2973 : * since an xact never blocks itself.
2974 : */
2975 : VirtualTransactionId *
2976 2496 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2977 : {
2978 : static VirtualTransactionId *vxids;
2979 2496 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2980 : LockMethod lockMethodTable;
2981 : LOCK *lock;
2982 : LOCKMASK conflictMask;
2983 : dlist_iter proclock_iter;
2984 : PROCLOCK *proclock;
2985 : uint32 hashcode;
2986 : LWLock *partitionLock;
2987 2496 : int count = 0;
2988 2496 : int fast_count = 0;
2989 :
2990 2496 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2991 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2992 2496 : lockMethodTable = LockMethods[lockmethodid];
2993 2496 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2994 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2995 :
2996 : /*
2997 : * Allocate memory to store results, and fill with InvalidVXID. We only
2998 : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2999 : * If InHotStandby, allocate once in TopMemoryContext and reuse across calls.
3000 : */
3001 2496 : if (InHotStandby)
3002 : {
3003 8 : if (vxids == NULL)
3004 2 : vxids = (VirtualTransactionId *)
3005 2 : MemoryContextAlloc(TopMemoryContext,
3006 : sizeof(VirtualTransactionId) *
3007 2 : (MaxBackends + max_prepared_xacts + 1));
3008 : }
3009 : else
3010 2488 : vxids = (VirtualTransactionId *)
3011 2488 : palloc0(sizeof(VirtualTransactionId) *
3012 2488 : (MaxBackends + max_prepared_xacts + 1));
3013 :
3014 : /* Compute hash code and partition lock, and look up conflicting modes. */
3015 2496 : hashcode = LockTagHashCode(locktag);
3016 2496 : partitionLock = LockHashPartitionLock(hashcode);
3017 2496 : conflictMask = lockMethodTable->conflictTab[lockmode];
3018 :
3019 : /*
3020 : * Fast path locks might not have been entered in the primary lock table.
3021 : * If the lock we're dealing with could conflict with such a lock, we must
3022 : * examine each backend's fast-path array for conflicts.
3023 : */
3024 2496 : if (ConflictsWithRelationFastPath(locktag, lockmode))
3025 : {
3026 : int i;
3027 2496 : Oid relid = locktag->locktag_field2;
3028 : VirtualTransactionId vxid;
3029 :
3030 : /*
3031 : * Iterate over relevant PGPROCs. Anything held by a prepared
3032 : * transaction will have been transferred to the primary lock table,
3033 : * so we need not worry about those. This is all a bit fuzzy, because
3034 : * new locks could be taken after we've visited a particular
3035 : * partition, but the callers had better be prepared to deal with that
3036 : * anyway, since the locks could equally well be taken between the
3037 : * time we return the value and the time the caller does something
3038 : * with it.
3039 : */
3040 280844 : for (i = 0; i < ProcGlobal->allProcCount; i++)
3041 : {
3042 278348 : PGPROC *proc = &ProcGlobal->allProcs[i];
3043 : uint32 j,
3044 : group;
3045 :
3046 : /* A backend never blocks itself */
3047 278348 : if (proc == MyProc)
3048 2496 : continue;
3049 :
3050 275852 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3051 :
3052 : /*
3053 : * If the target backend isn't referencing the same database as
3054 : * the lock, then we needn't examine the individual relation IDs
3055 : * at all; none of them can be relevant.
3056 : *
3057 : * See FastPathTransferRelationLocks() for discussion of why we do
3058 : * this test after acquiring the lock.
3059 : */
3060 275852 : if (proc->databaseId != locktag->locktag_field1)
3061 : {
3062 110764 : LWLockRelease(&proc->fpInfoLock);
3063 110764 : continue;
3064 : }
3065 :
3066 : /* fast-path group the lock belongs to */
3067 165088 : group = FAST_PATH_REL_GROUP(relid);
3068 :
3069 2806086 : for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3070 : {
3071 : uint32 lockmask;
3072 :
3073 : /* index into the whole per-backend array */
3074 2641384 : uint32 f = FAST_PATH_SLOT(group, j);
3075 :
3076 : /* Look for an allocated slot matching the given relid. */
3077 2641384 : if (relid != proc->fpRelId[f])
3078 2639680 : continue;
3079 1704 : lockmask = FAST_PATH_GET_BITS(proc, f);
3080 1704 : if (!lockmask)
3081 1318 : continue;
3082 386 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3083 :
3084 : /*
3085 : * There can only be one entry per relation, so if we found it
3086 : * and it doesn't conflict, we can skip the rest of the slots.
3087 : */
3088 386 : if ((lockmask & conflictMask) == 0)
3089 10 : break;
3090 :
3091 : /* Conflict! */
3092 376 : GET_VXID_FROM_PGPROC(vxid, *proc);
3093 :
3094 376 : if (VirtualTransactionIdIsValid(vxid))
3095 376 : vxids[count++] = vxid;
3096 : /* else, xact already committed or aborted */
3097 :
3098 : /* No need to examine remaining slots. */
3099 376 : break;
3100 : }
3101 :
3102 165088 : LWLockRelease(&proc->fpInfoLock);
3103 : }
3104 : }
3105 :
3106 : /* Remember how many fast-path conflicts we found. */
3107 2496 : fast_count = count;
3108 :
3109 : /*
3110 : * Look up the lock object matching the tag.
3111 : */
3112 2496 : LWLockAcquire(partitionLock, LW_SHARED);
3113 :
3114 2496 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3115 : locktag,
3116 : hashcode,
3117 : HASH_FIND,
3118 : NULL);
3119 2496 : if (!lock)
3120 : {
3121 : /*
3122 : * If the lock object doesn't exist, there is nothing holding a lock
3123 : * on this lockable object.
3124 : */
3125 140 : LWLockRelease(partitionLock);
3126 140 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3127 140 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3128 140 : if (countp)
3129 0 : *countp = count;
3130 140 : return vxids;
3131 : }
3132 :
3133 : /*
3134 : * Examine each existing holder (or awaiter) of the lock.
3135 : */
3136 4746 : dlist_foreach(proclock_iter, &lock->procLocks)
3137 : {
3138 2390 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3139 :
3140 2390 : if (conflictMask & proclock->holdMask)
3141 : {
3142 2382 : PGPROC *proc = proclock->tag.myProc;
3143 :
3144 : /* A backend never blocks itself */
3145 2382 : if (proc != MyProc)
3146 : {
3147 : VirtualTransactionId vxid;
3148 :
3149 34 : GET_VXID_FROM_PGPROC(vxid, *proc);
3150 :
3151 34 : if (VirtualTransactionIdIsValid(vxid))
3152 : {
3153 : int i;
3154 :
3155 : /* Avoid duplicate entries. */
3156 52 : for (i = 0; i < fast_count; ++i)
3157 18 : if (VirtualTransactionIdEquals(vxids[i], vxid))
3158 0 : break;
3159 34 : if (i >= fast_count)
3160 34 : vxids[count++] = vxid;
3161 : }
3162 : /* else, xact already committed or aborted */
3163 : }
3164 : }
3165 : }
3166 :
3167 2356 : LWLockRelease(partitionLock);
3168 :
3169 2356 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3170 0 : elog(PANIC, "too many conflicting locks found");
3171 :
3172 2356 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3173 2356 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3174 2356 : if (countp)
3175 2350 : *countp = count;
3176 2356 : return vxids;
3177 : }
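 : /*
 :  * Illustrative sketch (not part of lock.c): one way a caller might consume
 :  * the array returned by GetLockConflicts, relying on the invalid-VXID
 :  * terminator.  Compare WaitForLockers() in lmgr.c; the helper name here is
 :  * hypothetical.
 :  */
 : #ifdef NOT_USED
 : static void
 : WaitForConflictingVxids(const LOCKTAG *locktag, LOCKMODE lockmode)
 : {
 : 	VirtualTransactionId *vxids = GetLockConflicts(locktag, lockmode, NULL);
 : 	int			i;
 :
 : 	/* The terminator entry has an invalid proc number / local xid. */
 : 	for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
 : 		VirtualXactLock(vxids[i], true);	/* sleep until each conflictor ends */
 : }
 : #endif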
3178 :
3179 : /*
3180 : * Find a lock in the shared lock table and release it. It is the caller's
3181 : * responsibility to verify that this is a sane thing to do. (For example, it
3182 : * would be bad to release a lock here if there might still be a LOCALLOCK
3183 : * object with pointers to it.)
3184 : *
3185 : * We currently use this in two situations: first, to release locks held by
3186 : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3187 : * to release locks taken via the fast-path, transferred to the main hash
3188 : * table, and then released (see LockReleaseAll).
3189 : */
3190 : static void
3191 4266 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3192 : LOCKTAG *locktag, LOCKMODE lockmode,
3193 : bool decrement_strong_lock_count)
3194 : {
3195 : LOCK *lock;
3196 : PROCLOCK *proclock;
3197 : PROCLOCKTAG proclocktag;
3198 : uint32 hashcode;
3199 : uint32 proclock_hashcode;
3200 : LWLock *partitionLock;
3201 : bool wakeupNeeded;
3202 :
3203 4266 : hashcode = LockTagHashCode(locktag);
3204 4266 : partitionLock = LockHashPartitionLock(hashcode);
3205 :
3206 4266 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3207 :
3208 : /*
3209 : * Re-find the lock object (it had better be there).
3210 : */
3211 4266 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3212 : locktag,
3213 : hashcode,
3214 : HASH_FIND,
3215 : NULL);
3216 4266 : if (!lock)
3217 0 : elog(PANIC, "failed to re-find shared lock object");
3218 :
3219 : /*
3220 : * Re-find the proclock object (ditto).
3221 : */
3222 4266 : proclocktag.myLock = lock;
3223 4266 : proclocktag.myProc = proc;
3224 :
3225 4266 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3226 :
3227 4266 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3228 : &proclocktag,
3229 : proclock_hashcode,
3230 : HASH_FIND,
3231 : NULL);
3232 4266 : if (!proclock)
3233 0 : elog(PANIC, "failed to re-find shared proclock object");
3234 :
3235 : /*
3236 : * Double-check that we are actually holding a lock of the type we want to
3237 : * release.
3238 : */
3239 4266 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3240 : {
3241 : PROCLOCK_PRINT("LockRefindAndRelease: WRONGTYPE", proclock);
3242 0 : LWLockRelease(partitionLock);
3243 0 : elog(WARNING, "you don't own a lock of type %s",
3244 : lockMethodTable->lockModeNames[lockmode]);
3245 0 : return;
3246 : }
3247 :
3248 : /*
3249 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3250 : */
3251 4266 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3252 :
3253 4266 : CleanUpLock(lock, proclock,
3254 : lockMethodTable, hashcode,
3255 : wakeupNeeded);
3256 :
3257 4266 : LWLockRelease(partitionLock);
3258 :
3259 : /*
3260 : * Decrement strong lock count. This logic is needed only for 2PC.
3261 : */
3262 4266 : if (decrement_strong_lock_count
3263 1748 : && ConflictsWithRelationFastPath(locktag, lockmode))
3264 : {
3265 142 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3266 :
3267 142 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3268 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3269 142 : FastPathStrongRelationLocks->count[fasthashcode]--;
3270 142 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3271 : }
3272 : }
3273 :
3274 : /*
3275 : * CheckForSessionAndXactLocks
3276 : * Check to see if transaction holds both session-level and xact-level
3277 : * locks on the same object; if so, throw an error.
3278 : *
3279 : * If we have both session- and transaction-level locks on the same object,
3280 : * PREPARE TRANSACTION must fail. This should never happen with regular
3281 : * locks, since we only take those at session level in some special operations
3282 : * like VACUUM. It's possible to hit this with advisory locks, though.
3283 : *
3284 : * It would be nice if we could keep the session hold and give away the
3285 : * transactional hold to the prepared xact. However, that would require two
3286 : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3287 : * available when it comes time for PostPrepare_Locks to do the deed.
3288 : * So for now, we error out while we can still do so safely.
3289 : *
3290 : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3291 : * we can't implement this check by examining LOCALLOCK entries in isolation.
3292 : * We must build a transient hashtable that is indexed by locktag only.
3293 : */
3294 : static void
3295 738 : CheckForSessionAndXactLocks(void)
3296 : {
3297 : typedef struct
3298 : {
3299 : LOCKTAG lock; /* identifies the lockable object */
3300 : bool sessLock; /* is any lockmode held at session level? */
3301 : bool xactLock; /* is any lockmode held at xact level? */
3302 : } PerLockTagEntry;
3303 :
3304 : HASHCTL hash_ctl;
3305 : HTAB *lockhtab;
3306 : HASH_SEQ_STATUS status;
3307 : LOCALLOCK *locallock;
3308 :
3309 : /* Create a local hash table keyed by LOCKTAG only */
3310 738 : hash_ctl.keysize = sizeof(LOCKTAG);
3311 738 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3312 738 : hash_ctl.hcxt = CurrentMemoryContext;
3313 :
3314 738 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3315 : 256, /* arbitrary initial size */
3316 : &hash_ctl,
3317 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3318 :
3319 : /* Scan local lock table to find entries for each LOCKTAG */
3320 738 : hash_seq_init(&status, LockMethodLocalHash);
3321 :
3322 2456 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3323 : {
3324 1722 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3325 : PerLockTagEntry *hentry;
3326 : bool found;
3327 : int i;
3328 :
3329 : /*
3330 : * Ignore VXID locks. We don't want those to be held by prepared
3331 : * transactions, since they aren't meaningful after a restart.
3332 : */
3333 1722 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3334 0 : continue;
3335 :
3336 : /* Ignore it if we don't actually hold the lock */
3337 1722 : if (locallock->nLocks <= 0)
3338 0 : continue;
3339 :
3340 : /* Otherwise, find or make an entry in lockhtab */
3341 1722 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
3342 1722 : &locallock->tag.lock,
3343 : HASH_ENTER, &found);
3344 1722 : if (!found) /* initialize, if newly created */
3345 1624 : hentry->sessLock = hentry->xactLock = false;
3346 :
3347 : /* Scan to see if we hold lock at session or xact level or both */
3348 3444 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3349 : {
3350 1722 : if (lockOwners[i].owner == NULL)
3351 18 : hentry->sessLock = true;
3352 : else
3353 1704 : hentry->xactLock = true;
3354 : }
3355 :
3356 : /*
3357 : * We can throw error immediately when we see both types of locks; no
3358 : * need to wait around to see if there are more violations.
3359 : */
3360 1722 : if (hentry->sessLock && hentry->xactLock)
3361 4 : ereport(ERROR,
3362 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3363 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3364 : }
3365 :
3366 : /* Success, so clean up */
3367 734 : hash_destroy(lockhtab);
3368 734 : }
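 : /*
 :  * Illustrative reproduction (not part of lock.c): advisory locks are the
 :  * usual way to trip the error above, since they can be taken on the same
 :  * object at both levels, e.g.:
 :  *
 :  *		SELECT pg_advisory_lock(1);			-- session level
 :  *		BEGIN;
 :  *		SELECT pg_advisory_xact_lock(1);	-- xact level, same locktag
 :  *		PREPARE TRANSACTION 'p1';			-- fails with the error above
 :  */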
3369 :
3370 : /*
3371 : * AtPrepare_Locks
3372 : * Do the preparatory work for a PREPARE: make 2PC state file records
3373 : * for all locks currently held.
3374 : *
3375 : * Session-level locks are ignored, as are VXID locks.
3376 : *
3377 : * For the most part, we don't need to touch shared memory for this ---
3378 : * all the necessary state information is in the locallock table.
3379 : * Fast-path locks are an exception, however: we move any such locks to
3380 : * the main table before allowing PREPARE TRANSACTION to succeed.
3381 : */
3382 : void
3383 738 : AtPrepare_Locks(void)
3384 : {
3385 : HASH_SEQ_STATUS status;
3386 : LOCALLOCK *locallock;
3387 :
3388 : /* First, verify there aren't locks of both xact and session level */
3389 738 : CheckForSessionAndXactLocks();
3390 :
3391 : /* Now do the per-locallock cleanup work */
3392 734 : hash_seq_init(&status, LockMethodLocalHash);
3393 :
3394 2444 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3395 : {
3396 : TwoPhaseLockRecord record;
3397 1710 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3398 : bool haveSessionLock;
3399 : bool haveXactLock;
3400 : int i;
3401 :
3402 : /*
3403 : * Ignore VXID locks. We don't want those to be held by prepared
3404 : * transactions, since they aren't meaningful after a restart.
3405 : */
3406 1710 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3407 14 : continue;
3408 :
3409 : /* Ignore it if we don't actually hold the lock */
3410 1710 : if (locallock->nLocks <= 0)
3411 0 : continue;
3412 :
3413 : /* Scan to see whether we hold it at session or transaction level */
3414 1710 : haveSessionLock = haveXactLock = false;
3415 3420 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3416 : {
3417 1710 : if (lockOwners[i].owner == NULL)
3418 14 : haveSessionLock = true;
3419 : else
3420 1696 : haveXactLock = true;
3421 : }
3422 :
3423 : /* Ignore it if we have only session lock */
3424 1710 : if (!haveXactLock)
3425 14 : continue;
3426 :
3427 : /* This can't happen, because we already checked it */
3428 1696 : if (haveSessionLock)
3429 0 : ereport(ERROR,
3430 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3431 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3432 :
3433 : /*
3434 : * If the local lock was taken via the fast-path, we need to move it
3435 : * to the primary lock table, or just get a pointer to the existing
3436 : * primary lock table entry if by chance it's already been
3437 : * transferred.
3438 : */
3439 1696 : if (locallock->proclock == NULL)
3440 : {
3441 756 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
3442 756 : locallock->lock = locallock->proclock->tag.myLock;
3443 : }
3444 :
3445 : /*
3446 : * Arrange to not release any strong lock count held by this lock
3447 : * entry. We must retain the count until the prepared transaction is
3448 : * committed or rolled back.
3449 : */
3450 1696 : locallock->holdsStrongLockCount = false;
3451 :
3452 : /*
3453 : * Create a 2PC record.
3454 : */
3455 1696 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3456 1696 : record.lockmode = locallock->tag.mode;
3457 :
3458 1696 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3459 : &record, sizeof(TwoPhaseLockRecord));
3460 : }
3461 734 : }
3462 :
3463 : /*
3464 : * PostPrepare_Locks
3465 : * Clean up after successful PREPARE
3466 : *
3467 : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3468 : * that's now associated with the prepared transaction, and we want to
3469 : * clean out the corresponding entries in the LOCALLOCK table.
3470 : *
3471 : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3472 : * pointers in the transaction's resource owner. This is OK at the
3473 : * moment since resowner.c doesn't try to free locks retail at a toplevel
3474 : * transaction commit or abort. We could alternatively zero out nLocks
3475 : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3476 : * but that probably costs more cycles.
3477 : */
3478 : void
3479 734 : PostPrepare_Locks(TransactionId xid)
3480 : {
3481 734 : PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3482 : HASH_SEQ_STATUS status;
3483 : LOCALLOCK *locallock;
3484 : LOCK *lock;
3485 : PROCLOCK *proclock;
3486 : PROCLOCKTAG proclocktag;
3487 : int partition;
3488 :
3489 : /* Can't prepare a lock group follower. */
3490 : Assert(MyProc->lockGroupLeader == NULL ||
3491 : MyProc->lockGroupLeader == MyProc);
3492 :
3493 : /* This is a critical section: any error means big trouble */
3494 734 : START_CRIT_SECTION();
3495 :
3496 : /*
3497 : * First we run through the locallock table and get rid of unwanted
3498 : * entries, then we scan the process's proclocks and transfer them to the
3499 : * target proc.
3500 : *
3501 : * We do this separately because we may have multiple locallock entries
3502 : * pointing to the same proclock, and we daren't end up with any dangling
3503 : * pointers.
3504 : */
3505 734 : hash_seq_init(&status, LockMethodLocalHash);
3506 :
3507 2444 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3508 : {
3509 1710 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3510 : bool haveSessionLock;
3511 : bool haveXactLock;
3512 : int i;
3513 :
3514 1710 : if (locallock->proclock == NULL || locallock->lock == NULL)
3515 : {
3516 : /*
3517 : * We must've run out of shared memory while trying to set up this
3518 : * lock. Just forget the local entry.
3519 : */
3520 : Assert(locallock->nLocks == 0);
3521 0 : RemoveLocalLock(locallock);
3522 0 : continue;
3523 : }
3524 :
3525 : /* Ignore VXID locks */
3526 1710 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3527 0 : continue;
3528 :
3529 : /* Scan to see whether we hold it at session or transaction level */
3530 1710 : haveSessionLock = haveXactLock = false;
3531 3420 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3532 : {
3533 1710 : if (lockOwners[i].owner == NULL)
3534 14 : haveSessionLock = true;
3535 : else
3536 1696 : haveXactLock = true;
3537 : }
3538 :
3539 : /* Ignore it if we have only session lock */
3540 1710 : if (!haveXactLock)
3541 14 : continue;
3542 :
3543 : /* This can't happen, because we already checked it */
3544 1696 : if (haveSessionLock)
3545 0 : ereport(PANIC,
3546 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3547 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3548 :
3549 : /* Mark the proclock to show we need to release this lockmode */
3550 1696 : if (locallock->nLocks > 0)
3551 1696 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3552 :
3553 : /* And remove the locallock hashtable entry */
3554 1696 : RemoveLocalLock(locallock);
3555 : }
3556 :
3557 : /*
3558 : * Now, scan each lock partition separately.
3559 : */
3560 12478 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3561 : {
3562 : LWLock *partitionLock;
3563 11744 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3564 : dlist_mutable_iter proclock_iter;
3565 :
3566 11744 : partitionLock = LockHashPartitionLockByIndex(partition);
3567 :
3568 : /*
3569 : * If the proclock list for this partition is empty, we can skip
3570 : * acquiring the partition lock. This optimization is safer than the
3571 : * situation in LockReleaseAll, because we got rid of any fast-path
3572 : * locks during AtPrepare_Locks, so there cannot be any case where
3573 : * another backend is adding something to our lists now. For safety,
3574 : * though, we code this the same way as in LockReleaseAll.
3575 : */
3576 11744 : if (dlist_is_empty(procLocks))
3577 10124 : continue; /* needn't examine this partition */
3578 :
3579 1620 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3580 :
3581 3300 : dlist_foreach_modify(proclock_iter, procLocks)
3582 : {
3583 1680 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3584 :
3585 : Assert(proclock->tag.myProc == MyProc);
3586 :
3587 1680 : lock = proclock->tag.myLock;
3588 :
3589 : /* Ignore VXID locks */
3590 1680 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3591 64 : continue;
3592 :
3593 : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3594 : LOCK_PRINT("PostPrepare_Locks", lock, 0);
3595 : Assert(lock->nRequested >= 0);
3596 : Assert(lock->nGranted >= 0);
3597 : Assert(lock->nGranted <= lock->nRequested);
3598 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3599 :
3600 : /* Ignore it if nothing to release (must be a session lock) */
3601 1616 : if (proclock->releaseMask == 0)
3602 14 : continue;
3603 :
3604 : /* Else we should be releasing all locks */
3605 1602 : if (proclock->releaseMask != proclock->holdMask)
3606 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3607 :
3608 : /*
3609 : * We cannot simply modify proclock->tag.myProc to reassign
3610 : * ownership of the lock, because that's part of the hash key and
3611 : * the proclock would then be in the wrong hash chain. Instead
3612 : * use hash_update_hash_key. (We used to create a new hash entry,
3613 : * but that risks out-of-memory failure if other processes are
3614 : * busy making proclocks too.) We must unlink the proclock from
3615 : * our procLink chain and put it into the new proc's chain, too.
3616 : *
3617 : * Note: the updated proclock hash key will still belong to the
3618 : * same hash partition, cf proclock_hash(). So the partition lock
3619 : * we already hold is sufficient for this.
3620 : */
3621 1602 : dlist_delete(&proclock->procLink);
3622 :
3623 : /*
3624 : * Create the new hash key for the proclock.
3625 : */
3626 1602 : proclocktag.myLock = lock;
3627 1602 : proclocktag.myProc = newproc;
3628 :
3629 : /*
3630 : * Update groupLeader pointer to point to the new proc. (We'd
3631 : * better not be a member of somebody else's lock group!)
3632 : */
3633 : Assert(proclock->groupLeader == proclock->tag.myProc);
3634 1602 : proclock->groupLeader = newproc;
3635 :
3636 : /*
3637 : * Update the proclock. We should not find any existing entry for
3638 : * the same hash key, since there can be only one entry for any
3639 : * given lock with my own proc.
3640 : */
3641 1602 : if (!hash_update_hash_key(LockMethodProcLockHash,
3642 : proclock,
3643 : &proclocktag))
3644 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3645 :
3646 : /* Re-link into the new proc's proclock list */
3647 1602 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3648 :
3649 : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3650 : } /* loop over PROCLOCKs within this partition */
3651 :
3652 1620 : LWLockRelease(partitionLock);
3653 : } /* loop over partitions */
3654 :
3655 734 : END_CRIT_SECTION();
3656 734 : }
3657 :
3658 :
3659 : /*
3660 : * Estimate shared-memory space used for lock tables
3661 : */
3662 : Size
3663 3534 : LockManagerShmemSize(void)
3664 : {
3665 3534 : Size size = 0;
3666 : long max_table_size;
3667 :
3668 : /* lock hash table */
3669 3534 : max_table_size = NLOCKENTS();
3670 3534 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3671 :
3672 : /* proclock hash table */
3673 3534 : max_table_size *= 2;
3674 3534 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3675 :
3676 : /*
3677 : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3678 : */
3679 3534 : size = add_size(size, size / 10);
3680 :
3681 3534 : return size;
3682 : }
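 : /*
 :  * Worked example (illustrative settings, not part of lock.c): with
 :  * max_locks_per_xact = 64, MaxBackends = 100 and max_prepared_xacts = 0,
 :  * NLOCKENTS() is 64 * (100 + 0) = 6400 LOCK entries; the proclock table is
 :  * sized for twice that, 12800 PROCLOCK entries; and 10% slop is then added
 :  * to the combined hash_estimate_size() results.
 :  */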
3683 :
3684 : /*
3685 : * GetLockStatusData - Return a summary of the lock manager's internal
3686 : * status, for use in a user-level reporting function.
3687 : *
3688 : * The return data consists of an array of LockInstanceData objects,
3689 : * which are a lightly abstracted version of the PROCLOCK data structures,
3690 : * i.e. there is one entry for each unique lock and interested PGPROC.
3691 : * It is the caller's responsibility to match up related items (such as
3692 : * references to the same lockable object or PGPROC) if wanted.
3693 : *
3694 : * The design goal is to hold the LWLocks for as short a time as possible;
3695 : * thus, this function simply makes a copy of the necessary data and releases
3696 : * the locks, allowing the caller to contemplate and format the data for as
3697 : * long as it pleases.
3698 : */
3699 : LockData *
3700 614 : GetLockStatusData(void)
3701 : {
3702 : LockData *data;
3703 : PROCLOCK *proclock;
3704 : HASH_SEQ_STATUS seqstat;
3705 : int els;
3706 : int el;
3707 : int i;
3708 :
3709 614 : data = (LockData *) palloc(sizeof(LockData));
3710 :
3711 : /* Guess how much space we'll need. */
3712 614 : els = MaxBackends;
3713 614 : el = 0;
3714 614 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3715 :
3716 : /*
3717 : * First, we iterate through the per-backend fast-path arrays, locking
3718 : * them one at a time. This might produce an inconsistent picture of the
3719 : * system state, but taking all of those LWLocks at the same time seems
3720 : * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3721 : * matter too much, because none of these locks can be involved in lock
3722 : * conflicts anyway - anything that might must be present in the main lock
3723 : * table. (For the same reason, we don't sweat about making leaderPid
3724 : * completely valid. We cannot safely dereference another backend's
3725 : * lockGroupLeader field without holding all lock partition locks, and
3726 : * it's not worth that.)
3727 : */
3728 66066 : for (i = 0; i < ProcGlobal->allProcCount; ++i)
3729 : {
3730 65452 : PGPROC *proc = &ProcGlobal->allProcs[i];
3731 :
3732 : /* Skip backends with pid=0, as they don't hold fast-path locks */
3733 65452 : if (proc->pid == 0)
3734 59810 : continue;
3735 :
3736 5642 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3737 :
3738 28210 : for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3739 : {
3740 : /* Skip groups without registered fast-path locks */
3741 22568 : if (proc->fpLockBits[g] == 0)
3742 18632 : continue;
3743 :
3744 66912 : for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3745 : {
3746 : LockInstanceData *instance;
3747 62976 : uint32 f = FAST_PATH_SLOT(g, j);
3748 62976 : uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3749 :
3750 : /* Skip unallocated slots */
3751 62976 : if (!lockbits)
3752 56554 : continue;
3753 :
3754 6422 : if (el >= els)
3755 : {
3756 12 : els += MaxBackends;
3757 12 : data->locks = (LockInstanceData *)
3758 12 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3759 : }
3760 :
3761 6422 : instance = &data->locks[el];
3762 6422 : SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3763 : proc->fpRelId[f]);
3764 6422 : instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3765 6422 : instance->waitLockMode = NoLock;
3766 6422 : instance->vxid.procNumber = proc->vxid.procNumber;
3767 6422 : instance->vxid.localTransactionId = proc->vxid.lxid;
3768 6422 : instance->pid = proc->pid;
3769 6422 : instance->leaderPid = proc->pid;
3770 6422 : instance->fastpath = true;
3771 :
3772 : /*
3773 : * Successfully taking fast path lock means there were no
3774 : * conflicting locks.
3775 : */
3776 6422 : instance->waitStart = 0;
3777 :
3778 6422 : el++;
3779 : }
3780 : }
3781 :
3782 5642 : if (proc->fpVXIDLock)
3783 : {
3784 : VirtualTransactionId vxid;
3785 : LockInstanceData *instance;
3786 :
3787 1992 : if (el >= els)
3788 : {
3789 4 : els += MaxBackends;
3790 4 : data->locks = (LockInstanceData *)
3791 4 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3792 : }
3793 :
3794 1992 : vxid.procNumber = proc->vxid.procNumber;
3795 1992 : vxid.localTransactionId = proc->fpLocalTransactionId;
3796 :
3797 1992 : instance = &data->locks[el];
3798 1992 : SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3799 1992 : instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3800 1992 : instance->waitLockMode = NoLock;
3801 1992 : instance->vxid.procNumber = proc->vxid.procNumber;
3802 1992 : instance->vxid.localTransactionId = proc->vxid.lxid;
3803 1992 : instance->pid = proc->pid;
3804 1992 : instance->leaderPid = proc->pid;
3805 1992 : instance->fastpath = true;
3806 1992 : instance->waitStart = 0;
3807 :
3808 1992 : el++;
3809 : }
3810 :
3811 5642 : LWLockRelease(&proc->fpInfoLock);
3812 : }
3813 :
3814 : /*
3815 : * Next, acquire lock on the entire shared lock data structure. We do
3816 : * this so that, at least for locks in the primary lock table, the state
3817 : * will be self-consistent.
3818 : *
3819 : * Since this is a read-only operation, we take shared instead of
3820 : * exclusive lock. There's not a whole lot of point to this, because all
3821 : * the normal operations require exclusive lock, but it doesn't hurt
3822 : * anything either. It will at least allow two backends to do
3823 : * GetLockStatusData in parallel.
3824 : *
3825 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3826 : */
3827 10438 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3828 9824 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3829 :
3830 : /* Now we can safely count the number of proclocks */
3831 614 : data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3832 614 : if (data->nelements > els)
3833 : {
3834 34 : els = data->nelements;
3835 34 : data->locks = (LockInstanceData *)
3836 34 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3837 : }
3838 :
3839 : /* Now scan the tables to copy the data */
3840 614 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3841 :
3842 5302 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3843 : {
3844 4688 : PGPROC *proc = proclock->tag.myProc;
3845 4688 : LOCK *lock = proclock->tag.myLock;
3846 4688 : LockInstanceData *instance = &data->locks[el];
3847 :
3848 4688 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3849 4688 : instance->holdMask = proclock->holdMask;
3850 4688 : if (proc->waitLock == proclock->tag.myLock)
3851 18 : instance->waitLockMode = proc->waitLockMode;
3852 : else
3853 4670 : instance->waitLockMode = NoLock;
3854 4688 : instance->vxid.procNumber = proc->vxid.procNumber;
3855 4688 : instance->vxid.localTransactionId = proc->vxid.lxid;
3856 4688 : instance->pid = proc->pid;
3857 4688 : instance->leaderPid = proclock->groupLeader->pid;
3858 4688 : instance->fastpath = false;
3859 4688 : instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3860 :
3861 4688 : el++;
3862 : }
3863 :
3864 : /*
3865 : * And release locks. We do this in reverse order for two reasons: (1)
3866 : * Anyone else who needs more than one of the locks will be trying to lock
3867 : * them in increasing order; we don't want to release the other process
3868 : * until it can get all the locks it needs. (2) This avoids O(N^2)
3869 : * behavior inside LWLockRelease.
3870 : */
3871 10438 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3872 9824 : LWLockRelease(LockHashPartitionLockByIndex(i));
3873 :
3874 : Assert(el == data->nelements);
3875 :
3876 614 : return data;
3877 : }
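 : /*
 :  * Illustrative sketch (not part of lock.c): consuming the returned
 :  * LockData, roughly as pg_lock_status() in lockfuncs.c does.
 :  */
 : #ifdef NOT_USED
 : static void
 : ReportLocks(void)
 : {
 : 	LockData   *lockData = GetLockStatusData();
 : 	int			i;
 :
 : 	for (i = 0; i < lockData->nelements; i++)
 : 	{
 : 		LockInstanceData *instance = &lockData->locks[i];
 :
 : 		/* holdMask is a bitmask; decode mode names via GetLockmodeName() */
 : 		elog(LOG, "pid %d: %s, fastpath=%d",
 : 			 instance->pid,
 : 			 instance->waitLockMode != NoLock ? "waiting" : "granted",
 : 			 (int) instance->fastpath);
 : 	}
 : }
 : #endif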
3878 :
3879 : /*
3880 : * GetBlockerStatusData - Return a summary of the lock manager's state
3881 : * concerning locks that are blocking the specified PID or any member of
3882 : * the PID's lock group, for use in a user-level reporting function.
3883 : *
3884 : * For each PID within the lock group that is awaiting some heavyweight lock,
3885 : * the return data includes an array of LockInstanceData objects, which are
3886 : * the same data structure used by GetLockStatusData; but unlike that function,
3887 : * this one reports only the PROCLOCKs associated with the lock that that PID
3888 : * is blocked on. (Hence, all the locktags should be the same for any one
3889 : * blocked PID.) In addition, we return an array of the PIDs of those backends
3890 : * that are ahead of the blocked PID in the lock's wait queue. These can be
3891 : * compared with the PIDs in the LockInstanceData objects to determine which
3892 : * waiters are ahead of or behind the blocked PID in the queue.
3893 : *
3894 : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3895 : * waiting on any heavyweight lock, return empty arrays.
3896 : *
3897 : * The design goal is to hold the LWLocks for as short a time as possible;
3898 : * thus, this function simply makes a copy of the necessary data and releases
3899 : * the locks, allowing the caller to contemplate and format the data for as
3900 : * long as it pleases.
3901 : */
3902 : BlockedProcsData *
3903 3090 : GetBlockerStatusData(int blocked_pid)
3904 : {
3905 : BlockedProcsData *data;
3906 : PGPROC *proc;
3907 : int i;
3908 :
3909 3090 : data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3910 :
3911 : /*
3912 : * Guess how much space we'll need, and preallocate. Most of the time
3913 : * this will avoid needing to do repalloc while holding the LWLocks. (We
3914 : * assume, but check with an Assert, that MaxBackends is enough entries
3915 : * for the procs[] array; the other two could need enlargement, though.)
3916 : */
3917 3090 : data->nprocs = data->nlocks = data->npids = 0;
3918 3090 : data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3919 3090 : data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3920 3090 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3921 3090 : data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3922 :
3923 : /*
3924 : * In order to search the ProcArray for blocked_pid and assume that that
3925 : * entry won't immediately disappear under us, we must hold ProcArrayLock.
3926 : * In addition, to examine the lock grouping fields of any other backend,
3927 : * we must hold all the hash partition locks. (Only one of those locks is
3928 : * actually relevant for any one lock group, but we can't know which one
3929 : * ahead of time.) It's fairly annoying to hold all those locks
3930 : * throughout this, but it's no worse than GetLockStatusData(), and it
3931 : * does have the advantage that we're guaranteed to return a
3932 : * self-consistent instantaneous state.
3933 : */
3934 3090 : LWLockAcquire(ProcArrayLock, LW_SHARED);
3935 :
3936 3090 : proc = BackendPidGetProcWithLock(blocked_pid);
3937 :
3938 : /* Nothing to do if it's gone */
3939 3090 : if (proc != NULL)
3940 : {
3941 : /*
3942 : * Acquire lock on the entire shared lock data structure. See notes
3943 : * in GetLockStatusData().
3944 : */
3945 52530 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3946 49440 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3947 :
3948 3090 : if (proc->lockGroupLeader == NULL)
3949 : {
3950 : /* Easy case, proc is not a lock group member */
3951 2774 : GetSingleProcBlockerStatusData(proc, data);
3952 : }
3953 : else
3954 : {
3955 : /* Examine all procs in proc's lock group */
3956 : dlist_iter iter;
3957 :
3958 730 : dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3959 : {
3960 : PGPROC *memberProc;
3961 :
3962 414 : memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3963 414 : GetSingleProcBlockerStatusData(memberProc, data);
3964 : }
3965 : }
3966 :
3967 : /*
3968 : * And release locks. See notes in GetLockStatusData().
3969 : */
3970 52530 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3971 49440 : LWLockRelease(LockHashPartitionLockByIndex(i));
3972 :
3973 : Assert(data->nprocs <= data->maxprocs);
3974 : }
3975 :
3976 3090 : LWLockRelease(ProcArrayLock);
3977 :
3978 3090 : return data;
3979 : }
3980 :
3981 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3982 : static void
3983 3188 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3984 : {
3985 3188 : LOCK *theLock = blocked_proc->waitLock;
3986 : BlockedProcData *bproc;
3987 : dlist_iter proclock_iter;
3988 : dlist_iter proc_iter;
3989 : dclist_head *waitQueue;
3990 : int queue_size;
3991 :
3992 : /* Nothing to do if this proc is not blocked */
3993 3188 : if (theLock == NULL)
3994 932 : return;
3995 :
3996 : /* Set up a procs[] element */
3997 2256 : bproc = &data->procs[data->nprocs++];
3998 2256 : bproc->pid = blocked_proc->pid;
3999 2256 : bproc->first_lock = data->nlocks;
4000 2256 : bproc->first_waiter = data->npids;
4001 :
4002 : /*
4003 : * We may ignore the proc's fast-path arrays, since nothing in those could
4004 : * be related to a contended lock.
4005 : */
4006 :
4007 : /* Collect all PROCLOCKs associated with theLock */
4008 6864 : dlist_foreach(proclock_iter, &theLock->procLocks)
4009 : {
4010 4608 : PROCLOCK *proclock =
4011 4608 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4012 4608 : PGPROC *proc = proclock->tag.myProc;
4013 4608 : LOCK *lock = proclock->tag.myLock;
4014 : LockInstanceData *instance;
4015 :
4016 4608 : if (data->nlocks >= data->maxlocks)
4017 : {
4018 0 : data->maxlocks += MaxBackends;
4019 0 : data->locks = (LockInstanceData *)
4020 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4021 : }
4022 :
4023 4608 : instance = &data->locks[data->nlocks];
4024 4608 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4025 4608 : instance->holdMask = proclock->holdMask;
4026 4608 : if (proc->waitLock == lock)
4027 2336 : instance->waitLockMode = proc->waitLockMode;
4028 : else
4029 2272 : instance->waitLockMode = NoLock;
4030 4608 : instance->vxid.procNumber = proc->vxid.procNumber;
4031 4608 : instance->vxid.localTransactionId = proc->vxid.lxid;
4032 4608 : instance->pid = proc->pid;
4033 4608 : instance->leaderPid = proclock->groupLeader->pid;
4034 4608 : instance->fastpath = false;
4035 4608 : data->nlocks++;
4036 : }
4037 :
4038 : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4039 2256 : waitQueue = &(theLock->waitProcs);
4040 2256 : queue_size = dclist_count(waitQueue);
4041 :
4042 2256 : if (queue_size > data->maxpids - data->npids)
4043 : {
4044 0 : data->maxpids = Max(data->maxpids + MaxBackends,
4045 : data->npids + queue_size);
4046 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
4047 0 : sizeof(int) * data->maxpids);
4048 : }
4049 :
4050 : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4051 2296 : dclist_foreach(proc_iter, waitQueue)
4052 : {
4053 2296 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4054 :
4055 2296 : if (queued_proc == blocked_proc)
4056 2256 : break;
4057 40 : data->waiter_pids[data->npids++] = queued_proc->pid;
4059 : }
4060 :
4061 2256 : bproc->num_locks = data->nlocks - bproc->first_lock;
4062 2256 : bproc->num_waiters = data->npids - bproc->first_waiter;
4063 : }
4064 :
4065 : /*
4066 : * Returns a list of currently held AccessExclusiveLocks, for use by
4067 : * LogStandbySnapshot(). The result is a palloc'd array,
4068 : * with the number of elements returned into *nlocks.
4069 : *
4070 : * XXX This currently takes a lock on all partitions of the lock table,
4071 : * but it's possible to do better. By reference counting locks and storing
4072 : * the value in the ProcArray entry for each backend we could tell if any
4073 : * locks need recording without having to acquire the partition locks and
4074 : * scan the lock table. Whether that's worth the additional overhead
4075 : * is pretty dubious though.
4076 : */
4077 : xl_standby_lock *
4078 1896 : GetRunningTransactionLocks(int *nlocks)
4079 : {
4080 : xl_standby_lock *accessExclusiveLocks;
4081 : PROCLOCK *proclock;
4082 : HASH_SEQ_STATUS seqstat;
4083 : int i;
4084 : int index;
4085 : int els;
4086 :
4087 : /*
4088 : * Acquire lock on the entire shared lock data structure.
4089 : *
4090 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4091 : */
4092 32232 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4093 30336 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4094 :
4095 : /* Now we can safely count the number of proclocks */
4096 1896 : els = hash_get_num_entries(LockMethodProcLockHash);
4097 :
4098 : /*
4099 : * Allocating enough space for all locks in the lock table is overkill,
4100 : * but it's more convenient and faster than having to enlarge the array.
4101 : */
4102 1896 : accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4103 :
4104 : /* Now scan the tables to copy the data */
4105 1896 : hash_seq_init(&seqstat, LockMethodProcLockHash);
4106 :
4107 : /*
4108 : * If lock is a currently granted AccessExclusiveLock then it will have
4109 : * just one proclock holder, so locks are never accessed twice in this
4110 : * particular case. Don't copy this code for use elsewhere because in the
4111 : * general case this will give you duplicate locks when looking at
4112 : * non-exclusive lock types.
4113 : */
4114 1896 : index = 0;
4115 8682 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4116 : {
4117 : /* make sure this definition matches the one used in LockAcquire */
4118 6786 : if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4119 3100 : proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4120 : {
4121 2128 : PGPROC *proc = proclock->tag.myProc;
4122 2128 : LOCK *lock = proclock->tag.myLock;
4123 2128 : TransactionId xid = proc->xid;
4124 :
4125 : /*
4126 : * Don't record locks for transactions if we know they have
4127 : * already issued their WAL record for commit but not yet released
4128 : * lock. It is still possible that we see locks held by already
4129 : * complete transactions, if they haven't yet zeroed their xids.
4130 : */
4131 2128 : if (!TransactionIdIsValid(xid))
4132 2 : continue;
4133 :
4134 2126 : accessExclusiveLocks[index].xid = xid;
4135 2126 : accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4136 2126 : accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4137 :
4138 2126 : index++;
4139 : }
4140 : }
4141 :
4142 : Assert(index <= els);
4143 :
4144 : /*
4145 : * And release locks. We do this in reverse order for two reasons: (1)
4146 : * Anyone else who needs more than one of the locks will be trying to lock
4147 : * them in increasing order; we don't want to release the other process
4148 : * until it can get all the locks it needs. (2) This avoids O(N^2)
4149 : * behavior inside LWLockRelease.
4150 : */
4151 32232 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4152 30336 : LWLockRelease(LockHashPartitionLockByIndex(i));
4153 :
4154 1896 : *nlocks = index;
4155 1896 : return accessExclusiveLocks;
4156 : }
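 : /*
 :  * Illustrative calling pattern (not part of lock.c), in the style of
 :  * LogStandbySnapshot() in standby.c:
 :  *
 :  *		int			nlocks;
 :  *		xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);
 :  *
 :  *		if (nlocks > 0)
 :  *			LogAccessExclusiveLocks(nlocks, locks);
 :  *		pfree(locks);
 :  */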
4157 :
4158 : /* Provide the textual name of any lock mode */
4159 : const char *
4160 13990 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4161 : {
4162 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4163 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4164 13990 : return LockMethods[lockmethodid]->lockModeNames[mode];
4165 : }
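 : /*
 :  * For example (illustrative), GetLockmodeName(DEFAULT_LOCKMETHOD,
 :  * AccessExclusiveLock) returns "AccessExclusiveLock"; the strings come from
 :  * the lock method's lockModeNames[] array.
 :  */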
4166 :
4167 : #ifdef LOCK_DEBUG
4168 : /*
4169 : * Dump all locks in the given proc's myProcLocks lists.
4170 : *
4171 : * Caller is responsible for having acquired appropriate LWLocks.
4172 : */
4173 : void
4174 : DumpLocks(PGPROC *proc)
4175 : {
4176 : int i;
4177 :
4178 : if (proc == NULL)
4179 : return;
4180 :
4181 : if (proc->waitLock)
4182 : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4183 :
4184 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4185 : {
4186 : dlist_head *procLocks = &proc->myProcLocks[i];
4187 : dlist_iter iter;
4188 :
4189 : dlist_foreach(iter, procLocks)
4190 : {
4191 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4192 : LOCK *lock = proclock->tag.myLock;
4193 :
4194 : Assert(proclock->tag.myProc == proc);
4195 : PROCLOCK_PRINT("DumpLocks", proclock);
4196 : LOCK_PRINT("DumpLocks", lock, 0);
4197 : }
4198 : }
4199 : }
4200 :
4201 : /*
4202 : * Dump all lmgr locks.
4203 : *
4204 : * Caller is responsible for having acquired appropriate LWLocks.
4205 : */
4206 : void
4207 : DumpAllLocks(void)
4208 : {
4209 : PGPROC *proc;
4210 : PROCLOCK *proclock;
4211 : LOCK *lock;
4212 : HASH_SEQ_STATUS status;
4213 :
4214 : proc = MyProc;
4215 :
4216 : if (proc && proc->waitLock)
4217 : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4218 :
4219 : hash_seq_init(&status, LockMethodProcLockHash);
4220 :
4221 : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4222 : {
4223 : PROCLOCK_PRINT("DumpAllLocks", proclock);
4224 :
4225 : lock = proclock->tag.myLock;
4226 : if (lock)
4227 : LOCK_PRINT("DumpAllLocks", lock, 0);
4228 : else
4229 : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4230 : }
4231 : }
4232 : #endif /* LOCK_DEBUG */
4233 :
4234 : /*
4235 : * LOCK 2PC resource manager's routines
4236 : */
4237 :
4238 : /*
4239 : * Re-acquire a lock belonging to a transaction that was prepared.
4240 : *
4241 : * Because this function is run at db startup, re-acquiring the locks should
4242 : * never conflict with running transactions: there are none. We
4243 : * assume that the lock state represented by the stored 2PC files is legal.
4244 : *
4245 : * When switching from Hot Standby mode to normal operation, the locks will
4246 : * already be held by the startup process. The locks are acquired for the new
4247 : * procs without checking for conflicts, so we don't get a conflict between the
4248 : * startup process and the dummy procs, even though we will momentarily have
4249 : * a situation where two procs are holding the same AccessExclusiveLock,
4250 : * which isn't normally possible because of the lock conflict. If we're in standby
4251 : * mode, but a recovery snapshot hasn't been established yet, it's possible
4252 : * that some but not all of the locks are already held by the startup process.
4253 : *
4254 : * This approach is simple, but also a bit dangerous, because if there isn't
4255 : * enough shared memory to acquire the locks, an error will be thrown, which
4256 : * is promoted to FATAL and recovery will abort, bringing down the postmaster.
4257 : * A safer approach would be to transfer the locks like we do in
4258 : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4259 : * read-only backends to use up all the shared lock memory anyway, so that
4260 : * replaying the WAL record that needs to acquire a lock will throw an error
4261 : * and PANIC anyway.
4262 : */
4263 : void
4264 176 : lock_twophase_recover(TransactionId xid, uint16 info,
4265 : void *recdata, uint32 len)
4266 : {
4267 176 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4268 176 : PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4269 : LOCKTAG *locktag;
4270 : LOCKMODE lockmode;
4271 : LOCKMETHODID lockmethodid;
4272 : LOCK *lock;
4273 : PROCLOCK *proclock;
4274 : PROCLOCKTAG proclocktag;
4275 : bool found;
4276 : uint32 hashcode;
4277 : uint32 proclock_hashcode;
4278 : int partition;
4279 : LWLock *partitionLock;
4280 : LockMethod lockMethodTable;
4281 :
4282 : Assert(len == sizeof(TwoPhaseLockRecord));
4283 176 : locktag = &rec->locktag;
4284 176 : lockmode = rec->lockmode;
4285 176 : lockmethodid = locktag->locktag_lockmethodid;
4286 :
4287 176 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4288 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4289 176 : lockMethodTable = LockMethods[lockmethodid];
4290 :
4291 176 : hashcode = LockTagHashCode(locktag);
4292 176 : partition = LockHashPartition(hashcode);
4293 176 : partitionLock = LockHashPartitionLock(hashcode);
4294 :
4295 176 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4296 :
4297 : /*
4298 : * Find or create a lock with this tag.
4299 : */
4300 176 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4301 : locktag,
4302 : hashcode,
4303 : HASH_ENTER_NULL,
4304 : &found);
4305 176 : if (!lock)
4306 : {
4307 0 : LWLockRelease(partitionLock);
4308 0 : ereport(ERROR,
4309 : (errcode(ERRCODE_OUT_OF_MEMORY),
4310 : errmsg("out of shared memory"),
4311 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4312 : }
4313 :
4314 : /*
4315 : * if it's a new lock object, initialize it
4316 : */
4317 176 : if (!found)
4318 : {
4319 152 : lock->grantMask = 0;
4320 152 : lock->waitMask = 0;
4321 152 : dlist_init(&lock->procLocks);
4322 152 : dclist_init(&lock->waitProcs);
4323 152 : lock->nRequested = 0;
4324 152 : lock->nGranted = 0;
4325 912 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4326 152 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4327 : LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4328 : }
4329 : else
4330 : {
4331 : LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4332 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4333 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4334 : Assert(lock->nGranted <= lock->nRequested);
4335 : }
4336 :
4337 : /*
4338 : * Create the hash key for the proclock table.
4339 : */
4340 176 : proclocktag.myLock = lock;
4341 176 : proclocktag.myProc = proc;
4342 :
4343 176 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4344 :
4345 : /*
4346 : * Find or create a proclock entry with this tag
4347 : */
4348 176 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4349 : &proclocktag,
4350 : proclock_hashcode,
4351 : HASH_ENTER_NULL,
4352 : &found);
4353 176 : if (!proclock)
4354 : {
4355 : /* Oops, not enough shmem for the proclock */
4356 0 : if (lock->nRequested == 0)
4357 : {
4358 : /*
4359 : * There are no other requestors of this lock, so garbage-collect
4360 : * the lock object. We *must* do this to avoid a permanent leak
4361 : * of shared memory, because there won't be anything to cause
4362 : * anyone to release the lock object later.
4363 : */
4364 : Assert(dlist_is_empty(&lock->procLocks));
4365 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
4366 0 : &(lock->tag),
4367 : hashcode,
4368 : HASH_REMOVE,
4369 : NULL))
4370 0 : elog(PANIC, "lock table corrupted");
4371 : }
4372 0 : LWLockRelease(partitionLock);
4373 0 : ereport(ERROR,
4374 : (errcode(ERRCODE_OUT_OF_MEMORY),
4375 : errmsg("out of shared memory"),
4376 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4377 : }
4378 :
4379 : /*
4380 : * If new, initialize the new entry
4381 : */
4382 176 : if (!found)
4383 : {
4384 : Assert(proc->lockGroupLeader == NULL);
4385 160 : proclock->groupLeader = proc;
4386 160 : proclock->holdMask = 0;
4387 160 : proclock->releaseMask = 0;
4388 : /* Add proclock to appropriate lists */
4389 160 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4390 160 : dlist_push_tail(&proc->myProcLocks[partition],
4391 : &proclock->procLink);
4392 : PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4393 : }
4394 : else
4395 : {
4396 : PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4397 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
4398 : }
4399 :
4400 : /*
4401 : * lock->nRequested and lock->requested[] count the total number of
4402 : * requests, whether granted or waiting, so increment those immediately.
4403 : */
4404 176 : lock->nRequested++;
4405 176 : lock->requested[lockmode]++;
4406 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4407 :
4408 : /*
4409 : * We shouldn't already hold the desired lock.
4410 : */
4411 176 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
4412 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
4413 : lockMethodTable->lockModeNames[lockmode],
4414 : lock->tag.locktag_field1, lock->tag.locktag_field2,
4415 : lock->tag.locktag_field3);
4416 :
4417 : /*
4418 : * We ignore any possible conflicts and just grant ourselves the lock;
4419 : * this keeps things simple and also avoids deadlocks when
4420 : * switching from standby to normal mode. See function comment.
4421 : */
4422 176 : GrantLock(lock, proclock, lockmode);
4423 :
4424 : /*
4425 : * Bump strong lock count, to make sure any fast-path lock requests won't
4426 : * be granted without consulting the primary lock table.
4427 : */
4428 176 : if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4429 : {
4430 36 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4431 :
4432 36 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4433 36 : FastPathStrongRelationLocks->count[fasthashcode]++;
4434 36 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4435 : }
4436 :
4437 176 : LWLockRelease(partitionLock);
4438 176 : }
4439 :
4440 : /*
4441 : * Re-acquire a lock belonging to a transaction that was prepared, when
4442 : * starting up into hot standby mode.
4443 : */
4444 : void
4445 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4446 : void *recdata, uint32 len)
4447 : {
4448 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4449 : LOCKTAG *locktag;
4450 : LOCKMODE lockmode;
4451 : LOCKMETHODID lockmethodid;
4452 :
4453 : Assert(len == sizeof(TwoPhaseLockRecord));
4454 0 : locktag = &rec->locktag;
4455 0 : lockmode = rec->lockmode;
4456 0 : lockmethodid = locktag->locktag_lockmethodid;
4457 :
4458 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4459 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4460 :
4461 0 : if (lockmode == AccessExclusiveLock &&
4462 0 : locktag->locktag_type == LOCKTAG_RELATION)
4463 : {
4464 0 : StandbyAcquireAccessExclusiveLock(xid,
4465 : locktag->locktag_field1 /* dboid */ ,
4466 : locktag->locktag_field2 /* reloid */ );
4467 : }
4468 0 : }
4469 :
4470 :
4471 : /*
4472 : * 2PC processing routine for COMMIT PREPARED case.
4473 : *
4474 : * Find and release the lock indicated by the 2PC record.
4475 : */
4476 : void
4477 1748 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4478 : void *recdata, uint32 len)
4479 : {
4480 1748 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4481 1748 : PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4482 : LOCKTAG *locktag;
4483 : LOCKMETHODID lockmethodid;
4484 : LockMethod lockMethodTable;
4485 :
4486 : Assert(len == sizeof(TwoPhaseLockRecord));
4487 1748 : locktag = &rec->locktag;
4488 1748 : lockmethodid = locktag->locktag_lockmethodid;
4489 :
4490 1748 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4491 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4492 1748 : lockMethodTable = LockMethods[lockmethodid];
4493 :
4494 1748 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4495 1748 : }
4496 :
4497 : /*
4498 : * 2PC processing routine for ROLLBACK PREPARED case.
4499 : *
4500 : * This is actually just the same as the COMMIT case.
4501 : */
4502 : void
4503 248 : lock_twophase_postabort(TransactionId xid, uint16 info,
4504 : void *recdata, uint32 len)
4505 : {
4506 248 : lock_twophase_postcommit(xid, info, recdata, len);
4507 248 : }
4508 :
4509 : /*
4510 : * VirtualXactLockTableInsert
4511 : *
4512 : * Take vxid lock via the fast-path. There can't be any pre-existing
4513 : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4514 : *
4515 : * Since MyProc->fpLocalTransactionId will normally contain the same data
4516 : * as MyProc->vxid.lxid, you might wonder if we really need both. The
4517 : * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4518 : * examined by procarray.c, while fpLocalTransactionId is protected by
4519 : * fpInfoLock and is used only by the locking subsystem. Doing it this
4520 : * way makes it easier to verify that there are no funny race conditions.
4521 : *
4522 : * We don't bother recording this lock in the local lock table, since it's
4523 : * only ever released at the end of a transaction. Instead,
4524 : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4525 : */
4526 : void
4527 740716 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4528 : {
4529 : Assert(VirtualTransactionIdIsValid(vxid));
4530 :
4531 740716 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4532 :
4533 : Assert(MyProc->vxid.procNumber == vxid.procNumber);
4534 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4535 : Assert(MyProc->fpVXIDLock == false);
4536 :
4537 740716 : MyProc->fpVXIDLock = true;
4538 740716 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4539 :
4540 740716 : LWLockRelease(&MyProc->fpInfoLock);
4541 740716 : }
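 : /*
 :  * Illustrative lifecycle (assumed, following the comment above): at
 :  * transaction start, xact.c does roughly
 :  *
 :  *		vxid.procNumber = MyProcNumber;
 :  *		vxid.localTransactionId = GetNextLocalTransactionId();
 :  *		VirtualXactLockTableInsert(vxid);
 :  *
 :  * and the lock is torn down at transaction end when LockReleaseAll()
 :  * calls VirtualXactLockTableCleanup().
 :  */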
4542 :
4543 : /*
4544 : * VirtualXactLockTableCleanup
4545 : *
4546 : * Check whether a VXID lock has been materialized; if so, release it,
4547 : * unblocking waiters.
4548 : */
4549 : void
4550 741476 : VirtualXactLockTableCleanup(void)
4551 : {
4552 : bool fastpath;
4553 : LocalTransactionId lxid;
4554 :
4555 : Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4556 :
4557 : /*
4558 : * Clean up shared memory state.
4559 : */
4560 741476 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4561 :
4562 741476 : fastpath = MyProc->fpVXIDLock;
4563 741476 : lxid = MyProc->fpLocalTransactionId;
4564 741476 : MyProc->fpVXIDLock = false;
4565 741476 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4566 :
4567 741476 : LWLockRelease(&MyProc->fpInfoLock);
4568 :
4569 : /*
4570 : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4571 : * that means someone transferred the lock to the main lock table.
4572 : */
4573 741476 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4574 : {
4575 : VirtualTransactionId vxid;
4576 : LOCKTAG locktag;
4577 :
4578 454 : vxid.procNumber = MyProcNumber;
4579 454 : vxid.localTransactionId = lxid;
4580 454 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4581 :
4582 454 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4583 : &locktag, ExclusiveLock, false);
4584 : }
4585 741476 : }
4586 :
4587 : /*
4588 : * XactLockForVirtualXact
4589 : *
4590 : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4591 : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4592 : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4593 : * prepared, committed, or aborted.
4594 : *
4595 : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4596 : * known as "vxid" before its PREPARE TRANSACTION.
4597 : */
4598 : static bool
4599 504 : XactLockForVirtualXact(VirtualTransactionId vxid,
4600 : TransactionId xid, bool wait)
4601 : {
4602 504 : bool more = false;
4603 :
4604 : /* There is no point in waiting for 2PCs if there are no 2PCs. */
4605 504 : if (max_prepared_xacts == 0)
4606 168 : return true;
4607 :
4608 : do
4609 : {
4610 : LockAcquireResult lar;
4611 : LOCKTAG tag;
4612 :
4613 : /* Clear state from previous iterations. */
4614 336 : if (more)
4615 : {
4616 0 : xid = InvalidTransactionId;
4617 0 : more = false;
4618 : }
4619 :
4620 : /* If we have no xid, try to find one. */
4621 336 : if (!TransactionIdIsValid(xid))
4622 150 : xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4623 336 : if (!TransactionIdIsValid(xid))
4624 : {
4625 : Assert(!more);
4626 134 : return true;
4627 : }
4628 :
4629 : /* Check or wait for XID completion. */
4630 202 : SET_LOCKTAG_TRANSACTION(tag, xid);
4631 202 : lar = LockAcquire(&tag, ShareLock, false, !wait);
4632 202 : if (lar == LOCKACQUIRE_NOT_AVAIL)
4633 0 : return false;
4634 202 : LockRelease(&tag, ShareLock, false);
4635 202 : } while (more);
4636 :
4637 202 : return true;
4638 : }
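
The do/while shape above is a cursor-style drain: TwoPhaseGetXidByVirtualXID()
hands back one prepared XID at a time and reports through "more" whether
another one maps to the same VXID. A standalone sketch of that control flow
follows (plain C; next_prepared_xid() is an invented stand-in for the real
lookup, and the XID values are arbitrary).

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for TwoPhaseGetXidByVirtualXID(): returns one match per call,
     * 0 when exhausted, and sets *more when further matches remain. */
    static unsigned
    next_prepared_xid(bool *more)
    {
        static unsigned pending[] = {731, 732, 0};
        static int i = 0;
        unsigned xid = pending[i];

        if (xid != 0)
            i++;
        *more = (pending[i] != 0);
        return xid;
    }

    int
    main(void)
    {
        bool more = false;
        unsigned xid = 0;

        do
        {
            if (more)           /* clear state from the previous iteration */
            {
                xid = 0;
                more = false;
            }
            if (xid == 0)       /* no XID in hand: look one up */
                xid = next_prepared_xid(&more);
            if (xid == 0)
                break;          /* nothing (left) to wait for */
            printf("would lock and release XID %u here\n", xid);
        } while (more);
        return 0;
    }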
4639 :
4640 : /*
4641 : * VirtualXactLock
4642 : *
4643 : * If wait = true, wait as long as the given VXID or any XID acquired by the
4644 : * same transaction is still running. Then, return true.
4645 : *
4646                 :  * If wait = false, just check whether that VXID or one of those XIDs is still
4647                 :  * running; return false if so, true if not.
4648 : */
4649 : bool
4650 584 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4651 : {
4652 : LOCKTAG tag;
4653 : PGPROC *proc;
4654 584 : TransactionId xid = InvalidTransactionId;
4655 :
4656 : Assert(VirtualTransactionIdIsValid(vxid));
4657 :
4658 584 : if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4659 : /* no vxid lock; localTransactionId is a normal, locked XID */
4660 2 : return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4661 :
4662 582 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4663 :
4664 : /*
4665 : * If a lock table entry must be made, this is the PGPROC on whose behalf
4666 : * it must be done. Note that the transaction might end or the PGPROC
4667 : * might be reassigned to a new backend before we get around to examining
4668 : * it, but it doesn't matter. If we find upon examination that the
4669 : * relevant lxid is no longer running here, that's enough to prove that
4670 : * it's no longer running anywhere.
4671 : */
4672 582 : proc = ProcNumberGetProc(vxid.procNumber);
4673 582 : if (proc == NULL)
4674 6 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4675 :
4676 : /*
4677 : * We must acquire this lock before checking the procNumber and lxid
4678 : * against the ones we're waiting for. The target backend will only set
4679 : * or clear lxid while holding this lock.
4680 : */
4681 576 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4682 :
4683 576 : if (proc->vxid.procNumber != vxid.procNumber
4684 576 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4685 : {
4686 : /* VXID ended */
4687 78 : LWLockRelease(&proc->fpInfoLock);
4688 78 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4689 : }
4690 :
4691 : /*
4692 : * If we aren't asked to wait, there's no need to set up a lock table
4693 : * entry. The transaction is still in progress, so just return false.
4694 : */
4695 498 : if (!wait)
4696 : {
4697 30 : LWLockRelease(&proc->fpInfoLock);
4698 30 : return false;
4699 : }
4700 :
4701 : /*
4702 : * OK, we're going to need to sleep on the VXID. But first, we must set
4703 : * up the primary lock table entry, if needed (ie, convert the proc's
4704 : * fast-path lock on its VXID to a regular lock).
4705 : */
4706 468 : if (proc->fpVXIDLock)
4707 : {
4708 : PROCLOCK *proclock;
4709 : uint32 hashcode;
4710 : LWLock *partitionLock;
4711 :
4712 454 : hashcode = LockTagHashCode(&tag);
4713 :
4714 454 : partitionLock = LockHashPartitionLock(hashcode);
4715 454 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4716 :
4717 454 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4718 : &tag, hashcode, ExclusiveLock);
4719 454 : if (!proclock)
4720 : {
4721 0 : LWLockRelease(partitionLock);
4722 0 : LWLockRelease(&proc->fpInfoLock);
4723 0 : ereport(ERROR,
4724 : (errcode(ERRCODE_OUT_OF_MEMORY),
4725 : errmsg("out of shared memory"),
4726 : errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4727 : }
4728 454 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4729 :
4730 454 : LWLockRelease(partitionLock);
4731 :
4732 454 : proc->fpVXIDLock = false;
4733 : }
4734 :
4735 : /*
4736 : * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4737 : * search. The proc might have assigned this XID but not yet locked it,
4738 : * in which case the proc will lock this XID before releasing the VXID.
4739 : * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4740 : * so we won't save an XID of a different VXID. It doesn't matter whether
4741 : * we save this before or after setting up the primary lock table entry.
4742 : */
4743 468 : xid = proc->xid;
4744 :
4745                 :     /* Done with the fields protected by proc->fpInfoLock */
4746 468 : LWLockRelease(&proc->fpInfoLock);
4747 :
4748 : /* Time to wait. */
4749 468 : (void) LockAcquire(&tag, ShareLock, false, false);
4750 :
4751 418 : LockRelease(&tag, ShareLock, false);
4752 418 : return XactLockForVirtualXact(vxid, xid, wait);
4753 : }
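
For context, the canonical caller pattern looks like WaitForLockers() in
src/backend/storage/lmgr/lmgr.c: collect the VXIDs that currently conflict
with some lock request, then wait each one out. A hedged sketch follows (the
wrapper name is invented; GetLockConflicts() and VirtualXactLock() are the
real entry points).

    #include "postgres.h"
    #include "storage/lock.h"

    static void
    wait_out_conflicting_vxids(const LOCKTAG *locktag)  /* hypothetical */
    {
        VirtualTransactionId *old_lockholders;

        /* The returned array is terminated by an invalid VXID. */
        old_lockholders = GetLockConflicts(locktag, AccessExclusiveLock, NULL);

        while (VirtualTransactionIdIsValid(*old_lockholders))
        {
            (void) VirtualXactLock(*old_lockholders, true); /* wait = true */
            old_lockholders++;
        }
    }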
4754 :
4755 : /*
4756 : * LockWaiterCount
4757 : *
4758                 :  * Find the number of lock requesters on this locktag.
4759 : */
4760 : int
4761 119998 : LockWaiterCount(const LOCKTAG *locktag)
4762 : {
4763 119998 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4764 : LOCK *lock;
4765 : bool found;
4766 : uint32 hashcode;
4767 : LWLock *partitionLock;
4768 119998 : int waiters = 0;
4769 :
4770 119998 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4771 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4772 :
4773 119998 : hashcode = LockTagHashCode(locktag);
4774 119998 : partitionLock = LockHashPartitionLock(hashcode);
4775 119998 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4776 :
4777 119998 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4778 : locktag,
4779 : hashcode,
4780 : HASH_FIND,
4781 : &found);
4782 119998 : if (found)
4783 : {
4784 : Assert(lock != NULL);
4785 50 : waiters = lock->nRequested;
4786 : }
4787 119998 : LWLockRelease(partitionLock);
4788 :
4789 119998 : return waiters;
4790 : }
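
Note that the function reads lock->nRequested, which counts granted holders
as well as sleepers, so "waiters" here really means "requesters". A typical
in-tree use is counting queued relation-extension requests (cf.
RelationExtensionLockWaiterCount() in lmgr.c) so the extending backend can
add proportionally many pages at once. A hedged sketch of such a wrapper
(the function name is invented; the locktag macro is the real one from
lock.h):

    #include "postgres.h"
    #include "storage/lock.h"

    static int
    extension_lock_requesters(Oid dbid, Oid relid)      /* hypothetical */
    {
        LOCKTAG tag;

        SET_LOCKTAG_RELATION_EXTEND(tag, dbid, relid);
        return LockWaiterCount(&tag);
    }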