Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * lock.c
4 : * POSTGRES primary lock mechanism
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/lock.c
12 : *
13 : * NOTES
14 : * A lock table is a shared memory hash table. When
15 : * a process tries to acquire a lock of a type that conflicts
16 : * with existing locks, it is put to sleep using the routines
17 : * in storage/lmgr/proc.c.
18 : *
19 : * For the most part, this code should be invoked via lmgr.c
20 : * or another lock-management module, not directly.
21 : *
22 : * Interface:
23 : *
24 : * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : * LockCheckConflicts(), GrantLock()
27 : *
28 : *-------------------------------------------------------------------------
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 :
35 : #include "access/transam.h"
36 : #include "access/twophase.h"
37 : #include "access/twophase_rmgr.h"
38 : #include "access/xact.h"
39 : #include "access/xlog.h"
40 : #include "access/xlogutils.h"
41 : #include "miscadmin.h"
42 : #include "pg_trace.h"
43 : #include "pgstat.h"
44 : #include "storage/proc.h"
45 : #include "storage/procarray.h"
46 : #include "storage/sinvaladt.h"
47 : #include "storage/spin.h"
48 : #include "storage/standby.h"
49 : #include "utils/memutils.h"
50 : #include "utils/ps_status.h"
51 : #include "utils/resowner.h"
52 :
53 :
54 : /* This configuration variable is used to set the lock table size */
55 : int max_locks_per_xact; /* set by guc.c */
56 :
57 : #define NLOCKENTS() \
58 : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
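/*
 * Worked example (illustrative, not from the original source): with the
 * default max_locks_per_xact = 64, MaxBackends = 100, and
 * max_prepared_xacts = 0, NLOCKENTS() sizes the shared lock table for
 * 64 * (100 + 0) = 6400 entries.
 */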
59 :
60 :
61 : /*
62 : * Data structures defining the semantics of the standard lock methods.
63 : *
64 : * The conflict table defines the semantics of the various lock modes.
65 : */
66 : static const LOCKMASK LockConflicts[] = {
67 : 0,
68 :
69 : /* AccessShareLock */
70 : LOCKBIT_ON(AccessExclusiveLock),
71 :
72 : /* RowShareLock */
73 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
74 :
75 : /* RowExclusiveLock */
76 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
77 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
78 :
79 : /* ShareUpdateExclusiveLock */
80 : LOCKBIT_ON(ShareUpdateExclusiveLock) |
81 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
82 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
83 :
84 : /* ShareLock */
85 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
86 : LOCKBIT_ON(ShareRowExclusiveLock) |
87 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
88 :
89 : /* ShareRowExclusiveLock */
90 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
91 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
92 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
93 :
94 : /* ExclusiveLock */
95 : LOCKBIT_ON(RowShareLock) |
96 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
97 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
98 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
99 :
100 : /* AccessExclusiveLock */
101 : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
102 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
103 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
104 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
105 :
106 : };
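/*
 * Illustrative sketch (hypothetical, not part of this file): the table is
 * symmetric by construction, so two modes conflict iff either one's mask
 * has the other's bit set.  E.g. LockConflicts[RowExclusiveLock] includes
 * LOCKBIT_ON(ShareLock), so RowExclusiveLock and ShareLock conflict.
 */
#ifdef LOCK_CONFLICT_TABLE_EXAMPLE	/* hypothetical guard; never built */
static bool
lock_conflict_table_example(LOCKMODE mode1, LOCKMODE mode2)
{
	/* consult the requester's conflict mask for the holder's mode bit */
	return (LockConflicts[mode1] & LOCKBIT_ON(mode2)) != 0;
}
#endif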
107 :
108 : /* Names of lock modes, for debug printouts */
109 : static const char *const lock_mode_names[] =
110 : {
111 : "INVALID",
112 : "AccessShareLock",
113 : "RowShareLock",
114 : "RowExclusiveLock",
115 : "ShareUpdateExclusiveLock",
116 : "ShareLock",
117 : "ShareRowExclusiveLock",
118 : "ExclusiveLock",
119 : "AccessExclusiveLock"
120 : };
121 :
122 : #ifndef LOCK_DEBUG
123 : static bool Dummy_trace = false;
124 : #endif
125 :
126 : static const LockMethodData default_lockmethod = {
127 : MaxLockMode,
128 : LockConflicts,
129 : lock_mode_names,
130 : #ifdef LOCK_DEBUG
131 : &Trace_locks
132 : #else
133 : &Dummy_trace
134 : #endif
135 : };
136 :
137 : static const LockMethodData user_lockmethod = {
138 : MaxLockMode,
139 : LockConflicts,
140 : lock_mode_names,
141 : #ifdef LOCK_DEBUG
142 : &Trace_userlocks
143 : #else
144 : &Dummy_trace
145 : #endif
146 : };
147 :
148 : /*
149 : * map from lock method id to the lock table data structures
150 : */
151 : static const LockMethod LockMethods[] = {
152 : NULL,
153 : &default_lockmethod,
154 : &user_lockmethod
155 : };
156 :
157 :
158 : /* Record that's written to 2PC state file when a lock is persisted */
159 : typedef struct TwoPhaseLockRecord
160 : {
161 : LOCKTAG locktag;
162 : LOCKMODE lockmode;
163 : } TwoPhaseLockRecord;
164 :
165 :
166 : /*
167 : * Count of the number of fast path lock slots we believe to be used. This
168 : * might be higher than the real number if another backend has transferred
169 : * our locks to the primary lock table, but it can never be lower than the
170 : * real value, since only we can acquire locks on our own behalf.
171 : */
172 : static int FastPathLocalUseCount = 0;
173 :
174 : /*
175 : * Flag to indicate if the relation extension lock is held by this backend.
176 : * This flag is used to ensure that while holding the relation extension lock
177 : * we don't try to acquire a heavyweight lock on any other object. This
178 : * restriction implies that the relation extension lock won't ever participate
179 : * in the deadlock cycle because we can never wait for any other heavyweight
180 : * lock after acquiring this lock.
181 : *
182 : * Such a restriction is okay for relation extension locks because, unlike
183 : * other heavyweight locks, they are not held until transaction end. They
184 : * are taken for a short duration to extend a particular relation and then
185 : * released.
186 : */
187 : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
188 :
189 : /* Macros for manipulating proc->fpLockBits */
190 : #define FAST_PATH_BITS_PER_SLOT 3
191 : #define FAST_PATH_LOCKNUMBER_OFFSET 1
192 : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
193 : #define FAST_PATH_GET_BITS(proc, n) \
194 : (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
195 : #define FAST_PATH_BIT_POSITION(n, l) \
196 : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
197 : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
198 : AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
199 : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
200 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
201 : (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
202 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
203 : (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
204 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
205 : ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
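/*
 * Worked example (illustrative): each fast-path slot uses 3 bits of
 * fpLockBits, one per eligible mode (AccessShareLock = 1 through
 * RowExclusiveLock = 3).  For slot n = 2 and l = RowShareLock (= 2),
 * FAST_PATH_BIT_POSITION(2, 2) = (2 - 1) + 3 * 2 = 7, so bit 7 records
 * "RowShareLock held in slot 2".
 */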
206 :
207 : /*
208 : * The fast-path lock mechanism is concerned only with relation locks on
209 : * unshared relations by backends bound to a database. The fast-path
210 : * mechanism exists mostly to accelerate acquisition and release of locks
211 : * that rarely conflict. Because ShareUpdateExclusiveLock is
212 : * self-conflicting, it can't use the fast-path mechanism; but it also does
213 : * not conflict with any of the locks that do, so we can ignore it completely.
214 : */
215 : #define EligibleForRelationFastPath(locktag, mode) \
216 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
217 : (locktag)->locktag_type == LOCKTAG_RELATION && \
218 : (locktag)->locktag_field1 == MyDatabaseId && \
219 : MyDatabaseId != InvalidOid && \
220 : (mode) < ShareUpdateExclusiveLock)
221 : #define ConflictsWithRelationFastPath(locktag, mode) \
222 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
223 : (locktag)->locktag_type == LOCKTAG_RELATION && \
224 : (locktag)->locktag_field1 != InvalidOid && \
225 : (mode) > ShareUpdateExclusiveLock)
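/*
 * Illustrative example: an AccessShareLock on an ordinary table in the
 * current database satisfies EligibleForRelationFastPath, while an
 * AccessExclusiveLock on any relation satisfies
 * ConflictsWithRelationFastPath (mode > ShareUpdateExclusiveLock) and so
 * must suppress fast-path use for that lock tag.
 */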
226 :
227 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
228 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
229 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
230 : const LOCKTAG *locktag, uint32 hashcode);
231 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
232 :
233 : /*
234 : * To make the fast-path lock mechanism work, we must have some way of
235 : * preventing the use of the fast-path when a conflicting lock might be present.
236 : * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
237 : * and maintain an integer count of the number of "strong" lockers
238 : * in each partition. When any "strong" lockers are present (which is
239 : * hopefully not very often), the fast-path mechanism can't be used, and we
240 : * must fall back to the slower method of pushing matching locks directly
241 : * into the main lock tables.
242 : *
243 : * The deadlock detector does not know anything about the fast path mechanism,
244 : * so any locks that might be involved in a deadlock must be transferred from
245 : * the fast-path queues to the main lock table.
246 : */
247 :
248 : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
249 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
250 : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
251 : #define FastPathStrongLockHashPartition(hashcode) \
252 : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
253 :
254 : typedef struct
255 : {
256 : slock_t mutex;
257 : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
258 : } FastPathStrongRelationLockData;
259 :
260 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
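/*
 * Minimal sketch (hypothetical, assuming only the declarations above) of
 * how a "strong" locker advertises itself before scanning for fast-path
 * locks to transfer: bump the counter for the tag's partition under the
 * spinlock, so concurrent fast-path acquirers will see it and fall back.
 */
#ifdef STRONG_LOCK_COUNT_EXAMPLE	/* hypothetical guard; never built */
static void
strong_lock_count_example(uint32 hashcode)
{
	uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	FastPathStrongRelationLocks->count[fasthashcode]++;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
#endif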
261 :
262 :
263 : /*
264 : * Pointers to hash tables containing lock state
265 : *
266 : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
267 : * shared memory; LockMethodLocalHash is local to each backend.
268 : */
269 : static HTAB *LockMethodLockHash;
270 : static HTAB *LockMethodProcLockHash;
271 : static HTAB *LockMethodLocalHash;
272 :
273 :
274 : /* private state for error cleanup */
275 : static LOCALLOCK *StrongLockInProgress;
276 : static LOCALLOCK *awaitedLock;
277 : static ResourceOwner awaitedOwner;
278 :
279 :
280 : #ifdef LOCK_DEBUG
281 :
282 : /*------
283 : * The following configuration options are available for lock debugging:
284 : *
285 : * TRACE_LOCKS -- give a bunch of output about what's going on in this file
286 : * TRACE_USERLOCKS -- same but for user locks
287 : * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
288 : * (use to avoid output on system tables)
289 : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
290 : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
291 : *
292 : * Furthermore, in storage/lmgr/lwlock.c:
293 : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
294 : *
295 : * Define LOCK_DEBUG at compile time to get all these enabled.
296 : * --------
297 : */
298 :
299 : int Trace_lock_oidmin = FirstNormalObjectId;
300 : bool Trace_locks = false;
301 : bool Trace_userlocks = false;
302 : int Trace_lock_table = 0;
303 : bool Debug_deadlocks = false;
304 :
305 :
306 : inline static bool
307 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
308 : {
309 : return
310 : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
311 : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
312 : || (Trace_lock_table &&
313 : (tag->locktag_field2 == Trace_lock_table));
314 : }
315 :
316 :
317 : inline static void
318 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
319 : {
320 : if (LOCK_DEBUG_ENABLED(&lock->tag))
321 : elog(LOG,
322 : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
323 : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
324 : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
325 : where, lock,
326 : lock->tag.locktag_field1, lock->tag.locktag_field2,
327 : lock->tag.locktag_field3, lock->tag.locktag_field4,
328 : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
329 : lock->grantMask,
330 : lock->requested[1], lock->requested[2], lock->requested[3],
331 : lock->requested[4], lock->requested[5], lock->requested[6],
332 : lock->requested[7], lock->nRequested,
333 : lock->granted[1], lock->granted[2], lock->granted[3],
334 : lock->granted[4], lock->granted[5], lock->granted[6],
335 : lock->granted[7], lock->nGranted,
336 : dclist_count(&lock->waitProcs),
337 : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
338 : }
339 :
340 :
341 : inline static void
342 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
343 : {
344 : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
345 : elog(LOG,
346 : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
347 : where, proclockP, proclockP->tag.myLock,
348 : PROCLOCK_LOCKMETHOD(*(proclockP)),
349 : proclockP->tag.myProc, (int) proclockP->holdMask);
350 : }
351 : #else /* not LOCK_DEBUG */
352 :
353 : #define LOCK_PRINT(where, lock, type) ((void) 0)
354 : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
355 : #endif /* not LOCK_DEBUG */
356 :
357 :
358 : static uint32 proclock_hash(const void *key, Size keysize);
359 : static void RemoveLocalLock(LOCALLOCK *locallock);
360 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
361 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
362 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
363 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
364 : static void FinishStrongLockAcquire(void);
365 : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
366 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
367 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
368 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
369 : PROCLOCK *proclock, LockMethod lockMethodTable);
370 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
371 : LockMethod lockMethodTable, uint32 hashcode,
372 : bool wakeupNeeded);
373 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
374 : LOCKTAG *locktag, LOCKMODE lockmode,
375 : bool decrement_strong_lock_count);
376 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
377 : BlockedProcsData *data);
378 :
379 :
380 : /*
381 : * InitLocks -- Initialize the lock manager's data structures.
382 : *
383 : * This is called from CreateSharedMemoryAndSemaphores(), which see for
384 : * more comments. In the normal postmaster case, the shared hash tables
385 : * are created here, as well as a locallock hash table that will remain
386 : * unused and empty in the postmaster itself. Backends inherit the pointers
387 : * to the shared tables via fork(), and also inherit an image of the locallock
388 : * hash table, which they proceed to use. In the EXEC_BACKEND case, each
389 : * backend re-executes this code to obtain pointers to the already existing
390 : * shared hash tables and to create its locallock hash table.
391 : */
392 : void
393 1562 : InitLocks(void)
394 : {
395 : HASHCTL info;
396 : long init_table_size,
397 : max_table_size;
398 : bool found;
399 :
400 : /*
401 : * Compute init/max size to request for lock hashtables. Note these
402 : * calculations must agree with LockShmemSize!
403 : */
404 1562 : max_table_size = NLOCKENTS();
405 1562 : init_table_size = max_table_size / 2;
406 :
407 : /*
408 : * Allocate hash table for LOCK structs. This stores per-locked-object
409 : * information.
410 : */
411 1562 : info.keysize = sizeof(LOCKTAG);
412 1562 : info.entrysize = sizeof(LOCK);
413 1562 : info.num_partitions = NUM_LOCK_PARTITIONS;
414 :
415 1562 : LockMethodLockHash = ShmemInitHash("LOCK hash",
416 : init_table_size,
417 : max_table_size,
418 : &info,
419 : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
420 :
421 : /* Assume an average of 2 holders per lock */
422 1562 : max_table_size *= 2;
423 1562 : init_table_size *= 2;
424 :
425 : /*
426 : * Allocate hash table for PROCLOCK structs. This stores
427 : * per-lock-per-holder information.
428 : */
429 1562 : info.keysize = sizeof(PROCLOCKTAG);
430 1562 : info.entrysize = sizeof(PROCLOCK);
431 1562 : info.hash = proclock_hash;
432 1562 : info.num_partitions = NUM_LOCK_PARTITIONS;
433 :
434 1562 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
435 : init_table_size,
436 : max_table_size,
437 : &info,
438 : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
439 :
440 : /*
441 : * Allocate fast-path structures.
442 : */
443 1562 : FastPathStrongRelationLocks =
444 1562 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
445 : sizeof(FastPathStrongRelationLockData), &found);
446 1562 : if (!found)
447 1562 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
448 :
449 : /*
450 : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
451 : * counts and resource owner information.
452 : *
453 : * The non-shared table could already exist in this process (this occurs
454 : * when the postmaster is recreating shared memory after a backend crash).
455 : * If so, delete and recreate it. (We could simply leave it, since it
456 : * ought to be empty in the postmaster, but for safety let's zap it.)
457 : */
458 1562 : if (LockMethodLocalHash)
459 8 : hash_destroy(LockMethodLocalHash);
460 :
461 1562 : info.keysize = sizeof(LOCALLOCKTAG);
462 1562 : info.entrysize = sizeof(LOCALLOCK);
463 :
464 1562 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
465 : 16,
466 : &info,
467 : HASH_ELEM | HASH_BLOBS);
468 1562 : }
469 :
470 :
471 : /*
472 : * Fetch the lock method table associated with a given lock
473 : */
474 : LockMethod
475 178 : GetLocksMethodTable(const LOCK *lock)
476 : {
477 178 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
478 :
479 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
480 178 : return LockMethods[lockmethodid];
481 : }
482 :
483 : /*
484 : * Fetch the lock method table associated with a given locktag
485 : */
486 : LockMethod
487 2190 : GetLockTagsMethodTable(const LOCKTAG *locktag)
488 : {
489 2190 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
490 :
491 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492 2190 : return LockMethods[lockmethodid];
493 : }
494 :
495 :
496 : /*
497 : * Compute the hash code associated with a LOCKTAG.
498 : *
499 : * To avoid unnecessary recomputations of the hash code, we try to do this
500 : * just once per function, and then pass it around as needed. Aside from
501 : * passing the hashcode to hash_search_with_hash_value(), we can extract
502 : * the lock partition number from the hashcode.
503 : */
504 : uint32
505 25147774 : LockTagHashCode(const LOCKTAG *locktag)
506 : {
507 25147774 : return get_hash_value(LockMethodLockHash, (const void *) locktag);
508 : }
509 :
510 : /*
511 : * Compute the hash code associated with a PROCLOCKTAG.
512 : *
513 : * Because we want to use just one set of partition locks for both the
514 : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
515 : * fall into the same partition number as their associated LOCKs.
516 : * dynahash.c expects the partition number to be the low-order bits of
517 : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
518 : * same low-order bits as the associated LOCKTAG's hash code. We achieve
519 : * this with this specialized hash function.
520 : */
521 : static uint32
522 1694 : proclock_hash(const void *key, Size keysize)
523 : {
524 1694 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
525 : uint32 lockhash;
526 : Datum procptr;
527 :
528 : Assert(keysize == sizeof(PROCLOCKTAG));
529 :
530 : /* Look into the associated LOCK object, and compute its hash code */
531 1694 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
532 :
533 : /*
534 : * To make the hash code also depend on the PGPROC, we xor the proc
535 : * struct's address into the hash code, left-shifted so that the
536 : * partition-number bits don't change. Since this is only a hash, we
537 : * don't care if we lose high-order bits of the address; use an
538 : * intermediate variable to suppress cast-pointer-to-int warnings.
539 : */
540 1694 : procptr = PointerGetDatum(proclocktag->myProc);
541 1694 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
542 :
543 1694 : return lockhash;
544 : }
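/*
 * Illustrative check (not in the original): because procptr is shifted
 * left by LOG2_NUM_LOCK_PARTITIONS before the xor, the low-order
 * partition bits of the result equal those of the LOCK's hash, so
 * LockHashPartition(proclock hash) == LockHashPartition(lock hash) always
 * holds, as required by the comment above.
 */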
545 :
546 : /*
547 : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
548 : * for its underlying LOCK.
549 : *
550 : * We use this just to avoid redundant calls of LockTagHashCode().
551 : */
552 : static inline uint32
553 6020542 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
554 : {
555 6020542 : uint32 lockhash = hashcode;
556 : Datum procptr;
557 :
558 : /*
559 : * This must match proclock_hash()!
560 : */
561 6020542 : procptr = PointerGetDatum(proclocktag->myProc);
562 6020542 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
563 :
564 6020542 : return lockhash;
565 : }
566 :
567 : /*
568 : * Given two lock modes, return whether they would conflict.
569 : */
570 : bool
571 464 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
572 : {
573 464 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
574 :
575 464 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
576 272 : return true;
577 :
578 192 : return false;
579 : }
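/*
 * Usage example (illustrative): DoLockModesConflict(RowExclusiveLock,
 * ShareLock) returns true, since LockConflicts[RowExclusiveLock] has the
 * ShareLock bit set, while DoLockModesConflict(RowExclusiveLock,
 * RowShareLock) returns false.
 */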
580 :
581 : /*
582 : * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
583 : * by the current transaction
584 : */
585 : bool
586 0 : LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
587 : {
588 : LOCALLOCKTAG localtag;
589 : LOCALLOCK *locallock;
590 :
591 : /*
592 : * See if there is a LOCALLOCK entry for this lock and lockmode
593 : */
594 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
595 0 : localtag.lock = *locktag;
596 0 : localtag.mode = lockmode;
597 :
598 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
599 : &localtag,
600 : HASH_FIND, NULL);
601 :
602 0 : return (locallock && locallock->nLocks > 0);
603 : }
604 :
605 : #ifdef USE_ASSERT_CHECKING
606 : /*
607 : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
608 : * evaluate assertions based on all locks held.
609 : */
610 : HTAB *
611 : GetLockMethodLocalHash(void)
612 : {
613 : return LockMethodLocalHash;
614 : }
615 : #endif
616 :
617 : /*
618 : * LockHasWaiters -- look up 'locktag' and check if releasing this
619 : * lock would wake up other processes waiting for it.
620 : */
621 : bool
622 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
623 : {
624 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
625 : LockMethod lockMethodTable;
626 : LOCALLOCKTAG localtag;
627 : LOCALLOCK *locallock;
628 : LOCK *lock;
629 : PROCLOCK *proclock;
630 : LWLock *partitionLock;
631 0 : bool hasWaiters = false;
632 :
633 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
634 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
635 0 : lockMethodTable = LockMethods[lockmethodid];
636 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
637 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
638 :
639 : #ifdef LOCK_DEBUG
640 : if (LOCK_DEBUG_ENABLED(locktag))
641 : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
642 : locktag->locktag_field1, locktag->locktag_field2,
643 : lockMethodTable->lockModeNames[lockmode]);
644 : #endif
645 :
646 : /*
647 : * Find the LOCALLOCK entry for this lock and lockmode
648 : */
649 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
650 0 : localtag.lock = *locktag;
651 0 : localtag.mode = lockmode;
652 :
653 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
654 : &localtag,
655 : HASH_FIND, NULL);
656 :
657 : /*
658 : * Let the caller print its own error message, too; do not ereport(ERROR).
659 : */
660 0 : if (!locallock || locallock->nLocks <= 0)
661 : {
662 0 : elog(WARNING, "you don't own a lock of type %s",
663 : lockMethodTable->lockModeNames[lockmode]);
664 0 : return false;
665 : }
666 :
667 : /*
668 : * Check the shared lock table.
669 : */
670 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
671 :
672 0 : LWLockAcquire(partitionLock, LW_SHARED);
673 :
674 : /*
675 : * We don't need to re-find the lock or proclock, since we kept their
676 : * addresses in the locallock table, and they couldn't have been removed
677 : * while we were holding a lock on them.
678 : */
679 0 : lock = locallock->lock;
680 : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
681 0 : proclock = locallock->proclock;
682 : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
683 :
684 : /*
685 : * Double-check that we are actually holding a lock of the type we want to
686 : * release.
687 : */
688 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
689 : {
690 : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
691 0 : LWLockRelease(partitionLock);
692 0 : elog(WARNING, "you don't own a lock of type %s",
693 : lockMethodTable->lockModeNames[lockmode]);
694 0 : RemoveLocalLock(locallock);
695 0 : return false;
696 : }
697 :
698 : /*
699 : * Do the checking.
700 : */
701 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
702 0 : hasWaiters = true;
703 :
704 0 : LWLockRelease(partitionLock);
705 :
706 0 : return hasWaiters;
707 : }
708 :
709 : /*
710 : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
711 : * set lock if/when no conflicts.
712 : *
713 : * Inputs:
714 : * locktag: unique identifier for the lockable object
715 : * lockmode: lock mode to acquire
716 : * sessionLock: if true, acquire lock for session not current transaction
717 : * dontWait: if true, don't wait to acquire lock
718 : *
719 : * Returns one of:
720 : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
721 : * LOCKACQUIRE_OK lock successfully acquired
722 : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
723 : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
724 : *
725 : * In the normal case where dontWait=false and the caller doesn't need to
726 : * distinguish a freshly acquired lock from one already taken earlier in
727 : * this same transaction, there is no need to examine the return value.
728 : *
729 : * Side Effects: The lock is acquired and recorded in lock tables.
730 : *
731 : * NOTE: if we wait for the lock, there is no way to abort the wait
732 : * short of aborting the transaction.
733 : */
734 : LockAcquireResult
735 861012 : LockAcquire(const LOCKTAG *locktag,
736 : LOCKMODE lockmode,
737 : bool sessionLock,
738 : bool dontWait)
739 : {
740 861012 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
741 : true, NULL);
742 : }
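/*
 * Hypothetical caller sketch (not part of this file): a conditional
 * acquire that gives up instead of sleeping.  The helper name and guard
 * are illustrative only.
 */
#ifdef LOCK_ACQUIRE_CALLER_EXAMPLE	/* hypothetical guard; never built */
static bool
try_relation_lock_example(Oid dbid, Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, dbid, relid);

	/* dontWait = true: returns LOCKACQUIRE_NOT_AVAIL rather than sleeping */
	return LockAcquire(&tag, AccessExclusiveLock, false, true) !=
		LOCKACQUIRE_NOT_AVAIL;
}
#endif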
743 :
744 : /*
745 : * LockAcquireExtended - allows us to specify additional options
746 : *
747 : * reportMemoryError specifies whether a lock request that fills the lock
748 : * table should generate an ERROR or not. Passing "false" allows the caller
749 : * to attempt to recover from lock-table-full situations, perhaps by forcibly
750 : * canceling other lock holders and then retrying. Note, however, that the
751 : * return code for that is LOCKACQUIRE_NOT_AVAIL, so it's unsafe to use this
752 : * in combination with dontWait = true, as the cause of failure can't then
753 : * be distinguished.
754 : *
755 : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
756 : * table entry if a lock is successfully acquired, or NULL if not.
757 : */
758 : LockAcquireResult
759 27151158 : LockAcquireExtended(const LOCKTAG *locktag,
760 : LOCKMODE lockmode,
761 : bool sessionLock,
762 : bool dontWait,
763 : bool reportMemoryError,
764 : LOCALLOCK **locallockp)
765 : {
766 27151158 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
767 : LockMethod lockMethodTable;
768 : LOCALLOCKTAG localtag;
769 : LOCALLOCK *locallock;
770 : LOCK *lock;
771 : PROCLOCK *proclock;
772 : bool found;
773 : ResourceOwner owner;
774 : uint32 hashcode;
775 : LWLock *partitionLock;
776 : bool found_conflict;
777 27151158 : bool log_lock = false;
778 :
779 27151158 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
780 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
781 27151158 : lockMethodTable = LockMethods[lockmethodid];
782 27151158 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
783 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
784 :
785 27151158 : if (RecoveryInProgress() && !InRecovery &&
786 366536 : (locktag->locktag_type == LOCKTAG_OBJECT ||
787 366536 : locktag->locktag_type == LOCKTAG_RELATION) &&
788 : lockmode > RowExclusiveLock)
789 0 : ereport(ERROR,
790 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
791 : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
792 : lockMethodTable->lockModeNames[lockmode]),
793 : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
794 :
795 : #ifdef LOCK_DEBUG
796 : if (LOCK_DEBUG_ENABLED(locktag))
797 : elog(LOG, "LockAcquire: lock [%u,%u] %s",
798 : locktag->locktag_field1, locktag->locktag_field2,
799 : lockMethodTable->lockModeNames[lockmode]);
800 : #endif
801 :
802 : /* Identify owner for lock */
803 27151158 : if (sessionLock)
804 58738 : owner = NULL;
805 : else
806 27092420 : owner = CurrentResourceOwner;
807 :
808 : /*
809 : * Find or create a LOCALLOCK entry for this lock and lockmode
810 : */
811 27151158 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
812 27151158 : localtag.lock = *locktag;
813 27151158 : localtag.mode = lockmode;
814 :
815 27151158 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
816 : &localtag,
817 : HASH_ENTER, &found);
818 :
819 : /*
820 : * if it's a new locallock object, initialize it
821 : */
822 27151158 : if (!found)
823 : {
824 24277614 : locallock->lock = NULL;
825 24277614 : locallock->proclock = NULL;
826 24277614 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
827 24277614 : locallock->nLocks = 0;
828 24277614 : locallock->holdsStrongLockCount = false;
829 24277614 : locallock->lockCleared = false;
830 24277614 : locallock->numLockOwners = 0;
831 24277614 : locallock->maxLockOwners = 8;
832 24277614 : locallock->lockOwners = NULL; /* in case next line fails */
833 24277614 : locallock->lockOwners = (LOCALLOCKOWNER *)
834 24277614 : MemoryContextAlloc(TopMemoryContext,
835 24277614 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
836 : }
837 : else
838 : {
839 : /* Make sure there will be room to remember the lock */
840 2873544 : if (locallock->numLockOwners >= locallock->maxLockOwners)
841 : {
842 38 : int newsize = locallock->maxLockOwners * 2;
843 :
844 38 : locallock->lockOwners = (LOCALLOCKOWNER *)
845 38 : repalloc(locallock->lockOwners,
846 : newsize * sizeof(LOCALLOCKOWNER));
847 38 : locallock->maxLockOwners = newsize;
848 : }
849 : }
850 27151158 : hashcode = locallock->hashcode;
851 :
852 27151158 : if (locallockp)
853 26290146 : *locallockp = locallock;
854 :
855 : /*
856 : * If we already hold the lock, we can just increase the count locally.
857 : *
858 : * If lockCleared is already set, caller need not worry about absorbing
859 : * sinval messages related to the lock's object.
860 : */
861 27151158 : if (locallock->nLocks > 0)
862 : {
863 2873544 : GrantLockLocal(locallock, owner);
864 2873544 : if (locallock->lockCleared)
865 2749008 : return LOCKACQUIRE_ALREADY_CLEAR;
866 : else
867 124536 : return LOCKACQUIRE_ALREADY_HELD;
868 : }
869 :
870 : /*
871 : * We don't acquire any other heavyweight lock while holding the relation
872 : * extension lock. We do allow acquiring the same relation extension
873 : * lock more than once, but that case won't reach here.
874 : */
875 : Assert(!IsRelationExtensionLockHeld);
876 :
877 : /*
878 : * Prepare to emit a WAL record if acquisition of this lock needs to be
879 : * replayed in a standby server.
880 : *
881 : * Here we prepare to log; after the lock is acquired we'll issue the log
882 : * record. This arrangement simplifies error recovery in case the
883 : * preparation step fails.
884 : *
885 : * Only AccessExclusiveLocks can conflict with lock types that read-only
886 : * transactions can acquire in a standby server. Make sure this definition
887 : * matches the one in GetRunningTransactionLocks().
888 : */
889 24277614 : if (lockmode >= AccessExclusiveLock &&
890 354466 : locktag->locktag_type == LOCKTAG_RELATION &&
891 234928 : !RecoveryInProgress() &&
892 196322 : XLogStandbyInfoActive())
893 : {
894 146498 : LogAccessExclusiveLockPrepare();
895 146498 : log_lock = true;
896 : }
897 :
898 : /*
899 : * Attempt to take lock via fast path, if eligible. But if we remember
900 : * having filled up the fast path array, we don't attempt to make any
901 : * further use of it until we release some locks. It's possible that some
902 : * other backend has transferred some of those locks to the shared hash
903 : * table, leaving space free, but it's not worth acquiring the LWLock just
904 : * to check. It's also possible that we're acquiring a second or third
905 : * lock type on a relation we have already locked using the fast-path, but
906 : * for now we don't worry about that case either.
907 : */
908 24277614 : if (EligibleForRelationFastPath(locktag, lockmode) &&
909 21933662 : FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
910 : {
911 21434326 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
912 : bool acquired;
913 :
914 : /*
915 : * LWLockAcquire acts as a memory sequencing point, so it's safe to
916 : * assume that any strong locker whose increment to
917 : * FastPathStrongRelationLocks->counts becomes visible after we test
918 : * it has yet to begin to transfer fast-path locks.
919 : */
920 21434326 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
921 21434326 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
922 294100 : acquired = false;
923 : else
924 21140226 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
925 : lockmode);
926 21434326 : LWLockRelease(&MyProc->fpInfoLock);
927 21434326 : if (acquired)
928 : {
929 : /*
930 : * The locallock might contain stale pointers to some old shared
931 : * objects; we MUST reset these to null before considering the
932 : * lock to be acquired via fast-path.
933 : */
934 21140226 : locallock->lock = NULL;
935 21140226 : locallock->proclock = NULL;
936 21140226 : GrantLockLocal(locallock, owner);
937 21140226 : return LOCKACQUIRE_OK;
938 : }
939 : }
940 :
941 : /*
942 : * If this lock could potentially have been taken via the fast-path by
943 : * some other backend, we must (temporarily) disable further use of the
944 : * fast-path for this lock tag, and migrate any locks already taken via
945 : * this method to the main lock table.
946 : */
947 3137388 : if (ConflictsWithRelationFastPath(locktag, lockmode))
948 : {
949 282646 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
950 :
951 282646 : BeginStrongLockAcquire(locallock, fasthashcode);
952 282646 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
953 : hashcode))
954 : {
955 0 : AbortStrongLockAcquire();
956 0 : if (locallock->nLocks == 0)
957 0 : RemoveLocalLock(locallock);
958 0 : if (locallockp)
959 0 : *locallockp = NULL;
960 0 : if (reportMemoryError)
961 0 : ereport(ERROR,
962 : (errcode(ERRCODE_OUT_OF_MEMORY),
963 : errmsg("out of shared memory"),
964 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
965 : else
966 0 : return LOCKACQUIRE_NOT_AVAIL;
967 : }
968 : }
969 :
970 : /*
971 : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
972 : * take it via the fast-path, either, so we've got to mess with the shared
973 : * lock table.
974 : */
975 3137388 : partitionLock = LockHashPartitionLock(hashcode);
976 :
977 3137388 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
978 :
979 : /*
980 : * Find or create lock and proclock entries with this tag
981 : *
982 : * Note: if the locallock object already existed, it might have a pointer
983 : * to the lock already ... but we should not assume that that pointer is
984 : * valid, since a lock object with zero hold and request counts can go
985 : * away anytime. So we have to use SetupLockInTable() to recompute the
986 : * lock and proclock pointers, even if they're already set.
987 : */
988 3137388 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
989 : hashcode, lockmode);
990 3137388 : if (!proclock)
991 : {
992 0 : AbortStrongLockAcquire();
993 0 : LWLockRelease(partitionLock);
994 0 : if (locallock->nLocks == 0)
995 0 : RemoveLocalLock(locallock);
996 0 : if (locallockp)
997 0 : *locallockp = NULL;
998 0 : if (reportMemoryError)
999 0 : ereport(ERROR,
1000 : (errcode(ERRCODE_OUT_OF_MEMORY),
1001 : errmsg("out of shared memory"),
1002 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
1003 : else
1004 0 : return LOCKACQUIRE_NOT_AVAIL;
1005 : }
1006 3137388 : locallock->proclock = proclock;
1007 3137388 : lock = proclock->tag.myLock;
1008 3137388 : locallock->lock = lock;
1009 :
1010 : /*
1011 : * If lock requested conflicts with locks requested by waiters, must join
1012 : * wait queue. Otherwise, check for conflict with already-held locks.
1013 : * (That check comes last because it's the most complex.)
1014 : */
1015 3137388 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1016 60 : found_conflict = true;
1017 : else
1018 3137328 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1019 : lock, proclock);
1020 :
1021 3137388 : if (!found_conflict)
1022 : {
1023 : /* No conflict with held or previously requested locks */
1024 3134000 : GrantLock(lock, proclock, lockmode);
1025 3134000 : GrantLockLocal(locallock, owner);
1026 : }
1027 : else
1028 : {
1029 : /*
1030 : * We can't acquire the lock immediately. If caller specified no
1031 : * blocking, remove useless table entries and return
1032 : * LOCKACQUIRE_NOT_AVAIL without waiting.
1033 : */
1034 3388 : if (dontWait)
1035 : {
1036 1320 : AbortStrongLockAcquire();
1037 1320 : if (proclock->holdMask == 0)
1038 : {
1039 : uint32 proclock_hashcode;
1040 :
1041 916 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1042 916 : dlist_delete(&proclock->lockLink);
1043 916 : dlist_delete(&proclock->procLink);
1044 916 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1045 916 : &(proclock->tag),
1046 : proclock_hashcode,
1047 : HASH_REMOVE,
1048 : NULL))
1049 0 : elog(PANIC, "proclock table corrupted");
1050 : }
1051 : else
1052 : PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1053 1320 : lock->nRequested--;
1054 1320 : lock->requested[lockmode]--;
1055 : LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1056 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1057 : Assert(lock->nGranted <= lock->nRequested);
1058 1320 : LWLockRelease(partitionLock);
1059 1320 : if (locallock->nLocks == 0)
1060 1320 : RemoveLocalLock(locallock);
1061 1320 : if (locallockp)
1062 448 : *locallockp = NULL;
1063 1320 : return LOCKACQUIRE_NOT_AVAIL;
1064 : }
1065 :
1066 : /*
1067 : * Set bitmask of locks this process already holds on this object.
1068 : */
1069 2068 : MyProc->heldLocks = proclock->holdMask;
1070 :
1071 : /*
1072 : * Sleep till someone wakes me up.
1073 : */
1074 :
1075 : TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1076 : locktag->locktag_field2,
1077 : locktag->locktag_field3,
1078 : locktag->locktag_field4,
1079 : locktag->locktag_type,
1080 : lockmode);
1081 :
1082 2068 : WaitOnLock(locallock, owner);
1083 :
1084 : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1085 : locktag->locktag_field2,
1086 : locktag->locktag_field3,
1087 : locktag->locktag_field4,
1088 : locktag->locktag_type,
1089 : lockmode);
1090 :
1091 : /*
1092 : * NOTE: do not do any material change of state between here and
1093 : * return. All required changes in locktable state must have been
1094 : * done when the lock was granted to us --- see notes in WaitOnLock.
1095 : */
1096 :
1097 : /*
1098 : * Check the proclock entry status, in case something in the IPC
1099 : * communication doesn't work correctly.
1100 : */
1101 1974 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1102 : {
1103 0 : AbortStrongLockAcquire();
1104 : PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1105 : LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1106 : /* Should we retry ? */
1107 0 : LWLockRelease(partitionLock);
1108 0 : elog(ERROR, "LockAcquire failed");
1109 : }
1110 : PROCLOCK_PRINT("LockAcquire: granted", proclock);
1111 : LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1112 : }
1113 :
1114 : /*
1115 : * Lock state is fully up-to-date now; if we error out after this, no
1116 : * special error cleanup is required.
1117 : */
1118 3135974 : FinishStrongLockAcquire();
1119 :
1120 3135974 : LWLockRelease(partitionLock);
1121 :
1122 : /*
1123 : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1124 : * standby server.
1125 : */
1126 3135974 : if (log_lock)
1127 : {
1128 : /*
1129 : * Decode the locktag back to the original values, to avoid sending
1130 : * lots of empty bytes with every message. See lock.h to check how a
1131 : * locktag is defined for LOCKTAG_RELATION.
1132 : */
1133 146074 : LogAccessExclusiveLock(locktag->locktag_field1,
1134 : locktag->locktag_field2);
1135 : }
1136 :
1137 3135974 : return LOCKACQUIRE_OK;
1138 : }
1139 :
1140 : /*
1141 : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1142 : * request.
1143 : *
1144 : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1145 : * for lack of shared memory.
1146 : *
1147 : * The appropriate partition lock must be held at entry, and will be
1148 : * held at exit.
1149 : */
1150 : static PROCLOCK *
1151 3140708 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1152 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1153 : {
1154 : LOCK *lock;
1155 : PROCLOCK *proclock;
1156 : PROCLOCKTAG proclocktag;
1157 : uint32 proclock_hashcode;
1158 : bool found;
1159 :
1160 : /*
1161 : * Find or create a lock with this tag.
1162 : */
1163 3140708 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1164 : locktag,
1165 : hashcode,
1166 : HASH_ENTER_NULL,
1167 : &found);
1168 3140708 : if (!lock)
1169 0 : return NULL;
1170 :
1171 : /*
1172 : * if it's a new lock object, initialize it
1173 : */
1174 3140708 : if (!found)
1175 : {
1176 2854550 : lock->grantMask = 0;
1177 2854550 : lock->waitMask = 0;
1178 2854550 : dlist_init(&lock->procLocks);
1179 2854550 : dclist_init(&lock->waitProcs);
1180 2854550 : lock->nRequested = 0;
1181 2854550 : lock->nGranted = 0;
1182 17127300 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1183 2854550 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1184 : LOCK_PRINT("LockAcquire: new", lock, lockmode);
1185 : }
1186 : else
1187 : {
1188 : LOCK_PRINT("LockAcquire: found", lock, lockmode);
1189 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1190 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1191 : Assert(lock->nGranted <= lock->nRequested);
1192 : }
1193 :
1194 : /*
1195 : * Create the hash key for the proclock table.
1196 : */
1197 3140708 : proclocktag.myLock = lock;
1198 3140708 : proclocktag.myProc = proc;
1199 :
1200 3140708 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1201 :
1202 : /*
1203 : * Find or create a proclock entry with this tag
1204 : */
1205 3140708 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1206 : &proclocktag,
1207 : proclock_hashcode,
1208 : HASH_ENTER_NULL,
1209 : &found);
1210 3140708 : if (!proclock)
1211 : {
1212 : /* Oops, not enough shmem for the proclock */
1213 0 : if (lock->nRequested == 0)
1214 : {
1215 : /*
1216 : * There are no other requestors of this lock, so garbage-collect
1217 : * the lock object. We *must* do this to avoid a permanent leak
1218 : * of shared memory, because there won't be anything to cause
1219 : * anyone to release the lock object later.
1220 : */
1221 : Assert(dlist_is_empty(&(lock->procLocks)));
1222 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1223 0 : &(lock->tag),
1224 : hashcode,
1225 : HASH_REMOVE,
1226 : NULL))
1227 0 : elog(PANIC, "lock table corrupted");
1228 : }
1229 0 : return NULL;
1230 : }
1231 :
1232 : /*
1233 : * If new, initialize the new entry
1234 : */
1235 3140708 : if (!found)
1236 : {
1237 2875340 : uint32 partition = LockHashPartition(hashcode);
1238 :
1239 : /*
1240 : * It might seem unsafe to access proclock->groupLeader without a
1241 : * lock, but it's not really. Either we are initializing a proclock
1242 : * on our own behalf, in which case our group leader isn't changing
1243 : * because the group leader for a process can only ever be changed by
1244 : * the process itself; or else we are transferring a fast-path lock to
1245 : * the main lock table, in which case that process can't change its
1246 : * lock group leader without first releasing all of its locks (and in
1247 : * particular the one we are currently transferring).
1248 : */
1249 5750680 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
1250 2875340 : proc->lockGroupLeader : proc;
1251 2875340 : proclock->holdMask = 0;
1252 2875340 : proclock->releaseMask = 0;
1253 : /* Add proclock to appropriate lists */
1254 2875340 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1255 2875340 : dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1256 : PROCLOCK_PRINT("LockAcquire: new", proclock);
1257 : }
1258 : else
1259 : {
1260 : PROCLOCK_PRINT("LockAcquire: found", proclock);
1261 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
1262 :
1263 : #ifdef CHECK_DEADLOCK_RISK
1264 :
1265 : /*
1266 : * Issue warning if we already hold a lower-level lock on this object
1267 : * and do not hold a lock of the requested level or higher. This
1268 : * indicates a deadlock-prone coding practice (eg, we'd have a
1269 : * deadlock if another backend were following the same code path at
1270 : * about the same time).
1271 : *
1272 : * This is not enabled by default, because it may generate log entries
1273 : * about user-level coding practices that are in fact safe in context.
1274 : * It can be enabled to help find system-level problems.
1275 : *
1276 : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1277 : * better to use a table. For now, though, this works.
1278 : */
1279 : {
1280 : int i;
1281 :
1282 : for (i = lockMethodTable->numLockModes; i > 0; i--)
1283 : {
1284 : if (proclock->holdMask & LOCKBIT_ON(i))
1285 : {
1286 : if (i >= (int) lockmode)
1287 : break; /* safe: we have a lock >= req level */
1288 : elog(LOG, "deadlock risk: raising lock level"
1289 : " from %s to %s on object %u/%u/%u",
1290 : lockMethodTable->lockModeNames[i],
1291 : lockMethodTable->lockModeNames[lockmode],
1292 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1293 : lock->tag.locktag_field3);
1294 : break;
1295 : }
1296 : }
1297 : }
1298 : #endif /* CHECK_DEADLOCK_RISK */
1299 : }
1300 :
1301 : /*
1302 : * lock->nRequested and lock->requested[] count the total number of
1303 : * requests, whether granted or waiting, so increment those immediately.
1304 : * The other counts don't increment till we get the lock.
1305 : */
1306 3140708 : lock->nRequested++;
1307 3140708 : lock->requested[lockmode]++;
1308 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1309 :
1310 : /*
1311 : * We shouldn't already hold the desired lock; else locallock table is
1312 : * broken.
1313 : */
1314 3140708 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
1315 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
1316 : lockMethodTable->lockModeNames[lockmode],
1317 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1318 : lock->tag.locktag_field3);
1319 :
1320 3140708 : return proclock;
1321 : }
1322 :
1323 : /*
1324 : * Check and set/reset the flag that we hold the relation extension lock.
1325 : *
1326 : * It is the caller's responsibility to call this function after
1327 : * acquiring/releasing the relation extension lock.
1328 : *
1329 : * Pass acquired as true if lock is acquired, false otherwise.
1330 : */
1331 : static inline void
1332 49233418 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1333 : {
1334 : #ifdef USE_ASSERT_CHECKING
1335 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1336 : IsRelationExtensionLockHeld = acquired;
1337 : #endif
1338 49233418 : }
1339 :
1340 : /*
1341 : * Subroutine to free a locallock entry
1342 : */
1343 : static void
1344 24277614 : RemoveLocalLock(LOCALLOCK *locallock)
1345 : {
1346 : int i;
1347 :
1348 24403808 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1349 : {
1350 126194 : if (locallock->lockOwners[i].owner != NULL)
1351 126120 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1352 : }
1353 24277614 : locallock->numLockOwners = 0;
1354 24277614 : if (locallock->lockOwners != NULL)
1355 24277614 : pfree(locallock->lockOwners);
1356 24277614 : locallock->lockOwners = NULL;
1357 :
1358 24277614 : if (locallock->holdsStrongLockCount)
1359 : {
1360 : uint32 fasthashcode;
1361 :
1362 282092 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1363 :
1364 282092 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1365 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1366 282092 : FastPathStrongRelationLocks->count[fasthashcode]--;
1367 282092 : locallock->holdsStrongLockCount = false;
1368 282092 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1369 : }
1370 :
1371 24277614 : if (!hash_search(LockMethodLocalHash,
1372 24277614 : &(locallock->tag),
1373 : HASH_REMOVE, NULL))
1374 0 : elog(WARNING, "locallock table corrupted");
1375 :
1376 : /*
1377 : * Indicate that the lock is released for certain types of locks
1378 : */
1379 24277614 : CheckAndSetLockHeld(locallock, false);
1380 24277614 : }
1381 :
1382 : /*
1383 : * LockCheckConflicts -- test whether requested lock conflicts
1384 : * with those already granted
1385 : *
1386 : * Returns true if conflict, false if no conflict.
1387 : *
1388 : * NOTES:
1389 : * Here's what makes this complicated: one process's locks don't
1390 : * conflict with one another, no matter what purpose they are held for
1391 : * (eg, session and transaction locks do not conflict). Nor do the locks
1392 : * of one process in a lock group conflict with those of another process in
1393 : * the same group. So, we must subtract off these locks when determining
1394 : * whether the requested new lock conflicts with those already held.
1395 : */
1396 : bool
1397 3139410 : LockCheckConflicts(LockMethod lockMethodTable,
1398 : LOCKMODE lockmode,
1399 : LOCK *lock,
1400 : PROCLOCK *proclock)
1401 : {
1402 3139410 : int numLockModes = lockMethodTable->numLockModes;
1403 : LOCKMASK myLocks;
1404 3139410 : int conflictMask = lockMethodTable->conflictTab[lockmode];
1405 : int conflictsRemaining[MAX_LOCKMODES];
1406 3139410 : int totalConflictsRemaining = 0;
1407 : dlist_iter proclock_iter;
1408 : int i;
1409 :
1410 : /*
1411 : * first check for global conflicts: If no locks conflict with my request,
1412 : * then I get the lock.
1413 : *
1414 : * Checking for conflict: lock->grantMask represents the types of
1415 : * currently held locks. conflictTable[lockmode] has a bit set for each
1416 : * type of lock that conflicts with request. Bitwise compare tells if
1417 : * there is a conflict.
1418 : */
1419 3139410 : if (!(conflictMask & lock->grantMask))
1420 : {
1421 : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1422 3033630 : return false;
1423 : }
1424 :
1425 : /*
1426 : * Rats. Something conflicts. But it could still be my own lock, or a
1427 : * lock held by another member of my locking group. First, figure out how
1428 : * many conflicts remain after subtracting out any locks I hold myself.
1429 : */
1430 105780 : myLocks = proclock->holdMask;
1431 952020 : for (i = 1; i <= numLockModes; i++)
1432 : {
1433 846240 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
1434 : {
1435 425774 : conflictsRemaining[i] = 0;
1436 425774 : continue;
1437 : }
1438 420466 : conflictsRemaining[i] = lock->granted[i];
1439 420466 : if (myLocks & LOCKBIT_ON(i))
1440 109662 : --conflictsRemaining[i];
1441 420466 : totalConflictsRemaining += conflictsRemaining[i];
1442 : }
1443 :
1444 : /* If no conflicts remain, we get the lock. */
1445 105780 : if (totalConflictsRemaining == 0)
1446 : {
1447 : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1448 101468 : return false;
1449 : }
1450 :
1451 : /* If no group locking, it's definitely a conflict. */
1452 4312 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1453 : {
1454 : Assert(proclock->tag.myProc == MyProc);
1455 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1456 : proclock);
1457 3324 : return true;
1458 : }
1459 :
1460 : /*
1461 : * The relation extension lock conflicts even between group members.
1462 : */
1463 988 : if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1464 : {
1465 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1466 : proclock);
1467 10 : return true;
1468 : }
1469 :
1470 : /*
1471 : * Locks held in conflicting modes by members of our own lock group are
1472 : * not real conflicts; we can subtract those out and see if we still have
1473 : * a conflict. This is O(N) in the number of processes holding or
1474 : * awaiting locks on this object. We could improve that by making the
1475 : * shared memory state more complex (and larger) but it doesn't seem worth
1476 : * it.
1477 : */
1478 1464 : dlist_foreach(proclock_iter, &lock->procLocks)
1479 : {
1480 1370 : PROCLOCK *otherproclock =
1481 1370 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1482 :
1483 1370 : if (proclock != otherproclock &&
1484 1276 : proclock->groupLeader == otherproclock->groupLeader &&
1485 888 : (otherproclock->holdMask & conflictMask) != 0)
1486 : {
1487 884 : int intersectMask = otherproclock->holdMask & conflictMask;
1488 :
1489 7956 : for (i = 1; i <= numLockModes; i++)
1490 : {
1491 7072 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
1492 : {
1493 898 : if (conflictsRemaining[i] <= 0)
1494 0 : elog(PANIC, "proclocks held do not match lock");
1495 898 : conflictsRemaining[i]--;
1496 898 : totalConflictsRemaining--;
1497 : }
1498 : }
1499 :
1500 884 : if (totalConflictsRemaining == 0)
1501 : {
1502 : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1503 : proclock);
1504 884 : return false;
1505 : }
1506 : }
1507 : }
1508 :
1509 : /* Nope, it's a real conflict. */
1510 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1511 94 : return true;
1512 : }
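/*
 * Worked example (illustrative): suppose a parallel worker requests
 * ShareLock while its leader already holds ShareLock on the same object,
 * and no one else holds anything.  conflictTab[ShareLock] includes
 * RowExclusiveLock but not ShareLock itself, so grantMask doesn't conflict
 * and we return false at the first test.  If instead the leader held
 * RowExclusiveLock, the group-membership loop above would subtract that
 * holding from conflictsRemaining and still return false, since group
 * members' locks never conflict (except for relation extension locks).
 */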
1513 :
1514 : /*
1515 : * GrantLock -- update the lock and proclock data structures to show
1516 : * the lock request has been granted.
1517 : *
1518 : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1519 : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1520 : *
1521 : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1522 : * table entry; but since we may be awaking some other process, we can't do
1523 : * that here; it's done by GrantLockLocal, instead.
1524 : */
1525 : void
1526 3139460 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1527 : {
1528 3139460 : lock->nGranted++;
1529 3139460 : lock->granted[lockmode]++;
1530 3139460 : lock->grantMask |= LOCKBIT_ON(lockmode);
1531 3139460 : if (lock->granted[lockmode] == lock->requested[lockmode])
1532 3139194 : lock->waitMask &= LOCKBIT_OFF(lockmode);
1533 3139460 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1534 : LOCK_PRINT("GrantLock", lock, lockmode);
1535 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1536 : Assert(lock->nGranted <= lock->nRequested);
1537 3139460 : }
1538 :
1539 : /*
1540 : * UnGrantLock -- opposite of GrantLock.
1541 : *
1542 : * Updates the lock and proclock data structures to show that the lock
1543 : * is no longer held nor requested by the current holder.
1544 : *
1545 : * Returns true if there were any waiters waiting on the lock that
1546 : * should now be woken up with ProcLockWakeup.
1547 : */
1548 : static bool
1549 3139310 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1550 : PROCLOCK *proclock, LockMethod lockMethodTable)
1551 : {
1552 3139310 : bool wakeupNeeded = false;
1553 :
1554 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1555 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1556 : Assert(lock->nGranted <= lock->nRequested);
1557 :
1558 : /*
1559 : * fix the general lock stats
1560 : */
1561 3139310 : lock->nRequested--;
1562 3139310 : lock->requested[lockmode]--;
1563 3139310 : lock->nGranted--;
1564 3139310 : lock->granted[lockmode]--;
1565 :
1566 3139310 : if (lock->granted[lockmode] == 0)
1567 : {
1568 : /* change the conflict mask. No more of this lock type. */
1569 3124536 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1570 : }
1571 :
1572 : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1573 :
1574 : /*
1575 : * We need only run ProcLockWakeup if the released lock conflicts with at
1576 : * least one of the lock types requested by waiter(s). Otherwise whatever
1577 : * conflict made them wait must still exist. NOTE: before MVCC, we could
1578 : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1579 : * some waiter, which could now be awakened because it doesn't conflict
1580 : * with its own locks.
1581 : * his own locks.
1582 : */
1583 3139310 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1584 1928 : wakeupNeeded = true;
1585 :
1586 : /*
1587 : * Now fix the per-proclock state.
1588 : */
1589 3139310 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1590 : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1591 :
1592 3139310 : return wakeupNeeded;
1593 : }
1594 :
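/*
 * Editor's sketch: the grant/ungrant bookkeeping in miniature. MiniLock and
 * the helper names are expository stand-ins; the counter symmetry and the
 * wakeup test are what GrantLock and UnGrantLock actually maintain.
 */
#include <stdbool.h>
#include <stdint.h>

#define NUM_MODES 8
#define MODE_BIT(m) (1u << (m))

typedef struct MiniLock
{
    int         nRequested, nGranted;
    int         requested[NUM_MODES + 1], granted[NUM_MODES + 1];
    uint32_t    grantMask, waitMask;
} MiniLock;

static void
mini_grant(MiniLock *lock, int mode)
{
    lock->nGranted++;
    lock->granted[mode]++;
    lock->grantMask |= MODE_BIT(mode);
    /* no one left waiting for this mode, so clear its wait bit */
    if (lock->granted[mode] == lock->requested[mode])
        lock->waitMask &= ~MODE_BIT(mode);
}

/* Returns true if waiters may now be wakable, as UnGrantLock does. */
static bool
mini_ungrant(MiniLock *lock, int mode, const uint32_t conflictTab[])
{
    lock->nRequested--;
    lock->requested[mode]--;
    lock->nGranted--;
    lock->granted[mode]--;
    if (lock->granted[mode] == 0)
        lock->grantMask &= ~MODE_BIT(mode);
    /* wake waiters only if the released mode conflicts with a waited mode */
    return (conflictTab[mode] & lock->waitMask) != 0;
}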
1595 : /*
1596 : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1597 : * proclock and lock objects if possible, and call ProcLockWakeup if there
1598 : * are remaining requests and the caller says it's OK. (Normally, this
1599 : * should be called after UnGrantLock, and wakeupNeeded is the result from
1600 : * UnGrantLock.)
1601 : *
1602 : * The appropriate partition lock must be held at entry, and will be
1603 : * held at exit.
1604 : */
1605 : static void
1606 3098990 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1607 : LockMethod lockMethodTable, uint32 hashcode,
1608 : bool wakeupNeeded)
1609 : {
1610 : /*
1611 : * If this was my last hold on this lock, delete my entry in the proclock
1612 : * table.
1613 : */
1614 3098990 : if (proclock->holdMask == 0)
1615 : {
1616 : uint32 proclock_hashcode;
1617 :
1618 : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1619 2874438 : dlist_delete(&proclock->lockLink);
1620 2874438 : dlist_delete(&proclock->procLink);
1621 2874438 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1622 2874438 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1623 2874438 : &(proclock->tag),
1624 : proclock_hashcode,
1625 : HASH_REMOVE,
1626 : NULL))
1627 0 : elog(PANIC, "proclock table corrupted");
1628 : }
1629 :
1630 3098990 : if (lock->nRequested == 0)
1631 : {
1632 : /*
1633 : * The caller just released the last lock, so garbage-collect the lock
1634 : * object.
1635 : */
1636 : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1637 : Assert(dlist_is_empty(&lock->procLocks));
1638 2854558 : if (!hash_search_with_hash_value(LockMethodLockHash,
1639 2854558 : &(lock->tag),
1640 : hashcode,
1641 : HASH_REMOVE,
1642 : NULL))
1643 0 : elog(PANIC, "lock table corrupted");
1644 : }
1645 244432 : else if (wakeupNeeded)
1646 : {
1647 : /* There are waiters on this lock, so wake them up. */
1648 2016 : ProcLockWakeup(lockMethodTable, lock);
1649 : }
1650 3098990 : }
1651 :
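/*
 * Editor's sketch: CleanUpLock's decision ladder, with hypothetical types
 * and callbacks standing in for the hash-table removals and ProcLockWakeup.
 * The order of checks is the point: first garbage-collect the proclock,
 * then the lock itself, and only otherwise consider waking waiters.
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct MiniProcLock { uint32_t holdMask; } MiniProcLock;
typedef struct MiniSharedLock { int nRequested; } MiniSharedLock;

static void
mini_cleanup(MiniSharedLock *lock, MiniProcLock *proclock, bool wakeupNeeded,
             void (*remove_proclock)(MiniProcLock *),
             void (*remove_lock)(MiniSharedLock *),
             void (*wake_waiters)(MiniSharedLock *))
{
    if (proclock->holdMask == 0)
        remove_proclock(proclock);  /* our last hold: drop the proclock */
    if (lock->nRequested == 0)
        remove_lock(lock);          /* no holders or waiters remain at all */
    else if (wakeupNeeded)
        wake_waiters(lock);         /* someone may now be grantable */
}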
1652 : /*
1653 : * GrantLockLocal -- update the locallock data structures to show
1654 : * the lock request has been granted.
1655 : *
1656 : * We expect that LockAcquire made sure there is room to add a new
1657 : * ResourceOwner entry.
1658 : */
1659 : static void
1660 27149746 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1661 : {
1662 27149746 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1663 : int i;
1664 :
1665 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1666 : /* Count the total */
1667 27149746 : locallock->nLocks++;
1668 : /* Count the per-owner lock */
1669 28131776 : for (i = 0; i < locallock->numLockOwners; i++)
1670 : {
1671 3175972 : if (lockOwners[i].owner == owner)
1672 : {
1673 2193942 : lockOwners[i].nLocks++;
1674 2193942 : return;
1675 : }
1676 : }
1677 24955804 : lockOwners[i].owner = owner;
1678 24955804 : lockOwners[i].nLocks = 1;
1679 24955804 : locallock->numLockOwners++;
1680 24955804 : if (owner != NULL)
1681 24897950 : ResourceOwnerRememberLock(owner, locallock);
1682 :
1683 : /* For certain lock types, record that the lock is now held. */
1684 24955804 : CheckAndSetLockHeld(locallock, true);
1685 : }
1686 :
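/*
 * Editor's sketch: the per-owner counting GrantLockLocal performs. Bump an
 * existing owner's count, or append a new slot. MiniOwner and grant_local
 * are illustrative names; the append relies on the caller having ensured
 * the array has room, just as LockAcquire does for the real lockOwners
 * array.
 */
typedef struct MiniOwner
{
    const void *owner;          /* a null owner means a session-level hold */
    int         nLocks;
} MiniOwner;

static void
grant_local(MiniOwner *owners, int *numOwners, long *nLocks,
            const void *owner)
{
    (*nLocks)++;                /* total held, across all owners */
    for (int i = 0; i < *numOwners; i++)
    {
        if (owners[i].owner == owner)
        {
            owners[i].nLocks++;
            return;
        }
    }
    owners[*numOwners].owner = owner;   /* precondition: room exists */
    owners[*numOwners].nLocks = 1;
    (*numOwners)++;
}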
1687 : /*
1688 : * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1689 : * and arrange for error cleanup if it fails
1690 : */
1691 : static void
1692 282646 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1693 : {
1694 : Assert(StrongLockInProgress == NULL);
1695 : Assert(locallock->holdsStrongLockCount == false);
1696 :
1697 : /*
1698 : * Adding to a memory location is not atomic, so we take a spinlock to
1699 : * ensure we don't collide with someone else trying to bump the count at
1700 : * the same time.
1701 : *
1702 : * XXX: It might be worth considering using an atomic fetch-and-add
1703 : * instruction here, on architectures where that is supported.
1704 : */
1705 :
1706 282646 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1707 282646 : FastPathStrongRelationLocks->count[fasthashcode]++;
1708 282646 : locallock->holdsStrongLockCount = true;
1709 282646 : StrongLockInProgress = locallock;
1710 282646 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1711 282646 : }
1712 :
1713 : /*
1714 : * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1715 : * acquisition once it's no longer needed
1716 : */
1717 : static void
1718 3135974 : FinishStrongLockAcquire(void)
1719 : {
1720 3135974 : StrongLockInProgress = NULL;
1721 3135974 : }
1722 :
1723 : /*
1724 : * AbortStrongLockAcquire - undo strong lock state changes performed by
1725 : * BeginStrongLockAcquire.
1726 : */
1727 : void
1728 566860 : AbortStrongLockAcquire(void)
1729 : {
1730 : uint32 fasthashcode;
1731 566860 : LOCALLOCK *locallock = StrongLockInProgress;
1732 :
1733 566860 : if (locallock == NULL)
1734 566436 : return;
1735 :
1736 424 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1737 : Assert(locallock->holdsStrongLockCount == true);
1738 424 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1739 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1740 424 : FastPathStrongRelationLocks->count[fasthashcode]--;
1741 424 : locallock->holdsStrongLockCount = false;
1742 424 : StrongLockInProgress = NULL;
1743 424 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1744 : }
1745 :
1746 : /*
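/*
 * Editor's sketch: the strong-lock counter protocol spanning the three
 * functions above. A per-partition count tells would-be fast-path lockers
 * that the fast path is unsafe; begin bumps it and records a pending
 * acquisition, finish merely forgets the pending pointer (the count stays
 * while the lock is held), and abort undoes the bump. The pthread mutex
 * (assumed initialized with PTHREAD_MUTEX_INITIALIZER) models the spinlock,
 * and STRONG_PARTITIONS is a made-up size, purely for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>

#define STRONG_PARTITIONS 1024

typedef struct StrongCounts
{
    pthread_mutex_t mutex;              /* a spinlock in the real code */
    uint32_t    count[STRONG_PARTITIONS];
} StrongCounts;

typedef struct MiniStrongLocal
{
    uint32_t    fasthash;               /* partition index for this lock */
    bool        holdsStrongLockCount;
} MiniStrongLocal;

static MiniStrongLocal *strongInProgress;   /* pending cleanup, one at a time */

static void
begin_strong(StrongCounts *sc, MiniStrongLocal *ll)
{
    pthread_mutex_lock(&sc->mutex);
    sc->count[ll->fasthash]++;          /* weak lockers must now use the
                                         * shared table for this partition */
    ll->holdsStrongLockCount = true;
    strongInProgress = ll;
    pthread_mutex_unlock(&sc->mutex);
}

static void
finish_strong(void)
{
    strongInProgress = NULL;            /* acquisition done; count stays */
}

static void
abort_strong(StrongCounts *sc)
{
    MiniStrongLocal *ll = strongInProgress;

    if (ll == NULL)
        return;                         /* nothing pending */
    pthread_mutex_lock(&sc->mutex);
    sc->count[ll->fasthash]--;          /* undo the bump */
    ll->holdsStrongLockCount = false;
    strongInProgress = NULL;
    pthread_mutex_unlock(&sc->mutex);
}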
1747 : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1748 : * WaitOnLock on.
1749 : *
1750 : * proc.c needs this for the case where we are booted off the lock by
1751 : * timeout, but discover that someone granted us the lock anyway.
1752 : *
1753 : * We could just export GrantLockLocal, but that would require including
1754 : * resowner.h in lock.h, which creates circularity.
1755 : */
1756 : void
1757 1976 : GrantAwaitedLock(void)
1758 : {
1759 1976 : GrantLockLocal(awaitedLock, awaitedOwner);
1760 1976 : }
1761 :
1762 : /*
1763 : * MarkLockClear -- mark an acquired lock as "clear"
1764 : *
1765 : * This means that we know we have absorbed all sinval messages that other
1766 : * sessions generated before we acquired this lock, and so we can confidently
1767 : * assume we know about any catalog changes protected by this lock.
1768 : */
1769 : void
1770 23558678 : MarkLockClear(LOCALLOCK *locallock)
1771 : {
1772 : Assert(locallock->nLocks > 0);
1773 23558678 : locallock->lockCleared = true;
1774 23558678 : }
1775 :
1776 : /*
1777 : * WaitOnLock -- wait to acquire a lock
1778 : *
1779 : * Caller must have set MyProc->heldLocks to reflect locks already held
1780 : * on the lockable object by this process.
1781 : *
1782 : * The appropriate partition lock must be held at entry.
1783 : */
1784 : static void
1785 2068 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1786 : {
1787 2068 : LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1788 2068 : LockMethod lockMethodTable = LockMethods[lockmethodid];
1789 :
1790 : LOCK_PRINT("WaitOnLock: sleeping on lock",
1791 : locallock->lock, locallock->tag.mode);
1792 :
1793 : /* adjust the process title to indicate that it's waiting */
1794 2068 : set_ps_display_suffix("waiting");
1795 :
1796 2068 : awaitedLock = locallock;
1797 2068 : awaitedOwner = owner;
1798 :
1799 : /*
1800 : * NOTE: Think not to put any shared-state cleanup after the call to
1801 : * ProcSleep, in either the normal or failure path. The lock state must
1802 : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1803 : * waiting for the lock. This is necessary because of the possibility
1804 : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1805 : * grants us the lock, but before we've noticed it. Hence, after granting,
1806 : * the locktable state must fully reflect the fact that we own the lock;
1807 : * we can't do additional work on return.
1808 : *
1809 : * We can and do use a PG_TRY block to try to clean up after failure, but
1810 : * this still has a major limitation: elog(FATAL) can occur while waiting
1811 : * (eg, a "die" interrupt), and then control won't come back here. So all
1812 : * cleanup of essential state should happen in LockErrorCleanup, not here.
1813 : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1814 : * is unimportant if the process exits.
1815 : */
1816 2068 : PG_TRY();
1817 : {
1818 2068 : if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
1819 : {
1820 : /*
1821 : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1822 : * now.
1823 : */
1824 10 : awaitedLock = NULL;
1825 : LOCK_PRINT("WaitOnLock: aborting on lock",
1826 : locallock->lock, locallock->tag.mode);
1827 10 : LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1828 :
1829 : /*
1830 : * Now that we aren't holding the partition lock, we can give an
1831 : * error report including details about the detected deadlock.
1832 : */
1833 10 : DeadLockReport();
1834 : /* not reached */
1835 : }
1836 : }
1837 84 : PG_CATCH();
1838 : {
1839 : /* In this path, awaitedLock remains set until LockErrorCleanup */
1840 :
1841 : /* reset ps display to remove the suffix */
1842 84 : set_ps_display_remove_suffix();
1843 :
1844 : /* and propagate the error */
1845 84 : PG_RE_THROW();
1846 : }
1847 1974 : PG_END_TRY();
1848 :
1849 1974 : awaitedLock = NULL;
1850 :
1851 : /* reset ps display to remove the suffix */
1852 1974 : set_ps_display_remove_suffix();
1853 :
1854 : LOCK_PRINT("WaitOnLock: wakeup on lock",
1855 : locallock->lock, locallock->tag.mode);
1856 1974 : }
1857 :
1858 : /*
1859 : * Remove a proc from the wait-queue it is on (caller must know it is on one).
1860 : * This is only used when the proc has failed to get the lock, so we set its
1861 : * waitStatus to PROC_WAIT_STATUS_ERROR.
1862 : *
1863 : * Appropriate partition lock must be held by caller. Also, caller is
1864 : * responsible for signaling the proc if needed.
1865 : *
1866 : * NB: this does not clean up any locallock object that may exist for the lock.
1867 : */
1868 : void
1869 92 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1870 : {
1871 92 : LOCK *waitLock = proc->waitLock;
1872 92 : PROCLOCK *proclock = proc->waitProcLock;
1873 92 : LOCKMODE lockmode = proc->waitLockMode;
1874 92 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1875 :
1876 : /* Make sure proc is waiting */
1877 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1878 : Assert(proc->links.next != NULL);
1879 : Assert(waitLock);
1880 : Assert(!dclist_is_empty(&waitLock->waitProcs));
1881 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1882 :
1883 : /* Remove proc from lock's wait queue */
1884 92 : dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1885 :
1886 : /* Undo increments of request counts by waiting process */
1887 : Assert(waitLock->nRequested > 0);
1888 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
1889 92 : waitLock->nRequested--;
1890 : Assert(waitLock->requested[lockmode] > 0);
1891 92 : waitLock->requested[lockmode]--;
1892 : /* don't forget to clear waitMask bit if appropriate */
1893 92 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1894 88 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1895 :
1896 : /* Clean up the proc's own state, and pass it the ok/fail signal */
1897 92 : proc->waitLock = NULL;
1898 92 : proc->waitProcLock = NULL;
1899 92 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1900 :
1901 : /*
1902 : * Delete the proclock immediately if it represents no already-held locks.
1903 : * (This must happen now because if the owner of the lock decides to
1904 : * release it, and the requested/granted counts then go to zero,
1905 : * LockRelease expects there to be no remaining proclocks.) Then see if
1906 : * any other waiters for the lock can be woken up now.
1907 : */
1908 92 : CleanUpLock(waitLock, proclock,
1909 : LockMethods[lockmethodid], hashcode,
1910 : true);
1911 92 : }
1912 :
1913 : /*
1914 : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1915 : * Release a session lock if 'sessionLock' is true, else release a
1916 : * regular transaction lock.
1917 : *
1918 : * Side Effects: find any waiting processes that are now wakable,
1919 : * grant them their requested locks and awaken them.
1920 : * (We have to grant the lock here to avoid a race between
1921 : * the waking process and any new process to
1922 : * come along and request the lock.)
1923 : */
1924 : bool
1925 23976460 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1926 : {
1927 23976460 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1928 : LockMethod lockMethodTable;
1929 : LOCALLOCKTAG localtag;
1930 : LOCALLOCK *locallock;
1931 : LOCK *lock;
1932 : PROCLOCK *proclock;
1933 : LWLock *partitionLock;
1934 : bool wakeupNeeded;
1935 :
1936 23976460 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1937 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1938 23976460 : lockMethodTable = LockMethods[lockmethodid];
1939 23976460 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1940 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
1941 :
1942 : #ifdef LOCK_DEBUG
1943 : if (LOCK_DEBUG_ENABLED(locktag))
1944 : elog(LOG, "LockRelease: lock [%u,%u] %s",
1945 : locktag->locktag_field1, locktag->locktag_field2,
1946 : lockMethodTable->lockModeNames[lockmode]);
1947 : #endif
1948 :
1949 : /*
1950 : * Find the LOCALLOCK entry for this lock and lockmode
1951 : */
1952 23976460 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1953 23976460 : localtag.lock = *locktag;
1954 23976460 : localtag.mode = lockmode;
1955 :
1956 23976460 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1957 : &localtag,
1958 : HASH_FIND, NULL);
1959 :
1960 : /*
1961 : * Let the caller print its own error message, too; do not ereport(ERROR).
1962 : */
1963 23976460 : if (!locallock || locallock->nLocks <= 0)
1964 : {
1965 26 : elog(WARNING, "you don't own a lock of type %s",
1966 : lockMethodTable->lockModeNames[lockmode]);
1967 26 : return false;
1968 : }
1969 :
1970 : /*
1971 : * Decrease the count for the resource owner.
1972 : */
1973 : {
1974 23976434 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1975 : ResourceOwner owner;
1976 : int i;
1977 :
1978 : /* Identify owner for lock */
1979 23976434 : if (sessionLock)
1980 57824 : owner = NULL;
1981 : else
1982 23918610 : owner = CurrentResourceOwner;
1983 :
1984 23978312 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1985 : {
1986 23978288 : if (lockOwners[i].owner == owner)
1987 : {
1988 : Assert(lockOwners[i].nLocks > 0);
1989 23976410 : if (--lockOwners[i].nLocks == 0)
1990 : {
1991 23195260 : if (owner != NULL)
1992 23137480 : ResourceOwnerForgetLock(owner, locallock);
1993 : /* compact out unused slot */
1994 23195260 : locallock->numLockOwners--;
1995 23195260 : if (i < locallock->numLockOwners)
1996 96 : lockOwners[i] = lockOwners[locallock->numLockOwners];
1997 : }
1998 23976410 : break;
1999 : }
2000 : }
2001 23976434 : if (i < 0)
2002 : {
2003 : /* don't release a lock belonging to another owner */
2004 24 : elog(WARNING, "you don't own a lock of type %s",
2005 : lockMethodTable->lockModeNames[lockmode]);
2006 24 : return false;
2007 : }
2008 : }
2009 :
2010 : /*
2011 : * Decrease the total local count. If we're still holding the lock, we're
2012 : * done.
2013 : */
2014 23976410 : locallock->nLocks--;
2015 :
2016 23976410 : if (locallock->nLocks > 0)
2017 1247142 : return true;
2018 :
2019 : /*
2020 : * At this point we can no longer suppose we are clear of invalidation
2021 : * messages related to this lock. Although we'll delete the LOCALLOCK
2022 : * object before any intentional return from this routine, it seems worth
2023 : * the trouble to explicitly reset lockCleared right now, just in case
2024 : * some error prevents us from deleting the LOCALLOCK.
2025 : */
2026 22729268 : locallock->lockCleared = false;
2027 :
2028 : /* Attempt fast release of any lock eligible for the fast path. */
2029 22729268 : if (EligibleForRelationFastPath(locktag, lockmode) &&
2030 21111638 : FastPathLocalUseCount > 0)
2031 : {
2032 : bool released;
2033 :
2034 : /*
2035 : * We might not find the lock here, even if we originally entered it
2036 : * here. Another backend may have moved it to the main table.
2037 : */
2038 20995216 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2039 20995216 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2040 : lockmode);
2041 20995216 : LWLockRelease(&MyProc->fpInfoLock);
2042 20995216 : if (released)
2043 : {
2044 20389870 : RemoveLocalLock(locallock);
2045 20389870 : return true;
2046 : }
2047 : }
2048 :
2049 : /*
2050 : * Otherwise we've got to mess with the shared lock table.
2051 : */
2052 2339398 : partitionLock = LockHashPartitionLock(locallock->hashcode);
2053 :
2054 2339398 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2055 :
2056 : /*
2057 : * Normally, we don't need to re-find the lock or proclock, since we kept
2058 : * their addresses in the locallock table, and they couldn't have been
2059 : * removed while we were holding a lock on them. But it's possible that
2060 : * the lock was taken fast-path and has since been moved to the main hash
2061 : * table by another backend, in which case we will need to look up the
2062 : * objects here. We assume the lock field is NULL if so.
2063 : */
2064 2339398 : lock = locallock->lock;
2065 2339398 : if (!lock)
2066 : {
2067 : PROCLOCKTAG proclocktag;
2068 :
2069 : Assert(EligibleForRelationFastPath(locktag, lockmode));
2070 10 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2071 : locktag,
2072 : locallock->hashcode,
2073 : HASH_FIND,
2074 : NULL);
2075 10 : if (!lock)
2076 0 : elog(ERROR, "failed to re-find shared lock object");
2077 10 : locallock->lock = lock;
2078 :
2079 10 : proclocktag.myLock = lock;
2080 10 : proclocktag.myProc = MyProc;
2081 10 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2082 : &proclocktag,
2083 : HASH_FIND,
2084 : NULL);
2085 10 : if (!locallock->proclock)
2086 0 : elog(ERROR, "failed to re-find shared proclock object");
2087 : }
2088 : LOCK_PRINT("LockRelease: found", lock, lockmode);
2089 2339398 : proclock = locallock->proclock;
2090 : PROCLOCK_PRINT("LockRelease: found", proclock);
2091 :
2092 : /*
2093 : * Double-check that we are actually holding a lock of the type we want to
2094 : * release.
2095 : */
2096 2339398 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2097 : {
2098 : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2099 0 : LWLockRelease(partitionLock);
2100 0 : elog(WARNING, "you don't own a lock of type %s",
2101 : lockMethodTable->lockModeNames[lockmode]);
2102 0 : RemoveLocalLock(locallock);
2103 0 : return false;
2104 : }
2105 :
2106 : /*
2107 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2108 : */
2109 2339398 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2110 :
2111 2339398 : CleanUpLock(lock, proclock,
2112 : lockMethodTable, locallock->hashcode,
2113 : wakeupNeeded);
2114 :
2115 2339398 : LWLockRelease(partitionLock);
2116 :
2117 2339398 : RemoveLocalLock(locallock);
2118 2339398 : return true;
2119 : }
2120 :
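/*
 * Editor's sketch: the owner bookkeeping LockRelease performs before it
 * touches any shared state. Find the owner's slot, decrement, and close the
 * hole by moving the last slot down. MiniRelOwner and forget_owner are
 * illustrative names only.
 */
#include <stdbool.h>

typedef struct MiniRelOwner
{
    const void *owner;          /* a null owner means a session lock */
    int         nLocks;
} MiniRelOwner;

/* Returns false if this owner holds no such lock (the caller then warns). */
static bool
forget_owner(MiniRelOwner *owners, int *numOwners, const void *owner)
{
    for (int i = *numOwners - 1; i >= 0; i--)
    {
        if (owners[i].owner == owner)
        {
            if (--owners[i].nLocks == 0)
            {
                (*numOwners)--;
                if (i < *numOwners)
                    owners[i] = owners[*numOwners];     /* compact the hole */
            }
            return true;
        }
    }
    return false;               /* lock belongs to some other owner */
}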
2121 : /*
2122 : * LockReleaseAll -- Release all locks of the specified lock method that
2123 : * are held by the current process.
2124 : *
2125 : * Well, not necessarily *all* locks. The available behaviors are:
2126 : * allLocks == true: release all locks including session locks.
2127 : * allLocks == false: release all non-session locks.
2128 : */
2129 : void
2130 1053074 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2131 : {
2132 : HASH_SEQ_STATUS status;
2133 : LockMethod lockMethodTable;
2134 : int i,
2135 : numLockModes;
2136 : LOCALLOCK *locallock;
2137 : LOCK *lock;
2138 : int partition;
2139 1053074 : bool have_fast_path_lwlock = false;
2140 :
2141 1053074 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2142 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2143 1053074 : lockMethodTable = LockMethods[lockmethodid];
2144 :
2145 : #ifdef LOCK_DEBUG
2146 : if (*(lockMethodTable->trace_flag))
2147 : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2148 : #endif
2149 :
2150 : /*
2151 : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2152 : * the only way that the lock we hold on our own VXID can ever get
2153 : * released: it is always and only released when a toplevel transaction
2154 : * ends.
2155 : */
2156 1053074 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2157 514834 : VirtualXactLockTableCleanup();
2158 :
2159 1053074 : numLockModes = lockMethodTable->numLockModes;
2160 :
2161 : /*
2162 : * First we run through the locallock table and get rid of unwanted
2163 : * entries, then we scan the process's proclocks and get rid of those. We
2164 : * do this separately because we may have multiple locallock entries
2165 : * pointing to the same proclock, and we daren't end up with any dangling
2166 : * pointers. Fast-path locks are cleaned up during the locallock table
2167 : * scan, though.
2168 : */
2169 1053074 : hash_seq_init(&status, LockMethodLocalHash);
2170 :
2171 2675926 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2172 : {
2173 : /*
2174 : * If the LOCALLOCK entry is unused, we must've run out of shared
2175 : * memory while trying to set up this lock. Just forget the local
2176 : * entry.
2177 : */
2178 1622852 : if (locallock->nLocks == 0)
2179 : {
2180 92 : RemoveLocalLock(locallock);
2181 92 : continue;
2182 : }
2183 :
2184 : /* Ignore items that are not of the lockmethod to be removed */
2185 1622760 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2186 38882 : continue;
2187 :
2188 : /*
2189 : * If we are asked to release all locks, we can just zap the entry.
2190 : * Otherwise, we must scan to see if there are session locks. We assume
2191 : * there is at most one lockOwners entry for session locks.
2192 : */
2193 1583878 : if (!allLocks)
2194 : {
2195 1460700 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2196 :
2197 : /* If session lock is above array position 0, move it down to 0 */
2198 2944900 : for (i = 0; i < locallock->numLockOwners; i++)
2199 : {
2200 1484200 : if (lockOwners[i].owner == NULL)
2201 38716 : lockOwners[0] = lockOwners[i];
2202 : else
2203 1445484 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2204 : }
2205 :
2206 1460700 : if (locallock->numLockOwners > 0 &&
2207 1460700 : lockOwners[0].owner == NULL &&
2208 38716 : lockOwners[0].nLocks > 0)
2209 : {
2210 : /* Fix the locallock to show just the session locks */
2211 38716 : locallock->nLocks = lockOwners[0].nLocks;
2212 38716 : locallock->numLockOwners = 1;
2213 : /* We aren't deleting this locallock, so done */
2214 38716 : continue;
2215 : }
2216 : else
2217 1421984 : locallock->numLockOwners = 0;
2218 : }
2219 :
2220 : /*
2221 : * If the lock or proclock pointers are NULL, this lock was taken via
2222 : * the relation fast-path (and is not known to have been transferred).
2223 : */
2224 1545162 : if (locallock->proclock == NULL || locallock->lock == NULL)
2225 : {
2226 749556 : LOCKMODE lockmode = locallock->tag.mode;
2227 : Oid relid;
2228 :
2229 : /* Verify that a fast-path lock is what we've got. */
2230 749556 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2231 0 : elog(PANIC, "locallock table corrupted");
2232 :
2233 : /*
2234 : * If we don't currently hold the LWLock that protects our
2235 : * fast-path data structures, we must acquire it before attempting
2236 : * to release the lock via the fast-path. We will continue to
2237 : * hold the LWLock until we're done scanning the locallock table,
2238 : * unless we hit a transferred fast-path lock. (XXX is this
2239 : * really such a good idea? There could be a lot of entries ...)
2240 : */
2241 749556 : if (!have_fast_path_lwlock)
2242 : {
2243 222380 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2244 222380 : have_fast_path_lwlock = true;
2245 : }
2246 :
2247 : /* Attempt fast-path release. */
2248 749556 : relid = locallock->tag.lock.locktag_field2;
2249 749556 : if (FastPathUnGrantRelationLock(relid, lockmode))
2250 : {
2251 747584 : RemoveLocalLock(locallock);
2252 747584 : continue;
2253 : }
2254 :
2255 : /*
2256 : * Our lock, originally taken via the fast path, has been
2257 : * transferred to the main lock table. That's going to require
2258 : * some extra work, so release our fast-path lock before starting.
2259 : */
2260 1972 : LWLockRelease(&MyProc->fpInfoLock);
2261 1972 : have_fast_path_lwlock = false;
2262 :
2263 : /*
2264 : * Now dump the lock. We haven't got a pointer to the LOCK or
2265 : * PROCLOCK in this case, so we have to handle this a bit
2266 : * differently than a normal lock release. Unfortunately, this
2267 : * requires an extra LWLock acquire-and-release cycle on the
2268 : * partitionLock, but hopefully it shouldn't happen often.
2269 : */
2270 1972 : LockRefindAndRelease(lockMethodTable, MyProc,
2271 : &locallock->tag.lock, lockmode, false);
2272 1972 : RemoveLocalLock(locallock);
2273 1972 : continue;
2274 : }
2275 :
2276 : /* Mark the proclock to show we need to release this lockmode */
2277 795606 : if (locallock->nLocks > 0)
2278 795606 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2279 :
2280 : /* And remove the locallock hashtable entry */
2281 795606 : RemoveLocalLock(locallock);
2282 : }
2283 :
2284 : /* Done with the fast-path data structures */
2285 1053074 : if (have_fast_path_lwlock)
2286 220408 : LWLockRelease(&MyProc->fpInfoLock);
2287 :
2288 : /*
2289 : * Now, scan each lock partition separately.
2290 : */
2291 17902258 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2292 : {
2293 : LWLock *partitionLock;
2294 16849184 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2295 : dlist_mutable_iter proclock_iter;
2296 :
2297 16849184 : partitionLock = LockHashPartitionLockByIndex(partition);
2298 :
2299 : /*
2300 : * If the proclock list for this partition is empty, we can skip
2301 : * acquiring the partition lock. This optimization is trickier than
2302 : * it looks, because another backend could be in process of adding
2303 : * it looks, because another backend could be in the process of adding
2304 : * fast-path locks. However, any such lock must be one that we
2305 : * decided not to delete above, so it's okay to skip it again now;
2306 : * we'd just decide not to delete it again. We must, however, be
2307 : * careful to re-fetch the list header once we've acquired the
2308 : * partition lock, to be sure we have a valid, up-to-date pointer.
2309 : * (There is probably no significant risk if pointer fetch/store is
2310 : * atomic, but we don't wish to assume that.)
2311 : *
2312 : * XXX This argument assumes that the locallock table correctly
2313 : * represents all of our fast-path locks. While allLocks mode
2314 : * guarantees to clean up all of our normal locks regardless of the
2315 : * locallock situation, we lose that guarantee for fast-path locks.
2316 : * This is not ideal.
2317 : */
2318 16849184 : if (dlist_is_empty(procLocks))
2319 16190864 : continue; /* needn't examine this partition */
2320 :
2321 658320 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2322 :
2323 1489878 : dlist_foreach_modify(proclock_iter, procLocks)
2324 : {
2325 831558 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2326 831558 : bool wakeupNeeded = false;
2327 :
2328 : Assert(proclock->tag.myProc == MyProc);
2329 :
2330 831558 : lock = proclock->tag.myLock;
2331 :
2332 : /* Ignore items that are not of the lockmethod to be removed */
2333 831558 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2334 38876 : continue;
2335 :
2336 : /*
2337 : * In allLocks mode, force release of all locks even if locallock
2338 : * table had problems
2339 : */
2340 792682 : if (allLocks)
2341 80520 : proclock->releaseMask = proclock->holdMask;
2342 : else
2343 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2344 :
2345 : /*
2346 : * Ignore items that have nothing to be released, unless they have
2347 : * holdMask == 0 and are therefore recyclable
2348 : */
2349 792682 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2350 37488 : continue;
2351 :
2352 : PROCLOCK_PRINT("LockReleaseAll", proclock);
2353 : LOCK_PRINT("LockReleaseAll", lock, 0);
2354 : Assert(lock->nRequested >= 0);
2355 : Assert(lock->nGranted >= 0);
2356 : Assert(lock->nGranted <= lock->nRequested);
2357 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2358 :
2359 : /*
2360 : * Release the previously-marked lock modes
2361 : */
2362 6796746 : for (i = 1; i <= numLockModes; i++)
2363 : {
2364 6041552 : if (proclock->releaseMask & LOCKBIT_ON(i))
2365 795606 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2366 : lockMethodTable);
2367 : }
2368 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2369 : Assert(lock->nGranted <= lock->nRequested);
2370 : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2371 :
2372 755194 : proclock->releaseMask = 0;
2373 :
2374 : /* CleanUpLock will wake up waiters if needed. */
2375 755194 : CleanUpLock(lock, proclock,
2376 : lockMethodTable,
2377 755194 : LockTagHashCode(&lock->tag),
2378 : wakeupNeeded);
2379 : } /* loop over PROCLOCKs within this partition */
2380 :
2381 658320 : LWLockRelease(partitionLock);
2382 : } /* loop over partitions */
2383 :
2384 : #ifdef LOCK_DEBUG
2385 : if (*(lockMethodTable->trace_flag))
2386 : elog(LOG, "LockReleaseAll done");
2387 : #endif
2388 1053074 : }
2389 :
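/*
 * Editor's sketch: the second pass of LockReleaseAll, per proclock. The
 * first pass over the locallock table marks what to drop in each proclock's
 * releaseMask; this helper then applies that mask, accumulating whether
 * waiters may need waking. Types, the callback, and names are expository
 * stand-ins; fast-path and session-lock handling are deliberately omitted.
 */
#include <stdbool.h>
#include <stdint.h>

#define RELEASE_MODE_BIT(m) (1u << (m))

typedef struct MiniReleaseProcLock
{
    uint32_t    holdMask;
    uint32_t    releaseMask;    /* marked during the locallock scan */
} MiniReleaseProcLock;

static bool
apply_release_mask(MiniReleaseProcLock *proclock, int numLockModes,
                   bool (*ungrant)(MiniReleaseProcLock *, int))
{
    bool        wakeupNeeded = false;

    for (int m = 1; m <= numLockModes; m++)
    {
        if (proclock->releaseMask & RELEASE_MODE_BIT(m))
            wakeupNeeded |= ungrant(proclock, m);
    }
    proclock->releaseMask = 0;
    return wakeupNeeded;        /* caller feeds this into CleanUpLock */
}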
2390 : /*
2391 : * LockReleaseSession -- Release all session locks of the specified lock method
2392 : * that are held by the current process.
2393 : */
2394 : void
2395 238 : LockReleaseSession(LOCKMETHODID lockmethodid)
2396 : {
2397 : HASH_SEQ_STATUS status;
2398 : LOCALLOCK *locallock;
2399 :
2400 238 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2401 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2402 :
2403 238 : hash_seq_init(&status, LockMethodLocalHash);
2404 :
2405 452 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2406 : {
2407 : /* Ignore items that are not of the specified lock method */
2408 214 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2409 20 : continue;
2410 :
2411 194 : ReleaseLockIfHeld(locallock, true);
2412 : }
2413 238 : }
2414 :
2415 : /*
2416 : * LockReleaseCurrentOwner
2417 : * Release all locks belonging to CurrentResourceOwner
2418 : *
2419 : * If the caller knows what those locks are, it can pass them as an array.
2420 : * That speeds up the call significantly when a lot of locks are held.
2421 : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2422 : * table to find them.
2423 : */
2424 : void
2425 9360 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2426 : {
2427 9360 : if (locallocks == NULL)
2428 : {
2429 : HASH_SEQ_STATUS status;
2430 : LOCALLOCK *locallock;
2431 :
2432 8 : hash_seq_init(&status, LockMethodLocalHash);
2433 :
2434 530 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2435 522 : ReleaseLockIfHeld(locallock, false);
2436 : }
2437 : else
2438 : {
2439 : int i;
2440 :
2441 14352 : for (i = nlocks - 1; i >= 0; i--)
2442 5000 : ReleaseLockIfHeld(locallocks[i], false);
2443 : }
2444 9360 : }
2445 :
2446 : /*
2447 : * ReleaseLockIfHeld
2448 : * Release any session-level locks on this lockable object if sessionLock
2449 : * is true; else, release any locks held by CurrentResourceOwner.
2450 : *
2451 : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2452 : * locks), but without refactoring LockRelease() we cannot support releasing
2453 : * locks belonging to resource owners other than CurrentResourceOwner.
2454 : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2455 : * do a hashtable lookup of the locallock, too. However, currently this
2456 : * function isn't used heavily enough to justify refactoring for its
2457 : * convenience.
2458 : */
2459 : static void
2460 5716 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2461 : {
2462 : ResourceOwner owner;
2463 : LOCALLOCKOWNER *lockOwners;
2464 : int i;
2465 :
2466 : /* Identify owner for lock (must match LockRelease!) */
2467 5716 : if (sessionLock)
2468 194 : owner = NULL;
2469 : else
2470 5522 : owner = CurrentResourceOwner;
2471 :
2472 : /* Scan to see if there are any locks belonging to the target owner */
2473 5716 : lockOwners = locallock->lockOwners;
2474 6096 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2475 : {
2476 5716 : if (lockOwners[i].owner == owner)
2477 : {
2478 : Assert(lockOwners[i].nLocks > 0);
2479 5336 : if (lockOwners[i].nLocks < locallock->nLocks)
2480 : {
2481 : /*
2482 : * We will still hold this lock after forgetting this
2483 : * ResourceOwner.
2484 : */
2485 1344 : locallock->nLocks -= lockOwners[i].nLocks;
2486 : /* compact out unused slot */
2487 1344 : locallock->numLockOwners--;
2488 1344 : if (owner != NULL)
2489 1344 : ResourceOwnerForgetLock(owner, locallock);
2490 1344 : if (i < locallock->numLockOwners)
2491 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2492 : }
2493 : else
2494 : {
2495 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2496 : /* We want to call LockRelease just once */
2497 3992 : lockOwners[i].nLocks = 1;
2498 3992 : locallock->nLocks = 1;
2499 3992 : if (!LockRelease(&locallock->tag.lock,
2500 : locallock->tag.mode,
2501 : sessionLock))
2502 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2503 : }
2504 5336 : break;
2505 : }
2506 : }
2507 5716 : }
2508 :
2509 : /*
2510 : * LockReassignCurrentOwner
2511 : * Reassign all locks belonging to CurrentResourceOwner to belong
2512 : * to its parent resource owner.
2513 : *
2514 : * If the caller knows what those locks are, it can pass them as an array.
2515 : * That speeds up the call significantly when a lot of locks are held
2516 : * (e.g. pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2517 : * and we'll traverse through our hash table to find them.
2518 : */
2519 : void
2520 566630 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2521 : {
2522 566630 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2523 :
2524 : Assert(parent != NULL);
2525 :
2526 566630 : if (locallocks == NULL)
2527 : {
2528 : HASH_SEQ_STATUS status;
2529 : LOCALLOCK *locallock;
2530 :
2531 5446 : hash_seq_init(&status, LockMethodLocalHash);
2532 :
2533 144938 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2534 139492 : LockReassignOwner(locallock, parent);
2535 : }
2536 : else
2537 : {
2538 : int i;
2539 :
2540 1174414 : for (i = nlocks - 1; i >= 0; i--)
2541 613230 : LockReassignOwner(locallocks[i], parent);
2542 : }
2543 566630 : }
2544 :
2545 : /*
2546 : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2547 : * CurrentResourceOwner to its parent.
2548 : */
2549 : static void
2550 752722 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2551 : {
2552 : LOCALLOCKOWNER *lockOwners;
2553 : int i;
2554 752722 : int ic = -1;
2555 752722 : int ip = -1;
2556 :
2557 : /*
2558 : * Scan to see if there are any locks belonging to current owner or its
2559 : * parent
2560 : */
2561 752722 : lockOwners = locallock->lockOwners;
2562 1776298 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2563 : {
2564 1023576 : if (lockOwners[i].owner == CurrentResourceOwner)
2565 735088 : ic = i;
2566 288488 : else if (lockOwners[i].owner == parent)
2567 205098 : ip = i;
2568 : }
2569 :
2570 752722 : if (ic < 0)
2571 17634 : return; /* no current locks */
2572 :
2573 735088 : if (ip < 0)
2574 : {
2575 : /* Parent has no slot, so just give it the child's slot */
2576 547566 : lockOwners[ic].owner = parent;
2577 547566 : ResourceOwnerRememberLock(parent, locallock);
2578 : }
2579 : else
2580 : {
2581 : /* Merge child's count with parent's */
2582 187522 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2583 : /* compact out unused slot */
2584 187522 : locallock->numLockOwners--;
2585 187522 : if (ic < locallock->numLockOwners)
2586 1370 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2587 : }
2588 735088 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2589 : }
2590 :
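/*
 * Editor's sketch: LockReassignOwner's slot surgery. If the parent has no
 * slot of its own, the child's slot is simply relabeled; otherwise the
 * counts are merged and the child's slot is compacted away. Names here are
 * illustrative only.
 */
typedef struct MiniReassignOwner
{
    const void *owner;
    int         nLocks;
} MiniReassignOwner;

static void
reassign_owner(MiniReassignOwner *owners, int *numOwners,
               const void *child, const void *parent)
{
    int         ic = -1,
                ip = -1;

    for (int i = *numOwners - 1; i >= 0; i--)
    {
        if (owners[i].owner == child)
            ic = i;
        else if (owners[i].owner == parent)
            ip = i;
    }
    if (ic < 0)
        return;                 /* child holds nothing here */

    if (ip < 0)
        owners[ic].owner = parent;              /* rename child's slot */
    else
    {
        owners[ip].nLocks += owners[ic].nLocks; /* merge the counts */
        (*numOwners)--;
        if (ic < *numOwners)
            owners[ic] = owners[*numOwners];    /* compact the hole */
    }
}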
2591 : /*
2592 : * FastPathGrantRelationLock
2593 : * Grant lock using per-backend fast-path array, if there is space.
2594 : */
2595 : static bool
2596 21140226 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2597 : {
2598 : uint32 f;
2599 21140226 : uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2600 :
2601 : /* Scan for existing entry for this relid, remembering empty slot. */
2602 358212206 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2603 : {
2604 337583668 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2605 294406218 : unused_slot = f;
2606 43177450 : else if (MyProc->fpRelId[f] == relid)
2607 : {
2608 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2609 511688 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2610 511688 : return true;
2611 : }
2612 : }
2613 :
2614 : /* If no existing entry, use any empty slot. */
2615 20628538 : if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2616 : {
2617 20628538 : MyProc->fpRelId[unused_slot] = relid;
2618 20628538 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2619 20628538 : ++FastPathLocalUseCount;
2620 20628538 : return true;
2621 : }
2622 :
2623 : /* No existing entry, and no empty slot. */
2624 0 : return false;
2625 : }
2626 :
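/*
 * Editor's sketch: the fast-path slot scan, over a flat array. FP_SLOTS and
 * FpSlot are stand-ins for the per-backend fpRelId[] array and its packed
 * per-slot lockmode bits; a zero bits field marks a free slot, and there is
 * at most one slot per relation. The slot count is an assumption of the
 * sketch, not a statement about FP_LOCK_SLOTS_PER_BACKEND.
 */
#include <stdbool.h>
#include <stdint.h>

#define FP_SLOTS 16

typedef struct FpSlot
{
    uint32_t    relid;
    uint32_t    bits;           /* packed lockmode bits; 0 => slot unused */
} FpSlot;

/* Returns false if the array is full: caller falls back to shared memory. */
static bool
fp_grant(FpSlot slots[FP_SLOTS], uint32_t relid, int mode)
{
    int         unused = FP_SLOTS;

    for (int f = 0; f < FP_SLOTS; f++)
    {
        if (slots[f].bits == 0)
            unused = f;         /* remember a free slot as we go */
        else if (slots[f].relid == relid)
        {
            slots[f].bits |= 1u << mode;    /* extend the existing entry */
            return true;
        }
    }
    if (unused < FP_SLOTS)
    {
        slots[unused].relid = relid;
        slots[unused].bits = 1u << mode;
        return true;
    }
    return false;
}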
2627 : /*
2628 : * FastPathUnGrantRelationLock
2629 : * Release fast-path lock, if present. Update backend-private local
2630 : * use count, while we're at it.
2631 : */
2632 : static bool
2633 21744772 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2634 : {
2635 : uint32 f;
2636 21744772 : bool result = false;
2637 :
2638 21744772 : FastPathLocalUseCount = 0;
2639 369661124 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2640 : {
2641 347916352 : if (MyProc->fpRelId[f] == relid
2642 25745140 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2643 : {
2644 : Assert(!result);
2645 21137454 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2646 21137454 : result = true;
2647 : /* we continue iterating so as to update FastPathLocalUseCount */
2648 : }
2649 347916352 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2650 51192252 : ++FastPathLocalUseCount;
2651 : }
2652 21744772 : return result;
2653 : }
2654 :
2655 : /*
2656 : * FastPathTransferRelationLocks
2657 : * Transfer locks matching the given lock tag from per-backend fast-path
2658 : * arrays to the shared hash table.
2659 : *
2660 : * Returns true if successful, false if ran out of shared memory.
2661 : */
2662 : static bool
2663 282646 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2664 : uint32 hashcode)
2665 : {
2666 282646 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
2667 282646 : Oid relid = locktag->locktag_field2;
2668 : uint32 i;
2669 :
2670 : /*
2671 : * Every PGPROC that can potentially hold a fast-path lock is present in
2672 : * ProcGlobal->allProcs. Prepared transactions are not, but any
2673 : * outstanding fast-path locks held by prepared transactions are
2674 : * transferred to the main lock table.
2675 : */
2676 28754114 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2677 : {
2678 28471468 : PGPROC *proc = &ProcGlobal->allProcs[i];
2679 : uint32 f;
2680 :
2681 28471468 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2682 :
2683 : /*
2684 : * If the target backend isn't referencing the same database as the
2685 : * lock, then we needn't examine the individual relation IDs at all;
2686 : * none of them can be relevant.
2687 : *
2688 : * proc->databaseId is set at backend startup time and never changes
2689 : * thereafter, so it might be safe to perform this test before
2690 : * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2691 : * assume that if the target backend holds any fast-path locks, it
2692 : * must have performed a memory-fencing operation (in particular, an
2693 : * LWLock acquisition) since setting proc->databaseId. However, it's
2694 : * less clear that our backend is certain to have performed a memory
2695 : * fencing operation since the other backend set proc->databaseId. So
2696 : * for now, we test it after acquiring the LWLock just to be safe.
2697 : */
2698 28471468 : if (proc->databaseId != locktag->locktag_field1)
2699 : {
2700 13322456 : LWLockRelease(&proc->fpInfoLock);
2701 13322456 : continue;
2702 : }
2703 :
2704 257530858 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2705 : {
2706 : uint32 lockmode;
2707 :
2708 : /* Look for an allocated slot matching the given relid. */
2709 242383732 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2710 242381846 : continue;
2711 :
2712 : /* Find or create lock object. */
2713 1886 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2714 7544 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2715 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2716 5658 : ++lockmode)
2717 : {
2718 : PROCLOCK *proclock;
2719 :
2720 5658 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2721 3660 : continue;
2722 1998 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2723 : hashcode, lockmode);
2724 1998 : if (!proclock)
2725 : {
2726 0 : LWLockRelease(partitionLock);
2727 0 : LWLockRelease(&proc->fpInfoLock);
2728 0 : return false;
2729 : }
2730 1998 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2731 1998 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2732 : }
2733 1886 : LWLockRelease(partitionLock);
2734 :
2735 : /* No need to examine remaining slots. */
2736 1886 : break;
2737 : }
2738 15149012 : LWLockRelease(&proc->fpInfoLock);
2739 : }
2740 282646 : return true;
2741 : }
2742 :
2743 : /*
2744 : * FastPathGetRelationLockEntry
2745 : * Return the PROCLOCK for a lock originally taken via the fast-path,
2746 : * transferring it to the primary lock table if necessary.
2747 : *
2748 : * Note: caller takes care of updating the locallock object.
2749 : */
2750 : static PROCLOCK *
2751 790 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2752 : {
2753 790 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2754 790 : LOCKTAG *locktag = &locallock->tag.lock;
2755 790 : PROCLOCK *proclock = NULL;
2756 790 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2757 790 : Oid relid = locktag->locktag_field2;
2758 : uint32 f;
2759 :
2760 790 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2761 :
2762 12616 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2763 : {
2764 : uint32 lockmode;
2765 :
2766 : /* Look for an allocated slot matching the given relid. */
2767 12600 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2768 11826 : continue;
2769 :
2770 : /* If we don't have a lock of the given mode, forget it! */
2771 774 : lockmode = locallock->tag.mode;
2772 774 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2773 0 : break;
2774 :
2775 : /* Find or create lock object. */
2776 774 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2777 :
2778 774 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2779 : locallock->hashcode, lockmode);
2780 774 : if (!proclock)
2781 : {
2782 0 : LWLockRelease(partitionLock);
2783 0 : LWLockRelease(&MyProc->fpInfoLock);
2784 0 : ereport(ERROR,
2785 : (errcode(ERRCODE_OUT_OF_MEMORY),
2786 : errmsg("out of shared memory"),
2787 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
2788 : }
2789 774 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2790 774 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2791 :
2792 774 : LWLockRelease(partitionLock);
2793 :
2794 : /* No need to examine remaining slots. */
2795 774 : break;
2796 : }
2797 :
2798 790 : LWLockRelease(&MyProc->fpInfoLock);
2799 :
2800 : /* Lock may have already been transferred by some other backend. */
2801 790 : if (proclock == NULL)
2802 : {
2803 : LOCK *lock;
2804 : PROCLOCKTAG proclocktag;
2805 : uint32 proclock_hashcode;
2806 :
2807 16 : LWLockAcquire(partitionLock, LW_SHARED);
2808 :
2809 16 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2810 : locktag,
2811 : locallock->hashcode,
2812 : HASH_FIND,
2813 : NULL);
2814 16 : if (!lock)
2815 0 : elog(ERROR, "failed to re-find shared lock object");
2816 :
2817 16 : proclocktag.myLock = lock;
2818 16 : proclocktag.myProc = MyProc;
2819 :
2820 16 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2821 : proclock = (PROCLOCK *)
2822 16 : hash_search_with_hash_value(LockMethodProcLockHash,
2823 : &proclocktag,
2824 : proclock_hashcode,
2825 : HASH_FIND,
2826 : NULL);
2827 16 : if (!proclock)
2828 0 : elog(ERROR, "failed to re-find shared proclock object");
2829 16 : LWLockRelease(partitionLock);
2830 : }
2831 :
2832 790 : return proclock;
2833 : }
2834 :
2835 : /*
2836 : * GetLockConflicts
2837 : * Get an array of VirtualTransactionIds of xacts currently holding locks
2838 : * that would conflict with the specified lock/lockmode.
2839 : * xacts merely awaiting such a lock are NOT reported.
2840 : *
2841 : * The result array is palloc'd and is terminated with an invalid VXID.
2842 : * *countp, if not null, is updated to the number of items set.
2843 : *
2844 : * Of course, the result could be out of date by the time it's returned, so
2845 : * use of this function has to be thought about carefully. Similarly, a
2846 : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
2847 : * lock it holds. Existing callers don't care about a locker after that
2848 : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
2849 : * pg_xact updates and before releasing locks.
2850 : *
2851 : * Note we never include the current xact's vxid in the result array,
2852 : * since an xact never blocks itself.
2853 : */
2854 : VirtualTransactionId *
2855 2360 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2856 : {
2857 : static VirtualTransactionId *vxids;
2858 2360 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2859 : LockMethod lockMethodTable;
2860 : LOCK *lock;
2861 : LOCKMASK conflictMask;
2862 : dlist_iter proclock_iter;
2863 : PROCLOCK *proclock;
2864 : uint32 hashcode;
2865 : LWLock *partitionLock;
2866 2360 : int count = 0;
2867 2360 : int fast_count = 0;
2868 :
2869 2360 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2870 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2871 2360 : lockMethodTable = LockMethods[lockmethodid];
2872 2360 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2873 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2874 :
2875 : /*
2876 : * Allocate memory to store results, and fill with InvalidVXID. We only
2877 : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2878 : * When InHotStandby, allocate just once in TopMemoryContext and reuse it.
2879 : */
2880 2360 : if (InHotStandby)
2881 : {
2882 8 : if (vxids == NULL)
2883 2 : vxids = (VirtualTransactionId *)
2884 2 : MemoryContextAlloc(TopMemoryContext,
2885 : sizeof(VirtualTransactionId) *
2886 2 : (MaxBackends + max_prepared_xacts + 1));
2887 : }
2888 : else
2889 2352 : vxids = (VirtualTransactionId *)
2890 2352 : palloc0(sizeof(VirtualTransactionId) *
2891 2352 : (MaxBackends + max_prepared_xacts + 1));
2892 :
2893 : /* Compute hash code and partition lock, and look up conflicting modes. */
2894 2360 : hashcode = LockTagHashCode(locktag);
2895 2360 : partitionLock = LockHashPartitionLock(hashcode);
2896 2360 : conflictMask = lockMethodTable->conflictTab[lockmode];
2897 :
2898 : /*
2899 : * Fast path locks might not have been entered in the primary lock table.
2900 : * If the lock we're dealing with could conflict with such a lock, we must
2901 : * examine each backend's fast-path array for conflicts.
2902 : */
2903 2360 : if (ConflictsWithRelationFastPath(locktag, lockmode))
2904 : {
2905 : int i;
2906 2360 : Oid relid = locktag->locktag_field2;
2907 : VirtualTransactionId vxid;
2908 :
2909 : /*
2910 : * Iterate over relevant PGPROCs. Anything held by a prepared
2911 : * transaction will have been transferred to the primary lock table,
2912 : * so we need not worry about those. This is all a bit fuzzy, because
2913 : * new locks could be taken after we've visited a particular
2914 : * partition, but the callers had better be prepared to deal with that
2915 : * anyway, since the locks could equally well be taken between the
2916 : * time we return the value and the time the caller does something
2917 : * with it.
2918 : */
2919 264620 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2920 : {
2921 262260 : PGPROC *proc = &ProcGlobal->allProcs[i];
2922 : uint32 f;
2923 :
2924 : /* A backend never blocks itself */
2925 262260 : if (proc == MyProc)
2926 2360 : continue;
2927 :
2928 259900 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2929 :
2930 : /*
2931 : * If the target backend isn't referencing the same database as
2932 : * the lock, then we needn't examine the individual relation IDs
2933 : * at all; none of them can be relevant.
2934 : *
2935 : * See FastPathTransferRelationLocks() for discussion of why we do
2936 : * this test after acquiring the lock.
2937 : */
2938 259900 : if (proc->databaseId != locktag->locktag_field1)
2939 : {
2940 107444 : LWLockRelease(&proc->fpInfoLock);
2941 107444 : continue;
2942 : }
2943 :
2944 2591232 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2945 : {
2946 : uint32 lockmask;
2947 :
2948 : /* Look for an allocated slot matching the given relid. */
2949 2439200 : if (relid != proc->fpRelId[f])
2950 2437392 : continue;
2951 1808 : lockmask = FAST_PATH_GET_BITS(proc, f);
2952 1808 : if (!lockmask)
2953 1384 : continue;
2954 424 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2955 :
2956 : /*
2957 : * There can only be one entry per relation, so if we found it
2958 : * and it doesn't conflict, we can skip the rest of the slots.
2959 : */
2960 424 : if ((lockmask & conflictMask) == 0)
2961 10 : break;
2962 :
2963 : /* Conflict! */
2964 414 : GET_VXID_FROM_PGPROC(vxid, *proc);
2965 :
2966 414 : if (VirtualTransactionIdIsValid(vxid))
2967 414 : vxids[count++] = vxid;
2968 : /* else, xact already committed or aborted */
2969 :
2970 : /* No need to examine remaining slots. */
2971 414 : break;
2972 : }
2973 :
2974 152456 : LWLockRelease(&proc->fpInfoLock);
2975 : }
2976 : }
2977 :
2978 : /* Remember how many fast-path conflicts we found. */
2979 2360 : fast_count = count;
2980 :
2981 : /*
2982 : * Look up the lock object matching the tag.
2983 : */
2984 2360 : LWLockAcquire(partitionLock, LW_SHARED);
2985 :
2986 2360 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2987 : locktag,
2988 : hashcode,
2989 : HASH_FIND,
2990 : NULL);
2991 2360 : if (!lock)
2992 : {
2993 : /*
2994 : * If the lock object doesn't exist, there is nothing holding a lock
2995 : * on this lockable object.
2996 : */
2997 140 : LWLockRelease(partitionLock);
2998 140 : vxids[count].backendId = InvalidBackendId;
2999 140 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3000 140 : if (countp)
3001 0 : *countp = count;
3002 140 : return vxids;
3003 : }
3004 :
3005 : /*
3006 : * Examine each existing holder (or awaiter) of the lock.
3007 : */
3008 4472 : dlist_foreach(proclock_iter, &lock->procLocks)
3009 : {
3010 2252 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3011 :
3012 2252 : if (conflictMask & proclock->holdMask)
3013 : {
3014 2244 : PGPROC *proc = proclock->tag.myProc;
3015 :
3016 : /* A backend never blocks itself */
3017 2244 : if (proc != MyProc)
3018 : {
3019 : VirtualTransactionId vxid;
3020 :
3021 32 : GET_VXID_FROM_PGPROC(vxid, *proc);
3022 :
3023 32 : if (VirtualTransactionIdIsValid(vxid))
3024 : {
3025 : int i;
3026 :
3027 : /* Avoid duplicate entries. */
3028 54 : for (i = 0; i < fast_count; ++i)
3029 22 : if (VirtualTransactionIdEquals(vxids[i], vxid))
3030 0 : break;
3031 32 : if (i >= fast_count)
3032 32 : vxids[count++] = vxid;
3033 : }
3034 : /* else, xact already committed or aborted */
3035 : }
3036 : }
3037 : }
3038 :
3039 2220 : LWLockRelease(partitionLock);
3040 :
3041 2220 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3042 0 : elog(PANIC, "too many conflicting locks found");
3043 :
3044 2220 : vxids[count].backendId = InvalidBackendId;
3045 2220 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3046 2220 : if (countp)
3047 2214 : *countp = count;
3048 2220 : return vxids;
3049 : }
3050 :
3051 : /*
3052 : * Find a lock in the shared lock table and release it. It is the caller's
3053 : * responsibility to verify that this is a sane thing to do. (For example, it
3054 : * would be bad to release a lock here if there might still be a LOCALLOCK
3055 : * object with pointers to it.)
3056 : *
3057 : * We currently use this in two situations: first, to release locks held by
3058 : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3059 : * to release locks taken via the fast-path, transferred to the main hash
3060 : * table, and then released (see LockReleaseAll).
3061 : */
3062 : static void
3063 4306 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3064 : LOCKTAG *locktag, LOCKMODE lockmode,
3065 : bool decrement_strong_lock_count)
3066 : {
3067 : LOCK *lock;
3068 : PROCLOCK *proclock;
3069 : PROCLOCKTAG proclocktag;
3070 : uint32 hashcode;
3071 : uint32 proclock_hashcode;
3072 : LWLock *partitionLock;
3073 : bool wakeupNeeded;
3074 :
3075 4306 : hashcode = LockTagHashCode(locktag);
3076 4306 : partitionLock = LockHashPartitionLock(hashcode);
3077 :
3078 4306 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3079 :
3080 : /*
3081 : * Re-find the lock object (it had better be there).
3082 : */
3083 4306 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3084 : locktag,
3085 : hashcode,
3086 : HASH_FIND,
3087 : NULL);
3088 4306 : if (!lock)
3089 0 : elog(PANIC, "failed to re-find shared lock object");
3090 :
3091 : /*
3092 : * Re-find the proclock object (ditto).
3093 : */
3094 4306 : proclocktag.myLock = lock;
3095 4306 : proclocktag.myProc = proc;
3096 :
3097 4306 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3098 :
3099 4306 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3100 : &proclocktag,
3101 : proclock_hashcode,
3102 : HASH_FIND,
3103 : NULL);
3104 4306 : if (!proclock)
3105 0 : elog(PANIC, "failed to re-find shared proclock object");
3106 :
3107 : /*
3108 : * Double-check that we are actually holding a lock of the type we want to
3109 : * release.
3110 : */
3111 4306 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3112 : {
3113 : PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3114 0 : LWLockRelease(partitionLock);
3115 0 : elog(WARNING, "you don't own a lock of type %s",
3116 : lockMethodTable->lockModeNames[lockmode]);
3117 0 : return;
3118 : }
3119 :
3120 : /*
3121 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3122 : */
3123 4306 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3124 :
3125 4306 : CleanUpLock(lock, proclock,
3126 : lockMethodTable, hashcode,
3127 : wakeupNeeded);
3128 :
3129 4306 : LWLockRelease(partitionLock);
3130 :
3131 : /*
3132 : * Decrement strong lock count. This logic is needed only for 2PC.
3133 : */
3134 4306 : if (decrement_strong_lock_count
3135 1786 : && ConflictsWithRelationFastPath(locktag, lockmode))
3136 : {
3137 128 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3138 :
3139 128 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3140 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3141 128 : FastPathStrongRelationLocks->count[fasthashcode]--;
3142 128 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3143 : }
3144 : }
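: /*
:  * Illustrative sketch (not compiled): the two call sites described in the
:  * header comment appear later in this file and look like this:
:  *
:  *     // from lock_twophase_postcommit(), for COMMIT/ROLLBACK PREPARED
:  *     LockRefindAndRelease(lockMethodTable, proc, locktag,
:  *                          rec->lockmode, true);
:  *
:  *     // from VirtualXactLockTableCleanup(), for a VXID lock that was
:  *     // transferred to the main table
:  *     LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
:  *                          &locktag, ExclusiveLock, false);
:  */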
3145 :
3146 : /*
3147 : * CheckForSessionAndXactLocks
3148 : * Check to see if transaction holds both session-level and xact-level
3149 : * locks on the same object; if so, throw an error.
3150 : *
3151 : * If we have both session- and transaction-level locks on the same object,
3152 : * PREPARE TRANSACTION must fail. This should never happen with regular
3153 : * locks, since we only take those at session level in some special operations
3154 : * like VACUUM. It's possible to hit this with advisory locks, though.
3155 : *
3156 : * It would be nice if we could keep the session hold and give away the
3157 : * transactional hold to the prepared xact. However, that would require two
3158 : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3159 : * available when it comes time for PostPrepare_Locks to do the deed.
3160 : * So for now, we error out while we can still do so safely.
3161 : *
3162 : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3163 : * we can't implement this check by examining LOCALLOCK entries in isolation.
3164 : * We must build a transient hashtable that is indexed by locktag only.
3165 : */
3166 : static void
3167 774 : CheckForSessionAndXactLocks(void)
3168 : {
3169 : typedef struct
3170 : {
3171 : LOCKTAG lock; /* identifies the lockable object */
3172 : bool sessLock; /* is any lockmode held at session level? */
3173 : bool xactLock; /* is any lockmode held at xact level? */
3174 : } PerLockTagEntry;
3175 :
3176 : HASHCTL hash_ctl;
3177 : HTAB *lockhtab;
3178 : HASH_SEQ_STATUS status;
3179 : LOCALLOCK *locallock;
3180 :
3181 : /* Create a local hash table keyed by LOCKTAG only */
3182 774 : hash_ctl.keysize = sizeof(LOCKTAG);
3183 774 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3184 774 : hash_ctl.hcxt = CurrentMemoryContext;
3185 :
3186 774 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3187 : 256, /* arbitrary initial size */
3188 : &hash_ctl,
3189 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3190 :
3191 : /* Scan local lock table to find entries for each LOCKTAG */
3192 774 : hash_seq_init(&status, LockMethodLocalHash);
3193 :
3194 2566 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3195 : {
3196 1796 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3197 : PerLockTagEntry *hentry;
3198 : bool found;
3199 : int i;
3200 :
3201 : /*
3202 : * Ignore VXID locks. We don't want those to be held by prepared
3203 : * transactions, since they aren't meaningful after a restart.
3204 : */
3205 1796 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3206 0 : continue;
3207 :
3208 : /* Ignore it if we don't actually hold the lock */
3209 1796 : if (locallock->nLocks <= 0)
3210 0 : continue;
3211 :
3212 : /* Otherwise, find or make an entry in lockhtab */
3213 1796 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
3214 1796 : &locallock->tag.lock,
3215 : HASH_ENTER, &found);
3216 1796 : if (!found) /* initialize, if newly created */
3217 1704 : hentry->sessLock = hentry->xactLock = false;
3218 :
3219 : /* Scan to see if we hold lock at session or xact level or both */
3220 3592 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3221 : {
3222 1796 : if (lockOwners[i].owner == NULL)
3223 18 : hentry->sessLock = true;
3224 : else
3225 1778 : hentry->xactLock = true;
3226 : }
3227 :
3228 : /*
3229 : * We can throw error immediately when we see both types of locks; no
3230 : * need to wait around to see if there are more violations.
3231 : */
3232 1796 : if (hentry->sessLock && hentry->xactLock)
3233 4 : ereport(ERROR,
3234 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3235 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3236 : }
3237 :
3238 : /* Success, so clean up */
3239 770 : hash_destroy(lockhtab);
3240 770 : }
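: /*
:  * Illustrative example (SQL, shown here only as a sketch): assuming
:  * max_prepared_transactions > 0, the error above can be reached by mixing
:  * lock levels on a single advisory-lock object:
:  *
:  *     SELECT pg_advisory_lock(1);        -- session-level hold
:  *     BEGIN;
:  *     SELECT pg_advisory_xact_lock(1);   -- xact-level hold, same tag
:  *     PREPARE TRANSACTION 'p1';          -- fails with the ereport above
:  */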
3241 :
3242 : /*
3243 : * AtPrepare_Locks
3244 : * Do the preparatory work for a PREPARE: make 2PC state file records
3245 : * for all locks currently held.
3246 : *
3247 : * Session-level locks are ignored, as are VXID locks.
3248 : *
3249 : * For the most part, we don't need to touch shared memory for this ---
3250 : * all the necessary state information is in the locallock table.
3251 : * Fast-path locks are an exception, however: we move any such locks to
3252 : * the main table before allowing PREPARE TRANSACTION to succeed.
3253 : */
3254 : void
3255 774 : AtPrepare_Locks(void)
3256 : {
3257 : HASH_SEQ_STATUS status;
3258 : LOCALLOCK *locallock;
3259 :
3260 : /* First, verify there aren't locks of both xact and session level */
3261 774 : CheckForSessionAndXactLocks();
3262 :
3263 : /* Now do the per-locallock cleanup work */
3264 770 : hash_seq_init(&status, LockMethodLocalHash);
3265 :
3266 2556 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3267 : {
3268 : TwoPhaseLockRecord record;
3269 1786 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3270 : bool haveSessionLock;
3271 : bool haveXactLock;
3272 : int i;
3273 :
3274 : /*
3275 : * Ignore VXID locks. We don't want those to be held by prepared
3276 : * transactions, since they aren't meaningful after a restart.
3277 : */
3278 1786 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3279 14 : continue;
3280 :
3281 : /* Ignore it if we don't actually hold the lock */
3282 1786 : if (locallock->nLocks <= 0)
3283 0 : continue;
3284 :
3285 : /* Scan to see whether we hold it at session or transaction level */
3286 1786 : haveSessionLock = haveXactLock = false;
3287 3572 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3288 : {
3289 1786 : if (lockOwners[i].owner == NULL)
3290 14 : haveSessionLock = true;
3291 : else
3292 1772 : haveXactLock = true;
3293 : }
3294 :
3295 : /* Ignore it if we have only session lock */
3296 1786 : if (!haveXactLock)
3297 14 : continue;
3298 :
3299 : /* This can't happen, because we already checked it */
3300 1772 : if (haveSessionLock)
3301 0 : ereport(ERROR,
3302 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3303 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3304 :
3305 : /*
3306 : * If the local lock was taken via the fast-path, we need to move it
3307 : * to the primary lock table, or just get a pointer to the existing
3308 : * primary lock table entry if by chance it's already been
3309 : * transferred.
3310 : */
3311 1772 : if (locallock->proclock == NULL)
3312 : {
3313 790 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
3314 790 : locallock->lock = locallock->proclock->tag.myLock;
3315 : }
3316 :
3317 : /*
3318 : * Arrange to not release any strong lock count held by this lock
3319 : * entry. We must retain the count until the prepared transaction is
3320 : * committed or rolled back.
3321 : */
3322 1772 : locallock->holdsStrongLockCount = false;
3323 :
3324 : /*
3325 : * Create a 2PC record.
3326 : */
3327 1772 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3328 1772 : record.lockmode = locallock->tag.mode;
3329 :
3330 1772 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3331 : &record, sizeof(TwoPhaseLockRecord));
3332 : }
3333 770 : }
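: /*
:  * For orientation (sketch): each record registered above is just the
:  * (locktag, lockmode) pair, per the TwoPhaseLockRecord struct declared
:  * earlier in this file:
:  *
:  *     typedef struct TwoPhaseLockRecord
:  *     {
:  *         LOCKTAG    locktag;
:  *         LOCKMODE   lockmode;
:  *     } TwoPhaseLockRecord;
:  *
:  * These records are replayed by lock_twophase_recover() at startup and by
:  * lock_twophase_postcommit()/lock_twophase_postabort() below.
:  */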
3334 :
3335 : /*
3336 : * PostPrepare_Locks
3337 : * Clean up after successful PREPARE
3338 : *
3339 : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3340 : * that's now associated with the prepared transaction, and we want to
3341 : * clean out the corresponding entries in the LOCALLOCK table.
3342 : *
3343 : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3344 : * pointers in the transaction's resource owner. This is OK at the
3345 : * moment since resowner.c doesn't try to free locks retail at a toplevel
3346 : * transaction commit or abort. We could alternatively zero out nLocks
3347 : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3348 : * but that probably costs more cycles.
3349 : */
3350 : void
3351 770 : PostPrepare_Locks(TransactionId xid)
3352 : {
3353 770 : PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3354 : HASH_SEQ_STATUS status;
3355 : LOCALLOCK *locallock;
3356 : LOCK *lock;
3357 : PROCLOCK *proclock;
3358 : PROCLOCKTAG proclocktag;
3359 : int partition;
3360 :
3361 : /* Can't prepare a lock group follower. */
3362 : Assert(MyProc->lockGroupLeader == NULL ||
3363 : MyProc->lockGroupLeader == MyProc);
3364 :
3365 : /* This is a critical section: any error means big trouble */
3366 770 : START_CRIT_SECTION();
3367 :
3368 : /*
3369 : * First we run through the locallock table and get rid of unwanted
3370 : * entries, then we scan the process's proclocks and transfer them to the
3371 : * target proc.
3372 : *
3373 : * We do this separately because we may have multiple locallock entries
3374 : * pointing to the same proclock, and we daren't end up with any dangling
3375 : * pointers.
3376 : */
3377 770 : hash_seq_init(&status, LockMethodLocalHash);
3378 :
3379 2556 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3380 : {
3381 1786 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3382 : bool haveSessionLock;
3383 : bool haveXactLock;
3384 : int i;
3385 :
3386 1786 : if (locallock->proclock == NULL || locallock->lock == NULL)
3387 : {
3388 : /*
3389 : * We must've run out of shared memory while trying to set up this
3390 : * lock. Just forget the local entry.
3391 : */
3392 : Assert(locallock->nLocks == 0);
3393 0 : RemoveLocalLock(locallock);
3394 0 : continue;
3395 : }
3396 :
3397 : /* Ignore VXID locks */
3398 1786 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3399 0 : continue;
3400 :
3401 : /* Scan to see whether we hold it at session or transaction level */
3402 1786 : haveSessionLock = haveXactLock = false;
3403 3572 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3404 : {
3405 1786 : if (lockOwners[i].owner == NULL)
3406 14 : haveSessionLock = true;
3407 : else
3408 1772 : haveXactLock = true;
3409 : }
3410 :
3411 : /* Ignore it if we have only session lock */
3412 1786 : if (!haveXactLock)
3413 14 : continue;
3414 :
3415 : /* This can't happen, because we already checked it */
3416 1772 : if (haveSessionLock)
3417 0 : ereport(PANIC,
3418 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3419 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3420 :
3421 : /* Mark the proclock to show we need to release this lockmode */
3422 1772 : if (locallock->nLocks > 0)
3423 1772 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3424 :
3425 : /* And remove the locallock hashtable entry */
3426 1772 : RemoveLocalLock(locallock);
3427 : }
3428 :
3429 : /*
3430 : * Now, scan each lock partition separately.
3431 : */
3432 13090 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3433 : {
3434 : LWLock *partitionLock;
3435 12320 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3436 : dlist_mutable_iter proclock_iter;
3437 :
3438 12320 : partitionLock = LockHashPartitionLockByIndex(partition);
3439 :
3440 : /*
3441 : * If the proclock list for this partition is empty, we can skip
 3442 : * acquiring the partition lock. This optimization is safer here than in
 3443 : * LockReleaseAll, because we got rid of any fast-path
3444 : * locks during AtPrepare_Locks, so there cannot be any case where
3445 : * another backend is adding something to our lists now. For safety,
3446 : * though, we code this the same way as in LockReleaseAll.
3447 : */
3448 12320 : if (dlist_is_empty(procLocks))
3449 10624 : continue; /* needn't examine this partition */
3450 :
3451 1696 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3452 :
3453 3484 : dlist_foreach_modify(proclock_iter, procLocks)
3454 : {
3455 1788 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3456 :
3457 : Assert(proclock->tag.myProc == MyProc);
3458 :
3459 1788 : lock = proclock->tag.myLock;
3460 :
3461 : /* Ignore VXID locks */
3462 1788 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3463 90 : continue;
3464 :
3465 : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3466 : LOCK_PRINT("PostPrepare_Locks", lock, 0);
3467 : Assert(lock->nRequested >= 0);
3468 : Assert(lock->nGranted >= 0);
3469 : Assert(lock->nGranted <= lock->nRequested);
3470 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3471 :
3472 : /* Ignore it if nothing to release (must be a session lock) */
3473 1698 : if (proclock->releaseMask == 0)
3474 14 : continue;
3475 :
3476 : /* Else we should be releasing all locks */
3477 1684 : if (proclock->releaseMask != proclock->holdMask)
3478 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3479 :
3480 : /*
3481 : * We cannot simply modify proclock->tag.myProc to reassign
3482 : * ownership of the lock, because that's part of the hash key and
3483 : * the proclock would then be in the wrong hash chain. Instead
3484 : * use hash_update_hash_key. (We used to create a new hash entry,
3485 : * but that risks out-of-memory failure if other processes are
3486 : * busy making proclocks too.) We must unlink the proclock from
3487 : * our procLink chain and put it into the new proc's chain, too.
3488 : *
3489 : * Note: the updated proclock hash key will still belong to the
3490 : * same hash partition, cf proclock_hash(). So the partition lock
3491 : * we already hold is sufficient for this.
3492 : */
3493 1684 : dlist_delete(&proclock->procLink);
3494 :
3495 : /*
3496 : * Create the new hash key for the proclock.
3497 : */
3498 1684 : proclocktag.myLock = lock;
3499 1684 : proclocktag.myProc = newproc;
3500 :
3501 : /*
3502 : * Update groupLeader pointer to point to the new proc. (We'd
3503 : * better not be a member of somebody else's lock group!)
3504 : */
3505 : Assert(proclock->groupLeader == proclock->tag.myProc);
3506 1684 : proclock->groupLeader = newproc;
3507 :
3508 : /*
3509 : * Update the proclock. We should not find any existing entry for
3510 : * the same hash key, since there can be only one entry for any
3511 : * given lock with my own proc.
3512 : */
3513 1684 : if (!hash_update_hash_key(LockMethodProcLockHash,
3514 : proclock,
3515 : &proclocktag))
3516 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3517 :
3518 : /* Re-link into the new proc's proclock list */
3519 1684 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3520 :
3521 : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3522 : } /* loop over PROCLOCKs within this partition */
3523 :
3524 1696 : LWLockRelease(partitionLock);
3525 : } /* loop over partitions */
3526 :
3527 770 : END_CRIT_SECTION();
3528 770 : }
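: /*
:  * Sketch of the PREPARE-side call sequence, roughly as driven by
:  * PrepareTransaction() in access/transam/xact.c (for orientation only):
:  *
:  *     AtPrepare_Locks();        // emit 2PC lock records, move any
:  *                               // fast-path locks to the main table
:  *     ... write the 2PC state data ...
:  *     PostPrepare_Locks(xid);   // hand our PROCLOCKs to the dummy PGPROC
:  */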
3529 :
3530 :
3531 : /*
3532 : * Estimate shared-memory space used for lock tables
3533 : */
3534 : Size
3535 2934 : LockShmemSize(void)
3536 : {
3537 2934 : Size size = 0;
3538 : long max_table_size;
3539 :
3540 : /* lock hash table */
3541 2934 : max_table_size = NLOCKENTS();
3542 2934 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3543 :
3544 : /* proclock hash table */
3545 2934 : max_table_size *= 2;
3546 2934 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3547 :
3548 : /*
3549 : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3550 : */
3551 2934 : size = add_size(size, size / 10);
3552 :
3553 2934 : return size;
3554 : }
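: /*
:  * Worked example (the backend count is an assumption, not a default): with
:  * max_locks_per_xact = 64, max_prepared_xacts = 0, and MaxBackends = 100,
:  * NLOCKENTS() comes to 64 * (100 + 0) = 6400, so we estimate space for
:  * 6400 LOCK entries and 12800 PROCLOCK entries, then add the 10% margin.
:  */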
3555 :
3556 : /*
3557 : * GetLockStatusData - Return a summary of the lock manager's internal
3558 : * status, for use in a user-level reporting function.
3559 : *
3560 : * The return data consists of an array of LockInstanceData objects,
3561 : * which are a lightly abstracted version of the PROCLOCK data structures,
3562 : * i.e. there is one entry for each unique lock and interested PGPROC.
3563 : * It is the caller's responsibility to match up related items (such as
3564 : * references to the same lockable object or PGPROC) if wanted.
3565 : *
3566 : * The design goal is to hold the LWLocks for as short a time as possible;
3567 : * thus, this function simply makes a copy of the necessary data and releases
3568 : * the locks, allowing the caller to contemplate and format the data for as
3569 : * long as it pleases.
3570 : */
3571 : LockData *
3572 508 : GetLockStatusData(void)
3573 : {
3574 : LockData *data;
3575 : PROCLOCK *proclock;
3576 : HASH_SEQ_STATUS seqstat;
3577 : int els;
3578 : int el;
3579 : int i;
3580 :
3581 508 : data = (LockData *) palloc(sizeof(LockData));
3582 :
3583 : /* Guess how much space we'll need. */
3584 508 : els = MaxBackends;
3585 508 : el = 0;
3586 508 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3587 :
3588 : /*
3589 : * First, we iterate through the per-backend fast-path arrays, locking
3590 : * them one at a time. This might produce an inconsistent picture of the
3591 : * system state, but taking all of those LWLocks at the same time seems
3592 : * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3593 : * matter too much, because none of these locks can be involved in lock
 3594 : * conflicts anyway - any lock that could conflict must appear in the main lock
3595 : * table. (For the same reason, we don't sweat about making leaderPid
3596 : * completely valid. We cannot safely dereference another backend's
3597 : * lockGroupLeader field without holding all lock partition locks, and
3598 : * it's not worth that.)
3599 : */
3600 52944 : for (i = 0; i < ProcGlobal->allProcCount; ++i)
3601 : {
3602 52436 : PGPROC *proc = &ProcGlobal->allProcs[i];
3603 : uint32 f;
3604 :
3605 52436 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3606 :
3607 891412 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3608 : {
3609 : LockInstanceData *instance;
3610 838976 : uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3611 :
3612 : /* Skip unallocated slots. */
3613 838976 : if (!lockbits)
3614 834258 : continue;
3615 :
3616 4718 : if (el >= els)
3617 : {
3618 4 : els += MaxBackends;
3619 4 : data->locks = (LockInstanceData *)
3620 4 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3621 : }
3622 :
3623 4718 : instance = &data->locks[el];
3624 4718 : SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3625 : proc->fpRelId[f]);
3626 4718 : instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3627 4718 : instance->waitLockMode = NoLock;
3628 4718 : instance->backend = proc->backendId;
3629 4718 : instance->lxid = proc->lxid;
3630 4718 : instance->pid = proc->pid;
3631 4718 : instance->leaderPid = proc->pid;
3632 4718 : instance->fastpath = true;
3633 :
3634 : /*
3635 : * Successfully taking fast path lock means there were no
3636 : * conflicting locks.
3637 : */
3638 4718 : instance->waitStart = 0;
3639 :
3640 4718 : el++;
3641 : }
3642 :
3643 52436 : if (proc->fpVXIDLock)
3644 : {
3645 : VirtualTransactionId vxid;
3646 : LockInstanceData *instance;
3647 :
3648 1428 : if (el >= els)
3649 : {
3650 0 : els += MaxBackends;
3651 0 : data->locks = (LockInstanceData *)
3652 0 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3653 : }
3654 :
3655 1428 : vxid.backendId = proc->backendId;
3656 1428 : vxid.localTransactionId = proc->fpLocalTransactionId;
3657 :
3658 1428 : instance = &data->locks[el];
3659 1428 : SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3660 1428 : instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3661 1428 : instance->waitLockMode = NoLock;
3662 1428 : instance->backend = proc->backendId;
3663 1428 : instance->lxid = proc->lxid;
3664 1428 : instance->pid = proc->pid;
3665 1428 : instance->leaderPid = proc->pid;
3666 1428 : instance->fastpath = true;
3667 1428 : instance->waitStart = 0;
3668 :
3669 1428 : el++;
3670 : }
3671 :
3672 52436 : LWLockRelease(&proc->fpInfoLock);
3673 : }
3674 :
3675 : /*
3676 : * Next, acquire lock on the entire shared lock data structure. We do
3677 : * this so that, at least for locks in the primary lock table, the state
3678 : * will be self-consistent.
3679 : *
3680 : * Since this is a read-only operation, we take shared instead of
3681 : * exclusive lock. There's not a whole lot of point to this, because all
3682 : * the normal operations require exclusive lock, but it doesn't hurt
3683 : * anything either. It will at least allow two backends to do
3684 : * GetLockStatusData in parallel.
3685 : *
3686 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3687 : */
3688 8636 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3689 8128 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3690 :
3691 : /* Now we can safely count the number of proclocks */
3692 508 : data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3693 508 : if (data->nelements > els)
3694 : {
3695 16 : els = data->nelements;
3696 16 : data->locks = (LockInstanceData *)
3697 16 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3698 : }
3699 :
3700 : /* Now scan the tables to copy the data */
3701 508 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3702 :
3703 3242 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3704 : {
3705 2734 : PGPROC *proc = proclock->tag.myProc;
3706 2734 : LOCK *lock = proclock->tag.myLock;
3707 2734 : LockInstanceData *instance = &data->locks[el];
3708 :
3709 2734 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3710 2734 : instance->holdMask = proclock->holdMask;
3711 2734 : if (proc->waitLock == proclock->tag.myLock)
3712 18 : instance->waitLockMode = proc->waitLockMode;
3713 : else
3714 2716 : instance->waitLockMode = NoLock;
3715 2734 : instance->backend = proc->backendId;
3716 2734 : instance->lxid = proc->lxid;
3717 2734 : instance->pid = proc->pid;
3718 2734 : instance->leaderPid = proclock->groupLeader->pid;
3719 2734 : instance->fastpath = false;
3720 2734 : instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3721 :
3722 2734 : el++;
3723 : }
3724 :
3725 : /*
3726 : * And release locks. We do this in reverse order for two reasons: (1)
3727 : * Anyone else who needs more than one of the locks will be trying to lock
3728 : * them in increasing order; we don't want to release the other process
3729 : * until it can get all the locks it needs. (2) This avoids O(N^2)
3730 : * behavior inside LWLockRelease.
3731 : */
3732 8636 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3733 8128 : LWLockRelease(LockHashPartitionLockByIndex(i));
3734 :
3735 : Assert(el == data->nelements);
3736 :
3737 508 : return data;
3738 : }
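: /*
:  * Note: this function is the workhorse behind the pg_locks view, via
:  * pg_lock_status() in utils/adt/lockfuncs.c; each LockInstanceData becomes
:  * one row of, e.g.,
:  *
:  *     SELECT locktype, mode, granted, fastpath FROM pg_locks;
:  */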
3739 :
3740 : /*
3741 : * GetBlockerStatusData - Return a summary of the lock manager's state
3742 : * concerning locks that are blocking the specified PID or any member of
3743 : * the PID's lock group, for use in a user-level reporting function.
3744 : *
3745 : * For each PID within the lock group that is awaiting some heavyweight lock,
3746 : * the return data includes an array of LockInstanceData objects, which are
3747 : * the same data structure used by GetLockStatusData; but unlike that function,
3748 : * this one reports only the PROCLOCKs associated with the lock that that PID
3749 : * is blocked on. (Hence, all the locktags should be the same for any one
3750 : * blocked PID.) In addition, we return an array of the PIDs of those backends
3751 : * that are ahead of the blocked PID in the lock's wait queue. These can be
3752 : * compared with the PIDs in the LockInstanceData objects to determine which
3753 : * waiters are ahead of or behind the blocked PID in the queue.
3754 : *
3755 : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3756 : * waiting on any heavyweight lock, return empty arrays.
3757 : *
3758 : * The design goal is to hold the LWLocks for as short a time as possible;
3759 : * thus, this function simply makes a copy of the necessary data and releases
3760 : * the locks, allowing the caller to contemplate and format the data for as
3761 : * long as it pleases.
3762 : */
3763 : BlockedProcsData *
3764 2994 : GetBlockerStatusData(int blocked_pid)
3765 : {
3766 : BlockedProcsData *data;
3767 : PGPROC *proc;
3768 : int i;
3769 :
3770 2994 : data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3771 :
3772 : /*
3773 : * Guess how much space we'll need, and preallocate. Most of the time
3774 : * this will avoid needing to do repalloc while holding the LWLocks. (We
3775 : * assume, but check with an Assert, that MaxBackends is enough entries
3776 : * for the procs[] array; the other two could need enlargement, though.)
3777 : */
3778 2994 : data->nprocs = data->nlocks = data->npids = 0;
3779 2994 : data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3780 2994 : data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3781 2994 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3782 2994 : data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3783 :
3784 : /*
3785 : * In order to search the ProcArray for blocked_pid and assume that that
3786 : * entry won't immediately disappear under us, we must hold ProcArrayLock.
3787 : * In addition, to examine the lock grouping fields of any other backend,
3788 : * we must hold all the hash partition locks. (Only one of those locks is
3789 : * actually relevant for any one lock group, but we can't know which one
3790 : * ahead of time.) It's fairly annoying to hold all those locks
3791 : * throughout this, but it's no worse than GetLockStatusData(), and it
3792 : * does have the advantage that we're guaranteed to return a
3793 : * self-consistent instantaneous state.
3794 : */
3795 2994 : LWLockAcquire(ProcArrayLock, LW_SHARED);
3796 :
3797 2994 : proc = BackendPidGetProcWithLock(blocked_pid);
3798 :
3799 : /* Nothing to do if it's gone */
3800 2994 : if (proc != NULL)
3801 : {
3802 : /*
3803 : * Acquire lock on the entire shared lock data structure. See notes
3804 : * in GetLockStatusData().
3805 : */
3806 50898 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3807 47904 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3808 :
3809 2994 : if (proc->lockGroupLeader == NULL)
3810 : {
3811 : /* Easy case, proc is not a lock group member */
3812 2628 : GetSingleProcBlockerStatusData(proc, data);
3813 : }
3814 : else
3815 : {
3816 : /* Examine all procs in proc's lock group */
3817 : dlist_iter iter;
3818 :
3819 848 : dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3820 : {
3821 : PGPROC *memberProc;
3822 :
3823 482 : memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3824 482 : GetSingleProcBlockerStatusData(memberProc, data);
3825 : }
3826 : }
3827 :
3828 : /*
3829 : * And release locks. See notes in GetLockStatusData().
3830 : */
3831 50898 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3832 47904 : LWLockRelease(LockHashPartitionLockByIndex(i));
3833 :
3834 : Assert(data->nprocs <= data->maxprocs);
3835 : }
3836 :
3837 2994 : LWLockRelease(ProcArrayLock);
3838 :
3839 2994 : return data;
3840 : }
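: /*
:  * Note: this is the basis of pg_blocking_pids() (see lockfuncs.c), which
:  * condenses the procs[]/locks[]/waiter_pids[] arrays returned here into a
:  * simple PID list:
:  *
:  *     SELECT pg_blocking_pids(<some backend pid>);
:  */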
3841 :
3842 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3843 : static void
3844 3110 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3845 : {
3846 3110 : LOCK *theLock = blocked_proc->waitLock;
3847 : BlockedProcData *bproc;
3848 : dlist_iter proclock_iter;
3849 : dlist_iter proc_iter;
3850 : dclist_head *waitQueue;
3851 : int queue_size;
3852 :
3853 : /* Nothing to do if this proc is not blocked */
3854 3110 : if (theLock == NULL)
3855 920 : return;
3856 :
3857 : /* Set up a procs[] element */
3858 2190 : bproc = &data->procs[data->nprocs++];
3859 2190 : bproc->pid = blocked_proc->pid;
3860 2190 : bproc->first_lock = data->nlocks;
3861 2190 : bproc->first_waiter = data->npids;
3862 :
3863 : /*
3864 : * We may ignore the proc's fast-path arrays, since nothing in those could
3865 : * be related to a contended lock.
3866 : */
3867 :
3868 : /* Collect all PROCLOCKs associated with theLock */
3869 6658 : dlist_foreach(proclock_iter, &theLock->procLocks)
3870 : {
3871 4468 : PROCLOCK *proclock =
3872 4468 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3873 4468 : PGPROC *proc = proclock->tag.myProc;
3874 4468 : LOCK *lock = proclock->tag.myLock;
3875 : LockInstanceData *instance;
3876 :
3877 4468 : if (data->nlocks >= data->maxlocks)
3878 : {
3879 0 : data->maxlocks += MaxBackends;
3880 0 : data->locks = (LockInstanceData *)
3881 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3882 : }
3883 :
3884 4468 : instance = &data->locks[data->nlocks];
3885 4468 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3886 4468 : instance->holdMask = proclock->holdMask;
3887 4468 : if (proc->waitLock == lock)
3888 2262 : instance->waitLockMode = proc->waitLockMode;
3889 : else
3890 2206 : instance->waitLockMode = NoLock;
3891 4468 : instance->backend = proc->backendId;
3892 4468 : instance->lxid = proc->lxid;
3893 4468 : instance->pid = proc->pid;
3894 4468 : instance->leaderPid = proclock->groupLeader->pid;
3895 4468 : instance->fastpath = false;
3896 4468 : data->nlocks++;
3897 : }
3898 :
3899 : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3900 2190 : waitQueue = &(theLock->waitProcs);
3901 2190 : queue_size = dclist_count(waitQueue);
3902 :
3903 2190 : if (queue_size > data->maxpids - data->npids)
3904 : {
3905 0 : data->maxpids = Max(data->maxpids + MaxBackends,
3906 : data->npids + queue_size);
3907 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
3908 0 : sizeof(int) * data->maxpids);
3909 : }
3910 :
3911 : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3912 2226 : dclist_foreach(proc_iter, waitQueue)
3913 : {
3914 2226 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3915 :
3916 2226 : if (queued_proc == blocked_proc)
3917 2190 : break;
3918 36 : data->waiter_pids[data->npids++] = queued_proc->pid;
3920 : }
3921 :
3922 2190 : bproc->num_locks = data->nlocks - bproc->first_lock;
3923 2190 : bproc->num_waiters = data->npids - bproc->first_waiter;
3924 : }
3925 :
3926 : /*
3927 : * Returns a list of currently held AccessExclusiveLocks, for use by
3928 : * LogStandbySnapshot(). The result is a palloc'd array,
3929 : * with the number of elements returned into *nlocks.
3930 : *
3931 : * XXX This currently takes a lock on all partitions of the lock table,
3932 : * but it's possible to do better. By reference counting locks and storing
3933 : * the value in the ProcArray entry for each backend we could tell if any
3934 : * locks need recording without having to acquire the partition locks and
3935 : * scan the lock table. Whether that's worth the additional overhead
3936 : * is pretty dubious though.
3937 : */
3938 : xl_standby_lock *
3939 1324 : GetRunningTransactionLocks(int *nlocks)
3940 : {
3941 : xl_standby_lock *accessExclusiveLocks;
3942 : PROCLOCK *proclock;
3943 : HASH_SEQ_STATUS seqstat;
3944 : int i;
3945 : int index;
3946 : int els;
3947 :
3948 : /*
3949 : * Acquire lock on the entire shared lock data structure.
3950 : *
3951 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3952 : */
3953 22508 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3954 21184 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3955 :
3956 : /* Now we can safely count the number of proclocks */
3957 1324 : els = hash_get_num_entries(LockMethodProcLockHash);
3958 :
3959 : /*
3960 : * Allocating enough space for all locks in the lock table is overkill,
3961 : * but it's more convenient and faster than having to enlarge the array.
3962 : */
3963 1324 : accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3964 :
3965 : /* Now scan the tables to copy the data */
3966 1324 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3967 :
3968 : /*
3969 : * If lock is a currently granted AccessExclusiveLock then it will have
3970 : * just one proclock holder, so locks are never accessed twice in this
3971 : * particular case. Don't copy this code for use elsewhere because in the
3972 : * general case this will give you duplicate locks when looking at
3973 : * non-exclusive lock types.
3974 : */
3975 1324 : index = 0;
3976 2828 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3977 : {
3978 : /* make sure this definition matches the one used in LockAcquire */
3979 1504 : if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3980 234 : proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3981 : {
3982 174 : PGPROC *proc = proclock->tag.myProc;
3983 174 : LOCK *lock = proclock->tag.myLock;
3984 174 : TransactionId xid = proc->xid;
3985 :
3986 : /*
3987 : * Don't record locks for transactions if we know they have
 3988 : * already issued their WAL record for commit but have not yet released
 3989 : * the lock. It is still possible that we see locks held by already
3990 : * complete transactions, if they haven't yet zeroed their xids.
3991 : */
3992 174 : if (!TransactionIdIsValid(xid))
3993 2 : continue;
3994 :
3995 172 : accessExclusiveLocks[index].xid = xid;
3996 172 : accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
3997 172 : accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
3998 :
3999 172 : index++;
4000 : }
4001 : }
4002 :
4003 : Assert(index <= els);
4004 :
4005 : /*
4006 : * And release locks. We do this in reverse order for two reasons: (1)
4007 : * Anyone else who needs more than one of the locks will be trying to lock
4008 : * them in increasing order; we don't want to release the other process
4009 : * until it can get all the locks it needs. (2) This avoids O(N^2)
4010 : * behavior inside LWLockRelease.
4011 : */
4012 22508 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4013 21184 : LWLockRelease(LockHashPartitionLockByIndex(i));
4014 :
4015 1324 : *nlocks = index;
4016 1324 : return accessExclusiveLocks;
4017 : }
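: /*
:  * For reference (sketch): each returned element carries just enough to
:  * re-take the lock during recovery, cf. xl_standby_lock in
:  * storage/standbydefs.h:
:  *
:  *     typedef struct xl_standby_lock
:  *     {
:  *         TransactionId xid;     // holder of the AccessExclusiveLock
:  *         Oid           dbOid;   // lock->tag.locktag_field1
:  *         Oid           relOid;  // lock->tag.locktag_field2
:  *     } xl_standby_lock;
:  */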
4018 :
4019 : /* Provide the textual name of any lock mode */
4020 : const char *
4021 9248 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4022 : {
4023 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4024 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4025 9248 : return LockMethods[lockmethodid]->lockModeNames[mode];
4026 : }
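: /*
:  * Example: GetLockmodeName(DEFAULT_LOCKMETHOD, AccessShareLock) returns
:  * "AccessShareLock"; these are the same strings shown in the pg_locks
:  * "mode" column and printed in lock-wait and deadlock error messages.
:  */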
4027 :
4028 : #ifdef LOCK_DEBUG
4029 : /*
4030 : * Dump all locks in the given proc's myProcLocks lists.
4031 : *
4032 : * Caller is responsible for having acquired appropriate LWLocks.
4033 : */
4034 : void
4035 : DumpLocks(PGPROC *proc)
4036 : {
4037 : int i;
4038 :
4039 : if (proc == NULL)
4040 : return;
4041 :
4042 : if (proc->waitLock)
4043 : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4044 :
4045 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4046 : {
4047 : dlist_head *procLocks = &proc->myProcLocks[i];
4048 : dlist_iter iter;
4049 :
4050 : dlist_foreach(iter, procLocks)
4051 : {
4052 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4053 : LOCK *lock = proclock->tag.myLock;
4054 :
4055 : Assert(proclock->tag.myProc == proc);
4056 : PROCLOCK_PRINT("DumpLocks", proclock);
4057 : LOCK_PRINT("DumpLocks", lock, 0);
4058 : }
4059 : }
4060 : }
4061 :
4062 : /*
4063 : * Dump all lmgr locks.
4064 : *
4065 : * Caller is responsible for having acquired appropriate LWLocks.
4066 : */
4067 : void
4068 : DumpAllLocks(void)
4069 : {
4070 : PGPROC *proc;
4071 : PROCLOCK *proclock;
4072 : LOCK *lock;
4073 : HASH_SEQ_STATUS status;
4074 :
4075 : proc = MyProc;
4076 :
4077 : if (proc && proc->waitLock)
4078 : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4079 :
4080 : hash_seq_init(&status, LockMethodProcLockHash);
4081 :
4082 : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4083 : {
4084 : PROCLOCK_PRINT("DumpAllLocks", proclock);
4085 :
4086 : lock = proclock->tag.myLock;
4087 : if (lock)
4088 : LOCK_PRINT("DumpAllLocks", lock, 0);
4089 : else
4090 : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4091 : }
4092 : }
4093 : #endif /* LOCK_DEBUG */
4094 :
4095 : /*
4096 : * LOCK 2PC resource manager's routines
4097 : */
4098 :
4099 : /*
4100 : * Re-acquire a lock belonging to a transaction that was prepared.
4101 : *
4102 : * Because this function is run at db startup, re-acquiring the locks should
4103 : * never conflict with running transactions because there are none. We
4104 : * assume that the lock state represented by the stored 2PC files is legal.
4105 : *
4106 : * When switching from Hot Standby mode to normal operation, the locks will
4107 : * be already held by the startup process. The locks are acquired for the new
4108 : * procs without checking for conflicts, so we don't get a conflict between the
4109 : * startup process and the dummy procs, even though we will momentarily have
4110 : * a situation where two procs are holding the same AccessExclusiveLock,
 4111 : * which isn't normally possible because of the conflict. If we're in standby
4112 : * mode, but a recovery snapshot hasn't been established yet, it's possible
4113 : * that some but not all of the locks are already held by the startup process.
4114 : *
4115 : * This approach is simple, but also a bit dangerous, because if there isn't
4116 : * enough shared memory to acquire the locks, an error will be thrown, which
 4117 : * is promoted to FATAL and recovery will abort, bringing down the postmaster.
4118 : * A safer approach would be to transfer the locks like we do in
4119 : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4120 : * read-only backends to use up all the shared lock memory anyway, so that
4121 : * replaying the WAL record that needs to acquire a lock will throw an error
4122 : * and PANIC anyway.
4123 : */
4124 : void
4125 158 : lock_twophase_recover(TransactionId xid, uint16 info,
4126 : void *recdata, uint32 len)
4127 : {
4128 158 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4129 158 : PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4130 : LOCKTAG *locktag;
4131 : LOCKMODE lockmode;
4132 : LOCKMETHODID lockmethodid;
4133 : LOCK *lock;
4134 : PROCLOCK *proclock;
4135 : PROCLOCKTAG proclocktag;
4136 : bool found;
4137 : uint32 hashcode;
4138 : uint32 proclock_hashcode;
4139 : int partition;
4140 : LWLock *partitionLock;
4141 : LockMethod lockMethodTable;
4142 :
4143 : Assert(len == sizeof(TwoPhaseLockRecord));
4144 158 : locktag = &rec->locktag;
4145 158 : lockmode = rec->lockmode;
4146 158 : lockmethodid = locktag->locktag_lockmethodid;
4147 :
4148 158 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4149 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4150 158 : lockMethodTable = LockMethods[lockmethodid];
4151 :
4152 158 : hashcode = LockTagHashCode(locktag);
4153 158 : partition = LockHashPartition(hashcode);
4154 158 : partitionLock = LockHashPartitionLock(hashcode);
4155 :
4156 158 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4157 :
4158 : /*
4159 : * Find or create a lock with this tag.
4160 : */
4161 158 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4162 : locktag,
4163 : hashcode,
4164 : HASH_ENTER_NULL,
4165 : &found);
4166 158 : if (!lock)
4167 : {
4168 0 : LWLockRelease(partitionLock);
4169 0 : ereport(ERROR,
4170 : (errcode(ERRCODE_OUT_OF_MEMORY),
4171 : errmsg("out of shared memory"),
4172 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
4173 : }
4174 :
4175 : /*
4176 : * if it's a new lock object, initialize it
4177 : */
4178 158 : if (!found)
4179 : {
4180 142 : lock->grantMask = 0;
4181 142 : lock->waitMask = 0;
4182 142 : dlist_init(&lock->procLocks);
4183 142 : dclist_init(&lock->waitProcs);
4184 142 : lock->nRequested = 0;
4185 142 : lock->nGranted = 0;
4186 852 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4187 142 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4188 : LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4189 : }
4190 : else
4191 : {
4192 : LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4193 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4194 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4195 : Assert(lock->nGranted <= lock->nRequested);
4196 : }
4197 :
4198 : /*
4199 : * Create the hash key for the proclock table.
4200 : */
4201 158 : proclocktag.myLock = lock;
4202 158 : proclocktag.myProc = proc;
4203 :
4204 158 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4205 :
4206 : /*
4207 : * Find or create a proclock entry with this tag
4208 : */
4209 158 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4210 : &proclocktag,
4211 : proclock_hashcode,
4212 : HASH_ENTER_NULL,
4213 : &found);
4214 158 : if (!proclock)
4215 : {
4216 : /* Oops, not enough shmem for the proclock */
4217 0 : if (lock->nRequested == 0)
4218 : {
4219 : /*
4220 : * There are no other requestors of this lock, so garbage-collect
4221 : * the lock object. We *must* do this to avoid a permanent leak
4222 : * of shared memory, because there won't be anything to cause
4223 : * anyone to release the lock object later.
4224 : */
4225 : Assert(dlist_is_empty(&lock->procLocks));
4226 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
4227 0 : &(lock->tag),
4228 : hashcode,
4229 : HASH_REMOVE,
4230 : NULL))
4231 0 : elog(PANIC, "lock table corrupted");
4232 : }
4233 0 : LWLockRelease(partitionLock);
4234 0 : ereport(ERROR,
4235 : (errcode(ERRCODE_OUT_OF_MEMORY),
4236 : errmsg("out of shared memory"),
4237 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
4238 : }
4239 :
4240 : /*
4241 : * If new, initialize the new entry
4242 : */
4243 158 : if (!found)
4244 : {
4245 : Assert(proc->lockGroupLeader == NULL);
4246 150 : proclock->groupLeader = proc;
4247 150 : proclock->holdMask = 0;
4248 150 : proclock->releaseMask = 0;
4249 : /* Add proclock to appropriate lists */
4250 150 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4251 150 : dlist_push_tail(&proc->myProcLocks[partition],
4252 : &proclock->procLink);
4253 : PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4254 : }
4255 : else
4256 : {
4257 : PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4258 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
4259 : }
4260 :
4261 : /*
4262 : * lock->nRequested and lock->requested[] count the total number of
4263 : * requests, whether granted or waiting, so increment those immediately.
4264 : */
4265 158 : lock->nRequested++;
4266 158 : lock->requested[lockmode]++;
4267 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4268 :
4269 : /*
4270 : * We shouldn't already hold the desired lock.
4271 : */
4272 158 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
4273 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
4274 : lockMethodTable->lockModeNames[lockmode],
4275 : lock->tag.locktag_field1, lock->tag.locktag_field2,
4276 : lock->tag.locktag_field3);
4277 :
4278 : /*
4279 : * We ignore any possible conflicts and just grant ourselves the lock. Not
4280 : * only because we don't bother, but also to avoid deadlocks when
4281 : * switching from standby to normal mode. See function comment.
4282 : */
4283 158 : GrantLock(lock, proclock, lockmode);
4284 :
4285 : /*
4286 : * Bump strong lock count, to make sure any fast-path lock requests won't
4287 : * be granted without consulting the primary lock table.
4288 : */
4289 158 : if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4290 : {
4291 26 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4292 :
4293 26 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4294 26 : FastPathStrongRelationLocks->count[fasthashcode]++;
4295 26 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4296 : }
4297 :
4298 158 : LWLockRelease(partitionLock);
4299 158 : }
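: /*
:  * Note: this callback is not called directly; twophase_rmgr.c dispatches
:  * TWOPHASE_RM_LOCK_ID records to the recover/postcommit/postabort
:  * callbacks in this file during startup recovery and at COMMIT PREPARED /
:  * ROLLBACK PREPARED.
:  */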
4300 :
4301 : /*
4302 : * Re-acquire a lock belonging to a transaction that was prepared, when
4303 : * starting up into hot standby mode.
4304 : */
4305 : void
4306 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4307 : void *recdata, uint32 len)
4308 : {
4309 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4310 : LOCKTAG *locktag;
4311 : LOCKMODE lockmode;
4312 : LOCKMETHODID lockmethodid;
4313 :
4314 : Assert(len == sizeof(TwoPhaseLockRecord));
4315 0 : locktag = &rec->locktag;
4316 0 : lockmode = rec->lockmode;
4317 0 : lockmethodid = locktag->locktag_lockmethodid;
4318 :
4319 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4320 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4321 :
4322 0 : if (lockmode == AccessExclusiveLock &&
4323 0 : locktag->locktag_type == LOCKTAG_RELATION)
4324 : {
4325 0 : StandbyAcquireAccessExclusiveLock(xid,
4326 : locktag->locktag_field1 /* dboid */ ,
4327 : locktag->locktag_field2 /* reloid */ );
4328 : }
4329 0 : }
4330 :
4331 :
4332 : /*
4333 : * 2PC processing routine for COMMIT PREPARED case.
4334 : *
4335 : * Find and release the lock indicated by the 2PC record.
4336 : */
4337 : void
4338 1786 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4339 : void *recdata, uint32 len)
4340 : {
4341 1786 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4342 1786 : PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4343 : LOCKTAG *locktag;
4344 : LOCKMETHODID lockmethodid;
4345 : LockMethod lockMethodTable;
4346 :
4347 : Assert(len == sizeof(TwoPhaseLockRecord));
4348 1786 : locktag = &rec->locktag;
4349 1786 : lockmethodid = locktag->locktag_lockmethodid;
4350 :
4351 1786 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4352 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4353 1786 : lockMethodTable = LockMethods[lockmethodid];
4354 :
4355 1786 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4356 1786 : }
4357 :
4358 : /*
4359 : * 2PC processing routine for ROLLBACK PREPARED case.
4360 : *
4361 : * This is actually just the same as the COMMIT case.
4362 : */
4363 : void
4364 244 : lock_twophase_postabort(TransactionId xid, uint16 info,
4365 : void *recdata, uint32 len)
4366 : {
4367 244 : lock_twophase_postcommit(xid, info, recdata, len);
4368 244 : }
4369 :
4370 : /*
4371 : * VirtualXactLockTableInsert
4372 : *
4373 : * Take vxid lock via the fast-path. There can't be any pre-existing
4374 : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4375 : *
4376 : * Since MyProc->fpLocalTransactionId will normally contain the same data
4377 : * as MyProc->lxid, you might wonder if we really need both. The
4378 : * difference is that MyProc->lxid is set and cleared unlocked, and
4379 : * examined by procarray.c, while fpLocalTransactionId is protected by
4380 : * fpInfoLock and is used only by the locking subsystem. Doing it this
4381 : * way makes it easier to verify that there are no funny race conditions.
4382 : *
4383 : * We don't bother recording this lock in the local lock table, since it's
4384 : * only ever released at the end of a transaction. Instead,
4385 : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4386 : */
4387 : void
4388 514308 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4389 : {
4390 : Assert(VirtualTransactionIdIsValid(vxid));
4391 :
4392 514308 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4393 :
4394 : Assert(MyProc->backendId == vxid.backendId);
4395 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4396 : Assert(MyProc->fpVXIDLock == false);
4397 :
4398 514308 : MyProc->fpVXIDLock = true;
4399 514308 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4400 :
4401 514308 : LWLockRelease(&MyProc->fpInfoLock);
4402 514308 : }
4403 :
4404 : /*
4405 : * VirtualXactLockTableCleanup
4406 : *
4407 : * Check whether a VXID lock has been materialized; if so, release it,
4408 : * unblocking waiters.
4409 : */
4410 : void
4411 514978 : VirtualXactLockTableCleanup(void)
4412 : {
4413 : bool fastpath;
4414 : LocalTransactionId lxid;
4415 :
4416 : Assert(MyProc->backendId != InvalidBackendId);
4417 :
4418 : /*
4419 : * Clean up shared memory state.
4420 : */
4421 514978 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4422 :
4423 514978 : fastpath = MyProc->fpVXIDLock;
4424 514978 : lxid = MyProc->fpLocalTransactionId;
4425 514978 : MyProc->fpVXIDLock = false;
4426 514978 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4427 :
4428 514978 : LWLockRelease(&MyProc->fpInfoLock);
4429 :
4430 : /*
4431 : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4432 : * that means someone transferred the lock to the main lock table.
4433 : */
4434 514978 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4435 : {
4436 : VirtualTransactionId vxid;
4437 : LOCKTAG locktag;
4438 :
4439 548 : vxid.backendId = MyBackendId;
4440 548 : vxid.localTransactionId = lxid;
4441 548 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4442 :
4443 548 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4444 : &locktag, ExclusiveLock, false);
4445 : }
4446 514978 : }
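: /*
:  * Sketch of the fast-path VXID lock life cycle, tying the two functions
:  * above to VirtualXactLock() below:
:  *
:  *     VirtualXactLockTableInsert(vxid);  // xact start: take fast-path lock
:  *     ...                                // a waiter may meanwhile transfer
:  *                                        // it to the main table, see
:  *                                        // VirtualXactLock()
:  *     VirtualXactLockTableCleanup();     // xact end: release either form
:  */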
4447 :
4448 : /*
4449 : * XactLockForVirtualXact
4450 : *
4451 : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4452 : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4453 : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4454 : * prepared, committed, or aborted.
4455 : *
4456 : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4457 : * known as "vxid" before its PREPARE TRANSACTION.
4458 : */
4459 : static bool
4460 592 : XactLockForVirtualXact(VirtualTransactionId vxid,
4461 : TransactionId xid, bool wait)
4462 : {
4463 592 : bool more = false;
4464 :
4465 : /* There is no point to wait for 2PCs if you have no 2PCs. */
4466 592 : if (max_prepared_xacts == 0)
4467 188 : return true;
4468 :
4469 : do
4470 : {
4471 : LockAcquireResult lar;
4472 : LOCKTAG tag;
4473 :
4474 : /* Clear state from previous iterations. */
4475 404 : if (more)
4476 : {
4477 0 : xid = InvalidTransactionId;
4478 0 : more = false;
4479 : }
4480 :
4481 : /* If we have no xid, try to find one. */
4482 404 : if (!TransactionIdIsValid(xid))
4483 210 : xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4484 404 : if (!TransactionIdIsValid(xid))
4485 : {
4486 : Assert(!more);
4487 186 : return true;
4488 : }
4489 :
4490 : /* Check or wait for XID completion. */
4491 218 : SET_LOCKTAG_TRANSACTION(tag, xid);
4492 218 : lar = LockAcquire(&tag, ShareLock, false, !wait);
4493 218 : if (lar == LOCKACQUIRE_NOT_AVAIL)
4494 0 : return false;
4495 218 : LockRelease(&tag, ShareLock, false);
4496 218 : } while (more);
4497 :
4498 218 : return true;
4499 : }
4500 :
4501 : /*
4502 : * VirtualXactLock
4503 : *
4504 : * If wait = true, wait as long as the given VXID or any XID acquired by the
4505 : * same transaction is still running. Then, return true.
4506 : *
4507 : * If wait = false, just check whether that VXID or one of those XIDs is still
4508 : * running, and return true or false.
4509 : */
4510 : bool
4511 672 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4512 : {
4513 : LOCKTAG tag;
4514 : PGPROC *proc;
4515 672 : TransactionId xid = InvalidTransactionId;
4516 :
4517 : Assert(VirtualTransactionIdIsValid(vxid));
4518 :
4519 672 : if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4520 : /* no vxid lock; localTransactionId is a normal, locked XID */
4521 2 : return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4522 :
4523 670 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4524 :
4525 : /*
4526 : * If a lock table entry must be made, this is the PGPROC on whose behalf
4527 : * it must be done. Note that the transaction might end or the PGPROC
4528 : * might be reassigned to a new backend before we get around to examining
4529 : * it, but it doesn't matter. If we find upon examination that the
4530 : * relevant lxid is no longer running here, that's enough to prove that
4531 : * it's no longer running anywhere.
4532 : */
4533 670 : proc = BackendIdGetProc(vxid.backendId);
4534 670 : if (proc == NULL)
4535 6 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4536 :
4537 : /*
4538 : * We must acquire this lock before checking the backendId and lxid
4539 : * against the ones we're waiting for. The target backend will only set
4540 : * or clear lxid while holding this lock.
4541 : */
4542 664 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4543 :
4544 664 : if (proc->backendId != vxid.backendId
4545 664 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4546 : {
4547 : /* VXID ended */
4548 76 : LWLockRelease(&proc->fpInfoLock);
4549 76 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4550 : }
4551 :
4552 : /*
4553 : * If we aren't asked to wait, there's no need to set up a lock table
4554 : * entry. The transaction is still in progress, so just return false.
4555 : */
4556 588 : if (!wait)
4557 : {
4558 30 : LWLockRelease(&proc->fpInfoLock);
4559 30 : return false;
4560 : }
4561 :
4562 : /*
4563 : * OK, we're going to need to sleep on the VXID. But first, we must set
4564 : * up the primary lock table entry, if needed (ie, convert the proc's
4565 : * fast-path lock on its VXID to a regular lock).
4566 : */
4567 558 : if (proc->fpVXIDLock)
4568 : {
4569 : PROCLOCK *proclock;
4570 : uint32 hashcode;
4571 : LWLock *partitionLock;
4572 :
4573 548 : hashcode = LockTagHashCode(&tag);
4574 :
4575 548 : partitionLock = LockHashPartitionLock(hashcode);
4576 548 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4577 :
4578 548 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4579 : &tag, hashcode, ExclusiveLock);
4580 548 : if (!proclock)
4581 : {
4582 0 : LWLockRelease(partitionLock);
4583 0 : LWLockRelease(&proc->fpInfoLock);
4584 0 : ereport(ERROR,
4585 : (errcode(ERRCODE_OUT_OF_MEMORY),
4586 : errmsg("out of shared memory"),
4587 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
4588 : }
4589 548 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4590 :
4591 548 : LWLockRelease(partitionLock);
4592 :
4593 548 : proc->fpVXIDLock = false;
4594 : }
4595 :
4596 : /*
4597 : * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4598 : * search. The proc might have assigned this XID but not yet locked it,
4599 : * in which case the proc will lock this XID before releasing the VXID.
4600 : * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4601 : * so we won't save an XID of a different VXID. It doesn't matter whether
4602 : * we save this before or after setting up the primary lock table entry.
4603 : */
4604 558 : xid = proc->xid;
4605 :
4606 : /* Done with proc->fpLockBits */
4607 558 : LWLockRelease(&proc->fpInfoLock);
4608 :
4609 : /* Time to wait. */
4610 558 : (void) LockAcquire(&tag, ShareLock, false, false);
4611 :
4612 508 : LockRelease(&tag, ShareLock, false);
4613 508 : return XactLockForVirtualXact(vxid, xid, wait);
4614 : }
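: /*
:  * Typical caller pattern (sketch; cf. WaitForLockersMultiple() in lmgr.c,
:  * used e.g. by CREATE INDEX CONCURRENTLY): collect conflicting VXIDs with
:  * GetLockConflicts(), then wait them out one at a time:
:  *
:  *     VirtualTransactionId *vxids = GetLockConflicts(&tag, mode, NULL);
:  *
:  *     while (VirtualTransactionIdIsValid(*vxids))
:  *         VirtualXactLock(*vxids++, true);
:  */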
4615 :
4616 : /*
4617 : * LockWaiterCount
4618 : *
4619 : * Find the number of lock requester on this locktag
4620 : */
4621 : int
4622 105892 : LockWaiterCount(const LOCKTAG *locktag)
4623 : {
4624 105892 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4625 : LOCK *lock;
4626 : bool found;
4627 : uint32 hashcode;
4628 : LWLock *partitionLock;
4629 105892 : int waiters = 0;
4630 :
4631 105892 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4632 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4633 :
4634 105892 : hashcode = LockTagHashCode(locktag);
4635 105892 : partitionLock = LockHashPartitionLock(hashcode);
4636 105892 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4637 :
4638 105892 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4639 : locktag,
4640 : hashcode,
4641 : HASH_FIND,
4642 : &found);
4643 105892 : if (found)
4644 : {
4645 : Assert(lock != NULL);
4646 52 : waiters = lock->nRequested;
4647 : }
4648 105892 : LWLockRelease(partitionLock);
4649 :
4650 105892 : return waiters;
4651 : }
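: /*
:  * Note: a representative caller is the relation-extension logic in
:  * access/heap/hio.c, which (via RelationExtensionLockWaiterCount() in
:  * lmgr.c) uses this count to scale how many blocks to add at once when
:  * many backends are queued on the extension lock.
:  */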