Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * lock.c
4 : * POSTGRES primary lock mechanism
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/lock.c
12 : *
13 : * NOTES
14 : * A lock table is a shared memory hash table. When
15 : * a process tries to acquire a lock of a type that conflicts
16 : * with existing locks, it is put to sleep using the routines
17 : * in storage/lmgr/proc.c.
18 : *
19 : * For the most part, this code should be invoked via lmgr.c
20 : * or another lock-management module, not directly.
21 : *
22 : * Interface:
23 : *
24 : * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : * LockCheckConflicts(), GrantLock()
27 : *
28 : *-------------------------------------------------------------------------
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 :
35 : #include "access/transam.h"
36 : #include "access/twophase.h"
37 : #include "access/twophase_rmgr.h"
38 : #include "access/xact.h"
39 : #include "access/xlog.h"
40 : #include "access/xlogutils.h"
41 : #include "miscadmin.h"
42 : #include "pg_trace.h"
43 : #include "pgstat.h"
44 : #include "storage/proc.h"
45 : #include "storage/procarray.h"
46 : #include "storage/sinvaladt.h"
47 : #include "storage/spin.h"
48 : #include "storage/standby.h"
49 : #include "utils/memutils.h"
50 : #include "utils/ps_status.h"
51 : #include "utils/resowner_private.h"
52 :
53 :
54 : /* This configuration variable is used to set the lock table size */
55 : int max_locks_per_xact; /* set by guc.c */
56 :
57 : #define NLOCKENTS() \
58 : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
59 :
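/*
 * Illustrative sizing (hypothetical settings, not defaults): with
 * max_locks_per_xact = 64, MaxBackends = 100 and max_prepared_xacts = 0,
 * NLOCKENTS() works out to 64 * (100 + 0) = 6400 shared lock-table entries.
 */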
60 :
61 : /*
62 : * Data structures defining the semantics of the standard lock methods.
63 : *
64 : * The conflict table defines the semantics of the various lock modes.
65 : */
66 : static const LOCKMASK LockConflicts[] = {
67 : 0,
68 :
69 : /* AccessShareLock */
70 : LOCKBIT_ON(AccessExclusiveLock),
71 :
72 : /* RowShareLock */
73 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
74 :
75 : /* RowExclusiveLock */
76 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
77 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
78 :
79 : /* ShareUpdateExclusiveLock */
80 : LOCKBIT_ON(ShareUpdateExclusiveLock) |
81 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
82 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
83 :
84 : /* ShareLock */
85 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
86 : LOCKBIT_ON(ShareRowExclusiveLock) |
87 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
88 :
89 : /* ShareRowExclusiveLock */
90 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
91 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
92 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
93 :
94 : /* ExclusiveLock */
95 : LOCKBIT_ON(RowShareLock) |
96 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
97 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
98 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
99 :
100 : /* AccessExclusiveLock */
101 : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
102 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
103 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
104 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
105 :
106 : };
107 :
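/*
 * Reading the table: the RowExclusiveLock entry says it conflicts with
 * Share, ShareRowExclusive, Exclusive and AccessExclusive; conversely,
 * AccessShareLock (taken by plain SELECT) conflicts only with
 * AccessExclusiveLock, which is why reads block only on operations such as
 * DROP TABLE that take AccessExclusiveLock.
 */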
108 : /* Names of lock modes, for debug printouts */
109 : static const char *const lock_mode_names[] =
110 : {
111 : "INVALID",
112 : "AccessShareLock",
113 : "RowShareLock",
114 : "RowExclusiveLock",
115 : "ShareUpdateExclusiveLock",
116 : "ShareLock",
117 : "ShareRowExclusiveLock",
118 : "ExclusiveLock",
119 : "AccessExclusiveLock"
120 : };
121 :
122 : #ifndef LOCK_DEBUG
123 : static bool Dummy_trace = false;
124 : #endif
125 :
126 : static const LockMethodData default_lockmethod = {
127 : MaxLockMode,
128 : LockConflicts,
129 : lock_mode_names,
130 : #ifdef LOCK_DEBUG
131 : &Trace_locks
132 : #else
133 : &Dummy_trace
134 : #endif
135 : };
136 :
137 : static const LockMethodData user_lockmethod = {
138 : MaxLockMode,
139 : LockConflicts,
140 : lock_mode_names,
141 : #ifdef LOCK_DEBUG
142 : &Trace_userlocks
143 : #else
144 : &Dummy_trace
145 : #endif
146 : };
147 :
148 : /*
149 : * map from lock method id to the lock table data structures
150 : */
151 : static const LockMethod LockMethods[] = {
152 : NULL,
153 : &default_lockmethod,
154 : &user_lockmethod
155 : };
156 :
157 :
158 : /* Record that's written to 2PC state file when a lock is persisted */
159 : typedef struct TwoPhaseLockRecord
160 : {
161 : LOCKTAG locktag;
162 : LOCKMODE lockmode;
163 : } TwoPhaseLockRecord;
164 :
165 :
166 : /*
167 : * Count of the number of fast path lock slots we believe to be used. This
168 : * might be higher than the real number if another backend has transferred
169 : * our locks to the primary lock table, but it can never be lower than the
170 : * real value, since only we can acquire locks on our own behalf.
171 : */
172 : static int FastPathLocalUseCount = 0;
173 :
174 : /*
175 : * Flag to indicate if the relation extension lock is held by this backend.
176 : * This flag is used to ensure that while holding the relation extension lock
177 : * we don't try to acquire a heavyweight lock on any other object. This
178 : * restriction implies that the relation extension lock won't ever participate
179 : * in the deadlock cycle because we can never wait for any other heavyweight
180 : * lock after acquiring this lock.
181 : *
182 : * Such a restriction is okay for relation extension locks because,
183 : * unlike other heavyweight locks, they are not held until transaction
184 : * end; they are taken for a short duration to extend a particular
185 : * relation and then released.
186 : */
187 : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
188 :
189 : /*
190 : * Flag to indicate if the page lock is held by this backend. We don't
191 : * acquire any other heavyweight lock while holding the page lock except for
192 : * relation extension. However, these locks are never taken in reverse order
193 : * which implies that page locks will also never participate in the deadlock
194 : * cycle.
195 : *
196 : * Similar to relation extension, page locks are also held for a short
197 : * duration, so imposing such a restriction won't hurt.
198 : */
199 : static bool IsPageLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
200 :
201 : /* Macros for manipulating proc->fpLockBits */
202 : #define FAST_PATH_BITS_PER_SLOT 3
203 : #define FAST_PATH_LOCKNUMBER_OFFSET 1
204 : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
205 : #define FAST_PATH_GET_BITS(proc, n) \
206 : (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * (n))) & FAST_PATH_MASK)
207 : #define FAST_PATH_BIT_POSITION(n, l) \
208 : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
209 : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
210 : AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
211 : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
212 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
213 : (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
214 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
215 : (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
216 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
217 : ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
218 :
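/*
 * A minimal standalone sketch (not backend code; all names below are
 * hypothetical) of the 3-bits-per-slot encoding implemented by the macros
 * above: each fast-path slot packs one bit per weak lock mode
 * (AccessShare = 1, RowShare = 2, RowExclusive = 3) into a uint64.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BITS_PER_SLOT		3
#define SKETCH_LOCKNUMBER_OFFSET	1
#define SKETCH_BIT(slot, mode) \
	((mode) - SKETCH_LOCKNUMBER_OFFSET + SKETCH_BITS_PER_SLOT * (slot))

int
main(void)
{
	uint64_t	fpLockBits = 0;

	/* AccessShareLock (mode 1) in slot 0; RowExclusiveLock (mode 3) in slot 2 */
	fpLockBits |= UINT64_C(1) << SKETCH_BIT(0, 1);	/* bit 0 */
	fpLockBits |= UINT64_C(1) << SKETCH_BIT(2, 3);	/* bit 8 */

	/* mirrors FAST_PATH_CHECK_LOCKMODE: prints 1 */
	printf("%d\n", (fpLockBits & (UINT64_C(1) << SKETCH_BIT(2, 3))) != 0);

	/* mirrors FAST_PATH_GET_BITS for slot 0: prints 1 (only mode 1 set) */
	printf("%llx\n", (unsigned long long)
		   ((fpLockBits >> (SKETCH_BITS_PER_SLOT * 0)) & 0x7));
	return 0;
}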
219 : /*
220 : * The fast-path lock mechanism is concerned only with relation locks on
221 : * unshared relations by backends bound to a database. The fast-path
222 : * mechanism exists mostly to accelerate acquisition and release of locks
223 : * that rarely conflict. Because ShareUpdateExclusiveLock is
224 : * self-conflicting, it can't use the fast-path mechanism; but it also does
225 : * not conflict with any of the locks that do, so we can ignore it completely.
226 : */
227 : #define EligibleForRelationFastPath(locktag, mode) \
228 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
229 : (locktag)->locktag_type == LOCKTAG_RELATION && \
230 : (locktag)->locktag_field1 == MyDatabaseId && \
231 : MyDatabaseId != InvalidOid && \
232 : (mode) < ShareUpdateExclusiveLock)
233 : #define ConflictsWithRelationFastPath(locktag, mode) \
234 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
235 : (locktag)->locktag_type == LOCKTAG_RELATION && \
236 : (locktag)->locktag_field1 != InvalidOid && \
237 : (mode) > ShareUpdateExclusiveLock)
238 :
239 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
240 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
241 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
242 : const LOCKTAG *locktag, uint32 hashcode);
243 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
244 :
245 : /*
246 : * To make the fast-path lock mechanism work, we must have some way of
247 : * preventing the use of the fast-path when a conflicting lock might be present.
248 : * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
249 : * and maintain an integer count of the number of "strong" lockers
250 : * in each partition. When any "strong" lockers are present (which is
251 : * hopefully not very often), the fast-path mechanism can't be used, and we
252 : * must fall back to the slower method of pushing matching locks directly
253 : * into the main lock tables.
254 : *
255 : * The deadlock detector does not know anything about the fast path mechanism,
256 : * so any locks that might be involved in a deadlock must be transferred from
257 : * the fast-path queues to the main lock table.
258 : */
259 :
260 : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
261 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
262 : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
263 : #define FastPathStrongLockHashPartition(hashcode) \
264 : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
265 :
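/*
 * For example, a locktag whose LockTagHashCode() comes out as 0xDEADBEEF
 * (hypothetical value) lands in strong-lock partition
 * 0xDEADBEEF % 1024 = 751, i.e. its low FAST_PATH_STRONG_LOCK_HASH_BITS bits.
 */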
266 : typedef struct
267 : {
268 : slock_t mutex;
269 : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
270 : } FastPathStrongRelationLockData;
271 :
272 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
273 :
274 :
275 : /*
276 : * Pointers to hash tables containing lock state
277 : *
278 : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
279 : * shared memory; LockMethodLocalHash is local to each backend.
280 : */
281 : static HTAB *LockMethodLockHash;
282 : static HTAB *LockMethodProcLockHash;
283 : static HTAB *LockMethodLocalHash;
284 :
285 :
286 : /* private state for error cleanup */
287 : static LOCALLOCK *StrongLockInProgress;
288 : static LOCALLOCK *awaitedLock;
289 : static ResourceOwner awaitedOwner;
290 :
291 :
292 : #ifdef LOCK_DEBUG
293 :
294 : /*------
295 : * The following configuration options are available for lock debugging:
296 : *
297 : * TRACE_LOCKS -- give a bunch of output about what's going on in this file
298 : * TRACE_USERLOCKS -- same but for user locks
299 : * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
300 : * (use to avoid output on system tables)
301 : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
302 : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
303 : *
304 : * Furthermore, defined in storage/lmgr/lwlock.c:
305 : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
306 : *
307 : * Define LOCK_DEBUG at compile time to get all these enabled.
308 : * --------
309 : */
310 :
311 : int Trace_lock_oidmin = FirstNormalObjectId;
312 : bool Trace_locks = false;
313 : bool Trace_userlocks = false;
314 : int Trace_lock_table = 0;
315 : bool Debug_deadlocks = false;
316 :
317 :
318 : inline static bool
319 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
320 : {
321 : return
322 : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
323 : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
324 : || (Trace_lock_table &&
325 : (tag->locktag_field2 == Trace_lock_table));
326 : }
327 :
328 :
329 : inline static void
330 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
331 : {
332 : if (LOCK_DEBUG_ENABLED(&lock->tag))
333 : elog(LOG,
334 : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
335 : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
336 : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
337 : where, lock,
338 : lock->tag.locktag_field1, lock->tag.locktag_field2,
339 : lock->tag.locktag_field3, lock->tag.locktag_field4,
340 : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
341 : lock->grantMask,
342 : lock->requested[1], lock->requested[2], lock->requested[3],
343 : lock->requested[4], lock->requested[5], lock->requested[6],
344 : lock->requested[7], lock->nRequested,
345 : lock->granted[1], lock->granted[2], lock->granted[3],
346 : lock->granted[4], lock->granted[5], lock->granted[6],
347 : lock->granted[7], lock->nGranted,
348 : dclist_count(&lock->waitProcs),
349 : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
350 : }
351 :
352 :
353 : inline static void
354 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
355 : {
356 : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
357 : elog(LOG,
358 : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
359 : where, proclockP, proclockP->tag.myLock,
360 : PROCLOCK_LOCKMETHOD(*(proclockP)),
361 : proclockP->tag.myProc, (int) proclockP->holdMask);
362 : }
363 : #else /* not LOCK_DEBUG */
364 :
365 : #define LOCK_PRINT(where, lock, type) ((void) 0)
366 : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
367 : #endif /* not LOCK_DEBUG */
368 :
369 :
370 : static uint32 proclock_hash(const void *key, Size keysize);
371 : static void RemoveLocalLock(LOCALLOCK *locallock);
372 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
373 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
374 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
375 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
376 : static void FinishStrongLockAcquire(void);
377 : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
378 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
379 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
380 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
381 : PROCLOCK *proclock, LockMethod lockMethodTable);
382 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
383 : LockMethod lockMethodTable, uint32 hashcode,
384 : bool wakeupNeeded);
385 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
386 : LOCKTAG *locktag, LOCKMODE lockmode,
387 : bool decrement_strong_lock_count);
388 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
389 : BlockedProcsData *data);
390 :
391 :
392 : /*
393 : * InitLocks -- Initialize the lock manager's data structures.
394 : *
395 : * This is called from CreateSharedMemoryAndSemaphores(), which see for
396 : * more comments. In the normal postmaster case, the shared hash tables
397 : * are created here, as well as a locallock hash table that will remain
398 : * unused and empty in the postmaster itself. Backends inherit the pointers
399 : * to the shared tables via fork(), and also inherit an image of the locallock
400 : * hash table, which they proceed to use. In the EXEC_BACKEND case, each
401 : * backend re-executes this code to obtain pointers to the already existing
402 : * shared hash tables and to create its locallock hash table.
403 : */
404 : void
405 3456 : InitLocks(void)
406 : {
407 : HASHCTL info;
408 : long init_table_size,
409 : max_table_size;
410 : bool found;
411 :
412 : /*
413 : * Compute init/max size to request for lock hashtables. Note these
414 : * calculations must agree with LockShmemSize!
415 : */
416 3456 : max_table_size = NLOCKENTS();
417 3456 : init_table_size = max_table_size / 2;
418 :
419 : /*
420 : * Allocate hash table for LOCK structs. This stores per-locked-object
421 : * information.
422 : */
423 3456 : info.keysize = sizeof(LOCKTAG);
424 3456 : info.entrysize = sizeof(LOCK);
425 3456 : info.num_partitions = NUM_LOCK_PARTITIONS;
426 :
427 3456 : LockMethodLockHash = ShmemInitHash("LOCK hash",
428 : init_table_size,
429 : max_table_size,
430 : &info,
431 : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
432 :
433 : /* Assume an average of 2 holders per lock */
434 3456 : max_table_size *= 2;
435 3456 : init_table_size *= 2;
436 :
437 : /*
438 : * Allocate hash table for PROCLOCK structs. This stores
439 : * per-lock-per-holder information.
440 : */
441 3456 : info.keysize = sizeof(PROCLOCKTAG);
442 3456 : info.entrysize = sizeof(PROCLOCK);
443 3456 : info.hash = proclock_hash;
444 3456 : info.num_partitions = NUM_LOCK_PARTITIONS;
445 :
446 3456 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
447 : init_table_size,
448 : max_table_size,
449 : &info,
450 : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
451 :
452 : /*
453 : * Allocate fast-path structures.
454 : */
455 3456 : FastPathStrongRelationLocks =
456 3456 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
457 : sizeof(FastPathStrongRelationLockData), &found);
458 3456 : if (!found)
459 3456 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
460 :
461 : /*
462 : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
463 : * counts and resource owner information.
464 : *
465 : * The non-shared table could already exist in this process (this occurs
466 : * when the postmaster is recreating shared memory after a backend crash).
467 : * If so, delete and recreate it. (We could simply leave it, since it
468 : * ought to be empty in the postmaster, but for safety let's zap it.)
469 : */
470 3456 : if (LockMethodLocalHash)
471 8 : hash_destroy(LockMethodLocalHash);
472 :
473 3456 : info.keysize = sizeof(LOCALLOCKTAG);
474 3456 : info.entrysize = sizeof(LOCALLOCK);
475 :
476 3456 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
477 : 16,
478 : &info,
479 : HASH_ELEM | HASH_BLOBS);
480 3456 : }
481 :
482 :
483 : /*
484 : * Fetch the lock method table associated with a given lock
485 : */
486 : LockMethod
487 180 : GetLocksMethodTable(const LOCK *lock)
488 : {
489 180 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
490 :
491 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492 180 : return LockMethods[lockmethodid];
493 : }
494 :
495 : /*
496 : * Fetch the lock method table associated with a given locktag
497 : */
498 : LockMethod
499 2178 : GetLockTagsMethodTable(const LOCKTAG *locktag)
500 : {
501 2178 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
502 :
503 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
504 2178 : return LockMethods[lockmethodid];
505 : }
506 :
507 :
508 : /*
509 : * Compute the hash code associated with a LOCKTAG.
510 : *
511 : * To avoid unnecessary recomputations of the hash code, we try to do this
512 : * just once per function, and then pass it around as needed. Aside from
513 : * passing the hashcode to hash_search_with_hash_value(), we can extract
514 : * the lock partition number from the hashcode.
515 : */
516 : uint32
517 43019478 : LockTagHashCode(const LOCKTAG *locktag)
518 : {
519 43019478 : return get_hash_value(LockMethodLockHash, (const void *) locktag);
520 : }
521 :
522 : /*
523 : * Compute the hash code associated with a PROCLOCKTAG.
524 : *
525 : * Because we want to use just one set of partition locks for both the
526 : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
527 : * fall into the same partition number as their associated LOCKs.
528 : * dynahash.c expects the partition number to be the low-order bits of
529 : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
530 : * same low-order bits as the associated LOCKTAG's hash code. We achieve
531 : * this with this specialized hash function.
532 : */
533 : static uint32
534 1612 : proclock_hash(const void *key, Size keysize)
535 : {
536 1612 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
537 : uint32 lockhash;
538 : Datum procptr;
539 :
540 : Assert(keysize == sizeof(PROCLOCKTAG));
541 :
542 : /* Look into the associated LOCK object, and compute its hash code */
543 1612 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
544 :
545 : /*
546 : * To make the hash code also depend on the PGPROC, we xor the proc
547 : * struct's address into the hash code, left-shifted so that the
548 : * partition-number bits don't change. Since this is only a hash, we
549 : * don't care if we lose high-order bits of the address; use an
550 : * intermediate variable to suppress cast-pointer-to-int warnings.
551 : */
552 1612 : procptr = PointerGetDatum(proclocktag->myProc);
553 1612 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
554 :
555 1612 : return lockhash;
556 : }
557 :
558 : /*
559 : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
560 : * for its underlying LOCK.
561 : *
562 : * We use this just to avoid redundant calls of LockTagHashCode().
563 : */
564 : static inline uint32
565 9019384 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
566 : {
567 9019384 : uint32 lockhash = hashcode;
568 : Datum procptr;
569 :
570 : /*
571 : * This must match proclock_hash()!
572 : */
573 9019384 : procptr = PointerGetDatum(proclocktag->myProc);
574 9019384 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
575 :
576 9019384 : return lockhash;
577 : }
578 :
579 : /*
580 : * Given two lock modes, return whether they would conflict.
581 : */
582 : bool
583 416 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
584 : {
585 416 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
586 :
587 416 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
588 244 : return true;
589 :
590 172 : return false;
591 : }
592 :
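/*
 * Usage examples, per the conflict table at the top of this file:
 *
 *     DoLockModesConflict(RowExclusiveLock, ShareLock)         => true
 *     DoLockModesConflict(AccessShareLock, RowExclusiveLock)   => false
 */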
593 : /*
594 : * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
595 : * by the current transaction
596 : */
597 : bool
598 0 : LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
599 : {
600 : LOCALLOCKTAG localtag;
601 : LOCALLOCK *locallock;
602 :
603 : /*
604 : * See if there is a LOCALLOCK entry for this lock and lockmode
605 : */
606 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
607 0 : localtag.lock = *locktag;
608 0 : localtag.mode = lockmode;
609 :
610 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
611 : &localtag,
612 : HASH_FIND, NULL);
613 :
614 0 : return (locallock && locallock->nLocks > 0);
615 : }
616 :
617 : #ifdef USE_ASSERT_CHECKING
618 : /*
619 : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
620 : * evaluate assertions based on all locks held.
621 : */
622 : HTAB *
623 : GetLockMethodLocalHash(void)
624 : {
625 : return LockMethodLocalHash;
626 : }
627 : #endif
628 :
629 : /*
630 : * LockHasWaiters -- look up 'locktag' and check if releasing this
631 : * lock would wake up other processes waiting for it.
632 : */
633 : bool
634 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
635 : {
636 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
637 : LockMethod lockMethodTable;
638 : LOCALLOCKTAG localtag;
639 : LOCALLOCK *locallock;
640 : LOCK *lock;
641 : PROCLOCK *proclock;
642 : LWLock *partitionLock;
643 0 : bool hasWaiters = false;
644 :
645 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
646 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
647 0 : lockMethodTable = LockMethods[lockmethodid];
648 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
649 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
650 :
651 : #ifdef LOCK_DEBUG
652 : if (LOCK_DEBUG_ENABLED(locktag))
653 : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
654 : locktag->locktag_field1, locktag->locktag_field2,
655 : lockMethodTable->lockModeNames[lockmode]);
656 : #endif
657 :
658 : /*
659 : * Find the LOCALLOCK entry for this lock and lockmode
660 : */
661 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
662 0 : localtag.lock = *locktag;
663 0 : localtag.mode = lockmode;
664 :
665 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
666 : &localtag,
667 : HASH_FIND, NULL);
668 :
669 : /*
670 : * let the caller print its own error message, too. Do not ereport(ERROR).
671 : */
672 0 : if (!locallock || locallock->nLocks <= 0)
673 : {
674 0 : elog(WARNING, "you don't own a lock of type %s",
675 : lockMethodTable->lockModeNames[lockmode]);
676 0 : return false;
677 : }
678 :
679 : /*
680 : * Check the shared lock table.
681 : */
682 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
683 :
684 0 : LWLockAcquire(partitionLock, LW_SHARED);
685 :
686 : /*
687 : * We don't need to re-find the lock or proclock, since we kept their
688 : * addresses in the locallock table, and they couldn't have been removed
689 : * while we were holding a lock on them.
690 : */
691 0 : lock = locallock->lock;
692 : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
693 0 : proclock = locallock->proclock;
694 : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
695 :
696 : /*
697 : * Double-check that we are actually holding a lock of the type we want to
698 : * release.
699 : */
700 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
701 : {
702 : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
703 0 : LWLockRelease(partitionLock);
704 0 : elog(WARNING, "you don't own a lock of type %s",
705 : lockMethodTable->lockModeNames[lockmode]);
706 0 : RemoveLocalLock(locallock);
707 0 : return false;
708 : }
709 :
710 : /*
711 : * Do the checking.
712 : */
713 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
714 0 : hasWaiters = true;
715 :
716 0 : LWLockRelease(partitionLock);
717 :
718 0 : return hasWaiters;
719 : }
720 :
721 : /*
722 : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
723 : * set lock if/when no conflicts.
724 : *
725 : * Inputs:
726 : * locktag: unique identifier for the lockable object
727 : * lockmode: lock mode to acquire
728 : * sessionLock: if true, acquire lock for session not current transaction
729 : * dontWait: if true, don't wait to acquire lock
730 : *
731 : * Returns one of:
732 : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
733 : * LOCKACQUIRE_OK lock successfully acquired
734 : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
735 : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
736 : *
737 : * In the normal case where dontWait=false and the caller doesn't need to
738 : * distinguish a freshly acquired lock from one already taken earlier in
739 : * this same transaction, there is no need to examine the return value.
740 : *
741 : * Side Effects: The lock is acquired and recorded in lock tables.
742 : *
743 : * NOTE: if we wait for the lock, there is no way to abort the wait
744 : * short of aborting the transaction.
745 : */
746 : LockAcquireResult
747 1541356 : LockAcquire(const LOCKTAG *locktag,
748 : LOCKMODE lockmode,
749 : bool sessionLock,
750 : bool dontWait)
751 : {
752 1541356 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
753 : true, NULL);
754 : }
755 :
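/*
 * A hedged usage sketch (not called anywhere; the relation OID is
 * hypothetical).  Real callers normally go through lmgr.c, e.g.
 * LockRelationOid, rather than calling LockAcquire directly.
 */
static void
example_try_lock_relation(Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

	/* dontWait = true: give up instead of sleeping on a conflict */
	if (LockAcquire(&tag, AccessShareLock, false, true) ==
		LOCKACQUIRE_NOT_AVAIL)
		elog(LOG, "relation %u is locked by someone else", relid);
}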
756 : /*
757 : * LockAcquireExtended - allows us to specify additional options
758 : *
759 : * reportMemoryError specifies whether a lock request that fills the lock
760 : * table should generate an ERROR or not. Passing "false" allows the caller
761 : * to attempt to recover from lock-table-full situations, perhaps by forcibly
762 : * canceling other lock holders and then retrying. Note, however, that the
763 : * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
764 : * in combination with dontWait = true, as the cause of failure couldn't be
765 : * distinguished.
766 : *
767 : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
768 : * table entry if a lock is successfully acquired, or NULL if not.
769 : */
770 : LockAcquireResult
771 44879958 : LockAcquireExtended(const LOCKTAG *locktag,
772 : LOCKMODE lockmode,
773 : bool sessionLock,
774 : bool dontWait,
775 : bool reportMemoryError,
776 : LOCALLOCK **locallockp)
777 : {
778 44879958 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
779 : LockMethod lockMethodTable;
780 : LOCALLOCKTAG localtag;
781 : LOCALLOCK *locallock;
782 : LOCK *lock;
783 : PROCLOCK *proclock;
784 : bool found;
785 : ResourceOwner owner;
786 : uint32 hashcode;
787 : LWLock *partitionLock;
788 : bool found_conflict;
789 44879958 : bool log_lock = false;
790 :
791 44879958 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
792 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
793 44879958 : lockMethodTable = LockMethods[lockmethodid];
794 44879958 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
795 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
796 :
797 44879958 : if (RecoveryInProgress() && !InRecovery &&
798 283920 : (locktag->locktag_type == LOCKTAG_OBJECT ||
799 283920 : locktag->locktag_type == LOCKTAG_RELATION) &&
800 : lockmode > RowExclusiveLock)
801 0 : ereport(ERROR,
802 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
803 : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
804 : lockMethodTable->lockModeNames[lockmode]),
805 : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
806 :
807 : #ifdef LOCK_DEBUG
808 : if (LOCK_DEBUG_ENABLED(locktag))
809 : elog(LOG, "LockAcquire: lock [%u,%u] %s",
810 : locktag->locktag_field1, locktag->locktag_field2,
811 : lockMethodTable->lockModeNames[lockmode]);
812 : #endif
813 :
814 : /* Identify owner for lock */
815 44879958 : if (sessionLock)
816 111786 : owner = NULL;
817 : else
818 44768172 : owner = CurrentResourceOwner;
819 :
820 : /*
821 : * Find or create a LOCALLOCK entry for this lock and lockmode
822 : */
823 44879958 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
824 44879958 : localtag.lock = *locktag;
825 44879958 : localtag.mode = lockmode;
826 :
827 44879958 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
828 : &localtag,
829 : HASH_ENTER, &found);
830 :
831 : /*
832 : * if it's a new locallock object, initialize it
833 : */
834 44879958 : if (!found)
835 : {
836 41434366 : locallock->lock = NULL;
837 41434366 : locallock->proclock = NULL;
838 41434366 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
839 41434366 : locallock->nLocks = 0;
840 41434366 : locallock->holdsStrongLockCount = false;
841 41434366 : locallock->lockCleared = false;
842 41434366 : locallock->numLockOwners = 0;
843 41434366 : locallock->maxLockOwners = 8;
844 41434366 : locallock->lockOwners = NULL; /* in case next line fails */
845 41434366 : locallock->lockOwners = (LOCALLOCKOWNER *)
846 41434366 : MemoryContextAlloc(TopMemoryContext,
847 41434366 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
848 : }
849 : else
850 : {
851 : /* Make sure there will be room to remember the lock */
852 3445592 : if (locallock->numLockOwners >= locallock->maxLockOwners)
853 : {
854 38 : int newsize = locallock->maxLockOwners * 2;
855 :
856 38 : locallock->lockOwners = (LOCALLOCKOWNER *)
857 38 : repalloc(locallock->lockOwners,
858 : newsize * sizeof(LOCALLOCKOWNER));
859 38 : locallock->maxLockOwners = newsize;
860 : }
861 : }
862 44879958 : hashcode = locallock->hashcode;
863 :
864 44879958 : if (locallockp)
865 43338602 : *locallockp = locallock;
866 :
867 : /*
868 : * If we already hold the lock, we can just increase the count locally.
869 : *
870 : * If lockCleared is already set, caller need not worry about absorbing
871 : * sinval messages related to the lock's object.
872 : */
873 44879958 : if (locallock->nLocks > 0)
874 : {
875 3445592 : GrantLockLocal(locallock, owner);
876 3445592 : if (locallock->lockCleared)
877 3248942 : return LOCKACQUIRE_ALREADY_CLEAR;
878 : else
879 196650 : return LOCKACQUIRE_ALREADY_HELD;
880 : }
881 :
882 : /*
883 : * We don't acquire any other heavyweight lock while holding the relation
884 : * extension lock. We do allow acquiring the same relation extension
885 : * lock more than once, but that case won't reach here.
886 : */
887 : Assert(!IsRelationExtensionLockHeld);
888 :
889 : /*
890 : * We don't acquire any other heavyweight lock while holding the page lock
891 : * except for relation extension.
892 : */
893 : Assert(!IsPageLockHeld ||
894 : (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
895 :
896 : /*
897 : * Prepare to emit a WAL record if acquisition of this lock needs to be
898 : * replayed in a standby server.
899 : *
900 : * Here we prepare to log; after lock is acquired we'll issue log record.
901 : * This arrangement simplifies error recovery in case the preparation step
902 : * fails.
903 : *
904 : * Only AccessExclusiveLocks can conflict with lock types that read-only
905 : * transactions can acquire in a standby server. Make sure this definition
906 : * matches the one in GetRunningTransactionLocks().
907 : */
908 41434366 : if (lockmode >= AccessExclusiveLock &&
909 570842 : locktag->locktag_type == LOCKTAG_RELATION &&
910 461498 : !RecoveryInProgress() &&
911 424352 : XLogStandbyInfoActive())
912 : {
913 378290 : LogAccessExclusiveLockPrepare();
914 378290 : log_lock = true;
915 : }
916 :
917 : /*
918 : * Attempt to take lock via fast path, if eligible. But if we remember
919 : * having filled up the fast path array, we don't attempt to make any
920 : * further use of it until we release some locks. It's possible that some
921 : * other backend has transferred some of those locks to the shared hash
922 : * table, leaving space free, but it's not worth acquiring the LWLock just
923 : * to check. It's also possible that we're acquiring a second or third
924 : * lock type on a relation we have already locked using the fast-path, but
925 : * for now we don't worry about that case either.
926 : */
927 41434366 : if (EligibleForRelationFastPath(locktag, lockmode) &&
928 37835992 : FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
929 : {
930 37402984 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
931 : bool acquired;
932 :
933 : /*
934 : * LWLockAcquire acts as a memory sequencing point, so it's safe to
935 : * assume that any strong locker whose increment to
936 : * FastPathStrongRelationLocks->count becomes visible after we test
937 : * it has yet to begin to transfer fast-path locks.
938 : */
939 37402984 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
940 37402984 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
941 951702 : acquired = false;
942 : else
943 36451282 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
944 : lockmode);
945 37402984 : LWLockRelease(&MyProc->fpInfoLock);
946 37402984 : if (acquired)
947 : {
948 : /*
949 : * The locallock might contain stale pointers to some old shared
950 : * objects; we MUST reset these to null before considering the
951 : * lock to be acquired via fast-path.
952 : */
953 36451282 : locallock->lock = NULL;
954 36451282 : locallock->proclock = NULL;
955 36451282 : GrantLockLocal(locallock, owner);
956 36451282 : return LOCKACQUIRE_OK;
957 : }
958 : }
959 :
960 : /*
961 : * If this lock could potentially have been taken via the fast-path by
962 : * some other backend, we must (temporarily) disable further use of the
963 : * fast-path for this lock tag, and migrate any locks already taken via
964 : * this method to the main lock table.
965 : */
966 4983084 : if (ConflictsWithRelationFastPath(locktag, lockmode))
967 : {
968 522042 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
969 :
970 522042 : BeginStrongLockAcquire(locallock, fasthashcode);
971 522042 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
972 : hashcode))
973 : {
974 0 : AbortStrongLockAcquire();
975 0 : if (locallock->nLocks == 0)
976 0 : RemoveLocalLock(locallock);
977 0 : if (locallockp)
978 0 : *locallockp = NULL;
979 0 : if (reportMemoryError)
980 0 : ereport(ERROR,
981 : (errcode(ERRCODE_OUT_OF_MEMORY),
982 : errmsg("out of shared memory"),
983 : errhint("You might need to increase max_locks_per_transaction.")));
984 : else
985 0 : return LOCKACQUIRE_NOT_AVAIL;
986 : }
987 : }
988 :
989 : /*
990 : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
991 : * take it via the fast-path, either, so we've got to mess with the shared
992 : * lock table.
993 : */
994 4983084 : partitionLock = LockHashPartitionLock(hashcode);
995 :
996 4983084 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
997 :
998 : /*
999 : * Find or create lock and proclock entries with this tag
1000 : *
1001 : * Note: if the locallock object already existed, it might have a pointer
1002 : * to the lock already ... but we should not assume that that pointer is
1003 : * valid, since a lock object with zero hold and request counts can go
1004 : * away anytime. So we have to use SetupLockInTable() to recompute the
1005 : * lock and proclock pointers, even if they're already set.
1006 : */
1007 4983084 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1008 : hashcode, lockmode);
1009 4983084 : if (!proclock)
1010 : {
1011 0 : AbortStrongLockAcquire();
1012 0 : LWLockRelease(partitionLock);
1013 0 : if (locallock->nLocks == 0)
1014 0 : RemoveLocalLock(locallock);
1015 0 : if (locallockp)
1016 0 : *locallockp = NULL;
1017 0 : if (reportMemoryError)
1018 0 : ereport(ERROR,
1019 : (errcode(ERRCODE_OUT_OF_MEMORY),
1020 : errmsg("out of shared memory"),
1021 : errhint("You might need to increase max_locks_per_transaction.")));
1022 : else
1023 0 : return LOCKACQUIRE_NOT_AVAIL;
1024 : }
1025 4983084 : locallock->proclock = proclock;
1026 4983084 : lock = proclock->tag.myLock;
1027 4983084 : locallock->lock = lock;
1028 :
1029 : /*
1030 : * If the requested lock conflicts with locks requested by waiters, we must
1031 : * join the wait queue. Otherwise, check for conflict with already-held
1032 : * locks. (That check comes last because it is the most complex.)
1033 : */
1034 4983084 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1035 92 : found_conflict = true;
1036 : else
1037 4982992 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1038 : lock, proclock);
1039 :
1040 4983084 : if (!found_conflict)
1041 : {
1042 : /* No conflict with held or previously requested locks */
1043 4979676 : GrantLock(lock, proclock, lockmode);
1044 4979676 : GrantLockLocal(locallock, owner);
1045 : }
1046 : else
1047 : {
1048 : /*
1049 : * We can't acquire the lock immediately. If caller specified no
1050 : * blocking, remove useless table entries and return
1051 : * LOCKACQUIRE_NOT_AVAIL without waiting.
1052 : */
1053 3408 : if (dontWait)
1054 : {
1055 1412 : AbortStrongLockAcquire();
1056 1412 : if (proclock->holdMask == 0)
1057 : {
1058 : uint32 proclock_hashcode;
1059 :
1060 1004 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1061 1004 : dlist_delete(&proclock->lockLink);
1062 1004 : dlist_delete(&proclock->procLink);
1063 1004 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1064 1004 : &(proclock->tag),
1065 : proclock_hashcode,
1066 : HASH_REMOVE,
1067 : NULL))
1068 0 : elog(PANIC, "proclock table corrupted");
1069 : }
1070 : else
1071 : PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1072 1412 : lock->nRequested--;
1073 1412 : lock->requested[lockmode]--;
1074 : LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1075 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1076 : Assert(lock->nGranted <= lock->nRequested);
1077 1412 : LWLockRelease(partitionLock);
1078 1412 : if (locallock->nLocks == 0)
1079 1412 : RemoveLocalLock(locallock);
1080 1412 : if (locallockp)
1081 452 : *locallockp = NULL;
1082 1412 : return LOCKACQUIRE_NOT_AVAIL;
1083 : }
1084 :
1085 : /*
1086 : * Set bitmask of locks this process already holds on this object.
1087 : */
1088 1996 : MyProc->heldLocks = proclock->holdMask;
1089 :
1090 : /*
1091 : * Sleep till someone wakes me up.
1092 : */
1093 :
1094 : TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1095 : locktag->locktag_field2,
1096 : locktag->locktag_field3,
1097 : locktag->locktag_field4,
1098 : locktag->locktag_type,
1099 : lockmode);
1100 :
1101 1996 : WaitOnLock(locallock, owner);
1102 :
1103 : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1104 : locktag->locktag_field2,
1105 : locktag->locktag_field3,
1106 : locktag->locktag_field4,
1107 : locktag->locktag_type,
1108 : lockmode);
1109 :
1110 : /*
1111 : * NOTE: do not do any material change of state between here and
1112 : * return. All required changes in locktable state must have been
1113 : * done when the lock was granted to us --- see notes in WaitOnLock.
1114 : */
1115 :
1116 : /*
1117 : * Check the proclock entry status, in case something in the ipc
1118 : * communication doesn't work correctly.
1119 : */
1120 1904 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1121 : {
1122 0 : AbortStrongLockAcquire();
1123 : PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1124 : LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1125 : /* Should we retry ? */
1126 0 : LWLockRelease(partitionLock);
1127 0 : elog(ERROR, "LockAcquire failed");
1128 : }
1129 : PROCLOCK_PRINT("LockAcquire: granted", proclock);
1130 : LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1131 : }
1132 :
1133 : /*
1134 : * Lock state is fully up-to-date now; if we error out after this, no
1135 : * special error cleanup is required.
1136 : */
1137 4981580 : FinishStrongLockAcquire();
1138 :
1139 4981580 : LWLockRelease(partitionLock);
1140 :
1141 : /*
1142 : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1143 : * standby server.
1144 : */
1145 4981580 : if (log_lock)
1146 : {
1147 : /*
1148 : * Decode the locktag back to the original values, to avoid sending
1149 : * lots of empty bytes with every message. See lock.h to check how a
1150 : * locktag is defined for LOCKTAG_RELATION
1151 : */
1152 377862 : LogAccessExclusiveLock(locktag->locktag_field1,
1153 : locktag->locktag_field2);
1154 : }
1155 :
1156 4981580 : return LOCKACQUIRE_OK;
1157 : }
1158 :
1159 : /*
1160 : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1161 : * request.
1162 : *
1163 : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1164 : * for lack of shared memory.
1165 : *
1166 : * The appropriate partition lock must be held at entry, and will be
1167 : * held at exit.
1168 : */
1169 : static PROCLOCK *
1170 4986308 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1171 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1172 : {
1173 : LOCK *lock;
1174 : PROCLOCK *proclock;
1175 : PROCLOCKTAG proclocktag;
1176 : uint32 proclock_hashcode;
1177 : bool found;
1178 :
1179 : /*
1180 : * Find or create a lock with this tag.
1181 : */
1182 4986308 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1183 : locktag,
1184 : hashcode,
1185 : HASH_ENTER_NULL,
1186 : &found);
1187 4986308 : if (!lock)
1188 0 : return NULL;
1189 :
1190 : /*
1191 : * if it's a new lock object, initialize it
1192 : */
1193 4986308 : if (!found)
1194 : {
1195 4011112 : lock->grantMask = 0;
1196 4011112 : lock->waitMask = 0;
1197 4011112 : dlist_init(&lock->procLocks);
1198 4011112 : dclist_init(&lock->waitProcs);
1199 4011112 : lock->nRequested = 0;
1200 4011112 : lock->nGranted = 0;
1201 24066672 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1202 4011112 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1203 : LOCK_PRINT("LockAcquire: new", lock, lockmode);
1204 : }
1205 : else
1206 : {
1207 : LOCK_PRINT("LockAcquire: found", lock, lockmode);
1208 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1209 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1210 : Assert(lock->nGranted <= lock->nRequested);
1211 : }
1212 :
1213 : /*
1214 : * Create the hash key for the proclock table.
1215 : */
1216 4986308 : proclocktag.myLock = lock;
1217 4986308 : proclocktag.myProc = proc;
1218 :
1219 4986308 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1220 :
1221 : /*
1222 : * Find or create a proclock entry with this tag
1223 : */
1224 4986308 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1225 : &proclocktag,
1226 : proclock_hashcode,
1227 : HASH_ENTER_NULL,
1228 : &found);
1229 4986308 : if (!proclock)
1230 : {
1231 : /* Oops, not enough shmem for the proclock */
1232 0 : if (lock->nRequested == 0)
1233 : {
1234 : /*
1235 : * There are no other requestors of this lock, so garbage-collect
1236 : * the lock object. We *must* do this to avoid a permanent leak
1237 : * of shared memory, because there won't be anything to cause
1238 : * anyone to release the lock object later.
1239 : */
1240 : Assert(dlist_is_empty(&(lock->procLocks)));
1241 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1242 0 : &(lock->tag),
1243 : hashcode,
1244 : HASH_REMOVE,
1245 : NULL))
1246 0 : elog(PANIC, "lock table corrupted");
1247 : }
1248 0 : return NULL;
1249 : }
1250 :
1251 : /*
1252 : * If new, initialize the new entry
1253 : */
1254 4986308 : if (!found)
1255 : {
1256 4028720 : uint32 partition = LockHashPartition(hashcode);
1257 :
1258 : /*
1259 : * It might seem unsafe to access proclock->groupLeader without a
1260 : * lock, but it's not really. Either we are initializing a proclock
1261 : * on our own behalf, in which case our group leader isn't changing
1262 : * because the group leader for a process can only ever be changed by
1263 : * the process itself; or else we are transferring a fast-path lock to
1264 : * the main lock table, in which case that process can't change its
1265 : * lock group leader without first releasing all of its locks (and in
1266 : * particular the one we are currently transferring).
1267 : */
1268 8057440 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
1269 4028720 : proc->lockGroupLeader : proc;
1270 4028720 : proclock->holdMask = 0;
1271 4028720 : proclock->releaseMask = 0;
1272 : /* Add proclock to appropriate lists */
1273 4028720 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1274 4028720 : dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1275 : PROCLOCK_PRINT("LockAcquire: new", proclock);
1276 : }
1277 : else
1278 : {
1279 : PROCLOCK_PRINT("LockAcquire: found", proclock);
1280 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
1281 :
1282 : #ifdef CHECK_DEADLOCK_RISK
1283 :
1284 : /*
1285 : * Issue warning if we already hold a lower-level lock on this object
1286 : * and do not hold a lock of the requested level or higher. This
1287 : * indicates a deadlock-prone coding practice (eg, we'd have a
1288 : * deadlock if another backend were following the same code path at
1289 : * about the same time).
1290 : *
1291 : * This is not enabled by default, because it may generate log entries
1292 : * about user-level coding practices that are in fact safe in context.
1293 : * It can be enabled to help find system-level problems.
1294 : *
1295 : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1296 : * better to use a table. For now, though, this works.
1297 : */
1298 : {
1299 : int i;
1300 :
1301 : for (i = lockMethodTable->numLockModes; i > 0; i--)
1302 : {
1303 : if (proclock->holdMask & LOCKBIT_ON(i))
1304 : {
1305 : if (i >= (int) lockmode)
1306 : break; /* safe: we have a lock >= req level */
1307 : elog(LOG, "deadlock risk: raising lock level"
1308 : " from %s to %s on object %u/%u/%u",
1309 : lockMethodTable->lockModeNames[i],
1310 : lockMethodTable->lockModeNames[lockmode],
1311 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1312 : lock->tag.locktag_field3);
1313 : break;
1314 : }
1315 : }
1316 : }
1317 : #endif /* CHECK_DEADLOCK_RISK */
1318 : }
1319 :
1320 : /*
1321 : * lock->nRequested and lock->requested[] count the total number of
1322 : * requests, whether granted or waiting, so increment those immediately.
1323 : * The other counts don't increment till we get the lock.
1324 : */
1325 4986308 : lock->nRequested++;
1326 4986308 : lock->requested[lockmode]++;
1327 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1328 :
1329 : /*
1330 : * We shouldn't already hold the desired lock; else locallock table is
1331 : * broken.
1332 : */
1333 4986308 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
1334 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
1335 : lockMethodTable->lockModeNames[lockmode],
1336 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1337 : lock->tag.locktag_field3);
1338 :
1339 4986308 : return proclock;
1340 : }
1341 :
1342 : /*
1343 : * Check and set/reset the flag that we hold the relation extension/page lock.
1344 : *
1345 : * It is the caller's responsibility to call this function after
1346 : * acquiring/releasing the relation extension/page lock.
1347 : *
1348 : * Pass 'acquired' as true if the lock was acquired, false otherwise.
1349 : */
1350 : static inline void
1351 83539334 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1352 : {
1353 : #ifdef USE_ASSERT_CHECKING
1354 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1355 : IsRelationExtensionLockHeld = acquired;
1356 : else if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_PAGE)
1357 : IsPageLockHeld = acquired;
1358 :
1359 : #endif
1360 83539334 : }
1361 :
1362 : /*
1363 : * Subroutine to free a locallock entry
1364 : */
1365 : static void
1366 41434366 : RemoveLocalLock(LOCALLOCK *locallock)
1367 : {
1368 : int i;
1369 :
1370 41550556 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1371 : {
1372 116190 : if (locallock->lockOwners[i].owner != NULL)
1373 116122 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1374 : }
1375 41434366 : locallock->numLockOwners = 0;
1376 41434366 : if (locallock->lockOwners != NULL)
1377 41434366 : pfree(locallock->lockOwners);
1378 41434366 : locallock->lockOwners = NULL;
1379 :
1380 41434366 : if (locallock->holdsStrongLockCount)
1381 : {
1382 : uint32 fasthashcode;
1383 :
1384 521488 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1385 :
1386 521488 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1387 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1388 521488 : FastPathStrongRelationLocks->count[fasthashcode]--;
1389 521488 : locallock->holdsStrongLockCount = false;
1390 521488 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1391 : }
1392 :
1393 41434366 : if (!hash_search(LockMethodLocalHash,
1394 41434366 : &(locallock->tag),
1395 : HASH_REMOVE, NULL))
1396 0 : elog(WARNING, "locallock table corrupted");
1397 :
1398 : /*
1399 : * Indicate that the lock is released for certain types of locks
1400 : */
1401 41434366 : CheckAndSetLockHeld(locallock, false);
1402 41434366 : }
1403 :
1404 : /*
1405 : * LockCheckConflicts -- test whether requested lock conflicts
1406 : * with those already granted
1407 : *
1408 : * Returns true if conflict, false if no conflict.
1409 : *
1410 : * NOTES:
1411 : * Here's what makes this complicated: one process's locks don't
1412 : * conflict with one another, no matter what purpose they are held for
1413 : * (eg, session and transaction locks do not conflict). Nor do the locks
1414 : * of one process in a lock group conflict with those of another process in
1415 : * the same group. So, we must subtract off these locks when determining
1416 : * whether the requested new lock conflicts with those already held.
1417 : */
1418 : bool
1419 4984986 : LockCheckConflicts(LockMethod lockMethodTable,
1420 : LOCKMODE lockmode,
1421 : LOCK *lock,
1422 : PROCLOCK *proclock)
1423 : {
1424 4984986 : int numLockModes = lockMethodTable->numLockModes;
1425 : LOCKMASK myLocks;
1426 4984986 : int conflictMask = lockMethodTable->conflictTab[lockmode];
1427 : int conflictsRemaining[MAX_LOCKMODES];
1428 4984986 : int totalConflictsRemaining = 0;
1429 : dlist_iter proclock_iter;
1430 : int i;
1431 :
1432 : /*
1433 : * first check for global conflicts: If no locks conflict with my request,
1434 : * then I get the lock.
1435 : *
1436 : * Checking for conflict: lock->grantMask represents the types of
1437 : * currently held locks. conflictTable[lockmode] has a bit set for each
1438 : * type of lock that conflicts with request. Bitwise compare tells if
1439 : * there is a conflict.
1440 : */
1441 4984986 : if (!(conflictMask & lock->grantMask))
1442 : {
1443 : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1444 4494082 : return false;
1445 : }
1446 :
1447 : /*
1448 : * Rats. Something conflicts. But it could still be my own lock, or a
1449 : * lock held by another member of my locking group. First, figure out how
1450 : * many conflicts remain after subtracting out any locks I hold myself.
1451 : */
1452 490904 : myLocks = proclock->holdMask;
1453 4418136 : for (i = 1; i <= numLockModes; i++)
1454 : {
1455 3927232 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
1456 : {
1457 2026980 : conflictsRemaining[i] = 0;
1458 2026980 : continue;
1459 : }
1460 1900252 : conflictsRemaining[i] = lock->granted[i];
1461 1900252 : if (myLocks & LOCKBIT_ON(i))
1462 494726 : --conflictsRemaining[i];
1463 1900252 : totalConflictsRemaining += conflictsRemaining[i];
1464 : }
1465 :
1466 : /* If no conflicts remain, we get the lock. */
1467 490904 : if (totalConflictsRemaining == 0)
1468 : {
1469 : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1470 486674 : return false;
1471 : }
1472 :
1473 : /* If no group locking, it's definitely a conflict. */
1474 4230 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1475 : {
1476 : Assert(proclock->tag.myProc == MyProc);
1477 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1478 : proclock);
1479 3312 : return true;
1480 : }
1481 :
1482 : /*
1483 : * Relation extension and page locks conflict even between members of
1484 : * the same lock group.
1485 : */
1486 918 : if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
1487 892 : (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
1488 : {
1489 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1490 : proclock);
1491 26 : return true;
1492 : }
1493 :
1494 : /*
1495 : * Locks held in conflicting modes by members of our own lock group are
1496 : * not real conflicts; we can subtract those out and see if we still have
1497 : * a conflict. This is O(N) in the number of processes holding or
1498 : * awaiting locks on this object. We could improve that by making the
1499 : * shared memory state more complex (and larger) but it doesn't seem worth
1500 : * it.
1501 : */
1502 1134 : dlist_foreach(proclock_iter, &lock->procLocks)
1503 : {
1504 1070 : PROCLOCK *otherproclock =
1505 1070 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1506 :
1507 1070 : if (proclock != otherproclock &&
1508 1006 : proclock->groupLeader == otherproclock->groupLeader &&
1509 832 : (otherproclock->holdMask & conflictMask) != 0)
1510 : {
1511 828 : int intersectMask = otherproclock->holdMask & conflictMask;
1512 :
1513 7452 : for (i = 1; i <= numLockModes; i++)
1514 : {
1515 6624 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
1516 : {
1517 842 : if (conflictsRemaining[i] <= 0)
1518 0 : elog(PANIC, "proclocks held do not match lock");
1519 842 : conflictsRemaining[i]--;
1520 842 : totalConflictsRemaining--;
1521 : }
1522 : }
1523 :
1524 828 : if (totalConflictsRemaining == 0)
1525 : {
1526 : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1527 : proclock);
1528 828 : return false;
1529 : }
1530 : }
1531 : }
1532 :
1533 : /* Nope, it's a real conflict. */
1534 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1535 64 : return true;
1536 : }
1537 :
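/*
 * Worked example: a parallel worker requests ShareLock while its leader
 * already holds RowExclusiveLock on the same relation.  grantMask overlaps
 * conflictTab[ShareLock], so the quick exit is not taken; but the scan of
 * lock->procLocks finds the leader's proclock, whose holdMask falls within
 * conflictMask and which shares our groupLeader, so its lock is subtracted,
 * totalConflictsRemaining reaches zero, and the request is granted without
 * waiting.
 */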
1538 : /*
1539 : * GrantLock -- update the lock and proclock data structures to show
1540 : * the lock request has been granted.
1541 : *
1542 : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1543 : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1544 : *
1545 : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1546 : * table entry; but since we may be awakening some other process, we can't do
1547 : * that here; it's done by GrantLockLocal, instead.
1548 : */
1549 : void
1550 4984966 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1551 : {
1552 4984966 : lock->nGranted++;
1553 4984966 : lock->granted[lockmode]++;
1554 4984966 : lock->grantMask |= LOCKBIT_ON(lockmode);
1555 4984966 : if (lock->granted[lockmode] == lock->requested[lockmode])
1556 4984730 : lock->waitMask &= LOCKBIT_OFF(lockmode);
1557 4984966 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1558 : LOCK_PRINT("GrantLock", lock, lockmode);
1559 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1560 : Assert(lock->nGranted <= lock->nRequested);
1561 4984966 : }
1562 :
1563 : /*
1564 : * UnGrantLock -- opposite of GrantLock.
1565 : *
1566 : * Updates the lock and proclock data structures to show that the lock
1567 : * is no longer held nor requested by the current holder.
1568 : *
1569 : * Returns true if there were any waiters waiting on the lock that
1570 : * should now be woken up with ProcLockWakeup.
1571 : */
1572 : static bool
1573 4984818 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1574 : PROCLOCK *proclock, LockMethod lockMethodTable)
1575 : {
1576 4984818 : bool wakeupNeeded = false;
1577 :
1578 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1579 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1580 : Assert(lock->nGranted <= lock->nRequested);
1581 :
1582 : /*
1583 : * fix the general lock stats
1584 : */
1585 4984818 : lock->nRequested--;
1586 4984818 : lock->requested[lockmode]--;
1587 4984818 : lock->nGranted--;
1588 4984818 : lock->granted[lockmode]--;
1589 :
1590 4984818 : if (lock->granted[lockmode] == 0)
1591 : {
1592 : /* change the conflict mask. No more of this lock type. */
1593 4972652 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1594 : }
1595 :
1596 : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1597 :
1598 : /*
1599 : * We need only run ProcLockWakeup if the released lock conflicts with at
1600 : * least one of the lock types requested by waiter(s). Otherwise whatever
1601 : * conflict made them wait must still exist. NOTE: before MVCC, we could
1602 : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1603 : * not true anymore, because the remaining granted locks might belong to
1604 : * some waiter, who could now be awakened because he doesn't conflict with
1605 : * his own locks.
1606 : */
1607 4984818 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1608 1854 : wakeupNeeded = true;
1609 :
1610 : /*
1611 : * Now fix the per-proclock state.
1612 : */
1613 4984818 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1614 : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1615 :
1616 4984818 : return wakeupNeeded;
1617 : }
1618 :
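/*
 * A tiny sketch of the grantMask bookkeeping these two routines share.  Per
 * lock.h, LOCKBIT_ON(m) is (1 << m) and LOCKBIT_OFF(m) is its complement, so
 * GrantLock ORs a mode's bit in and the last UnGrantLock of that mode clears
 * it again.  The name and guard below are hypothetical.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_grantmask_roundtrip(void)
{
	LOCKMASK	grantMask = 0;

	grantMask |= LOCKBIT_ON(ShareLock); /* as in GrantLock */
	Assert((grantMask & LOCKBIT_ON(ShareLock)) != 0);

	grantMask &= LOCKBIT_OFF(ShareLock);	/* as in UnGrantLock */
	Assert(grantMask == 0);
}
#endif
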
1619 : /*
1620 : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1621 : * proclock and lock objects if possible, and call ProcLockWakeup if there
1622 : * are remaining requests and the caller says it's OK. (Normally, this
1623 : * should be called after UnGrantLock, and wakeupNeeded is the result from
1624 : * UnGrantLock.)
1625 : *
1626 : * The appropriate partition lock must be held at entry, and will be
1627 : * held at exit.
1628 : */
1629 : static void
1630 4926746 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1631 : LockMethod lockMethodTable, uint32 hashcode,
1632 : bool wakeupNeeded)
1633 : {
1634 : /*
1635 : * If this was my last hold on this lock, delete my entry in the proclock
1636 : * table.
1637 : */
1638 4926746 : if (proclock->holdMask == 0)
1639 : {
1640 : uint32 proclock_hashcode;
1641 :
1642 : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1643 4027730 : dlist_delete(&proclock->lockLink);
1644 4027730 : dlist_delete(&proclock->procLink);
1645 4027730 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1646 4027730 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1647 4027730 : &(proclock->tag),
1648 : proclock_hashcode,
1649 : HASH_REMOVE,
1650 : NULL))
1651 0 : elog(PANIC, "proclock table corrupted");
1652 : }
1653 :
1654 4926746 : if (lock->nRequested == 0)
1655 : {
1656 : /*
1657 : * The caller just released the last lock, so garbage-collect the lock
1658 : * object.
1659 : */
1660 : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1661 : Assert(dlist_is_empty(&lock->procLocks));
1662 4011122 : if (!hash_search_with_hash_value(LockMethodLockHash,
1663 4011122 : &(lock->tag),
1664 : hashcode,
1665 : HASH_REMOVE,
1666 : NULL))
1667 0 : elog(PANIC, "lock table corrupted");
1668 : }
1669 915624 : else if (wakeupNeeded)
1670 : {
1671 : /* There are waiters on this lock, so wake them up. */
1672 1942 : ProcLockWakeup(lockMethodTable, lock);
1673 : }
1674 4926746 : }
1675 :
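/*
 * A sketch of the canonical release sequence these routines form; both
 * LockRelease and LockRefindAndRelease (below) run it under the partition
 * LWLock.  It only restates calls that already exist in this file -- the
 * wrapper name and guard are hypothetical, not an additional API.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_release_under_partition_lock(LOCK *lock, PROCLOCK *proclock,
									LOCKMODE lockmode,
									LockMethod lockMethodTable,
									uint32 hashcode)
{
	LWLock	   *partitionLock = LockHashPartitionLock(hashcode);
	bool		wakeupNeeded;

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
	wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
	CleanUpLock(lock, proclock, lockMethodTable, hashcode, wakeupNeeded);
	LWLockRelease(partitionLock);
}
#endif
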
1676 : /*
1677 : * GrantLockLocal -- update the locallock data structures to show
1678 : * the lock request has been granted.
1679 : *
1680 : * We expect that LockAcquire made sure there is room to add a new
1681 : * ResourceOwner entry.
1682 : */
1683 : static void
1684 44878454 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1685 : {
1686 44878454 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1687 : int i;
1688 :
1689 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1690 : /* Count the total */
1691 44878454 : locallock->nLocks++;
1692 : /* Count the per-owner lock */
1693 45839570 : for (i = 0; i < locallock->numLockOwners; i++)
1694 : {
1695 3734602 : if (lockOwners[i].owner == owner)
1696 : {
1697 2773486 : lockOwners[i].nLocks++;
1698 2773486 : return;
1699 : }
1700 : }
1701 42104968 : lockOwners[i].owner = owner;
1702 42104968 : lockOwners[i].nLocks = 1;
1703 42104968 : locallock->numLockOwners++;
1704 42104968 : if (owner != NULL)
1705 41994104 : ResourceOwnerRememberLock(owner, locallock);
1706 :
1707 : /* Indicate that the lock is acquired for certain types of locks. */
1708 42104968 : CheckAndSetLockHeld(locallock, true);
1709 : }
1710 :
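/*
 * A sketch of the invariant GrantLockLocal maintains: nLocks is the grand
 * total of holds on this (locktag, mode) pair, and each lockOwners[] slot
 * carries one ResourceOwner's share (owner == NULL meaning session level).
 * Hypothetical helper name and guard.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_check_owner_counts(LOCALLOCK *locallock)
{
	int64		total = 0;

	for (int i = 0; i < locallock->numLockOwners; i++)
		total += locallock->lockOwners[i].nLocks;
	Assert(total == locallock->nLocks);
}
#endif
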
1711 : /*
1712 : * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1713 : * and arrange for error cleanup if it fails
1714 : */
1715 : static void
1716 522042 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1717 : {
1718 : Assert(StrongLockInProgress == NULL);
1719 : Assert(locallock->holdsStrongLockCount == false);
1720 :
1721 : /*
1722 : * Adding to a memory location is not atomic, so we take a spinlock to
1723 : * ensure we don't collide with someone else trying to bump the count at
1724 : * the same time.
1725 : *
1726 : * XXX: It might be worth considering using an atomic fetch-and-add
1727 : * instruction here, on architectures where that is supported.
1728 : */
1729 :
1730 522042 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1731 522042 : FastPathStrongRelationLocks->count[fasthashcode]++;
1732 522042 : locallock->holdsStrongLockCount = true;
1733 522042 : StrongLockInProgress = locallock;
1734 522042 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1735 522042 : }
1736 :
1737 : /*
1738 : * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1739 : * acquisition once it's no longer needed
1740 : */
1741 : static void
1742 4981580 : FinishStrongLockAcquire(void)
1743 : {
1744 4981580 : StrongLockInProgress = NULL;
1745 4981580 : }
1746 :
1747 : /*
1748 : * AbortStrongLockAcquire - undo strong lock state changes performed by
1749 : * BeginStrongLockAcquire.
1750 : */
1751 : void
1752 991608 : AbortStrongLockAcquire(void)
1753 : {
1754 : uint32 fasthashcode;
1755 991608 : LOCALLOCK *locallock = StrongLockInProgress;
1756 :
1757 991608 : if (locallock == NULL)
1758 991184 : return;
1759 :
1760 424 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1761 : Assert(locallock->holdsStrongLockCount == true);
1762 424 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1763 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1764 424 : FastPathStrongRelationLocks->count[fasthashcode]--;
1765 424 : locallock->holdsStrongLockCount = false;
1766 424 : StrongLockInProgress = NULL;
1767 424 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1768 : }
1769 :
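/*
 * A control-flow sketch of how the three strong-lock routines pair up in
 * LockAcquire: the counter is bumped before the shared-table insert (to
 * fence off new fast-path takers), success merely forgets the pending
 * cleanup, and failure undoes the bump.  The PG_TRY here is illustrative
 * only -- the real error path runs through LockErrorCleanup.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_strong_lock_protocol(LOCALLOCK *locallock, uint32 fasthashcode)
{
	BeginStrongLockAcquire(locallock, fasthashcode);
	PG_TRY();
	{
		/* ... set up LOCK and PROCLOCK entries, possibly sleeping ... */
		FinishStrongLockAcquire();
	}
	PG_CATCH();
	{
		AbortStrongLockAcquire();
		PG_RE_THROW();
	}
	PG_END_TRY();
}
#endif
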
1770 : /*
1771 : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1772 : * WaitOnLock on.
1773 : *
1774 : * proc.c needs this for the case where we are booted off the lock by
1775 : * timeout, but discover that someone granted us the lock anyway.
1776 : *
1777 : * We could just export GrantLockLocal, but that would require including
1778 : * resowner.h in lock.h, which creates circularity.
1779 : */
1780 : void
1781 1904 : GrantAwaitedLock(void)
1782 : {
1783 1904 : GrantLockLocal(awaitedLock, awaitedOwner);
1784 1904 : }
1785 :
1786 : /*
1787 : * MarkLockClear -- mark an acquired lock as "clear"
1788 : *
1789 : * This means that we know we have absorbed all sinval messages that other
1790 : * sessions generated before we acquired this lock, and so we can confidently
1791 : * assume we know about any catalog changes protected by this lock.
1792 : */
1793 : void
1794 40161636 : MarkLockClear(LOCALLOCK *locallock)
1795 : {
1796 : Assert(locallock->nLocks > 0);
1797 40161636 : locallock->lockCleared = true;
1798 40161636 : }
1799 :
1800 : /*
1801 : * WaitOnLock -- wait to acquire a lock
1802 : *
1803 : * Caller must have set MyProc->heldLocks to reflect locks already held
1804 : * on the lockable object by this process.
1805 : *
1806 : * The appropriate partition lock must be held at entry.
1807 : */
1808 : static void
1809 1996 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1810 : {
1811 1996 : LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1812 1996 : LockMethod lockMethodTable = LockMethods[lockmethodid];
1813 :
1814 : LOCK_PRINT("WaitOnLock: sleeping on lock",
1815 : locallock->lock, locallock->tag.mode);
1816 :
1817 : /* adjust the process title to indicate that it's waiting */
1818 1996 : set_ps_display_suffix("waiting");
1819 :
1820 1996 : awaitedLock = locallock;
1821 1996 : awaitedOwner = owner;
1822 :
1823 : /*
1824 : * NOTE: Think not to put any shared-state cleanup after the call to
1825 : * ProcSleep, in either the normal or failure path. The lock state must
1826 : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1827 : * waiting for the lock. This is necessary because of the possibility
1828 : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1829 : * grants us the lock, but before we've noticed it. Hence, after granting,
1830 : * the locktable state must fully reflect the fact that we own the lock;
1831 : * we can't do additional work on return.
1832 : *
1833 : * We can and do use a PG_TRY block to try to clean up after failure, but
1834 : * this still has a major limitation: elog(FATAL) can occur while waiting
1835 : * (eg, a "die" interrupt), and then control won't come back here. So all
1836 : * cleanup of essential state should happen in LockErrorCleanup, not here.
1837 : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1838 : * is unimportant if the process exits.
1839 : */
1840 1996 : PG_TRY();
1841 : {
1842 1996 : if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
1843 : {
1844 : /*
1845 : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1846 : * now.
1847 : */
1848 10 : awaitedLock = NULL;
1849 : LOCK_PRINT("WaitOnLock: aborting on lock",
1850 : locallock->lock, locallock->tag.mode);
1851 10 : LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1852 :
1853 : /*
1854 : * Now that we aren't holding the partition lock, we can give an
1855 : * error report including details about the detected deadlock.
1856 : */
1857 10 : DeadLockReport();
1858 : /* not reached */
1859 : }
1860 : }
1861 82 : PG_CATCH();
1862 : {
1863 : /* In this path, awaitedLock remains set until LockErrorCleanup */
1864 :
1865 : /* reset ps display to remove the suffix */
1866 82 : set_ps_display_remove_suffix();
1867 :
1868 : /* and propagate the error */
1869 82 : PG_RE_THROW();
1870 : }
1871 1904 : PG_END_TRY();
1872 :
1873 1904 : awaitedLock = NULL;
1874 :
1875 : /* reset ps display to remove the suffix */
1876 1904 : set_ps_display_remove_suffix();
1877 :
1878 : LOCK_PRINT("WaitOnLock: wakeup on lock",
1879 : locallock->lock, locallock->tag.mode);
1880 1904 : }
1881 :
1882 : /*
1883 : * Remove a proc from the wait-queue it is on (caller must know it is on one).
1884 : * This is only used when the proc has failed to get the lock, so we set its
1885 : * waitStatus to PROC_WAIT_STATUS_ERROR.
1886 : *
1887 : * Appropriate partition lock must be held by caller. Also, caller is
1888 : * responsible for signaling the proc if needed.
1889 : *
1890 : * NB: this does not clean up any locallock object that may exist for the lock.
1891 : */
1892 : void
1893 92 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1894 : {
1895 92 : LOCK *waitLock = proc->waitLock;
1896 92 : PROCLOCK *proclock = proc->waitProcLock;
1897 92 : LOCKMODE lockmode = proc->waitLockMode;
1898 92 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1899 :
1900 : /* Make sure proc is waiting */
1901 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1902 : Assert(proc->links.next != NULL);
1903 : Assert(waitLock);
1904 : Assert(!dclist_is_empty(&waitLock->waitProcs));
1905 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1906 :
1907 : /* Remove proc from lock's wait queue */
1908 92 : dclist_delete_from(&waitLock->waitProcs, &proc->links);
1909 :
1910 : /* Undo increments of request counts by waiting process */
1911 : Assert(waitLock->nRequested > 0);
1912 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
1913 92 : waitLock->nRequested--;
1914 : Assert(waitLock->requested[lockmode] > 0);
1915 92 : waitLock->requested[lockmode]--;
1916 : /* don't forget to clear waitMask bit if appropriate */
1917 92 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1918 90 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1919 :
1920 : /* Clean up the proc's own state, and pass it the ok/fail signal */
1921 92 : proc->waitLock = NULL;
1922 92 : proc->waitProcLock = NULL;
1923 92 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1924 :
1925 : /*
1926 : * Delete the proclock immediately if it represents no already-held locks.
1927 : * (This must happen now because if the owner of the lock decides to
1928 : * release it, and the requested/granted counts then go to zero,
1929 : * LockRelease expects there to be no remaining proclocks.) Then see if
1930 : * any other waiters for the lock can be woken up now.
1931 : */
1932 92 : CleanUpLock(waitLock, proclock,
1933 : LockMethods[lockmethodid], hashcode,
1934 : true);
1935 92 : }
1936 :
1937 : /*
1938 : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1939 : * Release a session lock if 'sessionLock' is true, else release a
1940 : * regular transaction lock.
1941 : *
1942 : * Side Effects: find any waiting processes that are now wakable,
1943 : * grant them their requested locks and awaken them.
1944 : * (We have to grant the lock here to avoid a race between
1945 : * the waking process and any new process to
1946 : * come along and request the lock.)
1947 : */
1948 : bool
1949 40207254 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1950 : {
1951 40207254 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1952 : LockMethod lockMethodTable;
1953 : LOCALLOCKTAG localtag;
1954 : LOCALLOCK *locallock;
1955 : LOCK *lock;
1956 : PROCLOCK *proclock;
1957 : LWLock *partitionLock;
1958 : bool wakeupNeeded;
1959 :
1960 40207254 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1961 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1962 40207254 : lockMethodTable = LockMethods[lockmethodid];
1963 40207254 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1964 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
1965 :
1966 : #ifdef LOCK_DEBUG
1967 : if (LOCK_DEBUG_ENABLED(locktag))
1968 : elog(LOG, "LockRelease: lock [%u,%u] %s",
1969 : locktag->locktag_field1, locktag->locktag_field2,
1970 : lockMethodTable->lockModeNames[lockmode]);
1971 : #endif
1972 :
1973 : /*
1974 : * Find the LOCALLOCK entry for this lock and lockmode
1975 : */
1976 40207254 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1977 40207254 : localtag.lock = *locktag;
1978 40207254 : localtag.mode = lockmode;
1979 :
1980 40207254 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1981 : &localtag,
1982 : HASH_FIND, NULL);
1983 :
1984 : /*
1985 : * let the caller print its own error message, too. Do not ereport(ERROR).
1986 : */
1987 40207254 : if (!locallock || locallock->nLocks <= 0)
1988 : {
1989 26 : elog(WARNING, "you don't own a lock of type %s",
1990 : lockMethodTable->lockModeNames[lockmode]);
1991 26 : return false;
1992 : }
1993 :
1994 : /*
1995 : * Decrease the count for the resource owner.
1996 : */
1997 : {
1998 40207228 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1999 : ResourceOwner owner;
2000 : int i;
2001 :
2002 : /* Identify owner for lock */
2003 40207228 : if (sessionLock)
2004 110844 : owner = NULL;
2005 : else
2006 40096384 : owner = CurrentResourceOwner;
2007 :
2008 40209654 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2009 : {
2010 40209630 : if (lockOwners[i].owner == owner)
2011 : {
2012 : Assert(lockOwners[i].nLocks > 0);
2013 40207204 : if (--lockOwners[i].nLocks == 0)
2014 : {
2015 39055860 : if (owner != NULL)
2016 38945064 : ResourceOwnerForgetLock(owner, locallock);
2017 : /* compact out unused slot */
2018 39055860 : locallock->numLockOwners--;
2019 39055860 : if (i < locallock->numLockOwners)
2020 90 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2021 : }
2022 40207204 : break;
2023 : }
2024 : }
2025 40207228 : if (i < 0)
2026 : {
2027 : /* don't release a lock belonging to another owner */
2028 24 : elog(WARNING, "you don't own a lock of type %s",
2029 : lockMethodTable->lockModeNames[lockmode]);
2030 24 : return false;
2031 : }
2032 : }
2033 :
2034 : /*
2035 : * Decrease the total local count. If we're still holding the lock, we're
2036 : * done.
2037 : */
2038 40207204 : locallock->nLocks--;
2039 :
2040 40207204 : if (locallock->nLocks > 0)
2041 1577838 : return true;
2042 :
2043 : /*
2044 : * At this point we can no longer suppose we are clear of invalidation
2045 : * messages related to this lock. Although we'll delete the LOCALLOCK
2046 : * object before any intentional return from this routine, it seems worth
2047 : * the trouble to explicitly reset lockCleared right now, just in case
2048 : * some error prevents us from deleting the LOCALLOCK.
2049 : */
2050 38629366 : locallock->lockCleared = false;
2051 :
2052 : /* Attempt fast release of any lock eligible for the fast path. */
2053 38629366 : if (EligibleForRelationFastPath(locktag, lockmode) &&
2054 36603282 : FastPathLocalUseCount > 0)
2055 : {
2056 : bool released;
2057 :
2058 : /*
2059 : * We might not find the lock here, even if we originally entered it
2060 : * here. Another backend may have moved it to the main table.
2061 : */
2062 35754286 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2063 35754286 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2064 : lockmode);
2065 35754286 : LWLockRelease(&MyProc->fpInfoLock);
2066 35754286 : if (released)
2067 : {
2068 35283264 : RemoveLocalLock(locallock);
2069 35283264 : return true;
2070 : }
2071 : }
2072 :
2073 : /*
2074 : * Otherwise we've got to mess with the shared lock table.
2075 : */
2076 3346102 : partitionLock = LockHashPartitionLock(locallock->hashcode);
2077 :
2078 3346102 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2079 :
2080 : /*
2081 : * Normally, we don't need to re-find the lock or proclock, since we kept
2082 : * their addresses in the locallock table, and they couldn't have been
2083 : * removed while we were holding a lock on them. But it's possible that
2084 : * the lock was taken fast-path and has since been moved to the main hash
2085 : * table by another backend, in which case we will need to look up the
2086 : * objects here. We assume the lock field is NULL if so.
2087 : */
2088 3346102 : lock = locallock->lock;
2089 3346102 : if (!lock)
2090 : {
2091 : PROCLOCKTAG proclocktag;
2092 :
2093 : Assert(EligibleForRelationFastPath(locktag, lockmode));
2094 8 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2095 : locktag,
2096 : locallock->hashcode,
2097 : HASH_FIND,
2098 : NULL);
2099 8 : if (!lock)
2100 0 : elog(ERROR, "failed to re-find shared lock object");
2101 8 : locallock->lock = lock;
2102 :
2103 8 : proclocktag.myLock = lock;
2104 8 : proclocktag.myProc = MyProc;
2105 8 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2106 : &proclocktag,
2107 : HASH_FIND,
2108 : NULL);
2109 8 : if (!locallock->proclock)
2110 0 : elog(ERROR, "failed to re-find shared proclock object");
2111 : }
2112 : LOCK_PRINT("LockRelease: found", lock, lockmode);
2113 3346102 : proclock = locallock->proclock;
2114 : PROCLOCK_PRINT("LockRelease: found", proclock);
2115 :
2116 : /*
2117 : * Double-check that we are actually holding a lock of the type we want to
2118 : * release.
2119 : */
2120 3346102 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2121 : {
2122 : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2123 0 : LWLockRelease(partitionLock);
2124 0 : elog(WARNING, "you don't own a lock of type %s",
2125 : lockMethodTable->lockModeNames[lockmode]);
2126 0 : RemoveLocalLock(locallock);
2127 0 : return false;
2128 : }
2129 :
2130 : /*
2131 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2132 : */
2133 3346102 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2134 :
2135 3346102 : CleanUpLock(lock, proclock,
2136 : lockMethodTable, locallock->hashcode,
2137 : wakeupNeeded);
2138 :
2139 3346102 : LWLockRelease(partitionLock);
2140 :
2141 3346102 : RemoveLocalLock(locallock);
2142 3346102 : return true;
2143 : }
2144 :
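/*
 * A caller's-eye sketch of LockRelease.  Real callers normally go through
 * lmgr.c wrappers such as LockRelation/UnlockRelation; the relation OID
 * below is a made-up example, and the name and guard are hypothetical.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_acquire_then_release(void)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, (Oid) 16384);	/* example OID */
	(void) LockAcquire(&tag, AccessShareLock, false, false);
	/* ... work with the relation ... */
	(void) LockRelease(&tag, AccessShareLock, false);
}
#endif
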
2145 : /*
2146 : * LockReleaseAll -- Release all locks of the specified lock method that
2147 : * are held by the current process.
2148 : *
2149 : * Well, not necessarily *all* locks. The available behaviors are:
2150 : * allLocks == true: release all locks including session locks.
2151 : * allLocks == false: release all non-session locks.
2152 : */
2153 : void
2154 1904918 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2155 : {
2156 : HASH_SEQ_STATUS status;
2157 : LockMethod lockMethodTable;
2158 : int i,
2159 : numLockModes;
2160 : LOCALLOCK *locallock;
2161 : LOCK *lock;
2162 : int partition;
2163 1904918 : bool have_fast_path_lwlock = false;
2164 :
2165 1904918 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2166 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2167 1904918 : lockMethodTable = LockMethods[lockmethodid];
2168 :
2169 : #ifdef LOCK_DEBUG
2170 : if (*(lockMethodTable->trace_flag))
2171 : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2172 : #endif
2173 :
2174 : /*
2175 : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2176 : * the only way that the lock we hold on our own VXID can ever get
2177 : * released: it is always and only released when a toplevel transaction
2178 : * ends.
2179 : */
2180 1904918 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2181 941538 : VirtualXactLockTableCleanup();
2182 :
2183 1904918 : numLockModes = lockMethodTable->numLockModes;
2184 :
2185 : /*
2186 : * First we run through the locallock table and get rid of unwanted
2187 : * entries, then we scan the process's proclocks and get rid of those. We
2188 : * do this separately because we may have multiple locallock entries
2189 : * pointing to the same proclock, and we daren't end up with any dangling
2190 : * pointers. Fast-path locks are cleaned up during the locallock table
2191 : * scan, though.
2192 : */
2193 1904918 : hash_seq_init(&status, LockMethodLocalHash);
2194 :
2195 4932748 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2196 : {
2197 : /*
2198 : * If the LOCALLOCK entry is unused, we must've run out of shared
2199 : * memory while trying to set up this lock. Just forget the local
2200 : * entry.
2201 : */
2202 3027830 : if (locallock->nLocks == 0)
2203 : {
2204 92 : RemoveLocalLock(locallock);
2205 92 : continue;
2206 : }
2207 :
2208 : /* Ignore items that are not of the lockmethod to be removed */
2209 3027738 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2210 113050 : continue;
2211 :
2212 : /*
2213 : * If we are asked to release all locks, we can just zap the entry.
2214 : * Otherwise, must scan to see if there are session locks. We assume
2215 : * there is at most one lockOwners entry for session locks.
2216 : */
2217 2914688 : if (!allLocks)
2218 : {
2219 2801374 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2220 :
2221 : /* If session lock is above array position 0, move it down to 0 */
2222 5680504 : for (i = 0; i < locallock->numLockOwners; i++)
2223 : {
2224 2879130 : if (lockOwners[i].owner == NULL)
2225 112884 : lockOwners[0] = lockOwners[i];
2226 : else
2227 2766246 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2228 : }
2229 :
2230 2801374 : if (locallock->numLockOwners > 0 &&
2231 2801374 : lockOwners[0].owner == NULL &&
2232 112884 : lockOwners[0].nLocks > 0)
2233 : {
2234 : /* Fix the locallock to show just the session locks */
2235 112884 : locallock->nLocks = lockOwners[0].nLocks;
2236 112884 : locallock->numLockOwners = 1;
2237 : /* We aren't deleting this locallock, so done */
2238 112884 : continue;
2239 : }
2240 : else
2241 2688490 : locallock->numLockOwners = 0;
2242 : }
2243 :
2244 : /*
2245 : * If the lock or proclock pointers are NULL, this lock was taken via
2246 : * the relation fast-path (and is not known to have been transferred).
2247 : */
2248 2801804 : if (locallock->proclock == NULL || locallock->lock == NULL)
2249 : {
2250 1167256 : LOCKMODE lockmode = locallock->tag.mode;
2251 : Oid relid;
2252 :
2253 : /* Verify that a fast-path lock is what we've got. */
2254 1167256 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2255 0 : elog(PANIC, "locallock table corrupted");
2256 :
2257 : /*
2258 : * If we don't currently hold the LWLock that protects our
2259 : * fast-path data structures, we must acquire it before attempting
2260 : * to release the lock via the fast-path. We will continue to
2261 : * hold the LWLock until we're done scanning the locallock table,
2262 : * unless we hit a transferred fast-path lock. (XXX is this
2263 : * really such a good idea? There could be a lot of entries ...)
2264 : */
2265 1167256 : if (!have_fast_path_lwlock)
2266 : {
2267 426960 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2268 426960 : have_fast_path_lwlock = true;
2269 : }
2270 :
2271 : /* Attempt fast-path release. */
2272 1167256 : relid = locallock->tag.lock.locktag_field2;
2273 1167256 : if (FastPathUnGrantRelationLock(relid, lockmode))
2274 : {
2275 1165290 : RemoveLocalLock(locallock);
2276 1165290 : continue;
2277 : }
2278 :
2279 : /*
2280 : * Our lock, originally taken via the fast path, has been
2281 : * transferred to the main lock table. That's going to require
2282 : * some extra work, so release our fast-path lock before starting.
2283 : */
2284 1966 : LWLockRelease(&MyProc->fpInfoLock);
2285 1966 : have_fast_path_lwlock = false;
2286 :
2287 : /*
2288 : * Now dump the lock. We haven't got a pointer to the LOCK or
2289 : * PROCLOCK in this case, so we have to handle this a bit
2290 : * differently than a normal lock release. Unfortunately, this
2291 : * requires an extra LWLock acquire-and-release cycle on the
2292 : * partitionLock, but hopefully it shouldn't happen often.
2293 : */
2294 1966 : LockRefindAndRelease(lockMethodTable, MyProc,
2295 : &locallock->tag.lock, lockmode, false);
2296 1966 : RemoveLocalLock(locallock);
2297 1966 : continue;
2298 : }
2299 :
2300 : /* Mark the proclock to show we need to release this lockmode */
2301 1634548 : if (locallock->nLocks > 0)
2302 1634548 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2303 :
2304 : /* And remove the locallock hashtable entry */
2305 1634548 : RemoveLocalLock(locallock);
2306 : }
2307 :
2308 : /* Done with the fast-path data structures */
2309 1904918 : if (have_fast_path_lwlock)
2310 424994 : LWLockRelease(&MyProc->fpInfoLock);
2311 :
2312 : /*
2313 : * Now, scan each lock partition separately.
2314 : */
2315 32383606 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2316 : {
2317 : LWLock *partitionLock;
2318 30478688 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2319 : dlist_mutable_iter proclock_iter;
2320 :
2321 30478688 : partitionLock = LockHashPartitionLockByIndex(partition);
2322 :
2323 : /*
2324 : * If the proclock list for this partition is empty, we can skip
2325 : * acquiring the partition lock. This optimization is trickier than
2326 : * it looks, because another backend could be in process of adding
2327 : * something to our proclock list due to promoting one of our
2328 : * fast-path locks. However, any such lock must be one that we
2329 : * decided not to delete above, so it's okay to skip it again now;
2330 : * we'd just decide not to delete it again. We must, however, be
2331 : * careful to re-fetch the list header once we've acquired the
2332 : * partition lock, to be sure we have a valid, up-to-date pointer.
2333 : * (There is probably no significant risk if pointer fetch/store is
2334 : * atomic, but we don't wish to assume that.)
2335 : *
2336 : * XXX This argument assumes that the locallock table correctly
2337 : * represents all of our fast-path locks. While allLocks mode
2338 : * guarantees to clean up all of our normal locks regardless of the
2339 : * locallock situation, we lose that guarantee for fast-path locks.
2340 : * This is not ideal.
2341 : */
2342 30478688 : if (dlist_is_empty(procLocks))
2343 28999372 : continue; /* needn't examine this partition */
2344 :
2345 1479316 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2346 :
2347 3280442 : dlist_foreach_modify(proclock_iter, procLocks)
2348 : {
2349 1801126 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2350 1801126 : bool wakeupNeeded = false;
2351 :
2352 : Assert(proclock->tag.myProc == MyProc);
2353 :
2354 1801126 : lock = proclock->tag.myLock;
2355 :
2356 : /* Ignore items that are not of the lockmethod to be removed */
2357 1801126 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2358 113044 : continue;
2359 :
2360 : /*
2361 : * In allLocks mode, force release of all locks even if locallock
2362 : * table had problems
2363 : */
2364 1688082 : if (allLocks)
2365 72422 : proclock->releaseMask = proclock->holdMask;
2366 : else
2367 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2368 :
2369 : /*
2370 : * Ignore items that have nothing to be released, unless they have
2371 : * holdMask == 0 and are therefore recyclable
2372 : */
2373 1688082 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2374 111698 : continue;
2375 :
2376 : PROCLOCK_PRINT("LockReleaseAll", proclock);
2377 : LOCK_PRINT("LockReleaseAll", lock, 0);
2378 : Assert(lock->nRequested >= 0);
2379 : Assert(lock->nGranted >= 0);
2380 : Assert(lock->nGranted <= lock->nRequested);
2381 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2382 :
2383 : /*
2384 : * Release the previously-marked lock modes
2385 : */
2386 14187456 : for (i = 1; i <= numLockModes; i++)
2387 : {
2388 12611072 : if (proclock->releaseMask & LOCKBIT_ON(i))
2389 1634548 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2390 : lockMethodTable);
2391 : }
2392 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2393 : Assert(lock->nGranted <= lock->nRequested);
2394 : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2395 :
2396 1576384 : proclock->releaseMask = 0;
2397 :
2398 : /* CleanUpLock will wake up waiters if needed. */
2399 1576384 : CleanUpLock(lock, proclock,
2400 : lockMethodTable,
2401 1576384 : LockTagHashCode(&lock->tag),
2402 : wakeupNeeded);
2403 : } /* loop over PROCLOCKs within this partition */
2404 :
2405 1479316 : LWLockRelease(partitionLock);
2406 : } /* loop over partitions */
2407 :
2408 : #ifdef LOCK_DEBUG
2409 : if (*(lockMethodTable->trace_flag))
2410 : elog(LOG, "LockReleaseAll done");
2411 : #endif
2412 1904918 : }
2413 :
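/*
 * A sketch of the two behaviors in caller terms: at transaction end, proc.c
 * releases only transaction-scope locks, while a backend exiting entirely
 * passes allLocks = true so session locks (e.g. advisory locks) go too.
 * Running both calls back to back as below is purely illustrative.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_release_all_variants(void)
{
	/* end of transaction: session locks survive */
	LockReleaseAll(DEFAULT_LOCKMETHOD, false);

	/* backend shutdown: session locks are dropped as well */
	LockReleaseAll(USER_LOCKMETHOD, true);
}
#endif
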
2414 : /*
2415 : * LockReleaseSession -- Release all session locks of the specified lock method
2416 : * that are held by the current process.
2417 : */
2418 : void
2419 238 : LockReleaseSession(LOCKMETHODID lockmethodid)
2420 : {
2421 : HASH_SEQ_STATUS status;
2422 : LOCALLOCK *locallock;
2423 :
2424 238 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2425 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2426 :
2427 238 : hash_seq_init(&status, LockMethodLocalHash);
2428 :
2429 452 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2430 : {
2431 : /* Ignore items that are not of the specified lock method */
2432 214 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2433 20 : continue;
2434 :
2435 194 : ReleaseLockIfHeld(locallock, true);
2436 : }
2437 238 : }
2438 :
2439 : /*
2440 : * LockReleaseCurrentOwner
2441 : * Release all locks belonging to CurrentResourceOwner
2442 : *
2443 : * If the caller knows what those locks are, it can pass them as an array.
2444 : * That speeds up the call significantly when a lot of locks are held.
2445 : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2446 : * table to find them.
2447 : */
2448 : void
2449 9142 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2450 : {
2451 9142 : if (locallocks == NULL)
2452 : {
2453 : HASH_SEQ_STATUS status;
2454 : LOCALLOCK *locallock;
2455 :
2456 8 : hash_seq_init(&status, LockMethodLocalHash);
2457 :
2458 530 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2459 522 : ReleaseLockIfHeld(locallock, false);
2460 : }
2461 : else
2462 : {
2463 : int i;
2464 :
2465 13996 : for (i = nlocks - 1; i >= 0; i--)
2466 4862 : ReleaseLockIfHeld(locallocks[i], false);
2467 : }
2468 9142 : }
2469 :
2470 : /*
2471 : * ReleaseLockIfHeld
2472 : * Release any session-level locks on this lockable object if sessionLock
2473 : * is true; else, release any locks held by CurrentResourceOwner.
2474 : *
2475 : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2476 : * locks), but without refactoring LockRelease() we cannot support releasing
2477 : * locks belonging to resource owners other than CurrentResourceOwner.
2478 : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2479 : * do a hashtable lookup of the locallock, too. However, currently this
2480 : * function isn't used heavily enough to justify refactoring for its
2481 : * convenience.
2482 : */
2483 : static void
2484 5578 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2485 : {
2486 : ResourceOwner owner;
2487 : LOCALLOCKOWNER *lockOwners;
2488 : int i;
2489 :
2490 : /* Identify owner for lock (must match LockRelease!) */
2491 5578 : if (sessionLock)
2492 194 : owner = NULL;
2493 : else
2494 5384 : owner = CurrentResourceOwner;
2495 :
2496 : /* Scan to see if there are any locks belonging to the target owner */
2497 5578 : lockOwners = locallock->lockOwners;
2498 5958 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2499 : {
2500 5578 : if (lockOwners[i].owner == owner)
2501 : {
2502 : Assert(lockOwners[i].nLocks > 0);
2503 5198 : if (lockOwners[i].nLocks < locallock->nLocks)
2504 : {
2505 : /*
2506 : * We will still hold this lock after forgetting this
2507 : * ResourceOwner.
2508 : */
2509 1338 : locallock->nLocks -= lockOwners[i].nLocks;
2510 : /* compact out unused slot */
2511 1338 : locallock->numLockOwners--;
2512 1338 : if (owner != NULL)
2513 1338 : ResourceOwnerForgetLock(owner, locallock);
2514 1338 : if (i < locallock->numLockOwners)
2515 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2516 : }
2517 : else
2518 : {
2519 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2520 : /* We want to call LockRelease just once */
2521 3860 : lockOwners[i].nLocks = 1;
2522 3860 : locallock->nLocks = 1;
2523 3860 : if (!LockRelease(&locallock->tag.lock,
2524 : locallock->tag.mode,
2525 : sessionLock))
2526 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2527 : }
2528 5198 : break;
2529 : }
2530 : }
2531 5578 : }
2532 :
2533 : /*
2534 : * LockReassignCurrentOwner
2535 : * Reassign all locks belonging to CurrentResourceOwner to belong
2536 : * to its parent resource owner.
2537 : *
2538 : * If the caller knows what those locks are, it can pass them as an array.
2539 : * That speeds up the call significantly when a lot of locks are held
2540 : * (e.g. pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2541 : * and we'll traverse through our hash table to find them.
2542 : */
2543 : void
2544 935586 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2545 : {
2546 935586 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2547 :
2548 : Assert(parent != NULL);
2549 :
2550 935586 : if (locallocks == NULL)
2551 : {
2552 : HASH_SEQ_STATUS status;
2553 : LOCALLOCK *locallock;
2554 :
2555 6742 : hash_seq_init(&status, LockMethodLocalHash);
2556 :
2557 159574 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2558 152832 : LockReassignOwner(locallock, parent);
2559 : }
2560 : else
2561 : {
2562 : int i;
2563 :
2564 2081512 : for (i = nlocks - 1; i >= 0; i--)
2565 1152668 : LockReassignOwner(locallocks[i], parent);
2566 : }
2567 935586 : }
2568 :
2569 : /*
2570 : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2571 : * CurrentResourceOwner to its parent.
2572 : */
2573 : static void
2574 1305500 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2575 : {
2576 : LOCALLOCKOWNER *lockOwners;
2577 : int i;
2578 1305500 : int ic = -1;
2579 1305500 : int ip = -1;
2580 :
2581 : /*
2582 : * Scan to see if there are any locks belonging to current owner or its
2583 : * parent
2584 : */
2585 1305500 : lockOwners = locallock->lockOwners;
2586 2859280 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2587 : {
2588 1553780 : if (lockOwners[i].owner == CurrentResourceOwner)
2589 1287690 : ic = i;
2590 266090 : else if (lockOwners[i].owner == parent)
2591 183082 : ip = i;
2592 : }
2593 :
2594 1305500 : if (ic < 0)
2595 17810 : return; /* no current locks */
2596 :
2597 1287690 : if (ip < 0)
2598 : {
2599 : /* Parent has no slot, so just give it the child's slot */
2600 1122356 : lockOwners[ic].owner = parent;
2601 1122356 : ResourceOwnerRememberLock(parent, locallock);
2602 : }
2603 : else
2604 : {
2605 : /* Merge child's count with parent's */
2606 165334 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2607 : /* compact out unused slot */
2608 165334 : locallock->numLockOwners--;
2609 165334 : if (ic < locallock->numLockOwners)
2610 1364 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2611 : }
2612 1287690 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2613 : }
2614 :
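/*
 * A sketch of the subtransaction-commit flow these two routines serve:
 * resowner.c either hands over the exact LOCALLOCK array it has been
 * remembering (fast) or NULL to force a scan of the whole locallock hash
 * table.  Hypothetical wrapper name; the cutoff between the two paths lives
 * in resowner.c, not here.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_reassign_on_subcommit(LOCALLOCK **locallocks, int nlocks)
{
	if (locallocks != NULL)
		LockReassignCurrentOwner(locallocks, nlocks);	/* targeted */
	else
		LockReassignCurrentOwner(NULL, 0);	/* full hash scan */
}
#endif
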
2615 : /*
2616 : * FastPathGrantRelationLock
2617 : * Grant lock using per-backend fast-path array, if there is space.
2618 : */
2619 : static bool
2620 36451282 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2621 : {
2622 : uint32 f;
2623 36451282 : uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2624 :
2625 : /* Scan for existing entry for this relid, remembering empty slot. */
2626 614182320 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2627 : {
2628 579568016 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2629 493832568 : unused_slot = f;
2630 85735448 : else if (MyProc->fpRelId[f] == relid)
2631 : {
2632 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2633 1836978 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2634 1836978 : return true;
2635 : }
2636 : }
2637 :
2638 : /* If no existing entry, use any empty slot. */
2639 34614304 : if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2640 : {
2641 34614304 : MyProc->fpRelId[unused_slot] = relid;
2642 34614304 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2643 34614304 : ++FastPathLocalUseCount;
2644 34614304 : return true;
2645 : }
2646 :
2647 : /* No existing entry, and no empty slot. */
2648 0 : return false;
2649 : }
2650 :
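/*
 * A sketch of the per-backend fast-path layout these routines manipulate:
 * each of the FP_LOCK_SLOTS_PER_BACKEND slots pairs one relation OID with
 * FAST_PATH_BITS_PER_SLOT mode bits covering the weak relation lock modes.
 * The probe uses only macros defined earlier in this file; the function name
 * and guard are hypothetical.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static bool
sketch_fast_path_holds(Oid relid, LOCKMODE lockmode)
{
	for (uint32 f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
	{
		if (MyProc->fpRelId[f] == relid &&
			FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
			return true;		/* held via our own fast-path array */
	}
	return false;
}
#endif
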
2651 : /*
2652 : * FastPathUnGrantRelationLock
2653 : * Release fast-path lock, if present. Update backend-private local
2654 : * use count, while we're at it.
2655 : */
2656 : static bool
2657 36921542 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2658 : {
2659 : uint32 f;
2660 36921542 : bool result = false;
2661 :
2662 36921542 : FastPathLocalUseCount = 0;
2663 627666214 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2664 : {
2665 590744672 : if (MyProc->fpRelId[f] == relid
2666 44757302 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2667 : {
2668 : Assert(!result);
2669 36448554 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2670 36448554 : result = true;
2671 : /* we continue iterating so as to update FastPathLocalUseCount */
2672 : }
2673 590744672 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2674 95618158 : ++FastPathLocalUseCount;
2675 : }
2676 36921542 : return result;
2677 : }
2678 :
2679 : /*
2680 : * FastPathTransferRelationLocks
2681 : * Transfer locks matching the given lock tag from per-backend fast-path
2682 : * arrays to the shared hash table.
2683 : *
2684 : * Returns true if successful, false if ran out of shared memory.
2685 : */
2686 : static bool
2687 522042 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2688 : uint32 hashcode)
2689 : {
2690 522042 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
2691 522042 : Oid relid = locktag->locktag_field2;
2692 : uint32 i;
2693 :
2694 : /*
2695 : * Every PGPROC that can potentially hold a fast-path lock is present in
2696 : * ProcGlobal->allProcs. Prepared transactions are not, but that is fine:
2697 : * any fast-path locks they once held were already transferred to the
2698 : * main lock table when the transaction was prepared.
2699 : */
2700 59533092 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2701 : {
2702 59011050 : PGPROC *proc = &ProcGlobal->allProcs[i];
2703 : uint32 f;
2704 :
2705 59011050 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2706 :
2707 : /*
2708 : * If the target backend isn't referencing the same database as the
2709 : * lock, then we needn't examine the individual relation IDs at all;
2710 : * none of them can be relevant.
2711 : *
2712 : * proc->databaseId is set at backend startup time and never changes
2713 : * thereafter, so it might be safe to perform this test before
2714 : * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2715 : * assume that if the target backend holds any fast-path locks, it
2716 : * must have performed a memory-fencing operation (in particular, an
2717 : * LWLock acquisition) since setting proc->databaseId. However, it's
2718 : * less clear that our backend is certain to have performed a memory
2719 : * fencing operation since the other backend set proc->databaseId. So
2720 : * for now, we test it after acquiring the LWLock just to be safe.
2721 : */
2722 59011050 : if (proc->databaseId != locktag->locktag_field1)
2723 : {
2724 44202466 : LWLockRelease(&proc->fpInfoLock);
2725 44202466 : continue;
2726 : }
2727 :
2728 251743582 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2729 : {
2730 : uint32 lockmode;
2731 :
2732 : /* Look for an allocated slot matching the given relid. */
2733 236936876 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2734 236934998 : continue;
2735 :
2736 : /* Find or create lock object. */
2737 1878 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2738 7512 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2739 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2740 5634 : ++lockmode)
2741 : {
2742 : PROCLOCK *proclock;
2743 :
2744 5634 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2745 3644 : continue;
2746 1990 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2747 : hashcode, lockmode);
2748 1990 : if (!proclock)
2749 : {
2750 0 : LWLockRelease(partitionLock);
2751 0 : LWLockRelease(&proc->fpInfoLock);
2752 0 : return false;
2753 : }
2754 1990 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2755 1990 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2756 : }
2757 1878 : LWLockRelease(partitionLock);
2758 :
2759 : /* No need to examine remaining slots. */
2760 1878 : break;
2761 : }
2762 14808584 : LWLockRelease(&proc->fpInfoLock);
2763 : }
2764 522042 : return true;
2765 : }
2766 :
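/*
 * A sketch of where the transfer fits in a strong-lock acquisition:
 * LockAcquire bumps the strong-lock counter first, fencing off new fast-path
 * acquisitions of this relation, and only then pulls existing fast-path
 * entries into the shared table.  Simplified ordering only; hypothetical
 * wrapper name and guard.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_strong_acquire_ordering(LOCALLOCK *locallock, LockMethod table,
							   const LOCKTAG *locktag, uint32 hashcode)
{
	uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

	BeginStrongLockAcquire(locallock, fasthashcode);
	if (!FastPathTransferRelationLocks(table, locktag, hashcode))
	{
		AbortStrongLockAcquire();
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory")));
	}
}
#endif
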
2767 : /*
2768 : * FastPathGetRelationLockEntry
2769 : * Return the PROCLOCK for a lock originally taken via the fast-path,
2770 : * transferring it to the primary lock table if necessary.
2771 : *
2772 : * Note: caller takes care of updating the locallock object.
2773 : */
2774 : static PROCLOCK *
2775 754 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2776 : {
2777 754 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2778 754 : LOCKTAG *locktag = &locallock->tag.lock;
2779 754 : PROCLOCK *proclock = NULL;
2780 754 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2781 754 : Oid relid = locktag->locktag_field2;
2782 : uint32 f;
2783 :
2784 754 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2785 :
2786 12040 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2787 : {
2788 : uint32 lockmode;
2789 :
2790 : /* Look for an allocated slot matching the given relid. */
2791 12024 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2792 11286 : continue;
2793 :
2794 : /* If we don't have a lock of the given mode, forget it! */
2795 738 : lockmode = locallock->tag.mode;
2796 738 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2797 0 : break;
2798 :
2799 : /* Find or create lock object. */
2800 738 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2801 :
2802 738 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2803 : locallock->hashcode, lockmode);
2804 738 : if (!proclock)
2805 : {
2806 0 : LWLockRelease(partitionLock);
2807 0 : LWLockRelease(&MyProc->fpInfoLock);
2808 0 : ereport(ERROR,
2809 : (errcode(ERRCODE_OUT_OF_MEMORY),
2810 : errmsg("out of shared memory"),
2811 : errhint("You might need to increase max_locks_per_transaction.")));
2812 : }
2813 738 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2814 738 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2815 :
2816 738 : LWLockRelease(partitionLock);
2817 :
2818 : /* No need to examine remaining slots. */
2819 738 : break;
2820 : }
2821 :
2822 754 : LWLockRelease(&MyProc->fpInfoLock);
2823 :
2824 : /* Lock may have already been transferred by some other backend. */
2825 754 : if (proclock == NULL)
2826 : {
2827 : LOCK *lock;
2828 : PROCLOCKTAG proclocktag;
2829 : uint32 proclock_hashcode;
2830 :
2831 16 : LWLockAcquire(partitionLock, LW_SHARED);
2832 :
2833 16 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2834 : locktag,
2835 : locallock->hashcode,
2836 : HASH_FIND,
2837 : NULL);
2838 16 : if (!lock)
2839 0 : elog(ERROR, "failed to re-find shared lock object");
2840 :
2841 16 : proclocktag.myLock = lock;
2842 16 : proclocktag.myProc = MyProc;
2843 :
2844 16 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2845 : proclock = (PROCLOCK *)
2846 16 : hash_search_with_hash_value(LockMethodProcLockHash,
2847 : &proclocktag,
2848 : proclock_hashcode,
2849 : HASH_FIND,
2850 : NULL);
2851 16 : if (!proclock)
2852 0 : elog(ERROR, "failed to re-find shared proclock object");
2853 16 : LWLockRelease(partitionLock);
2854 : }
2855 :
2856 754 : return proclock;
2857 : }
2858 :
2859 : /*
2860 : * GetLockConflicts
2861 : * Get an array of VirtualTransactionIds of xacts currently holding locks
2862 : * that would conflict with the specified lock/lockmode.
2863 : * xacts merely awaiting such a lock are NOT reported.
2864 : *
2865 : * The result array is palloc'd and is terminated with an invalid VXID.
2866 : * *countp, if not null, is updated to the number of items set.
2867 : *
2868 : * Of course, the result could be out of date by the time it's returned, so
2869 : * use of this function has to be thought about carefully. Similarly, a
2870 : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
2871 : * lock it holds. Existing callers don't care about a locker after that
2872 : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
2873 : * pg_xact updates and before releasing locks.
2874 : *
2875 : * Note we never include the current xact's vxid in the result array,
2876 : * since an xact never blocks itself.
2877 : */
2878 : VirtualTransactionId *
2879 2250 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2880 : {
2881 : static VirtualTransactionId *vxids;
2882 2250 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2883 : LockMethod lockMethodTable;
2884 : LOCK *lock;
2885 : LOCKMASK conflictMask;
2886 : dlist_iter proclock_iter;
2887 : PROCLOCK *proclock;
2888 : uint32 hashcode;
2889 : LWLock *partitionLock;
2890 2250 : int count = 0;
2891 2250 : int fast_count = 0;
2892 :
2893 2250 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2894 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2895 2250 : lockMethodTable = LockMethods[lockmethodid];
2896 2250 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2897 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2898 :
2899 : /*
2900 : * Allocate memory to store results, and fill with InvalidVXID. We only
2901 : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2902 : * In hot standby, allocate the results array just once, in TopMemoryContext.
2903 : */
2904 2250 : if (InHotStandby)
2905 : {
2906 8 : if (vxids == NULL)
2907 2 : vxids = (VirtualTransactionId *)
2908 2 : MemoryContextAlloc(TopMemoryContext,
2909 : sizeof(VirtualTransactionId) *
2910 2 : (MaxBackends + max_prepared_xacts + 1));
2911 : }
2912 : else
2913 2242 : vxids = (VirtualTransactionId *)
2914 2242 : palloc0(sizeof(VirtualTransactionId) *
2915 2242 : (MaxBackends + max_prepared_xacts + 1));
2916 :
2917 : /* Compute hash code and partition lock, and look up conflicting modes. */
2918 2250 : hashcode = LockTagHashCode(locktag);
2919 2250 : partitionLock = LockHashPartitionLock(hashcode);
2920 2250 : conflictMask = lockMethodTable->conflictTab[lockmode];
2921 :
2922 : /*
2923 : * Fast path locks might not have been entered in the primary lock table.
2924 : * If the lock we're dealing with could conflict with such a lock, we must
2925 : * examine each backend's fast-path array for conflicts.
2926 : */
2927 2250 : if (ConflictsWithRelationFastPath(locktag, lockmode))
2928 : {
2929 : int i;
2930 2250 : Oid relid = locktag->locktag_field2;
2931 : VirtualTransactionId vxid;
2932 :
2933 : /*
2934 : * Iterate over relevant PGPROCs. Anything held by a prepared
2935 : * transaction will have been transferred to the primary lock table,
2936 : * so we need not worry about those. This is all a bit fuzzy, because
2937 : * new locks could be taken after we've visited a particular
2938 : * partition, but the callers had better be prepared to deal with that
2939 : * anyway, since the locks could equally well be taken between the
2940 : * time we return the value and the time the caller does something
2941 : * with it.
2942 : */
2943 253070 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2944 : {
2945 250820 : PGPROC *proc = &ProcGlobal->allProcs[i];
2946 : uint32 f;
2947 :
2948 : /* A backend never blocks itself */
2949 250820 : if (proc == MyProc)
2950 2250 : continue;
2951 :
2952 248570 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2953 :
2954 : /*
2955 : * If the target backend isn't referencing the same database as
2956 : * the lock, then we needn't examine the individual relation IDs
2957 : * at all; none of them can be relevant.
2958 : *
2959 : * See FastPathTransferRelationLocks() for discussion of why we do
2960 : * this test after acquiring the lock.
2961 : */
2962 248570 : if (proc->databaseId != locktag->locktag_field1)
2963 : {
2964 103238 : LWLockRelease(&proc->fpInfoLock);
2965 103238 : continue;
2966 : }
2967 :
2968 2470176 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2969 : {
2970 : uint32 lockmask;
2971 :
2972 : /* Look for an allocated slot matching the given relid. */
2973 2325216 : if (relid != proc->fpRelId[f])
2974 2323476 : continue;
2975 1740 : lockmask = FAST_PATH_GET_BITS(proc, f);
2976 1740 : if (!lockmask)
2977 1368 : continue;
2978 372 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2979 :
2980 : /*
2981 : * There can only be one entry per relation, so if we found it
2982 : * and it doesn't conflict, we can skip the rest of the slots.
2983 : */
2984 372 : if ((lockmask & conflictMask) == 0)
2985 10 : break;
2986 :
2987 : /* Conflict! */
2988 362 : GET_VXID_FROM_PGPROC(vxid, *proc);
2989 :
2990 362 : if (VirtualTransactionIdIsValid(vxid))
2991 362 : vxids[count++] = vxid;
2992 : /* else, xact already committed or aborted */
2993 :
2994 : /* No need to examine remaining slots. */
2995 362 : break;
2996 : }
2997 :
2998 145332 : LWLockRelease(&proc->fpInfoLock);
2999 : }
3000 : }
3001 :
3002 : /* Remember how many fast-path conflicts we found. */
3003 2250 : fast_count = count;
3004 :
3005 : /*
3006 : * Look up the lock object matching the tag.
3007 : */
3008 2250 : LWLockAcquire(partitionLock, LW_SHARED);
3009 :
3010 2250 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3011 : locktag,
3012 : hashcode,
3013 : HASH_FIND,
3014 : NULL);
3015 2250 : if (!lock)
3016 : {
3017 : /*
3018 : * If the lock object doesn't exist, there is nothing holding a lock
3019 : * on this lockable object.
3020 : */
3021 140 : LWLockRelease(partitionLock);
3022 140 : vxids[count].backendId = InvalidBackendId;
3023 140 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3024 140 : if (countp)
3025 0 : *countp = count;
3026 140 : return vxids;
3027 : }
3028 :
3029 : /*
3030 : * Examine each existing holder (or awaiter) of the lock.
3031 : */
3032 4254 : dlist_foreach(proclock_iter, &lock->procLocks)
3033 : {
3034 2144 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3035 :
3036 2144 : if (conflictMask & proclock->holdMask)
3037 : {
3038 2136 : PGPROC *proc = proclock->tag.myProc;
3039 :
3040 : /* A backend never blocks itself */
3041 2136 : if (proc != MyProc)
3042 : {
3043 : VirtualTransactionId vxid;
3044 :
3045 34 : GET_VXID_FROM_PGPROC(vxid, *proc);
3046 :
3047 34 : if (VirtualTransactionIdIsValid(vxid))
3048 : {
3049 : int i;
3050 :
3051 : /* Avoid duplicate entries. */
3052 56 : for (i = 0; i < fast_count; ++i)
3053 22 : if (VirtualTransactionIdEquals(vxids[i], vxid))
3054 0 : break;
3055 34 : if (i >= fast_count)
3056 34 : vxids[count++] = vxid;
3057 : }
3058 : /* else, xact already committed or aborted */
3059 : }
3060 : }
3061 : }
3062 :
3063 2110 : LWLockRelease(partitionLock);
3064 :
3065 2110 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3066 0 : elog(PANIC, "too many conflicting locks found");
3067 :
3068 2110 : vxids[count].backendId = InvalidBackendId;
3069 2110 : vxids[count].localTransactionId = InvalidLocalTransactionId;
3070 2110 : if (countp)
3071 2104 : *countp = count;
3072 2110 : return vxids;
3073 : }
3074 :
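/*
 * A sketch of consuming the result: the array is terminated by an invalid
 * VXID, so callers that pass countp = NULL can simply walk to the
 * terminator, as the WaitForLockers-style callers in lmgr.c do.  The
 * function name and guard are hypothetical.
 */
#ifdef LOCK_SKETCH_EXAMPLES
static void
sketch_scan_lock_conflicts(const LOCKTAG *locktag)
{
	VirtualTransactionId *vxids;

	vxids = GetLockConflicts(locktag, AccessExclusiveLock, NULL);
	for (int i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
	{
		/* e.g. wait for, or cancel, each conflicting transaction */
	}
}
#endif
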
3075 : /*
3076 : * Find a lock in the shared lock table and release it. It is the caller's
3077 : * responsibility to verify that this is a sane thing to do. (For example, it
3078 : * would be bad to release a lock here if there might still be a LOCALLOCK
3079 : * object with pointers to it.)
3080 : *
3081 : * We currently use this in two situations: first, to release locks held by
3082 : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3083 : * to release locks taken via the fast-path, transferred to the main hash
3084 : * table, and then released (see LockReleaseAll).
3085 : */
3086 : static void
3087 4168 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3088 : LOCKTAG *locktag, LOCKMODE lockmode,
3089 : bool decrement_strong_lock_count)
3090 : {
3091 : LOCK *lock;
3092 : PROCLOCK *proclock;
3093 : PROCLOCKTAG proclocktag;
3094 : uint32 hashcode;
3095 : uint32 proclock_hashcode;
3096 : LWLock *partitionLock;
3097 : bool wakeupNeeded;
3098 :
3099 4168 : hashcode = LockTagHashCode(locktag);
3100 4168 : partitionLock = LockHashPartitionLock(hashcode);
3101 :
3102 4168 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3103 :
3104 : /*
3105 : * Re-find the lock object (it had better be there).
3106 : */
3107 4168 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3108 : locktag,
3109 : hashcode,
3110 : HASH_FIND,
3111 : NULL);
3112 4168 : if (!lock)
3113 0 : elog(PANIC, "failed to re-find shared lock object");
3114 :
3115 : /*
3116 : * Re-find the proclock object (ditto).
3117 : */
3118 4168 : proclocktag.myLock = lock;
3119 4168 : proclocktag.myProc = proc;
3120 :
3121 4168 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3122 :
3123 4168 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3124 : &proclocktag,
3125 : proclock_hashcode,
3126 : HASH_FIND,
3127 : NULL);
3128 4168 : if (!proclock)
3129 0 : elog(PANIC, "failed to re-find shared proclock object");
3130 :
3131 : /*
3132 : * Double-check that we are actually holding a lock of the type we want to
3133 : * release.
3134 : */
3135 4168 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3136 : {
3137 : PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3138 0 : LWLockRelease(partitionLock);
3139 0 : elog(WARNING, "you don't own a lock of type %s",
3140 : lockMethodTable->lockModeNames[lockmode]);
3141 0 : return;
3142 : }
3143 :
3144 : /*
3145 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3146 : */
3147 4168 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3148 :
3149 4168 : CleanUpLock(lock, proclock,
3150 : lockMethodTable, hashcode,
3151 : wakeupNeeded);
3152 :
3153 4168 : LWLockRelease(partitionLock);
3154 :
3155 : /*
3156 : * Decrement strong lock count. This logic is needed only for 2PC.
3157 : */
3158 4168 : if (decrement_strong_lock_count
3159 1706 : && ConflictsWithRelationFastPath(locktag, lockmode))
3160 : {
3161 128 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3162 :
3163 128 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3164 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3165 128 : FastPathStrongRelationLocks->count[fasthashcode]--;
3166 128 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3167 : }
3168 : }
3169 :
3170 : /*
3171 : * CheckForSessionAndXactLocks
3172 : * Check to see if transaction holds both session-level and xact-level
3173 : * locks on the same object; if so, throw an error.
3174 : *
3175 : * If we have both session- and transaction-level locks on the same object,
3176 : * PREPARE TRANSACTION must fail. This should never happen with regular
3177 : * locks, since we only take those at session level in some special operations
3178 : * like VACUUM. It's possible to hit this with advisory locks, though.
3179 : *
3180 : * It would be nice if we could keep the session hold and give away the
3181 : * transactional hold to the prepared xact. However, that would require two
3182 : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3183 : * available when it comes time for PostPrepare_Locks to do the deed.
3184 : * So for now, we error out while we can still do so safely.
3185 : *
3186 : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3187 : * we can't implement this check by examining LOCALLOCK entries in isolation.
3188 : * We must build a transient hashtable that is indexed by locktag only.
3189 : */
3190 : static void
3191 734 : CheckForSessionAndXactLocks(void)
3192 : {
3193 : typedef struct
3194 : {
3195 : LOCKTAG lock; /* identifies the lockable object */
3196 : bool sessLock; /* is any lockmode held at session level? */
3197 : bool xactLock; /* is any lockmode held at xact level? */
3198 : } PerLockTagEntry;
3199 :
3200 : HASHCTL hash_ctl;
3201 : HTAB *lockhtab;
3202 : HASH_SEQ_STATUS status;
3203 : LOCALLOCK *locallock;
3204 :
3205 : /* Create a local hash table keyed by LOCKTAG only */
3206 734 : hash_ctl.keysize = sizeof(LOCKTAG);
3207 734 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3208 734 : hash_ctl.hcxt = CurrentMemoryContext;
3209 :
3210 734 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3211 : 256, /* arbitrary initial size */
3212 : &hash_ctl,
3213 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3214 :
3215 : /* Scan local lock table to find entries for each LOCKTAG */
3216 734 : hash_seq_init(&status, LockMethodLocalHash);
3217 :
3218 2448 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3219 : {
3220 1718 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3221 : PerLockTagEntry *hentry;
3222 : bool found;
3223 : int i;
3224 :
3225 : /*
3226 : * Ignore VXID locks. We don't want those to be held by prepared
3227 : * transactions, since they aren't meaningful after a restart.
3228 : */
3229 1718 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3230 0 : continue;
3231 :
3232 : /* Ignore it if we don't actually hold the lock */
3233 1718 : if (locallock->nLocks <= 0)
3234 0 : continue;
3235 :
3236 : /* Otherwise, find or make an entry in lockhtab */
3237 1718 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
3238 1718 : &locallock->tag.lock,
3239 : HASH_ENTER, &found);
3240 1718 : if (!found) /* initialize, if newly created */
3241 1626 : hentry->sessLock = hentry->xactLock = false;
3242 :
3243 : /* Scan to see if we hold lock at session or xact level or both */
3244 3436 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3245 : {
3246 1718 : if (lockOwners[i].owner == NULL)
3247 18 : hentry->sessLock = true;
3248 : else
3249 1700 : hentry->xactLock = true;
3250 : }
3251 :
3252 : /*
3253 : * We can throw error immediately when we see both types of locks; no
3254 : * need to wait around to see if there are more violations.
3255 : */
3256 1718 : if (hentry->sessLock && hentry->xactLock)
3257 4 : ereport(ERROR,
3258 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3259 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3260 : }
3261 :
3262 : /* Success, so clean up */
3263 730 : hash_destroy(lockhtab);
3264 730 : }
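      :
      : /*
      :  * Illustrative scenario (assuming advisory locks; SQL shown only for
      :  * context): both calls below map to the same LOCKTAG, so the PREPARE
      :  * fails with the ereport above:
      :  *
      :  *	SELECT pg_advisory_lock(1);		-- session-level hold
      :  *	BEGIN;
      :  *	SELECT pg_advisory_xact_lock(1);	-- xact-level hold, same object
      :  *	PREPARE TRANSACTION 'p1';		-- rejected here
      :  */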
3265 :
3266 : /*
3267 : * AtPrepare_Locks
3268 : * Do the preparatory work for a PREPARE: make 2PC state file records
3269 : * for all locks currently held.
3270 : *
3271 : * Session-level locks are ignored, as are VXID locks.
3272 : *
3273 : * For the most part, we don't need to touch shared memory for this ---
3274 : * all the necessary state information is in the locallock table.
3275 : * Fast-path locks are an exception, however: we move any such locks to
3276 : * the main table before allowing PREPARE TRANSACTION to succeed.
3277 : */
3278 : void
3279 734 : AtPrepare_Locks(void)
3280 : {
3281 : HASH_SEQ_STATUS status;
3282 : LOCALLOCK *locallock;
3283 :
3284 : /* First, verify there aren't locks of both xact and session level */
3285 734 : CheckForSessionAndXactLocks();
3286 :
3287 : /* Now do the per-locallock cleanup work */
3288 730 : hash_seq_init(&status, LockMethodLocalHash);
3289 :
3290 2436 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3291 : {
3292 : TwoPhaseLockRecord record;
3293 1706 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3294 : bool haveSessionLock;
3295 : bool haveXactLock;
3296 : int i;
3297 :
3298 : /*
3299 : * Ignore VXID locks. We don't want those to be held by prepared
3300 : * transactions, since they aren't meaningful after a restart.
3301 : */
3302 1706 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3303 14 : continue;
3304 :
3305 : /* Ignore it if we don't actually hold the lock */
3306 1706 : if (locallock->nLocks <= 0)
3307 0 : continue;
3308 :
3309 : /* Scan to see whether we hold it at session or transaction level */
3310 1706 : haveSessionLock = haveXactLock = false;
3311 3412 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3312 : {
3313 1706 : if (lockOwners[i].owner == NULL)
3314 14 : haveSessionLock = true;
3315 : else
3316 1692 : haveXactLock = true;
3317 : }
3318 :
3319 : /* Ignore it if we have only session lock */
3320 1706 : if (!haveXactLock)
3321 14 : continue;
3322 :
3323 : /* This can't happen, because we already checked it */
3324 1692 : if (haveSessionLock)
3325 0 : ereport(ERROR,
3326 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3327 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3328 :
3329 : /*
3330 : * If the local lock was taken via the fast-path, we need to move it
3331 : * to the primary lock table, or just get a pointer to the existing
3332 : * primary lock table entry if by chance it's already been
3333 : * transferred.
3334 : */
3335 1692 : if (locallock->proclock == NULL)
3336 : {
3337 754 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
3338 754 : locallock->lock = locallock->proclock->tag.myLock;
3339 : }
3340 :
3341 : /*
3342 : * Arrange to not release any strong lock count held by this lock
3343 : * entry. We must retain the count until the prepared transaction is
3344 : * committed or rolled back.
3345 : */
3346 1692 : locallock->holdsStrongLockCount = false;
3347 :
3348 : /*
3349 : * Create a 2PC record.
3350 : */
3351 1692 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3352 1692 : record.lockmode = locallock->tag.mode;
3353 :
3354 1692 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3355 : &record, sizeof(TwoPhaseLockRecord));
3356 : }
3357 730 : }
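      :
      : /*
      :  * For reference, the 2PC payload written above is just the pair
      :  * (locktag, lockmode); the record type (defined near the top of this
      :  * file) is essentially:
      :  *
      :  *	typedef struct TwoPhaseLockRecord
      :  *	{
      :  *		LOCKTAG		locktag;
      :  *		LOCKMODE	lockmode;
      :  *	} TwoPhaseLockRecord;
      :  */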
3358 :
3359 : /*
3360 : * PostPrepare_Locks
3361 : * Clean up after successful PREPARE
3362 : *
3363 : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3364 : * that's now associated with the prepared transaction, and we want to
3365 : * clean out the corresponding entries in the LOCALLOCK table.
3366 : *
3367 : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3368 : * pointers in the transaction's resource owner. This is OK at the
3369 : * moment since resowner.c doesn't try to free locks retail at a toplevel
3370 : * transaction commit or abort. We could alternatively zero out nLocks
3371 : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3372 : * but that probably costs more cycles.
3373 : */
3374 : void
3375 730 : PostPrepare_Locks(TransactionId xid)
3376 : {
3377 730 : PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3378 : HASH_SEQ_STATUS status;
3379 : LOCALLOCK *locallock;
3380 : LOCK *lock;
3381 : PROCLOCK *proclock;
3382 : PROCLOCKTAG proclocktag;
3383 : int partition;
3384 :
3385 : /* Can't prepare a lock group follower. */
3386 : Assert(MyProc->lockGroupLeader == NULL ||
3387 : MyProc->lockGroupLeader == MyProc);
3388 :
3389 : /* This is a critical section: any error means big trouble */
3390 730 : START_CRIT_SECTION();
3391 :
3392 : /*
3393 : * First we run through the locallock table and get rid of unwanted
3394 : * entries, then we scan the process's proclocks and transfer them to the
3395 : * target proc.
3396 : *
3397 : * We do this separately because we may have multiple locallock entries
3398 : * pointing to the same proclock, and we daren't end up with any dangling
3399 : * pointers.
3400 : */
3401 730 : hash_seq_init(&status, LockMethodLocalHash);
3402 :
3403 2436 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3404 : {
3405 1706 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3406 : bool haveSessionLock;
3407 : bool haveXactLock;
3408 : int i;
3409 :
3410 1706 : if (locallock->proclock == NULL || locallock->lock == NULL)
3411 : {
3412 : /*
3413 : * We must've run out of shared memory while trying to set up this
3414 : * lock. Just forget the local entry.
3415 : */
3416 : Assert(locallock->nLocks == 0);
3417 0 : RemoveLocalLock(locallock);
3418 0 : continue;
3419 : }
3420 :
3421 : /* Ignore VXID locks */
3422 1706 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3423 0 : continue;
3424 :
3425 : /* Scan to see whether we hold it at session or transaction level */
3426 1706 : haveSessionLock = haveXactLock = false;
3427 3412 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3428 : {
3429 1706 : if (lockOwners[i].owner == NULL)
3430 14 : haveSessionLock = true;
3431 : else
3432 1692 : haveXactLock = true;
3433 : }
3434 :
3435 : /* Ignore it if we have only session lock */
3436 1706 : if (!haveXactLock)
3437 14 : continue;
3438 :
3439 : /* This can't happen, because we already checked it */
3440 1692 : if (haveSessionLock)
3441 0 : ereport(PANIC,
3442 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3443 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3444 :
3445 : /* Mark the proclock to show we need to release this lockmode */
3446 1692 : if (locallock->nLocks > 0)
3447 1692 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3448 :
3449 : /* And remove the locallock hashtable entry */
3450 1692 : RemoveLocalLock(locallock);
3451 : }
3452 :
3453 : /*
3454 : * Now, scan each lock partition separately.
3455 : */
3456 12410 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3457 : {
3458 : LWLock *partitionLock;
3459 11680 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3460 : dlist_mutable_iter proclock_iter;
3461 :
3462 11680 : partitionLock = LockHashPartitionLockByIndex(partition);
3463 :
3464 : /*
3465 : * If the proclock list for this partition is empty, we can skip
3466 : * acquiring the partition lock. This optimization is safer than the
3467 : * situation in LockReleaseAll, because we got rid of any fast-path
3468 : * locks during AtPrepare_Locks, so there cannot be any case where
3469 : * another backend is adding something to our lists now. For safety,
3470 : * though, we code this the same way as in LockReleaseAll.
3471 : */
3472 11680 : if (dlist_is_empty(procLocks))
3473 10062 : continue; /* needn't examine this partition */
3474 :
3475 1618 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3476 :
3477 3320 : dlist_foreach_modify(proclock_iter, procLocks)
3478 : {
3479 1702 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3480 :
3481 : Assert(proclock->tag.myProc == MyProc);
3482 :
3483 1702 : lock = proclock->tag.myLock;
3484 :
3485 : /* Ignore VXID locks */
3486 1702 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3487 84 : continue;
3488 :
3489 : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3490 : LOCK_PRINT("PostPrepare_Locks", lock, 0);
3491 : Assert(lock->nRequested >= 0);
3492 : Assert(lock->nGranted >= 0);
3493 : Assert(lock->nGranted <= lock->nRequested);
3494 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3495 :
3496 : /* Ignore it if nothing to release (must be a session lock) */
3497 1618 : if (proclock->releaseMask == 0)
3498 14 : continue;
3499 :
3500 : /* Else we should be releasing all locks */
3501 1604 : if (proclock->releaseMask != proclock->holdMask)
3502 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3503 :
3504 : /*
3505 : * We cannot simply modify proclock->tag.myProc to reassign
3506 : * ownership of the lock, because that's part of the hash key and
3507 : * the proclock would then be in the wrong hash chain. Instead
3508 : * use hash_update_hash_key. (We used to create a new hash entry,
3509 : * but that risks out-of-memory failure if other processes are
3510 : * busy making proclocks too.) We must unlink the proclock from
3511 : * our procLink chain and put it into the new proc's chain, too.
3512 : *
3513 : * Note: the updated proclock hash key will still belong to the
3514 : * same hash partition, cf proclock_hash(). So the partition lock
3515 : * we already hold is sufficient for this.
3516 : */
3517 1604 : dlist_delete(&proclock->procLink);
3518 :
3519 : /*
3520 : * Create the new hash key for the proclock.
3521 : */
3522 1604 : proclocktag.myLock = lock;
3523 1604 : proclocktag.myProc = newproc;
3524 :
3525 : /*
3526 : * Update groupLeader pointer to point to the new proc. (We'd
3527 : * better not be a member of somebody else's lock group!)
3528 : */
3529 : Assert(proclock->groupLeader == proclock->tag.myProc);
3530 1604 : proclock->groupLeader = newproc;
3531 :
3532 : /*
3533 : * Update the proclock. We should not find any existing entry for
3534 : * the same hash key, since there can be only one entry for any
3535 : * given lock with my own proc.
3536 : */
3537 1604 : if (!hash_update_hash_key(LockMethodProcLockHash,
3538 : proclock,
3539 : &proclocktag))
3540 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3541 :
3542 : /* Re-link into the new proc's proclock list */
3543 1604 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3544 :
3545 : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3546 : } /* loop over PROCLOCKs within this partition */
3547 :
3548 1618 : LWLockRelease(partitionLock);
3549 : } /* loop over partitions */
3550 :
3551 730 : END_CRIT_SECTION();
3552 730 : }
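      :
      : /*
      :  * Illustrative note on the same-partition claim above (mirroring
      :  * ProcLockHashCode(), defined earlier in this file): the proc pointer is
      :  * XORed in only above the partition-selecting bits, so re-keying the
      :  * entry to a new proc cannot move it to another partition:
      :  *
      :  *	lockhash = hashcode;
      :  *	lockhash ^= ((uint32) PointerGetDatum(myProc)) << LOG2_NUM_LOCK_PARTITIONS;
      :  */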
3553 :
3554 :
3555 : /*
3556 : * Estimate shared-memory space used for lock tables
3557 : */
3558 : Size
3559 5192 : LockShmemSize(void)
3560 : {
3561 5192 : Size size = 0;
3562 : long max_table_size;
3563 :
3564 : /* lock hash table */
3565 5192 : max_table_size = NLOCKENTS();
3566 5192 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3567 :
3568 : /* proclock hash table */
3569 5192 : max_table_size *= 2;
3570 5192 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3571 :
3572 : /*
3573 : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3574 : */
3575 5192 : size = add_size(size, size / 10);
3576 :
3577 5192 : return size;
3578 : }
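      :
      : /*
      :  * Worked example (illustrative; actual values depend on configuration):
      :  * with max_locks_per_xact = 64 and MaxBackends + max_prepared_xacts = 128,
      :  * NLOCKENTS() is 64 * 128 = 8192, so we estimate space for 8192 LOCK
      :  * entries and 16384 PROCLOCK entries, then add the 10% margin on top.
      :  */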
3579 :
3580 : /*
3581 : * GetLockStatusData - Return a summary of the lock manager's internal
3582 : * status, for use in a user-level reporting function.
3583 : *
3584 : * The return data consists of an array of LockInstanceData objects,
3585 : * which are a lightly abstracted version of the PROCLOCK data structures,
3586 : * i.e. there is one entry for each unique lock and interested PGPROC.
3587 : * It is the caller's responsibility to match up related items (such as
3588 : * references to the same lockable object or PGPROC) if wanted.
3589 : *
3590 : * The design goal is to hold the LWLocks for as short a time as possible;
3591 : * thus, this function simply makes a copy of the necessary data and releases
3592 : * the locks, allowing the caller to contemplate and format the data for as
3593 : * long as it pleases.
3594 : */
3595 : LockData *
3596 460 : GetLockStatusData(void)
3597 : {
3598 : LockData *data;
3599 : PROCLOCK *proclock;
3600 : HASH_SEQ_STATUS seqstat;
3601 : int els;
3602 : int el;
3603 : int i;
3604 :
3605 460 : data = (LockData *) palloc(sizeof(LockData));
3606 :
3607 : /* Guess how much space we'll need. */
3608 460 : els = MaxBackends;
3609 460 : el = 0;
3610 460 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3611 :
3612 : /*
3613 : * First, we iterate through the per-backend fast-path arrays, locking
3614 : * them one at a time. This might produce an inconsistent picture of the
3615 : * system state, but taking all of those LWLocks at the same time seems
3616 : * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3617 : * matter too much, because none of these locks can be involved in lock
3618 : * conflicts anyway - any lock that could conflict must be present in the
3619 : * main lock table. (For the same reason, we don't sweat about making leaderPid
3620 : * completely valid. We cannot safely dereference another backend's
3621 : * lockGroupLeader field without holding all lock partition locks, and
3622 : * it's not worth that.)
3623 : */
3624 47280 : for (i = 0; i < ProcGlobal->allProcCount; ++i)
3625 : {
3626 46820 : PGPROC *proc = &ProcGlobal->allProcs[i];
3627 : uint32 f;
3628 :
3629 46820 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3630 :
3631 795940 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3632 : {
3633 : LockInstanceData *instance;
3634 749120 : uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3635 :
3636 : /* Skip unallocated slots. */
3637 749120 : if (!lockbits)
3638 744156 : continue;
3639 :
3640 4964 : if (el >= els)
3641 : {
3642 4 : els += MaxBackends;
3643 4 : data->locks = (LockInstanceData *)
3644 4 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3645 : }
3646 :
3647 4964 : instance = &data->locks[el];
3648 4964 : SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3649 : proc->fpRelId[f]);
3650 4964 : instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3651 4964 : instance->waitLockMode = NoLock;
3652 4964 : instance->backend = proc->backendId;
3653 4964 : instance->lxid = proc->lxid;
3654 4964 : instance->pid = proc->pid;
3655 4964 : instance->leaderPid = proc->pid;
3656 4964 : instance->fastpath = true;
3657 :
3658 : /*
3659 : * Successfully taking fast path lock means there were no
3660 : * conflicting locks.
3661 : */
3662 4964 : instance->waitStart = 0;
3663 :
3664 4964 : el++;
3665 : }
3666 :
3667 46820 : if (proc->fpVXIDLock)
3668 : {
3669 : VirtualTransactionId vxid;
3670 : LockInstanceData *instance;
3671 :
3672 1452 : if (el >= els)
3673 : {
3674 0 : els += MaxBackends;
3675 0 : data->locks = (LockInstanceData *)
3676 0 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3677 : }
3678 :
3679 1452 : vxid.backendId = proc->backendId;
3680 1452 : vxid.localTransactionId = proc->fpLocalTransactionId;
3681 :
3682 1452 : instance = &data->locks[el];
3683 1452 : SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3684 1452 : instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3685 1452 : instance->waitLockMode = NoLock;
3686 1452 : instance->backend = proc->backendId;
3687 1452 : instance->lxid = proc->lxid;
3688 1452 : instance->pid = proc->pid;
3689 1452 : instance->leaderPid = proc->pid;
3690 1452 : instance->fastpath = true;
3691 1452 : instance->waitStart = 0;
3692 :
3693 1452 : el++;
3694 : }
3695 :
3696 46820 : LWLockRelease(&proc->fpInfoLock);
3697 : }
3698 :
3699 : /*
3700 : * Next, acquire lock on the entire shared lock data structure. We do
3701 : * this so that, at least for locks in the primary lock table, the state
3702 : * will be self-consistent.
3703 : *
3704 : * Since this is a read-only operation, we take shared instead of
3705 : * exclusive lock. There's not a whole lot of point to this, because all
3706 : * the normal operations require exclusive lock, but it doesn't hurt
3707 : * anything either. It will at least allow two backends to do
3708 : * GetLockStatusData in parallel.
3709 : *
3710 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3711 : */
3712 7820 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3713 7360 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3714 :
3715 : /* Now we can safely count the number of proclocks */
3716 460 : data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3717 460 : if (data->nelements > els)
3718 : {
3719 2 : els = data->nelements;
3720 2 : data->locks = (LockInstanceData *)
3721 2 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3722 : }
3723 :
3724 : /* Now scan the tables to copy the data */
3725 460 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3726 :
3727 3094 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3728 : {
3729 2634 : PGPROC *proc = proclock->tag.myProc;
3730 2634 : LOCK *lock = proclock->tag.myLock;
3731 2634 : LockInstanceData *instance = &data->locks[el];
3732 :
3733 2634 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3734 2634 : instance->holdMask = proclock->holdMask;
3735 2634 : if (proc->waitLock == proclock->tag.myLock)
3736 16 : instance->waitLockMode = proc->waitLockMode;
3737 : else
3738 2618 : instance->waitLockMode = NoLock;
3739 2634 : instance->backend = proc->backendId;
3740 2634 : instance->lxid = proc->lxid;
3741 2634 : instance->pid = proc->pid;
3742 2634 : instance->leaderPid = proclock->groupLeader->pid;
3743 2634 : instance->fastpath = false;
3744 2634 : instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3745 :
3746 2634 : el++;
3747 : }
3748 :
3749 : /*
3750 : * And release locks. We do this in reverse order for two reasons: (1)
3751 : * Anyone else who needs more than one of the locks will be trying to lock
3752 : * them in increasing order; we don't want to release the other process
3753 : * until it can get all the locks it needs. (2) This avoids O(N^2)
3754 : * behavior inside LWLockRelease.
3755 : */
3756 7820 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3757 7360 : LWLockRelease(LockHashPartitionLockByIndex(i));
3758 :
3759 : Assert(el == data->nelements);
3760 :
3761 460 : return data;
3762 : }
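      :
      : /*
      :  * Illustrative consumer sketch (hypothetical; the pg_locks machinery is
      :  * the real caller). No LWLocks are held by the time we return, so the
      :  * copy can be formatted at leisure:
      :  *
      :  *	LockData   *lockData = GetLockStatusData();
      :  *
      :  *	for (int i = 0; i < lockData->nelements; i++)
      :  *	{
      :  *		LockInstanceData *instance = &lockData->locks[i];
      :  *
      :  *		examine(instance);			(examine is a stand-in)
      :  *	}
      :  */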
3763 :
3764 : /*
3765 : * GetBlockerStatusData - Return a summary of the lock manager's state
3766 : * concerning locks that are blocking the specified PID or any member of
3767 : * the PID's lock group, for use in a user-level reporting function.
3768 : *
3769 : * For each PID within the lock group that is awaiting some heavyweight lock,
3770 : * the return data includes an array of LockInstanceData objects, which are
3771 : * the same data structure used by GetLockStatusData; but unlike that function,
3772 : * this one reports only the PROCLOCKs associated with the lock that that PID
3773 : * is blocked on. (Hence, all the locktags should be the same for any one
3774 : * blocked PID.) In addition, we return an array of the PIDs of those backends
3775 : * that are ahead of the blocked PID in the lock's wait queue. These can be
3776 : * compared with the PIDs in the LockInstanceData objects to determine which
3777 : * waiters are ahead of or behind the blocked PID in the queue.
3778 : *
3779 : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3780 : * waiting on any heavyweight lock, return empty arrays.
3781 : *
3782 : * The design goal is to hold the LWLocks for as short a time as possible;
3783 : * thus, this function simply makes a copy of the necessary data and releases
3784 : * the locks, allowing the caller to contemplate and format the data for as
3785 : * long as it pleases.
3786 : */
3787 : BlockedProcsData *
3788 6282 : GetBlockerStatusData(int blocked_pid)
3789 : {
3790 : BlockedProcsData *data;
3791 : PGPROC *proc;
3792 : int i;
3793 :
3794 6282 : data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3795 :
3796 : /*
3797 : * Guess how much space we'll need, and preallocate. Most of the time
3798 : * this will avoid needing to do repalloc while holding the LWLocks. (We
3799 : * assume, but check with an Assert, that MaxBackends is enough entries
3800 : * for the procs[] array; the other two could need enlargement, though.)
3801 : */
3802 6282 : data->nprocs = data->nlocks = data->npids = 0;
3803 6282 : data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3804 6282 : data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3805 6282 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3806 6282 : data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3807 :
3808 : /*
3809 : * In order to search the ProcArray for blocked_pid and assume that that
3810 : * entry won't immediately disappear under us, we must hold ProcArrayLock.
3811 : * In addition, to examine the lock grouping fields of any other backend,
3812 : * we must hold all the hash partition locks. (Only one of those locks is
3813 : * actually relevant for any one lock group, but we can't know which one
3814 : * ahead of time.) It's fairly annoying to hold all those locks
3815 : * throughout this, but it's no worse than GetLockStatusData(), and it
3816 : * does have the advantage that we're guaranteed to return a
3817 : * self-consistent instantaneous state.
3818 : */
3819 6282 : LWLockAcquire(ProcArrayLock, LW_SHARED);
3820 :
3821 6282 : proc = BackendPidGetProcWithLock(blocked_pid);
3822 :
3823 : /* Nothing to do if it's gone */
3824 6282 : if (proc != NULL)
3825 : {
3826 : /*
3827 : * Acquire lock on the entire shared lock data structure. See notes
3828 : * in GetLockStatusData().
3829 : */
3830 106794 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3831 100512 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3832 :
3833 6282 : if (proc->lockGroupLeader == NULL)
3834 : {
3835 : /* Easy case, proc is not a lock group member */
3836 6064 : GetSingleProcBlockerStatusData(proc, data);
3837 : }
3838 : else
3839 : {
3840 : /* Examine all procs in proc's lock group */
3841 : dlist_iter iter;
3842 :
3843 514 : dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3844 : {
3845 : PGPROC *memberProc;
3846 :
3847 296 : memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3848 296 : GetSingleProcBlockerStatusData(memberProc, data);
3849 : }
3850 : }
3851 :
3852 : /*
3853 : * And release locks. See notes in GetLockStatusData().
3854 : */
3855 106794 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3856 100512 : LWLockRelease(LockHashPartitionLockByIndex(i));
3857 :
3858 : Assert(data->nprocs <= data->maxprocs);
3859 : }
3860 :
3861 6282 : LWLockRelease(ProcArrayLock);
3862 :
3863 6282 : return data;
3864 : }
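      :
      : /*
      :  * Illustrative consumer sketch (hypothetical; compare pg_blocking_pids):
      :  * each procs[] element indexes its own slices of locks[] and
      :  * waiter_pids[]:
      :  *
      :  *	BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
      :  *
      :  *	for (int p = 0; p < data->nprocs; p++)
      :  *	{
      :  *		BlockedProcData *bproc = &data->procs[p];
      :  *
      :  *		for (int j = 0; j < bproc->num_locks; j++)
      :  *			examine(&data->locks[bproc->first_lock + j]);
      :  *	}
      :  */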
3865 :
3866 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3867 : static void
3868 6360 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3869 : {
3870 6360 : LOCK *theLock = blocked_proc->waitLock;
3871 : BlockedProcData *bproc;
3872 : dlist_iter proclock_iter;
3873 : dlist_iter proc_iter;
3874 : dclist_head *waitQueue;
3875 : int queue_size;
3876 :
3877 : /* Nothing to do if this proc is not blocked */
3878 6360 : if (theLock == NULL)
3879 4182 : return;
3880 :
3881 : /* Set up a procs[] element */
3882 2178 : bproc = &data->procs[data->nprocs++];
3883 2178 : bproc->pid = blocked_proc->pid;
3884 2178 : bproc->first_lock = data->nlocks;
3885 2178 : bproc->first_waiter = data->npids;
3886 :
3887 : /*
3888 : * We may ignore the proc's fast-path arrays, since nothing in those could
3889 : * be related to a contended lock.
3890 : */
3891 :
3892 : /* Collect all PROCLOCKs associated with theLock */
3893 6624 : dlist_foreach(proclock_iter, &theLock->procLocks)
3894 : {
3895 4446 : PROCLOCK *proclock =
3896 4446 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3897 4446 : PGPROC *proc = proclock->tag.myProc;
3898 4446 : LOCK *lock = proclock->tag.myLock;
3899 : LockInstanceData *instance;
3900 :
3901 4446 : if (data->nlocks >= data->maxlocks)
3902 : {
3903 0 : data->maxlocks += MaxBackends;
3904 0 : data->locks = (LockInstanceData *)
3905 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3906 : }
3907 :
3908 4446 : instance = &data->locks[data->nlocks];
3909 4446 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3910 4446 : instance->holdMask = proclock->holdMask;
3911 4446 : if (proc->waitLock == lock)
3912 2252 : instance->waitLockMode = proc->waitLockMode;
3913 : else
3914 2194 : instance->waitLockMode = NoLock;
3915 4446 : instance->backend = proc->backendId;
3916 4446 : instance->lxid = proc->lxid;
3917 4446 : instance->pid = proc->pid;
3918 4446 : instance->leaderPid = proclock->groupLeader->pid;
3919 4446 : instance->fastpath = false;
3920 4446 : data->nlocks++;
3921 : }
3922 :
3923 : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3924 2178 : waitQueue = &(theLock->waitProcs);
3925 2178 : queue_size = dclist_count(waitQueue);
3926 :
3927 2178 : if (queue_size > data->maxpids - data->npids)
3928 : {
3929 0 : data->maxpids = Max(data->maxpids + MaxBackends,
3930 : data->npids + queue_size);
3931 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
3932 0 : sizeof(int) * data->maxpids);
3933 : }
3934 :
3935 : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3936 2214 : dclist_foreach(proc_iter, waitQueue)
3937 : {
3938 2214 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3939 2214 : if (queued_proc == blocked_proc)
3940 2178 : break;
3941 36 : data->waiter_pids[data->npids++] = queued_proc->pid;
3943 : }
3944 :
3945 2178 : bproc->num_locks = data->nlocks - bproc->first_lock;
3946 2178 : bproc->num_waiters = data->npids - bproc->first_waiter;
3947 : }
3948 :
3949 : /*
3950 : * Returns a list of currently held AccessExclusiveLocks, for use by
3951 : * LogStandbySnapshot(). The result is a palloc'd array,
3952 : * with the number of elements returned into *nlocks.
3953 : *
3954 : * XXX This currently takes a lock on all partitions of the lock table,
3955 : * but it's possible to do better. By reference counting locks and storing
3956 : * the value in the ProcArray entry for each backend we could tell if any
3957 : * locks need recording without having to acquire the partition locks and
3958 : * scan the lock table. Whether that's worth the additional overhead
3959 : * is pretty dubious though.
3960 : */
3961 : xl_standby_lock *
3962 3266 : GetRunningTransactionLocks(int *nlocks)
3963 : {
3964 : xl_standby_lock *accessExclusiveLocks;
3965 : PROCLOCK *proclock;
3966 : HASH_SEQ_STATUS seqstat;
3967 : int i;
3968 : int index;
3969 : int els;
3970 :
3971 : /*
3972 : * Acquire lock on the entire shared lock data structure.
3973 : *
3974 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3975 : */
3976 55522 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3977 52256 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3978 :
3979 : /* Now we can safely count the number of proclocks */
3980 3266 : els = hash_get_num_entries(LockMethodProcLockHash);
3981 :
3982 : /*
3983 : * Allocating enough space for all locks in the lock table is overkill,
3984 : * but it's more convenient and faster than having to enlarge the array.
3985 : */
3986 3266 : accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3987 :
3988 : /* Now scan the tables to copy the data */
3989 3266 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3990 :
3991 : /*
3992 : * If lock is a currently granted AccessExclusiveLock then it will have
3993 : * just one proclock holder, so locks are never accessed twice in this
3994 : * particular case. Don't copy this code for use elsewhere because in the
3995 : * general case this will give you duplicate locks when looking at
3996 : * non-exclusive lock types.
3997 : */
3998 3266 : index = 0;
3999 11116 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4000 : {
4001 : /* make sure this definition matches the one used in LockAcquire */
4002 7850 : if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4003 224 : proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4004 : {
4005 162 : PGPROC *proc = proclock->tag.myProc;
4006 162 : LOCK *lock = proclock->tag.myLock;
4007 162 : TransactionId xid = proc->xid;
4008 :
4009 : /*
4010 : * Don't record locks for transactions if we know they have
4011 : * already issued their WAL record for commit but not yet released
4012 : * lock. It is still possible that we see locks held by already
4013 : * complete transactions, if they haven't yet zeroed their xids.
4014 : */
4015 162 : if (!TransactionIdIsValid(xid))
4016 4 : continue;
4017 :
4018 158 : accessExclusiveLocks[index].xid = xid;
4019 158 : accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4020 158 : accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4021 :
4022 158 : index++;
4023 : }
4024 : }
4025 :
4026 : Assert(index <= els);
4027 :
4028 : /*
4029 : * And release locks. We do this in reverse order for two reasons: (1)
4030 : * Anyone else who needs more than one of the locks will be trying to lock
4031 : * them in increasing order; we don't want to release the other process
4032 : * until it can get all the locks it needs. (2) This avoids O(N^2)
4033 : * behavior inside LWLockRelease.
4034 : */
4035 55522 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4036 52256 : LWLockRelease(LockHashPartitionLockByIndex(i));
4037 :
4038 3266 : *nlocks = index;
4039 3266 : return accessExclusiveLocks;
4040 : }
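      :
      : /*
      :  * Illustrative usage sketch (simplified from the standby-snapshot
      :  * logging path, the expected caller):
      :  *
      :  *	int			nlocks;
      :  *	xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);
      :  *
      :  *	if (nlocks > 0)
      :  *		... emit the array in a WAL record for standbys ...
      :  *	pfree(locks);
      :  */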
4041 :
4042 : /* Provide the textual name of any lock mode */
4043 : const char *
4044 9374 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4045 : {
4046 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4047 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4048 9374 : return LockMethods[lockmethodid]->lockModeNames[mode];
4049 : }
4050 :
4051 : #ifdef LOCK_DEBUG
4052 : /*
4053 : * Dump all locks in the given proc's myProcLocks lists.
4054 : *
4055 : * Caller is responsible for having acquired appropriate LWLocks.
4056 : */
4057 : void
4058 : DumpLocks(PGPROC *proc)
4059 : {
4060 : int i;
4061 :
4062 : if (proc == NULL)
4063 : return;
4064 :
4065 : if (proc->waitLock)
4066 : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4067 :
4068 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4069 : {
4070 : dlist_head *procLocks = &proc->myProcLocks[i];
4071 : dlist_iter iter;
4072 :
4073 : dlist_foreach(iter, procLocks)
4074 : {
4075 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4076 : LOCK *lock = proclock->tag.myLock;
4077 :
4078 : Assert(proclock->tag.myProc == proc);
4079 : PROCLOCK_PRINT("DumpLocks", proclock);
4080 : LOCK_PRINT("DumpLocks", lock, 0);
4081 : }
4082 : }
4083 : }
4084 :
4085 : /*
4086 : * Dump all lmgr locks.
4087 : *
4088 : * Caller is responsible for having acquired appropriate LWLocks.
4089 : */
4090 : void
4091 : DumpAllLocks(void)
4092 : {
4093 : PGPROC *proc;
4094 : PROCLOCK *proclock;
4095 : LOCK *lock;
4096 : HASH_SEQ_STATUS status;
4097 :
4098 : proc = MyProc;
4099 :
4100 : if (proc && proc->waitLock)
4101 : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4102 :
4103 : hash_seq_init(&status, LockMethodProcLockHash);
4104 :
4105 : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4106 : {
4107 : PROCLOCK_PRINT("DumpAllLocks", proclock);
4108 :
4109 : lock = proclock->tag.myLock;
4110 : if (lock)
4111 : LOCK_PRINT("DumpAllLocks", lock, 0);
4112 : else
4113 : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4114 : }
4115 : }
4116 : #endif /* LOCK_DEBUG */
4117 :
4118 : /*
4119 : * LOCK 2PC resource manager's routines
4120 : */
4121 :
4122 : /*
4123 : * Re-acquire a lock belonging to a transaction that was prepared.
4124 : *
4125 : * Because this function is run at db startup, re-acquiring the locks should
4126 : * never conflict with running transactions because there are none. We
4127 : * assume that the lock state represented by the stored 2PC files is legal.
4128 : *
4129 : * When switching from Hot Standby mode to normal operation, the locks will
4130 : * be already held by the startup process. The locks are acquired for the new
4131 : * procs without checking for conflicts, so we don't get a conflict between the
4132 : * startup process and the dummy procs, even though we will momentarily have
4133 : * a situation where two procs are holding the same AccessExclusiveLock,
4134 : * which isn't normally possible because of the conflict. If we're in standby
4135 : * mode, but a recovery snapshot hasn't been established yet, it's possible
4136 : * that some but not all of the locks are already held by the startup process.
4137 : *
4138 : * This approach is simple, but also a bit dangerous, because if there isn't
4139 : * enough shared memory to acquire the locks, an error will be thrown, which
4140 : * is promoted to FATAL and recovery will abort, bringing down the postmaster.
4141 : * A safer approach would be to transfer the locks like we do in
4142 : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4143 : * read-only backends to use up all the shared lock memory anyway, so that
4144 : * replaying the WAL record that needs to acquire a lock will throw an error
4145 : * and PANIC anyway.
4146 : */
4147 : void
4148 158 : lock_twophase_recover(TransactionId xid, uint16 info,
4149 : void *recdata, uint32 len)
4150 : {
4151 158 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4152 158 : PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4153 : LOCKTAG *locktag;
4154 : LOCKMODE lockmode;
4155 : LOCKMETHODID lockmethodid;
4156 : LOCK *lock;
4157 : PROCLOCK *proclock;
4158 : PROCLOCKTAG proclocktag;
4159 : bool found;
4160 : uint32 hashcode;
4161 : uint32 proclock_hashcode;
4162 : int partition;
4163 : LWLock *partitionLock;
4164 : LockMethod lockMethodTable;
4165 :
4166 : Assert(len == sizeof(TwoPhaseLockRecord));
4167 158 : locktag = &rec->locktag;
4168 158 : lockmode = rec->lockmode;
4169 158 : lockmethodid = locktag->locktag_lockmethodid;
4170 :
4171 158 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4172 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4173 158 : lockMethodTable = LockMethods[lockmethodid];
4174 :
4175 158 : hashcode = LockTagHashCode(locktag);
4176 158 : partition = LockHashPartition(hashcode);
4177 158 : partitionLock = LockHashPartitionLock(hashcode);
4178 :
4179 158 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4180 :
4181 : /*
4182 : * Find or create a lock with this tag.
4183 : */
4184 158 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4185 : locktag,
4186 : hashcode,
4187 : HASH_ENTER_NULL,
4188 : &found);
4189 158 : if (!lock)
4190 : {
4191 0 : LWLockRelease(partitionLock);
4192 0 : ereport(ERROR,
4193 : (errcode(ERRCODE_OUT_OF_MEMORY),
4194 : errmsg("out of shared memory"),
4195 : errhint("You might need to increase max_locks_per_transaction.")));
4196 : }
4197 :
4198 : /*
4199 : * if it's a new lock object, initialize it
4200 : */
4201 158 : if (!found)
4202 : {
4203 142 : lock->grantMask = 0;
4204 142 : lock->waitMask = 0;
4205 142 : dlist_init(&lock->procLocks);
4206 142 : dclist_init(&lock->waitProcs);
4207 142 : lock->nRequested = 0;
4208 142 : lock->nGranted = 0;
4209 852 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4210 142 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4211 : LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4212 : }
4213 : else
4214 : {
4215 : LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4216 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4217 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4218 : Assert(lock->nGranted <= lock->nRequested);
4219 : }
4220 :
4221 : /*
4222 : * Create the hash key for the proclock table.
4223 : */
4224 158 : proclocktag.myLock = lock;
4225 158 : proclocktag.myProc = proc;
4226 :
4227 158 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4228 :
4229 : /*
4230 : * Find or create a proclock entry with this tag
4231 : */
4232 158 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4233 : &proclocktag,
4234 : proclock_hashcode,
4235 : HASH_ENTER_NULL,
4236 : &found);
4237 158 : if (!proclock)
4238 : {
4239 : /* Oops, not enough shmem for the proclock */
4240 0 : if (lock->nRequested == 0)
4241 : {
4242 : /*
4243 : * There are no other requestors of this lock, so garbage-collect
4244 : * the lock object. We *must* do this to avoid a permanent leak
4245 : * of shared memory, because there won't be anything to cause
4246 : * anyone to release the lock object later.
4247 : */
4248 : Assert(dlist_is_empty(&lock->procLocks));
4249 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
4250 0 : &(lock->tag),
4251 : hashcode,
4252 : HASH_REMOVE,
4253 : NULL))
4254 0 : elog(PANIC, "lock table corrupted");
4255 : }
4256 0 : LWLockRelease(partitionLock);
4257 0 : ereport(ERROR,
4258 : (errcode(ERRCODE_OUT_OF_MEMORY),
4259 : errmsg("out of shared memory"),
4260 : errhint("You might need to increase max_locks_per_transaction.")));
4261 : }
4262 :
4263 : /*
4264 : * If new, initialize the new entry
4265 : */
4266 158 : if (!found)
4267 : {
4268 : Assert(proc->lockGroupLeader == NULL);
4269 150 : proclock->groupLeader = proc;
4270 150 : proclock->holdMask = 0;
4271 150 : proclock->releaseMask = 0;
4272 : /* Add proclock to appropriate lists */
4273 150 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4274 150 : dlist_push_tail(&proc->myProcLocks[partition],
4275 : &proclock->procLink);
4276 : PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4277 : }
4278 : else
4279 : {
4280 : PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4281 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
4282 : }
4283 :
4284 : /*
4285 : * lock->nRequested and lock->requested[] count the total number of
4286 : * requests, whether granted or waiting, so increment those immediately.
4287 : */
4288 158 : lock->nRequested++;
4289 158 : lock->requested[lockmode]++;
4290 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4291 :
4292 : /*
4293 : * We shouldn't already hold the desired lock.
4294 : */
4295 158 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
4296 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
4297 : lockMethodTable->lockModeNames[lockmode],
4298 : lock->tag.locktag_field1, lock->tag.locktag_field2,
4299 : lock->tag.locktag_field3);
4300 :
4301 : /*
4302 : * We ignore any possible conflicts and just grant ourselves the lock. Not
4303 : * only because we don't bother, but also to avoid deadlocks when
4304 : * switching from standby to normal mode. See function comment.
4305 : */
4306 158 : GrantLock(lock, proclock, lockmode);
4307 :
4308 : /*
4309 : * Bump strong lock count, to make sure any fast-path lock requests won't
4310 : * be granted without consulting the primary lock table.
4311 : */
4312 158 : if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4313 : {
4314 26 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4315 :
4316 26 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4317 26 : FastPathStrongRelationLocks->count[fasthashcode]++;
4318 26 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4319 : }
4320 :
4321 158 : LWLockRelease(partitionLock);
4322 158 : }
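      :
      : /*
      :  * Illustrative sketch of the other half of that handshake (simplified
      :  * from the fast-path acquire logic in this file): a nonzero strong-lock
      :  * count forces would-be fast-path acquirers through the main table:
      :  *
      :  *	if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
      :  *		acquired = false;			(must use the main lock table)
      :  *	else
      :  *		acquired = FastPathGrantRelationLock(locktag->locktag_field2,
      :  *											 lockmode);
      :  */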
4323 :
4324 : /*
4325 : * Re-acquire a lock belonging to a transaction that was prepared, when
4326 : * starting up into hot standby mode.
4327 : */
4328 : void
4329 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4330 : void *recdata, uint32 len)
4331 : {
4332 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4333 : LOCKTAG *locktag;
4334 : LOCKMODE lockmode;
4335 : LOCKMETHODID lockmethodid;
4336 :
4337 : Assert(len == sizeof(TwoPhaseLockRecord));
4338 0 : locktag = &rec->locktag;
4339 0 : lockmode = rec->lockmode;
4340 0 : lockmethodid = locktag->locktag_lockmethodid;
4341 :
4342 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4343 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4344 :
4345 0 : if (lockmode == AccessExclusiveLock &&
4346 0 : locktag->locktag_type == LOCKTAG_RELATION)
4347 : {
4348 0 : StandbyAcquireAccessExclusiveLock(xid,
4349 : locktag->locktag_field1 /* dboid */ ,
4350 : locktag->locktag_field2 /* reloid */ );
4351 : }
4352 0 : }
4353 :
4354 :
4355 : /*
4356 : * 2PC processing routine for COMMIT PREPARED case.
4357 : *
4358 : * Find and release the lock indicated by the 2PC record.
4359 : */
4360 : void
4361 1706 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4362 : void *recdata, uint32 len)
4363 : {
4364 1706 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4365 1706 : PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4366 : LOCKTAG *locktag;
4367 : LOCKMETHODID lockmethodid;
4368 : LockMethod lockMethodTable;
4369 :
4370 : Assert(len == sizeof(TwoPhaseLockRecord));
4371 1706 : locktag = &rec->locktag;
4372 1706 : lockmethodid = locktag->locktag_lockmethodid;
4373 :
4374 1706 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4375 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4376 1706 : lockMethodTable = LockMethods[lockmethodid];
4377 :
4378 1706 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4379 1706 : }
4380 :
4381 : /*
4382 : * 2PC processing routine for ROLLBACK PREPARED case.
4383 : *
4384 : * This is actually just the same as the COMMIT case.
4385 : */
4386 : void
4387 238 : lock_twophase_postabort(TransactionId xid, uint16 info,
4388 : void *recdata, uint32 len)
4389 : {
4390 238 : lock_twophase_postcommit(xid, info, recdata, len);
4391 238 : }
4392 :
4393 : /*
4394 : * VirtualXactLockTableInsert
4395 : *
4396 : * Take vxid lock via the fast-path. There can't be any pre-existing
4397 : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4398 : *
4399 : * Since MyProc->fpLocalTransactionId will normally contain the same data
4400 : * as MyProc->lxid, you might wonder if we really need both. The
4401 : * difference is that MyProc->lxid is set and cleared unlocked, and
4402 : * examined by procarray.c, while fpLocalTransactionId is protected by
4403 : * fpInfoLock and is used only by the locking subsystem. Doing it this
4404 : * way makes it easier to verify that there are no funny race conditions.
4405 : *
4406 : * We don't bother recording this lock in the local lock table, since it's
4407 : * only ever released at the end of a transaction. Instead,
4408 : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4409 : */
4410 : void
4411 941054 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4412 : {
4413 : Assert(VirtualTransactionIdIsValid(vxid));
4414 :
4415 941054 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4416 :
4417 : Assert(MyProc->backendId == vxid.backendId);
4418 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4419 : Assert(MyProc->fpVXIDLock == false);
4420 :
4421 941054 : MyProc->fpVXIDLock = true;
4422 941054 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4423 :
4424 941054 : LWLockRelease(&MyProc->fpInfoLock);
4425 941054 : }
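      :
      : /*
      :  * Illustrative lifecycle sketch (simplified from the transaction-start
      :  * path); the matching release happens when LockReleaseAll() calls
      :  * VirtualXactLockTableCleanup():
      :  *
      :  *	VirtualTransactionId vxid;
      :  *
      :  *	vxid.backendId = MyBackendId;
      :  *	vxid.localTransactionId = GetNextLocalTransactionId();
      :  *	VirtualXactLockTableInsert(vxid);
      :  */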
4426 :
4427 : /*
4428 : * VirtualXactLockTableCleanup
4429 : *
4430 : * Check whether a VXID lock has been materialized; if so, release it,
4431 : * unblocking waiters.
4432 : */
4433 : void
4434 941674 : VirtualXactLockTableCleanup(void)
4435 : {
4436 : bool fastpath;
4437 : LocalTransactionId lxid;
4438 :
4439 : Assert(MyProc->backendId != InvalidBackendId);
4440 :
4441 : /*
4442 : * Clean up shared memory state.
4443 : */
4444 941674 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4445 :
4446 941674 : fastpath = MyProc->fpVXIDLock;
4447 941674 : lxid = MyProc->fpLocalTransactionId;
4448 941674 : MyProc->fpVXIDLock = false;
4449 941674 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4450 :
4451 941674 : LWLockRelease(&MyProc->fpInfoLock);
4452 :
4453 : /*
4454 : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4455 : * that means someone transferred the lock to the main lock table.
4456 : */
4457 941674 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4458 : {
4459 : VirtualTransactionId vxid;
4460 : LOCKTAG locktag;
4461 :
4462 496 : vxid.backendId = MyBackendId;
4463 496 : vxid.localTransactionId = lxid;
4464 496 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4465 :
4466 496 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4467 : &locktag, ExclusiveLock, false);
4468 : }
4469 941674 : }
4470 :
4471 : /*
4472 : * XactLockForVirtualXact
4473 : *
4474 : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4475 : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4476 : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4477 : * prepared, committed, or aborted.
4478 : *
4479 : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4480 : * known as "vxid" before its PREPARE TRANSACTION.
4481 : */
4482 : static bool
4483 520 : XactLockForVirtualXact(VirtualTransactionId vxid,
4484 : TransactionId xid, bool wait)
4485 : {
4486 520 : bool more = false;
4487 :
4488 : /* There is no point in waiting for 2PCs if there are no 2PCs. */
4489 520 : if (max_prepared_xacts == 0)
4490 116 : return true;
4491 :
4492 : do
4493 : {
4494 : LockAcquireResult lar;
4495 : LOCKTAG tag;
4496 :
4497 : /* Clear state from previous iterations. */
4498 404 : if (more)
4499 : {
4500 0 : xid = InvalidTransactionId;
4501 0 : more = false;
4502 : }
4503 :
4504 : /* If we have no xid, try to find one. */
4505 404 : if (!TransactionIdIsValid(xid))
4506 202 : xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4507 404 : if (!TransactionIdIsValid(xid))
4508 : {
4509 : Assert(!more);
4510 176 : return true;
4511 : }
4512 :
4513 : /* Check or wait for XID completion. */
4514 228 : SET_LOCKTAG_TRANSACTION(tag, xid);
4515 228 : lar = LockAcquire(&tag, ShareLock, false, !wait);
4516 228 : if (lar == LOCKACQUIRE_NOT_AVAIL)
4517 0 : return false;
4518 228 : LockRelease(&tag, ShareLock, false);
4519 228 : } while (more);
4520 :
4521 228 : return true;
4522 : }
4523 :
4524 : /*
4525 : * VirtualXactLock
4526 : *
4527 : * If wait = true, wait as long as the given VXID or any XID acquired by the
4528 : * same transaction is still running. Then, return true.
4529 : *
4530 : * If wait = false, just check whether that VXID or one of those XIDs is still
4531 : * running, and return true or false.
4532 : */
4533 : bool
4534 600 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4535 : {
4536 : LOCKTAG tag;
4537 : PGPROC *proc;
4538 600 : TransactionId xid = InvalidTransactionId;
4539 :
4540 : Assert(VirtualTransactionIdIsValid(vxid));
4541 :
4542 600 : if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4543 : /* no vxid lock; localTransactionId is a normal, locked XID */
4544 2 : return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4545 :
4546 598 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4547 :
4548 : /*
4549 : * If a lock table entry must be made, this is the PGPROC on whose behalf
4550 : * it must be done. Note that the transaction might end or the PGPROC
4551 : * might be reassigned to a new backend before we get around to examining
4552 : * it, but it doesn't matter. If we find upon examination that the
4553 : * relevant lxid is no longer running here, that's enough to prove that
4554 : * it's no longer running anywhere.
4555 : */
4556 598 : proc = BackendIdGetProc(vxid.backendId);
4557 598 : if (proc == NULL)
4558 6 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4559 :
4560 : /*
4561 : * We must acquire this lock before checking the backendId and lxid
4562 : * against the ones we're waiting for. The target backend will only set
4563 : * or clear lxid while holding this lock.
4564 : */
4565 592 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4566 :
4567 592 : if (proc->backendId != vxid.backendId
4568 592 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4569 : {
4570 : /* VXID ended */
4571 54 : LWLockRelease(&proc->fpInfoLock);
4572 54 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4573 : }
4574 :
4575 : /*
4576 : * If we aren't asked to wait, there's no need to set up a lock table
4577 : * entry. The transaction is still in progress, so just return false.
4578 : */
4579 538 : if (!wait)
4580 : {
4581 30 : LWLockRelease(&proc->fpInfoLock);
4582 30 : return false;
4583 : }
4584 :
4585 : /*
4586 : * OK, we're going to need to sleep on the VXID. But first, we must set
4587 : * up the primary lock table entry, if needed (ie, convert the proc's
4588 : * fast-path lock on its VXID to a regular lock).
4589 : */
4590 508 : if (proc->fpVXIDLock)
4591 : {
4592 : PROCLOCK *proclock;
4593 : uint32 hashcode;
4594 : LWLock *partitionLock;
4595 :
4596 496 : hashcode = LockTagHashCode(&tag);
4597 :
4598 496 : partitionLock = LockHashPartitionLock(hashcode);
4599 496 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4600 :
4601 496 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4602 : &tag, hashcode, ExclusiveLock);
4603 496 : if (!proclock)
4604 : {
4605 0 : LWLockRelease(partitionLock);
4606 0 : LWLockRelease(&proc->fpInfoLock);
4607 0 : ereport(ERROR,
4608 : (errcode(ERRCODE_OUT_OF_MEMORY),
4609 : errmsg("out of shared memory"),
4610 : errhint("You might need to increase max_locks_per_transaction.")));
4611 : }
4612 496 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4613 :
4614 496 : LWLockRelease(partitionLock);
4615 :
4616 496 : proc->fpVXIDLock = false;
4617 : }
4618 :
4619 : /*
4620 : * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4621 : * search. The proc might have assigned this XID but not yet locked it,
4622 : * in which case the proc will lock this XID before releasing the VXID.
4623 : * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4624 : * so we won't save an XID of a different VXID. It doesn't matter whether
4625 : * we save this before or after setting up the primary lock table entry.
4626 : */
4627 508 : xid = proc->xid;
4628 :
4629 : /* Done examining the proc's fast-path state; release fpInfoLock */
4630 508 : LWLockRelease(&proc->fpInfoLock);
4631 :
4632 : /* Time to wait. */
4633 508 : (void) LockAcquire(&tag, ShareLock, false, false);
4634 :
4635 458 : LockRelease(&tag, ShareLock, false);
4636 458 : return XactLockForVirtualXact(vxid, xid, wait);
4637 : }
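      :
      : /*
      :  * Illustrative wait-loop sketch (simplified from the WaitForLockers
      :  * logic in lmgr.c): pair GetLockConflicts() with VirtualXactLock() to
      :  * wait out every transaction currently holding a conflicting lock:
      :  *
      :  *	VirtualTransactionId *vxids = GetLockConflicts(&tag, mode, NULL);
      :  *
      :  *	while (VirtualTransactionIdIsValid(*vxids))
      :  *	{
      :  *		(void) VirtualXactLock(*vxids, true);
      :  *		vxids++;
      :  *	}
      :  */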
4638 :
4639 : /*
4640 : * LockWaiterCount
4641 : *
4642 : * Find the number of lock requesters (granted or waiting) on this locktag
4643 : */
4644 : int
4645 36 : LockWaiterCount(const LOCKTAG *locktag)
4646 : {
4647 36 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4648 : LOCK *lock;
4649 : bool found;
4650 : uint32 hashcode;
4651 : LWLock *partitionLock;
4652 36 : int waiters = 0;
4653 :
4654 36 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4655 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4656 :
4657 36 : hashcode = LockTagHashCode(locktag);
4658 36 : partitionLock = LockHashPartitionLock(hashcode);
4659 36 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4660 :
4661 36 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4662 : locktag,
4663 : hashcode,
4664 : HASH_FIND,
4665 : &found);
4666 36 : if (found)
4667 : {
4668 : Assert(lock != NULL);
4669 36 : waiters = lock->nRequested;
4670 : }
4671 36 : LWLockRelease(partitionLock);
4672 :
4673 36 : return waiters;
4674 : }
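      :
      : /*
      :  * Illustrative usage sketch (compare the relation-extension caller in
      :  * lmgr.c):
      :  *
      :  *	LOCKTAG		tag;
      :  *
      :  *	SET_LOCKTAG_RELATION_EXTEND(tag,
      :  *								relation->rd_lockInfo.lockRelId.dbId,
      :  *								relation->rd_lockInfo.lockRelId.relId);
      :  *	return LockWaiterCount(&tag);
      :  */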