Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * proc.c
4 : * routines to manage per-process shared memory data structure
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/proc.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /*
16 : * Interface (a):
17 : * JoinWaitQueue(), ProcSleep(), ProcWakeup()
18 : *
19 : * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20 : * the lock wakes the process up again (and gives it an error code so it knows
21 : * whether it was awoken on an error condition).
22 : *
23 : * Interface (b):
24 : *
25 : * ProcReleaseLocks -- frees the locks associated with current transaction
26 : *
27 : * ProcKill -- destroys the shared memory state (and locks)
28 : * associated with the process.
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 : #include <sys/time.h>
35 :
36 : #include "access/transam.h"
37 : #include "access/twophase.h"
38 : #include "access/xlogutils.h"
39 : #include "miscadmin.h"
40 : #include "pgstat.h"
41 : #include "postmaster/autovacuum.h"
42 : #include "replication/slotsync.h"
43 : #include "replication/syncrep.h"
44 : #include "storage/condition_variable.h"
45 : #include "storage/ipc.h"
46 : #include "storage/lmgr.h"
47 : #include "storage/pmsignal.h"
48 : #include "storage/proc.h"
49 : #include "storage/procarray.h"
50 : #include "storage/procsignal.h"
51 : #include "storage/spin.h"
52 : #include "storage/standby.h"
53 : #include "utils/memutils.h"
54 : #include "utils/timeout.h"
55 : #include "utils/timestamp.h"
56 :
57 : /* GUC variables */
58 : int DeadlockTimeout = 1000;
59 : int StatementTimeout = 0;
60 : int LockTimeout = 0;
61 : int IdleInTransactionSessionTimeout = 0;
62 : int TransactionTimeout = 0;
63 : int IdleSessionTimeout = 0;
64 : bool log_lock_waits = false;
65 :
66 : /* Pointer to this process's PGPROC struct, if any */
67 : PGPROC *MyProc = NULL;
68 :
69 : /*
70 : * This spinlock protects the freelist of recycled PGPROC structures.
71 : * We cannot use an LWLock because the LWLock manager depends on already
72 : * having a PGPROC and a wait semaphore! But these structures are touched
73 : * relatively infrequently (only at backend startup or shutdown) and not for
74 : * very long, so a spinlock is okay.
75 : */
76 : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
77 :
78 : /* Pointers to shared-memory structures */
79 : PROC_HDR *ProcGlobal = NULL;
80 : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
81 : PGPROC *PreparedXactProcs = NULL;
82 :
83 : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
84 :
85 : /* Is a deadlock check pending? */
86 : static volatile sig_atomic_t got_deadlock_timeout;
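
got_deadlock_timeout follows the classic async-signal-safe pattern: the timeout handler does nothing but set a volatile sig_atomic_t flag, and the wait loop in ProcSleep() polls the flag after each wakeup. A minimal standalone sketch of that pattern, with SIGALRM/alarm() standing in for the timeout machinery and pause() for WaitLatch():

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    /* A signal handler can do little safely beyond setting such a flag. */
    static volatile sig_atomic_t got_timeout = 0;

    static void
    handle_alarm(int signo)
    {
        got_timeout = 1;
    }

    int
    main(void)
    {
        signal(SIGALRM, handle_alarm);
        alarm(1);               /* stand-in for enable_timeout_after() */

        /* Simplified: a real loop must close the check-then-sleep race,
         * e.g. with a latch; pause() here is just for illustration. */
        while (!got_timeout)
            pause();

        printf("timer fired; this is where CheckDeadLock() would run\n");
        return 0;
    }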
87 :
88 : static void RemoveProcFromArray(int code, Datum arg);
89 : static void ProcKill(int code, Datum arg);
90 : static void AuxiliaryProcKill(int code, Datum arg);
91 : static void CheckDeadLock(void);
92 :
93 :
94 : /*
95 : * Report shared-memory space needed by PGPROC.
96 : */
97 : static Size
98 6006 : PGProcShmemSize(void)
99 : {
100 6006 : Size size = 0;
101 : Size TotalProcs =
102 6006 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
103 :
104 6006 : size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
105 6006 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
106 6006 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
107 6006 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
108 :
109 6006 : return size;
110 : }
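
PGProcShmemSize() accumulates its total exclusively through add_size()/mul_size(), which error out on overflow rather than silently wrapping. A standalone sketch of equivalent overflow-checked helpers; these are local stand-ins, not the PostgreSQL implementations, and the counts are invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Overflow-checked addition: unsigned wraparound means overflow. */
    static size_t
    add_size_checked(size_t a, size_t b)
    {
        size_t      result = a + b;

        if (result < a)
        {
            fprintf(stderr, "requested size overflows size_t\n");
            abort();
        }
        return result;
    }

    /* Overflow-checked multiplication. */
    static size_t
    mul_size_checked(size_t a, size_t b)
    {
        if (a != 0 && b > SIZE_MAX / a)
        {
            fprintf(stderr, "requested size overflows size_t\n");
            abort();
        }
        return a * b;
    }

    int
    main(void)
    {
        /* Invented counts standing in for MaxBackends etc. */
        size_t      total_procs = add_size_checked(100, add_size_checked(6, 10));
        size_t      size = mul_size_checked(total_procs, 832);  /* "sizeof(PGPROC)" */

        printf("%zu bytes\n", size);
        return 0;
    }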
111 :
112 : /*
113 : * Report shared-memory space needed by Fast-Path locks.
114 : */
115 : static Size
116 6006 : FastPathLockShmemSize(void)
117 : {
118 6006 : Size size = 0;
119 : Size TotalProcs =
120 6006 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
121 : Size fpLockBitsSize,
122 : fpRelIdSize;
123 :
124 : /*
125 : * Memory needed for PGPROC fast-path lock arrays. Make sure the sizes are
126 : * nicely aligned in each backend.
127 : */
128 6006 : fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
129 6006 : fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
130 :
131 6006 : size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
132 :
133 6006 : return size;
134 : }
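
The MAXALIGN() calls above round each per-backend slice up to the platform's maximum alignment, so that carving the shared block below never produces a misaligned pointer. A sketch of the usual round-up-to-a-power-of-two trick, assuming 8-byte alignment purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Round len up to the next multiple of ALIGNVAL (must be a power of
     * two); this is the same trick MAXALIGN() uses. */
    #define ALIGNVAL    8
    #define ALIGN_UP(len) \
        (((uintptr_t) (len) + (ALIGNVAL - 1)) & ~((uintptr_t) (ALIGNVAL - 1)))

    int
    main(void)
    {
        for (int len = 0; len <= 17; len++)
            printf("%2d -> %2lu\n", len, (unsigned long) ALIGN_UP(len));
        return 0;
    }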
135 :
136 : /*
137 : * Report shared-memory space needed by InitProcGlobal.
138 : */
139 : Size
140 3906 : ProcGlobalShmemSize(void)
141 : {
142 3906 : Size size = 0;
143 :
144 : /* ProcGlobal */
145 3906 : size = add_size(size, sizeof(PROC_HDR));
146 3906 : size = add_size(size, sizeof(slock_t));
147 :
148 3906 : size = add_size(size, PGProcShmemSize());
149 3906 : size = add_size(size, FastPathLockShmemSize());
150 :
151 3906 : return size;
152 : }
153 :
154 : /*
155 : * Report number of semaphores needed by InitProcGlobal.
156 : */
157 : int
158 3906 : ProcGlobalSemas(void)
159 : {
160 : /*
161 : * We need a sema per backend (including autovacuum), plus one for each
162 : * auxiliary process.
163 : */
164 3906 : return MaxBackends + NUM_AUXILIARY_PROCS;
165 : }
166 :
167 : /*
168 : * InitProcGlobal -
169 : * Initialize the global process table during postmaster or standalone
170 : * backend startup.
171 : *
172 : * We also create all the per-process semaphores we will need to support
173 : * the requested number of backends. We used to allocate semaphores
174 : * only when backends were actually started up, but that is bad because
175 : * it lets Postgres fail under load --- a lot of Unix systems are
176 : * (mis)configured with small limits on the number of semaphores, and
177 : * running out when trying to start another backend is a common failure.
178 : * So, now we grab enough semaphores to support the desired max number
179 : * of backends immediately at initialization --- if the sysadmin has set
180 : * MaxConnections, max_worker_processes, max_wal_senders, or
181 : * autovacuum_worker_slots higher than his kernel will support, he'll
182 : * find out sooner rather than later.
183 : *
184 : * Another reason for creating semaphores here is that the semaphore
185 : * implementation typically requires us to create semaphores in the
186 : * postmaster, not in backends.
187 : *
188 : * Note: this is NOT called by individual backends under a postmaster,
189 : * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
190 : * pointers must be propagated specially for EXEC_BACKEND operation.
191 : */
192 : void
193 2100 : InitProcGlobal(void)
194 : {
195 : PGPROC *procs;
196 : int i,
197 : j;
198 : bool found;
199 2100 : uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
200 :
201 : /* Used for setup of per-backend fast-path slots. */
202 : char *fpPtr,
203 : *fpEndPtr PG_USED_FOR_ASSERTS_ONLY;
204 : Size fpLockBitsSize,
205 : fpRelIdSize;
206 : Size requestSize;
207 : char *ptr;
208 :
209 : /* Create the ProcGlobal shared structure */
210 2100 : ProcGlobal = (PROC_HDR *)
211 2100 : ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
212 : Assert(!found);
213 :
214 : /*
215 : * Initialize the data structures.
216 : */
217 2100 : ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
218 2100 : dlist_init(&ProcGlobal->freeProcs);
219 2100 : dlist_init(&ProcGlobal->autovacFreeProcs);
220 2100 : dlist_init(&ProcGlobal->bgworkerFreeProcs);
221 2100 : dlist_init(&ProcGlobal->walsenderFreeProcs);
222 2100 : ProcGlobal->startupBufferPinWaitBufId = -1;
223 2100 : ProcGlobal->walwriterProc = INVALID_PROC_NUMBER;
224 2100 : ProcGlobal->checkpointerProc = INVALID_PROC_NUMBER;
225 2100 : pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PROC_NUMBER);
226 2100 : pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PROC_NUMBER);
227 :
228 : /*
229 : * Create and initialize all the PGPROC structures we'll need. There are
230 : * six separate consumers: (1) normal backends, (2) autovacuum workers and
231 : * special workers, (3) background workers, (4) walsenders, (5) auxiliary
232 : * processes, and (6) prepared transactions. (For largely-historical
233 : * reasons, we combine autovacuum and special workers into one category
234 : * with a single freelist.) Each PGPROC structure is dedicated to exactly
235 : * one of these purposes, and they do not move between groups.
236 : */
237 2100 : requestSize = PGProcShmemSize();
238 :
239 2100 : ptr = ShmemInitStruct("PGPROC structures",
240 : requestSize,
241 : &found);
242 :
243 2100 : MemSet(ptr, 0, requestSize);
244 :
245 2100 : procs = (PGPROC *) ptr;
246 2100 : ptr = (char *) ptr + TotalProcs * sizeof(PGPROC);
247 :
248 2100 : ProcGlobal->allProcs = procs;
249 : /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
250 2100 : ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
251 :
252 : /*
253 : * Allocate arrays mirroring PGPROC fields in a dense manner. See
254 : * PROC_HDR.
255 : *
256 : * XXX: It might make sense to increase padding for these arrays, given
257 : * how hotly they are accessed.
258 : */
259 2100 : ProcGlobal->xids = (TransactionId *) ptr;
260 2100 : ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->xids));
261 :
262 2100 : ProcGlobal->subxidStates = (XidCacheStatus *) ptr;
263 2100 : ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->subxidStates));
264 :
265 2100 : ProcGlobal->statusFlags = (uint8 *) ptr;
266 2100 : ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->statusFlags));
267 :
268 : /* make sure we didn't overflow */
269 : Assert((ptr > (char *) procs) && (ptr <= (char *) procs + requestSize));
270 :
271 : /*
272 : * Allocate arrays for fast-path locks. Those are variable-length, so
273 : * can't be included in PGPROC directly. We allocate a separate piece of
274 : * shared memory and then divide that between backends.
275 : */
276 2100 : fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
277 2100 : fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
278 :
279 2100 : requestSize = FastPathLockShmemSize();
280 :
281 2100 : fpPtr = ShmemInitStruct("Fast-Path Lock Array",
282 : requestSize,
283 : &found);
284 :
285 2100 : MemSet(fpPtr, 0, requestSize);
286 :
287 : /* For asserts checking we did not overflow. */
288 2100 : fpEndPtr = fpPtr + requestSize;
289 :
290 281164 : for (i = 0; i < TotalProcs; i++)
291 : {
292 279064 : PGPROC *proc = &procs[i];
293 :
294 : /* Common initialization for all PGPROCs, regardless of type. */
295 :
296 : /*
297 : * Set the fast-path lock arrays, and move the pointer. We interleave
298 : * the two arrays, to (hopefully) get some locality for each backend.
299 : */
300 279064 : proc->fpLockBits = (uint64 *) fpPtr;
301 279064 : fpPtr += fpLockBitsSize;
302 :
303 279064 : proc->fpRelId = (Oid *) fpPtr;
304 279064 : fpPtr += fpRelIdSize;
305 :
306 : Assert(fpPtr <= fpEndPtr);
307 :
308 : /*
309 : * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
310 : * dummy PGPROCs don't need these though - they're never associated
311 : * with a real process.
312 : */
313 279064 : if (i < MaxBackends + NUM_AUXILIARY_PROCS)
314 : {
315 277386 : proc->sem = PGSemaphoreCreate();
316 277386 : InitSharedLatch(&(proc->procLatch));
317 277386 : LWLockInitialize(&(proc->fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
318 : }
319 :
320 : /*
321 : * Newly created PGPROCs for normal backends, autovacuum workers,
322 : * special workers, bgworkers, and walsenders must be queued up on the
323 : * appropriate free list. Because there can only ever be a small,
324 : * fixed number of auxiliary processes, no free list is used in that
325 : * case; InitAuxiliaryProcess() instead uses a linear search. PGPROCs
326 : * for prepared transactions are added to a free list by
327 : * TwoPhaseShmemInit().
328 : */
329 279064 : if (i < MaxConnections)
330 : {
331 : /* PGPROC for normal backend, add to freeProcs list */
332 139682 : dlist_push_tail(&ProcGlobal->freeProcs, &proc->links);
333 139682 : proc->procgloballist = &ProcGlobal->freeProcs;
334 : }
335 139382 : else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS)
336 : {
337 : /* PGPROC for AV or special worker, add to autovacFreeProcs list */
338 27712 : dlist_push_tail(&ProcGlobal->autovacFreeProcs, &proc->links);
339 27712 : proc->procgloballist = &ProcGlobal->autovacFreeProcs;
340 : }
341 111670 : else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS + max_worker_processes)
342 : {
343 : /* PGPROC for bgworker, add to bgworkerFreeProcs list */
344 16796 : dlist_push_tail(&ProcGlobal->bgworkerFreeProcs, &proc->links);
345 16796 : proc->procgloballist = &ProcGlobal->bgworkerFreeProcs;
346 : }
347 94874 : else if (i < MaxBackends)
348 : {
349 : /* PGPROC for walsender, add to walsenderFreeProcs list */
350 13396 : dlist_push_tail(&ProcGlobal->walsenderFreeProcs, &proc->links);
351 13396 : proc->procgloballist = &ProcGlobal->walsenderFreeProcs;
352 : }
353 :
354 : /* Initialize myProcLocks[] shared memory queues. */
355 4744088 : for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
356 4465024 : dlist_init(&(proc->myProcLocks[j]));
357 :
358 : /* Initialize lockGroupMembers list. */
359 279064 : dlist_init(&proc->lockGroupMembers);
360 :
361 : /*
362 : * Initialize the atomic variables, otherwise, it won't be safe to
363 : * access them for backends that aren't currently in use.
364 : */
365 279064 : pg_atomic_init_u32(&(proc->procArrayGroupNext), INVALID_PROC_NUMBER);
366 279064 : pg_atomic_init_u32(&(proc->clogGroupNext), INVALID_PROC_NUMBER);
367 279064 : pg_atomic_init_u64(&(proc->waitStart), 0);
368 : }
369 :
370 : /* Should have consumed exactly the expected amount of fast-path memory. */
371 : Assert(fpPtr == fpEndPtr);
372 :
373 : /*
374 : * Save pointers to the blocks of PGPROC structures reserved for auxiliary
375 : * processes and prepared transactions.
376 : */
377 2100 : AuxiliaryProcs = &procs[MaxBackends];
378 2100 : PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
379 :
380 : /* Create ProcStructLock spinlock, too */
381 2100 : ProcStructLock = (slock_t *) ShmemInitStruct("ProcStructLock spinlock",
382 : sizeof(slock_t),
383 : &found);
384 2100 : SpinLockInit(ProcStructLock);
385 2100 : }
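
To make the fast-path layout concrete, here is a standalone sketch of the carving loop above: one flat allocation (malloc standing in for ShmemInitStruct) is split into interleaved fpLockBits/fpRelId slices, one adjacent pair per backend, finishing with the same end-pointer check. All sizes and counts are invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NPROCS      4       /* stand-in for TotalProcs */
    #define LOCKBITS_SZ 8       /* one uint64 group per backend (assumed) */
    #define RELID_SZ    64      /* sixteen 4-byte slots per backend (assumed) */

    typedef struct
    {
        uint64_t   *fpLockBits;
        uint32_t   *fpRelId;
    } FakeProc;

    int
    main(void)
    {
        char       *base = calloc(NPROCS, LOCKBITS_SZ + RELID_SZ);
        char       *fpPtr = base;
        char       *fpEndPtr = base + NPROCS * (LOCKBITS_SZ + RELID_SZ);
        FakeProc    procs[NPROCS];

        /* Interleave the two arrays so each backend's bits and relids
         * end up next to each other, as the loop above does. */
        for (int i = 0; i < NPROCS; i++)
        {
            procs[i].fpLockBits = (uint64_t *) fpPtr;
            fpPtr += LOCKBITS_SZ;
            procs[i].fpRelId = (uint32_t *) fpPtr;
            fpPtr += RELID_SZ;
        }

        /* Should have consumed exactly the allocation. */
        printf("consumed exactly: %s\n", fpPtr == fpEndPtr ? "yes" : "no");
        free(base);
        return 0;
    }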
386 :
387 : /*
388 : * InitProcess -- initialize a per-process PGPROC entry for this backend
389 : */
390 : void
391 35086 : InitProcess(void)
392 : {
393 : dlist_head *procgloballist;
394 :
395 : /*
396 : * ProcGlobal should be set up already (if we are a backend, we inherit
397 : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
398 : */
399 35086 : if (ProcGlobal == NULL)
400 0 : elog(PANIC, "proc header uninitialized");
401 :
402 35086 : if (MyProc != NULL)
403 0 : elog(ERROR, "you already exist");
404 :
405 : /*
406 : * Before we start accessing the shared memory in a serious way, mark
407 : * ourselves as an active postmaster child; this is so that the postmaster
408 : * can detect it if we exit without cleaning up.
409 : */
410 35086 : if (IsUnderPostmaster)
411 34872 : RegisterPostmasterChildActive();
412 :
413 : /*
414 : * Decide which list should supply our PGPROC. This logic must match the
415 : * way the freelists were constructed in InitProcGlobal().
416 : */
417 35086 : if (AmAutoVacuumWorkerProcess() || AmSpecialWorkerProcess())
418 3154 : procgloballist = &ProcGlobal->autovacFreeProcs;
419 31932 : else if (AmBackgroundWorkerProcess())
420 4410 : procgloballist = &ProcGlobal->bgworkerFreeProcs;
421 27522 : else if (AmWalSenderProcess())
422 2204 : procgloballist = &ProcGlobal->walsenderFreeProcs;
423 : else
424 25318 : procgloballist = &ProcGlobal->freeProcs;
425 :
426 : /*
427 : * Try to get a proc struct from the appropriate free list. If this
428 : * fails, we must be out of PGPROC structures (not to mention semaphores).
429 : *
430 : * While we are holding the ProcStructLock, also copy the current shared
431 : * estimate of spins_per_delay to local storage.
432 : */
433 35086 : SpinLockAcquire(ProcStructLock);
434 :
435 35086 : set_spins_per_delay(ProcGlobal->spins_per_delay);
436 :
437 35086 : if (!dlist_is_empty(procgloballist))
438 : {
439 35080 : MyProc = dlist_container(PGPROC, links, dlist_pop_head_node(procgloballist));
440 35080 : SpinLockRelease(ProcStructLock);
441 : }
442 : else
443 : {
444 : /*
445 : * If we reach here, all the PGPROCs are in use. This is one of the
446 : * possible places to detect "too many backends", so give the standard
447 : * error message. XXX do we need to give a different failure message
448 : * in the autovacuum case?
449 : */
450 6 : SpinLockRelease(ProcStructLock);
451 6 : if (AmWalSenderProcess())
452 4 : ereport(FATAL,
453 : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
454 : errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
455 : max_wal_senders)));
456 2 : ereport(FATAL,
457 : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
458 : errmsg("sorry, too many clients already")));
459 : }
460 35080 : MyProcNumber = GetNumberFromPGProc(MyProc);
461 :
462 : /*
463 : * Cross-check that the PGPROC is of the type we expect; if this were not
464 : * the case, it would get returned to the wrong list.
465 : */
466 : Assert(MyProc->procgloballist == procgloballist);
467 :
468 : /*
469 : * Initialize all fields of MyProc, except for those previously
470 : * initialized by InitProcGlobal.
471 : */
472 35080 : dlist_node_init(&MyProc->links);
473 35080 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
474 35080 : MyProc->fpVXIDLock = false;
475 35080 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
476 35080 : MyProc->xid = InvalidTransactionId;
477 35080 : MyProc->xmin = InvalidTransactionId;
478 35080 : MyProc->pid = MyProcPid;
479 35080 : MyProc->vxid.procNumber = MyProcNumber;
480 35080 : MyProc->vxid.lxid = InvalidLocalTransactionId;
481 : /* databaseId and roleId will be filled in later */
482 35080 : MyProc->databaseId = InvalidOid;
483 35080 : MyProc->roleId = InvalidOid;
484 35080 : MyProc->tempNamespaceId = InvalidOid;
485 35080 : MyProc->isRegularBackend = AmRegularBackendProcess();
486 35080 : MyProc->delayChkptFlags = 0;
487 35080 : MyProc->statusFlags = 0;
488 : /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
489 35080 : if (AmAutoVacuumWorkerProcess())
490 2400 : MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
491 35080 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
492 35080 : MyProc->lwWaitMode = 0;
493 35080 : MyProc->waitLock = NULL;
494 35080 : MyProc->waitProcLock = NULL;
495 35080 : pg_atomic_write_u64(&MyProc->waitStart, 0);
496 : #ifdef USE_ASSERT_CHECKING
497 : {
498 : int i;
499 :
500 : /* Last process should have released all locks. */
501 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
502 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
503 : }
504 : #endif
505 35080 : MyProc->recoveryConflictPending = false;
506 :
507 : /* Initialize fields for sync rep */
508 35080 : MyProc->waitLSN = 0;
509 35080 : MyProc->syncRepState = SYNC_REP_NOT_WAITING;
510 35080 : dlist_node_init(&MyProc->syncRepLinks);
511 :
512 : /* Initialize fields for group XID clearing. */
513 35080 : MyProc->procArrayGroupMember = false;
514 35080 : MyProc->procArrayGroupMemberXid = InvalidTransactionId;
515 : Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PROC_NUMBER);
516 :
517 : /* Check that group locking fields are in a proper initial state. */
518 : Assert(MyProc->lockGroupLeader == NULL);
519 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
520 :
521 : /* Initialize wait event information. */
522 35080 : MyProc->wait_event_info = 0;
523 :
524 : /* Initialize fields for group transaction status update. */
525 35080 : MyProc->clogGroupMember = false;
526 35080 : MyProc->clogGroupMemberXid = InvalidTransactionId;
527 35080 : MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
528 35080 : MyProc->clogGroupMemberPage = -1;
529 35080 : MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
530 : Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PROC_NUMBER);
531 :
532 : /*
533 : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
534 : * on it. That allows us to repoint the process latch, which so far
535 : * points to the process-local one, to the shared one.
536 : */
537 35080 : OwnLatch(&MyProc->procLatch);
538 35080 : SwitchToSharedLatch();
539 :
540 : /* now that we have a proc, report wait events to shared memory */
541 35080 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
542 :
543 : /*
544 : * We might be reusing a semaphore that belonged to a failed process. So
545 : * be careful and reinitialize its value here. (This is not strictly
546 : * necessary anymore, but seems like a good idea for cleanliness.)
547 : */
548 35080 : PGSemaphoreReset(MyProc->sem);
549 :
550 : /*
551 : * Arrange to clean up at backend exit.
552 : */
553 35080 : on_shmem_exit(ProcKill, 0);
554 :
555 : /*
556 : * Now that we have a PGPROC, we could try to acquire locks, so initialize
557 : * local state needed for LWLocks, and the deadlock checker.
558 : */
559 35080 : InitLWLockAccess();
560 35080 : InitDeadLockChecking();
561 :
562 : #ifdef EXEC_BACKEND
563 :
564 : /*
565 : * Initialize backend-local pointers to all the shared data structures.
566 : * (We couldn't do this until now because it needs LWLocks.)
567 : */
568 : if (IsUnderPostmaster)
569 : AttachSharedMemoryStructs();
570 : #endif
571 35080 : }
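
InitProcess() pops a node off a freelist and recovers the owning PGPROC with dlist_container(), the intrusive-list idiom in which the link node is embedded in the struct that owns it. A self-contained sketch of that idiom; container_of here is a local macro, not a PostgreSQL API:

    #include <stddef.h>
    #include <stdio.h>

    /* Link node embedded in its owner, as PGPROC embeds "links". */
    typedef struct dnode
    {
        struct dnode *prev;
        struct dnode *next;
    } dnode;

    typedef struct FakeProc
    {
        int         pid;
        dnode       links;
    } FakeProc;

    /* Recover the owner from a pointer to its embedded node. */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    int
    main(void)
    {
        FakeProc    proc = {.pid = 42};
        dnode      *popped = &proc.links;   /* as if popped off a freelist */

        printf("pid = %d\n", container_of(popped, FakeProc, links)->pid);
        return 0;
    }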
572 :
573 : /*
574 : * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
575 : *
576 : * This is separate from InitProcess because we can't acquire LWLocks until
577 : * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
578 : * work until after we've done AttachSharedMemoryStructs.
579 : */
580 : void
581 35062 : InitProcessPhase2(void)
582 : {
583 : Assert(MyProc != NULL);
584 :
585 : /*
586 : * Add our PGPROC to the PGPROC array in shared memory.
587 : */
588 35062 : ProcArrayAdd(MyProc);
589 :
590 : /*
591 : * Arrange to clean that up at backend exit.
592 : */
593 35062 : on_shmem_exit(RemoveProcFromArray, 0);
594 35062 : }
595 :
596 : /*
597 : * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
598 : *
599 : * This is called by bgwriter and similar processes so that they will have a
600 : * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
601 : * and sema that are assigned are one of the extra ones created during
602 : * InitProcGlobal.
603 : *
604 : * Auxiliary processes are presently not expected to wait for real (lockmgr)
605 : * locks, so we need not set up the deadlock checker. They are never added
606 : * to the ProcArray or the sinval messaging mechanism, either. They also
607 : * don't get a VXID assigned, since this is only useful when we actually
608 : * hold lockmgr locks.
609 : *
610 : * The startup process, however, uses locks but never waits for them in the
611 : * normal backend sense. It also takes part in sinval messaging as a sendOnly
612 : * process, so it never reads messages from the sinval queue. Hence the
613 : * startup process does have a VXID and does show up in pg_locks.
614 : */
615 : void
616 8094 : InitAuxiliaryProcess(void)
617 : {
618 : PGPROC *auxproc;
619 : int proctype;
620 :
621 : /*
622 : * ProcGlobal should be set up already (if we are a backend, we inherit
623 : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
624 : */
625 8094 : if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
626 0 : elog(PANIC, "proc header uninitialized");
627 :
628 8094 : if (MyProc != NULL)
629 0 : elog(ERROR, "you already exist");
630 :
631 8094 : if (IsUnderPostmaster)
632 8094 : RegisterPostmasterChildActive();
633 :
634 : /*
635 : * We use the ProcStructLock to protect assignment and releasing of
636 : * AuxiliaryProcs entries.
637 : *
638 : * While we are holding the ProcStructLock, also copy the current shared
639 : * estimate of spins_per_delay to local storage.
640 : */
641 8094 : SpinLockAcquire(ProcStructLock);
642 :
643 8094 : set_spins_per_delay(ProcGlobal->spins_per_delay);
644 :
645 : /*
646 : * Find a free auxproc ... *big* trouble if there isn't one ...
647 : */
648 34806 : for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
649 : {
650 34806 : auxproc = &AuxiliaryProcs[proctype];
651 34806 : if (auxproc->pid == 0)
652 8094 : break;
653 : }
654 8094 : if (proctype >= NUM_AUXILIARY_PROCS)
655 : {
656 0 : SpinLockRelease(ProcStructLock);
657 0 : elog(FATAL, "all AuxiliaryProcs are in use");
658 : }
659 :
660 : /* Mark auxiliary proc as in use by me */
661 : /* use volatile pointer to prevent code rearrangement */
662 8094 : ((volatile PGPROC *) auxproc)->pid = MyProcPid;
663 :
664 8094 : SpinLockRelease(ProcStructLock);
665 :
666 8094 : MyProc = auxproc;
667 8094 : MyProcNumber = GetNumberFromPGProc(MyProc);
668 :
669 : /*
670 : * Initialize all fields of MyProc, except for those previously
671 : * initialized by InitProcGlobal.
672 : */
673 8094 : dlist_node_init(&MyProc->links);
674 8094 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
675 8094 : MyProc->fpVXIDLock = false;
676 8094 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
677 8094 : MyProc->xid = InvalidTransactionId;
678 8094 : MyProc->xmin = InvalidTransactionId;
679 8094 : MyProc->vxid.procNumber = INVALID_PROC_NUMBER;
680 8094 : MyProc->vxid.lxid = InvalidLocalTransactionId;
681 8094 : MyProc->databaseId = InvalidOid;
682 8094 : MyProc->roleId = InvalidOid;
683 8094 : MyProc->tempNamespaceId = InvalidOid;
684 8094 : MyProc->isRegularBackend = false;
685 8094 : MyProc->delayChkptFlags = 0;
686 8094 : MyProc->statusFlags = 0;
687 8094 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
688 8094 : MyProc->lwWaitMode = 0;
689 8094 : MyProc->waitLock = NULL;
690 8094 : MyProc->waitProcLock = NULL;
691 8094 : pg_atomic_write_u64(&MyProc->waitStart, 0);
692 : #ifdef USE_ASSERT_CHECKING
693 : {
694 : int i;
695 :
696 : /* Last process should have released all locks. */
697 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
698 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
699 : }
700 : #endif
701 :
702 : /*
703 : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
704 : * on it. That allows us to repoint the process latch, which so far
705 : * points to the process-local one, to the shared one.
706 : */
707 8094 : OwnLatch(&MyProc->procLatch);
708 8094 : SwitchToSharedLatch();
709 :
710 : /* now that we have a proc, report wait events to shared memory */
711 8094 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
712 :
713 : /* Check that group locking fields are in a proper initial state. */
714 : Assert(MyProc->lockGroupLeader == NULL);
715 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
716 :
717 : /*
718 : * We might be reusing a semaphore that belonged to a failed process. So
719 : * be careful and reinitialize its value here. (This is not strictly
720 : * necessary anymore, but seems like a good idea for cleanliness.)
721 : */
722 8094 : PGSemaphoreReset(MyProc->sem);
723 :
724 : /*
725 : * Arrange to clean up at process exit.
726 : */
727 8094 : on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
728 :
729 : /*
730 : * Now that we have a PGPROC, we could try to acquire lightweight locks.
731 : * Initialize local state needed for them. (Heavyweight locks cannot be
732 : * acquired in aux processes.)
733 : */
734 8094 : InitLWLockAccess();
735 :
736 : #ifdef EXEC_BACKEND
737 :
738 : /*
739 : * Initialize backend-local pointers to all the shared data structures.
740 : * (We couldn't do this until now because it needs LWLocks.)
741 : */
742 : if (IsUnderPostmaster)
743 : AttachSharedMemoryStructs();
744 : #endif
745 8094 : }
746 :
747 : /*
748 : * Used from bufmgr to share the value of the buffer that Startup waits on,
749 : * or to reset the value to "not waiting" (-1). This allows processing
750 : * of recovery conflicts for buffer pins. Set is made before backends look
751 : * at this value, so locking not required, especially since the set is
752 : * an atomic integer set operation.
753 : */
754 : void
755 40 : SetStartupBufferPinWaitBufId(int bufid)
756 : {
757 : /* use volatile pointer to prevent code rearrangement */
758 40 : volatile PROC_HDR *procglobal = ProcGlobal;
759 :
760 40 : procglobal->startupBufferPinWaitBufId = bufid;
761 40 : }
762 :
763 : /*
764 : * Used by backends when they receive a request to check for buffer pin waits.
765 : */
766 : int
767 10 : GetStartupBufferPinWaitBufId(void)
768 : {
769 : /* use volatile pointer to prevent code rearrangement */
770 10 : volatile PROC_HDR *procglobal = ProcGlobal;
771 :
772 10 : return procglobal->startupBufferPinWaitBufId;
773 : }
774 :
775 : /*
776 : * Check whether there are at least N free PGPROC objects. If false is
777 : * returned, *nfree will be set to the number of free PGPROC objects.
778 : * Otherwise, *nfree will be set to n.
779 : *
780 : * Note: this is designed on the assumption that N will generally be small.
781 : */
782 : bool
783 480 : HaveNFreeProcs(int n, int *nfree)
784 : {
785 : dlist_iter iter;
786 :
787 : Assert(n > 0);
788 : Assert(nfree);
789 :
790 480 : SpinLockAcquire(ProcStructLock);
791 :
792 480 : *nfree = 0;
793 1434 : dlist_foreach(iter, &ProcGlobal->freeProcs)
794 : {
795 1426 : (*nfree)++;
796 1426 : if (*nfree == n)
797 472 : break;
798 : }
799 :
800 480 : SpinLockRelease(ProcStructLock);
801 :
802 480 : return (*nfree == n);
803 : }
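
Because n is expected to be small, the loop above stops as soon as it has counted n entries, so the time spent under the spinlock is bounded by min(n, freelist length) rather than the full list. The same early-exit idiom on a plain singly linked list (hypothetical types, not the dlist API):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Node
    {
        struct Node *next;
    } Node;

    /* Report whether at least n nodes are free; on failure, *nfree holds
     * how many were actually seen, mirroring HaveNFreeProcs(). */
    static bool
    have_n_free(const Node *head, int n, int *nfree)
    {
        *nfree = 0;
        for (const Node *p = head; p != NULL; p = p->next)
        {
            (*nfree)++;
            if (*nfree == n)    /* early exit: no need to count further */
                break;
        }
        return (*nfree == n);
    }

    int
    main(void)
    {
        Node        c = {NULL}, b = {&c}, a = {&b};
        int         nfree;

        printf("%d (saw %d)\n", have_n_free(&a, 2, &nfree), nfree); /* 1 (saw 2) */
        printf("%d (saw %d)\n", have_n_free(&a, 5, &nfree), nfree); /* 0 (saw 3) */
        return 0;
    }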
804 :
805 : /*
806 : * Cancel any pending wait for lock, when aborting a transaction, and revert
807 : * any strong lock count acquisition for a lock being acquired.
808 : *
809 : * (Normally, this would only happen if we accept a cancel/die
810 : * interrupt while waiting; but an ereport(ERROR) before or during the lock
811 : * wait is within the realm of possibility, too.)
812 : */
813 : void
814 925854 : LockErrorCleanup(void)
815 : {
816 : LOCALLOCK *lockAwaited;
817 : LWLock *partitionLock;
818 : DisableTimeoutParams timeouts[2];
819 :
820 925854 : HOLD_INTERRUPTS();
821 :
822 925854 : AbortStrongLockAcquire();
823 :
824 : /* Nothing to do if we weren't waiting for a lock */
825 925854 : lockAwaited = GetAwaitedLock();
826 925854 : if (lockAwaited == NULL)
827 : {
828 925774 : RESUME_INTERRUPTS();
829 925774 : return;
830 : }
831 :
832 : /*
833 : * Turn off the deadlock and lock timeout timers, if they are still
834 : * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
835 : * indicator flag, since this function is executed before
836 : * ProcessInterrupts when responding to SIGINT; else we'd lose the
837 : * knowledge that the SIGINT came from a lock timeout and not an external
838 : * source.
839 : */
840 80 : timeouts[0].id = DEADLOCK_TIMEOUT;
841 80 : timeouts[0].keep_indicator = false;
842 80 : timeouts[1].id = LOCK_TIMEOUT;
843 80 : timeouts[1].keep_indicator = true;
844 80 : disable_timeouts(timeouts, 2);
845 :
846 : /* Unlink myself from the wait queue, if on it (might not be anymore!) */
847 80 : partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
848 80 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
849 :
850 80 : if (!dlist_node_is_detached(&MyProc->links))
851 : {
852 : /* We could not have been granted the lock yet */
853 80 : RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
854 : }
855 : else
856 : {
857 : /*
858 : * Somebody kicked us off the lock queue already. Perhaps they
859 : * granted us the lock, or perhaps they detected a deadlock. If they
860 : * did grant us the lock, we'd better remember it in our local lock
861 : * table.
862 : */
863 0 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
864 0 : GrantAwaitedLock();
865 : }
866 :
867 80 : ResetAwaitedLock();
868 :
869 80 : LWLockRelease(partitionLock);
870 :
871 80 : RESUME_INTERRUPTS();
872 : }
873 :
874 :
875 : /*
876 : * ProcReleaseLocks() -- release locks associated with current transaction
877 : * at main transaction commit or abort
878 : *
879 : * At main transaction commit, we release standard locks except session locks.
880 : * At main transaction abort, we release all locks including session locks.
881 : *
882 : * Advisory locks are released only if they are transaction-level;
883 : * session-level holds remain, whether this is a commit or not.
884 : *
885 : * At subtransaction commit, we don't release any locks (so this func is not
886 : * needed at all); we will defer the releasing to the parent transaction.
887 : * At subtransaction abort, we release all locks held by the subtransaction;
888 : * this is implemented by retail releasing of the locks under control of
889 : * the ResourceOwner mechanism.
890 : */
891 : void
892 866294 : ProcReleaseLocks(bool isCommit)
893 : {
894 866294 : if (!MyProc)
895 0 : return;
896 : /* If waiting, get off wait queue (should only be needed after error) */
897 866294 : LockErrorCleanup();
898 : /* Release standard locks, including session-level if aborting */
899 866294 : LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
900 : /* Release transaction-level advisory locks */
901 866294 : LockReleaseAll(USER_LOCKMETHOD, false);
902 : }
903 :
904 :
905 : /*
906 : * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
907 : */
908 : static void
909 35062 : RemoveProcFromArray(int code, Datum arg)
910 : {
911 : Assert(MyProc != NULL);
912 35062 : ProcArrayRemove(MyProc, InvalidTransactionId);
913 35062 : }
914 :
915 : /*
916 : * ProcKill() -- Destroy the per-proc data structure for
917 : * this process. Release any of its held LW locks.
918 : */
919 : static void
920 35080 : ProcKill(int code, Datum arg)
921 : {
922 : PGPROC *proc;
923 : dlist_head *procgloballist;
924 :
925 : Assert(MyProc != NULL);
926 :
927 : /* not safe if forked by system(), etc. */
928 35080 : if (MyProc->pid != (int) getpid())
929 0 : elog(PANIC, "ProcKill() called in child process");
930 :
931 : /* Make sure we're out of the sync rep lists */
932 35080 : SyncRepCleanupAtProcExit();
933 :
934 : #ifdef USE_ASSERT_CHECKING
935 : {
936 : int i;
937 :
938 : /* Last process should have released all locks. */
939 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
940 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
941 : }
942 : #endif
943 :
944 : /*
945 : * Release any LW locks I am holding. There really shouldn't be any, but
946 : * it's cheap to check again before we cut the knees off the LWLock
947 : * facility by releasing our PGPROC ...
948 : */
949 35080 : LWLockReleaseAll();
950 :
951 : /* Cancel any pending condition variable sleep, too */
952 35080 : ConditionVariableCancelSleep();
953 :
954 : /*
955 : * Detach from any lock group of which we are a member. If the leader
956 : * exits before all other group members, its PGPROC will remain allocated
957 : * until the last group process exits; that process must return the
958 : * leader's PGPROC to the appropriate list.
959 : */
960 35080 : if (MyProc->lockGroupLeader != NULL)
961 : {
962 2878 : PGPROC *leader = MyProc->lockGroupLeader;
963 2878 : LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
964 :
965 2878 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
966 : Assert(!dlist_is_empty(&leader->lockGroupMembers));
967 2878 : dlist_delete(&MyProc->lockGroupLink);
968 2878 : if (dlist_is_empty(&leader->lockGroupMembers))
969 : {
970 138 : leader->lockGroupLeader = NULL;
971 138 : if (leader != MyProc)
972 : {
973 0 : procgloballist = leader->procgloballist;
974 :
975 : /* Leader exited first; return its PGPROC. */
976 0 : SpinLockAcquire(ProcStructLock);
977 0 : dlist_push_head(procgloballist, &leader->links);
978 0 : SpinLockRelease(ProcStructLock);
979 : }
980 : }
981 2740 : else if (leader != MyProc)
982 2740 : MyProc->lockGroupLeader = NULL;
983 2878 : LWLockRelease(leader_lwlock);
984 : }
985 :
986 : /*
987 : * Reset MyLatch to the process local one. This is so that signal
988 : * handlers et al can continue using the latch after the shared latch
989 : * isn't ours anymore.
990 : *
991 : * Similarly, stop reporting wait events to MyProc->wait_event_info.
992 : *
993 : * After that clear MyProc and disown the shared latch.
994 : */
995 35080 : SwitchBackToLocalLatch();
996 35080 : pgstat_reset_wait_event_storage();
997 :
998 35080 : proc = MyProc;
999 35080 : MyProc = NULL;
1000 35080 : MyProcNumber = INVALID_PROC_NUMBER;
1001 35080 : DisownLatch(&proc->procLatch);
1002 :
1003 : /* Mark the proc no longer in use */
1004 35080 : proc->pid = 0;
1005 35080 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1006 35080 : proc->vxid.lxid = InvalidLocalTransactionId;
1007 :
1008 35080 : procgloballist = proc->procgloballist;
1009 35080 : SpinLockAcquire(ProcStructLock);
1010 :
1011 : /*
1012 : * If we're still a member of a locking group, that means we're a leader
1013 : * which has somehow exited before its children. The last remaining child
1014 : * will release our PGPROC. Otherwise, release it now.
1015 : */
1016 35080 : if (proc->lockGroupLeader == NULL)
1017 : {
1018 : /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
1019 : Assert(dlist_is_empty(&proc->lockGroupMembers));
1020 :
1021 : /* Return PGPROC structure (and semaphore) to appropriate freelist */
1022 35080 : dlist_push_tail(procgloballist, &proc->links);
1023 : }
1024 :
1025 : /* Update shared estimate of spins_per_delay */
1026 35080 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1027 :
1028 35080 : SpinLockRelease(ProcStructLock);
1029 :
1030 : /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
1031 35080 : if (AutovacuumLauncherPid != 0)
1032 2398 : kill(AutovacuumLauncherPid, SIGUSR2);
1033 35080 : }
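
The lock-group bookkeeping above implements a last-one-out rule: if the leader exits first, its PGPROC stays allocated, and whichever member empties lockGroupMembers returns the leader's PGPROC to its freelist. A tiny sketch of that ownership rule, with a plain counter standing in for the members list (all names hypothetical):

    #include <stdio.h>

    typedef struct
    {
        int         members;        /* stands in for lockGroupMembers */
        int         leader_exited;
    } Group;

    /* Called once as each process in the group exits. */
    static void
    member_exit(Group *g, int am_leader)
    {
        if (am_leader)
            g->leader_exited = 1;
        g->members--;

        /* Last one out returns the leader's PGPROC to the freelist. */
        if (g->members == 0 && g->leader_exited && !am_leader)
            printf("returning leader's PGPROC to its freelist\n");
    }

    int
    main(void)
    {
        Group       g = {.members = 3};

        member_exit(&g, 1);     /* leader first: slot must stay allocated */
        member_exit(&g, 0);
        member_exit(&g, 0);     /* last member hands the slot back */
        return 0;
    }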
1034 :
1035 : /*
1036 : * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
1037 : * processes (bgwriter, etc). The PGPROC and sema are not released, only
1038 : * marked as not-in-use.
1039 : */
1040 : static void
1041 8094 : AuxiliaryProcKill(int code, Datum arg)
1042 : {
1043 8094 : int proctype = DatumGetInt32(arg);
1044 : PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
1045 : PGPROC *proc;
1046 :
1047 : Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
1048 :
1049 : /* not safe if forked by system(), etc. */
1050 8094 : if (MyProc->pid != (int) getpid())
1051 0 : elog(PANIC, "AuxiliaryProcKill() called in child process");
1052 :
1053 8094 : auxproc = &AuxiliaryProcs[proctype];
1054 :
1055 : Assert(MyProc == auxproc);
1056 :
1057 : /* Release any LW locks I am holding (see notes above) */
1058 8094 : LWLockReleaseAll();
1059 :
1060 : /* Cancel any pending condition variable sleep, too */
1061 8094 : ConditionVariableCancelSleep();
1062 :
1063 : /* look at the equivalent ProcKill() code for comments */
1064 8094 : SwitchBackToLocalLatch();
1065 8094 : pgstat_reset_wait_event_storage();
1066 :
1067 8094 : proc = MyProc;
1068 8094 : MyProc = NULL;
1069 8094 : MyProcNumber = INVALID_PROC_NUMBER;
1070 8094 : DisownLatch(&proc->procLatch);
1071 :
1072 8094 : SpinLockAcquire(ProcStructLock);
1073 :
1074 : /* Mark auxiliary proc no longer in use */
1075 8094 : proc->pid = 0;
1076 8094 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1077 8094 : proc->vxid.lxid = InvalidLocalTransactionId;
1078 :
1079 : /* Update shared estimate of spins_per_delay */
1080 8094 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1081 :
1082 8094 : SpinLockRelease(ProcStructLock);
1083 8094 : }
1084 :
1085 : /*
1086 : * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1087 : * given its PID
1088 : *
1089 : * Returns NULL if not found.
1090 : */
1091 : PGPROC *
1092 9678 : AuxiliaryPidGetProc(int pid)
1093 : {
1094 9678 : PGPROC *result = NULL;
1095 : int index;
1096 :
1097 9678 : if (pid == 0) /* never match dummy PGPROCs */
1098 6 : return NULL;
1099 :
1100 42862 : for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1101 : {
1102 42862 : PGPROC *proc = &AuxiliaryProcs[index];
1103 :
1104 42862 : if (proc->pid == pid)
1105 : {
1106 9672 : result = proc;
1107 9672 : break;
1108 : }
1109 : }
1110 9672 : return result;
1111 : }
1112 :
1113 :
1114 : /*
1115 : * JoinWaitQueue -- join the wait queue on the specified lock
1116 : *
1117 : * It's not actually guaranteed that we need to wait when this function is
1118 : * called, because it could be that when we try to find a position at which
1119 : * to insert ourselves into the wait queue, we discover that we must be inserted
1120 : * ahead of everyone who wants a lock that conflicts with ours. In that case,
1121 : * we get the lock immediately. Because of this, it's sensible for this function
1122 : * to have a dontWait argument, despite the name.
1123 : *
1124 : * On entry, the caller has already set up LOCK and PROCLOCK entries to
1125 : * reflect that we have "requested" the lock. The caller is responsible for
1126 : * cleaning that up, if we end up not joining the queue after all.
1127 : *
1128 : * The lock table's partition lock must be held at entry, and is still held
1129 : * at exit. The caller must release it before calling ProcSleep().
1130 : *
1131 : * Result is one of the following:
1132 : *
1133 : * PROC_WAIT_STATUS_OK - lock was immediately granted
1134 : * PROC_WAIT_STATUS_WAITING - joined the wait queue; call ProcSleep()
1135 : * PROC_WAIT_STATUS_ERROR - immediate deadlock was detected, or would
1136 : * need to wait and dontWait == true
1137 : *
1138 : * NOTES: The process queue is now a priority queue for locking.
1139 : */
1140 : ProcWaitStatus
1141 4192 : JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
1142 : {
1143 4192 : LOCKMODE lockmode = locallock->tag.mode;
1144 4192 : LOCK *lock = locallock->lock;
1145 4192 : PROCLOCK *proclock = locallock->proclock;
1146 4192 : uint32 hashcode = locallock->hashcode;
1147 4192 : LWLock *partitionLock PG_USED_FOR_ASSERTS_ONLY = LockHashPartitionLock(hashcode);
1148 4192 : dclist_head *waitQueue = &lock->waitProcs;
1149 4192 : PGPROC *insert_before = NULL;
1150 : LOCKMASK myProcHeldLocks;
1151 : LOCKMASK myHeldLocks;
1152 4192 : bool early_deadlock = false;
1153 4192 : PGPROC *leader = MyProc->lockGroupLeader;
1154 :
1155 : Assert(LWLockHeldByMeInMode(partitionLock, LW_EXCLUSIVE));
1156 :
1157 : /*
1158 : * Set bitmask of locks this process already holds on this object.
1159 : */
1160 4192 : myHeldLocks = MyProc->heldLocks = proclock->holdMask;
1161 :
1162 : /*
1163 : * Determine which locks we're already holding.
1164 : *
1165 : * If group locking is in use, locks held by members of my locking group
1166 : * need to be included in myHeldLocks. This is not required for relation
1167 : * extension lock which conflict among group members. However, including
1168 : * them in myHeldLocks will give group members the priority to get those
1169 : * locks as compared to other backends which are also trying to acquire
1170 : * those locks. OTOH, we can avoid giving priority to group members for
1171 : * that kind of locks, but there doesn't appear to be a clear advantage of
1172 : * the same.
1173 : */
1174 4192 : myProcHeldLocks = proclock->holdMask;
1175 4192 : myHeldLocks = myProcHeldLocks;
1176 4192 : if (leader != NULL)
1177 : {
1178 : dlist_iter iter;
1179 :
1180 56 : dlist_foreach(iter, &lock->procLocks)
1181 : {
1182 : PROCLOCK *otherproclock;
1183 :
1184 42 : otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1185 :
1186 42 : if (otherproclock->groupLeader == leader)
1187 20 : myHeldLocks |= otherproclock->holdMask;
1188 : }
1189 : }
1190 :
1191 : /*
1192 : * Determine where to add myself in the wait queue.
1193 : *
1194 : * Normally I should go at the end of the queue. However, if I already
1195 : * hold locks that conflict with the request of any previous waiter, put
1196 : * myself in the queue just in front of the first such waiter. This is not
1197 : * a necessary step, since deadlock detection would move me to before that
1198 : * waiter anyway; but it's relatively cheap to detect such a conflict
1199 : * immediately, and avoid delaying till deadlock timeout.
1200 : *
1201 : * Special case: if I find I should go in front of some waiter, check to
1202 : * see if I conflict with already-held locks or the requests before that
1203 : * waiter. If not, then just grant myself the requested lock immediately.
1204 : * This is the same as the test for immediate grant in LockAcquire, except
1205 : * we are only considering the part of the wait queue before my insertion
1206 : * point.
1207 : */
1208 4192 : if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1209 : {
1210 12 : LOCKMASK aheadRequests = 0;
1211 : dlist_iter iter;
1212 :
1213 12 : dclist_foreach(iter, waitQueue)
1214 : {
1215 12 : PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1216 :
1217 : /*
1218 : * If we're part of the same locking group as this waiter, its
1219 : * locks neither conflict with ours nor contribute to
1220 : * aheadRequests.
1221 : */
1222 12 : if (leader != NULL && leader == proc->lockGroupLeader)
1223 0 : continue;
1224 :
1225 : /* Must he wait for me? */
1226 12 : if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1227 : {
1228 : /* Must I wait for him ? */
1229 12 : if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1230 : {
1231 : /*
1232 : * Yes, so we have a deadlock. Easiest way to clean up
1233 : * correctly is to call RemoveFromWaitQueue(), but we
1234 : * can't do that until we are *on* the wait queue. So, set
1235 : * a flag to check below, and break out of loop. Also,
1236 : * record deadlock info for later message.
1237 : */
1238 2 : RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1239 2 : early_deadlock = true;
1240 2 : break;
1241 : }
1242 : /* I must go before this waiter. Check special case. */
1243 10 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1244 10 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1245 : proclock))
1246 : {
1247 : /* Skip the wait and just grant myself the lock. */
1248 10 : GrantLock(lock, proclock, lockmode);
1249 10 : return PROC_WAIT_STATUS_OK;
1250 : }
1251 :
1252 : /* Put myself into wait queue before conflicting process */
1253 0 : insert_before = proc;
1254 0 : break;
1255 : }
1256 : /* Nope, so advance to next waiter */
1257 0 : aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1258 : }
1259 : }
1260 :
1261 : /*
1262 : * If we detected deadlock, give up without waiting. This must agree with
1263 : * CheckDeadLock's recovery code.
1264 : */
1265 4182 : if (early_deadlock)
1266 2 : return PROC_WAIT_STATUS_ERROR;
1267 :
1268 : /*
1269 : * At this point we know that we'd really need to sleep. If we've been
1270 : * commanded not to do that, bail out.
1271 : */
1272 4180 : if (dontWait)
1273 1496 : return PROC_WAIT_STATUS_ERROR;
1274 :
1275 : /*
1276 : * Insert self into queue, at the position determined above.
1277 : */
1278 2684 : if (insert_before)
1279 0 : dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1280 : else
1281 2684 : dclist_push_tail(waitQueue, &MyProc->links);
1282 :
1283 2684 : lock->waitMask |= LOCKBIT_ON(lockmode);
1284 :
1285 : /* Set up wait information in PGPROC object, too */
1286 2684 : MyProc->heldLocks = myProcHeldLocks;
1287 2684 : MyProc->waitLock = lock;
1288 2684 : MyProc->waitProcLock = proclock;
1289 2684 : MyProc->waitLockMode = lockmode;
1290 :
1291 2684 : MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
1292 :
1293 2684 : return PROC_WAIT_STATUS_WAITING;
1294 : }
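
The queue-jumping logic above boils down to bitmask probes: every lock mode has a conflict mask, and conflictTab[mode] & heldLocks is nonzero exactly when the request conflicts with the held set. A toy two-mode table demonstrating the same two tests ("must he wait for me?" / "must I wait for him?"); the real tables have eight modes and these names are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define LOCKBIT_ON(mode) ((uint32_t) 1 << (mode))

    /* Toy table: mode 1 = shared, mode 2 = exclusive. */
    static const uint32_t conflictTab[] = {
        [1] = LOCKBIT_ON(2),                    /* shared vs. exclusive */
        [2] = LOCKBIT_ON(1) | LOCKBIT_ON(2),    /* exclusive vs. both */
    };

    int
    main(void)
    {
        uint32_t    myHeldLocks = LOCKBIT_ON(1);    /* we hold shared */
        int         waiterMode = 2;                 /* waiter wants exclusive */
        uint32_t    waiterHeld = 0;                 /* waiter holds nothing */
        int         myMode = 1;                     /* we request shared */

        if (conflictTab[waiterMode] & myHeldLocks)  /* must he wait for me? */
        {
            if (conflictTab[myMode] & waiterHeld)   /* must I wait for him? */
                printf("mutual conflict: early deadlock\n");
            else
                printf("waiter conflicts with our held locks: insert ahead\n");
        }
        return 0;
    }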
1295 :
1296 : /*
1297 : * ProcSleep -- put process to sleep waiting on lock
1298 : *
1299 : * This must be called when JoinWaitQueue() returns PROC_WAIT_STATUS_WAITING.
1300 : * Returns after the lock has been granted, or if a deadlock is detected. Can
1301 : * also bail out with ereport(ERROR), if some other error condition, or a
1302 : * timeout or cancellation is triggered.
1303 : *
1304 : * Result is one of the following:
1305 : *
1306 : * PROC_WAIT_STATUS_OK - lock was granted
1307 : * PROC_WAIT_STATUS_ERROR - a deadlock was detected
1308 : */
1309 : ProcWaitStatus
1310 2684 : ProcSleep(LOCALLOCK *locallock)
1311 : {
1312 2684 : LOCKMODE lockmode = locallock->tag.mode;
1313 2684 : LOCK *lock = locallock->lock;
1314 2684 : uint32 hashcode = locallock->hashcode;
1315 2684 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
1316 2684 : TimestampTz standbyWaitStart = 0;
1317 2684 : bool allow_autovacuum_cancel = true;
1318 2684 : bool logged_recovery_conflict = false;
1319 : ProcWaitStatus myWaitStatus;
1320 :
1321 : /* The caller must've armed the on-error cleanup mechanism */
1322 : Assert(GetAwaitedLock() == locallock);
1323 : Assert(!LWLockHeldByMe(partitionLock));
1324 :
1325 : /*
1326 : * Now that we will successfully clean up after an ereport, it's safe to
1327 : * check to see if there's a buffer pin deadlock against the Startup
1328 : * process. Of course, that's only necessary if we're doing Hot Standby
1329 : * and are not the Startup process ourselves.
1330 : */
1331 2684 : if (RecoveryInProgress() && !InRecovery)
1332 2 : CheckRecoveryConflictDeadlock();
1333 :
1334 : /* Reset deadlock_state before enabling the timeout handler */
1335 2684 : deadlock_state = DS_NOT_YET_CHECKED;
1336 2684 : got_deadlock_timeout = false;
1337 :
1338 : /*
1339 : * Set timer so we can wake up after awhile and check for a deadlock. If a
1340 : * deadlock is detected, the handler sets MyProc->waitStatus =
1341 : * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1342 : * rather than success.
1343 : *
1344 : * By delaying the check until we've waited for a bit, we can avoid
1345 : * running the rather expensive deadlock-check code in most cases.
1346 : *
1347 : * If LockTimeout is set, also enable the timeout for that. We can save a
1348 : * few cycles by enabling both timeout sources in one call.
1349 : *
1350 : * If InHotStandby we set lock waits slightly later for clarity with other
1351 : * code.
1352 : */
1353 2684 : if (!InHotStandby)
1354 : {
1355 2682 : if (LockTimeout > 0)
1356 : {
1357 : EnableTimeoutParams timeouts[2];
1358 :
1359 200 : timeouts[0].id = DEADLOCK_TIMEOUT;
1360 200 : timeouts[0].type = TMPARAM_AFTER;
1361 200 : timeouts[0].delay_ms = DeadlockTimeout;
1362 200 : timeouts[1].id = LOCK_TIMEOUT;
1363 200 : timeouts[1].type = TMPARAM_AFTER;
1364 200 : timeouts[1].delay_ms = LockTimeout;
1365 200 : enable_timeouts(timeouts, 2);
1366 : }
1367 : else
1368 2482 : enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1369 :
1370 : /*
1371 : * Use the current time obtained for the deadlock timeout timer as
1372 : * waitStart (i.e., the time when this process started waiting for the
1373 : * lock). Since getting the current time newly can cause overhead, we
1374 : * reuse the already-obtained time to avoid that overhead.
1375 : *
1376 : * Note that waitStart is updated without holding the lock table's
1377 : * partition lock, to avoid the overhead by additional lock
1378 : * acquisition. This can cause "waitstart" in pg_locks to become NULL
1379 : * for a very short period of time after the wait started even though
1380 : * "granted" is false. This is OK in practice because we can assume
1381 : * that users are likely to look at "waitstart" when waiting for the
1382 : * lock for a long time.
1383 : */
1384 2682 : pg_atomic_write_u64(&MyProc->waitStart,
1385 2682 : get_timeout_start_time(DEADLOCK_TIMEOUT));
1386 : }
1387 2 : else if (log_recovery_conflict_waits)
1388 : {
1389 : /*
1390 : * Set the wait start timestamp if logging is enabled and in hot
1391 : * standby.
1392 : */
1393 2 : standbyWaitStart = GetCurrentTimestamp();
1394 : }
1395 :
1396 : /*
1397 : * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1398 : * will not wait. But a set latch does not necessarily mean that the lock
1399 : * is free now, as there are many other sources for latch sets than
1400 : * somebody releasing the lock.
1401 : *
1402 : * We process interrupts whenever the latch has been set, so cancel/die
1403 : * interrupts are processed quickly. This means we must not mind losing
1404 : * control to a cancel/die interrupt here. We don't, because we have no
1405 : * shared-state-change work to do after being granted the lock (the
1406 : * grantor did it all). We do have to worry about canceling the deadlock
1407 : * timeout and updating the locallock table, but if we lose control to an
1408 : * error, LockErrorCleanup will fix that up.
1409 : */
1410 : do
1411 : {
1412 5516 : if (InHotStandby)
1413 : {
1414 8 : bool maybe_log_conflict =
1415 8 : (standbyWaitStart != 0 && !logged_recovery_conflict);
1416 :
1417 : /* Set a timer and wait for that or for the lock to be granted */
1418 8 : ResolveRecoveryConflictWithLock(locallock->tag.lock,
1419 : maybe_log_conflict);
1420 :
1421 : /*
1422 : * Emit the log message if the startup process is waiting longer
1423 : * than deadlock_timeout for recovery conflict on lock.
1424 : */
1425 8 : if (maybe_log_conflict)
1426 : {
1427 4 : TimestampTz now = GetCurrentTimestamp();
1428 :
1429 4 : if (TimestampDifferenceExceeds(standbyWaitStart, now,
1430 : DeadlockTimeout))
1431 : {
1432 : VirtualTransactionId *vxids;
1433 : int cnt;
1434 :
1435 2 : vxids = GetLockConflicts(&locallock->tag.lock,
1436 : AccessExclusiveLock, &cnt);
1437 :
1438 : /*
1439 : * Log the recovery conflict and the list of PIDs of
1440 : * backends holding the conflicting lock. Note that we do
1441 : * logging even if there are no such backends right now
1442 : * because the startup process here has already waited
1443 : * longer than deadlock_timeout.
1444 : */
1445 2 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1446 : standbyWaitStart, now,
1447 2 : cnt > 0 ? vxids : NULL, true);
1448 2 : logged_recovery_conflict = true;
1449 : }
1450 : }
1451 : }
1452 : else
1453 : {
1454 5508 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1455 5508 : PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1456 5508 : ResetLatch(MyLatch);
1457 : /* check for deadlocks first, as that's probably log-worthy */
1458 5508 : if (got_deadlock_timeout)
1459 : {
1460 52 : CheckDeadLock();
1461 52 : got_deadlock_timeout = false;
1462 : }
1463 5508 : CHECK_FOR_INTERRUPTS();
1464 : }
1465 :
1466 : /*
1467 : * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1468 : * else asynchronously. Read it just once per loop to prevent
1469 : * surprising behavior (such as missing log messages).
1470 : */
1471 5436 : myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1472 :
1473 : /*
1474 : * If we are not deadlocked, but are waiting on an autovacuum-induced
1475 : * task, send a signal to interrupt it.
1476 : */
1477 5436 : if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1478 : {
1479 0 : PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1480 : uint8 statusFlags;
1481 : uint8 lockmethod_copy;
1482 : LOCKTAG locktag_copy;
1483 :
1484 : /*
1485 : * Grab info we need, then release lock immediately. Note this
1486 : * coding means that there is a tiny chance that the process
1487 : * terminates its current transaction and starts a different one
1488 : * before we have a chance to send the signal; the worst possible
1489 : * consequence is that a for-wraparound vacuum is canceled. But
1490 : * that could happen in any case unless we were to do kill() with
1491 : * the lock held, which is much more undesirable.
1492 : */
1493 0 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1494 0 : statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1495 0 : lockmethod_copy = lock->tag.locktag_lockmethodid;
1496 0 : locktag_copy = lock->tag;
1497 0 : LWLockRelease(ProcArrayLock);
1498 :
1499 : /*
1500 : * Only do it if the worker is not working to protect against Xid
1501 : * wraparound.
1502 : */
1503 0 : if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1504 0 : !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1505 : {
1506 0 : int pid = autovac->pid;
1507 :
1508 : /* report the case, if configured to do so */
1509 0 : if (message_level_is_interesting(DEBUG1))
1510 : {
1511 : StringInfoData locktagbuf;
1512 : StringInfoData logbuf; /* errdetail for server log */
1513 :
1514 0 : initStringInfo(&locktagbuf);
1515 0 : initStringInfo(&logbuf);
1516 0 : DescribeLockTag(&locktagbuf, &locktag_copy);
1517 0 : appendStringInfo(&logbuf,
1518 : "Process %d waits for %s on %s.",
1519 : MyProcPid,
1520 : GetLockmodeName(lockmethod_copy, lockmode),
1521 : locktagbuf.data);
1522 :
1523 0 : ereport(DEBUG1,
1524 : (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1525 : pid),
1526 : errdetail_log("%s", logbuf.data)));
1527 :
1528 0 : pfree(locktagbuf.data);
1529 0 : pfree(logbuf.data);
1530 : }
1531 :
1532 : /* send the autovacuum worker Back to Old Kent Road */
1533 0 : if (kill(pid, SIGINT) < 0)
1534 : {
1535 : /*
1536 : * There's a race condition here: once we release the
1537 : * ProcArrayLock, it's possible for the autovac worker to
1538 : * close up shop and exit before we can do the kill().
1539 : * Therefore, we do not whinge about no-such-process.
1540 : * Other errors such as EPERM could conceivably happen if
1541 : * the kernel recycles the PID fast enough, but such cases
1542 : * seem improbable enough that it's probably best to issue
1543 : * a warning if we see some other errno.
1544 : */
1545 0 : if (errno != ESRCH)
1546 0 : ereport(WARNING,
1547 : (errmsg("could not send signal to process %d: %m",
1548 : pid)));
1549 : }
1550 : }
1551 :
1552 : /* prevent the signal from being sent more than once */
1553 0 : allow_autovacuum_cancel = false;
1554 : }
1555 :
1556 : /*
1557 : * If awoken after the deadlock check interrupt has run, and
1558 : * log_lock_waits is on, then log a message about the wait.
1559 : */
1560 5436 : if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1561 : {
1562 : StringInfoData buf,
1563 : lock_waiters_sbuf,
1564 : lock_holders_sbuf;
1565 : const char *modename;
1566 : long secs;
1567 : int usecs;
1568 : long msecs;
1569 38 : int lockHoldersNum = 0;
1570 :
1571 38 : initStringInfo(&buf);
1572 38 : initStringInfo(&lock_waiters_sbuf);
1573 38 : initStringInfo(&lock_holders_sbuf);
1574 :
1575 38 : DescribeLockTag(&buf, &locallock->tag.lock);
1576 38 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1577 : lockmode);
1578 38 : TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1579 : GetCurrentTimestamp(),
1580 : &secs, &usecs);
1581 38 : msecs = secs * 1000 + usecs / 1000;
1582 38 : usecs = usecs % 1000;
1583 :
1584 : /* Gather a list of all lock holders and waiters */
1585 38 : LWLockAcquire(partitionLock, LW_SHARED);
1586 38 : GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1587 : &lock_waiters_sbuf, &lockHoldersNum);
1588 38 : LWLockRelease(partitionLock);
1589 :
1590 38 : if (deadlock_state == DS_SOFT_DEADLOCK)
1591 6 : ereport(LOG,
1592 : (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1593 : MyProcPid, modename, buf.data, msecs, usecs),
1594 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1595 : "Processes holding the lock: %s. Wait queue: %s.",
1596 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1597 32 : else if (deadlock_state == DS_HARD_DEADLOCK)
1598 : {
1599 : /*
1600 : * This message is a bit redundant with the error that will be
1601 : * reported subsequently, but in some cases the error report
1602 : * might not make it to the log (eg, if it's caught by an
1603 : * exception handler), and we want to ensure all long-wait
1604 : * events get logged.
1605 : */
1606 4 : ereport(LOG,
1607 : (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1608 : MyProcPid, modename, buf.data, msecs, usecs),
1609 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1610 : "Processes holding the lock: %s. Wait queue: %s.",
1611 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1612 : }
1613 :
1614 38 : if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1615 18 : ereport(LOG,
1616 : (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1617 : MyProcPid, modename, buf.data, msecs, usecs),
1618 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1619 : "Processes holding the lock: %s. Wait queue: %s.",
1620 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1621 20 : else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1622 16 : ereport(LOG,
1623 : (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1624 : MyProcPid, modename, buf.data, msecs, usecs)));
1625 : else
1626 : {
1627 : Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1628 :
1629 : /*
1630 : * Currently, the deadlock checker always kicks its own
1631 : * process, which means that we'll only see
1632 : * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1633 : * DS_HARD_DEADLOCK, and there's no need to print redundant
1634 : * messages. But for completeness and future-proofing, print
1635 : * a message if it looks like someone else kicked us off the
1636 : * lock.
1637 : */
1638 4 : if (deadlock_state != DS_HARD_DEADLOCK)
1639 0 : ereport(LOG,
1640 : (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1641 : MyProcPid, modename, buf.data, msecs, usecs),
1642 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1643 : "Processes holding the lock: %s. Wait queue: %s.",
1644 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1645 : }
1646 :
1647 : /*
1648 : * At this point we might still need to wait for the lock. Reset
1649 : * state so we don't print the above messages again.
1650 : */
1651 38 : deadlock_state = DS_NO_DEADLOCK;
1652 :
1653 38 : pfree(buf.data);
1654 38 : pfree(lock_holders_sbuf.data);
1655 38 : pfree(lock_waiters_sbuf.data);
1656 : }
1657 5436 : } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1658 :
1659 : /*
1660 : * Disable the timers, if they are still running. As in LockErrorCleanup,
1661 : * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1662 : * already caused QueryCancelPending to become set, we want the cancel to
1663 : * be reported as a lock timeout, not a user cancel.
1664 : */
1665 2604 : if (!InHotStandby)
1666 : {
1667 2602 : if (LockTimeout > 0)
1668 : {
1669 : DisableTimeoutParams timeouts[2];
1670 :
1671 188 : timeouts[0].id = DEADLOCK_TIMEOUT;
1672 188 : timeouts[0].keep_indicator = false;
1673 188 : timeouts[1].id = LOCK_TIMEOUT;
1674 188 : timeouts[1].keep_indicator = true;
1675 188 : disable_timeouts(timeouts, 2);
1676 : }
1677 : else
1678 2414 : disable_timeout(DEADLOCK_TIMEOUT, false);
1679 : }
1680 :
1681 : /*
1682 : * Emit the log message if a recovery conflict on a lock was resolved but the
1683 : * startup process waited longer than deadlock_timeout for it.
1684 : */
1685 2604 : if (InHotStandby && logged_recovery_conflict)
1686 2 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1687 : standbyWaitStart, GetCurrentTimestamp(),
1688 : NULL, false);
1689 :
1690 : /*
1691 : * We don't have to do anything else, because the awaker did all the
1692 : * necessary updates of the lock table and MyProc. (The caller is
1693 : * responsible for updating the local lock table.)
1694 : */
1695 2604 : return myWaitStatus;
1696 : }
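 :
 : /*
 :  * A minimal caller-side sketch (an illustrative assumption, not code from
 :  * this file): after JoinWaitQueue() has put us on the wait queue, a caller
 :  * shaped like the lock manager's WaitOnLock() would consume the result of
 :  * ProcSleep() roughly as:
 :  *
 :  *     if (ProcSleep(locallock) != PROC_WAIT_STATUS_OK)
 :  *         DeadLockReport();
 :  *
 :  * where PROC_WAIT_STATUS_ERROR means the deadlock checker kicked us off
 :  * the lock.  On PROC_WAIT_STATUS_OK the lock has already been granted by
 :  * the waker; the caller only needs to update its local lock table.
 :  */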
1697 :
1698 :
1699 : /*
1700 : * ProcWakeup -- wake up a process by setting its latch.
1701 : *
1702 : * Also remove the process from the wait queue and set its links invalid.
1703 : *
1704 : * The appropriate lock partition lock must be held by caller.
1705 : *
1706 : * XXX: presently, this code is only used for the "success" case, and only
1707 : * works correctly for that case. To clean up in failure case, would need
1708 : * works correctly for that case. To clean up in the failure case, we would
1709 : * need to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1710 : */
1711 : void
1712 2620 : ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1713 : {
1714 2620 : if (dlist_node_is_detached(&proc->links))
1715 0 : return;
1716 :
1717 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1718 :
1719 : /* Remove process from wait queue */
1720 2620 : dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, &proc->links);
1721 :
1722 : /* Clean up process' state and pass it the ok/fail signal */
1723 2620 : proc->waitLock = NULL;
1724 2620 : proc->waitProcLock = NULL;
1725 2620 : proc->waitStatus = waitStatus;
1726 2620 : pg_atomic_write_u64(&MyProc->waitStart, 0);
1727 :
1728 : /* And awaken it */
1729 2620 : SetLatch(&proc->procLatch);
1730 : }
1731 :
1732 : /*
1733 : * ProcLockWakeup -- routine for waking up processes when a lock is
1734 : * released (or a prior waiter is aborted). Scan all waiters
1735 : * for the lock, and wake any that are no longer blocked.
1736 : *
1737 : * The appropriate lock partition lock must be held by caller.
1738 : */
1739 : void
1740 2662 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1741 : {
1742 2662 : dclist_head *waitQueue = &lock->waitProcs;
1743 2662 : LOCKMASK aheadRequests = 0;
1744 : dlist_mutable_iter miter;
1745 :
1746 2662 : if (dclist_is_empty(waitQueue))
1747 88 : return;
1748 :
1749 6340 : dclist_foreach_modify(miter, waitQueue)
1750 : {
1751 3766 : PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
1752 3766 : LOCKMODE lockmode = proc->waitLockMode;
1753 :
1754 : /*
1755 : * Wake the process if its request (a) doesn't conflict with requests of
1756 : * earlier waiters, and (b) doesn't conflict with already-held locks.
1757 : */
1758 3766 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1759 3148 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1760 : proc->waitProcLock))
1761 : {
1762 : /* OK to waken */
1763 2620 : GrantLock(lock, proc->waitProcLock, lockmode);
1764 : /* removes proc from the lock's waiting process queue */
1765 2620 : ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1766 : }
1767 : else
1768 : {
1769 : /*
1770 : * Lock conflicts: Don't wake, but remember requested mode for
1771 : * later checks.
1772 : */
1773 1146 : aheadRequests |= LOCKBIT_ON(lockmode);
1774 : }
1775 : }
1776 : }
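 :
 : /*
 :  * A worked example of the aheadRequests logic above (illustrative):
 :  * suppose the queue holds [A: AccessExclusiveLock, B: AccessShareLock]
 :  * and A still conflicts with a remaining holder.  A is left sleeping and
 :  * aheadRequests gains LOCKBIT_ON(AccessExclusiveLock); because
 :  * AccessShareLock conflicts with AccessExclusiveLock, B is not woken
 :  * either, so B cannot jump the queue ahead of A.
 :  */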
1777 :
1778 : /*
1779 : * CheckDeadLock
1780 : *
1781 : * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1782 : * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
1783 : * not, just return. (But signal ProcSleep to log a message, if
1784 : * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1785 : * the lock's wait queue and signal an error to ProcSleep.
1786 : */
1787 : static void
1788 52 : CheckDeadLock(void)
1789 : {
1790 : int i;
1791 :
1792 : /*
1793 : * Acquire exclusive lock on the entire shared lock data structures. Must
1794 : * grab LWLocks in partition-number order to avoid LWLock deadlock.
1795 : *
1796 : * Note that the deadlock check interrupt had better not be enabled
1797 : * anywhere that this process itself holds lock partition locks, else this
1798 : * will wait forever. Also note that LWLockAcquire creates a critical
1799 : * section, so that this routine cannot be interrupted by cancel/die
1800 : * interrupts.
1801 : */
1802 884 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1803 832 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1804 :
1805 : /*
1806 : * Check to see if we've been awoken by anyone in the interim.
1807 : *
1808 : * If we have, we can return and resume our transaction -- happy day.
1809 : * Before we are awoken, the process releasing the lock grants it to us,
1810 : * so we know that we don't have to wait anymore.
1811 : *
1812 : * We check by looking to see if we've been unlinked from the wait queue.
1813 : * This is safe because we hold the lock partition lock.
1814 : */
1815 52 : if (MyProc->links.prev == NULL ||
1816 52 : MyProc->links.next == NULL)
1817 0 : goto check_done;
1818 :
1819 : #ifdef LOCK_DEBUG
1820 : if (Debug_deadlocks)
1821 : DumpAllLocks();
1822 : #endif
1823 :
1824 : /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1825 52 : deadlock_state = DeadLockCheck(MyProc);
1826 :
1827 52 : if (deadlock_state == DS_HARD_DEADLOCK)
1828 : {
1829 : /*
1830 : * Oops. We have a deadlock.
1831 : *
1832 : * Get this process out of wait state. (Note: we could do this more
1833 : * efficiently by relying on lockAwaited, but use this coding to
1834 : * preserve the flexibility to kill some other transaction than the
1835 : * one detecting the deadlock.)
1836 : *
1837 : * RemoveFromWaitQueue sets MyProc->waitStatus to
1838 : * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1839 : * return from the signal handler.
1840 : */
1841 : Assert(MyProc->waitLock != NULL);
1842 10 : RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1843 :
1844 : /*
1845 : * We're done here. Transaction abort caused by the error that
1846 : * ProcSleep will raise will cause any other locks we hold to be
1847 : * released, thus allowing other processes to wake up; we don't need
1848 : * to do that here. NOTE: an exception is that releasing locks we
1849 : * hold doesn't consider the possibility of waiters that were blocked
1850 : * behind us on the lock we just failed to get, and might now be
1851 : * wakable because we're not in front of them anymore. However,
1852 : * RemoveFromWaitQueue took care of waking up any such processes.
1853 : */
1854 : }
1855 :
1856 : /*
1857 : * And release locks. We do this in reverse order for two reasons: (1)
1858 : * Anyone else who needs more than one of the locks will be trying to lock
1859 : * them in increasing order; we don't want to release the other process
1860 : * until it can get all the locks it needs. (2) This avoids O(N^2)
1861 : * behavior inside LWLockRelease.
1862 : */
1863 42 : check_done:
1864 884 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1865 832 : LWLockRelease(LockHashPartitionLockByIndex(i));
1866 52 : }
1867 :
1868 : /*
1869 : * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1870 : *
1871 : * NB: Runs inside a signal handler, be careful.
1872 : */
1873 : void
1874 52 : CheckDeadLockAlert(void)
1875 : {
1876 52 : int save_errno = errno;
1877 :
1878 52 : got_deadlock_timeout = true;
1879 :
1880 : /*
1881 : * We have to set the latch again, even if handle_sig_alarm already did,
1882 : * because at that point got_deadlock_timeout was not yet set. It's
1883 : * unlikely ever to be a problem, but setting an already-set latch is cheap.
1884 : *
1885 : * Note that, when this function runs inside procsignal_sigusr1_handler(),
1886 : * the handler function sets the latch again after the latch is set here.
1887 : */
1888 52 : SetLatch(MyLatch);
1889 52 : errno = save_errno;
1890 52 : }
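 :
 : /*
 :  * Sketch of how this callback is wired up (the actual registration lives
 :  * outside this file, so take the details as an outline): the timeout is
 :  * registered once at backend startup, and armed when we start waiting:
 :  *
 :  *     RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
 :  *     ...
 :  *     enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
 :  *
 :  * When the timer fires, this handler just sets got_deadlock_timeout and
 :  * the latch; the latch wait in ProcSleep() then returns and runs
 :  * CheckDeadLock() safely outside signal context.
 :  */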
1891 :
1892 : /*
1893 : * GetLockHoldersAndWaiters - get lock holders and waiters for a lock
1894 : *
1895 : * Fill lock_holders_sbuf and lock_waiters_sbuf with the PIDs of processes holding
1896 : * and waiting for the lock, and set lockHoldersNum to the number of lock holders.
1897 : *
1898 : * The lock table's partition lock must be held on entry and remains held on exit.
1899 : */
1900 : void
1901 38 : GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf,
1902 : StringInfo lock_waiters_sbuf, int *lockHoldersNum)
1903 : {
1904 : dlist_iter proc_iter;
1905 : PROCLOCK *curproclock;
1906 38 : LOCK *lock = locallock->lock;
1907 38 : bool first_holder = true,
1908 38 : first_waiter = true;
1909 :
1910 : #ifdef USE_ASSERT_CHECKING
1911 : {
1912 : uint32 hashcode = locallock->hashcode;
1913 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
1914 :
1915 : Assert(LWLockHeldByMe(partitionLock));
1916 : }
1917 : #endif
1918 :
1919 38 : *lockHoldersNum = 0;
1920 :
1921 : /*
1922 : * Loop over the lock's procLocks to gather a list of all holders and
1923 : * waiters. Thus we will be able to provide more detailed information for
1924 : * lock debugging purposes.
1925 : *
1926 : * lock->procLocks contains all processes which hold or wait for this
1927 : * lock.
1928 : */
1929 114 : dlist_foreach(proc_iter, &lock->procLocks)
1930 : {
1931 76 : curproclock =
1932 76 : dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1933 :
1934 : /*
1935 : * The proc is a waiter if myProc->waitProcLock == curproclock; it is a
1936 : * holder if waitProcLock is NULL or points to a different proclock.
1937 : */
1938 76 : if (curproclock->tag.myProc->waitProcLock == curproclock)
1939 : {
1940 34 : if (first_waiter)
1941 : {
1942 20 : appendStringInfo(lock_waiters_sbuf, "%d",
1943 20 : curproclock->tag.myProc->pid);
1944 20 : first_waiter = false;
1945 : }
1946 : else
1947 14 : appendStringInfo(lock_waiters_sbuf, ", %d",
1948 14 : curproclock->tag.myProc->pid);
1949 : }
1950 : else
1951 : {
1952 42 : if (first_holder)
1953 : {
1954 38 : appendStringInfo(lock_holders_sbuf, "%d",
1955 38 : curproclock->tag.myProc->pid);
1956 38 : first_holder = false;
1957 : }
1958 : else
1959 4 : appendStringInfo(lock_holders_sbuf, ", %d",
1960 4 : curproclock->tag.myProc->pid);
1961 :
1962 42 : (*lockHoldersNum)++;
1963 : }
1964 : }
1965 38 : }
1966 :
1967 : /*
1968 : * ProcWaitForSignal - wait for a signal from another backend.
1969 : *
1970 : * As this uses the generic process latch, the caller has to be robust against
1971 : * unrelated wakeups: always check that the desired state has occurred, and
1972 : * wait again if not.
1973 : */
1974 : void
1975 32 : ProcWaitForSignal(uint32 wait_event_info)
1976 : {
1977 32 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1978 : wait_event_info);
1979 32 : ResetLatch(MyLatch);
1980 32 : CHECK_FOR_INTERRUPTS();
1981 32 : }
1982 :
1983 : /*
1984 : * ProcSendSignal - set the latch of a backend identified by ProcNumber
1985 : */
1986 : void
1987 6 : ProcSendSignal(ProcNumber procNumber)
1988 : {
1989 6 : if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
1990 0 : elog(ERROR, "procNumber out of range");
1991 :
1992 6 : SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
1993 6 : }
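 :
 : /*
 :  * A hedged usage sketch of the ProcWaitForSignal()/ProcSendSignal()
 :  * handshake.  "shared->done" and "shared->waiter" are hypothetical fields
 :  * in some shared-memory struct, and PG_WAIT_EXTENSION is just a plausible
 :  * wait_event_info value:
 :  *
 :  *     // waiter: advertise ourselves, then sleep until the flag is set,
 :  *     // re-checking after every wakeup since wakeups may be unrelated
 :  *     shared->waiter = MyProcNumber;
 :  *     while (!shared->done)
 :  *         ProcWaitForSignal(PG_WAIT_EXTENSION);
 :  *
 :  *     // signaler: publish the state change first, then poke the waiter
 :  *     shared->done = true;
 :  *     ProcSendSignal(shared->waiter);
 :  */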
1994 :
1995 : /*
1996 : * BecomeLockGroupLeader - designate process as lock group leader
1997 : *
1998 : * Once this function has returned, other processes can join the lock group
1999 : * by calling BecomeLockGroupMember.
2000 : */
2001 : void
2002 1172 : BecomeLockGroupLeader(void)
2003 : {
2004 : LWLock *leader_lwlock;
2005 :
2006 : /* If we already did it, we don't need to do it again. */
2007 1172 : if (MyProc->lockGroupLeader == MyProc)
2008 1034 : return;
2009 :
2010 : /* We had better not be a follower. */
2011 : Assert(MyProc->lockGroupLeader == NULL);
2012 :
2013 : /* Create single-member group, containing only ourselves. */
2014 138 : leader_lwlock = LockHashPartitionLockByProc(MyProc);
2015 138 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2016 138 : MyProc->lockGroupLeader = MyProc;
2017 138 : dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
2018 138 : LWLockRelease(leader_lwlock);
2019 : }
2020 :
2021 : /*
2022 : * BecomeLockGroupMember - designate process as lock group member
2023 : *
2024 : * This is pretty straightforward except for the possibility that the leader
2025 : * whose group we're trying to join might exit before we manage to do so;
2026 : * and the PGPROC might get recycled for an unrelated process. To avoid
2027 : * that, we require the caller to pass the PID of the intended PGPROC as
2028 : * an interlock. Returns true if we successfully join the intended lock
2029 : * group, and false if not.
2030 : */
2031 : bool
2032 2740 : BecomeLockGroupMember(PGPROC *leader, int pid)
2033 : {
2034 : LWLock *leader_lwlock;
2035 2740 : bool ok = false;
2036 :
2037 : /* Group leader can't become member of group */
2038 : Assert(MyProc != leader);
2039 :
2040 : /* Can't already be a member of a group */
2041 : Assert(MyProc->lockGroupLeader == NULL);
2042 :
2043 : /* PID must be valid. */
2044 : Assert(pid != 0);
2045 :
2046 : /*
2047 : * Get lock protecting the group fields. Note LockHashPartitionLockByProc
2048 : * calculates the proc number based on the PGPROC slot without looking at
2049 : * its contents, so we will acquire the correct lock even if the leader
2050 : * PGPROC is in the process of being recycled.
2051 : */
2052 2740 : leader_lwlock = LockHashPartitionLockByProc(leader);
2053 2740 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2054 :
2055 : /* Is this the leader we're looking for? */
2056 2740 : if (leader->pid == pid && leader->lockGroupLeader == leader)
2057 : {
2058 : /* OK, join the group */
2059 2740 : ok = true;
2060 2740 : MyProc->lockGroupLeader = leader;
2061 2740 : dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
2062 : }
2063 2740 : LWLockRelease(leader_lwlock);
2064 :
2065 2740 : return ok;
2066 : }
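 :
 : /*
 :  * A hedged sketch of the intended leader/worker usage of the two
 :  * functions above; "shm->leader_pgproc" and "shm->leader_pid" are
 :  * hypothetical fields the leader publishes in shared memory:
 :  *
 :  *     // leader, before launching any workers:
 :  *     BecomeLockGroupLeader();
 :  *     shm->leader_pgproc = MyProc;
 :  *     shm->leader_pid = MyProcPid;
 :  *
 :  *     // worker, during startup; the PID acts as the interlock described
 :  *     // above, in case the leader exited and its PGPROC was recycled:
 :  *     if (!BecomeLockGroupMember(shm->leader_pgproc, shm->leader_pid))
 :  *         ereport(ERROR, ...);    // leader is gone; give up
 :  */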