Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * procsignal.c
4 : * Routines for interprocess signaling
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/ipc/procsignal.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include <signal.h>
18 : #include <unistd.h>
19 :
20 : #include "access/parallel.h"
21 : #include "commands/async.h"
22 : #include "miscadmin.h"
23 : #include "pgstat.h"
24 : #include "port/pg_bitutils.h"
25 : #include "replication/logicalworker.h"
26 : #include "replication/walsender.h"
27 : #include "storage/condition_variable.h"
28 : #include "storage/ipc.h"
29 : #include "storage/latch.h"
30 : #include "storage/shmem.h"
31 : #include "storage/sinval.h"
32 : #include "storage/smgr.h"
33 : #include "tcop/tcopprot.h"
34 : #include "utils/memutils.h"
35 :
36 : /*
37 : * The SIGUSR1 signal is multiplexed to support signaling multiple event
38 : * types. The specific reason is communicated via flags in shared memory.
39 : * We keep a boolean flag for each possible "reason", so that different
40 : * reasons can be signaled to a process concurrently. (However, if the same
41 : * reason is signaled more than once nearly simultaneously, the process may
42 : * observe it only once.)
43 : *
44 : * Each process that wants to receive signals registers its process ID
45 : * in the ProcSignalSlots array. The array is indexed by ProcNumber to make
46 : * slot allocation simple, and to avoid having to search the array when you
47 : * know the ProcNumber of the process you're signaling. (We do support
48 : * signaling without ProcNumber, but it's a bit less efficient.)
49 : *
50 : * The fields in each slot are protected by a spinlock, pss_mutex. pss_pid can
51 : * also be read without holding the spinlock, as a quick preliminary check
52 : * when searching for a particular PID in the array.
53 : *
54 : * pss_signalFlags are intended to be set in cases where we don't need to
55 : * keep track of whether or not the target process has handled the signal,
56 : * but sometimes we need confirmation, as when making a global state change
57 : * that cannot be considered complete until all backends have taken notice
58 : * of it. For such use cases, we set a bit in pss_barrierCheckMask and then
59 : * increment the current "barrier generation"; when the new barrier generation
60 : * (or greater) appears in the pss_barrierGeneration flag of every process,
61 : * we know that the message has been received everywhere.
62 : */
63 : typedef struct
64 : {
65 : pg_atomic_uint32 pss_pid;
66 : int pss_cancel_key_len; /* 0 means no cancellation is possible */
67 : uint8 pss_cancel_key[MAX_CANCEL_KEY_LENGTH];
68 : volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
69 : slock_t pss_mutex; /* protects the above fields */
70 :
71 : /* Barrier-related fields (not protected by pss_mutex) */
72 : pg_atomic_uint64 pss_barrierGeneration;
73 : pg_atomic_uint32 pss_barrierCheckMask;
74 : ConditionVariable pss_barrierCV;
75 : } ProcSignalSlot;
76 :
77 : /*
78 : * Information that is global to the entire ProcSignal system can be stored
79 : * here.
80 : *
81 : * psh_barrierGeneration is the highest barrier generation in existence.
82 : */
83 : struct ProcSignalHeader
84 : {
85 : pg_atomic_uint64 psh_barrierGeneration;
86 : ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
87 : };
88 :
89 : /*
90 : * We reserve a slot for each possible ProcNumber, plus one for each
91 : * possible auxiliary process type. (This scheme assumes there is not
92 : * more than one of any auxiliary process type at a time, except for
93 : * IO workers.)
94 : */
95 : #define NumProcSignalSlots (MaxBackends + NUM_AUXILIARY_PROCS)
96 :
97 : /* Check whether the relevant type bit is set in the flags. */
98 : #define BARRIER_SHOULD_CHECK(flags, type) \
99 : (((flags) & (((uint32) 1) << (uint32) (type))) != 0)
100 :
101 : /* Clear the relevant type bit from the flags. */
102 : #define BARRIER_CLEAR_BIT(flags, type) \
103 : ((flags) &= ~(((uint32) 1) << (uint32) (type)))
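/*
 * Illustrative sketch (not part of this file): how the two macros above are
 * meant to be used together on a local flags word, as ProcessProcSignalBarrier()
 * does further down.
 */
static void
barrier_flag_macros_sketch(void)
{
	uint32		flags = 0;

	/* pretend a barrier of type PROCSIGNAL_BARRIER_SMGRRELEASE was requested */
	flags |= ((uint32) 1) << (uint32) PROCSIGNAL_BARRIER_SMGRRELEASE;

	if (BARRIER_SHOULD_CHECK(flags, PROCSIGNAL_BARRIER_SMGRRELEASE))
	{
		/* ... absorb that barrier type ... */
		BARRIER_CLEAR_BIT(flags, PROCSIGNAL_BARRIER_SMGRRELEASE);
	}

	Assert(flags == 0);
}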
104 :
105 : NON_EXEC_STATIC ProcSignalHeader *ProcSignal = NULL;
106 : static ProcSignalSlot *MyProcSignalSlot = NULL;
107 :
108 : static bool CheckProcSignal(ProcSignalReason reason);
109 : static void CleanupProcSignalState(int status, Datum arg);
110 : static void ResetProcSignalBarrierBits(uint32 flags);
111 :
112 : /*
113 : * ProcSignalShmemSize
114 : * Compute space needed for ProcSignal's shared memory
115 : */
116 : Size
117 6006 : ProcSignalShmemSize(void)
118 : {
119 : Size size;
120 :
121 6006 : size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
122 6006 : size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
123 6006 : return size;
124 : }
125 :
126 : /*
127 : * ProcSignalShmemInit
128 : * Allocate and initialize ProcSignal's shared memory
129 : */
130 : void
131 2100 : ProcSignalShmemInit(void)
132 : {
133 2100 : Size size = ProcSignalShmemSize();
134 : bool found;
135 :
136 2100 : ProcSignal = (ProcSignalHeader *)
137 2100 : ShmemInitStruct("ProcSignal", size, &found);
138 :
139 : /* If we're first, initialize. */
140 2100 : if (!found)
141 : {
142 : int i;
143 :
144 2100 : pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
145 :
146 279486 : for (i = 0; i < NumProcSignalSlots; ++i)
147 : {
148 277386 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
149 :
150 277386 : SpinLockInit(&slot->pss_mutex);
151 277386 : pg_atomic_init_u32(&slot->pss_pid, 0);
152 277386 : slot->pss_cancel_key_len = 0;
153 277386 : MemSet(slot->pss_signalFlags, 0, sizeof(slot->pss_signalFlags));
154 277386 : pg_atomic_init_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
155 277386 : pg_atomic_init_u32(&slot->pss_barrierCheckMask, 0);
156 277386 : ConditionVariableInit(&slot->pss_barrierCV);
157 : }
158 : }
159 2100 : }
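/*
 * Illustrative sketch (simplified, not part of this file): the two functions
 * above are paired by the shared-memory setup code.  ProcSignalShmemSize()
 * contributes to the total size of the segment before it is created, and
 * ProcSignalShmemInit() is called once the segment exists; only the first
 * process to attach (!found) performs the initialization.
 */
static Size
procsignal_shmem_setup_sketch(bool segment_exists)
{
	Size		total = 0;

	/* sizing pass: every subsystem adds its requirement */
	total = add_size(total, ProcSignalShmemSize());

	/* creation/attach pass: each subsystem locates or initializes its struct */
	if (segment_exists)
		ProcSignalShmemInit();

	return total;
}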
160 :
161 : /*
162 : * ProcSignalInit
163 : * Register the current process in the ProcSignal array
164 : */
165 : void
166 43406 : ProcSignalInit(const uint8 *cancel_key, int cancel_key_len)
167 : {
168 : ProcSignalSlot *slot;
169 : uint64 barrier_generation;
170 : uint32 old_pss_pid;
171 :
172 : Assert(cancel_key_len >= 0 && cancel_key_len <= MAX_CANCEL_KEY_LENGTH);
173 43406 : if (MyProcNumber < 0)
174 0 : elog(ERROR, "MyProcNumber not set");
175 43406 : if (MyProcNumber >= NumProcSignalSlots)
176 0 : elog(ERROR, "unexpected MyProcNumber %d in ProcSignalInit (max %d)", MyProcNumber, NumProcSignalSlots);
177 43406 : slot = &ProcSignal->psh_slot[MyProcNumber];
178 :
179 43406 : SpinLockAcquire(&slot->pss_mutex);
180 :
181 : /* Value used for sanity check below */
182 43406 : old_pss_pid = pg_atomic_read_u32(&slot->pss_pid);
183 :
184 : /* Clear out any leftover signal reasons */
185 43406 : MemSet(slot->pss_signalFlags, 0, NUM_PROCSIGNALS * sizeof(sig_atomic_t));
186 :
187 : /*
188 : * Initialize barrier state. Since we're a brand-new process, there
189 : * shouldn't be any leftover backend-private state that needs to be
190 : * updated. Therefore, we can broadcast the latest barrier generation and
191 : * disregard any previously-set check bits.
192 : *
193 : * NB: This only works if this initialization happens early enough in the
194 : * startup sequence that we haven't yet cached any state that might need
195 : * to be invalidated. That's also why we have a memory barrier here, to be
196 : * sure that any later reads of memory happen strictly after this.
197 : */
198 43406 : pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
199 : barrier_generation =
200 43406 : pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
201 43406 : pg_atomic_write_u64(&slot->pss_barrierGeneration, barrier_generation);
202 :
203 43406 : if (cancel_key_len > 0)
204 27434 : memcpy(slot->pss_cancel_key, cancel_key, cancel_key_len);
205 43406 : slot->pss_cancel_key_len = cancel_key_len;
206 43406 : pg_atomic_write_u32(&slot->pss_pid, MyProcPid);
207 :
208 43406 : SpinLockRelease(&slot->pss_mutex);
209 :
210 : /* Spinlock is released, do the check */
211 43406 : if (old_pss_pid != 0)
212 0 : elog(LOG, "process %d taking over ProcSignal slot %d, but it's not empty",
213 : MyProcPid, MyProcNumber);
214 :
215 : /* Remember slot location for CheckProcSignal */
216 43406 : MyProcSignalSlot = slot;
217 :
218 : /* Set up to release the slot on process exit */
219 43406 : on_shmem_exit(CleanupProcSignalState, (Datum) 0);
220 43406 : }
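/*
 * Illustrative sketch (not part of this file): how callers are expected to
 * register.  A cancellable backend registers the cancel key it has handed to
 * its client, while a process that can never be cancelled passes no key at
 * all (cancel_key_len = 0).  pg_strong_random() is assumed here purely for
 * illustration; the real callers live in the backend startup code.
 */
static void
procsignal_register_sketch(bool cancellable)
{
	uint8		cancel_key[MAX_CANCEL_KEY_LENGTH];

	if (cancellable && pg_strong_random(cancel_key, sizeof(cancel_key)))
		ProcSignalInit(cancel_key, sizeof(cancel_key));
	else
		ProcSignalInit(NULL, 0);
}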
221 :
222 : /*
223 : * CleanupProcSignalState
224 : * Remove current process from ProcSignal mechanism
225 : *
226 : * This function is called via on_shmem_exit() during backend shutdown.
227 : */
228 : static void
229 43406 : CleanupProcSignalState(int status, Datum arg)
230 : {
231 : pid_t old_pid;
232 43406 : ProcSignalSlot *slot = MyProcSignalSlot;
233 :
234 : /*
235 : * Clear MyProcSignalSlot, so that a SIGUSR1 received after this point
236 : * won't try to access it after it's no longer ours (and perhaps even
237 : * after we've unmapped the shared memory segment).
238 : */
239 : Assert(MyProcSignalSlot != NULL);
240 43406 : MyProcSignalSlot = NULL;
241 :
242 : /* sanity check */
243 43406 : SpinLockAcquire(&slot->pss_mutex);
244 43406 : old_pid = pg_atomic_read_u32(&slot->pss_pid);
245 43406 : if (old_pid != MyProcPid)
246 : {
247 : /*
 248                 :               * don't ERROR here. We're exiting anyway, and don't want to get into
 249                 :               * an infinite loop trying to exit
250 : */
251 0 : SpinLockRelease(&slot->pss_mutex);
252 0 : elog(LOG, "process %d releasing ProcSignal slot %d, but it contains %d",
253 : MyProcPid, (int) (slot - ProcSignal->psh_slot), (int) old_pid);
254 0 : return; /* XXX better to zero the slot anyway? */
255 : }
256 :
257 : /* Mark the slot as unused */
258 43406 : pg_atomic_write_u32(&slot->pss_pid, 0);
259 43406 : slot->pss_cancel_key_len = 0;
260 :
261 : /*
262 : * Make this slot look like it's absorbed all possible barriers, so that
263 : * no barrier waits block on it.
264 : */
265 43406 : pg_atomic_write_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
266 :
267 43406 : SpinLockRelease(&slot->pss_mutex);
268 :
269 43406 : ConditionVariableBroadcast(&slot->pss_barrierCV);
270 : }
271 :
272 : /*
273 : * SendProcSignal
274 : * Send a signal to a Postgres process
275 : *
276 : * Providing procNumber is optional, but it will speed up the operation.
277 : *
278 : * On success (a signal was sent), zero is returned.
279 : * On error, -1 is returned, and errno is set (typically to ESRCH or EPERM).
280 : *
281 : * Not to be confused with ProcSendSignal
282 : */
283 : int
284 11424 : SendProcSignal(pid_t pid, ProcSignalReason reason, ProcNumber procNumber)
285 : {
286 : volatile ProcSignalSlot *slot;
287 :
288 11424 : if (procNumber != INVALID_PROC_NUMBER)
289 : {
290 : Assert(procNumber < NumProcSignalSlots);
291 11312 : slot = &ProcSignal->psh_slot[procNumber];
292 :
293 11312 : SpinLockAcquire(&slot->pss_mutex);
294 11312 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
295 : {
296 : /* Atomically set the proper flag */
297 11312 : slot->pss_signalFlags[reason] = true;
298 11312 : SpinLockRelease(&slot->pss_mutex);
299 : /* Send signal */
300 11312 : return kill(pid, SIGUSR1);
301 : }
302 0 : SpinLockRelease(&slot->pss_mutex);
303 : }
304 : else
305 : {
306 : /*
307 : * procNumber not provided, so search the array using pid. We search
308 : * the array back to front so as to reduce search overhead. Passing
309 : * INVALID_PROC_NUMBER means that the target is most likely an
310 : * auxiliary process, which will have a slot near the end of the
311 : * array.
312 : */
313 : int i;
314 :
315 4980 : for (i = NumProcSignalSlots - 1; i >= 0; i--)
316 : {
317 4980 : slot = &ProcSignal->psh_slot[i];
318 :
319 4980 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
320 : {
321 112 : SpinLockAcquire(&slot->pss_mutex);
322 112 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
323 : {
324 : /* Atomically set the proper flag */
325 112 : slot->pss_signalFlags[reason] = true;
326 112 : SpinLockRelease(&slot->pss_mutex);
327 : /* Send signal */
328 112 : return kill(pid, SIGUSR1);
329 : }
330 0 : SpinLockRelease(&slot->pss_mutex);
331 : }
332 : }
333 : }
334 :
335 0 : errno = ESRCH;
336 0 : return -1;
337 : }
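/*
 * Illustrative sketch (not part of this file): a typical caller passes both
 * the PID and, when known, the ProcNumber of the target, and checks the
 * return value because the target may exit concurrently.  The reason used
 * here is only an example.
 */
static bool
send_procsignal_sketch(pid_t pid, ProcNumber procNumber)
{
	if (SendProcSignal(pid, PROCSIG_LOG_MEMORY_CONTEXT, procNumber) < 0)
	{
		/* ESRCH is expected if the process has already exited */
		ereport(LOG,
				(errmsg("could not send signal to process %d: %m",
						(int) pid)));
		return false;
	}

	return true;
}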
338 :
339 : /*
340 : * EmitProcSignalBarrier
341 : * Send a signal to every Postgres process
342 : *
343 : * The return value of this function is the barrier "generation" created
344 : * by this operation. This value can be passed to WaitForProcSignalBarrier
345 : * to wait until it is known that every participant in the ProcSignal
346 : * mechanism has absorbed the signal (or started afterwards).
347 : *
348 : * Note that it would be a bad idea to use this for anything that happens
349 : * frequently, as interrupting every backend could cause a noticeable
350 : * performance hit.
351 : *
352 : * Callers are entitled to assume that this function will not throw ERROR
353 : * or FATAL.
354 : */
355 : uint64
356 182 : EmitProcSignalBarrier(ProcSignalBarrierType type)
357 : {
358 182 : uint32 flagbit = 1 << (uint32) type;
359 : uint64 generation;
360 :
361 : /*
362 : * Set all the flags.
363 : *
364 : * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
365 : * totally ordered with respect to anything the caller did before, and
366 : * anything that we do afterwards. (This is also true of the later call to
367 : * pg_atomic_add_fetch_u64.)
368 : */
369 21104 : for (int i = 0; i < NumProcSignalSlots; i++)
370 : {
371 20922 : volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
372 :
373 20922 : pg_atomic_fetch_or_u32(&slot->pss_barrierCheckMask, flagbit);
374 : }
375 :
376 : /*
377 : * Increment the generation counter.
378 : */
379 : generation =
380 182 : pg_atomic_add_fetch_u64(&ProcSignal->psh_barrierGeneration, 1);
381 :
382 : /*
383 : * Signal all the processes, so that they update their advertised barrier
384 : * generation.
385 : *
386 : * Concurrency is not a problem here. Backends that have exited don't
387 : * matter, and new backends that have joined since we entered this
388 : * function must already have current state, since the caller is
389 : * responsible for making sure that the relevant state is entirely visible
390 : * before calling this function in the first place. We still have to wake
391 : * them up - because we can't distinguish between such backends and older
392 : * backends that need to update state - but they won't actually need to
393 : * change any state.
394 : */
395 21104 : for (int i = NumProcSignalSlots - 1; i >= 0; i--)
396 : {
397 20922 : volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
398 20922 : pid_t pid = pg_atomic_read_u32(&slot->pss_pid);
399 :
400 20922 : if (pid != 0)
401 : {
402 1578 : SpinLockAcquire(&slot->pss_mutex);
403 1578 : pid = pg_atomic_read_u32(&slot->pss_pid);
404 1578 : if (pid != 0)
405 : {
406 : /* see SendProcSignal for details */
407 1578 : slot->pss_signalFlags[PROCSIG_BARRIER] = true;
408 1578 : SpinLockRelease(&slot->pss_mutex);
409 1578 : kill(pid, SIGUSR1);
410 : }
411 : else
412 0 : SpinLockRelease(&slot->pss_mutex);
413 : }
414 : }
415 :
416 182 : return generation;
417 : }
418 :
419 : /*
420 : * WaitForProcSignalBarrier - wait until it is guaranteed that all changes
421 : * requested by a specific call to EmitProcSignalBarrier() have taken effect.
422 : */
423 : void
424 182 : WaitForProcSignalBarrier(uint64 generation)
425 : {
426 : Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
427 :
428 182 : elog(DEBUG1,
429 : "waiting for all backends to process ProcSignalBarrier generation "
430 : UINT64_FORMAT,
431 : generation);
432 :
433 21104 : for (int i = NumProcSignalSlots - 1; i >= 0; i--)
434 : {
435 20922 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
436 : uint64 oldval;
437 :
438 : /*
439 : * It's important that we check only pss_barrierGeneration here and
440 : * not pss_barrierCheckMask. Bits in pss_barrierCheckMask get cleared
441 : * before the barrier is actually absorbed, but pss_barrierGeneration
442 : * is updated only afterward.
443 : */
444 20922 : oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
445 21666 : while (oldval < generation)
446 : {
447 744 : if (ConditionVariableTimedSleep(&slot->pss_barrierCV,
448 : 5000,
449 : WAIT_EVENT_PROC_SIGNAL_BARRIER))
450 0 : ereport(LOG,
451 : (errmsg("still waiting for backend with PID %d to accept ProcSignalBarrier",
452 : (int) pg_atomic_read_u32(&slot->pss_pid))));
453 744 : oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
454 : }
455 20922 : ConditionVariableCancelSleep();
456 : }
457 :
458 182 : elog(DEBUG1,
459 : "finished waiting for all backends to process ProcSignalBarrier generation "
460 : UINT64_FORMAT,
461 : generation);
462 :
463 : /*
464 : * The caller is probably calling this function because it wants to read
465 : * the shared state or perform further writes to shared state once all
466 : * backends are known to have absorbed the barrier. However, the read of
467 : * pss_barrierGeneration was performed unlocked; insert a memory barrier
468 : * to separate it from whatever follows.
469 : */
470 182 : pg_memory_barrier();
471 182 : }
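/*
 * Illustrative sketch (not part of this file): the intended usage pattern for
 * the two functions above.  The caller first makes the shared-state change
 * fully visible, then emits the barrier and waits for every participant to
 * absorb it.
 */
static void
barrier_usage_sketch(void)
{
	uint64		generation;

	/* ... update shared state that every backend must take notice of ... */

	generation = EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE);
	WaitForProcSignalBarrier(generation);

	/* ... every live backend has now absorbed the change ... */
}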
472 :
473 : /*
474 : * Handle receipt of an interrupt indicating a global barrier event.
475 : *
476 : * All the actual work is deferred to ProcessProcSignalBarrier(), because we
477 : * cannot safely access the barrier generation inside the signal handler as
478 : * 64bit atomics might use spinlock based emulation, even for reads. As this
 479                 :  * 64-bit atomics might use spinlock-based emulation, even for reads. As this
 480                 :  * routine only gets called when PROCSIG_BARRIER is sent, that won't cause a
 481                 :  * lot of unnecessary work.
482 : static void
483 1160 : HandleProcSignalBarrierInterrupt(void)
484 : {
485 1160 : InterruptPending = true;
486 1160 : ProcSignalBarrierPending = true;
487 : /* latch will be set by procsignal_sigusr1_handler */
488 1160 : }
489 :
490 : /*
491 : * Perform global barrier related interrupt checking.
492 : *
493 : * Any backend that participates in ProcSignal signaling must arrange to
494 : * call this function periodically. It is called from CHECK_FOR_INTERRUPTS(),
495 : * which is enough for normal backends, but not necessarily for all types of
496 : * background processes.
497 : */
498 : void
499 1160 : ProcessProcSignalBarrier(void)
500 : {
501 : uint64 local_gen;
502 : uint64 shared_gen;
503 : volatile uint32 flags;
504 :
505 : Assert(MyProcSignalSlot);
506 :
507 : /* Exit quickly if there's no work to do. */
508 1160 : if (!ProcSignalBarrierPending)
509 0 : return;
510 1160 : ProcSignalBarrierPending = false;
511 :
512 : /*
 513                 :      * We may well process multiple barriers at once, before the signals for
 514                 :      * all of them have arrived. To avoid unnecessary work in response to
 515                 :      * subsequent signals, exit early if we have already processed all of
 516                 :      * them.
517 : */
518 1160 : local_gen = pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration);
519 1160 : shared_gen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
520 :
521 : Assert(local_gen <= shared_gen);
522 :
523 1160 : if (local_gen == shared_gen)
524 0 : return;
525 :
526 : /*
527 : * Get and clear the flags that are set for this backend. Note that
528 : * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
529 : * read of the barrier generation above happens before we atomically
530 : * extract the flags, and that any subsequent state changes happen
531 : * afterward.
532 : *
533 : * NB: In order to avoid race conditions, we must zero
534 : * pss_barrierCheckMask first and only afterwards try to do barrier
535 : * processing. If we did it in the other order, someone could send us
536 : * another barrier of some type right after we called the
537 : * barrier-processing function but before we cleared the bit. We would
538 : * have no way of knowing that the bit needs to stay set in that case, so
539 : * the need to call the barrier-processing function again would just get
540 : * forgotten. So instead, we tentatively clear all the bits and then put
541 : * back any for which we don't manage to successfully absorb the barrier.
542 : */
543 1160 : flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
544 :
545 : /*
546 : * If there are no flags set, then we can skip doing any real work.
547 : * Otherwise, establish a PG_TRY block, so that we don't lose track of
548 : * which types of barrier processing are needed if an ERROR occurs.
549 : */
550 1160 : if (flags != 0)
551 : {
552 1160 : bool success = true;
553 :
554 1160 : PG_TRY();
555 : {
556 : /*
557 : * Process each type of barrier. The barrier-processing functions
558 : * should normally return true, but may return false if the
559 : * barrier can't be absorbed at the current time. This should be
560 : * rare, because it's pretty expensive. Every single
561 : * CHECK_FOR_INTERRUPTS() will return here until we manage to
562 : * absorb the barrier, and that cost will add up in a hurry.
563 : *
564 : * NB: It ought to be OK to call the barrier-processing functions
565 : * unconditionally, but it's more efficient to call only the ones
566 : * that might need us to do something based on the flags.
567 : */
568 2320 : while (flags != 0)
569 : {
570 : ProcSignalBarrierType type;
571 1160 : bool processed = true;
572 :
573 1160 : type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags);
574 1160 : switch (type)
575 : {
576 1160 : case PROCSIGNAL_BARRIER_SMGRRELEASE:
577 1160 : processed = ProcessBarrierSmgrRelease();
578 1160 : break;
579 : }
580 :
581 : /*
582 : * To avoid an infinite loop, we must always unset the bit in
583 : * flags.
584 : */
585 1160 : BARRIER_CLEAR_BIT(flags, type);
586 :
587 : /*
588 : * If we failed to process the barrier, reset the shared bit
589 : * so we try again later, and set a flag so that we don't bump
590 : * our generation.
591 : */
592 1160 : if (!processed)
593 : {
594 0 : ResetProcSignalBarrierBits(((uint32) 1) << type);
595 0 : success = false;
596 : }
597 : }
598 : }
599 0 : PG_CATCH();
600 : {
601 : /*
602 : * If an ERROR occurred, we'll need to try again later to handle
603 : * that barrier type and any others that haven't been handled yet
604 : * or weren't successfully absorbed.
605 : */
606 0 : ResetProcSignalBarrierBits(flags);
607 0 : PG_RE_THROW();
608 : }
609 1160 : PG_END_TRY();
610 :
611 : /*
612 : * If some barrier types were not successfully absorbed, we will have
613 : * to try again later.
614 : */
615 1160 : if (!success)
616 0 : return;
617 : }
618 :
619 : /*
620 : * State changes related to all types of barriers that might have been
621 : * emitted have now been handled, so we can update our notion of the
622 : * generation to the one we observed before beginning the updates. If
623 : * things have changed further, it'll get fixed up when this function is
624 : * next called.
625 : */
626 1160 : pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, shared_gen);
627 1160 : ConditionVariableBroadcast(&MyProcSignalSlot->pss_barrierCV);
628 : }
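/*
 * Illustrative sketches (not part of this file).
 *
 * First, the contract of a barrier-absorbing function such as
 * ProcessBarrierSmgrRelease(): return true once the barrier has been fully
 * absorbed, or false if it cannot be absorbed right now, in which case
 * ProcessProcSignalBarrier() re-arms the bit via ResetProcSignalBarrierBits()
 * and we will be asked again later.  The function name here is hypothetical.
 */
static bool
ProcessBarrierExampleSketch(void)
{
	bool		can_absorb_now = true;	/* placeholder for a real condition */

	if (!can_absorb_now)
		return false;			/* ProcessProcSignalBarrier() will retry */

	/* ... apply the globally visible state change locally ... */
	return true;
}

/*
 * Second, background processes that rarely reach CHECK_FOR_INTERRUPTS() are
 * expected to poll the pending flag from their main loop, roughly like this:
 */
static void
background_loop_iteration_sketch(void)
{
	if (ProcSignalBarrierPending)
		ProcessProcSignalBarrier();
}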
629 :
630 : /*
631 : * If it turns out that we couldn't absorb one or more barrier types, either
632 : * because the barrier-processing functions returned false or due to an error,
633 : * arrange for processing to be retried later.
634 : */
635 : static void
636 0 : ResetProcSignalBarrierBits(uint32 flags)
637 : {
638 0 : pg_atomic_fetch_or_u32(&MyProcSignalSlot->pss_barrierCheckMask, flags);
639 0 : ProcSignalBarrierPending = true;
640 0 : InterruptPending = true;
641 0 : }
642 :
643 : /*
644 : * CheckProcSignal - check to see if a particular reason has been
645 : * signaled, and clear the signal flag. Should be called after receiving
646 : * SIGUSR1.
647 : */
648 : static bool
649 294270 : CheckProcSignal(ProcSignalReason reason)
650 : {
651 294270 : volatile ProcSignalSlot *slot = MyProcSignalSlot;
652 :
653 294270 : if (slot != NULL)
654 : {
655 : /*
656 : * Careful here --- don't clear flag if we haven't seen it set.
657 : * pss_signalFlags is of type "volatile sig_atomic_t" to allow us to
658 : * read it here safely, without holding the spinlock.
659 : */
660 294180 : if (slot->pss_signalFlags[reason])
661 : {
662 10606 : slot->pss_signalFlags[reason] = false;
663 10606 : return true;
664 : }
665 : }
666 :
667 283664 : return false;
668 : }
669 :
670 : /*
671 : * procsignal_sigusr1_handler - handle SIGUSR1 signal.
672 : */
673 : void
674 19618 : procsignal_sigusr1_handler(SIGNAL_ARGS)
675 : {
676 19618 : if (CheckProcSignal(PROCSIG_CATCHUP_INTERRUPT))
677 5636 : HandleCatchupInterrupt();
678 :
679 19618 : if (CheckProcSignal(PROCSIG_NOTIFY_INTERRUPT))
680 40 : HandleNotifyInterrupt();
681 :
682 19618 : if (CheckProcSignal(PROCSIG_PARALLEL_MESSAGE))
683 3606 : HandleParallelMessageInterrupt();
684 :
685 19618 : if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING))
686 72 : HandleWalSndInitStopping();
687 :
688 19618 : if (CheckProcSignal(PROCSIG_BARRIER))
689 1160 : HandleProcSignalBarrierInterrupt();
690 :
691 19618 : if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT))
692 18 : HandleLogMemoryContextInterrupt();
693 :
694 19618 : if (CheckProcSignal(PROCSIG_GET_MEMORY_CONTEXT))
695 12 : HandleGetMemoryContextInterrupt();
696 :
697 19618 : if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE))
698 24 : HandleParallelApplyMessageInterrupt();
699 :
700 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE))
701 4 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE);
702 :
703 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_TABLESPACE))
704 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
705 :
706 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOCK))
707 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOCK);
708 :
709 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT))
710 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
711 :
712 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT))
713 10 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT);
714 :
715 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK))
716 16 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
717 :
718 19618 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN))
719 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
720 :
721 19618 : SetLatch(MyLatch);
722 19618 : }
723 :
724 : /*
 725                 :  * Send a query cancellation signal to a backend.
726 : *
727 : * Note: This is called from a backend process before authentication. We
728 : * cannot take LWLocks yet, but that's OK; we rely on atomic reads of the
729 : * fields in the ProcSignal slots.
730 : */
731 : void
732 32 : SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len)
733 : {
734 : Assert(backendPID != 0);
735 :
736 : /*
737 : * See if we have a matching backend. Reading the pss_pid and
 738                 :      * pss_cancel_key fields is racy; a backend might die and remove itself
 739                 :      * from the array at any time. The probability of the cancellation key
 740                 :      * matching the wrong process is minuscule, however, so we can live with that.
 741                 :      * PIDs are reused too, so sending the signal based on PID is inherently
 742                 :      * racy anyway, although OSes avoid reusing PIDs too soon.
743 : */
744 522 : for (int i = 0; i < NumProcSignalSlots; i++)
745 : {
746 522 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
747 : bool match;
748 :
749 522 : if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
750 490 : continue;
751 :
752 : /* Acquire the spinlock and re-check */
753 32 : SpinLockAcquire(&slot->pss_mutex);
754 32 : if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
755 : {
756 0 : SpinLockRelease(&slot->pss_mutex);
757 0 : continue;
758 : }
759 : else
760 : {
761 64 : match = slot->pss_cancel_key_len == cancel_key_len &&
762 32 : timingsafe_bcmp(slot->pss_cancel_key, cancel_key, cancel_key_len) == 0;
763 :
764 32 : SpinLockRelease(&slot->pss_mutex);
765 :
766 32 : if (match)
767 : {
768 : /* Found a match; signal that backend to cancel current op */
769 32 : ereport(DEBUG2,
770 : (errmsg_internal("processing cancel request: sending SIGINT to process %d",
771 : backendPID)));
772 :
773 : /*
774 : * If we have setsid(), signal the backend's whole process
775 : * group
776 : */
777 : #ifdef HAVE_SETSID
778 32 : kill(-backendPID, SIGINT);
779 : #else
780 : kill(backendPID, SIGINT);
781 : #endif
782 : }
783 : else
784 : {
785 : /* Right PID, wrong key: no way, Jose */
786 0 : ereport(LOG,
787 : (errmsg("wrong key in cancel request for process %d",
788 : backendPID)));
789 : }
790 32 : return;
791 : }
792 : }
793 :
794 : /* No matching backend */
795 0 : ereport(LOG,
796 : (errmsg("PID %d in cancel request did not match any process",
797 : backendPID)));
798 : }
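/*
 * Illustrative sketch (not part of this file): the caller that handles a
 * client's cancel-request packet is expected simply to forward the claimed
 * PID and the raw key bytes; all matching happens above, and the key
 * comparison uses timingsafe_bcmp() so that a wrong key cannot be detected
 * through timing differences.  The struct and field names here are
 * hypothetical.
 */
typedef struct CancelRequestSketch
{
	int			backendPID;
	uint8		key[MAX_CANCEL_KEY_LENGTH];
	int			key_len;
} CancelRequestSketch;

static void
process_cancel_request_sketch(const CancelRequestSketch *req)
{
	SendCancelRequest(req->backendPID, req->key, req->key_len);
}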