Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * procsignal.c
4 : * Routines for interprocess signaling
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/ipc/procsignal.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include <signal.h>
18 : #include <unistd.h>
19 :
20 : #include "access/parallel.h"
21 : #include "commands/async.h"
22 : #include "miscadmin.h"
23 : #include "pgstat.h"
24 : #include "port/pg_bitutils.h"
25 : #include "replication/logicalworker.h"
26 : #include "replication/walsender.h"
27 : #include "storage/condition_variable.h"
28 : #include "storage/ipc.h"
29 : #include "storage/latch.h"
30 : #include "storage/shmem.h"
31 : #include "storage/sinval.h"
32 : #include "storage/smgr.h"
33 : #include "tcop/tcopprot.h"
34 : #include "utils/memutils.h"
35 :
36 : /*
37 : * The SIGUSR1 signal is multiplexed to support signaling multiple event
38 : * types. The specific reason is communicated via flags in shared memory.
39 : * We keep a boolean flag for each possible "reason", so that different
40 : * reasons can be signaled to a process concurrently. (However, if the same
41 : * reason is signaled more than once nearly simultaneously, the process may
42 : * observe it only once.)
43 : *
44 : * Each process that wants to receive signals registers its process ID
45 : * in the ProcSignalSlots array. The array is indexed by ProcNumber to make
46 : * slot allocation simple, and to avoid having to search the array when you
47 : * know the ProcNumber of the process you're signaling. (We do support
48 : * signaling without ProcNumber, but it's a bit less efficient.)
49 : *
50 : * The fields in each slot are protected by a spinlock, pss_mutex. pss_pid can
51 : * also be read without holding the spinlock, as a quick preliminary check
52 : * when searching for a particular PID in the array.
53 : *
54 : * pss_signalFlags are intended to be set in cases where we don't need to
55 : * keep track of whether or not the target process has handled the signal,
56 : * but sometimes we need confirmation, as when making a global state change
57 : * that cannot be considered complete until all backends have taken notice
58 : * of it. For such use cases, we set a bit in pss_barrierCheckMask and then
59 : * increment the current "barrier generation"; when the new barrier generation
60 : * (or greater) appears in the pss_barrierGeneration flag of every process,
61 : * we know that the message has been received everywhere.
62 : */
63 : typedef struct
64 : {
65 : pg_atomic_uint32 pss_pid;
66 : bool pss_cancel_key_valid;
67 : int32 pss_cancel_key;
68 : volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
69 : slock_t pss_mutex; /* protects the above fields */
70 :
71 : /* Barrier-related fields (not protected by pss_mutex) */
72 : pg_atomic_uint64 pss_barrierGeneration;
73 : pg_atomic_uint32 pss_barrierCheckMask;
74 : ConditionVariable pss_barrierCV;
75 : } ProcSignalSlot;
76 :
77 : /*
78 : * Information that is global to the entire ProcSignal system can be stored
79 : * here.
80 : *
81 : * psh_barrierGeneration is the highest barrier generation in existence.
82 : */
83 : struct ProcSignalHeader
84 : {
85 : pg_atomic_uint64 psh_barrierGeneration;
86 : ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
87 : };
88 :
89 : /*
90 : * We reserve a slot for each possible ProcNumber, plus one for each
91 : * possible auxiliary process type. (This scheme assumes there is not
92 : * more than one of any auxiliary process type at a time.)
93 : */
94 : #define NumProcSignalSlots (MaxBackends + NUM_AUXILIARY_PROCS)
95 :
96 : /* Check whether the relevant type bit is set in the flags. */
97 : #define BARRIER_SHOULD_CHECK(flags, type) \
98 : (((flags) & (((uint32) 1) << (uint32) (type))) != 0)
99 :
100 : /* Clear the relevant type bit from the flags. */
101 : #define BARRIER_CLEAR_BIT(flags, type) \
102 : ((flags) &= ~(((uint32) 1) << (uint32) (type)))
103 :
104 : NON_EXEC_STATIC ProcSignalHeader *ProcSignal = NULL;
105 : static ProcSignalSlot *MyProcSignalSlot = NULL;
106 :
107 : static bool CheckProcSignal(ProcSignalReason reason);
108 : static void CleanupProcSignalState(int status, Datum arg);
109 : static void ResetProcSignalBarrierBits(uint32 flags);
110 :
111 : /*
112 : * ProcSignalShmemSize
113 : * Compute space needed for ProcSignal's shared memory
114 : */
115 : Size
116 5484 : ProcSignalShmemSize(void)
117 : {
118 : Size size;
119 :
120 5484 : size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
121 5484 : size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
122 5484 : return size;
123 : }
124 :
125 : /*
126 : * ProcSignalShmemInit
127 : * Allocate and initialize ProcSignal's shared memory
128 : */
129 : void
130 1918 : ProcSignalShmemInit(void)
131 : {
132 1918 : Size size = ProcSignalShmemSize();
133 : bool found;
134 :
135 1918 : ProcSignal = (ProcSignalHeader *)
136 1918 : ShmemInitStruct("ProcSignal", size, &found);
137 :
138 : /* If we're first, initialize. */
139 1918 : if (!found)
140 : {
141 : int i;
142 :
143 1918 : pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
144 :
145 190142 : for (i = 0; i < NumProcSignalSlots; ++i)
146 : {
147 188224 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
148 :
149 188224 : SpinLockInit(&slot->pss_mutex);
150 188224 : pg_atomic_init_u32(&slot->pss_pid, 0);
151 188224 : slot->pss_cancel_key_valid = false;
152 188224 : slot->pss_cancel_key = 0;
153 188224 : MemSet(slot->pss_signalFlags, 0, sizeof(slot->pss_signalFlags));
154 188224 : pg_atomic_init_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
155 188224 : pg_atomic_init_u32(&slot->pss_barrierCheckMask, 0);
156 188224 : ConditionVariableInit(&slot->pss_barrierCV);
157 : }
158 : }
159 1918 : }
160 :
161 : /*
162 : * ProcSignalInit
163 : * Register the current process in the ProcSignal array
164 : */
165 : void
166 37268 : ProcSignalInit(bool cancel_key_valid, int32 cancel_key)
167 : {
168 : ProcSignalSlot *slot;
169 : uint64 barrier_generation;
170 :
171 37268 : if (MyProcNumber < 0)
172 0 : elog(ERROR, "MyProcNumber not set");
173 37268 : if (MyProcNumber >= NumProcSignalSlots)
174 0 : elog(ERROR, "unexpected MyProcNumber %d in ProcSignalInit (max %d)", MyProcNumber, NumProcSignalSlots);
175 37268 : slot = &ProcSignal->psh_slot[MyProcNumber];
176 :
177 : /* sanity check */
178 37268 : SpinLockAcquire(&slot->pss_mutex);
179 37268 : if (pg_atomic_read_u32(&slot->pss_pid) != 0)
180 : {
181 0 : SpinLockRelease(&slot->pss_mutex);
182 0 : elog(LOG, "process %d taking over ProcSignal slot %d, but it's not empty",
183 : MyProcPid, MyProcNumber);
184 : }
185 :
186 : /* Clear out any leftover signal reasons */
187 37268 : MemSet(slot->pss_signalFlags, 0, NUM_PROCSIGNALS * sizeof(sig_atomic_t));
188 :
189 : /*
190 : * Initialize barrier state. Since we're a brand-new process, there
191 : * shouldn't be any leftover backend-private state that needs to be
192 : * updated. Therefore, we can just adopt the latest barrier generation and
193 : * disregard any previously-set check bits.
194 : *
195 : * NB: This only works if this initialization happens early enough in the
196 : * startup sequence that we haven't yet cached any state that might need
197 : * to be invalidated. That's also why we have a memory barrier here, to be
198 : * sure that any later reads of memory happen strictly after this.
199 : */
200 37268 : pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
201 : barrier_generation =
202 37268 : pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
203 37268 : pg_atomic_write_u64(&slot->pss_barrierGeneration, barrier_generation);
204 :
205 37268 : slot->pss_cancel_key_valid = cancel_key_valid;
206 37268 : slot->pss_cancel_key = cancel_key;
207 37268 : pg_atomic_write_u32(&slot->pss_pid, MyProcPid);
208 :
209 37268 : SpinLockRelease(&slot->pss_mutex);
210 :
211 : /* Remember slot location for CheckProcSignal */
212 37268 : MyProcSignalSlot = slot;
213 :
214 : /* Set up to release the slot on process exit */
215 37268 : on_shmem_exit(CleanupProcSignalState, (Datum) 0);
216 37268 : }
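/*
 * Illustrative sketch, compiled out via a hypothetical guard macro: how a
 * newly started process might register itself.  The function name and the
 * "have_cancel_key"/"cancel_key" parameters are hypothetical stand-ins for
 * whatever cancel-key state the caller holds at startup.
 */
#ifdef PROCSIGNAL_USAGE_EXAMPLES
static void
example_register_for_procsignals(bool have_cancel_key, int32 cancel_key)
{
	/*
	 * Claim the slot for MyProcNumber.  This must happen before the process
	 * caches any state that a barrier might need to invalidate, and it also
	 * arranges for CleanupProcSignalState() to run at process exit.
	 */
	ProcSignalInit(have_cancel_key, cancel_key);
}
#endif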
217 :
218 : /*
219 : * CleanupProcSignalState
220 : * Remove current process from ProcSignal mechanism
221 : *
222 : * This function is called via on_shmem_exit() during backend shutdown.
223 : */
224 : static void
225 37268 : CleanupProcSignalState(int status, Datum arg)
226 : {
227 : pid_t old_pid;
228 37268 : ProcSignalSlot *slot = MyProcSignalSlot;
229 :
230 : /*
231 : * Clear MyProcSignalSlot, so that a SIGUSR1 received after this point
232 : * won't try to access it after it's no longer ours (and perhaps even
233 : * after we've unmapped the shared memory segment).
234 : */
235 : Assert(MyProcSignalSlot != NULL);
236 37268 : MyProcSignalSlot = NULL;
237 :
238 : /* sanity check */
239 37268 : SpinLockAcquire(&slot->pss_mutex);
240 37268 : old_pid = pg_atomic_read_u32(&slot->pss_pid);
241 37268 : if (old_pid != MyProcPid)
242 : {
243 : /*
244 : * don't ERROR here. We're exiting anyway, and don't want to get into an
245 : * infinite loop trying to exit.
246 : */
247 0 : SpinLockRelease(&slot->pss_mutex);
248 0 : elog(LOG, "process %d releasing ProcSignal slot %d, but it contains %d",
249 : MyProcPid, (int) (slot - ProcSignal->psh_slot), (int) old_pid);
250 0 : return; /* XXX better to zero the slot anyway? */
251 : }
252 :
253 : /* Mark the slot as unused */
254 37268 : pg_atomic_write_u32(&slot->pss_pid, 0);
255 37268 : slot->pss_cancel_key_valid = false;
256 37268 : slot->pss_cancel_key = 0;
257 :
258 : /*
259 : * Make this slot look like it's absorbed all possible barriers, so that
260 : * no barrier waits block on it.
261 : */
262 37268 : pg_atomic_write_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
263 :
264 37268 : SpinLockRelease(&slot->pss_mutex);
265 :
266 37268 : ConditionVariableBroadcast(&slot->pss_barrierCV);
267 : }
268 :
269 : /*
270 : * SendProcSignal
271 : * Send a signal to a Postgres process
272 : *
273 : * Providing procNumber is optional, but it will speed up the operation.
274 : *
275 : * On success (a signal was sent), zero is returned.
276 : * On error, -1 is returned, and errno is set (typically to ESRCH or EPERM).
277 : *
278 : * Not to be confused with ProcSendSignal
279 : */
280 : int
281 10966 : SendProcSignal(pid_t pid, ProcSignalReason reason, ProcNumber procNumber)
282 : {
283 : volatile ProcSignalSlot *slot;
284 :
285 10966 : if (procNumber != INVALID_PROC_NUMBER)
286 : {
287 : Assert(procNumber < NumProcSignalSlots);
288 10858 : slot = &ProcSignal->psh_slot[procNumber];
289 :
290 10858 : SpinLockAcquire(&slot->pss_mutex);
291 10858 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
292 : {
293 : /* Atomically set the proper flag */
294 10858 : slot->pss_signalFlags[reason] = true;
295 10858 : SpinLockRelease(&slot->pss_mutex);
296 : /* Send signal */
297 10858 : return kill(pid, SIGUSR1);
298 : }
299 0 : SpinLockRelease(&slot->pss_mutex);
300 : }
301 : else
302 : {
303 : /*
304 : * procNumber not provided, so search the array using pid. We search
305 : * the array back to front so as to reduce search overhead. Passing
306 : * INVALID_PROC_NUMBER means that the target is most likely an
307 : * auxiliary process, which will have a slot near the end of the
308 : * array.
309 : */
310 : int i;
311 :
312 1334 : for (i = NumProcSignalSlots - 1; i >= 0; i--)
313 : {
314 1334 : slot = &ProcSignal->psh_slot[i];
315 :
316 1334 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
317 : {
318 108 : SpinLockAcquire(&slot->pss_mutex);
319 108 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
320 : {
321 : /* Atomically set the proper flag */
322 108 : slot->pss_signalFlags[reason] = true;
323 108 : SpinLockRelease(&slot->pss_mutex);
324 : /* Send signal */
325 108 : return kill(pid, SIGUSR1);
326 : }
327 0 : SpinLockRelease(&slot->pss_mutex);
328 : }
329 : }
330 : }
331 :
332 0 : errno = ESRCH;
333 0 : return -1;
334 : }
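/*
 * Illustrative sketch, compiled out via a hypothetical guard macro: how a
 * caller might signal a peer backend.  The function and parameter names are
 * hypothetical, and PROCSIG_CATCHUP_INTERRUPT stands in for whichever reason
 * the caller actually wants to deliver.
 */
#ifdef PROCSIGNAL_USAGE_EXAMPLES
static void
example_signal_peer(pid_t peer_pid, ProcNumber peer_procno)
{
	/*
	 * Passing the peer's ProcNumber avoids searching the slot array; pass
	 * INVALID_PROC_NUMBER if it is unknown.  A result of -1 typically means
	 * the process has already exited (errno == ESRCH).
	 */
	if (SendProcSignal(peer_pid, PROCSIG_CATCHUP_INTERRUPT, peer_procno) < 0)
		elog(DEBUG3, "could not signal process %d: %m", (int) peer_pid);
}
#endif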
335 :
336 : /*
337 : * EmitProcSignalBarrier
338 : * Send a signal to every Postgres process
339 : *
340 : * The return value of this function is the barrier "generation" created
341 : * by this operation. This value can be passed to WaitForProcSignalBarrier
342 : * to wait until it is known that every participant in the ProcSignal
343 : * mechanism has absorbed the signal (or started afterwards).
344 : *
345 : * Note that it would be a bad idea to use this for anything that happens
346 : * frequently, as interrupting every backend could cause a noticeable
347 : * performance hit.
348 : *
349 : * Callers are entitled to assume that this function will not throw ERROR
350 : * or FATAL.
351 : */
352 : uint64
353 158 : EmitProcSignalBarrier(ProcSignalBarrierType type)
354 : {
355 158 : uint32 flagbit = 1 << (uint32) type;
356 : uint64 generation;
357 :
358 : /*
359 : * Set all the flags.
360 : *
361 : * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
362 : * totally ordered with respect to anything the caller did before, and
363 : * anything that we do afterwards. (This is also true of the later call to
364 : * pg_atomic_add_fetch_u64.)
365 : */
366 12088 : for (int i = 0; i < NumProcSignalSlots; i++)
367 : {
368 11930 : volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
369 :
370 11930 : pg_atomic_fetch_or_u32(&slot->pss_barrierCheckMask, flagbit);
371 : }
372 :
373 : /*
374 : * Increment the generation counter.
375 : */
376 : generation =
377 158 : pg_atomic_add_fetch_u64(&ProcSignal->psh_barrierGeneration, 1);
378 :
379 : /*
380 : * Signal all the processes, so that they update their advertised barrier
381 : * generation.
382 : *
383 : * Concurrency is not a problem here. Backends that have exited don't
384 : * matter, and new backends that have joined since we entered this
385 : * function must already have current state, since the caller is
386 : * responsible for making sure that the relevant state is entirely visible
387 : * before calling this function in the first place. We still have to wake
388 : * them up - because we can't distinguish between such backends and older
389 : * backends that need to update state - but they won't actually need to
390 : * change any state.
391 : */
392 12088 : for (int i = NumProcSignalSlots - 1; i >= 0; i--)
393 : {
394 11930 : volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
395 11930 : pid_t pid = pg_atomic_read_u32(&slot->pss_pid);
396 :
397 11930 : if (pid != 0)
398 : {
399 944 : SpinLockAcquire(&slot->pss_mutex);
400 944 : pid = pg_atomic_read_u32(&slot->pss_pid);
401 944 : if (pid != 0)
402 : {
403 : /* see SendProcSignal for details */
404 944 : slot->pss_signalFlags[PROCSIG_BARRIER] = true;
405 944 : SpinLockRelease(&slot->pss_mutex);
406 944 : kill(pid, SIGUSR1);
407 : }
408 : else
409 0 : SpinLockRelease(&slot->pss_mutex);
410 : }
411 : }
412 :
413 158 : return generation;
414 : }
415 :
416 : /*
417 : * WaitForProcSignalBarrier - wait until it is guaranteed that all changes
418 : * requested by a specific call to EmitProcSignalBarrier() have taken effect.
419 : */
420 : void
421 158 : WaitForProcSignalBarrier(uint64 generation)
422 : {
423 : Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
424 :
425 158 : elog(DEBUG1,
426 : "waiting for all backends to process ProcSignalBarrier generation "
427 : UINT64_FORMAT,
428 : generation);
429 :
430 12088 : for (int i = NumProcSignalSlots - 1; i >= 0; i--)
431 : {
432 11930 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
433 : uint64 oldval;
434 :
435 : /*
436 : * It's important that we check only pss_barrierGeneration here and
437 : * not pss_barrierCheckMask. Bits in pss_barrierCheckMask get cleared
438 : * before the barrier is actually absorbed, but pss_barrierGeneration
439 : * is updated only afterward.
440 : */
441 11930 : oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
442 12458 : while (oldval < generation)
443 : {
444 528 : if (ConditionVariableTimedSleep(&slot->pss_barrierCV,
445 : 5000,
446 : WAIT_EVENT_PROC_SIGNAL_BARRIER))
447 0 : ereport(LOG,
448 : (errmsg("still waiting for backend with PID %d to accept ProcSignalBarrier",
449 : (int) pg_atomic_read_u32(&slot->pss_pid))));
450 528 : oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
451 : }
452 11930 : ConditionVariableCancelSleep();
453 : }
454 :
455 158 : elog(DEBUG1,
456 : "finished waiting for all backends to process ProcSignalBarrier generation "
457 : UINT64_FORMAT,
458 : generation);
459 :
460 : /*
461 : * The caller is probably calling this function because it wants to read
462 : * the shared state or perform further writes to shared state once all
463 : * backends are known to have absorbed the barrier. However, the read of
464 : * pss_barrierGeneration was performed unlocked; insert a memory barrier
465 : * to separate it from whatever follows.
466 : */
467 158 : pg_memory_barrier();
468 158 : }
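/*
 * Illustrative sketch, compiled out via a hypothetical guard macro: the
 * typical pairing of EmitProcSignalBarrier() with WaitForProcSignalBarrier()
 * when making a global state change.  PROCSIGNAL_BARRIER_SMGRRELEASE is the
 * only in-core barrier type at present; the function name is hypothetical.
 */
#ifdef PROCSIGNAL_USAGE_EXAMPLES
static void
example_emit_and_wait_for_barrier(void)
{
	/*
	 * Make the new shared state fully visible first, then force every live
	 * participant to absorb it before relying on that fact.
	 */
	WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
}
#endif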
469 :
470 : /*
471 : * Handle receipt of an interrupt indicating a global barrier event.
472 : *
473 : * All the actual work is deferred to ProcessProcSignalBarrier(), because we
474 : * All the actual work is deferred to ProcessProcSignalBarrier(), because we
475 : * cannot safely access the barrier generation inside the signal handler, as
476 : * 64-bit atomics might use spinlock-based emulation, even for reads. Since
477 : * this routine only gets called when PROCSIG_BARRIER is sent, that won't
478 : * cause a lot of unnecessary work.
479 : static void
480 708 : HandleProcSignalBarrierInterrupt(void)
481 : {
482 708 : InterruptPending = true;
483 708 : ProcSignalBarrierPending = true;
484 : /* latch will be set by procsignal_sigusr1_handler */
485 708 : }
486 :
487 : /*
488 : * Perform global barrier related interrupt checking.
489 : *
490 : * Any backend that participates in ProcSignal signaling must arrange to
491 : * call this function periodically. It is called from CHECK_FOR_INTERRUPTS(),
492 : * which is enough for normal backends, but not necessarily for all types of
493 : * background processes.
494 : */
495 : void
496 708 : ProcessProcSignalBarrier(void)
497 : {
498 : uint64 local_gen;
499 : uint64 shared_gen;
500 : volatile uint32 flags;
501 :
502 : Assert(MyProcSignalSlot);
503 :
504 : /* Exit quickly if there's no work to do. */
505 708 : if (!ProcSignalBarrierPending)
506 0 : return;
507 708 : ProcSignalBarrierPending = false;
508 :
509 : /*
510 : * It's quite possible that we will process multiple barriers at once,
511 : * before the signals for all of them have arrived. To avoid unnecessary
512 : * work in response to subsequent signals, exit early if we have already
513 : * processed all of them.
514 : */
515 708 : local_gen = pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration);
516 708 : shared_gen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
517 :
518 : Assert(local_gen <= shared_gen);
519 :
520 708 : if (local_gen == shared_gen)
521 0 : return;
522 :
523 : /*
524 : * Get and clear the flags that are set for this backend. Note that
525 : * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
526 : * read of the barrier generation above happens before we atomically
527 : * extract the flags, and that any subsequent state changes happen
528 : * afterward.
529 : *
530 : * NB: In order to avoid race conditions, we must zero
531 : * pss_barrierCheckMask first and only afterwards try to do barrier
532 : * processing. If we did it in the other order, someone could send us
533 : * another barrier of some type right after we called the
534 : * barrier-processing function but before we cleared the bit. We would
535 : * have no way of knowing that the bit needs to stay set in that case, so
536 : * the need to call the barrier-processing function again would just get
537 : * forgotten. So instead, we tentatively clear all the bits and then put
538 : * back any for which we don't manage to successfully absorb the barrier.
539 : */
540 708 : flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
541 :
542 : /*
543 : * If there are no flags set, then we can skip doing any real work.
544 : * Otherwise, establish a PG_TRY block, so that we don't lose track of
545 : * which types of barrier processing are needed if an ERROR occurs.
546 : */
547 708 : if (flags != 0)
548 : {
549 708 : bool success = true;
550 :
551 708 : PG_TRY();
552 : {
553 : /*
554 : * Process each type of barrier. The barrier-processing functions
555 : * should normally return true, but may return false if the
556 : * barrier can't be absorbed at the current time. This should be
557 : * rare, because it's pretty expensive. Every single
558 : * CHECK_FOR_INTERRUPTS() will return here until we manage to
559 : * absorb the barrier, and that cost will add up in a hurry.
560 : *
561 : * NB: It ought to be OK to call the barrier-processing functions
562 : * unconditionally, but it's more efficient to call only the ones
563 : * that might need us to do something based on the flags.
564 : */
565 1416 : while (flags != 0)
566 : {
567 : ProcSignalBarrierType type;
568 708 : bool processed = true;
569 :
570 708 : type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags);
571 708 : switch (type)
572 : {
573 708 : case PROCSIGNAL_BARRIER_SMGRRELEASE:
574 708 : processed = ProcessBarrierSmgrRelease();
575 708 : break;
576 : }
577 :
578 : /*
579 : * To avoid an infinite loop, we must always unset the bit in
580 : * flags.
581 : */
582 708 : BARRIER_CLEAR_BIT(flags, type);
583 :
584 : /*
585 : * If we failed to process the barrier, reset the shared bit
586 : * so we try again later, and set a flag so that we don't bump
587 : * our generation.
588 : */
589 708 : if (!processed)
590 : {
591 0 : ResetProcSignalBarrierBits(((uint32) 1) << type);
592 0 : success = false;
593 : }
594 : }
595 : }
596 0 : PG_CATCH();
597 : {
598 : /*
599 : * If an ERROR occurred, we'll need to try again later to handle
600 : * that barrier type and any others that haven't been handled yet
601 : * or weren't successfully absorbed.
602 : */
603 0 : ResetProcSignalBarrierBits(flags);
604 0 : PG_RE_THROW();
605 : }
606 708 : PG_END_TRY();
607 :
608 : /*
609 : * If some barrier types were not successfully absorbed, we will have
610 : * to try again later.
611 : */
612 708 : if (!success)
613 0 : return;
614 : }
615 :
616 : /*
617 : * State changes related to all types of barriers that might have been
618 : * emitted have now been handled, so we can update our notion of the
619 : * generation to the one we observed before beginning the updates. If
620 : * things have changed further, it'll get fixed up when this function is
621 : * next called.
622 : */
623 708 : pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, shared_gen);
624 708 : ConditionVariableBroadcast(&MyProcSignalSlot->pss_barrierCV);
625 : }
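/*
 * Illustrative sketch, compiled out via a hypothetical guard macro: a
 * background process that does not go through CHECK_FOR_INTERRUPTS() can
 * absorb pending barriers explicitly in its main loop.  The function name is
 * hypothetical.
 */
#ifdef PROCSIGNAL_USAGE_EXAMPLES
static void
example_absorb_pending_barriers(void)
{
	/* ProcSignalBarrierPending is set by HandleProcSignalBarrierInterrupt(). */
	if (ProcSignalBarrierPending)
		ProcessProcSignalBarrier();
}
#endif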
626 :
627 : /*
628 : * If it turns out that we couldn't absorb one or more barrier types, either
629 : * because the barrier-processing functions returned false or due to an error,
630 : * arrange for processing to be retried later.
631 : */
632 : static void
633 0 : ResetProcSignalBarrierBits(uint32 flags)
634 : {
635 0 : pg_atomic_fetch_or_u32(&MyProcSignalSlot->pss_barrierCheckMask, flags);
636 0 : ProcSignalBarrierPending = true;
637 0 : InterruptPending = true;
638 0 : }
639 :
640 : /*
641 : * CheckProcSignal - check to see if a particular reason has been
642 : * signaled, and clear the signal flag. Should be called after receiving
643 : * SIGUSR1.
644 : */
645 : static bool
646 260260 : CheckProcSignal(ProcSignalReason reason)
647 : {
648 260260 : volatile ProcSignalSlot *slot = MyProcSignalSlot;
649 :
650 260260 : if (slot != NULL)
651 : {
652 : /*
653 : * Careful here --- don't clear the flag if we haven't seen it set.
654 : * pss_signalFlags is of type "volatile sig_atomic_t" to allow us to
655 : * read it here safely, without holding the spinlock.
656 : */
657 260120 : if (slot->pss_signalFlags[reason])
658 : {
659 9728 : slot->pss_signalFlags[reason] = false;
660 9728 : return true;
661 : }
662 : }
663 :
664 250532 : return false;
665 : }
666 :
667 : /*
668 : * procsignal_sigusr1_handler - handle SIGUSR1 signal.
669 : */
670 : void
671 18590 : procsignal_sigusr1_handler(SIGNAL_ARGS)
672 : {
673 18590 : if (CheckProcSignal(PROCSIG_CATCHUP_INTERRUPT))
674 5300 : HandleCatchupInterrupt();
675 :
676 18590 : if (CheckProcSignal(PROCSIG_NOTIFY_INTERRUPT))
677 40 : HandleNotifyInterrupt();
678 :
679 18590 : if (CheckProcSignal(PROCSIG_PARALLEL_MESSAGE))
680 3526 : HandleParallelMessageInterrupt();
681 :
682 18590 : if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING))
683 68 : HandleWalSndInitStopping();
684 :
685 18590 : if (CheckProcSignal(PROCSIG_BARRIER))
686 708 : HandleProcSignalBarrierInterrupt();
687 :
688 18590 : if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT))
689 18 : HandleLogMemoryContextInterrupt();
690 :
691 18590 : if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE))
692 30 : HandleParallelApplyMessageInterrupt();
693 :
694 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE))
695 4 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE);
696 :
697 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_TABLESPACE))
698 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
699 :
700 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOCK))
701 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOCK);
702 :
703 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT))
704 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
705 :
706 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT))
707 10 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT);
708 :
709 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK))
710 16 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
711 :
712 18590 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN))
713 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
714 :
715 18590 : SetLatch(MyLatch);
716 18590 : }
717 :
718 : /*
719 : * Send a query cancellation signal to backend.
720 : *
721 : * Note: This is called from a backend process before authentication. We
722 : * cannot take LWLocks yet, but that's OK; we rely on atomic reads of the
723 : * fields in the ProcSignal slots.
724 : */
725 : void
726 20 : SendCancelRequest(int backendPID, int32 cancelAuthCode)
727 : {
728 : Assert(backendPID != 0);
729 :
730 : /*
731 : * See if we have a matching backend. Reading the pss_pid and
732 : * pss_cancel_key fields is racy; a backend might die and remove itself
733 : * from the array at any time. The probability of the cancellation key
734 : * matching the wrong process is minuscule, however, so we can live with
735 : * that. PIDs are reused too, so sending the signal based on PID is
736 : * inherently racy anyway, although OSes avoid reusing PIDs too soon.
737 : */
738 306 : for (int i = 0; i < NumProcSignalSlots; i++)
739 : {
740 306 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
741 : bool match;
742 :
743 306 : if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
744 286 : continue;
745 :
746 : /* Acquire the spinlock and re-check */
747 20 : SpinLockAcquire(&slot->pss_mutex);
748 20 : if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
749 : {
750 0 : SpinLockRelease(&slot->pss_mutex);
751 0 : continue;
752 : }
753 : else
754 : {
755 20 : match = slot->pss_cancel_key_valid && slot->pss_cancel_key == cancelAuthCode;
756 :
757 20 : SpinLockRelease(&slot->pss_mutex);
758 :
759 20 : if (match)
760 : {
761 : /* Found a match; signal that backend to cancel current op */
762 20 : ereport(DEBUG2,
763 : (errmsg_internal("processing cancel request: sending SIGINT to process %d",
764 : backendPID)));
765 :
766 : /*
767 : * If we have setsid(), signal the backend's whole process
768 : * group
769 : */
770 : #ifdef HAVE_SETSID
771 20 : kill(-backendPID, SIGINT);
772 : #else
773 : kill(backendPID, SIGINT);
774 : #endif
775 : }
776 : else
777 : {
778 : /* Right PID, wrong key: no way, Jose */
779 0 : ereport(LOG,
780 : (errmsg("wrong key in cancel request for process %d",
781 : backendPID)));
782 : }
783 20 : return;
784 : }
785 : }
786 :
787 : /* No matching backend */
788 0 : ereport(LOG,
789 : (errmsg("PID %d in cancel request did not match any process",
790 : backendPID)));
791 : }