Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * procsignal.c
4 : * Routines for interprocess signaling
5 : *
6 : *
7 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
8 : * Portions Copyright (c) 1994, Regents of the University of California
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/ipc/procsignal.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include <signal.h>
18 : #include <unistd.h>
19 :
20 : #include "access/parallel.h"
21 : #include "commands/async.h"
22 : #include "miscadmin.h"
23 : #include "pgstat.h"
24 : #include "port/pg_bitutils.h"
25 : #include "replication/logicalworker.h"
26 : #include "replication/walsender.h"
27 : #include "storage/condition_variable.h"
28 : #include "storage/ipc.h"
29 : #include "storage/latch.h"
30 : #include "storage/shmem.h"
31 : #include "storage/sinval.h"
32 : #include "storage/smgr.h"
33 : #include "tcop/tcopprot.h"
34 : #include "utils/memutils.h"
35 :
36 : /*
37 : * The SIGUSR1 signal is multiplexed to support signaling multiple event
38 : * types. The specific reason is communicated via flags in shared memory.
39 : * We keep a boolean flag for each possible "reason", so that different
40 : * reasons can be signaled to a process concurrently. (However, if the same
41 : * reason is signaled more than once nearly simultaneously, the process may
42 : * observe it only once.)
43 : *
44 : * Each process that wants to receive signals registers its process ID
45 : * in the ProcSignalSlots array. The array is indexed by ProcNumber to make
46 : * slot allocation simple, and to avoid having to search the array when you
47 : * know the ProcNumber of the process you're signaling. (We do support
48 : * signaling without ProcNumber, but it's a bit less efficient.)
49 : *
50 : * The fields in each slot are protected by a spinlock, pss_mutex. pss_pid can
51 : * also be read without holding the spinlock, as a quick preliminary check
52 : * when searching for a particular PID in the array.
53 : *
54 : * pss_signalFlags are intended to be set in cases where we don't need to
55 : * keep track of whether or not the target process has handled the signal,
56 : * but sometimes we need confirmation, as when making a global state change
57 : * that cannot be considered complete until all backends have taken notice
58 : * of it. For such use cases, we set a bit in pss_barrierCheckMask and then
59 : * increment the current "barrier generation"; when the new barrier generation
60 : * (or greater) appears in the pss_barrierGeneration flag of every process,
61 : * we know that the message has been received everywhere.
62 : */
63 : typedef struct
64 : {
65 : pg_atomic_uint32 pss_pid;
66 : bool pss_cancel_key_valid;
67 : int32 pss_cancel_key;
68 : volatile sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
69 : slock_t pss_mutex; /* protects the above fields */
70 :
71 : /* Barrier-related fields (not protected by pss_mutex) */
72 : pg_atomic_uint64 pss_barrierGeneration;
73 : pg_atomic_uint32 pss_barrierCheckMask;
74 : ConditionVariable pss_barrierCV;
75 : } ProcSignalSlot;
76 :
77 : /*
78 : * Information that is global to the entire ProcSignal system can be stored
79 : * here.
80 : *
81 : * psh_barrierGeneration is the highest barrier generation in existence.
82 : */
83 : struct ProcSignalHeader
84 : {
85 : pg_atomic_uint64 psh_barrierGeneration;
86 : ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
87 : };
88 :
89 : /*
90 : * We reserve a slot for each possible ProcNumber, plus one for each
91 : * possible auxiliary process type. (This scheme assumes there is not
92 : * more than one of any auxiliary process type at a time.)
93 : */
94 : #define NumProcSignalSlots (MaxBackends + NUM_AUXILIARY_PROCS)
95 :
96 : /* Check whether the relevant type bit is set in the flags. */
97 : #define BARRIER_SHOULD_CHECK(flags, type) \
98 : (((flags) & (((uint32) 1) << (uint32) (type))) != 0)
99 :
100 : /* Clear the relevant type bit from the flags. */
101 : #define BARRIER_CLEAR_BIT(flags, type) \
102 : ((flags) &= ~(((uint32) 1) << (uint32) (type)))
103 :
104 : NON_EXEC_STATIC ProcSignalHeader *ProcSignal = NULL;
105 : static ProcSignalSlot *MyProcSignalSlot = NULL;
106 :
107 : static bool CheckProcSignal(ProcSignalReason reason);
108 : static void CleanupProcSignalState(int status, Datum arg);
109 : static void ResetProcSignalBarrierBits(uint32 flags);
110 :
111 : /*
112 : * ProcSignalShmemSize
113 : * Compute space needed for ProcSignal's shared memory
114 : */
115 : Size
116 5436 : ProcSignalShmemSize(void)
117 : {
118 : Size size;
119 :
120 5436 : size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
121 5436 : size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
122 5436 : return size;
123 : }
124 :
125 : /*
126 : * ProcSignalShmemInit
127 : * Allocate and initialize ProcSignal's shared memory
128 : */
129 : void
130 1902 : ProcSignalShmemInit(void)
131 : {
132 1902 : Size size = ProcSignalShmemSize();
133 : bool found;
134 :
135 1902 : ProcSignal = (ProcSignalHeader *)
136 1902 : ShmemInitStruct("ProcSignal", size, &found);
137 :
138 : /* If we're first, initialize. */
139 1902 : if (!found)
140 : {
141 : int i;
142 :
143 1902 : pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
144 :
145 171440 : for (i = 0; i < NumProcSignalSlots; ++i)
146 : {
147 169538 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
148 :
149 169538 : SpinLockInit(&slot->pss_mutex);
150 169538 : pg_atomic_init_u32(&slot->pss_pid, 0);
151 169538 : slot->pss_cancel_key_valid = false;
152 169538 : slot->pss_cancel_key = 0;
153 169538 : MemSet(slot->pss_signalFlags, 0, sizeof(slot->pss_signalFlags));
154 169538 : pg_atomic_init_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
155 169538 : pg_atomic_init_u32(&slot->pss_barrierCheckMask, 0);
156 169538 : ConditionVariableInit(&slot->pss_barrierCV);
157 : }
158 : }
159 1902 : }
160 :
161 : /*
162 : * ProcSignalInit
163 : * Register the current process in the ProcSignal array
164 : */
165 : void
166 35548 : ProcSignalInit(bool cancel_key_valid, int32 cancel_key)
167 : {
168 : ProcSignalSlot *slot;
169 : uint64 barrier_generation;
170 :
171 35548 : if (MyProcNumber < 0)
172 0 : elog(ERROR, "MyProcNumber not set");
173 35548 : if (MyProcNumber >= NumProcSignalSlots)
174 0 : elog(ERROR, "unexpected MyProcNumber %d in ProcSignalInit (max %d)", MyProcNumber, NumProcSignalSlots);
175 35548 : slot = &ProcSignal->psh_slot[MyProcNumber];
176 :
177 : /* sanity check */
178 35548 : SpinLockAcquire(&slot->pss_mutex);
179 35548 : if (pg_atomic_read_u32(&slot->pss_pid) != 0)
180 : {
181 0 : SpinLockRelease(&slot->pss_mutex);
182 0 : elog(LOG, "process %d taking over ProcSignal slot %d, but it's not empty",
183 : MyProcPid, MyProcNumber);
184 : }
185 :
186 : /* Clear out any leftover signal reasons */
187 35548 : MemSet(slot->pss_signalFlags, 0, NUM_PROCSIGNALS * sizeof(sig_atomic_t));
188 :
189 : /*
190 : * Initialize barrier state. Since we're a brand-new process, there
191 : * shouldn't be any leftover backend-private state that needs to be
192 : * updated. Therefore, we can broadcast the latest barrier generation and
193 : * disregard any previously-set check bits.
194 : *
195 : * NB: This only works if this initialization happens early enough in the
196 : * startup sequence that we haven't yet cached any state that might need
197 : * to be invalidated. That's also why we have a memory barrier here, to be
198 : * sure that any later reads of memory happen strictly after this.
199 : */
200 35548 : pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
201 : barrier_generation =
202 35548 : pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
203 35548 : pg_atomic_write_u64(&slot->pss_barrierGeneration, barrier_generation);
204 :
205 35548 : slot->pss_cancel_key_valid = cancel_key_valid;
206 35548 : slot->pss_cancel_key = cancel_key;
207 35548 : pg_atomic_write_u32(&slot->pss_pid, MyProcPid);
208 :
209 35548 : SpinLockRelease(&slot->pss_mutex);
210 :
211 : /* Remember slot location for CheckProcSignal */
212 35548 : MyProcSignalSlot = slot;
213 :
214 : /* Set up to release the slot on process exit */
215 35548 : on_shmem_exit(CleanupProcSignalState, (Datum) 0);
216 35548 : }
217 :
218 : /*
219 : * CleanupProcSignalState
220 : * Remove current process from ProcSignal mechanism
221 : *
222 : * This function is called via on_shmem_exit() during backend shutdown.
223 : */
224 : static void
225 35548 : CleanupProcSignalState(int status, Datum arg)
226 : {
227 : pid_t old_pid;
228 35548 : ProcSignalSlot *slot = MyProcSignalSlot;
229 :
230 : /*
231 : * Clear MyProcSignalSlot, so that a SIGUSR1 received after this point
232 : * won't try to access it after it's no longer ours (and perhaps even
233 : * after we've unmapped the shared memory segment).
234 : */
235 : Assert(MyProcSignalSlot != NULL);
236 35548 : MyProcSignalSlot = NULL;
237 :
238 : /* sanity check */
239 35548 : SpinLockAcquire(&slot->pss_mutex);
240 35548 : old_pid = pg_atomic_read_u32(&slot->pss_pid);
241 35548 : if (old_pid != MyProcPid)
242 : {
243 : /*
244 : * don't ERROR here. We're exiting anyway, and don't want to get into
245 : * an infinite loop trying to exit
246 : */
247 0 : SpinLockRelease(&slot->pss_mutex);
248 0 : elog(LOG, "process %d releasing ProcSignal slot %d, but it contains %d",
249 : MyProcPid, (int) (slot - ProcSignal->psh_slot), (int) old_pid);
250 0 : return; /* XXX better to zero the slot anyway? */
251 : }
252 :
253 : /* Mark the slot as unused */
254 35548 : pg_atomic_write_u32(&slot->pss_pid, 0);
255 35548 : slot->pss_cancel_key_valid = false;
256 35548 : slot->pss_cancel_key = 0;
257 :
258 : /*
259 : * Make this slot look like it's absorbed all possible barriers, so that
260 : * no barrier waits block on it.
261 : */
262 35548 : pg_atomic_write_u64(&slot->pss_barrierGeneration, PG_UINT64_MAX);
263 :
264 35548 : SpinLockRelease(&slot->pss_mutex);
265 :
266 35548 : ConditionVariableBroadcast(&slot->pss_barrierCV);
267 : }
268 :
269 : /*
270 : * SendProcSignal
271 : * Send a signal to a Postgres process
272 : *
273 : * Providing procNumber is optional, but it will speed up the operation.
274 : *
275 : * On success (a signal was sent), zero is returned.
276 : * On error, -1 is returned, and errno is set (typically to ESRCH or EPERM).
277 : *
278 : * Not to be confused with ProcSendSignal
279 : */
280 : int
281 10948 : SendProcSignal(pid_t pid, ProcSignalReason reason, ProcNumber procNumber)
282 : {
283 : volatile ProcSignalSlot *slot;
284 :
285 10948 : if (procNumber != INVALID_PROC_NUMBER)
286 : {
287 : Assert(procNumber < NumProcSignalSlots);
288 10840 : slot = &ProcSignal->psh_slot[procNumber];
289 :
290 10840 : SpinLockAcquire(&slot->pss_mutex);
291 10840 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
292 : {
293 : /* Atomically set the proper flag */
294 10840 : slot->pss_signalFlags[reason] = true;
295 10840 : SpinLockRelease(&slot->pss_mutex);
296 : /* Send signal */
297 10840 : return kill(pid, SIGUSR1);
298 : }
299 0 : SpinLockRelease(&slot->pss_mutex);
300 : }
301 : else
302 : {
303 : /*
304 : * procNumber not provided, so search the array using pid. We search
305 : * the array back to front so as to reduce search overhead. Passing
306 : * INVALID_PROC_NUMBER means that the target is most likely an
307 : * auxiliary process, which will have a slot near the end of the
308 : * array.
309 : */
310 : int i;
311 :
312 1332 : for (i = NumProcSignalSlots - 1; i >= 0; i--)
313 : {
314 1332 : slot = &ProcSignal->psh_slot[i];
315 :
316 1332 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
317 : {
318 108 : SpinLockAcquire(&slot->pss_mutex);
319 108 : if (pg_atomic_read_u32(&slot->pss_pid) == pid)
320 : {
321 : /* Atomically set the proper flag */
322 108 : slot->pss_signalFlags[reason] = true;
323 108 : SpinLockRelease(&slot->pss_mutex);
324 : /* Send signal */
325 108 : return kill(pid, SIGUSR1);
326 : }
327 0 : SpinLockRelease(&slot->pss_mutex);
328 : }
329 : }
330 : }
331 :
332 0 : errno = ESRCH;
333 0 : return -1;
334 : }
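
As a usage sketch only (not part of procsignal.c): a caller in backend code that already knows both the target's PID and ProcNumber could request, say, a catchup interrupt as below. "target_pid" and "target_procno" are hypothetical parameters, and the return value is checked because the target may have exited.

 #include "postgres.h"
 #include "storage/procsignal.h"

 /*
  * Hedged sketch: ask an already-known backend to process a catchup
  * interrupt.  Any real caller obtains the PID and ProcNumber elsewhere.
  */
 static void
 request_catchup(pid_t target_pid, ProcNumber target_procno)
 {
     if (SendProcSignal(target_pid, PROCSIG_CATCHUP_INTERRUPT, target_procno) < 0)
         elog(DEBUG3, "could not signal process %d: %m", (int) target_pid);
 }
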
335 :
336 : /*
337 : * EmitProcSignalBarrier
338 : * Send a signal to every Postgres process
339 : *
340 : * The return value of this function is the barrier "generation" created
341 : * by this operation. This value can be passed to WaitForProcSignalBarrier
342 : * to wait until it is known that every participant in the ProcSignal
343 : * mechanism has absorbed the signal (or started afterwards).
344 : *
345 : * Note that it would be a bad idea to use this for anything that happens
346 : * frequently, as interrupting every backend could cause a noticeable
347 : * performance hit.
348 : *
349 : * Callers are entitled to assume that this function will not throw ERROR
350 : * or FATAL.
351 : */
352 : uint64
353 160 : EmitProcSignalBarrier(ProcSignalBarrierType type)
354 : {
355 160 : uint32 flagbit = 1 << (uint32) type;
356 : uint64 generation;
357 :
358 : /*
359 : * Set all the flags.
360 : *
361 : * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
362 : * totally ordered with respect to anything the caller did before, and
363 : * anything that we do afterwards. (This is also true of the later call to
364 : * pg_atomic_add_fetch_u64.)
365 : */
366 11414 : for (int i = 0; i < NumProcSignalSlots; i++)
367 : {
368 11254 : volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
369 :
370 11254 : pg_atomic_fetch_or_u32(&slot->pss_barrierCheckMask, flagbit);
371 : }
372 :
373 : /*
374 : * Increment the generation counter.
375 : */
376 : generation =
377 160 : pg_atomic_add_fetch_u64(&ProcSignal->psh_barrierGeneration, 1);
378 :
379 : /*
380 : * Signal all the processes, so that they update their advertised barrier
381 : * generation.
382 : *
383 : * Concurrency is not a problem here. Backends that have exited don't
384 : * matter, and new backends that have joined since we entered this
385 : * function must already have current state, since the caller is
386 : * responsible for making sure that the relevant state is entirely visible
387 : * before calling this function in the first place. We still have to wake
388 : * them up - because we can't distinguish between such backends and older
389 : * backends that need to update state - but they won't actually need to
390 : * change any state.
391 : */
392 11414 : for (int i = NumProcSignalSlots - 1; i >= 0; i--)
393 : {
394 11254 : volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
395 11254 : pid_t pid = pg_atomic_read_u32(&slot->pss_pid);
396 :
397 11254 : if (pid != 0)
398 : {
399 946 : SpinLockAcquire(&slot->pss_mutex);
400 946 : pid = pg_atomic_read_u32(&slot->pss_pid);
401 946 : if (pid != 0)
402 : {
403 : /* see SendProcSignal for details */
404 946 : slot->pss_signalFlags[PROCSIG_BARRIER] = true;
405 946 : SpinLockRelease(&slot->pss_mutex);
406 946 : kill(pid, SIGUSR1);
407 : }
408 : else
409 0 : SpinLockRelease(&slot->pss_mutex);
410 : }
411 : }
412 :
413 160 : return generation;
414 : }
415 :
416 : /*
417 : * WaitForProcSignalBarrier - wait until it is guaranteed that all changes
418 : * requested by a specific call to EmitProcSignalBarrier() have taken effect.
419 : */
420 : void
421 160 : WaitForProcSignalBarrier(uint64 generation)
422 : {
423 : Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
424 :
425 160 : elog(DEBUG1,
426 : "waiting for all backends to process ProcSignalBarrier generation "
427 : UINT64_FORMAT,
428 : generation);
429 :
430 11414 : for (int i = NumProcSignalSlots - 1; i >= 0; i--)
431 : {
432 11254 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
433 : uint64 oldval;
434 :
435 : /*
436 : * It's important that we check only pss_barrierGeneration here and
437 : * not pss_barrierCheckMask. Bits in pss_barrierCheckMask get cleared
438 : * before the barrier is actually absorbed, but pss_barrierGeneration
439 : * is updated only afterward.
440 : */
441 11254 : oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
442 11774 : while (oldval < generation)
443 : {
444 520 : if (ConditionVariableTimedSleep(&slot->pss_barrierCV,
445 : 5000,
446 : WAIT_EVENT_PROC_SIGNAL_BARRIER))
447 0 : ereport(LOG,
448 : (errmsg("still waiting for backend with PID %d to accept ProcSignalBarrier",
449 : (int) pg_atomic_read_u32(&slot->pss_pid))));
450 520 : oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
451 : }
452 11254 : ConditionVariableCancelSleep();
453 : }
454 :
455 160 : elog(DEBUG1,
456 : "finished waiting for all backends to process ProcSignalBarrier generation "
457 : UINT64_FORMAT,
458 : generation);
459 :
460 : /*
461 : * The caller is probably calling this function because it wants to read
462 : * the shared state or perform further writes to shared state once all
463 : * backends are known to have absorbed the barrier. However, the read of
464 : * pss_barrierGeneration was performed unlocked; insert a memory barrier
465 : * to separate it from whatever follows.
466 : */
467 160 : pg_memory_barrier();
468 160 : }
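
The two functions above are normally used as a pair: the caller first makes its shared-state change visible, then emits the barrier and waits for every participant to absorb it. A hedged one-line sketch using the only barrier type handled in this file:

 /* Sketch only: the caller's shared-state update must already be visible. */
 WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
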
469 :
470 : /*
471 : * Handle receipt of an interrupt indicating a global barrier event.
472 : *
473 : * All the actual work is deferred to ProcessProcSignalBarrier(), because we
474 : * cannot safely access the barrier generation inside the signal handler as
475 : * 64-bit atomics might use spinlock-based emulation, even for reads. As this
476 : * routine only gets called when PROCSIG_BARRIER is sent, that won't cause a
477 : * lot of unnecessary work.
478 : */
479 : static void
480 700 : HandleProcSignalBarrierInterrupt(void)
481 : {
482 700 : InterruptPending = true;
483 700 : ProcSignalBarrierPending = true;
484 : /* latch will be set by procsignal_sigusr1_handler */
485 700 : }
486 :
487 : /*
488 : * Perform global barrier related interrupt checking.
489 : *
490 : * Any backend that participates in ProcSignal signaling must arrange to
491 : * call this function periodically. It is called from CHECK_FOR_INTERRUPTS(),
492 : * which is enough for normal backends, but not necessarily for all types of
493 : * background processes.
494 : */
495 : void
496 700 : ProcessProcSignalBarrier(void)
497 : {
498 : uint64 local_gen;
499 : uint64 shared_gen;
500 : volatile uint32 flags;
501 :
502 : Assert(MyProcSignalSlot);
503 :
504 : /* Exit quickly if there's no work to do. */
505 700 : if (!ProcSignalBarrierPending)
506 0 : return;
507 700 : ProcSignalBarrierPending = false;
508 :
509 : /*
510 : * It's quite possible that we will process multiple barriers at once,
511 : * before the signals for all of them have arrived. To avoid unnecessary
512 : * work in response to subsequent signals, exit early if we have already
513 : * processed all of them.
514 : */
515 700 : local_gen = pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration);
516 700 : shared_gen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
517 :
518 : Assert(local_gen <= shared_gen);
519 :
520 700 : if (local_gen == shared_gen)
521 0 : return;
522 :
523 : /*
524 : * Get and clear the flags that are set for this backend. Note that
525 : * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
526 : * read of the barrier generation above happens before we atomically
527 : * extract the flags, and that any subsequent state changes happen
528 : * afterward.
529 : *
530 : * NB: In order to avoid race conditions, we must zero
531 : * pss_barrierCheckMask first and only afterwards try to do barrier
532 : * processing. If we did it in the other order, someone could send us
533 : * another barrier of some type right after we called the
534 : * barrier-processing function but before we cleared the bit. We would
535 : * have no way of knowing that the bit needs to stay set in that case, so
536 : * the need to call the barrier-processing function again would just get
537 : * forgotten. So instead, we tentatively clear all the bits and then put
538 : * back any for which we don't manage to successfully absorb the barrier.
539 : */
540 700 : flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
541 :
542 : /*
543 : * If there are no flags set, then we can skip doing any real work.
544 : * Otherwise, establish a PG_TRY block, so that we don't lose track of
545 : * which types of barrier processing are needed if an ERROR occurs.
546 : */
547 700 : if (flags != 0)
548 : {
549 700 : bool success = true;
550 :
551 700 : PG_TRY();
552 : {
553 : /*
554 : * Process each type of barrier. The barrier-processing functions
555 : * should normally return true, but may return false if the
556 : * barrier can't be absorbed at the current time. This should be
557 : * rare, because it's pretty expensive. Every single
558 : * CHECK_FOR_INTERRUPTS() will return here until we manage to
559 : * absorb the barrier, and that cost will add up in a hurry.
560 : *
561 : * NB: It ought to be OK to call the barrier-processing functions
562 : * unconditionally, but it's more efficient to call only the ones
563 : * that might need us to do something based on the flags.
564 : */
565 1400 : while (flags != 0)
566 : {
567 : ProcSignalBarrierType type;
568 700 : bool processed = true;
569 :
570 700 : type = (ProcSignalBarrierType) pg_rightmost_one_pos32(flags);
571 700 : switch (type)
572 : {
573 700 : case PROCSIGNAL_BARRIER_SMGRRELEASE:
574 700 : processed = ProcessBarrierSmgrRelease();
575 700 : break;
576 : }
577 :
578 : /*
579 : * To avoid an infinite loop, we must always unset the bit in
580 : * flags.
581 : */
582 700 : BARRIER_CLEAR_BIT(flags, type);
583 :
584 : /*
585 : * If we failed to process the barrier, reset the shared bit
586 : * so we try again later, and set a flag so that we don't bump
587 : * our generation.
588 : */
589 700 : if (!processed)
590 : {
591 0 : ResetProcSignalBarrierBits(((uint32) 1) << type);
592 0 : success = false;
593 : }
594 : }
595 : }
596 0 : PG_CATCH();
597 : {
598 : /*
599 : * If an ERROR occurred, we'll need to try again later to handle
600 : * that barrier type and any others that haven't been handled yet
601 : * or weren't successfully absorbed.
602 : */
603 0 : ResetProcSignalBarrierBits(flags);
604 0 : PG_RE_THROW();
605 : }
606 700 : PG_END_TRY();
607 :
608 : /*
609 : * If some barrier types were not successfully absorbed, we will have
610 : * to try again later.
611 : */
612 700 : if (!success)
613 0 : return;
614 : }
615 :
616 : /*
617 : * State changes related to all types of barriers that might have been
618 : * emitted have now been handled, so we can update our notion of the
619 : * generation to the one we observed before beginning the updates. If
620 : * things have changed further, it'll get fixed up when this function is
621 : * next called.
622 : */
623 700 : pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, shared_gen);
624 700 : ConditionVariableBroadcast(&MyProcSignalSlot->pss_barrierCV);
625 : }
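
As noted above, process types that don't reach CHECK_FOR_INTERRUPTS() must absorb barriers themselves. A hedged sketch of the check such a main loop would perform (the surrounding loop body and latch wait are placeholders):

 for (;;)
 {
     /* Absorb any pending barrier before doing real work (sketch only). */
     if (ProcSignalBarrierPending)
         ProcessProcSignalBarrier();

     /* ... perform one unit of work, then wait on MyLatch ... */
 }
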
626 :
627 : /*
628 : * If it turns out that we couldn't absorb one or more barrier types, either
629 : * because the barrier-processing functions returned false or due to an error,
630 : * arrange for processing to be retried later.
631 : */
632 : static void
633 0 : ResetProcSignalBarrierBits(uint32 flags)
634 : {
635 0 : pg_atomic_fetch_or_u32(&MyProcSignalSlot->pss_barrierCheckMask, flags);
636 0 : ProcSignalBarrierPending = true;
637 0 : InterruptPending = true;
638 0 : }
639 :
640 : /*
641 : * CheckProcSignal - check to see if a particular reason has been
642 : * signaled, and clear the signal flag. Should be called after receiving
643 : * SIGUSR1.
644 : */
645 : static bool
646 259168 : CheckProcSignal(ProcSignalReason reason)
647 : {
648 259168 : volatile ProcSignalSlot *slot = MyProcSignalSlot;
649 :
650 259168 : if (slot != NULL)
651 : {
652 : /*
653 : * Careful here --- don't clear flag if we haven't seen it set.
654 : * pss_signalFlags is of type "volatile sig_atomic_t" to allow us to
655 : * read it here safely, without holding the spinlock.
656 : */
657 259000 : if (slot->pss_signalFlags[reason])
658 : {
659 9664 : slot->pss_signalFlags[reason] = false;
660 9664 : return true;
661 : }
662 : }
663 :
664 249504 : return false;
665 : }
666 :
667 : /*
668 : * procsignal_sigusr1_handler - handle SIGUSR1 signal.
669 : */
670 : void
671 18512 : procsignal_sigusr1_handler(SIGNAL_ARGS)
672 : {
673 18512 : if (CheckProcSignal(PROCSIG_CATCHUP_INTERRUPT))
674 5286 : HandleCatchupInterrupt();
675 :
676 18512 : if (CheckProcSignal(PROCSIG_NOTIFY_INTERRUPT))
677 40 : HandleNotifyInterrupt();
678 :
679 18512 : if (CheckProcSignal(PROCSIG_PARALLEL_MESSAGE))
680 3486 : HandleParallelMessageInterrupt();
681 :
682 18512 : if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING))
683 68 : HandleWalSndInitStopping();
684 :
685 18512 : if (CheckProcSignal(PROCSIG_BARRIER))
686 700 : HandleProcSignalBarrierInterrupt();
687 :
688 18512 : if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT))
689 18 : HandleLogMemoryContextInterrupt();
690 :
691 18512 : if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE))
692 28 : HandleParallelApplyMessageInterrupt();
693 :
694 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE))
695 4 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE);
696 :
697 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_TABLESPACE))
698 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
699 :
700 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOCK))
701 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOCK);
702 :
703 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT))
704 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
705 :
706 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT))
707 10 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_LOGICALSLOT);
708 :
709 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK))
710 16 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
711 :
712 18512 : if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN))
713 2 : HandleRecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
714 :
715 18512 : SetLatch(MyLatch);
716 18512 : }
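
A process opts into this multiplexed SIGUSR1 handling by installing the handler during startup, typically before signals are unblocked; a hedged sketch of the usual idiom:

 /* Sketch: done once during early startup, alongside the other signal handlers. */
 pqsignal(SIGUSR1, procsignal_sigusr1_handler);
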
717 :
718 : /*
719 : * Send a query cancellation signal to backend.
720 : *
721 : * Note: This is called from a backend process before authentication. We
722 : * cannot take LWLocks yet, but that's OK; we rely on atomic reads of the
723 : * fields in the ProcSignal slots.
724 : */
725 : void
726 18 : SendCancelRequest(int backendPID, int32 cancelAuthCode)
727 : {
728 : Assert(backendPID != 0);
729 :
730 : /*
731 : * See if we have a matching backend. Reading the pss_pid and
732 : * pss_cancel_key fields is racy; a backend might die and remove itself
733 : * from the array at any time. The probability of the cancellation key
734 : * matching the wrong process is minuscule, however, so we can live with
735 : * that. PIDs are reused too, so sending the signal based on PID is
736 : * inherently racy anyway, although OSes avoid reusing PIDs too soon.
737 : */
738 188 : for (int i = 0; i < NumProcSignalSlots; i++)
739 : {
740 188 : ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
741 : bool match;
742 :
743 188 : if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
744 170 : continue;
745 :
746 : /* Acquire the spinlock and re-check */
747 18 : SpinLockAcquire(&slot->pss_mutex);
748 18 : if (pg_atomic_read_u32(&slot->pss_pid) != backendPID)
749 : {
750 0 : SpinLockRelease(&slot->pss_mutex);
751 0 : continue;
752 : }
753 : else
754 : {
755 18 : match = slot->pss_cancel_key_valid && slot->pss_cancel_key == cancelAuthCode;
756 :
757 18 : SpinLockRelease(&slot->pss_mutex);
758 :
759 18 : if (match)
760 : {
761 : /* Found a match; signal that backend to cancel current op */
762 18 : ereport(DEBUG2,
763 : (errmsg_internal("processing cancel request: sending SIGINT to process %d",
764 : backendPID)));
765 :
766 : /*
767 : * If we have setsid(), signal the backend's whole process
768 : * group
769 : */
770 : #ifdef HAVE_SETSID
771 18 : kill(-backendPID, SIGINT);
772 : #else
773 : kill(backendPID, SIGINT);
774 : #endif
775 : }
776 : else
777 : {
778 : /* Right PID, wrong key: no way, Jose */
779 0 : ereport(LOG,
780 : (errmsg("wrong key in cancel request for process %d",
781 : backendPID)));
782 : }
783 18 : return;
784 : }
785 : }
786 :
787 : /* No matching backend */
788 0 : ereport(LOG,
789 : (errmsg("PID %d in cancel request did not match any process",
790 : backendPID)));
791 : }