Line data Source code
1 : /*--------------------------------------------------------------------
2 : * bgworker.c
3 : * POSTGRES pluggable background workers implementation
4 : *
5 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
6 : *
7 : * IDENTIFICATION
8 : * src/backend/postmaster/bgworker.c
9 : *
10 : *-------------------------------------------------------------------------
11 : */
12 :
13 : #include "postgres.h"
14 :
15 : #include "access/parallel.h"
16 : #include "libpq/pqsignal.h"
17 : #include "miscadmin.h"
18 : #include "pgstat.h"
19 : #include "port/atomics.h"
20 : #include "postmaster/bgworker_internals.h"
21 : #include "postmaster/interrupt.h"
22 : #include "postmaster/postmaster.h"
23 : #include "replication/logicallauncher.h"
24 : #include "replication/logicalworker.h"
25 : #include "storage/dsm.h"
26 : #include "storage/ipc.h"
27 : #include "storage/latch.h"
28 : #include "storage/lwlock.h"
29 : #include "storage/pg_shmem.h"
30 : #include "storage/pmsignal.h"
31 : #include "storage/proc.h"
32 : #include "storage/procsignal.h"
33 : #include "storage/shmem.h"
34 : #include "tcop/tcopprot.h"
35 : #include "utils/ascii.h"
36 : #include "utils/ps_status.h"
37 : #include "utils/timeout.h"
38 :
39 : /*
40 : * The postmaster's list of registered background workers, in private memory.
41 : */
42 : slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList);
43 :
44 : /*
45 : * BackgroundWorkerSlots exist in shared memory and can be accessed (via
46 : * the BackgroundWorkerArray) by both the postmaster and by regular backends.
47 : * However, the postmaster cannot take locks, even spinlocks, because this
48 : * might allow it to crash or become wedged if shared memory gets corrupted.
49 : * Such an outcome is intolerable. Therefore, we need a lockless protocol
50 : * for coordinating access to this data.
51 : *
52 : * The 'in_use' flag is used to hand off responsibility for the slot between
53 : * the postmaster and the rest of the system. When 'in_use' is false,
54 : * the postmaster will ignore the slot entirely, except for the 'in_use' flag
55 : * itself, which it may read. In this state, regular backends may modify the
56 : * slot. Once a backend sets 'in_use' to true, the slot becomes the
57 : * responsibility of the postmaster. Regular backends may no longer modify it,
58 : * but the postmaster may examine it. Thus, a backend initializing a slot
59 : * must fully initialize the slot - and insert a write memory barrier - before
60 : * marking it as in use.
61 : *
62 : * As an exception, however, even when the slot is in use, regular backends
63 : * may set the 'terminate' flag for a slot, telling the postmaster not
64 : * to restart it. Once the background worker is no longer running, the slot
65 : * will be released for reuse.
66 : *
67 : * In addition to coordinating with the postmaster, backends modifying this
68 : * data structure must coordinate with each other. Since they can take locks,
69 : * this is straightforward: any backend wishing to manipulate a slot must
70 : * take BackgroundWorkerLock in exclusive mode. Backends wishing to read
71 : * data that might get concurrently modified by other backends should take
72 : * this lock in shared mode. No matter what, backends reading this data
73 : * structure must be able to tolerate concurrent modifications by the
74 : * postmaster.
75 : */
76 : typedef struct BackgroundWorkerSlot
77 : {
78 : bool in_use;
79 : bool terminate;
80 : pid_t pid; /* InvalidPid = not started yet; 0 = dead */
81 : uint64 generation; /* incremented when slot is recycled */
82 : BackgroundWorker worker;
83 : } BackgroundWorkerSlot;
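/*
 * Illustrative sketch (not part of this file) of the hand-off ordering the
 * comment above describes.  A backend claiming a free slot effectively does
 *
 *     slot->worker = *worker;        (fill in every field first)
 *     pg_write_barrier();
 *     slot->in_use = true;
 *
 * while the postmaster, scanning the array, does the mirror image:
 *
 *     if (slot->in_use)
 *     {
 *         pg_read_barrier();
 *         ... only now read the rest of the slot ...
 *     }
 *
 * The real implementations of the two sides are
 * RegisterDynamicBackgroundWorker() and BackgroundWorkerStateChange() below.
 */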
84 :
85 : /*
86 : * In order to limit the total number of parallel workers (according to
87 : * max_parallel_workers GUC), we maintain the number of active parallel
88 : * workers. Since the postmaster cannot take locks, two variables are used for
89 : * this purpose: the number of registered parallel workers (modified by the
90 : * backends, protected by BackgroundWorkerLock) and the number of terminated
91 : * parallel workers (modified only by the postmaster, lockless). The active
92 : * number of parallel workers is the number of registered workers minus the
93 : * terminated ones. These counters can of course overflow, but it's not
94 : * important here since the subtraction will still give the right number.
95 : */
96 : typedef struct BackgroundWorkerArray
97 : {
98 : int total_slots;
99 : uint32 parallel_register_count;
100 : uint32 parallel_terminate_count;
101 : BackgroundWorkerSlot slot[FLEXIBLE_ARRAY_MEMBER];
102 : } BackgroundWorkerArray;
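/*
 * Illustrative sketch (not part of this file): the active parallel worker
 * count described above is obtained by unsigned subtraction, so wraparound
 * of either counter is harmless:
 *
 *     uint32 active = BackgroundWorkerData->parallel_register_count -
 *                     BackgroundWorkerData->parallel_terminate_count;
 *
 * Even if parallel_register_count has wrapped past UINT32_MAX while
 * parallel_terminate_count has not, modular arithmetic still yields the
 * small difference representing the number of active parallel workers; see
 * the check in RegisterDynamicBackgroundWorker() below.
 */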
103 :
104 : struct BackgroundWorkerHandle
105 : {
106 : int slot;
107 : uint64 generation;
108 : };
109 :
110 : static BackgroundWorkerArray *BackgroundWorkerData;
111 :
112 : /*
113 : * List of internal background worker entry points. We need this for
114 : * reasons explained in LookupBackgroundWorkerFunction(), below.
115 : */
116 : static const struct
117 : {
118 : const char *fn_name;
119 : bgworker_main_type fn_addr;
120 : } InternalBGWorkers[] =
121 :
122 : {
123 : {
124 : "ParallelWorkerMain", ParallelWorkerMain
125 : },
126 : {
127 : "ApplyLauncherMain", ApplyLauncherMain
128 : },
129 : {
130 : "ApplyWorkerMain", ApplyWorkerMain
131 : },
132 : {
133 : "ParallelApplyWorkerMain", ParallelApplyWorkerMain
134 : }
135 : };
136 :
137 : /* Private functions. */
138 : static bgworker_main_type LookupBackgroundWorkerFunction(const char *libraryname, const char *funcname);
139 :
140 :
141 : /*
142 : * Calculate shared memory needed.
143 : */
144 : Size
145 9092 : BackgroundWorkerShmemSize(void)
146 : {
147 : Size size;
148 :
149 : /* Array of workers is variably sized. */
150 9092 : size = offsetof(BackgroundWorkerArray, slot);
151 9092 : size = add_size(size, mul_size(max_worker_processes,
152 : sizeof(BackgroundWorkerSlot)));
153 :
154 9092 : return size;
155 : }
156 :
157 : /*
158 : * Initialize shared memory.
159 : */
160 : void
161 3636 : BackgroundWorkerShmemInit(void)
162 : {
163 : bool found;
164 :
165 3636 : BackgroundWorkerData = ShmemInitStruct("Background Worker Data",
166 : BackgroundWorkerShmemSize(),
167 : &found);
168 3636 : if (!IsUnderPostmaster)
169 : {
170 : slist_iter siter;
171 3636 : int slotno = 0;
172 :
173 3636 : BackgroundWorkerData->total_slots = max_worker_processes;
174 3636 : BackgroundWorkerData->parallel_register_count = 0;
175 3636 : BackgroundWorkerData->parallel_terminate_count = 0;
176 :
177 : /*
178 : * Copy contents of worker list into shared memory. Record the shared
179 : * memory slot assigned to each worker. This ensures a 1-to-1
180 : * correspondence between the postmaster's private list and the array
181 : * in shared memory.
182 : */
183 4838 : slist_foreach(siter, &BackgroundWorkerList)
184 : {
185 1202 : BackgroundWorkerSlot *slot = &BackgroundWorkerData->slot[slotno];
186 : RegisteredBgWorker *rw;
187 :
188 1202 : rw = slist_container(RegisteredBgWorker, rw_lnode, siter.cur);
189 : Assert(slotno < max_worker_processes);
190 1202 : slot->in_use = true;
191 1202 : slot->terminate = false;
192 1202 : slot->pid = InvalidPid;
193 1202 : slot->generation = 0;
194 1202 : rw->rw_shmem_slot = slotno;
195 1202 : rw->rw_worker.bgw_notify_pid = 0; /* might be reinit after crash */
196 1202 : memcpy(&slot->worker, &rw->rw_worker, sizeof(BackgroundWorker));
197 1202 : ++slotno;
198 : }
199 :
200 : /*
201 : * Mark any remaining slots as not in use.
202 : */
203 31522 : while (slotno < max_worker_processes)
204 : {
205 27886 : BackgroundWorkerSlot *slot = &BackgroundWorkerData->slot[slotno];
206 :
207 27886 : slot->in_use = false;
208 27886 : ++slotno;
209 : }
210 : }
211 : else
212 : Assert(found);
213 3636 : }
214 :
215 : /*
216 : * Search the postmaster's backend-private list of RegisteredBgWorker objects
217 : * for the one that maps to the given slot number.
218 : */
219 : static RegisteredBgWorker *
220 6192 : FindRegisteredWorkerBySlotNumber(int slotno)
221 : {
222 : slist_iter siter;
223 :
224 14876 : slist_foreach(siter, &BackgroundWorkerList)
225 : {
226 : RegisteredBgWorker *rw;
227 :
228 11604 : rw = slist_container(RegisteredBgWorker, rw_lnode, siter.cur);
229 11604 : if (rw->rw_shmem_slot == slotno)
230 2920 : return rw;
231 : }
232 :
233 3272 : return NULL;
234 : }
235 :
236 : /*
237 : * Notice changes to shared memory made by other backends.
238 : * Accept new worker requests only if allow_new_workers is true.
239 : *
240 : * This code runs in the postmaster, so we must be very careful not to assume
241 : * that shared memory contents are sane. Otherwise, a rogue backend could
242 : * take out the postmaster.
243 : */
244 : void
245 1860 : BackgroundWorkerStateChange(bool allow_new_workers)
246 : {
247 : int slotno;
248 :
249 : /*
250 : * The total number of slots stored in shared memory should match our
251 : * notion of max_worker_processes. If it does not, something is very
252 : * wrong. Further down, we always refer to this value as
253 : * max_worker_processes, in case shared memory gets corrupted while we're
254 : * looping.
255 : */
256 1860 : if (max_worker_processes != BackgroundWorkerData->total_slots)
257 : {
258 0 : ereport(LOG,
259 : (errmsg("inconsistent background worker state (max_worker_processes=%d, total_slots=%d)",
260 : max_worker_processes,
261 : BackgroundWorkerData->total_slots)));
262 0 : return;
263 : }
264 :
265 : /*
266 : * Iterate through slots, looking for newly-registered workers or workers
267 : * who must die.
268 : */
269 16740 : for (slotno = 0; slotno < max_worker_processes; ++slotno)
270 : {
271 14880 : BackgroundWorkerSlot *slot = &BackgroundWorkerData->slot[slotno];
272 : RegisteredBgWorker *rw;
273 :
274 14880 : if (!slot->in_use)
275 8688 : continue;
276 :
277 : /*
278 : * Make sure we don't see the in_use flag before the updated slot
279 : * contents.
280 : */
281 6192 : pg_read_barrier();
282 :
283 : /* See whether we already know about this worker. */
284 6192 : rw = FindRegisteredWorkerBySlotNumber(slotno);
285 6192 : if (rw != NULL)
286 : {
287 : /*
288 : * In general, the worker data can't change after it's initially
289 : * registered. However, someone can set the terminate flag.
290 : */
291 2920 : if (slot->terminate && !rw->rw_terminate)
292 : {
293 6 : rw->rw_terminate = true;
294 6 : if (rw->rw_pid != 0)
295 6 : kill(rw->rw_pid, SIGTERM);
296 : else
297 : {
298 : /* Report never-started, now-terminated worker as dead. */
299 0 : ReportBackgroundWorkerPID(rw);
300 : }
301 : }
302 2920 : continue;
303 : }
304 :
305 : /*
306 : * If we aren't allowing new workers, then immediately mark it for
307 : * termination; the next stanza will take care of cleaning it up.
308 : * Doing this ensures that any process waiting for the worker will get
309 : * awoken, even though the worker will never be allowed to run.
310 : */
311 3272 : if (!allow_new_workers)
312 0 : slot->terminate = true;
313 :
314 : /*
315 : * If the worker is marked for termination, we don't need to add it to
316 : * the registered workers list; we can just free the slot. However, if
317 : * bgw_notify_pid is set, the process that registered the worker may
318 : * need to know that we've processed the terminate request, so be sure
319 : * to signal it.
320 : */
321 3272 : if (slot->terminate)
322 : {
323 : int notify_pid;
324 :
325 : /*
326 : * We need a memory barrier here to make sure that the load of
327 : * bgw_notify_pid and the update of parallel_terminate_count
328 : * complete before the store to in_use.
329 : */
330 0 : notify_pid = slot->worker.bgw_notify_pid;
331 0 : if ((slot->worker.bgw_flags & BGWORKER_CLASS_PARALLEL) != 0)
332 0 : BackgroundWorkerData->parallel_terminate_count++;
333 0 : slot->pid = 0;
334 :
335 0 : pg_memory_barrier();
336 0 : slot->in_use = false;
337 :
338 0 : if (notify_pid != 0)
339 0 : kill(notify_pid, SIGUSR1);
340 :
341 0 : continue;
342 : }
343 :
344 : /*
345 : * Copy the registration data into the registered workers list.
346 : */
347 3272 : rw = malloc(sizeof(RegisteredBgWorker));
348 3272 : if (rw == NULL)
349 : {
350 0 : ereport(LOG,
351 : (errcode(ERRCODE_OUT_OF_MEMORY),
352 : errmsg("out of memory")));
353 0 : return;
354 : }
355 :
356 : /*
357 : * Copy strings in a paranoid way. If shared memory is corrupted, the
358 : * source data might not even be NUL-terminated.
359 : */
360 3272 : ascii_safe_strlcpy(rw->rw_worker.bgw_name,
361 3272 : slot->worker.bgw_name, BGW_MAXLEN);
362 3272 : ascii_safe_strlcpy(rw->rw_worker.bgw_type,
363 3272 : slot->worker.bgw_type, BGW_MAXLEN);
364 3272 : ascii_safe_strlcpy(rw->rw_worker.bgw_library_name,
365 3272 : slot->worker.bgw_library_name, BGW_MAXLEN);
366 3272 : ascii_safe_strlcpy(rw->rw_worker.bgw_function_name,
367 3272 : slot->worker.bgw_function_name, BGW_MAXLEN);
368 :
369 : /*
370 : * Copy various fixed-size fields.
371 : *
372 : * flags, start_time, and restart_time are examined by the postmaster,
373 : * but nothing too bad will happen if they are corrupted. The
374 : * remaining fields will only be examined by the child process. It
375 : * might crash, but we won't.
376 : */
377 3272 : rw->rw_worker.bgw_flags = slot->worker.bgw_flags;
378 3272 : rw->rw_worker.bgw_start_time = slot->worker.bgw_start_time;
379 3272 : rw->rw_worker.bgw_restart_time = slot->worker.bgw_restart_time;
380 3272 : rw->rw_worker.bgw_main_arg = slot->worker.bgw_main_arg;
381 3272 : memcpy(rw->rw_worker.bgw_extra, slot->worker.bgw_extra, BGW_EXTRALEN);
382 :
383 : /*
384 : * Copy the PID to be notified about state changes, but only if the
385 : * postmaster knows about a backend with that PID. It isn't an error
386 : * if the postmaster doesn't know about the PID, because the backend
387 : * that requested the worker could have died (or been killed) just
388 : * after doing so. Nonetheless, at least until we get some experience
389 : * with how this plays out in the wild, log a message at a relatively
390 : * high debug level.
391 : */
392 3272 : rw->rw_worker.bgw_notify_pid = slot->worker.bgw_notify_pid;
393 3272 : if (!PostmasterMarkPIDForWorkerNotify(rw->rw_worker.bgw_notify_pid))
394 : {
395 0 : elog(DEBUG1, "worker notification PID %d is not valid",
396 : (int) rw->rw_worker.bgw_notify_pid);
397 0 : rw->rw_worker.bgw_notify_pid = 0;
398 : }
399 :
400 : /* Initialize postmaster bookkeeping. */
401 3272 : rw->rw_backend = NULL;
402 3272 : rw->rw_pid = 0;
403 3272 : rw->rw_child_slot = 0;
404 3272 : rw->rw_crashed_at = 0;
405 3272 : rw->rw_shmem_slot = slotno;
406 3272 : rw->rw_terminate = false;
407 :
408 : /* Log it! */
409 3272 : ereport(DEBUG1,
410 : (errmsg_internal("registering background worker \"%s\"",
411 : rw->rw_worker.bgw_name)));
412 :
413 3272 : slist_push_head(&BackgroundWorkerList, &rw->rw_lnode);
414 : }
415 : }
416 :
417 : /*
418 : * Forget about a background worker that's no longer needed.
419 : *
420 : * The worker must be identified by passing an slist_mutable_iter that
421 : * points to it. This convention allows deletion of workers during
422 : * searches of the worker list, and saves having to search the list again.
423 : *
424 : * Caller is responsible for notifying bgw_notify_pid, if appropriate.
425 : *
426 : * This function must be invoked only in the postmaster.
427 : */
428 : void
429 3266 : ForgetBackgroundWorker(slist_mutable_iter *cur)
430 : {
431 : RegisteredBgWorker *rw;
432 : BackgroundWorkerSlot *slot;
433 :
434 3266 : rw = slist_container(RegisteredBgWorker, rw_lnode, cur->cur);
435 :
436 : Assert(rw->rw_shmem_slot < max_worker_processes);
437 3266 : slot = &BackgroundWorkerData->slot[rw->rw_shmem_slot];
438 : Assert(slot->in_use);
439 :
440 : /*
441 : * We need a memory barrier here to make sure that the update of
442 : * parallel_terminate_count completes before the store to in_use.
443 : */
444 3266 : if ((rw->rw_worker.bgw_flags & BGWORKER_CLASS_PARALLEL) != 0)
445 2606 : BackgroundWorkerData->parallel_terminate_count++;
446 :
447 3266 : pg_memory_barrier();
448 3266 : slot->in_use = false;
449 :
450 3266 : ereport(DEBUG1,
451 : (errmsg_internal("unregistering background worker \"%s\"",
452 : rw->rw_worker.bgw_name)));
453 :
454 3266 : slist_delete_current(cur);
455 3266 : free(rw);
456 3266 : }
457 :
458 : /*
459 : * Report the PID of a newly-launched background worker in shared memory.
460 : *
461 : * This function should only be called from the postmaster.
462 : */
463 : void
464 4316 : ReportBackgroundWorkerPID(RegisteredBgWorker *rw)
465 : {
466 : BackgroundWorkerSlot *slot;
467 :
468 : Assert(rw->rw_shmem_slot < max_worker_processes);
469 4316 : slot = &BackgroundWorkerData->slot[rw->rw_shmem_slot];
470 4316 : slot->pid = rw->rw_pid;
471 :
472 4316 : if (rw->rw_worker.bgw_notify_pid != 0)
473 3272 : kill(rw->rw_worker.bgw_notify_pid, SIGUSR1);
474 4316 : }
475 :
476 : /*
477 : * Report that the PID of a background worker is now zero because a
478 : * previously-running background worker has exited.
479 : *
480 : * This function should only be called from the postmaster.
481 : */
482 : void
483 3892 : ReportBackgroundWorkerExit(slist_mutable_iter *cur)
484 : {
485 : RegisteredBgWorker *rw;
486 : BackgroundWorkerSlot *slot;
487 : int notify_pid;
488 :
489 3892 : rw = slist_container(RegisteredBgWorker, rw_lnode, cur->cur);
490 :
491 : Assert(rw->rw_shmem_slot < max_worker_processes);
492 3892 : slot = &BackgroundWorkerData->slot[rw->rw_shmem_slot];
493 3892 : slot->pid = rw->rw_pid;
494 3892 : notify_pid = rw->rw_worker.bgw_notify_pid;
495 :
496 : /*
497 : * If this worker is slated for deregistration, do that before notifying
498 : * the process which started it. Otherwise, if that process tries to
499 : * reuse the slot immediately, it might not be available yet. In theory
500 : * that could happen anyway if the process checks slot->pid at just the
501 : * wrong moment, but this makes the window narrower.
502 : */
503 3892 : if (rw->rw_terminate ||
504 892 : rw->rw_worker.bgw_restart_time == BGW_NEVER_RESTART)
505 3266 : ForgetBackgroundWorker(cur);
506 :
507 3892 : if (notify_pid != 0)
508 3222 : kill(notify_pid, SIGUSR1);
509 3892 : }
510 :
511 : /*
512 : * Cancel SIGUSR1 notifications for a PID belonging to an exiting backend.
513 : *
514 : * This function should only be called from the postmaster.
515 : */
516 : void
517 368 : BackgroundWorkerStopNotifications(pid_t pid)
518 : {
519 : slist_iter siter;
520 :
521 1082 : slist_foreach(siter, &BackgroundWorkerList)
522 : {
523 : RegisteredBgWorker *rw;
524 :
525 714 : rw = slist_container(RegisteredBgWorker, rw_lnode, siter.cur);
526 714 : if (rw->rw_worker.bgw_notify_pid == pid)
527 36 : rw->rw_worker.bgw_notify_pid = 0;
528 : }
529 368 : }
530 :
531 : /*
532 : * Cancel any not-yet-started worker requests that have waiting processes.
533 : *
534 : * This is called during a normal ("smart" or "fast") database shutdown.
535 : * After this point, no new background workers will be started, so anything
536 : * that might be waiting for them needs to be kicked off its wait. We do
537 : * that by canceling the bgworker registration entirely, which is perhaps
538 : * overkill, but since we're shutting down it does not matter whether the
539 : * registration record sticks around.
540 : *
541 : * This function should only be called from the postmaster.
542 : */
543 : void
544 688 : ForgetUnstartedBackgroundWorkers(void)
545 : {
546 : slist_mutable_iter iter;
547 :
548 1434 : slist_foreach_modify(iter, &BackgroundWorkerList)
549 : {
550 : RegisteredBgWorker *rw;
551 : BackgroundWorkerSlot *slot;
552 :
553 746 : rw = slist_container(RegisteredBgWorker, rw_lnode, iter.cur);
554 : Assert(rw->rw_shmem_slot < max_worker_processes);
555 746 : slot = &BackgroundWorkerData->slot[rw->rw_shmem_slot];
556 :
557 : /* If it's not yet started, and there's someone waiting ... */
558 746 : if (slot->pid == InvalidPid &&
559 62 : rw->rw_worker.bgw_notify_pid != 0)
560 : {
561 : /* ... then zap it, and notify the waiter */
562 0 : int notify_pid = rw->rw_worker.bgw_notify_pid;
563 :
564 0 : ForgetBackgroundWorker(&iter);
565 0 : if (notify_pid != 0)
566 0 : kill(notify_pid, SIGUSR1);
567 : }
568 : }
569 688 : }
570 :
571 : /*
572 : * Reset background worker crash state.
573 : *
574 : * We assume that, after a crash-and-restart cycle, background workers without
575 : * the never-restart flag should be restarted immediately, instead of waiting
576 : * for bgw_restart_time to elapse. On the other hand, workers with that flag
577 : * should be forgotten immediately, since we won't ever restart them.
578 : *
579 : * This function should only be called from the postmaster.
580 : */
581 : void
582 8 : ResetBackgroundWorkerCrashTimes(void)
583 : {
584 : slist_mutable_iter iter;
585 :
586 16 : slist_foreach_modify(iter, &BackgroundWorkerList)
587 : {
588 : RegisteredBgWorker *rw;
589 :
590 8 : rw = slist_container(RegisteredBgWorker, rw_lnode, iter.cur);
591 :
592 8 : if (rw->rw_worker.bgw_restart_time == BGW_NEVER_RESTART)
593 : {
594 : /*
595 : * Workers marked BGW_NEVER_RESTART shouldn't get relaunched after
596 : * the crash, so forget about them. (If we wait until after the
597 : * crash to forget about them, and they are parallel workers,
598 : * parallel_terminate_count will get incremented after we've
599 : * already zeroed parallel_register_count, which would be bad.)
600 : */
601 0 : ForgetBackgroundWorker(&iter);
602 : }
603 : else
604 : {
605 : /*
606 : * The accounting which we do via parallel_register_count and
607 : * parallel_terminate_count would get messed up if a worker marked
608 : * parallel could survive a crash and restart cycle. All such
609 : * workers should be marked BGW_NEVER_RESTART, and thus control
610 : * should never reach this branch.
611 : */
612 : Assert((rw->rw_worker.bgw_flags & BGWORKER_CLASS_PARALLEL) == 0);
613 :
614 : /*
615 : * Allow this worker to be restarted immediately after we finish
616 : * resetting.
617 : */
618 8 : rw->rw_crashed_at = 0;
619 :
620 : /*
621 : * If there was anyone waiting for it, they're history.
622 : */
623 8 : rw->rw_worker.bgw_notify_pid = 0;
624 : }
625 : }
626 8 : }
627 :
628 : #ifdef EXEC_BACKEND
629 : /*
630 : * In EXEC_BACKEND mode, workers use this to retrieve their details from
631 : * shared memory.
632 : */
633 : BackgroundWorker *
634 : BackgroundWorkerEntry(int slotno)
635 : {
636 : static BackgroundWorker myEntry;
637 : BackgroundWorkerSlot *slot;
638 :
639 : Assert(slotno < BackgroundWorkerData->total_slots);
640 : slot = &BackgroundWorkerData->slot[slotno];
641 : Assert(slot->in_use);
642 :
643 : /* must copy this in case we don't intend to retain shmem access */
644 : memcpy(&myEntry, &slot->worker, sizeof myEntry);
645 : return &myEntry;
646 : }
647 : #endif
648 :
649 : /*
650 : * Complain about the BackgroundWorker definition using error level elevel.
651 : * Return true if it looks ok, false if not (unless elevel >= ERROR, in
652 : * which case we won't return at all in the not-OK case).
653 : */
654 : static bool
655 4396 : SanityCheckBackgroundWorker(BackgroundWorker *worker, int elevel)
656 : {
657 : /* sanity check for flags */
658 :
659 : /*
660 : * We used to support workers not connected to shared memory, but don't
661 : * anymore. Thus this is a required flag now. We're not removing the flag
662 : * for compatibility reasons and because the flag still provides some
663 : * signal when reading code.
664 : */
665 4396 : if (!(worker->bgw_flags & BGWORKER_SHMEM_ACCESS))
666 : {
667 0 : ereport(elevel,
668 : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
669 : errmsg("background worker \"%s\": background workers without shared memory access are not supported",
670 : worker->bgw_name)));
671 0 : return false;
672 : }
673 :
674 4396 : if (worker->bgw_flags & BGWORKER_BACKEND_DATABASE_CONNECTION)
675 : {
676 4378 : if (worker->bgw_start_time == BgWorkerStart_PostmasterStart)
677 : {
678 0 : ereport(elevel,
679 : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
680 : errmsg("background worker \"%s\": cannot request database access if starting at postmaster start",
681 : worker->bgw_name)));
682 0 : return false;
683 : }
684 :
685 : /* XXX other checks? */
686 : }
687 :
688 4396 : if ((worker->bgw_restart_time < 0 &&
689 3196 : worker->bgw_restart_time != BGW_NEVER_RESTART) ||
690 4396 : (worker->bgw_restart_time > USECS_PER_DAY / 1000))
691 : {
692 0 : ereport(elevel,
693 : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
694 : errmsg("background worker \"%s\": invalid restart interval",
695 : worker->bgw_name)));
696 0 : return false;
697 : }
698 :
699 : /*
700 : * Parallel workers may not be configured for restart, because the
701 : * parallel_register_count/parallel_terminate_count accounting can't
702 : * handle parallel workers lasting through a crash-and-restart cycle.
703 : */
704 4396 : if (worker->bgw_restart_time != BGW_NEVER_RESTART &&
705 1200 : (worker->bgw_flags & BGWORKER_CLASS_PARALLEL) != 0)
706 : {
707 0 : ereport(elevel,
708 : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
709 : errmsg("background worker \"%s\": parallel workers may not be configured for restart",
710 : worker->bgw_name)));
711 0 : return false;
712 : }
713 :
714 : /*
715 : * If bgw_type is not filled in, use bgw_name.
716 : */
717 4396 : if (strcmp(worker->bgw_type, "") == 0)
718 0 : strcpy(worker->bgw_type, worker->bgw_name);
719 :
720 4396 : return true;
721 : }
722 :
723 : /*
724 : * Standard SIGTERM handler for background workers
725 : */
726 : static void
727 0 : bgworker_die(SIGNAL_ARGS)
728 : {
729 0 : sigprocmask(SIG_SETMASK, &BlockSig, NULL);
730 :
731 0 : ereport(FATAL,
732 : (errcode(ERRCODE_ADMIN_SHUTDOWN),
733 : errmsg("terminating background worker \"%s\" due to administrator command",
734 : MyBgworkerEntry->bgw_type)));
735 : }
736 :
737 : /*
738 : * Start a new background worker
739 : *
740 : * This is the main entry point for a background worker, to be called from
741 : * the postmaster.
742 : */
743 : void
744 3898 : StartBackgroundWorker(void)
745 : {
746 : sigjmp_buf local_sigjmp_buf;
747 3898 : BackgroundWorker *worker = MyBgworkerEntry;
748 : bgworker_main_type entrypt;
749 :
750 3898 : if (worker == NULL)
751 0 : elog(FATAL, "unable to find bgworker entry");
752 :
753 3898 : IsBackgroundWorker = true;
754 :
755 3898 : MyBackendType = B_BG_WORKER;
756 3898 : init_ps_display(worker->bgw_name);
757 :
758 3898 : SetProcessingMode(InitProcessing);
759 :
760 : /* Apply PostAuthDelay */
761 3898 : if (PostAuthDelay > 0)
762 0 : pg_usleep(PostAuthDelay * 1000000L);
763 :
764 : /*
765 : * Set up signal handlers.
766 : */
767 3898 : if (worker->bgw_flags & BGWORKER_BACKEND_DATABASE_CONNECTION)
768 : {
769 : /*
770 : * SIGINT is used to signal canceling the current action
771 : */
772 3880 : pqsignal(SIGINT, StatementCancelHandler);
773 3880 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
774 3880 : pqsignal(SIGFPE, FloatExceptionHandler);
775 :
776 : /* XXX Any other handlers needed here? */
777 : }
778 : else
779 : {
780 18 : pqsignal(SIGINT, SIG_IGN);
781 18 : pqsignal(SIGUSR1, SIG_IGN);
782 18 : pqsignal(SIGFPE, SIG_IGN);
783 : }
784 3898 : pqsignal(SIGTERM, bgworker_die);
785 : /* SIGQUIT handler was already set up by InitPostmasterChild */
786 3898 : pqsignal(SIGHUP, SIG_IGN);
787 :
788 3898 : InitializeTimeouts(); /* establishes SIGALRM handler */
789 :
790 3898 : pqsignal(SIGPIPE, SIG_IGN);
791 3898 : pqsignal(SIGUSR2, SIG_IGN);
792 3898 : pqsignal(SIGCHLD, SIG_DFL);
793 :
794 : /*
795 : * If an exception is encountered, processing resumes here.
796 : *
797 : * We just need to clean up, report the error, and go away.
798 : */
799 3898 : if (sigsetjmp(local_sigjmp_buf, 1) != 0)
800 : {
801 : /* Since not using PG_TRY, must reset error stack by hand */
802 128 : error_context_stack = NULL;
803 :
804 : /* Prevent interrupts while cleaning up */
805 128 : HOLD_INTERRUPTS();
806 :
807 : /*
808 : * sigsetjmp will have blocked all signals, but we may need to accept
809 : * signals while communicating with our parallel leader. Once we've
810 : * done HOLD_INTERRUPTS() it should be safe to unblock signals.
811 : */
812 128 : BackgroundWorkerUnblockSignals();
813 :
814 : /* Report the error to the parallel leader and the server log */
815 128 : EmitErrorReport();
816 :
817 : /*
818 : * Do we need more cleanup here? For shmem-connected bgworkers, we
819 : * will call InitProcess below, which will install ProcKill as exit
820 : * callback. That will take care of releasing locks, etc.
821 : */
822 :
823 : /* and go away */
824 128 : proc_exit(1);
825 : }
826 :
827 : /* We can now handle ereport(ERROR) */
828 3898 : PG_exception_stack = &local_sigjmp_buf;
829 :
830 : /*
831 : * Create a per-backend PGPROC struct in shared memory, except in the
832 : * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
833 : * this before we can use LWLocks (and in the EXEC_BACKEND case we already
834 : * had to do some stuff with LWLocks).
835 : */
836 : #ifndef EXEC_BACKEND
837 3898 : InitProcess();
838 : #endif
839 :
840 : /*
841 : * Early initialization.
842 : */
843 3898 : BaseInit();
844 :
845 : /*
846 : * Look up the entry point function, loading its library if necessary.
847 : */
848 7796 : entrypt = LookupBackgroundWorkerFunction(worker->bgw_library_name,
849 3898 : worker->bgw_function_name);
850 :
851 : /*
852 : * Note that in normal processes, we would call InitPostgres here. For a
853 : * worker, however, we don't know what database to connect to, yet; so we
854 : * need to wait until the user code does it via
855 : * BackgroundWorkerInitializeConnection().
856 : */
857 :
858 : /*
859 : * Now invoke the user-defined worker code
860 : */
861 3898 : entrypt(worker->bgw_main_arg);
862 :
863 : /* ... and if it returns, we're done */
864 2606 : proc_exit(0);
865 : }
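/*
 * Illustrative sketch (not part of this file, not compiled): a typical
 * user-supplied entry point of the kind entrypt() above invokes.  It installs
 * a shutdown handler, unblocks signals, optionally connects to a database,
 * and loops on its latch until asked to stop.  The database name "postgres"
 * and the 10-second naptime are arbitrary illustration values.
 */
#ifdef BGWORKER_USAGE_EXAMPLE
void
example_worker_main(Datum main_arg)
{
	/* Arrange for SIGTERM to set ShutdownRequestPending. */
	pqsignal(SIGTERM, SignalHandlerForShutdownRequest);

	/* Workers start with signals blocked; unblock them now. */
	BackgroundWorkerUnblockSignals();

	/* Only valid if BGWORKER_BACKEND_DATABASE_CONNECTION was requested. */
	BackgroundWorkerInitializeConnection("postgres", NULL, 0);

	while (!ShutdownRequestPending)
	{
		/* ... perform one unit of work here ... */

		(void) WaitLatch(MyLatch,
						 WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
						 10 * 1000L,	/* nap for ten seconds */
						 PG_WAIT_EXTENSION);
		ResetLatch(MyLatch);
		CHECK_FOR_INTERRUPTS();
	}

	/* Returning here (like proc_exit(0)) reports a clean exit. */
}
#endif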
866 :
867 : /*
868 : * Register a new static background worker.
869 : *
870 : * This can only be called directly from postmaster or in the _PG_init
871 : * function of a module library that's loaded by shared_preload_libraries;
872 : * otherwise it will have no effect.
873 : */
874 : void
875 1204 : RegisterBackgroundWorker(BackgroundWorker *worker)
876 : {
877 : RegisteredBgWorker *rw;
878 : static int numworkers = 0;
879 :
880 1204 : if (!IsUnderPostmaster)
881 1204 : ereport(DEBUG1,
882 : (errmsg_internal("registering background worker \"%s\"", worker->bgw_name)));
883 :
884 1204 : if (!process_shared_preload_libraries_in_progress &&
885 1196 : strcmp(worker->bgw_library_name, "postgres") != 0)
886 : {
887 0 : if (!IsUnderPostmaster)
888 0 : ereport(LOG,
889 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
890 : errmsg("background worker \"%s\": must be registered in shared_preload_libraries",
891 : worker->bgw_name)));
892 0 : return;
893 : }
894 :
895 1204 : if (!SanityCheckBackgroundWorker(worker, LOG))
896 0 : return;
897 :
898 1204 : if (worker->bgw_notify_pid != 0)
899 : {
900 0 : ereport(LOG,
901 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
902 : errmsg("background worker \"%s\": only dynamic background workers can request notification",
903 : worker->bgw_name)));
904 0 : return;
905 : }
906 :
907 : /*
908 : * Enforce maximum number of workers. Note this is overly restrictive: we
909 : * could allow more non-shmem-connected workers, because these don't count
910 : * towards the MAX_BACKENDS limit elsewhere. For now, it doesn't seem
911 : * important to relax this restriction.
912 : */
913 1204 : if (++numworkers > max_worker_processes)
914 : {
915 0 : ereport(LOG,
916 : (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
917 : errmsg("too many background workers"),
918 : errdetail_plural("Up to %d background worker can be registered with the current settings.",
919 : "Up to %d background workers can be registered with the current settings.",
920 : max_worker_processes,
921 : max_worker_processes),
922 : errhint("Consider increasing the configuration parameter \"max_worker_processes\".")));
923 0 : return;
924 : }
925 :
926 : /*
927 : * Copy the registration data into the registered workers list.
928 : */
929 1204 : rw = malloc(sizeof(RegisteredBgWorker));
930 1204 : if (rw == NULL)
931 : {
932 0 : ereport(LOG,
933 : (errcode(ERRCODE_OUT_OF_MEMORY),
934 : errmsg("out of memory")));
935 0 : return;
936 : }
937 :
938 1204 : rw->rw_worker = *worker;
939 1204 : rw->rw_backend = NULL;
940 1204 : rw->rw_pid = 0;
941 1204 : rw->rw_child_slot = 0;
942 1204 : rw->rw_crashed_at = 0;
943 1204 : rw->rw_terminate = false;
944 :
945 1204 : slist_push_head(&BackgroundWorkerList, &rw->rw_lnode);
946 : }
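/*
 * Illustrative sketch (not part of this file, not compiled): a module listed
 * in shared_preload_libraries would typically register a static worker from
 * its _PG_init() like this.  The names "my_extension" and "my_worker_main"
 * are hypothetical placeholders.
 */
#ifdef BGWORKER_USAGE_EXAMPLE
void
_PG_init(void)
{
	BackgroundWorker worker;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = 60;	/* restart 60 seconds after a crash */
	snprintf(worker.bgw_name, BGW_MAXLEN, "my_extension worker");
	snprintf(worker.bgw_type, BGW_MAXLEN, "my_extension worker");
	snprintf(worker.bgw_library_name, BGW_MAXLEN, "my_extension");
	snprintf(worker.bgw_function_name, BGW_MAXLEN, "my_worker_main");
	worker.bgw_main_arg = (Datum) 0;
	worker.bgw_notify_pid = 0;	/* static workers may not request notification */

	RegisterBackgroundWorker(&worker);
}
#endif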
947 :
948 : /*
949 : * Register a new background worker from a regular backend.
950 : *
951 : * Returns true on success and false on failure. Failure typically indicates
952 : * that no background worker slots are currently available.
953 : *
954 : * If handle != NULL, we'll set *handle to a pointer that can subsequently
955 : * be used as an argument to GetBackgroundWorkerPid(). The caller can
956 : * free this pointer using pfree(), if desired.
957 : */
958 : bool
959 3192 : RegisterDynamicBackgroundWorker(BackgroundWorker *worker,
960 : BackgroundWorkerHandle **handle)
961 : {
962 : int slotno;
963 3192 : bool success = false;
964 : bool parallel;
965 3192 : uint64 generation = 0;
966 :
967 : /*
968 : * We can't register dynamic background workers from the postmaster. If
969 : * this is a standalone backend, we're the only process and can't start
970 : * any more. In a multi-process environment, it might be theoretically
971 : * possible, but we don't currently support it due to locking
972 : * considerations; see comments on the BackgroundWorkerSlot data
973 : * structure.
974 : */
975 3192 : if (!IsUnderPostmaster)
976 0 : return false;
977 :
978 3192 : if (!SanityCheckBackgroundWorker(worker, ERROR))
979 0 : return false;
980 :
981 3192 : parallel = (worker->bgw_flags & BGWORKER_CLASS_PARALLEL) != 0;
982 :
983 3192 : LWLockAcquire(BackgroundWorkerLock, LW_EXCLUSIVE);
984 :
985 : /*
986 : * If this is a parallel worker, check whether there are already too many
987 : * parallel workers; if so, don't register another one. Our view of
988 : * parallel_terminate_count may be slightly stale, but that doesn't really
989 : * matter: we would have gotten the same result if we'd arrived here
990 : * slightly earlier anyway. There's no help for it, either, since the
991 : * postmaster must not take locks; a memory barrier wouldn't guarantee
992 : * anything useful.
993 : */
994 3192 : if (parallel && (BackgroundWorkerData->parallel_register_count -
995 2626 : BackgroundWorkerData->parallel_terminate_count) >=
996 : max_parallel_workers)
997 : {
998 : Assert(BackgroundWorkerData->parallel_register_count -
999 : BackgroundWorkerData->parallel_terminate_count <=
1000 : MAX_PARALLEL_WORKER_LIMIT);
1001 18 : LWLockRelease(BackgroundWorkerLock);
1002 18 : return false;
1003 : }
1004 :
1005 : /*
1006 : * Look for an unused slot. If we find one, grab it.
1007 : */
1008 9812 : for (slotno = 0; slotno < BackgroundWorkerData->total_slots; ++slotno)
1009 : {
1010 9810 : BackgroundWorkerSlot *slot = &BackgroundWorkerData->slot[slotno];
1011 :
1012 9810 : if (!slot->in_use)
1013 : {
1014 3172 : memcpy(&slot->worker, worker, sizeof(BackgroundWorker));
1015 3172 : slot->pid = InvalidPid; /* indicates not started yet */
1016 3172 : slot->generation++;
1017 3172 : slot->terminate = false;
1018 3172 : generation = slot->generation;
1019 3172 : if (parallel)
1020 2606 : BackgroundWorkerData->parallel_register_count++;
1021 :
1022 : /*
1023 : * Make sure postmaster doesn't see the slot as in use before it
1024 : * sees the new contents.
1025 : */
1026 3172 : pg_write_barrier();
1027 :
1028 3172 : slot->in_use = true;
1029 3172 : success = true;
1030 3172 : break;
1031 : }
1032 : }
1033 :
1034 3174 : LWLockRelease(BackgroundWorkerLock);
1035 :
1036 : /* If we found a slot, tell the postmaster to notice the change. */
1037 3174 : if (success)
1038 3172 : SendPostmasterSignal(PMSIGNAL_BACKGROUND_WORKER_CHANGE);
1039 :
1040 : /*
1041 : * If we found a slot and the user has provided a handle, initialize it.
1042 : */
1043 3174 : if (success && handle)
1044 : {
1045 3172 : *handle = palloc(sizeof(BackgroundWorkerHandle));
1046 3172 : (*handle)->slot = slotno;
1047 3172 : (*handle)->generation = generation;
1048 : }
1049 :
1050 3174 : return success;
1051 : }
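/*
 * Illustrative sketch (not part of this file, not compiled): how a regular
 * backend might register a dynamic worker.  "my_extension" and
 * "my_worker_main" are hypothetical; the key points are that bgw_notify_pid
 * must be MyProcPid if the caller intends to wait on the handle, and that a
 * false result just means no slot (or parallel-worker budget) was available.
 */
#ifdef BGWORKER_USAGE_EXAMPLE
static BackgroundWorkerHandle *
register_dynamic_worker(void)
{
	BackgroundWorker worker;
	BackgroundWorkerHandle *handle;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
		BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_ConsistentState;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	snprintf(worker.bgw_name, BGW_MAXLEN, "my_extension dynamic worker");
	snprintf(worker.bgw_type, BGW_MAXLEN, "my_extension dynamic worker");
	snprintf(worker.bgw_library_name, BGW_MAXLEN, "my_extension");
	snprintf(worker.bgw_function_name, BGW_MAXLEN, "my_worker_main");
	worker.bgw_main_arg = Int32GetDatum(0);
	worker.bgw_notify_pid = MyProcPid;	/* we intend to wait on the handle */

	if (!RegisterDynamicBackgroundWorker(&worker, &handle))
		return NULL;			/* no free slot right now */

	return handle;
}
#endif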
1052 :
1053 : /*
1054 : * Get the PID of a dynamically-registered background worker.
1055 : *
1056 : * If the worker is determined to be running, the return value will be
1057 : * BGWH_STARTED and *pidp will get the PID of the worker process. If the
1058 : * postmaster has not yet attempted to start the worker, the return value will
1059 : * be BGWH_NOT_YET_STARTED. Otherwise, the return value is BGWH_STOPPED.
1060 : *
1061 : * BGWH_STOPPED can indicate either that the worker is temporarily stopped
1062 : * (because it is configured for automatic restart and exited non-zero),
1063 : * or that the worker is permanently stopped (because it exited with exit
1064 : * code 0, or was not configured for automatic restart), or even that the
1065 : * worker was unregistered without ever starting (either because startup
1066 : * failed and the worker is not configured for automatic restart, or because
1067 : * TerminateBackgroundWorker was used before the worker was successfully
1068 : * started).
1069 : */
1070 : BgwHandleStatus
1071 17211882 : GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
1072 : {
1073 : BackgroundWorkerSlot *slot;
1074 : pid_t pid;
1075 :
1076 : Assert(handle->slot < max_worker_processes);
1077 17211882 : slot = &BackgroundWorkerData->slot[handle->slot];
1078 :
1079 : /*
1080 : * We could probably arrange to synchronize access to data using memory
1081 : * barriers only, but for now, let's just keep it simple and grab the
1082 : * lock. It seems unlikely that there will be enough traffic here to
1083 : * result in meaningful contention.
1084 : */
1085 17211882 : LWLockAcquire(BackgroundWorkerLock, LW_SHARED);
1086 :
1087 : /*
1088 : * The generation number can't be concurrently changed while we hold the
1089 : * lock. The pid, which is updated by the postmaster, can change at any
1090 : * time, but we assume such changes are atomic. So the value we read
1091 : * won't be garbage, but it might be out of date by the time the caller
1092 : * examines it (but that's unavoidable anyway).
1093 : *
1094 : * The in_use flag could be in the process of changing from true to false,
1095 : * but if it is already false then it can't change further.
1096 : */
1097 17211882 : if (handle->generation != slot->generation || !slot->in_use)
1098 2608 : pid = 0;
1099 : else
1100 17209274 : pid = slot->pid;
1101 :
1102 : /* All done. */
1103 17211882 : LWLockRelease(BackgroundWorkerLock);
1104 :
1105 17211882 : if (pid == 0)
1106 2608 : return BGWH_STOPPED;
1107 17209274 : else if (pid == InvalidPid)
1108 5122492 : return BGWH_NOT_YET_STARTED;
1109 12086782 : *pidp = pid;
1110 12086782 : return BGWH_STARTED;
1111 : }
1112 :
1113 : /*
1114 : * Wait for a background worker to start up.
1115 : *
1116 : * This is like GetBackgroundWorkerPid(), except that if the worker has not
1117 : * yet started, we wait for it to do so; thus, BGWH_NOT_YET_STARTED is never
1118 : * returned. However, if the postmaster has died, we give up and return
1119 : * BGWH_POSTMASTER_DIED, since in that case we know that startup will not
1120 : * take place.
1121 : *
1122 : * The caller *must* have set our PID as the worker's bgw_notify_pid,
1123 : * else we will not be awoken promptly when the worker's state changes.
1124 : */
1125 : BgwHandleStatus
1126 4 : WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *pidp)
1127 : {
1128 : BgwHandleStatus status;
1129 : int rc;
1130 :
1131 : for (;;)
1132 2 : {
1133 : pid_t pid;
1134 :
1135 4 : CHECK_FOR_INTERRUPTS();
1136 :
1137 4 : status = GetBackgroundWorkerPid(handle, &pid);
1138 4 : if (status == BGWH_STARTED)
1139 2 : *pidp = pid;
1140 4 : if (status != BGWH_NOT_YET_STARTED)
1141 2 : break;
1142 :
1143 2 : rc = WaitLatch(MyLatch,
1144 : WL_LATCH_SET | WL_POSTMASTER_DEATH, 0,
1145 : WAIT_EVENT_BGWORKER_STARTUP);
1146 :
1147 2 : if (rc & WL_POSTMASTER_DEATH)
1148 : {
1149 0 : status = BGWH_POSTMASTER_DIED;
1150 0 : break;
1151 : }
1152 :
1153 2 : ResetLatch(MyLatch);
1154 : }
1155 :
1156 2 : return status;
1157 : }
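/*
 * Illustrative sketch (not part of this file, not compiled): waiting for the
 * worker registered in the sketch above to start.  "handle" is the
 * hypothetical handle returned by RegisterDynamicBackgroundWorker(), and our
 * PID must have been stored in the worker's bgw_notify_pid.
 */
#ifdef BGWORKER_USAGE_EXAMPLE
static pid_t
wait_for_worker_startup(BackgroundWorkerHandle *handle)
{
	pid_t		pid = 0;
	BgwHandleStatus status;

	status = WaitForBackgroundWorkerStartup(handle, &pid);

	if (status == BGWH_STOPPED)
		ereport(ERROR,
				(errmsg("could not start background worker"),
				 errhint("More details may be available in the server log.")));
	if (status == BGWH_POSTMASTER_DIED)
		ereport(ERROR,
				(errmsg("cannot start background worker without postmaster")));

	Assert(status == BGWH_STARTED);
	return pid;
}
#endif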
1158 :
1159 : /*
1160 : * Wait for a background worker to stop.
1161 : *
1162 : * If the worker hasn't yet started, or is running, we wait for it to stop
1163 : * and then return BGWH_STOPPED. However, if the postmaster has died, we give
1164 : * up and return BGWH_POSTMASTER_DIED, because it's the postmaster that
1165 : * notifies us when a worker's state changes.
1166 : *
1167 : * The caller *must* have set our PID as the worker's bgw_notify_pid,
1168 : * else we will not be awoken promptly when the worker's state changes.
1169 : */
1170 : BgwHandleStatus
1171 5050 : WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *handle)
1172 : {
1173 : BgwHandleStatus status;
1174 : int rc;
1175 :
1176 : for (;;)
1177 2442 : {
1178 : pid_t pid;
1179 :
1180 5050 : CHECK_FOR_INTERRUPTS();
1181 :
1182 5050 : status = GetBackgroundWorkerPid(handle, &pid);
1183 5050 : if (status == BGWH_STOPPED)
1184 2608 : break;
1185 :
1186 2442 : rc = WaitLatch(MyLatch,
1187 : WL_LATCH_SET | WL_POSTMASTER_DEATH, 0,
1188 : WAIT_EVENT_BGWORKER_SHUTDOWN);
1189 :
1190 2442 : if (rc & WL_POSTMASTER_DEATH)
1191 : {
1192 0 : status = BGWH_POSTMASTER_DIED;
1193 0 : break;
1194 : }
1195 :
1196 2442 : ResetLatch(MyLatch);
1197 : }
1198 :
1199 2608 : return status;
1200 : }
1201 :
1202 : /*
1203 : * Instruct the postmaster to terminate a background worker.
1204 : *
1205 : * Note that it's safe to do this without regard to whether the worker is
1206 : * still running, or even if the worker may already have exited and been
1207 : * unregistered.
1208 : */
1209 : void
1210 6 : TerminateBackgroundWorker(BackgroundWorkerHandle *handle)
1211 : {
1212 : BackgroundWorkerSlot *slot;
1213 6 : bool signal_postmaster = false;
1214 :
1215 : Assert(handle->slot < max_worker_processes);
1216 6 : slot = &BackgroundWorkerData->slot[handle->slot];
1217 :
1218 : /* Set terminate flag in shared memory, unless slot has been reused. */
1219 6 : LWLockAcquire(BackgroundWorkerLock, LW_EXCLUSIVE);
1220 6 : if (handle->generation == slot->generation)
1221 : {
1222 6 : slot->terminate = true;
1223 6 : signal_postmaster = true;
1224 : }
1225 6 : LWLockRelease(BackgroundWorkerLock);
1226 :
1227 : /* Make sure the postmaster notices the change to shared memory. */
1228 6 : if (signal_postmaster)
1229 6 : SendPostmasterSignal(PMSIGNAL_BACKGROUND_WORKER_CHANGE);
1230 6 : }
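/*
 * Illustrative sketch (not part of this file, not compiled): asking the
 * postmaster to stop a dynamic worker and waiting until it is gone.
 * "handle" is the hypothetical handle obtained at registration time, with
 * our PID stored as the worker's bgw_notify_pid.
 */
#ifdef BGWORKER_USAGE_EXAMPLE
static void
stop_worker_and_wait(BackgroundWorkerHandle *handle)
{
	TerminateBackgroundWorker(handle);

	if (WaitForBackgroundWorkerShutdown(handle) == BGWH_POSTMASTER_DIED)
		ereport(WARNING,
				(errmsg("postmaster died before background worker shut down")));

	/* On BGWH_STOPPED, the worker is gone and its slot can be reused. */
}
#endif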
1231 :
1232 : /*
1233 : * Look up (and possibly load) a bgworker entry point function.
1234 : *
1235 : * For functions contained in the core code, we use library name "postgres"
1236 : * and consult the InternalBGWorkers array. External functions are
1237 : * looked up, and loaded if necessary, using load_external_function().
1238 : *
1239 : * The point of this is to pass function names as strings across process
1240 : * boundaries. We can't pass actual function addresses because of the
1241 : * possibility that the function has been loaded at a different address
1242 : * in a different process. This is obviously a hazard for functions in
1243 : * loadable libraries, but it can happen even for functions in the core code
1244 : * on platforms using EXEC_BACKEND (e.g., Windows).
1245 : *
1246 : * At some point it might be worthwhile to get rid of InternalBGWorkers[]
1247 : * in favor of applying load_external_function() for core functions too;
1248 : * but that raises portability issues that are not worth addressing now.
1249 : */
1250 : static bgworker_main_type
1251 3898 : LookupBackgroundWorkerFunction(const char *libraryname, const char *funcname)
1252 : {
1253 : /*
1254 : * If the function is to be loaded from postgres itself, search the
1255 : * InternalBGWorkers array.
1256 : */
1257 3898 : if (strcmp(libraryname, "postgres") == 0)
1258 : {
1259 : int i;
1260 :
1261 5792 : for (i = 0; i < lengthof(InternalBGWorkers); i++)
1262 : {
1263 5792 : if (strcmp(InternalBGWorkers[i].fn_name, funcname) == 0)
1264 3872 : return InternalBGWorkers[i].fn_addr;
1265 : }
1266 :
1267 : /* We can only reach this by programming error. */
1268 0 : elog(ERROR, "internal function \"%s\" not found", funcname);
1269 : }
1270 :
1271 : /* Otherwise load from external library. */
1272 26 : return (bgworker_main_type)
1273 26 : load_external_function(libraryname, funcname, true, NULL);
1274 : }
1275 :
1276 : /*
1277 : * Given a PID, get the bgw_type of the background worker. Returns NULL if
1278 : * not a valid background worker.
1279 : *
1280 : * The return value is in static memory belonging to this function, so it has
1281 : * to be used before calling this function again. This is so that the caller
1282 : * doesn't have to worry about the background worker locking protocol.
1283 : */
1284 : const char *
1285 1148 : GetBackgroundWorkerTypeByPid(pid_t pid)
1286 : {
1287 : int slotno;
1288 1148 : bool found = false;
1289 : static char result[BGW_MAXLEN];
1290 :
1291 1148 : LWLockAcquire(BackgroundWorkerLock, LW_SHARED);
1292 :
1293 1236 : for (slotno = 0; slotno < BackgroundWorkerData->total_slots; slotno++)
1294 : {
1295 1236 : BackgroundWorkerSlot *slot = &BackgroundWorkerData->slot[slotno];
1296 :
1297 1236 : if (slot->pid > 0 && slot->pid == pid)
1298 : {
1299 1148 : strcpy(result, slot->worker.bgw_type);
1300 1148 : found = true;
1301 1148 : break;
1302 : }
1303 : }
1304 :
1305 1148 : LWLockRelease(BackgroundWorkerLock);
1306 :
1307 1148 : if (!found)
1308 0 : return NULL;
1309 :
1310 1148 : return result;
1311 : }