LCOV - code coverage report
Current view: top level - src/backend/storage/ipc - waiteventset.c (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 88.3 % 247 218
Test Date: 2026-03-24 02:15:55 Functions: 100.0 % 17 17
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * waiteventset.c
       4              :  *    ppoll()/pselect() like abstraction
       5              :  *
       6              :  * WaitEvents are an abstraction for waiting for one or more events at a time.
       7              :  * The waiting can be done in a race free fashion, similar ppoll() or
       8              :  * pselect() (as opposed to plain poll()/select()).
       9              :  *
      10              :  * You can wait for:
      11              :  * - a latch being set from another process or from signal handler in the same
      12              :  *   process (WL_LATCH_SET)
      13              :  * - data to become readable or writeable on a socket (WL_SOCKET_*)
      14              :  * - postmaster death (WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH)
      15              :  * - timeout (WL_TIMEOUT)
      16              :  *
      17              :  * Implementation
      18              :  * --------------
      19              :  *
      20              :  * The poll() implementation uses the so-called self-pipe trick to overcome the
      21              :  * race condition involved with poll() and setting a global flag in the signal
      22              :  * handler. When a latch is set and the current process is waiting for it, the
      23              :  * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
      24              :  * A signal by itself doesn't interrupt poll() on all platforms, and even on
      25              :  * platforms where it does, a signal that arrives just before the poll() call
      26              :  * does not prevent poll() from entering sleep. An incoming byte on a pipe
      27              :  * however reliably interrupts the sleep, and causes poll() to return
      28              :  * immediately even if the signal arrives before poll() begins.
      29              :  *
      30              :  * The epoll() implementation overcomes the race with a different technique: it
      31              :  * keeps SIGURG blocked and consumes from a signalfd() descriptor instead.  We
      32              :  * don't need to register a signal handler or create our own self-pipe.  We
      33              :  * assume that any system that has Linux epoll() also has Linux signalfd().
      34              :  *
      35              :  * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
      36              :  *
      37              :  * The Windows implementation uses Windows events that are inherited by all
      38              :  * postmaster child processes. There's no need for the self-pipe trick there.
      39              :  *
      40              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
      41              :  * Portions Copyright (c) 1994, Regents of the University of California
      42              :  *
      43              :  * IDENTIFICATION
      44              :  *    src/backend/storage/ipc/waiteventset.c
      45              :  *
      46              :  *-------------------------------------------------------------------------
      47              :  */
      48              : #include "postgres.h"
      49              : 
      50              : #include <fcntl.h>
      51              : #include <limits.h>
      52              : #include <signal.h>
      53              : #include <unistd.h>
      54              : #ifdef HAVE_SYS_EPOLL_H
      55              : #include <sys/epoll.h>
      56              : #endif
      57              : #ifdef HAVE_SYS_EVENT_H
      58              : #include <sys/event.h>
      59              : #endif
      60              : #ifdef HAVE_SYS_SIGNALFD_H
      61              : #include <sys/signalfd.h>
      62              : #endif
      63              : #ifdef HAVE_POLL_H
      64              : #include <poll.h>
      65              : #endif
      66              : 
      67              : #include "libpq/pqsignal.h"
      68              : #include "miscadmin.h"
      69              : #include "pgstat.h"
      70              : #include "port/atomics.h"
      71              : #include "portability/instr_time.h"
      72              : #include "postmaster/postmaster.h"
      73              : #include "storage/fd.h"
      74              : #include "storage/ipc.h"
      75              : #include "storage/pmsignal.h"
      76              : #include "storage/latch.h"
      77              : #include "storage/waiteventset.h"
      78              : #include "utils/memutils.h"
      79              : #include "utils/resowner.h"
      80              : #include "utils/wait_event.h"
      81              : 
      82              : /*
      83              :  * Select the fd readiness primitive to use. Normally the "most modern"
      84              :  * primitive supported by the OS will be used, but for testing it can be
      85              :  * useful to manually specify the used primitive.  If desired, just add a
      86              :  * define somewhere before this block.
      87              :  */
      88              : #if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
      89              :     defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
      90              : /* don't overwrite manual choice */
      91              : #elif defined(HAVE_SYS_EPOLL_H)
      92              : #define WAIT_USE_EPOLL
      93              : #elif defined(HAVE_KQUEUE)
      94              : #define WAIT_USE_KQUEUE
      95              : #elif defined(HAVE_POLL)
      96              : #define WAIT_USE_POLL
      97              : #elif WIN32
      98              : #define WAIT_USE_WIN32
      99              : #else
     100              : #error "no wait set implementation available"
     101              : #endif
     102              : 
     103              : /*
     104              :  * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
     105              :  * available.  For testing the choice can also be manually specified.
     106              :  */
     107              : #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
     108              : #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
     109              : /* don't overwrite manual choice */
     110              : #elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
     111              : #define WAIT_USE_SIGNALFD
     112              : #else
     113              : #define WAIT_USE_SELF_PIPE
     114              : #endif
     115              : #endif
     116              : 
     117              : /* typedef in waiteventset.h */
     118              : struct WaitEventSet
     119              : {
     120              :     ResourceOwner owner;
     121              : 
     122              :     int         nevents;        /* number of registered events */
     123              :     int         nevents_space;  /* maximum number of events in this set */
     124              : 
     125              :     /*
     126              :      * Array, of nevents_space length, storing the definition of events this
     127              :      * set is waiting for.
     128              :      */
     129              :     WaitEvent  *events;
     130              : 
     131              :     /*
     132              :      * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
     133              :      * said latch, and latch_pos the offset in the ->events array. This is
     134              :      * useful because we check the state of the latch before performing doing
     135              :      * syscalls related to waiting.
     136              :      */
     137              :     Latch      *latch;
     138              :     int         latch_pos;
     139              : 
     140              :     /*
     141              :      * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
     142              :      * is set so that we'll exit immediately if postmaster death is detected,
     143              :      * instead of returning.
     144              :      */
     145              :     bool        exit_on_postmaster_death;
     146              : 
     147              : #if defined(WAIT_USE_EPOLL)
     148              :     int         epoll_fd;
     149              :     /* epoll_wait returns events in a user provided arrays, allocate once */
     150              :     struct epoll_event *epoll_ret_events;
     151              : #elif defined(WAIT_USE_KQUEUE)
     152              :     int         kqueue_fd;
     153              :     /* kevent returns events in a user provided arrays, allocate once */
     154              :     struct kevent *kqueue_ret_events;
     155              :     bool        report_postmaster_not_running;
     156              : #elif defined(WAIT_USE_POLL)
     157              :     /* poll expects events to be waited on every poll() call, prepare once */
     158              :     struct pollfd *pollfds;
     159              : #elif defined(WAIT_USE_WIN32)
     160              : 
     161              :     /*
     162              :      * Array of windows events. The first element always contains
     163              :      * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
     164              :      * event->pos + 1).
     165              :      */
     166              :     HANDLE     *handles;
     167              : #endif
     168              : };
     169              : 
     170              : #ifndef WIN32
     171              : /* Are we currently in WaitLatch? The signal handler would like to know. */
     172              : static volatile sig_atomic_t waiting = false;
     173              : #endif
     174              : 
     175              : #ifdef WAIT_USE_SIGNALFD
     176              : /* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
     177              : static int  signal_fd = -1;
     178              : #endif
     179              : 
     180              : #ifdef WAIT_USE_SELF_PIPE
     181              : /* Read and write ends of the self-pipe */
     182              : static int  selfpipe_readfd = -1;
     183              : static int  selfpipe_writefd = -1;
     184              : 
     185              : /* Process owning the self-pipe --- needed for checking purposes */
     186              : static int  selfpipe_owner_pid = 0;
     187              : 
     188              : /* Private function prototypes */
     189              : static void latch_sigurg_handler(SIGNAL_ARGS);
     190              : static void sendSelfPipeByte(void);
     191              : #endif
     192              : 
     193              : #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
     194              : static void drain(void);
     195              : #endif
     196              : 
     197              : #if defined(WAIT_USE_EPOLL)
     198              : static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
     199              : #elif defined(WAIT_USE_KQUEUE)
     200              : static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
     201              : #elif defined(WAIT_USE_POLL)
     202              : static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
     203              : #elif defined(WAIT_USE_WIN32)
     204              : static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
     205              : #endif
     206              : 
     207              : static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
     208              :                                         WaitEvent *occurred_events, int nevents);
     209              : 
     210              : /* ResourceOwner support to hold WaitEventSets */
     211              : static void ResOwnerReleaseWaitEventSet(Datum res);
     212              : 
     213              : static const ResourceOwnerDesc wait_event_set_resowner_desc =
     214              : {
     215              :     .name = "WaitEventSet",
     216              :     .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
     217              :     .release_priority = RELEASE_PRIO_WAITEVENTSETS,
     218              :     .ReleaseResource = ResOwnerReleaseWaitEventSet,
     219              :     .DebugPrint = NULL
     220              : };
     221              : 
     222              : /* Convenience wrappers over ResourceOwnerRemember/Forget */
     223              : static inline void
     224        81079 : ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
     225              : {
     226        81079 :     ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
     227        81079 : }
     228              : static inline void
     229        81078 : ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
     230              : {
     231        81078 :     ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
     232        81078 : }
     233              : 
     234              : 
     235              : /*
     236              :  * Initialize the process-local wait event infrastructure.
     237              :  *
     238              :  * This must be called once during startup of any process that can wait on
     239              :  * latches, before it issues any InitLatch() or OwnLatch() calls.
     240              :  */
     241              : void
     242        25630 : InitializeWaitEventSupport(void)
     243              : {
     244              : #if defined(WAIT_USE_SELF_PIPE)
     245              :     int         pipefd[2];
     246              : 
     247              :     if (IsUnderPostmaster)
     248              :     {
     249              :         /*
     250              :          * We might have inherited connections to a self-pipe created by the
     251              :          * postmaster.  It's critical that child processes create their own
     252              :          * self-pipes, of course, and we really want them to close the
     253              :          * inherited FDs for safety's sake.
     254              :          */
     255              :         if (selfpipe_owner_pid != 0)
     256              :         {
     257              :             /* Assert we go through here but once in a child process */
     258              :             Assert(selfpipe_owner_pid != MyProcPid);
     259              :             /* Release postmaster's pipe FDs; ignore any error */
     260              :             (void) close(selfpipe_readfd);
     261              :             (void) close(selfpipe_writefd);
     262              :             /* Clean up, just for safety's sake; we'll set these below */
     263              :             selfpipe_readfd = selfpipe_writefd = -1;
     264              :             selfpipe_owner_pid = 0;
     265              :             /* Keep fd.c's accounting straight */
     266              :             ReleaseExternalFD();
     267              :             ReleaseExternalFD();
     268              :         }
     269              :         else
     270              :         {
     271              :             /*
     272              :              * Postmaster didn't create a self-pipe ... or else we're in an
     273              :              * EXEC_BACKEND build, in which case it doesn't matter since the
     274              :              * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
     275              :              * fd.c won't have state to clean up, either.
     276              :              */
     277              :             Assert(selfpipe_readfd == -1);
     278              :         }
     279              :     }
     280              :     else
     281              :     {
     282              :         /* In postmaster or standalone backend, assert we do this but once */
     283              :         Assert(selfpipe_readfd == -1);
     284              :         Assert(selfpipe_owner_pid == 0);
     285              :     }
     286              : 
     287              :     /*
     288              :      * Set up the self-pipe that allows a signal handler to wake up the
     289              :      * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
     290              :      * that SetLatch won't block if the event has already been set many times
     291              :      * filling the kernel buffer. Make the read-end non-blocking too, so that
     292              :      * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
     293              :      * Also, make both FDs close-on-exec, since we surely do not want any
     294              :      * child processes messing with them.
     295              :      */
     296              :     if (pipe(pipefd) < 0)
     297              :         elog(FATAL, "pipe() failed: %m");
     298              :     if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
     299              :         elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
     300              :     if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
     301              :         elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
     302              :     if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
     303              :         elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
     304              :     if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
     305              :         elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
     306              : 
     307              :     selfpipe_readfd = pipefd[0];
     308              :     selfpipe_writefd = pipefd[1];
     309              :     selfpipe_owner_pid = MyProcPid;
     310              : 
     311              :     /* Tell fd.c about these two long-lived FDs */
     312              :     ReserveExternalFD();
     313              :     ReserveExternalFD();
     314              : 
     315              :     pqsignal(SIGURG, latch_sigurg_handler);
     316              : #endif
     317              : 
     318              : #ifdef WAIT_USE_SIGNALFD
     319              :     sigset_t    signalfd_mask;
     320              : 
     321        25630 :     if (IsUnderPostmaster)
     322              :     {
     323              :         /*
     324              :          * It would probably be safe to re-use the inherited signalfd since
     325              :          * signalfds only see the current process's pending signals, but it
     326              :          * seems less surprising to close it and create our own.
     327              :          */
     328        24408 :         if (signal_fd != -1)
     329              :         {
     330              :             /* Release postmaster's signal FD; ignore any error */
     331        24408 :             (void) close(signal_fd);
     332        24408 :             signal_fd = -1;
     333        24408 :             ReleaseExternalFD();
     334              :         }
     335              :     }
     336              : 
     337              :     /* Block SIGURG, because we'll receive it through a signalfd. */
     338        25630 :     sigaddset(&UnBlockSig, SIGURG);
     339              : 
     340              :     /* Set up the signalfd to receive SIGURG notifications. */
     341        25630 :     sigemptyset(&signalfd_mask);
     342        25630 :     sigaddset(&signalfd_mask, SIGURG);
     343        25630 :     signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
     344        25630 :     if (signal_fd < 0)
     345            0 :         elog(FATAL, "signalfd() failed");
     346        25630 :     ReserveExternalFD();
     347              : #endif
     348              : 
     349              : #ifdef WAIT_USE_KQUEUE
     350              :     /* Ignore SIGURG, because we'll receive it via kqueue. */
     351              :     pqsignal(SIGURG, SIG_IGN);
     352              : #endif
     353        25630 : }
     354              : 
     355              : /*
     356              :  * Create a WaitEventSet with space for nevents different events to wait for.
     357              :  *
     358              :  * These events can then be efficiently waited upon together, using
     359              :  * WaitEventSetWait().
     360              :  *
     361              :  * The WaitEventSet is tracked by the given 'resowner'.  Use NULL for session
     362              :  * lifetime.
     363              :  */
     364              : WaitEventSet *
     365       138573 : CreateWaitEventSet(ResourceOwner resowner, int nevents)
     366              : {
     367              :     WaitEventSet *set;
     368              :     char       *data;
     369       138573 :     Size        sz = 0;
     370              : 
     371              :     /*
     372              :      * Use MAXALIGN size/alignment to guarantee that later uses of memory are
     373              :      * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
     374              :      * platforms, but earlier allocations like WaitEventSet and WaitEvent
     375              :      * might not be sized to guarantee that when purely using sizeof().
     376              :      */
     377       138573 :     sz += MAXALIGN(sizeof(WaitEventSet));
     378       138573 :     sz += MAXALIGN(sizeof(WaitEvent) * nevents);
     379              : 
     380              : #if defined(WAIT_USE_EPOLL)
     381       138573 :     sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
     382              : #elif defined(WAIT_USE_KQUEUE)
     383              :     sz += MAXALIGN(sizeof(struct kevent) * nevents);
     384              : #elif defined(WAIT_USE_POLL)
     385              :     sz += MAXALIGN(sizeof(struct pollfd) * nevents);
     386              : #elif defined(WAIT_USE_WIN32)
     387              :     /* need space for the pgwin32_signal_event */
     388              :     sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
     389              : #endif
     390              : 
     391       138573 :     if (resowner != NULL)
     392        81079 :         ResourceOwnerEnlarge(resowner);
     393              : 
     394       138573 :     data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);
     395              : 
     396       138573 :     set = (WaitEventSet *) data;
     397       138573 :     data += MAXALIGN(sizeof(WaitEventSet));
     398              : 
     399       138573 :     set->events = (WaitEvent *) data;
     400       138573 :     data += MAXALIGN(sizeof(WaitEvent) * nevents);
     401              : 
     402              : #if defined(WAIT_USE_EPOLL)
     403       138573 :     set->epoll_ret_events = (struct epoll_event *) data;
     404       138573 :     data += MAXALIGN(sizeof(struct epoll_event) * nevents);
     405              : #elif defined(WAIT_USE_KQUEUE)
     406              :     set->kqueue_ret_events = (struct kevent *) data;
     407              :     data += MAXALIGN(sizeof(struct kevent) * nevents);
     408              : #elif defined(WAIT_USE_POLL)
     409              :     set->pollfds = (struct pollfd *) data;
     410              :     data += MAXALIGN(sizeof(struct pollfd) * nevents);
     411              : #elif defined(WAIT_USE_WIN32)
     412              :     set->handles = (HANDLE) data;
     413              :     data += MAXALIGN(sizeof(HANDLE) * nevents);
     414              : #endif
     415              : 
     416       138573 :     set->latch = NULL;
     417       138573 :     set->nevents_space = nevents;
     418       138573 :     set->exit_on_postmaster_death = false;
     419              : 
     420       138573 :     if (resowner != NULL)
     421              :     {
     422        81079 :         ResourceOwnerRememberWaitEventSet(resowner, set);
     423        81079 :         set->owner = resowner;
     424              :     }
     425              : 
     426              : #if defined(WAIT_USE_EPOLL)
     427       138573 :     if (!AcquireExternalFD())
     428            0 :         elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
     429       138573 :     set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
     430       138573 :     if (set->epoll_fd < 0)
     431              :     {
     432            0 :         ReleaseExternalFD();
     433            0 :         elog(ERROR, "epoll_create1 failed: %m");
     434              :     }
     435              : #elif defined(WAIT_USE_KQUEUE)
     436              :     if (!AcquireExternalFD())
     437              :         elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
     438              :     set->kqueue_fd = kqueue();
     439              :     if (set->kqueue_fd < 0)
     440              :     {
     441              :         ReleaseExternalFD();
     442              :         elog(ERROR, "kqueue failed: %m");
     443              :     }
     444              :     if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
     445              :     {
     446              :         int         save_errno = errno;
     447              : 
     448              :         close(set->kqueue_fd);
     449              :         ReleaseExternalFD();
     450              :         errno = save_errno;
     451              :         elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
     452              :     }
     453              :     set->report_postmaster_not_running = false;
     454              : #elif defined(WAIT_USE_WIN32)
     455              : 
     456              :     /*
     457              :      * To handle signals while waiting, we need to add a win32 specific event.
     458              :      * We accounted for the additional event at the top of this routine. See
     459              :      * port/win32/signal.c for more details.
     460              :      *
     461              :      * Note: pgwin32_signal_event should be first to ensure that it will be
     462              :      * reported when multiple events are set.  We want to guarantee that
     463              :      * pending signals are serviced.
     464              :      */
     465              :     set->handles[0] = pgwin32_signal_event;
     466              : #endif
     467              : 
     468       138573 :     return set;
     469              : }
     470              : 
     471              : /*
     472              :  * Free a previously created WaitEventSet.
     473              :  *
     474              :  * Note: preferably, this shouldn't have to free any resources that could be
     475              :  * inherited across an exec().  If it did, we'd likely leak those resources in
     476              :  * many scenarios.  For the epoll case, we ensure that by setting EPOLL_CLOEXEC
     477              :  * when the FD is created.  For the Windows case, we assume that the handles
     478              :  * involved are non-inheritable.
     479              :  */
     480              : void
     481        98436 : FreeWaitEventSet(WaitEventSet *set)
     482              : {
     483        98436 :     if (set->owner)
     484              :     {
     485        81078 :         ResourceOwnerForgetWaitEventSet(set->owner, set);
     486        81078 :         set->owner = NULL;
     487              :     }
     488              : 
     489              : #if defined(WAIT_USE_EPOLL)
     490        98436 :     close(set->epoll_fd);
     491        98436 :     ReleaseExternalFD();
     492              : #elif defined(WAIT_USE_KQUEUE)
     493              :     close(set->kqueue_fd);
     494              :     ReleaseExternalFD();
     495              : #elif defined(WAIT_USE_WIN32)
     496              :     for (WaitEvent *cur_event = set->events;
     497              :          cur_event < (set->events + set->nevents);
     498              :          cur_event++)
     499              :     {
     500              :         if (cur_event->events & WL_LATCH_SET)
     501              :         {
     502              :             /* uses the latch's HANDLE */
     503              :         }
     504              :         else if (cur_event->events & WL_POSTMASTER_DEATH)
     505              :         {
     506              :             /* uses PostmasterHandle */
     507              :         }
     508              :         else
     509              :         {
     510              :             /* Clean up the event object we created for the socket */
     511              :             WSAEventSelect(cur_event->fd, NULL, 0);
     512              :             WSACloseEvent(set->handles[cur_event->pos + 1]);
     513              :         }
     514              :     }
     515              : #endif
     516              : 
     517        98436 :     pfree(set);
     518        98436 : }
     519              : 
     520              : /*
     521              :  * Free a previously created WaitEventSet in a child process after a fork().
     522              :  */
     523              : void
     524        20476 : FreeWaitEventSetAfterFork(WaitEventSet *set)
     525              : {
     526              : #if defined(WAIT_USE_EPOLL)
     527        20476 :     close(set->epoll_fd);
     528        20476 :     ReleaseExternalFD();
     529              : #elif defined(WAIT_USE_KQUEUE)
     530              :     /* kqueues are not normally inherited by child processes */
     531              :     ReleaseExternalFD();
     532              : #endif
     533              : 
     534        20476 :     pfree(set);
     535        20476 : }
     536              : 
     537              : /* ---
     538              :  * Add an event to the set. Possible events are:
     539              :  * - WL_LATCH_SET: Wait for the latch to be set
     540              :  * - WL_POSTMASTER_DEATH: Wait for postmaster to die
     541              :  * - WL_SOCKET_READABLE: Wait for socket to become readable,
     542              :  *   can be combined in one event with other WL_SOCKET_* events
     543              :  * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
     544              :  *   can be combined with other WL_SOCKET_* events
     545              :  * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
     546              :  *   can be combined with other WL_SOCKET_* events (on non-Windows
     547              :  *   platforms, this is the same as WL_SOCKET_WRITEABLE)
     548              :  * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
     549              :  *   can be combined with other WL_SOCKET_* events (on non-Windows
     550              :  *   platforms, this is the same as WL_SOCKET_READABLE)
     551              :  * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
     552              :  * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
     553              :  *
     554              :  * Returns the offset in WaitEventSet->events (starting from 0), which can be
     555              :  * used to modify previously added wait events using ModifyWaitEvent().
     556              :  *
     557              :  * In the WL_LATCH_SET case the latch must be owned by the current process,
     558              :  * i.e. it must be a process-local latch initialized with InitLatch, or a
     559              :  * shared latch associated with the current process by calling OwnLatch.
     560              :  *
     561              :  * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
     562              :  * conditions cause the socket to be reported as readable/writable/connected,
     563              :  * so that the caller can deal with the condition.
     564              :  *
     565              :  * The user_data pointer specified here will be set for the events returned
     566              :  * by WaitEventSetWait(), allowing to easily associate additional data with
     567              :  * events.
     568              :  */
     569              : int
     570       387621 : AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
     571              :                   void *user_data)
     572              : {
     573              :     WaitEvent  *event;
     574              : 
     575              :     /* not enough space */
     576              :     Assert(set->nevents < set->nevents_space);
     577              : 
     578       387621 :     if (events == WL_EXIT_ON_PM_DEATH)
     579              :     {
     580       121886 :         events = WL_POSTMASTER_DEATH;
     581       121886 :         set->exit_on_postmaster_death = true;
     582              :     }
     583              : 
     584       387621 :     if (latch)
     585              :     {
     586       138123 :         if (latch->owner_pid != MyProcPid)
     587            0 :             elog(ERROR, "cannot wait on a latch owned by another process");
     588       138123 :         if (set->latch)
     589            0 :             elog(ERROR, "cannot wait on more than one latch");
     590       138123 :         if ((events & WL_LATCH_SET) != WL_LATCH_SET)
     591            0 :             elog(ERROR, "latch events only support being set");
     592              :     }
     593              :     else
     594              :     {
     595       249498 :         if (events & WL_LATCH_SET)
     596            0 :             elog(ERROR, "cannot wait on latch without a specified latch");
     597              :     }
     598              : 
     599              :     /* waiting for socket readiness without a socket indicates a bug */
     600       387621 :     if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
     601            0 :         elog(ERROR, "cannot wait on socket event without a socket");
     602              : 
     603       387621 :     event = &set->events[set->nevents];
     604       387621 :     event->pos = set->nevents++;
     605       387621 :     event->fd = fd;
     606       387621 :     event->events = events;
     607       387621 :     event->user_data = user_data;
     608              : #ifdef WIN32
     609              :     event->reset = false;
     610              : #endif
     611              : 
     612       387621 :     if (events == WL_LATCH_SET)
     613              :     {
     614       138123 :         set->latch = latch;
     615       138123 :         set->latch_pos = event->pos;
     616              : #if defined(WAIT_USE_SELF_PIPE)
     617              :         event->fd = selfpipe_readfd;
     618              : #elif defined(WAIT_USE_SIGNALFD)
     619       138123 :         event->fd = signal_fd;
     620              : #else
     621              :         event->fd = PGINVALID_SOCKET;
     622              : #ifdef WAIT_USE_EPOLL
     623              :         return event->pos;
     624              : #endif
     625              : #endif
     626              :     }
     627       249498 :     else if (events == WL_POSTMASTER_DEATH)
     628              :     {
     629              : #ifndef WIN32
     630       136412 :         event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
     631              : #endif
     632              :     }
     633              : 
     634              :     /* perform wait primitive specific initialization, if needed */
     635              : #if defined(WAIT_USE_EPOLL)
     636       387621 :     WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
     637              : #elif defined(WAIT_USE_KQUEUE)
     638              :     WaitEventAdjustKqueue(set, event, 0);
     639              : #elif defined(WAIT_USE_POLL)
     640              :     WaitEventAdjustPoll(set, event);
     641              : #elif defined(WAIT_USE_WIN32)
     642              :     WaitEventAdjustWin32(set, event);
     643              : #endif
     644              : 
     645       387621 :     return event->pos;
     646              : }
     647              : 
     648              : /*
     649              :  * Change the event mask and, in the WL_LATCH_SET case, the latch associated
     650              :  * with the WaitEvent.  The latch may be changed to NULL to disable the latch
     651              :  * temporarily, and then set back to a latch later.
     652              :  *
     653              :  * 'pos' is the id returned by AddWaitEventToSet.
     654              :  */
     655              : void
     656      2762380 : ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
     657              : {
     658              :     WaitEvent  *event;
     659              : #if defined(WAIT_USE_KQUEUE)
     660              :     int         old_events;
     661              : #endif
     662              : 
     663              :     Assert(pos < set->nevents);
     664              : 
     665      2762380 :     event = &set->events[pos];
     666              : #if defined(WAIT_USE_KQUEUE)
     667              :     old_events = event->events;
     668              : #endif
     669              : 
     670              :     /*
     671              :      * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
     672              :      *
     673              :      * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
     674              :      * in AddWaitEventToSet(), this needs to be checked before the fast-path
     675              :      * below that checks if 'events' has changed.
     676              :      */
     677      2762380 :     if (event->events == WL_POSTMASTER_DEATH)
     678              :     {
     679      1134631 :         if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
     680            0 :             elog(ERROR, "cannot remove postmaster death event");
     681      1134631 :         set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
     682      1134631 :         return;
     683              :     }
     684              : 
     685              :     /*
     686              :      * If neither the event mask nor the associated latch changes, return
     687              :      * early. That's an important optimization for some sockets, where
     688              :      * ModifyWaitEvent is frequently used to switch from waiting for reads to
     689              :      * waiting on writes.
     690              :      */
     691      1627749 :     if (events == event->events &&
     692      1601887 :         (!(event->events & WL_LATCH_SET) || set->latch == latch))
     693      1563464 :         return;
     694              : 
     695        64285 :     if (event->events & WL_LATCH_SET && events != event->events)
     696            0 :         elog(ERROR, "cannot modify latch event");
     697              : 
     698              :     /* FIXME: validate event mask */
     699        64285 :     event->events = events;
     700              : 
     701        64285 :     if (events == WL_LATCH_SET)
     702              :     {
     703        38423 :         if (latch && latch->owner_pid != MyProcPid)
     704            0 :             elog(ERROR, "cannot wait on a latch owned by another process");
     705        38423 :         set->latch = latch;
     706              : 
     707              :         /*
     708              :          * On Unix, we don't need to modify the kernel object because the
     709              :          * underlying pipe (if there is one) is the same for all latches so we
     710              :          * can return immediately.  On Windows, we need to update our array of
     711              :          * handles, but we leave the old one in place and tolerate spurious
     712              :          * wakeups if the latch is disabled.
     713              :          */
     714              : #if defined(WAIT_USE_WIN32)
     715              :         if (!latch)
     716              :             return;
     717              : #else
     718        38423 :         return;
     719              : #endif
     720              :     }
     721              : 
     722              : #if defined(WAIT_USE_EPOLL)
     723        25862 :     WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
     724              : #elif defined(WAIT_USE_KQUEUE)
     725              :     WaitEventAdjustKqueue(set, event, old_events);
     726              : #elif defined(WAIT_USE_POLL)
     727              :     WaitEventAdjustPoll(set, event);
     728              : #elif defined(WAIT_USE_WIN32)
     729              :     WaitEventAdjustWin32(set, event);
     730              : #endif
     731              : }
     732              : 
     733              : #if defined(WAIT_USE_EPOLL)
     734              : /*
     735              :  * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
     736              :  */
     737              : static void
     738       413483 : WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
     739              : {
     740              :     struct epoll_event epoll_ev;
     741              :     int         rc;
     742              : 
     743              :     /* pointer to our event, returned by epoll_wait */
     744       413483 :     epoll_ev.data.ptr = event;
     745              :     /* always wait for errors */
     746       413483 :     epoll_ev.events = EPOLLERR | EPOLLHUP;
     747              : 
     748              :     /* prepare pollfd entry once */
     749       413483 :     if (event->events == WL_LATCH_SET)
     750              :     {
     751              :         Assert(set->latch != NULL);
     752       138123 :         epoll_ev.events |= EPOLLIN;
     753              :     }
     754       275360 :     else if (event->events == WL_POSTMASTER_DEATH)
     755              :     {
     756       136412 :         epoll_ev.events |= EPOLLIN;
     757              :     }
     758              :     else
     759              :     {
     760              :         Assert(event->fd != PGINVALID_SOCKET);
     761              :         Assert(event->events & (WL_SOCKET_READABLE |
     762              :                                 WL_SOCKET_WRITEABLE |
     763              :                                 WL_SOCKET_CLOSED));
     764              : 
     765       138948 :         if (event->events & WL_SOCKET_READABLE)
     766       122331 :             epoll_ev.events |= EPOLLIN;
     767       138948 :         if (event->events & WL_SOCKET_WRITEABLE)
     768        22363 :             epoll_ev.events |= EPOLLOUT;
     769       138948 :         if (event->events & WL_SOCKET_CLOSED)
     770            0 :             epoll_ev.events |= EPOLLRDHUP;
     771              :     }
     772              : 
     773              :     /*
     774              :      * Even though unused, we also pass epoll_ev as the data argument if
     775              :      * EPOLL_CTL_DEL is passed as action.  There used to be an epoll bug
     776              :      * requiring that, and actually it makes the code simpler...
     777              :      */
     778       413483 :     rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
     779              : 
     780       413483 :     if (rc < 0)
     781            0 :         ereport(ERROR,
     782              :                 (errcode_for_socket_access(),
     783              :                  errmsg("%s() failed: %m",
     784              :                         "epoll_ctl")));
     785       413483 : }
     786              : #endif
     787              : 
     788              : #if defined(WAIT_USE_POLL)
     789              : static void
     790              : WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
     791              : {
     792              :     struct pollfd *pollfd = &set->pollfds[event->pos];
     793              : 
     794              :     pollfd->revents = 0;
     795              :     pollfd->fd = event->fd;
     796              : 
     797              :     /* prepare pollfd entry once */
     798              :     if (event->events == WL_LATCH_SET)
     799              :     {
     800              :         Assert(set->latch != NULL);
     801              :         pollfd->events = POLLIN;
     802              :     }
     803              :     else if (event->events == WL_POSTMASTER_DEATH)
     804              :     {
     805              :         pollfd->events = POLLIN;
     806              :     }
     807              :     else
     808              :     {
     809              :         Assert(event->events & (WL_SOCKET_READABLE |
     810              :                                 WL_SOCKET_WRITEABLE |
     811              :                                 WL_SOCKET_CLOSED));
     812              :         pollfd->events = 0;
     813              :         if (event->events & WL_SOCKET_READABLE)
     814              :             pollfd->events |= POLLIN;
     815              :         if (event->events & WL_SOCKET_WRITEABLE)
     816              :             pollfd->events |= POLLOUT;
     817              : #ifdef POLLRDHUP
     818              :         if (event->events & WL_SOCKET_CLOSED)
     819              :             pollfd->events |= POLLRDHUP;
     820              : #endif
     821              :     }
     822              : 
     823              :     Assert(event->fd != PGINVALID_SOCKET);
     824              : }
     825              : #endif
     826              : 
     827              : #if defined(WAIT_USE_KQUEUE)
     828              : 
     829              : /*
     830              :  * On most BSD family systems, the udata member of struct kevent is of type
     831              :  * void *, so we could directly convert to/from WaitEvent *.  Unfortunately,
     832              :  * NetBSD has it as intptr_t, so here we wallpaper over that difference with
     833              :  * an lvalue cast.
     834              :  */
     835              : #define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
     836              : 
     837              : static inline void
     838              : WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
     839              :                          WaitEvent *event)
     840              : {
     841              :     k_ev->ident = event->fd;
     842              :     k_ev->filter = filter;
     843              :     k_ev->flags = action;
     844              :     k_ev->fflags = 0;
     845              :     k_ev->data = 0;
     846              :     AccessWaitEvent(k_ev) = event;
     847              : }
     848              : 
     849              : static inline void
     850              : WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
     851              : {
     852              :     /* For now postmaster death can only be added, not removed. */
     853              :     k_ev->ident = PostmasterPid;
     854              :     k_ev->filter = EVFILT_PROC;
     855              :     k_ev->flags = EV_ADD;
     856              :     k_ev->fflags = NOTE_EXIT;
     857              :     k_ev->data = 0;
     858              :     AccessWaitEvent(k_ev) = event;
     859              : }
     860              : 
     861              : static inline void
     862              : WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
     863              : {
     864              :     /* For now latch can only be added, not removed. */
     865              :     k_ev->ident = SIGURG;
     866              :     k_ev->filter = EVFILT_SIGNAL;
     867              :     k_ev->flags = EV_ADD;
     868              :     k_ev->fflags = 0;
     869              :     k_ev->data = 0;
     870              :     AccessWaitEvent(k_ev) = event;
     871              : }
     872              : 
     873              : /*
     874              :  * old_events is the previous event mask, used to compute what has changed.
     875              :  */
     876              : static void
     877              : WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
     878              : {
     879              :     int         rc;
     880              :     struct kevent k_ev[2];
     881              :     int         count = 0;
     882              :     bool        new_filt_read = false;
     883              :     bool        old_filt_read = false;
     884              :     bool        new_filt_write = false;
     885              :     bool        old_filt_write = false;
     886              : 
     887              :     if (old_events == event->events)
     888              :         return;
     889              : 
     890              :     Assert(event->events != WL_LATCH_SET || set->latch != NULL);
     891              :     Assert(event->events == WL_LATCH_SET ||
     892              :            event->events == WL_POSTMASTER_DEATH ||
     893              :            (event->events & (WL_SOCKET_READABLE |
     894              :                              WL_SOCKET_WRITEABLE |
     895              :                              WL_SOCKET_CLOSED)));
     896              : 
     897              :     if (event->events == WL_POSTMASTER_DEATH)
     898              :     {
     899              :         /*
     900              :          * Unlike all the other implementations, we detect postmaster death
     901              :          * using process notification instead of waiting on the postmaster
     902              :          * alive pipe.
     903              :          */
     904              :         WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
     905              :     }
     906              :     else if (event->events == WL_LATCH_SET)
     907              :     {
     908              :         /* We detect latch wakeup using a signal event. */
     909              :         WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
     910              :     }
     911              :     else
     912              :     {
     913              :         /*
     914              :          * We need to compute the adds and deletes required to get from the
     915              :          * old event mask to the new event mask, since kevent treats readable
     916              :          * and writable as separate events.
     917              :          */
     918              :         if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
     919              :             old_filt_read = true;
     920              :         if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
     921              :             new_filt_read = true;
     922              :         if (old_events & WL_SOCKET_WRITEABLE)
     923              :             old_filt_write = true;
     924              :         if (event->events & WL_SOCKET_WRITEABLE)
     925              :             new_filt_write = true;
     926              :         if (old_filt_read && !new_filt_read)
     927              :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
     928              :                                      event);
     929              :         else if (!old_filt_read && new_filt_read)
     930              :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
     931              :                                      event);
     932              :         if (old_filt_write && !new_filt_write)
     933              :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
     934              :                                      event);
     935              :         else if (!old_filt_write && new_filt_write)
     936              :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
     937              :                                      event);
     938              :     }
     939              : 
     940              :     /* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
     941              :     if (count == 0)
     942              :         return;
     943              : 
     944              :     Assert(count <= 2);
     945              : 
     946              :     rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
     947              : 
     948              :     /*
     949              :      * When adding the postmaster's pid, we have to consider that it might
     950              :      * already have exited and perhaps even been replaced by another process
     951              :      * with the same pid.  If so, we have to defer reporting this as an event
     952              :      * until the next call to WaitEventSetWaitBlock().
     953              :      */
     954              : 
     955              :     if (rc < 0)
     956              :     {
     957              :         if (event->events == WL_POSTMASTER_DEATH &&
     958              :             (errno == ESRCH || errno == EACCES))
     959              :             set->report_postmaster_not_running = true;
     960              :         else
     961              :             ereport(ERROR,
     962              :                     (errcode_for_socket_access(),
     963              :                      errmsg("%s() failed: %m",
     964              :                             "kevent")));
     965              :     }
     966              :     else if (event->events == WL_POSTMASTER_DEATH &&
     967              :              PostmasterPid != getppid() &&
     968              :              !PostmasterIsAlive())
     969              :     {
     970              :         /*
     971              :          * The extra PostmasterIsAliveInternal() check prevents false alarms
     972              :          * on systems that give a different value for getppid() while being
     973              :          * traced by a debugger.
     974              :          */
     975              :         set->report_postmaster_not_running = true;
     976              :     }
     977              : }
     978              : 
     979              : #endif
     980              : 
     981              : #if defined(WAIT_USE_WIN32)
     982              : StaticAssertDecl(WSA_INVALID_EVENT == NULL, "");
     983              : 
     984              : static void
     985              : WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
     986              : {
     987              :     HANDLE     *handle = &set->handles[event->pos + 1];
     988              : 
     989              :     if (event->events == WL_LATCH_SET)
     990              :     {
     991              :         Assert(set->latch != NULL);
     992              :         *handle = set->latch->event;
     993              :     }
     994              :     else if (event->events == WL_POSTMASTER_DEATH)
     995              :     {
     996              :         *handle = PostmasterHandle;
     997              :     }
     998              :     else
     999              :     {
    1000              :         int         flags = FD_CLOSE;   /* always check for errors/EOF */
    1001              : 
    1002              :         if (event->events & WL_SOCKET_READABLE)
    1003              :             flags |= FD_READ;
    1004              :         if (event->events & WL_SOCKET_WRITEABLE)
    1005              :             flags |= FD_WRITE;
    1006              :         if (event->events & WL_SOCKET_CONNECTED)
    1007              :             flags |= FD_CONNECT;
    1008              :         if (event->events & WL_SOCKET_ACCEPT)
    1009              :             flags |= FD_ACCEPT;
    1010              : 
    1011              :         if (*handle == WSA_INVALID_EVENT)
    1012              :         {
    1013              :             *handle = WSACreateEvent();
    1014              :             if (*handle == WSA_INVALID_EVENT)
    1015              :                 elog(ERROR, "failed to create event for socket: error code %d",
    1016              :                      WSAGetLastError());
    1017              :         }
    1018              :         if (WSAEventSelect(event->fd, *handle, flags) != 0)
    1019              :             elog(ERROR, "failed to set up event for socket: error code %d",
    1020              :                  WSAGetLastError());
    1021              : 
    1022              :         Assert(event->fd != PGINVALID_SOCKET);
    1023              :     }
    1024              : }
    1025              : #endif
    1026              : 
    1027              : /*
    1028              :  * Wait for events added to the set to happen, or until the timeout is
    1029              :  * reached.  At most nevents occurred events are returned.
    1030              :  *
    1031              :  * If timeout = -1, block until an event occurs; if 0, check sockets for
    1032              :  * readiness, but don't block; if > 0, block for at most timeout milliseconds.
    1033              :  *
    1034              :  * Returns the number of events occurred, or 0 if the timeout was reached.
    1035              :  *
    1036              :  * Returned events will have the fd, pos, user_data fields set to the
    1037              :  * values associated with the registered event.
    1038              :  */
    1039              : int
    1040      1864189 : WaitEventSetWait(WaitEventSet *set, long timeout,
    1041              :                  WaitEvent *occurred_events, int nevents,
    1042              :                  uint32 wait_event_info)
    1043              : {
    1044      1864189 :     int         returned_events = 0;
    1045              :     instr_time  start_time;
    1046              :     instr_time  cur_time;
    1047      1864189 :     long        cur_timeout = -1;
    1048              : 
    1049              :     Assert(nevents > 0);
    1050              : 
    1051              :     /*
    1052              :      * Initialize timeout if requested.  We must record the current time so
    1053              :      * that we can determine the remaining timeout if interrupted.
    1054              :      */
    1055      1864189 :     if (timeout >= 0)
    1056              :     {
    1057       396118 :         INSTR_TIME_SET_CURRENT(start_time);
    1058              :         Assert(timeout >= 0 && timeout <= INT_MAX);
    1059       396118 :         cur_timeout = timeout;
    1060              :     }
    1061              :     else
    1062      1468071 :         INSTR_TIME_SET_ZERO(start_time);
    1063              : 
    1064      1864189 :     pgstat_report_wait_start(wait_event_info);
    1065              : 
    1066              : #ifndef WIN32
    1067      1864189 :     waiting = true;
    1068              : #else
    1069              :     /* Ensure that signals are serviced even if latch is already set */
    1070              :     pgwin32_dispatch_queued_signals();
    1071              : #endif
    1072      3768241 :     while (returned_events == 0)
    1073              :     {
    1074              :         int         rc;
    1075              : 
    1076              :         /*
    1077              :          * Check if the latch is set already first.  If so, we either exit
    1078              :          * immediately or ask the kernel for further events available right
    1079              :          * now without waiting, depending on how many events the caller wants.
    1080              :          *
    1081              :          * If someone sets the latch between this and the
    1082              :          * WaitEventSetWaitBlock() below, the setter will write a byte to the
    1083              :          * pipe (or signal us and the signal handler will do that), and the
    1084              :          * readiness routine will return immediately.
    1085              :          *
    1086              :          * On unix, If there's a pending byte in the self pipe, we'll notice
    1087              :          * whenever blocking. Only clearing the pipe in that case avoids
    1088              :          * having to drain it every time WaitLatchOrSocket() is used. Should
    1089              :          * the pipe-buffer fill up we're still ok, because the pipe is in
    1090              :          * nonblocking mode. It's unlikely for that to happen, because the
    1091              :          * self pipe isn't filled unless we're blocking (waiting = true), or
    1092              :          * from inside a signal handler in latch_sigurg_handler().
    1093              :          *
    1094              :          * On windows, we'll also notice if there's a pending event for the
    1095              :          * latch when blocking, but there's no danger of anything filling up,
    1096              :          * as "Setting an event that is already set has no effect.".
    1097              :          *
    1098              :          * Note: we assume that the kernel calls involved in latch management
    1099              :          * will provide adequate synchronization on machines with weak memory
    1100              :          * ordering, so that we cannot miss seeing is_set if a notification
    1101              :          * has already been queued.
    1102              :          */
    1103      2049681 :         if (set->latch && !set->latch->is_set)
    1104              :         {
    1105              :             /* about to sleep on a latch */
    1106      1783451 :             set->latch->maybe_sleeping = true;
    1107      1783451 :             pg_memory_barrier();
    1108              :             /* and recheck */
    1109              :         }
    1110              : 
    1111      2049681 :         if (set->latch && set->latch->is_set)
    1112              :         {
    1113       265809 :             occurred_events->fd = PGINVALID_SOCKET;
    1114       265809 :             occurred_events->pos = set->latch_pos;
    1115       265809 :             occurred_events->user_data =
    1116       265809 :                 set->events[set->latch_pos].user_data;
    1117       265809 :             occurred_events->events = WL_LATCH_SET;
    1118       265809 :             occurred_events++;
    1119       265809 :             returned_events++;
    1120              : 
    1121              :             /* could have been set above */
    1122       265809 :             set->latch->maybe_sleeping = false;
    1123              : 
    1124       265809 :             if (returned_events == nevents)
    1125       113064 :                 break;          /* output buffer full already */
    1126              : 
    1127              :             /*
    1128              :              * Even though we already have an event, we'll poll just once with
    1129              :              * zero timeout to see what non-latch events we can fit into the
    1130              :              * output buffer at the same time.
    1131              :              */
    1132       152745 :             cur_timeout = 0;
    1133       152745 :             timeout = 0;
    1134              :         }
    1135              : 
    1136              :         /*
    1137              :          * Wait for events using the readiness primitive chosen at the top of
    1138              :          * this file. If -1 is returned, a timeout has occurred, if 0 we have
    1139              :          * to retry, everything >= 1 is the number of returned events.
    1140              :          */
    1141      1936617 :         rc = WaitEventSetWaitBlock(set, cur_timeout,
    1142              :                                    occurred_events, nevents - returned_events);
    1143              : 
    1144      1936573 :         if (set->latch &&
    1145      1936109 :             set->latch->maybe_sleeping)
    1146      1783364 :             set->latch->maybe_sleeping = false;
    1147              : 
    1148      1936573 :         if (rc == -1)
    1149        32517 :             break;              /* timeout occurred */
    1150              :         else
    1151      1904056 :             returned_events += rc;
    1152              : 
    1153              :         /* If we're not done, update cur_timeout for next iteration */
    1154      1904056 :         if (returned_events == 0 && timeout >= 0)
    1155              :         {
    1156       172536 :             INSTR_TIME_SET_CURRENT(cur_time);
    1157       172536 :             INSTR_TIME_SUBTRACT(cur_time, start_time);
    1158       172536 :             cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
    1159       172536 :             if (cur_timeout <= 0)
    1160            4 :                 break;
    1161              :         }
    1162              :     }
    1163              : #ifndef WIN32
    1164      1864145 :     waiting = false;
    1165              : #endif
    1166              : 
    1167      1864145 :     pgstat_report_wait_end();
    1168              : 
    1169      1864145 :     return returned_events;
    1170              : }
    1171              : 
    1172              : 
    1173              : #if defined(WAIT_USE_EPOLL)
    1174              : 
    1175              : /*
    1176              :  * Wait using linux's epoll_wait(2).
    1177              :  *
    1178              :  * This is the preferable wait method, as several readiness notifications are
    1179              :  * delivered, without having to iterate through all of set->events. The return
    1180              :  * epoll_event struct contain a pointer to our events, making association
    1181              :  * easy.
    1182              :  */
    1183              : static inline int
    1184      1936617 : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1185              :                       WaitEvent *occurred_events, int nevents)
    1186              : {
    1187      1936617 :     int         returned_events = 0;
    1188              :     int         rc;
    1189              :     WaitEvent  *cur_event;
    1190              :     struct epoll_event *cur_epoll_event;
    1191              : 
    1192              :     /* Sleep */
    1193      1936617 :     rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
    1194      1936617 :                     Min(nevents, set->nevents_space), cur_timeout);
    1195              : 
    1196              :     /* Check return code */
    1197      1936617 :     if (rc < 0)
    1198              :     {
    1199              :         /* EINTR is okay, otherwise complain */
    1200       169549 :         if (errno != EINTR)
    1201              :         {
    1202            0 :             waiting = false;
    1203            0 :             ereport(ERROR,
    1204              :                     (errcode_for_socket_access(),
    1205              :                      errmsg("%s() failed: %m",
    1206              :                             "epoll_wait")));
    1207              :         }
    1208       169549 :         return 0;
    1209              :     }
    1210      1767068 :     else if (rc == 0)
    1211              :     {
    1212              :         /* timeout exceeded */
    1213        32517 :         return -1;
    1214              :     }
    1215              : 
    1216              :     /*
    1217              :      * At least one event occurred, iterate over the returned epoll events
    1218              :      * until they're either all processed, or we've returned all the events
    1219              :      * the caller desired.
    1220              :      */
    1221      1734551 :     for (cur_epoll_event = set->epoll_ret_events;
    1222      3469098 :          cur_epoll_event < (set->epoll_ret_events + rc) &&
    1223              :          returned_events < nevents;
    1224      1734547 :          cur_epoll_event++)
    1225              :     {
    1226              :         /* epoll's data pointer is set to the associated WaitEvent */
    1227      1734591 :         cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
    1228              : 
    1229      1734591 :         occurred_events->pos = cur_event->pos;
    1230      1734591 :         occurred_events->user_data = cur_event->user_data;
    1231      1734591 :         occurred_events->events = 0;
    1232              : 
    1233      1734591 :         if (cur_event->events == WL_LATCH_SET &&
    1234      1208158 :             cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
    1235              :         {
    1236              :             /* Drain the signalfd. */
    1237      1208158 :             drain();
    1238              : 
    1239      1208158 :             if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
    1240              :             {
    1241      1040519 :                 occurred_events->fd = PGINVALID_SOCKET;
    1242      1040519 :                 occurred_events->events = WL_LATCH_SET;
    1243      1040519 :                 occurred_events++;
    1244      1040519 :                 returned_events++;
    1245              :             }
    1246              :         }
    1247       526433 :         else if (cur_event->events == WL_POSTMASTER_DEATH &&
    1248           44 :                  cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
    1249              :         {
    1250              :             /*
    1251              :              * We expect an EPOLLHUP when the remote end is closed, but
    1252              :              * because we don't expect the pipe to become readable or to have
    1253              :              * any errors either, treat those cases as postmaster death, too.
    1254              :              *
    1255              :              * Be paranoid about a spurious event signaling the postmaster as
    1256              :              * being dead.  There have been reports about that happening with
    1257              :              * older primitives (select(2) to be specific), and a spurious
    1258              :              * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
    1259              :              * cost much.
    1260              :              */
    1261           44 :             if (!PostmasterIsAliveInternal())
    1262              :             {
    1263           44 :                 if (set->exit_on_postmaster_death)
    1264           44 :                     proc_exit(1);
    1265            0 :                 occurred_events->fd = PGINVALID_SOCKET;
    1266            0 :                 occurred_events->events = WL_POSTMASTER_DEATH;
    1267            0 :                 occurred_events++;
    1268            0 :                 returned_events++;
    1269              :             }
    1270              :         }
    1271       526389 :         else if (cur_event->events & (WL_SOCKET_READABLE |
    1272              :                                       WL_SOCKET_WRITEABLE |
    1273              :                                       WL_SOCKET_CLOSED))
    1274              :         {
    1275              :             Assert(cur_event->fd != PGINVALID_SOCKET);
    1276              : 
    1277       526389 :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1278       504251 :                 (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
    1279              :             {
    1280              :                 /* data available in socket, or EOF */
    1281       488731 :                 occurred_events->events |= WL_SOCKET_READABLE;
    1282              :             }
    1283              : 
    1284       526389 :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1285        57878 :                 (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
    1286              :             {
    1287              :                 /* writable, or EOF */
    1288        57371 :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1289              :             }
    1290              : 
    1291       526389 :             if ((cur_event->events & WL_SOCKET_CLOSED) &&
    1292            0 :                 (cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
    1293              :             {
    1294              :                 /* remote peer shut down, or error */
    1295            0 :                 occurred_events->events |= WL_SOCKET_CLOSED;
    1296              :             }
    1297              : 
    1298       526389 :             if (occurred_events->events != 0)
    1299              :             {
    1300       526389 :                 occurred_events->fd = cur_event->fd;
    1301       526389 :                 occurred_events++;
    1302       526389 :                 returned_events++;
    1303              :             }
    1304              :         }
    1305              :     }
    1306              : 
    1307      1734507 :     return returned_events;
    1308              : }
    1309              : 
    1310              : #elif defined(WAIT_USE_KQUEUE)
    1311              : 
    1312              : /*
    1313              :  * Wait using kevent(2) on BSD-family systems and macOS.
    1314              :  *
    1315              :  * For now this mirrors the epoll code, but in future it could modify the fd
    1316              :  * set in the same call to kevent as it uses for waiting instead of doing that
    1317              :  * with separate system calls.
    1318              :  */
    1319              : static int
    1320              : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1321              :                       WaitEvent *occurred_events, int nevents)
    1322              : {
    1323              :     int         returned_events = 0;
    1324              :     int         rc;
    1325              :     WaitEvent  *cur_event;
    1326              :     struct kevent *cur_kqueue_event;
    1327              :     struct timespec timeout;
    1328              :     struct timespec *timeout_p;
    1329              : 
    1330              :     if (cur_timeout < 0)
    1331              :         timeout_p = NULL;
    1332              :     else
    1333              :     {
    1334              :         timeout.tv_sec = cur_timeout / 1000;
    1335              :         timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
    1336              :         timeout_p = &timeout;
    1337              :     }
    1338              : 
    1339              :     /*
    1340              :      * Report postmaster events discovered by WaitEventAdjustKqueue() or an
    1341              :      * earlier call to WaitEventSetWait().
    1342              :      */
    1343              :     if (unlikely(set->report_postmaster_not_running))
    1344              :     {
    1345              :         if (set->exit_on_postmaster_death)
    1346              :             proc_exit(1);
    1347              :         occurred_events->fd = PGINVALID_SOCKET;
    1348              :         occurred_events->events = WL_POSTMASTER_DEATH;
    1349              :         return 1;
    1350              :     }
    1351              : 
    1352              :     /* Sleep */
    1353              :     rc = kevent(set->kqueue_fd, NULL, 0,
    1354              :                 set->kqueue_ret_events,
    1355              :                 Min(nevents, set->nevents_space),
    1356              :                 timeout_p);
    1357              : 
    1358              :     /* Check return code */
    1359              :     if (rc < 0)
    1360              :     {
    1361              :         /* EINTR is okay, otherwise complain */
    1362              :         if (errno != EINTR)
    1363              :         {
    1364              :             waiting = false;
    1365              :             ereport(ERROR,
    1366              :                     (errcode_for_socket_access(),
    1367              :                      errmsg("%s() failed: %m",
    1368              :                             "kevent")));
    1369              :         }
    1370              :         return 0;
    1371              :     }
    1372              :     else if (rc == 0)
    1373              :     {
    1374              :         /* timeout exceeded */
    1375              :         return -1;
    1376              :     }
    1377              : 
    1378              :     /*
    1379              :      * At least one event occurred, iterate over the returned kqueue events
    1380              :      * until they're either all processed, or we've returned all the events
    1381              :      * the caller desired.
    1382              :      */
    1383              :     for (cur_kqueue_event = set->kqueue_ret_events;
    1384              :          cur_kqueue_event < (set->kqueue_ret_events + rc) &&
    1385              :          returned_events < nevents;
    1386              :          cur_kqueue_event++)
    1387              :     {
    1388              :         /* kevent's udata points to the associated WaitEvent */
    1389              :         cur_event = AccessWaitEvent(cur_kqueue_event);
    1390              : 
    1391              :         occurred_events->pos = cur_event->pos;
    1392              :         occurred_events->user_data = cur_event->user_data;
    1393              :         occurred_events->events = 0;
    1394              : 
    1395              :         if (cur_event->events == WL_LATCH_SET &&
    1396              :             cur_kqueue_event->filter == EVFILT_SIGNAL)
    1397              :         {
    1398              :             if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
    1399              :             {
    1400              :                 occurred_events->fd = PGINVALID_SOCKET;
    1401              :                 occurred_events->events = WL_LATCH_SET;
    1402              :                 occurred_events++;
    1403              :                 returned_events++;
    1404              :             }
    1405              :         }
    1406              :         else if (cur_event->events == WL_POSTMASTER_DEATH &&
    1407              :                  cur_kqueue_event->filter == EVFILT_PROC &&
    1408              :                  (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
    1409              :         {
    1410              :             /*
    1411              :              * The kernel will tell this kqueue object only once about the
    1412              :              * exit of the postmaster, so let's remember that for next time so
    1413              :              * that we provide level-triggered semantics.
    1414              :              */
    1415              :             set->report_postmaster_not_running = true;
    1416              : 
    1417              :             if (set->exit_on_postmaster_death)
    1418              :                 proc_exit(1);
    1419              :             occurred_events->fd = PGINVALID_SOCKET;
    1420              :             occurred_events->events = WL_POSTMASTER_DEATH;
    1421              :             occurred_events++;
    1422              :             returned_events++;
    1423              :         }
    1424              :         else if (cur_event->events & (WL_SOCKET_READABLE |
    1425              :                                       WL_SOCKET_WRITEABLE |
    1426              :                                       WL_SOCKET_CLOSED))
    1427              :         {
    1428              :             Assert(cur_event->fd >= 0);
    1429              : 
    1430              :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1431              :                 (cur_kqueue_event->filter == EVFILT_READ))
    1432              :             {
    1433              :                 /* readable, or EOF */
    1434              :                 occurred_events->events |= WL_SOCKET_READABLE;
    1435              :             }
    1436              : 
    1437              :             if ((cur_event->events & WL_SOCKET_CLOSED) &&
    1438              :                 (cur_kqueue_event->filter == EVFILT_READ) &&
    1439              :                 (cur_kqueue_event->flags & EV_EOF))
    1440              :             {
    1441              :                 /* the remote peer has shut down */
    1442              :                 occurred_events->events |= WL_SOCKET_CLOSED;
    1443              :             }
    1444              : 
    1445              :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1446              :                 (cur_kqueue_event->filter == EVFILT_WRITE))
    1447              :             {
    1448              :                 /* writable, or EOF */
    1449              :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1450              :             }
    1451              : 
    1452              :             if (occurred_events->events != 0)
    1453              :             {
    1454              :                 occurred_events->fd = cur_event->fd;
    1455              :                 occurred_events++;
    1456              :                 returned_events++;
    1457              :             }
    1458              :         }
    1459              :     }
    1460              : 
    1461              :     return returned_events;
    1462              : }
    1463              : 
    1464              : #elif defined(WAIT_USE_POLL)
    1465              : 
    1466              : /*
    1467              :  * Wait using poll(2).
    1468              :  *
    1469              :  * This allows to receive readiness notifications for several events at once,
    1470              :  * but requires iterating through all of set->pollfds.
    1471              :  */
    1472              : static inline int
    1473              : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1474              :                       WaitEvent *occurred_events, int nevents)
    1475              : {
    1476              :     int         returned_events = 0;
    1477              :     int         rc;
    1478              :     WaitEvent  *cur_event;
    1479              :     struct pollfd *cur_pollfd;
    1480              : 
    1481              :     /* Sleep */
    1482              :     rc = poll(set->pollfds, set->nevents, cur_timeout);
    1483              : 
    1484              :     /* Check return code */
    1485              :     if (rc < 0)
    1486              :     {
    1487              :         /* EINTR is okay, otherwise complain */
    1488              :         if (errno != EINTR)
    1489              :         {
    1490              :             waiting = false;
    1491              :             ereport(ERROR,
    1492              :                     (errcode_for_socket_access(),
    1493              :                      errmsg("%s() failed: %m",
    1494              :                             "poll")));
    1495              :         }
    1496              :         return 0;
    1497              :     }
    1498              :     else if (rc == 0)
    1499              :     {
    1500              :         /* timeout exceeded */
    1501              :         return -1;
    1502              :     }
    1503              : 
    1504              :     for (cur_event = set->events, cur_pollfd = set->pollfds;
    1505              :          cur_event < (set->events + set->nevents) &&
    1506              :          returned_events < nevents;
    1507              :          cur_event++, cur_pollfd++)
    1508              :     {
    1509              :         /* no activity on this FD, skip */
    1510              :         if (cur_pollfd->revents == 0)
    1511              :             continue;
    1512              : 
    1513              :         occurred_events->pos = cur_event->pos;
    1514              :         occurred_events->user_data = cur_event->user_data;
    1515              :         occurred_events->events = 0;
    1516              : 
    1517              :         if (cur_event->events == WL_LATCH_SET &&
    1518              :             (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
    1519              :         {
    1520              :             /* There's data in the self-pipe, clear it. */
    1521              :             drain();
    1522              : 
    1523              :             if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
    1524              :             {
    1525              :                 occurred_events->fd = PGINVALID_SOCKET;
    1526              :                 occurred_events->events = WL_LATCH_SET;
    1527              :                 occurred_events++;
    1528              :                 returned_events++;
    1529              :             }
    1530              :         }
    1531              :         else if (cur_event->events == WL_POSTMASTER_DEATH &&
    1532              :                  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
    1533              :         {
    1534              :             /*
    1535              :              * We expect a POLLHUP when the remote end is closed, but because
    1536              :              * we don't expect the pipe to become readable or to have any
    1537              :              * errors either, treat those cases as postmaster death, too.
    1538              :              *
    1539              :              * Be paranoid about a spurious event signaling the postmaster as
    1540              :              * being dead.  There have been reports about that happening with
    1541              :              * older primitives (select(2) to be specific), and a spurious
    1542              :              * WL_POSTMASTER_DEATH event would be painful.  Re-checking
    1543              :              * doesn't cost much.
    1544              :              */
    1545              :             if (!PostmasterIsAliveInternal())
    1546              :             {
    1547              :                 if (set->exit_on_postmaster_death)
    1548              :                     proc_exit(1);
    1549              :                 occurred_events->fd = PGINVALID_SOCKET;
    1550              :                 occurred_events->events = WL_POSTMASTER_DEATH;
    1551              :                 occurred_events++;
    1552              :                 returned_events++;
    1553              :             }
    1554              :         }
    1555              :         else if (cur_event->events & (WL_SOCKET_READABLE |
    1556              :                                       WL_SOCKET_WRITEABLE |
    1557              :                                       WL_SOCKET_CLOSED))
    1558              :         {
    1559              :             int         errflags = POLLHUP | POLLERR | POLLNVAL;
    1560              : 
    1561              :             Assert(cur_event->fd >= PGINVALID_SOCKET);
    1562              : 
    1563              :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1564              :                 (cur_pollfd->revents & (POLLIN | errflags)))
    1565              :             {
    1566              :                 /* data available in socket, or EOF */
    1567              :                 occurred_events->events |= WL_SOCKET_READABLE;
    1568              :             }
    1569              : 
    1570              :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1571              :                 (cur_pollfd->revents & (POLLOUT | errflags)))
    1572              :             {
    1573              :                 /* writeable, or EOF */
    1574              :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1575              :             }
    1576              : 
    1577              : #ifdef POLLRDHUP
    1578              :             if ((cur_event->events & WL_SOCKET_CLOSED) &&
    1579              :                 (cur_pollfd->revents & (POLLRDHUP | errflags)))
    1580              :             {
    1581              :                 /* remote peer closed, or error */
    1582              :                 occurred_events->events |= WL_SOCKET_CLOSED;
    1583              :             }
    1584              : #endif
    1585              : 
    1586              :             if (occurred_events->events != 0)
    1587              :             {
    1588              :                 occurred_events->fd = cur_event->fd;
    1589              :                 occurred_events++;
    1590              :                 returned_events++;
    1591              :             }
    1592              :         }
    1593              :     }
    1594              :     return returned_events;
    1595              : }
    1596              : 
    1597              : #elif defined(WAIT_USE_WIN32)
    1598              : 
    1599              : /*
    1600              :  * Wait using Windows' WaitForMultipleObjects().  Each call only "consumes" one
    1601              :  * event, so we keep calling until we've filled up our output buffer to match
    1602              :  * the behavior of the other implementations.
    1603              :  *
    1604              :  * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
    1605              :  */
    1606              : static inline int
    1607              : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1608              :                       WaitEvent *occurred_events, int nevents)
    1609              : {
    1610              :     int         returned_events = 0;
    1611              :     DWORD       rc;
    1612              :     WaitEvent  *cur_event;
    1613              : 
    1614              :     /* Reset any wait events that need it */
    1615              :     for (cur_event = set->events;
    1616              :          cur_event < (set->events + set->nevents);
    1617              :          cur_event++)
    1618              :     {
    1619              :         if (cur_event->reset)
    1620              :         {
    1621              :             WaitEventAdjustWin32(set, cur_event);
    1622              :             cur_event->reset = false;
    1623              :         }
    1624              : 
    1625              :         /*
    1626              :          * We associate the socket with a new event handle for each
    1627              :          * WaitEventSet.  FD_CLOSE is only generated once if the other end
    1628              :          * closes gracefully.  Therefore we might miss the FD_CLOSE
    1629              :          * notification, if it was delivered to another event after we stopped
    1630              :          * waiting for it.  Close that race by peeking for EOF after setting
    1631              :          * up this handle to receive notifications, and before entering the
    1632              :          * sleep.
    1633              :          *
    1634              :          * XXX If we had one event handle for the lifetime of a socket, we
    1635              :          * wouldn't need this.
    1636              :          */
    1637              :         if (cur_event->events & WL_SOCKET_READABLE)
    1638              :         {
    1639              :             char        c;
    1640              :             WSABUF      buf;
    1641              :             DWORD       received;
    1642              :             DWORD       flags;
    1643              : 
    1644              :             buf.buf = &c;
    1645              :             buf.len = 1;
    1646              :             flags = MSG_PEEK;
    1647              :             if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
    1648              :             {
    1649              :                 occurred_events->pos = cur_event->pos;
    1650              :                 occurred_events->user_data = cur_event->user_data;
    1651              :                 occurred_events->events = WL_SOCKET_READABLE;
    1652              :                 occurred_events->fd = cur_event->fd;
    1653              :                 return 1;
    1654              :             }
    1655              :         }
    1656              : 
    1657              :         /*
    1658              :          * Windows does not guarantee to log an FD_WRITE network event
    1659              :          * indicating that more data can be sent unless the previous send()
    1660              :          * failed with WSAEWOULDBLOCK.  While our caller might well have made
    1661              :          * such a call, we cannot assume that here.  Therefore, if waiting for
    1662              :          * write-ready, force the issue by doing a dummy send().  If the dummy
    1663              :          * send() succeeds, assume that the socket is in fact write-ready, and
    1664              :          * return immediately.  Also, if it fails with something other than
    1665              :          * WSAEWOULDBLOCK, return a write-ready indication to let our caller
    1666              :          * deal with the error condition.
    1667              :          */
    1668              :         if (cur_event->events & WL_SOCKET_WRITEABLE)
    1669              :         {
    1670              :             char        c;
    1671              :             WSABUF      buf;
    1672              :             DWORD       sent;
    1673              :             int         r;
    1674              : 
    1675              :             buf.buf = &c;
    1676              :             buf.len = 0;
    1677              : 
    1678              :             r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
    1679              :             if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
    1680              :             {
    1681              :                 occurred_events->pos = cur_event->pos;
    1682              :                 occurred_events->user_data = cur_event->user_data;
    1683              :                 occurred_events->events = WL_SOCKET_WRITEABLE;
    1684              :                 occurred_events->fd = cur_event->fd;
    1685              :                 return 1;
    1686              :             }
    1687              :         }
    1688              :     }
    1689              : 
    1690              :     /*
    1691              :      * Sleep.
    1692              :      *
    1693              :      * Need to wait for ->nevents + 1, because signal handle is in [0].
    1694              :      */
    1695              :     rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
    1696              :                                 cur_timeout);
    1697              : 
    1698              :     /* Check return code */
    1699              :     if (rc == WAIT_FAILED)
    1700              :         elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
    1701              :              GetLastError());
    1702              :     else if (rc == WAIT_TIMEOUT)
    1703              :     {
    1704              :         /* timeout exceeded */
    1705              :         return -1;
    1706              :     }
    1707              : 
    1708              :     if (rc == WAIT_OBJECT_0)
    1709              :     {
    1710              :         /* Service newly-arrived signals */
    1711              :         pgwin32_dispatch_queued_signals();
    1712              :         return 0;               /* retry */
    1713              :     }
    1714              : 
    1715              :     /*
    1716              :      * With an offset of one, due to the always present pgwin32_signal_event,
    1717              :      * the handle offset directly corresponds to a wait event.
    1718              :      */
    1719              :     cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
    1720              : 
    1721              :     for (;;)
    1722              :     {
    1723              :         int         next_pos;
    1724              :         int         count;
    1725              : 
    1726              :         occurred_events->pos = cur_event->pos;
    1727              :         occurred_events->user_data = cur_event->user_data;
    1728              :         occurred_events->events = 0;
    1729              : 
    1730              :         if (cur_event->events == WL_LATCH_SET)
    1731              :         {
    1732              :             /*
    1733              :              * We cannot use set->latch->event to reset the fired event if we
    1734              :              * aren't waiting on this latch now.
    1735              :              */
    1736              :             if (!ResetEvent(set->handles[cur_event->pos + 1]))
    1737              :                 elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
    1738              : 
    1739              :             if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
    1740              :             {
    1741              :                 occurred_events->fd = PGINVALID_SOCKET;
    1742              :                 occurred_events->events = WL_LATCH_SET;
    1743              :                 occurred_events++;
    1744              :                 returned_events++;
    1745              :             }
    1746              :         }
    1747              :         else if (cur_event->events == WL_POSTMASTER_DEATH)
    1748              :         {
    1749              :             /*
    1750              :              * Postmaster apparently died.  Since the consequences of falsely
    1751              :              * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
    1752              :              * take the trouble to positively verify this with
    1753              :              * PostmasterIsAlive(), even though there is no known reason to
    1754              :              * think that the event could be falsely set on Windows.
    1755              :              */
    1756              :             if (!PostmasterIsAliveInternal())
    1757              :             {
    1758              :                 if (set->exit_on_postmaster_death)
    1759              :                     proc_exit(1);
    1760              :                 occurred_events->fd = PGINVALID_SOCKET;
    1761              :                 occurred_events->events = WL_POSTMASTER_DEATH;
    1762              :                 occurred_events++;
    1763              :                 returned_events++;
    1764              :             }
    1765              :         }
    1766              :         else if (cur_event->events & WL_SOCKET_MASK)
    1767              :         {
    1768              :             WSANETWORKEVENTS resEvents;
    1769              :             HANDLE      handle = set->handles[cur_event->pos + 1];
    1770              : 
    1771              :             Assert(cur_event->fd);
    1772              : 
    1773              :             occurred_events->fd = cur_event->fd;
    1774              : 
    1775              :             ZeroMemory(&resEvents, sizeof(resEvents));
    1776              :             if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
    1777              :                 elog(ERROR, "failed to enumerate network events: error code %d",
    1778              :                      WSAGetLastError());
    1779              :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1780              :                 (resEvents.lNetworkEvents & FD_READ))
    1781              :             {
    1782              :                 /* data available in socket */
    1783              :                 occurred_events->events |= WL_SOCKET_READABLE;
    1784              : 
    1785              :                 /*------
    1786              :                  * WaitForMultipleObjects doesn't guarantee that a read event
    1787              :                  * will be returned if the latch is set at the same time.  Even
    1788              :                  * if it did, the caller might drop that event expecting it to
    1789              :                  * reoccur on next call.  So, we must force the event to be
    1790              :                  * reset if this WaitEventSet is used again in order to avoid
    1791              :                  * an indefinite hang.
    1792              :                  *
    1793              :                  * Refer
    1794              :                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
    1795              :                  * for the behavior of socket events.
    1796              :                  *------
    1797              :                  */
    1798              :                 cur_event->reset = true;
    1799              :             }
    1800              :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1801              :                 (resEvents.lNetworkEvents & FD_WRITE))
    1802              :             {
    1803              :                 /* writeable */
    1804              :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1805              :             }
    1806              :             if ((cur_event->events & WL_SOCKET_CONNECTED) &&
    1807              :                 (resEvents.lNetworkEvents & FD_CONNECT))
    1808              :             {
    1809              :                 /* connected */
    1810              :                 occurred_events->events |= WL_SOCKET_CONNECTED;
    1811              :             }
    1812              :             if ((cur_event->events & WL_SOCKET_ACCEPT) &&
    1813              :                 (resEvents.lNetworkEvents & FD_ACCEPT))
    1814              :             {
    1815              :                 /* incoming connection could be accepted */
    1816              :                 occurred_events->events |= WL_SOCKET_ACCEPT;
    1817              :             }
    1818              :             if (resEvents.lNetworkEvents & FD_CLOSE)
    1819              :             {
    1820              :                 /* EOF/error, so signal all caller-requested socket flags */
    1821              :                 occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
    1822              :             }
    1823              : 
    1824              :             if (occurred_events->events != 0)
    1825              :             {
    1826              :                 occurred_events++;
    1827              :                 returned_events++;
    1828              :             }
    1829              :         }
    1830              : 
    1831              :         /* Is the output buffer full? */
    1832              :         if (returned_events == nevents)
    1833              :             break;
    1834              : 
    1835              :         /* Have we run out of possible events? */
    1836              :         next_pos = cur_event->pos + 1;
    1837              :         if (next_pos == set->nevents)
    1838              :             break;
    1839              : 
    1840              :         /*
    1841              :          * Poll the rest of the event handles in the array starting at
    1842              :          * next_pos being careful to skip over the initial signal handle too.
    1843              :          * This time we use a zero timeout.
    1844              :          */
    1845              :         count = set->nevents - next_pos;
    1846              :         rc = WaitForMultipleObjects(count,
    1847              :                                     set->handles + 1 + next_pos,
    1848              :                                     false,
    1849              :                                     0);
    1850              : 
    1851              :         /*
    1852              :          * We don't distinguish between errors and WAIT_TIMEOUT here because
    1853              :          * we already have events to report.
    1854              :          */
    1855              :         if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
    1856              :             break;
    1857              : 
    1858              :         /* We have another event to decode. */
    1859              :         cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
    1860              :     }
    1861              : 
    1862              :     return returned_events;
    1863              : }
    1864              : #endif
    1865              : 
    1866              : /*
    1867              :  * Return whether the current build options can report WL_SOCKET_CLOSED.
    1868              :  */
    1869              : bool
    1870         1222 : WaitEventSetCanReportClosed(void)
    1871              : {
    1872              : #if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
    1873              :     defined(WAIT_USE_EPOLL) || \
    1874              :     defined(WAIT_USE_KQUEUE)
    1875         1222 :     return true;
    1876              : #else
    1877              :     return false;
    1878              : #endif
    1879              : }
    1880              : 
    1881              : /*
    1882              :  * Get the number of wait events registered in a given WaitEventSet.
    1883              :  */
    1884              : int
    1885          131 : GetNumRegisteredWaitEvents(WaitEventSet *set)
    1886              : {
    1887          131 :     return set->nevents;
    1888              : }
    1889              : 
    1890              : #if defined(WAIT_USE_SELF_PIPE)
    1891              : 
    1892              : /*
    1893              :  * SetLatch uses SIGURG to wake up the process waiting on the latch.
    1894              :  *
    1895              :  * Wake up WaitLatch, if we're waiting.
    1896              :  */
    1897              : static void
    1898              : latch_sigurg_handler(SIGNAL_ARGS)
    1899              : {
    1900              :     if (waiting)
    1901              :         sendSelfPipeByte();
    1902              : }
    1903              : 
    1904              : /* Send one byte to the self-pipe, to wake up WaitLatch */
    1905              : static void
    1906              : sendSelfPipeByte(void)
    1907              : {
    1908              :     int         rc;
    1909              :     char        dummy = 0;
    1910              : 
    1911              : retry:
    1912              :     rc = write(selfpipe_writefd, &dummy, 1);
    1913              :     if (rc < 0)
    1914              :     {
    1915              :         /* If interrupted by signal, just retry */
    1916              :         if (errno == EINTR)
    1917              :             goto retry;
    1918              : 
    1919              :         /*
    1920              :          * If the pipe is full, we don't need to retry, the data that's there
    1921              :          * already is enough to wake up WaitLatch.
    1922              :          */
    1923              :         if (errno == EAGAIN || errno == EWOULDBLOCK)
    1924              :             return;
    1925              : 
    1926              :         /*
    1927              :          * Oops, the write() failed for some other reason. We might be in a
    1928              :          * signal handler, so it's not safe to elog(). We have no choice but
    1929              :          * silently ignore the error.
    1930              :          */
    1931              :         return;
    1932              :     }
    1933              : }
    1934              : 
    1935              : #endif
    1936              : 
    1937              : #if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
    1938              : 
    1939              : /*
    1940              :  * Read all available data from self-pipe or signalfd.
    1941              :  *
    1942              :  * Note: this is only called when waiting = true.  If it fails and doesn't
    1943              :  * return, it must reset that flag first (though ideally, this will never
    1944              :  * happen).
    1945              :  */
    1946              : static void
    1947      1208158 : drain(void)
    1948              : {
    1949              :     char        buf[1024];
    1950              :     int         rc;
    1951              :     int         fd;
    1952              : 
    1953              : #ifdef WAIT_USE_SELF_PIPE
    1954              :     fd = selfpipe_readfd;
    1955              : #else
    1956      1208158 :     fd = signal_fd;
    1957              : #endif
    1958              : 
    1959              :     for (;;)
    1960              :     {
    1961      1208158 :         rc = read(fd, buf, sizeof(buf));
    1962      1208158 :         if (rc < 0)
    1963              :         {
    1964            0 :             if (errno == EAGAIN || errno == EWOULDBLOCK)
    1965              :                 break;          /* the descriptor is empty */
    1966            0 :             else if (errno == EINTR)
    1967            0 :                 continue;       /* retry */
    1968              :             else
    1969              :             {
    1970            0 :                 waiting = false;
    1971              : #ifdef WAIT_USE_SELF_PIPE
    1972              :                 elog(ERROR, "read() on self-pipe failed: %m");
    1973              : #else
    1974            0 :                 elog(ERROR, "read() on signalfd failed: %m");
    1975              : #endif
    1976              :             }
    1977              :         }
    1978      1208158 :         else if (rc == 0)
    1979              :         {
    1980            0 :             waiting = false;
    1981              : #ifdef WAIT_USE_SELF_PIPE
    1982              :             elog(ERROR, "unexpected EOF on self-pipe");
    1983              : #else
    1984            0 :             elog(ERROR, "unexpected EOF on signalfd");
    1985              : #endif
    1986              :         }
    1987      1208158 :         else if (rc < sizeof(buf))
    1988              :         {
    1989              :             /* we successfully drained the pipe; no need to read() again */
    1990      1208158 :             break;
    1991              :         }
    1992              :         /* else buffer wasn't big enough, so read again */
    1993              :     }
    1994      1208158 : }
    1995              : 
    1996              : #endif
    1997              : 
    1998              : static void
    1999            1 : ResOwnerReleaseWaitEventSet(Datum res)
    2000              : {
    2001            1 :     WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
    2002              : 
    2003              :     Assert(set->owner != NULL);
    2004            1 :     set->owner = NULL;
    2005            1 :     FreeWaitEventSet(set);
    2006            1 : }
    2007              : 
    2008              : #ifndef WIN32
    2009              : /*
    2010              :  * Wake up my process if it's currently sleeping in WaitEventSetWaitBlock()
    2011              :  *
    2012              :  * NB: be sure to save and restore errno around it.  (That's standard practice
    2013              :  * in most signal handlers, of course, but we used to omit it in handlers that
    2014              :  * only set a flag.) XXX
    2015              :  *
    2016              :  * NB: this function is called from critical sections and signal handlers so
    2017              :  * throwing an error is not a good idea.
    2018              :  *
    2019              :  * On Windows, Latch uses SetEvent directly and this is not used.
    2020              :  */
    2021              : void
    2022       169872 : WakeupMyProc(void)
    2023              : {
    2024              : #if defined(WAIT_USE_SELF_PIPE)
    2025              :     if (waiting)
    2026              :         sendSelfPipeByte();
    2027              : #else
    2028       169872 :     if (waiting)
    2029       169872 :         kill(MyProcPid, SIGURG);
    2030              : #endif
    2031       169872 : }
    2032              : 
    2033              : /* Similar to WakeupMyProc, but wake up another process */
    2034              : void
    2035      1112297 : WakeupOtherProc(int pid)
    2036              : {
    2037      1112297 :     kill(pid, SIGURG);
    2038      1112297 : }
    2039              : #endif
        

Generated by: LCOV version 2.0-1