LCOV - code coverage report
Current view: top level - src/backend/storage/ipc - latch.c (source / functions)
                                               Hit      Total    Coverage
Test: PostgreSQL 15devel          Lines:       245        278      88.1 %
Date: 2021-12-04 22:09:09         Functions:    19         20      95.0 %
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * latch.c
       4             :  *    Routines for inter-process latches
       5             :  *
       6             :  * The poll() implementation uses the so-called self-pipe trick to overcome the
       7             :  * race condition involved with poll() and setting a global flag in the signal
       8             :  * handler. When a latch is set and the current process is waiting for it, the
       9             :  * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
      10             :  * A signal by itself doesn't interrupt poll() on all platforms, and even on
      11             :  * platforms where it does, a signal that arrives just before the poll() call
      12             :  * does not prevent poll() from entering sleep. An incoming byte on a pipe
      13             :  * however reliably interrupts the sleep, and causes poll() to return
      14             :  * immediately even if the signal arrives before poll() begins.
      15             :  *
      16             :  * The epoll() implementation overcomes the race with a different technique: it
      17             :  * keeps SIGURG blocked and consumes from a signalfd() descriptor instead.  We
      18             :  * don't need to register a signal handler or create our own self-pipe.  We
      19             :  * assume that any system that has Linux epoll() also has Linux signalfd().
      20             :  *
      21             :  * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
      22             :  *
      23             :  * The Windows implementation uses Windows events that are inherited by all
      24             :  * postmaster child processes. There's no need for the self-pipe trick there.
      25             :  *
      26             :  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
      27             :  * Portions Copyright (c) 1994, Regents of the University of California
      28             :  *
      29             :  * IDENTIFICATION
      30             :  *    src/backend/storage/ipc/latch.c
      31             :  *
      32             :  *-------------------------------------------------------------------------
      33             :  */
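The self-pipe technique described above can be seen in isolation in the following minimal sketch (a standalone illustration, not part of latch.c; SIGUSR1, the handler name, and the overall structure are chosen only for demonstration): the handler writes a byte into a non-blocking pipe, and poll() watches the read end, so a signal delivered even just before the poll() call still leaves a readable byte behind.

/* Standalone illustration of the self-pipe trick; not part of latch.c. */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static int  pipefd[2];          /* [0] = read end, [1] = write end */

static void
wakeup_handler(int signo)
{
    int     save_errno = errno; /* a handler must preserve errno */
    char    c = 0;

    (void) signo;
    (void) write(pipefd[1], &c, 1);     /* non-blocking; ignore failure */
    errno = save_errno;
}

int
main(void)
{
    struct pollfd pfd;
    char        buf[16];

    if (pipe(pipefd) < 0)
        return 1;
    fcntl(pipefd[0], F_SETFL, O_NONBLOCK);
    fcntl(pipefd[1], F_SETFL, O_NONBLOCK);
    signal(SIGUSR1, wakeup_handler);

    /* Simulate a wakeup that arrives before poll() is even entered. */
    raise(SIGUSR1);

    pfd.fd = pipefd[0];
    pfd.events = POLLIN;

    /*
     * The byte written by the handler keeps the pipe readable, so poll()
     * returns immediately instead of sleeping forever.
     */
    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
    {
        while (read(pipefd[0], buf, sizeof(buf)) > 0)
            ;                   /* drain all wakeup bytes */
        printf("woken up by self-pipe byte\n");
    }
    return 0;
}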
      34             : #include "postgres.h"
      35             : 
      36             : #include <fcntl.h>
      37             : #include <limits.h>
      38             : #include <signal.h>
      39             : #include <unistd.h>
      40             : #ifdef HAVE_SYS_EPOLL_H
      41             : #include <sys/epoll.h>
      42             : #endif
      43             : #ifdef HAVE_SYS_EVENT_H
      44             : #include <sys/event.h>
      45             : #endif
      46             : #ifdef HAVE_POLL_H
      47             : #include <poll.h>
      48             : #endif
      49             : 
      50             : #include "libpq/pqsignal.h"
      51             : #include "miscadmin.h"
      52             : #include "pgstat.h"
      53             : #include "port/atomics.h"
      54             : #include "portability/instr_time.h"
      55             : #include "postmaster/postmaster.h"
      56             : #include "storage/fd.h"
      57             : #include "storage/ipc.h"
      58             : #include "storage/latch.h"
      59             : #include "storage/pmsignal.h"
      60             : #include "storage/shmem.h"
      61             : #include "utils/memutils.h"
      62             : 
      63             : /*
      64             :  * Select the fd readiness primitive to use. Normally the "most modern"
      65             :  * primitive supported by the OS will be used, but for testing it can be
      66             :  * useful to manually specify the used primitive.  If desired, just add a
      67             :  * define somewhere before this block.
      68             :  */
      69             : #if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
      70             :     defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
      71             : /* don't overwrite manual choice */
      72             : #elif defined(HAVE_SYS_EPOLL_H)
      73             : #define WAIT_USE_EPOLL
      74             : #elif defined(HAVE_KQUEUE)
      75             : #define WAIT_USE_KQUEUE
      76             : #elif defined(HAVE_POLL)
      77             : #define WAIT_USE_POLL
      78             : #elif WIN32
      79             : #define WAIT_USE_WIN32
      80             : #else
      81             : #error "no wait set implementation available"
      82             : #endif
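As the comment notes, a primitive can be forced for testing by defining its symbol ahead of this selection block; for example (illustrative only, whether done by a temporary edit or by passing -DWAIT_USE_POLL through CPPFLAGS):

/* illustrative override, placed (or passed via the preprocessor) before this block */
#define WAIT_USE_POLL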
      83             : 
      84             : #ifdef WAIT_USE_EPOLL
      85             : #include <sys/signalfd.h>
      86             : #endif
      87             : 
      88             : /* typedef in latch.h */
      89             : struct WaitEventSet
      90             : {
      91             :     int         nevents;        /* number of registered events */
      92             :     int         nevents_space;  /* maximum number of events in this set */
      93             : 
      94             :     /*
      95             :      * Array, of nevents_space length, storing the definition of events this
      96             :      * set is waiting for.
      97             :      */
      98             :     WaitEvent  *events;
      99             : 
     100             :     /*
     101             :      * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
     102             :      * said latch, and latch_pos the offset in the ->events array. This is
      103             :      * useful because we check the state of the latch before performing
     104             :      * syscalls related to waiting.
     105             :      */
     106             :     Latch      *latch;
     107             :     int         latch_pos;
     108             : 
     109             :     /*
     110             :      * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
     111             :      * is set so that we'll exit immediately if postmaster death is detected,
     112             :      * instead of returning.
     113             :      */
     114             :     bool        exit_on_postmaster_death;
     115             : 
     116             : #if defined(WAIT_USE_EPOLL)
     117             :     int         epoll_fd;
      118             :     /* epoll_wait returns events in a user-provided array; allocate it once */
     119             :     struct epoll_event *epoll_ret_events;
     120             : #elif defined(WAIT_USE_KQUEUE)
     121             :     int         kqueue_fd;
      122             :     /* kevent returns events in a user-provided array; allocate it once */
     123             :     struct kevent *kqueue_ret_events;
     124             :     bool        report_postmaster_not_running;
     125             : #elif defined(WAIT_USE_POLL)
      126             :     /* poll() must be passed the full set of events on every call; prepare it once */
     127             :     struct pollfd *pollfds;
     128             : #elif defined(WAIT_USE_WIN32)
     129             : 
     130             :     /*
      131             :      * Array of Windows events. The first element always contains
     132             :      * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
     133             :      * event->pos + 1).
     134             :      */
     135             :     HANDLE     *handles;
     136             : #endif
     137             : };
     138             : 
      139             : /* A common WaitEventSet used to implement WaitLatch() */
     140             : static WaitEventSet *LatchWaitSet;
     141             : 
     142             : /* The position of the latch in LatchWaitSet. */
     143             : #define LatchWaitSetLatchPos 0
     144             : 
     145             : #ifndef WIN32
     146             : /* Are we currently in WaitLatch? The signal handler would like to know. */
     147             : static volatile sig_atomic_t waiting = false;
     148             : #endif
     149             : 
     150             : #ifdef WAIT_USE_EPOLL
     151             : /* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
     152             : static int  signal_fd = -1;
     153             : #endif
     154             : 
     155             : #if defined(WAIT_USE_POLL)
     156             : /* Read and write ends of the self-pipe */
     157             : static int  selfpipe_readfd = -1;
     158             : static int  selfpipe_writefd = -1;
     159             : 
     160             : /* Process owning the self-pipe --- needed for checking purposes */
     161             : static int  selfpipe_owner_pid = 0;
     162             : 
     163             : /* Private function prototypes */
     164             : static void latch_sigurg_handler(SIGNAL_ARGS);
     165             : static void sendSelfPipeByte(void);
     166             : #endif
     167             : 
     168             : #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
     169             : static void drain(void);
     170             : #endif
     171             : 
     172             : #if defined(WAIT_USE_EPOLL)
     173             : static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
     174             : #elif defined(WAIT_USE_KQUEUE)
     175             : static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
     176             : #elif defined(WAIT_USE_POLL)
     177             : static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
     178             : #elif defined(WAIT_USE_WIN32)
     179             : static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
     180             : #endif
     181             : 
     182             : static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
     183             :                                         WaitEvent *occurred_events, int nevents);
     184             : 
     185             : /*
     186             :  * Initialize the process-local latch infrastructure.
     187             :  *
     188             :  * This must be called once during startup of any process that can wait on
     189             :  * latches, before it issues any InitLatch() or OwnLatch() calls.
     190             :  */
     191             : void
     192       20518 : InitializeLatchSupport(void)
     193             : {
     194             : #if defined(WAIT_USE_POLL)
     195             :     int         pipefd[2];
     196             : 
     197             :     if (IsUnderPostmaster)
     198             :     {
     199             :         /*
     200             :          * We might have inherited connections to a self-pipe created by the
     201             :          * postmaster.  It's critical that child processes create their own
     202             :          * self-pipes, of course, and we really want them to close the
     203             :          * inherited FDs for safety's sake.
     204             :          */
     205             :         if (selfpipe_owner_pid != 0)
     206             :         {
     207             :             /* Assert we go through here but once in a child process */
     208             :             Assert(selfpipe_owner_pid != MyProcPid);
     209             :             /* Release postmaster's pipe FDs; ignore any error */
     210             :             (void) close(selfpipe_readfd);
     211             :             (void) close(selfpipe_writefd);
     212             :             /* Clean up, just for safety's sake; we'll set these below */
     213             :             selfpipe_readfd = selfpipe_writefd = -1;
     214             :             selfpipe_owner_pid = 0;
     215             :             /* Keep fd.c's accounting straight */
     216             :             ReleaseExternalFD();
     217             :             ReleaseExternalFD();
     218             :         }
     219             :         else
     220             :         {
     221             :             /*
     222             :              * Postmaster didn't create a self-pipe ... or else we're in an
     223             :              * EXEC_BACKEND build, in which case it doesn't matter since the
     224             :              * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
     225             :              * fd.c won't have state to clean up, either.
     226             :              */
     227             :             Assert(selfpipe_readfd == -1);
     228             :         }
     229             :     }
     230             :     else
     231             :     {
     232             :         /* In postmaster or standalone backend, assert we do this but once */
     233             :         Assert(selfpipe_readfd == -1);
     234             :         Assert(selfpipe_owner_pid == 0);
     235             :     }
     236             : 
     237             :     /*
     238             :      * Set up the self-pipe that allows a signal handler to wake up the
     239             :      * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
     240             :      * that SetLatch won't block if the event has already been set many times
     241             :      * filling the kernel buffer. Make the read-end non-blocking too, so that
     242             :      * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
     243             :      * Also, make both FDs close-on-exec, since we surely do not want any
     244             :      * child processes messing with them.
     245             :      */
     246             :     if (pipe(pipefd) < 0)
     247             :         elog(FATAL, "pipe() failed: %m");
     248             :     if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
     249             :         elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
     250             :     if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
     251             :         elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
     252             :     if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
     253             :         elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
     254             :     if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
     255             :         elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");
     256             : 
     257             :     selfpipe_readfd = pipefd[0];
     258             :     selfpipe_writefd = pipefd[1];
     259             :     selfpipe_owner_pid = MyProcPid;
     260             : 
     261             :     /* Tell fd.c about these two long-lived FDs */
     262             :     ReserveExternalFD();
     263             :     ReserveExternalFD();
     264             : 
     265             :     pqsignal(SIGURG, latch_sigurg_handler);
     266             : #endif
     267             : 
     268             : #ifdef WAIT_USE_EPOLL
     269             :     sigset_t    signalfd_mask;
     270             : 
     271             :     /* Block SIGURG, because we'll receive it through a signalfd. */
     272       20518 :     sigaddset(&UnBlockSig, SIGURG);
     273             : 
     274             :     /* Set up the signalfd to receive SIGURG notifications. */
     275       20518 :     sigemptyset(&signalfd_mask);
     276       20518 :     sigaddset(&signalfd_mask, SIGURG);
     277       20518 :     signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
     278       20518 :     if (signal_fd < 0)
     279           0 :         elog(FATAL, "signalfd() failed");
     280       20518 :     ReserveExternalFD();
     281             : #endif
     282             : 
     283             : #ifdef WAIT_USE_KQUEUE
     284             :     /* Ignore SIGURG, because we'll receive it via kqueue. */
     285             :     pqsignal(SIGURG, SIG_IGN);
     286             : #endif
     287       20518 : }
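The signalfd()-plus-epoll technique that this function sets up can likewise be illustrated with a standalone, Linux-only sketch (not PostgreSQL code): SIGURG stays blocked and its deliveries are instead consumed by reading a signalfd descriptor registered with epoll.

/* Standalone illustration of signalfd() + epoll (Linux only); not latch.c. */
#include <signal.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <unistd.h>

int
main(void)
{
    sigset_t    mask;
    int         sfd, epfd;
    struct epoll_event ev, got;
    struct signalfd_siginfo info;

    /* Keep SIGURG blocked; deliveries are consumed via the signalfd. */
    sigemptyset(&mask);
    sigaddset(&mask, SIGURG);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sfd = signalfd(-1, &mask, SFD_NONBLOCK | SFD_CLOEXEC);
    epfd = epoll_create1(EPOLL_CLOEXEC);

    ev.events = EPOLLIN;
    ev.data.fd = sfd;
    epoll_ctl(epfd, EPOLL_CTL_ADD, sfd, &ev);

    /* Pretend some other process just signalled us. */
    kill(getpid(), SIGURG);

    if (epoll_wait(epfd, &got, 1, -1) > 0)
    {
        /* Drain pending notifications so the fd stops polling readable. */
        while (read(sfd, &info, sizeof(info)) == sizeof(info))
            ;
        printf("SIGURG consumed via signalfd\n");
    }

    close(sfd);
    close(epfd);
    return 0;
}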
     288             : 
     289             : void
     290       20518 : InitializeLatchWaitSet(void)
     291             : {
     292             :     int         latch_pos PG_USED_FOR_ASSERTS_ONLY;
     293             : 
     294             :     Assert(LatchWaitSet == NULL);
     295             : 
     296             :     /* Set up the WaitEventSet used by WaitLatch(). */
     297       20518 :     LatchWaitSet = CreateWaitEventSet(TopMemoryContext, 2);
     298       20518 :     latch_pos = AddWaitEventToSet(LatchWaitSet, WL_LATCH_SET, PGINVALID_SOCKET,
     299             :                                   MyLatch, NULL);
     300       20518 :     if (IsUnderPostmaster)
     301       18568 :         AddWaitEventToSet(LatchWaitSet, WL_EXIT_ON_PM_DEATH,
     302             :                           PGINVALID_SOCKET, NULL, NULL);
     303             : 
     304             :     Assert(latch_pos == LatchWaitSetLatchPos);
     305       20518 : }
     306             : 
     307             : void
     308           0 : ShutdownLatchSupport(void)
     309             : {
     310             : #if defined(WAIT_USE_POLL)
     311             :     pqsignal(SIGURG, SIG_IGN);
     312             : #endif
     313             : 
     314           0 :     if (LatchWaitSet)
     315             :     {
     316           0 :         FreeWaitEventSet(LatchWaitSet);
     317           0 :         LatchWaitSet = NULL;
     318             :     }
     319             : 
     320             : #if defined(WAIT_USE_POLL)
     321             :     close(selfpipe_readfd);
     322             :     close(selfpipe_writefd);
     323             :     selfpipe_readfd = -1;
     324             :     selfpipe_writefd = -1;
     325             :     selfpipe_owner_pid = InvalidPid;
     326             : #endif
     327             : 
     328             : #if defined(WAIT_USE_EPOLL)
     329           0 :     close(signal_fd);
     330           0 :     signal_fd = -1;
     331             : #endif
     332           0 : }
     333             : 
     334             : /*
     335             :  * Initialize a process-local latch.
     336             :  */
     337             : void
     338       20518 : InitLatch(Latch *latch)
     339             : {
     340       20518 :     latch->is_set = false;
     341       20518 :     latch->maybe_sleeping = false;
     342       20518 :     latch->owner_pid = MyProcPid;
     343       20518 :     latch->is_shared = false;
     344             : 
     345             : #if defined(WAIT_USE_POLL)
     346             :     /* Assert InitializeLatchSupport has been called in this process */
     347             :     Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
     348             : #elif defined(WAIT_USE_WIN32)
     349             :     latch->event = CreateEvent(NULL, TRUE, FALSE, NULL);
     350             :     if (latch->event == NULL)
     351             :         elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
     352             : #endif                          /* WIN32 */
     353       20518 : }
     354             : 
     355             : /*
     356             :  * Initialize a shared latch that can be set from other processes. The latch
     357             :  * is initially owned by no-one; use OwnLatch to associate it with the
     358             :  * current process.
     359             :  *
     360             :  * InitSharedLatch needs to be called in postmaster before forking child
     361             :  * processes, usually right after allocating the shared memory block
     362             :  * containing the latch with ShmemInitStruct. (The Unix implementation
     363             :  * doesn't actually require that, but the Windows one does.) Because of
     364             :  * this restriction, we have no concurrency issues to worry about here.
     365             :  *
     366             :  * Note that other handles created in this module are never marked as
     367             :  * inheritable.  Thus we do not need to worry about cleaning up child
     368             :  * process references to postmaster-private latches or WaitEventSets.
     369             :  */
     370             : void
     371      320426 : InitSharedLatch(Latch *latch)
     372             : {
     373             : #ifdef WIN32
     374             :     SECURITY_ATTRIBUTES sa;
     375             : 
     376             :     /*
     377             :      * Set up security attributes to specify that the events are inherited.
     378             :      */
     379             :     ZeroMemory(&sa, sizeof(sa));
     380             :     sa.nLength = sizeof(sa);
     381             :     sa.bInheritHandle = TRUE;
     382             : 
     383             :     latch->event = CreateEvent(&sa, TRUE, FALSE, NULL);
     384             :     if (latch->event == NULL)
     385             :         elog(ERROR, "CreateEvent failed: error code %lu", GetLastError());
     386             : #endif
     387             : 
     388      320426 :     latch->is_set = false;
     389      320426 :     latch->maybe_sleeping = false;
     390      320426 :     latch->owner_pid = 0;
     391      320426 :     latch->is_shared = true;
     392      320426 : }
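A rough sketch of the shared-latch lifecycle described above, under stated assumptions: MySharedStruct, the shmem key, and the function names below are invented purely for illustration.

/* Hypothetical sketch; MySharedStruct and the function names are invented. */
typedef struct MySharedStruct
{
    Latch       latch;
    /* ... other shared state, with its own locking ... */
} MySharedStruct;

static void
MyModuleShmemInit_sketch(void)      /* run in the postmaster, before forking */
{
    bool        found;
    MySharedStruct *shared;

    shared = (MySharedStruct *)
        ShmemInitStruct("my_module_state", sizeof(MySharedStruct), &found);
    if (!found)
        InitSharedLatch(&shared->latch);
}

static void
MyWorkerAttach_sketch(MySharedStruct *shared)   /* run in the waiting child */
{
    OwnLatch(&shared->latch);   /* after this, WaitLatch() on it is legal */
}

static void
MyWakeWorker_sketch(MySharedStruct *shared)     /* run in any other backend */
{
    SetLatch(&shared->latch);   /* wakes the owner if it is waiting */
}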
     393             : 
     394             : /*
     395             :  * Associate a shared latch with the current process, allowing it to
     396             :  * wait on the latch.
     397             :  *
     398             :  * Although there is a sanity check for latch-already-owned, we don't do
     399             :  * any sort of locking here, meaning that we could fail to detect the error
     400             :  * if two processes try to own the same latch at about the same time.  If
     401             :  * there is any risk of that, caller must provide an interlock to prevent it.
     402             :  */
     403             : void
     404       18630 : OwnLatch(Latch *latch)
     405             : {
     406             :     /* Sanity checks */
     407             :     Assert(latch->is_shared);
     408             : 
     409             : #if defined(WAIT_USE_POLL)
     410             :     /* Assert InitializeLatchSupport has been called in this process */
     411             :     Assert(selfpipe_readfd >= 0 && selfpipe_owner_pid == MyProcPid);
     412             : #endif
     413             : 
     414       18630 :     if (latch->owner_pid != 0)
     415           0 :         elog(ERROR, "latch already owned");
     416             : 
     417       18630 :     latch->owner_pid = MyProcPid;
     418       18630 : }
     419             : 
     420             : /*
     421             :  * Disown a shared latch currently owned by the current process.
     422             :  */
     423             : void
     424       18578 : DisownLatch(Latch *latch)
     425             : {
     426             :     Assert(latch->is_shared);
     427             :     Assert(latch->owner_pid == MyProcPid);
     428             : 
     429       18578 :     latch->owner_pid = 0;
     430       18578 : }
     431             : 
     432             : /*
     433             :  * Wait for a given latch to be set, or for postmaster death, or until timeout
     434             :  * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
     435             :  * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
     436             :  * function returns immediately.
     437             :  *
     438             :  * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
     439             :  * is given.  Although it is declared as "long", we don't actually support
     440             :  * timeouts longer than INT_MAX milliseconds.  Note that some extra overhead
     441             :  * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
     442             :  *
     443             :  * The latch must be owned by the current process, ie. it must be a
     444             :  * process-local latch initialized with InitLatch, or a shared latch
     445             :  * associated with the current process by calling OwnLatch.
     446             :  *
     447             :  * Returns bit mask indicating which condition(s) caused the wake-up. Note
     448             :  * that if multiple wake-up conditions are true, there is no guarantee that
     449             :  * we return all of them in one call, but we will return at least one.
     450             :  */
     451             : int
     452      783594 : WaitLatch(Latch *latch, int wakeEvents, long timeout,
     453             :           uint32 wait_event_info)
     454             : {
     455             :     WaitEvent   event;
     456             : 
     457             :     /* Postmaster-managed callers must handle postmaster death somehow. */
     458             :     Assert(!IsUnderPostmaster ||
     459             :            (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
     460             :            (wakeEvents & WL_POSTMASTER_DEATH));
     461             : 
     462             :     /*
     463             :      * Some callers may have a latch other than MyLatch, or no latch at all,
     464             :      * or want to handle postmaster death differently.  It's cheap to assign
     465             :      * those, so just do it every time.
     466             :      */
     467      783594 :     if (!(wakeEvents & WL_LATCH_SET))
     468           0 :         latch = NULL;
     469      783594 :     ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch);
     470      783594 :     LatchWaitSet->exit_on_postmaster_death =
     471      783594 :         ((wakeEvents & WL_EXIT_ON_PM_DEATH) != 0);
     472             : 
     473      783594 :     if (WaitEventSetWait(LatchWaitSet,
     474      783594 :                          (wakeEvents & WL_TIMEOUT) ? timeout : -1,
     475             :                          &event, 1,
     476             :                          wait_event_info) == 0)
     477       20462 :         return WL_TIMEOUT;
     478             :     else
     479      763102 :         return event.events;
     480             : }
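For reference, a minimal sketch of the conventional calling pattern (wait at the bottom of the loop, reset before re-checking for work); WAIT_EVENT_PG_SLEEP is used only as an example wait_event_info value, and shutdown handling is elided.

/* Sketch of the conventional latch loop (illustrative, termination elided). */
static void
latch_loop_sketch(void)
{
    for (;;)
    {
        ResetLatch(MyLatch);

        /* check for, and perform, any pending work here ... */

        CHECK_FOR_INTERRUPTS();

        (void) WaitLatch(MyLatch,
                         WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
                         1000L, /* also wake up at least once per second */
                         WAIT_EVENT_PG_SLEEP);
    }
}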
     481             : 
     482             : /*
     483             :  * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
     484             :  * conditions.
     485             :  *
     486             :  * When waiting on a socket, EOF and error conditions always cause the socket
     487             :  * to be reported as readable/writable/connected, so that the caller can deal
     488             :  * with the condition.
     489             :  *
     490             :  * wakeEvents must include either WL_EXIT_ON_PM_DEATH for automatic exit
     491             :  * if the postmaster dies or WL_POSTMASTER_DEATH for a flag set in the
     492             :  * return value if the postmaster dies.  The latter is useful for rare cases
     493             :  * where some behavior other than immediate exit is needed.
     494             :  *
     495             :  * NB: These days this is just a wrapper around the WaitEventSet API. When
     496             :  * using a latch very frequently, consider creating a longer living
     497             :  * WaitEventSet instead; that's more efficient.
     498             :  */
     499             : int
     500       78310 : WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
     501             :                   long timeout, uint32 wait_event_info)
     502             : {
     503       78310 :     int         ret = 0;
     504             :     int         rc;
     505             :     WaitEvent   event;
     506       78310 :     WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
     507             : 
     508       78310 :     if (wakeEvents & WL_TIMEOUT)
     509             :         Assert(timeout >= 0);
     510             :     else
     511       18064 :         timeout = -1;
     512             : 
     513       78310 :     if (wakeEvents & WL_LATCH_SET)
     514       78150 :         AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET,
     515             :                           latch, NULL);
     516             : 
     517             :     /* Postmaster-managed callers must handle postmaster death somehow. */
     518             :     Assert(!IsUnderPostmaster ||
     519             :            (wakeEvents & WL_EXIT_ON_PM_DEATH) ||
     520             :            (wakeEvents & WL_POSTMASTER_DEATH));
     521             : 
     522       78310 :     if ((wakeEvents & WL_POSTMASTER_DEATH) && IsUnderPostmaster)
     523           0 :         AddWaitEventToSet(set, WL_POSTMASTER_DEATH, PGINVALID_SOCKET,
     524             :                           NULL, NULL);
     525             : 
     526       78310 :     if ((wakeEvents & WL_EXIT_ON_PM_DEATH) && IsUnderPostmaster)
     527       78310 :         AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
     528             :                           NULL, NULL);
     529             : 
     530       78310 :     if (wakeEvents & WL_SOCKET_MASK)
     531             :     {
     532             :         int         ev;
     533             : 
     534       78310 :         ev = wakeEvents & WL_SOCKET_MASK;
     535       78310 :         AddWaitEventToSet(set, ev, sock, NULL, NULL);
     536             :     }
     537             : 
     538       78310 :     rc = WaitEventSetWait(set, timeout, &event, 1, wait_event_info);
     539             : 
     540       78310 :     if (rc == 0)
     541        2182 :         ret |= WL_TIMEOUT;
     542             :     else
     543             :     {
     544       76128 :         ret |= event.events & (WL_LATCH_SET |
     545             :                                WL_POSTMASTER_DEATH |
     546             :                                WL_SOCKET_MASK);
     547             :     }
     548             : 
     549       78310 :     FreeWaitEventSet(set);
     550             : 
     551       78310 :     return ret;
     552             : }
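Following the NB above, a hedged sketch of the longer-lived alternative, building one WaitEventSet and reusing it across iterations; sock is assumed to come from the caller, and WAIT_EVENT_CLIENT_READ is only an example wait_event_info value.

/* Sketch of a reusable wait set for MyLatch plus one socket (illustrative). */
static void
wait_loop_sketch(pgsocket sock)
{
    WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
    WaitEvent   occurred;

    AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
    AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
    AddWaitEventToSet(set, WL_SOCKET_READABLE, sock, NULL, NULL);

    for (;;)
    {
        int         nready;

        nready = WaitEventSetWait(set, -1 /* block indefinitely */ ,
                                  &occurred, 1, WAIT_EVENT_CLIENT_READ);
        if (nready == 0)
            continue;           /* only possible when a timeout is used */

        if (occurred.events & WL_LATCH_SET)
        {
            ResetLatch(MyLatch);
            /* handle latch-driven work here ... */
        }
        if (occurred.events & WL_SOCKET_READABLE)
        {
            /* data (or EOF/error) is available on sock; consume it here ... */
        }
    }

    /* not reached in this sketch */
    FreeWaitEventSet(set);
}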
     553             : 
     554             : /*
     555             :  * Sets a latch and wakes up anyone waiting on it.
     556             :  *
     557             :  * This is cheap if the latch is already set, otherwise not so much.
     558             :  *
     559             :  * NB: when calling this in a signal handler, be sure to save and restore
     560             :  * errno around it.  (That's standard practice in most signal handlers, of
     561             :  * course, but we used to omit it in handlers that only set a flag.)
     562             :  *
     563             :  * NB: this function is called from critical sections and signal handlers so
     564             :  * throwing an error is not a good idea.
     565             :  */
     566             : void
     567      948718 : SetLatch(Latch *latch)
     568             : {
     569             : #ifndef WIN32
     570             :     pid_t       owner_pid;
     571             : #else
     572             :     HANDLE      handle;
     573             : #endif
     574             : 
     575             :     /*
     576             :      * The memory barrier has to be placed here to ensure that any flag
     577             :      * variables possibly changed by this process have been flushed to main
     578             :      * memory, before we check/set is_set.
     579             :      */
     580      948718 :     pg_memory_barrier();
     581             : 
     582             :     /* Quick exit if already set */
     583      948718 :     if (latch->is_set)
     584      131982 :         return;
     585             : 
     586      816736 :     latch->is_set = true;
     587             : 
     588      816736 :     pg_memory_barrier();
     589      816736 :     if (!latch->maybe_sleeping)
     590      120490 :         return;
     591             : 
     592             : #ifndef WIN32
     593             : 
     594             :     /*
     595             :      * See if anyone's waiting for the latch. It can be the current process if
      596             :      * we're in a signal handler; we then write to the self-pipe or send
      597             :      * ourselves SIGURG to wake up WaitEventSetWaitBlock() without races. If it's
     598             :      * another process, send a signal.
     599             :      *
     600             :      * Fetch owner_pid only once, in case the latch is concurrently getting
     601             :      * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
     602             :      * guaranteed to be true! In practice, the effective range of pid_t fits
     603             :      * in a 32 bit integer, and so should be atomic. In the worst case, we
     604             :      * might end up signaling the wrong process. Even then, you're very
     605             :      * unlucky if a process with that bogus pid exists and belongs to
     606             :      * Postgres; and PG database processes should handle excess SIGUSR1
      607             :      * Postgres; and PG database processes should handle excess SIGURG
     608             :      *
     609             :      * Another sort of race condition that's possible here is for a new
     610             :      * process to own the latch immediately after we look, so we don't signal
     611             :      * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
     612             :      * the standard coding convention of waiting at the bottom of their loops,
     613             :      * not the top, so that they'll correctly process latch-setting events
     614             :      * that happen before they enter the loop.
     615             :      */
     616      696246 :     owner_pid = latch->owner_pid;
     617      696246 :     if (owner_pid == 0)
     618           0 :         return;
     619      696246 :     else if (owner_pid == MyProcPid)
     620             :     {
     621             : #if defined(WAIT_USE_POLL)
     622             :         if (waiting)
     623             :             sendSelfPipeByte();
     624             : #else
     625       13988 :         if (waiting)
     626       13988 :             kill(MyProcPid, SIGURG);
     627             : #endif
     628             :     }
     629             :     else
     630      682258 :         kill(owner_pid, SIGURG);
     631             : 
     632             : #else
     633             : 
     634             :     /*
     635             :      * See if anyone's waiting for the latch. It can be the current process if
     636             :      * we're in a signal handler.
     637             :      *
     638             :      * Use a local variable here just in case somebody changes the event field
     639             :      * concurrently (which really should not happen).
     640             :      */
     641             :     handle = latch->event;
     642             :     if (handle)
     643             :     {
     644             :         SetEvent(handle);
     645             : 
     646             :         /*
     647             :          * Note that we silently ignore any errors. We might be in a signal
     648             :          * handler or other critical path where it's not safe to call elog().
     649             :          */
     650             :     }
     651             : #endif
     652             : 
     653             : }
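Tying back to the NB above SetLatch(), a minimal sketch of a signal handler that sets a latch while preserving errno (the handler name is invented):

/* Sketch of a signal handler that sets a latch; the name is invented. */
static void
my_wakeup_handler_sketch(SIGNAL_ARGS)
{
    int         save_errno = errno;

    /* record whatever condition this handler is responsible for ... */

    SetLatch(MyLatch);

    errno = save_errno;
}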
     654             : 
     655             : /*
     656             :  * Clear the latch. Calling WaitLatch after this will sleep, unless
     657             :  * the latch is set again before the WaitLatch call.
     658             :  */
     659             : void
     660     1816298 : ResetLatch(Latch *latch)
     661             : {
     662             :     /* Only the owner should reset the latch */
     663             :     Assert(latch->owner_pid == MyProcPid);
     664             :     Assert(latch->maybe_sleeping == false);
     665             : 
     666     1816298 :     latch->is_set = false;
     667             : 
     668             :     /*
     669             :      * Ensure that the write to is_set gets flushed to main memory before we
     670             :      * examine any flag variables.  Otherwise a concurrent SetLatch might
     671             :      * falsely conclude that it needn't signal us, even though we have missed
     672             :      * seeing some flag updates that SetLatch was supposed to inform us of.
     673             :      */
     674     1816298 :     pg_memory_barrier();
     675     1816298 : }
     676             : 
     677             : /*
     678             :  * Create a WaitEventSet with space for nevents different events to wait for.
     679             :  *
     680             :  * These events can then be efficiently waited upon together, using
     681             :  * WaitEventSetWait().
     682             :  */
     683             : WaitEventSet *
     684      111790 : CreateWaitEventSet(MemoryContext context, int nevents)
     685             : {
     686             :     WaitEventSet *set;
     687             :     char       *data;
     688      111790 :     Size        sz = 0;
     689             : 
     690             :     /*
     691             :      * Use MAXALIGN size/alignment to guarantee that later uses of memory are
     692             :      * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
     693             :      * platforms, but earlier allocations like WaitEventSet and WaitEvent
      694             :      * might not be sized to guarantee that when purely using sizeof().
     695             :      */
     696      111790 :     sz += MAXALIGN(sizeof(WaitEventSet));
     697      111790 :     sz += MAXALIGN(sizeof(WaitEvent) * nevents);
     698             : 
     699             : #if defined(WAIT_USE_EPOLL)
     700      111790 :     sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
     701             : #elif defined(WAIT_USE_KQUEUE)
     702             :     sz += MAXALIGN(sizeof(struct kevent) * nevents);
     703             : #elif defined(WAIT_USE_POLL)
     704             :     sz += MAXALIGN(sizeof(struct pollfd) * nevents);
     705             : #elif defined(WAIT_USE_WIN32)
     706             :     /* need space for the pgwin32_signal_event */
     707             :     sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
     708             : #endif
     709             : 
     710      111790 :     data = (char *) MemoryContextAllocZero(context, sz);
     711             : 
     712      111790 :     set = (WaitEventSet *) data;
     713      111790 :     data += MAXALIGN(sizeof(WaitEventSet));
     714             : 
     715      111790 :     set->events = (WaitEvent *) data;
     716      111790 :     data += MAXALIGN(sizeof(WaitEvent) * nevents);
     717             : 
     718             : #if defined(WAIT_USE_EPOLL)
     719      111790 :     set->epoll_ret_events = (struct epoll_event *) data;
     720      111790 :     data += MAXALIGN(sizeof(struct epoll_event) * nevents);
     721             : #elif defined(WAIT_USE_KQUEUE)
     722             :     set->kqueue_ret_events = (struct kevent *) data;
     723             :     data += MAXALIGN(sizeof(struct kevent) * nevents);
     724             : #elif defined(WAIT_USE_POLL)
     725             :     set->pollfds = (struct pollfd *) data;
     726             :     data += MAXALIGN(sizeof(struct pollfd) * nevents);
     727             : #elif defined(WAIT_USE_WIN32)
     728             :     set->handles = (HANDLE) data;
     729             :     data += MAXALIGN(sizeof(HANDLE) * nevents);
     730             : #endif
     731             : 
     732      111790 :     set->latch = NULL;
     733      111790 :     set->nevents_space = nevents;
     734      111790 :     set->exit_on_postmaster_death = false;
     735             : 
     736             : #if defined(WAIT_USE_EPOLL)
     737      111790 :     if (!AcquireExternalFD())
     738             :     {
     739             :         /* treat this as though epoll_create1 itself returned EMFILE */
     740           0 :         elog(ERROR, "epoll_create1 failed: %m");
     741             :     }
     742      111790 :     set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
     743      111790 :     if (set->epoll_fd < 0)
     744             :     {
     745           0 :         ReleaseExternalFD();
     746           0 :         elog(ERROR, "epoll_create1 failed: %m");
     747             :     }
     748             : #elif defined(WAIT_USE_KQUEUE)
     749             :     if (!AcquireExternalFD())
     750             :     {
     751             :         /* treat this as though kqueue itself returned EMFILE */
     752             :         elog(ERROR, "kqueue failed: %m");
     753             :     }
     754             :     set->kqueue_fd = kqueue();
     755             :     if (set->kqueue_fd < 0)
     756             :     {
     757             :         ReleaseExternalFD();
     758             :         elog(ERROR, "kqueue failed: %m");
     759             :     }
     760             :     if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
     761             :     {
     762             :         int         save_errno = errno;
     763             : 
     764             :         close(set->kqueue_fd);
     765             :         ReleaseExternalFD();
     766             :         errno = save_errno;
     767             :         elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
     768             :     }
     769             :     set->report_postmaster_not_running = false;
     770             : #elif defined(WAIT_USE_WIN32)
     771             : 
     772             :     /*
     773             :      * To handle signals while waiting, we need to add a win32 specific event.
     774             :      * We accounted for the additional event at the top of this routine. See
     775             :      * port/win32/signal.c for more details.
     776             :      *
     777             :      * Note: pgwin32_signal_event should be first to ensure that it will be
     778             :      * reported when multiple events are set.  We want to guarantee that
     779             :      * pending signals are serviced.
     780             :      */
     781             :     set->handles[0] = pgwin32_signal_event;
     782             :     StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
     783             : #endif
     784             : 
     785      111790 :     return set;
     786             : }
     787             : 
     788             : /*
     789             :  * Free a previously created WaitEventSet.
     790             :  *
     791             :  * Note: preferably, this shouldn't have to free any resources that could be
     792             :  * inherited across an exec().  If it did, we'd likely leak those resources in
     793             :  * many scenarios.  For the epoll case, we ensure that by setting EPOLL_CLOEXEC
     794             :  * when the FD is created.  For the Windows case, we assume that the handles
     795             :  * involved are non-inheritable.
     796             :  */
     797             : void
     798       79468 : FreeWaitEventSet(WaitEventSet *set)
     799             : {
     800             : #if defined(WAIT_USE_EPOLL)
     801       79468 :     close(set->epoll_fd);
     802       79468 :     ReleaseExternalFD();
     803             : #elif defined(WAIT_USE_KQUEUE)
     804             :     close(set->kqueue_fd);
     805             :     ReleaseExternalFD();
     806             : #elif defined(WAIT_USE_WIN32)
     807             :     WaitEvent  *cur_event;
     808             : 
     809             :     for (cur_event = set->events;
     810             :          cur_event < (set->events + set->nevents);
     811             :          cur_event++)
     812             :     {
     813             :         if (cur_event->events & WL_LATCH_SET)
     814             :         {
     815             :             /* uses the latch's HANDLE */
     816             :         }
     817             :         else if (cur_event->events & WL_POSTMASTER_DEATH)
     818             :         {
     819             :             /* uses PostmasterHandle */
     820             :         }
     821             :         else
     822             :         {
     823             :             /* Clean up the event object we created for the socket */
     824             :             WSAEventSelect(cur_event->fd, NULL, 0);
     825             :             WSACloseEvent(set->handles[cur_event->pos + 1]);
     826             :         }
     827             :     }
     828             : #endif
     829             : 
     830       79468 :     pfree(set);
     831       79468 : }
     832             : 
     833             : /* ---
     834             :  * Add an event to the set. Possible events are:
     835             :  * - WL_LATCH_SET: Wait for the latch to be set
     836             :  * - WL_POSTMASTER_DEATH: Wait for postmaster to die
     837             :  * - WL_SOCKET_READABLE: Wait for socket to become readable,
     838             :  *   can be combined in one event with other WL_SOCKET_* events
     839             :  * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
     840             :  *   can be combined with other WL_SOCKET_* events
     841             :  * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
     842             :  *   can be combined with other WL_SOCKET_* events (on non-Windows
     843             :  *   platforms, this is the same as WL_SOCKET_WRITEABLE)
     844             :  * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
     845             :  *
     846             :  * Returns the offset in WaitEventSet->events (starting from 0), which can be
     847             :  * used to modify previously added wait events using ModifyWaitEvent().
     848             :  *
     849             :  * In the WL_LATCH_SET case the latch must be owned by the current process,
     850             :  * i.e. it must be a process-local latch initialized with InitLatch, or a
     851             :  * shared latch associated with the current process by calling OwnLatch.
     852             :  *
     853             :  * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED cases, EOF and error
     854             :  * conditions cause the socket to be reported as readable/writable/connected,
     855             :  * so that the caller can deal with the condition.
     856             :  *
     857             :  * The user_data pointer specified here will be set for the events returned
      858             :  * by WaitEventSetWait(), allowing the caller to easily associate additional
      859             :  * data with events.
     860             :  */
     861             : int
     862      312666 : AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
     863             :                   void *user_data)
     864             : {
     865             :     WaitEvent  *event;
     866             : 
     867             :     /* not enough space */
     868             :     Assert(set->nevents < set->nevents_space);
     869             : 
     870      312666 :     if (events == WL_EXIT_ON_PM_DEATH)
     871             :     {
     872       97092 :         events = WL_POSTMASTER_DEATH;
     873       97092 :         set->exit_on_postmaster_death = true;
     874             :     }
     875             : 
     876      312666 :     if (latch)
     877             :     {
     878      111416 :         if (latch->owner_pid != MyProcPid)
     879           0 :             elog(ERROR, "cannot wait on a latch owned by another process");
     880      111416 :         if (set->latch)
     881           0 :             elog(ERROR, "cannot wait on more than one latch");
     882      111416 :         if ((events & WL_LATCH_SET) != WL_LATCH_SET)
     883           0 :             elog(ERROR, "latch events only support being set");
     884             :     }
     885             :     else
     886             :     {
     887      201250 :         if (events & WL_LATCH_SET)
     888           0 :             elog(ERROR, "cannot wait on latch without a specified latch");
     889             :     }
     890             : 
     891             :     /* waiting for socket readiness without a socket indicates a bug */
     892      312666 :     if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
     893           0 :         elog(ERROR, "cannot wait on socket event without a socket");
     894             : 
     895      312666 :     event = &set->events[set->nevents];
     896      312666 :     event->pos = set->nevents++;
     897      312666 :     event->fd = fd;
     898      312666 :     event->events = events;
     899      312666 :     event->user_data = user_data;
     900             : #ifdef WIN32
     901             :     event->reset = false;
     902             : #endif
     903             : 
     904      312666 :     if (events == WL_LATCH_SET)
     905             :     {
     906      111416 :         set->latch = latch;
     907      111416 :         set->latch_pos = event->pos;
     908             : #if defined(WAIT_USE_POLL)
     909             :         event->fd = selfpipe_readfd;
     910             : #elif defined(WAIT_USE_EPOLL)
     911      111416 :         event->fd = signal_fd;
     912             : #else
     913             :         event->fd = PGINVALID_SOCKET;
     914             : #ifdef WAIT_USE_EPOLL
     915             :         return event->pos;
     916             : #endif
     917             : #endif
     918             :     }
     919      201250 :     else if (events == WL_POSTMASTER_DEATH)
     920             :     {
     921             : #ifndef WIN32
     922      109838 :         event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
     923             : #endif
     924             :     }
     925             : 
     926             :     /* perform wait primitive specific initialization, if needed */
     927             : #if defined(WAIT_USE_EPOLL)
     928      312666 :     WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
     929             : #elif defined(WAIT_USE_KQUEUE)
     930             :     WaitEventAdjustKqueue(set, event, 0);
     931             : #elif defined(WAIT_USE_POLL)
     932             :     WaitEventAdjustPoll(set, event);
     933             : #elif defined(WAIT_USE_WIN32)
     934             :     WaitEventAdjustWin32(set, event);
     935             : #endif
     936             : 
     937      312666 :     return event->pos;
     938             : }
     939             : 
     940             : /*
     941             :  * Change the event mask and, in the WL_LATCH_SET case, the latch associated
     942             :  * with the WaitEvent.  The latch may be changed to NULL to disable the latch
     943             :  * temporarily, and then set back to a latch later.
     944             :  *
     945             :  * 'pos' is the id returned by AddWaitEventToSet.
     946             :  */
     947             : void
     948      985326 : ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
     949             : {
     950             :     WaitEvent  *event;
     951             : #if defined(WAIT_USE_KQUEUE)
     952             :     int         old_events;
     953             : #endif
     954             : 
     955             :     Assert(pos < set->nevents);
     956             : 
     957      985326 :     event = &set->events[pos];
     958             : #if defined(WAIT_USE_KQUEUE)
     959             :     old_events = event->events;
     960             : #endif
     961             : 
     962             :     /*
     963             :      * If neither the event mask nor the associated latch changes, return
     964             :      * early. That's an important optimization for some sockets, where
     965             :      * ModifyWaitEvent is frequently used to switch from waiting for reads to
     966             :      * waiting on writes.
     967             :      */
     968      985326 :     if (events == event->events &&
     969      975500 :         (!(event->events & WL_LATCH_SET) || set->latch == latch))
     970      947716 :         return;
     971             : 
     972       37610 :     if (event->events & WL_LATCH_SET &&
     973       27784 :         events != event->events)
     974             :     {
     975           0 :         elog(ERROR, "cannot modify latch event");
     976             :     }
     977             : 
     978       37610 :     if (event->events & WL_POSTMASTER_DEATH)
     979             :     {
     980           0 :         elog(ERROR, "cannot modify postmaster death event");
     981             :     }
     982             : 
     983             :     /* FIXME: validate event mask */
     984       37610 :     event->events = events;
     985             : 
     986       37610 :     if (events == WL_LATCH_SET)
     987             :     {
     988       27784 :         if (latch && latch->owner_pid != MyProcPid)
     989           0 :             elog(ERROR, "cannot wait on a latch owned by another process");
     990       27784 :         set->latch = latch;
     991             : 
     992             :         /*
     993             :          * On Unix, we don't need to modify the kernel object because the
     994             :          * underlying pipe (if there is one) is the same for all latches so we
     995             :          * can return immediately.  On Windows, we need to update our array of
     996             :          * handles, but we leave the old one in place and tolerate spurious
     997             :          * wakeups if the latch is disabled.
     998             :          */
     999             : #if defined(WAIT_USE_WIN32)
    1000             :         if (!latch)
    1001             :             return;
    1002             : #else
    1003       27784 :         return;
    1004             : #endif
    1005             :     }
    1006             : 
    1007             : #if defined(WAIT_USE_EPOLL)
    1008        9826 :     WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
    1009             : #elif defined(WAIT_USE_KQUEUE)
    1010             :     WaitEventAdjustKqueue(set, event, old_events);
    1011             : #elif defined(WAIT_USE_POLL)
    1012             :     WaitEventAdjustPoll(set, event);
    1013             : #elif defined(WAIT_USE_WIN32)
    1014             :     WaitEventAdjustWin32(set, event);
    1015             : #endif
    1016             : }
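For example (set and sock_pos are assumed to come from earlier CreateWaitEventSet() and AddWaitEventToSet() calls), a socket entry can be flipped between read and write interest without rebuilding the set:

/* Fragment: sock_pos was returned by AddWaitEventToSet() for this socket. */
ModifyWaitEvent(set, sock_pos, WL_SOCKET_WRITEABLE, NULL);  /* wait to send */
/* ... once the outgoing data has been written ... */
ModifyWaitEvent(set, sock_pos, WL_SOCKET_READABLE, NULL);   /* back to reads */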
    1017             : 
    1018             : #if defined(WAIT_USE_EPOLL)
    1019             : /*
    1020             :  * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
    1021             :  */
    1022             : static void
    1023      322492 : WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
    1024             : {
    1025             :     struct epoll_event epoll_ev;
    1026             :     int         rc;
    1027             : 
    1028             :     /* pointer to our event, returned by epoll_wait */
    1029      322492 :     epoll_ev.data.ptr = event;
    1030             :     /* always wait for errors */
    1031      322492 :     epoll_ev.events = EPOLLERR | EPOLLHUP;
    1032             : 
    1033             :     /* prepare pollfd entry once */
    1034      322492 :     if (event->events == WL_LATCH_SET)
    1035             :     {
    1036             :         Assert(set->latch != NULL);
    1037      111416 :         epoll_ev.events |= EPOLLIN;
    1038             :     }
    1039      211076 :     else if (event->events == WL_POSTMASTER_DEATH)
    1040             :     {
    1041      109838 :         epoll_ev.events |= EPOLLIN;
    1042             :     }
    1043             :     else
    1044             :     {
    1045             :         Assert(event->fd != PGINVALID_SOCKET);
    1046             :         Assert(event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE));
    1047             : 
    1048      101238 :         if (event->events & WL_SOCKET_READABLE)
    1049       88186 :             epoll_ev.events |= EPOLLIN;
    1050      101238 :         if (event->events & WL_SOCKET_WRITEABLE)
    1051       13262 :             epoll_ev.events |= EPOLLOUT;
    1052             :     }
    1053             : 
    1054             :     /*
    1055             :      * Even though unused, we also pass epoll_ev as the data argument if
    1056             :      * EPOLL_CTL_DEL is passed as action.  There used to be an epoll bug
    1057             :      * requiring that, and actually it makes the code simpler...
    1058             :      */
    1059      322492 :     rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);
    1060             : 
    1061      322492 :     if (rc < 0)
    1062           0 :         ereport(ERROR,
    1063             :                 (errcode_for_socket_access(),
    1064             :                  errmsg("%s() failed: %m",
    1065             :                         "epoll_ctl")));
    1066      322492 : }
    1067             : #endif
    1068             : 
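/*
 * Editor's illustrative sketch (not part of latch.c): a self-contained,
 * hedged example of the epoll technique used above -- registering a
 * descriptor with epoll_ctl() and stashing a pointer in epoll_ev.data.ptr so
 * that epoll_wait() hands the same pointer back, which is how the code in
 * this file associates kernel readiness with its WaitEvent entries.  Plain
 * Linux/POSIX calls only; "struct my_tag" is a hypothetical stand-in for a
 * WaitEvent.  Level-triggered EPOLLIN mirrors the registration above.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

struct my_tag
{
    int         fd;
    const char *name;
};

int
main(void)
{
    int         pipefd[2];
    int         epfd;
    struct epoll_event ev;
    struct epoll_event ret;
    struct my_tag tag;

    if (pipe(pipefd) != 0)
        return 1;
    epfd = epoll_create1(0);
    if (epfd < 0)
        return 1;

    tag.fd = pipefd[0];
    tag.name = "demo pipe";

    /* Level-triggered read interest, with our pointer as the payload. */
    memset(&ev, 0, sizeof(ev));
    ev.events = EPOLLIN | EPOLLERR | EPOLLHUP;
    ev.data.ptr = &tag;
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) != 0)
        return 1;

    /* Make the pipe readable, then wait; data.ptr comes back unchanged. */
    (void) write(pipefd[1], "x", 1);
    if (epoll_wait(epfd, &ret, 1, -1) == 1)
    {
        struct my_tag *hit = (struct my_tag *) ret.data.ptr;

        printf("ready: %s (fd %d)\n", hit->name, hit->fd);
    }

    close(epfd);
    close(pipefd[0]);
    close(pipefd[1]);
    return 0;
}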
    1069             : #if defined(WAIT_USE_POLL)
    1070             : static void
    1071             : WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
    1072             : {
    1073             :     struct pollfd *pollfd = &set->pollfds[event->pos];
    1074             : 
    1075             :     pollfd->revents = 0;
    1076             :     pollfd->fd = event->fd;
    1077             : 
    1078             :     /* prepare pollfd entry once */
    1079             :     if (event->events == WL_LATCH_SET)
    1080             :     {
    1081             :         Assert(set->latch != NULL);
    1082             :         pollfd->events = POLLIN;
    1083             :     }
    1084             :     else if (event->events == WL_POSTMASTER_DEATH)
    1085             :     {
    1086             :         pollfd->events = POLLIN;
    1087             :     }
    1088             :     else
    1089             :     {
    1090             :         Assert(event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE));
    1091             :         pollfd->events = 0;
    1092             :         if (event->events & WL_SOCKET_READABLE)
    1093             :             pollfd->events |= POLLIN;
    1094             :         if (event->events & WL_SOCKET_WRITEABLE)
    1095             :             pollfd->events |= POLLOUT;
    1096             :     }
    1097             : 
    1098             :     Assert(event->fd != PGINVALID_SOCKET);
    1099             : }
    1100             : #endif
    1101             : 
    1102             : #if defined(WAIT_USE_KQUEUE)
    1103             : 
    1104             : /*
    1105             :  * On most BSD family systems, the udata member of struct kevent is of type
    1106             :  * void *, so we could directly convert to/from WaitEvent *.  Unfortunately,
    1107             :  * NetBSD has it as intptr_t, so here we wallpaper over that difference with
    1108             :  * an lvalue cast.
    1109             :  */
    1110             : #define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))
    1111             : 
    1112             : static inline void
    1113             : WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
    1114             :                          WaitEvent *event)
    1115             : {
    1116             :     k_ev->ident = event->fd;
    1117             :     k_ev->filter = filter;
    1118             :     k_ev->flags = action;
    1119             :     k_ev->fflags = 0;
    1120             :     k_ev->data = 0;
    1121             :     AccessWaitEvent(k_ev) = event;
    1122             : }
    1123             : 
    1124             : static inline void
    1125             : WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
    1126             : {
    1127             :     /* For now postmaster death can only be added, not removed. */
    1128             :     k_ev->ident = PostmasterPid;
    1129             :     k_ev->filter = EVFILT_PROC;
    1130             :     k_ev->flags = EV_ADD;
    1131             :     k_ev->fflags = NOTE_EXIT;
    1132             :     k_ev->data = 0;
    1133             :     AccessWaitEvent(k_ev) = event;
    1134             : }
    1135             : 
    1136             : static inline void
    1137             : WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
    1138             : {
    1139             :     /* For now latch can only be added, not removed. */
    1140             :     k_ev->ident = SIGURG;
    1141             :     k_ev->filter = EVFILT_SIGNAL;
    1142             :     k_ev->flags = EV_ADD;
    1143             :     k_ev->fflags = 0;
    1144             :     k_ev->data = 0;
    1145             :     AccessWaitEvent(k_ev) = event;
    1146             : }
    1147             : 
    1148             : /*
    1149             :  * old_events is the previous event mask, used to compute what has changed.
    1150             :  */
    1151             : static void
    1152             : WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
    1153             : {
    1154             :     int         rc;
    1155             :     struct kevent k_ev[2];
    1156             :     int         count = 0;
    1157             :     bool        new_filt_read = false;
    1158             :     bool        old_filt_read = false;
    1159             :     bool        new_filt_write = false;
    1160             :     bool        old_filt_write = false;
    1161             : 
    1162             :     if (old_events == event->events)
    1163             :         return;
    1164             : 
    1165             :     Assert(event->events != WL_LATCH_SET || set->latch != NULL);
    1166             :     Assert(event->events == WL_LATCH_SET ||
    1167             :            event->events == WL_POSTMASTER_DEATH ||
    1168             :            (event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)));
    1169             : 
    1170             :     if (event->events == WL_POSTMASTER_DEATH)
    1171             :     {
    1172             :         /*
    1173             :          * Unlike all the other implementations, we detect postmaster death
    1174             :          * using process notification instead of waiting on the postmaster
    1175             :          * alive pipe.
    1176             :          */
    1177             :         WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
    1178             :     }
    1179             :     else if (event->events == WL_LATCH_SET)
    1180             :     {
    1181             :         /* We detect latch wakeup using a signal event. */
    1182             :         WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
    1183             :     }
    1184             :     else
    1185             :     {
    1186             :         /*
    1187             :          * We need to compute the adds and deletes required to get from the
    1188             :          * old event mask to the new event mask, since kevent treats readable
    1189             :          * and writable as separate events.
    1190             :          */
    1191             :         if (old_events & WL_SOCKET_READABLE)
    1192             :             old_filt_read = true;
    1193             :         if (event->events & WL_SOCKET_READABLE)
    1194             :             new_filt_read = true;
    1195             :         if (old_events & WL_SOCKET_WRITEABLE)
    1196             :             old_filt_write = true;
    1197             :         if (event->events & WL_SOCKET_WRITEABLE)
    1198             :             new_filt_write = true;
    1199             :         if (old_filt_read && !new_filt_read)
    1200             :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
    1201             :                                      event);
    1202             :         else if (!old_filt_read && new_filt_read)
    1203             :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
    1204             :                                      event);
    1205             :         if (old_filt_write && !new_filt_write)
    1206             :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
    1207             :                                      event);
    1208             :         else if (!old_filt_write && new_filt_write)
    1209             :             WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
    1210             :                                      event);
    1211             :     }
    1212             : 
    1213             :     Assert(count > 0);
    1214             :     Assert(count <= 2);
    1215             : 
    1216             :     rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);
    1217             : 
    1218             :     /*
    1219             :      * When adding the postmaster's pid, we have to consider that it might
    1220             :      * already have exited and perhaps even been replaced by another process
    1221             :      * with the same pid.  If so, we have to defer reporting this as an event
    1222             :      * until the next call to WaitEventSetWaitBlock().
    1223             :      */
    1224             : 
    1225             :     if (rc < 0)
    1226             :     {
    1227             :         if (event->events == WL_POSTMASTER_DEATH &&
    1228             :             (errno == ESRCH || errno == EACCES))
    1229             :             set->report_postmaster_not_running = true;
    1230             :         else
    1231             :             ereport(ERROR,
    1232             :                     (errcode_for_socket_access(),
    1233             :                      errmsg("%s() failed: %m",
    1234             :                             "kevent")));
    1235             :     }
    1236             :     else if (event->events == WL_POSTMASTER_DEATH &&
    1237             :              PostmasterPid != getppid() &&
    1238             :              !PostmasterIsAlive())
    1239             :     {
    1240             :         /*
    1241             :          * The extra PostmasterIsAliveInternal() check prevents false alarms
    1242             :          * on systems that give a different value for getppid() while being
    1243             :          * traced by a debugger.
    1244             :          */
    1245             :         set->report_postmaster_not_running = true;
    1246             :     }
    1247             : }
    1248             : 
    1249             : #endif
    1250             : 
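/*
 * Editor's illustrative sketch (not part of latch.c): a self-contained,
 * hedged example of the kqueue technique used above for latches --
 * registering EVFILT_SIGNAL for SIGURG and then sleeping in kevent() until
 * that signal is delivered.  BSD/macOS only; error handling is minimal, and
 * udata is left as 0 to sidestep the void * vs. intptr_t difference noted
 * above.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

int
main(void)
{
    int         kq = kqueue();
    struct kevent k_ev;

    if (kq < 0)
        return 1;

    /*
     * EVFILT_SIGNAL records signal delivery even when the disposition is
     * SIG_IGN, so no handler is needed for this sketch.
     */
    signal(SIGURG, SIG_IGN);
    EV_SET(&k_ev, SIGURG, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
    if (kevent(kq, &k_ev, 1, NULL, 0, NULL) < 0)
        return 1;

    /* Ask for a wakeup, then block until the SIGURG event is reported. */
    raise(SIGURG);
    if (kevent(kq, NULL, 0, &k_ev, 1, NULL) == 1 &&
        k_ev.filter == EVFILT_SIGNAL)
        printf("woken by SIGURG, %ld deliveries since last check\n",
               (long) k_ev.data);

    close(kq);
    return 0;
}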
    1251             : #if defined(WAIT_USE_WIN32)
    1252             : static void
    1253             : WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
    1254             : {
    1255             :     HANDLE     *handle = &set->handles[event->pos + 1];
    1256             : 
    1257             :     if (event->events == WL_LATCH_SET)
    1258             :     {
    1259             :         Assert(set->latch != NULL);
    1260             :         *handle = set->latch->event;
    1261             :     }
    1262             :     else if (event->events == WL_POSTMASTER_DEATH)
    1263             :     {
    1264             :         *handle = PostmasterHandle;
    1265             :     }
    1266             :     else
    1267             :     {
    1268             :         int         flags = FD_CLOSE;   /* always check for errors/EOF */
    1269             : 
    1270             :         if (event->events & WL_SOCKET_READABLE)
    1271             :             flags |= FD_READ;
    1272             :         if (event->events & WL_SOCKET_WRITEABLE)
    1273             :             flags |= FD_WRITE;
    1274             :         if (event->events & WL_SOCKET_CONNECTED)
    1275             :             flags |= FD_CONNECT;
    1276             : 
    1277             :         if (*handle == WSA_INVALID_EVENT)
    1278             :         {
    1279             :             *handle = WSACreateEvent();
    1280             :             if (*handle == WSA_INVALID_EVENT)
    1281             :                 elog(ERROR, "failed to create event for socket: error code %d",
    1282             :                      WSAGetLastError());
    1283             :         }
    1284             :         if (WSAEventSelect(event->fd, *handle, flags) != 0)
    1285             :             elog(ERROR, "failed to set up event for socket: error code %d",
    1286             :                  WSAGetLastError());
    1287             : 
    1288             :         Assert(event->fd != PGINVALID_SOCKET);
    1289             :     }
    1290             : }
    1291             : #endif
    1292             : 
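/*
 * Editor's illustrative sketch (not part of latch.c): a hedged, minimal
 * Winsock example of the association set up above -- tying a socket to a
 * Windows event object with WSAEventSelect(), waiting on the handle, and
 * reading back which network events fired with WSAEnumNetworkEvents().  A
 * self-addressed UDP datagram is used just to make the socket readable;
 * link with ws2_32.lib.
 */
#include <winsock2.h>
#include <stdio.h>

int
main(void)
{
    WSADATA     wsa;
    SOCKET      s;
    WSAEVENT    ev;
    struct sockaddr_in addr;
    int         addrlen = sizeof(addr);
    WSANETWORKEVENTS resEvents;
    const char *msg = "ping";

    if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
        return 1;
    s = socket(AF_INET, SOCK_DGRAM, 0);

    /* Bind to an ephemeral loopback port and learn its address. */
    ZeroMemory(&addr, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;
    bind(s, (struct sockaddr *) &addr, sizeof(addr));
    getsockname(s, (struct sockaddr *) &addr, &addrlen);

    /* Associate the socket with an event object; this also makes it nonblocking. */
    ev = WSACreateEvent();
    WSAEventSelect(s, ev, FD_READ | FD_CLOSE);

    /* Make the socket readable, then wait on the handle. */
    sendto(s, msg, 4, 0, (struct sockaddr *) &addr, sizeof(addr));
    WaitForMultipleObjects(1, &ev, FALSE, INFINITE);

    /* Find out which of the selected events actually fired. */
    ZeroMemory(&resEvents, sizeof(resEvents));
    WSAEnumNetworkEvents(s, ev, &resEvents);
    if (resEvents.lNetworkEvents & FD_READ)
        printf("socket is readable\n");

    WSACloseEvent(ev);
    closesocket(s);
    WSACleanup();
    return 0;
}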
    1293             : /*
    1294             :  * Wait for events added to the set to happen, or until the timeout is
    1295             :  * reached.  At most nevents of the events that occurred are returned.
    1296             :  *
    1297             :  * If timeout = -1, block until an event occurs; if 0, check sockets for
    1298             :  * readiness, but don't block; if > 0, block for at most timeout milliseconds.
    1299             :  *
    1300             :  * Returns the number of events that occurred, or 0 if the timeout was reached.
    1301             :  *
    1302             :  * Returned events will have the fd, pos, user_data fields set to the
    1303             :  * values associated with the registered event.
    1304             :  */
    1305             : int
    1306     1156870 : WaitEventSetWait(WaitEventSet *set, long timeout,
    1307             :                  WaitEvent *occurred_events, int nevents,
    1308             :                  uint32 wait_event_info)
    1309             : {
    1310     1156870 :     int         returned_events = 0;
    1311             :     instr_time  start_time;
    1312             :     instr_time  cur_time;
    1313     1156870 :     long        cur_timeout = -1;
    1314             : 
    1315             :     Assert(nevents > 0);
    1316             : 
    1317             :     /*
    1318             :      * Initialize timeout if requested.  We must record the current time so
    1319             :      * that we can determine the remaining timeout if interrupted.
    1320             :      */
    1321     1156870 :     if (timeout >= 0)
    1322             :     {
    1323      109586 :         INSTR_TIME_SET_CURRENT(start_time);
    1324             :         Assert(timeout >= 0 && timeout <= INT_MAX);
    1325      109586 :         cur_timeout = timeout;
    1326             :     }
    1327             : 
    1328     1156870 :     pgstat_report_wait_start(wait_event_info);
    1329             : 
    1330             : #ifndef WIN32
    1331     1156870 :     waiting = true;
    1332             : #else
    1333             :     /* Ensure that signals are serviced even if latch is already set */
    1334             :     pgwin32_dispatch_queued_signals();
    1335             : #endif
    1336     2209610 :     while (returned_events == 0)
    1337             :     {
    1338             :         int         rc;
    1339             : 
    1340             :         /*
    1341             :          * Check if the latch is set already. If so, leave the loop
    1342             :          * immediately to avoid blocking again. We don't attempt to report any
    1343             :          * other events that might also be satisfied.
    1344             :          *
    1345             :          * If someone sets the latch between this and the
    1346             :          * WaitEventSetWaitBlock() below, the setter will write a byte to the
    1347             :          * pipe (or signal us and the signal handler will do that), and the
    1348             :          * readiness routine will return immediately.
    1349             :          *
    1350             :          * On Unix, if there's a pending byte in the self-pipe, we'll notice
    1351             :          * it whenever we block. Clearing the pipe only in that case avoids
    1352             :          * having to drain it every time WaitLatchOrSocket() is used. Should
    1353             :          * the pipe buffer fill up, we're still OK, because the pipe is in
    1354             :          * nonblocking mode. That's unlikely to happen, because the self-pipe
    1355             :          * isn't filled unless we're blocking (waiting = true), or from inside
    1356             :          * a signal handler in latch_sigurg_handler().
    1357             :          *
    1358             :          * On Windows, we'll also notice if there's a pending event for the
    1359             :          * latch when blocking, but there's no danger of anything filling up,
    1360             :          * as "Setting an event that is already set has no effect."
    1361             :          *
    1362             :          * Note: we assume that the kernel calls involved in latch management
    1363             :          * will provide adequate synchronization on machines with weak memory
    1364             :          * ordering, so that we cannot miss seeing is_set if a notification
    1365             :          * has already been queued.
    1366             :          */
    1367     1180962 :         if (set->latch && !set->latch->is_set)
    1368             :         {
    1369             :             /* about to sleep on a latch */
    1370     1075078 :             set->latch->maybe_sleeping = true;
    1371     1075078 :             pg_memory_barrier();
    1372             :             /* and recheck */
    1373             :         }
    1374             : 
    1375     1180962 :         if (set->latch && set->latch->is_set)
    1376             :         {
    1377      105530 :             occurred_events->fd = PGINVALID_SOCKET;
    1378      105530 :             occurred_events->pos = set->latch_pos;
    1379      105530 :             occurred_events->user_data =
    1380      105530 :                 set->events[set->latch_pos].user_data;
    1381      105530 :             occurred_events->events = WL_LATCH_SET;
    1382      105530 :             occurred_events++;
    1383      105530 :             returned_events++;
    1384             : 
    1385             :             /* could have been set above */
    1386      105530 :             set->latch->maybe_sleeping = false;
    1387             : 
    1388      105530 :             break;
    1389             :         }
    1390             : 
    1391             :         /*
    1392             :          * Wait for events using the readiness primitive chosen at the top of
    1393             :          * this file. If -1 is returned, a timeout has occurred; if 0, we have
    1394             :          * to retry; anything >= 1 is the number of returned events.
    1395             :          */
    1396     1075432 :         rc = WaitEventSetWaitBlock(set, cur_timeout,
    1397             :                                    occurred_events, nevents);
    1398             : 
    1399     1075402 :         if (set->latch)
    1400             :         {
    1401             :             Assert(set->latch->maybe_sleeping);
    1402     1075030 :             set->latch->maybe_sleeping = false;
    1403             :         }
    1404             : 
    1405     1075402 :         if (rc == -1)
    1406       22658 :             break;              /* timeout occurred */
    1407             :         else
    1408     1052744 :             returned_events = rc;
    1409             : 
    1410             :         /* If we're not done, update cur_timeout for next iteration */
    1411     1052744 :         if (returned_events == 0 && timeout >= 0)
    1412             :         {
    1413       11438 :             INSTR_TIME_SET_CURRENT(cur_time);
    1414       12998 :             INSTR_TIME_SUBTRACT(cur_time, start_time);
    1415       11438 :             cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
    1416       11438 :             if (cur_timeout <= 0)
    1417           4 :                 break;
    1418             :         }
    1419             :     }
    1420             : #ifndef WIN32
    1421     1156840 :     waiting = false;
    1422             : #endif
    1423             : 
    1424     1156840 :     pgstat_report_wait_end();
    1425             : 
    1426     1156840 :     return returned_events;
    1427             : }
    1428             : 
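/*
 * Editor's illustrative sketch (not part of latch.c): a hedged example of
 * the typical caller pattern around WaitEventSetWait() -- build a set once,
 * then loop, resetting the latch before reacting to it.  "client_sock" is a
 * hypothetical connected socket; the APIs used are the ones declared in
 * storage/latch.h, miscadmin.h, and pgstat.h.
 */
#include "postgres.h"

#include "miscadmin.h"
#include "pgstat.h"
#include "storage/latch.h"

static void
example_wait_loop(pgsocket client_sock)
{
    WaitEventSet *set = CreateWaitEventSet(CurrentMemoryContext, 3);
    WaitEvent   ev;

    AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
    AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
    AddWaitEventToSet(set, WL_SOCKET_READABLE, client_sock, NULL, NULL);

    for (;;)
    {
        /* Block for up to 10 seconds; 0 events means the timeout expired. */
        int         nready = WaitEventSetWait(set, 10 * 1000L, &ev, 1,
                                              PG_WAIT_EXTENSION);

        if (nready == 0)
            break;              /* timed out */

        if (ev.events & WL_LATCH_SET)
        {
            /* Always reset before acting, to avoid missing later sets. */
            ResetLatch(MyLatch);
            CHECK_FOR_INTERRUPTS();
        }
        if (ev.events & WL_SOCKET_READABLE)
        {
            /* ... read from client_sock here ... */
            break;
        }
    }

    FreeWaitEventSet(set);
}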
    1429             : 
    1430             : #if defined(WAIT_USE_EPOLL)
    1431             : 
    1432             : /*
    1433             :  * Wait using linux's epoll_wait(2).
    1434             :  *
    1435             :  * This is the preferred wait method, as several readiness notifications are
    1436             :  * delivered without having to iterate through all of set->events. The returned
    1437             :  * epoll_event structs contain a pointer to our events, making association
    1438             :  * easy.
    1439             :  */
    1440             : static inline int
    1441     1075432 : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1442             :                       WaitEvent *occurred_events, int nevents)
    1443             : {
    1444     1075432 :     int         returned_events = 0;
    1445             :     int         rc;
    1446             :     WaitEvent  *cur_event;
    1447             :     struct epoll_event *cur_epoll_event;
    1448             : 
    1449             :     /* Sleep */
    1450     1075432 :     rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
    1451             :                     nevents, cur_timeout);
    1452             : 
    1453             :     /* Check return code */
    1454     1075432 :     if (rc < 0)
    1455             :     {
    1456             :         /* EINTR is okay, otherwise complain */
    1457       13898 :         if (errno != EINTR)
    1458             :         {
    1459           0 :             waiting = false;
    1460           0 :             ereport(ERROR,
    1461             :                     (errcode_for_socket_access(),
    1462             :                      errmsg("%s() failed: %m",
    1463             :                             "epoll_wait")));
    1464             :         }
    1465       13898 :         return 0;
    1466             :     }
    1467     1061534 :     else if (rc == 0)
    1468             :     {
    1469             :         /* timeout exceeded */
    1470       22658 :         return -1;
    1471             :     }
    1472             : 
    1473             :     /*
    1474             :      * At least one event occurred, iterate over the returned epoll events
    1475             :      * until they're either all processed, or we've returned all the events
    1476             :      * the caller desired.
    1477             :      */
    1478     1038876 :     for (cur_epoll_event = set->epoll_ret_events;
    1479     2077790 :          cur_epoll_event < (set->epoll_ret_events + rc) &&
    1480             :          returned_events < nevents;
    1481     1038914 :          cur_epoll_event++)
    1482             :     {
    1483             :         /* epoll's data pointer is set to the associated WaitEvent */
    1484     1038944 :         cur_event = (WaitEvent *) cur_epoll_event->data.ptr;
    1485             : 
    1486     1038944 :         occurred_events->pos = cur_event->pos;
    1487     1038944 :         occurred_events->user_data = cur_event->user_data;
    1488     1038944 :         occurred_events->events = 0;
    1489             : 
    1490     1038944 :         if (cur_event->events == WL_LATCH_SET &&
    1491      685940 :             cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
    1492             :         {
    1493             :             /* Drain the signalfd. */
    1494      685940 :             drain();
    1495             : 
    1496      685940 :             if (set->latch && set->latch->is_set)
    1497             :             {
    1498      675742 :                 occurred_events->fd = PGINVALID_SOCKET;
    1499      675742 :                 occurred_events->events = WL_LATCH_SET;
    1500      675742 :                 occurred_events++;
    1501      675742 :                 returned_events++;
    1502             :             }
    1503             :         }
    1504      353004 :         else if (cur_event->events == WL_POSTMASTER_DEATH &&
    1505          36 :                  cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
    1506             :         {
    1507             :             /*
    1508             :              * We expect an EPOLLHUP when the remote end is closed, but
    1509             :              * because we don't expect the pipe to become readable or to have
    1510             :              * any errors either, treat those cases as postmaster death, too.
    1511             :              *
    1512             :              * Be paranoid about a spurious event signaling the postmaster as
    1513             :              * being dead.  There have been reports about that happening with
    1514             :              * older primitives (select(2) to be specific), and a spurious
    1515             :              * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
    1516             :              * cost much.
    1517             :              */
    1518          36 :             if (!PostmasterIsAliveInternal())
    1519             :             {
    1520          36 :                 if (set->exit_on_postmaster_death)
    1521          30 :                     proc_exit(1);
    1522           6 :                 occurred_events->fd = PGINVALID_SOCKET;
    1523           6 :                 occurred_events->events = WL_POSTMASTER_DEATH;
    1524           6 :                 occurred_events++;
    1525           6 :                 returned_events++;
    1526             :             }
    1527             :         }
    1528      352968 :         else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
    1529             :         {
    1530             :             Assert(cur_event->fd != PGINVALID_SOCKET);
    1531             : 
    1532      352968 :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1533      336832 :                 (cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
    1534             :             {
    1535             :                 /* data available in socket, or EOF */
    1536      334666 :                 occurred_events->events |= WL_SOCKET_READABLE;
    1537             :             }
    1538             : 
    1539      352968 :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1540       18610 :                 (cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
    1541             :             {
    1542             :                 /* writable, or EOF */
    1543       18532 :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1544             :             }
    1545             : 
    1546      352968 :             if (occurred_events->events != 0)
    1547             :             {
    1548      352968 :                 occurred_events->fd = cur_event->fd;
    1549      352968 :                 occurred_events++;
    1550      352968 :                 returned_events++;
    1551             :             }
    1552             :         }
    1553             :     }
    1554             : 
    1555     1038846 :     return returned_events;
    1556             : }
    1557             : 
    1558             : #elif defined(WAIT_USE_KQUEUE)
    1559             : 
    1560             : /*
    1561             :  * Wait using kevent(2) on BSD-family systems and macOS.
    1562             :  *
    1563             :  * For now this mirrors the epoll code, but in future it could modify the fd
    1564             :  * set in the same call to kevent as it uses for waiting instead of doing that
    1565             :  * with separate system calls.
    1566             :  */
    1567             : static int
    1568             : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1569             :                       WaitEvent *occurred_events, int nevents)
    1570             : {
    1571             :     int         returned_events = 0;
    1572             :     int         rc;
    1573             :     WaitEvent  *cur_event;
    1574             :     struct kevent *cur_kqueue_event;
    1575             :     struct timespec timeout;
    1576             :     struct timespec *timeout_p;
    1577             : 
    1578             :     if (cur_timeout < 0)
    1579             :         timeout_p = NULL;
    1580             :     else
    1581             :     {
    1582             :         timeout.tv_sec = cur_timeout / 1000;
    1583             :         timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
    1584             :         timeout_p = &timeout;
    1585             :     }
    1586             : 
    1587             :     /*
    1588             :      * Report postmaster events discovered by WaitEventAdjustKqueue() or an
    1589             :      * earlier call to WaitEventSetWait().
    1590             :      */
    1591             :     if (unlikely(set->report_postmaster_not_running))
    1592             :     {
    1593             :         if (set->exit_on_postmaster_death)
    1594             :             proc_exit(1);
    1595             :         occurred_events->fd = PGINVALID_SOCKET;
    1596             :         occurred_events->events = WL_POSTMASTER_DEATH;
    1597             :         return 1;
    1598             :     }
    1599             : 
    1600             :     /* Sleep */
    1601             :     rc = kevent(set->kqueue_fd, NULL, 0,
    1602             :                 set->kqueue_ret_events, nevents,
    1603             :                 timeout_p);
    1604             : 
    1605             :     /* Check return code */
    1606             :     if (rc < 0)
    1607             :     {
    1608             :         /* EINTR is okay, otherwise complain */
    1609             :         if (errno != EINTR)
    1610             :         {
    1611             :             waiting = false;
    1612             :             ereport(ERROR,
    1613             :                     (errcode_for_socket_access(),
    1614             :                      errmsg("%s() failed: %m",
    1615             :                             "kevent")));
    1616             :         }
    1617             :         return 0;
    1618             :     }
    1619             :     else if (rc == 0)
    1620             :     {
    1621             :         /* timeout exceeded */
    1622             :         return -1;
    1623             :     }
    1624             : 
    1625             :     /*
    1626             :      * At least one event occurred, iterate over the returned kqueue events
    1627             :      * until they're either all processed, or we've returned all the events
    1628             :      * the caller desired.
    1629             :      */
    1630             :     for (cur_kqueue_event = set->kqueue_ret_events;
    1631             :          cur_kqueue_event < (set->kqueue_ret_events + rc) &&
    1632             :          returned_events < nevents;
    1633             :          cur_kqueue_event++)
    1634             :     {
    1635             :         /* kevent's udata points to the associated WaitEvent */
    1636             :         cur_event = AccessWaitEvent(cur_kqueue_event);
    1637             : 
    1638             :         occurred_events->pos = cur_event->pos;
    1639             :         occurred_events->user_data = cur_event->user_data;
    1640             :         occurred_events->events = 0;
    1641             : 
    1642             :         if (cur_event->events == WL_LATCH_SET &&
    1643             :             cur_kqueue_event->filter == EVFILT_SIGNAL)
    1644             :         {
    1645             :             if (set->latch && set->latch->is_set)
    1646             :             {
    1647             :                 occurred_events->fd = PGINVALID_SOCKET;
    1648             :                 occurred_events->events = WL_LATCH_SET;
    1649             :                 occurred_events++;
    1650             :                 returned_events++;
    1651             :             }
    1652             :         }
    1653             :         else if (cur_event->events == WL_POSTMASTER_DEATH &&
    1654             :                  cur_kqueue_event->filter == EVFILT_PROC &&
    1655             :                  (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
    1656             :         {
    1657             :             /*
    1658             :              * The kernel will tell this kqueue object only once about the
    1659             :              * exit of the postmaster, so let's remember that for next time so
    1660             :              * that we provide level-triggered semantics.
    1661             :              */
    1662             :             set->report_postmaster_not_running = true;
    1663             : 
    1664             :             if (set->exit_on_postmaster_death)
    1665             :                 proc_exit(1);
    1666             :             occurred_events->fd = PGINVALID_SOCKET;
    1667             :             occurred_events->events = WL_POSTMASTER_DEATH;
    1668             :             occurred_events++;
    1669             :             returned_events++;
    1670             :         }
    1671             :         else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
    1672             :         {
    1673             :             Assert(cur_event->fd >= 0);
    1674             : 
    1675             :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1676             :                 (cur_kqueue_event->filter == EVFILT_READ))
    1677             :             {
    1678             :                 /* readable, or EOF */
    1679             :                 occurred_events->events |= WL_SOCKET_READABLE;
    1680             :             }
    1681             : 
    1682             :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1683             :                 (cur_kqueue_event->filter == EVFILT_WRITE))
    1684             :             {
    1685             :                 /* writable, or EOF */
    1686             :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1687             :             }
    1688             : 
    1689             :             if (occurred_events->events != 0)
    1690             :             {
    1691             :                 occurred_events->fd = cur_event->fd;
    1692             :                 occurred_events++;
    1693             :                 returned_events++;
    1694             :             }
    1695             :         }
    1696             :     }
    1697             : 
    1698             :     return returned_events;
    1699             : }
    1700             : 
    1701             : #elif defined(WAIT_USE_POLL)
    1702             : 
    1703             : /*
    1704             :  * Wait using poll(2).
    1705             :  *
    1706             :  * This allows receiving readiness notifications for several events at once,
    1707             :  * but requires iterating through all of set->pollfds.
    1708             :  */
    1709             : static inline int
    1710             : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1711             :                       WaitEvent *occurred_events, int nevents)
    1712             : {
    1713             :     int         returned_events = 0;
    1714             :     int         rc;
    1715             :     WaitEvent  *cur_event;
    1716             :     struct pollfd *cur_pollfd;
    1717             : 
    1718             :     /* Sleep */
    1719             :     rc = poll(set->pollfds, set->nevents, (int) cur_timeout);
    1720             : 
    1721             :     /* Check return code */
    1722             :     if (rc < 0)
    1723             :     {
    1724             :         /* EINTR is okay, otherwise complain */
    1725             :         if (errno != EINTR)
    1726             :         {
    1727             :             waiting = false;
    1728             :             ereport(ERROR,
    1729             :                     (errcode_for_socket_access(),
    1730             :                      errmsg("%s() failed: %m",
    1731             :                             "poll")));
    1732             :         }
    1733             :         return 0;
    1734             :     }
    1735             :     else if (rc == 0)
    1736             :     {
    1737             :         /* timeout exceeded */
    1738             :         return -1;
    1739             :     }
    1740             : 
    1741             :     for (cur_event = set->events, cur_pollfd = set->pollfds;
    1742             :          cur_event < (set->events + set->nevents) &&
    1743             :          returned_events < nevents;
    1744             :          cur_event++, cur_pollfd++)
    1745             :     {
    1746             :         /* no activity on this FD, skip */
    1747             :         if (cur_pollfd->revents == 0)
    1748             :             continue;
    1749             : 
    1750             :         occurred_events->pos = cur_event->pos;
    1751             :         occurred_events->user_data = cur_event->user_data;
    1752             :         occurred_events->events = 0;
    1753             : 
    1754             :         if (cur_event->events == WL_LATCH_SET &&
    1755             :             (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
    1756             :         {
    1757             :             /* There's data in the self-pipe, clear it. */
    1758             :             drain();
    1759             : 
    1760             :             if (set->latch && set->latch->is_set)
    1761             :             {
    1762             :                 occurred_events->fd = PGINVALID_SOCKET;
    1763             :                 occurred_events->events = WL_LATCH_SET;
    1764             :                 occurred_events++;
    1765             :                 returned_events++;
    1766             :             }
    1767             :         }
    1768             :         else if (cur_event->events == WL_POSTMASTER_DEATH &&
    1769             :                  (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
    1770             :         {
    1771             :             /*
    1772             :              * We expect a POLLHUP when the remote end is closed, but because
    1773             :              * we don't expect the pipe to become readable or to have any
    1774             :              * errors either, treat those cases as postmaster death, too.
    1775             :              *
    1776             :              * Be paranoid about a spurious event signaling the postmaster as
    1777             :              * being dead.  There have been reports about that happening with
    1778             :              * older primitives (select(2) to be specific), and a spurious
    1779             :              * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
    1780             :              * cost much.
    1781             :              */
    1782             :             if (!PostmasterIsAliveInternal())
    1783             :             {
    1784             :                 if (set->exit_on_postmaster_death)
    1785             :                     proc_exit(1);
    1786             :                 occurred_events->fd = PGINVALID_SOCKET;
    1787             :                 occurred_events->events = WL_POSTMASTER_DEATH;
    1788             :                 occurred_events++;
    1789             :                 returned_events++;
    1790             :             }
    1791             :         }
    1792             :         else if (cur_event->events & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
    1793             :         {
    1794             :             int         errflags = POLLHUP | POLLERR | POLLNVAL;
    1795             : 
    1796             :             Assert(cur_event->fd >= PGINVALID_SOCKET);
    1797             : 
    1798             :             if ((cur_event->events & WL_SOCKET_READABLE) &&
    1799             :                 (cur_pollfd->revents & (POLLIN | errflags)))
    1800             :             {
    1801             :                 /* data available in socket, or EOF */
    1802             :                 occurred_events->events |= WL_SOCKET_READABLE;
    1803             :             }
    1804             : 
    1805             :             if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1806             :                 (cur_pollfd->revents & (POLLOUT | errflags)))
    1807             :             {
    1808             :                 /* writeable, or EOF */
    1809             :                 occurred_events->events |= WL_SOCKET_WRITEABLE;
    1810             :             }
    1811             : 
    1812             :             if (occurred_events->events != 0)
    1813             :             {
    1814             :                 occurred_events->fd = cur_event->fd;
    1815             :                 occurred_events++;
    1816             :                 returned_events++;
    1817             :             }
    1818             :         }
    1819             :     }
    1820             :     return returned_events;
    1821             : }
    1822             : 
    1823             : #elif defined(WAIT_USE_WIN32)
    1824             : 
    1825             : /*
    1826             :  * Wait using Windows' WaitForMultipleObjects().
    1827             :  *
    1828             :  * Unfortunately this will only ever return a single readiness notification at
    1829             :  * a time.  Note that while the official documentation for
    1830             :  * WaitForMultipleObjects is ambiguous about multiple events being "consumed"
    1831             :  * with a single bWaitAll = FALSE call,
    1832             :  * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273 confirms
    1833             :  * that only one event is "consumed".
    1834             :  */
    1835             : static inline int
    1836             : WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
    1837             :                       WaitEvent *occurred_events, int nevents)
    1838             : {
    1839             :     int         returned_events = 0;
    1840             :     DWORD       rc;
    1841             :     WaitEvent  *cur_event;
    1842             : 
    1843             :     /* Reset any wait events that need it */
    1844             :     for (cur_event = set->events;
    1845             :          cur_event < (set->events + set->nevents);
    1846             :          cur_event++)
    1847             :     {
    1848             :         if (cur_event->reset)
    1849             :         {
    1850             :             WaitEventAdjustWin32(set, cur_event);
    1851             :             cur_event->reset = false;
    1852             :         }
    1853             : 
    1854             :         /*
    1855             :          * Windows does not guarantee to log an FD_WRITE network event
    1856             :          * indicating that more data can be sent unless the previous send()
    1857             :          * failed with WSAEWOULDBLOCK.  While our caller might well have made
    1858             :          * such a call, we cannot assume that here.  Therefore, if waiting for
    1859             :          * write-ready, force the issue by doing a dummy send().  If the dummy
    1860             :          * send() succeeds, assume that the socket is in fact write-ready, and
    1861             :          * return immediately.  Also, if it fails with something other than
    1862             :          * WSAEWOULDBLOCK, return a write-ready indication to let our caller
    1863             :          * deal with the error condition.
    1864             :          */
    1865             :         if (cur_event->events & WL_SOCKET_WRITEABLE)
    1866             :         {
    1867             :             char        c;
    1868             :             WSABUF      buf;
    1869             :             DWORD       sent;
    1870             :             int         r;
    1871             : 
    1872             :             buf.buf = &c;
    1873             :             buf.len = 0;
    1874             : 
    1875             :             r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
    1876             :             if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
    1877             :             {
    1878             :                 occurred_events->pos = cur_event->pos;
    1879             :                 occurred_events->user_data = cur_event->user_data;
    1880             :                 occurred_events->events = WL_SOCKET_WRITEABLE;
    1881             :                 occurred_events->fd = cur_event->fd;
    1882             :                 return 1;
    1883             :             }
    1884             :         }
    1885             :     }
    1886             : 
    1887             :     /*
    1888             :      * Sleep.
    1889             :      *
    1890             :      * Need to wait on set->nevents + 1 handles, because the signal handle is in [0].
    1891             :      */
    1892             :     rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
    1893             :                                 cur_timeout);
    1894             : 
    1895             :     /* Check return code */
    1896             :     if (rc == WAIT_FAILED)
    1897             :         elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
    1898             :              GetLastError());
    1899             :     else if (rc == WAIT_TIMEOUT)
    1900             :     {
    1901             :         /* timeout exceeded */
    1902             :         return -1;
    1903             :     }
    1904             : 
    1905             :     if (rc == WAIT_OBJECT_0)
    1906             :     {
    1907             :         /* Service newly-arrived signals */
    1908             :         pgwin32_dispatch_queued_signals();
    1909             :         return 0;               /* retry */
    1910             :     }
    1911             : 
    1912             :     /*
    1913             :      * With an offset of one, due to the always present pgwin32_signal_event,
    1914             :      * the handle offset directly corresponds to a wait event.
    1915             :      */
    1916             :     cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];
    1917             : 
    1918             :     occurred_events->pos = cur_event->pos;
    1919             :     occurred_events->user_data = cur_event->user_data;
    1920             :     occurred_events->events = 0;
    1921             : 
    1922             :     if (cur_event->events == WL_LATCH_SET)
    1923             :     {
    1924             :         /*
    1925             :          * We cannot use set->latch->event to reset the fired event if we
    1926             :          * aren't waiting on this latch now.
    1927             :          */
    1928             :         if (!ResetEvent(set->handles[cur_event->pos + 1]))
    1929             :             elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());
    1930             : 
    1931             :         if (set->latch && set->latch->is_set)
    1932             :         {
    1933             :             occurred_events->fd = PGINVALID_SOCKET;
    1934             :             occurred_events->events = WL_LATCH_SET;
    1935             :             occurred_events++;
    1936             :             returned_events++;
    1937             :         }
    1938             :     }
    1939             :     else if (cur_event->events == WL_POSTMASTER_DEATH)
    1940             :     {
    1941             :         /*
    1942             :          * Postmaster apparently died.  Since the consequences of falsely
    1943             :          * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we take
    1944             :          * the trouble to positively verify this with PostmasterIsAlive(),
    1945             :          * even though there is no known reason to think that the event could
    1946             :          * be falsely set on Windows.
    1947             :          */
    1948             :         if (!PostmasterIsAliveInternal())
    1949             :         {
    1950             :             if (set->exit_on_postmaster_death)
    1951             :                 proc_exit(1);
    1952             :             occurred_events->fd = PGINVALID_SOCKET;
    1953             :             occurred_events->events = WL_POSTMASTER_DEATH;
    1954             :             occurred_events++;
    1955             :             returned_events++;
    1956             :         }
    1957             :     }
    1958             :     else if (cur_event->events & WL_SOCKET_MASK)
    1959             :     {
    1960             :         WSANETWORKEVENTS resEvents;
    1961             :         HANDLE      handle = set->handles[cur_event->pos + 1];
    1962             : 
    1963             :         Assert(cur_event->fd);
    1964             : 
    1965             :         occurred_events->fd = cur_event->fd;
    1966             : 
    1967             :         ZeroMemory(&resEvents, sizeof(resEvents));
    1968             :         if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
    1969             :             elog(ERROR, "failed to enumerate network events: error code %d",
    1970             :                  WSAGetLastError());
    1971             :         if ((cur_event->events & WL_SOCKET_READABLE) &&
    1972             :             (resEvents.lNetworkEvents & FD_READ))
    1973             :         {
    1974             :             /* data available in socket */
    1975             :             occurred_events->events |= WL_SOCKET_READABLE;
    1976             : 
    1977             :             /*------
    1978             :              * WaitForMultipleObjects doesn't guarantee that a read event will
    1979             :              * be returned if the latch is set at the same time.  Even if it
    1980             :              * did, the caller might drop that event expecting it to reoccur
    1981             :              * on next call.  So, we must force the event to be reset if this
    1982             :              * WaitEventSet is used again in order to avoid an indefinite
    1983             :              * hang.  Refer https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
    1984             :              * for the behavior of socket events.
    1985             :              *------
    1986             :              */
    1987             :             cur_event->reset = true;
    1988             :         }
    1989             :         if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
    1990             :             (resEvents.lNetworkEvents & FD_WRITE))
    1991             :         {
    1992             :             /* writeable */
    1993             :             occurred_events->events |= WL_SOCKET_WRITEABLE;
    1994             :         }
    1995             :         if ((cur_event->events & WL_SOCKET_CONNECTED) &&
    1996             :             (resEvents.lNetworkEvents & FD_CONNECT))
    1997             :         {
    1998             :             /* connected */
    1999             :             occurred_events->events |= WL_SOCKET_CONNECTED;
    2000             :         }
    2001             :         if (resEvents.lNetworkEvents & FD_CLOSE)
    2002             :         {
    2003             :             /* EOF/error, so signal all caller-requested socket flags */
    2004             :             occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
    2005             :         }
    2006             : 
    2007             :         if (occurred_events->events != 0)
    2008             :         {
    2009             :             occurred_events++;
    2010             :             returned_events++;
    2011             :         }
    2012             :     }
    2013             : 
    2014             :     return returned_events;
    2015             : }
    2016             : #endif
    2017             : 
    2018             : /*
    2019             :  * Get the number of wait events registered in a given WaitEventSet.
    2020             :  */
    2021             : int
    2022         230 : GetNumRegisteredWaitEvents(WaitEventSet *set)
    2023             : {
    2024         230 :     return set->nevents;
    2025             : }
    2026             : 
    2027             : #if defined(WAIT_USE_POLL)
    2028             : 
    2029             : /*
    2030             :  * SetLatch uses SIGURG to wake up the process waiting on the latch.
    2031             :  *
    2032             :  * Wake up WaitLatch, if we're waiting.
    2033             :  */
    2034             : static void
    2035             : latch_sigurg_handler(SIGNAL_ARGS)
    2036             : {
    2037             :     int         save_errno = errno;
    2038             : 
    2039             :     if (waiting)
    2040             :         sendSelfPipeByte();
    2041             : 
    2042             :     errno = save_errno;
    2043             : }
    2044             : 
    2045             : /* Send one byte to the self-pipe, to wake up WaitLatch */
    2046             : static void
    2047             : sendSelfPipeByte(void)
    2048             : {
    2049             :     int         rc;
    2050             :     char        dummy = 0;
    2051             : 
    2052             : retry:
    2053             :     rc = write(selfpipe_writefd, &dummy, 1);
    2054             :     if (rc < 0)
    2055             :     {
    2056             :         /* If interrupted by signal, just retry */
    2057             :         if (errno == EINTR)
    2058             :             goto retry;
    2059             : 
    2060             :         /*
    2061             :          * If the pipe is full, we don't need to retry; the data that's
    2062             :          * already there is enough to wake up WaitLatch.
    2063             :          */
    2064             :         if (errno == EAGAIN || errno == EWOULDBLOCK)
    2065             :             return;
    2066             : 
    2067             :         /*
    2068             :          * Oops, the write() failed for some other reason. We might be in a
    2069             :          * signal handler, so it's not safe to elog(). We have no choice but
    2070             :          * to silently ignore the error.
    2071             :          */
    2072             :         return;
    2073             :     }
    2074             : }
    2075             : 
    2076             : #endif
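
Editorial note: latch_sigurg_handler() and sendSelfPipeByte() above wake a pending WaitLatch() by writing one byte into the self-pipe from signal context. For readers who want the pattern in isolation, here is a self-contained POSIX sketch of the same technique (an illustration only, not part of latch.c); every name in it, such as selfpipe[] and handle_wakeup(), is invented.

/*
 * Editor's sketch, not part of latch.c: the self-pipe pattern with plain
 * POSIX calls.  All names here are invented for illustration.
 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int  selfpipe[2];        /* [0] = read end, [1] = write end */

static void
handle_wakeup(int signo)
{
    int         save_errno = errno;
    char        dummy = 0;

    /* Best effort: if the pipe is already full, an earlier byte suffices. */
    (void) write(selfpipe[1], &dummy, 1);
    errno = save_errno;
}

int
main(void)
{
    struct sigaction sa;
    struct pollfd pfd;

    if (pipe(selfpipe) != 0)
        return 1;

    /* Both ends non-blocking, so neither the handler nor the drain loop
     * can ever get stuck on the pipe. */
    fcntl(selfpipe[0], F_SETFL, O_NONBLOCK);
    fcntl(selfpipe[1], F_SETFL, O_NONBLOCK);

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = handle_wakeup;
    sigaction(SIGURG, &sa, NULL);

    /* Sleep until at least one wakeup byte is available on the read end. */
    pfd.fd = selfpipe[0];
    pfd.events = POLLIN;
    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
    {
        char        buf[128];

        while (read(selfpipe[0], buf, sizeof(buf)) > 0)
            ;                   /* drain all wakeup bytes */
        printf("woken up\n");
    }
    return 0;
}
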
    2077             : 
    2078             : #if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
    2079             : 
    2080             : /*
    2081             :  * Read all available data from self-pipe or signalfd.
    2082             :  *
    2083             :  * Note: this is only called when waiting = true.  If it fails (and hence
    2084             :  * doesn't return), it must reset that flag first (though ideally, this
    2085             :  * will never happen).
    2086             :  */
    2087             : static void
    2088      685940 : drain(void)
    2089             : {
    2090             :     char        buf[1024];
    2091             :     int         rc;
    2092             :     int         fd;
    2093             : 
    2094             : #ifdef WAIT_USE_POLL
    2095             :     fd = selfpipe_readfd;
    2096             : #else
    2097      685940 :     fd = signal_fd;
    2098             : #endif
    2099             : 
    2100             :     for (;;)
    2101             :     {
    2102      685940 :         rc = read(fd, buf, sizeof(buf));
    2103      685940 :         if (rc < 0)
    2104             :         {
    2105           0 :             if (errno == EAGAIN || errno == EWOULDBLOCK)
    2106             :                 break;          /* the descriptor is empty */
    2107           0 :             else if (errno == EINTR)
    2108           0 :                 continue;       /* retry */
    2109             :             else
    2110             :             {
    2111           0 :                 waiting = false;
    2112             : #ifdef WAIT_USE_POLL
    2113             :                 elog(ERROR, "read() on self-pipe failed: %m");
    2114             : #else
    2115           0 :                 elog(ERROR, "read() on signalfd failed: %m");
    2116             : #endif
    2117             :             }
    2118             :         }
    2119      685940 :         else if (rc == 0)
    2120             :         {
    2121           0 :             waiting = false;
    2122             : #ifdef WAIT_USE_POLL
    2123             :             elog(ERROR, "unexpected EOF on self-pipe");
    2124             : #else
    2125           0 :             elog(ERROR, "unexpected EOF on signalfd");
    2126             : #endif
    2127             :         }
    2128      685940 :         else if (rc < sizeof(buf))
    2129             :         {
    2130             :             /* we successfully drained the pipe; no need to read() again */
    2131      685940 :             break;
    2132             :         }
    2133             :         /* else buffer wasn't big enough, so read again */
    2134             :     }
    2135      685940 : }
    2136             : 
    2137             : #endif
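
Editorial note: under WAIT_USE_EPOLL, drain() above reads from signal_fd rather than from the self-pipe. The Linux-only sketch below (an illustration only, not part of latch.c) shows how such a non-blocking SIGURG descriptor can be created with signalfd(2) and drained until EAGAIN; open_sigurg_fd() and drain_sigurg_fd() are invented names.

/*
 * Editor's sketch, not part of latch.c: creating and draining a non-blocking
 * signalfd for SIGURG on Linux.  All names are invented for illustration.
 */
#include <errno.h>
#include <signal.h>
#include <sys/signalfd.h>
#include <unistd.h>

static int
open_sigurg_fd(void)
{
    sigset_t    mask;

    /* signalfd(2) requires the signal to be blocked; otherwise it would
     * still be delivered the normal way. */
    sigemptyset(&mask);
    sigaddset(&mask, SIGURG);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    return signalfd(-1, &mask, SFD_NONBLOCK | SFD_CLOEXEC);
}

static void
drain_sigurg_fd(int fd)
{
    struct signalfd_siginfo info[4];
    ssize_t     rc;

    /* Read until the kernel reports nothing more is pending. */
    for (;;)
    {
        rc = read(fd, info, sizeof(info));
        if (rc < 0)
        {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                break;          /* drained */
            if (errno == EINTR)
                continue;       /* interrupted; retry */
            break;              /* other errors: give up quietly in a sketch */
        }
        if (rc == 0)
            break;              /* unexpected EOF */
    }
}
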

Generated by: LCOV version 1.14