/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *     Hardware-dependent implementation of spinlocks.
 *
 *  NOTE: none of the macros in this file are intended to be called directly.
 *  Call them through the hardware-independent macros in spin.h.
 *
 *  The following hardware-dependent macros must be provided for each
 *  supported platform:
 *
 *  void S_INIT_LOCK(slock_t *lock)
 *      Initialize a spinlock (to the unlocked state).
 *
 *  int S_LOCK(slock_t *lock)
 *      Acquire a spinlock, waiting if necessary.
 *      Time out and abort() if unable to acquire the lock in a
 *      "reasonable" amount of time --- typically ~ 1 minute.
 *      Should return number of "delays"; see s_lock.c
 *
 *  void S_UNLOCK(slock_t *lock)
 *      Unlock a previously acquired lock.
 *
 *  bool S_LOCK_FREE(slock_t *lock)
 *      Tests if the lock is free. Returns true if free, false if locked.
 *      This does *not* change the state of the lock.
 *
 *  void SPIN_DELAY(void)
 *      Delay operation to occur inside spinlock wait loop.
 *
 *  Note to implementors: there are default implementations for all these
 *  macros at the bottom of the file.  Check if your platform can use
 *  these or needs to override them.
 *
 *  Usually, S_LOCK() is implemented in terms of even lower-level macros
 *  TAS() and TAS_SPIN():
 *
 *  int TAS(slock_t *lock)
 *      Atomic test-and-set instruction.  Attempt to acquire the lock,
 *      but do *not* wait.  Returns 0 if successful, nonzero if unable
 *      to acquire the lock.
 *
 *  int TAS_SPIN(slock_t *lock)
 *      Like TAS(), but this version is used when waiting for a lock
 *      previously found to be contended.  By default, this is the
 *      same as TAS(), but on some architectures it's better to poll a
 *      contended lock using an unlocked instruction and retry the
 *      atomic test-and-set only when it appears free.
 *
 *  TAS() and TAS_SPIN() are NOT part of the API, and should never be called
 *  directly.
 *
 *  CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
 *  failure to acquire a lock even when the lock is not locked.  For example,
 *  on Alpha TAS() will "fail" if interrupted.  Therefore a retry loop must
 *  always be used, even if you are certain the lock is free.
 *
 *  It is the responsibility of these macros to make sure that the compiler
 *  does not re-order accesses to shared memory to precede the actual lock
 *  acquisition, or follow the lock release.  Prior to PostgreSQL 9.5, this
 *  was the caller's responsibility, which meant that callers had to use
 *  volatile-qualified pointers to refer to both the spinlock itself and the
 *  shared data being accessed within the spinlocked critical section.  This
 *  was notationally awkward, easy to forget (and thus error-prone), and
 *  prevented some useful compiler optimizations.  For these reasons, we
 *  now require that the macros themselves prevent compiler re-ordering,
 *  so that the caller doesn't need to take special precautions.
 *
 *  On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
 *  S_UNLOCK() macros must further include hardware-level memory fence
 *  instructions to prevent similar re-ordering at the hardware level.
 *  TAS() and TAS_SPIN() must guarantee that loads and stores issued after
 *  the macro are not executed until the lock has been obtained.  Conversely,
 *  S_UNLOCK() must guarantee that loads and stores issued before the macro
 *  have been executed before the lock is released.
 *
 *  On most supported platforms, TAS() uses a tas() function written
 *  in assembly language to execute a hardware atomic-test-and-set
 *  instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 *  If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 *  defined), then we fall back on an emulation that uses SysV semaphores
 *  (see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 *  implementation, because of the cost of a kernel call per lock or unlock.
 *  An old report is that Postgres spends around 40% of its time in semop(2)
 *  when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *    src/include/storage/s_lock.h
 *
 *-------------------------------------------------------------------------
 */
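
/*
 *  To make the retry requirement concrete, here is a minimal sketch (not
 *  the actual implementation; the real wait loop lives in s_lock.c and
 *  also handles timeout/abort and delay bookkeeping) of an acquire/release
 *  cycle built from these macros, for a hypothetical "slock_t lck" that
 *  was initialized once with S_INIT_LOCK(&lck):
 *
 *      if (TAS(&lck))              // fast-path attempt failed?
 *      {
 *          while (TAS_SPIN(&lck))  // retry even if the lock looked free
 *              SPIN_DELAY();       //   (see CAUTION above re Alpha)
 *      }
 *      // ... critical section: accesses cannot move past the macros ...
 *      S_UNLOCK(&lck);
 */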
#ifndef S_LOCK_H
#define S_LOCK_H

#ifdef FRONTEND
#error "s_lock.h may not be included from frontend code"
#endif

#ifdef HAVE_SPINLOCKS   /* skip spinlocks if requested */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

    __asm__ __volatile__(
        "  instruction \n"
        "  instruction \n"
        "  instruction \n"
:       "=r"(_res), "+m"(*lock)     // return register, in/out lock value
:       "r"(lock)                   // lock pointer, in input register
:       "memory", "cc");            // show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */


#ifdef __i386__     /* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    slock_t     _res = 1;

    /*
     * Use a non-locking test before asserting the bus lock.  Note that the
     * extra test appears to be a small loss on some x86 platforms and a small
     * win on others; it's by no means clear that we should keep it.
     *
     * When this was last tested, we didn't have separate TAS() and TAS_SPIN()
     * macros.  Nowadays it probably would be better to do a non-locking test
     * in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
     * testing to verify that.  Without some empirical evidence, better to
     * leave it alone.
     */
    __asm__ __volatile__(
        "  cmpb    $0,%1   \n"
        "  jne     1f      \n"
        "  lock            \n"
        "  xchgb   %0,%1   \n"
        "1: \n"
:       "+q"(_res), "+m"(*lock)
:       /* no inputs */
:       "memory", "cc");
    return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
    /*
     * This sequence is equivalent to the PAUSE instruction ("rep" is
     * ignored by old IA32 processors if the following instruction is
     * not a string operation); the IA-32 Architecture Software
     * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
     * PAUSE in the inner loop of a spin lock is necessary for good
     * performance:
     *
     *     The PAUSE instruction improves the performance of IA-32
     *     processors supporting Hyper-Threading Technology when
     *     executing spin-wait loops and other routines where one
     *     thread is accessing a shared lock or semaphore in a tight
     *     polling loop. When executing a spin-wait loop, the
     *     processor can suffer a severe performance penalty when
     *     exiting the loop because it detects a possible memory order
     *     violation and flushes the core processor's pipeline. The
     *     PAUSE instruction provides a hint to the processor that the
     *     code sequence is a spin-wait loop. The processor uses this
     *     hint to avoid the memory order violation and prevent the
     *     pipeline flush. In addition, the PAUSE instruction
     *     de-pipelines the spin-wait loop to prevent it from
     *     consuming execution resources excessively.
     */
    __asm__ __volatile__(
        " rep; nop         \n");
}

#endif   /* __i386__ */


#ifdef __x86_64__       /* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

/*
 * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
 * but only when spinning.
 *
 * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
 * and IA32, by Michael Chynoweth and Mary R. Lee. As of this writing, it is
 * available at:
 * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
 */
#define TAS_SPIN(lock)    (*(lock) ? 1 : TAS(lock))

static __inline__ int
tas(volatile slock_t *lock)
{
    slock_t     _res = 1;

    __asm__ __volatile__(
        "  lock            \n"
        "  xchgb   %0,%1   \n"
:       "+q"(_res), "+m"(*lock)
:       /* no inputs */
:       "memory", "cc");
    return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
    /*
     * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
     * Opteron, but it may be of some use on EM64T, so we keep it.
     */
    __asm__ __volatile__(
        " rep; nop         \n");
}

#endif   /* __x86_64__ */


/*
 * On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
 *
 * We use the int-width variant of the builtin because it works on more chips
 * than other widths.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
#ifdef HAVE_GCC__SYNC_INT32_TAS
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
    return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

/*
 * Using an ISB instruction to delay in spinlock loops appears beneficial on
 * high-core-count ARM64 processors.  It seems mostly a wash for smaller gear,
 * and ISB doesn't exist at all on pre-v7 ARM chips.
 */
#if defined(__aarch64__)

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
    __asm__ __volatile__(
        " isb;             \n");
}

#endif   /* __aarch64__ */
#endif   /* HAVE_GCC__SYNC_INT32_TAS */
#endif   /* __arm__ || __arm || __aarch64__ */


/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)      tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    int         _res = 0;

    __asm__ __volatile__(
        "  cs  %0,%3,0(%2)     \n"
:       "+d"(_res), "+m"(*lock)
:       "a"(lock), "d"(1)
:       "memory", "cc");
    return _res;
}

#endif   /* __s390__ || __s390x__ */


#if defined(__sparc__)      /* Sparc */
/*
 * Solaris has always run sparc processors in TSO (total store order) mode,
 * but linux didn't use to and the *BSDs still don't.  So, be careful about
 * acquire/release semantics.  The CPU will treat superfluous membars as
 * NOPs, so it's just code space.
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
    slock_t     _res;

    /*
     *  See comment in src/backend/port/tas/sunstudio_sparc.s for why this
     *  uses "ldstub", and that file uses "cas".  gcc currently generates
     *  sparcv7-targeted binaries, so "cas" use isn't possible.
     */
    __asm__ __volatile__(
        "  ldstub  [%2], %0    \n"
:       "=r"(_res), "+m"(*lock)
:       "r"(lock)
:       "memory");
#if defined(__sparcv7) || defined(__sparc_v7__)
    /*
     * No stbar or membar available, luckily no actually produced hardware
     * requires a barrier.
     */
#elif defined(__sparcv8) || defined(__sparc_v8__)
    /* stbar is available (and required for both PSO, RMO), membar isn't */
    __asm__ __volatile__ ("stbar    \n":::"memory");
#else
    /*
     * #LoadStore (RMO) | #LoadLoad (RMO) together are the appropriate acquire
     * barrier for sparcv8+ upwards.
     */
    __asm__ __volatile__ ("membar #LoadStore | #LoadLoad \n":::"memory");
#endif
    return (int) _res;
}

#if defined(__sparcv7) || defined(__sparc_v7__)
/*
 * No stbar or membar available, luckily no actually produced hardware
 * requires a barrier.  We fall through to the default gcc definition of
 * S_UNLOCK in this case.
 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
/* stbar is available (and required for both PSO, RMO), membar isn't */
#define S_UNLOCK(lock)  \
do \
{ \
    __asm__ __volatile__ ("stbar    \n":::"memory"); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
/*
 * #LoadStore (RMO) | #StoreStore (RMO, PSO) together are the appropriate
 * release barrier for sparcv8+ upwards.
 */
#define S_UNLOCK(lock)  \
do \
{ \
    __asm__ __volatile__ ("membar #LoadStore | #StoreStore \n":::"memory"); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif

#endif   /* __sparc__ */


/* PowerPC */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On PPC, it's a win to use a non-locking test before the lwarx */
#define TAS_SPIN(lock)  (*(lock) ? 1 : TAS(lock))

/*
 * The second operand of addi can hold a constant zero or a register number,
 * hence constraint "=&b" to avoid allocating r0.  "b" stands for "address
 * base register"; most operands having this register-or-zero property are
 * address bases, e.g. the second operand of lwax.
 *
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 * But if the spinlock is in ordinary memory, we can use lwsync instead for
 * better performance.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
    slock_t _t;
    int _res;

    __asm__ __volatile__(
"  lwarx   %0,0,%3,1   \n"
"  cmpwi   %0,0        \n"
"  bne     1f          \n"
"  addi    %0,%0,1     \n"
"  stwcx.  %0,0,%3     \n"
"  beq     2f          \n"
"1: \n"
"  li      %1,1        \n"
"  b       3f          \n"
"2: \n"
"  lwsync              \n"
"  li      %1,0        \n"
"3: \n"
:   "=&b"(_t), "=r"(_res), "+m"(*lock)
:   "r"(lock)
:   "memory", "cc");
    return _res;
}

/*
 * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
 * But we can use lwsync instead for better performance.
 */
#define S_UNLOCK(lock)  \
do \
{ \
    __asm__ __volatile__ ("    lwsync \n" ::: "memory"); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* powerpc */


#if defined(__mips__) && !defined(__sgi)    /* non-SGI MIPS */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/*
 * Original MIPS-I processors lacked the LL/SC instructions, but if we are
 * so unfortunate as to be running on one of those, we expect that the kernel
 * will handle the illegal-instruction traps and emulate them for us.  On
 * anything newer (and really, MIPS-I is extinct) LL/SC is the only sane
 * choice because any other synchronization method must involve a kernel
 * call.  Unfortunately, many toolchains still default to MIPS-I as the
 * codegen target; if the symbol __mips shows that that's the case, we
 * have to force the assembler to accept LL/SC.
 *
 * R10000 and up processors require a separate SYNC, which has the same
 * issues as LL/SC.
 */
#if __mips < 2
#define MIPS_SET_MIPS2  "       .set mips2          \n"
#else
#define MIPS_SET_MIPS2
#endif

static __inline__ int
tas(volatile slock_t *lock)
{
    volatile slock_t *_l = lock;
    int         _res;
    int         _tmp;

    __asm__ __volatile__(
        "       .set push           \n"
        MIPS_SET_MIPS2
        "       .set noreorder      \n"
        "       .set nomacro        \n"
        "       ll      %0, %2      \n"
        "       or      %1, %0, 1   \n"
        "       sc      %1, %2      \n"
        "       xori    %1, 1       \n"
        "       or      %0, %0, %1  \n"
        "       sync                \n"
        "       .set pop              "
:       "=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:       /* no inputs */
:       "memory");
    return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)  \
do \
{ \
    __asm__ __volatile__( \
        "       .set push           \n" \
        MIPS_SET_MIPS2 \
        "       .set noreorder      \n" \
        "       .set nomacro        \n" \
        "       sync                \n" \
        "       .set pop              " \
:       /* no outputs */ \
:       /* no inputs */ \
:       "memory"); \
    *((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */


#if defined(__hppa) || defined(__hppa__)    /* HP PA-RISC */
/*
 * HP's PA-RISC
 *
 * Because LDCWX requires a 16-byte-aligned address, we declare slock_t as a
 * 16-byte struct.  The active word in the struct is whichever has the aligned
 * address; the other three words just sit at -1.
 */
#define HAS_TEST_AND_SET

typedef struct
{
    int         sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)   ((volatile int *) (((uintptr_t) (lock) + 15) & ~15))
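
/*
 * For example (with a hypothetical address): a slock_t placed at 0x1004
 * occupies bytes 0x1004..0x1013, and the one 16-byte-aligned word inside
 * it starts at ((0x1004 + 15) & ~15) == 0x1010; that word is the lock,
 * while the other three words just hold -1.
 */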

static __inline__ int
tas(volatile slock_t *lock)
{
    volatile int *lockword = TAS_ACTIVE_WORD(lock);
    int         lockval;

    /*
     * The LDCWX instruction atomically clears the target word and
     * returns the previous value.  Hence, if the instruction returns
     * 0, someone else has already acquired the lock before we tested
     * it (i.e., we have failed).
     *
     * Notice that this means that we actually clear the word to set
     * the lock and set the word to clear the lock.  This is the
     * opposite behavior from the SPARC LDSTUB instruction.  For some
     * reason everything that H-P does is rather baroque...
     *
     * For details about the LDCWX instruction, see the "Precision
     * Architecture and Instruction Reference Manual" (09740-90014 of June
     * 1987), p. 5-38.
     */
    __asm__ __volatile__(
        "  ldcwx   0(0,%2),%0  \n"
:       "=r"(lockval), "+m"(*lockword)
:       "r"(lockword)
:       "memory");
    return (lockval == 0);
}

#define S_UNLOCK(lock)  \
    do { \
        __asm__ __volatile__("" : : : "memory"); \
        *TAS_ACTIVE_WORD(lock) = -1; \
    } while (0)

#define S_INIT_LOCK(lock) \
    do { \
        volatile slock_t *lock_ = (lock); \
        lock_->sema[0] = -1; \
        lock_->sema[1] = -1; \
        lock_->sema[2] = -1; \
        lock_->sema[3] = -1; \
    } while (0)

#define S_LOCK_FREE(lock)   (*TAS_ACTIVE_WORD(lock) != 0)

#endif   /* __hppa || __hppa__ */


/*
 * If we have no platform-specific knowledge, but we found that the compiler
 * provides __sync_lock_test_and_set(), use that.  Prefer the int-width
 * version over the char-width version if we have both, on the rather dubious
 * grounds that that's known to be more likely to work in the ARM ecosystem.
 * (But we dealt with ARM above.)
 */
#if !defined(HAS_TEST_AND_SET)

#if defined(HAVE_GCC__SYNC_INT32_TAS)
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
    return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#elif defined(HAVE_GCC__SYNC_CHAR_TAS)
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef char slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
    return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#endif   /* HAVE_GCC__SYNC_INT32_TAS */

#endif  /* !defined(HAS_TEST_AND_SET) */


/*
 * Default implementation of S_UNLOCK() for gcc/icc.
 *
 * Note that this implementation is unsafe for any platform that can reorder
 * a memory access (either load or store) after a following store.  That
 * happens not to be possible on x86 and most legacy architectures (some are
 * single-processor!), but many modern systems have weaker memory ordering.
 * Those that do must define their own version of S_UNLOCK() rather than
 * relying on this one.
 */
#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)  \
    do { __asm__ __volatile__("" : : : "memory");  *(lock) = 0; } while (0)
#endif

#endif  /* defined(__GNUC__) || defined(__INTEL_COMPILER) */


/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)  /* We didn't trigger above, let's try here */

/* These are in sunstudio_(sparc|x86).s */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
                                      slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif


#ifdef _MSC_VER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
    _mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
    /* See comment for gcc code. Same code, MASM syntax */
    __asm rep nop;
}
#endif

#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)

#define S_UNLOCK(lock)  \
    do { _ReadWriteBarrier(); (*(lock)) = 0; } while (0)

#endif


#endif  /* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@lists.postgresql.org.
#endif


#else   /* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef int slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock, bool nested);
extern int  tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)   s_lock_free_sema(lock)
#define S_UNLOCK(lock)   s_unlock_sema(lock)
#define S_INIT_LOCK(lock)   s_init_lock_sema(lock, false)
#define TAS(lock)   tas_sema(lock)


#endif  /* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
    (TAS(lock) ? s_lock((lock), __FILE__, __LINE__, __func__) : 0)
#endif   /* S_LOCK */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)   (*(lock) == 0)
#endif   /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
/*
 * Our default implementation of S_UNLOCK is essentially *(lock) = 0.  This
 * is unsafe if the platform can reorder a memory access (either load or
 * store) after a following store; platforms where this is possible must
 * define their own S_UNLOCK.  But CPU reordering is not the only concern:
 * if we simply defined S_UNLOCK() as an inline macro, the compiler might
 * reorder instructions from inside the critical section to occur after the
 * lock release.  Since the compiler probably can't know what the external
 * function s_unlock is doing, putting the same logic there should be adequate.
 * A sufficiently-smart globally optimizing compiler could break that
 * assumption, though, and the cost of a function call for every spinlock
 * release may hurt performance significantly, so we use this implementation
 * only for platforms where we don't know of a suitable intrinsic.  For the
 * most part, those are relatively obscure platform/compiler combinations to
 * which the PostgreSQL project does not have access.
 */
#define USE_DEFAULT_S_UNLOCK
extern void s_unlock(volatile slock_t *lock);
#define S_UNLOCK(lock)      s_unlock(lock)
#endif   /* S_UNLOCK */
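
/*
 * For reference, a sketch of the matching out-of-line routine.  (This is
 * an illustration only; the authoritative definition belongs in s_lock.c,
 * compiled when USE_DEFAULT_S_UNLOCK is set.)
 *
 *      void
 *      s_unlock(volatile slock_t *lock)
 *      {
 *          *lock = 0;  // the compiler cannot hoist critical-section
 *      }               //   accesses past an opaque function call
 */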

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)   S_UNLOCK(lock)
#endif   /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()    ((void) 0)
#endif   /* SPIN_DELAY */

#if !defined(TAS)
extern int  tas(volatile slock_t *lock);        /* in port/.../tas.s, or
                                                 * s_lock.c */

#define TAS(lock)       tas(lock)
#endif   /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)  TAS(lock)
#endif   /* TAS_SPIN */


/*
 * Platform-independent out-of-line support routines
 */
extern int s_lock(volatile slock_t *lock, const char *file, int line, const char *func);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int  update_spins_per_delay(int shared_spins_per_delay);

/*
 * Support for spin delay, which is useful in various places where
 * spinlock-like procedures take place.
 */
typedef struct
{
    int         spins;
    int         delays;
    int         cur_delay;
    const char *file;
    int         line;
    const char *func;
} SpinDelayStatus;

static inline void
init_spin_delay(SpinDelayStatus *status,
                const char *file, int line, const char *func)
{
    status->spins = 0;
    status->delays = 0;
    status->cur_delay = 0;
    status->file = file;
    status->line = line;
    status->func = func;
}

#define init_local_spin_delay(status) init_spin_delay(status, __FILE__, __LINE__, __func__)
extern void perform_spin_delay(SpinDelayStatus *status);
extern void finish_spin_delay(SpinDelayStatus *status);
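
/*
 * A minimal usage sketch, assuming some caller-supplied contended test
 * (condition_is_met() is hypothetical): spin until the condition holds,
 * backing off via perform_spin_delay(), then let finish_spin_delay()
 * feed the observed spin counts back into spins_per_delay:
 *
 *      SpinDelayStatus delayStatus;
 *
 *      init_local_spin_delay(&delayStatus);
 *      while (!condition_is_met())
 *          perform_spin_delay(&delayStatus);
 *      finish_spin_delay(&delayStatus);
 */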

#endif   /* S_LOCK_H */
