LCOV - code coverage report
Test: PostgreSQL 18devel
File: src/include/port/atomics.h
Lines: 78 of 78 hit (100.0 %)
Functions: 31 of 31 hit (100.0 %)
Date: 2024-11-21 08:14:44

/*-------------------------------------------------------------------------
 *
 * atomics.h
 *    Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a new platform/compiler, at the very least
 * implementations of the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
 *
 * There exist generic, hardware independent, implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks
 * will be used to implement the 64-bit parts of the API.
 *
 * Implement _u64 atomics if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#ifdef FRONTEND
#error "atomics.h may not be included from frontend code"
#endif

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First, a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics, or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly for the actual atomic operations if all the
 * common compilers on your platform provide intrinsics. Intrinsics are
 * much easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/*
 * gcc or compatible, including clang and icc.
 */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#include "port/atomics/generic-gcc.h"
#elif defined(_MSC_VER)
#include "port/atomics/generic-msvc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#else
/* Unknown compiler. */
#endif

/* Fail if we couldn't find implementations of required facilities. */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
#error "could not find an implementation of pg_atomic_uint32"
#endif
#if !defined(pg_compiler_barrier_impl)
#error "could not find an implementation of pg_compiler_barrier"
#endif
#if !defined(pg_memory_barrier_impl)
#error "could not find an implementation of pg_memory_barrier"
#endif


/*
 * Provide a spinlock-based implementation of the 64 bit variants, if
 * necessary.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"

/*
 * pg_compiler_barrier - prevent the compiler from moving code across the
 * barrier
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier.  However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier()   pg_compiler_barrier_impl()

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier.  Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier.  Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores.  Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier.  In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier()   pg_read_barrier_impl()
#define pg_write_barrier()  pg_write_barrier_impl()
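
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * the classic message-passing pattern described in
 * src/backend/storage/lmgr/README.barrier, using hypothetical shared
 * variables "somedata" and "ready", both initially zero.
 *
 * Writer:
 *     somedata = 42;
 *     pg_write_barrier();      keep the data store ahead of the flag store
 *     ready = 1;
 *
 * Reader:
 *     if (ready)
 *     {
 *         pg_read_barrier();   keep the flag load ahead of the data load
 *         Assert(somedata == 42);
 *     }
 *
 * Dropping either barrier would allow the CPU (or compiler) to reorder the
 * accesses on weakly ordered hardware, so the reader could see "ready" set
 * while still observing a stale "somedata".
 */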

/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
static inline bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - Check if the lock is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
static inline bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
static inline void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_clear_flag_impl(ptr);
}
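
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * a minimal test-and-test-and-set spinlock built from the flag API, for a
 * hypothetical flag "mylock" that was set up once with pg_atomic_init_flag()
 * before any concurrent access:
 *
 *     for (;;)
 *     {
 *         if (pg_atomic_unlocked_test_flag(&mylock) &&
 *             pg_atomic_test_set_flag(&mylock))
 *             break;              acquired, with acquire semantics
 *         pg_spin_delay();        let the CPU relax while the lock is busy
 *     }
 *     ... critical section ...
 *     pg_atomic_clear_flag(&mylock);      release, with release semantics
 *
 * The unlocked pre-check avoids hammering the cache line with atomic writes
 * while the lock is held; real code should prefer spinlocks or lwlocks, per
 * the guidance at the top of this file.
 */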


/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_init_u32_impl(ptr, val);
}

/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value that was written by this or
 * another process at some point in the past; there is, however, no cache
 * coherency interaction guaranteeing that the value has not been written
 * to again since.
 *
 * No barrier semantics.
 */
static inline uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_read_membarrier_u32 - read with barrier semantics.
 *
 * This read is guaranteed to return the current value, provided that the value
 * is only ever updated via operations with barrier semantics, such as
 * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
 * While this may be less performant than pg_atomic_read_u32(), it may be
 * easier to reason about correctness with this function in less performance-
 * sensitive code.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_read_membarrier_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader.  Note that this correctly interacts
 * with pg_atomic_compare_exchange_u32, in contrast to
 * pg_atomic_unlocked_write_u32().
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader.  But note that writing this way is
 * not guaranteed to correctly interact with read-modify-write operations like
 * pg_atomic_compare_exchange_u32.  This should only be used in cases where
 * minor performance regressions due to atomics emulation are unacceptable.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_unlocked_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_write_membarrier_u32 - write with barrier semantics.
 *
 * The write is guaranteed to succeed as a whole, i.e., it's not possible to
 * observe a partial write for any reader.  Note that this correctly interacts
 * with both pg_atomic_compare_exchange_u32() and
 * pg_atomic_read_membarrier_u32().  While this may be less performant than
 * pg_atomic_write_u32(), it may be easier to reason about correctness with
 * this function in less performance-sensitive code.
 *
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_membarrier_u32_impl(ptr, val);
}

/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_exchange_u32_impl(ptr, newval);
}

/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
static inline bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
                               uint32 *expected, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);
    AssertPointerAlignment(expected, 4);

    return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}
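
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * the canonical CAS retry loop, here raising a hypothetical shared maximum.
 * Because a failed compare-exchange stores the current value into
 * "expected", the loop needs no separate re-read:
 *
 *     uint32 expected = pg_atomic_read_u32(&shared_max);
 *
 *     while (expected < myval)
 *     {
 *         if (pg_atomic_compare_exchange_u32(&shared_max, &expected, myval))
 *             break;      we installed myval
 *     }
 *
 * The loop terminates either because our CAS succeeded or because some other
 * backend installed a value at least as large.  The 64-bit
 * pg_atomic_monotonic_advance_u64() below follows the same pattern.
 */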

/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_add_u32_impl(ptr, add_);
}
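
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * handing out distinct ticket numbers from a hypothetical shared counter;
 * concurrent callers are guaranteed to each observe a different value:
 *
 *     uint32 myticket = pg_atomic_fetch_add_u32(&next_ticket, 1);
 */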

/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_or_u32_impl(ptr, or_);
}
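
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * because fetch_or returns the value before the operation, it can both set
 * a hypothetical status bit and report which caller set it first:
 *
 *     uint32 oldval = pg_atomic_fetch_or_u32(&state, MY_STATUS_BIT);
 *
 *     if ((oldval & MY_STATUS_BIT) == 0)
 *         ... this caller flipped the bit from 0 to 1 ...
 */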

/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}
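
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * a hypothetical reference count where the last holder cleans up.  The full
 * barrier guarantees that this backend's prior accesses to the object are
 * complete before the count can be observed to reach zero:
 *
 *     if (pg_atomic_sub_fetch_u32(&obj->refcount, 1) == 0)
 *         cleanup_object(obj);    hypothetical cleanup routine
 */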

/* ----
 * The 64-bit operations have the same semantics as their 32-bit counterparts
 * if they are available. Check the corresponding 32-bit function for
 * documentation.
 * ----
 */
static inline void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
    /*
     * Can't necessarily enforce alignment - and don't need it - when using
     * the spinlock based fallback implementation. Therefore only assert when
     * not using it.
     */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_init_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_u64_impl(ptr);
}

static inline uint64
pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_membarrier_u64_impl(ptr);
}

static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_u64_impl(ptr, val);
}

static inline void
pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_membarrier_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_exchange_u64_impl(ptr, newval);
}

static inline bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
                               uint64 *expected, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

static inline uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

static inline uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

static inline uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

static inline uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}

/*
 * Monotonically advance the given variable using only atomic operations until
 * it's at least the target value.  Returns the latest value observed, which
 * may or may not be the target value.
 *
 * Full barrier semantics (even when value is unchanged).
 */
static inline uint64
pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
{
    uint64      currval;

#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif

    currval = pg_atomic_read_u64_impl(ptr);
    if (currval >= target)
    {
        pg_memory_barrier();
        return currval;
    }

    while (currval < target)
    {
        if (pg_atomic_compare_exchange_u64(ptr, &currval, target))
            return target;
    }

    return currval;
}
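
/*
 * Usage sketch (an illustrative addition, not part of the original header):
 * keeping a hypothetical shared watermark from ever moving backwards, no
 * matter in which order concurrent updaters arrive:
 *
 *     uint64 latest = pg_atomic_monotonic_advance_u64(&watermark, my_pos);
 *
 * Afterwards the shared value is at least my_pos, and "latest" holds the
 * value observed, which may already exceed my_pos if another updater got
 * there first.
 */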

#undef INSIDE_ATOMICS_H

#endif                          /* ATOMICS_H */

Generated by: LCOV version 1.14