LCOV - code coverage report
Current view: top level - src/include/port - atomics.h (source / functions)
Test: PostgreSQL 19devel
Date: 2025-12-30 11:17:10
Coverage:   Lines: 78 of 78 hit (100.0 %)   Functions: 31 of 31 hit (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * atomics.h
       4             :  *    Atomic operations.
       5             :  *
       6             :  * Hardware and compiler dependent functions for manipulating memory
       7             :  * atomically and dealing with cache coherency. Used to implement locking
       8             :  * facilities and lockless algorithms/data structures.
       9             :  *
       10             :  * To bring up postgres on a platform/compiler, implementations of at least
       11             :  * the following operations should be provided:
      12             :  * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
      13             :  * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
      14             :  * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
      15             :  * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
      16             :  *
      17             :  * There exist generic, hardware independent, implementations for several
      18             :  * compilers which might be sufficient, although possibly not optimal, for a
       19             :  * new platform. If no such generic implementation is available, spinlocks will
      20             :  * be used to implement the 64-bit parts of the API.
      21             :  *
      22             :  * Implement _u64 atomics if and only if your platform can use them
      23             :  * efficiently (and obviously correctly).
      24             :  *
      25             :  * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
      26             :  * whenever possible. Writing correct code using these facilities is hard.
      27             :  *
      28             :  * For an introduction to using memory barriers within the PostgreSQL backend,
      29             :  * see src/backend/storage/lmgr/README.barrier
      30             :  *
      31             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
      32             :  * Portions Copyright (c) 1994, Regents of the University of California
      33             :  *
      34             :  * src/include/port/atomics.h
      35             :  *
      36             :  *-------------------------------------------------------------------------
      37             :  */
      38             : #ifndef ATOMICS_H
      39             : #define ATOMICS_H
      40             : 
      41             : #ifdef FRONTEND
      42             : #error "atomics.h may not be included from frontend code"
      43             : #endif
      44             : 
      45             : #define INSIDE_ATOMICS_H
      46             : 
      47             : #include <limits.h>
      48             : 
      49             : /*
      50             :  * First a set of architecture specific files is included.
      51             :  *
      52             :  * These files can provide the full set of atomics or can do pretty much
      53             :  * nothing if all the compilers commonly used on these platforms provide
      54             :  * usable generics.
      55             :  *
       56             :  * Don't add inline assembly of the actual atomic operations if all the
      57             :  * common implementations of your platform provide intrinsics. Intrinsics are
      58             :  * much easier to understand and potentially support more architectures.
      59             :  *
      60             :  * It will often make sense to define memory barrier semantics here, since
      61             :  * e.g. generic compiler intrinsics for x86 memory barriers can't know that
       62             :  * postgres doesn't need x86 read/write barriers to do anything more than a
      63             :  * compiler barrier.
      64             :  *
      65             :  */
      66             : #if defined(__arm__) || defined(__arm) || defined(__aarch64__)
      67             : #include "port/atomics/arch-arm.h"
      68             : #elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
      69             : #include "port/atomics/arch-x86.h"
      70             : #elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
      71             : #include "port/atomics/arch-ppc.h"
      72             : #endif
      73             : 
      74             : /*
       75             :  * Compiler-specific, but architecture-independent implementations.
       76             :  *
       77             :  * Provide architecture-independent implementations of the atomic
       78             :  * facilities. At the very least, compiler barriers should be provided, but a
      79             :  * full implementation of
      80             :  * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
      81             :  * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
       82             :  * using compiler intrinsics is a good idea.
      83             :  */
      84             : /*
      85             :  * gcc or compatible, including clang and icc.
      86             :  */
      87             : #if defined(__GNUC__) || defined(__INTEL_COMPILER)
      88             : #include "port/atomics/generic-gcc.h"
      89             : #elif defined(_MSC_VER)
      90             : #include "port/atomics/generic-msvc.h"
      91             : #else
      92             : /* Unknown compiler. */
      93             : #endif
      94             : 
      95             : /* Fail if we couldn't find implementations of required facilities. */
      96             : #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
      97             : #error "could not find an implementation of pg_atomic_uint32"
      98             : #endif
      99             : #if !defined(pg_compiler_barrier_impl)
     100             : #error "could not find an implementation of pg_compiler_barrier"
     101             : #endif
     102             : #if !defined(pg_memory_barrier_impl)
     103             : #error "could not find an implementation of pg_memory_barrier_impl"
     104             : #endif
     105             : 
     106             : 
     107             : /*
     108             :  * Provide a spinlock-based implementation of the 64 bit variants, if
     109             :  * necessary.
     110             :  */
     111             : #include "port/atomics/fallback.h"
     112             : 
     113             : /*
     114             :  * Provide additional operations using supported infrastructure. These are
     115             :  * expected to be efficient if the underlying atomic operations are efficient.
     116             :  */
     117             : #include "port/atomics/generic.h"
     118             : 
     119             : 
     120             : /*
      121             :  * pg_compiler_barrier - prevent the compiler from moving code across the barrier
     122             :  *
     123             :  * A compiler barrier need not (and preferably should not) emit any actual
     124             :  * machine code, but must act as an optimization fence: the compiler must not
     125             :  * reorder loads or stores to main memory around the barrier.  However, the
     126             :  * CPU may still reorder loads or stores at runtime, if the architecture's
     127             :  * memory model permits this.
     128             :  */
     129             : #define pg_compiler_barrier()   pg_compiler_barrier_impl()
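/*
 * For illustration, a sketch of a typical use (the variables below are
 * hypothetical): when only code running in the same process has to observe
 * the stores in program order, e.g. a signal handler for this backend, a
 * compiler barrier is enough and no fence instruction is needed.
 *
 *     pending_request = req;       // plain store read by the signal handler
 *     pg_compiler_barrier();       // compiler may not reorder the two stores
 *     request_ready = true;        // the handler checks this flag first
 */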
     130             : 
     131             : /*
     132             :  * pg_memory_barrier - prevent the CPU from reordering memory access
     133             :  *
     134             :  * A memory barrier must act as a compiler barrier, and in addition must
     135             :  * guarantee that all loads and stores issued prior to the barrier are
     136             :  * completed before any loads or stores issued after the barrier.  Unless
     137             :  * loads and stores are totally ordered (which is not the case on most
     138             :  * architectures) this requires issuing some sort of memory fencing
     139             :  * instruction.
     140             :  */
     141             : #define pg_memory_barrier() pg_memory_barrier_impl()
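/*
 * For illustration, a sketch of the store-then-load pattern that needs a full
 * barrier (my_flag and their_flag are hypothetical pg_atomic_uint32 variables):
 * neither a read barrier nor a write barrier orders a store against a later
 * load.
 *
 *     pg_atomic_write_u32(&my_flag, 1);
 *     pg_memory_barrier();        // order the store before the load below
 *     if (pg_atomic_read_u32(&their_flag) == 0)
 *         ... safe to proceed; see README.barrier for the full argument ...
 */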
     142             : 
     143             : /*
     144             :  * pg_(read|write)_barrier - prevent the CPU from reordering memory access
     145             :  *
     146             :  * A read barrier must act as a compiler barrier, and in addition must
     147             :  * guarantee that any loads issued prior to the barrier are completed before
     148             :  * any loads issued after the barrier.  Similarly, a write barrier acts
     149             :  * as a compiler barrier, and also orders stores.  Read and write barriers
     150             :  * are thus weaker than a full memory barrier, but stronger than a compiler
     151             :  * barrier.  In practice, on machines with strong memory ordering, read and
     152             :  * write barriers may require nothing more than a compiler barrier.
     153             :  */
     154             : #define pg_read_barrier()   pg_read_barrier_impl()
     155             : #define pg_write_barrier()  pg_write_barrier_impl()
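/*
 * For illustration, the usual pairing (shared->payload and shared->ready are
 * hypothetical): the producer's write barrier keeps the payload stores ahead
 * of the flag store, and the consumer's read barrier keeps the payload loads
 * behind the flag load.
 *
 *     // producer
 *     shared->payload = compute_payload();
 *     pg_write_barrier();
 *     pg_atomic_write_u32(&shared->ready, 1);
 *
 *     // consumer
 *     if (pg_atomic_read_u32(&shared->ready) == 1)
 *     {
 *         pg_read_barrier();
 *         use_payload(shared->payload);
 *     }
 */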
     156             : 
     157             : /*
      158             :  * pg_spin_delay - allow the CPU to relax in busy loops
     159             :  */
     160             : #define pg_spin_delay() pg_spin_delay_impl()
     161             : 
     162             : /*
     163             :  * pg_atomic_init_flag - initialize atomic flag.
     164             :  *
     165             :  * No barrier semantics.
     166             :  */
     167             : static inline void
     168       24796 : pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
     169             : {
     170       24796 :     pg_atomic_init_flag_impl(ptr);
     171       24796 : }
     172             : 
     173             : /*
     174             :  * pg_atomic_test_set_flag - TAS()
     175             :  *
     176             :  * Returns true if the flag has successfully been set, false otherwise.
     177             :  *
     178             :  * Acquire (including read barrier) semantics.
     179             :  */
     180             : static inline bool
     181      294168 : pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
     182             : {
     183      294168 :     return pg_atomic_test_set_flag_impl(ptr);
     184             : }
     185             : 
     186             : /*
     187             :  * pg_atomic_unlocked_test_flag - Check if the lock is free
     188             :  *
     189             :  * Returns true if the flag currently is not set, false otherwise.
     190             :  *
     191             :  * No barrier semantics.
     192             :  */
     193             : static inline bool
     194      597188 : pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
     195             : {
     196      597188 :     return pg_atomic_unlocked_test_flag_impl(ptr);
     197             : }
     198             : 
     199             : /*
     200             :  * pg_atomic_clear_flag - release lock set by TAS()
     201             :  *
     202             :  * Release (including write barrier) semantics.
     203             :  */
     204             : static inline void
     205        2850 : pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
     206             : {
     207        2850 :     pg_atomic_clear_flag_impl(ptr);
     208        2850 : }
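/*
 * For illustration, the flag operations above compose into a minimal
 * test-and-test-and-set loop (the lock variable is hypothetical, and real
 * code should normally use spinlocks or lwlocks instead, as noted at the top
 * of this file):
 *
 *     // once, before any concurrent use
 *     pg_atomic_init_flag(&lock);
 *
 *     // acquire
 *     while (!pg_atomic_test_set_flag(&lock))
 *     {
 *         // spin without write traffic until the flag looks free again
 *         while (!pg_atomic_unlocked_test_flag(&lock))
 *             pg_spin_delay();
 *     }
 *
 *     // ... critical section ...
 *
 *     // release
 *     pg_atomic_clear_flag(&lock);
 */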
     209             : 
     210             : 
     211             : /*
     212             :  * pg_atomic_init_u32 - initialize atomic variable
     213             :  *
      214             :  * Has to be done before any concurrent usage.
     215             :  *
     216             :  * No barrier semantics.
     217             :  */
     218             : static inline void
     219    49394646 : pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
     220             : {
     221             :     AssertPointerAlignment(ptr, 4);
     222             : 
     223    49394646 :     pg_atomic_init_u32_impl(ptr, val);
     224    49394646 : }
     225             : 
     226             : /*
     227             :  * pg_atomic_read_u32 - unlocked read from atomic variable.
     228             :  *
      229             :  * The read is guaranteed to return a value that has been written by this or
      230             :  * another process at some point in the past. There is, however, no cache
      231             :  * coherency interaction guaranteeing that the value hasn't since been
      232             :  * written to again.
     233             :  *
     234             :  * No barrier semantics.
     235             :  */
     236             : static inline uint32
     237  1017182250 : pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
     238             : {
     239             :     AssertPointerAlignment(ptr, 4);
     240  1017182250 :     return pg_atomic_read_u32_impl(ptr);
     241             : }
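/*
 * For illustration (shared->active_workers is hypothetical): initialize the
 * variable once during setup, before any concurrent access, and plain reads
 * are then fine wherever a possibly slightly stale value is acceptable.
 *
 *     pg_atomic_init_u32(&shared->active_workers, 0);    // during setup
 *     ...
 *     uint32 n = pg_atomic_read_u32(&shared->active_workers);
 */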
     242             : 
     243             : /*
     244             :  * pg_atomic_read_membarrier_u32 - read with barrier semantics.
     245             :  *
     246             :  * This read is guaranteed to return the current value, provided that the value
     247             :  * is only ever updated via operations with barrier semantics, such as
     248             :  * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
     249             :  * While this may be less performant than pg_atomic_read_u32(), it may be
     250             :  * easier to reason about correctness with this function in less performance-
     251             :  * sensitive code.
     252             :  *
     253             :  * Full barrier semantics.
     254             :  */
     255             : static inline uint32
     256             : pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
     257             : {
     258             :     AssertPointerAlignment(ptr, 4);
     259             : 
     260             :     return pg_atomic_read_membarrier_u32_impl(ptr);
     261             : }
     262             : 
     263             : /*
     264             :  * pg_atomic_write_u32 - write to atomic variable.
     265             :  *
     266             :  * The write is guaranteed to succeed as a whole, i.e. it's not possible to
     267             :  * observe a partial write for any reader.  Note that this correctly interacts
     268             :  * with pg_atomic_compare_exchange_u32, in contrast to
     269             :  * pg_atomic_unlocked_write_u32().
     270             :  *
     271             :  * No barrier semantics.
     272             :  */
     273             : static inline void
     274      138588 : pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
     275             : {
     276             :     AssertPointerAlignment(ptr, 4);
     277             : 
     278      138588 :     pg_atomic_write_u32_impl(ptr, val);
     279      138588 : }
     280             : 
     281             : /*
     282             :  * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
     283             :  *
     284             :  * Write to an atomic variable, without atomicity guarantees. I.e. it is not
     285             :  * guaranteed that a concurrent reader will not see a torn value, nor is this
     286             :  * guaranteed to correctly interact with concurrent read-modify-write
     287             :  * operations like pg_atomic_compare_exchange_u32.  This should only be used
     288             :  * in cases where minor performance regressions due to atomic operations are
     289             :  * unacceptable and where exclusive access is guaranteed via some external
     290             :  * means.
     291             :  *
     292             :  * No barrier semantics.
     293             :  */
     294             : static inline void
     295     8733106 : pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
     296             : {
     297             :     AssertPointerAlignment(ptr, 4);
     298             : 
     299     8733106 :     pg_atomic_unlocked_write_u32_impl(ptr, val);
     300     8733106 : }
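/*
 * For illustration (shared->counter is hypothetical):
 * pg_atomic_unlocked_write_u32() is only safe when something external rules
 * out concurrent access, e.g. the variable is effectively still private
 * because no other process has started using it yet.
 *
 *     // exclusive access guaranteed by the caller
 *     pg_atomic_unlocked_write_u32(&shared->counter, 0);
 */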
     301             : 
     302             : /*
     303             :  * pg_atomic_write_membarrier_u32 - write with barrier semantics.
     304             :  *
     305             :  * The write is guaranteed to succeed as a whole, i.e., it's not possible to
     306             :  * observe a partial write for any reader.  Note that this correctly interacts
     307             :  * with both pg_atomic_compare_exchange_u32() and
     308             :  * pg_atomic_read_membarrier_u32().  While this may be less performant than
     309             :  * pg_atomic_write_u32(), it may be easier to reason about correctness with
     310             :  * this function in less performance-sensitive code.
     311             :  *
     312             :  * Full barrier semantics.
     313             :  */
     314             : static inline void
     315          28 : pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
     316             : {
     317             :     AssertPointerAlignment(ptr, 4);
     318             : 
     319          28 :     pg_atomic_write_membarrier_u32_impl(ptr, val);
     320          28 : }
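/*
 * For illustration (shared->status and STATUS_DONE are hypothetical): pairing
 * the membarrier read and write variants avoids having to place explicit
 * barriers around the plain read/write functions.
 *
 *     // writer
 *     pg_atomic_write_membarrier_u32(&shared->status, STATUS_DONE);
 *
 *     // reader
 *     if (pg_atomic_read_membarrier_u32(&shared->status) == STATUS_DONE)
 *         ... the writer's earlier stores are visible here ...
 */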
     321             : 
     322             : /*
     323             :  * pg_atomic_exchange_u32 - exchange newval with current value
     324             :  *
     325             :  * Returns the old value of 'ptr' before the swap.
     326             :  *
     327             :  * Full barrier semantics.
     328             :  */
     329             : static inline uint32
     330       30010 : pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
     331             : {
     332             :     AssertPointerAlignment(ptr, 4);
     333             : 
     334       30010 :     return pg_atomic_exchange_u32_impl(ptr, newval);
     335             : }
     336             : 
     337             : /*
     338             :  * pg_atomic_compare_exchange_u32 - CAS operation
     339             :  *
     340             :  * Atomically compare the current value of ptr with *expected and store newval
      341             :  * iff *ptr and *expected have the same value. The current value of *ptr will
     342             :  * always be stored in *expected.
     343             :  *
     344             :  * Return true if values have been exchanged, false otherwise.
     345             :  *
     346             :  * Full barrier semantics.
     347             :  */
     348             : static inline bool
     349   980261850 : pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
     350             :                                uint32 *expected, uint32 newval)
     351             : {
     352             :     AssertPointerAlignment(ptr, 4);
     353             :     AssertPointerAlignment(expected, 4);
     354             : 
     355   980261850 :     return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
     356             : }
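/*
 * For illustration, the usual retry loop (var and newmax are hypothetical);
 * on failure the operation has already refreshed 'oldval' with the current
 * value, so the loop does not need to re-read the variable:
 *
 *     uint32 oldval = pg_atomic_read_u32(&var);
 *
 *     while (oldval < newmax)
 *     {
 *         if (pg_atomic_compare_exchange_u32(&var, &oldval, newmax))
 *             break;              // we advanced the value to newmax
 *     }
 *     // on exit, the variable is known to have reached newmax at some point
 */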
     357             : 
     358             : /*
     359             :  * pg_atomic_fetch_add_u32 - atomically add to variable
     360             :  *
     361             :  * Returns the value of ptr before the arithmetic operation.
     362             :  *
     363             :  * Full barrier semantics.
     364             :  */
     365             : static inline uint32
     366    15128924 : pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
     367             : {
     368             :     AssertPointerAlignment(ptr, 4);
     369    15128924 :     return pg_atomic_fetch_add_u32_impl(ptr, add_);
     370             : }
     371             : 
     372             : /*
     373             :  * pg_atomic_fetch_sub_u32 - atomically subtract from variable
     374             :  *
     375             :  * Returns the value of ptr before the arithmetic operation. Note that sub_
     376             :  * may not be INT_MIN due to platform limitations.
     377             :  *
     378             :  * Full barrier semantics.
     379             :  */
     380             : static inline uint32
     381   138221330 : pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
     382             : {
     383             :     AssertPointerAlignment(ptr, 4);
     384             :     Assert(sub_ != INT_MIN);
     385   138221330 :     return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
     386             : }
     387             : 
     388             : /*
     389             :  * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
     390             :  *
     391             :  * Returns the value of ptr before the arithmetic operation.
     392             :  *
     393             :  * Full barrier semantics.
     394             :  */
     395             : static inline uint32
     396    13733274 : pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
     397             : {
     398             :     AssertPointerAlignment(ptr, 4);
     399    13733274 :     return pg_atomic_fetch_and_u32_impl(ptr, and_);
     400             : }
     401             : 
     402             : /*
     403             :  * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
     404             :  *
     405             :  * Returns the value of ptr before the arithmetic operation.
     406             :  *
     407             :  * Full barrier semantics.
     408             :  */
     409             : static inline uint32
     410   101072632 : pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
     411             : {
     412             :     AssertPointerAlignment(ptr, 4);
     413   101072632 :     return pg_atomic_fetch_or_u32_impl(ptr, or_);
     414             : }
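/*
 * For illustration (shared->flags and MY_FLAG_BIT are hypothetical): the
 * returned old value tells the caller whether this call was the one that set
 * the bit; pg_atomic_fetch_and_u32() with the complement clears it again.
 *
 *     uint32 old = pg_atomic_fetch_or_u32(&shared->flags, MY_FLAG_BIT);
 *
 *     if ((old & MY_FLAG_BIT) == 0)
 *         ... the bit was not set before, so this backend set it ...
 */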
     415             : 
     416             : /*
     417             :  * pg_atomic_add_fetch_u32 - atomically add to variable
     418             :  *
     419             :  * Returns the value of ptr after the arithmetic operation.
     420             :  *
     421             :  * Full barrier semantics.
     422             :  */
     423             : static inline uint32
     424         784 : pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
     425             : {
     426             :     AssertPointerAlignment(ptr, 4);
     427         784 :     return pg_atomic_add_fetch_u32_impl(ptr, add_);
     428             : }
     429             : 
     430             : /*
     431             :  * pg_atomic_sub_fetch_u32 - atomically subtract from variable
     432             :  *
     433             :  * Returns the value of ptr after the arithmetic operation. Note that sub_ may
     434             :  * not be INT_MIN due to platform limitations.
     435             :  *
     436             :  * Full barrier semantics.
     437             :  */
     438             : static inline uint32
     439   746421996 : pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
     440             : {
     441             :     AssertPointerAlignment(ptr, 4);
     442             :     Assert(sub_ != INT_MIN);
     443   746421996 :     return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
     444             : }
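/*
 * For illustration (entry->refcount is hypothetical): the *_fetch variants
 * return the value after the operation, which is what a reference-count
 * release path usually wants.
 *
 *     if (pg_atomic_sub_fetch_u32(&entry->refcount, 1) == 0)
 *         ... last reference dropped, safe to tear the entry down ...
 */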
     445             : 
     446             : /* ----
      447             :  * The 64-bit operations have the same semantics as their 32-bit counterparts
      448             :  * if they are available. Check the corresponding 32-bit function for
      449             :  * documentation.
     450             :  * ----
     451             :  */
     452             : static inline void
     453     6038598 : pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
     454             : {
     455             :     /*
     456             :      * Can't necessarily enforce alignment - and don't need it - when using
     457             :      * the spinlock based fallback implementation. Therefore only assert when
     458             :      * not using it.
     459             :      */
     460             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     461             :     AssertPointerAlignment(ptr, 8);
     462             : #endif
     463     6038598 :     pg_atomic_init_u64_impl(ptr, val);
     464     6038598 : }
     465             : 
     466             : static inline uint64
     467   577290934 : pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
     468             : {
     469             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     470             :     AssertPointerAlignment(ptr, 8);
     471             : #endif
     472   577290934 :     return pg_atomic_read_u64_impl(ptr);
     473             : }
     474             : 
     475             : static inline uint64
     476     4999500 : pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
     477             : {
     478             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     479             :     AssertPointerAlignment(ptr, 8);
     480             : #endif
     481     4999500 :     return pg_atomic_read_membarrier_u64_impl(ptr);
     482             : }
     483             : 
     484             : static inline void
     485    40551448 : pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
     486             : {
     487             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     488             :     AssertPointerAlignment(ptr, 8);
     489             : #endif
     490    40551448 :     pg_atomic_write_u64_impl(ptr, val);
     491    40551448 : }
     492             : 
     493             : static inline void
     494             : pg_atomic_unlocked_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
     495             : {
     496             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     497             :     AssertPointerAlignment(ptr, 8);
     498             : #endif
     499             : 
     500             :     pg_atomic_unlocked_write_u64_impl(ptr, val);
     501             : }
     502             : 
     503             : static inline void
     504        1972 : pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
     505             : {
     506             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     507             :     AssertPointerAlignment(ptr, 8);
     508             : #endif
     509        1972 :     pg_atomic_write_membarrier_u64_impl(ptr, val);
     510        1972 : }
     511             : 
     512             : static inline uint64
     513    36344756 : pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
     514             : {
     515             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     516             :     AssertPointerAlignment(ptr, 8);
     517             : #endif
     518    36344756 :     return pg_atomic_exchange_u64_impl(ptr, newval);
     519             : }
     520             : 
     521             : static inline bool
     522     3367996 : pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
     523             :                                uint64 *expected, uint64 newval)
     524             : {
     525             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     526             :     AssertPointerAlignment(ptr, 8);
     527             : #endif
     528     3367996 :     return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
     529             : }
     530             : 
     531             : static inline uint64
     532      193488 : pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
     533             : {
     534             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     535             :     AssertPointerAlignment(ptr, 8);
     536             : #endif
     537      193488 :     return pg_atomic_fetch_add_u64_impl(ptr, add_);
     538             : }
     539             : 
     540             : static inline uint64
     541           6 : pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
     542             : {
     543             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     544             :     AssertPointerAlignment(ptr, 8);
     545             : #endif
     546             :     Assert(sub_ != PG_INT64_MIN);
     547           6 :     return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
     548             : }
     549             : 
     550             : static inline uint64
     551          18 : pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
     552             : {
     553             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     554             :     AssertPointerAlignment(ptr, 8);
     555             : #endif
     556          18 :     return pg_atomic_fetch_and_u64_impl(ptr, and_);
     557             : }
     558             : 
     559             : static inline uint64
     560          12 : pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
     561             : {
     562             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     563             :     AssertPointerAlignment(ptr, 8);
     564             : #endif
     565          12 :     return pg_atomic_fetch_or_u64_impl(ptr, or_);
     566             : }
     567             : 
     568             : static inline uint64
     569        1182 : pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
     570             : {
     571             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     572             :     AssertPointerAlignment(ptr, 8);
     573             : #endif
     574        1182 :     return pg_atomic_add_fetch_u64_impl(ptr, add_);
     575             : }
     576             : 
     577             : static inline uint64
     578          10 : pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
     579             : {
     580             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     581             :     AssertPointerAlignment(ptr, 8);
     582             : #endif
     583             :     Assert(sub_ != PG_INT64_MIN);
     584          10 :     return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
     585             : }
     586             : 
     587             : /*
     588             :  * Monotonically advance the given variable using only atomic operations until
     589             :  * it's at least the target value.  Returns the latest value observed, which
     590             :  * may or may not be the target value.
     591             :  *
     592             :  * Full barrier semantics (even when value is unchanged).
     593             :  */
     594             : static inline uint64
     595      891998 : pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
     596             : {
     597             :     uint64      currval;
     598             : 
     599             : #ifndef PG_HAVE_ATOMIC_U64_SIMULATION
     600             :     AssertPointerAlignment(ptr, 8);
     601             : #endif
     602             : 
     603      891998 :     currval = pg_atomic_read_u64_impl(ptr);
     604      891998 :     if (currval >= target)
     605             :     {
     606      117642 :         pg_memory_barrier();
     607      117642 :         return currval;
     608             :     }
     609             : 
     610      776036 :     while (currval < target)
     611             :     {
     612      774370 :         if (pg_atomic_compare_exchange_u64(ptr, &currval, target))
     613      772690 :             return target;
     614             :     }
     615             : 
     616        1666 :     return currval;
     617             : }
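/*
 * For illustration (shared->newest_seen and my_position are hypothetical):
 * concurrent processes can each publish how far they have progressed, and the
 * shared value only ever moves forward, regardless of the order in which the
 * calls execute.
 *
 *     (void) pg_atomic_monotonic_advance_u64(&shared->newest_seen, my_position);
 */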
     618             : 
     619             : #undef INSIDE_ATOMICS_H
     620             : 
     621             : #endif                          /* ATOMICS_H */

Generated by: LCOV version 1.16