/*-------------------------------------------------------------------------
 *
 * generic-gcc.h
 *    Atomic operations, implemented using gcc (or compatible) intrinsics.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

/*
 * An empty asm block should be a sufficient compiler barrier.
 */
#define pg_compiler_barrier_impl()  __asm__ __volatile__("" ::: "memory")

/*
 * If we're on GCC, we should be able to get a memory barrier
 * out of this compiler built-in.  But we prefer to rely on platform specific
 * definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
#   if defined(HAVE_GCC__ATOMIC_INT32_CAS)
#       define pg_memory_barrier_impl()     __atomic_thread_fence(__ATOMIC_SEQ_CST)
#   elif defined(__GNUC__)
#       define pg_memory_barrier_impl()     __sync_synchronize()
#   endif
#endif /* !defined(pg_memory_barrier_impl) */

#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
#       define pg_read_barrier_impl() do \
{ \
    pg_compiler_barrier_impl(); \
    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
} while (0)
#endif

#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
#       define pg_write_barrier_impl() do \
{ \
    pg_compiler_barrier_impl(); \
    __atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)
#endif
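
/*
 * Illustrative sketch: how the read and write barriers above are meant to
 * pair up in a message-passing pattern.  This uses the _impl macros defined
 * in this file directly; ordinary callers would go through the
 * pg_write_barrier()/pg_read_barrier() wrappers that atomics.h builds on
 * top of them.
 *
 *     static int payload;
 *     static volatile int ready = 0;
 *
 *     static void
 *     sketch_producer(void)
 *     {
 *         payload = 42;
 *         pg_write_barrier_impl();    // order the data store before the flag store
 *         ready = 1;
 *     }
 *
 *     static void
 *     sketch_consumer(void)
 *     {
 *         if (ready)
 *         {
 *             pg_read_barrier_impl(); // order the flag load before the data load
 *             // if ready was observed as 1, payload is guaranteed to be 42
 *         }
 *     }
 */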


/* generic gcc based atomic flag implementation */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
    && (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))

#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef struct pg_atomic_flag
{
    /*
     * If we have a choice, use int-width TAS, because that is more efficient
     * and/or more reliably implemented on most non-Intel platforms.  (Note
     * that this code isn't used on x86[_64]; see arch-x86.h for that.)
     */
#ifdef HAVE_GCC__SYNC_INT32_TAS
    volatile int value;
#else
    volatile char value;
#endif
} pg_atomic_flag;

#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */

/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
    && (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
    volatile uint32 value;
} pg_atomic_uint32;

#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */

/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
    && !defined(PG_DISABLE_64_BIT_ATOMICS) \
    && (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))

#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
    alignas(8) volatile uint64 value;
} pg_atomic_uint64;

#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */

#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT

#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)

#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
    /* NB: only an acquire barrier, not a full one */
    /* some platforms only support setting the value to 1 here */
    return __sync_lock_test_and_set(&ptr->value, 1) == 0;
}
#endif

#endif /* defined(HAVE_GCC__SYNC_*_TAS) */

#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
    return ptr->value == 0;
}
#endif

#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
    __sync_lock_release(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_FLAG
#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
    pg_atomic_clear_flag_impl(ptr);
}
#endif

#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
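
/*
 * Illustrative sketch: the flag primitives above are sufficient to build a
 * simple test-and-test-and-set spinlock.  The sketch calls the _impl
 * functions directly; real code would use the pg_atomic_*_flag() wrappers
 * declared in atomics.h.
 *
 *     static pg_atomic_flag sketch_mutex;  // pg_atomic_init_flag_impl() once at startup
 *
 *     static void
 *     sketch_lock(void)
 *     {
 *         while (!pg_atomic_test_set_flag_impl(&sketch_mutex))
 *         {
 *             // flag was already set; spin on the cheap unlocked test until
 *             // it looks clear, then retry the atomic test-and-set
 *             while (!pg_atomic_unlocked_test_flag_impl(&sketch_mutex))
 *                 ;
 *         }
 *     }
 *
 *     static void
 *     sketch_unlock(void)
 *     {
 *         pg_atomic_clear_flag_impl(&sketch_mutex);   // release semantics
 *     }
 */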

/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    /* FIXME: we can probably use a lower consistency model */
    return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool    ret;
    uint32  current;
    current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
    ret = current == *expected;
    *expected = current;
    return ret;
}
#endif
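
/*
 * Illustrative sketch: either compare-exchange implementation above serves as
 * the building block for arbitrary read-modify-write loops.  For example, a
 * hypothetical "atomic maximum" could be written as the usual CAS retry loop
 * (the public spelling would use pg_atomic_read_u32() and
 * pg_atomic_compare_exchange_u32() from atomics.h):
 *
 *     static inline void
 *     sketch_fetch_max_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
 *     {
 *         uint32      expected = ptr->value;
 *
 *         // On failure the CAS reloads *expected with the current value, so
 *         // just retry until the stored value is already >= val or we win.
 *         while (expected < val &&
 *                !pg_atomic_compare_exchange_u32_impl(ptr, &expected, val))
 *             ;
 *     }
 */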

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 32-bit __atomic_compare_exchange_n() implies
 * the availability of 32-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
    return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif

/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    return __sync_fetch_and_or(&ptr->value, or_);
}
#endif
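
/*
 * Illustrative sketch: the fetch_and/fetch_or variants return the value as it
 * was before the operation, which makes them convenient for flag bits.  The
 * names below are made up for the example:
 *
 *     #define SKETCH_FLAG_DIRTY   0x01
 *
 *     // Returns true if this call was the one that set the bit.
 *     static inline bool
 *     sketch_mark_dirty(volatile pg_atomic_uint32 *state)
 *     {
 *         uint32      old = pg_atomic_fetch_or_u32_impl(state, SKETCH_FLAG_DIRTY);
 *
 *         return (old & SKETCH_FLAG_DIRTY) == 0;
 *     }
 *
 *     static inline void
 *     sketch_clear_dirty(volatile pg_atomic_uint32 *state)
 *     {
 *         pg_atomic_fetch_and_u32_impl(state, ~(uint32) SKETCH_FLAG_DIRTY);
 *     }
 */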


#if !defined(PG_DISABLE_64_BIT_ATOMICS)

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    AssertPointerAlignment(expected, 8);
    return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool    ret;
    uint64  current;

    AssertPointerAlignment(expected, 8);
    current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
    ret = current == *expected;
    *expected = current;
    return ret;
}
#endif

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 64-bit __atomic_compare_exchange_n() implies
 * the availability of 64-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
    return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif

/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
    return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
    return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
    return __sync_fetch_and_or(&ptr->value, or_);
}
#endif

#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
