LCOV - code coverage report
Current view: top level - src/include/port/atomics - generic-gcc.h
Test: PostgreSQL 19devel
Date: 2025-11-13 05:17:35
Coverage: Lines: 21 of 21 hit (100.0 %), Functions: 10 of 10 hit (100.0 %)

/*-------------------------------------------------------------------------
 *
 * generic-gcc.h
 *    Atomic operations, implemented using gcc (or compatible) intrinsics.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

/*
 * An empty asm block should be a sufficient compiler barrier.
 */
#define pg_compiler_barrier_impl()  __asm__ __volatile__("" ::: "memory")
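
/*
 * A compiler barrier only constrains the compiler: it may not move, merge, or
 * cache memory accesses across the barrier, but the CPU remains free to
 * reorder them.  Illustrative sketch, not part of the upstream header; the
 * variable and function names are invented:
 *
 *     static int shared_payload;
 *     static volatile int payload_posted = 0;
 *
 *     static void
 *     post_payload(int v)
 *     {
 *         shared_payload = v;
 *         pg_compiler_barrier_impl();
 *         payload_posted = 1;
 *     }
 *
 * The barrier keeps the compiler from reordering the two stores, which can be
 * enough when everything runs on one CPU (e.g. code interrupted by a signal
 * handler in the same process).  For cross-CPU ordering, the read/write/full
 * memory barriers defined below are required instead.
 */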

/*
 * If we're on GCC, we should be able to get a memory barrier out of one of
 * these compiler built-ins.  But we prefer to rely on platform-specific
 * definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
#   if defined(HAVE_GCC__ATOMIC_INT32_CAS)
#       define pg_memory_barrier_impl()     __atomic_thread_fence(__ATOMIC_SEQ_CST)
#   elif defined(__GNUC__)
#       define pg_memory_barrier_impl()     __sync_synchronize()
#   endif
#endif /* !defined(pg_memory_barrier_impl) */
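
/*
 * Both fallbacks above emit a full (sequentially consistent) fence.  A full
 * barrier is needed when a store must be ordered before a later load from a
 * different location, which the weaker acquire/release fences below do not
 * guarantee.  Hypothetical sketch of the classic "set my flag, then check the
 * other side's flag" pattern (my_flag, their_flag and
 * enter_critical_section() are invented for illustration):
 *
 *     my_flag = 1;
 *     pg_memory_barrier_impl();
 *     if (their_flag == 0)
 *         enter_critical_section();
 *
 * Without the full fence the CPU could hoist the load of their_flag above the
 * store to my_flag, letting both sides enter at once.
 */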

#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
#       define pg_read_barrier_impl() do \
{ \
    pg_compiler_barrier_impl(); \
    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
} while (0)
#endif

#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
#       define pg_write_barrier_impl() do \
{ \
    pg_compiler_barrier_impl(); \
    __atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)
#endif
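
/*
 * Sketch of how the acquire/release fences pair up in practice (illustrative
 * only; real callers use the pg_write_barrier()/pg_read_barrier() wrappers
 * from atomics.h, and the variable names here are invented):
 *
 *     writer:
 *         shared_data = compute_data();
 *         pg_write_barrier();
 *         data_ready = true;
 *
 *     reader:
 *         if (data_ready)
 *         {
 *             pg_read_barrier();
 *             consume(shared_data);
 *         }
 *
 * The release fence keeps the store to shared_data from being reordered after
 * the store to data_ready; the acquire fence keeps the load of shared_data
 * from being reordered before the load of data_ready.  That is exactly the
 * "write barrier" / "read barrier" behaviour the comments above claim.
 */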


/* generic gcc based atomic flag implementation */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
    && (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))

#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef struct pg_atomic_flag
{
    /*
     * If we have a choice, use int-width TAS, because that is more efficient
     * and/or more reliably implemented on most non-Intel platforms.  (Note
     * that this code isn't used on x86[_64]; see arch-x86.h for that.)
     */
#ifdef HAVE_GCC__SYNC_INT32_TAS
    volatile int value;
#else
    volatile char value;
#endif
} pg_atomic_flag;

#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */

/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
    && (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
    volatile uint32 value;
} pg_atomic_uint32;

#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */

/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
    && !defined(PG_DISABLE_64_BIT_ATOMICS) \
    && (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))

#define PG_HAVE_ATOMIC_U64_SUPPORT

typedef struct pg_atomic_uint64
{
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;

#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */

#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT

#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)

#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
    /* NB: only an acquire barrier, not a full one */
    /* some platforms only support setting the value to 1 here */
    return __sync_lock_test_and_set(&ptr->value, 1) == 0;
}
#endif

#endif /* defined(HAVE_GCC__SYNC_*_TAS) */

#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
    return ptr->value == 0;
}
#endif

#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
    __sync_lock_release(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_FLAG
#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
    pg_atomic_clear_flag_impl(ptr);
}
#endif

#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
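
/*
 * Usage sketch for the flag operations above (illustrative, not part of the
 * upstream header).  Callers go through the pg_atomic_*_flag() wrappers
 * declared in atomics.h, which forward to these _impl functions; my_lock is
 * an invented variable:
 *
 *     static pg_atomic_flag my_lock;
 *
 *     pg_atomic_init_flag(&my_lock);
 *
 *     while (!pg_atomic_test_set_flag(&my_lock))
 *         ;
 *     ... critical section ...
 *     pg_atomic_clear_flag(&my_lock);
 *
 * __sync_lock_test_and_set() gives the acquire semantics and
 * __sync_lock_release() the release semantics that a lock needs, while
 * pg_atomic_unlocked_test_flag() is a plain read with no barrier, merely
 * reporting whether the flag is currently clear.
 */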

/* prefer __atomic; it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    /* FIXME: we can probably use a lower consistency model */
    return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool    ret;
    uint32  current;
    current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
    ret = current == *expected;
    *expected = current;
    return ret;
}
#endif
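
/*
 * In both implementations above a failed compare-exchange leaves the value
 * actually found in *expected, so a retry loop never needs a separate
 * re-read.  Hypothetical sketch of an "atomic maximum" built on that protocol
 * (atomic_max_u32 is an invented name; callers would use the
 * pg_atomic_compare_exchange_u32() and pg_atomic_read_u32() wrappers from
 * atomics.h):
 *
 *     static void
 *     atomic_max_u32(volatile pg_atomic_uint32 *target, uint32 candidate)
 *     {
 *         uint32      cur = pg_atomic_read_u32(target);
 *
 *         while (cur < candidate)
 *         {
 *             if (pg_atomic_compare_exchange_u32(target, &cur, candidate))
 *                 break;
 *         }
 *     }
 *
 * On each failed attempt cur already holds the newer value, so the loop
 * simply re-checks the condition and either retries or gives up.
 */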

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 32-bit __atomic_compare_exchange_n() implies
 * the availability of 32-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
    return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif
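
/*
 * When only the __sync CAS is available, an unconditional exchange can still
 * be built from a compare-exchange retry loop; the generic fallbacks
 * elsewhere under src/include/port/atomics take roughly this shape
 * (exchange_u32_via_cas is an invented name for illustration):
 *
 *     static inline uint32
 *     exchange_u32_via_cas(volatile pg_atomic_uint32 *ptr, uint32 newval)
 *     {
 *         uint32      old = ptr->value;
 *
 *         while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, newval))
 *             ;
 *         return old;
 *     }
 *
 * Each failed iteration refreshes "old" with the value currently stored, so
 * the loop terminates as soon as one CAS succeeds.
 */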

/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    return __sync_fetch_and_or(&ptr->value, or_);
}
#endif
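
/*
 * All of the fetch-and-modify operations return the value the variable held
 * *before* the update, which is often the whole point of using them.
 * Hypothetical sketch: set a status bit and learn, atomically, whether we
 * were the first to set it (mark_flagged and MY_STATUS_BIT are invented;
 * callers would use the pg_atomic_fetch_or_u32() wrapper from atomics.h):
 *
 *     #define MY_STATUS_BIT   0x0001
 *
 *     static bool
 *     mark_flagged(volatile pg_atomic_uint32 *state)
 *     {
 *         uint32      old = pg_atomic_fetch_or_u32(state, MY_STATUS_BIT);
 *
 *         return (old & MY_STATUS_BIT) == 0;
 *     }
 */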


#if !defined(PG_DISABLE_64_BIT_ATOMICS)

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    AssertPointerAlignment(expected, 8);
    return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool    ret;
    uint64  current;

    AssertPointerAlignment(expected, 8);
    current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
    ret = current == *expected;
    *expected = current;
    return ret;
}
#endif

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 64-bit __atomic_compare_exchange_n() implies
 * the availability of 64-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
    return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif

/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
    return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
    return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
    return __sync_fetch_and_or(&ptr->value, or_);
}
#endif

#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
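
/*
 * The 64-bit operations mirror their 32-bit counterparts and matter mostly
 * for values that can overflow 32 bits.  Hypothetical usage sketch
 * (bytes_written, nbytes and total are invented; callers would use the
 * pg_atomic_*_u64() wrappers from atomics.h):
 *
 *     static pg_atomic_uint64 bytes_written;
 *
 *     pg_atomic_init_u64(&bytes_written, 0);
 *     ...
 *     pg_atomic_fetch_add_u64(&bytes_written, nbytes);
 *     ...
 *     uint64      total = pg_atomic_read_u64(&bytes_written);
 *
 * With PG_DISABLE_64_BIT_ATOMICS defined, or without the 64-bit intrinsics,
 * this header simply defines none of the operations above.
 */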

Generated by: LCOV version 1.16