LCOV - code coverage report
Current view: top level - src/include/port/atomics - generic-gcc.h
Test:         PostgreSQL 17devel
Date:         2024-04-26 16:11:01
Coverage:     Lines: 21 / 21 (100.0 %)    Functions: 10 / 10 (100.0 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * generic-gcc.h
       4             :  *    Atomic operations, implemented using gcc (or compatible) intrinsics.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  * NOTES:
      10             :  *
      11             :  * Documentation:
      12             :  * * Legacy __sync Built-in Functions for Atomic Memory Access
      13             :  *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
      14             :  * * Built-in functions for memory model aware atomic operations
      15             :  *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
      16             :  *
      17             :  * src/include/port/atomics/generic-gcc.h
      18             :  *
      19             :  *-------------------------------------------------------------------------
      20             :  */
      21             : 
      22             : /* intentionally no include guards, should only be included by atomics.h */
      23             : #ifndef INSIDE_ATOMICS_H
      24             : #error "should be included via atomics.h"
      25             : #endif
      26             : 
      27             : /*
      28             :  * An empty asm block should be a sufficient compiler barrier.
      29             :  */
      30             : #define pg_compiler_barrier_impl()  __asm__ __volatile__("" ::: "memory")
      31             : 
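
A compiler barrier only restrains reordering by the compiler; it emits no CPU fence instruction. A minimal sketch of the distinction (publish(), payload, and ready are hypothetical names, not part of this header):

static int payload;
static int ready;

static void
publish(void)
{
    payload = 42;
    /* keeps gcc from reordering the two stores in the generated code */
    __asm__ __volatile__("" ::: "memory");

    /*
     * ... but a weakly ordered CPU may still make the stores visible to
     * another processor out of order; that is what the full
     * pg_memory_barrier_impl() defined below is for.
     */
    ready = 1;
}
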
      32             : /*
      33             :  * If we're on GCC 4.1.0 or higher, we should be able to get a memory barrier
      34             :  * out of this compiler built-in.  But we prefer to rely on platform specific
      35             :  * definitions where possible, and use this only as a fallback.
      36             :  */
      37             : #if !defined(pg_memory_barrier_impl)
      38             : #   if defined(HAVE_GCC__ATOMIC_INT32_CAS)
      39             : #       define pg_memory_barrier_impl()     __atomic_thread_fence(__ATOMIC_SEQ_CST)
      40             : #   elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
      41             : #       define pg_memory_barrier_impl()     __sync_synchronize()
      42             : #   endif
      43             : #endif /* !defined(pg_memory_barrier_impl) */
      44             : 
      45             : #if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
      46             : /* acquire semantics include read barrier semantics */
      47             : #       define pg_read_barrier_impl()       __atomic_thread_fence(__ATOMIC_ACQUIRE)
      48             : #endif
      49             : 
      50             : #if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
      51             : /* release semantics include write barrier semantics */
      52             : #       define pg_write_barrier_impl()      __atomic_thread_fence(__ATOMIC_RELEASE)
      53             : #endif
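
The read and write barriers can map onto acquire and release fences because they are used for the classic message-passing pattern: order a payload store before a flag store on the writing side, and a flag load before a payload load on the reading side. A hedged sketch using the same __atomic fences (producer(), consumer(), and the two variables are hypothetical):

static int payload;
static volatile int flag;

static void
producer(void)
{
    payload = 123;
    __atomic_thread_fence(__ATOMIC_RELEASE);    /* write barrier: payload before flag */
    flag = 1;
}

static int
consumer(void)
{
    while (flag == 0)
        ;                                       /* wait for the producer */
    __atomic_thread_fence(__ATOMIC_ACQUIRE);    /* read barrier: flag before payload */
    return payload;
}
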
      54             : 
      55             : 
      56             : #ifdef HAVE_ATOMICS
      57             : 
      58             : /* generic gcc based atomic flag implementation */
      59             : #if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
      60             :     && (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))
      61             : 
      62             : #define PG_HAVE_ATOMIC_FLAG_SUPPORT
      63             : typedef struct pg_atomic_flag
      64             : {
      65             :     /*
      66             :      * If we have a choice, use int-width TAS, because that is more efficient
      67             :      * and/or more reliably implemented on most non-Intel platforms.  (Note
      68             :      * that this code isn't used on x86[_64]; see arch-x86.h for that.)
      69             :      */
      70             : #ifdef HAVE_GCC__SYNC_INT32_TAS
      71             :     volatile int value;
      72             : #else
      73             :     volatile char value;
      74             : #endif
      75             : } pg_atomic_flag;
      76             : 
      77             : #endif /* !ATOMIC_FLAG_SUPPORT && (SYNC_INT32_TAS || SYNC_CHAR_TAS) */
      78             : 
      79             : /* generic gcc based atomic uint32 implementation */
      80             : #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
      81             :     && (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))
      82             : 
      83             : #define PG_HAVE_ATOMIC_U32_SUPPORT
      84             : typedef struct pg_atomic_uint32
      85             : {
      86             :     volatile uint32 value;
      87             : } pg_atomic_uint32;
      88             : 
      89             : #endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
      90             : 
      91             : /* generic gcc based atomic uint64 implementation */
      92             : #if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
      93             :     && !defined(PG_DISABLE_64_BIT_ATOMICS) \
      94             :     && (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))
      95             : 
      96             : #define PG_HAVE_ATOMIC_U64_SUPPORT
      97             : 
      98             : typedef struct pg_atomic_uint64
      99             : {
     100             :     volatile uint64 value pg_attribute_aligned(8);
     101             : } pg_atomic_uint64;
     102             : 
     103             : #endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
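
pg_attribute_aligned(8) matters because some 32-bit ABIs align uint64 to only 4 bytes, while 8-byte atomic instructions are generally only guaranteed atomic for naturally aligned operands. A hedged C11 equivalent of the same idea with a compile-time check (demo_atomic_u64 is a hypothetical stand-in, not the PostgreSQL type):

#include <stdint.h>

typedef struct demo_atomic_u64
{
    _Alignas(8) volatile uint64_t value;    /* same intent as pg_attribute_aligned(8) */
} demo_atomic_u64;

/* fail the build if the value could ever land on a 4-byte boundary */
_Static_assert(_Alignof(demo_atomic_u64) >= 8,
               "64-bit atomic value must be naturally aligned");
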
     104             : 
     105             : #ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT
     106             : 
     107             : #if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)
     108             : 
     109             : #ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
     110             : #define PG_HAVE_ATOMIC_TEST_SET_FLAG
     111             : static inline bool
     112             : pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
     113             : {
     114             :     /* NB: only an acquire barrier, not a full one */
      115             :     /* some platforms only support a 1 here */
     116             :     return __sync_lock_test_and_set(&ptr->value, 1) == 0;
     117             : }
     118             : #endif
     119             : 
     120             : #endif /* defined(HAVE_GCC__SYNC_*_TAS) */
     121             : 
     122             : #ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
     123             : #define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
     124             : static inline bool
     125         922 : pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
     126             : {
     127         922 :     return ptr->value == 0;
     128             : }
     129             : #endif
     130             : 
     131             : #ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
     132             : #define PG_HAVE_ATOMIC_CLEAR_FLAG
     133             : static inline void
     134             : pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
     135             : {
     136             :     __sync_lock_release(&ptr->value);
     137             : }
     138             : #endif
     139             : 
     140             : #ifndef PG_HAVE_ATOMIC_INIT_FLAG
     141             : #define PG_HAVE_ATOMIC_INIT_FLAG
     142             : static inline void
     143        5310 : pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
     144             : {
     145        5310 :     pg_atomic_clear_flag_impl(ptr);
     146        5310 : }
     147             : #endif
     148             : 
     149             : #endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
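
Taken together, the flag operations above are what a minimal test-and-set spinlock needs: per the GCC documentation, __sync_lock_test_and_set() is an acquire barrier and __sync_lock_release() a release barrier. A hedged usage sketch (my_lock and the helper functions are hypothetical callers, not part of this header):

static pg_atomic_flag my_lock;

static void
my_lock_init(void)
{
    pg_atomic_init_flag_impl(&my_lock);         /* starts out clear */
}

static void
my_lock_acquire(void)
{
    /* true only when we changed the flag from clear to set ourselves */
    while (!pg_atomic_test_set_flag_impl(&my_lock))
        ;                                       /* spin; real code would back off */
}

static void
my_lock_release(void)
{
    pg_atomic_clear_flag_impl(&my_lock);
}
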
     150             : 
     151             : /* prefer __atomic, it has a better API */
     152             : #if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
     153             : #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
     154             : static inline bool
     155             : pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
     156             :                                     uint32 *expected, uint32 newval)
     157             : {
     158             :     /* FIXME: we can probably use a lower consistency model */
     159             :     return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
     160             :                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
     161             : }
     162             : #endif
     163             : 
     164             : #if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
     165             : #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
     166             : static inline bool
     167             : pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
     168             :                                     uint32 *expected, uint32 newval)
     169             : {
     170             :     bool    ret;
     171             :     uint32  current;
     172             :     current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
     173             :     ret = current == *expected;
     174             :     *expected = current;
     175             :     return ret;
     176             : }
     177             : #endif
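
Compare-exchange writes the currently observed value back through *expected on failure because callers almost always retry in a loop and want to reuse that fresh value instead of re-reading. A hedged sketch of such a loop (atomic_fetch_max_u32 is a hypothetical helper, not part of this header):

static uint32
atomic_fetch_max_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    uint32      old = ptr->value;

    /* on failure, 'old' is refreshed with the value the CAS actually saw */
    while (old < val &&
           !pg_atomic_compare_exchange_u32_impl(ptr, &old, val))
        ;
    return old;                 /* value observed before any update we made */
}
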
     178             : 
     179             : /*
     180             :  * __sync_lock_test_and_set() only supports setting the value to 1 on some
     181             :  * platforms, so we only provide an __atomic implementation for
     182             :  * pg_atomic_exchange.
     183             :  *
     184             :  * We assume the availability of 32-bit __atomic_compare_exchange_n() implies
     185             :  * the availability of 32-bit __atomic_exchange_n().
     186             :  */
     187             : #if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
     188             : #define PG_HAVE_ATOMIC_EXCHANGE_U32
     189             : static inline uint32
     190       14136 : pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
     191             : {
     192       14136 :     return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
     193             : }
     194             : #endif
     195             : 
     196             : /* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */
     197             : 
     198             : #if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
     199             : #define PG_HAVE_ATOMIC_FETCH_ADD_U32
     200             : static inline uint32
     201             : pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
     202             : {
     203             :     return __sync_fetch_and_add(&ptr->value, add_);
     204             : }
     205             : #endif
     206             : 
     207             : #if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
     208             : #define PG_HAVE_ATOMIC_FETCH_SUB_U32
     209             : static inline uint32
     210   385001600 : pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
     211             : {
     212   385001600 :     return __sync_fetch_and_sub(&ptr->value, sub_);
     213             : }
     214             : #endif
     215             : 
     216             : #if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
     217             : #define PG_HAVE_ATOMIC_FETCH_AND_U32
     218             : static inline uint32
     219     1556518 : pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
     220             : {
     221     1556518 :     return __sync_fetch_and_and(&ptr->value, and_);
     222             : }
     223             : #endif
     224             : 
     225             : #if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
     226             : #define PG_HAVE_ATOMIC_FETCH_OR_U32
     227             : static inline uint32
     228    49083246 : pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
     229             : {
     230    49083246 :     return __sync_fetch_and_or(&ptr->value, or_);
     231             : }
     232             : #endif
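
fetch_or/fetch_and are the natural primitives for atomically setting and clearing individual bits in a status word, with the pre-modification return value telling the caller whether the bit actually changed. A hedged sketch (MY_FLAG_BIT and the two helpers are hypothetical, not part of this header):

#define MY_FLAG_BIT 0x01

static bool
set_my_flag(volatile pg_atomic_uint32 *ptr)
{
    /* returns true if the bit was already set before this call */
    return (pg_atomic_fetch_or_u32_impl(ptr, MY_FLAG_BIT) & MY_FLAG_BIT) != 0;
}

static void
clear_my_flag(volatile pg_atomic_uint32 *ptr)
{
    pg_atomic_fetch_and_u32_impl(ptr, ~(uint32) MY_FLAG_BIT);
}
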
     233             : 
     234             : 
     235             : #if !defined(PG_DISABLE_64_BIT_ATOMICS)
     236             : 
     237             : #if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
     238             : #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
     239             : static inline bool
     240             : pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
     241             :                                     uint64 *expected, uint64 newval)
     242             : {
     243             :     return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
     244             :                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
     245             : }
     246             : #endif
     247             : 
     248             : #if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
     249             : #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
     250             : static inline bool
     251             : pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
     252             :                                     uint64 *expected, uint64 newval)
     253             : {
     254             :     bool    ret;
     255             :     uint64  current;
     256             :     current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
     257             :     ret = current == *expected;
     258             :     *expected = current;
     259             :     return ret;
     260             : }
     261             : #endif
     262             : 
     263             : /*
     264             :  * __sync_lock_test_and_set() only supports setting the value to 1 on some
     265             :  * platforms, so we only provide an __atomic implementation for
     266             :  * pg_atomic_exchange.
     267             :  *
     268             :  * We assume the availability of 64-bit __atomic_compare_exchange_n() implies
     269             :  * the availability of 64-bit __atomic_exchange_n().
     270             :  */
     271             : #if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
     272             : #define PG_HAVE_ATOMIC_EXCHANGE_U64
     273             : static inline uint64
     274    27014056 : pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 newval)
     275             : {
     276    27014056 :     return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
     277             : }
     278             : #endif
     279             : 
     280             : /* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */
     281             : 
     282             : #if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
     283             : #define PG_HAVE_ATOMIC_FETCH_ADD_U64
     284             : static inline uint64
     285             : pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
     286             : {
     287             :     return __sync_fetch_and_add(&ptr->value, add_);
     288             : }
     289             : #endif
     290             : 
     291             : #if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
     292             : #define PG_HAVE_ATOMIC_FETCH_SUB_U64
     293             : static inline uint64
     294        1326 : pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
     295             : {
     296        1326 :     return __sync_fetch_and_sub(&ptr->value, sub_);
     297             : }
     298             : #endif
     299             : 
     300             : #if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
     301             : #define PG_HAVE_ATOMIC_FETCH_AND_U64
     302             : static inline uint64
     303          18 : pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
     304             : {
     305          18 :     return __sync_fetch_and_and(&ptr->value, and_);
     306             : }
     307             : #endif
     308             : 
     309             : #if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
     310             : #define PG_HAVE_ATOMIC_FETCH_OR_U64
     311             : static inline uint64
     312          12 : pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
     313             : {
     314          12 :     return __sync_fetch_and_or(&ptr->value, or_);
     315             : }
     316             : #endif
     317             : 
     318             : #endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
     319             : 
     320             : #endif /* defined(HAVE_ATOMICS) */
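
None of the _impl functions above are called directly by backend code; atomics.h wraps them in the public pg_atomic_* API, which adds assertions and selects the right implementation per platform. A hedged sketch of typical caller code (the counter variable and helpers are hypothetical; pg_atomic_init_u32() and pg_atomic_fetch_add_u32() are the public wrappers from atomics.h):

static pg_atomic_uint32 request_counter;

static void
counter_init(void)
{
    pg_atomic_init_u32(&request_counter, 0);
}

static uint32
counter_next(void)
{
    /*
     * Returns the value before the increment; on platforms using the
     * __sync fallback this ends up in pg_atomic_fetch_add_u32_impl() above.
     */
    return pg_atomic_fetch_add_u32(&request_counter, 1);
}
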

Generated by: LCOV version 1.14