Ruby 3.5.0dev (2025-08-27 revision 5ff7b2c582a56fe7d92248adf093fd278a334066)
atomic.h
1#ifndef RUBY_ATOMIC_H /*-*-C++-*-vi:se ft=cpp:*/
2#define RUBY_ATOMIC_H
27#include "ruby/internal/config.h"
28
29#ifdef STDC_HEADERS
30# include <stddef.h> /* size_t */
31#endif
32
33#ifdef HAVE_SYS_TYPES_H
34# include <sys/types.h> /* ssize_t */
35#endif
36
37#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
38# pragma intrinsic(_InterlockedOr)
39#elif defined(__sun) && defined(HAVE_ATOMIC_H)
40# include <atomic.h>
41#endif
42
43#include "ruby/assert.h"
44#include "ruby/backward/2/limits.h"
49#include "ruby/internal/cast.h"
50#include "ruby/internal/value.h"
53
54/*
55 * Asserts that your environment supports more than one atomic type. These
56 * days systems tend to have this property (C11 is more than a decade old,
57 * right?) but we still support older environments.
58 */
59#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
60# define RUBY_ATOMIC_GENERIC_MACRO 1
61#endif
62
68#if defined(__DOXYGEN__)
69using rb_atomic_t = std::atomic<unsigned>;
70#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
71typedef unsigned int rb_atomic_t;
72#elif defined(HAVE_GCC_SYNC_BUILTINS)
73typedef unsigned int rb_atomic_t;
74#elif defined(_WIN32)
75# include <winsock2.h> // to prevent macro redefinitions
76# include <windows.h> // for `LONG` and `Interlocked` functions
77typedef LONG rb_atomic_t;
78#elif defined(__sun) && defined(HAVE_ATOMIC_H)
79typedef unsigned int rb_atomic_t;
80#elif defined(HAVE_STDATOMIC_H)
81# include <stdatomic.h>
82typedef unsigned int rb_atomic_t;
83#else
84# error No atomic operation found
85#endif
86
87/* Memory ordering constants */
88#if defined(HAVE_GCC_ATOMIC_BUILTINS)
89# define RBIMPL_ATOMIC_RELAXED __ATOMIC_RELAXED
90# define RBIMPL_ATOMIC_ACQUIRE __ATOMIC_ACQUIRE
91# define RBIMPL_ATOMIC_RELEASE __ATOMIC_RELEASE
92# define RBIMPL_ATOMIC_ACQ_REL __ATOMIC_ACQ_REL
93# define RBIMPL_ATOMIC_SEQ_CST __ATOMIC_SEQ_CST
94#elif defined(HAVE_STDATOMIC_H)
95# define RBIMPL_ATOMIC_RELAXED memory_order_relaxed
96# define RBIMPL_ATOMIC_ACQUIRE memory_order_acquire
97# define RBIMPL_ATOMIC_RELEASE memory_order_release
98# define RBIMPL_ATOMIC_ACQ_REL memory_order_acq_rel
99# define RBIMPL_ATOMIC_SEQ_CST memory_order_seq_cst
100#else
101/* Dummy values for platforms that lack explicit memory-order support */
102# define RBIMPL_ATOMIC_RELAXED 0
103# define RBIMPL_ATOMIC_ACQUIRE 1
104# define RBIMPL_ATOMIC_RELEASE 2
105# define RBIMPL_ATOMIC_ACQ_REL 3
106# define RBIMPL_ATOMIC_SEQ_CST 4
107#endif
108
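For illustration, these constants are forwarded verbatim to the underlying compiler atomics. A minimal, hypothetical sketch of a release store under the GCC-builtins configuration (the variable `flag` and the function are not part of this header):

static unsigned int flag;

static void
example_release_store(void)
{
#if defined(HAVE_GCC_ATOMIC_BUILTINS)
    /* Here RBIMPL_ATOMIC_RELEASE is simply __ATOMIC_RELEASE. */
    __atomic_store_n(&flag, 1u, RBIMPL_ATOMIC_RELEASE);
#endif
}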
118#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
119
129#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
130
141#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
142
152#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
153
165#define RUBY_ATOMIC_CAS(var, oldval, newval) \
166 rbimpl_atomic_cas(&(var), (oldval), (newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST)
167
175#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var), RBIMPL_ATOMIC_SEQ_CST)
176
185#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_store(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
186
195#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
196
205#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
206
214#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var), RBIMPL_ATOMIC_SEQ_CST)
215
223#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var), RBIMPL_ATOMIC_SEQ_CST)
224
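A minimal usage sketch of the `rb_atomic_t` macros above, as it might appear in code that includes this header (the counter variable and the function are hypothetical; every macro is a full, sequentially consistent operation):

static rb_atomic_t counter;

static void
example_counter(void)
{
    rb_atomic_t prev = RUBY_ATOMIC_FETCH_ADD(counter, 2); /* returns the value *before* the add */
    RUBY_ATOMIC_INC(counter);                             /* no return value */
    RUBY_ATOMIC_OR(counter, 0x1);                         /* atomic bitwise OR */
    if (RUBY_ATOMIC_CAS(counter, prev, 0) == prev) {
        /* the CAS found `prev` and replaced it with 0; otherwise it left `counter` alone */
    }
    RUBY_ATOMIC_SET(counter, RUBY_ATOMIC_LOAD(counter));  /* atomic load feeding an atomic store */
    RUBY_ATOMIC_DEC(counter);
}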
235#define RUBY_ATOMIC_SIZE_FETCH_ADD(var, val) rbimpl_atomic_size_fetch_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
236
246#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var), RBIMPL_ATOMIC_SEQ_CST)
247
257#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var), RBIMPL_ATOMIC_SEQ_CST)
258
270#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
271 rbimpl_atomic_size_exchange(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
272
284#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
285 rbimpl_atomic_size_cas(&(var), (oldval), (newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST)
286
297#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
298
309#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
310
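The `size_t` flavor works the same way; a hedged sketch for a hypothetical byte counter in including code:

static size_t allocated_bytes;

static void
example_size_counter(size_t n)
{
    RUBY_ATOMIC_SIZE_ADD(allocated_bytes, n);
    size_t before = RUBY_ATOMIC_SIZE_FETCH_ADD(allocated_bytes, n); /* value prior to this add */
    RUBY_ATOMIC_SIZE_SUB(allocated_bytes, n);
    RUBY_ATOMIC_SIZE_DEC(allocated_bytes);
    (void)before;
}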
327#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
328 RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val, RBIMPL_ATOMIC_SEQ_CST))
329
338#define RUBY_ATOMIC_PTR_LOAD(var) \
339 RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&var, RBIMPL_ATOMIC_SEQ_CST))
340
351#define RUBY_ATOMIC_PTR_SET(var, val) \
352 rbimpl_atomic_ptr_store((volatile void **)&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
353
365#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
366 RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST))
367
378#define RUBY_ATOMIC_VALUE_SET(var, val) \
379 rbimpl_atomic_value_store(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
380
392#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
393 rbimpl_atomic_value_exchange(&(var), (val), RBIMPL_ATOMIC_SEQ_CST)
394
406#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
407 rbimpl_atomic_value_cas(&(var), (oldval), (newval), RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST)
408
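A hedged sketch of the pointer macros used for one-time lazy publication, as it might appear in code that includes this header (`struct table`, `create_table` and `destroy_table` are hypothetical):

struct table;
struct table *create_table(void);   /* hypothetical, assumed to exist elsewhere */
void destroy_table(struct table *); /* hypothetical, assumed to exist elsewhere */

static struct table *shared_table;

static struct table *
example_lazy_table(void)
{
    struct table *t = RUBY_ATOMIC_PTR_LOAD(shared_table);
    if (t == NULL) {
        struct table *fresh = create_table();
        t = RUBY_ATOMIC_PTR_CAS(shared_table, NULL, fresh); /* returns the previous value */
        if (t == NULL) {
            t = fresh;            /* we won the race; `fresh` is now published */
        }
        else {
            destroy_table(fresh); /* another thread won; discard our copy */
        }
    }
    return t;
}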
413static inline rb_atomic_t
414rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
415{
416 (void)memory_order;
417#if 0
418
419#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
420 return __atomic_fetch_add(ptr, val, memory_order);
421
422#elif defined(HAVE_GCC_SYNC_BUILTINS)
423 return __sync_fetch_and_add(ptr, val);
424
425#elif defined(_WIN32)
426 return InterlockedExchangeAdd(ptr, val);
427
428#elif defined(__sun) && defined(HAVE_ATOMIC_H)
429 /*
430 * `atomic_add_int_nv` takes its second argument as `int`, while our
431 * `rb_atomic_t` is unsigned. We cannot pass `val` as-is; we have to check
432 * for integer overflow manually.
433 */
434 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
435 return atomic_add_int_nv(ptr, val) - val;
436
437#elif defined(HAVE_STDATOMIC_H)
438 return atomic_fetch_add_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
439
440#else
441# error Unsupported platform.
442#endif
443}
444
449static inline size_t
450rbimpl_atomic_size_fetch_add(volatile size_t *ptr, size_t val, int memory_order)
451{
452 (void)memory_order;
453#if 0
454
455#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
456 return __atomic_fetch_add(ptr, val, memory_order);
457
458#elif defined(HAVE_GCC_SYNC_BUILTINS)
459 return __sync_fetch_and_add(ptr, val);
460
461#elif defined(_WIN32)
462 return InterlockedExchangeAdd64(ptr, val);
463
464#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
465 /* Ditto for `atomic_add_int_nv`. */
466 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
467 return atomic_add_long_nv(ptr, val) - val;
468
469#elif defined(__sun) && defined(HAVE_ATOMIC_H)
470 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
471
472 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
473 return rbimpl_atomic_fetch_add(tmp, val, memory_order);
474
475#elif defined(HAVE_STDATOMIC_H)
476 return atomic_fetch_add_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
477
478#else
479# error Unsupported platform.
480#endif
481}
482
486static inline void
487rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
488{
489 (void)memory_order;
490#if 0
491
492#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
493 /*
494 * GCC on amd64 is smart enough to detect that this `__atomic_add_fetch`'s
495 * return value is unused, and compiles it down to a single `LOCK ADD`
496 * instruction.
497 */
498 __atomic_add_fetch(ptr, val, memory_order);
499
500#elif defined(HAVE_GCC_SYNC_BUILTINS)
501 __sync_add_and_fetch(ptr, val);
502
503#elif defined(_WIN32)
504 /*
505 * `InterlockedExchangeAdd` is `LOCK XADD`. There also seems to be an
506 * `_InterlockedAdd` intrinsic on ARM Windows but not on x86, so we stick
507 * to `InterlockedExchangeAdd` for better portability.
508 */
509 InterlockedExchangeAdd(ptr, val);
510
511#elif defined(__sun) && defined(HAVE_ATOMIC_H)
512 /* Ditto for `atomic_add_int_nv`. */
513 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
514 atomic_add_int(ptr, val);
515
516#elif defined(HAVE_STDATOMIC_H)
517 atomic_fetch_add_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
518
519#else
520# error Unsupported platform.
521#endif
522}
523
527static inline void
528rbimpl_atomic_size_add(volatile size_t *ptr, size_t val, int memory_order)
529{
530 (void)memory_order;
531#if 0
532
533#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
534 __atomic_add_fetch(ptr, val, memory_order);
535
536#elif defined(HAVE_GCC_SYNC_BUILTINS)
537 __sync_add_and_fetch(ptr, val);
538
539#elif defined(_WIN64)
540 /* Ditto for `InterlockedExchangeAdd`. */
541 InterlockedExchangeAdd64(ptr, val);
542
543#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
544 /* Ditto for `atomic_add_int_nv`. */
545 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
546 atomic_add_long(ptr, val);
547
548#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
549 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
550
551 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
552 rbimpl_atomic_add(tmp, val, memory_order);
553
554#elif defined(HAVE_STDATOMIC_H)
555 atomic_fetch_add_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
556
557#else
558# error Unsupported platform.
559#endif
560}
561
565static inline void
566rbimpl_atomic_inc(volatile rb_atomic_t *ptr, int memory_order)
567{
568 (void)memory_order;
569#if 0
570
571#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
572 rbimpl_atomic_add(ptr, 1, memory_order);
573
574#elif defined(_WIN32)
575 InterlockedIncrement(ptr);
576
577#elif defined(__sun) && defined(HAVE_ATOMIC_H)
578 atomic_inc_uint(ptr);
579
580#elif defined(HAVE_STDATOMIC_H)
581 rbimpl_atomic_add(ptr, 1, memory_order);
582
583#else
584# error Unsupported platform.
585#endif
586}
587
591static inline void
592rbimpl_atomic_size_inc(volatile size_t *ptr, int memory_order)
593{
594 (void)memory_order;
595#if 0
596
597#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
598 rbimpl_atomic_size_add(ptr, 1, memory_order);
599
600#elif defined(_WIN64)
601 InterlockedIncrement64(ptr);
602
603#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
604 atomic_inc_ulong(ptr);
605
606#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
607 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
608
609 rbimpl_atomic_size_add(ptr, 1, memory_order);
610
611#elif defined(HAVE_STDATOMIC_H)
612 rbimpl_atomic_size_add(ptr, 1, memory_order);
613
614#else
615# error Unsupported platform.
616#endif
617}
618
622static inline rb_atomic_t
623rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
624{
625 (void)memory_order;
626#if 0
627
628#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
629 return __atomic_fetch_sub(ptr, val, memory_order);
630
631#elif defined(HAVE_GCC_SYNC_BUILTINS)
632 return __sync_fetch_and_sub(ptr, val);
633
634#elif defined(_WIN32)
635 /* rb_atomic_t is signed here! Safe to do `-val`. */
636 return InterlockedExchangeAdd(ptr, -val);
637
638#elif defined(__sun) && defined(HAVE_ATOMIC_H)
639 /* Ditto for `rbimpl_atomic_fetch_add`. */
640 const signed neg = -1;
641 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
642 return atomic_add_int_nv(ptr, neg * val) + val;
643
644#elif defined(HAVE_STDATOMIC_H)
645 return atomic_fetch_sub_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
646
647#else
648# error Unsupported platform.
649#endif
650}
651
655static inline void
656rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
657{
658 (void)memory_order;
659#if 0
660
661#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
662 __atomic_sub_fetch(ptr, val, memory_order);
663
664#elif defined(HAVE_GCC_SYNC_BUILTINS)
665 __sync_sub_and_fetch(ptr, val);
666
667#elif defined(_WIN32)
668 InterlockedExchangeAdd(ptr, -val);
669
670#elif defined(__sun) && defined(HAVE_ATOMIC_H)
671 const signed neg = -1;
672 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
673 atomic_add_int(ptr, neg * val);
674
675#elif defined(HAVE_STDATOMIC_H)
676 atomic_fetch_sub_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
677
678#else
679# error Unsupported platform.
680#endif
681}
682
686static inline void
687rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val, int memory_order)
688{
689 (void)memory_order;
690#if 0
691
692#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
693 __atomic_sub_fetch(ptr, val, memory_order);
694
695#elif defined(HAVE_GCC_SYNC_BUILTINS)
696 __sync_sub_and_fetch(ptr, val);
697
698#elif defined(_WIN64)
699 const ssize_t neg = -1;
700 InterlockedExchangeAdd64(ptr, neg * val);
701
702#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
703 const signed neg = -1;
704 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
705 atomic_add_long(ptr, neg * val);
706
707#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
708 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
709
710 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
711 rbimpl_atomic_sub(tmp, val, memory_order);
712
713#elif defined(HAVE_STDATOMIC_H)
714 atomic_fetch_sub_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
715
716#else
717# error Unsupported platform.
718#endif
719}
720
724static inline void
725rbimpl_atomic_dec(volatile rb_atomic_t *ptr, int memory_order)
726{
727 (void)memory_order;
728#if 0
729
730#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
731 rbimpl_atomic_sub(ptr, 1, memory_order);
732
733#elif defined(_WIN32)
734 InterlockedDecrement(ptr);
735
736#elif defined(__sun) && defined(HAVE_ATOMIC_H)
737 atomic_dec_uint(ptr);
738
739#elif defined(HAVE_STDATOMIC_H)
740 rbimpl_atomic_sub(ptr, 1, memory_order);
741
742#else
743# error Unsupported platform.
744#endif
745}
746
750static inline void
751rbimpl_atomic_size_dec(volatile size_t *ptr, int memory_order)
752{
753 (void)memory_order;
754#if 0
755
756#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
757 rbimpl_atomic_size_sub(ptr, 1, memory_order);
758
759#elif defined(_WIN64)
760 InterlockedDecrement64(ptr);
761
762#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
763 atomic_dec_ulong(ptr);
764
765#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
766 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
767
768 rbimpl_atomic_size_sub(ptr, 1, memory_order);
769
770#elif defined(HAVE_STDATOMIC_H)
771 rbimpl_atomic_size_sub(ptr, 1, memory_order);
772
773#else
774# error Unsupported platform.
775#endif
776}
777
781static inline void
782rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
783{
784 (void)memory_order;
785#if 0
786
787#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
788 __atomic_or_fetch(ptr, val, memory_order);
789
790#elif defined(HAVE_GCC_SYNC_BUILTINS)
791 __sync_or_and_fetch(ptr, val);
792
793#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
794 _InterlockedOr(ptr, val);
795
796#elif defined(_WIN32) && defined(__GNUC__)
797 /* This was for old MinGW. Maybe not needed any longer? */
798 __asm__(
799 "lock\n\t"
800 "orl\t%1, %0"
801 : "=m"(ptr)
802 : "Ir"(val));
803
804#elif defined(_WIN32) && defined(_M_IX86)
805 __asm mov eax, ptr;
806 __asm mov ecx, val;
807 __asm lock or [eax], ecx;
808
809#elif defined(__sun) && defined(HAVE_ATOMIC_H)
810 atomic_or_uint(ptr, val);
811
812#elif !defined(_WIN32) && defined(HAVE_STDATOMIC_H)
813 atomic_fetch_or_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
814
815#else
816# error Unsupported platform.
817#endif
818}
819
820/* Nobody uses this but for theoretical backwards compatibility... */
821#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
822static inline rb_atomic_t
823rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
824{
825 rbimpl_atomic_or(var, val, RBIMPL_ATOMIC_SEQ_CST);
    return *var;
826}
827#endif
828
832static inline rb_atomic_t
833rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
834{
835 (void)memory_order;
836#if 0
837
838#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
839 return __atomic_exchange_n(ptr, val, memory_order);
840
841#elif defined(HAVE_GCC_SYNC_BUILTINS)
842 return __sync_lock_test_and_set(ptr, val);
843
844#elif defined(_WIN32)
845 return InterlockedExchange(ptr, val);
846
847#elif defined(__sun) && defined(HAVE_ATOMIC_H)
848 return atomic_swap_uint(ptr, val);
849
850#elif defined(HAVE_STDATOMIC_H)
851 return atomic_exchange_explicit((_Atomic volatile rb_atomic_t *)ptr, val, memory_order);
852
853#else
854# error Unsupported platform.
855#endif
856}
857
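Because `RUBY_ATOMIC_EXCHANGE` returns the previous value, it is enough to build a simple test-and-set spinlock; a hypothetical sketch for including code (a real lock would also want to yield or pause while spinning):

static rb_atomic_t lock_word; /* 0: unlocked, 1: locked */

static void
example_spin_lock(void)
{
    while (RUBY_ATOMIC_EXCHANGE(lock_word, 1) != 0) {
        /* busy-wait: another thread holds the lock */
    }
}

static void
example_spin_unlock(void)
{
    RUBY_ATOMIC_SET(lock_word, 0);
}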
861static inline size_t
862rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val, int memory_order)
863{
864 (void)memory_order;
865#if 0
866
867#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
868 return __atomic_exchange_n(ptr, val, memory_order);
869
870#elif defined(HAVE_GCC_SYNC_BUILTINS)
871 return __sync_lock_test_and_set(ptr, val);
872
873#elif defined(_WIN64)
874 return InterlockedExchange64(ptr, val);
875
876#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
877 return atomic_swap_ulong(ptr, val);
878
879#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
880 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
881
882 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
883 const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val, memory_order);
884 return RBIMPL_CAST((size_t)ret);
885
886#elif defined(HAVE_STDATOMIC_H)
887 return atomic_exchange_explicit((_Atomic volatile size_t *)ptr, val, memory_order);
888
889#else
890# error Unsupported platform.
891#endif
892}
893
897static inline void
898rbimpl_atomic_size_store(volatile size_t *ptr, size_t val, int memory_order)
899{
900 (void)memory_order;
901#if 0
902
903#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
904 __atomic_store_n(ptr, val, memory_order);
905
906#else
907 rbimpl_atomic_size_exchange(ptr, val, memory_order);
908
909#endif
910}
911
915static inline void *
916rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val, int memory_order)
917{
918 (void)memory_order;
919#if 0
920
921#elif defined(InterlockedExchangePointer)
922 /* const_cast */
923 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
924 PVOID pval = RBIMPL_CAST((PVOID)val);
925 return InterlockedExchangePointer(pptr, pval);
926
927#elif defined(__sun) && defined(HAVE_ATOMIC_H)
928 return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));
929
930#else
931 RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));
932
933 const size_t sval = RBIMPL_CAST((size_t)val);
934 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
935 const size_t sret = rbimpl_atomic_size_exchange(sptr, sval, memory_order);
936 return RBIMPL_CAST((void *)sret);
937
938#endif
939}
940
944static inline void
945rbimpl_atomic_ptr_store(volatile void **ptr, void *val, int memory_order)
946{
947 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
948
949 const size_t sval = RBIMPL_CAST((size_t)val);
950 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
951 rbimpl_atomic_size_store(sptr, sval, memory_order);
952}
953
957static inline VALUE
958rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val, int memory_order)
959{
960 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
961
962 const size_t sval = RBIMPL_CAST((size_t)val);
963 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
964 const size_t sret = rbimpl_atomic_size_exchange(sptr, sval, memory_order);
965 return RBIMPL_CAST((VALUE)sret);
966}
967
971static inline void
972rbimpl_atomic_value_store(volatile VALUE *ptr, VALUE val, int memory_order)
973{
974 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
975
976 const size_t sval = RBIMPL_CAST((size_t)val);
977 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
978 rbimpl_atomic_size_store(sptr, sval, memory_order);
979}
980
984static inline rb_atomic_t
985rbimpl_atomic_load(volatile rb_atomic_t *ptr, int memory_order)
986{
987 (void)memory_order;
988#if 0
989
990#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
991 return __atomic_load_n(ptr, memory_order);
992#else
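    /* Atomically adding zero does not change the value but does return it,
     * so a fetch-add of 0 doubles as an atomic load here. */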
993 return rbimpl_atomic_fetch_add(ptr, 0, memory_order);
994#endif
995}
996
1000static inline void
1001rbimpl_atomic_store(volatile rb_atomic_t *ptr, rb_atomic_t val, int memory_order)
1002{
1003 (void)memory_order;
1004#if 0
1005
1006#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
1007 __atomic_store_n(ptr, val, memory_order);
1008
1009#else
1010 /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
1011 rbimpl_atomic_exchange(ptr, val, memory_order);
1012
1013#endif
1014}
1015
1019static inline rb_atomic_t
1020rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval, int success_memorder, int failure_memorder)
1021{
1022 (void)success_memorder;
1023 (void)failure_memorder;
1024#if 0
1025
1026#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
1027 __atomic_compare_exchange_n(
1028 ptr, &oldval, newval, 0, success_memorder, failure_memorder);
1029 return oldval;
1030
1031#elif defined(HAVE_GCC_SYNC_BUILTINS)
1032 return __sync_val_compare_and_swap(ptr, oldval, newval);
1033
1034#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
1035 return InterlockedCompareExchange(ptr, newval, oldval);
1036
1037#elif defined(_WIN32)
1038 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
1039 PVOID pold = RBIMPL_CAST((PVOID)oldval);
1040 PVOID pnew = RBIMPL_CAST((PVOID)newval);
1041 PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
1042 return RBIMPL_CAST((rb_atomic_t)pret);
1043
1044#elif defined(__sun) && defined(HAVE_ATOMIC_H)
1045 return atomic_cas_uint(ptr, oldval, newval);
1046
1047#elif defined(HAVE_STDATOMIC_H)
1048 atomic_compare_exchange_strong_explicit(
1049 (_Atomic volatile rb_atomic_t *)ptr, &oldval, newval, success_memorder, failure_memorder);
1050 return oldval;
1051
1052#else
1053# error Unsupported platform.
1054#endif
1055}
1056
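Since the CAS helpers return the value they actually found, the usual retry loop compares that return value against the expected one; a hypothetical "record the maximum" sketch for including code:

static rb_atomic_t high_water;

static void
example_update_max(rb_atomic_t candidate)
{
    rb_atomic_t cur = RUBY_ATOMIC_LOAD(high_water);
    while (cur < candidate) {
        rb_atomic_t prev = RUBY_ATOMIC_CAS(high_water, cur, candidate);
        if (prev == cur) {
            break;       /* we installed `candidate` */
        }
        cur = prev;      /* lost the race; re-evaluate against the new value */
    }
}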
1057/* Nobody uses this but for theoretical backwards compatibility... */
1058#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
1059static inline rb_atomic_t
1060rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
1061{
1062 return rbimpl_atomic_cas(var, oldval, newval, RBIMPL_ATOMIC_SEQ_CST, RBIMPL_ATOMIC_SEQ_CST);
1063}
1064#endif
1065
1069static inline size_t
1070rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval, int success_memorder, int failure_memorder)
1071{
1072 (void)success_memorder;
1073 (void)failure_memorder;
1074#if 0
1075
1076#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
1077 __atomic_compare_exchange_n(
1078 ptr, &oldval, newval, 0, success_memorder, failure_memorder);
1079 return oldval;
1080
1081#elif defined(HAVE_GCC_SYNC_BUILTINS)
1082 return __sync_val_compare_and_swap(ptr, oldval, newval);
1083
1084#elif defined(_WIN64)
1085 return InterlockedCompareExchange64(ptr, newval, oldval);
1086
1087#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
1088 return atomic_cas_ulong(ptr, oldval, newval);
1089
1090#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
1091 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
1092
1093 volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
1094 return rbimpl_atomic_cas(tmp, oldval, newval, success_memorder, failure_memorder);
1095
1096#elif defined(HAVE_STDATOMIC_H)
1097 atomic_compare_exchange_strong_explicit(
1098 (_Atomic volatile size_t *)ptr, &oldval, newval, success_memorder, failure_memorder);
1099 return oldval;
1100
1101#else
1102# error Unsupported platform.
1103#endif
1104}
1105
1109static inline void *
1110rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval, int success_memorder, int failure_memorder)
1111{
1112 (void)success_memorder;
1113 (void)failure_memorder;
1114#if 0
1115
1116#elif defined(InterlockedExchangePointer)
1117 /* ... Can we say that InterlockedCompareExchangePointer surely exists
1118 * when InterlockedExchangePointer is defined? Seems so, but...? */
1119 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
1120 PVOID pold = RBIMPL_CAST((PVOID)oldval);
1121 PVOID pnew = RBIMPL_CAST((PVOID)newval);
1122 return InterlockedCompareExchangePointer(pptr, pnew, pold);
1123
1124#elif defined(__sun) && defined(HAVE_ATOMIC_H)
1125 void *pold = RBIMPL_CAST((void *)oldval);
1126 void *pnew = RBIMPL_CAST((void *)newval);
1127 return atomic_cas_ptr(ptr, pold, pnew);
1128
1129
1130#else
1131 RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));
1132
1133 const size_t snew = RBIMPL_CAST((size_t)newval);
1134 const size_t sold = RBIMPL_CAST((size_t)oldval);
1135 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
1136 const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew, success_memorder, failure_memorder);
1137 return RBIMPL_CAST((void *)sret);
1138
1139#endif
1140}
1141
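The same retry idiom works for pointers; a hypothetical sketch of a lock-free singly linked list push in including code (pop is deliberately omitted because it would additionally have to deal with the ABA problem):

struct node {
    struct node *next;
};

static struct node *list_head;

static void
example_push(struct node *n)
{
    struct node *old;
    do {
        old = RUBY_ATOMIC_PTR_LOAD(list_head);
        n->next = old;
    } while (RUBY_ATOMIC_PTR_CAS(list_head, old, n) != old);
}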
1145static inline void *
1146rbimpl_atomic_ptr_load(void **ptr, int memory_order)
1147{
1148 (void)memory_order;
1149#if 0
1150
1151#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
1152 return __atomic_load_n(ptr, memory_order);
1153#else
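    /* CASing `val` against itself never modifies `*ptr`; whether the CAS
     * "succeeds" or not, it returns the value currently stored, which is
     * exactly what an atomic load needs. */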
1154 void *val = *ptr;
1155 return rbimpl_atomic_ptr_cas(ptr, val, val, memory_order, memory_order);
1156#endif
1157}
1158
1162static inline VALUE
1163rbimpl_atomic_value_load(volatile VALUE *ptr, int memory_order)
1164{
1165 return RBIMPL_CAST((VALUE)rbimpl_atomic_ptr_load((void **)ptr, memory_order));
1166}
1167
1171static inline VALUE
1172rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval, int success_memorder, int failure_memorder)
1173{
1174 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
1175
1176 const size_t snew = RBIMPL_CAST((size_t)newval);
1177 const size_t sold = RBIMPL_CAST((size_t)oldval);
1178 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
1179 const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew, success_memorder, failure_memorder);
1180 return RBIMPL_CAST((VALUE)sret);
1181}
1183#endif /* RUBY_ATOMIC_H */