Ruby 3.5.0dev (2025-08-06 revision 92688f7d570c9c37ccb05b80577e1032aae908b7)
atomic.h
#ifndef RUBY_ATOMIC_H /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_ATOMIC_H
#include "ruby/internal/config.h"

#ifdef STDC_HEADERS
# include <stddef.h> /* size_t */
#endif

#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h> /* ssize_t */
#endif

#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
# pragma intrinsic(_InterlockedOr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
# include <atomic.h>
#endif

#include "ruby/assert.h"
#include "ruby/backward/2/limits.h"
#include "ruby/internal/cast.h"
#include "ruby/internal/value.h"

/*
 * Asserts that your environment supports more than one atomic type.  Most
 * systems have this property these days (C11 is more than a decade old),
 * but we still support older ones.
 */
#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
# define RUBY_ATOMIC_GENERIC_MACRO 1
#endif

#if defined(__DOXYGEN__)
using rb_atomic_t = std::atomic<unsigned>;
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(_WIN32)
# include <winsock2.h> // to prevent macro redefinitions
# include <windows.h> // for `LONG` and `Interlocked` functions
typedef LONG rb_atomic_t;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
typedef unsigned int rb_atomic_t;
#elif defined(HAVE_STDATOMIC_H)
# include <stdatomic.h>
typedef unsigned int rb_atomic_t;
#else
# error No atomic operation found
#endif

#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))

#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))

#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))

#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))

#define RUBY_ATOMIC_CAS(var, oldval, newval) \
    rbimpl_atomic_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var))

#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))

#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))

#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))

#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))

#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))

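/*
 * A minimal usage sketch for the `rb_atomic_t` macros above (not part of
 * the original header; the names are hypothetical): a shared counter that
 * several threads may bump concurrently.
 *
 *     #include "ruby/atomic.h"
 *
 *     static rb_atomic_t living_objects;
 *
 *     static void
 *     on_birth(void)
 *     {
 *         RUBY_ATOMIC_INC(living_objects);
 *     }
 *
 *     static void
 *     on_death(void)
 *     {
 *         RUBY_ATOMIC_DEC(living_objects);
 *     }
 *
 *     static rb_atomic_t
 *     snapshot(void)
 *     {
 *         return RUBY_ATOMIC_LOAD(living_objects);
 *     }
 */
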
#define RUBY_ATOMIC_SIZE_FETCH_ADD(var, val) rbimpl_atomic_size_fetch_add(&(var), (val))

#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))

#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))

#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
    rbimpl_atomic_size_exchange(&(var), (val))

#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
    rbimpl_atomic_size_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))

#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))

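/*
 * A sketch of the `size_t` variants (hypothetical names): byte counters
 * that may exceed the range of `rb_atomic_t` on LP64 platforms.
 *
 *     static size_t allocated_bytes;
 *
 *     static void
 *     track_alloc(size_t n)
 *     {
 *         RUBY_ATOMIC_SIZE_ADD(allocated_bytes, n);
 *     }
 *
 *     static void
 *     track_free(size_t n)
 *     {
 *         RUBY_ATOMIC_SIZE_SUB(allocated_bytes, n);
 *     }
 */
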
#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
    RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val))

#define RUBY_ATOMIC_PTR_LOAD(var) \
    RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&var))

#define RUBY_ATOMIC_PTR_SET(var, val) \
    rbimpl_atomic_ptr_set((volatile void **)&(var), (val))

#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
    RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval)))

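/*
 * A sketch of the pointer variants: publish a lazily-built table exactly
 * once.  `RUBY_ATOMIC_PTR_CAS` returns the pointer the slot held before
 * the operation, so a NULL result means our pointer went in.  The
 * `build_table()` / `free_table()` helpers are hypothetical.
 *
 *     static struct table *shared_table;
 *
 *     static struct table *
 *     get_table(void)
 *     {
 *         struct table *t = RUBY_ATOMIC_PTR_LOAD(shared_table);
 *         if (t) return t;
 *
 *         struct table *mine = build_table();
 *         struct table *prev = RUBY_ATOMIC_PTR_CAS(shared_table, NULL, mine);
 *         if (prev) {
 *             free_table(mine);  // somebody else won the race
 *             return prev;
 *         }
 *         return mine;
 *     }
 */
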
#define RUBY_ATOMIC_VALUE_SET(var, val) \
    rbimpl_atomic_value_set(&(var), (val))

#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
    rbimpl_atomic_value_exchange(&(var), (val))

#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
    rbimpl_atomic_value_cas(&(var), (oldval), (newval))

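/*
 * A sketch of the `VALUE` variants (hypothetical names): cache a lazily
 * created Ruby object in a global slot.  The plain read on the fast path
 * is re-checked by the CAS below.  Note that such a slot must also be
 * registered with the GC (e.g. via `rb_gc_register_address`), which is
 * out of scope for this sketch.
 *
 *     static VALUE cached_str;  // Qfalse (zero) while still empty
 *
 *     static VALUE
 *     fetch_cached_str(void)
 *     {
 *         VALUE v = cached_str;
 *         if (v != Qfalse) return v;
 *
 *         VALUE fresh = rb_str_new_cstr("hello");
 *         VALUE prev = RUBY_ATOMIC_VALUE_CAS(cached_str, Qfalse, fresh);
 *         return prev == Qfalse ? fresh : prev;
 *     }
 */
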
static inline rb_atomic_t
rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /*
     * `atomic_add_int_nv` takes its second argument as `int`, while our
     * `rb_atomic_t` is unsigned.  We cannot pass `val` as-is, so we have to
     * check for integer overflow manually.
     */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, val) - val;

#elif defined(HAVE_STDATOMIC_H)
    return atomic_fetch_add((_Atomic volatile rb_atomic_t *)ptr, val);

#else
# error Unsupported platform.
#endif
}

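/*
 * The fetch-and-add family returns the value the variable held *before*
 * the addition, which makes it handy for handing out unique slots.  A
 * hypothetical sketch:
 *
 *     static rb_atomic_t next_slot;
 *     static int events[1024];
 *
 *     static void
 *     log_event(int ev)
 *     {
 *         rb_atomic_t idx = RUBY_ATOMIC_FETCH_ADD(next_slot, 1);
 *         events[idx % 1024] = ev;
 *     }
 */
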
static inline size_t
rbimpl_atomic_size_fetch_add(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchangeAdd64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`.  Note that `atomic_add_long_nv` returns
     * the *new* value, so subtract `val` back off to get the old one. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    return atomic_add_long_nv(ptr, val) - val;

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    return rbimpl_atomic_fetch_add(tmp, val);

#elif defined(HAVE_STDATOMIC_H)
    return atomic_fetch_add((_Atomic volatile size_t *)ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    /*
     * GCC on amd64 is smart enough to notice that the return value of this
     * `__atomic_add_fetch` is unused, and compiles it down to a single
     * `LOCK ADD` instruction.
     */
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN32)
    /*
     * `InterlockedExchangeAdd` is `LOCK XADD`.  There also seems to be an
     * `_InterlockedAdd` intrinsic on ARM Windows, but not for x86?  Sticking
     * to `InterlockedExchangeAdd` for better portability.
     */
    InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, val);

#elif defined(HAVE_STDATOMIC_H)
    *(_Atomic volatile rb_atomic_t *)ptr += val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN64)
    /* Ditto for `InterlockedExchangeAdd`. */
    InterlockedExchangeAdd64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, val);

#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_add(tmp, val);

#elif defined(HAVE_STDATOMIC_H)
    *(_Atomic volatile size_t *)ptr += val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_add(ptr, 1);

#elif defined(_WIN32)
    InterlockedIncrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_inc_uint(ptr);

#elif defined(HAVE_STDATOMIC_H)
    rbimpl_atomic_add(ptr, 1);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_inc(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_add(ptr, 1);

#elif defined(_WIN64)
    InterlockedIncrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_inc_ulong(ptr);

#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_add(ptr, 1);

#elif defined(HAVE_STDATOMIC_H)
    rbimpl_atomic_size_add(ptr, 1);

#else
# error Unsupported platform.
#endif
}

static inline rb_atomic_t
rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_sub(ptr, val);

#elif defined(_WIN32)
    /* rb_atomic_t is signed here!  Safe to do `-val`. */
    return InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `rbimpl_atomic_fetch_add`. */
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, neg * val) + val;

#elif defined(HAVE_STDATOMIC_H)
    return atomic_fetch_sub((_Atomic volatile rb_atomic_t *)ptr, val);

#else
# error Unsupported platform.
#endif
}

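/*
 * Because fetch-and-sub also returns the *previous* value, the classic
 * reference-count release pattern falls out directly.  `struct my_obj`
 * and `destroy()` are hypothetical.
 *
 *     struct my_obj { rb_atomic_t refcnt; };
 *
 *     static void
 *     my_obj_unref(struct my_obj *o)
 *     {
 *         if (RUBY_ATOMIC_FETCH_SUB(o->refcnt, 1) == 1) {
 *             destroy(o);  // we dropped the last reference
 *         }
 *     }
 */
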
static inline void
rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN32)
    InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, neg * val);

#elif defined(HAVE_STDATOMIC_H)
    *(_Atomic volatile rb_atomic_t *)ptr -= val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN64)
    const ssize_t neg = -1;
    InterlockedExchangeAdd64(ptr, neg * val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, neg * val);

#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_sub(tmp, val);

#elif defined(HAVE_STDATOMIC_H)
    *(_Atomic volatile size_t *)ptr -= val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_sub(ptr, 1);

#elif defined(_WIN32)
    InterlockedDecrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_dec_uint(ptr);

#elif defined(HAVE_STDATOMIC_H)
    rbimpl_atomic_sub(ptr, 1);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_dec(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_sub(ptr, 1);

#elif defined(_WIN64)
    InterlockedDecrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_dec_ulong(ptr);

#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_sub(ptr, 1);

#elif defined(HAVE_STDATOMIC_H)
    rbimpl_atomic_size_sub(ptr, 1);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_or_and_fetch(ptr, val);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    _InterlockedOr(ptr, val);

#elif defined(_WIN32) && defined(__GNUC__)
    /* This was for old MinGW.  Maybe not needed any longer? */
    __asm__(
        "lock\n\t"
        "orl\t%1, %0"
        : "=m"(ptr)
        : "Ir"(val));

#elif defined(_WIN32) && defined(_M_IX86)
    __asm mov eax, ptr;
    __asm mov ecx, val;
    __asm lock or [eax], ecx;

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_or_uint(ptr, val);

#elif !defined(_WIN32) && defined(HAVE_STDATOMIC_H)
    *(_Atomic volatile rb_atomic_t *)ptr |= val;

#else
# error Unsupported platform.
#endif
}

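/*
 * A sketch of `RUBY_ATOMIC_OR` (hypothetical names): set a flag bit in a
 * shared bitset without losing bits set concurrently by other threads.
 * Unlike the fetch-and-add family, this macro does not report the
 * previous value.
 *
 *     #define FLAG_DIRTY   0x01
 *     #define FLAG_VISITED 0x02
 *
 *     static rb_atomic_t flags;
 *
 *     static void
 *     mark_dirty(void)
 *     {
 *         RUBY_ATOMIC_OR(flags, FLAG_DIRTY);
 *     }
 */
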
/* Nobody uses this, but it is kept for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
{
    return rbimpl_atomic_or(var, val);
}
#endif

static inline rb_atomic_t
rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchange(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_uint(ptr, val);

#elif defined(HAVE_STDATOMIC_H)
    return atomic_exchange((_Atomic volatile rb_atomic_t *)ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline size_t
rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN64)
    return InterlockedExchange64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_swap_ulong(ptr, val);

#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
    return RBIMPL_CAST((size_t)ret);

#elif defined(HAVE_STDATOMIC_H)
    return atomic_exchange((_Atomic volatile size_t *)ptr, val);

#else
# error Unsupported platform.
#endif
}

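/*
 * A sketch of `RUBY_ATOMIC_SIZE_EXCHANGE` (hypothetical names): drain a
 * statistics counter by swapping in zero and reporting whatever value was
 * there, so no concurrent update is counted twice or lost.
 *
 *     static size_t bytes_since_last_report;
 *
 *     static size_t
 *     drain_byte_counter(void)
 *     {
 *         return RUBY_ATOMIC_SIZE_EXCHANGE(bytes_since_last_report, 0);
 *     }
 */
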
static inline void
rbimpl_atomic_size_set(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);

#else
    rbimpl_atomic_size_exchange(ptr, val);

#endif
}

static inline void *
rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* const_cast */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pval = RBIMPL_CAST((PVOID)val);
    return InterlockedExchangePointer(pptr, pval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((void *)sret);

#endif
}

static inline void
rbimpl_atomic_ptr_set(volatile void **ptr, void *val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    rbimpl_atomic_size_set(sptr, sval);
}

static inline VALUE
rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((VALUE)sret);
}

static inline void
rbimpl_atomic_value_set(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    rbimpl_atomic_size_set(sptr, sval);
}

static inline rb_atomic_t
rbimpl_atomic_load(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    return rbimpl_atomic_fetch_add(ptr, 0);
#endif
}

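/*
 * A sketch of the plain load/store pair (hypothetical names): a stop flag
 * that one thread sets and a worker polls.  Both sides go through the
 * atomic macros, so the accesses are sequentially consistent and free of
 * data races.
 *
 *     static rb_atomic_t stop_requested;
 *
 *     static void
 *     request_stop(void)
 *     {
 *         RUBY_ATOMIC_SET(stop_requested, 1);
 *     }
 *
 *     static int
 *     should_stop(void)
 *     {
 *         return RUBY_ATOMIC_LOAD(stop_requested) != 0;
 *     }
 */
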
static inline void
rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);

#else
    /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
    rbimpl_atomic_exchange(ptr, val);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    return InterlockedCompareExchange(ptr, newval, oldval);

#elif defined(_WIN32)
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
    return RBIMPL_CAST((rb_atomic_t)pret);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_cas_uint(ptr, oldval, newval);

#elif defined(HAVE_STDATOMIC_H)
    atomic_compare_exchange_strong(
        (_Atomic volatile rb_atomic_t *)ptr, &oldval, newval);
    return oldval;

#else
# error Unsupported platform.
#endif
}

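/*
 * `RUBY_ATOMIC_CAS` returns the value the variable held before the
 * operation; the swap took effect if and only if that equals `oldval`.
 * That is enough to build lock-free read-modify-write loops, e.g. a
 * running maximum (hypothetical sketch):
 *
 *     static rb_atomic_t high_water;
 *
 *     static void
 *     note_depth(rb_atomic_t depth)
 *     {
 *         rb_atomic_t cur = RUBY_ATOMIC_LOAD(high_water);
 *         while (cur < depth) {
 *             rb_atomic_t prev = RUBY_ATOMIC_CAS(high_water, cur, depth);
 *             if (prev == cur) break;  // our value went in
 *             cur = prev;              // somebody raced us; retry
 *         }
 *     }
 */
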
/* Nobody uses this, but it is kept for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
{
    return rbimpl_atomic_cas(var, oldval, newval);
}
#endif

static inline size_t
rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif defined(_WIN64)
    return InterlockedCompareExchange64(ptr, newval, oldval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_cas_ulong(ptr, oldval, newval);

#elif defined(_WIN32) || (defined(__sun) && defined(HAVE_ATOMIC_H))
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    return rbimpl_atomic_cas(tmp, oldval, newval);

#elif defined(HAVE_STDATOMIC_H)
    atomic_compare_exchange_strong(
        (_Atomic volatile size_t *)ptr, &oldval, newval);
    return oldval;

#else
# error Unsupported platform.
#endif
}

static inline void *
rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* ... Can we say that InterlockedCompareExchangePointer surely exists
     * when InterlockedExchangePointer is defined?  Seems so, but...? */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    return InterlockedCompareExchangePointer(pptr, pnew, pold);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    void *pold = RBIMPL_CAST((void *)oldval);
    void *pnew = RBIMPL_CAST((void *)newval);
    return atomic_cas_ptr(ptr, pold, pnew);

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((void *)sret);

#endif
}

static inline void *
rbimpl_atomic_ptr_load(void **ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    void *val = *ptr;
    return rbimpl_atomic_ptr_cas(ptr, val, val);
#endif
}

static inline VALUE
rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((VALUE)sret);
}

#endif /* RUBY_ATOMIC_H */