Ruby 3.5.0dev (2025-06-06 revision dde9fca63bf4bf56050c734adca3eaae70506179)
atomic.h
#ifndef RUBY_ATOMIC_H /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_ATOMIC_H
#include "ruby/internal/config.h"

#ifdef STDC_HEADERS
# include <stddef.h>            /* size_t */
#endif

#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>         /* ssize_t */
#endif

#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
# pragma intrinsic(_InterlockedOr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
# include <atomic.h>
#endif

#include "ruby/assert.h"
#include "ruby/backward/2/limits.h"
#include "ruby/internal/cast.h"
#include "ruby/internal/value.h"

/*
 * Asserts that your environment supports more than one atomic type.  These
 * days most systems do (C11 is more than a decade old, after all), but we
 * still support older ones.
 */
#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
# define RUBY_ATOMIC_GENERIC_MACRO 1
#endif

#if defined(__DOXYGEN__)
using rb_atomic_t = std::atomic<unsigned>;
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(_WIN32)
# include <winsock2.h>          // to prevent macro redefinitions
# include <windows.h>           // for `LONG` and `Interlocked` functions
typedef LONG rb_atomic_t;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
typedef unsigned int rb_atomic_t;
#else
# error No atomic operation found
#endif

#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))

#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))

#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))

#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))

#define RUBY_ATOMIC_CAS(var, oldval, newval) \
    rbimpl_atomic_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var))

#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))

#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))

#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))

#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))

#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))

#define RUBY_ATOMIC_SIZE_FETCH_ADD(var, val) rbimpl_atomic_size_fetch_add(&(var), (val))

#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))

#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))

#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
    rbimpl_atomic_size_exchange(&(var), (val))

#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
    rbimpl_atomic_size_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))

#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))

#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
    RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)val))

#define RUBY_ATOMIC_PTR_LOAD(var) \
    RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&var))

#define RUBY_ATOMIC_PTR_SET(var, val) \
    rbimpl_atomic_ptr_set((volatile void **)&(var), (val))

#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
    RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval)))

#define RUBY_ATOMIC_VALUE_SET(var, val) \
    rbimpl_atomic_value_set(&(var), (val))

#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
    rbimpl_atomic_value_exchange(&(var), (val))

#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
    rbimpl_atomic_value_cas(&(var), (oldval), (newval))

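/*
 * Illustrative usage of the macros above (an editor-added sketch, not part of
 * the original header).  `counter` is a hypothetical variable.
 *
 *     static rb_atomic_t counter = 0;
 *
 *     void
 *     count_event(void)
 *     {
 *         // Sequentially-consistent increment on every supported platform.
 *         RUBY_ATOMIC_INC(counter);
 *     }
 *
 *     rb_atomic_t
 *     snapshot_and_reset(void)
 *     {
 *         // Returns the value the counter held just before the swap.
 *         return RUBY_ATOMIC_EXCHANGE(counter, 0);
 *     }
 */
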
static inline rb_atomic_t
rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /*
     * `atomic_add_int_nv` takes its second argument as a signed `int`, while
     * our `rb_atomic_t` is unsigned.  We cannot pass `val` as-is, so we have
     * to check for integer overflow manually.
     */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, val) - val;

#else
# error Unsupported platform.
#endif
}

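/*
 * Editor's note (illustrative, not in the original header): `atomic_add_int_nv`
 * returns the *new* value, whereas a fetch-add must return the *old* one, so
 * the Solaris branch subtracts `val` afterwards.  For example, if `*ptr` is 40
 * and `val` is 2, `atomic_add_int_nv` yields 42, and `42 - 2 == 40` is what
 * the caller sees, matching `__atomic_fetch_add` semantics.
 */
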
static inline size_t
rbimpl_atomic_size_fetch_add(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchangeAdd64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`: subtract `val` to get the old value back. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    return atomic_add_long_nv(ptr, val) - val;

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    const rb_atomic_t ret = rbimpl_atomic_fetch_add(tmp, val);
    return RBIMPL_CAST((size_t)ret);

#endif
}

static inline void
rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    /*
     * GCC on amd64 is smart enough to detect that this `__atomic_add_fetch`'s
     * return value is unused, and compiles it down to a single `LOCK ADD`
     * instruction.
     */
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN32)
    /*
     * `InterlockedExchangeAdd` is `LOCK XADD`.  There also seems to be an
     * `_InterlockedAdd` intrinsic on ARM Windows, but not for x86?  Sticking
     * to `InterlockedExchangeAdd` for better portability.
     */
    InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN64)
    /* Ditto for `InterlockedExchangeAdd`. */
    InterlockedExchangeAdd64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, val);

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_add(tmp, val);

#endif
}

static inline void
rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_add(ptr, 1);

#elif defined(_WIN32)
    InterlockedIncrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_inc_uint(ptr);

#else
    rbimpl_atomic_add(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_size_inc(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_add(ptr, 1);

#elif defined(_WIN64)
    InterlockedIncrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_inc_ulong(ptr);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_add(ptr, 1);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_sub(ptr, val);

#elif defined(_WIN32)
    /* rb_atomic_t is signed here!  Safe to do `-val`. */
    return InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `rbimpl_atomic_fetch_add`. */
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, neg * val) + val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN32)
    InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, neg * val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN64)
    const ssize_t neg = -1;
    InterlockedExchangeAdd64(ptr, neg * val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, neg * val);

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_sub(tmp, val);

#endif
}

static inline void
rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_sub(ptr, 1);

#elif defined(_WIN32)
    InterlockedDecrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_dec_uint(ptr);

#else
    rbimpl_atomic_sub(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_size_dec(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_sub(ptr, 1);

#elif defined(_WIN64)
    InterlockedDecrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_dec_ulong(ptr);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_sub(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_or_and_fetch(ptr, val);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    _InterlockedOr(ptr, val);

#elif defined(_WIN32) && defined(__GNUC__)
    /* This was for old MinGW.  Maybe not needed any longer?  Note that the
     * memory operand has to be the pointed-to value, not the pointer itself. */
    __asm__(
        "lock\n\t"
        "orl\t%1, %0"
        : "+m"(*ptr)
        : "Ir"(val));

#elif defined(_WIN32) && defined(_M_IX86)
    __asm mov eax, ptr;
    __asm mov ecx, val;
    __asm lock or [eax], ecx;

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_or_uint(ptr, val);

#else
# error Unsupported platform.
#endif
}

/* Nobody uses this, but it is kept for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
{
    /* `rbimpl_atomic_or` returns nothing, so reload `*var` to satisfy the
     * legacy signature; this branch only compiles on ancient MSVC. */
    rbimpl_atomic_or(var, val);
    return *var;
}
#endif

static inline rb_atomic_t
rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchange(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_uint(ptr, val);

#else
# error Unsupported platform.
#endif
}

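/*
 * Illustrative sketch (editor-added, not part of the original header): the
 * exchange primitive above is enough to build a minimal test-and-set
 * spinlock.  `flag` is a hypothetical variable; 0 means unlocked, 1 locked.
 *
 *     static rb_atomic_t flag = 0;
 *
 *     static void
 *     spin_acquire(void)
 *     {
 *         // Keep swapping 1 in until the previous value was 0, i.e. until
 *         // this thread is the one that flipped it.
 *         while (RUBY_ATOMIC_EXCHANGE(flag, 1) != 0) {
 *             ;  // spin
 *         }
 *     }
 *
 *     static void
 *     spin_release(void)
 *     {
 *         RUBY_ATOMIC_SET(flag, 0);
 *     }
 */
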
static inline size_t
rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN64)
    return InterlockedExchange64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_swap_ulong(ptr, val);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
    return RBIMPL_CAST((size_t)ret);

#endif
}

static inline void
rbimpl_atomic_size_set(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);

#else
    rbimpl_atomic_size_exchange(ptr, val);

#endif
}

static inline void *
rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* const_cast */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pval = RBIMPL_CAST((PVOID)val);
    return InterlockedExchangePointer(pptr, pval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((void *)sret);

#endif
}

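/*
 * Illustrative sketch (editor-added, not part of the original header): a
 * plain pointer exchange is enough to atomically detach a whole intrusive
 * list, e.g. for a drain-everything pattern.  `struct node` and `head` are
 * hypothetical names.
 *
 *     struct node { struct node *next; };
 *     static struct node *head;
 *
 *     static struct node *
 *     detach_all(void)
 *     {
 *         // Swap NULL in and get the previous list head back; later loads
 *         // of `head` observe an empty list.
 *         return RBIMPL_CAST((struct node *)
 *                    RUBY_ATOMIC_PTR_EXCHANGE(head, NULL));
 *     }
 */
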
static inline void
rbimpl_atomic_ptr_set(volatile void **ptr, void *val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    rbimpl_atomic_size_set(sptr, sval);
}

static inline VALUE
rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((VALUE)sret);
}

static inline void
rbimpl_atomic_value_set(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    rbimpl_atomic_size_set(sptr, sval);
}

static inline rb_atomic_t
rbimpl_atomic_load(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    return rbimpl_atomic_fetch_add(ptr, 0);
#endif
}

static inline void
rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);

#else
    /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
    rbimpl_atomic_exchange(ptr, val);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    return InterlockedCompareExchange(ptr, newval, oldval);

#elif defined(_WIN32)
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
    return RBIMPL_CAST((rb_atomic_t)pret);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_cas_uint(ptr, oldval, newval);

#else
# error Unsupported platform.
#endif
}

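/*
 * Illustrative sketch (editor-added, not part of the original header): a CAS
 * here returns the value that was actually in memory, so success is detected
 * by comparing that return value with `oldval`.  A typical retry loop for a
 * hypothetical bounded counter `refs` with limit `LIMIT`:
 *
 *     rb_atomic_t observed = RUBY_ATOMIC_LOAD(refs);
 *     for (;;) {
 *         if (observed >= LIMIT) break;                 // saturated; give up
 *         rb_atomic_t prev =
 *             RUBY_ATOMIC_CAS(refs, observed, observed + 1);
 *         if (prev == observed) break;                  // our CAS won
 *         observed = prev;                              // lost a race; retry
 *     }
 */
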
/* Nobody uses this, but it is kept for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
{
    return rbimpl_atomic_cas(var, oldval, newval);
}
#endif

static inline size_t
rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif defined(_WIN64)
    return InterlockedCompareExchange64(ptr, newval, oldval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_cas_ulong(ptr, oldval, newval);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    return rbimpl_atomic_cas(tmp, oldval, newval);

#endif
}

static inline void *
rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* ... Can we say that `InterlockedCompareExchangePointer` surely exists
     * when `InterlockedExchangePointer` is defined?  Seems so, but...? */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    return InterlockedCompareExchangePointer(pptr, pnew, pold);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    void *pold = RBIMPL_CAST((void *)oldval);
    void *pnew = RBIMPL_CAST((void *)newval);
    return atomic_cas_ptr(ptr, pold, pnew);

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((void *)sret);

#endif
}

static inline void *
rbimpl_atomic_ptr_load(void **ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    void *val = *ptr;
    return rbimpl_atomic_ptr_cas(ptr, val, val);
#endif
}

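/*
 * Editor's note (illustrative, not in the original header): in the fallback
 * branch above, a compare-and-swap whose old and new values are identical
 * never changes memory, but it still returns the value currently stored and
 * acts as a full barrier, which is why it can stand in for an atomic load.
 */
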
static inline VALUE
rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((VALUE)sret);
}
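
/*
 * Illustrative sketch (editor-added, not part of the original header):
 * `RUBY_ATOMIC_VALUE_CAS` can publish a lazily created object exactly once.
 * `cache` and `make` are hypothetical names and 0 is a hypothetical
 * "not yet initialized" sentinel.
 *
 *     static VALUE cache;  // 0 until first use
 *
 *     static VALUE
 *     fetch_cached(VALUE (*make)(void))
 *     {
 *         VALUE fresh = make();
 *         VALUE prev  = RUBY_ATOMIC_VALUE_CAS(cache, 0, fresh);
 *         // If prev is 0, our object got installed; otherwise another
 *         // thread won the race and `prev` is the one to use.
 *         return prev == 0 ? fresh : prev;
 *     }
 */
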
#endif /* RUBY_ATOMIC_H */