Ruby 3.5.0dev (2025-04-25 revision 62a7f17157c5c67956d95a2582f8f256df13f9e2)
atomic.h
1#ifndef RUBY_ATOMIC_H /*-*-C++-*-vi:se ft=cpp:*/
2#define RUBY_ATOMIC_H
27#include "ruby/internal/config.h"
28
29#ifdef STDC_HEADERS
30# include <stddef.h> /* size_t */
31#endif
32
33#ifdef HAVE_SYS_TYPES_H
34# include <sys/types.h> /* ssize_t */
35#endif
36
37#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
38# pragma intrinsic(_InterlockedOr)
39#elif defined(__sun) && defined(HAVE_ATOMIC_H)
40# include <atomic.h>
41#endif
42
43#include "ruby/assert.h"
44#include "ruby/backward/2/limits.h"
49#include "ruby/internal/cast.h"
50#include "ruby/internal/value.h"
53
54/*
55 * Asserts that your environment supports more than one atomic type. These
56 * days most systems have this property (C11 is more than a decade old, after
57 * all), but we still support older ones.
58 */
59#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
60# define RUBY_ATOMIC_GENERIC_MACRO 1
61#endif
62
68#if defined(__DOXYGEN__)
69using rb_atomic_t = std::atomic<unsigned>;
70#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
71typedef unsigned int rb_atomic_t;
72#elif defined(HAVE_GCC_SYNC_BUILTINS)
73typedef unsigned int rb_atomic_t;
74#elif defined(_WIN32)
75# include <winsock2.h> // to prevent macro redefinitions
76# include <windows.h> // for `LONG` and `Interlocked` functions
77typedef LONG rb_atomic_t;
78#elif defined(__sun) && defined(HAVE_ATOMIC_H)
79typedef unsigned int rb_atomic_t;
80#else
81# error No atomic operation found
82#endif
83
93#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))
94
104#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))
105
116#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))
117
127#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))
128
140#define RUBY_ATOMIC_CAS(var, oldval, newval) \
141 rbimpl_atomic_cas(&(var), (oldval), (newval))
142
150#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var))
151
160#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))
161
170#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))
171
180#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))
181
189#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))
190
198#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))
199
209#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))
210
220#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))
221
233#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
234 rbimpl_atomic_size_exchange(&(var), (val))
235
247#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
248 rbimpl_atomic_size_cas(&(var), (oldval), (newval))
249
260#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))
261
272#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))
273
290#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
291 RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)(val)))
292
301#define RUBY_ATOMIC_PTR_LOAD(var) \
302 RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&(var)))
303
315#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
316 RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval)))
317
328#define RUBY_ATOMIC_VALUE_SET(var, val) \
329 rbimpl_atomic_value_set(&(var), (val))
330
342#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
343 rbimpl_atomic_value_exchange(&(var), (val))
344
356#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
357 rbimpl_atomic_value_cas(&(var), (oldval), (newval))
358
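/*
 * Editorial sketch (not part of the upstream atomic.h): how the macros above
 * might be used by code that includes this header, here as a thread-safe
 * event counter.  The variable and function names are hypothetical.
 *
 *     static rb_atomic_t example_counter;
 *
 *     static rb_atomic_t
 *     example_count_event(void)
 *     {
 *         // Atomically bump the counter and obtain the value it had before.
 *         rb_atomic_t previous = RUBY_ATOMIC_FETCH_ADD(example_counter, 1);
 *
 *         // A plain increment whose previous value we do not need.
 *         RUBY_ATOMIC_INC(example_counter);
 *
 *         return previous;
 *     }
 */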
363static inline rb_atomic_t
364rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
365{
366#if 0
367
368#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
369 return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);
370
371#elif defined(HAVE_GCC_SYNC_BUILTINS)
372 return __sync_fetch_and_add(ptr, val);
373
374#elif defined(_WIN32)
375 return InterlockedExchangeAdd(ptr, val);
376
377#elif defined(__sun) && defined(HAVE_ATOMIC_H)
378 /*
379 * `atomic_add_int_nv` takes its second argument as `int`, whereas our
380 * `rb_atomic_t` is unsigned. We cannot pass `val` as-is; we have to
381 * check for integer overflow manually.
382 */
383 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
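    /* `atomic_add_int_nv` returns the *new* value; subtracting `val` recovers
     * the value the variable held before the addition. */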
384 return atomic_add_int_nv(ptr, val) - val;
385
386#else
387# error Unsupported platform.
388#endif
389}
390
394static inline void
395rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
396{
397#if 0
398
399#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
400 /*
401 * GCC on amd64 is smart enough to detect that this `__atomic_add_fetch`'s
402 * return value is unused, and compiles it into a single `LOCK ADD`
403 * instruction.
404 */
405 __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
406
407#elif defined(HAVE_GCC_SYNC_BUILTINS)
408 __sync_add_and_fetch(ptr, val);
409
410#elif defined(_WIN32)
411 /*
412 * `InterlockedExchangeAdd` compiles to `LOCK XADD`. There also seems to be
413 * an `_InterlockedAdd` intrinsic on ARM Windows, but apparently not on x86,
414 * so we stick to `InterlockedExchangeAdd` for better portability.
415 */
416 InterlockedExchangeAdd(ptr, val);
417
418#elif defined(__sun) && defined(HAVE_ATOMIC_H)
419 /* Ditto for `atomic_add_int_nv`. */
420 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
421 atomic_add_int(ptr, val);
422
423#else
424# error Unsupported platform.
425#endif
426}
427
431static inline void
432rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
433{
434#if 0
435
436#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
437 __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
438
439#elif defined(HAVE_GCC_SYNC_BUILTINS)
440 __sync_add_and_fetch(ptr, val);
441
442#elif defined(_WIN64)
443 /* Ditto for `InterlockedExchangeAdd`. */
444 InterlockedExchangeAdd64(ptr, val);
445
446#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
447 /* Ditto for `atomic_add_int_nv`. */
448 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
449 atomic_add_long(ptr, val);
450
451#else
452 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
453
454 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
455 rbimpl_atomic_add(tmp, val);
456
457#endif
458}
459
463static inline void
464rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
465{
466#if 0
467
468#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
469 rbimpl_atomic_add(ptr, 1);
470
471#elif defined(_WIN32)
472 InterlockedIncrement(ptr);
473
474#elif defined(__sun) && defined(HAVE_ATOMIC_H)
475 atomic_inc_uint(ptr);
476
477#else
478 rbimpl_atomic_add(ptr, 1);
479
480#endif
481}
482
486static inline void
487rbimpl_atomic_size_inc(volatile size_t *ptr)
488{
489#if 0
490
491#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
492 rbimpl_atomic_size_add(ptr, 1);
493
494#elif defined(_WIN64)
495 InterlockedIncrement64(ptr);
496
497#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
498 atomic_inc_ulong(ptr);
499
500#else
501 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
502
503 rbimpl_atomic_size_add(ptr, 1);
504
505#endif
506}
507
511static inline rb_atomic_t
512rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
513{
514#if 0
515
516#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
517 return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);
518
519#elif defined(HAVE_GCC_SYNC_BUILTINS)
520 return __sync_fetch_and_sub(ptr, val);
521
522#elif defined(_WIN32)
523 /* rb_atomic_t is signed here! Safe to do `-val`. */
524 return InterlockedExchangeAdd(ptr, -val);
525
526#elif defined(__sun) && defined(HAVE_ATOMIC_H)
527 /* Ditto for `rbimpl_atomic_fetch_add`. */
528 const signed neg = -1;
529 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
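    /* `atomic_add_int_nv` returns the *new* value, so adding `val` back yields
     * the value the variable held before the subtraction. */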
530 return atomic_add_int_nv(ptr, neg * val) + val;
531
532#else
533# error Unsupported platform.
534#endif
535}
536
540static inline void
541rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
542{
543#if 0
544
545#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
546 __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
547
548#elif defined(HAVE_GCC_SYNC_BUILTINS)
549 __sync_sub_and_fetch(ptr, val);
550
551#elif defined(_WIN32)
552 InterlockedExchangeAdd(ptr, -val);
553
554#elif defined(__sun) && defined(HAVE_ATOMIC_H)
555 const signed neg = -1;
556 RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
557 atomic_add_int(ptr, neg * val);
558
559#else
560# error Unsupported platform.
561#endif
562}
563
567static inline void
568rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
569{
570#if 0
571
572#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
573 __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);
574
575#elif defined(HAVE_GCC_SYNC_BUILTINS)
576 __sync_sub_and_fetch(ptr, val);
577
578#elif defined(_WIN64)
579 const ssize_t neg = -1;
580 InterlockedExchangeAdd64(ptr, neg * val);
581
582#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
583 const signed neg = -1;
584 RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
585 atomic_add_long(ptr, neg * val);
586
587#else
588 RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));
589
590 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
591 rbimpl_atomic_sub(tmp, val);
592
593#endif
594}
595
599static inline void
600rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
601{
602#if 0
603
604#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
605 rbimpl_atomic_sub(ptr, 1);
606
607#elif defined(_WIN32)
608 InterlockedDecrement(ptr);
609
610#elif defined(__sun) && defined(HAVE_ATOMIC_H)
611 atomic_dec_uint(ptr);
612
613#else
614 rbimpl_atomic_sub(ptr, 1);
615
616#endif
617}
618
622static inline void
623rbimpl_atomic_size_dec(volatile size_t *ptr)
624{
625#if 0
626
627#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
628 rbimpl_atomic_size_sub(ptr, 1);
629
630#elif defined(_WIN64)
631 InterlockedDecrement64(ptr);
632
633#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
634 atomic_dec_ulong(ptr);
635
636#else
637 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
638
639 rbimpl_atomic_size_sub(ptr, 1);
640
641#endif
642}
643
647static inline void
648rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
649{
650#if 0
651
652#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
653 __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);
654
655#elif defined(HAVE_GCC_SYNC_BUILTINS)
656 __sync_or_and_fetch(ptr, val);
657
658#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
659 _InterlockedOr(ptr, val);
660
661#elif defined(_WIN32) && defined(__GNUC__)
662 /* This was for old MinGW; it may no longer be needed. */
663 __asm__ volatile(
664 "lock\n\t"
665 "orl\t%1, %0"
666 : "+m"(*ptr)
667 : "Ir"(val) : "memory");
668
669#elif defined(_WIN32) && defined(_M_IX86)
670 __asm mov eax, ptr;
671 __asm mov ecx, val;
672 __asm lock or [eax], ecx;
673
674#elif defined(__sun) && defined(HAVE_ATOMIC_H)
675 atomic_or_uint(ptr, val);
676
677#else
678# error Unsupported platform.
679#endif
680}
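/*
 * Editorial sketch (not part of the upstream atomic.h): RUBY_ATOMIC_OR is
 * handy for setting flag bits that other threads may also be updating.  The
 * variable, function, and bit constant below are hypothetical.
 */
static rb_atomic_t example_flags;

static void
example_mark_dirty(void)
{
    RUBY_ATOMIC_OR(example_flags, 0x01); /* atomically set the "dirty" bit */
}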
681
682/* Nobody uses this, except for theoretical backwards compatibility... */
683#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
684static inline rb_atomic_t
685rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
686{
687 return rbimpl_atomic_or(var, val);
688}
689#endif
690
694static inline rb_atomic_t
695rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
696{
697#if 0
698
699#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
700 return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
701
702#elif defined(HAVE_GCC_SYNC_BUILTINS)
703 return __sync_lock_test_and_set(ptr, val);
704
705#elif defined(_WIN32)
706 return InterlockedExchange(ptr, val);
707
708#elif defined(__sun) && defined(HAVE_ATOMIC_H)
709 return atomic_swap_uint(ptr, val);
710
711#else
712# error Unsupported platform.
713#endif
714}
715
719static inline size_t
720rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
721{
722#if 0
723
724#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
725 return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
726
727#elif defined(HAVE_GCC_SYNC_BUILTINS)
728 return __sync_lock_test_and_set(ptr, val);
729
730#elif defined(_WIN64)
731 return InterlockedExchange64(ptr, val);
732
733#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
734 return atomic_swap_ulong(ptr, val);
735
736#else
737 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
738
739 volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
740 const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
741 return RBIMPL_CAST((size_t)ret);
742
743#endif
744}
745
749static inline void
750rbimpl_atomic_size_set(volatile size_t *ptr, size_t val)
751{
752#if 0
753
754#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
755 __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
756
757#else
758 rbimpl_atomic_size_exchange(ptr, val);
759
760#endif
761}
762
766static inline void *
767rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
768{
769#if 0
770
771#elif defined(InterlockedExchangePointer)
772 /* const_cast */
773 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
774 PVOID pval = RBIMPL_CAST((PVOID)val);
775 return InterlockedExchangePointer(pptr, pval);
776
777#elif defined(__sun) && defined(HAVE_ATOMIC_H)
778 return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));
779
780#else
781 RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));
782
783 const size_t sval = RBIMPL_CAST((size_t)val);
784 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
785 const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
786 return RBIMPL_CAST((void *)sret);
787
788#endif
789}
790
794static inline VALUE
795rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
796{
797 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
798
799 const size_t sval = RBIMPL_CAST((size_t)val);
800 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
801 const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
802 return RBIMPL_CAST((VALUE)sret);
803}
804
808static inline void
809rbimpl_atomic_value_set(volatile VALUE *ptr, VALUE val)
810{
811 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
812
813 const size_t sval = RBIMPL_CAST((size_t)val);
814 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
815 rbimpl_atomic_size_set(sptr, sval);
816}
817
821static inline rb_atomic_t
822rbimpl_atomic_load(volatile rb_atomic_t *ptr)
823{
824#if 0
825
826#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
827 return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
828#else
829 return rbimpl_atomic_fetch_add(ptr, 0);
830#endif
831}
832
836static inline void
837rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
838{
839#if 0
840
841#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
842 __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);
843
844#else
845 /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
846 rbimpl_atomic_exchange(ptr, val);
847
848#endif
849}
850
854static inline rb_atomic_t
855rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
856{
857#if 0
858
859#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
860 __atomic_compare_exchange_n(
861 ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
862 return oldval;
863
864#elif defined(HAVE_GCC_SYNC_BUILTINS)
865 return __sync_val_compare_and_swap(ptr, oldval, newval);
866
867#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
868 return InterlockedCompareExchange(ptr, newval, oldval);
869
870#elif defined(_WIN32)
871 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
872 PVOID pold = RBIMPL_CAST((PVOID)oldval);
873 PVOID pnew = RBIMPL_CAST((PVOID)newval);
874 PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
875 return RBIMPL_CAST((rb_atomic_t)pret);
876
877#elif defined(__sun) && defined(HAVE_ATOMIC_H)
878 return atomic_cas_uint(ptr, oldval, newval);
879
880#else
881# error Unsupported platform.
882#endif
883}
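/*
 * Editorial sketch (not part of the upstream atomic.h): the usual
 * compare-and-swap retry loop built on RUBY_ATOMIC_CAS, here implementing a
 * saturating increment.  All names are hypothetical.
 */
static rb_atomic_t example_usage;

static void
example_increment_up_to(rb_atomic_t limit)
{
    rb_atomic_t old = RUBY_ATOMIC_LOAD(example_usage);

    while (old < limit) {
        /* RUBY_ATOMIC_CAS returns the value it actually observed; if that is
         * not the value we based our update on, another thread won the race
         * and we retry against the fresher value. */
        const rb_atomic_t seen = RUBY_ATOMIC_CAS(example_usage, old, old + 1);
        if (seen == old) break;
        old = seen;
    }
}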
884
885/* Nobody uses this, except for theoretical backwards compatibility... */
886#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
887static inline rb_atomic_t
888rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
889{
890 return rbimpl_atomic_cas(var, oldval, newval);
891}
892#endif
893
897static inline size_t
898rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
899{
900#if 0
901
902#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
903 __atomic_compare_exchange_n(
904 ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
905 return oldval;
906
907#elif defined(HAVE_GCC_SYNC_BUILTINS)
908 return __sync_val_compare_and_swap(ptr, oldval, newval);
909
910#elif defined(_WIN64)
911 return InterlockedCompareExchange64(ptr, newval, oldval);
912
913#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
914 return atomic_cas_ulong(ptr, oldval, newval);
915
916#else
917 RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));
918
919 volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
920 return rbimpl_atomic_cas(tmp, oldval, newval);
921
922#endif
923}
924
928static inline void *
929rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
930{
931#if 0
932
933#elif defined(InterlockedExchangePointer)
934 /* Can we assume that `InterlockedCompareExchangePointer` surely exists
935 * whenever `InterlockedExchangePointer` is defined? It seems so, but... */
936 PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
937 PVOID pold = RBIMPL_CAST((PVOID)oldval);
938 PVOID pnew = RBIMPL_CAST((PVOID)newval);
939 return InterlockedCompareExchangePointer(pptr, pnew, pold);
940
941#elif defined(__sun) && defined(HAVE_ATOMIC_H)
942 void *pold = RBIMPL_CAST((void *)oldval);
943 void *pnew = RBIMPL_CAST((void *)newval);
944 return atomic_cas_ptr(ptr, pold, pnew);
945
946
947#else
948 RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));
949
950 const size_t snew = RBIMPL_CAST((size_t)newval);
951 const size_t sold = RBIMPL_CAST((size_t)oldval);
952 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
953 const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
954 return RBIMPL_CAST((void *)sret);
955
956#endif
957}
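/*
 * Editorial sketch (not part of the upstream atomic.h): one-shot pointer
 * publication with RUBY_ATOMIC_PTR_CAS.  Whichever thread installs its
 * pointer first wins; a losing caller gets the winner's pointer back and must
 * dispose of its own.  `struct example_table` and the names are hypothetical.
 */
struct example_table;
static struct example_table *example_shared;

static struct example_table *
example_publish(struct example_table *fresh)
{
    struct example_table *old = (struct example_table *)
        RUBY_ATOMIC_PTR_CAS(example_shared, NULL, fresh);

    /* NULL means we installed `fresh`; otherwise `old` is what everyone,
     * including us, should use from now on. */
    return old ? old : fresh;
}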
958
962static inline void *
963rbimpl_atomic_ptr_load(void **ptr)
964{
965#if 0
966
967#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
968 return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
969#else
970 void *val = *ptr;
971 return rbimpl_atomic_ptr_cas(ptr, val, val);
972#endif
973}
974
978static inline VALUE
979rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
980{
981 RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));
982
983 const size_t snew = RBIMPL_CAST((size_t)newval);
984 const size_t sold = RBIMPL_CAST((size_t)oldval);
985 volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
986 const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
987 return RBIMPL_CAST((VALUE)sret);
988}
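/*
 * Editorial sketch (not part of the upstream atomic.h): memoizing a VALUE in
 * a global slot exactly once with RUBY_ATOMIC_VALUE_CAS.  The names are
 * hypothetical, 0 serves as an "unset" sentinel, and keeping the stored
 * object reachable by the GC is the caller's responsibility.
 */
static VALUE example_cached; /* 0 means "not memoized yet" */

static VALUE
example_memoize(VALUE fresh)
{
    const VALUE old = RUBY_ATOMIC_VALUE_CAS(example_cached, 0, fresh);
    return old == 0 ? fresh : old;
}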
990#endif /* RUBY_ATOMIC_H */