Ruby 3.4.0dev (2024-11-05 revision 348a53415339076afc4a02fcd09f3ae36e9c4c61)
atomic.h
#ifndef RUBY_ATOMIC_H                              /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_ATOMIC_H

#include "ruby/internal/config.h"

#ifdef STDC_HEADERS
# include <stddef.h>           /* size_t */
#endif

#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>        /* ssize_t */
#endif

#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
# pragma intrinsic(_InterlockedOr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
# include <atomic.h>
#endif

#include "ruby/assert.h"
#include "ruby/backward/2/limits.h"
#include "ruby/internal/cast.h"
#include "ruby/internal/value.h"
#include "ruby/internal/stdbool.h"

/*
 * Asserts that your environment supports atomic operations on more than one
 * type.  These days systems tend to have this property (C11 is more than a
 * decade old, after all), but we still support older ones.
 */
#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
# define RUBY_ATOMIC_GENERIC_MACRO 1
#endif

#if defined(__DOXYGEN__)
using rb_atomic_t = std::atomic<unsigned>;
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(_WIN32)
# include <winsock2.h>          // to prevent macro redefinitions
# include <windows.h>           // for `LONG` and `Interlocked` functions
typedef LONG rb_atomic_t;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
typedef unsigned int rb_atomic_t;
#else
# error No atomic operation found
#endif

#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))

#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))

#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))

#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))

#define RUBY_ATOMIC_CAS(var, oldval, newval) \
    rbimpl_atomic_cas(&(var), (oldval), (newval))

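/*
 * Usage sketch (illustrative only; `counter` and the other names below are
 * hypothetical): RUBY_ATOMIC_CAS returns the value it actually found in
 * `var`, so a typical retry loop detects success by comparing that return
 * value with the expected one.
 *
 *     rb_atomic_t expected = RUBY_ATOMIC_LOAD(counter);
 *     for (;;) {
 *         const rb_atomic_t found =
 *             RUBY_ATOMIC_CAS(counter, expected, expected + 1);
 *         if (found == expected) break; // the swap took effect
 *         expected = found;             // raced; retry with the observed value
 *     }
 */
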
#define RUBY_ATOMIC_LOAD(var) rbimpl_atomic_load(&(var))

#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))

#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))

#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))

#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))

#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))

#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))

#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))

#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
    rbimpl_atomic_size_exchange(&(var), (val))

#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
    rbimpl_atomic_size_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))

#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))

#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
    RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)(val)))

#define RUBY_ATOMIC_PTR_LOAD(var) \
    RBIMPL_CAST(rbimpl_atomic_ptr_load((void **)&(var)))

#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
    RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (void *)(oldval), (void *)(newval)))

#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
    rbimpl_atomic_value_exchange(&(var), (val))

#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
    rbimpl_atomic_value_cas(&(var), (oldval), (newval))

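/*
 * Usage sketch (illustrative only; the variables are hypothetical): the
 * RUBY_ATOMIC_* macros above are the intended entry points, while the
 * rbimpl_* functions below are the per-platform implementations behind them.
 *
 *     static rb_atomic_t hits;
 *     static size_t bytes;
 *     static void *shared_buf;
 *
 *     RUBY_ATOMIC_INC(hits);                        // hits += 1, atomically
 *     rb_atomic_t before = RUBY_ATOMIC_FETCH_ADD(hits, 10); // value prior to the add
 *     RUBY_ATOMIC_SIZE_ADD(bytes, 4096);            // size_t-wide counterpart
 *     void *old = RUBY_ATOMIC_PTR_EXCHANGE(shared_buf, NULL); // swap in NULL, get previous
 */
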
static inline rb_atomic_t
rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /*
     * `atomic_add_int_nv` takes its second argument as `int`!  Meanwhile our
     * `rb_atomic_t` is unsigned.  We cannot pass `val` as-is.  We have to
     * manually check for integer overflow.  Note also that `_nv` means it
     * returns the *new* value; subtracting `val` recovers the old one, which
     * is what a fetch-and-add must return.
     */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, val) - val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    /*
     * GCC on amd64 is smart enough to detect that this `__atomic_add_fetch`'s
     * return value is unused, and compiles it down to a single `LOCK ADD`
     * instruction.
     */
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN32)
    /*
     * `InterlockedExchangeAdd` is `LOCK XADD`.  There also seems to be an
     * `_InterlockedAdd` intrinsic on ARM Windows, but not for x86?  We stick
     * to `InterlockedExchangeAdd` for better portability.
     */
    InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN64)
    /* Ditto for `InterlockedExchangeAdd`. */
    InterlockedExchangeAdd64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, val);

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_add(tmp, val);

#endif
}

static inline void
rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_add(ptr, 1);

#elif defined(_WIN32)
    InterlockedIncrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_inc_uint(ptr);

#else
    rbimpl_atomic_add(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_size_inc(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_add(ptr, 1);

#elif defined(_WIN64)
    InterlockedIncrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_inc_ulong(ptr);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_add(ptr, 1);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_sub(ptr, val);

#elif defined(_WIN32)
    /* rb_atomic_t is signed here!  Safe to do `-val`. */
    return InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `rbimpl_atomic_fetch_add`. */
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, neg * val) + val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN32)
    InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, neg * val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN64)
    const ssize_t neg = -1;
    InterlockedExchangeAdd64(ptr, neg * val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, neg * val);

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_sub(tmp, val);

#endif
}

static inline void
rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_sub(ptr, 1);

#elif defined(_WIN32)
    InterlockedDecrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_dec_uint(ptr);

#else
    rbimpl_atomic_sub(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_size_dec(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_sub(ptr, 1);

#elif defined(_WIN64)
    InterlockedDecrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_dec_ulong(ptr);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    rbimpl_atomic_size_sub(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_or_and_fetch(ptr, val);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    _InterlockedOr(ptr, val);

#elif defined(_WIN32) && defined(__GNUC__)
    /* This was for old MinGW.  Maybe not needed any longer? */
    __asm__(
        "lock\n\t"
        "orl\t%1, %0"
        : "+m"(*ptr)  /* read-modify-write of the pointed-to word */
        : "Ir"(val));

#elif defined(_WIN32) && defined(_M_IX86)
    __asm mov eax, ptr;
    __asm mov ecx, val;
    __asm lock or [eax], ecx;

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_or_uint(ptr, val);

#else
# error Unsupported platform.
#endif
}

/* Nobody uses this but for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
{
    return rbimpl_atomic_or(var, val);
}
#endif

static inline rb_atomic_t
rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchange(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_uint(ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline size_t
rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN64)
    return InterlockedExchange64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_swap_ulong(ptr, val);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
    return RBIMPL_CAST((size_t)ret);

#endif
}

static inline void *
rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* const_cast */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pval = RBIMPL_CAST((PVOID)val);
    return InterlockedExchangePointer(pptr, pval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((void *)sret);

#endif
}

static inline VALUE
rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((VALUE)sret);
}

static inline rb_atomic_t
rbimpl_atomic_load(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    /* Adding zero leaves the value unchanged, so this is effectively an
     * atomic read on platforms without a dedicated atomic load. */
    return rbimpl_atomic_fetch_add(ptr, 0);
#endif
}

static inline void
rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);

#else
    /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
    rbimpl_atomic_exchange(ptr, val);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    return InterlockedCompareExchange(ptr, newval, oldval);

#elif defined(_WIN32)
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
    return RBIMPL_CAST((rb_atomic_t)pret);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_cas_uint(ptr, oldval, newval);

#else
# error Unsupported platform.
#endif
}

/* Nobody uses this but for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
{
    return rbimpl_atomic_cas(var, oldval, newval);
}
#endif

static inline size_t
rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif defined(_WIN64)
    return InterlockedCompareExchange64(ptr, newval, oldval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_cas_ulong(ptr, oldval, newval);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    return rbimpl_atomic_cas(tmp, oldval, newval);

#endif
}

static inline void *
rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* ... Can we say that `InterlockedCompareExchangePointer` surely exists
     * whenever `InterlockedExchangePointer` is defined?  Seems so, but...? */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    return InterlockedCompareExchangePointer(pptr, pnew, pold);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    void *pold = RBIMPL_CAST((void *)oldval);
    void *pnew = RBIMPL_CAST((void *)newval);
    return atomic_cas_ptr(ptr, pold, pnew);

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((void *)sret);

#endif
}

static inline void *
rbimpl_atomic_ptr_load(void **ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
#else
    void *val = *ptr;
    return rbimpl_atomic_ptr_cas(ptr, val, val);
#endif
}

static inline VALUE
rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((VALUE)sret);
}

#endif /* RUBY_ATOMIC_H */