Ruby 3.5.0dev (2025-01-10 revision 5fab31b15e32622c4b71d1d347a41937e9f9c212)
bits.h
#ifndef INTERNAL_BITS_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_BITS_H
#include "ruby/internal/config.h"
#include <limits.h> /* for CHAR_BIT */
#include <stdint.h> /* for uintptr_t */
#include "internal/compilers.h" /* for MSC_VERSION_SINCE */

#if MSC_VERSION_SINCE(1310)
# include <stdlib.h> /* for _byteswap_uint64 */
#endif

#if defined(HAVE_X86INTRIN_H)
# include <x86intrin.h> /* for _lzcnt_u64 */
#elif MSC_VERSION_SINCE(1310)
# include <intrin.h> /* for the following intrinsics */
#endif

#if defined(_MSC_VER) && defined(__AVX__)
# pragma intrinsic(__popcnt)
# pragma intrinsic(__popcnt64)
#endif

#if defined(_MSC_VER) && defined(__AVX2__)
# pragma intrinsic(__lzcnt)
# pragma intrinsic(__lzcnt64)
#endif

#if MSC_VERSION_SINCE(1310)
# pragma intrinsic(_rotl)
# pragma intrinsic(_rotr)
# ifdef _WIN64
#  pragma intrinsic(_rotl64)
#  pragma intrinsic(_rotr64)
# endif
#endif

#if MSC_VERSION_SINCE(1400)
# pragma intrinsic(_BitScanForward)
# pragma intrinsic(_BitScanReverse)
# ifdef _WIN64
#  pragma intrinsic(_BitScanForward64)
#  pragma intrinsic(_BitScanReverse64)
# endif
#endif

#include "ruby/ruby.h" /* for VALUE */
#include "internal/static_assert.h" /* for STATIC_ASSERT */

/* The most significant bit of the lower part of half-long integer.
 * If sizeof(long) == 4, this is 0x8000.
 * If sizeof(long) == 8, this is 0x80000000.
 */
#define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))

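/* SIGNED_INTEGER_TYPE_P(T) is 1 iff T is a signed integer type: ((T)0)-1 is
 * -1 for signed T but wraps around to the type's maximum for unsigned T, so
 * only signed types compare below zero. */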
#define SIGNED_INTEGER_TYPE_P(T) (0 > ((T)0)-1)

#define SIGNED_INTEGER_MIN(T) \
    ((sizeof(T) == sizeof(int8_t))  ? ((T)INT8_MIN)  : \
    ((sizeof(T) == sizeof(int16_t)) ? ((T)INT16_MIN) : \
    ((sizeof(T) == sizeof(int32_t)) ? ((T)INT32_MIN) : \
    ((sizeof(T) == sizeof(int64_t)) ? ((T)INT64_MIN) : \
     0))))

#define SIGNED_INTEGER_MAX(T) ((T)(SIGNED_INTEGER_MIN(T) ^ ((T)~(T)0)))

#define UNSIGNED_INTEGER_MAX(T) ((T)~(T)0)

#ifndef MUL_OVERFLOW_SIGNED_INTEGER_P
#if __has_builtin(__builtin_mul_overflow_p)
# define MUL_OVERFLOW_P(a, b) \
    __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_mul_overflow)
# define MUL_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_mul_overflow((a), (b), &c); })
#endif

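/* Portable fallback predicate: decide whether a * b would leave [min, max]
 * by dividing instead of multiplying, so the check itself cannot overflow.
 * a == 0 never overflows, and a == -1 is special-cased because min / -1
 * itself overflows on two's-complement machines.  For example,
 * MUL_OVERFLOW_SIGNED_INTEGER_P(INT_MAX, 2, INT_MIN, INT_MAX) is true
 * because INT_MAX / INT_MAX == 1 < 2. */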
#define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (a) == 0 ? 0 : \
    (a) == -1 ? (b) < -(max) : \
    (a) > 0 ? \
      ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
      ((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))

#if __has_builtin(__builtin_mul_overflow_p)
/* __builtin_mul_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_mul_overflow_p((a), (b), c.fixnum); \
    })
#else
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#if defined(MUL_OVERFLOW_P) && defined(USE___BUILTIN_MUL_OVERFLOW_LONG_LONG)
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef MUL_OVERFLOW_P
# define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
# define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#endif

#ifndef ADD_OVERFLOW_SIGNED_INTEGER_P
#if __has_builtin(__builtin_add_overflow_p)
# define ADD_OVERFLOW_P(a, b) \
    __builtin_add_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_add_overflow)
# define ADD_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_add_overflow((a), (b), &c); })
#endif

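/* a + b overflows [min, max] iff a > 0 and b > max - a, or a <= 0 and
 * b < min - a.  Rearranged this way the macro never computes a + b, so the
 * check itself cannot overflow. */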
#define ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (a) > 0 ? (b) > (max) - (a) : (b) < (min) - (a))

#if __has_builtin(__builtin_add_overflow_p)
/* __builtin_add_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define ADD_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_add_overflow_p((a), (b), c.fixnum); \
    })
#else
# define ADD_OVERFLOW_FIXNUM_P(a, b) \
    ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#if defined(ADD_OVERFLOW_P) && defined(USE___BUILTIN_ADD_OVERFLOW_LONG_LONG)
# define ADD_OVERFLOW_LONG_LONG_P(a, b) ADD_OVERFLOW_P(a, b)
#else
# define ADD_OVERFLOW_LONG_LONG_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef ADD_OVERFLOW_P
# define ADD_OVERFLOW_LONG_P(a, b) ADD_OVERFLOW_P(a, b)
# define ADD_OVERFLOW_INT_P(a, b) ADD_OVERFLOW_P(a, b)
#else
# define ADD_OVERFLOW_LONG_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define ADD_OVERFLOW_INT_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#endif

#ifndef SUB_OVERFLOW_SIGNED_INTEGER_P
#if __has_builtin(__builtin_sub_overflow_p)
# define SUB_OVERFLOW_P(a, b) \
    __builtin_sub_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_sub_overflow)
# define SUB_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_sub_overflow((a), (b), &c); })
#endif

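/* Likewise for subtraction: a - b overflows iff b > 0 and a < min + b, or
 * b <= 0 and a > max + b; the difference itself is never formed. */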
#define SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (b) > 0 ? (a) < (min) + (b) : (a) > (max) + (b))

#if __has_builtin(__builtin_sub_overflow_p)
/* __builtin_sub_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define SUB_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_sub_overflow_p((a), (b), c.fixnum); \
    })
#else
# define SUB_OVERFLOW_FIXNUM_P(a, b) \
    SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#if defined(SUB_OVERFLOW_P) && defined(USE___BUILTIN_SUB_OVERFLOW_LONG_LONG)
# define SUB_OVERFLOW_LONG_LONG_P(a, b) SUB_OVERFLOW_P(a, b)
#else
# define SUB_OVERFLOW_LONG_LONG_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef SUB_OVERFLOW_P
# define SUB_OVERFLOW_LONG_P(a, b) SUB_OVERFLOW_P(a, b)
# define SUB_OVERFLOW_INT_P(a, b) SUB_OVERFLOW_P(a, b)
#else
# define SUB_OVERFLOW_LONG_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define SUB_OVERFLOW_INT_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#endif

#ifdef HAVE_UINT128_T
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
     sizeof(x) <= sizeof(int64_t) ? 64 - nlz_int64((uint64_t)(x)) : \
     128 - nlz_int128((uint128_t)(x)))
#else
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
     64 - nlz_int64((uint64_t)(x)))
#endif
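/* bit_length(x) is the number of bits needed to represent x, i.e. one plus
 * the index of its highest set bit: bit_length(0) == 0, bit_length(1) == 1,
 * bit_length(255) == 8, bit_length(256) == 9. */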

#ifndef swap16
# define swap16 ruby_swap16
#endif

#ifndef swap32
# define swap32 ruby_swap32
#endif

#ifndef swap64
# define swap64 ruby_swap64
#endif

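/* Naming: ruby_swapNN reverses byte order, nlz_* counts leading zero bits,
 * ntz_* counts trailing zero bits, and rb_popcount* counts set bits.  The
 * int/long/long long/intptr variants simply dispatch to the fixed-width
 * implementation that matches that type's size. */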
static inline uint16_t ruby_swap16(uint16_t);
static inline uint32_t ruby_swap32(uint32_t);
static inline uint64_t ruby_swap64(uint64_t);
static inline unsigned nlz_int(unsigned x);
static inline unsigned nlz_long(unsigned long x);
static inline unsigned nlz_long_long(unsigned long long x);
static inline unsigned nlz_intptr(uintptr_t x);
static inline unsigned nlz_int32(uint32_t x);
static inline unsigned nlz_int64(uint64_t x);
#ifdef HAVE_UINT128_T
static inline unsigned nlz_int128(uint128_t x);
#endif
static inline unsigned rb_popcount32(uint32_t x);
static inline unsigned rb_popcount64(uint64_t x);
static inline unsigned rb_popcount_intptr(uintptr_t x);
static inline int ntz_int32(uint32_t x);
static inline int ntz_int64(uint64_t x);
static inline int ntz_intptr(uintptr_t x);
static inline VALUE RUBY_BIT_ROTL(VALUE, int);
static inline VALUE RUBY_BIT_ROTR(VALUE, int);

static inline uint16_t
ruby_swap16(uint16_t x)
{
#if __has_builtin(__builtin_bswap16)
    return __builtin_bswap16(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_ushort(x);

#else
    return (x << 8) | (x >> 8);

#endif
}

static inline uint32_t
ruby_swap32(uint32_t x)
{
#if __has_builtin(__builtin_bswap32)
    return __builtin_bswap32(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_ulong(x);

#else
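    /* Portable fallback: swap the 16-bit halves, then the bytes within each
     * half.  ruby_swap64 below applies the same idea to 32-, 16- and 8-bit
     * groups. */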
    x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
    x = ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);
    return x;

#endif
}

static inline uint64_t
ruby_swap64(uint64_t x)
{
#if __has_builtin(__builtin_bswap64)
    return __builtin_bswap64(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_uint64(x);

#else
    x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
    x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
    x = ((x & 0x00FF00FF00FF00FFULL) << 8) | ((x & 0xFF00FF00FF00FF00ULL) >> 8);
    return x;

#endif
}

static inline unsigned int
nlz_int32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    /* Note: It seems there is no such thing like __LZCNT__ predefined in MSVC.
     * AMD CPUs have had this instruction for decades (since K10) but for
     * Intel, Haswell is the oldest one. We need to use __AVX2__ for maximum
     * safety. */
    return (unsigned int)__lzcnt(x);

#elif defined(__x86_64__) && defined(__LZCNT__)
    return (unsigned int)_lzcnt_u32(x);

#elif MSC_VERSION_SINCE(1400) /* &&! defined(__AVX2__) */
    unsigned long r;
    return _BitScanReverse(&r, x) ? (31 - (int)r) : 32;

#elif __has_builtin(__builtin_clz)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned int)__builtin_clz(x) : 32;

#else
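    /* Portable fallback (binary search, as in Hacker's Delight): if the top
     * half of the current window is nonzero, the highest set bit lives
     * there, so drop 16 (then 8, 4, 2, 1) from the running count n and keep
     * narrowing; what remains of n is the number of leading zeros. */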
    uint32_t y;
    unsigned n = 32;
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >> 8; if (y) {n -= 8; x = y;}
    y = x >> 4; if (y) {n -= 4; x = y;}
    y = x >> 2; if (y) {n -= 2; x = y;}
    y = x >> 1; if (y) {return n - 2;}
    return (unsigned int)(n - x);
#endif
}

static inline unsigned int
nlz_int64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    return (unsigned int)__lzcnt64(x);

#elif defined(__x86_64__) && defined(__LZCNT__)
    return (unsigned int)_lzcnt_u64(x);

#elif defined(_WIN64) && MSC_VERSION_SINCE(1400) /* &&! defined(__AVX2__) */
    unsigned long r;
    return _BitScanReverse64(&r, x) ? (63u - (unsigned int)r) : 64;

#elif __has_builtin(__builtin_clzl)
    if (x == 0) {
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(0);
    }

#else
    uint64_t y;
    unsigned int n = 64;
    y = x >> 32; if (y) {n -= 32; x = y;}
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >> 8; if (y) {n -= 8; x = y;}
    y = x >> 4; if (y) {n -= 4; x = y;}
    y = x >> 2; if (y) {n -= 2; x = y;}
    y = x >> 1; if (y) {return n - 2;}
    return (unsigned int)(n - x);

#endif
}

#ifdef HAVE_UINT128_T
static inline unsigned int
nlz_int128(uint128_t x)
{
    uint64_t y = (uint64_t)(x >> 64);

    if (x == 0) {
        return 128;
    }
    else if (y == 0) {
        return (unsigned int)nlz_int64(x) + 64;
    }
    else {
        return (unsigned int)nlz_int64(y);
    }
}
#endif

static inline unsigned int
nlz_int(unsigned int x)
{
    if (sizeof(unsigned int) * CHAR_BIT == 32) {
        return nlz_int32((uint32_t)x);
    }
    else if (sizeof(unsigned int) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
    else {
        UNREACHABLE_RETURN(0);
    }
}

static inline unsigned int
nlz_long(unsigned long x)
{
    if (sizeof(unsigned long) * CHAR_BIT == 32) {
        return nlz_int32((uint32_t)x);
    }
    else if (sizeof(unsigned long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
    else {
        UNREACHABLE_RETURN(0);
    }
}

static inline unsigned int
nlz_long_long(unsigned long long x)
{
    if (sizeof(unsigned long long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
#ifdef HAVE_UINT128_T
    else if (sizeof(unsigned long long) * CHAR_BIT == 128) {
        return nlz_int128((uint128_t)x);
    }
#endif
    else {
        UNREACHABLE_RETURN(0);
    }
}

static inline unsigned int
nlz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) == sizeof(unsigned int)) {
        return nlz_int((unsigned int)x);
    }
    if (sizeof(uintptr_t) == sizeof(unsigned long)) {
        return nlz_long((unsigned long)x);
    }
    if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
        return nlz_long_long((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(0);
    }
}

static inline unsigned int
rb_popcount32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    /* Note: CPUs since Nehalem and Barcelona have had this instruction so SSE
     * 4.2 should suffice, but it seems there is no such thing like __SSE_4_2__
     * predefined macro in MSVC. They do have __AVX__ so use it instead. */
    return (unsigned int)__popcnt(x);

#elif __has_builtin(__builtin_popcount)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT >= 32);
    return (unsigned int)__builtin_popcount(x);

#else
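    /* Portable fallback: SWAR ("SIMD within a register") counting.  Each
     * line adds adjacent 1-, 2-, 4-, 8- and 16-bit partial sums in parallel.
     * The masks shrink as soon as the sums are known to fit: after the 2-bit
     * step a nibble holds at most 4, so 0x07070707 already covers it. */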
    x = (x & 0x55555555) + (x >> 1 & 0x55555555);
    x = (x & 0x33333333) + (x >> 2 & 0x33333333);
    x = (x & 0x07070707) + (x >> 4 & 0x07070707);
    x = (x & 0x000f000f) + (x >> 8 & 0x000f000f);
    x = (x & 0x0000001f) + (x >>16 & 0x0000001f);
    return (unsigned int)x;

#endif
}

static inline unsigned int
rb_popcount64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    return (unsigned int)__popcnt64(x);

#elif __has_builtin(__builtin_popcount)
    if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(0);
    }

#else
    x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555);
    x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333);
    x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707);
    x = (x & 0x000f000f000f000f) + (x >> 8 & 0x000f000f000f000f);
    x = (x & 0x0000001f0000001f) + (x >>16 & 0x0000001f0000001f);
    x = (x & 0x000000000000003f) + (x >>32 & 0x000000000000003f);
    return (unsigned int)x;

#endif
}

static inline unsigned int
rb_popcount_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) * CHAR_BIT == 64) {
        return rb_popcount64((uint64_t)x);
    }
    else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
        return rb_popcount32((uint32_t)x);
    }
    else {
        UNREACHABLE_RETURN(0);
    }
}

static inline int
ntz_int32(uint32_t x)
{
#if defined(__x86_64__) && defined(__BMI__)
    return (unsigned)_tzcnt_u32(x);

#elif MSC_VERSION_SINCE(1400)
    /* :FIXME: Is there any way to issue TZCNT instead of BSF, apart from using
     * assembly? Because issuing LZCNT seems possible (see nlz.h). */
    unsigned long r;
    return _BitScanForward(&r, x) ? (int)r : 32;

#elif __has_builtin(__builtin_ctz)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned)__builtin_ctz(x) : 32;

#else
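    /* Portable fallback: (~x) & (x-1) keeps exactly the trailing zeros of x
     * as a block of ones and clears every other bit, so its population count
     * equals the number of trailing zeros (32 for x == 0, since the
     * expression is then 0xffffffff). */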
    return rb_popcount32((~x) & (x-1));

#endif
}

static inline int
ntz_int64(uint64_t x)
{
#if defined(__x86_64__) && defined(__BMI__)
    return (unsigned)_tzcnt_u64(x);

#elif defined(_WIN64) && MSC_VERSION_SINCE(1400)
    unsigned long r;
    return _BitScanForward64(&r, x) ? (int)r : 64;

#elif __has_builtin(__builtin_ctzl)
    if (x == 0) {
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(0);
    }

#else
    return rb_popcount64((~x) & (x-1));

#endif
}

static inline int
ntz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) * CHAR_BIT == 64) {
        return ntz_int64((uint64_t)x);
    }
    else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
        return ntz_int32((uint32_t)x);
    }
    else {
        UNREACHABLE_RETURN(0);
    }
}

static inline VALUE
RUBY_BIT_ROTL(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateleft32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateleft32(v, n);

#elif __has_builtin(__builtin_rotateleft64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateleft64(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotl(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotl64(v, n);

#elif defined(_lrotl) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotl(v, n);

#else
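    /* Portable fallback: masking the shift count with m == width-1 keeps
     * both shift amounts below the word size (shifting by the full width is
     * undefined behaviour in C), and the (-n & m) complementary shift also
     * works for n == 0. */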
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (n & m)) | (v >> (-n & m));
#endif
}

static inline VALUE
RUBY_BIT_ROTR(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateright32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateright32(v, n);

#elif __has_builtin(__builtin_rotateright64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateright64(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotr(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotr64(v, n);

#elif defined(_lrotr) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotr(v, n);

#else
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (-n & m)) | (v >> (n & m));
#endif
}

#endif /* INTERNAL_BITS_H */