bits.h
#ifndef INTERNAL_BITS_H /*-*-C-*-vi:se ft=c:*/
#define INTERNAL_BITS_H
#include "ruby/internal/config.h"
#include <limits.h> /* for CHAR_BIT */
#include <stdint.h> /* for uintptr_t */
#include "internal/compilers.h" /* for MSC_VERSION_SINCE */

#ifdef _MSC_VER
# include <stdlib.h> /* for _byteswap_uint64 */
#endif

#if defined(HAVE_X86INTRIN_H)
# include <x86intrin.h> /* for _lzcnt_u64 */
#elif defined(_MSC_VER)
# include <intrin.h> /* for the following intrinsics */
#endif

#if defined(_MSC_VER) && defined(__AVX__)
# pragma intrinsic(__popcnt)
# pragma intrinsic(__popcnt64)
#endif

#if defined(_MSC_VER) && defined(__AVX2__)
# pragma intrinsic(__lzcnt)
# pragma intrinsic(__lzcnt64)
#endif

#if defined(_MSC_VER)
# pragma intrinsic(_rotl)
# pragma intrinsic(_rotr)
# ifdef _WIN64
#  pragma intrinsic(_rotl64)
#  pragma intrinsic(_rotr64)
# endif
# pragma intrinsic(_BitScanForward)
# pragma intrinsic(_BitScanReverse)
# ifdef _WIN64
#  pragma intrinsic(_BitScanForward64)
#  pragma intrinsic(_BitScanReverse64)
# endif
#endif

#include "ruby/ruby.h" /* for VALUE */
#include "internal/static_assert.h" /* for STATIC_ASSERT */

/* The most significant bit of the lower half of a long integer.
 * If sizeof(long) == 4, this is 0x8000.
 * If sizeof(long) == 8, this is 0x80000000.
 */
#define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))
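/* Worked example (illustrative): with SIZEOF_LONG == 4 the shift count is
 * (32-1)/2 == 15, so HALF_LONG_MSB == 1 << 15 == 0x8000; with
 * SIZEOF_LONG == 8 it is (64-1)/2 == 31, giving 1 << 31 == 0x80000000. */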

#define SIGNED_INTEGER_TYPE_P(T) (0 > ((T)0)-1)
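/* Example (illustrative): SIGNED_INTEGER_TYPE_P(int) is 1 because
 * (int)0 - 1 is -1, which is below zero; SIGNED_INTEGER_TYPE_P(unsigned)
 * is 0 because the subtraction wraps around to UINT_MAX. */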

#define SIGNED_INTEGER_MIN(T) \
    ((sizeof(T) == sizeof(int8_t))  ? ((T)INT8_MIN)  : \
    ((sizeof(T) == sizeof(int16_t)) ? ((T)INT16_MIN) : \
    ((sizeof(T) == sizeof(int32_t)) ? ((T)INT32_MIN) : \
    ((sizeof(T) == sizeof(int64_t)) ? ((T)INT64_MIN) : \
     0))))

#define SIGNED_INTEGER_MAX(T) ((T)(SIGNED_INTEGER_MIN(T) ^ ((T)~(T)0)))

#define UNSIGNED_INTEGER_MAX(T) ((T)~(T)0)
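/* Example (illustrative): for T == int16_t, SIGNED_INTEGER_MIN(T) is
 * INT16_MIN (-32768) and SIGNED_INTEGER_MAX(T) XORs that with all bits set,
 * i.e. 0x8000 ^ 0xFFFF == 0x7FFF == 32767.  UNSIGNED_INTEGER_MAX(uint16_t)
 * is simply 0xFFFF. */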

#ifndef MUL_OVERFLOW_SIGNED_INTEGER_P
#if __has_builtin(__builtin_mul_overflow_p)
# define MUL_OVERFLOW_P(a, b) \
    __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_mul_overflow)
# define MUL_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_mul_overflow((a), (b), &c); })
#endif

#define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (a) == 0 ? 0 : \
    (a) == -1 ? (b) < -(max) : \
    (a) > 0 ? \
      ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
      ((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))
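/* The test divides the bound by (a) instead of multiplying, so the check
 * itself can never overflow.  Example (illustrative, for plain int):
 * MUL_OVERFLOW_SIGNED_INTEGER_P(3, INT_MAX / 2, INT_MIN, INT_MAX) takes the
 * (a) > 0, (b) > 0 branch and reports overflow because
 * INT_MAX / 3 < INT_MAX / 2.  The (a) == -1 case is split out because
 * (min) / -1 would itself overflow. */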

#if __has_builtin(__builtin_mul_overflow_p)
/* __builtin_mul_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_mul_overflow_p((a), (b), c.fixnum); \
    })
#else
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif
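/* The bitfield above hands __builtin_mul_overflow_p a result "type" that is
 * one bit narrower than long, which matches the Fixnum range (one bit of a
 * long is reserved for the Fixnum tag).  Illustrative use, assuming LP64:
 * MUL_OVERFLOW_FIXNUM_P(FIXNUM_MAX, 2) is true even though the mathematical
 * product still fits in a long. */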

#if defined(MUL_OVERFLOW_P) && defined(USE___BUILTIN_MUL_OVERFLOW_LONG_LONG)
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef MUL_OVERFLOW_P
# define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
# define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define MUL_OVERFLOW_INT_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#endif

#ifndef ADD_OVERFLOW_SIGNED_INTEGER_P
#if __has_builtin(__builtin_add_overflow_p)
# define ADD_OVERFLOW_P(a, b) \
    __builtin_add_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_add_overflow)
# define ADD_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_add_overflow((a), (b), &c); })
#endif

#define ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (a) > 0 ? (b) > (max) - (a) : (b) < (min) - (a))
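/* As with the multiplication check, the bound is moved to the other side of
 * the comparison so the test never overflows: when (a) > 0, (max) - (a) is
 * safe to compute, and (b) > (max) - (a) exactly when a + b would exceed
 * (max).  Example (illustrative):
 * ADD_OVERFLOW_SIGNED_INTEGER_P(1, INT_MAX, INT_MIN, INT_MAX) is true
 * because INT_MAX > INT_MAX - 1. */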

#if __has_builtin(__builtin_add_overflow_p)
/* __builtin_add_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define ADD_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_add_overflow_p((a), (b), c.fixnum); \
    })
#else
# define ADD_OVERFLOW_FIXNUM_P(a, b) \
    ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#if defined(ADD_OVERFLOW_P) && defined(USE___BUILTIN_ADD_OVERFLOW_LONG_LONG)
# define ADD_OVERFLOW_LONG_LONG_P(a, b) ADD_OVERFLOW_P(a, b)
#else
# define ADD_OVERFLOW_LONG_LONG_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef ADD_OVERFLOW_P
# define ADD_OVERFLOW_LONG_P(a, b) ADD_OVERFLOW_P(a, b)
# define ADD_OVERFLOW_INT_P(a, b) ADD_OVERFLOW_P(a, b)
#else
# define ADD_OVERFLOW_LONG_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define ADD_OVERFLOW_INT_P(a, b) ADD_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#endif

#ifndef SUB_OVERFLOW_SIGNED_INTEGER_P
#if __has_builtin(__builtin_sub_overflow_p)
# define SUB_OVERFLOW_P(a, b) \
    __builtin_sub_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_sub_overflow)
# define SUB_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_sub_overflow((a), (b), &c); })
#endif

#define SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (b) > 0 ? (a) < (min) + (b) : (a) > (max) + (b))
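/* Same idea for subtraction: a - b overflows exactly when a < (min) + b (for
 * positive b) or a > (max) + b (otherwise), and both right-hand sides are
 * safe to compute.  Example (illustrative):
 * SUB_OVERFLOW_SIGNED_INTEGER_P(0, INT_MIN, INT_MIN, INT_MAX) is true
 * because 0 > INT_MAX + INT_MIN, i.e. 0 - INT_MIN does not fit in an int. */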

#if __has_builtin(__builtin_sub_overflow_p)
/* __builtin_sub_overflow_p can take bitfield */
/* and GCC permits bitfields for integers other than int */
# define SUB_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_sub_overflow_p((a), (b), c.fixnum); \
    })
#else
# define SUB_OVERFLOW_FIXNUM_P(a, b) \
    SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#if defined(SUB_OVERFLOW_P) && defined(USE___BUILTIN_SUB_OVERFLOW_LONG_LONG)
# define SUB_OVERFLOW_LONG_LONG_P(a, b) SUB_OVERFLOW_P(a, b)
#else
# define SUB_OVERFLOW_LONG_LONG_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
#endif

#ifdef SUB_OVERFLOW_P
# define SUB_OVERFLOW_LONG_P(a, b) SUB_OVERFLOW_P(a, b)
# define SUB_OVERFLOW_INT_P(a, b) SUB_OVERFLOW_P(a, b)
#else
# define SUB_OVERFLOW_LONG_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define SUB_OVERFLOW_INT_P(a, b) SUB_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif
#endif

#ifdef HAVE_UINT128_T
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
     sizeof(x) <= sizeof(int64_t) ? 64 - nlz_int64((uint64_t)(x)) : \
                                   128 - nlz_int128((uint128_t)(x)))
#else
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
                                    64 - nlz_int64((uint64_t)(x)))
#endif
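/* bit_length(x) is the position of the highest set bit plus one, i.e. the
 * number of bits needed to represent x.  Example (illustrative):
 * bit_length((uint32_t)1023) == 10, bit_length((uint32_t)1024) == 11, and
 * bit_length((uint32_t)0) == 0 because nlz_int32(0) is 32. */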

#ifndef swap16
# define swap16 ruby_swap16
#endif

#ifndef swap32
# define swap32 ruby_swap32
#endif

#ifndef swap64
# define swap64 ruby_swap64
#endif

static inline uint16_t ruby_swap16(uint16_t);
static inline uint32_t ruby_swap32(uint32_t);
static inline uint64_t ruby_swap64(uint64_t);
static inline unsigned nlz_int(unsigned x);
static inline unsigned nlz_long(unsigned long x);
static inline unsigned nlz_long_long(unsigned long long x);
static inline unsigned nlz_intptr(uintptr_t x);
static inline unsigned nlz_int32(uint32_t x);
static inline unsigned nlz_int64(uint64_t x);
#ifdef HAVE_UINT128_T
static inline unsigned nlz_int128(uint128_t x);
#endif
static inline unsigned rb_popcount32(uint32_t x);
static inline unsigned rb_popcount64(uint64_t x);
static inline unsigned rb_popcount_intptr(uintptr_t x);
static inline int ntz_int32(uint32_t x);
static inline int ntz_int64(uint64_t x);
static inline int ntz_intptr(uintptr_t x);
static inline VALUE RUBY_BIT_ROTL(VALUE, int);
static inline VALUE RUBY_BIT_ROTR(VALUE, int);

static inline uint16_t
ruby_swap16(uint16_t x)
{
#if __has_builtin(__builtin_bswap16)
    return __builtin_bswap16(x);

#elif defined(_MSC_VER)
    return _byteswap_ushort(x);

#else
    return (x << 8) | (x >> 8);

#endif
}

static inline uint32_t
ruby_swap32(uint32_t x)
{
#if __has_builtin(__builtin_bswap32)
    return __builtin_bswap32(x);

#elif defined(_MSC_VER)
    return _byteswap_ulong(x);

#else
    x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
    x = ((x & 0x00FF00FF) <<  8) | ((x & 0xFF00FF00) >>  8);
    return x;

#endif
}

static inline uint64_t
ruby_swap64(uint64_t x)
{
#if __has_builtin(__builtin_bswap64)
    return __builtin_bswap64(x);

#elif defined(_MSC_VER)
    return _byteswap_uint64(x);

#else
    x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
    x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
    x = ((x & 0x00FF00FF00FF00FFULL) <<  8) | ((x & 0xFF00FF00FF00FF00ULL) >>  8);
    return x;

#endif
}
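/* Example (illustrative): ruby_swap16(0x1234) == 0x3412,
 * ruby_swap32(0x11223344) == 0x44332211, and
 * ruby_swap64(0x1122334455667788ULL) == 0x8877665544332211ULL.  The portable
 * fallbacks reverse the bytes by swapping progressively smaller groups:
 * halves first, then byte pairs. */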

static inline unsigned int
nlz_int32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    /* Note: It seems there is no such thing like __LZCNT__ predefined in MSVC.
     * AMD CPUs have had this instruction for decades (since K10) but for
     * Intel, Haswell is the oldest one. We need to use __AVX2__ for maximum
     * safety. */
    return (unsigned int)__lzcnt(x);

#elif defined(__x86_64__) && defined(__LZCNT__)
    return (unsigned int)_lzcnt_u32(x);

#elif defined(_MSC_VER) /* && !defined(__AVX2__) */
    unsigned long r;
    return _BitScanReverse(&r, x) ? (31 - (int)r) : 32;

#elif __has_builtin(__builtin_clz)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned int)__builtin_clz(x) : 32;

#else
    uint32_t y;
    unsigned n = 32;
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >>  8; if (y) {n -=  8; x = y;}
    y = x >>  4; if (y) {n -=  4; x = y;}
    y = x >>  2; if (y) {n -=  2; x = y;}
    y = x >>  1; if (y) {return n - 2;}
    return (unsigned int)(n - x);
#endif
}
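/* Example (illustrative): nlz_int32(0x00FF0000) == 8 and nlz_int32(0) == 32.
 * The portable fallback is a binary search: each step tests whether any bit
 * survives a shift by 16, 8, 4, 2 and finally 1, narrowing down the position
 * of the highest set bit without a loop. */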

static inline unsigned int
nlz_int64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    return (unsigned int)__lzcnt64(x);

#elif defined(__x86_64__) && defined(__LZCNT__)
    return (unsigned int)_lzcnt_u64(x);

#elif defined(_WIN64) && defined(_MSC_VER) /* && !defined(__AVX2__) */
    unsigned long r;
    return _BitScanReverse64(&r, x) ? (63u - (unsigned int)r) : 64;

#elif __has_builtin(__builtin_clzl)
    if (x == 0) {
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(~0);
    }

#else
    uint64_t y;
    unsigned int n = 64;
    y = x >> 32; if (y) {n -= 32; x = y;}
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >>  8; if (y) {n -=  8; x = y;}
    y = x >>  4; if (y) {n -=  4; x = y;}
    y = x >>  2; if (y) {n -=  2; x = y;}
    y = x >>  1; if (y) {return n - 2;}
    return (unsigned int)(n - x);

#endif
}

#ifdef HAVE_UINT128_T
static inline unsigned int
nlz_int128(uint128_t x)
{
    uint64_t y = (uint64_t)(x >> 64);

    if (x == 0) {
        return 128;
    }
    else if (y == 0) {
        return (unsigned int)nlz_int64(x) + 64;
    }
    else {
        return (unsigned int)nlz_int64(y);
    }
}
#endif

static inline unsigned int
nlz_int(unsigned int x)
{
    if (sizeof(unsigned int) * CHAR_BIT == 32) {
        return nlz_int32((uint32_t)x);
    }
    else if (sizeof(unsigned int) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
nlz_long(unsigned long x)
{
    if (sizeof(unsigned long) * CHAR_BIT == 32) {
        return nlz_int32((uint32_t)x);
    }
    else if (sizeof(unsigned long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
nlz_long_long(unsigned long long x)
{
    if (sizeof(unsigned long long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
#ifdef HAVE_UINT128_T
    else if (sizeof(unsigned long long) * CHAR_BIT == 128) {
        return nlz_int128((uint128_t)x);
    }
#endif
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
nlz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) == sizeof(unsigned int)) {
        return nlz_int((unsigned int)x);
    }
    if (sizeof(uintptr_t) == sizeof(unsigned long)) {
        return nlz_long((unsigned long)x);
    }
    if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
        return nlz_long_long((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
rb_popcount32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    /* Note: CPUs since Nehalem and Barcelona have had this instruction so SSE
     * 4.2 should suffice, but it seems there is no such thing like __SSE_4_2__
     * predefined macro in MSVC. They do have __AVX__ so use it instead. */
    return (unsigned int)__popcnt(x);

#elif __has_builtin(__builtin_popcount)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT >= 32);
    return (unsigned int)__builtin_popcount(x);

#else
    x = (x & 0x55555555) + (x >>  1 & 0x55555555);
    x = (x & 0x33333333) + (x >>  2 & 0x33333333);
    x = (x & 0x07070707) + (x >>  4 & 0x07070707);
    x = (x & 0x000f000f) + (x >>  8 & 0x000f000f);
    x = (x & 0x0000001f) + (x >> 16 & 0x0000001f);
    return (unsigned int)x;

#endif
}
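/* The portable fallback is the usual SWAR bit count: each line adds adjacent
 * 1-, 2-, 4-, 8- and finally 16-bit groups in parallel, so the last sum is
 * the total number of set bits (at most 32, which fits in the result).
 * Example (illustrative): rb_popcount32(0xF0F0F0F0) == 16 and
 * rb_popcount32(7) == 3. */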

static inline unsigned int
rb_popcount64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    return (unsigned int)__popcnt64(x);

#elif __has_builtin(__builtin_popcount)
    if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(~0);
    }

#else
    x = (x & 0x5555555555555555) + (x >>  1 & 0x5555555555555555);
    x = (x & 0x3333333333333333) + (x >>  2 & 0x3333333333333333);
    x = (x & 0x0707070707070707) + (x >>  4 & 0x0707070707070707);
    x = (x & 0x000f000f000f000f) + (x >>  8 & 0x000f000f000f000f);
    x = (x & 0x0000001f0000001f) + (x >> 16 & 0x0000001f0000001f);
    x = (x & 0x000000000000003f) + (x >> 32 & 0x000000000000003f);
    return (unsigned int)x;

#endif
}

static inline unsigned int
rb_popcount_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) * CHAR_BIT == 64) {
        return rb_popcount64((uint64_t)x);
    }
    else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
        return rb_popcount32((uint32_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline int
ntz_int32(uint32_t x)
{
#if defined(__x86_64__) && defined(__BMI__)
    return (unsigned)_tzcnt_u32(x);

#elif defined(_MSC_VER)
    /* :FIXME: Is there any way to issue TZCNT instead of BSF, apart from using
     * assembly? Because issuing LZCNT seems possible (see nlz.h). */
    unsigned long r;
    return _BitScanForward(&r, x) ? (int)r : 32;

#elif __has_builtin(__builtin_ctz)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned)__builtin_ctz(x) : 32;

#else
    return rb_popcount32((~x) & (x-1));

#endif
}
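/* The portable fallback relies on the identity that (~x) & (x - 1) has a bit
 * set at exactly the positions below the lowest set bit of x, so its
 * popcount equals the number of trailing zeros.  Example (illustrative):
 * for x == 0x18, x - 1 == 0x17 and (~x) & (x - 1) == 0x07, whose popcount is
 * 3 == ntz_int32(0x18); for x == 0 the expression is all ones, giving 32. */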

static inline int
ntz_int64(uint64_t x)
{
#if defined(__x86_64__) && defined(__BMI__)
    return (unsigned)_tzcnt_u64(x);

#elif defined(_WIN64) && defined(_MSC_VER)
    unsigned long r;
    return _BitScanForward64(&r, x) ? (int)r : 64;

#elif __has_builtin(__builtin_ctzl)
    if (x == 0) {
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzll((unsigned long long)x);
    }
    else {
        /* :FIXME: Is there a way to make this branch a compile-time error? */
        UNREACHABLE_RETURN(~0);
    }

#else
    return rb_popcount64((~x) & (x-1));

#endif
}

static inline int
ntz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) * CHAR_BIT == 64) {
        return ntz_int64((uint64_t)x);
    }
    else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
        return ntz_int32((uint32_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline VALUE
RUBY_BIT_ROTL(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateleft32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateleft32(v, n);

#elif __has_builtin(__builtin_rotateleft64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateleft64(v, n);

#elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotl(v, n);

#elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotl64(v, n);

#elif defined(_lrotl) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotl(v, n);

#else
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (n & m)) | (v >> (-n & m));
#endif
}

static inline VALUE
RUBY_BIT_ROTR(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateright32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateright32(v, n);

#elif __has_builtin(__builtin_rotateright64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateright64(v, n);

#elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotr(v, n);

#elif defined(_MSC_VER) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotr64(v, n);

#elif defined(_lrotr) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotr(v, n);

#else
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (-n & m)) | (v >> (n & m));
#endif
}
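/* The fallback expressions use the (n & m) / (-n & m) form so that a rotation
 * count of 0 (or any multiple of the word size) never produces a shift by the
 * full width, which would be undefined behaviour.  Example (illustrative,
 * for a 64-bit VALUE): RUBY_BIT_ROTL((VALUE)1, 1) == 2 and
 * RUBY_BIT_ROTR((VALUE)1, 1) == (VALUE)1 << 63. */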

#endif /* INTERNAL_BITS_H */