#include "debug_counter.h"
#include "internal/array.h"
#include "internal/compar.h"
#include "internal/enum.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/vm.h"
#include "ruby_assert.h"
VALUE rb_cArray_empty_frozen;

#define ARY_DEFAULT_SIZE 16
#define ARY_MAX_SIZE (LONG_MAX / (int)sizeof(VALUE))
#define SMALL_ARRAY_LEN 16
static int
should_be_T_ARRAY(VALUE ary)
{
    return RB_TYPE_P(ary, T_ARRAY);
}
#define ARY_HEAP_PTR(a) (RUBY_ASSERT(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.ptr)
#define ARY_HEAP_LEN(a) (RUBY_ASSERT(!ARY_EMBED_P(a)), RARRAY(a)->as.heap.len)
#define ARY_HEAP_CAPA(a) (RUBY_ASSERT(!ARY_EMBED_P(a)), RUBY_ASSERT(!ARY_SHARED_ROOT_P(a)), \
                          RARRAY(a)->as.heap.aux.capa)

#define ARY_EMBED_PTR(a) (RUBY_ASSERT(ARY_EMBED_P(a)), RARRAY(a)->as.ary)
#define ARY_EMBED_LEN(a) \
    (RUBY_ASSERT(ARY_EMBED_P(a)), \
     (long)((RBASIC(a)->flags >> RARRAY_EMBED_LEN_SHIFT) & \
            (RARRAY_EMBED_LEN_MASK >> RARRAY_EMBED_LEN_SHIFT)))
#define ARY_HEAP_SIZE(a) (RUBY_ASSERT(!ARY_EMBED_P(a)), RUBY_ASSERT(ARY_OWNS_HEAP_P(a)), ARY_CAPA(a) * sizeof(VALUE))

#define ARY_OWNS_HEAP_P(a) (RUBY_ASSERT(should_be_T_ARRAY((VALUE)(a))), \
                            !FL_TEST_RAW((a), RARRAY_SHARED_FLAG|RARRAY_EMBED_FLAG))

#define FL_SET_EMBED(a) do { \
    RUBY_ASSERT(!ARY_SHARED_P(a)); \
    FL_SET((a), RARRAY_EMBED_FLAG); \
} while (0)

#define FL_UNSET_EMBED(ary) FL_UNSET((ary), RARRAY_EMBED_FLAG|RARRAY_EMBED_LEN_MASK)
#define FL_SET_SHARED(ary) do { \
    RUBY_ASSERT(!ARY_EMBED_P(ary)); \
    FL_SET((ary), RARRAY_SHARED_FLAG); \
} while (0)
#define FL_UNSET_SHARED(ary) FL_UNSET((ary), RARRAY_SHARED_FLAG)

#define ARY_SET_PTR_FORCE(ary, p) \
    (RARRAY(ary)->as.heap.ptr = (p))
#define ARY_SET_PTR(ary, p) do { \
    RUBY_ASSERT(!ARY_EMBED_P(ary)); \
    RUBY_ASSERT(!OBJ_FROZEN(ary)); \
    ARY_SET_PTR_FORCE(ary, p); \
} while (0)
#define ARY_SET_EMBED_LEN(ary, n) do { \
    long tmp_n = (n); \
    RUBY_ASSERT(ARY_EMBED_P(ary)); \
    RBASIC(ary)->flags &= ~RARRAY_EMBED_LEN_MASK; \
    RBASIC(ary)->flags |= (tmp_n) << RARRAY_EMBED_LEN_SHIFT; \
} while (0)
#define ARY_SET_HEAP_LEN(ary, n) do { \
    RUBY_ASSERT(!ARY_EMBED_P(ary)); \
    RARRAY(ary)->as.heap.len = (n); \
} while (0)
#define ARY_SET_LEN(ary, n) do { \
    if (ARY_EMBED_P(ary)) { \
        ARY_SET_EMBED_LEN((ary), (n)); \
    } \
    else { \
        ARY_SET_HEAP_LEN((ary), (n)); \
    } \
    RUBY_ASSERT(RARRAY_LEN(ary) == (n)); \
} while (0)
#define ARY_INCREASE_PTR(ary, n) do { \
    RUBY_ASSERT(!ARY_EMBED_P(ary)); \
    RUBY_ASSERT(!OBJ_FROZEN(ary)); \
    RARRAY(ary)->as.heap.ptr += (n); \
} while (0)
#define ARY_INCREASE_LEN(ary, n) do { \
    RUBY_ASSERT(!OBJ_FROZEN(ary)); \
    if (ARY_EMBED_P(ary)) { \
        ARY_SET_EMBED_LEN((ary), RARRAY_LEN(ary)+(n)); \
    } \
    else { \
        RARRAY(ary)->as.heap.len += (n); \
    } \
} while (0)

#define ARY_CAPA(ary) (ARY_EMBED_P(ary) ? ary_embed_capa(ary) : \
                       ARY_SHARED_ROOT_P(ary) ? RARRAY_LEN(ary) : ARY_HEAP_CAPA(ary))
#define ARY_SET_CAPA_FORCE(ary, n) \
    RARRAY(ary)->as.heap.aux.capa = (n);
#define ARY_SET_CAPA(ary, n) do { \
    RUBY_ASSERT(!ARY_EMBED_P(ary)); \
    RUBY_ASSERT(!ARY_SHARED_P(ary)); \
    RUBY_ASSERT(!OBJ_FROZEN(ary)); \
    ARY_SET_CAPA_FORCE(ary, n); \
} while (0)

#define ARY_SHARED_ROOT_OCCUPIED(ary) (!OBJ_FROZEN(ary) && ARY_SHARED_ROOT_REFCNT(ary) == 1)
#define ARY_SET_SHARED_ROOT_REFCNT(ary, value) do { \
    RUBY_ASSERT(ARY_SHARED_ROOT_P(ary)); \
    RUBY_ASSERT(!OBJ_FROZEN(ary)); \
    RUBY_ASSERT((value) >= 0); \
    RARRAY(ary)->as.heap.aux.capa = (value); \
} while (0)
#define FL_SET_SHARED_ROOT(ary) do { \
    RUBY_ASSERT(!OBJ_FROZEN(ary)); \
    RUBY_ASSERT(!ARY_EMBED_P(ary)); \
    FL_SET((ary), RARRAY_SHARED_ROOT_FLAG); \
} while (0)
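/*
 * Illustrative sketch (not part of the upstream source): the macros above
 * distinguish three storage states for an array object.  "ary_storage_kind"
 * is a hypothetical helper added here only to summarize them.
 */
static const char *
ary_storage_kind(VALUE ary)
{
    if (ARY_EMBED_P(ary)) return "embedded"; /* elements live inside the object slot itself */
    if (ARY_SHARED_P(ary)) return "shared";  /* borrows the buffer of a shared root array */
    return "heap";                           /* owns its own heap buffer (ARY_OWNS_HEAP_P) */
}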
static long
ary_embed_capa(VALUE ary)
{
    size_t size = rb_gc_obj_slot_size(ary) - offsetof(struct RArray, as.ary);
    return size / sizeof(VALUE);
}

static size_t
ary_embed_size(long capa)
{
    size_t size = offsetof(struct RArray, as.ary) + (sizeof(VALUE) * capa);
    if (size < sizeof(struct RArray)) size = sizeof(struct RArray);
    return size;
}

static bool
ary_embeddable_p(long capa)
{
    return rb_gc_size_allocatable_p(ary_embed_size(capa));
}

bool
rb_ary_embeddable_p(VALUE ary)
{
    return !(ARY_SHARED_ROOT_P(ary) || OBJ_FROZEN(ary) || ARY_SHARED_P(ary));
}

size_t
rb_ary_size_as_embedded(VALUE ary)
{
    size_t real_size;

    if (ARY_EMBED_P(ary)) {
        real_size = ary_embed_size(ARY_EMBED_LEN(ary));
    }
    else if (rb_ary_embeddable_p(ary)) {
        real_size = ary_embed_size(ARY_HEAP_CAPA(ary));
    }
    else {
        real_size = sizeof(struct RArray);
    }
    return real_size;
}
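/*
 * Worked example (assuming a 64-bit build where the smallest GC slot is 40
 * bytes and offsetof(struct RArray, as.ary) is 16): ary_embed_capa() yields
 * (40 - 16) / sizeof(VALUE) = 3 embedded elements, while ary_embed_size(3)
 * maps back to 16 + 3 * 8 = 40 bytes, so the two functions invert each other
 * for slot-sized capacities.
 */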
#if ARRAY_DEBUG
#define ary_verify(ary) ary_verify_(ary, __FILE__, __LINE__)

static VALUE
ary_verify_(VALUE ary, const char *file, int line)

    if (ARY_SHARED_P(ary)) {

    else if (ARY_EMBED_P(ary)) {

    for (i=0; i<len; i++) {

#else
#define ary_verify(ary) ((void)0)
#endif
ary_mem_clear(VALUE ary, long beg, long size)

memfill(register VALUE *mem, register long size, register VALUE val)

    memfill(ptr + beg, size, val);

    if (argc > (int)(128/sizeof(VALUE))) {
        rb_gc_writebarrier_remember(buff_owner_ary);

    for (i=0; i<argc; i++) {

    ary_memcpy0(ary, beg, argc, argv, ary);

ary_heap_alloc_buffer(size_t capa)

    ruby_sized_xfree((void *)ptr, size);

    ary_heap_free_ptr(ary, ARY_HEAP_PTR(ary), ARY_HEAP_SIZE(ary));

ary_heap_realloc(VALUE ary, size_t new_capa)

    if (!ARY_EMBED_P(ary)) {
        const VALUE *buf = ARY_HEAP_PTR(ary);
        long len = ARY_HEAP_LEN(ary);

        ARY_SET_EMBED_LEN(ary, len);

ary_resize_capa(VALUE ary, long capacity)

    if (capacity > ary_embed_capa(ary)) {
        size_t new_capa = capacity;
        if (ARY_EMBED_P(ary)) {
            long len = ARY_EMBED_LEN(ary);
            VALUE *ptr = ary_heap_alloc_buffer(capacity);

            ARY_SET_HEAP_LEN(ary, len);

            new_capa = ary_heap_realloc(ary, capacity);

        ARY_SET_CAPA(ary, new_capa);

    if (!ARY_EMBED_P(ary)) {
        long len = ARY_HEAP_LEN(ary);
        long old_capa = ARY_HEAP_CAPA(ary);

        if (len > capacity) len = capacity;

        ary_heap_free_ptr(ary, ptr, old_capa);

    long capacity = ARY_HEAP_LEN(ary);
    long old_capa = ARY_HEAP_CAPA(ary);

    if (old_capa > capacity) {
        size_t new_capa = ary_heap_realloc(ary, capacity);
        ARY_SET_CAPA(ary, new_capa);

    long new_capa = ARY_CAPA(ary) / 2;

    if (new_capa < ARY_DEFAULT_SIZE) {
        new_capa = ARY_DEFAULT_SIZE;
    }
    if (new_capa >= ARY_MAX_SIZE - min) {
        new_capa = (ARY_MAX_SIZE - min) / 2;
    }
    ary_resize_capa(ary, new_capa);

    FL_UNSET_SHARED(ary);

    if (ARY_OWNS_HEAP_P(ary)) {

    else if (ARY_SHARED_P(ary)) {

    ARY_SET_EMBED_LEN(ary, 0);

    RB_DEBUG_COUNTER_INC(obj_ary_shared_create);

    rb_check_frozen(ary);

    if (ARY_SHARED_P(ary)) {

        if (len <= ary_embed_capa(ary)) {

            FL_UNSET_SHARED(ary);

            ARY_SET_EMBED_LEN(ary, len);

            FL_UNSET_SHARED(ary);

            ARY_SET_CAPA(ary, shared_len);

            ARY_SET_CAPA_FORCE(ary, len);
            ARY_SET_PTR_FORCE(ary, ptr);

            rb_gc_writebarrier_remember(ary);

    rb_ary_modify_check(ary);
    rb_ary_cancel_sharing(ary);

ary_ensure_room_for_push(VALUE ary, long add_len)

    long new_len = old_len + add_len;

    if (old_len > ARY_MAX_SIZE - add_len) {

    if (ARY_SHARED_P(ary)) {
        if (new_len > ary_embed_capa(ary)) {

            rb_ary_modify_check(ary);

            ary_double_capa(ary, new_len);

    rb_ary_modify_check(ary);

    if (new_len > capa) {
        ary_double_capa(ary, new_len);

    if (!ARY_EMBED_P(ary) && !ARY_SHARED_P(ary) && !ARY_SHARED_ROOT_P(ary)) {
        ary_shrink_capa(ary);
    if (!ARY_EMBED_P(ary1) && ARY_SHARED_P(ary1) &&
        !ARY_EMBED_P(ary2) && ARY_SHARED_P(ary2) &&
        ARY_SHARED_ROOT(ary1) == ARY_SHARED_ROOT(ary2) &&
        ARY_HEAP_LEN(ary1) == ARY_HEAP_LEN(ary2)) {

    size_t size = ary_embed_size(capa);

ary_alloc_heap(VALUE klass)

              sizeof(struct RArray), 0);

    ary->as.heap.len = 0;
    ary->as.heap.aux.capa = 0;
    ary->as.heap.ptr = NULL;

empty_ary_alloc(VALUE klass)

    RUBY_DTRACE_CREATE_HOOK(ARRAY, 0);
    return ary_alloc_embed(klass, 0);

        rb_raise(rb_eArgError, "negative array size (or size too big)");

    if (capa > ARY_MAX_SIZE) {
        rb_raise(rb_eArgError, "array size too big");

    RUBY_DTRACE_CREATE_HOOK(ARRAY, capa);

    if (ary_embeddable_p(capa)) {
        ary = ary_alloc_embed(klass, capa);

        ary = ary_alloc_heap(klass);

        ARY_SET_PTR(ary, ary_heap_alloc_buffer(capa));
        ARY_SET_HEAP_LEN(ary, 0);

(rb_ary_new_from_args)(long n, ...)

    for (i=0; i<n; i++) {

rb_ary_tmp_new_from_values(VALUE klass, long n, const VALUE *elts)

    ary = ary_new(klass, n);

    ary_memcpy(ary, 0, n, elts);

    return rb_ary_tmp_new_from_values(rb_cArray, n, elts);

    size_t size = ary_embed_size(capa);

              sizeof(struct RArray), ec);

    ary->as.heap.len = 0;
    ary->as.heap.aux.capa = 0;
    ary->as.heap.ptr = NULL;

        rb_raise(rb_eArgError, "negative array size (or size too big)");

    if (capa > ARY_MAX_SIZE) {
        rb_raise(rb_eArgError, "array size too big");

    RUBY_DTRACE_CREATE_HOOK(ARRAY, capa);

    if (ary_embeddable_p(capa)) {
        ary = ec_ary_alloc_embed(ec, klass, capa);

        ary = ec_ary_alloc_heap(ec, klass);

        ARY_SET_PTR(ary, ary_heap_alloc_buffer(capa));
        ARY_SET_HEAP_LEN(ary, 0);

    ary_memcpy(ary, 0, n, elts);
rb_ary_hidden_new_fill(long capa)

    if (ARY_OWNS_HEAP_P(ary)) {
        if (USE_DEBUG_COUNTER &&
            !ARY_SHARED_ROOT_P(ary) &&

            RB_DEBUG_COUNTER_INC(obj_ary_extracapa);

        RB_DEBUG_COUNTER_INC(obj_ary_ptr);

        RB_DEBUG_COUNTER_INC(obj_ary_embed);

    if (ARY_SHARED_P(ary)) {
        RB_DEBUG_COUNTER_INC(obj_ary_shared);

    if (ARY_SHARED_ROOT_P(ary) && ARY_SHARED_ROOT_OCCUPIED(ary)) {
        RB_DEBUG_COUNTER_INC(obj_ary_shared_root_occupied);

static VALUE fake_ary_flags;

init_fake_ary_flags(void)

    struct RArray fake_ary = {0};

rb_setup_fake_ary(struct RArray *fake_ary, const VALUE *list, long len)

    RBASIC_CLEAR_CLASS((VALUE)fake_ary);

    return (VALUE)fake_ary;

    if (ARY_OWNS_HEAP_P(ary)) {
        return ARY_CAPA(ary) * sizeof(VALUE);

    if (ARY_SHARED_P(ary)) {
        return ARY_SHARED_ROOT(ary);
    }
    else if (ARY_SHARED_ROOT_P(ary)) {

    VALUE shared = ary_alloc_heap(0);
    FL_SET_SHARED_ROOT(shared);

    if (ARY_EMBED_P(ary)) {

        ARY_SET_PTR(shared, ptr);

        ARY_SET_HEAP_LEN(ary, len);

    ARY_SET_LEN(shared, capa);

    rb_ary_set_shared(ary, shared);

    if (ary_embeddable_p(len)) {

        ARY_SET_EMBED_LEN(subst, len);

    return rb_ary_increment_share(ary_make_shared(ary));

    return rb_convert_type_with_id(ary, T_ARRAY, "Array", idTo_ary);

#define to_ary rb_to_array_type

    return rb_check_convert_type_with_id(ary, T_ARRAY, "Array", idTo_ary);

    return rb_check_convert_type_with_id(ary, T_ARRAY, "Array", idTo_a);

    return rb_convert_type_with_id(ary, T_ARRAY, "Array", idTo_a);
rb_ary_s_new(int argc, VALUE *argv, VALUE klass)

    if (argc > 0 && FIXNUM_P(argv[0])) {

        if (size < 0) size = 0;

        ary = ary_new(klass, size);

    if (argc == 1 && !FIXNUM_P(size)) {

        rb_raise(rb_eArgError, "negative array size");

    if (len > ARY_MAX_SIZE) {
        rb_raise(rb_eArgError, "array size too big");

    ary_resize_capa(ary, len);

        rb_warn("block supersedes default value argument");

        for (i=0; i<len; i++) {

            ARY_SET_LEN(ary, i + 1);

        ary_memfill(ary, 0, len, val);

rb_ary_s_create(int argc, VALUE *argv, VALUE klass)

    if (argc > 0 && argv) {
        ary_memcpy(ary, 0, argc, argv);
        ARY_SET_LEN(ary, argc);

        rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",

    else if (idx >= ARY_MAX_SIZE) {

    if (idx >= ARY_CAPA(ary)) {
        ary_double_capa(ary, idx);

        ARY_SET_LEN(ary, idx + 1);

    ARY_SET(ary, idx, val);

    VALUE result = ary_alloc_heap(klass);
    size_t embed_capa = ary_embed_capa(result);
    if ((size_t)len <= embed_capa) {
        FL_SET_EMBED(result);

        ARY_SET_EMBED_LEN(result, len);

        VALUE shared = ary_make_shared(ary);

        FL_UNSET_EMBED(result);

        rb_ary_set_shared(result, shared);

        ARY_INCREASE_PTR(result, offset);
        ARY_SET_LEN(result, len);

ary_make_partial_step(VALUE ary, VALUE klass, long offset, long len, long step)

    const long orig_len = len;

    if (step > 0 && step >= len) {
        VALUE result = ary_new(klass, 1);

        ARY_SET_EMBED_LEN(result, 1);

    else if (step < 0 && step < -len) {

    long ustep = (step < 0) ? -step : step;
    len = roomof(len, ustep);

    long j = offset + ((step > 0) ? 0 : (orig_len - 1));

    VALUE result = ary_new(klass, len);
    if (ARY_EMBED_P(result)) {

        for (i = 0; i < len; ++i) {

        ARY_SET_EMBED_LEN(result, len);

        for (i = 0; i < len; ++i) {

        ARY_SET_LEN(result, len);

enum ary_take_pos_flags

ary_take_first_or_last_n(VALUE ary, long n, enum ary_take_pos_flags last)

        rb_raise(rb_eArgError, "negative array size");

ary_take_first_or_last(int argc, const VALUE *argv, VALUE ary, enum ary_take_pos_flags last)

    return ary_take_first_or_last_n(ary, NUM2LONG(argv[0]), last);

    VALUE target_ary = ary_ensure_room_for_push(ary, 1);

    ARY_SET_LEN(ary, idx + 1);

    VALUE target_ary = ary_ensure_room_for_push(ary, len);
    ary_memcpy0(ary, oldlen, len, argv, target_ary);
    ARY_SET_LEN(ary, oldlen + len);
    rb_ary_modify_check(ary);

    if (n == 0) return Qnil;
    if (ARY_OWNS_HEAP_P(ary) &&
        n * 3 < ARY_CAPA(ary) &&
        ARY_CAPA(ary) > ARY_DEFAULT_SIZE)

        ary_resize_capa(ary, n * 2);

    ARY_SET_LEN(ary, n - 1);

    rb_ary_modify_check(ary);
    result = ary_take_first_or_last(argc, argv, ary, ARY_TAKE_LAST);

    rb_ary_modify_check(ary);

    rb_ary_behead(ary, 1);

    rb_ary_modify_check(ary);
    result = ary_take_first_or_last(argc, argv, ary, ARY_TAKE_FIRST);

    rb_ary_behead(ary, n);

    rb_ary_modify_check(ary);

    if (!ARY_SHARED_P(ary)) {

        ARY_INCREASE_LEN(ary, -n);

        ary_mem_clear(ary, 0, n);
        ary_make_shared(ary);
    }
    else if (ARY_SHARED_ROOT_OCCUPIED(ARY_SHARED_ROOT(ary))) {
        ary_mem_clear(ary, 0, n);

    ARY_INCREASE_PTR(ary, n);
    ARY_INCREASE_LEN(ary, -n);

    if (head - sharedp < argc) {
        long room = capa - len - argc;

        head = sharedp + argc + room;

    ARY_SET_PTR(ary, head - argc);

    return ARY_SHARED_ROOT(ary);

ary_modify_for_unshift(VALUE ary, int argc)

    long new_len = len + argc;

    const VALUE *head, *sharedp;

    if (capa - (capa >> 6) <= new_len) {
        ary_double_capa(ary, new_len);

    if (new_len > ARY_DEFAULT_SIZE * 4 && !ARY_EMBED_P(ary)) {

        ary_make_shared(ary);

        return make_room_for_unshift(ary, head, (void *)sharedp, argc, capa, len);

ary_ensure_room_for_unshift(VALUE ary, int argc)

    long new_len = len + argc;

    if (len > ARY_MAX_SIZE - argc) {

    else if (! ARY_SHARED_P(ary)) {
        return ary_modify_for_unshift(ary, argc);

        return ary_modify_for_unshift(ary, argc);
    }
    else if (new_len > capa) {
        return ary_modify_for_unshift(ary, argc);

    rb_ary_modify_check(ary);
    return make_room_for_unshift(ary, head, sharedp, argc, capa, len);

    rb_ary_modify_check(ary);

    target_ary = ary_ensure_room_for_unshift(ary, argc);
    ary_memcpy0(ary, 0, argc, argv, target_ary);
    ARY_SET_LEN(ary, len + argc);

    return rb_ary_unshift_m(1, &item, ary);
    if (offset < 0 || len <= offset) {

    return rb_ary_entry_internal(ary, offset);

rb_ary_subseq_step(VALUE ary, long beg, long len, long step)

    if (beg > alen) return Qnil;
    if (beg < 0 || len < 0) return Qnil;

    if (alen < len || alen < beg + len) {

    if (len == 0) return ary_new(klass, 0);

        rb_raise(rb_eArgError, "slice step cannot be zero");

        return ary_make_partial(ary, klass, beg, len);

        return ary_make_partial_step(ary, klass, beg, len, step);

    return rb_ary_subseq_step(ary, beg, len, 1);

        return rb_ary_aref2(ary, argv[0], argv[1]);

    return rb_ary_aref1(ary, argv[0]);

    long beg, len, step;

    switch (rb_arithmetic_sequence_beg_len_step(arg, &beg, &len, &step, RARRAY_LEN(ary), 0)) {

        return rb_ary_subseq_step(ary, beg, len, step);

    return ary_take_first_or_last(argc, argv, ary, ARY_TAKE_FIRST);

ary_first(VALUE self)

    return ary_last(ary);

    return ary_take_first_or_last(argc, argv, ary, ARY_TAKE_LAST);

    if (block_given && argc == 2) {
        rb_warn("block supersedes default value argument");

    if (block_given) return rb_yield(pos);

    rb_raise(rb_eIndexError, "index %ld outside of array bounds: %ld...%ld",

    if (!NIL_P(if_none)) {
        return rb_funcallv(if_none, idCall, 0, 0);

    idx = (idx >= len) ? len : idx;

    if (!NIL_P(if_none)) {
        return rb_funcallv(if_none, idCall, 0, 0);

        rb_warn("given block not used");

        rb_warn("given block not used");

    if (!NIL_P(tmp)) return tmp;
        rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",

    if (olen < len || olen < beg + len) {

    rofs = (rptr >= optr && rptr < optr + olen) ? rptr - optr : -1;

    if (beg > ARY_MAX_SIZE - rlen) {

        target_ary = ary_ensure_room_for_push(ary, rlen-len);

        ary_mem_clear(ary, olen, beg - olen);

        ary_memcpy0(ary, beg, rlen, rptr, target_ary);

        if (olen - len > ARY_MAX_SIZE - rlen) {

        alen = olen + rlen - len;
        if (alen >= ARY_CAPA(ary)) {
            ary_double_capa(ary, alen);

        ARY_SET_LEN(ary, alen);

        rb_gc_writebarrier_remember(ary);

    rb_ary_modify_check(ary);
    if (ARY_SHARED_P(ary)) {

        rb_bug("probable buffer overflow: %ld for %ld", len, capa);

    if (len == olen) return ary;
    if (len > ARY_MAX_SIZE) {

    if (len > ARY_CAPA(ary)) {
        ary_double_capa(ary, len);

        ary_mem_clear(ary, olen, len - olen);

    else if (ARY_EMBED_P(ary)) {
        ARY_SET_EMBED_LEN(ary, len);
    }
    else if (len <= ary_embed_capa(ary)) {

        long ptr_capa = ARY_HEAP_SIZE(ary);
        bool is_malloc_ptr = !ARY_SHARED_P(ary);

        ARY_SET_EMBED_LEN(ary, len);

        if (is_malloc_ptr) ruby_sized_xfree((void *)ptr, ptr_capa);

        if (olen > len + ARY_DEFAULT_SIZE) {
            size_t new_capa = ary_heap_realloc(ary, len);
            ARY_SET_CAPA(ary, new_capa);

        ARY_SET_HEAP_LEN(ary, len);

    long offset, beg, len;

    rb_ary_modify_check(ary);

        return ary_aset_by_rb_ary_splice(ary, beg, len, argv[2]);

        return ary_aset_by_rb_ary_store(ary, offset, argv[1]);

        return ary_aset_by_rb_ary_splice(ary, beg, len, argv[1]);

    return ary_aset_by_rb_ary_store(ary, offset, argv[1]);

    rb_ary_modify_check(ary);

    if (argc == 1) return ary;

        rb_raise(rb_eIndexError, "index %ld too small for array; minimum: %ld",

    rb_ary_splice(ary, pos, 0, argv + 1, argc - 1);

    return rb_ary_length(ary);

    ARY_SET_LEN(dup, len);
recursive_join(VALUE obj, VALUE argp, int recur)

    VALUE result = arg[2];
    int *first = (int *)arg[3];

        rb_raise(rb_eArgError, "recursive array join");

        ary_join_1(obj, ary, sep, 0, result, first);

    for (i=0; i<max; i++) {

        if (i > 0 && !NIL_P(sep))

ary_join_1_str(VALUE dst, VALUE src, int *first)

        rb_enc_copy(dst, src);

        rb_raise(rb_eArgError, "recursive array join");

    args[3] = (VALUE)first;

        if (i > 0 && !NIL_P(sep))

            ary_join_1_str(result, val, first);

            ary_join_1_ary(val, ary, sep, result, val, first);

            ary_join_1_str(result, tmp, first);

            ary_join_1_ary(val, ary, sep, result, tmp, first);

    VALUE val, tmp, result;

    for (i=0; i < len_memo; i++) {

        if (NIL_P(tmp) || tmp != val) {

            rb_enc_associate(result, rb_usascii_encoding());
            i = ary_join_0(ary, sep, i, result);

            ary_join_1(ary, ary, sep, i, result, &first);

        len += RSTRING_LEN(tmp);

        len += RSTRING_LEN(val);

    else rb_enc_copy(str, s);

    return rb_ary_inspect(ary);

        const VALUE e = rb_ary_elt(ary, i);
        const VALUE elt = block_given ? rb_yield_force_blockarg(e) : e;

        if (NIL_P(key_value_pair)) {
            rb_raise(rb_eTypeError, "wrong element type %"PRIsVALUE" at %ld (expected array)",

            rb_raise(rb_eArgError, "wrong array length at %ld (expected 2, was %ld)",
    ary_reverse(p1, p2);

    do *p2-- = *p1++;
    while (--len > 0);

rotate_count(long cnt, long len)

    return (cnt < 0) ? (len - (~cnt % len) - 1) : (cnt % len);

    else if (cnt == len - 1) {

    if (--cnt > 0) ary_reverse(ptr, ptr + cnt);

    if (len > 1 && (cnt = rotate_count(cnt, len)) > 0) {

    cnt = rotate_count(cnt, len);

    ary_memcpy(rotated, 0, len, ptr + cnt);
    ary_memcpy(rotated, len, cnt, ptr);
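/*
 * Worked example of rotate_count() above: for len == 5 a positive count is
 * reduced modulo the length, so rotate_count(7, 5) == 7 % 5 == 2, while a
 * negative count maps to the equivalent left rotation,
 * rotate_count(-1, 5) == 5 - (~(-1) % 5) - 1 == 5 - 0 - 1 == 4.
 */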
struct ary_sort_data {

sort_reentered(VALUE ary)

    if (RBASIC(ary)->klass) {

sort_returned(struct ary_sort_data *data)

    sort_reentered(data->ary);

sort_1(const void *ap, const void *bp, void *dummy)

    struct ary_sort_data *data = dummy;
    VALUE retval = sort_reentered(data->ary);

    n = rb_cmpint(retval, a, b);
    sort_returned(data);

sort_2(const void *ap, const void *bp, void *dummy)

    struct ary_sort_data *data = dummy;
    VALUE retval = sort_reentered(data->ary);

        if ((long)a > (long)b) return 1;
        if ((long)a < (long)b) return -1;

    if (STRING_P(a) && STRING_P(b) && CMP_OPTIMIZABLE(STRING)) {

        return rb_float_cmp(a, b);

    retval = rb_funcallv(a, id_cmp, 1, &b);
    n = rb_cmpint(retval, a, b);
    sort_returned(data);

    VALUE tmp = ary_make_substitution(ary);
    struct ary_sort_data data;

    RBASIC_CLEAR_CLASS(tmp);

    data.receiver = ary;

    if (ARY_EMBED_P(tmp)) {
        if (ARY_SHARED_P(ary)) {
            rb_ary_unshare(ary);

        if (ARY_EMBED_LEN(tmp) > ARY_CAPA(ary)) {
            ary_resize_capa(ary, ARY_EMBED_LEN(tmp));

        ary_memcpy(ary, 0, ARY_EMBED_LEN(tmp), ARY_EMBED_PTR(tmp));
        ARY_SET_LEN(ary, ARY_EMBED_LEN(tmp));

        if (!ARY_EMBED_P(ary) && ARY_HEAP_PTR(ary) == ARY_HEAP_PTR(tmp)) {
            FL_UNSET_SHARED(ary);

        if (ARY_EMBED_P(ary)) {
            FL_UNSET_EMBED(ary);
        }
        else if (ARY_SHARED_P(ary)) {
            rb_ary_unshare(ary);

        ARY_SET_PTR(ary, ARY_HEAP_PTR(tmp));
        ARY_SET_HEAP_LEN(ary, len);
        ARY_SET_CAPA(ary, ARY_HEAP_LEN(tmp));

        ARY_SET_EMBED_LEN(tmp, 0);

rb_ary_bsearch(VALUE ary)

    VALUE index_result = rb_ary_bsearch_index(ary);

    return index_result;

rb_ary_bsearch_index(VALUE ary)

    int smaller = 0, satisfied = 0;

    while (low < high) {
        mid = low + ((high - low) / 2);

        else if (v == Qtrue) {

        else if (!RTEST(v)) {

        switch (rb_cmpint(rb_funcallv(v, id_cmp, 1, &zero), v, zero)) {

          case 1: smaller = 0; break;
          case -1: smaller = 1;

               " (must be numeric, true, false or nil)",

    if (!satisfied) return Qnil;
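/*
 * Minimal standalone sketch (names are illustrative, not from the original
 * source) of the find-minimum binary search that the bsearch loop above
 * performs: keep halving [low, high) and return the smallest index whose
 * element satisfies the predicate.
 */
static long
bsearch_min_sketch(const long *elts, long len, int (*satisfied_p)(long))
{
    long low = 0, high = len;
    while (low < high) {
        long mid = low + ((high - low) / 2);
        if (satisfied_p(elts[mid])) {
            high = mid;              /* mid satisfies: answer is mid or earlier */
        }
        else {
            low = mid + 1;           /* mid fails: answer must be after mid */
        }
    }
    return low < len ? low : -1;     /* -1 when nothing satisfies the predicate */
}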
rb_ary_sort_by_bang(VALUE ary)

    sorted = rb_block_call(ary, rb_intern("sort_by"), 0, 0, sort_by_i, 0);

rb_ary_collect(VALUE ary)

rb_ary_collect_bang(VALUE ary)

    long beg, len, i, j;

    for (i=0; i<argc; i++) {

        long end = olen < beg+len ? olen : beg+len;
        for (j = beg; j < end; j++) {

    const long end = beg + len;

rb_ary_values_at(int argc, VALUE *argv, VALUE ary)

    for (i = 0; i < argc; ++i) {
        append_values_at_single(result, ary, olen, argv[i]);

rb_ary_select(VALUE ary)

struct select_bang_arg {

select_bang_i(VALUE a)

    volatile struct select_bang_arg *arg = (void *)a;
    VALUE ary = arg->ary;

    for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); arg->len[0] = ++i1) {

    return (i1 == i2) ? Qnil : ary;

select_bang_ensure(VALUE a)

    volatile struct select_bang_arg *arg = (void *)a;
    VALUE ary = arg->ary;

    long i1 = arg->len[0], i2 = arg->len[1];

    if (i2 < len && i2 < i1) {

        ARY_SET_LEN(ary, i2 + tail);

rb_ary_select_bang(VALUE ary)

    struct select_bang_arg args;

    args.len[0] = args.len[1] = 0;

rb_ary_keep_if(VALUE ary)

    rb_ary_select_bang(ary);

ary_resize_smaller(VALUE ary, long len)

        ARY_SET_LEN(ary, len);
        if (len * 2 < ARY_CAPA(ary) &&
            ARY_CAPA(ary) > ARY_DEFAULT_SIZE) {
            ary_resize_capa(ary, len * 2);

    for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); i1++) {

    ary_resize_smaller(ary, i2);

    for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); i1++) {

    ary_resize_smaller(ary, i2);

    if (pos < 0) return Qnil;

    ARY_INCREASE_LEN(ary, -1);

ary_slice_bang_by_rb_ary_splice(VALUE ary, long pos, long len)

    else if (pos < -orig_len) {

    else if (orig_len < pos) {

    if (orig_len < pos + len) {
        len = orig_len - pos;

    rb_ary_splice(ary, pos, len, 0, 0);

rb_ary_slice_bang(int argc, VALUE *argv, VALUE ary)

    rb_ary_modify_check(ary);

        return ary_slice_bang_by_rb_ary_splice(ary, pos, len);

    return ary_slice_bang_by_rb_ary_splice(ary, pos, len);

reject_bang_i(VALUE a)

    volatile struct select_bang_arg *arg = (void *)a;
    VALUE ary = arg->ary;

    for (i1 = i2 = 0; i1 < RARRAY_LEN(ary); arg->len[0] = ++i1) {

    return (i1 == i2) ? Qnil : ary;

ary_reject_bang(VALUE ary)

    struct select_bang_arg args;
    rb_ary_modify_check(ary);

    args.len[0] = args.len[1] = 0;

rb_ary_reject_bang(VALUE ary)

    return ary_reject_bang(ary);

rb_ary_reject(VALUE ary)

    ary_reject(ary, rejected_ary);
    return rejected_ary;

rb_ary_delete_if(VALUE ary)

    ary_reject_bang(ary);
take_items(VALUE obj, long n)

    if (n == 0) return result;

    args[0] = result; args[1] = (VALUE)n;
    if (UNDEF_P(rb_check_block_call(obj, idEach, 0, 0, take_i, (VALUE)args)))
        rb_raise(rb_eTypeError, "wrong argument type %"PRIsVALUE" (must respond to :each)",

    for (i=0; i<argc; i++) {
        argv[i] = take_items(argv[i], len);

    int arity = rb_block_arity();

        for (j=0; j<argc; j++) {
            tmp[j+1] = rb_ary_elt(argv[j], i);

        for (j=0; j<argc; j++) {

    for (i=0; i<len; i++) {

        for (j=0; j<argc; j++) {

rb_ary_transpose(VALUE ary)

    long elen = -1, alen, i, j;
    VALUE tmp, result = 0;

    for (i=0; i<alen; i++) {
        tmp = to_ary(rb_ary_elt(ary, i));

            for (j=0; j<elen; j++) {

            rb_raise(rb_eIndexError, "element size differs (%ld should be %ld)",

        for (j=0; j<elen; j++) {
            rb_ary_store(rb_ary_elt(result, j), i, rb_ary_elt(tmp, j));

    rb_ary_modify_check(copy);
    orig = to_ary(orig);
    if (copy == orig) return copy;

    if (RARRAY_LEN(orig) <= ary_embed_capa(copy)) {

    else if (ARY_EMBED_P(orig)) {
        long len = ARY_EMBED_LEN(orig);
        VALUE *ptr = ary_heap_alloc_buffer(len);

        FL_UNSET_EMBED(copy);
        ARY_SET_PTR(copy, ptr);
        ARY_SET_LEN(copy, len);
        ARY_SET_CAPA(copy, len);

        VALUE shared_root = ary_make_shared(orig);
        FL_UNSET_EMBED(copy);
        ARY_SET_PTR(copy, ARY_HEAP_PTR(orig));
        ARY_SET_LEN(copy, ARY_HEAP_LEN(orig));
        rb_ary_set_shared(copy, shared_root);

    rb_ary_modify_check(ary);
    if (ARY_SHARED_P(ary)) {
        rb_ary_unshare(ary);

        ARY_SET_EMBED_LEN(ary, 0);

        ARY_SET_LEN(ary, 0);
        if (ARY_DEFAULT_SIZE * 2 < ARY_CAPA(ary)) {
            ary_resize_capa(ary, ARY_DEFAULT_SIZE * 2);

    long beg = 0, end = 0, len = 0;

    if (beg < 0) beg = 0;

    if (beg >= ARY_MAX_SIZE || len > ARY_MAX_SIZE - beg) {
        rb_raise(rb_eArgError, "argument too big");

    if (end >= ARY_CAPA(ary)) {
        ary_resize_capa(ary, end);

    ARY_SET_LEN(ary, end);

    if (UNDEF_P(item)) {

        for (i=beg; i<end; i++) {

        ary_memfill(ary, beg, len, item);

    long len, xlen, ylen;

    ARY_SET_LEN(z, len);

rb_ary_concat_multi(int argc, VALUE *argv, VALUE ary)

    rb_ary_modify_check(ary);

    else if (argc > 1) {

        for (i = 0; i < argc; i++) {

        ary_append(ary, args);

    return ary_append(x, to_ary(y));

        rb_raise(rb_eArgError, "negative argument");

        rb_raise(rb_eArgError, "argument too big");

    ARY_SET_LEN(ary2, len);

    ary_memcpy(ary2, 0, t, ptr);
    while (t <= len/2) {
recursive_equal(VALUE ary1, VALUE ary2, int recur)

    const VALUE *p1, *p2;

    if (recur) return Qtrue;

    for (i = 0; i < len1; i++) {

    if (ary1 == ary2) return Qtrue;

recursive_eql(VALUE ary1, VALUE ary2, int recur)

    if (recur) return Qtrue;

        if (!rb_eql(rb_ary_elt(ary1, i), rb_ary_elt(ary2, i)))

    if (ary1 == ary2) return Qtrue;

ary_hash_values(long len, const VALUE *elements, const VALUE ary)

    for (i=0; i<len; i++) {
        n = rb_hash(elements[i]);

rb_ary_hash_values(long len, const VALUE *elements)

    return ary_hash_values(len, elements, 0);

rb_ary_hash(VALUE ary)

recursive_cmp(VALUE ary1, VALUE ary2, int recur)

    if (recur) return Qundef;

    for (i=0; i<len; i++) {
        VALUE e1 = rb_ary_elt(ary1, i), e2 = rb_ary_elt(ary2, i);
        VALUE v = rb_funcallv(e1, id_cmp, 1, &e2);

    if (ary1 == ary2) return INT2FIX(0);

    if (!UNDEF_P(v)) return v;

    rb_hash_add_new_element(hash, elt, elt);

ary_tmp_hash_new(VALUE ary)

    VALUE hash = rb_hash_new_with_size(size);

    RBASIC_CLEAR_CLASS(hash);

ary_make_hash(VALUE ary)

    VALUE hash = ary_tmp_hash_new(ary);
    return ary_add_hash(hash, ary);

    rb_hash_add_new_element(hash, k, v);

ary_make_hash_by(VALUE ary)

    VALUE hash = ary_tmp_hash_new(ary);
    return ary_add_hash_by(hash, ary);

    ary2 = to_ary(ary2);
    if (RARRAY_LEN(ary2) == 0) {
        return ary_make_shared_copy(ary1);
    }

        VALUE elt = rb_ary_elt(ary1, i);
        if (rb_ary_includes_by_eql(ary2, elt)) continue;

    hash = ary_make_hash(ary2);

        if (rb_hash_stlike_lookup(hash, RARRAY_AREF(ary1, i), NULL)) continue;

rb_ary_difference_multi(int argc, VALUE *argv, VALUE ary)

    bool *is_hash = ALLOCV_N(bool, t0, argc);

    for (i = 0; i < argc; i++) {
        argv[i] = to_ary(argv[i]);
        is_hash[i] = (length > SMALL_ARRAY_LEN && RARRAY_LEN(argv[i]) > SMALL_ARRAY_LEN);
        if (is_hash[i]) argv[i] = ary_make_hash(argv[i]);

        VALUE elt = rb_ary_elt(ary, i);
        for (j = 0; j < argc; j++) {

                if (rb_hash_stlike_lookup(argv[j], elt, NULL))

                if (rb_ary_includes_by_eql(argv[j], elt)) break;

    VALUE hash, ary3, v;

    ary2 = to_ary(ary2);

            if (!rb_ary_includes_by_eql(ary2, v)) continue;
            if (rb_ary_includes_by_eql(ary3, v)) continue;

    hash = ary_make_hash(ary2);

        if (rb_hash_stlike_delete(hash, &vv, 0)) {

rb_ary_intersection_multi(int argc, VALUE *argv, VALUE ary)

    for (i = 0; i < argc; i++) {
        result = rb_ary_and(result, argv[i]);

ary_hash_orset(st_data_t *key, st_data_t *value, st_data_t arg, int existing)

    if (existing) return ST_STOP;
    *key = *value = (VALUE)arg;

        VALUE elt = rb_ary_elt(ary, i);
        if (rb_ary_includes_by_eql(ary_union, elt)) continue;

        if (!rb_hash_stlike_update(hash, (st_data_t)elt, ary_hash_orset, (st_data_t)elt)) {

    ary2 = to_ary(ary2);

        rb_ary_union(ary3, ary1);
        rb_ary_union(ary3, ary2);

    hash = ary_make_hash(ary1);
    rb_ary_union_hash(hash, ary2);

    return rb_hash_values(hash);

rb_ary_union_multi(int argc, VALUE *argv, VALUE ary)

    for (i = 0; i < argc; i++) {
        argv[i] = to_ary(argv[i]);

    if (sum <= SMALL_ARRAY_LEN) {

        rb_ary_union(ary_union, ary);
        for (i = 0; i < argc; i++) rb_ary_union(ary_union, argv[i]);

    hash = ary_make_hash(ary);
    for (i = 0; i < argc; i++) rb_ary_union_hash(hash, argv[i]);

    return rb_hash_values(hash);

    VALUE hash, v, result, shorter, longer;

    ary2 = to_ary(ary2);

        if (rb_ary_includes_by_eql(ary2, v)) return Qtrue;

    hash = ary_make_hash(shorter);

        if (rb_hash_stlike_lookup(hash, vv, 0)) {
ary_max_generic(VALUE ary, long i, VALUE vmax)

        if (rb_cmpint(rb_funcallv(vmax, id_cmp, 1, &v), vmax, v) < 0) {

ary_max_opt_fixnum(VALUE ary, long i, VALUE vmax)

    for (; i < n; ++i) {

            if ((long)vmax < (long)v) {

            return ary_max_generic(ary, i, vmax);

ary_max_opt_float(VALUE ary, long i, VALUE vmax)

    for (; i < n; ++i) {

            if (rb_float_cmp(vmax, v) < 0) {

            return ary_max_generic(ary, i, vmax);

ary_max_opt_string(VALUE ary, long i, VALUE vmax)

    for (; i < n; ++i) {

            return ary_max_generic(ary, i, vmax);

        return rb_nmin_run(ary, num, 0, 1, 1);

            if (UNDEF_P(result) || rb_cmpint(rb_yield_values(2, v, result), v, result) > 0) {

        if (FIXNUM_P(result) && CMP_OPTIMIZABLE(INTEGER)) {
            return ary_max_opt_fixnum(ary, 1, result);
        }
        else if (STRING_P(result) && CMP_OPTIMIZABLE(STRING)) {
            return ary_max_opt_string(ary, 1, result);

            return ary_max_opt_float(ary, 1, result);

            return ary_max_generic(ary, 1, result);

    if (UNDEF_P(result)) return Qnil;

ary_min_generic(VALUE ary, long i, VALUE vmin)

        if (rb_cmpint(rb_funcallv(vmin, id_cmp, 1, &v), vmin, v) > 0) {

ary_min_opt_fixnum(VALUE ary, long i, VALUE vmin)

    for (; i < n; ++i) {

            if ((long)vmin > (long)a) {

            return ary_min_generic(ary, i, vmin);

ary_min_opt_float(VALUE ary, long i, VALUE vmin)

    for (; i < n; ++i) {

            if (rb_float_cmp(vmin, a) > 0) {

            return ary_min_generic(ary, i, vmin);

ary_min_opt_string(VALUE ary, long i, VALUE vmin)

    for (; i < n; ++i) {

            return ary_min_generic(ary, i, vmin);

        return rb_nmin_run(ary, num, 0, 0, 1);

            if (UNDEF_P(result) || rb_cmpint(rb_yield_values(2, v, result), v, result) < 0) {

        if (FIXNUM_P(result) && CMP_OPTIMIZABLE(INTEGER)) {
            return ary_min_opt_fixnum(ary, 1, result);
        }
        else if (STRING_P(result) && CMP_OPTIMIZABLE(STRING)) {
            return ary_min_opt_string(ary, 1, result);

            return ary_min_opt_float(ary, 1, result);

            return ary_min_generic(ary, 1, result);

    if (UNDEF_P(result)) return Qnil;

rb_ary_minmax(VALUE ary)

    return rb_assoc_new(rb_ary_min(0, 0, ary), rb_ary_max(0, 0, ary));
push_value(st_data_t key, st_data_t val, st_data_t ary)

rb_ary_uniq_bang(VALUE ary)

    rb_ary_modify_check(ary);

        hash = ary_make_hash_by(ary);

        hash = ary_make_hash(ary);

    rb_ary_modify_check(ary);
    ARY_SET_LEN(ary, 0);
    if (ARY_SHARED_P(ary)) {
        rb_ary_unshare(ary);

        ary_resize_capa(ary, hash_size);

rb_ary_uniq(VALUE ary)

        hash = ary_make_hash_by(ary);
        uniq = rb_hash_values(hash);

        hash = ary_make_hash(ary);
        uniq = rb_hash_values(hash);

rb_ary_compact_bang(VALUE ary)

    ary_resize_smaller(ary, n);

rb_ary_compact(VALUE ary)

    rb_ary_compact_bang(ary);

rb_ary_count(int argc, VALUE *argv, VALUE ary)

        VALUE obj = argv[0];

            rb_warn("given block not used");

flatten(VALUE ary, int level)

    VALUE stack, result, tmp = 0, elt;

    ARY_SET_LEN(result, i);

    stack = ary_new(0, ARY_DEFAULT_SIZE);

    rb_hash_aset(memo, ary, Qtrue);
    rb_hash_aset(memo, tmp, Qtrue);

        if (level >= 0 && RARRAY_LEN(stack) / 2 >= level) {

            if (RBASIC(result)->klass) {
                rb_hash_clear(memo);

            if (rb_hash_aref(memo, tmp) == Qtrue) {
                rb_hash_clear(memo);
                rb_raise(rb_eArgError, "tried to flatten recursive array");

            rb_hash_aset(memo, tmp, Qtrue);

        rb_hash_delete(memo, ary);

        rb_hash_clear(memo);

rb_ary_flatten_bang(int argc, VALUE *argv, VALUE ary)

    int mod = 0, level = -1;

    rb_ary_modify_check(ary);

    if (level == 0) return Qnil;

    result = flatten(ary, level);
    if (result == ary) {

    if (mod) ARY_SET_EMBED_LEN(result, 0);

rb_ary_flatten(int argc, VALUE *argv, VALUE ary)

    if (level == 0) return ary_make_shared_copy(ary);

    result = flatten(ary, level);
    if (result == ary) {
        result = ary_make_shared_copy(ary);

#define RAND_UPTO(max) (long)rb_random_ulong_limited((randgen), (max)-1)

    long j = RAND_UPTO(i);

    rb_ary_shuffle_bang(ec, ary, randgen);

    .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
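/*
 * Sketch of the in-place Fisher-Yates shuffle that rb_ary_shuffle_bang drives
 * with RAND_UPTO above, written against a plain VALUE buffer.  "randgen" is
 * the Random source the macro expects; the helper name is illustrative.
 */
static void
shuffle_sketch(VALUE *ptr, long len, VALUE randgen)
{
    for (long i = len - 1; i > 0; i--) {
        long j = RAND_UPTO(i + 1);   /* uniform index in 0..i */
        VALUE tmp = ptr[i];
        ptr[i] = ptr[j];
        ptr[j] = tmp;
    }
}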
    long n, len, i, j, k, idx[10];
    long rnds[numberof(idx)];
    long memo_threshold;

        return rb_ary_elt(ary, i);

    if (n < 0) rb_raise(rb_eArgError, "negative sample number");

    if (n <= numberof(idx)) {
        for (i = 0; i < n; ++i) {
            rnds[i] = RAND_UPTO(len - i);

    if (len < k && n <= numberof(idx)) {
        for (i = 0; i < n; ++i) {

        return rb_ary_new_from_args(1, RARRAY_AREF(ary, i));

        if (j >= i) l = i, g = ++j;
        if (k >= l && (++k >= g)) ++k;

    if (n <= numberof(idx)) {
        long sorted[numberof(idx)];
        sorted[0] = idx[0] = rnds[0];
        for (i=1; i<n; i++) {

            for (j = 0; j < i; ++j) {
                if (k < sorted[j]) break;

            memmove(&sorted[j+1], &sorted[j], sizeof(sorted[0])*(i-j));
            sorted[j] = idx[i] = k;

        for (i=0; i<n; i++) {

    else if (n <= memo_threshold / 2) {

        st_table *memo = st_init_numtable_with_size(n);

        for (i=0; i<n; i++) {
            long r = RAND_UPTO(len-i) + i;

            if (r > max_idx) max_idx = r;

        if (len <= max_idx) n = 0;
        else if (n > len) n = len;

        for (i=0; i<n; i++) {
            long j2 = j = ptr_result[i];

            if (st_lookup(memo, (st_data_t)i, &value)) i2 = (long)value;
            if (st_lookup(memo, (st_data_t)j, &value)) j2 = (long)value;
            st_insert(memo, (st_data_t)j, (st_data_t)i2);
            ptr_result[i] = ptr_ary[j2];

        st_free_table(memo);

        RBASIC_CLEAR_CLASS(result);

        for (i=0; i<n; i++) {
            j = RAND_UPTO(len-i) + i;

            ptr_result[j] = ptr_result[i];

        RBASIC_SET_CLASS_RAW(result, rb_cArray);

    ARY_SET_LEN(result, n);

    if (mul <= 0) return INT2FIX(0);

    return rb_fix_mul_fix(rb_ary_length(self), n);
rb_ary_cycle(int argc, VALUE *argv, VALUE ary)

    if (argc == 0 || NIL_P(argv[0])) {

    if (n <= 0) return Qnil;

    while (RARRAY_LEN(ary) > 0 && (n < 0 || 0 < n--)) {

yield_indexed_values(const VALUE values, const long r, const long *const p)

    for (i = 0; i < r; i++) ARY_SET(result, i, RARRAY_AREF(values, p[i]));
    ARY_SET_LEN(result, r);

    return !RBASIC(values)->klass;

permute0(const long n, const long r, long *const p, char *const used, const VALUE values)

    long i = 0, index = 0;

        const char *const unused = memchr(&used[i], 0, n-i);

            for (i = 0; i < n; ++i) {
                if (used[i]) continue;

            if (!yield_indexed_values(values, r, p)) {

descending_factorial(long from, long how_many)

        while (--how_many > 0) {

            cnt = rb_int_mul(cnt, LONG2FIX(v));

binomial_coefficient(long comb, long size)

    if (comb > size-comb) {

    else if (comb == 0) {

    for (i = 1; i < comb; ++i) {
        r = rb_int_mul(r, LONG2FIX(size - i));
        r = rb_int_idiv(r, LONG2FIX(i + 1));
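/*
 * Worked example of the binomial_coefficient() loop above: for comb = 2 and
 * size = 5, with r seeded from size, the single iteration computes
 * (5 * 4) / 2 = 10 = C(5, 2).  Multiplying by (size - i) before dividing by
 * (i + 1) keeps every intermediate value an exact integer.
 */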
    return descending_factorial(n, k);

rb_ary_permutation(int argc, VALUE *argv, VALUE ary)

    if (r < 0 || n < r) {

        long *p = ALLOCV_N(long, t0, r+roomof(n, sizeof(long)));
        char *used = (char*)(p + r);
        VALUE ary0 = ary_make_shared_copy(ary);
        RBASIC_CLEAR_CLASS(ary0);

        permute0(n, r, p, used, ary0);

combinate0(const long len, const long n, long *const stack, const VALUE values)

        for (lev++; lev < n; lev++) {
            stack[lev+1] = stack[lev]+1;

        if (!yield_indexed_values(values, n, stack+1)) {

            if (lev == 0) return;

        } while (stack[lev+1]+n == len+lev+1);

    return binomial_coefficient(k, n);

    if (n < 0 || len < n) {

        VALUE ary0 = ary_make_shared_copy(ary);

        long *stack = ALLOCV_N(long, t0, n+1);

        RBASIC_CLEAR_CLASS(ary0);
        combinate0(len, n, stack, ary0);

rpermute0(const long n, const long r, long *const p, const VALUE values)

    long i = 0, index = 0;

        if (++index < r-1) {

            for (i = 0; i < n; ++i) {

            if (!yield_indexed_values(values, r, p)) {

            if (index <= 0) return;

        } while ((i = ++p[--index]) >= n);

rb_ary_repeated_permutation(VALUE ary, VALUE num)

        VALUE ary0 = ary_make_shared_copy(ary);
        RBASIC_CLEAR_CLASS(ary0);

        rpermute0(n, r, p, ary0);

rcombinate0(const long n, const long r, long *const p, const long rest, const VALUE values)

    long i = 0, index = 0;

        if (++index < r-1) {

            for (; i < n; ++i) {

            if (!yield_indexed_values(values, r, p)) {

            if (index <= 0) return;

        } while ((i = ++p[--index]) >= n);

    return binomial_coefficient(k, n + k - 1);

rb_ary_repeated_combination(VALUE ary, VALUE num)

    else if (len == 0) {

        VALUE ary0 = ary_make_shared_copy(ary);
        RBASIC_CLEAR_CLASS(ary0);

        rcombinate0(len, n, p, n, ary0);
rb_ary_product(int argc, VALUE *argv, VALUE ary)

    int *counters = ALLOCV_N(int, t1, n);

    RBASIC_CLEAR_CLASS(t0);

    for (i = 1; i < n; i++) arrays[i] = Qnil;
    for (i = 1; i < n; i++) arrays[i] = to_ary(argv[i-1]);

    for (i = 0; i < n; i++) counters[i] = 0;

        for (i = 0; i < n; i++) {

            arrays[i] = ary_make_shared_copy(arrays[i]);

        for (i = 0; i < n; i++) {

            if (MUL_OVERFLOW_LONG_P(resultlen, k))

        for (j = 0; j < n; j++) {

        if (NIL_P(result)) {
            FL_SET(t0, RARRAY_SHARED_ROOT_FLAG);

        if (!FL_TEST(t0, RARRAY_SHARED_ROOT_FLAG)) {

            FL_UNSET(t0, RARRAY_SHARED_ROOT_FLAG);

        while (counters[m] == RARRAY_LEN(arrays[m])) {

            if (--m < 0) goto done;

    return NIL_P(result) ? ary : result;

        rb_raise(rb_eArgError, "attempt to take negative size");

rb_ary_take_while(VALUE ary)

    return rb_ary_take(ary, LONG2FIX(i));

        rb_raise(rb_eArgError, "attempt to drop negative size");

rb_ary_drop_while(VALUE ary)

    return rb_ary_drop(ary, LONG2FIX(i));

rb_ary_any_p(int argc, VALUE *argv, VALUE ary)

        rb_warn("given block not used");

        for (i = 0; i < len; ++i) {

rb_ary_all_p(int argc, VALUE *argv, VALUE ary)

        rb_warn("given block not used");

        for (i = 0; i < len; ++i) {

rb_ary_none_p(int argc, VALUE *argv, VALUE ary)

        rb_warn("given block not used");

        for (i = 0; i < len; ++i) {

rb_ary_one_p(int argc, VALUE *argv, VALUE ary)

        rb_warn("given block not used");

            if (result) return Qfalse;

        for (i = 0; i < len; ++i) {

            if (result) return Qfalse;

    if (result) return Qfalse;

    self = rb_ary_at(self, *argv);
    if (!--argc) return self;

    return rb_obj_dig(argc, argv, self, Qnil);
finish_exact_sum(long n, VALUE r, VALUE v, int z)

        v = rb_rational_plus(r, v);

    goto init_is_a_value;

        else if (RB_BIGNUM_TYPE_P(e))
            v = rb_big_plus(e, v);

            r = rb_rational_plus(r, e);

    v = finish_exact_sum(n, r, v, argc!=0);

    v = finish_exact_sum(n, r, v, i!=0);

    goto has_float_value;

        else if (RB_BIGNUM_TYPE_P(e))

        if (isnan(f)) continue;

        if (isinf(f) && signbit(x) != signbit(f))

            if (isinf(f)) continue;

        if (fabs(f) >= fabs(x))

    goto has_some_value;
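/*
 * The float branch above (the fabs(f) >= fabs(x) test plus the isnan/isinf
 * special cases) follows the shape of a compensated, Kahan-Babuska style
 * summation.  A minimal standalone sketch of that idea, independent of the
 * Ruby object machinery:
 */
static double
compensated_sum_sketch(const double *v, long n)
{
    double f = 0.0, c = 0.0;            /* running sum and running compensation */
    for (long i = 0; i < n; i++) {
        double x = v[i];
        double t = f + x;
        if (fabs(f) >= fabs(x))
            c += (f - t) + x;           /* low-order bits lost from x */
        else
            c += (x - t) + f;           /* low-order bits lost from f */
        f = t;
    }
    return f + c;
}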
rb_ary_deconstruct(VALUE ary)

    fake_ary_flags = init_fake_ary_flags();

    rb_vm_register_global_object(rb_cArray_empty_frozen);

#include "array.rbinc"