#include "internal/imemo.h"
#include "internal/object.h"
#include "internal/st.h"
#include "vm_callinfo.h"
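/*
 * Routines for internal-use imemo objects (T_IMEMO): allocation, GC marking
 * and compaction support, memory-size accounting, and freeing. imemo objects
 * back VM-internal data (call caches, call infos, crefs, envs, method
 * entries, MEMOs, svars, tmpbufs, object fields, ...) and never surface as
 * ordinary Ruby-level objects.
 */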
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
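/* Maps an imemo_type to a printable name, used in debugging and crash output. */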
const char *
rb_imemo_name(enum imemo_type type)
{
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
        IMEMO_NAME(callcache);
        IMEMO_NAME(constcache);
        IMEMO_NAME(throw_data);
#undef IMEMO_NAME
    }
    rb_bug("unreachable");
}
VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size, bool is_shareable)
{
    NEWOBJ_OF(obj, void, v0, flags, size, 0);
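/*
 * imemo_tmpbuf wraps a malloc'ed scratch buffer in a GC-visible object. A new
 * tmpbuf is registered with rb_gc_register_pinning_obj(), presumably so the
 * wrapper is never moved while native code holds the raw pointer, and
 * rb_alloc_tmp_buffer_with_count() stores the wrapper into *store before
 * allocating the buffer itself, apparently so the wrapper is already reachable
 * from the caller's stack when ruby_xmalloc() runs.
 */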
rb_imemo_tmpbuf_new(void)
{
    rb_gc_register_pinning_obj((VALUE)obj);

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    *store = (VALUE)tmpbuf;
    void *ptr = ruby_xmalloc(size);
void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}
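/*
 * rb_free_tmp_buffer() releases a buffer obtained from the allocators above;
 * the raw pointer is detached with an atomic exchange so it is freed at most
 * once. A minimal usage sketch (this is essentially what the public ALLOCV /
 * ALLOCV_END macros expand to; the volatile stack slot keeps the imemo_tmpbuf
 * alive while native code uses the buffer):
 *
 *     volatile VALUE store = 0;
 *     char *buf = rb_alloc_tmp_buffer(&store, len);  // len is in bytes
 *     // ... use buf ...
 *     rb_free_tmp_buffer(&store);
 */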
void
rb_free_tmp_buffer(volatile VALUE *store)
{
    void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
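/*
 * MEMO construction: the freshly allocated MEMO is passed to
 * rb_gc_register_pinning_obj() and its three slots (v1, v2, u3.value) are
 * filled by direct writes. The casts strip the const qualifier from the MEMO
 * fields; presumably no write barrier is needed because the object has just
 * been allocated.
 */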
    struct MEMO *memo = IMEMO_NEW(struct MEMO, imemo_memo, 0);

    rb_gc_register_pinning_obj((VALUE)memo);

    *((VALUE *)&memo->v1) = a;
    *((VALUE *)&memo->v2) = b;
    *((VALUE *)&memo->u3.value) = c;
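/*
 * imemo_fields objects hold an object's instance-variable storage. When the
 * embedded representation fits in an allocatable slot size the VALUEs are
 * stored inline; otherwise a bare struct rb_fields is allocated and the data
 * lives out of line, or, for "too complex" shapes, in an st_table.
 */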
static VALUE
imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    if (rb_gc_size_allocatable_p(embedded_size)) {
        VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size, shareable);
    }
    else {
        VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
VALUE
rb_imemo_fields_new(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new(owner, capa, shareable);
}
static VALUE
imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(capa);
    return fields;
}
VALUE
rb_imemo_fields_new_complex(VALUE owner, size_t capa, bool shareable)
{
    return imemo_fields_new_complex(owner, capa, shareable);
}
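/*
 * st_foreach callbacks which, judging by their names and call sites, fire a
 * write barrier for every value held in a fields table after it has been
 * bulk-populated (see rb_imemo_fields_new_complex_tbl and
 * rb_imemo_fields_clone below).
 */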
static int
imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)

static int
imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
VALUE
rb_imemo_fields_new_complex_tbl(VALUE owner, st_table *tbl, bool shareable)
{
    VALUE fields = rb_imemo_new(imemo_fields, owner, sizeof(struct rb_fields), shareable);
    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;

    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
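/*
 * Copies an imemo_fields object. For too-complex shapes the st_table contents
 * are duplicated into a fresh table and each value is re-announced to the GC
 * via imemo_fields_complex_wb_i; for regular shapes the embedded VALUEs are
 * MEMCPY'ed. In both paths the clone inherits the source's shape id and is
 * created as non-shareable (the `false` arguments).
 */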
VALUE
rb_imemo_fields_clone(VALUE fields_obj)
{
    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
    VALUE clone;

    if (rb_shape_too_complex_p(shape_id)) {
        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);

        clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table, false);
        st_replace(dest_table, src_table);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
    }
    else {
        clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id), false);
        RBASIC_SET_SHAPE_ID(clone, shape_id);
        VALUE *fields = rb_imemo_fields_ptr(clone);
        attr_index_t fields_count = RSHAPE_LEN(shape_id);
        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj), VALUE, fields_count);
        for (attr_index_t i = 0; i < fields_count; i++) {
            RB_OBJ_WRITTEN(clone, Qundef, fields[i]);
        }
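/*
 * Resets an imemo_fields object to an empty state: the shape is reset to the
 * matching root shape (too-complex or not) and the class pointer is cleared.
 */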
void
rb_imemo_fields_clear(VALUE fields_obj)
{
    if (rb_shape_obj_too_complex_p(fields_obj)) {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
    }
    else {
        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    }

    RBASIC_CLEAR_CLASS(fields_obj);
}
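/*
 * Accumulates the auxiliary memory attributed to an imemo beyond its heap
 * slot: an iseq delegates to rb_iseq_memsize(), and imemo_fields adds either
 * the st_table footprint (too-complex shapes) or capacity * sizeof(VALUE).
 */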
size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_callcache:

      case imemo_constcache:

      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

      case imemo_throw_data:

      case imemo_fields:
        if (rb_shape_obj_too_complex_p(obj)) {
            size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
        }
        else {
            size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) * sizeof(VALUE);
        }

        rb_bug("unreachable");
static bool
moved_or_living_object_strictly_p(VALUE obj)
    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

      case VM_METHOD_TYPE_ISEQ:
        rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
        rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

        if (!reference_updating) {
            if (def->iseq_overload && ment->defined_class) {
                rb_gc_mark((VALUE)ment);
            }
        }

      case VM_METHOD_TYPE_ATTRSET:
      case VM_METHOD_TYPE_IVAR:
        rb_gc_mark_and_move(&def->body.attr.location);

      case VM_METHOD_TYPE_BMETHOD:
        if (!rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&def->body.bmethod.proc);
        }

      case VM_METHOD_TYPE_ALIAS:
        rb_gc_mark_and_move_ptr(&def->body.alias.original_me);

      case VM_METHOD_TYPE_REFINED:
        rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);

      case VM_METHOD_TYPE_CFUNC:
      case VM_METHOD_TYPE_ZSUPER:
      case VM_METHOD_TYPE_MISSING:
      case VM_METHOD_TYPE_OPTIMIZED:
      case VM_METHOD_TYPE_UNDEF:
      case VM_METHOD_TYPE_NOTIMPLEMENTED:
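/*
 * rb_imemo_mark_and_move() is the GC entry point for every imemo type. It is
 * used both for marking and for compaction's reference-updating pass, selected
 * by the reference_updating flag: rb_gc_mark_and_move() marks the referenced
 * object in the first case and rewrites the slot with rb_gc_location() in the
 * second. A minimal sketch of what that one call folds together (assuming a
 * VALUE *slot; the real code below operates on struct members):
 *
 *     if (reference_updating) {
 *         *slot = rb_gc_location(*slot);    // compaction: chase the moved object
 *     }
 *     else {
 *         rb_gc_mark_movable(*slot);        // marking: mark without pinning
 *     }
 */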
void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_callcache: {

        if (UNDEF_P(cc->klass)) {
        }
        else if (reference_updating) {
            if (moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
            }
            else {
                vm_cc_invalidate(cc);
            }
        }

        if ((vm_cc_super_p(cc) || vm_cc_refinement_p(cc))) {
            rb_gc_mark_movable((VALUE)cc->cme_);
        }
      case imemo_constcache: {

        if ((ice->flags & IMEMO_CONST_CACHE_SHAREABLE) || !rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&ice->value);
        }

      case imemo_cref: {

        if (!rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&cref->klass_or_self);
        }
        rb_gc_mark_and_move_ptr(&cref->next);
        if (!rb_gc_checking_shareable()) {
            rb_gc_mark_and_move(&cref->refinements);
        }
      case imemo_env: {

        if (LIKELY(env->ep)) {
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (VM_ENV_LOCAL_P(env->ep) && VM_ENV_BOXED_P(env->ep)) {
                const rb_box_t *box = VM_ENV_BOX(env->ep);
                if (BOX_USER_P(box)) {
                    rb_gc_mark_and_move((VALUE *)&box->box_object);
                }
            }

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }
      case imemo_ifunc: {

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);

      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

      case imemo_svar: {

        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

      case imemo_throw_data: {

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

      case imemo_tmpbuf: {

        if (!reference_updating) {
            rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
        }
        if (!rb_gc_checking_shareable()) {

      case imemo_fields:
        if (rb_shape_obj_too_complex_p(obj)) {
            st_table *tbl = rb_imemo_fields_complex_tbl(obj);
            if (reference_updating) {
                rb_gc_ref_update_table_values_only(tbl);
            }
            else {
                rb_mark_tbl_no_pin(tbl);
            }
        }
        else {
            VALUE *fields = rb_imemo_fields_ptr(obj);
            attr_index_t len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
            for (attr_index_t i = 0; i < len; i++) {
                rb_gc_mark_and_move(&fields[i]);
            }
        }

        rb_bug("unreachable");
static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    return ID_TABLE_CONTINUE;
}

    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
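/*
 * Frees the out-of-line storage owned by an imemo_fields object: the st_table
 * for too-complex shapes, otherwise the external fields buffer.
 */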
static void
imemo_fields_free(struct rb_fields *fields)
{
    if (rb_shape_obj_too_complex_p((VALUE)fields)) {
        st_free_table(fields->as.complex.table);
    }

        xfree(fields->as.external.ptr);
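/*
 * Per-type cleanup when an imemo slot is swept. Most arms only bump a debug
 * counter; imemo_callinfo also releases its kwarg struct once its reference
 * count reaches zero, and imemo_fields delegates to imemo_fields_free().
 */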
void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);

      case imemo_callinfo: {

        if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);

        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);

      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);

      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);

      case imemo_env:
        RB_DEBUG_COUNTER_INC(obj_imemo_env);

      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);

      case imemo_iseq:
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);

      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);

      case imemo_ment:
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);

      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);

      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);

      case imemo_tmpbuf:
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);

      case imemo_fields:
        imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
        RB_DEBUG_COUNTER_INC(obj_imemo_fields);

        rb_bug("unreachable");