5#include "internal/imemo.h"
6#include "internal/object.h"
7#include "internal/st.h"
8#include "vm_callinfo.h"
/* Forward declarations for iseq helpers (presumably defined in iseq.c);
 * used by the imemo_iseq branches of rb_imemo_memsize() and
 * rb_imemo_mark_and_move() below. */
10size_t rb_iseq_memsize(
const rb_iseq_t *iseq);
11void rb_iseq_mark_and_move(
rb_iseq_t *iseq,
bool reference_updating);
/* Human-readable name for an imemo type, for debug/error output.
 * NOTE(review): this view of the function is truncated; only some of the
 * IMEMO_NAME cases and the final rb_bug() fallback are visible. */
15rb_imemo_name(
enum imemo_type
type)
/* Each IMEMO_NAME(x) expands to: case imemo_x: return "x"; */
19#define IMEMO_NAME(x) case imemo_##x: return #x;
20        IMEMO_NAME(callcache);
22        IMEMO_NAME(constcache);
30        IMEMO_NAME(throw_data);
32        IMEMO_NAME(cvar_entry);
    /* Reached only if the switch above did not return: unknown imemo type. */
36    rb_bug(
"unreachable");
/* Allocate a fresh imemo object of `type` with owner/parent `v0`.
 * NOTE(review): truncated view — the computation of `flags` (from `type`
 * and `is_shareable`) is not visible here; confirm against full source. */
44rb_imemo_new(
enum imemo_type
type,
VALUE v0,
size_t size,
bool is_shareable)
47    return rb_newobj_of(v0, flags, size);
/* Allocate an imemo_tmpbuf and register it as a GC pinning object so it
 * is not relocated by compaction.
 * NOTE(review): truncated view — the allocation of `obj` is not visible. */
51rb_imemo_tmpbuf_new(
void)
56    rb_gc_register_pinning_obj((
VALUE)obj);
/* Allocate a temporary buffer of `len` bytes backed by an imemo_tmpbuf
 * published through *store (volatile slot, presumably so the GC can keep
 * the tmpbuf alive via the caller's stack — confirm).
 * Raises ArgumentError for a negative/overflowing size. */
65rb_alloc_tmp_buffer(
volatile VALUE *store,
long len)
68        rb_raise(rb_eArgError,
"negative buffer size (or size too big)");
    /* NOTE(review): *store is set before the raw ruby_xmalloc — ordering
     * inferred from the visible line numbers; presumably keeps the tmpbuf
     * reachable if GC triggers during allocation. Confirm upstream. */
74    *store = (
VALUE)tmpbuf;
75    void *ptr = ruby_xmalloc(
len);
/* Counted variant: forwards to rb_alloc_tmp_buffer() with a byte size.
 * NOTE(review): truncated view — any overflow check combining `size` and
 * `cnt` is not visible here. */
83rb_alloc_tmp_buffer_with_count(
volatile VALUE *store,
size_t size,
size_t cnt)
85    return rb_alloc_tmp_buffer(store, (
long)size);
/* Release a buffer allocated by rb_alloc_tmp_buffer().  The pointer is
 * detached with an atomic exchange (presumably to make concurrent free
 * attempts safe — confirm) and freed with the sized free for allocator
 * accounting. */
89rb_free_tmp_buffer(
volatile VALUE *store)
94        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
97        ruby_xfree_sized(ptr, size);
/* Fragments of the MEMO constructors: allocate an imemo_memo and fill
 * its const fields through const-casts (plain assignment is impossible
 * because the fields are declared const).
 * NOTE(review): the function headers are not visible in this view.  The
 * second fragment additionally stores u3 and sets MEMO_U3_IS_VALUE,
 * which the marking code below uses to decide whether u3.value is a
 * VALUE to mark. */
104    struct MEMO *memo = IMEMO_NEW(
struct MEMO, imemo_memo, 0);
106    *((
VALUE *)&memo->v1) = a;
107    *((
VALUE *)&memo->v2) = b;
116    struct MEMO *memo = IMEMO_NEW(
struct MEMO, imemo_memo, 0);
118    *((
VALUE *)&memo->v1) = a;
119    *((
VALUE *)&memo->v2) = b;
120    *((
VALUE *)&memo->u3.value) = c;
121    memo->flags |= MEMO_U3_IS_VALUE;
/* Allocate an imemo_fields able to hold `capa` embedded VALUE slots,
 * falling back to the base struct size when the embedded size is not
 * allocatable by the GC.
 * NOTE(review): truncated view — the computation of `embedded_size` and
 * the rest of both branches are not visible. */
127imemo_fields_new(
VALUE owner,
size_t capa,
bool shareable)
130    if (rb_gc_size_allocatable_p(embedded_size)) {
131        VALUE fields = rb_imemo_new(imemo_fields, owner, embedded_size, shareable);
136        VALUE fields = rb_imemo_new(imemo_fields, owner,
sizeof(
struct rb_fields), shareable);
/* Public wrapper over imemo_fields_new(). */
144rb_imemo_fields_new(
VALUE owner,
size_t capa,
bool shareable)
146    return imemo_fields_new(owner,
capa, shareable);
/* Allocate an imemo_fields in the "complex" (st_table-backed)
 * representation, pre-sizing the numeric table for `capa` entries. */
150imemo_fields_new_complex(
VALUE owner,
size_t capa,
bool shareable)
152    VALUE fields = rb_imemo_new(imemo_fields, owner,
sizeof(
struct rb_fields), shareable);
153    IMEMO_OBJ_FIELDS(fields)->as.complex.table = st_init_numtable_with_size(
capa);
/* Public wrapper over imemo_fields_new_complex(). */
159rb_imemo_fields_new_complex(
VALUE owner,
size_t capa,
bool shareable)
161    return imemo_fields_new_complex(owner,
capa, shareable);
/* st_foreach callbacks; bodies are not visible in this view.  Presumably
 * they fire GC write barriers for each value stored in a complex fields
 * table (arg is the owning fields object) — confirm against full source. */
165imemo_fields_trigger_wb_i(st_data_t key, st_data_t value, st_data_t arg)
173imemo_fields_complex_wb_i(st_data_t key, st_data_t value, st_data_t arg)
/* Build a complex imemo_fields around an existing st_table `tbl` (the
 * table is later torn down in imemo_fields_free(), i.e. ownership moves
 * to the imemo), then walk the table to fire write barriers for the
 * values now reachable from `fields`. */
180rb_imemo_fields_new_complex_tbl(
VALUE owner,
st_table *tbl,
bool shareable)
182    VALUE fields = rb_imemo_new(imemo_fields, owner,
sizeof(
struct rb_fields), shareable);
183    IMEMO_OBJ_FIELDS(fields)->as.complex.table = tbl;
185    st_foreach(tbl, imemo_fields_trigger_wb_i, (st_data_t)fields);
/* Copy an imemo_fields, preserving its shape id.  Both branches create
 * the clone non-shareable (`false`).
 * Complex (too-complex shape): replicate the st_table, then fire write
 * barriers for the copied values.  Otherwise: memcpy the VALUE slots.
 * NOTE(review): truncated view — the body of the trailing per-slot loop
 * (presumably write barriers for each copied VALUE) is not visible. */
190rb_imemo_fields_clone(
VALUE fields_obj)
192    shape_id_t shape_id = RBASIC_SHAPE_ID(fields_obj);
195    if (rb_shape_too_complex_p(shape_id)) {
196        st_table *src_table = rb_imemo_fields_complex_tbl(fields_obj);
199        clone = rb_imemo_fields_new_complex_tbl(rb_imemo_fields_owner(fields_obj), dest_table,
false );
201        st_replace(dest_table, src_table);
202        RBASIC_SET_SHAPE_ID(clone, shape_id);
204        st_foreach(dest_table, imemo_fields_complex_wb_i, (st_data_t)clone);
207        clone = imemo_fields_new(rb_imemo_fields_owner(fields_obj), RSHAPE_CAPACITY(shape_id),
false );
208        RBASIC_SET_SHAPE_ID(clone, shape_id);
209        VALUE *fields = rb_imemo_fields_ptr(clone);
210        attr_index_t fields_count = RSHAPE_LEN(shape_id);
211        MEMCPY(fields, rb_imemo_fields_ptr(fields_obj),
VALUE, fields_count);
212        for (attr_index_t i = 0; i < fields_count; i++) {
/* Reset an imemo_fields to an empty state: drop back to the appropriate
 * root shape (too-complex or plain), then clear the class slot. */
221rb_imemo_fields_clear(
VALUE fields_obj)
225    if (rb_shape_obj_too_complex_p(fields_obj)) {
226        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_TOO_COMPLEX_SHAPE_ID);
229        RBASIC_SET_SHAPE_ID(fields_obj, ROOT_SHAPE_ID);
    /* NOTE(review): clearing klass presumably detaches the object from
     * its owner / marks it dead to the GC — confirm against full source. */
232    RBASIC_CLEAR_CLASS(fields_obj);
/* Per-type memory-size accounting for imemo objects.
 * NOTE(review): heavily truncated view — most case bodies are missing;
 * only the iseq and fields accounting lines are visible. */
240rb_imemo_memsize(
VALUE obj)
243    switch (imemo_type(obj)) {
244      case imemo_callcache:
248      case imemo_constcache:
259        size += rb_iseq_memsize((
rb_iseq_t *)obj);
270      case imemo_throw_data:
276      case imemo_cvar_entry:
        /* fields: count the st_table for complex shapes, otherwise the
         * VALUE array sized by the shape capacity. */
280        if (rb_shape_obj_too_complex_p(obj)) {
281            size += st_memsize(IMEMO_OBJ_FIELDS(obj)->as.complex.table);
284            size += RSHAPE_CAPACITY(RBASIC_SHAPE_ID(obj)) *
sizeof(
VALUE);
289        rb_bug(
"unreachable");
/* Compaction-time predicate; per its name, presumably true when `obj`
 * has already been moved or is still strictly alive.  Body not visible
 * in this view — confirm against full source. */
300moved_or_living_object_strictly_p(
VALUE obj)
/* Fragment of the method-entry (imemo_ment) mark/move logic: marks the
 * entry's owner and defined_class, then the per-method-definition
 * references selected by the VM_METHOD_TYPE switch.
 * NOTE(review): truncated view — the enclosing function header and the
 * break statements between cases are not visible. */
310    rb_gc_mark_and_move(&ment->owner);
311    rb_gc_mark_and_move(&ment->defined_class);
315      case VM_METHOD_TYPE_ISEQ:
317            rb_gc_mark_and_move_ptr(&def->body.iseq.
iseqptr);
319            rb_gc_mark_and_move_ptr(&def->body.iseq.
cref);
321        if (!reference_updating) {
322            if (def->iseq_overload && ment->defined_class) {
                /* NOTE(review): plain rb_gc_mark (not movable) pins `ment`
                 * in the iseq_overload case — confirm rationale upstream. */
325                rb_gc_mark((
VALUE)ment);
329      case VM_METHOD_TYPE_ATTRSET:
330      case VM_METHOD_TYPE_IVAR:
331        rb_gc_mark_and_move(&def->body.attr.location);
333      case VM_METHOD_TYPE_BMETHOD:
334        if (!rb_gc_checking_shareable()) {
335            rb_gc_mark_and_move(&def->body.bmethod.proc);
338      case VM_METHOD_TYPE_ALIAS:
339        rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
341      case VM_METHOD_TYPE_REFINED:
342        rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
        /* NOTE(review): the cases below appear to have nothing to mark
         * (no visible bodies) — confirm against full source. */
344      case VM_METHOD_TYPE_CFUNC:
345      case VM_METHOD_TYPE_ZSUPER:
346      case VM_METHOD_TYPE_MISSING:
347      case VM_METHOD_TYPE_OPTIMIZED:
348      case VM_METHOD_TYPE_UNDEF:
349      case VM_METHOD_TYPE_NOTIMPLEMENTED:
/* GC mark — and, when reference_updating is true, pointer update after
 * compaction — for every imemo type.
 * NOTE(review): heavily truncated view — many case labels, breaks, and
 * local declarations are missing between the visible lines. */
356rb_imemo_mark_and_move(
VALUE obj,
bool reference_updating)
358    switch (imemo_type(obj)) {
      /* callcache: klass/cme_ are handled manually; a dead cme_ leads to
       * vm_cc_invalidate() instead of a klass update. */
359      case imemo_callcache: {
381            if (UNDEF_P(cc->klass)) {
386            else if (reference_updating) {
387                if (moved_or_living_object_strictly_p((
VALUE)cc->cme_)) {
388                    *((
VALUE *)&cc->klass) = rb_gc_location(cc->klass);
396                    vm_cc_invalidate(cc);
403                if ((vm_cc_super_p(cc) || vm_cc_refinement_p(cc))) {
404                    rb_gc_mark_movable((
VALUE)cc->cme_);
412      case imemo_constcache: {
415        if ((ice->flags & IMEMO_CONST_CACHE_SHAREABLE) ||
416            !rb_gc_checking_shareable()) {
417            rb_gc_mark_and_move(&ice->value);
      /* cref fragment: klass_or_self/refinements are skipped while the
       * GC is checking shareability. */
425        if (!rb_gc_checking_shareable()) {
427            rb_gc_mark_and_move(&cref->klass_or_self);
430        rb_gc_mark_and_move_ptr(&cref->next);
433        if (!rb_gc_checking_shareable()) {
434            rb_gc_mark_and_move(&cref->refinements);
      /* env fragment: marks the local slots, iseq, and optional box, then
       * rewrites ep[VM_ENV_DATA_INDEX_ENV] when updating references. */
442        if (LIKELY(env->ep)) {
444            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
445            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));
447            for (
unsigned int i = 0; i < env->env_size; i++) {
448                rb_gc_mark_and_move((
VALUE *)&env->env[i]);
451            rb_gc_mark_and_move_ptr(&env->iseq);
453            if (VM_ENV_LOCAL_P(env->ep) && VM_ENV_BOXED_P(env->ep)) {
454                const rb_box_t *box = VM_ENV_BOX(env->ep);
455                if (BOX_USER_P(box)) {
456                    rb_gc_mark_and_move((
VALUE *)&box->box_object);
460            if (reference_updating) {
461                ((
VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
464                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
465                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
467                rb_gc_mark_movable( (
VALUE)rb_vm_env_prev_env(env));
      /* ifunc: data is only conservatively marked (mark_maybe) during the
       * mark phase, never updated. */
476        if (!reference_updating) {
477            rb_gc_mark_maybe((
VALUE)ifunc->data);
483        rb_iseq_mark_and_move((
rb_iseq_t *)obj, reference_updating);
486        struct MEMO *memo = (
struct MEMO *)obj;
488        rb_gc_mark_and_move((
VALUE *)&memo->v1);
489        rb_gc_mark_and_move((
VALUE *)&memo->v2);
491            rb_gc_mark_and_move((
VALUE *)&memo->u3.value);
503        rb_gc_mark_and_move((
VALUE *)&svar->lastline);
504        rb_gc_mark_and_move((
VALUE *)&svar->backref);
505        rb_gc_mark_and_move((
VALUE *)&svar->others);
509      case imemo_throw_data: {
512        rb_gc_mark_and_move((
VALUE *)&throw_data->throw_obj);
      /* tmpbuf: conservatively mark the raw buffer contents as potential
       * VALUEs (mark-only; tmpbufs are registered as pinning objects at
       * allocation time). */
519        if (!reference_updating) {
520            rb_gc_mark_locations(m->ptr, m->ptr + (m->size /
sizeof(
VALUE)));
525      case imemo_cvar_entry: {
527        rb_gc_mark_and_move(&ent->class_value);
528        rb_gc_mark_and_move((
VALUE *)&ent->cref);
534        if (!rb_gc_checking_shareable()) {
538            if (rb_shape_obj_too_complex_p(obj)) {
539                st_table *tbl = rb_imemo_fields_complex_tbl(obj);
540                if (reference_updating) {
541                    rb_gc_ref_update_table_values_only(tbl);
544                    rb_mark_tbl_no_pin(tbl);
548                VALUE *fields = rb_imemo_fields_ptr(obj);
549                attr_index_t
len = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
550                for (attr_index_t i = 0; i <
len; i++) {
551                    rb_gc_mark_and_move(&fields[i]);
558        rb_bug(
"unreachable");
/* Iterator for tearing down a constant table; presumably frees each
 * entry (body truncated in this view — confirm).  The two trailing lines
 * belong to a separate function (header not visible) that walks the
 * table with this iterator and then frees the table itself. */
566static enum rb_id_table_iterator_result
567free_const_entry_i(
VALUE value,
void *data)
571    return ID_TABLE_CONTINUE;
577    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
578    rb_id_table_free(tbl);
/* Free the backing storage of an imemo_fields: the st_table in the
 * complex representation, otherwise the external VALUE array via a sized
 * free.  NOTE(review): the branch structure between these lines is not
 * fully visible — the embedded case presumably needs no explicit free. */
582imemo_fields_free(
struct rb_fields *fields)
585    shape_id_t shape_id = RBASIC_SHAPE_ID((
VALUE)fields);
586    if (rb_shape_too_complex_p(shape_id)) {
587        st_free_table(fields->as.complex.table);
590        SIZED_FREE_N(fields->as.external.ptr, RSHAPE_CAPACITY(shape_id));
/* Free per-type resources for an imemo being swept, bumping the matching
 * debug counter for each type.
 * NOTE(review): heavily truncated view — most case labels and breaks are
 * missing; visible lines show callinfo kwarg refcounting, the env slot
 * array free, and the fields teardown. */
596rb_imemo_free(
VALUE obj)
598    switch (imemo_type(obj)) {
599      case imemo_callcache:
600        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
603      case imemo_callinfo:{
            /* kwarg block is reference-counted: freed (sized, for
             * allocator accounting) only once no references remain. */
608            if (ci->kwarg->references == 0) {
609                ruby_xfree_sized((
void *)ci->kwarg, rb_callinfo_kwarg_bytes(ci->kwarg->keyword_len));
612        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
616      case imemo_constcache:
617        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
621        RB_DEBUG_COUNTER_INC(obj_imemo_cref);
628        SIZED_FREE_N(env->env, env->env_size);
629        RB_DEBUG_COUNTER_INC(obj_imemo_env);
634        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
638        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
642        RB_DEBUG_COUNTER_INC(obj_imemo_memo);
647        RB_DEBUG_COUNTER_INC(obj_imemo_ment);
651        RB_DEBUG_COUNTER_INC(obj_imemo_svar);
654      case imemo_throw_data:
655        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
660        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
663      case imemo_cvar_entry:
664        RB_DEBUG_COUNTER_INC(obj_imemo_cvar_entry);
668        imemo_fields_free(IMEMO_OBJ_FIELDS(obj));
669        RB_DEBUG_COUNTER_INC(obj_imemo_fields);
672    rb_bug(
"unreachable");
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
#define Qundef
Old name of RUBY_Qundef.
#define T_IMEMO
Old name of RUBY_T_IMEMO.
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define FL_SHAREABLE
Old name of RUBY_FL_SHAREABLE.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define ALLOC_N
Old name of RB_ALLOC_N.
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
#define T_CLASS
Old name of RUBY_T_CLASS.
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
#define T_MOVED
Old name of RUBY_T_MOVED.
#define xcalloc
Old name of ruby_xcalloc.
#define FL_USHIFT
Old name of RUBY_FL_USHIFT.
#define FL_SET_RAW
Old name of RB_FL_SET_RAW.
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
int capa
Designed capacity of the buffer.
int len
Length of the buffer.
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
VALUE type(ANYARGS)
ANYARGS-ed function type.
#define RBASIC(obj)
Convenient casting macro.
Internal header for Ruby Box.
rb_cref_t * cref
class reference, should be marked
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
IFUNC (Internal FUNCtion)
const VALUE cref_or_me
class reference or rb_method_entry_t
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.