Ruby 3.5.0dev (2025-01-10 revision 5fab31b15e32622c4b71d1d347a41937e9f9c212)
imemo.c (5fab31b15e32622c4b71d1d347a41937e9f9c212)
#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);

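/*
 * Returns the printable name of an imemo type as a static C string,
 * e.g. "iseq" for imemo_iseq or "ment" for imemo_ment.
 */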
const char *
rb_imemo_name(enum imemo_type type)
{
    // put no default case to get a warning if an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
      IMEMO_NAME(ast);
      IMEMO_NAME(callcache);
      IMEMO_NAME(callinfo);
      IMEMO_NAME(constcache);
      IMEMO_NAME(cref);
      IMEMO_NAME(env);
      IMEMO_NAME(ifunc);
      IMEMO_NAME(iseq);
      IMEMO_NAME(memo);
      IMEMO_NAME(ment);
      IMEMO_NAME(parser_strterm);
      IMEMO_NAME(svar);
      IMEMO_NAME(throw_data);
      IMEMO_NAME(tmpbuf);
#undef IMEMO_NAME
      default:
        rb_bug("unreachable");
    }
}

/* =========================================================================
 * allocation
 * ========================================================================= */

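/*
 * Allocates a bare T_IMEMO object: the imemo type is encoded into the
 * object's flags (shifted by FL_USHIFT), `size` is the slot size in bytes,
 * and `v0` is forwarded to NEWOBJ_OF.
 */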
VALUE
rb_imemo_new(enum imemo_type type, VALUE v0, size_t size)
{
    VALUE flags = T_IMEMO | (type << FL_USHIFT) | FL_WB_PROTECTED;
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}

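/*
 * imemo_tmpbuf: a GC-visible handle for a temporary malloc'ed buffer.
 * The imemo is allocated empty; callers attach the buffer pointer and its
 * element count afterwards so the GC can mark the buffer contents.
 */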
static rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_new(void)
{
    size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, struct rb_imemo_tmpbuf_struct, 0, flags, size, 0);

    return obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep the order: allocate an empty imemo first, then xmalloc, to
     * avoid a potential memory leak */
    tmpbuf = rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

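/*
 * Note: these temporary-buffer helpers are normally reached through the
 * ALLOCV/ALLOCV_N/ALLOCV_END macros of the C API rather than called
 * directly. A rough usage sketch:
 *
 *     VALUE store = 0;
 *     long *tmp = ALLOCV_N(long, store, n);  // buffer kept alive via `store`
 *     ...
 *     ALLOCV_END(store);                     // frees the buffer, clears `store`
 */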
void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}

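/*
 * Wraps a parser-allocated buffer in a fresh imemo_tmpbuf and chains it to
 * the previous heap via `next`, so the whole chain stays reachable from a
 * single VALUE.
 */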
rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
{
    rb_imemo_tmpbuf_t *tmpbuf = rb_imemo_tmpbuf_new();
    tmpbuf->ptr = buf;
    tmpbuf->next = old_heap;
    tmpbuf->cnt = cnt;

    return tmpbuf;
}

/* =========================================================================
 * memsize
 * ========================================================================= */

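/*
 * Reports the memory an imemo owns beyond its GC slot: the env's VALUE
 * array, an iseq's auxiliary data, the size of a method entry's def
 * pointer, or a tmpbuf's buffer. The other imemo types carry no extra
 * allocation.
 */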
size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);

        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);

        break;
      case imemo_parser_strterm:
        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);

        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}

/* =========================================================================
 * mark
 * ========================================================================= */

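/*
 * Iterator over a class's call-cache table (RCLASS_CC_TBL): entries whose
 * callable method entry has been invalidated are freed and deleted from the
 * table, while live entries have their cme and each cached cc marked as
 * movable.
 */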
static enum rb_id_table_iterator_result
cc_table_mark_i(VALUE ccs_ptr, void *data)
{
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));
#if VM_CHECK_MODE > 0
    VALUE klass = (VALUE)data;

    VALUE lookup_val;
    VM_ASSERT(rb_id_table_lookup(RCLASS_CC_TBL(klass), ccs->cme->called_id, &lookup_val));
    VM_ASSERT(lookup_val == ccs_ptr);
#endif

    if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
        rb_vm_ccs_free(ccs);
        return ID_TABLE_DELETE;
    }
    else {
        rb_gc_mark_movable((VALUE)ccs->cme);

        for (int i=0; i<ccs->len; i++) {
            VM_ASSERT(klass == ccs->entries[i].cc->klass);
            VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));

            rb_gc_mark_movable((VALUE)ccs->entries[i].cc);
        }
        return ID_TABLE_CONTINUE;
    }
}

void
rb_cc_table_mark(VALUE klass)
{
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
    if (cc_tbl) {
        rb_id_table_foreach_values(cc_tbl, cc_table_mark_i, (void *)klass);
    }
}

static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return obj && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

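/*
 * Marks (or updates references of) everything a method entry points at:
 * owner, defined_class, and the per-definition-type payload (iseq/cref,
 * attr location, bmethod proc, aliased or refined original method entry).
 */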
static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // it can be a key of "overloaded_cme" table
                    // so it should be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            rb_gc_mark_and_move(&def->body.bmethod.proc);
            if (!reference_updating) {
                if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

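/*
 * GC entry point for all imemo types. With reference_updating == false it
 * marks outgoing references; with reference_updating == true (during
 * compaction) it rewrites them to their new locations via rb_gc_location()
 * and rb_gc_mark_and_move().
 */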
void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because, if the klass is
         * freed, cc->klass will be cleared by `vm_cc_invalidate()`.
         *
         * cc->cme (cme) should not be marked because the cc is invalidated
         * when the cme is freed:
         * - klass marks cme if klass uses cme.
         * - the caller class's ccs->cme marks cc->cme.
         * - if cc is invalidated (klass no longer refers to the cc),
         *   it is invalidated by `vm_cc_invalidate()` and cc->cme is
         *   never accessed afterwards.
         * - with multiple Ractors, cme is collected with global GC,
         *   so it is safe as long as GC does not interleave while
         *   cc and cme are being accessed.
         * - However, cc_type_super and cc_type_refinement are not chained
         *   from a ccs, so cc->cme should be marked; the cme might be
         *   reachable only through cc in these cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (reference_updating) {
            if (!cc->klass) {
                // already invalidated
            }
            else {
                if (moved_or_living_object_strictly_p(cc->klass) &&
                    moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                    *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                    *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                        (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);
                }
                else {
                    vm_cc_invalidate(cc);
                }
            }
        }
        else {
            if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
                rb_gc_mark_movable((VALUE)cc->cme_);
                rb_gc_mark_movable((VALUE)cc->klass);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        rb_gc_mark_and_move(&ice->value);

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        rb_gc_mark_and_move(&cref->klass_or_self);
        rb_gc_mark_and_move_ptr(&cref->next);
        rb_gc_mark_and_move(&cref->refinements);

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // ep can be NULL just after newobj().
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_parser_strterm:
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            do {
                rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
            } while ((m = m->next) != NULL);
        }

        break;
      }
      default:
        rb_bug("unreachable");
    }
}

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

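/*
 * Frees one rb_class_cc_entries list. When `alive` is false the owning class
 * is being swept, so each cached cc is validated (still a heap object, not
 * garbage, still an imemo_callcache, still pointing at `klass`) before it is
 * invalidated.
 */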
// alive: if false, target pointers can already have been freed.
static void
vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, VALUE klass)
{
    if (ccs->entries) {
        for (int i=0; i<ccs->len; i++) {
            const struct rb_callcache *cc = ccs->entries[i].cc;
            if (!alive) {
                // ccs can be free'ed.
                if (rb_gc_pointer_to_heap_p((VALUE)cc) &&
                    !rb_objspace_garbage_object_p((VALUE)cc) &&
                    IMEMO_TYPE_P(cc, imemo_callcache) &&
                    cc->klass == klass) {
                    // OK. maybe target cc.
                }
                else {
                    continue;
                }
            }

            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
            vm_cc_invalidate(cc);
        }
        ruby_xfree(ccs->entries);
    }
    ruby_xfree(ccs);
}

void
rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
{
    RB_DEBUG_COUNTER_INC(ccs_free);
    vm_ccs_free(ccs, true, Qundef);
}

static enum rb_id_table_iterator_result
cc_table_free_i(VALUE ccs_ptr, void *data)
{
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VALUE klass = (VALUE)data;
    VM_ASSERT(vm_ccs_p(ccs));

    vm_ccs_free(ccs, false, klass);

    return ID_TABLE_CONTINUE;
}

void
rb_cc_table_free(VALUE klass)
{
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);

    if (cc_tbl) {
        rb_id_table_foreach_values(cc_tbl, cc_table_free_i, (void *)klass);
        rb_id_table_free(cc_tbl);
    }
}

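/*
 * Releases any out-of-slot memory owned by an imemo when the GC sweeps it
 * (env arrays, iseq data, method entries, callinfo kwarg tables, tmpbuf
 * buffers) and bumps the per-type debug counters.
 */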
void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);

        break;
      case imemo_callinfo: {
        const struct rb_callinfo *ci = ((const struct rb_callinfo *)obj);

        if (ci->kwarg) {
            ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
            if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
        }
        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);

        break;
      }
      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);

        break;
      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);

        break;
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
        xfree((VALUE *)env->env);
        RB_DEBUG_COUNTER_INC(obj_imemo_env);

        break;
      }
      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
        break;
      case imemo_iseq:
        rb_iseq_free((rb_iseq_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);

        break;
      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);

        break;
      case imemo_ment:
        rb_free_method_entry((rb_method_entry_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);

        break;
      case imemo_parser_strterm:
        RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);

        break;
      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);
        break;
      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);

        break;
      case imemo_tmpbuf:
        xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);

        break;
      default:
        rb_bug("unreachable");
    }
}