/* Ruby 3.5.0dev (2025-04-03 revision 1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
 * ractor.c (1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433) */
1// Ractor implementation
2
3#include "ruby/ruby.h"
4#include "ruby/thread.h"
5#include "ruby/ractor.h"
7#include "vm_core.h"
8#include "eval_intern.h"
9#include "vm_sync.h"
10#include "ractor_core.h"
11#include "internal/complex.h"
12#include "internal/error.h"
13#include "internal/gc.h"
14#include "internal/hash.h"
15#include "internal/object.h"
16#include "internal/ractor.h"
17#include "internal/rational.h"
18#include "internal/struct.h"
19#include "internal/thread.h"
20#include "variable.h"
21#include "yjit.h"
22
// Ractor::Selector class object.
static VALUE rb_cRactorSelector;

// Exception/class objects used by the Ractor implementation.
VALUE rb_eRactorUnsafeError;
VALUE rb_eRactorIsolationError;
static VALUE rb_eRactorError;        // base error class for Ractor operations
static VALUE rb_eRactorRemoteError;  // wraps an exception raised in a remote ractor (see ractor_basket_accept)
static VALUE rb_eRactorMovedError;
static VALUE rb_eRactorClosedError;  // raised when an incoming/outgoing port is closed
static VALUE rb_cRactorMovedObject;
33
34static void vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *r, const char *file, int line);
35
36// Ractor locking
37
// Debug-only assertion: the current ractor must NOT already hold r's lock
// (guards against recursive locking). Compiled out unless RACTOR_CHECK_MODE > 0.
static void
ASSERT_ractor_unlocking(rb_ractor_t *r)
{
#if RACTOR_CHECK_MODE > 0
    // The execution context can be absent (e.g. very early boot); only check when present.
    if (rb_current_execution_context(false) != NULL && r->sync.locked_by == rb_ractor_self(GET_RACTOR())) {
        rb_bug("recursive ractor locking");
    }
#endif
}
47
// Debug-only assertion: the current ractor MUST hold r's lock.
// Compiled out unless RACTOR_CHECK_MODE > 0.
static void
ASSERT_ractor_locking(rb_ractor_t *r)
{
#if RACTOR_CHECK_MODE > 0
    if (rb_current_execution_context(false) != NULL && r->sync.locked_by != rb_ractor_self(GET_RACTOR())) {
        rp(r->sync.locked_by); // dump the actual holder before aborting
        rb_bug("ractor lock is not acquired.");
    }
#endif
}
58
// Acquire r's sync lock. file/line identify the call site for debug logging.
// In check mode, records the locking ractor in r->sync.locked_by.
static void
ractor_lock(rb_ractor_t *r, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "locking r:%u%s", r->pub.id, rb_current_ractor_raw(false) == r ? " (self)" : "");

    ASSERT_ractor_unlocking(r);
    rb_native_mutex_lock(&r->sync.lock);

#if RACTOR_CHECK_MODE > 0
    if (rb_current_execution_context(false) != NULL) {
        rb_ractor_t *cr = rb_current_ractor_raw(false);
        // Qundef marks "locked, but owner unknown" when there is no current ractor.
        r->sync.locked_by = cr ? rb_ractor_self(cr) : Qundef;
    }
#endif

    RUBY_DEBUG_LOG2(file, line, "locked r:%u%s", r->pub.id, rb_current_ractor_raw(false) == r ? " (self)" : "");
}
76
// Acquire the current ractor's own lock; cr must be the caller's ractor.
static void
ractor_lock_self(rb_ractor_t *cr, const char *file, int line)
{
    VM_ASSERT(cr == GET_RACTOR());
#if RACTOR_CHECK_MODE > 0
    VM_ASSERT(cr->sync.locked_by != cr->pub.self);
#endif
    ractor_lock(cr, file, line);
}
86
// Release r's sync lock (must currently be held by this ractor in check mode).
static void
ractor_unlock(rb_ractor_t *r, const char *file, int line)
{
    ASSERT_ractor_locking(r);
#if RACTOR_CHECK_MODE > 0
    r->sync.locked_by = Qnil; // clear owner before dropping the mutex
#endif
    rb_native_mutex_unlock(&r->sync.lock);

    RUBY_DEBUG_LOG2(file, line, "r:%u%s", r->pub.id, rb_current_ractor_raw(false) == r ? " (self)" : "");
}
98
// Release the current ractor's own lock; cr must be the caller's ractor.
static void
ractor_unlock_self(rb_ractor_t *cr, const char *file, int line)
{
    VM_ASSERT(cr == GET_RACTOR());
#if RACTOR_CHECK_MODE > 0
    VM_ASSERT(cr->sync.locked_by == cr->pub.self);
#endif
    ractor_unlock(cr, file, line);
}
108
// Convenience wrappers that pass the call site (__FILE__/__LINE__) for debug logs.
#define RACTOR_LOCK(r) ractor_lock(r, __FILE__, __LINE__)
#define RACTOR_UNLOCK(r) ractor_unlock(r, __FILE__, __LINE__)
#define RACTOR_LOCK_SELF(r) ractor_lock_self(r, __FILE__, __LINE__)
#define RACTOR_UNLOCK_SELF(r) ractor_unlock_self(r, __FILE__, __LINE__)
113
// Public (VM-internal) entry point: lock the current ractor's own lock.
void
rb_ractor_lock_self(rb_ractor_t *r)
{
    RACTOR_LOCK_SELF(r);
}
119
// Public (VM-internal) entry point: unlock the current ractor's own lock.
void
rb_ractor_unlock_self(rb_ractor_t *r)
{
    RACTOR_UNLOCK_SELF(r);
}
125
126// Ractor status
127
// Human-readable name for a ractor status, used in debug logs.
static const char *
ractor_status_str(enum ractor_status status)
{
    switch (status) {
      case ractor_created: return "created";
      case ractor_running: return "running";
      case ractor_blocking: return "blocking";
      case ractor_terminated: return "terminated";
    }
    rb_bug("unreachable");
}
139
// Transition r to a new status, asserting that the transition is legal:
//   created -> blocking -> running <-> blocking, running -> terminated.
static void
ractor_status_set(rb_ractor_t *r, enum ractor_status status)
{
    RUBY_DEBUG_LOG("r:%u [%s]->[%s]", r->pub.id, ractor_status_str(r->status_), ractor_status_str(status));

    // check 1
    if (r->status_ != ractor_created) {
        VM_ASSERT(r == GET_RACTOR()); // only self-modification is allowed.
        ASSERT_vm_locking();
    }

    // check2: transition check. assume it will be vanished on non-debug build.
    switch (r->status_) {
      case ractor_created:
        VM_ASSERT(status == ractor_blocking);
        break;
      case ractor_running:
        VM_ASSERT(status == ractor_blocking||
                  status == ractor_terminated);
        break;
      case ractor_blocking:
        VM_ASSERT(status == ractor_running);
        break;
      case ractor_terminated:
        rb_bug("unreachable");
        break;
    }

    r->status_ = status;
}
170
// Thin local alias of rb_ractor_status_p for use within this file.
static bool
ractor_status_p(rb_ractor_t *r, enum ractor_status status)
{
    return rb_ractor_status_p(r, status);
}
176
177// Ractor data/mark/free
178
179static struct rb_ractor_basket *ractor_queue_at(rb_ractor_t *r, struct rb_ractor_queue *rq, int i);
180static void ractor_local_storage_mark(rb_ractor_t *r);
181static void ractor_local_storage_free(rb_ractor_t *r);
182
// GC-mark every live basket in a queue: the sender, plus the carried value
// for basket types that actually hold one.
static void
ractor_queue_mark(struct rb_ractor_queue *rq)
{
    for (int i=0; i<rq->cnt; i++) {
        struct rb_ractor_basket *b = ractor_queue_at(NULL, rq, i); // NULL: skip lock assertion (called from GC)
        rb_gc_mark(b->sender);

        switch (b->type.e) {
          case basket_type_yielding:
          case basket_type_take_basket:
          case basket_type_deleted:
          case basket_type_reserved:
            // ignore
            break;
          default:
            rb_gc_mark(b->p.send.v);
        }
    }
}
202
// GC mark function for the ractor T_DATA object: marks both message queues,
// misc VALUE fields, every thread belonging to the ractor, and local storage.
static void
ractor_mark(void *ptr)
{
    rb_ractor_t *r = (rb_ractor_t *)ptr;

    ractor_queue_mark(&r->sync.recv_queue);
    ractor_queue_mark(&r->sync.takers_queue);

    rb_gc_mark(r->receiving_mutex);

    rb_gc_mark(r->loc);
    rb_gc_mark(r->name);
    rb_gc_mark(r->r_stdin);
    rb_gc_mark(r->r_stdout);
    rb_gc_mark(r->r_stderr);
    rb_hook_list_mark(&r->pub.hooks);

    if (r->threads.cnt > 0) {
        rb_thread_t *th = 0;
        ccan_list_for_each(&r->threads.set, th, lt_node) {
            VM_ASSERT(th != NULL);
            rb_gc_mark(th->self);
        }
    }

    ractor_local_storage_mark(r);
}
230
// Release a queue's basket array (allocated with plain malloc/realloc).
static void
ractor_queue_free(struct rb_ractor_queue *rq)
{
    free(rq->baskets);
}
236
// Free function for the ractor T_DATA object: destroys sync primitives,
// both queues, local storage, hooks, and (for the last main ractor) the
// per-ractor GC newobj cache.
static void
ractor_free(void *ptr)
{
    rb_ractor_t *r = (rb_ractor_t *)ptr;
    RUBY_DEBUG_LOG("free r:%d", rb_ractor_id(r));
    rb_native_mutex_destroy(&r->sync.lock);
#ifdef RUBY_THREAD_WIN32_H
    // the condvar exists only on the win32 (non-pthread) build; see rb_ractor_sched_wakeup below
    rb_native_cond_destroy(&r->sync.cond);
#endif
    ractor_queue_free(&r->sync.recv_queue);
    ractor_queue_free(&r->sync.takers_queue);
    ractor_local_storage_free(r);
    rb_hook_list_free(&r->pub.hooks);

    if (r->newobj_cache) {
        // a live newobj cache at free time is only expected for the single main ractor
        RUBY_ASSERT(r == ruby_single_main_ractor);

        rb_gc_ractor_cache_free(r->newobj_cache);
        r->newobj_cache = NULL;
    }

    ruby_xfree(r);
}
260
// Bytes owned by a queue's basket array (capacity, not just live count).
static size_t
ractor_queue_memsize(const struct rb_ractor_queue *rq)
{
    return sizeof(struct rb_ractor_basket) * rq->size;
}
266
// Memsize function for ObjectSpace: struct itself plus both queues' storage.
static size_t
ractor_memsize(const void *ptr)
{
    rb_ractor_t *r = (rb_ractor_t *)ptr;

    // TODO: more correct?
    return sizeof(rb_ractor_t) +
      ractor_queue_memsize(&r->sync.recv_queue) +
      ractor_queue_memsize(&r->sync.takers_queue);
}
277
// Typed-data descriptor tying the GC callbacks above to Ractor instances.
static const rb_data_type_t ractor_data_type = {
    "ractor",
    {
        ractor_mark,
        ractor_free,
        ractor_memsize,
        NULL, // update
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY /* | RUBY_TYPED_WB_PROTECTED */
};
288
289bool
290rb_ractor_p(VALUE gv)
291{
292 if (rb_typeddata_is_kind_of(gv, &ractor_data_type)) {
293 return true;
294 }
295 else {
296 return false;
297 }
298}
299
// Unwrap a Ractor VALUE to its rb_ractor_t pointer (asserts the type in debug builds).
static inline rb_ractor_t *
RACTOR_PTR(VALUE self)
{
    VM_ASSERT(rb_ractor_p(self));
    rb_ractor_t *r = DATA_PTR(self);
    return r;
}
307
308static rb_atomic_t ractor_last_id;
309
#if RACTOR_CHECK_MODE > 0
// Debug helper: id of the current ractor; before the thread is attached to a
// ractor, reports 1 (the main ractor's id).
uint32_t
rb_ractor_current_id(void)
{
    if (GET_THREAD()->ractor == NULL) {
        return 1; // main ractor
    }
    else {
        return rb_ractor_id(GET_RACTOR());
    }
}
#endif
322
323// Ractor queue
324
// Initialize an empty ring-buffer queue with a small initial capacity.
// NOTE(review): malloc result is not checked; an OOM here leads to a NULL
// deref on first enqueue — confirm this is the intended crash-on-OOM policy.
static void
ractor_queue_setup(struct rb_ractor_queue *rq)
{
    rq->size = 2;
    rq->cnt = 0;
    rq->start = 0;
    rq->baskets = malloc(sizeof(struct rb_ractor_basket) * rq->size);
}
333
// First live slot of the ring buffer. r may be NULL to skip the lock
// assertion (e.g. when called from GC marking).
static struct rb_ractor_basket *
ractor_queue_head(rb_ractor_t *r, struct rb_ractor_queue *rq)
{
    if (r != NULL) ASSERT_ractor_locking(r);
    return &rq->baskets[rq->start];
}
340
// i-th live slot of the ring buffer (wraps around). r may be NULL to skip
// the lock assertion.
static struct rb_ractor_basket *
ractor_queue_at(rb_ractor_t *r, struct rb_ractor_queue *rq, int i)
{
    if (r != NULL) ASSERT_ractor_locking(r);
    return &rq->baskets[(rq->start + i) % rq->size];
}
347
// Drop the head element. If some baskets are reserved (receive_if in
// progress) the head cannot be physically removed — it is only marked
// deleted so reserved indices stay stable; compaction happens later.
static void
ractor_queue_advance(rb_ractor_t *r, struct rb_ractor_queue *rq)
{
    ASSERT_ractor_locking(r);

    if (rq->reserved_cnt == 0) {
        rq->cnt--;
        rq->start = (rq->start + 1) % rq->size;
        rq->serial++; // invalidates receive_if's cached scan position
    }
    else {
        ractor_queue_at(r, rq, 0)->type.e = basket_type_deleted;
    }
}
362
// Should slot i be skipped during iteration? (deleted tombstones and
// baskets reserved by an in-flight receive_if).
static bool
ractor_queue_skip_p(rb_ractor_t *r, struct rb_ractor_queue *rq, int i)
{
    struct rb_ractor_basket *b = ractor_queue_at(r, rq, i);
    return basket_type_p(b, basket_type_deleted) ||
           basket_type_p(b, basket_type_reserved);
}
370
// Pop deleted tombstones off the head of the queue while possible.
static void
ractor_queue_compact(rb_ractor_t *r, struct rb_ractor_queue *rq)
{
    ASSERT_ractor_locking(r);

    while (rq->cnt > 0 && basket_type_p(ractor_queue_at(r, rq, 0), basket_type_deleted)) {
        ractor_queue_advance(r, rq);
    }
}
380
// True when the queue has no takable entry (ignoring tombstones and
// reserved baskets). Compacts head tombstones as a side effect.
static bool
ractor_queue_empty_p(rb_ractor_t *r, struct rb_ractor_queue *rq)
{
    ASSERT_ractor_locking(r);

    if (rq->cnt == 0) {
        return true;
    }

    ractor_queue_compact(r, rq);

    for (int i=0; i<rq->cnt; i++) {
        if (!ractor_queue_skip_p(r, rq, i)) {
            return false;
        }
    }

    return true;
}
400
// Dequeue the first non-skipped basket into *basket (by copy).
// Returns false if nothing is available. The slot is tombstoned and the
// head compacted rather than shifting the whole ring.
static bool
ractor_queue_deq(rb_ractor_t *r, struct rb_ractor_queue *rq, struct rb_ractor_basket *basket)
{
    ASSERT_ractor_locking(r);

    for (int i=0; i<rq->cnt; i++) {
        if (!ractor_queue_skip_p(r, rq, i)) {
            struct rb_ractor_basket *b = ractor_queue_at(r, rq, i);
            *basket = *b;

            // remove from queue
            b->type.e = basket_type_deleted;
            ractor_queue_compact(r, rq);
            return true;
        }
    }

    return false;
}
420
421static void
422ractor_queue_enq(rb_ractor_t *r, struct rb_ractor_queue *rq, struct rb_ractor_basket *basket)
423{
424 ASSERT_ractor_locking(r);
425
426 if (rq->size <= rq->cnt) {
427 rq->baskets = realloc(rq->baskets, sizeof(struct rb_ractor_basket) * rq->size * 2);
428 for (int i=rq->size - rq->start; i<rq->cnt; i++) {
429 rq->baskets[i + rq->start] = rq->baskets[i + rq->start - rq->size];
430 }
431 rq->size *= 2;
432 }
433 rq->baskets[(rq->start + rq->cnt++) % rq->size] = *basket;
434 // fprintf(stderr, "%s %p->cnt:%d\n", RUBY_FUNCTION_NAME_STRING, (void *)rq, rq->cnt);
435}
436
// Tombstone a basket in place. r and rq are unused but kept for signature
// symmetry with the other queue operations.
static void
ractor_queue_delete(rb_ractor_t *r, struct rb_ractor_queue *rq, struct rb_ractor_basket *basket)
{
    basket->type.e = basket_type_deleted;
}
442
443// Ractor basket
444
445static VALUE ractor_reset_belonging(VALUE obj); // in this file
446
// Extract the payload VALUE from a basket. For copy/move/will baskets the
// object's ractor-belonging is reset on first access and the basket is
// downgraded to a plain reference so repeat access is idempotent.
static VALUE
ractor_basket_value(struct rb_ractor_basket *b)
{
    switch (b->type.e) {
      case basket_type_ref:
        break;
      case basket_type_copy:
      case basket_type_move:
      case basket_type_will:
        b->type.e = basket_type_ref;
        b->p.send.v = ractor_reset_belonging(b->p.send.v);
        break;
      default:
        rb_bug("unreachable");
    }

    return b->p.send.v;
}
465
// Accept a basket on the receiving side: unwrap the value and, if the
// sender flagged it as an exception, re-raise it wrapped in
// Ractor::RemoteError with the original as the cause.
static VALUE
ractor_basket_accept(struct rb_ractor_basket *b)
{
    VALUE v = ractor_basket_value(b);

    if (b->p.send.exception) {
        VALUE cause = v;
        VALUE err = rb_exc_new_cstr(rb_eRactorRemoteError, "thrown by remote Ractor.");
        rb_ivar_set(err, rb_intern("@ractor"), b->sender);
        rb_ec_setup_exception(NULL, err, cause);
        rb_exc_raise(err);
    }

    return v;
}
481
482// Ractor synchronizations
483
#if USE_RUBY_DEBUG_LOG
// Debug-log name for a wait-status bitset (all combinations enumerated,
// hence the (int) cast to allow OR-ed case labels).
static const char *
wait_status_str(enum rb_ractor_wait_status wait_status)
{
    switch ((int)wait_status) {
      case wait_none: return "none";
      case wait_receiving: return "receiving";
      case wait_taking: return "taking";
      case wait_yielding: return "yielding";
      case wait_receiving|wait_taking: return "receiving|taking";
      case wait_receiving|wait_yielding: return "receiving|yielding";
      case wait_taking|wait_yielding: return "taking|yielding";
      case wait_receiving|wait_taking|wait_yielding: return "receiving|taking|yielding";
    }
    rb_bug("unreachable");
}
500
// Debug-log name for a wakeup reason.
static const char *
wakeup_status_str(enum rb_ractor_wakeup_status wakeup_status)
{
    switch (wakeup_status) {
      case wakeup_none: return "none";
      case wakeup_by_send: return "by_send";
      case wakeup_by_yield: return "by_yield";
      case wakeup_by_take: return "by_take";
      case wakeup_by_close: return "by_close";
      case wakeup_by_interrupt: return "by_interrupt";
      case wakeup_by_retry: return "by_retry";
    }
    rb_bug("unreachable");
}
515
// Debug-log name for a basket type.
static const char *
basket_type_name(enum rb_ractor_basket_type type)
{
    switch (type) {
      case basket_type_none: return "none";
      case basket_type_ref: return "ref";
      case basket_type_copy: return "copy";
      case basket_type_move: return "move";
      case basket_type_will: return "will";
      case basket_type_deleted: return "deleted";
      case basket_type_reserved: return "reserved";
      case basket_type_take_basket: return "take_basket";
      case basket_type_yielding: return "yielding";
    }
    VM_ASSERT(0);
    return NULL;
}
533#endif // USE_RUBY_DEBUG_LOG
534
// Is r currently sleeping for (at least one of) the given wait reasons and
// not yet woken up by anyone?
static bool
ractor_sleeping_by(const rb_ractor_t *r, enum rb_ractor_wait_status wait_status)
{
    return (r->sync.wait.status & wait_status) && r->sync.wait.wakeup_status == wakeup_none;
}
540
#ifdef RUBY_THREAD_PTHREAD_H
// pthread builds: the M:N scheduler provides the wakeup primitive.
// thread_*.c
void rb_ractor_sched_wakeup(rb_ractor_t *r);
#else

// win32 builds: wake sleepers via the per-ractor condition variable.
static void
rb_ractor_sched_wakeup(rb_ractor_t *r)
{
    rb_native_cond_broadcast(&r->sync.cond);
}
#endif
552
553
// Wake r if it is sleeping for one of the reasons in wait_status, recording
// why it was woken. Returns true if a wakeup was delivered. Caller must
// hold r's lock.
static bool
ractor_wakeup(rb_ractor_t *r, enum rb_ractor_wait_status wait_status, enum rb_ractor_wakeup_status wakeup_status)
{
    ASSERT_ractor_locking(r);

    RUBY_DEBUG_LOG("r:%u wait_by:%s -> wait:%s wakeup:%s",
                   rb_ractor_id(r),
                   wait_status_str(r->sync.wait.status),
                   wait_status_str(wait_status),
                   wakeup_status_str(wakeup_status));

    if (ractor_sleeping_by(r, wait_status)) {
        r->sync.wait.wakeup_status = wakeup_status;
        rb_ractor_sched_wakeup(r);
        return true;
    }
    else {
        return false;
    }
}
574
// Unblock function: interrupt a ractor sleeping in any wait state
// (installed as the ubf while the ractor sleeps without the GVL).
static void
ractor_sleep_interrupt(void *ptr)
{
    rb_ractor_t *r = ptr;

    RACTOR_LOCK(r);
    {
        ractor_wakeup(r, wait_receiving | wait_taking | wait_yielding, wakeup_by_interrupt);
    }
    RACTOR_UNLOCK(r);
}
586
587typedef void (*ractor_sleep_cleanup_function)(rb_ractor_t *cr, void *p);
588
// Process pending interrupts while in a ractor-sleep loop. Called with cr's
// lock held; temporarily marks the ractor as not-waiting, drops the lock to
// run interrupt checks, then restores the wait state. If the interrupt
// check raises/jumps, cf_func (if any) is given a chance to clean up before
// the jump is re-thrown — note the lock is NOT re-acquired on that path.
static void
ractor_check_ints(rb_execution_context_t *ec, rb_ractor_t *cr, ractor_sleep_cleanup_function cf_func, void *cf_data)
{
    if (cr->sync.wait.status != wait_none) {
        enum rb_ractor_wait_status prev_wait_status = cr->sync.wait.status;
        cr->sync.wait.status = wait_none;
        cr->sync.wait.wakeup_status = wakeup_by_interrupt;

        RACTOR_UNLOCK(cr);
        {
            if (cf_func) {
                // run the check under a tag so the cleanup hook fires before
                // propagating any raise/throw out of rb_ec_check_ints
                enum ruby_tag_type state;
                EC_PUSH_TAG(ec);
                if ((state = EC_EXEC_TAG()) == TAG_NONE) {
                    rb_ec_check_ints(ec);
                }
                EC_POP_TAG();

                if (state) {
                    (*cf_func)(cr, cf_data);
                    EC_JUMP_TAG(ec, state);
                }
            }
            else {
                rb_ec_check_ints(ec);
            }
        }

        // reachable?
        RACTOR_LOCK(cr);
        cr->sync.wait.status = prev_wait_status;
    }
}
622
623#ifdef RUBY_THREAD_PTHREAD_H
624void rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf);
625#else
626
627// win32
// win32
// Wait on the ractor's condvar. In check mode the recorded lock owner is
// cleared across the wait (the mutex is released while waiting) and
// restored afterwards.
static void
ractor_cond_wait(rb_ractor_t *r)
{
#if RACTOR_CHECK_MODE > 0
    VALUE locked_by = r->sync.locked_by;
    r->sync.locked_by = Qnil;
#endif
    rb_native_cond_wait(&r->sync.cond, &r->sync.lock);

#if RACTOR_CHECK_MODE > 0
    r->sync.locked_by = locked_by;
#endif
}
641
// Body executed without the GVL (win32 path): sleep on the condvar until a
// wakeup reason is posted, then clear the wait status.
static void *
ractor_sleep_wo_gvl(void *ptr)
{
    rb_ractor_t *cr = ptr;
    RACTOR_LOCK_SELF(cr);
    {
        VM_ASSERT(cr->sync.wait.status != wait_none);
        // re-check under the lock: a wakeup may have arrived before we slept
        if (cr->sync.wait.wakeup_status == wakeup_none) {
            ractor_cond_wait(cr);
        }
        cr->sync.wait.status = wait_none;
    }
    RACTOR_UNLOCK_SELF(cr);
    return NULL;
}
657
658static void
659rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
660{
661 RACTOR_UNLOCK(cr);
662 {
663 rb_nogvl(ractor_sleep_wo_gvl, cr,
664 ubf, cr,
666 }
667 RACTOR_LOCK(cr);
668}
669#endif
670
// Put the current ractor to sleep until woken for one of wait_status's
// reasons; returns the wakeup reason. cf_func/cf_data (optional) clean up
// registered state if an interrupt aborts the sleep non-locally.
// Caller holds cr's lock.
static enum rb_ractor_wakeup_status
ractor_sleep_with_cleanup(rb_execution_context_t *ec, rb_ractor_t *cr, enum rb_ractor_wait_status wait_status,
                          ractor_sleep_cleanup_function cf_func, void *cf_data)
{
    enum rb_ractor_wakeup_status wakeup_status;
    VM_ASSERT(GET_RACTOR() == cr);

    // TODO: multi-threads
    VM_ASSERT(cr->sync.wait.status == wait_none);
    VM_ASSERT(wait_status != wait_none);
    cr->sync.wait.status = wait_status;
    cr->sync.wait.wakeup_status = wakeup_none;

    // fprintf(stderr, "%s r:%p status:%s, wakeup_status:%s\n", RUBY_FUNCTION_NAME_STRING, (void *)cr,
    //                 wait_status_str(cr->sync.wait.status), wakeup_status_str(cr->sync.wait.wakeup_status));

    RUBY_DEBUG_LOG("sleep by %s", wait_status_str(wait_status));

    // loop guards against spurious wakeups: only exit once a reason is posted
    while (cr->sync.wait.wakeup_status == wakeup_none) {
        rb_ractor_sched_sleep(ec, cr, ractor_sleep_interrupt);
        ractor_check_ints(ec, cr, cf_func, cf_data);
    }

    cr->sync.wait.status = wait_none;

    // TODO: multi-thread
    wakeup_status = cr->sync.wait.wakeup_status;
    cr->sync.wait.wakeup_status = wakeup_none;

    RUBY_DEBUG_LOG("wakeup %s", wakeup_status_str(wakeup_status));

    return wakeup_status;
}
704
// Sleep with no cleanup hook (common case).
static enum rb_ractor_wakeup_status
ractor_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, enum rb_ractor_wait_status wait_status)
{
    return ractor_sleep_with_cleanup(ec, cr, wait_status, 0, NULL);
}
710
711// Ractor.receive
712
// Forbid calling receive/receive_if from inside a receive_if block
// (detected by already owning the receiving mutex).
static void
ractor_recursive_receive_if(rb_ractor_t *r)
{
    if (r->receiving_mutex && rb_mutex_owned_p(r->receiving_mutex)) {
        rb_raise(rb_eRactorError, "can not call receive/receive_if recursively");
    }
}
720
// Non-blocking receive attempt: returns the accepted value, raises if the
// incoming port is closed and empty, or returns Qundef when nothing is
// available yet.
static VALUE
ractor_try_receive(rb_execution_context_t *ec, rb_ractor_t *cr, struct rb_ractor_queue *rq)
{
    struct rb_ractor_basket basket;
    ractor_recursive_receive_if(cr);
    bool received = false;

    RACTOR_LOCK_SELF(cr);
    {
        RUBY_DEBUG_LOG("rq->cnt:%d", rq->cnt);
        received = ractor_queue_deq(cr, rq, &basket);
    }
    RACTOR_UNLOCK_SELF(cr);

    if (!received) {
        if (cr->sync.incoming_port_closed) {
            rb_raise(rb_eRactorClosedError, "The incoming port is already closed");
        }
        return Qundef;
    }
    else {
        // accept outside the lock: may raise (remote exception baskets)
        return ractor_basket_accept(&basket);
    }
}
745
// Block the current ractor until its receive queue has an entry or the
// incoming port is closed.
static void
ractor_wait_receive(rb_execution_context_t *ec, rb_ractor_t *cr, struct rb_ractor_queue *rq)
{
    VM_ASSERT(cr == rb_ec_ractor_ptr(ec));
    ractor_recursive_receive_if(cr);

    RACTOR_LOCK(cr);
    {
        while (ractor_queue_empty_p(cr, rq) && !cr->sync.incoming_port_closed) {
            ractor_sleep(ec, cr, wait_receiving);
        }
    }
    RACTOR_UNLOCK(cr);
}
760
// Ractor.receive: loop try-receive / wait-receive until a value arrives
// (or an error such as a closed port is raised from ractor_try_receive).
static VALUE
ractor_receive(rb_execution_context_t *ec, rb_ractor_t *cr)
{
    VM_ASSERT(cr == rb_ec_ractor_ptr(ec));
    VALUE v;
    struct rb_ractor_queue *rq = &cr->sync.recv_queue;

    while (UNDEF_P(v = ractor_try_receive(ec, cr, rq))) {
        ractor_wait_receive(ec, cr, rq);
    }

    return v;
}
774
#if 0
// Debug-only queue dumper (normally compiled out). Fixed while reviewing:
// the reserved-basket check was missing its closing parenthesis, the enum
// field is b->type.e (basket_type_name takes the enum, not the union), and
// the payload lives at b->p.send.v (see ractor_queue_mark), not b->v.
static void
rq_dump(struct rb_ractor_queue *rq)
{
    bool bug = false;
    for (int i=0; i<rq->cnt; i++) {
        struct rb_ractor_basket *b = ractor_queue_at(NULL, rq, i);
        fprintf(stderr, "%d (start:%d) type:%s %p %s\n", i, rq->start, basket_type_name(b->type.e),
                (void *)b, RSTRING_PTR(RARRAY_AREF(b->p.send.v, 1)));
        if (basket_type_p(b, basket_type_reserved)) bug = true;
    }
    if (bug) rb_bug("!!");
}
#endif
789
791 rb_ractor_t *cr;
792 struct rb_ractor_queue *rq;
793 VALUE v;
794 int index;
795 bool success;
796};
797
// Lock (lazily creating) the mutex that serializes receive_if and marks
// "inside receive_if" for the recursion check.
static void
ractor_receive_if_lock(rb_ractor_t *cr)
{
    VALUE m = cr->receiving_mutex;
    if (m == Qfalse) {
        m = cr->receiving_mutex = rb_mutex_new();
    }
    rb_mutex_lock(m);
}
807
// rb_ensure body for receive_if: yield the candidate value to the user
// block; if the block accepts it (truthy), delete the reserved basket from
// the queue, otherwise restore it to a plain ref basket. Returns the value
// on acceptance, Qundef to keep scanning.
static VALUE
receive_if_body(VALUE ptr)
{
    struct receive_block_data *data = (struct receive_block_data *)ptr;

    ractor_receive_if_lock(data->cr);
    VALUE block_result = rb_yield(data->v);
    rb_ractor_t *cr = data->cr;

    RACTOR_LOCK_SELF(cr);
    {
        struct rb_ractor_basket *b = ractor_queue_at(cr, data->rq, data->index);
        VM_ASSERT(basket_type_p(b, basket_type_reserved));
        data->rq->reserved_cnt--;

        if (RTEST(block_result)) {
            ractor_queue_delete(cr, data->rq, b);
            ractor_queue_compact(cr, data->rq);
        }
        else {
            b->type.e = basket_type_ref;
        }
    }
    RACTOR_UNLOCK_SELF(cr);

    data->success = true;

    if (RTEST(block_result)) {
        return data->v;
    }
    else {
        return Qundef;
    }
}
842
// rb_ensure cleanup for receive_if: if the block raised/jumped before
// completing, tombstone the still-reserved basket; always release the
// receiving mutex.
static VALUE
receive_if_ensure(VALUE v)
{
    struct receive_block_data *data = (struct receive_block_data *)v;
    rb_ractor_t *cr = data->cr;

    if (!data->success) {
        RACTOR_LOCK_SELF(cr);
        {
            struct rb_ractor_basket *b = ractor_queue_at(cr, data->rq, data->index);
            VM_ASSERT(basket_type_p(b, basket_type_reserved));
            b->type.e = basket_type_deleted;
            data->rq->reserved_cnt--;
        }
        RACTOR_UNLOCK_SELF(cr);
    }

    rb_mutex_unlock(cr->receiving_mutex);
    return Qnil;
}
863
// Ractor.receive_if implementation: scan the receive queue for the first
// basket the user block accepts. Each candidate is reserved (so concurrent
// operations keep its index stable) while the block runs outside the lock.
// A queue serial change means entries shifted, so the scan restarts.
static VALUE
ractor_receive_if(rb_execution_context_t *ec, VALUE crv, VALUE b)
{
    if (!RTEST(b)) rb_raise(rb_eArgError, "no block given");

    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
    unsigned int serial = (unsigned int)-1; // force index reset on first pass
    int index = 0;
    struct rb_ractor_queue *rq = &cr->sync.recv_queue;

    while (1) {
        VALUE v = Qundef;

        ractor_wait_receive(ec, cr, rq);

        RACTOR_LOCK_SELF(cr);
        {
            if (serial != rq->serial) {
                serial = rq->serial;
                index = 0;
            }

            // check newer version
            for (int i=index; i<rq->cnt; i++) {
                if (!ractor_queue_skip_p(cr, rq, i)) {
                    struct rb_ractor_basket *b = ractor_queue_at(cr, rq, i);
                    v = ractor_basket_value(b);
                    // reserve: keeps the slot from being physically removed
                    // while the user block runs without the lock
                    b->type.e = basket_type_reserved;
                    rq->reserved_cnt++;
                    index = i;
                    break;
                }
            }
        }
        RACTOR_UNLOCK_SELF(cr);

        if (!UNDEF_P(v)) {
            struct receive_block_data data = {
                .cr = cr,
                .rq = rq,
                .v = v,
                .index = index,
                .success = false,
            };

            VALUE result = rb_ensure(receive_if_body, (VALUE)&data,
                                     receive_if_ensure, (VALUE)&data);

            if (!UNDEF_P(result)) return result;
            index++; // block rejected this entry; continue after it
        }

        RUBY_VM_CHECK_INTS(ec);
    }
}
919
// Deliver a filled basket to r's receive queue and wake it if it is waiting
// to receive. Raises Ractor::ClosedError (outside the lock) if r's incoming
// port is closed.
static void
ractor_send_basket(rb_execution_context_t *ec, rb_ractor_t *r, struct rb_ractor_basket *b)
{
    bool closed = false;

    RACTOR_LOCK(r);
    {
        if (r->sync.incoming_port_closed) {
            closed = true;
        }
        else {
            ractor_queue_enq(r, &r->sync.recv_queue, b);
            ractor_wakeup(r, wait_receiving, wakeup_by_send);
        }
    }
    RACTOR_UNLOCK(r);

    if (closed) {
        rb_raise(rb_eRactorClosedError, "The incoming-port is already closed");
    }
}
941
942// Ractor#send
943
944static VALUE ractor_move(VALUE obj); // in this file
945static VALUE ractor_copy(VALUE obj); // in this file
946
// Decide how obj travels between ractors: shareable objects go by
// reference, otherwise by deep copy (default) or by move when requested.
// Writes the (possibly copied/moved) object and the chosen basket type.
static void
ractor_basket_prepare_contents(VALUE obj, VALUE move, volatile VALUE *pobj, enum rb_ractor_basket_type *ptype)
{
    VALUE v;
    enum rb_ractor_basket_type type;

    if (rb_ractor_shareable_p(obj)) {
        type = basket_type_ref;
        v = obj;
    }
    else if (!RTEST(move)) {
        v = ractor_copy(obj);
        type = basket_type_copy;
    }
    else {
        type = basket_type_move;
        v = ractor_move(obj);
    }

    *pobj = v;
    *ptype = type;
}
969
// Fill the common basket fields: sending ractor, payload, exception flag.
// (Type is set by the callers below.)
static void
ractor_basket_fill_(rb_ractor_t *cr, struct rb_ractor_basket *basket, VALUE obj, bool exc)
{
    VM_ASSERT(cr == GET_RACTOR());

    basket->sender = cr->pub.self;
    basket->p.send.exception = exc;
    basket->p.send.v = obj;
}
979
// Fill a basket for send/yield: prepare (ref/copy/move) the payload and
// record the resulting type.
static void
ractor_basket_fill(rb_ractor_t *cr, struct rb_ractor_basket *basket, VALUE obj, VALUE move, bool exc)
{
    VALUE v;
    enum rb_ractor_basket_type type;
    ractor_basket_prepare_contents(obj, move, &v, &type);
    ractor_basket_fill_(cr, basket, v, exc);
    basket->type.e = type;
}
989
// Fill a basket carrying a ractor's "will" (final value taken after exit).
static void
ractor_basket_fill_will(rb_ractor_t *cr, struct rb_ractor_basket *basket, VALUE obj, bool exc)
{
    ractor_basket_fill_(cr, basket, obj, exc);
    basket->type.e = basket_type_will;
}
996
// Ractor#send: build a basket from obj (copy or move per `move`) and
// deliver it to r. Returns r's self for chaining.
static VALUE
ractor_send(rb_execution_context_t *ec, rb_ractor_t *r, VALUE obj, VALUE move)
{
    struct rb_ractor_basket basket;
    // TODO: Ractor local GC
    ractor_basket_fill(rb_ec_ractor_ptr(ec), &basket, obj, move, false);
    ractor_send_basket(ec, r, &basket);
    return r->pub.self;
}
1006
1007// Ractor#take
1008
// Does r have an unconsumed will (final value) waiting to be taken?
// Caller must hold r's lock.
static bool
ractor_take_has_will(rb_ractor_t *r)
{
    ASSERT_ractor_locking(r);

    return basket_type_p(&r->sync.will_basket, basket_type_will);
}
1016
// Consume r's will into *b if present (resetting the slot to none).
// Returns whether a will was taken. Caller must hold r's lock.
static bool
ractor_take_will(rb_ractor_t *r, struct rb_ractor_basket *b)
{
    ASSERT_ractor_locking(r);

    if (ractor_take_has_will(r)) {
        *b = r->sync.will_basket;
        r->sync.will_basket.type.e = basket_type_none;
        return true;
    }
    else {
        VM_ASSERT(basket_type_p(&r->sync.will_basket, basket_type_none));
        return false;
    }
}
1032
// Locking wrapper around ractor_take_will for callers not holding r's lock.
static bool
ractor_take_will_lock(rb_ractor_t *r, struct rb_ractor_basket *b)
{
    ASSERT_ractor_unlocking(r);
    bool taken;

    RACTOR_LOCK(r);
    {
        taken = ractor_take_will(r, b);
    }
    RACTOR_UNLOCK(r);

    return taken;
}
1047
// Register cr as a taker on r: either grab r's will immediately (plain
// take), note closed-with-will for selectors, fail on a closed outgoing
// port, or enqueue a take-basket in r's takers queue and wake r if it is
// waiting to yield. Returns false (or raises, unless ignore_error) when the
// outgoing port is closed.
static bool
ractor_register_take(rb_ractor_t *cr, rb_ractor_t *r, struct rb_ractor_basket *take_basket,
                     bool is_take, struct rb_ractor_selector_take_config *config, bool ignore_error)
{
    // the queued entry points at take_basket, which the yielder fills in
    struct rb_ractor_basket b = {
        .type.e = basket_type_take_basket,
        .sender = cr->pub.self,
        .p = {
            .take = {
                .basket = take_basket,
                .config = config,
            },
        },
    };
    bool closed = false;

    RACTOR_LOCK(r);
    {
        if (is_take && ractor_take_will(r, take_basket)) {
            RUBY_DEBUG_LOG("take over a will of r:%d", rb_ractor_id(r));
        }
        else if (!is_take && ractor_take_has_will(r)) {
            RUBY_DEBUG_LOG("has_will");
            VM_ASSERT(config != NULL);
            config->closed = true;
        }
        else if (r->sync.outgoing_port_closed) {
            closed = true;
        }
        else {
            RUBY_DEBUG_LOG("register in r:%d", rb_ractor_id(r));
            ractor_queue_enq(r, &r->sync.takers_queue, &b);

            if (basket_none_p(take_basket)) {
                ractor_wakeup(r, wait_yielding, wakeup_by_take);
            }
        }
    }
    RACTOR_UNLOCK(r);

    if (closed) {
        if (!ignore_error) rb_raise(rb_eRactorClosedError, "The outgoing-port is already closed");
        return false;
    }
    else {
        return true;
    }
}
1096
// Remove every takers-queue entry that points at take_basket. Returns
// whether anything was deleted (false also when the port is closed, in
// which case the queue is no longer relevant).
static bool
ractor_deregister_take(rb_ractor_t *r, struct rb_ractor_basket *take_basket)
{
    struct rb_ractor_queue *ts = &r->sync.takers_queue;
    bool deleted = false;

    RACTOR_LOCK(r);
    {
        if (r->sync.outgoing_port_closed) {
            // ok
        }
        else {
            for (int i=0; i<ts->cnt; i++) {
                struct rb_ractor_basket *b = ractor_queue_at(r, ts, i);
                if (basket_type_p(b, basket_type_take_basket) && b->p.take.basket == take_basket) {
                    ractor_queue_delete(r, ts, b);
                    deleted = true;
                }
            }
            if (deleted) {
                ractor_queue_compact(r, ts);
            }
        }
    }
    RACTOR_UNLOCK(r);

    return deleted;
}
1125
// Check (under cr's lock) whether the registered take_basket has been
// filled by a yielder. none/yielding mean "not yet" (Qundef); a deleted
// basket signals the target's outgoing port closed mid-take (raises);
// otherwise accept and return the value.
static VALUE
ractor_try_take(rb_ractor_t *cr, rb_ractor_t *r, struct rb_ractor_basket *take_basket)
{
    bool taken;

    RACTOR_LOCK_SELF(cr);
    {
        if (basket_none_p(take_basket) || basket_type_p(take_basket, basket_type_yielding)) {
            taken = false;
        }
        else {
            taken = true;
        }
    }
    RACTOR_UNLOCK_SELF(cr);

    if (taken) {
        RUBY_DEBUG_LOG("taken");
        if (basket_type_p(take_basket, basket_type_deleted)) {
            VM_ASSERT(r->sync.outgoing_port_closed);
            rb_raise(rb_eRactorClosedError, "The outgoing-port is already closed");
        }
        return ractor_basket_accept(take_basket);
    }
    else {
        RUBY_DEBUG_LOG("not taken");
        return Qundef;
    }
}
1155
1156
#if VM_CHECK_MODE > 0
// Debug-only: is tb still registered in r's takers queue? Used to assert
// that completed takes have been fully deregistered.
static bool
ractor_check_specific_take_basket_lock(rb_ractor_t *r, struct rb_ractor_basket *tb)
{
    bool ret = false;
    struct rb_ractor_queue *ts = &r->sync.takers_queue;

    RACTOR_LOCK(r);
    {
        for (int i=0; i<ts->cnt; i++) {
            struct rb_ractor_basket *b = ractor_queue_at(r, ts, i);
            if (basket_type_p(b, basket_type_take_basket) && b->p.take.basket == tb) {
                ret = true;
                break;
            }
        }
    }
    RACTOR_UNLOCK(r);

    return ret;
}
#endif
1179
// Clean up after an aborted take: if the take basket was never filled,
// deregister it from r; if it is missing from r's queue a yielder is
// mid-yield, so yield the CPU and retry until the basket leaves the
// none state or deregistration succeeds.
static void
ractor_take_cleanup(rb_ractor_t *cr, rb_ractor_t *r, struct rb_ractor_basket *tb)
{
  retry:
    if (basket_none_p(tb)) { // not yielded yet
        if (!ractor_deregister_take(r, tb)) {
            // not in r's takers queue
            rb_thread_sleep(0);
            goto retry;
        }
    }
    else {
        VM_ASSERT(!ractor_check_specific_take_basket_lock(r, tb));
    }
}
1195
1197 rb_ractor_t *r;
1198 struct rb_ractor_basket *tb;
1199};
1200
// Sleep-cleanup trampoline matching ractor_sleep_cleanup_function.
static void
ractor_wait_take_cleanup(rb_ractor_t *cr, void *ptr)
{
    struct take_wait_take_cleanup_data *data = (struct take_wait_take_cleanup_data *)ptr;
    ractor_take_cleanup(cr, data->r, data->tb);
}
1207
// Sleep until the registered take basket is filled; an interrupt during
// the sleep triggers ractor_wait_take_cleanup to deregister the basket.
static void
ractor_wait_take(rb_execution_context_t *ec, rb_ractor_t *cr, rb_ractor_t *r, struct rb_ractor_basket *take_basket)
{
    struct take_wait_take_cleanup_data data = {
        .r = r,
        .tb = take_basket,
    };

    RACTOR_LOCK_SELF(cr);
    {
        // re-check under the lock: skip sleeping if already filled
        if (basket_none_p(take_basket) || basket_type_p(take_basket, basket_type_yielding)) {
            ractor_sleep_with_cleanup(ec, cr, wait_taking, ractor_wait_take_cleanup, &data);
        }
    }
    RACTOR_UNLOCK_SELF(cr);
}
1224
// Ractor#take: register a take basket on r, then loop try/wait until a
// yielder (or r's will) fills it; returns the accepted value.
static VALUE
ractor_take(rb_execution_context_t *ec, rb_ractor_t *r)
{
    RUBY_DEBUG_LOG("from r:%u", rb_ractor_id(r));
    VALUE v;
    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);

    struct rb_ractor_basket take_basket = {
        .type.e = basket_type_none,
        .sender = 0,
    };

    ractor_register_take(cr, r, &take_basket, true, NULL, false);

    while (UNDEF_P(v = ractor_try_take(cr, r, &take_basket))) {
        ractor_wait_take(ec, cr, r, &take_basket);
    }

    VM_ASSERT(!basket_none_p(&take_basket));
    VM_ASSERT(!ractor_check_specific_take_basket_lock(r, &take_basket));

    return v;
}
1248
1249// Ractor.yield
1250
// Is there at least one taker whose take basket is still unfilled (none)?
// Caller must hold cr's lock.
static bool
ractor_check_take_basket(rb_ractor_t *cr, struct rb_ractor_queue *rs)
{
    ASSERT_ractor_locking(cr);

    for (int i=0; i<rs->cnt; i++) {
        struct rb_ractor_basket *b = ractor_queue_at(cr, rs, i);
        if (basket_type_p(b, basket_type_take_basket) &&
            basket_none_p(b->p.take.basket)) {
            return true;
        }
    }

    return false;
}
1266
// Pop a taker whose take basket we can claim. Claiming is done by a CAS
// none->yielding on the taker's basket; losers are re-enqueued, and the
// scan stops if it cycles back to the first re-enqueued taker (loop
// detection). Non-oneshot takers (selectors) are re-enqueued even on
// success so they keep receiving. Returns whether a taker was claimed.
static bool
ractor_deq_take_basket(rb_ractor_t *cr, struct rb_ractor_queue *rs, struct rb_ractor_basket *b)
{
    ASSERT_ractor_unlocking(cr);
    struct rb_ractor_basket *first_tb = NULL;
    bool found = false;

    RACTOR_LOCK_SELF(cr);
    {
        while (ractor_queue_deq(cr, rs, b)) {
            if (basket_type_p(b, basket_type_take_basket)) {
                struct rb_ractor_basket *tb = b->p.take.basket;

                if (RUBY_ATOMIC_CAS(tb->type.atomic, basket_type_none, basket_type_yielding) == basket_type_none) {
                    found = true;
                    break;
                }
                else {
                    ractor_queue_enq(cr, rs, b);
                    if (first_tb == NULL) first_tb = tb;
                    struct rb_ractor_basket *head = ractor_queue_head(cr, rs);
                    VM_ASSERT(head != NULL);
                    if (basket_type_p(head, basket_type_take_basket) && head->p.take.basket == first_tb) {
                        break; // loop detected
                    }
                }
            }
            else {
                VM_ASSERT(basket_none_p(b));
            }
        }

        if (found && b->p.take.config && !b->p.take.config->oneshot) {
            ractor_queue_enq(cr, rs, b);
        }
    }
    RACTOR_UNLOCK_SELF(cr);

    return found;
}
1307
// Try to yield +obj+ to one waiting taker of +cr+.  Returns true when a
// taker's basket was filled, false when nobody is taking.  Raises when
// the outgoing port is closed.  +exc+ marks the value as an exception;
// +is_will+ marks it as the ractor's final "will".
static bool
ractor_try_yield(rb_execution_context_t *ec, rb_ractor_t *cr, struct rb_ractor_queue *ts, volatile VALUE obj, VALUE move, bool exc, bool is_will)
{
    ASSERT_ractor_unlocking(cr);

    struct rb_ractor_basket b;

    if (ractor_deq_take_basket(cr, ts, &b)) {
        // the taker's basket is now claimed as "yielding" by us
        VM_ASSERT(basket_type_p(&b, basket_type_take_basket));
        VM_ASSERT(basket_type_p(b.p.take.basket, basket_type_yielding));

        rb_ractor_t *tr = RACTOR_PTR(b.sender);
        struct rb_ractor_basket *tb = b.p.take.basket;
        enum rb_ractor_basket_type type;

        RUBY_DEBUG_LOG("basket from r:%u", rb_ractor_id(tr));

        if (is_will) {
            type = basket_type_will;
        }
        else {
            enum ruby_tag_type state;

            // begin
            EC_PUSH_TAG(ec);
            if ((state = EC_EXEC_TAG()) == TAG_NONE) {
                // TODO: Ractor local GC
                ractor_basket_prepare_contents(obj, move, &obj, &type);
            }
            EC_POP_TAG();
            // rescue: preparing the contents raised (e.g. unshareable);
            // roll the taker's basket back to "none", restore the taker
            // entry, and re-raise
            if (state) {
                RACTOR_LOCK_SELF(cr);
                {
                    b.p.take.basket->type.e = basket_type_none;
                    ractor_queue_enq(cr, ts, &b);
                }
                RACTOR_UNLOCK_SELF(cr);
                EC_JUMP_TAG(ec, state);
            }
        }

        RACTOR_LOCK(tr);
        {
            VM_ASSERT(basket_type_p(tb, basket_type_yielding));
            // fill atomic
            RUBY_DEBUG_LOG("fill %sbasket from r:%u", is_will ? "will " : "", rb_ractor_id(tr));
            ractor_basket_fill_(cr, tb, obj, exc);
            // publish: yielding -> final type; only we can hold "yielding"
            if (RUBY_ATOMIC_CAS(tb->type.atomic, basket_type_yielding, type) != basket_type_yielding) {
                rb_bug("unreachable");
            }
            ractor_wakeup(tr, wait_taking, wakeup_by_yield);
        }
        RACTOR_UNLOCK(tr);

        return true;
    }
    else if (cr->sync.outgoing_port_closed) {
        rb_raise(rb_eRactorClosedError, "The outgoing-port is already closed");
    }
    else {
        RUBY_DEBUG_LOG("no take basket");
        return false;
    }
}
1373
// Sleep until a taker appears on +ts+ or the outgoing port is closed.
static void
ractor_wait_yield(rb_execution_context_t *ec, rb_ractor_t *cr, struct rb_ractor_queue *ts)
{
    RACTOR_LOCK_SELF(cr);
    {
        while (!ractor_check_take_basket(cr, ts) && !cr->sync.outgoing_port_closed) {
            ractor_sleep(ec, cr, wait_yielding);
        }
    }
    RACTOR_UNLOCK_SELF(cr);
}
1385
1386static VALUE
1387ractor_yield(rb_execution_context_t *ec, rb_ractor_t *cr, VALUE obj, VALUE move)
1388{
1389 struct rb_ractor_queue *ts = &cr->sync.takers_queue;
1390
1391 while (!ractor_try_yield(ec, cr, ts, obj, move, false, false)) {
1392 ractor_wait_yield(ec, cr, ts);
1393 }
1394
1395 return Qnil;
1396}
1397
1398// Ractor::Selector
1399
1401 rb_ractor_t *r;
1402 struct rb_ractor_basket take_basket;
1403 st_table *take_ractors; // rb_ractor_t * => (struct rb_ractor_selector_take_config *)
1404};
1405
1406static int
1407ractor_selector_mark_ractors_i(st_data_t key, st_data_t value, st_data_t data)
1408{
1409 const rb_ractor_t *r = (rb_ractor_t *)key;
1410 rb_gc_mark(r->pub.self);
1411 return ST_CONTINUE;
1412}
1413
// GC mark callback for Ractor::Selector: mark the registered ractors
// and, when the shared take basket currently carries a value, its
// sender and payload.
static void
ractor_selector_mark(void *ptr)
{
    struct rb_ractor_selector *s = ptr;

    if (s->take_ractors) {
        st_foreach(s->take_ractors, ractor_selector_mark_ractors_i, 0);
    }

    switch (s->take_basket.type.e) {
      case basket_type_ref:
      case basket_type_copy:
      case basket_type_move:
      case basket_type_will:
        // basket holds a value: keep its sender and contents alive
        rb_gc_mark(s->take_basket.sender);
        rb_gc_mark(s->take_basket.p.send.v);
        break;
      default:
        break;
    }
}
1435
1436static int
1437ractor_selector_release_i(st_data_t key, st_data_t val, st_data_t data)
1438{
1439 struct rb_ractor_selector *s = (struct rb_ractor_selector *)data;
1441
1442 if (!config->closed) {
1443 ractor_deregister_take((rb_ractor_t *)key, &s->take_basket);
1444 }
1445 free(config);
1446 return ST_CONTINUE;
1447}
1448
1449static void
1450ractor_selector_free(void *ptr)
1451{
1452 struct rb_ractor_selector *s = ptr;
1453 st_foreach(s->take_ractors, ractor_selector_release_i, (st_data_t)s);
1454 st_free_table(s->take_ractors);
1455 ruby_xfree(ptr);
1456}
1457
1458static size_t
1459ractor_selector_memsize(const void *ptr)
1460{
1461 const struct rb_ractor_selector *s = ptr;
1462 return sizeof(struct rb_ractor_selector) +
1463 st_memsize(s->take_ractors) +
1464 s->take_ractors->num_entries * sizeof(struct rb_ractor_selector_take_config);
1465}
1466
// TypedData descriptor for Ractor::Selector objects.
static const rb_data_type_t ractor_selector_data_type = {
    "ractor/selector",
    {
        ractor_selector_mark,
        ractor_selector_free,
        ractor_selector_memsize,
        NULL, // update
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY,
};
1477
1478static struct rb_ractor_selector *
1479RACTOR_SELECTOR_PTR(VALUE selv)
1480{
1481 VM_ASSERT(rb_typeddata_is_kind_of(selv, &ractor_selector_data_type));
1482
1483 return (struct rb_ractor_selector *)DATA_PTR(selv);
1484}
1485
1486// Ractor::Selector.new
1487
1488static VALUE
1489ractor_selector_create(VALUE klass)
1490{
1491 struct rb_ractor_selector *s;
1492 VALUE selv = TypedData_Make_Struct(klass, struct rb_ractor_selector, &ractor_selector_data_type, s);
1493 s->take_basket.type.e = basket_type_reserved;
1494 s->take_ractors = st_init_numtable(); // ractor (ptr) -> take_config
1495 return selv;
1496}
1497
1498// Ractor::Selector#add(r)
1499
1500/*
1501 * call-seq:
1502 * add(ractor) -> ractor
1503 *
1504 * Adds _ractor_ to +self+. Raises an exception if _ractor_ is already added.
1505 * Returns _ractor_.
1506 */
1507static VALUE
1508ractor_selector_add(VALUE selv, VALUE rv)
1509{
1510 if (!rb_ractor_p(rv)) {
1511 rb_raise(rb_eArgError, "Not a ractor object");
1512 }
1513
1514 rb_ractor_t *r = RACTOR_PTR(rv);
1515 struct rb_ractor_selector *s = RACTOR_SELECTOR_PTR(selv);
1516
1517 if (st_lookup(s->take_ractors, (st_data_t)r, NULL)) {
1518 rb_raise(rb_eArgError, "already added");
1519 }
1520
1521 struct rb_ractor_selector_take_config *config = malloc(sizeof(struct rb_ractor_selector_take_config));
1522 VM_ASSERT(config != NULL);
1523 config->closed = false;
1524 config->oneshot = false;
1525
1526 if (ractor_register_take(GET_RACTOR(), r, &s->take_basket, false, config, true)) {
1527 st_insert(s->take_ractors, (st_data_t)r, (st_data_t)config);
1528 }
1529
1530 return rv;
1531}
1532
1533// Ractor::Selector#remove(r)
1534
/* call-seq:
 *   remove(ractor) -> ractor
 *
 * Removes _ractor_ from +self+. Raises an exception if _ractor_ is not added.
 * Returns the removed _ractor_.
 */
static VALUE
ractor_selector_remove(VALUE selv, VALUE rv)
{
    if (!rb_ractor_p(rv)) {
        rb_raise(rb_eArgError, "Not a ractor object");
    }

    rb_ractor_t *r = RACTOR_PTR(rv);
    struct rb_ractor_selector *s = RACTOR_SELECTOR_PTR(selv);

    RUBY_DEBUG_LOG("r:%u", rb_ractor_id(r));

    if (!st_lookup(s->take_ractors, (st_data_t)r, NULL)) {
        rb_raise(rb_eArgError, "not added yet");
    }

    // Unhook the selector's shared take basket from r's takers queue,
    // then drop the table entry and free the per-ractor config.
    // NOTE: st_delete may overwrite r with the stored key (same pointer).
    ractor_deregister_take(r, &s->take_basket);
    struct rb_ractor_selector_take_config *config;
    st_delete(s->take_ractors, (st_data_t *)&r, (st_data_t *)&config);
    free(config);

    return rv;
}
1564
1565// Ractor::Selector#clear
1566
1568 VALUE selv;
1570};
1571
1572static int
1573ractor_selector_clear_i(st_data_t key, st_data_t val, st_data_t data)
1574{
1575 VALUE selv = (VALUE)data;
1576 rb_ractor_t *r = (rb_ractor_t *)key;
1577 ractor_selector_remove(selv, r->pub.self);
1578 return ST_CONTINUE;
1579}
1580
1581/*
1582 * call-seq:
1583 * clear -> self
1584 *
1585 * Removes all ractors from +self+. Raises +self+.
1586 */
1587static VALUE
1588ractor_selector_clear(VALUE selv)
1589{
1590 struct rb_ractor_selector *s = RACTOR_SELECTOR_PTR(selv);
1591
1592 st_foreach(s->take_ractors, ractor_selector_clear_i, (st_data_t)selv);
1593 st_clear(s->take_ractors);
1594 return selv;
1595}
1596
1597/*
1598 * call-seq:
1599 * empty? -> true or false
1600 *
1601 * Returns +true+ if no ractor is added.
1602 */
1603static VALUE
1604ractor_selector_empty_p(VALUE selv)
1605{
1606 struct rb_ractor_selector *s = RACTOR_SELECTOR_PTR(selv);
1607 return s->take_ractors->num_entries == 0 ? Qtrue : Qfalse;
1608}
1609
// st_foreach callback for Ractor::Selector#wait: poll one candidate
// ractor.  Claims its "will" or closed state into the shared take
// basket +tb+ via CAS, or wakes the ractor if it is blocked yielding.
static int
ractor_selector_wait_i(st_data_t key, st_data_t val, st_data_t dat)
{
    rb_ractor_t *r = (rb_ractor_t *)key;
    struct rb_ractor_basket *tb = (struct rb_ractor_basket *)dat;
    int ret;

    // some earlier ractor already filled the shared basket: stop scanning
    if (!basket_none_p(tb)) {
        RUBY_DEBUG_LOG("already taken:%s", basket_type_name(tb->type.e));
        return ST_STOP;
    }

    RACTOR_LOCK(r);
    {
        if (basket_type_p(&r->sync.will_basket, basket_type_will)) {
            RUBY_DEBUG_LOG("r:%u has will", rb_ractor_id(r));

            // claim the shared basket (none -> will) before copying the will
            if (RUBY_ATOMIC_CAS(tb->type.atomic, basket_type_none, basket_type_will) == basket_type_none) {
                ractor_take_will(r, tb);
                ret = ST_STOP;
            }
            else {
                RUBY_DEBUG_LOG("has will, but already taken (%s)", basket_type_name(tb->type.e));
                ret = ST_CONTINUE;
            }
        }
        else if (r->sync.outgoing_port_closed) {
            RUBY_DEBUG_LOG("r:%u is closed", rb_ractor_id(r));

            // record "deleted" so the waiter drops r from the selector
            if (RUBY_ATOMIC_CAS(tb->type.atomic, basket_type_none, basket_type_deleted) == basket_type_none) {
                tb->sender = r->pub.self;
                ret = ST_STOP;
            }
            else {
                RUBY_DEBUG_LOG("closed, but already taken (%s)", basket_type_name(tb->type.e));
                ret = ST_CONTINUE;
            }
        }
        else {
            // ractor may be blocked in Ractor.yield: nudge it
            RUBY_DEBUG_LOG("wakeup r:%u", rb_ractor_id(r));
            ractor_wakeup(r, wait_yielding, wakeup_by_take);
            ret = ST_CONTINUE;
        }
    }
    RACTOR_UNLOCK(r);

    return ret;
}
1658
// Ractor::Selector#wait

// Cleanup run when the selector wait is interrupted: spin until any
// in-flight yield into the shared basket finishes, then reset it to
// "reserved" so the selector can be reused.
static void
ractor_selector_wait_cleaup(rb_ractor_t *cr, void *ptr)
{
    struct rb_ractor_basket *tb = (struct rb_ractor_basket *)ptr;

    RACTOR_LOCK_SELF(cr);
    {
        while (basket_type_p(tb, basket_type_yielding)) rb_thread_sleep(0);
        // if tb->type is not none, taking is succeeded, but interruption ignore it unfortunately.
        tb->type.e = basket_type_reserved;
    }
    RACTOR_UNLOCK_SELF(cr);
}
1674
/* :nodoc: */
// Core of Ractor::Selector#wait and Ractor.select: wait until one of the
// registered ractors yields a value (or dies leaving a will), or — when
// requested — until this ractor can receive or can yield +yield_value+.
// Returns [ractor-or-symbol, value].
static VALUE
ractor_selector__wait(VALUE selv, VALUE do_receivev, VALUE do_yieldv, VALUE yield_value, VALUE move)
{
    rb_execution_context_t *ec = GET_EC();
    struct rb_ractor_selector *s = RACTOR_SELECTOR_PTR(selv);
    struct rb_ractor_basket *tb = &s->take_basket;
    struct rb_ractor_basket taken_basket;
    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
    bool do_receive = !!RTEST(do_receivev);
    bool do_yield = !!RTEST(do_yieldv);
    VALUE ret_v, ret_r;
    enum rb_ractor_wait_status wait_status;
    struct rb_ractor_queue *rq = &cr->sync.recv_queue;
    struct rb_ractor_queue *ts = &cr->sync.takers_queue;

    RUBY_DEBUG_LOG("start");

  retry:
    RUBY_DEBUG_LOG("takers:%ld", s->take_ractors->num_entries);

    // setup wait_status
    wait_status = wait_none;
    if (s->take_ractors->num_entries > 0) wait_status |= wait_taking;
    if (do_receive) wait_status |= wait_receiving;
    if (do_yield) wait_status |= wait_yielding;

    RUBY_DEBUG_LOG("wait:%s", wait_status_str(wait_status));

    if (wait_status == wait_none) {
        rb_raise(rb_eRactorError, "no taking ractors");
    }

    // check recv_queue
    if (do_receive && !UNDEF_P(ret_v = ractor_try_receive(ec, cr, rq))) {
        ret_r = ID2SYM(rb_intern("receive"));
        goto success;
    }

    // check takers
    if (do_yield && ractor_try_yield(ec, cr, ts, yield_value, move, false, false)) {
        ret_v = Qnil;
        ret_r = ID2SYM(rb_intern("yield"));
        goto success;
    }

    // check take_basket
    VM_ASSERT(basket_type_p(&s->take_basket, basket_type_reserved));
    s->take_basket.type.e = basket_type_none;
    // kick all take target ractors
    st_foreach(s->take_ractors, ractor_selector_wait_i, (st_data_t)tb);

    RACTOR_LOCK_SELF(cr);
    {
      retry_waiting:
        while (1) {
            // a yielder filled the shared basket
            if (!basket_none_p(tb)) {
                RUBY_DEBUG_LOG("taken:%s from r:%u", basket_type_name(tb->type.e),
                               tb->sender ? rb_ractor_id(RACTOR_PTR(tb->sender)) : 0);
                break;
            }
            if (do_receive && !ractor_queue_empty_p(cr, rq)) {
                RUBY_DEBUG_LOG("can receive (%d)", rq->cnt);
                break;
            }
            if (do_yield && ractor_check_take_basket(cr, ts)) {
                RUBY_DEBUG_LOG("can yield");
                break;
            }

            ractor_sleep_with_cleanup(ec, cr, wait_status, ractor_selector_wait_cleaup, tb);
        }

        taken_basket = *tb;

        // ensure
        // tb->type.e = basket_type_reserved # do it atomic in the following code
        if (taken_basket.type.e == basket_type_yielding ||
            RUBY_ATOMIC_CAS(tb->type.atomic, taken_basket.type.e, basket_type_reserved) != taken_basket.type.e) {

            // a yield into the basket is still in flight; yield the CPU
            // (outside our own lock) and re-examine
            if (basket_type_p(tb, basket_type_yielding)) {
                RACTOR_UNLOCK_SELF(cr);
                {
                    rb_thread_sleep(0);
                }
                RACTOR_LOCK_SELF(cr);
            }
            goto retry_waiting;
        }
    }
    RACTOR_UNLOCK_SELF(cr);

    // check the taken result
    switch (taken_basket.type.e) {
      case basket_type_none:
        VM_ASSERT(do_receive || do_yield);
        goto retry;
      case basket_type_yielding:
        rb_bug("unreachable");
      case basket_type_deleted: {
          // sender's outgoing port closed: drop it, but take its will if any
          ractor_selector_remove(selv, taken_basket.sender);

          rb_ractor_t *r = RACTOR_PTR(taken_basket.sender);
          if (ractor_take_will_lock(r, &taken_basket)) {
              RUBY_DEBUG_LOG("has_will");
          }
          else {
              RUBY_DEBUG_LOG("no will");
              // rb_raise(rb_eRactorClosedError, "The outgoing-port is already closed");
              // remove and retry wait
              goto retry;
          }
          break;
      }
      case basket_type_will:
        // no more messages
        ractor_selector_remove(selv, taken_basket.sender);
        break;
      default:
        break;
    }

    RUBY_DEBUG_LOG("taken_basket:%s", basket_type_name(taken_basket.type.e));

    ret_v = ractor_basket_accept(&taken_basket);
    ret_r = taken_basket.sender;
  success:
    return rb_ary_new_from_args(2, ret_r, ret_v);
}
1804
1805/*
1806 * call-seq:
1807 * wait(receive: false, yield_value: undef, move: false) -> [ractor, value]
1808 *
1809 * Waits until any ractor in _selector_ can be active.
1810 */
1811static VALUE
1812ractor_selector_wait(int argc, VALUE *argv, VALUE selector)
1813{
1814 VALUE options;
1815 ID keywords[3];
1816 VALUE values[3];
1817
1818 keywords[0] = rb_intern("receive");
1819 keywords[1] = rb_intern("yield_value");
1820 keywords[2] = rb_intern("move");
1821
1822 rb_scan_args(argc, argv, "0:", &options);
1823 rb_get_kwargs(options, keywords, 0, numberof(values), values);
1824 return ractor_selector__wait(selector,
1825 values[0] == Qundef ? Qfalse : RTEST(values[0]),
1826 values[1] != Qundef, values[1], values[2]);
1827}
1828
1829static VALUE
1830ractor_selector_new(int argc, VALUE *ractors, VALUE klass)
1831{
1832 VALUE selector = ractor_selector_create(klass);
1833
1834 for (int i=0; i<argc; i++) {
1835 ractor_selector_add(selector, ractors[i]);
1836 }
1837
1838 return selector;
1839}
1840
// Ractor.select: build a temporary selector over +ractors+, wait on it,
// and ensure the selector is cleared even when the wait raises/jumps.
static VALUE
ractor_select_internal(rb_execution_context_t *ec, VALUE self, VALUE ractors, VALUE do_receive, VALUE do_yield, VALUE yield_value, VALUE move)
{
    VALUE selector = ractor_selector_new(RARRAY_LENINT(ractors), (VALUE *)RARRAY_CONST_PTR(ractors), rb_cRactorSelector);
    VALUE result;
    int state;

    // begin/ensure: deregister all take baskets if the wait is interrupted
    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        result = ractor_selector__wait(selector, do_receive, do_yield, yield_value, move);
    }
    EC_POP_TAG();
    if (state != TAG_NONE) {
        // ensure
        ractor_selector_clear(selector);

        // jump
        EC_JUMP_TAG(ec, state);
    }

    RB_GC_GUARD(ractors);
    return result;
}
1864
1865// Ractor#close_incoming
1866
// Close r's incoming port.  Returns the previous state (Qtrue if it was
// already closed).  Wakes up a thread blocked receiving on the port.
static VALUE
ractor_close_incoming(rb_execution_context_t *ec, rb_ractor_t *r)
{
    VALUE prev;

    RACTOR_LOCK(r);
    {
        if (!r->sync.incoming_port_closed) {
            prev = Qfalse;
            r->sync.incoming_port_closed = true;
            if (ractor_wakeup(r, wait_receiving, wakeup_by_close)) {
                // a receiver can only be asleep when its queue was empty
                VM_ASSERT(ractor_queue_empty_p(r, &r->sync.recv_queue));
                RUBY_DEBUG_LOG("cancel receiving");
            }
        }
        else {
            prev = Qtrue;
        }
    }
    RACTOR_UNLOCK(r);
    return prev;
}
1889
1890// Ractor#close_outgoing
1891
// Close r's outgoing port.  Returns the previous state (Qtrue if it was
// already closed).  Marks every pending taker's basket "deleted" and
// wakes the takers, then wakes any thread blocked yielding.
static VALUE
ractor_close_outgoing(rb_execution_context_t *ec, rb_ractor_t *r)
{
    VALUE prev;

    RACTOR_LOCK(r);
    {
        struct rb_ractor_queue *ts = &r->sync.takers_queue;
        rb_ractor_t *tr;
        struct rb_ractor_basket b;

        if (!r->sync.outgoing_port_closed) {
            prev = Qfalse;
            r->sync.outgoing_port_closed = true;
        }
        else {
            VM_ASSERT(ractor_queue_empty_p(r, ts));
            prev = Qtrue;
        }

        // wakeup all taking ractors
        while (ractor_queue_deq(r, ts, &b)) {
            if (basket_type_p(&b, basket_type_take_basket)) {
                tr = RACTOR_PTR(b.sender);
                struct rb_ractor_basket *tb = b.p.take.basket;

                // claim the taker's empty basket and flip it to "deleted"
                // (none -> yielding -> deleted); skip if already filled
                if (RUBY_ATOMIC_CAS(tb->type.atomic, basket_type_none, basket_type_yielding) == basket_type_none) {
                    b.p.take.basket->sender = r->pub.self;
                    if (RUBY_ATOMIC_CAS(tb->type.atomic, basket_type_yielding, basket_type_deleted) != basket_type_yielding) {
                        rb_bug("unreachable");
                    }
                    RUBY_DEBUG_LOG("set delete for r:%u", rb_ractor_id(RACTOR_PTR(b.sender)));
                }

                if (b.p.take.config) {
                    b.p.take.config->closed = true;
                }

                // TODO: deadlock-able?
                RACTOR_LOCK(tr);
                {
                    ractor_wakeup(tr, wait_taking, wakeup_by_close);
                }
                RACTOR_UNLOCK(tr);
            }
        }

        // raising yielding Ractor
        ractor_wakeup(r, wait_yielding, wakeup_by_close);

        VM_ASSERT(ractor_queue_empty_p(r, ts));
    }
    RACTOR_UNLOCK(r);
    return prev;
}
1947
1948// creation/termination
1949
1950static uint32_t
1951ractor_next_id(void)
1952{
1953 uint32_t id;
1954
1955 id = (uint32_t)(RUBY_ATOMIC_FETCH_ADD(ractor_last_id, 1) + 1);
1956
1957 return id;
1958}
1959
// Link +r+ into the VM's ractor set and make sure it has a GC newobj
// cache.  Caller holds the VM lock unless still in single-ractor mode.
static void
vm_insert_ractor0(rb_vm_t *vm, rb_ractor_t *r, bool single_ractor_mode)
{
    RUBY_DEBUG_LOG("r:%u ractor.cnt:%u++", r->pub.id, vm->ractor.cnt);
    VM_ASSERT(single_ractor_mode || RB_VM_LOCKED_P());

    ccan_list_add_tail(&vm->ractor.set, &r->vmlr_node);
    vm->ractor.cnt++;

    // only the boot-time main ractor arrives with a pre-allocated cache
    // (see rb_ractor_main_alloc())
    if (r->newobj_cache) {
        VM_ASSERT(r == ruby_single_main_ractor);
    }
    else {
        r->newobj_cache = rb_gc_ractor_cache_alloc(r);
    }
}
1976
1977static void
1978cancel_single_ractor_mode(void)
1979{
1980 // enable multi-ractor mode
1981 RUBY_DEBUG_LOG("enable multi-ractor mode");
1982
1983 VALUE was_disabled = rb_gc_enable();
1984
1985 rb_gc_start();
1986
1987 if (was_disabled) {
1988 rb_gc_disable();
1989 }
1990
1991 ruby_single_main_ractor = NULL;
1992 rb_funcall(rb_cRactor, rb_intern("_activated"), 0);
1993}
1994
// Register a newly created ractor with the VM, switching the VM out of
// single-ractor mode when a second ractor appears.
static void
vm_insert_ractor(rb_vm_t *vm, rb_ractor_t *r)
{
    VM_ASSERT(ractor_status_p(r, ractor_created));

    if (rb_multi_ractor_p()) {
        RB_VM_LOCK();
        {
            vm_insert_ractor0(vm, r, false);
            vm_ractor_blocking_cnt_inc(vm, r, __FILE__, __LINE__);
        }
        RB_VM_UNLOCK();
    }
    else {
        if (vm->ractor.cnt == 0) {
            // main ractor
            vm_insert_ractor0(vm, r, true);
            ractor_status_set(r, ractor_blocking);
            ractor_status_set(r, ractor_running);
        }
        else {
            // second ractor ever: leave single-ractor mode first
            cancel_single_ractor_mode();
            vm_insert_ractor0(vm, r, true);
            vm_ractor_blocking_cnt_inc(vm, r, __FILE__, __LINE__);
        }
    }
}
2022
// Unlink a terminating ractor from the VM, free its GC cache, and signal
// the main ractor if it is waiting for all others to finish.
static void
vm_remove_ractor(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(ractor_status_p(cr, ractor_running));
    VM_ASSERT(vm->ractor.cnt > 1);
    VM_ASSERT(cr->threads.cnt == 1);

    RB_VM_LOCK();
    {
        RUBY_DEBUG_LOG("ractor.cnt:%u-- terminate_waiting:%d",
                       vm->ractor.cnt, vm->ractor.sync.terminate_waiting);

        VM_ASSERT(vm->ractor.cnt > 0);
        ccan_list_del(&cr->vmlr_node);

        // we are the last non-main ractor: wake the terminating waiter
        if (vm->ractor.cnt <= 2 && vm->ractor.sync.terminate_waiting) {
            rb_native_cond_signal(&vm->ractor.sync.terminate_cond);
        }
        vm->ractor.cnt--;

        rb_gc_ractor_cache_free(cr->newobj_cache);
        cr->newobj_cache = NULL;

        ractor_status_set(cr, ractor_terminated);
    }
    RB_VM_UNLOCK();
}
2050
2051static VALUE
2052ractor_alloc(VALUE klass)
2053{
2054 rb_ractor_t *r;
2055 VALUE rv = TypedData_Make_Struct(klass, rb_ractor_t, &ractor_data_type, r);
2057 r->pub.self = rv;
2058 VM_ASSERT(ractor_status_p(r, ractor_created));
2059 return rv;
2060}
2061
2063rb_ractor_main_alloc(void)
2064{
2065 rb_ractor_t *r = ruby_mimcalloc(1, sizeof(rb_ractor_t));
2066 if (r == NULL) {
2067 fprintf(stderr, "[FATAL] failed to allocate memory for main ractor\n");
2068 exit(EXIT_FAILURE);
2069 }
2070 r->pub.id = ++ractor_last_id;
2071 r->loc = Qnil;
2072 r->name = Qnil;
2073 r->pub.self = Qnil;
2074 r->newobj_cache = rb_gc_ractor_cache_alloc(r);
2075 ruby_single_main_ractor = r;
2076
2077 return r;
2078}
2079
#if defined(HAVE_WORKING_FORK)
// After fork(2) only the forking thread survives: reset the VM back to
// single-ractor mode with th->ractor as the (sole) main ractor.
void
rb_ractor_atfork(rb_vm_t *vm, rb_thread_t *th)
{
    // initialize as a main ractor
    vm->ractor.cnt = 0;
    vm->ractor.blocking_cnt = 0;
    ruby_single_main_ractor = th->ractor;
    th->ractor->status_ = ractor_created;

    rb_ractor_living_threads_init(th->ractor);
    rb_ractor_living_threads_insert(th->ractor, th);

    VM_ASSERT(vm->ractor.blocking_cnt == 0);
    VM_ASSERT(vm->ractor.cnt == 1);
}
#endif
2097
2098void rb_thread_sched_init(struct rb_thread_sched *, bool atfork);
2099
2100void
2101rb_ractor_living_threads_init(rb_ractor_t *r)
2102{
2103 ccan_list_head_init(&r->threads.set);
2104 r->threads.cnt = 0;
2105 r->threads.blocking_cnt = 0;
2106}
2107
2108static void
2109ractor_init(rb_ractor_t *r, VALUE name, VALUE loc)
2110{
2111 ractor_queue_setup(&r->sync.recv_queue);
2112 ractor_queue_setup(&r->sync.takers_queue);
2113 rb_native_mutex_initialize(&r->sync.lock);
2114 rb_native_cond_initialize(&r->barrier_wait_cond);
2115
2116#ifdef RUBY_THREAD_WIN32_H
2117 rb_native_cond_initialize(&r->sync.cond);
2118 rb_native_cond_initialize(&r->barrier_wait_cond);
2119#endif
2120
2121 // thread management
2122 rb_thread_sched_init(&r->threads.sched, false);
2123 rb_ractor_living_threads_init(r);
2124
2125 // naming
2126 if (!NIL_P(name)) {
2127 rb_encoding *enc;
2128 StringValueCStr(name);
2129 enc = rb_enc_get(name);
2130 if (!rb_enc_asciicompat(enc)) {
2131 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
2132 rb_enc_name(enc));
2133 }
2134 name = rb_str_new_frozen(name);
2135 }
2136 r->name = name;
2137 r->loc = loc;
2138}
2139
// Finish booting the main ractor: wrap it in a (shareable) Ractor object
// and register the boot thread as its main thread.
void
rb_ractor_main_setup(rb_vm_t *vm, rb_ractor_t *r, rb_thread_t *th)
{
    r->pub.self = TypedData_Wrap_Struct(rb_cRactor, &ractor_data_type, r);
    FL_SET_RAW(r->pub.self, RUBY_FL_SHAREABLE);
    ractor_init(r, Qnil, Qnil);
    r->threads.main = th;
    rb_ractor_living_threads_insert(r, th);
}
2149
// Ractor.new implementation: allocate and initialize a ractor, then
// spawn its first thread running +block+ with +args+.
static VALUE
ractor_create(rb_execution_context_t *ec, VALUE self, VALUE loc, VALUE name, VALUE args, VALUE block)
{
    VALUE rv = ractor_alloc(self);
    rb_ractor_t *r = RACTOR_PTR(rv);
    ractor_init(r, name, loc);

    // can block here
    r->pub.id = ractor_next_id();
    RUBY_DEBUG_LOG("r:%u", r->pub.id);

    // inherit verbose/debug flags from the creating ractor
    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
    r->verbose = cr->verbose;
    r->debug = cr->debug;

    rb_yjit_before_ractor_spawn();
    rb_thread_create_ractor(r, args, block);

    RB_GC_GUARD(rv);
    return rv;
}
2171
2172static VALUE
2173ractor_create_func(VALUE klass, VALUE loc, VALUE name, VALUE args, rb_block_call_func_t func)
2174{
2175 VALUE block = rb_proc_new(func, Qnil);
2176 return ractor_create(rb_current_ec_noinline(), klass, loc, name, args, block);
2177}
2178
// At ractor exit, try to hand the final value +v+ (or exception) to a
// waiting taker; if nobody is taking, store it as the ractor's "will"
// for a later Ractor#take.
static void
ractor_yield_atexit(rb_execution_context_t *ec, rb_ractor_t *cr, VALUE v, bool exc)
{
    if (cr->sync.outgoing_port_closed) {
        return;
    }

    ASSERT_ractor_unlocking(cr);

    struct rb_ractor_queue *ts = &cr->sync.takers_queue;

  retry:
    if (ractor_try_yield(ec, cr, ts, v, Qfalse, exc, true)) {
        // OK.
    }
    else {
        bool retry = false;
        RACTOR_LOCK(cr);
        {
            // re-check under the lock: a taker may have arrived between
            // the failed try_yield and here
            if (!ractor_check_take_basket(cr, ts)) {
                VM_ASSERT(cr->sync.wait.status == wait_none);
                RUBY_DEBUG_LOG("leave a will");
                ractor_basket_fill_will(cr, &cr->sync.will_basket, v, exc);
            }
            else {
                RUBY_DEBUG_LOG("rare timing!");
                retry = true; // another ractor is waiting for the yield.
            }
        }
        RACTOR_UNLOCK(cr);

        if (retry) goto retry;
    }
}
2213
2214void
2215rb_ractor_atexit(rb_execution_context_t *ec, VALUE result)
2216{
2217 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
2218 ractor_yield_atexit(ec, cr, result, false);
2219}
2220
2221void
2222rb_ractor_atexit_exception(rb_execution_context_t *ec)
2223{
2224 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
2225 ractor_yield_atexit(ec, cr, ec->errinfo, true);
2226}
2227
// Tear down the current ractor: close both ports and detach the main
// thread pointer.
void
rb_ractor_teardown(rb_execution_context_t *ec)
{
    rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
    ractor_close_incoming(ec, cr);
    ractor_close_outgoing(ec, cr);

    // sync with rb_ractor_terminate_interrupt_main_thread()
    RB_VM_LOCK_ENTER();
    {
        VM_ASSERT(cr->threads.main != NULL);
        cr->threads.main = NULL;
    }
    RB_VM_LOCK_LEAVE();
}
2243
2244void
2245rb_ractor_receive_parameters(rb_execution_context_t *ec, rb_ractor_t *r, int len, VALUE *ptr)
2246{
2247 for (int i=0; i<len; i++) {
2248 ptr[i] = ractor_receive(ec, r);
2249 }
2250}
2251
2252void
2253rb_ractor_send_parameters(rb_execution_context_t *ec, rb_ractor_t *r, VALUE args)
2254{
2255 int len = RARRAY_LENINT(args);
2256 for (int i=0; i<len; i++) {
2257 ractor_send(ec, r, RARRAY_AREF(args, i), false);
2258 }
2259}
2260
2261bool
2262rb_ractor_main_p_(void)
2263{
2264 VM_ASSERT(rb_multi_ractor_p());
2265 rb_execution_context_t *ec = GET_EC();
2266 return rb_ec_ractor_ptr(ec) == rb_ec_vm_ptr(ec)->ractor.main_ractor;
2267}
2268
2269bool
2270rb_obj_is_main_ractor(VALUE gv)
2271{
2272 if (!rb_ractor_p(gv)) return false;
2273 rb_ractor_t *r = DATA_PTR(gv);
2274 return r == GET_VM()->ractor.main_ractor;
2275}
2276
// Number of living threads belonging to +r+.
int
rb_ractor_living_thread_num(const rb_ractor_t *r)
{
    return r->threads.cnt;
}
2282
2283// only for current ractor
2284VALUE
2285rb_ractor_thread_list(void)
2286{
2287 rb_ractor_t *r = GET_RACTOR();
2288 rb_thread_t *th = 0;
2289 VALUE ary = rb_ary_new();
2290
2291 ccan_list_for_each(&r->threads.set, th, lt_node) {
2292 switch (th->status) {
2293 case THREAD_RUNNABLE:
2294 case THREAD_STOPPED:
2295 case THREAD_STOPPED_FOREVER:
2296 rb_ary_push(ary, th->self);
2297 default:
2298 break;
2299 }
2300 }
2301
2302 return ary;
2303}
2304
// Register +th+ as a living thread of +r+; the ractor's first thread
// also installs the ractor into the VM.
void
rb_ractor_living_threads_insert(rb_ractor_t *r, rb_thread_t *th)
{
    VM_ASSERT(th != NULL);

    RACTOR_LOCK(r);
    {
        RUBY_DEBUG_LOG("r(%d)->threads.cnt:%d++", r->pub.id, r->threads.cnt);
        ccan_list_add_tail(&r->threads.set, &th->lt_node);
        r->threads.cnt++;
    }
    RACTOR_UNLOCK(r);

    // first thread for a ractor
    if (r->threads.cnt == 1) {
        VM_ASSERT(ractor_status_p(r, ractor_created));
        vm_insert_ractor(th->vm, r);
    }
}
2324
// Mark +r+ as blocking and bump the VM-wide blocking ractor count.
static void
vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *r, const char *file, int line)
{
    ractor_status_set(r, ractor_blocking);

    RUBY_DEBUG_LOG2(file, line, "vm->ractor.blocking_cnt:%d++", vm->ractor.blocking_cnt);
    vm->ractor.blocking_cnt++;
    VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
}
2334
// Public variant of vm_ractor_blocking_cnt_inc(): requires the VM lock
// and that +cr+ is the current ractor.
void
rb_vm_ractor_blocking_cnt_inc(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    ASSERT_vm_locking();
    VM_ASSERT(GET_RACTOR() == cr);
    vm_ractor_blocking_cnt_inc(vm, cr, file, line);
}
2342
// Reverse of rb_vm_ractor_blocking_cnt_inc(): mark +cr+ running again
// and drop the VM-wide blocking ractor count.  Requires the VM lock.
void
rb_vm_ractor_blocking_cnt_dec(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    ASSERT_vm_locking();
    VM_ASSERT(GET_RACTOR() == cr);

    RUBY_DEBUG_LOG2(file, line, "vm->ractor.blocking_cnt:%d--", vm->ractor.blocking_cnt);
    VM_ASSERT(vm->ractor.blocking_cnt > 0);
    vm->ractor.blocking_cnt--;

    ractor_status_set(cr, ractor_running);
}
2355
// Called when one more thread of +cr+ is about to block: if every
// remaining thread of the ractor would then be blocking, flip the whole
// ractor to the "blocking" state under the VM lock.
static void
ractor_check_blocking(rb_ractor_t *cr, unsigned int remained_thread_cnt, const char *file, int line)
{
    VM_ASSERT(cr == GET_RACTOR());

    RUBY_DEBUG_LOG2(file, line,
                    "cr->threads.cnt:%u cr->threads.blocking_cnt:%u vm->ractor.cnt:%u vm->ractor.blocking_cnt:%u",
                    cr->threads.cnt, cr->threads.blocking_cnt,
                    GET_VM()->ractor.cnt, GET_VM()->ractor.blocking_cnt);

    VM_ASSERT(cr->threads.cnt >= cr->threads.blocking_cnt + 1);

    if (remained_thread_cnt > 0 &&
        // will be block
        cr->threads.cnt == cr->threads.blocking_cnt + 1) {
        // change ractor status: running -> blocking
        rb_vm_t *vm = GET_VM();

        RB_VM_LOCK_ENTER();
        {
            rb_vm_ractor_blocking_cnt_inc(vm, cr, file, line);
        }
        RB_VM_LOCK_LEAVE();
    }
}
2381
2382void rb_threadptr_remove(rb_thread_t *th);
2383
// Deregister a dying thread from its ractor; the ractor's last thread
// removes the ractor from the VM instead.
void
rb_ractor_living_threads_remove(rb_ractor_t *cr, rb_thread_t *th)
{
    VM_ASSERT(cr == GET_RACTOR());
    RUBY_DEBUG_LOG("r->threads.cnt:%d--", cr->threads.cnt);
    // account the ractor as blocking if this leaves only blocked threads
    ractor_check_blocking(cr, cr->threads.cnt - 1, __FILE__, __LINE__);

    rb_threadptr_remove(th);

    if (cr->threads.cnt == 1) {
        vm_remove_ractor(th->vm, cr);
    }
    else {
        RACTOR_LOCK(cr);
        {
            ccan_list_del(&th->lt_node);
            cr->threads.cnt--;
        }
        RACTOR_UNLOCK(cr);
    }
}
2405
// A thread of +cr+ is entering a blocking region: account it, possibly
// flipping the whole ractor to "blocking" first.
void
rb_ractor_blocking_threads_inc(rb_ractor_t *cr, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "cr->threads.blocking_cnt:%d++", cr->threads.blocking_cnt);

    VM_ASSERT(cr->threads.cnt > 0);
    VM_ASSERT(cr == GET_RACTOR());

    ractor_check_blocking(cr, cr->threads.cnt, __FILE__, __LINE__);
    cr->threads.blocking_cnt++;
}
2417
// A thread of +cr+ is leaving a blocking region: if the whole ractor was
// accounted as blocking, mark it running again first (under the VM lock).
void
rb_ractor_blocking_threads_dec(rb_ractor_t *cr, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line,
                    "r->threads.blocking_cnt:%d--, r->threads.cnt:%u",
                    cr->threads.blocking_cnt, cr->threads.cnt);

    VM_ASSERT(cr == GET_RACTOR());

    // every thread was blocking => the ractor itself was counted blocking
    if (cr->threads.cnt == cr->threads.blocking_cnt) {
        rb_vm_t *vm = GET_VM();

        RB_VM_LOCK_ENTER();
        {
            rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
        }
        RB_VM_LOCK_LEAVE();
    }

    cr->threads.blocking_cnt--;
}
2439
// For the VM barrier: interrupt another ractor's running thread so it
// reaches a barrier checkpoint.  Requires the VM lock; +r+ must not be
// the current ractor and must not be locked yet.
void
rb_ractor_vm_barrier_interrupt_running_thread(rb_ractor_t *r)
{
    VM_ASSERT(r != GET_RACTOR());
    ASSERT_ractor_unlocking(r);
    ASSERT_vm_locking();

    RACTOR_LOCK(r);
    {
        if (ractor_status_p(r, ractor_running)) {
            rb_execution_context_t *ec = r->threads.running_ec;
            if (ec) {
                RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec);
            }
        }
    }
    RACTOR_UNLOCK(r);
}
2458
// Request termination of another ractor by flagging and interrupting its
// main thread. A no-op when the main thread is already killed or absent.
// Caller must hold the VM lock but NOT `r`'s ractor lock.
void
rb_ractor_terminate_interrupt_main_thread(rb_ractor_t *r)
{
    VM_ASSERT(r != GET_RACTOR());
    ASSERT_ractor_unlocking(r);
    ASSERT_vm_locking();

    rb_thread_t *main_th = r->threads.main;
    if (main_th) {
        if (main_th->status != THREAD_KILLED) {
            RUBY_VM_SET_TERMINATE_INTERRUPT(main_th->ec);
            rb_threadptr_interrupt(main_th);
        }
        else {
            RUBY_DEBUG_LOG("killed (%p)", (void *)main_th);
        }
    }
}
2477
2478void rb_thread_terminate_all(rb_thread_t *th); // thread.c
2479
// Send a terminate notification to every ractor except the main one.
// Only does work when more than one ractor is alive.
static void
ractor_terminal_interrupt_all(rb_vm_t *vm)
{
    if (vm->ractor.cnt > 1) {
        // send terminate notification to all ractors
        rb_ractor_t *r = 0;
        ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
            if (r != vm->ractor.main_ractor) {
                RUBY_DEBUG_LOG("r:%d", rb_ractor_id(r));
                rb_ractor_terminate_interrupt_main_thread(r);
            }
        }
    }
}
2494
2495void rb_add_running_thread(rb_thread_t *th);
2496void rb_del_running_thread(rb_thread_t *th);
2497
// Terminate all non-main ractors and all non-main threads of the main
// ractor, then wait (re-notifying every second) until only the main
// ractor remains. Must be called from the main ractor's main thread.
void
rb_ractor_terminate_all(void)
{
    rb_vm_t *vm = GET_VM();
    rb_ractor_t *cr = vm->ractor.main_ractor;

    RUBY_DEBUG_LOG("ractor.cnt:%d", (int)vm->ractor.cnt);

    VM_ASSERT(cr == GET_RACTOR()); // only main-ractor's main-thread should kick it.

    if (vm->ractor.cnt > 1) {
        RB_VM_LOCK();
        {
            ractor_terminal_interrupt_all(vm); // kill all ractors
        }
        RB_VM_UNLOCK();
    }
    rb_thread_terminate_all(GET_THREAD()); // kill other threads in main-ractor and wait

    RB_VM_LOCK();
    {
        while (vm->ractor.cnt > 1) {
            RUBY_DEBUG_LOG("terminate_waiting:%d", vm->ractor.sync.terminate_waiting);
            vm->ractor.sync.terminate_waiting = true;

            // wait for 1sec
            // count this thread as blocking while it sleeps on the cond var
            rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
            rb_del_running_thread(rb_ec_thread_ptr(cr->threads.running_ec));
            rb_vm_cond_timedwait(vm, &vm->ractor.sync.terminate_cond, 1000 /* ms */);
            rb_add_running_thread(rb_ec_thread_ptr(cr->threads.running_ec));
            rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

            // re-notify stragglers and loop until they are all gone
            ractor_terminal_interrupt_all(vm);
        }
    }
    RB_VM_UNLOCK();
}
2535
2537rb_vm_main_ractor_ec(rb_vm_t *vm)
2538{
2539 /* This code needs to carefully work around two bugs:
2540 * - Bug #20016: When M:N threading is enabled, running_ec is NULL if no thread is
2541 * actually currently running (as opposed to without M:N threading, when
2542 * running_ec will still point to the _last_ thread which ran)
2543 * - Bug #20197: If the main thread is sleeping, setting its postponed job
2544 * interrupt flag is pointless; it won't look at the flag until it stops sleeping
2545 * for some reason. It would be better to set the flag on the running ec, which
2546 * will presumably look at it soon.
2547 *
2548 * Solution: use running_ec if it's set, otherwise fall back to the main thread ec.
2549 * This is still susceptible to some rare race conditions (what if the last thread
2550 * to run just entered a long-running sleep?), but seems like the best balance of
2551 * robustness and complexity.
2552 */
2553 rb_execution_context_t *running_ec = vm->ractor.main_ractor->threads.running_ec;
2554 if (running_ec) { return running_ec; }
2555 return vm->ractor.main_thread->ec;
2556}
2557
// Handler installed on Ractor::MovedObject for every visible method:
// any attempt to use a moved object raises Ractor::MovedError.
static VALUE
ractor_moved_missing(int argc, VALUE *argv, VALUE self)
{
    rb_raise(rb_eRactorMovedError, "can not send any methods to a moved object");
}
2563
2564#ifndef USE_RACTOR_SELECTOR
2565#define USE_RACTOR_SELECTOR 0
2566#endif
2567
2568RUBY_SYMBOL_EXPORT_BEGIN
2569void rb_init_ractor_selector(void);
2570RUBY_SYMBOL_EXPORT_END
2571
2572/*
2573 * Document-class: Ractor::Selector
2574 * :nodoc: currently
2575 *
2576 * Selects multiple Ractors to be activated.
2577 */
// Define the Ractor::Selector class and its methods.
// Called from Init_Ractor only when USE_RACTOR_SELECTOR is enabled, but
// exported so it can also be initialized elsewhere.
void
rb_init_ractor_selector(void)
{
    rb_cRactorSelector = rb_define_class_under(rb_cRactor, "Selector", rb_cObject);
    rb_undef_alloc_func(rb_cRactorSelector); // instances are created via Selector.new only

    rb_define_singleton_method(rb_cRactorSelector, "new", ractor_selector_new , -1);
    rb_define_method(rb_cRactorSelector, "add", ractor_selector_add, 1);
    rb_define_method(rb_cRactorSelector, "remove", ractor_selector_remove, 1);
    rb_define_method(rb_cRactorSelector, "clear", ractor_selector_clear, 0);
    rb_define_method(rb_cRactorSelector, "empty?", ractor_selector_empty_p, 0);
    rb_define_method(rb_cRactorSelector, "wait", ractor_selector_wait, -1);
    rb_define_method(rb_cRactorSelector, "_wait", ractor_selector__wait, 4);
}
2592
2593/*
2594 * Document-class: Ractor::ClosedError
2595 *
2596 * Raised when an attempt is made to send a message to a closed port,
2597 * or to retrieve a message from a closed and empty port.
2598 * Ports may be closed explicitly with Ractor#close_outgoing/close_incoming
2599 * and are closed implicitly when a Ractor terminates.
2600 *
2601 * r = Ractor.new { sleep(500) }
2602 * r.close_outgoing
2603 * r.take # Ractor::ClosedError
2604 *
2605 * ClosedError is a descendant of StopIteration, so the closing of the ractor will break
2606 * the loops without propagating the error:
2607 *
2608 * r = Ractor.new do
2609 * loop do
2610 * msg = receive # raises ClosedError and loop traps it
2611 * puts "Received: #{msg}"
2612 * end
2613 * puts "loop exited"
2614 * end
2615 *
2616 * 3.times{|i| r << i}
2617 * r.close_incoming
2618 * r.take
2619 * puts "Continue successfully"
2620 *
2621 * This will print:
2622 *
2623 * Received: 0
2624 * Received: 1
2625 * Received: 2
2626 * loop exited
2627 * Continue successfully
2628 */
2629
2630/*
2631 * Document-class: Ractor::RemoteError
2632 *
 * Raised on an attempt to Ractor#take if there was an uncaught exception in the Ractor.
2634 * Its +cause+ will contain the original exception, and +ractor+ is the original ractor
2635 * it was raised in.
2636 *
2637 * r = Ractor.new { raise "Something weird happened" }
2638 *
2639 * begin
2640 * r.take
2641 * rescue => e
2642 * p e # => #<Ractor::RemoteError: thrown by remote Ractor.>
2643 * p e.ractor == r # => true
2644 * p e.cause # => #<RuntimeError: Something weird happened>
2645 * end
2646 *
2647 */
2648
2649/*
2650 * Document-class: Ractor::MovedError
2651 *
2652 * Raised on an attempt to access an object which was moved in Ractor#send or Ractor.yield.
2653 *
2654 * r = Ractor.new { sleep }
2655 *
2656 * ary = [1, 2, 3]
2657 * r.send(ary, move: true)
2658 * ary.inspect
2659 * # Ractor::MovedError (can not send any methods to a moved object)
2660 *
2661 */
2662
2663/*
2664 * Document-class: Ractor::MovedObject
2665 *
2666 * A special object which replaces any value that was moved to another ractor in Ractor#send
2667 * or Ractor.yield. Any attempt to access the object results in Ractor::MovedError.
2668 *
2669 * r = Ractor.new { receive }
2670 *
2671 * ary = [1, 2, 3]
2672 * r.send(ary, move: true)
2673 * p Ractor::MovedObject === ary
2674 * # => true
2675 * ary.inspect
2676 * # Ractor::MovedError (can not send any methods to a moved object)
2677 */
2678
2679// Main docs are in ractor.rb, but without this clause there are weird artifacts
2680// in their rendering.
2681/*
2682 * Document-class: Ractor
2683 *
2684 */
2685
2686void
2687Init_Ractor(void)
2688{
2689 rb_cRactor = rb_define_class("Ractor", rb_cObject);
2691
2692 rb_eRactorError = rb_define_class_under(rb_cRactor, "Error", rb_eRuntimeError);
2693 rb_eRactorIsolationError = rb_define_class_under(rb_cRactor, "IsolationError", rb_eRactorError);
2694 rb_eRactorRemoteError = rb_define_class_under(rb_cRactor, "RemoteError", rb_eRactorError);
2695 rb_eRactorMovedError = rb_define_class_under(rb_cRactor, "MovedError", rb_eRactorError);
2696 rb_eRactorClosedError = rb_define_class_under(rb_cRactor, "ClosedError", rb_eStopIteration);
2697 rb_eRactorUnsafeError = rb_define_class_under(rb_cRactor, "UnsafeError", rb_eRactorError);
2698
2699 rb_cRactorMovedObject = rb_define_class_under(rb_cRactor, "MovedObject", rb_cBasicObject);
2700 rb_undef_alloc_func(rb_cRactorMovedObject);
2701 rb_define_method(rb_cRactorMovedObject, "method_missing", ractor_moved_missing, -1);
2702
2703 // override methods defined in BasicObject
2704 rb_define_method(rb_cRactorMovedObject, "__send__", ractor_moved_missing, -1);
2705 rb_define_method(rb_cRactorMovedObject, "!", ractor_moved_missing, -1);
2706 rb_define_method(rb_cRactorMovedObject, "==", ractor_moved_missing, -1);
2707 rb_define_method(rb_cRactorMovedObject, "!=", ractor_moved_missing, -1);
2708 rb_define_method(rb_cRactorMovedObject, "__id__", ractor_moved_missing, -1);
2709 rb_define_method(rb_cRactorMovedObject, "equal?", ractor_moved_missing, -1);
2710 rb_define_method(rb_cRactorMovedObject, "instance_eval", ractor_moved_missing, -1);
2711 rb_define_method(rb_cRactorMovedObject, "instance_exec", ractor_moved_missing, -1);
2712
2713 // internal
2714
2715#if USE_RACTOR_SELECTOR
2716 rb_init_ractor_selector();
2717#endif
2718}
2719
2720void
2721rb_ractor_dump(void)
2722{
2723 rb_vm_t *vm = GET_VM();
2724 rb_ractor_t *r = 0;
2725
2726 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
2727 if (r != vm->ractor.main_ractor) {
2728 fprintf(stderr, "r:%u (%s)\n", r->pub.id, ractor_status_str(r->status_));
2729 }
2730 }
2731}
2732
2733VALUE
2735{
2736 if (rb_ractor_main_p()) {
2737 return rb_stdin;
2738 }
2739 else {
2740 rb_ractor_t *cr = GET_RACTOR();
2741 return cr->r_stdin;
2742 }
2743}
2744
2745VALUE
2746rb_ractor_stdout(void)
2747{
2748 if (rb_ractor_main_p()) {
2749 return rb_stdout;
2750 }
2751 else {
2752 rb_ractor_t *cr = GET_RACTOR();
2753 return cr->r_stdout;
2754 }
2755}
2756
2757VALUE
2758rb_ractor_stderr(void)
2759{
2760 if (rb_ractor_main_p()) {
2761 return rb_stderr;
2762 }
2763 else {
2764 rb_ractor_t *cr = GET_RACTOR();
2765 return cr->r_stderr;
2766 }
2767}
2768
2769void
2771{
2772 if (rb_ractor_main_p()) {
2773 rb_stdin = in;
2774 }
2775 else {
2776 rb_ractor_t *cr = GET_RACTOR();
2777 RB_OBJ_WRITE(cr->pub.self, &cr->r_stdin, in);
2778 }
2779}
2780
2781void
2783{
2784 if (rb_ractor_main_p()) {
2785 rb_stdout = out;
2786 }
2787 else {
2788 rb_ractor_t *cr = GET_RACTOR();
2789 RB_OBJ_WRITE(cr->pub.self, &cr->r_stdout, out);
2790 }
2791}
2792
2793void
2795{
2796 if (rb_ractor_main_p()) {
2797 rb_stderr = err;
2798 }
2799 else {
2800 rb_ractor_t *cr = GET_RACTOR();
2801 RB_OBJ_WRITE(cr->pub.self, &cr->r_stderr, err);
2802 }
2803}
2804
2806rb_ractor_hooks(rb_ractor_t *cr)
2807{
2808 return &cr->pub.hooks;
2809}
2810
2812
2813// 2: stop search
2814// 1: skip child
2815// 0: continue
2816
// Result of an object-traversal callback (see the legend above:
// stop = abort the whole search, skip = do not descend into children,
// cont = keep traversing).
enum obj_traverse_iterator_result {
    traverse_cont,
    traverse_skip,
    traverse_stop,
};
2822
2823typedef enum obj_traverse_iterator_result (*rb_obj_traverse_enter_func)(VALUE obj);
2824typedef enum obj_traverse_iterator_result (*rb_obj_traverse_leave_func)(VALUE obj);
2825typedef enum obj_traverse_iterator_result (*rb_obj_traverse_final_func)(VALUE obj);
2826
2827static enum obj_traverse_iterator_result null_leave(VALUE obj);
2828
2830 rb_obj_traverse_enter_func enter_func;
2831 rb_obj_traverse_leave_func leave_func;
2832
2833 st_table *rec;
2834 VALUE rec_hash;
2835};
2836
2837
2839 bool stop;
2840 struct obj_traverse_data *data;
2841};
2842
2843static int obj_traverse_i(VALUE obj, struct obj_traverse_data *data);
2844
2845static int
2846obj_hash_traverse_i(VALUE key, VALUE val, VALUE ptr)
2847{
2849
2850 if (obj_traverse_i(key, d->data)) {
2851 d->stop = true;
2852 return ST_STOP;
2853 }
2854
2855 if (obj_traverse_i(val, d->data)) {
2856 d->stop = true;
2857 return ST_STOP;
2858 }
2859
2860 return ST_CONTINUE;
2861}
2862
2863static void
2864obj_traverse_reachable_i(VALUE obj, void *ptr)
2865{
2867
2868 if (obj_traverse_i(obj, d->data)) {
2869 d->stop = true;
2870 }
2871}
2872
// Lazily create the identity table used to mark visited objects.
// The backing hash (rec_hash) keeps the st_table GC-reachable.
static struct st_table *
obj_traverse_rec(struct obj_traverse_data *data)
{
    if (UNLIKELY(!data->rec)) {
        data->rec_hash = rb_ident_hash_new();
        data->rec = RHASH_ST_TABLE(data->rec_hash);
    }
    return data->rec;
}
2882
2883static int
2884obj_traverse_ivar_foreach_i(ID key, VALUE val, st_data_t ptr)
2885{
2887
2888 if (obj_traverse_i(val, d->data)) {
2889 d->stop = true;
2890 return ST_STOP;
2891 }
2892
2893 return ST_CONTINUE;
2894}
2895
// Depth-first traversal of obj's object graph driven by data->enter_func
// and data->leave_func. Returns 1 when a callback stopped the traversal,
// 0 otherwise. Each object is visited at most once (identity table).
static int
obj_traverse_i(VALUE obj, struct obj_traverse_data *data)
{
    if (RB_SPECIAL_CONST_P(obj)) return 0;

    switch (data->enter_func(obj)) {
      case traverse_cont: break;
      case traverse_skip: return 0; // skip children
      case traverse_stop: return 1; // stop search
    }

    if (UNLIKELY(st_insert(obj_traverse_rec(data), obj, 1))) {
        // already traversed
        return 0;
    }

    // traverse instance variables first (applies to every type)
    struct obj_traverse_callback_data d = {
        .stop = false,
        .data = data,
    };
    rb_ivar_foreach(obj, obj_traverse_ivar_foreach_i, (st_data_t)&d);
    if (d.stop) return 1;

    switch (BUILTIN_TYPE(obj)) {
      // no child node
      case T_STRING:
      case T_FLOAT:
      case T_BIGNUM:
      case T_REGEXP:
      case T_FILE:
      case T_SYMBOL:
      case T_MATCH:
        break;

      case T_OBJECT:
        /* Instance variables already traversed. */
        break;

      case T_ARRAY:
        {
            for (int i = 0; i < RARRAY_LENINT(obj); i++) {
                VALUE e = rb_ary_entry(obj, i);
                if (obj_traverse_i(e, data)) return 1;
            }
        }
        break;

      case T_HASH:
        {
            // default value (ifnone) is a child too
            if (obj_traverse_i(RHASH_IFNONE(obj), data)) return 1;

            struct obj_traverse_callback_data d = {
                .stop = false,
                .data = data,
            };
            rb_hash_foreach(obj, obj_hash_traverse_i, (VALUE)&d);
            if (d.stop) return 1;
        }
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            const VALUE *ptr = RSTRUCT_CONST_PTR(obj);

            for (long i=0; i<len; i++) {
                if (obj_traverse_i(ptr[i], data)) return 1;
            }
        }
        break;

      case T_RATIONAL:
        if (obj_traverse_i(RRATIONAL(obj)->num, data)) return 1;
        if (obj_traverse_i(RRATIONAL(obj)->den, data)) return 1;
        break;
      case T_COMPLEX:
        if (obj_traverse_i(RCOMPLEX(obj)->real, data)) return 1;
        if (obj_traverse_i(RCOMPLEX(obj)->imag, data)) return 1;
        break;

      case T_DATA:
      case T_IMEMO:
        {
            // children are only discoverable via GC reachability
            struct obj_traverse_callback_data d = {
                .stop = false,
                .data = data,
            };
            RB_VM_LOCK_ENTER_NO_BARRIER();
            {
                rb_objspace_reachable_objects_from(obj, obj_traverse_reachable_i, &d);
            }
            RB_VM_LOCK_LEAVE_NO_BARRIER();
            if (d.stop) return 1;
        }
        break;

      // unreachable
      case T_CLASS:
      case T_MODULE:
      case T_ICLASS:
      default:
        rp(obj);
        rb_bug("unreachable");
    }

    if (data->leave_func(obj) == traverse_stop) {
        return 1;
    }
    else {
        return 0;
    }
}
3008
3010 rb_obj_traverse_final_func final_func;
3011 int stopped;
3012};
3013
// st_foreach callback: run final_func on each recorded (visited) object.
static int
obj_traverse_final_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct rb_obj_traverse_final_data *data = (void *)arg;
    if (data->final_func(key)) {
        data->stopped = 1;
        return ST_STOP;
    }
    return ST_CONTINUE;
}
3024
3025// 0: traverse all
3026// 1: stopped
// Traverse obj's object graph with enter/leave callbacks, then (if the
// traversal completed and visited anything) run final_func over every
// visited object. Returns 0 when everything was traversed, 1 if stopped.
static int
rb_obj_traverse(VALUE obj,
                rb_obj_traverse_enter_func enter_func,
                rb_obj_traverse_leave_func leave_func,
                rb_obj_traverse_final_func final_func)
{
    struct obj_traverse_data data = {
        .enter_func = enter_func,
        .leave_func = leave_func,
        .rec = NULL, // created lazily in obj_traverse_rec()
    };

    if (obj_traverse_i(obj, &data)) return 1;
    if (final_func && data.rec) {
        struct rb_obj_traverse_final_data f = {final_func, 0};
        st_foreach(data.rec, obj_traverse_final_i, (st_data_t)&f);
        return f.stopped;
    }
    return 0;
}
3047
3048static int
3049allow_frozen_shareable_p(VALUE obj)
3050{
3051 if (!RB_TYPE_P(obj, T_DATA)) {
3052 return true;
3053 }
3054 else if (RTYPEDDATA_P(obj)) {
3055 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
3056 if (type->flags & RUBY_TYPED_FROZEN_SHAREABLE) {
3057 return true;
3058 }
3059 }
3060
3061 return false;
3062}
3063
// enter_func for Ractor.make_shareable: freeze obj (via #freeze so user
// overrides run) and decide whether to descend into its children.
// Raises Ractor::Error for objects that cannot be made shareable.
static enum obj_traverse_iterator_result
make_shareable_check_shareable(VALUE obj)
{
    VM_ASSERT(!SPECIAL_CONST_P(obj));

    if (rb_ractor_shareable_p(obj)) {
        // already shareable: its children are shareable too
        return traverse_skip;
    }
    else if (!allow_frozen_shareable_p(obj)) {
        if (rb_obj_is_proc(obj)) {
            rb_proc_ractor_make_shareable(obj);
            return traverse_cont;
        }
        else {
            rb_raise(rb_eRactorError, "can not make shareable object for %"PRIsVALUE, obj);
        }
    }

    if (RB_TYPE_P(obj, T_IMEMO)) {
        return traverse_skip;
    }

    if (!RB_OBJ_FROZEN_RAW(obj)) {
        // call #freeze (not rb_obj_freeze) so redefined freeze methods run
        rb_funcall(obj, idFreeze, 0);

        if (UNLIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
            rb_raise(rb_eRactorError, "#freeze does not freeze object correctly");
        }

        if (RB_OBJ_SHAREABLE_P(obj)) {
            return traverse_skip;
        }
    }

    return traverse_cont;
}
3100
3101static enum obj_traverse_iterator_result
3102mark_shareable(VALUE obj)
3103{
3105 return traverse_cont;
3106}
3107
3108VALUE
3110{
3111 rb_obj_traverse(obj,
3112 make_shareable_check_shareable,
3113 null_leave, mark_shareable);
3114 return obj;
3115}
3116
3117VALUE
3119{
3120 VALUE copy = ractor_copy(obj);
3121 return rb_ractor_make_shareable(copy);
3122}
3123
3124VALUE
3125rb_ractor_ensure_shareable(VALUE obj, VALUE name)
3126{
3127 if (!rb_ractor_shareable_p(obj)) {
3128 VALUE message = rb_sprintf("cannot assign unshareable object to %"PRIsVALUE,
3129 name);
3130 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, message));
3131 }
3132 return obj;
3133}
3134
3135void
3136rb_ractor_ensure_main_ractor(const char *msg)
3137{
3138 if (!rb_ractor_main_p()) {
3139 rb_raise(rb_eRactorIsolationError, "%s", msg);
3140 }
3141}
3142
// enter_func for shareable_p: decide whether obj can be shareable.
// Stops the traversal (=> not shareable) on the first object that is
// neither already shareable nor frozen-and-allowed.
static enum obj_traverse_iterator_result
shareable_p_enter(VALUE obj)
{
    if (RB_OBJ_SHAREABLE_P(obj)) {
        return traverse_skip;
    }
    else if (RB_TYPE_P(obj, T_CLASS)  ||
             RB_TYPE_P(obj, T_MODULE) ||
             RB_TYPE_P(obj, T_ICLASS)) {
        // TODO: remove it
        mark_shareable(obj);
        return traverse_skip;
    }
    else if (RB_OBJ_FROZEN_RAW(obj) &&
             allow_frozen_shareable_p(obj)) {
        return traverse_cont;
    }

    return traverse_stop; // fail
}
3163
3164bool
3165rb_ractor_shareable_p_continue(VALUE obj)
3166{
3167 if (rb_obj_traverse(obj,
3168 shareable_p_enter, null_leave,
3169 mark_shareable)) {
3170 return false;
3171 }
3172 else {
3173 return true;
3174 }
3175}
3176
3177#if RACTOR_CHECK_MODE > 0
// Record the current ractor as the owner of obj (RACTOR_CHECK_MODE only).
void
rb_ractor_setup_belonging(VALUE obj)
{
    rb_ractor_setup_belonging_to(obj, rb_ractor_current_id());
}
3183
// enter_func for ractor_reset_belonging: re-assign ownership of every
// non-shareable object to the current ractor (RACTOR_CHECK_MODE only).
static enum obj_traverse_iterator_result
reset_belonging_enter(VALUE obj)
{
    if (rb_ractor_shareable_p(obj)) {
        return traverse_skip; // shareable objects have no single owner
    }
    else {
        rb_ractor_setup_belonging(obj);
        return traverse_cont;
    }
}
3195#endif
3196
// No-op leave_func used by traversals that only need enter-side logic.
static enum obj_traverse_iterator_result
null_leave(VALUE obj)
{
    return traverse_cont;
}
3202
// Re-assign ownership of obj's whole graph to the current ractor.
// A no-op unless RACTOR_CHECK_MODE is enabled.
static VALUE
ractor_reset_belonging(VALUE obj)
{
#if RACTOR_CHECK_MODE > 0
    rb_obj_traverse(obj, reset_belonging_enter, null_leave, NULL);
#endif
    return obj;
}
3211
3212
3214
3215// 2: stop search
3216// 1: skip child
3217// 0: continue
3218
3220static int obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data);
3221typedef enum obj_traverse_iterator_result (*rb_obj_traverse_replace_enter_func)(VALUE obj, struct obj_traverse_replace_data *data);
3222typedef enum obj_traverse_iterator_result (*rb_obj_traverse_replace_leave_func)(VALUE obj, struct obj_traverse_replace_data *data);
3223
3225 rb_obj_traverse_replace_enter_func enter_func;
3226 rb_obj_traverse_replace_leave_func leave_func;
3227
3228 st_table *rec;
3229 VALUE rec_hash;
3230
3231 VALUE replacement;
3232 bool move;
3233};
3234
3236 bool stop;
3237 VALUE src;
3238 struct obj_traverse_replace_data *data;
3239};
3240
// First-pass callback for foreach-with-replace on hashes: request
// replacement for every entry; the actual rewriting happens in
// obj_hash_traverse_replace_i().
static int
obj_hash_traverse_replace_foreach_i(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    return ST_REPLACE;
}
3246
3247static int
3248obj_hash_traverse_replace_i(st_data_t *key, st_data_t *val, st_data_t ptr, int exists)
3249{
3251 struct obj_traverse_replace_data *data = d->data;
3252
3253 if (obj_traverse_replace_i(*key, data)) {
3254 d->stop = true;
3255 return ST_STOP;
3256 }
3257 else if (*key != data->replacement) {
3258 VALUE v = *key = data->replacement;
3259 RB_OBJ_WRITTEN(d->src, Qundef, v);
3260 }
3261
3262 if (obj_traverse_replace_i(*val, data)) {
3263 d->stop = true;
3264 return ST_STOP;
3265 }
3266 else if (*val != data->replacement) {
3267 VALUE v = *val = data->replacement;
3268 RB_OBJ_WRITTEN(d->src, Qundef, v);
3269 }
3270
3271 return ST_CONTINUE;
3272}
3273
// First-pass callback for foreach-with-replace on too-complex ivar
// tables: always request replacement (see obj_iv_hash_traverse_replace_i).
static int
obj_iv_hash_traverse_replace_foreach_i(st_data_t _key, st_data_t _val, st_data_t _data, int _x)
{
    return ST_REPLACE;
}
3279
3280static int
3281obj_iv_hash_traverse_replace_i(st_data_t * _key, st_data_t * val, st_data_t ptr, int exists)
3282{
3284 struct obj_traverse_replace_data *data = d->data;
3285
3286 if (obj_traverse_replace_i(*(VALUE *)val, data)) {
3287 d->stop = true;
3288 return ST_STOP;
3289 }
3290 else if (*(VALUE *)val != data->replacement) {
3291 VALUE v = *(VALUE *)val = data->replacement;
3292 RB_OBJ_WRITTEN(d->src, Qundef, v);
3293 }
3294
3295 return ST_CONTINUE;
3296}
3297
// Lazily create the source-object => replacement mapping table.
// The backing hash (rec_hash) keeps the st_table GC-reachable.
static struct st_table *
obj_traverse_replace_rec(struct obj_traverse_replace_data *data)
{
    if (UNLIKELY(!data->rec)) {
        data->rec_hash = rb_ident_hash_new();
        data->rec = RHASH_ST_TABLE(data->rec_hash);
    }
    return data->rec;
}
3307
// Reachability callback: count reachable objects that are NOT shareable.
static void
obj_refer_only_shareables_p_i(VALUE obj, void *ptr)
{
    int *pcnt = (int *)ptr;

    if (!rb_ractor_shareable_p(obj)) {
        ++*pcnt;
    }
}
3317
// True when every object directly reachable from obj is shareable.
// Used to decide whether a T_DATA can be copied as-is.
static int
obj_refer_only_shareables_p(VALUE obj)
{
    int cnt = 0;
    RB_VM_LOCK_ENTER_NO_BARRIER();
    {
        rb_objspace_reachable_objects_from(obj, obj_refer_only_shareables_p_i, &cnt);
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();
    return cnt == 0;
}
3329
3330static int
3331obj_traverse_replace_i(VALUE obj, struct obj_traverse_replace_data *data)
3332{
3333 st_data_t replacement;
3334
3335 if (RB_SPECIAL_CONST_P(obj)) {
3336 data->replacement = obj;
3337 return 0;
3338 }
3339
3340 switch (data->enter_func(obj, data)) {
3341 case traverse_cont: break;
3342 case traverse_skip: return 0; // skip children
3343 case traverse_stop: return 1; // stop search
3344 }
3345
3346 replacement = (st_data_t)data->replacement;
3347
3348 if (UNLIKELY(st_lookup(obj_traverse_replace_rec(data), (st_data_t)obj, &replacement))) {
3349 data->replacement = (VALUE)replacement;
3350 return 0;
3351 }
3352 else {
3353 st_insert(obj_traverse_replace_rec(data), (st_data_t)obj, replacement);
3354 }
3355
3356 if (!data->move) {
3357 obj = replacement;
3358 }
3359
3360#define CHECK_AND_REPLACE(v) do { \
3361 VALUE _val = (v); \
3362 if (obj_traverse_replace_i(_val, data)) { return 1; } \
3363 else if (data->replacement != _val) { RB_OBJ_WRITE(obj, &v, data->replacement); } \
3364} while (0)
3365
3366 if (UNLIKELY(FL_TEST_RAW(obj, FL_EXIVAR))) {
3367 struct gen_ivtbl *ivtbl;
3368 rb_ivar_generic_ivtbl_lookup(obj, &ivtbl);
3369
3370 if (UNLIKELY(rb_shape_obj_too_complex(obj))) {
3372 .stop = false,
3373 .data = data,
3374 .src = obj,
3375 };
3376 rb_st_foreach_with_replace(
3377 ivtbl->as.complex.table,
3378 obj_iv_hash_traverse_replace_foreach_i,
3379 obj_iv_hash_traverse_replace_i,
3380 (st_data_t)&d
3381 );
3382 if (d.stop) return 1;
3383 }
3384 else {
3385 for (uint32_t i = 0; i < ivtbl->as.shape.numiv; i++) {
3386 if (!UNDEF_P(ivtbl->as.shape.ivptr[i])) {
3387 CHECK_AND_REPLACE(ivtbl->as.shape.ivptr[i]);
3388 }
3389 }
3390 }
3391 }
3392
3393 switch (BUILTIN_TYPE(obj)) {
3394 // no child node
3395 case T_FLOAT:
3396 case T_BIGNUM:
3397 case T_REGEXP:
3398 case T_FILE:
3399 case T_SYMBOL:
3400 case T_MATCH:
3401 break;
3402 case T_STRING:
3403 rb_str_make_independent(obj);
3404 break;
3405
3406 case T_OBJECT:
3407 {
3408 if (rb_shape_obj_too_complex(obj)) {
3410 .stop = false,
3411 .data = data,
3412 .src = obj,
3413 };
3414 rb_st_foreach_with_replace(
3415 ROBJECT_IV_HASH(obj),
3416 obj_iv_hash_traverse_replace_foreach_i,
3417 obj_iv_hash_traverse_replace_i,
3418 (st_data_t)&d
3419 );
3420 if (d.stop) return 1;
3421 }
3422 else {
3423 uint32_t len = ROBJECT_IV_COUNT(obj);
3424 VALUE *ptr = ROBJECT_IVPTR(obj);
3425
3426 for (uint32_t i = 0; i < len; i++) {
3427 CHECK_AND_REPLACE(ptr[i]);
3428 }
3429 }
3430 }
3431 break;
3432
3433 case T_ARRAY:
3434 {
3435 rb_ary_cancel_sharing(obj);
3436
3437 for (int i = 0; i < RARRAY_LENINT(obj); i++) {
3438 VALUE e = rb_ary_entry(obj, i);
3439
3440 if (obj_traverse_replace_i(e, data)) {
3441 return 1;
3442 }
3443 else if (e != data->replacement) {
3444 RARRAY_ASET(obj, i, data->replacement);
3445 }
3446 }
3447 RB_GC_GUARD(obj);
3448 }
3449 break;
3450 case T_HASH:
3451 {
3453 .stop = false,
3454 .data = data,
3455 .src = obj,
3456 };
3457 rb_hash_stlike_foreach_with_replace(obj,
3458 obj_hash_traverse_replace_foreach_i,
3459 obj_hash_traverse_replace_i,
3460 (VALUE)&d);
3461 if (d.stop) return 1;
3462 // TODO: rehash here?
3463
3464 VALUE ifnone = RHASH_IFNONE(obj);
3465 if (obj_traverse_replace_i(ifnone, data)) {
3466 return 1;
3467 }
3468 else if (ifnone != data->replacement) {
3469 RHASH_SET_IFNONE(obj, data->replacement);
3470 }
3471 }
3472 break;
3473
3474 case T_STRUCT:
3475 {
3476 long len = RSTRUCT_LEN(obj);
3477 const VALUE *ptr = RSTRUCT_CONST_PTR(obj);
3478
3479 for (long i=0; i<len; i++) {
3480 CHECK_AND_REPLACE(ptr[i]);
3481 }
3482 }
3483 break;
3484
3485 case T_RATIONAL:
3486 CHECK_AND_REPLACE(RRATIONAL(obj)->num);
3487 CHECK_AND_REPLACE(RRATIONAL(obj)->den);
3488 break;
3489 case T_COMPLEX:
3490 CHECK_AND_REPLACE(RCOMPLEX(obj)->real);
3491 CHECK_AND_REPLACE(RCOMPLEX(obj)->imag);
3492 break;
3493
3494 case T_DATA:
3495 if (!data->move && obj_refer_only_shareables_p(obj)) {
3496 break;
3497 }
3498 else {
3499 rb_raise(rb_eRactorError, "can not %s %"PRIsVALUE" object.",
3500 data->move ? "move" : "copy", rb_class_of(obj));
3501 }
3502
3503 case T_IMEMO:
3504 // not supported yet
3505 return 1;
3506
3507 // unreachable
3508 case T_CLASS:
3509 case T_MODULE:
3510 case T_ICLASS:
3511 default:
3512 rp(obj);
3513 rb_bug("unreachable");
3514 }
3515
3516 data->replacement = (VALUE)replacement;
3517
3518 if (data->leave_func(obj, data) == traverse_stop) {
3519 return 1;
3520 }
3521 else {
3522 return 0;
3523 }
3524}
3525
3526// 0: traverse all
3527// 1: stopped
// Run a replacing traversal over obj. Returns the replacement of obj,
// or Qundef when a callback stopped the traversal.
static VALUE
rb_obj_traverse_replace(VALUE obj,
                        rb_obj_traverse_replace_enter_func enter_func,
                        rb_obj_traverse_replace_leave_func leave_func,
                        bool move)
{
    struct obj_traverse_replace_data data = {
        .enter_func = enter_func,
        .leave_func = leave_func,
        .rec = NULL,          // created lazily in obj_traverse_replace_rec()
        .replacement = Qundef,
        .move = move,
    };

    if (obj_traverse_replace_i(obj, &data)) {
        return Qundef;
    }
    else {
        return data.replacement;
    }
}
3549
// enter_func for Ractor move: shareable objects are kept as-is; others
// get a replacement object created via rb_obj_clone.
static enum obj_traverse_iterator_result
move_enter(VALUE obj, struct obj_traverse_replace_data *data)
{
    if (rb_ractor_shareable_p(obj)) {
        data->replacement = obj;
        return traverse_skip;
    }
    else {
        data->replacement = rb_obj_clone(obj);
        return traverse_cont;
    }
}
3562
// leave_func for Ractor move: hand the source object's contents over to
// the replacement (GC-assisted), then turn the source into a
// Ractor::MovedObject husk so any later access raises MovedError.
static enum obj_traverse_iterator_result
move_leave(VALUE obj, struct obj_traverse_replace_data *data)
{
    rb_gc_ractor_moved(data->replacement, obj);
    RBASIC_SET_CLASS_RAW(obj, rb_cRactorMovedObject);
    return traverse_cont;
}
3570
3571static VALUE
3572ractor_move(VALUE obj)
3573{
3574 VALUE val = rb_obj_traverse_replace(obj, move_enter, move_leave, true);
3575 if (!UNDEF_P(val)) {
3576 return val;
3577 }
3578 else {
3579 rb_raise(rb_eRactorError, "can not move the object");
3580 }
3581}
3582
// enter_func for Ractor copy: shareable objects are reused directly;
// others are cloned and the clone becomes the replacement.
static enum obj_traverse_iterator_result
copy_enter(VALUE obj, struct obj_traverse_replace_data *data)
{
    if (rb_ractor_shareable_p(obj)) {
        data->replacement = obj;
        return traverse_skip;
    }
    else {
        data->replacement = rb_obj_clone(obj);
        return traverse_cont;
    }
}
3595
// leave_func for Ractor copy: nothing to do (the source stays intact).
static enum obj_traverse_iterator_result
copy_leave(VALUE obj, struct obj_traverse_replace_data *data)
{
    return traverse_cont;
}
3601
3602static VALUE
3603ractor_copy(VALUE obj)
3604{
3605 VALUE val = rb_obj_traverse_replace(obj, copy_enter, copy_leave, false);
3606 if (!UNDEF_P(val)) {
3607 return val;
3608 }
3609 else {
3610 rb_raise(rb_eRactorError, "can not copy the object");
3611 }
3612}
3613
3614// Ractor local storage
3615
// A ractor-local storage key: its type vtable (mark/free hooks) plus a
// one-slot cache of the main ractor's value (Qundef when unset).
// Restores the struct header line lost from this chunk.
struct rb_ractor_local_key_struct {
    const struct rb_ractor_local_storage_type *type;
    void *main_cache;
};
3620
3622 int cnt;
3623 int capa;
3625} freed_ractor_local_keys;
3626
3627static int
3628ractor_local_storage_mark_i(st_data_t key, st_data_t val, st_data_t dmy)
3629{
3631 if (k->type->mark) (*k->type->mark)((void *)val);
3632 return ST_CONTINUE;
3633}
3634
// rb_id_table callback: GC-mark each value stored under an ID key.
static enum rb_id_table_iterator_result
idkey_local_storage_mark_i(VALUE val, void *dmy)
{
    rb_gc_mark(val);
    return ID_TABLE_CONTINUE;
}
3641
// GC-mark ractor `r`'s local storage. Also lazily reclaims entries whose
// keys were deleted via rb_ractor_local_storage_delkey(), running the
// key's free hook on the dropped value.
static void
ractor_local_storage_mark(rb_ractor_t *r)
{
    if (r->local_storage) {
        st_foreach(r->local_storage, ractor_local_storage_mark_i, 0);

        // drop values for keys that have been deleted globally
        for (int i=0; i<freed_ractor_local_keys.cnt; i++) {
            rb_ractor_local_key_t key = freed_ractor_local_keys.keys[i];
            st_data_t val, k = (st_data_t)key;
            if (st_delete(r->local_storage, &k, &val) &&
                (key = (rb_ractor_local_key_t)k)->type->free) {
                (*key->type->free)((void *)val);
            }
        }
    }

    if (r->idkey_local_storage) {
        rb_id_table_foreach_values(r->idkey_local_storage, idkey_local_storage_mark_i, NULL);
    }

    rb_gc_mark(r->local_storage_store_lock);
}
3664
3665static int
3666ractor_local_storage_free_i(st_data_t key, st_data_t val, st_data_t dmy)
3667{
3669 if (k->type->free) (*k->type->free)((void *)val);
3670 return ST_CONTINUE;
3671}
3672
// Free ractor `r`'s local storage tables, running each key's free hook
// on its stored value first.
static void
ractor_local_storage_free(rb_ractor_t *r)
{
    if (r->local_storage) {
        st_foreach(r->local_storage, ractor_local_storage_free_i, 0);
        st_free_table(r->local_storage);
    }

    if (r->idkey_local_storage) {
        rb_id_table_free(r->idkey_local_storage);
    }
}
3685
// mark hook for VALUE-typed local storage: the stored pointer IS a VALUE.
static void
rb_ractor_local_storage_value_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
3691
// Storage type with no mark/free hooks (plain untracked pointers).
static const struct rb_ractor_local_storage_type ractor_local_storage_type_null = {
    NULL,
    NULL,
};
3696
3698 NULL,
3699 ruby_xfree,
3700};
3701
// Storage type for VALUEs: GC-marked, nothing to free.
static const struct rb_ractor_local_storage_type ractor_local_storage_type_value = {
    rb_ractor_local_storage_value_mark,
    NULL,
};
3706
3709{
3711 key->type = type ? type : &ractor_local_storage_type_null;
3712 key->main_cache = (void *)Qundef;
3713 return key;
3714}
3715
3718{
3719 return rb_ractor_local_storage_ptr_newkey(&ractor_local_storage_type_value);
3720}
3721
// Globally delete a ractor-local storage key. The key is queued on
// freed_ractor_local_keys under the VM lock; each ractor drops its value
// during marking and the key struct is freed in rb_ractor_finish_marking().
void
rb_ractor_local_storage_delkey(rb_ractor_local_key_t key)
{
    RB_VM_LOCK_ENTER();
    {
        if (freed_ractor_local_keys.cnt == freed_ractor_local_keys.capa) {
            // grow geometrically, starting at 4 slots
            freed_ractor_local_keys.capa = freed_ractor_local_keys.capa ? freed_ractor_local_keys.capa * 2 : 4;
            REALLOC_N(freed_ractor_local_keys.keys, rb_ractor_local_key_t, freed_ractor_local_keys.capa);
        }
        freed_ractor_local_keys.keys[freed_ractor_local_keys.cnt++] = key;
    }
    RB_VM_LOCK_LEAVE();
}
3735
// Look up the current ractor's value for `key`. Returns true and stores
// the value through *pret when present. The main ractor is served from
// the key's one-slot cache; other ractors consult their st_table.
static bool
ractor_local_ref(rb_ractor_local_key_t key, void **pret)
{
    if (rb_ractor_main_p()) {
        if (!UNDEF_P((VALUE)key->main_cache)) {
            *pret = key->main_cache;
            return true;
        }
        else {
            return false;
        }
    }
    else {
        rb_ractor_t *cr = GET_RACTOR();

        if (cr->local_storage && st_lookup(cr->local_storage, (st_data_t)key, (st_data_t *)pret)) {
            return true;
        }
        else {
            return false;
        }
    }
}
3759
3760static void
3761ractor_local_set(rb_ractor_local_key_t key, void *ptr)
3762{
3763 rb_ractor_t *cr = GET_RACTOR();
3764
3765 if (cr->local_storage == NULL) {
3766 cr->local_storage = st_init_numtable();
3767 }
3768
3769 st_insert(cr->local_storage, (st_data_t)key, (st_data_t)ptr);
3770
3771 if (rb_ractor_main_p()) {
3772 key->main_cache = ptr;
3773 }
3774}
3775
3776VALUE
3778{
3779 void *val;
3780 if (ractor_local_ref(key, &val)) {
3781 return (VALUE)val;
3782 }
3783 else {
3784 return Qnil;
3785 }
3786}
3787
3788bool
3790{
3791 if (ractor_local_ref(key, (void **)val)) {
3792 return true;
3793 }
3794 else {
3795 return false;
3796 }
3797}
3798
3799void
3801{
3802 ractor_local_set(key, (void *)val);
3803}
3804
3805void *
3807{
3808 void *ret;
3809 if (ractor_local_ref(key, &ret)) {
3810 return ret;
3811 }
3812 else {
3813 return NULL;
3814 }
3815}
3816
3817void
3819{
3820 ractor_local_set(key, ptr);
3821}
3822
3823#define DEFAULT_KEYS_CAPA 0x10
3824
3825void
3826rb_ractor_finish_marking(void)
3827{
3828 for (int i=0; i<freed_ractor_local_keys.cnt; i++) {
3829 ruby_xfree(freed_ractor_local_keys.keys[i]);
3830 }
3831 freed_ractor_local_keys.cnt = 0;
3832 if (freed_ractor_local_keys.capa > DEFAULT_KEYS_CAPA) {
3833 freed_ractor_local_keys.capa = DEFAULT_KEYS_CAPA;
3834 REALLOC_N(freed_ractor_local_keys.keys, rb_ractor_local_key_t, DEFAULT_KEYS_CAPA);
3835 }
3836}
3837
3838static VALUE
3839ractor_local_value(rb_execution_context_t *ec, VALUE self, VALUE sym)
3840{
3841 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
3842 ID id = rb_check_id(&sym);
3843 struct rb_id_table *tbl = cr->idkey_local_storage;
3844 VALUE val;
3845
3846 if (id && tbl && rb_id_table_lookup(tbl, id, &val)) {
3847 return val;
3848 }
3849 else {
3850 return Qnil;
3851 }
3852}
3853
3854static VALUE
3855ractor_local_value_set(rb_execution_context_t *ec, VALUE self, VALUE sym, VALUE val)
3856{
3857 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
3858 ID id = SYM2ID(rb_to_symbol(sym));
3859 struct rb_id_table *tbl = cr->idkey_local_storage;
3860
3861 if (tbl == NULL) {
3862 tbl = cr->idkey_local_storage = rb_id_table_create(2);
3863 }
3864 rb_id_table_insert(tbl, id, val);
3865 return val;
3866}
3867
3870 struct rb_id_table *tbl;
3871 ID id;
3872 VALUE sym;
3873};
3874
3875static VALUE
3876ractor_local_value_store_i(VALUE ptr)
3877{
3878 VALUE val;
3880
3881 if (rb_id_table_lookup(data->tbl, data->id, &val)) {
3882 // after synchronization, we found already registered entry
3883 }
3884 else {
3885 val = rb_yield(Qnil);
3886 ractor_local_value_set(data->ec, Qnil, data->sym, val);
3887 }
3888 return val;
3889}
3890
3891static VALUE
3892ractor_local_value_store_if_absent(rb_execution_context_t *ec, VALUE self, VALUE sym)
3893{
3894 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
3895 struct ractor_local_storage_store_data data = {
3896 .ec = ec,
3897 .sym = sym,
3898 .id = SYM2ID(rb_to_symbol(sym)),
3899 .tbl = cr->idkey_local_storage,
3900 };
3901 VALUE val;
3902
3903 if (data.tbl == NULL) {
3904 data.tbl = cr->idkey_local_storage = rb_id_table_create(2);
3905 }
3906 else if (rb_id_table_lookup(data.tbl, data.id, &val)) {
3907 // already set
3908 return val;
3909 }
3910
3911 if (!cr->local_storage_store_lock) {
3912 cr->local_storage_store_lock = rb_mutex_new();
3913 }
3914
3915 return rb_mutex_synchronize(cr->local_storage_store_lock, ractor_local_value_store_i, (VALUE)&data);
3916}
3917
3918// Ractor::Channel (emulate with Ractor)
3919
3921
3922static VALUE
3923ractor_channel_func(RB_BLOCK_CALL_FUNC_ARGLIST(y, c))
3924{
3925 rb_execution_context_t *ec = GET_EC();
3926 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
3927
3928 while (1) {
3929 int state;
3930
3931 EC_PUSH_TAG(ec);
3932 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
3933 VALUE obj = ractor_receive(ec, cr);
3934 ractor_yield(ec, cr, obj, Qfalse);
3935 }
3936 EC_POP_TAG();
3937
3938 if (state) {
3939 // ignore the error
3940 break;
3941 }
3942 }
3943
3944 return Qnil;
3945}
3946
3947static VALUE
3948rb_ractor_channel_new(void)
3949{
3950#if 0
3951 return rb_funcall(rb_const_get(rb_cRactor, rb_intern("Channel")), rb_intern("new"), 0);
3952#else
3953 // class Channel
3954 // def self.new
3955 // Ractor.new do # func body
3956 // while true
3957 // obj = Ractor.receive
3958 // Ractor.yield obj
3959 // end
3960 // rescue Ractor::ClosedError
3961 // nil
3962 // end
3963 // end
3964 // end
3965
3966 return ractor_create_func(rb_cRactor, Qnil, rb_str_new2("Ractor/channel"), rb_ary_new(), ractor_channel_func);
3967#endif
3968}
3969
3970static VALUE
3971rb_ractor_channel_yield(rb_execution_context_t *ec, VALUE vch, VALUE obj)
3972{
3973 VM_ASSERT(ec == rb_current_ec_noinline());
3974 rb_ractor_channel_t *ch = RACTOR_PTR(vch);
3975
3976 ractor_send(ec, (rb_ractor_t *)ch, obj, Qfalse);
3977 return Qnil;
3978}
3979
3980static VALUE
3981rb_ractor_channel_take(rb_execution_context_t *ec, VALUE vch)
3982{
3983 VM_ASSERT(ec == rb_current_ec_noinline());
3984 rb_ractor_channel_t *ch = RACTOR_PTR(vch);
3985
3986 return ractor_take(ec, (rb_ractor_t *)ch);
3987}
3988
3989static VALUE
3990rb_ractor_channel_close(rb_execution_context_t *ec, VALUE vch)
3991{
3992 VM_ASSERT(ec == rb_current_ec_noinline());
3993 rb_ractor_channel_t *ch = RACTOR_PTR(vch);
3994
3995 ractor_close_incoming(ec, (rb_ractor_t *)ch);
3996 return ractor_close_outgoing(ec, (rb_ractor_t *)ch);
3997}
3998
3999// Ractor#require
4000
4002 VALUE ch;
4003 VALUE result;
4004 VALUE exception;
4005
4006 // require
4007 VALUE feature;
4008
4009 // autoload
4010 VALUE module;
4011 ID name;
4012};
4013
4014static VALUE
4015require_body(VALUE data)
4016{
4017 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4018
4019 ID require;
4020 CONST_ID(require, "require");
4021 crr->result = rb_funcallv(Qnil, require, 1, &crr->feature);
4022
4023 return Qnil;
4024}
4025
4026static VALUE
4027require_rescue(VALUE data, VALUE errinfo)
4028{
4029 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4030 crr->exception = errinfo;
4031 return Qundef;
4032}
4033
4034static VALUE
4035require_result_copy_body(VALUE data)
4036{
4037 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4038
4039 if (crr->exception != Qundef) {
4040 VM_ASSERT(crr->result == Qundef);
4041 crr->exception = ractor_copy(crr->exception);
4042 }
4043 else{
4044 VM_ASSERT(crr->result != Qundef);
4045 crr->result = ractor_copy(crr->result);
4046 }
4047
4048 return Qnil;
4049}
4050
4051static VALUE
4052require_result_copy_resuce(VALUE data, VALUE errinfo)
4053{
4054 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4055 crr->exception = errinfo; // ractor_move(crr->exception);
4056 return Qnil;
4057}
4058
4059static VALUE
4060ractor_require_protect(struct cross_ractor_require *crr, VALUE (*func)(VALUE))
4061{
4062 // catch any error
4063 rb_rescue2(func, (VALUE)crr,
4064 require_rescue, (VALUE)crr, rb_eException, 0);
4065
4066 rb_rescue2(require_result_copy_body, (VALUE)crr,
4067 require_result_copy_resuce, (VALUE)crr, rb_eException, 0);
4068
4069 rb_ractor_channel_yield(GET_EC(), crr->ch, Qtrue);
4070 return Qnil;
4071
4072}
4073
4074static VALUE
4075ractore_require_func(void *data)
4076{
4077 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4078 return ractor_require_protect(crr, require_body);
4079}
4080
4081VALUE
4082rb_ractor_require(VALUE feature)
4083{
4084 // TODO: make feature shareable
4085 struct cross_ractor_require crr = {
4086 .feature = feature, // TODO: ractor
4087 .ch = rb_ractor_channel_new(),
4088 .result = Qundef,
4089 .exception = Qundef,
4090 };
4091
4092 rb_execution_context_t *ec = GET_EC();
4093 rb_ractor_t *main_r = GET_VM()->ractor.main_ractor;
4094 rb_ractor_interrupt_exec(main_r, ractore_require_func, &crr, 0);
4095
4096 // wait for require done
4097 rb_ractor_channel_take(ec, crr.ch);
4098 rb_ractor_channel_close(ec, crr.ch);
4099
4100 if (crr.exception != Qundef) {
4101 rb_exc_raise(crr.exception);
4102 }
4103 else {
4104 return crr.result;
4105 }
4106}
4107
4108static VALUE
4109ractor_require(rb_execution_context_t *ec, VALUE self, VALUE feature)
4110{
4111 return rb_ractor_require(feature);
4112}
4113
4114static VALUE
4115autoload_load_body(VALUE data)
4116{
4117 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4118 crr->result = rb_autoload_load(crr->module, crr->name);
4119 return Qnil;
4120}
4121
4122static VALUE
4123ractor_autoload_load_func(void *data)
4124{
4125 struct cross_ractor_require *crr = (struct cross_ractor_require *)data;
4126 return ractor_require_protect(crr, autoload_load_body);
4127}
4128
4129VALUE
4130rb_ractor_autoload_load(VALUE module, ID name)
4131{
4132 struct cross_ractor_require crr = {
4133 .module = module,
4134 .name = name,
4135 .ch = rb_ractor_channel_new(),
4136 .result = Qundef,
4137 .exception = Qundef,
4138 };
4139
4140 rb_execution_context_t *ec = GET_EC();
4141 rb_ractor_t *main_r = GET_VM()->ractor.main_ractor;
4142 rb_ractor_interrupt_exec(main_r, ractor_autoload_load_func, &crr, 0);
4143
4144 // wait for require done
4145 rb_ractor_channel_take(ec, crr.ch);
4146 rb_ractor_channel_close(ec, crr.ch);
4147
4148 if (crr.exception != Qundef) {
4149 rb_exc_raise(crr.exception);
4150 }
4151 else {
4152 return crr.result;
4153 }
4154}
4155
4156#include "ractor.rbinc"
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_ATOMIC_CAS(var, oldval, newval)
Atomic compare-and-swap.
Definition atomic.h:140
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_FETCH_ADD(var, val)
Atomically replaces the value pointed by var with the result of addition of val to the old value of v...
Definition atomic.h:93
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
static VALUE RB_OBJ_FROZEN_RAW(VALUE obj)
This is an implementation detail of RB_OBJ_FROZEN().
Definition fl_type.h:883
@ RUBY_FL_SHAREABLE
This flag has something to do with Ractor.
Definition fl_type.h:266
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:980
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition class.c:1013
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
Definition class.c:2638
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Keyword argument deconstructor.
Definition class.c:2427
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define T_FILE
Old name of RUBY_T_FILE.
Definition value_type.h:62
#define FL_EXIVAR
Old name of RUBY_FL_EXIVAR.
Definition fl_type.h:66
#define REALLOC_N
Old name of RB_REALLOC_N.
Definition memory.h:403
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:400
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define Qundef
Old name of RUBY_Qundef.
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define T_DATA
Old name of RUBY_T_DATA.
Definition value_type.h:60
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:132
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define T_MATCH
Old name of RUBY_T_MATCH.
Definition value_type.h:69
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define CONST_ID
Old name of RUBY_CONST_ID.
Definition symbol.h:47
#define FL_SET_RAW
Old name of RB_FL_SET_RAW.
Definition fl_type.h:130
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:675
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Checks if the given object is of given kind.
Definition error.c:1380
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1428
VALUE rb_eStopIteration
StopIteration exception.
Definition enumerator.c:181
VALUE rb_exc_new_str(VALUE etype, VALUE str)
Identical to rb_exc_new_cstr(), except it takes a Ruby's string instead of C's.
Definition error.c:1481
VALUE rb_eException
Mother of all exceptions.
Definition error.c:1422
VALUE rb_cRactor
Ractor class.
Definition ractor.c:23
VALUE rb_stdin
STDIN constant.
Definition io.c:201
VALUE rb_stderr
STDERR constant.
Definition io.c:201
static VALUE rb_class_of(VALUE obj)
Object to class mapping function.
Definition globals.h:172
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:64
VALUE rb_obj_clone(VALUE obj)
Produces a shallow copy of the given object.
Definition object.c:521
VALUE rb_stdout
STDOUT constant.
Definition io.c:201
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
Definition gc.h:615
#define RB_OBJ_WRITE(old, slot, young)
Declaration of a "back" pointer.
Definition gc.h:603
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1099
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:119
#define rb_exc_new_cstr(exc, str)
Identical to rb_exc_new(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1670
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
Definition string.c:1470
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Obtains the lock, runs the passed function, and releases the lock when it completes.
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1455
VALUE rb_const_get(VALUE space, ID name)
Identical to rb_const_defined(), except it returns the actual defined value.
Definition variable.c:3193
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1924
VALUE rb_autoload_load(VALUE space, ID name)
Kicks the autoload procedure as if it was "touched".
Definition variable.c:3028
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition vm_method.c:1287
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1133
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:12522
int len
Length of the buffer.
Definition io.h:8
const struct rb_ractor_local_storage_type rb_ractor_local_storage_type_free
A type of ractor-local storage that destructs itself using ruby_xfree.
Definition ractor.c:3697
VALUE rb_ractor_make_shareable_copy(VALUE obj)
Identical to rb_ractor_make_shareable(), except it returns a (deep) copy of the passed one instead of...
Definition ractor.c:3118
struct rb_ractor_local_key_struct * rb_ractor_local_key_t
(Opaque) struct that holds a ractor-local storage key.
Definition ractor.h:42
void * rb_ractor_local_storage_ptr(rb_ractor_local_key_t key)
Identical to rb_ractor_local_storage_value() except the return type.
Definition ractor.c:3806
void rb_ractor_local_storage_ptr_set(rb_ractor_local_key_t key, void *ptr)
Identical to rb_ractor_local_storage_value_set() except the parameter type.
Definition ractor.c:3818
rb_ractor_local_key_t rb_ractor_local_storage_ptr_newkey(const struct rb_ractor_local_storage_type *type)
Extended version of rb_ractor_local_storage_value_newkey().
Definition ractor.c:3708
VALUE rb_ractor_stdin(void)
Queries the standard input of the current Ractor that is calling this function.
Definition ractor.c:2734
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
void rb_ractor_stderr_set(VALUE io)
Assigns an IO to the standard error of the Ractor that is calling this function.
Definition ractor.c:2794
void rb_ractor_local_storage_value_set(rb_ractor_local_key_t key, VALUE val)
Associates the passed value to the passed key.
Definition ractor.c:3800
bool rb_ractor_local_storage_value_lookup(rb_ractor_local_key_t key, VALUE *val)
Queries the key.
Definition ractor.c:3789
#define RB_OBJ_SHAREABLE_P(obj)
Queries if the passed object has previously classified as shareable or not.
Definition ractor.h:235
VALUE rb_ractor_make_shareable(VALUE obj)
Destructively transforms the passed object so that multiple Ractors can share it.
Definition ractor.c:3109
rb_ractor_local_key_t rb_ractor_local_storage_value_newkey(void)
Issues a new key.
Definition ractor.c:3717
void rb_ractor_stdout_set(VALUE io)
Assigns an IO to the standard output of the Ractor that is calling this function.
Definition ractor.c:2782
void rb_ractor_stdin_set(VALUE io)
Assigns an IO to the standard input of the Ractor that is calling this function.
Definition ractor.c:2770
VALUE rb_ractor_local_storage_value(rb_ractor_local_key_t key)
Queries the key.
Definition ractor.c:3777
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
Definition thread.c:1537
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1354
rb_block_call_func * rb_block_call_func_t
Shorthand type that represents an iterator-written-in-C function pointer.
Definition iterator.h:88
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
VALUE type(ANYARGS)
ANYARGS-ed function type.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
void rb_ivar_foreach(VALUE q, int_type *w, VALUE e)
Iteration over each instance variable of the object.
VALUE rb_rescue2(type *q, VALUE w, type *e, VALUE r,...)
An equivalent of rescue clause.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:67
#define RHASH_SET_IFNONE(h, ifnone)
Destructively updates the default value of the hash.
Definition rhash.h:92
#define RHASH_IFNONE(h)
Definition rhash.h:59
static VALUE * ROBJECT_IVPTR(VALUE obj)
Queries the instance variables.
Definition robject.h:126
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:89
static bool RTYPEDDATA_P(VALUE obj)
Checks whether the passed object is RTypedData or RData.
Definition rtypeddata.h:579
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:449
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:497
static const struct rb_data_type_struct * RTYPEDDATA_TYPE(VALUE obj)
Queries for the type of given object.
Definition rtypeddata.h:602
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:200
Type that defines a ractor-local storage.
Definition ractor.h:21
void(* free)(void *ptr)
A function to destruct a ractor-local storage.
Definition ractor.h:37
void(* mark)(void *ptr)
A function to mark a ractor-local storage.
Definition ractor.h:29
Definition st.h:79
Definition string.c:8317
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
Fills the passed condition variable with an initial value.
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
Signals a condition variable.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_native_cond_destroy(rb_nativethread_cond_t *cond)
Destroys the passed condition variable.
void rb_native_cond_signal(rb_nativethread_cond_t *cond)
Signals a condition variable.
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376