Ruby 4.1.0dev (2026-04-04 revision 3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
cont.c (3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
1/**********************************************************************
2
3 cont.c -
4
5 $Author$
6 created at: Thu May 23 09:03:43 2007
7
8 Copyright (C) 2007 Koichi Sasada
9
10**********************************************************************/
11
12#include "ruby/internal/config.h"
13
14#ifndef _WIN32
15#include <unistd.h>
16#include <sys/mman.h>
17#endif
18
19// On Solaris, madvise() is NOT declared for SUS (XPG4v2) or later,
20// but MADV_* macros are defined when __EXTENSIONS__ is defined.
21#ifdef NEED_MADVICE_PROTOTYPE_USING_CADDR_T
22#include <sys/types.h>
23extern int madvise(caddr_t, size_t, int);
24#endif
25
26#include COROUTINE_H
27
28#include "eval_intern.h"
29#include "internal.h"
30#include "internal/cont.h"
31#include "internal/thread.h"
32#include "internal/error.h"
33#include "internal/eval.h"
34#include "internal/gc.h"
35#include "internal/proc.h"
36#include "internal/sanitizers.h"
37#include "internal/warnings.h"
39#include "yjit.h"
40#include "vm_core.h"
41#include "vm_sync.h"
42#include "id_table.h"
43#include "ractor_core.h"
44#include "zjit.h"
45
46enum {
47 DEBUG = 0,
48 DEBUG_EXPAND = 0,
49 DEBUG_ACQUIRE = 0,
50};
51
52#define RB_PAGE_SIZE (pagesize)
53#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
54static long pagesize;
55
56static const rb_data_type_t rb_cont_data_type;
57static const rb_data_type_t rb_fiber_data_type;
58static VALUE rb_cContinuation;
59static VALUE rb_cFiber;
60static VALUE rb_eFiberError;
61#ifdef RB_EXPERIMENTAL_FIBER_POOL
62static VALUE rb_cFiberPool;
63#endif
64
65#define CAPTURE_JUST_VALID_VM_STACK 1
66
67// Defined in `coroutine/$arch/Context.h`:
68#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
69#define FIBER_POOL_ALLOCATION_FREE
70#define FIBER_POOL_MINIMUM_COUNT 8
71#define FIBER_POOL_MAXIMUM_ALLOCATIONS 32
72#else
73#define FIBER_POOL_MINIMUM_COUNT 32
74#define FIBER_POOL_MAXIMUM_ALLOCATIONS 1024
75#endif
76#ifdef RB_EXPERIMENTAL_FIBER_POOL
77#define FIBER_POOL_ALLOCATION_FREE
78#endif
79
80enum context_type {
81 CONTINUATION_CONTEXT = 0,
82 FIBER_CONTEXT = 1
83};
84
86 VALUE *ptr;
87 size_t size;
88#ifdef CAPTURE_JUST_VALID_VM_STACK
89 size_t slen; /* length of stack (head of ec->vm_stack) */
90 size_t clen; /* length of control frames (tail of ec->vm_stack) */
91#endif
92};
93
94struct fiber_pool;
95
96// Represents a single stack.
98 // A pointer to the memory allocation (lowest address) for the stack.
99 void * base;
100
101 // The current stack pointer, taking into account the direction of the stack.
102 void * current;
103
104 // The size of the stack excluding any guard pages.
105 size_t size;
106
107 // The available stack capacity w.r.t. the current stack offset.
108 size_t available;
109
110 // The pool this stack should be allocated from.
111 struct fiber_pool * pool;
112
113 // If the stack is allocated, the allocation it came from.
114 struct fiber_pool_allocation * allocation;
115};
116
117// A linked list of vacant (unused) stacks.
118// This structure is stored in the first page of a stack if it is not in use.
119// @sa fiber_pool_vacancy_pointer
121 // Details about the vacant stack:
122 struct fiber_pool_stack stack;
123
124 // The vacancy linked list.
125#ifdef FIBER_POOL_ALLOCATION_FREE
126 struct fiber_pool_vacancy * previous;
127#endif
128 struct fiber_pool_vacancy * next;
129};
130
// Manages a singly linked list of mapped regions of memory, each of which contains 1 or more stacks:
132//
133// base = +-------------------------------+-----------------------+ +
134// |VM Stack |VM Stack | | |
135// | | | | |
136// | | | | |
137// +-------------------------------+ | |
138// |Machine Stack |Machine Stack | | |
139// | | | | |
140// | | | | |
141// | | | . . . . | | size
142// | | | | |
143// | | | | |
144// | | | | |
145// | | | | |
146// | | | | |
147// +-------------------------------+ | |
148// |Guard Page |Guard Page | | |
149// +-------------------------------+-----------------------+ v
150//
151// +------------------------------------------------------->
152//
153// count
154//
156 // A pointer to the memory mapped region.
157 void * base;
158
159 // The size of the individual stacks.
160 size_t size;
161
162 // The stride of individual stacks (including any guard pages or other accounting details).
163 size_t stride;
164
165 // The number of stacks that were allocated.
166 size_t count;
167
168#ifdef FIBER_POOL_ALLOCATION_FREE
169 // The number of stacks used in this allocation.
170 size_t used;
171#endif
172
173 struct fiber_pool * pool;
174
175 // The allocation linked list.
176#ifdef FIBER_POOL_ALLOCATION_FREE
177 struct fiber_pool_allocation * previous;
178#endif
179 struct fiber_pool_allocation * next;
180};
181
182// A fiber pool manages vacant stacks to reduce the overhead of creating fibers.
184 // A singly-linked list of allocations which contain 1 or more stacks each.
185 struct fiber_pool_allocation * allocations;
186
187 // Free list that provides O(1) stack "allocation".
188 struct fiber_pool_vacancy * vacancies;
189
190 // The size of the stack allocations (excluding any guard page).
191 size_t size;
192
193 // The total number of stacks that have been allocated in this pool.
194 size_t count;
195
196 // The initial number of stacks to allocate.
197 size_t minimum_count;
198
199 // If positive, total stacks in this pool cannot exceed this (shared pool only:
200 // set via RUBY_SHARED_FIBER_POOL_MAXIMUM_COUNT). Expansion fails with errno EAGAIN.
201 size_t maximum_count;
202
203 // Whether to madvise(free) the stack or not.
204 // If this value is set to 1, the stack will be madvise(free)ed
205 // (or equivalent), where possible, when it is returned to the pool.
206 int free_stacks;
207
208 // The number of stacks that have been used in this pool.
209 size_t used;
210
211 // The amount to allocate for the vm_stack.
212 size_t vm_stack_size;
213};
214
215// Continuation contexts used by JITs
217 rb_execution_context_t *ec; // continuation ec
218 struct rb_jit_cont *prev, *next; // used to form lists
219};
220
221// Doubly linked list for enumerating all on-stack ISEQs.
222static struct rb_jit_cont *first_jit_cont;
223
224typedef struct rb_context_struct {
225 enum context_type type;
226 int argc;
227 int kw_splat;
228 VALUE self;
229 VALUE value;
230
231 struct cont_saved_vm_stack saved_vm_stack;
232
233 struct {
234 VALUE *stack;
235 VALUE *stack_src;
236 size_t stack_size;
237 } machine;
238 rb_execution_context_t saved_ec;
239 rb_jmpbuf_t jmpbuf;
240 struct rb_jit_cont *jit_cont; // Continuation contexts for JITs
242
243/*
244 * Fiber status:
245 * [Fiber.new] ------> FIBER_CREATED ----> [Fiber#kill] --> |
246 * | [Fiber#resume] |
247 * v |
248 * +--> FIBER_RESUMED ----> [return] ------> |
249 * [Fiber#resume] | | [Fiber.yield/transfer] |
250 * [Fiber#transfer] | v |
251 * +--- FIBER_SUSPENDED --> [Fiber#kill] --> |
252 * |
253 * |
254 * FIBER_TERMINATED <-------------------+
255 */
256enum fiber_status {
257 FIBER_CREATED,
258 FIBER_RESUMED,
259 FIBER_SUSPENDED,
260 FIBER_TERMINATED
261};
262
263#define FIBER_CREATED_P(fiber) ((fiber)->status == FIBER_CREATED)
264#define FIBER_RESUMED_P(fiber) ((fiber)->status == FIBER_RESUMED)
265#define FIBER_SUSPENDED_P(fiber) ((fiber)->status == FIBER_SUSPENDED)
266#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
267#define FIBER_RUNNABLE_P(fiber) (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
268
270 rb_context_t cont;
271 VALUE first_proc;
272 struct rb_fiber_struct *prev;
273 struct rb_fiber_struct *resuming_fiber;
274
275 BITFIELD(enum fiber_status, status, 2);
276 /* Whether the fiber is allowed to implicitly yield. */
277 unsigned int yielding : 1;
278 unsigned int blocking : 1;
279
280 unsigned int killed : 1;
281
282 struct coroutine_context context;
283 struct fiber_pool_stack stack;
284};
285
286static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};
287
288void
289rb_free_shared_fiber_pool(void)
290{
291 struct fiber_pool_allocation *allocations = shared_fiber_pool.allocations;
292 while (allocations) {
293 struct fiber_pool_allocation *next = allocations->next;
294 SIZED_FREE(allocations);
295 allocations = next;
296 }
297}
298
299static ID fiber_initialize_keywords[3] = {0};
300
301/*
302 * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
303 * if MAP_STACK is passed.
304 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=158755
305 */
306#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
307#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
308#else
309#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
310#endif
311
312#define ERRNOMSG strerror(errno)
313
// Locates the stack vacancy details for the given stack.
// The vacancy record is stored inside the stack region itself, in the page at
// one end of [base, base+size) — which end depends on the machine stack growth
// direction (selected via STACK_DIR_UPPER).
inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_pointer(void * base, size_t size)
{
    STACK_GROW_DIR_DETECTION;

    return (struct fiber_pool_vacancy *)(
        (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
    );
}
324
#if defined(COROUTINE_SANITIZE_ADDRESS)
// Compute the base pointer for a vacant stack, for the area which can be poisoned.
// The page holding the vacancy record is excluded so the free list stays readable.
inline static void *
fiber_pool_stack_poison_base(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    return (char*)stack->base + STACK_DIR_UPPER(RB_PAGE_SIZE, 0);
}

// Compute the size of the vacant stack, for the area that can be poisoned.
// One page is excluded, mirroring fiber_pool_stack_poison_base.
inline static size_t
fiber_pool_stack_poison_size(struct fiber_pool_stack * stack)
{
    return stack->size - RB_PAGE_SIZE;
}
#endif
342
// Reset the current stack pointer and available size of the given stack.
// After this call the stack appears completely unused: `current` points at the
// end the machine stack grows away from, and `available` spans the full size.
inline static void
fiber_pool_stack_reset(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
    stack->available = stack->size;
}
352
// A pointer to the base of the current unused portion of the stack.
// Takes the machine stack growth direction into account (STACK_DIR_UPPER);
// for a downward-growing stack this is `current - available`.
inline static void *
fiber_pool_stack_base(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(stack->current);

    return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);
}
363
// Allocate some memory from the stack. Used to allocate vm_stack inline with machine stack.
// Works like a direction-aware bump allocator: `current` advances by `offset`
// in the direction the machine stack grows, and `available` shrinks.
// @param offset The number of bytes to carve off the stack.
// @return A pointer to the start of the carved-off region.
// @sa fiber_initialize_coroutine
inline static void *
fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
{
    STACK_GROW_DIR_DETECTION;

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
    VM_ASSERT(stack->available >= offset);

    // The pointer to the memory being allocated:
    void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);

    // Move the stack pointer:
    stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
    stack->available -= offset;

    return pointer;
}
383
// Reset a vacancy's stack, then reserve space for the vacancy record itself.
// The record lives in the first page of the stack, so that page is consumed
// immediately after the reset.
inline static void
fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
{
    fiber_pool_stack_reset(&vacancy->stack);

    // Consume one page of the stack because it's used for the vacancy list:
    fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
}
393
394inline static struct fiber_pool_vacancy *
395fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
396{
397 vacancy->next = head;
398
399#ifdef FIBER_POOL_ALLOCATION_FREE
400 if (head) {
401 head->previous = vacancy;
402 vacancy->previous = NULL;
403 }
404#endif
405
406 return vacancy;
407}
408
#ifdef FIBER_POOL_ALLOCATION_FREE
// Unlink a vacancy from its pool's doubly-linked free list.
// When the vacancy is the list head, the pool's `vacancies` pointer is updated.
static void
fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
{
    if (vacancy->next) {
        vacancy->next->previous = vacancy->previous;
    }

    if (vacancy->previous) {
        vacancy->previous->next = vacancy->next;
    }
    else {
        // It's the head of the list:
        vacancy->stack.pool->vacancies = vacancy->next;
    }
}

// Pop the head vacancy off the pool's free list, or return NULL if it is empty.
inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_pop(struct fiber_pool * pool)
{
    struct fiber_pool_vacancy * vacancy = pool->vacancies;

    if (vacancy) {
        fiber_pool_vacancy_remove(vacancy);
    }

    return vacancy;
}
437#else
438inline static struct fiber_pool_vacancy *
439fiber_pool_vacancy_pop(struct fiber_pool * pool)
440{
441 struct fiber_pool_vacancy * vacancy = pool->vacancies;
442
443 if (vacancy) {
444 pool->vacancies = vacancy->next;
445 }
446
447 return vacancy;
448}
449#endif
450
// Initialize the vacant stack. The [base, size] allocation should not include the guard page.
// @param base The pointer to the lowest address of the allocated memory.
// @param size The size of the allocated memory.
// @param vacancies The current head of the vacancy list.
// @return The new head of the vacancy list.
inline static struct fiber_pool_vacancy *
fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
{
    // The vacancy record is stored within the stack memory itself:
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);

    vacancy->stack.base = base;
    vacancy->stack.size = size;

    // Reset the stack and reserve its first page for the vacancy record:
    fiber_pool_vacancy_reset(vacancy);

    vacancy->stack.pool = fiber_pool;

    return fiber_pool_vacancy_push(vacancy, vacancies);
}
468
// Allocate a maximum of count stacks, size given by stride.
// @param count [in/out] the number of stacks to allocate; reduced to the number actually allocated.
// @param stride the size of the individual stacks.
// @return [void *] the allocated memory or NULL if allocation failed.
inline static void *
fiber_pool_allocate_memory(size_t * count, size_t stride)
{
    // We use a divide-by-2 strategy to try and allocate memory. We are trying
    // to allocate `count` stacks. In normal situation, this won't fail. But
    // if we ran out of address space, or we are allocating more memory than
    // the system would allow (e.g. overcommit * physical memory + swap), we
    // divide count by two and try again. This condition should only be
    // encountered in edge cases, but we handle it here gracefully.
    while (*count) {
#if defined(_WIN32)
        void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);

        if (!base) {
            errno = rb_w32_map_errno(GetLastError());
            *count = (*count) >> 1;
        }
        else {
            return base;
        }
#else
        errno = 0;
        size_t mmap_size = (*count)*stride;
        void * base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);

        if (base == MAP_FAILED) {
            // If the allocation fails, count = count / 2, and try again.
            *count = (*count) >> 1;
        }
        else {
            ruby_annotate_mmap(base, mmap_size, "Ruby:fiber_pool_allocate_memory");
#if defined(MADV_FREE_REUSE)
            // On Mac, MADV_FREE_REUSE is necessary for the task_info API to
            // keep the accounting as accurate as possible when a page is
            // marked as reusable. The advice may not take effect on the
            // first call, hence the retry loop while it fails with EAGAIN.
            while (madvise(base, mmap_size, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
#endif
            return base;
        }
#endif
    }

    return NULL;
}
517
// Given an existing fiber pool, expand it by the specified number of stacks.
//
// @param count the maximum number of stacks to allocate.
// @return the new allocation on success, or NULL on failure with errno set.
// @raise NoMemoryError if the struct or memory allocation fails.
//
// Call from fiber_pool_stack_acquire_expand with VM lock held, or from
// fiber_pool_initialize before the pool is shared across threads.
// @sa fiber_pool_allocation_free
static struct fiber_pool_allocation *
fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
{
    if (count == 0) {
        errno = EAGAIN;
        return NULL;
    }

    STACK_GROW_DIR_DETECTION;

    size_t size = fiber_pool->size;
    // Each stack is followed by one guard page:
    size_t stride = size + RB_PAGE_SIZE;

    // If the maximum number of stacks is set, and we have reached it, return NULL.
    if (fiber_pool->maximum_count > 0) {
        if (fiber_pool->count >= fiber_pool->maximum_count) {
            errno = EAGAIN;
            return NULL;
        }
        // Otherwise, clamp the request so the cap is not exceeded:
        size_t remaining = fiber_pool->maximum_count - fiber_pool->count;
        if (count > remaining) {
            count = remaining;
        }
    }

    // Allocate metadata before mmap: ruby_xmalloc (RB_ALLOC) raises on failure and
    // must not run after base is mapped, or the region would leak.
    struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);

    // Allocate the memory required for the stacks (count may be reduced on partial failure):
    void * base = fiber_pool_allocate_memory(&count, stride);

    if (base == NULL) {
        if (!errno) errno = ENOMEM;
        ruby_xfree(allocation);
        return NULL;
    }

    struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;

    // Initialize fiber pool allocation:
    allocation->base = base;
    allocation->size = size;
    allocation->stride = stride;
    allocation->count = count;
#ifdef FIBER_POOL_ALLOCATION_FREE
    allocation->used = 0;
#endif
    allocation->pool = fiber_pool;

    if (DEBUG_EXPAND) {
        fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
                count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
    }

    // Iterate over all stacks, initializing the vacancy list:
    for (size_t i = 0; i < count; i += 1) {
        // NOTE: this `base` shadows the mapping's `base` above; it is the
        // start of the i-th stack slot within the mapping.
        void * base = (char*)allocation->base + (stride * i);
        // The guard page sits at one end of the slot, depending on growth direction:
        void * page = (char*)base + STACK_DIR_UPPER(size, 0);
#if defined(_WIN32)
        DWORD old_protect;

        if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
            int error = rb_w32_map_errno(GetLastError());
            VirtualFree(allocation->base, 0, MEM_RELEASE);
            ruby_xfree(allocation);
            errno = error;
            return NULL;
        }
#elif defined(__wasi__)
        // wasi-libc's mprotect emulation doesn't support PROT_NONE.
        (void)page;
#else
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            int error = errno;
            if (!error) error = ENOMEM;
            munmap(allocation->base, count*stride);
            ruby_xfree(allocation);
            errno = error;
            return NULL;
        }
#endif

        vacancies = fiber_pool_vacancy_initialize(
            fiber_pool, vacancies,
            (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
            size
        );

#ifdef FIBER_POOL_ALLOCATION_FREE
        vacancies->stack.allocation = allocation;
#endif
    }

    // Insert the allocation into the head of the pool:
    allocation->next = fiber_pool->allocations;

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (allocation->next) {
        allocation->next->previous = allocation;
    }

    allocation->previous = NULL;
#endif

    fiber_pool->allocations = allocation;
    fiber_pool->vacancies = vacancies;
    fiber_pool->count += count;

    return allocation;
}
638
// Initialize the specified fiber pool with the given number of stacks.
// @param size The desired stack size (rounded up to whole pages).
// @param minimum_count The number of stacks to pre-allocate immediately.
// @param maximum_count If non-zero, a hard cap on the total stacks in this pool.
// @param vm_stack_size The size of the vm stack to allocate.
// @raise FiberError if the initial expansion fails.
static void
fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t minimum_count, size_t maximum_count, size_t vm_stack_size)
{
    VM_ASSERT(vm_stack_size < size);

    fiber_pool->allocations = NULL;
    fiber_pool->vacancies = NULL;
    // Round up to a page multiple. NOTE(review): this adds a full page even
    // when `size` is already page-aligned — presumably intentional headroom.
    fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
    fiber_pool->count = 0;
    fiber_pool->minimum_count = minimum_count;
    fiber_pool->maximum_count = maximum_count;
    fiber_pool->free_stacks = 1;
    fiber_pool->used = 0;
    fiber_pool->vm_stack_size = vm_stack_size;

    if (fiber_pool->minimum_count > 0) {
        if (RB_UNLIKELY(!fiber_pool_expand(fiber_pool, fiber_pool->minimum_count))) {
            rb_raise(rb_eFiberError, "can't allocate initial fiber stacks (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", fiber_pool->minimum_count, fiber_pool->size, strerror(errno));
        }
    }
}
662
#ifdef FIBER_POOL_ALLOCATION_FREE
// Free an entire allocation: unlink every vacancy belonging to it, unmap the
// memory, and remove the allocation from the pool's allocation list.
// Requires that none of the allocation's stacks are currently in use.
static void
fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
{
    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(allocation->used == 0);

    if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", (void*)allocation, allocation->base, allocation->count);

    size_t i;
    for (i = 0; i < allocation->count; i += 1) {
        // Recover the vacancy record stored inside each stack slot:
        void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);

        struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);

        // Pop the vacant stack off the free list:
        fiber_pool_vacancy_remove(vacancy);
    }

#ifdef _WIN32
    VirtualFree(allocation->base, 0, MEM_RELEASE);
#else
    munmap(allocation->base, allocation->stride * allocation->count);
#endif

    if (allocation->previous) {
        allocation->previous->next = allocation->next;
    }
    else {
        // We are the head of the list, so update the pool:
        allocation->pool->allocations = allocation->next;
    }

    if (allocation->next) {
        allocation->next->previous = allocation->previous;
    }

    allocation->pool->count -= allocation->count;

    SIZED_FREE(allocation);
}
#endif
707
708// Number of stacks to request when expanding the pool (clamped to min/max).
709static size_t
710fiber_pool_stack_expand_count(const struct fiber_pool *pool)
711{
712 const size_t maximum_allocations = FIBER_POOL_MAXIMUM_ALLOCATIONS;
713 const size_t minimum_count = FIBER_POOL_MINIMUM_COUNT;
714
715 // We are going try and double the number of stacks in the pool:
716 size_t count = pool->count;
717 if (count > maximum_allocations) count = maximum_allocations;
718 if (count < minimum_count) count = minimum_count;
719
720 // If we have a maximum count, we need to clamp the number of stacks to the maximum:
721 if (pool->maximum_count > 0) {
722 if (pool->count >= pool->maximum_count) {
723 // No expansion is possible:
724 return 0;
725 }
726
727 // Otherwise, compute the number of stacks we can allocate to bring us to the maximum:
728 size_t remaining = pool->maximum_count - pool->count;
729 if (count > remaining) {
730 count = remaining;
731 }
732 }
733
734 return count;
735}
736
737// When the vacancy list is empty, grow the pool (and run GC only if mmap fails). Caller holds the VM lock.
738// Returns NULL if expansion failed after GC + retry; errno is set. Otherwise returns a vacancy.
739static struct fiber_pool_vacancy *
740fiber_pool_stack_acquire_expand(struct fiber_pool *fiber_pool)
741{
742 size_t count = fiber_pool_stack_expand_count(fiber_pool);
743
744 if (DEBUG_ACQUIRE) fprintf(stderr, "fiber_pool_stack_acquire: expanding fiber pool by %"PRIuSIZE" stacks\n", count);
745
746 struct fiber_pool_vacancy *vacancy = NULL;
747
748 if (RB_LIKELY(fiber_pool_expand(fiber_pool, count))) {
749 return fiber_pool_vacancy_pop(fiber_pool);
750 }
751 else {
752 if (DEBUG_ACQUIRE) fprintf(stderr, "fiber_pool_stack_acquire: expand failed (%s), collecting garbage\n", strerror(errno));
753
754 rb_gc();
755
756 // After running GC, the vacancy list may have some stacks:
757 vacancy = fiber_pool_vacancy_pop(fiber_pool);
758 if (RB_LIKELY(vacancy)) {
759 return vacancy;
760 }
761
762 // Recompute count as gc may have freed up some allocations:
763 count = fiber_pool_stack_expand_count(fiber_pool);
764
765 // Try to expand the fiber pool again:
766 if (RB_LIKELY(fiber_pool_expand(fiber_pool, count))) {
767 return fiber_pool_vacancy_pop(fiber_pool);
768 }
769 else {
770 // Okay, we really failed to acquire a stack. Give up and return NULL with errno set:
771 return NULL;
772 }
773 }
774}
775
// Acquire a stack from the given fiber pool. If none are available, allocate more.
// Returns the stack descriptor by value; raises FiberError if no stack can be
// obtained even after expanding the pool.
static struct fiber_pool_stack
fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
{
    struct fiber_pool_vacancy * vacancy;

    // The pool may be shared, so all mutation happens under the VM lock:
    unsigned int lev;
    RB_VM_LOCK_ENTER_LEV(&lev);
    {
        // Fast path: try to acquire a stack from the vacancy list:
        vacancy = fiber_pool_vacancy_pop(fiber_pool);

        if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);

        // Slow path: If the pool has no vacancies, expand first. Only run GC when expansion fails (e.g. mmap), so we can reclaim stacks from dead fibers before retrying:
        if (RB_UNLIKELY(!vacancy)) {
            vacancy = fiber_pool_stack_acquire_expand(fiber_pool);

            // If expansion failed, raise an error (after releasing the lock):
            if (RB_UNLIKELY(!vacancy)) {
                RB_VM_LOCK_LEAVE_LEV(&lev);
                rb_raise(rb_eFiberError, "can't allocate fiber stack: %s", strerror(errno));
            }
        }

        VM_ASSERT(vacancy);
        VM_ASSERT(vacancy->stack.base);

#if defined(COROUTINE_SANITIZE_ADDRESS)
        // The stack was poisoned when it was released; unpoison before reuse:
        __asan_unpoison_memory_region(fiber_pool_stack_poison_base(&vacancy->stack), fiber_pool_stack_poison_size(&vacancy->stack));
#endif

        // Take the top item from the free list:
        fiber_pool->used += 1;

#ifdef FIBER_POOL_ALLOCATION_FREE
        vacancy->stack.allocation->used += 1;
#endif

        fiber_pool_stack_reset(&vacancy->stack);
    }
    RB_VM_LOCK_LEAVE_LEV(&lev);

    return vacancy->stack;
}
821
// We advise the operating system that the stack memory pages are no longer being used.
// This introduces some performance overhead but allows the system to reclaim memory when there is pressure.
static inline void
fiber_pool_stack_free(struct fiber_pool_stack * stack)
{
    void * base = fiber_pool_stack_base(stack);
    size_t size = stack->available;

    // If this is not true, the vacancy information will almost certainly be destroyed:
    VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));

    // NOTE(review): the upper bits of free_stacks appear to carry an explicit
    // madvise advice; 0 means "pick a platform default below" — confirm.
    int advice = stack->pool->free_stacks >> 1;

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"] advice=%d\n", base, size, stack->base, stack->size, advice);

    // The pages being used by the stack can be returned back to the system.
    // That doesn't change the page mapping, but it does allow the system to
    // reclaim the physical memory.
    // Since we no longer care about the data itself, we don't need to page
    // out to disk, since that is costly. Not all systems support that, so
    // we try our best to select the most efficient implementation.
    // In addition, it's actually slightly desirable to not do anything here,
    // but that results in higher memory usage.

#ifdef __wasi__
    // WebAssembly doesn't support madvise, so we just don't do anything.
#elif VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
    if (!advice) advice = MADV_DONTNEED;
    // This immediately discards the pages and the memory is reset to zero.
    // NOTE(review): presumably forced in check mode so stale stack contents
    // cannot survive a release/acquire cycle — confirm.
    madvise(base, size, advice);
#elif defined(MADV_FREE_REUSABLE)
    if (!advice) advice = MADV_FREE_REUSABLE;
    // Darwin / macOS / iOS.
    // Acknowledge to the kernel, down to the task_info API, that we make this
    // page reusable for future use.
    // As with the MADV_FREE_REUSE loop in fiber_pool_allocate_memory, we
    // retry in the rare case the operation was not completed by the call.
    while (madvise(base, size, advice) == -1 && errno == EAGAIN);
#elif defined(MADV_FREE)
    if (!advice) advice = MADV_FREE;
    // Recent Linux.
    madvise(base, size, advice);
#elif defined(MADV_DONTNEED)
    if (!advice) advice = MADV_DONTNEED;
    // Old Linux.
    madvise(base, size, advice);
#elif defined(POSIX_MADV_DONTNEED)
    if (!advice) advice = POSIX_MADV_DONTNEED;
    // Solaris?
    posix_madvise(base, size, advice);
#elif defined(_WIN32)
    VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
    // Not available in all versions of Windows.
    //DiscardVirtualMemory(base, size);
#endif

#if defined(COROUTINE_SANITIZE_ADDRESS)
    __asan_poison_memory_region(fiber_pool_stack_poison_base(stack), fiber_pool_stack_poison_size(stack));
#endif
}
882
// Release and return a stack to the vacancy list.
static void
fiber_pool_stack_release(struct fiber_pool_stack * stack)
{
    struct fiber_pool * pool = stack->pool;
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);

    // Copy the stack details into the vacancy area:
    vacancy->stack = *stack;
    // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.

    // Reset the stack pointers and reserve space for the vacancy data:
    fiber_pool_vacancy_reset(vacancy);

    // Push the vacancy into the vacancies list:
    pool->vacancies = fiber_pool_vacancy_push(vacancy, pool->vacancies);
    pool->used -= 1;

#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_allocation * allocation = stack->allocation;

    allocation->used -= 1;

    // Release address space and/or dirty memory:
    if (allocation->used == 0) {
        // Last in-use stack of the allocation — unmap the whole region:
        fiber_pool_allocation_free(allocation);
    }
    else if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#else
    // This is entirely optional, but clears the dirty flag from the stack
    // memory, so it won't get swapped to disk when there is memory pressure:
    if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#endif
}
923
// Switch the thread's active execution context to the given fiber's saved
// context, updating the ractor's current-ec pointer as well.
static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
    rb_execution_context_t *ec = &fiber->cont.saved_ec;
#ifdef RUBY_ASAN_ENABLED
    ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
#endif
    rb_ractor_set_current_ec(th->ractor, th->ec = ec);
    // ruby_current_execution_context_ptr = th->ec = ec;

    /*
     * timer-thread may set trap interrupt on previous th->ec at any time;
     * ensure we do not delay (or lose) the trap interrupt handling.
     */
    if (th->vm->ractor.main_thread == th &&
        rb_signal_buff_size() > 0) {
        RUBY_VM_SET_TRAP_INTERRUPT(ec);
    }

    VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
}
945
// Make the given fiber's saved execution context current on the thread.
static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
{
    ec_switch(th, fiber);
    VM_ASSERT(th->ec->fiber_ptr == fiber);
}
952
#ifndef COROUTINE_DECL
# define COROUTINE_DECL COROUTINE
#endif
NORETURN(static COROUTINE_DECL fiber_entry(struct coroutine_context * from, struct coroutine_context * to));
// Entry point for every fiber coroutine: restore the fiber's thread state and
// run the fiber body via rb_fiber_start. Does not return, except under the
// pthread coroutine implementation where the hosting thread unwinds naturally.
static COROUTINE
fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
{
    rb_fiber_t *fiber = to->argument;

#if defined(COROUTINE_SANITIZE_ADDRESS)
    // Address sanitizer will copy the previous stack base and stack size into
    // the "from" fiber. `coroutine_initialize_main` doesn't generally know the
    // stack bounds (base + size). Therefore, the main fiber `stack_base` and
    // `stack_size` will be NULL/0. It's specifically important in that case to
    // get the (base+size) of the previous fiber and save it, so that later when
    // we return to the main coroutine, we don't supply (NULL, 0) to
    // __sanitizer_start_switch_fiber which royally messes up the internal state
    // of ASAN and causes (sometimes) the following message:
    // "WARNING: ASan is ignoring requested __asan_handle_no_return"
    __sanitizer_finish_switch_fiber(to->fake_stack, (const void**)&from->stack_base, &from->stack_size);
#endif

    rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;

#ifdef COROUTINE_PTHREAD_CONTEXT
    // Each coroutine runs on its own pthread, which must be registered as the
    // Ruby thread's native thread:
    ruby_thread_set_native(thread);
#endif

    fiber_restore_thread(thread, fiber);

    rb_fiber_start(fiber);

#ifndef COROUTINE_PTHREAD_CONTEXT
    VM_UNREACHABLE(fiber_entry);
#endif
}
989
// Initialize a fiber's coroutine's machine stack and vm stack.
// Acquires a stack from the fiber's pool, carves the VM stack out of it, and
// points the saved execution context's machine-stack bounds at the remainder.
// Returns the VM stack pointer and stores its size into *vm_stack_size.
static VALUE *
fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
{
    struct fiber_pool * fiber_pool = fiber->stack.pool;
    rb_execution_context_t *sec = &fiber->cont.saved_ec;
    void * vm_stack = NULL;

    VM_ASSERT(fiber_pool != NULL);

    fiber->stack = fiber_pool_stack_acquire(fiber_pool);
    // The VM stack is allocated from the top of the pooled machine stack.
    vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
    *vm_stack_size = fiber_pool->vm_stack_size;

    coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);

    // The stack for this execution context is the one we allocated:
    sec->machine.stack_start = fiber->stack.current;
    sec->machine.stack_maxsize = fiber->stack.available;

    // fiber_entry reads this back as `to->argument`.
    fiber->context.argument = (void*)fiber;

    return vm_stack;
}
1014
// Release the stack from the fiber, it's execution context, and return it to
// the fiber pool. Safe to call more than once: a NULL stack.base means the
// stack was already returned.
static void
fiber_stack_release(rb_fiber_t * fiber)
{
    rb_execution_context_t *ec = &fiber->cont.saved_ec;

    if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);

    // Return the stack back to the fiber pool if it wasn't already:
    if (fiber->stack.base) {
        fiber_pool_stack_release(&fiber->stack);
        fiber->stack.base = NULL;
    }

    // The stack is no longer associated with this execution context:
    rb_ec_clear_vm_stack(ec);
}
1033
// Variant of fiber_stack_release that asserts the VM lock (with barrier) is
// held, except during VM cleanup where the assertion does not apply.
static void
fiber_stack_release_locked(rb_fiber_t *fiber)
{
    if (!ruby_vm_during_cleanup) {
        // We can't try to acquire the VM lock here because MMTK calls free in its own native thread which has no ec.
        // This assertion will fail on MMTK but we currently don't have CI for debug releases of MMTK, so we can assert for now.
        ASSERT_vm_locking_with_barrier();
    }
    fiber_stack_release(fiber);
}
1044
1045static const char *
1046fiber_status_name(enum fiber_status s)
1047{
1048 switch (s) {
1049 case FIBER_CREATED: return "created";
1050 case FIBER_RESUMED: return "resumed";
1051 case FIBER_SUSPENDED: return "suspended";
1052 case FIBER_TERMINATED: return "terminated";
1053 }
1054 VM_UNREACHABLE(fiber_status_name);
1055 return NULL;
1056}
1057
// Debug-build sanity checks on a fiber's internal invariants; compiled to a
// no-op unless VM_CHECK_MODE > 0.
static void
fiber_verify(const rb_fiber_t *fiber)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);

    switch (fiber->status) {
      case FIBER_RESUMED:
        // NOTE(review): the vm_stack assertion is only made when the owning
        // thread's `self` is 0 — presumably a thread still being set up;
        // confirm against rb_threadptr_root_fiber_setup.
        if (fiber->cont.saved_ec.thread_ptr->self == 0) {
            VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
        }
        break;
      case FIBER_SUSPENDED:
        // A suspended fiber must still own a VM stack to resume onto.
        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
        break;
      case FIBER_CREATED:
      case FIBER_TERMINATED:
        /* TODO */
        break;
      default:
        VM_UNREACHABLE(fiber_verify);
    }
#endif
}
1082
// Transition a fiber to status `s`. A terminated fiber may never change
// status again, and self-transitions are disallowed.
inline static void
fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
{
    // if (DEBUG) fprintf(stderr, "fiber: %p, status: %s -> %s\n", (void *)fiber, fiber_status_name(fiber->status), fiber_status_name(s));
    VM_ASSERT(!FIBER_TERMINATED_P(fiber));
    VM_ASSERT(fiber->status != s);
    fiber_verify(fiber);
    fiber->status = s;
}
1092
// Extract the rb_context_t from a Continuation object, type-checking the
// TypedData wrapper (raises TypeError on mismatch).
static rb_context_t *
cont_ptr(VALUE obj)
{
    rb_context_t *cont;

    TypedData_Get_Struct(obj, rb_context_t, &rb_cont_data_type, cont);

    return cont;
}
1102
// Extract the rb_fiber_t from a Fiber object. Raises FiberError if the
// object was allocated but never initialized (NULL data pointer).
static rb_fiber_t *
fiber_ptr(VALUE obj)
{
    rb_fiber_t *fiber;

    TypedData_Get_Struct(obj, rb_fiber_t, &rb_fiber_data_type, fiber);
    if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");

    return fiber;
}
1113
NOINLINE(static VALUE cont_capture(volatile int *volatile stat));

// Raise ThreadError unless the thread has an active vm tag (i.e. it is
// actually executing Ruby code that can be captured / unwound).
#define THREAD_MUST_BE_RUNNING(th) do { \
        if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
    } while (0)
1119
1121rb_fiber_threadptr(const rb_fiber_t *fiber)
1122{
1123 return fiber->cont.saved_ec.thread_ptr;
1124}
1125
// The Thread object (VALUE) on which this continuation/fiber was created.
static VALUE
cont_thread_value(const rb_context_t *cont)
{
    return cont->saved_ec.thread_ptr->self;
}
1131
// GC compaction callback: update any references the context holds that the
// compactor may have moved.
static void
cont_compact(void *ptr)
{
    rb_context_t *cont = ptr;

    if (cont->self) {
        cont->self = rb_gc_location(cont->self);
    }
    cont->value = rb_gc_location(cont->value);
    rb_execution_context_update(&cont->saved_ec);
}
1143
1144static void
1145cont_mark(void *ptr)
1146{
1147 rb_context_t *cont = ptr;
1148
1149 RUBY_MARK_ENTER("cont");
1150 if (cont->self) {
1151 rb_gc_mark_movable(cont->self);
1152 }
1153 rb_gc_mark_movable(cont->value);
1154
1155 rb_execution_context_mark(&cont->saved_ec);
1156 rb_gc_mark(cont_thread_value(cont));
1157
1158 if (cont->saved_vm_stack.ptr) {
1159#ifdef CAPTURE_JUST_VALID_VM_STACK
1160 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
1161 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1162#else
1163 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
1164 cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
1165#endif
1166 }
1167
1168 if (cont->machine.stack) {
1169 if (cont->type == CONTINUATION_CONTEXT) {
1170 /* cont */
1171 rb_gc_mark_locations(cont->machine.stack,
1172 cont->machine.stack + cont->machine.stack_size);
1173 }
1174 else {
1175 /* fiber machine context is marked as part of rb_execution_context_mark, no need to
1176 * do anything here. */
1177 }
1178 }
1179
1180 RUBY_MARK_LEAVE("cont");
1181}
1182
1183#if 0
1184static int
1185fiber_is_root_p(const rb_fiber_t *fiber)
1186{
1187 return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
1188}
1189#endif
1190
static void jit_cont_free(struct rb_jit_cont *cont);

// Free a continuation or fiber (fibers embed rb_context_t as their first
// member, so `cont->type` selects the teardown path). Releases stacks, the
// JIT bookkeeping record, and finally the struct itself.
static void
cont_free(void *ptr)
{
    rb_context_t *cont = ptr;

    RUBY_FREE_ENTER("cont");

    if (cont->type == CONTINUATION_CONTEXT) {
        // Continuations own heap copies of both the VM stack and the machine stack.
        SIZED_FREE_N(cont->saved_ec.vm_stack, cont->saved_ec.vm_stack_size);
        SIZED_FREE_N(cont->machine.stack, cont->machine.stack_size);
    }
    else {
        // Fibers return their pooled stack instead of freeing it.
        rb_fiber_t *fiber = (rb_fiber_t*)cont;
        coroutine_destroy(&fiber->context);
        fiber_stack_release_locked(fiber);
    }

    SIZED_FREE_N(cont->saved_vm_stack.ptr, cont->saved_vm_stack.size);

    VM_ASSERT(cont->jit_cont != NULL);
    jit_cont_free(cont->jit_cont);
    /* free rb_cont_t or rb_fiber_t */
    if (cont->type == CONTINUATION_CONTEXT) {
        SIZED_FREE(cont);
    }
    else {
        // Cast so SIZED_FREE uses the size of the full fiber struct.
        SIZED_FREE((rb_fiber_t *)cont);
    }
    RUBY_FREE_LEAVE("cont");
}
1223
1224static size_t
1225cont_memsize(const void *ptr)
1226{
1227 const rb_context_t *cont = ptr;
1228 size_t size = 0;
1229
1230 size = sizeof(*cont);
1231 if (cont->saved_vm_stack.ptr) {
1232#ifdef CAPTURE_JUST_VALID_VM_STACK
1233 size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1234#else
1235 size_t n = cont->saved_ec.vm_stack_size;
1236#endif
1237 size += n * sizeof(*cont->saved_vm_stack.ptr);
1238 }
1239
1240 if (cont->machine.stack) {
1241 size += cont->machine.stack_size * sizeof(*cont->machine.stack);
1242 }
1243
1244 return size;
1245}
1246
// Compaction helper: refresh a fiber's moved references. If the fiber has a
// wrapper object, only its location is updated; otherwise (e.g. a root fiber
// without a self — TODO confirm) the whole saved EC is updated instead.
void
rb_fiber_update_self(rb_fiber_t *fiber)
{
    if (fiber->cont.self) {
        fiber->cont.self = rb_gc_location(fiber->cont.self);
    }
    else {
        rb_execution_context_update(&fiber->cont.saved_ec);
    }
}
1257
// Mark a fiber's wrapper object as reachable (movable by compaction).
void
rb_fiber_mark_self(const rb_fiber_t *fiber)
{
    rb_gc_mark_movable(fiber->cont.self);
}
1263
// GC compaction callback for Fiber objects: update the block proc, the
// previous fiber in the resume chain, and the embedded context.
static void
fiber_compact(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    fiber->first_proc = rb_gc_location(fiber->first_proc);

    if (fiber->prev) rb_fiber_update_self(fiber->prev);

    cont_compact(&fiber->cont);
    fiber_verify(fiber);
}
1275
// GC mark callback for Fiber objects: the block proc, the previous fiber in
// the resume chain, and everything the embedded context references.
static void
fiber_mark(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    RUBY_MARK_ENTER("cont");
    fiber_verify(fiber);
    rb_gc_mark_movable(fiber->first_proc);
    if (fiber->prev) rb_fiber_mark_self(fiber->prev);
    cont_mark(&fiber->cont);
    RUBY_MARK_LEAVE("cont");
}
1287
// GC free callback for Fiber objects: release the fiber-local storage table,
// then delegate struct/stack teardown to cont_free.
static void
fiber_free(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    RUBY_FREE_ENTER("fiber");

    if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", (void *)fiber, fiber->stack.base);

    if (fiber->cont.saved_ec.local_storage) {
        rb_id_table_free(fiber->cont.saved_ec.local_storage);
    }

    cont_free(&fiber->cont);
    RUBY_FREE_LEAVE("fiber");
}
1303
1304static size_t
1305fiber_memsize(const void *ptr)
1306{
1307 const rb_fiber_t *fiber = ptr;
1308 size_t size = sizeof(*fiber);
1309 const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
1310 const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
1311
1312 /*
1313 * vm.c::thread_memsize already counts th->ec->local_storage
1314 */
1315 if (saved_ec->local_storage && fiber != th->root_fiber) {
1316 size += rb_id_table_memsize(saved_ec->local_storage);
1317 size += rb_obj_memsize_of(saved_ec->storage);
1318 }
1319
1320 size += cont_memsize(&fiber->cont);
1321 return size;
1322}
1323
1324VALUE
1325rb_obj_is_fiber(VALUE obj)
1326{
1327 return RBOOL(rb_typeddata_is_kind_of(obj, &rb_fiber_data_type));
1328}
1329
// Copy the thread's live machine stack into the continuation's heap buffer.
// Handles both stack growth directions by normalizing (src, size) to the
// lower address and byte count of the in-use region.
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    const size_t old_stack_size = cont->machine.stack_size;
    size_t size;

    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
        // Stack grows downward: end is the low address.
        size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
        cont->machine.stack_src = th->ec->machine.stack_end;
    }
    else {
        // Stack grows upward: start is the low address.
        size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
        cont->machine.stack_src = th->ec->machine.stack_start;
    }

    // Reuse (resize) the previous buffer when re-capturing an existing continuation.
    if (cont->machine.stack) {
        SIZED_REALLOC_N(cont->machine.stack, VALUE, cont->machine.stack_size, old_stack_size);
    }
    else {
        cont->machine.stack = ALLOC_N(VALUE, cont->machine.stack_size);
    }

    FLUSH_REGISTER_WINDOWS;
    // The live stack region may contain ASAN-poisoned red zones; unpoison before reading.
    asan_unpoison_memory_region(cont->machine.stack_src, size, false);
    MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
}
1358
// Weak-reference GC callback for continuations: if either cached object in the
// generic-fields cache died, invalidate the whole cache pair.
// (Mirrors fiber_handle_weak_references below.)
static void
cont_handle_weak_references(void *ptr)
{
    rb_context_t *cont = ptr;

    if (!cont) return;

    if (!rb_gc_handle_weak_references_alive_p(cont->saved_ec.gen_fields_cache.obj) ||
        !rb_gc_handle_weak_references_alive_p(cont->saved_ec.gen_fields_cache.fields_obj)) {
        cont->saved_ec.gen_fields_cache.obj = Qundef;
        cont->saved_ec.gen_fields_cache.fields_obj = Qundef;
    }
}
1372
1373static const rb_data_type_t rb_cont_data_type = {
1374 "continuation",
1375 {cont_mark, cont_free, cont_memsize, cont_compact, cont_handle_weak_references},
1377};
1378
// Snapshot the thread's current execution context into the continuation.
static inline void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    rb_execution_context_t *sec = &cont->saved_ec;

    VM_ASSERT(th->status == THREAD_RUNNABLE);

    /* save thread context */
    *sec = *th->ec;

    /* saved_ec->machine.stack_end should be NULL */
    /* because it may happen GC afterward */
    sec->machine.stack_end = NULL;
}
1393
// Protects the global `first_jit_cont` linked list.
static rb_nativethread_lock_t jit_cont_lock;

// Register a new continuation with execution context `ec`. Return JIT info about
// the continuation.
static struct rb_jit_cont *
jit_cont_new(rb_execution_context_t *ec)
{
    struct rb_jit_cont *cont;

    // We need to use calloc instead of something like ZALLOC to avoid triggering GC here.
    // When this function is called from rb_thread_alloc through rb_threadptr_root_fiber_setup,
    // the thread is still being prepared and marking it causes SEGV.
    cont = ruby_mimcalloc(1, sizeof(struct rb_jit_cont));
    if (cont == NULL)
        rb_memerror();
    cont->ec = ec;

    // Prepend to the global doubly-linked list under the lock.
    rb_native_mutex_lock(&jit_cont_lock);
    if (first_jit_cont == NULL) {
        cont->next = cont->prev = NULL;
    }
    else {
        cont->prev = NULL;
        cont->next = first_jit_cont;
        first_jit_cont->prev = cont;
    }
    first_jit_cont = cont;
    rb_native_mutex_unlock(&jit_cont_lock);

    return cont;
}
1425
// Unregister continuation `cont`: unlink it from the global list under the
// lock and free it. NULL is tolerated as a no-op.
static void
jit_cont_free(struct rb_jit_cont *cont)
{
    if (!cont) return;

    rb_native_mutex_lock(&jit_cont_lock);
    if (cont == first_jit_cont) {
        first_jit_cont = cont->next;
        if (first_jit_cont != NULL)
            first_jit_cont->prev = NULL;
    }
    else {
        cont->prev->next = cont->next;
        if (cont->next != NULL)
            cont->next->prev = cont->prev;
    }
    rb_native_mutex_unlock(&jit_cont_lock);

    // Allocated with ruby_mimcalloc in jit_cont_new, so free with the matching allocator.
    ruby_mimfree(cont);
}
1447
// Call a given callback against all on-stack ISEQs.
// Walks every registered continuation's control-frame chain.
// NOTE(review): iterates `first_jit_cont` without taking jit_cont_lock —
// presumably only called while other threads are stopped; confirm at call sites.
void
rb_jit_cont_each_iseq(rb_iseq_callback callback, void *data)
{
    struct rb_jit_cont *cont;
    for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
        // Skip contexts whose VM stack has been detached (e.g. captured continuations).
        if (cont->ec->vm_stack == NULL)
            continue;

        const rb_control_frame_t *cfp = cont->ec->cfp;
        while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
            if (CFP_PC(cfp) && CFP_ISEQ(cfp)) {
                const rb_iseq_t *iseq = CFP_ISEQ(cfp);
                if (iseq && imemo_type((VALUE)iseq) == imemo_iseq) {
                    callback(iseq, data);
                }
            }
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        }
    }
}
1469
#if USE_YJIT
// Update the jit_return of all CFPs to leave_exit unless it's leave_exception or not set.
// This prevents jit_exec_exception from jumping to the caller after invalidation.
void
rb_yjit_cancel_jit_return(void *leave_exit, void *leave_exception)
{
    struct rb_jit_cont *cont;
    for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
        if (cont->ec->vm_stack == NULL)
            continue;

        const rb_control_frame_t *cfp = cont->ec->cfp;
        while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
            if (cfp->jit_return && cfp->jit_return != leave_exception) {
                // Cast away const: cfp is logically mutable here, the pointer
                // is const only because of the frame-walking API.
                ((rb_control_frame_t *)cfp)->jit_return = leave_exit;
            }
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        }
    }
}
#endif
1491
// Finish working with jit_cont.
// Frees every registered record and destroys the lock. Runs at VM shutdown;
// `first_jit_cont` is intentionally left dangling afterwards.
void
rb_jit_cont_finish(void)
{
    struct rb_jit_cont *cont, *next;
    for (cont = first_jit_cont; cont != NULL; cont = next) {
        next = cont->next;
        ruby_mimfree(cont); // Don't use xfree because it's allocated by mimcalloc.
    }
    rb_native_mutex_destroy(&jit_cont_lock);
}
1503
// Attach a JIT bookkeeping record to a freshly-created context.
static void
cont_init_jit_cont(rb_context_t *cont)
{
    VM_ASSERT(cont->jit_cont == NULL);
    // We always allocate this since YJIT may be enabled later
    cont->jit_cont = jit_cont_new(&(cont->saved_ec));
}
1511
1513rb_fiberptr_get_ec(struct rb_fiber_struct *fiber)
1514{
1515 return &fiber->cont.saved_ec;
1516}
1517
// Common initialization for a new context: snapshot the thread's EC, then
// reset the per-context fields that must not be shared with the thread.
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_ec.thread_ptr = th;
    cont->saved_ec.local_storage = NULL;
    cont->saved_ec.local_storage_recursive_hash = Qnil;
    cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
    cont_init_jit_cont(cont);
}
1529
// Allocate and initialize a new Continuation object for the current thread.
// `contval` is volatile so the wrapper stays on the stack and visible to GC.
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &rb_cont_data_type, cont);
    // Opt into the weak-reference callback (cont_handle_weak_references).
    rb_gc_declare_weak_references(contval);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
1544
// The Fiber wrapper object for this fiber struct (0 if none was created).
VALUE
rb_fiberptr_self(struct rb_fiber_struct *fiber)
{
    return fiber->cont.self;
}
1550
// Whether this fiber was created as blocking (Fiber.new blocking: true).
unsigned int
rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
{
    return fiber->blocking;
}
1556
// Initialize the jit_cont_lock
// Called once during VM boot, before any jit_cont_new.
void
rb_jit_cont_init(void)
{
    rb_native_mutex_initialize(&jit_cont_lock);
}
1563
1564#if 0
1565void
1566show_vm_stack(const rb_execution_context_t *ec)
1567{
1568 VALUE *p = ec->vm_stack;
1569 while (p < ec->cfp->sp) {
1570 fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
1571 rb_obj_info_dump(*p);
1572 p++;
1573 }
1574}
1575
1576void
1577show_vm_pcs(const rb_control_frame_t *cfp,
1578 const rb_control_frame_t *end_of_cfp)
1579{
1580 int i=0;
1581 while (cfp != end_of_cfp) {
1582 int pc = 0;
1583 if (CFP_ISEQ(cfp)) {
1584 pc = cfp->pc - ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded;
1585 }
1586 fprintf(stderr, "%2d pc: %d\n", i++, pc);
1587 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1588 }
1589}
1590#endif
1591
// Capture the current execution state into a new Continuation.
// Returns twice, setjmp-style: sets *stat = 0 and returns the Continuation
// object on initial capture, and sets *stat = 1 and returns the passed value
// when later re-entered via cont.call / longjmp.
static VALUE
cont_capture(volatile int *volatile stat)
{
    // `cont`/`contval` are volatile: their values must survive the longjmp
    // back into this frame.
    rb_context_t *volatile cont;
    rb_thread_t *th = GET_THREAD();
    volatile VALUE contval;
    const rb_execution_context_t *ec = th->ec;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th->ec);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    // Save only the live regions: `slen` slots of values from the bottom, and
    // `clen` words of control frames from the top of the VM stack.
    cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
    cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
    cont->saved_vm_stack.size = cont->saved_vm_stack.slen + cont->saved_vm_stack.clen;
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
    MEMCPY(cont->saved_vm_stack.ptr,
           ec->vm_stack,
           VALUE, cont->saved_vm_stack.slen);
    MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
           (VALUE*)ec->cfp,
           VALUE,
           cont->saved_vm_stack.clen);
#else
    cont->saved_vm_stack.size = ec->vm_stack_size;
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
    MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
#endif
    // At this point, `cfp` is valid but `vm_stack` should be cleared:
    rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
    VM_ASSERT(cont->saved_ec.cfp != NULL);
    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        // Second return: we arrived here via cont.call -> longjmp.
        VALUE value;

        VAR_INITIALIZED(cont);
        value = cont->value;
        // argc == -1 marks an exception handed to the continuation.
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        // First return: hand back the freshly captured continuation.
        *stat = 0;
        return contval;
    }
}
1642
// Restore the thread state saved in `cont` onto the current thread.
// For continuations this copies the saved VM stack back over the live one and
// restores the saved EC fields; for fibers it just switches execution context.
static inline void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD();

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        rb_execution_context_t *sec = &cont->saved_ec;
        rb_fiber_t *fiber = NULL;

        // Switch to the fiber the continuation was captured on (falling back
        // to the root fiber) so th->ec points at the right context.
        if (sec->fiber_ptr != NULL) {
            fiber = sec->fiber_ptr;
        }
        else if (th->root_fiber) {
            fiber = th->root_fiber;
        }

        if (fiber && th->ec != &fiber->cont.saved_ec) {
            ec_switch(th, fiber);
        }

        if (th->ec->trace_arg != sec->trace_arg) {
            rb_raise(rb_eRuntimeError, "can't call across trace_func");
        }

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
        if (th->ec->tag != sec->tag) {
            /* find the lowest common ancestor tag of the current EC and the saved EC */

            struct rb_vm_tag *lowest_common_ancestor = NULL;
            size_t num_tags = 0;
            size_t num_saved_tags = 0;
            for (struct rb_vm_tag *tag = th->ec->tag; tag != NULL; tag = tag->prev) {
                ++num_tags;
            }
            for (struct rb_vm_tag *tag = sec->tag; tag != NULL; tag = tag->prev) {
                ++num_saved_tags;
            }

            size_t min_tags = num_tags <= num_saved_tags ? num_tags : num_saved_tags;

            // Walk the deeper chain up to the same depth before comparing.
            struct rb_vm_tag *tag = th->ec->tag;
            while (num_tags > min_tags) {
                tag = tag->prev;
                --num_tags;
            }

            struct rb_vm_tag *saved_tag = sec->tag;
            while (num_saved_tags > min_tags) {
                saved_tag = saved_tag->prev;
                --num_saved_tags;
            }

            while (min_tags > 0) {
                if (tag == saved_tag) {
                    lowest_common_ancestor = tag;
                    break;
                }
                tag = tag->prev;
                saved_tag = saved_tag->prev;
                --min_tags;
            }

            /* free all the jump buffers between the current EC's tag and the lowest common ancestor tag */
            for (struct rb_vm_tag *tag = th->ec->tag; tag != lowest_common_ancestor; tag = tag->prev) {
                rb_vm_tag_jmpbuf_deinit(&tag->buf);
            }
        }
#endif

        /* copy vm stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
        // Values go back at the bottom; control frames at the top (the layout
        // used by cont_capture).
        MEMCPY(th->ec->vm_stack,
               cont->saved_vm_stack.ptr,
               VALUE, cont->saved_vm_stack.slen);
        MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
               cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
               VALUE, cont->saved_vm_stack.clen);
#else
        MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
#endif
        /* other members of ec */

        th->ec->cfp = sec->cfp;
        th->ec->raised_flag = sec->raised_flag;
        th->ec->tag = sec->tag;
        th->ec->root_lep = sec->root_lep;
        th->ec->root_svar = sec->root_svar;
        th->ec->errinfo = sec->errinfo;

        VM_ASSERT(th->ec->vm_stack != NULL);
    }
    else {
        /* fiber */
        fiber_restore_thread(th, (rb_fiber_t*)cont);
    }
}
1741
NOINLINE(static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));

// Switch machine context from old_fiber to new_fiber. Records the old fiber's
// live machine-stack extent for GC first, performs the coroutine transfer,
// then (once control returns here) reinstates old_fiber as current.
static void
fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
{
    rb_thread_t *th = GET_THREAD();

    /* save old_fiber's machine stack - to ensure efficient garbage collection */
    if (!FIBER_TERMINATED_P(old_fiber)) {
        STACK_GROW_DIR_DETECTION;
        SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
            old_fiber->cont.machine.stack = th->ec->machine.stack_end;
        }
        else {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
            old_fiber->cont.machine.stack = th->ec->machine.stack_start;
        }
    }

    /* these values are used in rb_gc_mark_machine_context to mark the fiber's stack. */
    old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
    old_fiber->cont.saved_ec.machine.stack_end = FIBER_TERMINATED_P(old_fiber) ? NULL : th->ec->machine.stack_end;


    // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", (void*)old_fiber, old_fiber->stack.base, (void*)new_fiber, new_fiber->stack.base);

#if defined(COROUTINE_SANITIZE_ADDRESS)
    // Tell ASAN we are leaving this stack; a terminated fiber's fake stack is
    // not saved because we will never return to it.
    __sanitizer_start_switch_fiber(FIBER_TERMINATED_P(old_fiber) ? NULL : &old_fiber->context.fake_stack, new_fiber->context.stack_base, new_fiber->context.stack_size);
#endif

    /* swap machine context */
    struct coroutine_context * from = coroutine_transfer(&old_fiber->context, &new_fiber->context);

#if defined(COROUTINE_SANITIZE_ADDRESS)
    __sanitizer_finish_switch_fiber(old_fiber->context.fake_stack, NULL, NULL);
#endif

    if (from == NULL) {
        rb_syserr_fail(errno, "coroutine_transfer");
    }

    /* restore thread context */
    fiber_restore_thread(th, old_fiber);

    // It's possible to get here, and new_fiber is already freed.
    // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", (void*)old_fiber, old_fiber->stack.base, (void*)new_fiber, new_fiber->stack.base);
}
1791
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

// Second stage of continuation restore: put the thread state and the saved
// machine-stack bytes back, then longjmp into cont_capture's setjmp.
// Only called once cont_restore_0 has ensured the current frame is below the
// region about to be overwritten.
static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#if (defined(_M_AMD64) && !defined(__MINGW64__)) || defined(_M_ARM64)
    {
        /* workaround for x64 and arm64 SEH on Windows */
        // Patch the saved jmp_buf's SEH frame pointer with the current one so
        // structured exception handling stays consistent after the longjmp.
        jmp_buf buf;
        setjmp(buf);
        _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
        bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
    }
#endif
    if (cont->machine.stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine.stack_src, cont->machine.stack,
               VALUE, cont->machine.stack_size);
    }

    ruby_longjmp(cont->jmpbuf, 1);
}
1817
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

// First stage of continuation restore: grow the machine stack (by recursion
// or alloca) until the current frame lies outside the region that
// cont_restore_1 will overwrite with the saved stack bytes, then invoke it.
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine.stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        // Direction unknown at compile time: detect it by comparing frames.
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine.stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                // We need to make sure that the stack pointer is moved,
                // but some compilers may remove the allocation by optimization.
                // We hope that the following read/write will prevent such an optimization.
                *sp = Qfalse;
                space[0] = *sp;
# else
                // No alloca: recurse, consuming STACK_PAD_SIZE per frame.
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
1872
1873/*
1874 * Document-class: Continuation
1875 *
1876 * Continuation objects are generated by Kernel#callcc,
1877 * after having +require+d <i>continuation</i>. They hold
1878 * a return address and execution context, allowing a nonlocal return
1879 * to the end of the #callcc block from anywhere within a
1880 * program. Continuations are somewhat analogous to a structured
1881 * version of C's <code>setjmp/longjmp</code> (although they contain
1882 * more state, so you might consider them closer to threads).
1883 *
1884 * For instance:
1885 *
1886 * require "continuation"
1887 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1888 * callcc{|cc| $cc = cc}
1889 * puts(message = arr.shift)
1890 * $cc.call unless message =~ /Max/
1891 *
1892 * <em>produces:</em>
1893 *
1894 * Freddie
1895 * Herbie
1896 * Ron
1897 * Max
1898 *
 *  You can also call callcc from within other methods:
1900 *
1901 * require "continuation"
1902 *
1903 * def g
1904 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1905 * cc = callcc { |cc| cc }
1906 * puts arr.shift
1907 * return cc, arr.size
1908 * end
1909 *
1910 * def f
1911 * c, size = g
1912 * c.call(c) if size > 1
1913 * end
1914 *
1915 * f
1916 *
1917 * This (somewhat contrived) example allows the inner loop to abandon
1918 * processing early:
1919 *
1920 * require "continuation"
1921 * callcc {|cont|
1922 * for i in 0..4
1923 * print "#{i}: "
1924 * for j in i*5...(i+1)*5
1925 * cont.call() if j == 17
1926 * printf "%3d", j
1927 * end
1928 * end
1929 * }
1930 * puts
1931 *
1932 * <em>produces:</em>
1933 *
1934 * 0: 0 1 2 3 4
1935 * 1: 5 6 7 8 9
1936 * 2: 10 11 12 13 14
1937 * 3: 15 16
1938 */
1939
1940/*
1941 * call-seq:
1942 * callcc {|cont| block } -> obj
1943 *
1944 * Generates a Continuation object, which it passes to
1945 * the associated block. You need to <code>require
1946 * 'continuation'</code> before using this method. Performing a
1947 * <em>cont</em><code>.call</code> will cause the #callcc
1948 * to return (as will falling through the end of the block). The
1949 * value returned by the #callcc is the value of the
1950 * block, or the value passed to <em>cont</em><code>.call</code>. See
1951 * class Continuation for more details. Also see
1952 * Kernel#throw for an alternative mechanism for
1953 * unwinding a call stack.
1954 */
1955
// Kernel#callcc: capture a continuation and yield it to the block.
static VALUE
rb_callcc(VALUE self)
{
    // `called` is 0 on initial capture and 1 when re-entered via cont.call;
    // both locals are volatile so they survive the longjmp.
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        // Re-entered: return the value passed to cont.call.
        return val;
    }
    else {
        // Initial capture: run the block with the continuation.
        return rb_yield(val);
    }
}
#ifdef RUBY_ASAN_ENABLED
/* callcc can't possibly work with ASAN; see bug #20273. Also this function
 * definition below avoids a "defined and not used" warning. */
MAYBE_UNUSED(static void notusing_callcc(void)) { rb_callcc(Qnil); }
# define rb_callcc rb_f_notimplement
#endif
1975
1976
1977static VALUE
1978make_passing_arg(int argc, const VALUE *argv)
1979{
1980 switch (argc) {
1981 case -1:
1982 return argv[0];
1983 case 0:
1984 return Qnil;
1985 case 1:
1986 return argv[0];
1987 default:
1988 return rb_ary_new4(argc, argv);
1989 }
1990}
1991
// A Ruby-callable function: takes one VALUE and returns one VALUE.
typedef VALUE e_proc(VALUE);

NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));
1995
1996/*
1997 * call-seq:
1998 * cont.call(args, ...)
1999 * cont[args, ...]
2000 *
2001 * Invokes the continuation. The program continues from the end of
2002 * the #callcc block. If no arguments are given, the original #callcc
2003 * returns +nil+. If one argument is given, #callcc returns
2004 * it. Otherwise, an array containing <i>args</i> is returned.
2005 *
2006 * callcc {|cont| cont.call } #=> nil
2007 * callcc {|cont| cont.call 1 } #=> 1
2008 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
2009 */
2010
2011static VALUE
2012rb_cont_call(int argc, VALUE *argv, VALUE contval)
2013{
2014 rb_context_t *cont = cont_ptr(contval);
2015 rb_thread_t *th = GET_THREAD();
2016
2017 if (cont_thread_value(cont) != th->self) {
2018 rb_raise(rb_eRuntimeError, "continuation called across threads");
2019 }
2020 if (cont->saved_ec.fiber_ptr) {
2021 if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
2022 rb_raise(rb_eRuntimeError, "continuation called across fiber");
2023 }
2024 }
2025
2026 cont->argc = argc;
2027 cont->value = make_passing_arg(argc, argv);
2028
2029 cont_restore_0(cont, &contval);
2031}
2032
2033/*********/
2034/* fiber */
2035/*********/
2036
2037/*
2038 * Document-class: Fiber
2039 *
2040 * Fibers are primitives for implementing light weight cooperative
2041 * concurrency in Ruby. Basically they are a means of creating code blocks
2042 * that can be paused and resumed, much like threads. The main difference
2043 * is that they are never preempted and that the scheduling must be done by
2044 * the programmer and not the VM.
2045 *
2046 * As opposed to other stackless light weight concurrency models, each fiber
2047 * comes with a stack. This enables the fiber to be paused from deeply
2048 * nested function calls within the fiber block. See the ruby(1)
2049 * manpage to configure the size of the fiber stack(s).
2050 *
2051 * When a fiber is created it will not run automatically. Rather it must
2052 * be explicitly asked to run using the Fiber#resume method.
2053 * The code running inside the fiber can give up control by calling
2054 * Fiber.yield in which case it yields control back to caller (the
2055 * caller of the Fiber#resume).
2056 *
2057 * Upon yielding or termination the Fiber returns the value of the last
2058 * executed expression
2059 *
2060 * For instance:
2061 *
2062 * fiber = Fiber.new do
2063 * Fiber.yield 1
2064 * 2
2065 * end
2066 *
2067 * puts fiber.resume
2068 * puts fiber.resume
2069 * puts fiber.resume
2070 *
2071 * <em>produces</em>
2072 *
2073 * 1
2074 * 2
2075 * FiberError: dead fiber called
2076 *
2077 * The Fiber#resume method accepts an arbitrary number of parameters,
2078 * if it is the first call to #resume then they will be passed as
2079 * block arguments. Otherwise they will be the return value of the
2080 * call to Fiber.yield
2081 *
2082 * Example:
2083 *
2084 * fiber = Fiber.new do |first|
2085 * second = Fiber.yield first + 2
2086 * end
2087 *
2088 * puts fiber.resume 10
2089 * puts fiber.resume 1_000_000
2090 * puts fiber.resume "The fiber will be dead before I can cause trouble"
2091 *
2092 * <em>produces</em>
2093 *
2094 * 12
2095 * 1000000
2096 * FiberError: dead fiber called
2097 *
2098 * == Non-blocking Fibers
2099 *
2100 * The concept of <em>non-blocking fiber</em> was introduced in Ruby 3.0.
2101 * A non-blocking fiber, when reaching an operation that would normally block
2102 * the fiber (like <code>sleep</code>, or wait for another process or I/O)
2103 * will yield control to other fibers and allow the <em>scheduler</em> to
2104 * handle blocking and waking up (resuming) this fiber when it can proceed.
2105 *
 * For a Fiber to behave as non-blocking, it needs to be created in Fiber.new with
2107 * <tt>blocking: false</tt> (which is the default), and Fiber.scheduler
2108 * should be set with Fiber.set_scheduler. If Fiber.scheduler is not set in
2109 * the current thread, blocking and non-blocking fibers' behavior is identical.
2110 *
2111 * Ruby doesn't provide a scheduler class: it is expected to be implemented by
2112 * the user and correspond to Fiber::Scheduler.
2113 *
2114 * There is also Fiber.schedule method, which is expected to immediately perform
2115 * the given block in a non-blocking manner. Its actual implementation is up to
2116 * the scheduler.
2117 *
2118 */
2119
2120static void
2121fiber_handle_weak_references(void *ptr)
2122{
2123 rb_fiber_t *fiber = ptr;
2124
2125 if (!fiber) return;
2126
2127 if (!rb_gc_handle_weak_references_alive_p(fiber->cont.saved_ec.gen_fields_cache.obj) ||
2128 !rb_gc_handle_weak_references_alive_p(fiber->cont.saved_ec.gen_fields_cache.fields_obj)) {
2129 fiber->cont.saved_ec.gen_fields_cache.obj = Qundef;
2130 fiber->cont.saved_ec.gen_fields_cache.fields_obj = Qundef;
2131 }
2132}
2133
/* TypedData descriptor for Fiber objects: wires the fiber's GC hooks
 * (mark/free/memsize/compact) plus the weak-reference handler above. */
static const rb_data_type_t rb_fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize, fiber_compact, fiber_handle_weak_references},
};
2139
2140static VALUE
2141fiber_alloc(VALUE klass)
2142{
2143 VALUE obj = TypedData_Wrap_Struct(klass, &rb_fiber_data_type, 0);
2144 rb_gc_declare_weak_references(obj);
2145 return obj;
2146}
2147
2148static rb_serial_t
2149next_ec_serial(rb_ractor_t *cr)
2150{
2151 return cr->next_ec_serial++;
2152}
2153
/* Allocate and initialize the rb_fiber_t backing `fiber_value`.
 * Raises RuntimeError if the wrapper already has a struct attached
 * (i.e. Fiber#initialize was called twice). */
static rb_fiber_t*
fiber_t_alloc(VALUE fiber_value, unsigned int blocking)
{
    rb_fiber_t *fiber;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fiber_value) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fiber = ZALLOC(rb_fiber_t);
    fiber->cont.self = fiber_value;
    fiber->cont.type = FIBER_CONTEXT;
    fiber->blocking = blocking;
    fiber->killed = 0;
    cont_init(&fiber->cont, th);

    /* The fiber's saved EC points back at its owning fiber and gets a
     * fresh serial; the VM stack is allocated lazily in fiber_prepare_stack. */
    fiber->cont.saved_ec.fiber_ptr = fiber;
    fiber->cont.saved_ec.serial = next_ec_serial(th->ractor);
    rb_ec_clear_vm_stack(&fiber->cont.saved_ec);

    fiber->prev = NULL;

    /* fiber->status == 0 == CREATED
     * So that we don't need to set status: fiber_status_set(fiber, FIBER_CREATED); */
    VM_ASSERT(FIBER_CREATED_P(fiber));

    /* Attach the struct to the wrapper only after full initialization. */
    DATA_PTR(fiber_value) = fiber;

    return fiber;
}
2186
2187static inline rb_fiber_t*
2188fiber_current(void)
2189{
2190 rb_execution_context_t *ec = GET_EC();
2191 return ec->fiber_ptr;
2192}
2193
2194static inline VALUE
2195current_fiber_storage(void)
2196{
2197 rb_execution_context_t *ec = GET_EC();
2198 return ec->storage;
2199}
2200
2201static inline VALUE
2202inherit_fiber_storage(void)
2203{
2204 return rb_obj_dup(current_fiber_storage());
2205}
2206
2207static inline void
2208fiber_storage_set(struct rb_fiber_struct *fiber, VALUE storage)
2209{
2210 fiber->cont.saved_ec.storage = storage;
2211}
2212
2213static inline VALUE
2214fiber_storage_get(rb_fiber_t *fiber, int allocate)
2215{
2216 VALUE storage = fiber->cont.saved_ec.storage;
2217 if (storage == Qnil && allocate) {
2218 storage = rb_hash_new();
2219 fiber_storage_set(fiber, storage);
2220 }
2221 return storage;
2222}
2223
2224static void
2225storage_access_must_be_from_same_fiber(VALUE self)
2226{
2227 rb_fiber_t *fiber = fiber_ptr(self);
2228 rb_fiber_t *current = fiber_current();
2229 if (fiber != current) {
2230 rb_raise(rb_eArgError, "Fiber storage can only be accessed from the Fiber it belongs to");
2231 }
2232}
2233
2240static VALUE
2241rb_fiber_storage_get(VALUE self)
2242{
2243 storage_access_must_be_from_same_fiber(self);
2244
2245 VALUE storage = fiber_storage_get(fiber_ptr(self), FALSE);
2246
2247 if (storage == Qnil) {
2248 return Qnil;
2249 }
2250 else {
2251 return rb_obj_dup(storage);
2252 }
2253}
2254
2255static int
2256fiber_storage_validate_each(VALUE key, VALUE value, VALUE _argument)
2257{
2258 Check_Type(key, T_SYMBOL);
2259
2260 return ST_CONTINUE;
2261}
2262
2263static void
2264fiber_storage_validate(VALUE value)
2265{
2266 // nil is an allowed value and will be lazily initialized.
2267 if (value == Qnil) return;
2268
2269 if (!RB_TYPE_P(value, T_HASH)) {
2270 rb_raise(rb_eTypeError, "storage must be a hash");
2271 }
2272
2273 if (RB_OBJ_FROZEN(value)) {
2274 rb_raise(rb_eFrozenError, "storage must not be frozen");
2275 }
2276
2277 rb_hash_foreach(value, fiber_storage_validate_each, Qundef);
2278}
2279
2302static VALUE
2303rb_fiber_storage_set(VALUE self, VALUE value)
2304{
2305 if (rb_warning_category_enabled_p(RB_WARN_CATEGORY_EXPERIMENTAL)) {
2307 "Fiber#storage= is experimental and may be removed in the future!");
2308 }
2309
2310 storage_access_must_be_from_same_fiber(self);
2311 fiber_storage_validate(value);
2312
2313 fiber_ptr(self)->cont.saved_ec.storage = rb_obj_dup(value);
2314 return value;
2315}
2316
2327static VALUE
2328rb_fiber_storage_aref(VALUE class, VALUE key)
2329{
2330 key = rb_to_symbol(key);
2331
2332 VALUE storage = fiber_storage_get(fiber_current(), FALSE);
2333 if (storage == Qnil) return Qnil;
2334
2335 return rb_hash_aref(storage, key);
2336}
2337
2348static VALUE
2349rb_fiber_storage_aset(VALUE class, VALUE key, VALUE value)
2350{
2351 key = rb_to_symbol(key);
2352
2353 VALUE storage = fiber_storage_get(fiber_current(), value != Qnil);
2354 if (storage == Qnil) return Qnil;
2355
2356 if (value == Qnil) {
2357 return rb_hash_delete(storage, key);
2358 }
2359 else {
2360 return rb_hash_aset(storage, key, value);
2361 }
2362}
2363
2364static VALUE
2365fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool, unsigned int blocking, VALUE storage)
2366{
2367 if (storage == Qundef || storage == Qtrue) {
2368 // The default, inherit storage (dup) from the current fiber:
2369 storage = inherit_fiber_storage();
2370 }
2371 else /* nil, hash, etc. */ {
2372 fiber_storage_validate(storage);
2373 storage = rb_obj_dup(storage);
2374 }
2375
2376 rb_fiber_t *fiber = fiber_t_alloc(self, blocking);
2377
2378 fiber->cont.saved_ec.storage = storage;
2379 fiber->first_proc = proc;
2380 fiber->stack.base = NULL;
2381 fiber->stack.pool = fiber_pool;
2382
2383 return self;
2384}
2385
/* Allocate the fiber's coroutine stack and set up a fresh VM stack and
 * execution-context state inside it. Called on first switch to a fiber
 * that is still in the CREATED state. */
static void
fiber_prepare_stack(rb_fiber_t *fiber)
{
    rb_context_t *cont = &fiber->cont;
    rb_execution_context_t *sec = &cont->saved_ec;

    size_t vm_stack_size = 0;
    VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);

    /* initialize cont */
    cont->saved_vm_stack.ptr = NULL;
    rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));

    /* Reset per-EC state that must start empty for a fresh fiber. */
    sec->tag = NULL;
    sec->local_storage = NULL;
    sec->local_storage_recursive_hash = Qnil;
    sec->local_storage_recursive_hash_for_trace = Qnil;
}
2404
2405static struct fiber_pool *
2406rb_fiber_pool_default(VALUE pool)
2407{
2408 return &shared_fiber_pool;
2409}
2410
2411VALUE rb_fiber_inherit_storage(struct rb_execution_context_struct *ec, struct rb_fiber_struct *fiber)
2412{
2413 VALUE storage = rb_obj_dup(ec->storage);
2414 fiber->cont.saved_ec.storage = storage;
2415 return storage;
2416}
2417
2418/* :nodoc: */
2419static VALUE
2420rb_fiber_initialize_kw(int argc, VALUE* argv, VALUE self, int kw_splat)
2421{
2422 VALUE pool = Qnil;
2423 VALUE blocking = Qfalse;
2424 VALUE storage = Qundef;
2425
2426 if (kw_splat != RB_NO_KEYWORDS) {
2427 VALUE options = Qnil;
2428 VALUE arguments[3] = {Qundef};
2429
2430 argc = rb_scan_args_kw(kw_splat, argc, argv, ":", &options);
2431 rb_get_kwargs(options, fiber_initialize_keywords, 0, 3, arguments);
2432
2433 if (!UNDEF_P(arguments[0])) {
2434 blocking = arguments[0];
2435 }
2436
2437 if (!UNDEF_P(arguments[1])) {
2438 pool = arguments[1];
2439 }
2440
2441 storage = arguments[2];
2442 }
2443
2444 return fiber_initialize(self, rb_block_proc(), rb_fiber_pool_default(pool), RTEST(blocking), storage);
2445}
2446
2447/*
2448 * call-seq:
2449 * Fiber.new(blocking: false, storage: true) { |*args| ... } -> fiber
2450 *
2451 * Creates new Fiber. Initially, the fiber is not running and can be resumed
2452 * with #resume. Arguments to the first #resume call will be passed to the
2453 * block:
2454 *
2455 * f = Fiber.new do |initial|
2456 * current = initial
2457 * loop do
2458 * puts "current: #{current.inspect}"
2459 * current = Fiber.yield
2460 * end
2461 * end
2462 * f.resume(100) # prints: current: 100
2463 * f.resume(1, 2, 3) # prints: current: [1, 2, 3]
2464 * f.resume # prints: current: nil
2465 * # ... and so on ...
2466 *
2467 * If <tt>blocking: false</tt> is passed to <tt>Fiber.new</tt>, _and_ current
2468 * thread has a Fiber.scheduler defined, the Fiber becomes non-blocking (see
2469 * "Non-blocking Fibers" section in class docs).
2470 *
2471 * If the <tt>storage</tt> is unspecified, the default is to inherit a copy of
2472 * the storage from the current fiber. This is the same as specifying
2473 * <tt>storage: true</tt>.
2474 *
2475 * Fiber[:x] = 1
2476 * Fiber.new do
2477 * Fiber[:x] # => 1
2478 * Fiber[:x] = 2
2479 * end.resume
2480 * Fiber[:x] # => 1
2481 *
2482 * If the given <tt>storage</tt> is <tt>nil</tt>, this function will lazy
2483 * initialize the internal storage, which starts as an empty hash.
2484 *
2485 * Fiber[:x] = "Hello World"
2486 * Fiber.new(storage: nil) do
2487 * Fiber[:x] # nil
2488 * end
2489 *
2490 * Otherwise, the given <tt>storage</tt> is used as the new fiber's storage,
2491 * and it must be an instance of Hash.
2492 *
2493 * Explicitly using <tt>storage: true</tt> is currently experimental and may
2494 * change in the future.
2495 */
2496static VALUE
2497rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
2498{
2499 return rb_fiber_initialize_kw(argc, argv, self, rb_keyword_given_p());
2500}
2501
2502VALUE
2503rb_fiber_new_storage(rb_block_call_func_t func, VALUE obj, VALUE storage)
2504{
2505 return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 0, storage);
2506}
2507
2508VALUE
2509rb_fiber_new(rb_block_call_func_t func, VALUE obj)
2510{
2511 return rb_fiber_new_storage(func, obj, Qtrue);
2512}
2513
2514static VALUE
2515rb_fiber_s_schedule_kw(int argc, VALUE* argv, int kw_splat)
2516{
2517 rb_thread_t * th = GET_THREAD();
2518 VALUE scheduler = th->scheduler;
2519 VALUE fiber = Qnil;
2520
2521 if (scheduler != Qnil) {
2522 fiber = rb_fiber_scheduler_fiber(scheduler, argc, argv, kw_splat);
2523 }
2524 else {
2525 rb_raise(rb_eRuntimeError, "No scheduler is available!");
2526 }
2527
2528 return fiber;
2529}
2530
2531/*
2532 * call-seq:
2533 * Fiber.schedule { |*args| ... } -> fiber
2534 *
2535 * The method is <em>expected</em> to immediately run the provided block of code in a
2536 * separate non-blocking fiber.
2537 *
2538 * puts "Go to sleep!"
2539 *
2540 * Fiber.set_scheduler(MyScheduler.new)
2541 *
2542 * Fiber.schedule do
2543 * puts "Going to sleep"
2544 * sleep(1)
2545 * puts "I slept well"
2546 * end
2547 *
2548 * puts "Wakey-wakey, sleepyhead"
2549 *
2550 * Assuming MyScheduler is properly implemented, this program will produce:
2551 *
2552 * Go to sleep!
2553 * Going to sleep
2554 * Wakey-wakey, sleepyhead
2555 * ...1 sec pause here...
2556 * I slept well
2557 *
2558 * ...e.g. on the first blocking operation inside the Fiber (<tt>sleep(1)</tt>),
2559 * the control is yielded to the outside code (main fiber), and <em>at the end
2560 * of that execution</em>, the scheduler takes care of properly resuming all the
2561 * blocked fibers.
2562 *
2563 * Note that the behavior described above is how the method is <em>expected</em>
2564 * to behave, actual behavior is up to the current scheduler's implementation of
2565 * Fiber::Scheduler#fiber method. Ruby doesn't enforce this method to
2566 * behave in any particular way.
2567 *
2568 * If the scheduler is not set, the method raises
2569 * <tt>RuntimeError (No scheduler is available!)</tt>.
2570 *
2571 */
2572static VALUE
2573rb_fiber_s_schedule(int argc, VALUE *argv, VALUE obj)
2574{
2575 return rb_fiber_s_schedule_kw(argc, argv, rb_keyword_given_p());
2576}
2577
2578/*
2579 * call-seq:
2580 * Fiber.scheduler -> obj or nil
2581 *
2582 * Returns the Fiber scheduler, that was last set for the current thread with Fiber.set_scheduler.
2583 * Returns +nil+ if no scheduler is set (which is the default), and non-blocking fibers'
2584 * behavior is the same as blocking.
2585 * (see "Non-blocking fibers" section in class docs for details about the scheduler concept).
2586 *
2587 */
static VALUE
rb_fiber_s_scheduler(VALUE klass)
{
    /* Scheduler last set on this thread via Fiber.set_scheduler, or nil. */
    return rb_fiber_scheduler_get();
}
2593
2594/*
2595 * call-seq:
2596 * Fiber.current_scheduler -> obj or nil
2597 *
2598 * Returns the Fiber scheduler, that was last set for the current thread with Fiber.set_scheduler
2599 * if and only if the current fiber is non-blocking.
2600 *
2601 */
2602static VALUE
2603rb_fiber_current_scheduler(VALUE klass)
2604{
2606}
2607
2608/*
2609 * call-seq:
2610 * Fiber.set_scheduler(scheduler) -> scheduler
2611 *
2612 * Sets the Fiber scheduler for the current thread. If the scheduler is set, non-blocking
2613 * fibers (created by Fiber.new with <tt>blocking: false</tt>, or by Fiber.schedule)
2614 * call that scheduler's hook methods on potentially blocking operations, and the current
2615 * thread will call scheduler's +close+ method on finalization (allowing the scheduler to
2616 * properly manage all non-finished fibers).
2617 *
2618 * +scheduler+ can be an object of any class corresponding to Fiber::Scheduler. Its
2619 * implementation is up to the user.
2620 *
2621 * See also the "Non-blocking fibers" section in class docs.
2622 *
2623 */
static VALUE
rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)
{
    /* Install (or clear) the scheduler for the current thread. */
    return rb_fiber_scheduler_set(scheduler);
}
2629
2630NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err));
2631
/* Entry point of a freshly started fiber: invoke the fiber's proc with
 * the arguments stashed by the first resume/transfer, classify any
 * non-local exit, and hand control back via rb_fiber_terminate (which
 * never returns). */
void
rb_fiber_start(rb_fiber_t *fiber)
{
    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;

    rb_proc_t *proc;
    enum ruby_tag_type state;

    VM_ASSERT(th->ec == GET_EC());
    VM_ASSERT(FIBER_RESUMED_P(fiber));

    if (fiber->blocking) {
        th->blocking += 1;
    }

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
        int argc;
        const VALUE *argv, args = cont->value;
        GetProcPtr(fiber->first_proc, proc);
        /* With 0 or 1 argument, pass the value itself; with more, the
         * value is an Array whose elements become the argv. */
        argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
        cont->value = Qnil;
        th->ec->errinfo = Qnil;
        th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
        th->ec->root_svar = Qfalse;

        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
        cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, cont->kw_splat, VM_BLOCK_HANDLER_NONE);
    }
    EC_POP_TAG();

    int need_interrupt = TRUE;
    VALUE err = Qfalse;
    if (state) {
        err = th->ec->errinfo;
        VM_ASSERT(FIBER_RESUMED_P(fiber));

        if (state == TAG_RAISE) {
            // noop...
        }
        else if (state == TAG_FATAL && err == RUBY_FATAL_FIBER_KILLED) {
            /* Fiber#kill: a clean, expected termination — no interrupt,
             * no error to propagate. */
            need_interrupt = FALSE;
            err = Qfalse;
        }
        else if (state == TAG_FATAL) {
            rb_threadptr_pending_interrupt_enque(th, err);
        }
        else {
            err = rb_vm_make_jump_tag_but_local_jump(state, err);
        }
    }

    rb_fiber_terminate(fiber, need_interrupt, err);
}
2687
// Set up a "root fiber", which is the fiber that every Ractor has.
// The root fiber reuses the thread's native stack, so no coroutine stack
// is allocated and the status starts as RESUMED rather than CREATED.
void
rb_threadptr_root_fiber_setup(rb_thread_t *th)
{
    rb_fiber_t *fiber = ZALLOC(rb_fiber_t);
    if (!fiber) {
        rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
    }

    fiber->cont.type = FIBER_CONTEXT;
    fiber->cont.saved_ec.fiber_ptr = fiber;
    fiber->cont.saved_ec.serial = next_ec_serial(th->ractor);
    fiber->cont.saved_ec.thread_ptr = th;
    fiber->blocking = 1;
    fiber->killed = 0;
    fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */

    coroutine_initialize_main(&fiber->context);

    /* The thread's execution context lives inside the root fiber. */
    th->ec = &fiber->cont.saved_ec;

    cont_init_jit_cont(&fiber->cont);
}
2711
2712void
2713rb_root_fiber_obj_setup(rb_thread_t *th)
2714{
2715 rb_fiber_t *fiber = th->ec->fiber_ptr;
2716 VALUE fiber_value = fiber_alloc(rb_cFiber);
2717 DATA_PTR(fiber_value) = fiber;
2718 fiber->cont.self = fiber_value;
2719}
2720
/* Release the root fiber when the thread goes away. If a Ruby wrapper
 * object exists (th->root_fiber set), GC will free the struct via the
 * wrapper; otherwise free it here and detach the current EC. */
void
rb_threadptr_root_fiber_release(rb_thread_t *th)
{
    if (th->root_fiber) {
        /* ignore. A root fiber object will free th->ec */
    }
    else {
        rb_execution_context_t *ec = rb_current_execution_context(false);

        VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
        VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);

        /* Don't leave a dangling "current EC" pointer in the ractor. */
        if (ec && th->ec == ec) {
            rb_ractor_set_current_ec(th->ractor, NULL);
        }
        fiber_free(th->ec->fiber_ptr);
        th->ec = NULL;
    }
}
2740
2741void
2742rb_threadptr_root_fiber_terminate(rb_thread_t *th)
2743{
2744 rb_fiber_t *fiber = th->ec->fiber_ptr;
2745
2746 fiber->status = FIBER_TERMINATED;
2747
2748 // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
2749 rb_ec_clear_vm_stack(th->ec);
2750}
2751
/* Decide which fiber receives control when the current fiber yields or
 * terminates: its resumer (prev) if it has one; otherwise — only legal
 * when terminating — the deepest fiber on the root's resuming chain. */
static inline rb_fiber_t*
return_fiber(bool terminate)
{
    rb_fiber_t *fiber = fiber_current();
    rb_fiber_t *prev = fiber->prev;

    if (prev) {
        /* Sever the resume relationship before switching back. */
        fiber->prev = NULL;
        prev->resuming_fiber = NULL;
        return prev;
    }
    else {
        if (!terminate) {
            rb_raise(rb_eFiberError, "attempt to yield on a not resumed fiber");
        }

        rb_thread_t *th = GET_THREAD();
        rb_fiber_t *root_fiber = th->root_fiber;

        VM_ASSERT(root_fiber != NULL);

        // search resuming fiber
        for (fiber = root_fiber; fiber->resuming_fiber; fiber = fiber->resuming_fiber) {
        }

        return fiber;
    }
}
2780
2781VALUE
2782rb_fiber_current(void)
2783{
2784 return fiber_current()->cont.self;
2785}
2786
// Prepare to execute next_fiber on the given thread.
// Lazily allocates next_fiber's stack, updates both fibers' status, and
// performs the actual machine-context switch (does not return until
// control comes back to `fiber`).
static inline void
fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
{
    rb_fiber_t *fiber = th->ec->fiber_ptr;

    if (FIBER_CREATED_P(next_fiber)) {
        fiber_prepare_stack(next_fiber);
    }

    VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
    VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));

    /* A terminated fiber stays terminated; a live one becomes suspended. */
    if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);

    fiber_status_set(next_fiber, FIBER_RESUMED);
    fiber_setcontext(next_fiber, fiber);
}
2805
/* If Fiber#kill was requested for the (current) fiber, unwind it with a
 * fatal jump carrying the RUBY_FATAL_FIBER_KILLED sentinel, which
 * rb_fiber_start recognizes as a clean termination. Does not return in
 * that case. */
static void
fiber_check_killed(rb_fiber_t *fiber)
{
    VM_ASSERT(fiber == fiber_current());

    if (fiber->killed) {
        rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;

        thread->ec->errinfo = RUBY_FATAL_FIBER_KILLED;
        EC_JUMP_TAG(thread->ec, RUBY_TAG_FATAL);
    }
}
2818
/* Core of every fiber context switch (resume, transfer, yield, and
 * termination handoff).
 *
 * `resuming_fiber` is non-NULL for resume-style switches (establishes the
 * prev/resuming relationship); `yielding` marks a Fiber.yield. Returns
 * the value passed back when control eventually returns to this fiber,
 * or raises when the switch is invalid (cross-thread, dead fiber) or the
 * returned value encodes an exception (argc == -1). */
static inline VALUE
fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fiber_t *resuming_fiber, bool yielding)
{
    VALUE value;
    rb_context_t *cont = &fiber->cont;
    rb_thread_t *th = GET_THREAD();

    /* make sure the root_fiber object is available */
    if (th->root_fiber == NULL) {
        th->root_fiber = th->ec->fiber_ptr;
    }

    if (th->ec->fiber_ptr == fiber) {
        /* ignore fiber context switch
         * because destination fiber is the same as current fiber
         */
        return make_passing_arg(argc, argv);
    }

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }

    if (FIBER_TERMINATED_P(fiber)) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");

        if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
            rb_exc_raise(value);
            VM_UNREACHABLE(fiber_switch);
        }
        else {
            /* th->ec->fiber_ptr is also dead => switch to root fiber */
            /* (this means we're being called from rb_fiber_terminate, */
            /* and the terminated fiber's return_fiber() is already dead) */
            VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));

            cont = &th->root_fiber->cont;
            cont->argc = -1;
            cont->value = value;

            fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);

            VM_UNREACHABLE(fiber_switch);
        }
    }

    VM_ASSERT(FIBER_RUNNABLE_P(fiber));

    rb_fiber_t *current_fiber = fiber_current();

    VM_ASSERT(!current_fiber->resuming_fiber);

    if (resuming_fiber) {
        /* Resume-style switch: record who resumed whom so yield can
         * find its way back. */
        current_fiber->resuming_fiber = resuming_fiber;
        fiber->prev = fiber_current();
        fiber->yielding = 0;
    }

    VM_ASSERT(!current_fiber->yielding);
    if (yielding) {
        current_fiber->yielding = 1;
    }

    /* The outgoing fiber stops counting toward the thread's blocking total. */
    if (current_fiber->blocking) {
        th->blocking -= 1;
    }

    cont->argc = argc;
    cont->kw_splat = kw_splat;
    cont->value = make_passing_arg(argc, argv);

    fiber_store(fiber, th);

    // We cannot free the stack until the pthread is joined:
#ifndef COROUTINE_PTHREAD_CONTEXT
    if (FIBER_TERMINATED_P(fiber)) {
        RB_VM_LOCKING() {
            fiber_stack_release(fiber);
        }
    }
#endif

    /* Control is back in this fiber: re-add its blocking contribution. */
    if (fiber_current()->blocking) {
        th->blocking += 1;
    }

    RUBY_VM_CHECK_INTS(th->ec);

    EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);

    current_fiber = th->ec->fiber_ptr;
    value = current_fiber->cont.value;

    fiber_check_killed(current_fiber);

    if (current_fiber->cont.argc == -1) {
        // Fiber#raise will trigger this path.
        rb_exc_raise(value);
    }

    return value;
}
2921
2922VALUE
2923rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
2924{
2925 return fiber_switch(fiber_ptr(fiber_value), argc, argv, RB_NO_KEYWORDS, NULL, false);
2926}
2927
2928/*
2929 * call-seq:
2930 * fiber.blocking? -> true or false
2931 *
2932 * Returns +true+ if +fiber+ is blocking and +false+ otherwise.
2933 * Fiber is non-blocking if it was created via passing <tt>blocking: false</tt>
2934 * to Fiber.new, or via Fiber.schedule.
2935 *
2936 * Note that, even if the method returns +false+, the fiber behaves differently
2937 * only if Fiber.scheduler is set in the current thread.
2938 *
2939 * See the "Non-blocking fibers" section in class docs for details.
2940 *
2941 */
2942VALUE
2943rb_fiber_blocking_p(VALUE fiber)
2944{
2945 return RBOOL(fiber_ptr(fiber)->blocking);
2946}
2947
/* rb_ensure body for Fiber.blocking: flip the current (non-blocking)
 * fiber to blocking, bump the thread's blocking counter, then run the
 * user block. Paired with fiber_blocking_ensure below. */
static VALUE
fiber_blocking_yield(VALUE fiber_value)
{
    rb_fiber_t *fiber = fiber_ptr(fiber_value);
    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;

    VM_ASSERT(fiber->blocking == 0);

    // fiber->blocking is `unsigned int : 1`, so we use it as a boolean:
    fiber->blocking = 1;

    // Once the fiber is blocking, and current, we increment the thread blocking state:
    th->blocking += 1;

    return rb_yield(fiber_value);
}
2964
/* rb_ensure handler for Fiber.blocking: undo fiber_blocking_yield's
 * state changes even if the block raised. */
static VALUE
fiber_blocking_ensure(VALUE fiber_value)
{
    rb_fiber_t *fiber = fiber_ptr(fiber_value);
    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;

    // We are no longer blocking:
    fiber->blocking = 0;
    th->blocking -= 1;

    return Qnil;
}
2977
2978/*
2979 * call-seq:
2980 * Fiber.blocking{|fiber| ...} -> result
2981 *
2982 * Forces the fiber to be blocking for the duration of the block. Returns the
2983 * result of the block.
2984 *
2985 * See the "Non-blocking fibers" section in class docs for details.
2986 *
2987 */
2988VALUE
2989rb_fiber_blocking(VALUE class)
2990{
2991 VALUE fiber_value = rb_fiber_current();
2992 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2993
2994 // If we are already blocking, this is essentially a no-op:
2995 if (fiber->blocking) {
2996 return rb_yield(fiber_value);
2997 }
2998 else {
2999 return rb_ensure(fiber_blocking_yield, fiber_value, fiber_blocking_ensure, fiber_value);
3000 }
3001}
3002
3003/*
3004 * call-seq:
3005 * Fiber.blocking? -> false or 1
3006 *
3007 * Returns +false+ if the current fiber is non-blocking.
3008 * Fiber is non-blocking if it was created via passing <tt>blocking: false</tt>
3009 * to Fiber.new, or via Fiber.schedule.
3010 *
3011 * If the current Fiber is blocking, the method returns 1.
3012 * Future developments may allow for situations where larger integers
3013 * could be returned.
3014 *
3015 * Note that, even if the method returns +false+, Fiber behaves differently
3016 * only if Fiber.scheduler is set in the current thread.
3017 *
3018 * See the "Non-blocking fibers" section in class docs for details.
3019 *
3020 */
3021static VALUE
3022rb_fiber_s_blocking_p(VALUE klass)
3023{
3024 rb_thread_t *thread = GET_THREAD();
3025 unsigned blocking = thread->blocking;
3026
3027 if (blocking == 0)
3028 return Qfalse;
3029
3030 return INT2NUM(blocking);
3031}
3032
/* Mark the fiber terminated and tear down its execution context. */
void
rb_fiber_close(rb_fiber_t *fiber)
{
    fiber_status_set(fiber, FIBER_TERMINATED);
    rb_ec_close(&fiber->cont.saved_ec);
}
3039
/* Final act of a finished fiber: close it, pick the fiber that should
 * run next, and switch to it carrying either the block's return value or
 * the pending error (argc == -1 marks an exception payload).
 * Never returns; ruby_stop is a safety net that should be unreachable. */
static void
rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE error)
{
    VALUE value = fiber->cont.value;

    VM_ASSERT(FIBER_RESUMED_P(fiber));
    rb_fiber_close(fiber);

    /* The machine stack is about to be recycled; drop our references. */
    fiber->cont.machine.stack = NULL;
    fiber->cont.machine.stack_size = 0;

    rb_fiber_t *next_fiber = return_fiber(true);

    if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);

    if (RTEST(error))
        fiber_switch(next_fiber, -1, &error, RB_NO_KEYWORDS, NULL, false);
    else
        fiber_switch(next_fiber, 1, &value, RB_NO_KEYWORDS, NULL, false);
    ruby_stop(0);
}
3061
/* Validate that `fiber` may legally be resumed from the current fiber,
 * then perform a resume-style switch. argc == -1 signals Fiber#raise
 * (an exception payload rather than arguments). */
static VALUE
fiber_resume_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)
{
    rb_fiber_t *current_fiber = fiber_current();

    if (argc == -1 && FIBER_CREATED_P(fiber)) {
        rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
    }
    else if (FIBER_TERMINATED_P(fiber)) {
        rb_raise(rb_eFiberError, "attempt to resume a terminated fiber");
    }
    else if (fiber == current_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume the current fiber");
    }
    else if (fiber->prev != NULL) {
        rb_raise(rb_eFiberError, "attempt to resume a resumed fiber (double resume)");
    }
    else if (fiber->resuming_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume a resuming fiber");
    }
    else if (fiber->prev == NULL &&
             (!fiber->yielding && fiber->status != FIBER_CREATED)) {
        /* Started via transfer: such a fiber may only be transferred to. */
        rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");
    }

    return fiber_switch(fiber, argc, argv, kw_splat, fiber, false);
}
3089
3090VALUE
3091rb_fiber_resume_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)
3092{
3093 return fiber_resume_kw(fiber_ptr(self), argc, argv, kw_splat);
3094}
3095
3096VALUE
3097rb_fiber_resume(VALUE self, int argc, const VALUE *argv)
3098{
3099 return fiber_resume_kw(fiber_ptr(self), argc, argv, RB_NO_KEYWORDS);
3100}
3101
3102VALUE
3103rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
3104{
3105 return fiber_switch(return_fiber(false), argc, argv, kw_splat, NULL, true);
3106}
3107
3108VALUE
3109rb_fiber_yield(int argc, const VALUE *argv)
3110{
3111 return fiber_switch(return_fiber(false), argc, argv, RB_NO_KEYWORDS, NULL, true);
3112}
3113
3114void
3115rb_fiber_reset_root_local_storage(rb_thread_t *th)
3116{
3117 if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
3118 th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
3119 }
3120}
3121
3122/*
3123 * call-seq:
3124 * fiber.alive? -> true or false
3125 *
3126 * Returns true if the fiber can still be resumed (or transferred
3127 * to). After finishing execution of the fiber block this method will
3128 * always return +false+.
3129 */
3130VALUE
3131rb_fiber_alive_p(VALUE fiber_value)
3132{
3133 return RBOOL(!FIBER_TERMINATED_P(fiber_ptr(fiber_value)));
3134}
3135
3136/*
3137 * call-seq:
3138 * fiber.resume(args, ...) -> obj
3139 *
3140 * Resumes the fiber from the point at which the last Fiber.yield was
3141 * called, or starts running it if it is the first call to
3142 * #resume. Arguments passed to resume will be the value of the
3143 * Fiber.yield expression or will be passed as block parameters to
3144 * the fiber's block if this is the first #resume.
3145 *
3146 * Alternatively, when resume is called it evaluates to the arguments passed
3147 * to the next Fiber.yield statement inside the fiber's block
3148 * or to the block value if it runs to completion without any
3149 * Fiber.yield
3150 */
3151static VALUE
3152rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
3153{
3154 return rb_fiber_resume_kw(fiber, argc, argv, rb_keyword_given_p());
3155}
3156
3157/*
3158 * call-seq:
3159 * fiber.backtrace -> array
3160 * fiber.backtrace(start) -> array
3161 * fiber.backtrace(start, count) -> array
3162 * fiber.backtrace(start..end) -> array
3163 *
3164 * Returns the current execution stack of the fiber. +start+, +count+ and +end+ allow
3165 * to select only parts of the backtrace.
3166 *
3167 * def level3
3168 * Fiber.yield
3169 * end
3170 *
3171 * def level2
3172 * level3
3173 * end
3174 *
3175 * def level1
3176 * level2
3177 * end
3178 *
3179 * f = Fiber.new { level1 }
3180 *
3181 * # It is empty before the fiber started
3182 * f.backtrace
3183 * #=> []
3184 *
3185 * f.resume
3186 *
3187 * f.backtrace
3188 * #=> ["test.rb:2:in `yield'", "test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'", "test.rb:13:in `block in <main>'"]
3189 * p f.backtrace(1) # start from the item 1
3190 * #=> ["test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'", "test.rb:13:in `block in <main>'"]
3191 * p f.backtrace(2, 2) # start from item 2, take 2
3192 * #=> ["test.rb:6:in `level2'", "test.rb:10:in `level1'"]
3193 * p f.backtrace(1..3) # take items from 1 to 3
3194 * #=> ["test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'"]
3195 *
3196 * f.resume
3197 *
3198 * # It is nil after the fiber is finished
3199 * f.backtrace
3200 * #=> nil
3201 *
3202 */
static VALUE
rb_fiber_backtrace(int argc, VALUE *argv, VALUE fiber)
{
    /* Build the backtrace from the fiber's saved execution context. */
    return rb_vm_backtrace(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
}
3208
3209/*
3210 * call-seq:
3211 * fiber.backtrace_locations -> array
3212 * fiber.backtrace_locations(start) -> array
3213 * fiber.backtrace_locations(start, count) -> array
3214 * fiber.backtrace_locations(start..end) -> array
3215 *
3216 * Like #backtrace, but returns each line of the execution stack as a
3217 * Thread::Backtrace::Location. Accepts the same arguments as #backtrace.
3218 *
3219 * f = Fiber.new { Fiber.yield }
3220 * f.resume
3221 * loc = f.backtrace_locations.first
3222 * loc.label #=> "yield"
3223 * loc.path #=> "test.rb"
3224 * loc.lineno #=> 1
3225 *
3226 *
3227 */
static VALUE
rb_fiber_backtrace_locations(int argc, VALUE *argv, VALUE fiber)
{
    /* Same as #backtrace but yields Thread::Backtrace::Location objects. */
    return rb_vm_backtrace_locations(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
}
3233
3234/*
3235 * call-seq:
3236 * fiber.transfer(args, ...) -> obj
3237 *
3238 * Transfer control to another fiber, resuming it from where it last
3239 * stopped or starting it if it was not resumed before. The calling
3240 * fiber will be suspended much like in a call to
3241 * Fiber.yield.
3242 *
3243 * The fiber which receives the transfer call treats it much like
3244 * a resume call. Arguments passed to transfer are treated like those
3245 * passed to resume.
3246 *
 * The two styles of control passing to and from a fiber (one is #resume and
 * Fiber::yield, the other is #transfer to and from the fiber) can't be freely
 * mixed.
3250 *
 * * If the Fiber's lifecycle had started with transfer, it will never
 *   be able to yield or be resumed; it can only finish or
 *   transfer back. (It still can resume other fibers that
 *   are allowed to be resumed.)
3255 * * If the Fiber's lifecycle had started with resume, it can yield
3256 * or transfer to another Fiber, but can receive control back only
3257 * the way compatible with the way it was given away: if it had
3258 * transferred, it only can be transferred back, and if it had
3259 * yielded, it only can be resumed back. After that, it again can
3260 * transfer or yield.
3261 *
3262 * If those rules are broken FiberError is raised.
3263 *
3264 * For an individual Fiber design, yield/resume is easier to use
3265 * (the Fiber just gives away control, it doesn't need to think
3266 * about who the control is given to), while transfer is more flexible
3267 * for complex cases, allowing to build arbitrary graphs of Fibers
3268 * dependent on each other.
3269 *
3270 *
3271 * Example:
3272 *
3273 * manager = nil # For local var to be visible inside worker block
3274 *
3275 * # This fiber would be started with transfer
3276 * # It can't yield, and can't be resumed
3277 * worker = Fiber.new { |work|
3278 * puts "Worker: starts"
3279 * puts "Worker: Performed #{work.inspect}, transferring back"
3280 * # Fiber.yield # this would raise FiberError: attempt to yield on a not resumed fiber
3281 * # manager.resume # this would raise FiberError: attempt to resume a resumed fiber (double resume)
3282 * manager.transfer(work.capitalize)
3283 * }
3284 *
3285 * # This fiber would be started with resume
3286 * # It can yield or transfer, and can be transferred
3287 * # back or resumed
3288 * manager = Fiber.new {
3289 * puts "Manager: starts"
3290 * puts "Manager: transferring 'something' to worker"
3291 * result = worker.transfer('something')
3292 * puts "Manager: worker returned #{result.inspect}"
3293 * # worker.resume # this would raise FiberError: attempt to resume a transferring fiber
3294 * Fiber.yield # this is OK, the fiber transferred from and to, now it can yield
3295 * puts "Manager: finished"
3296 * }
3297 *
3298 * puts "Starting the manager"
3299 * manager.resume
3300 * puts "Resuming the manager"
3301 * # manager.transfer # this would raise FiberError: attempt to transfer to a yielding fiber
3302 * manager.resume
3303 *
3304 * <em>produces</em>
3305 *
3306 * Starting the manager
3307 * Manager: starts
3308 * Manager: transferring 'something' to worker
3309 * Worker: starts
3310 * Worker: Performed "something", transferring back
3311 * Manager: worker returned "Something"
3312 * Resuming the manager
3313 * Manager: finished
3314 *
3315 */
3316static VALUE
3317rb_fiber_m_transfer(int argc, VALUE *argv, VALUE self)
3318{
3319 return rb_fiber_transfer_kw(self, argc, argv, rb_keyword_given_p());
3320}
3321
3322static VALUE
3323fiber_transfer_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)
3324{
3325 if (fiber->resuming_fiber) {
3326 rb_raise(rb_eFiberError, "attempt to transfer to a resuming fiber");
3327 }
3328
3329 if (fiber->yielding) {
3330 rb_raise(rb_eFiberError, "attempt to transfer to a yielding fiber");
3331 }
3332
3333 return fiber_switch(fiber, argc, argv, kw_splat, NULL, false);
3334}
3335
3336VALUE
3337rb_fiber_transfer_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)
3338{
3339 return fiber_transfer_kw(fiber_ptr(self), argc, argv, kw_splat);
3340}
3341
3342/*
3343 * call-seq:
3344 * Fiber.yield(args, ...) -> obj
3345 *
3346 * Yields control back to the context that resumed the fiber, passing
3347 * along any arguments that were passed to it. The fiber will resume
3348 * processing at this point when #resume is called next.
3349 * Any arguments passed to the next #resume will be the value that
3350 * this Fiber.yield expression evaluates to.
3351 */
3352static VALUE
3353rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
3354{
3355 return rb_fiber_yield_kw(argc, argv, rb_keyword_given_p());
3356}
3357
3358static VALUE
3359fiber_raise(rb_fiber_t *fiber, VALUE exception)
3360{
3361 if (fiber == fiber_current()) {
3362 rb_exc_raise(exception);
3363 }
3364 else if (fiber->resuming_fiber) {
3365 return fiber_raise(fiber->resuming_fiber, exception);
3366 }
3367 else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {
3368 return fiber_transfer_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
3369 }
3370 else {
3371 return fiber_resume_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
3372 }
3373}
3374
3375VALUE
3376rb_fiber_raise(VALUE fiber, int argc, VALUE *argv)
3377{
3378 VALUE exception = rb_exception_setup(argc, argv);
3379
3380 return fiber_raise(fiber_ptr(fiber), exception);
3381}
3382
3383/*
3384 * call-seq:
3385 * raise(exception, message = exception.to_s, backtrace = nil, cause: $!)
3386 * raise(message = nil, cause: $!)
3387 *
3388 * Raises an exception in the fiber at the point at which the last
3389 * +Fiber.yield+ was called.
3390 *
3391 * f = Fiber.new {
3392 * puts "Before the yield"
3393 * Fiber.yield 1 # -- exception will be raised here
3394 * puts "After the yield"
3395 * }
3396 *
3397 * p f.resume
3398 * f.raise "Gotcha"
3399 *
3400 * Output
3401 *
 *     Before the yield
3403 * 1
3404 * t.rb:8:in 'Fiber.yield': Gotcha (RuntimeError)
3405 * from t.rb:8:in 'block in <main>'
3406 *
3407 * If the fiber has not been started or has
3408 * already run to completion, raises +FiberError+. If the fiber is
3409 * yielding, it is resumed. If it is transferring, it is transferred into.
3410 * But if it is resuming, raises +FiberError+.
3411 *
3412 * Raises +FiberError+ if called on a Fiber belonging to another +Thread+.
3413 *
3414 * See Kernel#raise for more information on arguments.
3415 *
3416 */
/* Fiber#raise: thin method shim over the public C API entry point. */
static VALUE
rb_fiber_m_raise(int argc, VALUE *argv, VALUE self)
{
    return rb_fiber_raise(self, argc, argv);
}
3422
3423/*
3424 * call-seq:
3425 * fiber.kill -> nil
3426 *
3427 * Terminates the fiber by raising an uncatchable exception.
3428 * It only terminates the given fiber and no other fiber, returning +nil+ to
3429 * another fiber if that fiber was calling #resume or #transfer.
3430 *
3431 * <tt>Fiber#kill</tt> only interrupts another fiber when it is in Fiber.yield.
3432 * If called on the current fiber then it raises that exception at the <tt>Fiber#kill</tt> call site.
3433 *
3434 * If the fiber has not been started, transition directly to the terminated state.
3435 *
3436 * If the fiber is already terminated, does nothing.
3437 *
3438 * Raises FiberError if called on a fiber belonging to another thread.
3439 */
3440static VALUE
3441rb_fiber_m_kill(VALUE self)
3442{
3443 rb_fiber_t *fiber = fiber_ptr(self);
3444
3445 if (fiber->killed) return Qfalse;
3446 fiber->killed = 1;
3447
3448 if (fiber->status == FIBER_CREATED) {
3449 fiber->status = FIBER_TERMINATED;
3450 }
3451 else if (fiber->status != FIBER_TERMINATED) {
3452 if (fiber_current() == fiber) {
3453 fiber_check_killed(fiber);
3454 }
3455 else {
3456 fiber_raise(fiber_ptr(self), Qnil);
3457 }
3458 }
3459
3460 return self;
3461}
3462
3463/*
3464 * call-seq:
3465 * Fiber.current -> fiber
3466 *
3467 * Returns the current fiber. If you are not running in the context of
3468 * a fiber this method will return the root fiber.
3469 */
/* Fiber.current: returns the fiber currently running on this thread
 * (the root fiber when no other fiber is active). */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
3475
3476static VALUE
3477fiber_to_s(VALUE fiber_value)
3478{
3479 const rb_fiber_t *fiber = fiber_ptr(fiber_value);
3480 const rb_proc_t *proc;
3481 char status_info[0x20];
3482
3483 if (fiber->resuming_fiber) {
3484 snprintf(status_info, 0x20, " (%s by resuming)", fiber_status_name(fiber->status));
3485 }
3486 else {
3487 snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
3488 }
3489
3490 if (!rb_obj_is_proc(fiber->first_proc)) {
3491 VALUE str = rb_any_to_s(fiber_value);
3492 strlcat(status_info, ">", sizeof(status_info));
3493 rb_str_set_len(str, RSTRING_LEN(str)-1);
3494 rb_str_cat_cstr(str, status_info);
3495 return str;
3496 }
3497 GetProcPtr(fiber->first_proc, proc);
3498 return rb_block_to_s(fiber_value, &proc->block, status_info);
3499}
3500
3501#ifdef HAVE_WORKING_FORK
/* Post-fork fixup: in the child only the forking fiber survives, so make it
 * the thread's root fiber and reset its links/blocking state. */
void
rb_fiber_atfork(rb_thread_t *th)
{
    if (th->root_fiber) {
        // If the fiber that called fork() wasn't the root fiber, promote the
        // currently-executing fiber (th->ec's owner) to root.
        if (&th->root_fiber->cont.saved_ec != th->ec) {
            th->root_fiber = th->ec->fiber_ptr;
        }
        // The root fiber has no predecessor and is blocking by definition.
        th->root_fiber->prev = 0;
        th->root_fiber->blocking = 1;
        th->blocking = 1;
    }
}
3514#endif
3515
3516#ifdef RB_EXPERIMENTAL_FIBER_POOL
/* GC free callback for Fiber::Pool wrapper objects: releases every stack
 * allocation owned by the pool, then the pool struct itself. */
static void
fiber_pool_free(void *ptr)
{
    struct fiber_pool * fiber_pool = ptr;
    RUBY_FREE_ENTER("fiber_pool");

    fiber_pool_allocation_free(fiber_pool->allocations);
    SIZED_FREE(fiber_pool);

    RUBY_FREE_LEAVE("fiber_pool");
}
3528
3529static size_t
3530fiber_pool_memsize(const void *ptr)
3531{
3532 const struct fiber_pool * fiber_pool = ptr;
3533 size_t size = sizeof(*fiber_pool);
3534
3535 size += fiber_pool->count * fiber_pool->size;
3536
3537 return size;
3538}
3539
3540static const rb_data_type_t FiberPoolDataType = {
3541 "fiber_pool",
3542 {NULL, fiber_pool_free, fiber_pool_memsize,},
3544};
3545
3546static VALUE
3547fiber_pool_alloc(VALUE klass)
3548{
3549 struct fiber_pool *fiber_pool;
3550
3551 return TypedData_Make_Struct(klass, struct fiber_pool, &FiberPoolDataType, fiber_pool);
3552}
3553
3554static VALUE
3555rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
3556{
3557 rb_thread_t *th = GET_THREAD();
3558 VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
3559 struct fiber_pool * fiber_pool = NULL;
3560
3561 // Maybe these should be keyword arguments.
3562 rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);
3563
3564 if (NIL_P(size)) {
3565 size = SIZET2NUM(th->vm->default_params.fiber_machine_stack_size);
3566 }
3567
3568 if (NIL_P(count)) {
3569 count = INT2NUM(128);
3570 }
3571
3572 if (NIL_P(vm_stack_size)) {
3573 vm_stack_size = SIZET2NUM(th->vm->default_params.fiber_vm_stack_size);
3574 }
3575
3576 TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);
3577
3578 fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), 0, NUM2SIZET(vm_stack_size));
3579
3580 return self;
3581}
3582#endif
3583
3584/*
3585 * Document-class: FiberError
3586 *
3587 * Raised when an invalid operation is attempted on a Fiber, in
3588 * particular when attempting to call/resume a dead fiber,
3589 * attempting to yield from the root fiber, or calling a fiber across
3590 * threads.
3591 *
3592 * fiber = Fiber.new{}
3593 * fiber.resume #=> nil
3594 * fiber.resume #=> FiberError: dead fiber called
3595 */
3596
3597static size_t
3598shared_fiber_pool_minimum_count(void)
3599{
3600 size_t minimum_count = FIBER_POOL_MINIMUM_COUNT;
3601
3602 const char *minimum_count_env = getenv("RUBY_SHARED_FIBER_POOL_MINIMUM_COUNT");
3603 if (minimum_count_env && minimum_count_env[0]) {
3604 char *end;
3605 unsigned long value = strtoul(minimum_count_env, &end, 10);
3606 if (end != minimum_count_env && *end == '\0') {
3607 minimum_count = (size_t)value;
3608 }
3609 else {
3610 rb_warn("invalid RUBY_SHARED_FIBER_POOL_MINIMUM_COUNT=%s (expected a non-negative integer)", minimum_count_env);
3611 }
3612 }
3613
3614 return minimum_count;
3615}
3616
/* Reads RUBY_SHARED_FIBER_POOL_MAXIMUM_COUNT from the environment; 0 (the
 * default when unset, empty, or invalid) means "no maximum". */
static size_t
shared_fiber_pool_maximum_count(void)
{
    size_t maximum_count = 0;

    const char *maximum_count_env = getenv("RUBY_SHARED_FIBER_POOL_MAXIMUM_COUNT");
    if (maximum_count_env && maximum_count_env[0]) {
        char *end;
        errno = 0;
        unsigned long value = strtoul(maximum_count_env, &end, 10);
        // strtoul accepts a leading '-' by wrapping the value, so reject it
        // explicitly, along with overflow (ERANGE), empty digit sequences,
        // and trailing junk — matching the "non-negative integer" promise in
        // the warning below.
        if (maximum_count_env[0] != '-' && end != maximum_count_env && *end == '\0' && errno == 0) {
            maximum_count = (size_t)value;
        }
        else {
            rb_warn("invalid RUBY_SHARED_FIBER_POOL_MAXIMUM_COUNT=%s (expected a non-negative integer)", maximum_count_env);
        }
    }

    return maximum_count;
}
3636
/* One-time initialisation of Fiber support: sizes and creates the shared
 * fiber pool, reads tuning environment variables, registers the Fiber class
 * with all its methods, and patches the pre-existing root fiber's class. */
void
Init_Cont(void)
{
    rb_thread_t *th = GET_THREAD();
    size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
    size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
    // Each pooled region hosts both the machine stack and the VM stack.
    size_t stack_size = machine_stack_size + vm_stack_size;

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    // Pool bounds are tunable via RUBY_SHARED_FIBER_POOL_{MINIMUM,MAXIMUM}_COUNT.
    size_t minimum_count = shared_fiber_pool_minimum_count();
    size_t maximum_count = shared_fiber_pool_maximum_count();
    fiber_pool_initialize(&shared_fiber_pool, stack_size, minimum_count, maximum_count, vm_stack_size);

    // Keyword-argument IDs accepted by Fiber.new.
    fiber_initialize_keywords[0] = rb_intern_const("blocking");
    fiber_initialize_keywords[1] = rb_intern_const("pool");
    fiber_initialize_keywords[2] = rb_intern_const("storage");

    // Opt-in stack freeing behaviour; values above 1 are OS-specific.
    const char *fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
    if (fiber_shared_fiber_pool_free_stacks) {
        shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);

        if (shared_fiber_pool.free_stacks < 0) {
            rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a negative value is not allowed.");
            shared_fiber_pool.free_stacks = 0;
        }

        if (shared_fiber_pool.free_stacks > 1) {
            rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a value greater than 1 is operating system specific, and may cause crashes.");
        }
    }

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
    rb_define_singleton_method(rb_cFiber, "blocking", rb_fiber_blocking, 0);
    rb_define_singleton_method(rb_cFiber, "[]", rb_fiber_storage_aref, 1);
    rb_define_singleton_method(rb_cFiber, "[]=", rb_fiber_storage_aset, 2);

    rb_define_method(rb_cFiber, "initialize", rb_fiber_initialize, -1);
    rb_define_method(rb_cFiber, "blocking?", rb_fiber_blocking_p, 0);
    rb_define_method(rb_cFiber, "storage", rb_fiber_storage_get, 0);
    rb_define_method(rb_cFiber, "storage=", rb_fiber_storage_set, 1);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
    rb_define_method(rb_cFiber, "raise", rb_fiber_m_raise, -1);
    rb_define_method(rb_cFiber, "kill", rb_fiber_m_kill, 0);
    rb_define_method(rb_cFiber, "backtrace", rb_fiber_backtrace, -1);
    rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);
    rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
    rb_define_alias(rb_cFiber, "inspect", "to_s");
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);

    rb_define_singleton_method(rb_cFiber, "blocking?", rb_fiber_s_blocking_p, 0);
    rb_define_singleton_method(rb_cFiber, "scheduler", rb_fiber_s_scheduler, 0);
    rb_define_singleton_method(rb_cFiber, "set_scheduler", rb_fiber_set_scheduler, 1);
    rb_define_singleton_method(rb_cFiber, "current_scheduler", rb_fiber_current_scheduler, 0);

    rb_define_singleton_method(rb_cFiber, "schedule", rb_fiber_s_schedule, -1);

    // The root fiber object was created before rb_cFiber existed (its klass
    // is still 0), so patch its class pointer in place here.
    rb_thread_t *current_thread = rb_current_thread();
    RUBY_ASSERT(CLASS_OF(current_thread->ec->fiber_ptr->cont.self) == 0);
    *(VALUE *)&((struct RBasic *)current_thread->ec->fiber_ptr->cont.self)->klass = rb_cFiber;

#ifdef RB_EXPERIMENTAL_FIBER_POOL
    /*
     * Document-class: Fiber::Pool
     * :nodoc: experimental
     */
    rb_cFiberPool = rb_define_class_under(rb_cFiber, "Pool", rb_cObject);
    rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
    rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
#endif

    rb_provide("fiber.so");
}
3722
3723RUBY_SYMBOL_EXPORT_BEGIN
3724
/* Registers the Continuation class and Kernel#callcc. Continuation objects
 * cannot be allocated or constructed directly (alloc and new are undefed);
 * callcc is the only way to obtain one. */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
3735
3736RUBY_SYMBOL_EXPORT_END
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define rb_define_global_function(mid, func, arity)
Defines rb_mKernel #mid.
#define RUBY_EVENT_FIBER_SWITCH
Encountered a Fiber#yield.
Definition event.h:59
static bool RB_OBJ_FROZEN(VALUE obj)
Checks if an object is frozen.
Definition fl_type.h:711
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:1523
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition class.c:1554
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2890
void rb_undef_method(VALUE klass, const char *name)
Defines an undef of a method.
Definition class.c:2700
int rb_scan_args_kw(int kw_flag, int argc, const VALUE *argv, const char *fmt,...)
Identical to rb_scan_args(), except it also accepts kw_splat.
Definition class.c:3193
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
Definition class.c:3180
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:1031
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Keyword argument deconstructor.
Definition class.c:2969
#define Qundef
Old name of RUBY_Qundef.
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define ZALLOC
Old name of RB_ZALLOC.
Definition memory.h:402
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define rb_ary_new4
Old name of rb_ary_new_from_values.
Definition array.h:659
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define rb_exc_new2
Old name of rb_exc_new_cstr.
Definition error.h:37
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define ALLOC_N
Old name of RB_ALLOC_N.
Definition memory.h:399
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define NUM2SIZET
Old name of RB_NUM2SIZE.
Definition size_t.h:61
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:291
void rb_category_warn(rb_warning_category_t category, const char *fmt,...)
Identical to rb_category_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:477
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:661
void rb_syserr_fail(int e, const char *mesg)
Raises appropriate exception that represents a C errno.
Definition error.c:3967
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1424
VALUE rb_eFrozenError
FrozenError exception.
Definition error.c:1426
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1427
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1425
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:467
@ RB_WARN_CATEGORY_EXPERIMENTAL
Warning is for experimental features.
Definition error.h:51
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_any_to_s(VALUE obj)
Generates a textual representation of the given object.
Definition object.c:640
VALUE rb_obj_dup(VALUE obj)
Duplicates the given object.
Definition object.c:547
void rb_provide(const char *feature)
Declares that the given feature is already provided by someone else.
Definition load.c:695
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:988
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:122
void rb_str_set_len(VALUE str, long len)
Overwrites the length of the string.
Definition string.c:3405
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1657
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition vm_method.c:1731
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:285
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:12703
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1375
rb_block_call_func * rb_block_call_func_t
Shorthand type that represents an iterator-written-in-C function pointer.
Definition iterator.h:88
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:372
#define ALLOCA_N(type, n)
Definition memory.h:292
#define RB_ALLOC(type)
Shorthand of RB_ALLOC_N with n=1.
Definition memory.h:213
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:67
#define RUBY_TYPED_FREE_IMMEDIATELY
Macros to see if each corresponding flag is defined.
Definition rtypeddata.h:122
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:769
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:531
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:578
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
#define RB_NO_KEYWORDS
Do not pass keywords.
Definition scan_args.h:69
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:458
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
Definition scheduler.c:420
VALUE rb_fiber_scheduler_get(void)
Queries the current scheduler of the current thread that is calling this function.
Definition scheduler.c:370
VALUE rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
Create and schedule a non-blocking fiber.
Definition scheduler.c:1178
#define RTEST
This is an old name of RB_TEST.
Ruby object's base components.
Definition rbasic.h:69
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:229
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static void Check_Type(VALUE v, enum ruby_value_type t)
Identical to RB_TYPE_P(), except it raises exceptions on predication failure.
Definition value_type.h:433
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376