Ruby 3.5.0dev (2025-06-26 revision b1c09faf67a663bcda430931c987762521efd53a)
cont.c (b1c09faf67a663bcda430931c987762521efd53a)
1/**********************************************************************
2
3 cont.c -
4
5 $Author$
6 created at: Thu May 23 09:03:43 2007
7
8 Copyright (C) 2007 Koichi Sasada
9
10**********************************************************************/
11
12#include "ruby/internal/config.h"
13
14#ifndef _WIN32
15#include <unistd.h>
16#include <sys/mman.h>
17#endif
18
19// On Solaris, madvise() is NOT declared for SUS (XPG4v2) or later,
20// but MADV_* macros are defined when __EXTENSIONS__ is defined.
21#ifdef NEED_MADVICE_PROTOTYPE_USING_CADDR_T
22#include <sys/types.h>
23extern int madvise(caddr_t, size_t, int);
24#endif
25
26#include COROUTINE_H
27
28#include "eval_intern.h"
29#include "internal.h"
30#include "internal/cont.h"
31#include "internal/thread.h"
32#include "internal/error.h"
33#include "internal/gc.h"
34#include "internal/proc.h"
35#include "internal/sanitizers.h"
36#include "internal/warnings.h"
37#include "ruby/fiber/scheduler.h"
38#include "yjit.h"
39#include "vm_core.h"
40#include "vm_sync.h"
41#include "id_table.h"
42#include "ractor_core.h"
43
44static const int DEBUG = 0;
45
46#define RB_PAGE_SIZE (pagesize)
47#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
48static long pagesize;
49
50static const rb_data_type_t cont_data_type, fiber_data_type;
51static VALUE rb_cContinuation;
52static VALUE rb_cFiber;
53static VALUE rb_eFiberError;
54#ifdef RB_EXPERIMENTAL_FIBER_POOL
55static VALUE rb_cFiberPool;
56#endif
57
58#define CAPTURE_JUST_VALID_VM_STACK 1
59
60// Defined in `coroutine/$arch/Context.h`:
61#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
62#define FIBER_POOL_ALLOCATION_FREE
63#define FIBER_POOL_INITIAL_SIZE 8
64#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
65#else
66#define FIBER_POOL_INITIAL_SIZE 32
67#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
68#endif
69#ifdef RB_EXPERIMENTAL_FIBER_POOL
70#define FIBER_POOL_ALLOCATION_FREE
71#endif
72
73enum context_type {
74 CONTINUATION_CONTEXT = 0,
75 FIBER_CONTEXT = 1
76};
77
78struct cont_saved_vm_stack {
79 VALUE *ptr;
80#ifdef CAPTURE_JUST_VALID_VM_STACK
81 size_t slen; /* length of stack (head of ec->vm_stack) */
82 size_t clen; /* length of control frames (tail of ec->vm_stack) */
83#endif
84};
85
86struct fiber_pool;
87
88// Represents a single stack.
89struct fiber_pool_stack {
90 // A pointer to the memory allocation (lowest address) for the stack.
91 void * base;
92
93 // The current stack pointer, taking into account the direction of the stack.
94 void * current;
95
96 // The size of the stack excluding any guard pages.
97 size_t size;
98
99 // The available stack capacity w.r.t. the current stack offset.
100 size_t available;
101
102 // The pool this stack should be allocated from.
103 struct fiber_pool * pool;
104
105 // If the stack is allocated, the allocation it came from.
106 struct fiber_pool_allocation * allocation;
107};
108
109// A linked list of vacant (unused) stacks.
110// This structure is stored in the first page of a stack if it is not in use.
111// @sa fiber_pool_vacancy_pointer
112struct fiber_pool_vacancy {
113 // Details about the vacant stack:
114 struct fiber_pool_stack stack;
115
116 // The vacancy linked list.
117#ifdef FIBER_POOL_ALLOCATION_FREE
118 struct fiber_pool_vacancy * previous;
119#endif
120 struct fiber_pool_vacancy * next;
121};
122
123// Manages a singly-linked list of mapped memory regions, each of which contains one or more stacks:
124//
125// base = +-------------------------------+-----------------------+ +
126// |VM Stack |VM Stack | | |
127// | | | | |
128// | | | | |
129// +-------------------------------+ | |
130// |Machine Stack |Machine Stack | | |
131// | | | | |
132// | | | | |
133// | | | . . . . | | size
134// | | | | |
135// | | | | |
136// | | | | |
137// | | | | |
138// | | | | |
139// +-------------------------------+ | |
140// |Guard Page |Guard Page | | |
141// +-------------------------------+-----------------------+ v
142//
143// +------------------------------------------------------->
144//
145// count
146//
147struct fiber_pool_allocation {
148 // A pointer to the memory mapped region.
149 void * base;
150
151 // The size of the individual stacks.
152 size_t size;
153
154 // The stride of individual stacks (including any guard pages or other accounting details).
155 size_t stride;
156
157 // The number of stacks that were allocated.
158 size_t count;
159
160#ifdef FIBER_POOL_ALLOCATION_FREE
161 // The number of stacks used in this allocation.
162 size_t used;
163#endif
164
165 struct fiber_pool * pool;
166
167 // The allocation linked list.
168#ifdef FIBER_POOL_ALLOCATION_FREE
169 struct fiber_pool_allocation * previous;
170#endif
171 struct fiber_pool_allocation * next;
172};
173
174// A fiber pool manages vacant stacks to reduce the overhead of creating fibers.
175struct fiber_pool {
176 // A singly-linked list of allocations which contain 1 or more stacks each.
177 struct fiber_pool_allocation * allocations;
178
179 // Free list that provides O(1) stack "allocation".
180 struct fiber_pool_vacancy * vacancies;
181
182 // The size of the stack allocations (excluding any guard page).
183 size_t size;
184
185 // The total number of stacks that have been allocated in this pool.
186 size_t count;
187
188 // The initial number of stacks to allocate.
189 size_t initial_count;
190
191 // Whether to madvise(free) the stack or not.
192 // If this value is set to 1, the stack will be madvise(free)ed
193 // (or equivalent), where possible, when it is returned to the pool.
194 int free_stacks;
195
196 // The number of stacks that have been used in this pool.
197 size_t used;
198
199 // The amount to allocate for the vm_stack.
200 size_t vm_stack_size;
201};
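// Overview of the pool lifecycle implemented below: fiber_pool_stack_acquire()
// pops a vacancy from the free list, calling fiber_pool_expand() to map a new
// allocation when none is available, and fiber_pool_stack_release() pushes the
// vacancy back, optionally returning the pages to the system (see
// fiber_pool_stack_free()) when free_stacks is set.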
202
203// Continuation contexts used by JITs
204struct rb_jit_cont {
205 rb_execution_context_t *ec; // continuation ec
206 struct rb_jit_cont *prev, *next; // used to form lists
207};
208
209// Doubly linked list for enumerating all on-stack ISEQs.
210static struct rb_jit_cont *first_jit_cont;
211
212typedef struct rb_context_struct {
213 enum context_type type;
214 int argc;
215 int kw_splat;
216 VALUE self;
217 VALUE value;
218
219 struct cont_saved_vm_stack saved_vm_stack;
220
221 struct {
222 VALUE *stack;
223 VALUE *stack_src;
224 size_t stack_size;
225 } machine;
226 rb_execution_context_t saved_ec;
227 rb_jmpbuf_t jmpbuf;
228 struct rb_jit_cont *jit_cont; // Continuation contexts for JITs
229} rb_context_t;
230
231/*
232 * Fiber status:
233 * [Fiber.new] ------> FIBER_CREATED ----> [Fiber#kill] --> |
234 * | [Fiber#resume] |
235 * v |
236 * +--> FIBER_RESUMED ----> [return] ------> |
237 * [Fiber#resume] | | [Fiber.yield/transfer] |
238 * [Fiber#transfer] | v |
239 * +--- FIBER_SUSPENDED --> [Fiber#kill] --> |
240 * |
241 * |
242 * FIBER_TERMINATED <-------------------+
243 */
244enum fiber_status {
245 FIBER_CREATED,
246 FIBER_RESUMED,
247 FIBER_SUSPENDED,
248 FIBER_TERMINATED
249};
250
251#define FIBER_CREATED_P(fiber) ((fiber)->status == FIBER_CREATED)
252#define FIBER_RESUMED_P(fiber) ((fiber)->status == FIBER_RESUMED)
253#define FIBER_SUSPENDED_P(fiber) ((fiber)->status == FIBER_SUSPENDED)
254#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
255#define FIBER_RUNNABLE_P(fiber) (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
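// For illustration: a trivial Ruby-level fiber moves through these states
// roughly as follows:
//
//   f = Fiber.new { Fiber.yield }   # FIBER_CREATED
//   f.resume                        # FIBER_RESUMED, then FIBER_SUSPENDED at the yield
//   f.resume                        # FIBER_RESUMED, then FIBER_TERMINATED on return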
256
257struct rb_fiber_struct {
258 rb_context_t cont;
259 VALUE first_proc;
260 struct rb_fiber_struct *prev;
261 struct rb_fiber_struct *resuming_fiber;
262
263 BITFIELD(enum fiber_status, status, 2);
264 /* Whether the fiber is allowed to implicitly yield. */
265 unsigned int yielding : 1;
266 unsigned int blocking : 1;
267
268 unsigned int killed : 1;
269
270 struct coroutine_context context;
271 struct fiber_pool_stack stack;
272};
273
274static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};
275
276void
277rb_free_shared_fiber_pool(void)
278{
279 struct fiber_pool_allocation *allocations = shared_fiber_pool.allocations;
280 while (allocations) {
281 struct fiber_pool_allocation *next = allocations->next;
282 xfree(allocations);
283 allocations = next;
284 }
285}
286
287static ID fiber_initialize_keywords[3] = {0};
288
289/*
290 * FreeBSD requires that the first (i.e. addr) argument of mmap(2) is not NULL
291 * if MAP_STACK is passed.
292 * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=158755
293 */
294#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
295#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
296#else
297#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
298#endif
299
300#define ERRNOMSG strerror(errno)
301
302// Locates the stack vacancy details for the given stack.
303inline static struct fiber_pool_vacancy *
304fiber_pool_vacancy_pointer(void * base, size_t size)
305{
306 STACK_GROW_DIR_DETECTION;
307
308 return (struct fiber_pool_vacancy *)(
309 (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
310 );
311}
312
313#if defined(COROUTINE_SANITIZE_ADDRESS)
314// Compute the base pointer for a vacant stack, for the area which can be poisoned.
315inline static void *
316fiber_pool_stack_poison_base(struct fiber_pool_stack * stack)
317{
318 STACK_GROW_DIR_DETECTION;
319
320 return (char*)stack->base + STACK_DIR_UPPER(RB_PAGE_SIZE, 0);
321}
322
323// Compute the size of the vacant stack, for the area that can be poisoned.
324inline static size_t
325fiber_pool_stack_poison_size(struct fiber_pool_stack * stack)
326{
327 return stack->size - RB_PAGE_SIZE;
328}
329#endif
330
331// Reset the current stack pointer and available size of the given stack.
332inline static void
333fiber_pool_stack_reset(struct fiber_pool_stack * stack)
334{
335 STACK_GROW_DIR_DETECTION;
336
337 stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
338 stack->available = stack->size;
339}
340
341// A pointer to the base of the current unused portion of the stack.
342inline static void *
343fiber_pool_stack_base(struct fiber_pool_stack * stack)
344{
345 STACK_GROW_DIR_DETECTION;
346
347 VM_ASSERT(stack->current);
348
349 return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);
350}
351
352// Allocate some memory from the stack. Used to allocate vm_stack inline with machine stack.
353// @sa fiber_initialize_coroutine
354inline static void *
355fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
356{
357 STACK_GROW_DIR_DETECTION;
358
359 if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
360 VM_ASSERT(stack->available >= offset);
361
362 // The pointer to the memory being allocated:
363 void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);
364
365 // Move the stack pointer:
366 stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
367 stack->available -= offset;
368
369 return pointer;
370}
371
372// Reset the current stack pointer and available size of the given stack.
373inline static void
374fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
375{
376 fiber_pool_stack_reset(&vacancy->stack);
377
378 // Consume one page of the stack because it's used for the vacancy list:
379 fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
380}
381
382inline static struct fiber_pool_vacancy *
383fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
384{
385 vacancy->next = head;
386
387#ifdef FIBER_POOL_ALLOCATION_FREE
388 if (head) {
389 head->previous = vacancy;
390 vacancy->previous = NULL;
391 }
392#endif
393
394 return vacancy;
395}
396
397#ifdef FIBER_POOL_ALLOCATION_FREE
398static void
399fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
400{
401 if (vacancy->next) {
402 vacancy->next->previous = vacancy->previous;
403 }
404
405 if (vacancy->previous) {
406 vacancy->previous->next = vacancy->next;
407 }
408 else {
409 // It's the head of the list:
410 vacancy->stack.pool->vacancies = vacancy->next;
411 }
412}
413
414inline static struct fiber_pool_vacancy *
415fiber_pool_vacancy_pop(struct fiber_pool * pool)
416{
417 struct fiber_pool_vacancy * vacancy = pool->vacancies;
418
419 if (vacancy) {
420 fiber_pool_vacancy_remove(vacancy);
421 }
422
423 return vacancy;
424}
425#else
426inline static struct fiber_pool_vacancy *
427fiber_pool_vacancy_pop(struct fiber_pool * pool)
428{
429 struct fiber_pool_vacancy * vacancy = pool->vacancies;
430
431 if (vacancy) {
432 pool->vacancies = vacancy->next;
433 }
434
435 return vacancy;
436}
437#endif
438
439// Initialize the vacant stack. The [base, size] allocation should not include the guard page.
440// @param base The pointer to the lowest address of the allocated memory.
441// @param size The size of the allocated memory.
442inline static struct fiber_pool_vacancy *
443fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
444{
445 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);
446
447 vacancy->stack.base = base;
448 vacancy->stack.size = size;
449
450 fiber_pool_vacancy_reset(vacancy);
451
452 vacancy->stack.pool = fiber_pool;
453
454 return fiber_pool_vacancy_push(vacancy, vacancies);
455}
456
457// Allocate a maximum of count stacks, size given by stride.
458// @param count the number of stacks to allocate; updated in place to the number actually allocated.
459// @param stride the size of the individual stacks.
460// @return [void *] the allocated memory or NULL if allocation failed.
461inline static void *
462fiber_pool_allocate_memory(size_t * count, size_t stride)
463{
464 // We use a divide-by-2 strategy to try to allocate memory. We are trying
465 // to allocate `count` stacks. In normal situations, this won't fail. But
466 // if we run out of address space, or we are allocating more memory than
467 // the system would allow (e.g. overcommit * physical memory + swap), we
468 // divide count by two and try again. This condition should only be
469 // encountered in edge cases, but we handle it here gracefully.
470 while (*count > 1) {
471#if defined(_WIN32)
472 void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);
473
474 if (!base) {
475 *count = (*count) >> 1;
476 }
477 else {
478 return base;
479 }
480#else
481 errno = 0;
482 size_t mmap_size = (*count)*stride;
483 void * base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
484
485 if (base == MAP_FAILED) {
486 // If the allocation fails, count = count / 2, and try again.
487 *count = (*count) >> 1;
488 }
489 else {
490 ruby_annotate_mmap(base, mmap_size, "Ruby:fiber_pool_allocate_memory");
491#if defined(MADV_FREE_REUSE)
492 // On macOS, MADV_FREE_REUSE is necessary for the task_info API to keep the
493 // accounting as accurate as possible when a page is marked as reusable. The
494 // request may not take effect on the first call, so we retry on EAGAIN.
495 while (madvise(base, mmap_size, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
496#endif
497 return base;
498 }
499#endif
500 }
501
502 return NULL;
503}
504
505// Given an existing fiber pool, expand it by the specified number of stacks.
506// @param count the maximum number of stacks to allocate.
507// @return the new fiber pool allocation.
508// @sa fiber_pool_allocation_free
509static struct fiber_pool_allocation *
510fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
511{
512 struct fiber_pool_allocation * allocation;
513 RB_VM_LOCK_ENTER();
514 {
515 STACK_GROW_DIR_DETECTION;
516
517 size_t size = fiber_pool->size;
518 size_t stride = size + RB_PAGE_SIZE;
519
520 // Allocate the memory required for the stacks:
521 void * base = fiber_pool_allocate_memory(&count, stride);
522
523 if (base == NULL) {
524 rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
525 }
526
527 struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
528 allocation = RB_ALLOC(struct fiber_pool_allocation);
529
530 // Initialize fiber pool allocation:
531 allocation->base = base;
532 allocation->size = size;
533 allocation->stride = stride;
534 allocation->count = count;
535#ifdef FIBER_POOL_ALLOCATION_FREE
536 allocation->used = 0;
537#endif
538 allocation->pool = fiber_pool;
539
540 if (DEBUG) {
541 fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
542 count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
543 }
544
545 // Iterate over all stacks, initializing the vacancy list:
546 for (size_t i = 0; i < count; i += 1) {
547 void * base = (char*)allocation->base + (stride * i);
548 void * page = (char*)base + STACK_DIR_UPPER(size, 0);
549#if defined(_WIN32)
550 DWORD old_protect;
551
552 if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
553 VirtualFree(allocation->base, 0, MEM_RELEASE);
554 rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
555 }
556#elif defined(__wasi__)
557 // wasi-libc's mprotect emulation doesn't support PROT_NONE.
558 (void)page;
559#else
560 if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
561 munmap(allocation->base, count*stride);
562 rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
563 }
564#endif
565
566 vacancies = fiber_pool_vacancy_initialize(
567 fiber_pool, vacancies,
568 (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
569 size
570 );
571
572#ifdef FIBER_POOL_ALLOCATION_FREE
573 vacancies->stack.allocation = allocation;
574#endif
575 }
576
577 // Insert the allocation into the head of the pool:
578 allocation->next = fiber_pool->allocations;
579
580#ifdef FIBER_POOL_ALLOCATION_FREE
581 if (allocation->next) {
582 allocation->next->previous = allocation;
583 }
584
585 allocation->previous = NULL;
586#endif
587
588 fiber_pool->allocations = allocation;
589 fiber_pool->vacancies = vacancies;
590 fiber_pool->count += count;
591 }
592 RB_VM_LOCK_LEAVE();
593
594 return allocation;
595}
596
597// Initialize the specified fiber pool with the given number of stacks.
598// @param vm_stack_size The size of the vm stack to allocate.
599static void
600fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)
601{
602 VM_ASSERT(vm_stack_size < size);
603
604 fiber_pool->allocations = NULL;
605 fiber_pool->vacancies = NULL;
606 fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
607 fiber_pool->count = 0;
608 fiber_pool->initial_count = count;
609 fiber_pool->free_stacks = 1;
610 fiber_pool->used = 0;
611
612 fiber_pool->vm_stack_size = vm_stack_size;
613
614 fiber_pool_expand(fiber_pool, count);
615}
616
617#ifdef FIBER_POOL_ALLOCATION_FREE
618// Free the list of fiber pool allocations.
619static void
620fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
621{
622 STACK_GROW_DIR_DETECTION;
623
624 VM_ASSERT(allocation->used == 0);
625
626 if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", (void*)allocation, allocation->base, allocation->count);
627
628 size_t i;
629 for (i = 0; i < allocation->count; i += 1) {
630 void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);
631
632 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);
633
634 // Pop the vacant stack off the free list:
635 fiber_pool_vacancy_remove(vacancy);
636 }
637
638#ifdef _WIN32
639 VirtualFree(allocation->base, 0, MEM_RELEASE);
640#else
641 munmap(allocation->base, allocation->stride * allocation->count);
642#endif
643
644 if (allocation->previous) {
645 allocation->previous->next = allocation->next;
646 }
647 else {
648 // We are the head of the list, so update the pool:
649 allocation->pool->allocations = allocation->next;
650 }
651
652 if (allocation->next) {
653 allocation->next->previous = allocation->previous;
654 }
655
656 allocation->pool->count -= allocation->count;
657
658 ruby_xfree(allocation);
659}
660#endif
661
662// Acquire a stack from the given fiber pool. If none are available, allocate more.
663static struct fiber_pool_stack
664fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
665{
666 struct fiber_pool_vacancy * vacancy;
667 RB_VM_LOCK_ENTER();
668 {
669 vacancy = fiber_pool_vacancy_pop(fiber_pool);
670
671 if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
672
673 if (!vacancy) {
674 const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
675 const size_t minimum = fiber_pool->initial_count;
676
677 size_t count = fiber_pool->count;
678 if (count > maximum) count = maximum;
679 if (count < minimum) count = minimum;
680
681 fiber_pool_expand(fiber_pool, count);
682
683 // The free list should now contain some stacks:
684 VM_ASSERT(fiber_pool->vacancies);
685
686 vacancy = fiber_pool_vacancy_pop(fiber_pool);
687 }
688
689 VM_ASSERT(vacancy);
690 VM_ASSERT(vacancy->stack.base);
691
692#if defined(COROUTINE_SANITIZE_ADDRESS)
693 __asan_unpoison_memory_region(fiber_pool_stack_poison_base(&vacancy->stack), fiber_pool_stack_poison_size(&vacancy->stack));
694#endif
695
696 // Take the top item from the free list:
697 fiber_pool->used += 1;
698
699#ifdef FIBER_POOL_ALLOCATION_FREE
700 vacancy->stack.allocation->used += 1;
701#endif
702
703 fiber_pool_stack_reset(&vacancy->stack);
704 }
705 RB_VM_LOCK_LEAVE();
706
707 return vacancy->stack;
708}
709
710// We advise the operating system that the stack memory pages are no longer being used.
711// This introduces some performance overhead, but it allows the system to reclaim memory when there is pressure.
712static inline void
713fiber_pool_stack_free(struct fiber_pool_stack * stack)
714{
715 void * base = fiber_pool_stack_base(stack);
716 size_t size = stack->available;
717
718 // If this is not true, the vacancy information will almost certainly be destroyed:
719 VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));
720
721 int advice = stack->pool->free_stacks >> 1;
722
723 if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"] advice=%d\n", base, size, stack->base, stack->size, advice);
724
725 // The pages being used by the stack can be returned back to the system.
726 // That doesn't change the page mapping, but it does allow the system to
727 // reclaim the physical memory.
728 // Since we no longer care about the data itself, there is no need to page
729 // it out to disk, which would be costly. Not all systems support this, so
730 // we try our best to select the most efficient implementation available.
731 // In addition, skipping this step entirely is actually slightly cheaper,
732 // but it results in higher memory usage.
733
734#ifdef __wasi__
735 // WebAssembly doesn't support madvise, so we just don't do anything.
736#elif VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
737 if (!advice) advice = MADV_DONTNEED;
738 // This immediately discards the pages and the memory is reset to zero.
739 madvise(base, size, advice);
740#elif defined(MADV_FREE_REUSABLE)
741 if (!advice) advice = MADV_FREE_REUSABLE;
742 // Darwin / macOS / iOS.
743 // Acknowledge to the kernel, through the task_info API, that we are making
744 // this page reusable for future use.
745 // As with MADV_FREE_REUSE above, we retry in the rare case that the request
746 // was not completed at the time of the call.
747 while (madvise(base, size, advice) == -1 && errno == EAGAIN);
748#elif defined(MADV_FREE)
749 if (!advice) advice = MADV_FREE;
750 // Recent Linux.
751 madvise(base, size, advice);
752#elif defined(MADV_DONTNEED)
753 if (!advice) advice = MADV_DONTNEED;
754 // Old Linux.
755 madvise(base, size, advice);
756#elif defined(POSIX_MADV_DONTNEED)
757 if (!advice) advice = POSIX_MADV_DONTNEED;
758 // Solaris?
759 posix_madvise(base, size, advice);
760#elif defined(_WIN32)
761 VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
762 // Not available in all versions of Windows.
763 //DiscardVirtualMemory(base, size);
764#endif
765
766#if defined(COROUTINE_SANITIZE_ADDRESS)
767 __asan_poison_memory_region(fiber_pool_stack_poison_base(stack), fiber_pool_stack_poison_size(stack));
768#endif
769}
770
771// Release and return a stack to the vacancy list.
772static void
773fiber_pool_stack_release(struct fiber_pool_stack * stack)
774{
775 struct fiber_pool * pool = stack->pool;
776 RB_VM_LOCK_ENTER();
777 {
778 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
779
780 if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
781
782 // Copy the stack details into the vacancy area:
783 vacancy->stack = *stack;
784 // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.
785
786 // Reset the stack pointers and reserve space for the vacancy data:
787 fiber_pool_vacancy_reset(vacancy);
788
789 // Push the vacancy into the vacancies list:
790 pool->vacancies = fiber_pool_vacancy_push(vacancy, pool->vacancies);
791 pool->used -= 1;
792
793#ifdef FIBER_POOL_ALLOCATION_FREE
794 struct fiber_pool_allocation * allocation = stack->allocation;
795
796 allocation->used -= 1;
797
798 // Release address space and/or dirty memory:
799 if (allocation->used == 0) {
800 fiber_pool_allocation_free(allocation);
801 }
802 else if (stack->pool->free_stacks) {
803 fiber_pool_stack_free(&vacancy->stack);
804 }
805#else
806 // This is entirely optional, but clears the dirty flag from the stack
807 // memory, so it won't get swapped to disk when there is memory pressure:
808 if (stack->pool->free_stacks) {
809 fiber_pool_stack_free(&vacancy->stack);
810 }
811#endif
812 }
813 RB_VM_LOCK_LEAVE();
814}
815
816static inline void
817ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
818{
819 rb_execution_context_t *ec = &fiber->cont.saved_ec;
820#ifdef RUBY_ASAN_ENABLED
821 ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
822#endif
823 rb_ractor_set_current_ec(th->ractor, th->ec = ec);
824 // ruby_current_execution_context_ptr = th->ec = ec;
825
826 /*
827 * timer-thread may set trap interrupt on previous th->ec at any time;
828 * ensure we do not delay (or lose) the trap interrupt handling.
829 */
830 if (th->vm->ractor.main_thread == th &&
831 rb_signal_buff_size() > 0) {
832 RUBY_VM_SET_TRAP_INTERRUPT(ec);
833 }
834
835 VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
836}
837
838static inline void
839fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
840{
841 ec_switch(th, fiber);
842 VM_ASSERT(th->ec->fiber_ptr == fiber);
843}
844
845#ifndef COROUTINE_DECL
846# define COROUTINE_DECL COROUTINE
847#endif
848NORETURN(static COROUTINE_DECL fiber_entry(struct coroutine_context * from, struct coroutine_context * to));
849static COROUTINE
850fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
851{
852 rb_fiber_t *fiber = to->argument;
853
854#if defined(COROUTINE_SANITIZE_ADDRESS)
855 // Address sanitizer will copy the previous stack base and stack size into
856 // the "from" fiber. `coroutine_initialize_main` doesn't generally know the
857 // stack bounds (base + size). Therefore, the main fiber `stack_base` and
858 // `stack_size` will be NULL/0. It's specifically important in that case to
859 // get the (base+size) of the previous fiber and save it, so that later when
860 // we return to the main coroutine, we don't supply (NULL, 0) to
861 // __sanitizer_start_switch_fiber which royally messes up the internal state
862 // of ASAN and causes (sometimes) the following message:
863 // "WARNING: ASan is ignoring requested __asan_handle_no_return"
864 __sanitizer_finish_switch_fiber(to->fake_stack, (const void**)&from->stack_base, &from->stack_size);
865#endif
866
867 rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;
868
869#ifdef COROUTINE_PTHREAD_CONTEXT
870 ruby_thread_set_native(thread);
871#endif
872
873 fiber_restore_thread(thread, fiber);
874
875 rb_fiber_start(fiber);
876
877#ifndef COROUTINE_PTHREAD_CONTEXT
878 VM_UNREACHABLE(fiber_entry);
879#endif
880}
881
882// Initialize a fiber's coroutine's machine stack and vm stack.
883static VALUE *
884fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
885{
886 struct fiber_pool * fiber_pool = fiber->stack.pool;
887 rb_execution_context_t *sec = &fiber->cont.saved_ec;
888 void * vm_stack = NULL;
889
890 VM_ASSERT(fiber_pool != NULL);
891
892 fiber->stack = fiber_pool_stack_acquire(fiber_pool);
893 vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
894 *vm_stack_size = fiber_pool->vm_stack_size;
895
896 coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
897
898 // The stack for this execution context is the one we allocated:
899 sec->machine.stack_start = fiber->stack.current;
900 sec->machine.stack_maxsize = fiber->stack.available;
901
902 fiber->context.argument = (void*)fiber;
903
904 return vm_stack;
905}
906
907// Release the stack from the fiber, its execution context, and return it to
908// the fiber pool.
909static void
910fiber_stack_release(rb_fiber_t * fiber)
911{
912 rb_execution_context_t *ec = &fiber->cont.saved_ec;
913
914 if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);
915
916 // Return the stack back to the fiber pool if it wasn't already:
917 if (fiber->stack.base) {
918 fiber_pool_stack_release(&fiber->stack);
919 fiber->stack.base = NULL;
920 }
921
922 // The stack is no longer associated with this execution context:
923 rb_ec_clear_vm_stack(ec);
924}
925
926static const char *
927fiber_status_name(enum fiber_status s)
928{
929 switch (s) {
930 case FIBER_CREATED: return "created";
931 case FIBER_RESUMED: return "resumed";
932 case FIBER_SUSPENDED: return "suspended";
933 case FIBER_TERMINATED: return "terminated";
934 }
935 VM_UNREACHABLE(fiber_status_name);
936 return NULL;
937}
938
939static void
940fiber_verify(const rb_fiber_t *fiber)
941{
942#if VM_CHECK_MODE > 0
943 VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);
944
945 switch (fiber->status) {
946 case FIBER_RESUMED:
947 VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
948 break;
949 case FIBER_SUSPENDED:
950 VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
951 break;
952 case FIBER_CREATED:
953 case FIBER_TERMINATED:
954 /* TODO */
955 break;
956 default:
957 VM_UNREACHABLE(fiber_verify);
958 }
959#endif
960}
961
962inline static void
963fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
964{
965 // if (DEBUG) fprintf(stderr, "fiber: %p, status: %s -> %s\n", (void *)fiber, fiber_status_name(fiber->status), fiber_status_name(s));
966 VM_ASSERT(!FIBER_TERMINATED_P(fiber));
967 VM_ASSERT(fiber->status != s);
968 fiber_verify(fiber);
969 fiber->status = s;
970}
971
972static rb_context_t *
973cont_ptr(VALUE obj)
974{
975 rb_context_t *cont;
976
977 TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);
978
979 return cont;
980}
981
982static rb_fiber_t *
983fiber_ptr(VALUE obj)
984{
985 rb_fiber_t *fiber;
986
987 TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);
988 if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");
989
990 return fiber;
991}
992
993NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
994
995#define THREAD_MUST_BE_RUNNING(th) do { \
996 if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
997 } while (0)
998
999rb_thread_t *
1000rb_fiber_threadptr(const rb_fiber_t *fiber)
1001{
1002 return fiber->cont.saved_ec.thread_ptr;
1003}
1004
1005static VALUE
1006cont_thread_value(const rb_context_t *cont)
1007{
1008 return cont->saved_ec.thread_ptr->self;
1009}
1010
1011static void
1012cont_compact(void *ptr)
1013{
1014 rb_context_t *cont = ptr;
1015
1016 if (cont->self) {
1017 cont->self = rb_gc_location(cont->self);
1018 }
1019 cont->value = rb_gc_location(cont->value);
1020 rb_execution_context_update(&cont->saved_ec);
1021}
1022
1023static void
1024cont_mark(void *ptr)
1025{
1026 rb_context_t *cont = ptr;
1027
1028 RUBY_MARK_ENTER("cont");
1029 if (cont->self) {
1030 rb_gc_mark_movable(cont->self);
1031 }
1032 rb_gc_mark_movable(cont->value);
1033
1034 rb_execution_context_mark(&cont->saved_ec);
1035 rb_gc_mark(cont_thread_value(cont));
1036
1037 if (cont->saved_vm_stack.ptr) {
1038#ifdef CAPTURE_JUST_VALID_VM_STACK
1039 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
1040 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1041#else
1042 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
1043 cont->saved_vm_stack.ptr + cont->saved_ec.vm_stack_size);
1044#endif
1045 }
1046
1047 if (cont->machine.stack) {
1048 if (cont->type == CONTINUATION_CONTEXT) {
1049 /* cont */
1050 rb_gc_mark_locations(cont->machine.stack,
1051 cont->machine.stack + cont->machine.stack_size);
1052 }
1053 else {
1054 /* fiber machine context is marked as part of rb_execution_context_mark, no need to
1055 * do anything here. */
1056 }
1057 }
1058
1059 RUBY_MARK_LEAVE("cont");
1060}
1061
1062#if 0
1063static int
1064fiber_is_root_p(const rb_fiber_t *fiber)
1065{
1066 return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
1067}
1068#endif
1069
1070static void jit_cont_free(struct rb_jit_cont *cont);
1071
1072static void
1073cont_free(void *ptr)
1074{
1075 rb_context_t *cont = ptr;
1076
1077 RUBY_FREE_ENTER("cont");
1078
1079 if (cont->type == CONTINUATION_CONTEXT) {
1080 ruby_xfree(cont->saved_ec.vm_stack);
1081 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
1082 }
1083 else {
1084 rb_fiber_t *fiber = (rb_fiber_t*)cont;
1085 coroutine_destroy(&fiber->context);
1086 fiber_stack_release(fiber);
1087 }
1088
1089 RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
1090
1091 VM_ASSERT(cont->jit_cont != NULL);
1092 jit_cont_free(cont->jit_cont);
1093 /* free rb_cont_t or rb_fiber_t */
1094 ruby_xfree(ptr);
1095 RUBY_FREE_LEAVE("cont");
1096}
1097
1098static size_t
1099cont_memsize(const void *ptr)
1100{
1101 const rb_context_t *cont = ptr;
1102 size_t size = 0;
1103
1104 size = sizeof(*cont);
1105 if (cont->saved_vm_stack.ptr) {
1106#ifdef CAPTURE_JUST_VALID_VM_STACK
1107 size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1108#else
1109 size_t n = cont->saved_ec.vm_stack_size;
1110#endif
1111 size += n * sizeof(*cont->saved_vm_stack.ptr);
1112 }
1113
1114 if (cont->machine.stack) {
1115 size += cont->machine.stack_size * sizeof(*cont->machine.stack);
1116 }
1117
1118 return size;
1119}
1120
1121void
1122rb_fiber_update_self(rb_fiber_t *fiber)
1123{
1124 if (fiber->cont.self) {
1125 fiber->cont.self = rb_gc_location(fiber->cont.self);
1126 }
1127 else {
1128 rb_execution_context_update(&fiber->cont.saved_ec);
1129 }
1130}
1131
1132void
1133rb_fiber_mark_self(const rb_fiber_t *fiber)
1134{
1135 if (fiber->cont.self) {
1136 rb_gc_mark_movable(fiber->cont.self);
1137 }
1138 else {
1139 rb_execution_context_mark(&fiber->cont.saved_ec);
1140 }
1141}
1142
1143static void
1144fiber_compact(void *ptr)
1145{
1146 rb_fiber_t *fiber = ptr;
1147 fiber->first_proc = rb_gc_location(fiber->first_proc);
1148
1149 if (fiber->prev) rb_fiber_update_self(fiber->prev);
1150
1151 cont_compact(&fiber->cont);
1152 fiber_verify(fiber);
1153}
1154
1155static void
1156fiber_mark(void *ptr)
1157{
1158 rb_fiber_t *fiber = ptr;
1159 RUBY_MARK_ENTER("cont");
1160 fiber_verify(fiber);
1161 rb_gc_mark_movable(fiber->first_proc);
1162 if (fiber->prev) rb_fiber_mark_self(fiber->prev);
1163 cont_mark(&fiber->cont);
1164 RUBY_MARK_LEAVE("cont");
1165}
1166
1167static void
1168fiber_free(void *ptr)
1169{
1170 rb_fiber_t *fiber = ptr;
1171 RUBY_FREE_ENTER("fiber");
1172
1173 if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", (void *)fiber, fiber->stack.base);
1174
1175 if (fiber->cont.saved_ec.local_storage) {
1176 rb_id_table_free(fiber->cont.saved_ec.local_storage);
1177 }
1178
1179 cont_free(&fiber->cont);
1180 RUBY_FREE_LEAVE("fiber");
1181}
1182
1183static size_t
1184fiber_memsize(const void *ptr)
1185{
1186 const rb_fiber_t *fiber = ptr;
1187 size_t size = sizeof(*fiber);
1188 const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
1189 const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
1190
1191 /*
1192 * vm.c::thread_memsize already counts th->ec->local_storage
1193 */
1194 if (saved_ec->local_storage && fiber != th->root_fiber) {
1195 size += rb_id_table_memsize(saved_ec->local_storage);
1196 size += rb_obj_memsize_of(saved_ec->storage);
1197 }
1198
1199 size += cont_memsize(&fiber->cont);
1200 return size;
1201}
1202
1203VALUE
1204rb_obj_is_fiber(VALUE obj)
1205{
1206 return RBOOL(rb_typeddata_is_kind_of(obj, &fiber_data_type));
1207}
1208
1209static void
1210cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
1211{
1212 size_t size;
1213
1214 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1215
1216 if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
1217 size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1218 cont->machine.stack_src = th->ec->machine.stack_end;
1219 }
1220 else {
1221 size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1222 cont->machine.stack_src = th->ec->machine.stack_start;
1223 }
1224
1225 if (cont->machine.stack) {
1226 REALLOC_N(cont->machine.stack, VALUE, size);
1227 }
1228 else {
1229 cont->machine.stack = ALLOC_N(VALUE, size);
1230 }
1231
1232 FLUSH_REGISTER_WINDOWS;
1233 asan_unpoison_memory_region(cont->machine.stack_src, size, false);
1234 MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
1235}
1236
1237static const rb_data_type_t cont_data_type = {
1238 "continuation",
1239 {cont_mark, cont_free, cont_memsize, cont_compact},
1240 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1241};
1242
1243static inline void
1244cont_save_thread(rb_context_t *cont, rb_thread_t *th)
1245{
1246 rb_execution_context_t *sec = &cont->saved_ec;
1247
1248 VM_ASSERT(th->status == THREAD_RUNNABLE);
1249
1250 /* save thread context */
1251 *sec = *th->ec;
1252
1253 /* saved_ec->machine.stack_end should be NULL */
1254 /* because it may happen GC afterward */
1255 sec->machine.stack_end = NULL;
1256}
1257
1258static rb_nativethread_lock_t jit_cont_lock;
1259
1260// Register a new continuation with execution context `ec`. Return JIT info about
1261// the continuation.
1262static struct rb_jit_cont *
1263jit_cont_new(rb_execution_context_t *ec)
1264{
1265 struct rb_jit_cont *cont;
1266
1267 // We need to use calloc instead of something like ZALLOC to avoid triggering GC here.
1268 // When this function is called from rb_thread_alloc through rb_threadptr_root_fiber_setup,
1269 // the thread is still being prepared and marking it causes SEGV.
1270 cont = calloc(1, sizeof(struct rb_jit_cont));
1271 if (cont == NULL)
1272 rb_memerror();
1273 cont->ec = ec;
1274
1275 rb_native_mutex_lock(&jit_cont_lock);
1276 if (first_jit_cont == NULL) {
1277 cont->next = cont->prev = NULL;
1278 }
1279 else {
1280 cont->prev = NULL;
1281 cont->next = first_jit_cont;
1282 first_jit_cont->prev = cont;
1283 }
1284 first_jit_cont = cont;
1285 rb_native_mutex_unlock(&jit_cont_lock);
1286
1287 return cont;
1288}
1289
1290// Unregister continuation `cont`.
1291static void
1292jit_cont_free(struct rb_jit_cont *cont)
1293{
1294 if (!cont) return;
1295
1296 rb_native_mutex_lock(&jit_cont_lock);
1297 if (cont == first_jit_cont) {
1298 first_jit_cont = cont->next;
1299 if (first_jit_cont != NULL)
1300 first_jit_cont->prev = NULL;
1301 }
1302 else {
1303 cont->prev->next = cont->next;
1304 if (cont->next != NULL)
1305 cont->next->prev = cont->prev;
1306 }
1307 rb_native_mutex_unlock(&jit_cont_lock);
1308
1309 free(cont);
1310}
1311
1312// Call a given callback against all on-stack ISEQs.
1313void
1314rb_jit_cont_each_iseq(rb_iseq_callback callback, void *data)
1315{
1316 struct rb_jit_cont *cont;
1317 for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
1318 if (cont->ec->vm_stack == NULL)
1319 continue;
1320
1321 const rb_control_frame_t *cfp = cont->ec->cfp;
1322 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
1323 if (cfp->pc && cfp->iseq && imemo_type((VALUE)cfp->iseq) == imemo_iseq) {
1324 callback(cfp->iseq, data);
1325 }
1326 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1327 }
1328 }
1329}
1330
1331#if USE_YJIT
1332// Update the jit_return of all CFPs to leave_exit unless it's leave_exception or not set.
1333// This prevents jit_exec_exception from jumping to the caller after invalidation.
1334void
1335rb_yjit_cancel_jit_return(void *leave_exit, void *leave_exception)
1336{
1337 struct rb_jit_cont *cont;
1338 for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
1339 if (cont->ec->vm_stack == NULL)
1340 continue;
1341
1342 const rb_control_frame_t *cfp = cont->ec->cfp;
1343 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
1344 if (cfp->jit_return && cfp->jit_return != leave_exception) {
1345 ((rb_control_frame_t *)cfp)->jit_return = leave_exit;
1346 }
1347 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1348 }
1349 }
1350}
1351#endif
1352
1353// Finish working with jit_cont.
1354void
1355rb_jit_cont_finish(void)
1356{
1357 struct rb_jit_cont *cont, *next;
1358 for (cont = first_jit_cont; cont != NULL; cont = next) {
1359 next = cont->next;
1360 free(cont); // Don't use xfree because it's allocated by calloc.
1361 }
1362 rb_native_mutex_destroy(&jit_cont_lock);
1363}
1364
1365static void
1366cont_init_jit_cont(rb_context_t *cont)
1367{
1368 VM_ASSERT(cont->jit_cont == NULL);
1369 // We always allocate this since YJIT may be enabled later
1370 cont->jit_cont = jit_cont_new(&(cont->saved_ec));
1371}
1372
1373rb_execution_context_t *
1374rb_fiberptr_get_ec(struct rb_fiber_struct *fiber)
1375{
1376 return &fiber->cont.saved_ec;
1377}
1378
1379static void
1380cont_init(rb_context_t *cont, rb_thread_t *th)
1381{
1382 /* save thread context */
1383 cont_save_thread(cont, th);
1384 cont->saved_ec.thread_ptr = th;
1385 cont->saved_ec.local_storage = NULL;
1386 cont->saved_ec.local_storage_recursive_hash = Qnil;
1387 cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
1388 cont_init_jit_cont(cont);
1389}
1390
1391static rb_context_t *
1392cont_new(VALUE klass)
1393{
1394 rb_context_t *cont;
1395 volatile VALUE contval;
1396 rb_thread_t *th = GET_THREAD();
1397
1398 THREAD_MUST_BE_RUNNING(th);
1399 contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
1400 cont->self = contval;
1401 cont_init(cont, th);
1402 return cont;
1403}
1404
1405VALUE
1406rb_fiberptr_self(struct rb_fiber_struct *fiber)
1407{
1408 return fiber->cont.self;
1409}
1410
1411unsigned int
1412rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
1413{
1414 return fiber->blocking;
1415}
1416
1417// Initialize the jit_cont_lock
1418void
1419rb_jit_cont_init(void)
1420{
1421 rb_native_mutex_initialize(&jit_cont_lock);
1422}
1423
1424#if 0
1425void
1426show_vm_stack(const rb_execution_context_t *ec)
1427{
1428 VALUE *p = ec->vm_stack;
1429 while (p < ec->cfp->sp) {
1430 fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
1431 rb_obj_info_dump(*p);
1432 p++;
1433 }
1434}
1435
1436void
1437show_vm_pcs(const rb_control_frame_t *cfp,
1438 const rb_control_frame_t *end_of_cfp)
1439{
1440 int i=0;
1441 while (cfp != end_of_cfp) {
1442 int pc = 0;
1443 if (cfp->iseq) {
1444 pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
1445 }
1446 fprintf(stderr, "%2d pc: %d\n", i++, pc);
1447 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1448 }
1449}
1450#endif
1451
1452static VALUE
1453cont_capture(volatile int *volatile stat)
1454{
1455 rb_context_t *volatile cont;
1456 rb_thread_t *th = GET_THREAD();
1457 volatile VALUE contval;
1458 const rb_execution_context_t *ec = th->ec;
1459
1460 THREAD_MUST_BE_RUNNING(th);
1461 rb_vm_stack_to_heap(th->ec);
1462 cont = cont_new(rb_cContinuation);
1463 contval = cont->self;
1464
1465#ifdef CAPTURE_JUST_VALID_VM_STACK
1466 cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
1467 cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
1468 cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1469 MEMCPY(cont->saved_vm_stack.ptr,
1470 ec->vm_stack,
1471 VALUE, cont->saved_vm_stack.slen);
1472 MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1473 (VALUE*)ec->cfp,
1474 VALUE,
1475 cont->saved_vm_stack.clen);
1476#else
1477 cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
1478 MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
1479#endif
1480 // At this point, `cfp` is valid but `vm_stack` should be cleared:
1481 rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
1482 VM_ASSERT(cont->saved_ec.cfp != NULL);
1483 cont_save_machine_stack(th, cont);
1484
1485 if (ruby_setjmp(cont->jmpbuf)) {
1486 VALUE value;
1487
1488 VAR_INITIALIZED(cont);
1489 value = cont->value;
1490 if (cont->argc == -1) rb_exc_raise(value);
1491 cont->value = Qnil;
1492 *stat = 1;
1493 return value;
1494 }
1495 else {
1496 *stat = 0;
1497 return contval;
1498 }
1499}
1500
1501static inline void
1502cont_restore_thread(rb_context_t *cont)
1503{
1504 rb_thread_t *th = GET_THREAD();
1505
1506 /* restore thread context */
1507 if (cont->type == CONTINUATION_CONTEXT) {
1508 /* continuation */
1509 rb_execution_context_t *sec = &cont->saved_ec;
1510 rb_fiber_t *fiber = NULL;
1511
1512 if (sec->fiber_ptr != NULL) {
1513 fiber = sec->fiber_ptr;
1514 }
1515 else if (th->root_fiber) {
1516 fiber = th->root_fiber;
1517 }
1518
1519 if (fiber && th->ec != &fiber->cont.saved_ec) {
1520 ec_switch(th, fiber);
1521 }
1522
1523 if (th->ec->trace_arg != sec->trace_arg) {
1524 rb_raise(rb_eRuntimeError, "can't call across trace_func");
1525 }
1526
1527#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
1528 if (th->ec->tag != sec->tag) {
1529 /* find the lowest common ancestor tag of the current EC and the saved EC */
1530
1531 struct rb_vm_tag *lowest_common_ancestor = NULL;
1532 size_t num_tags = 0;
1533 size_t num_saved_tags = 0;
1534 for (struct rb_vm_tag *tag = th->ec->tag; tag != NULL; tag = tag->prev) {
1535 ++num_tags;
1536 }
1537 for (struct rb_vm_tag *tag = sec->tag; tag != NULL; tag = tag->prev) {
1538 ++num_saved_tags;
1539 }
1540
1541 size_t min_tags = num_tags <= num_saved_tags ? num_tags : num_saved_tags;
1542
1543 struct rb_vm_tag *tag = th->ec->tag;
1544 while (num_tags > min_tags) {
1545 tag = tag->prev;
1546 --num_tags;
1547 }
1548
1549 struct rb_vm_tag *saved_tag = sec->tag;
1550 while (num_saved_tags > min_tags) {
1551 saved_tag = saved_tag->prev;
1552 --num_saved_tags;
1553 }
1554
1555 while (min_tags > 0) {
1556 if (tag == saved_tag) {
1557 lowest_common_ancestor = tag;
1558 break;
1559 }
1560 tag = tag->prev;
1561 saved_tag = saved_tag->prev;
1562 --min_tags;
1563 }
1564
1565 /* free all the jump buffers between the current EC's tag and the lowest common ancestor tag */
1566 for (struct rb_vm_tag *tag = th->ec->tag; tag != lowest_common_ancestor; tag = tag->prev) {
1567 rb_vm_tag_jmpbuf_deinit(&tag->buf);
1568 }
1569 }
1570#endif
1571
1572 /* copy vm stack */
1573#ifdef CAPTURE_JUST_VALID_VM_STACK
1574 MEMCPY(th->ec->vm_stack,
1575 cont->saved_vm_stack.ptr,
1576 VALUE, cont->saved_vm_stack.slen);
1577 MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
1578 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1579 VALUE, cont->saved_vm_stack.clen);
1580#else
1581 MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
1582#endif
1583 /* other members of ec */
1584
1585 th->ec->cfp = sec->cfp;
1586 th->ec->raised_flag = sec->raised_flag;
1587 th->ec->tag = sec->tag;
1588 th->ec->root_lep = sec->root_lep;
1589 th->ec->root_svar = sec->root_svar;
1590 th->ec->errinfo = sec->errinfo;
1591
1592 VM_ASSERT(th->ec->vm_stack != NULL);
1593 }
1594 else {
1595 /* fiber */
1596 fiber_restore_thread(th, (rb_fiber_t*)cont);
1597 }
1598}
1599
1600NOINLINE(static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));
1601
1602static void
1603fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
1604{
1605 rb_thread_t *th = GET_THREAD();
1606
1607 /* save old_fiber's machine stack - to ensure efficient garbage collection */
1608 if (!FIBER_TERMINATED_P(old_fiber)) {
1609 STACK_GROW_DIR_DETECTION;
1610 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1611 if (STACK_DIR_UPPER(0, 1)) {
1612 old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1613 old_fiber->cont.machine.stack = th->ec->machine.stack_end;
1614 }
1615 else {
1616 old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1617 old_fiber->cont.machine.stack = th->ec->machine.stack_start;
1618 }
1619 }
1620
1621 /* these values are used in rb_gc_mark_machine_context to mark the fiber's stack. */
1622 old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
1623 old_fiber->cont.saved_ec.machine.stack_end = FIBER_TERMINATED_P(old_fiber) ? NULL : th->ec->machine.stack_end;
1624
1625
1626 // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", (void*)old_fiber, old_fiber->stack.base, (void*)new_fiber, new_fiber->stack.base);
1627
1628#if defined(COROUTINE_SANITIZE_ADDRESS)
1629 __sanitizer_start_switch_fiber(FIBER_TERMINATED_P(old_fiber) ? NULL : &old_fiber->context.fake_stack, new_fiber->context.stack_base, new_fiber->context.stack_size);
1630#endif
1631
1632 /* swap machine context */
1633 struct coroutine_context * from = coroutine_transfer(&old_fiber->context, &new_fiber->context);
1634
1635#if defined(COROUTINE_SANITIZE_ADDRESS)
1636 __sanitizer_finish_switch_fiber(old_fiber->context.fake_stack, NULL, NULL);
1637#endif
1638
1639 if (from == NULL) {
1640 rb_syserr_fail(errno, "coroutine_transfer");
1641 }
1642
1643 /* restore thread context */
1644 fiber_restore_thread(th, old_fiber);
1645
1646 // It's possible that new_fiber has already been freed by the time we get here.
1647 // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", (void*)old_fiber, old_fiber->stack.base, (void*)new_fiber, new_fiber->stack.base);
1648}
1649
1650NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
1651
1652static void
1653cont_restore_1(rb_context_t *cont)
1654{
1655 cont_restore_thread(cont);
1656
1657 /* restore machine stack */
1658#if (defined(_M_AMD64) && !defined(__MINGW64__)) || defined(_M_ARM64)
1659 {
1660 /* workaround for x64 and arm64 SEH on Windows */
1661 jmp_buf buf;
1662 setjmp(buf);
1663 _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
1664 bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
1665 }
1666#endif
1667 if (cont->machine.stack_src) {
1668 FLUSH_REGISTER_WINDOWS;
1669 MEMCPY(cont->machine.stack_src, cont->machine.stack,
1670 VALUE, cont->machine.stack_size);
1671 }
1672
1673 ruby_longjmp(cont->jmpbuf, 1);
1674}
1675
1676NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
1677
1678static void
1679cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
1680{
1681 if (cont->machine.stack_src) {
1682#ifdef HAVE_ALLOCA
1683#define STACK_PAD_SIZE 1
1684#else
1685#define STACK_PAD_SIZE 1024
1686#endif
1687 VALUE space[STACK_PAD_SIZE];
1688
1689#if !STACK_GROW_DIRECTION
1690 if (addr_in_prev_frame > &space[0]) {
1691 /* Stack grows downward */
1692#endif
1693#if STACK_GROW_DIRECTION <= 0
1694 volatile VALUE *const end = cont->machine.stack_src;
1695 if (&space[0] > end) {
1696# ifdef HAVE_ALLOCA
1697 volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
1698 // We need to make sure that the stack pointer is moved,
1699 // but some compilers may remove the allocation by optimization.
1700 // We hope that the following read/write will prevent such an optimization.
1701 *sp = Qfalse;
1702 space[0] = *sp;
1703# else
1704 cont_restore_0(cont, &space[0]);
1705# endif
1706 }
1707#endif
1708#if !STACK_GROW_DIRECTION
1709 }
1710 else {
1711 /* Stack grows upward */
1712#endif
1713#if STACK_GROW_DIRECTION >= 0
1714 volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
1715 if (&space[STACK_PAD_SIZE] < end) {
1716# ifdef HAVE_ALLOCA
1717 volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
1718 space[0] = *sp;
1719# else
1720 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
1721# endif
1722 }
1723#endif
1724#if !STACK_GROW_DIRECTION
1725 }
1726#endif
1727 }
1728 cont_restore_1(cont);
1729}
1730
1731/*
1732 * Document-class: Continuation
1733 *
1734 * Continuation objects are generated by Kernel#callcc,
1735 * after having +require+d <i>continuation</i>. They hold
1736 * a return address and execution context, allowing a nonlocal return
1737 * to the end of the #callcc block from anywhere within a
1738 * program. Continuations are somewhat analogous to a structured
1739 * version of C's <code>setjmp/longjmp</code> (although they contain
1740 * more state, so you might consider them closer to threads).
1741 *
1742 * For instance:
1743 *
1744 * require "continuation"
1745 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1746 * callcc{|cc| $cc = cc}
1747 * puts(message = arr.shift)
1748 * $cc.call unless message =~ /Max/
1749 *
1750 * <em>produces:</em>
1751 *
1752 * Freddie
1753 * Herbie
1754 * Ron
1755 * Max
1756 *
1757 * You can also call callcc from other methods:
1758 *
1759 * require "continuation"
1760 *
1761 * def g
1762 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1763 * cc = callcc { |cc| cc }
1764 * puts arr.shift
1765 * return cc, arr.size
1766 * end
1767 *
1768 * def f
1769 * c, size = g
1770 * c.call(c) if size > 1
1771 * end
1772 *
1773 * f
1774 *
1775 * This (somewhat contrived) example allows the inner loop to abandon
1776 * processing early:
1777 *
1778 * require "continuation"
1779 * callcc {|cont|
1780 * for i in 0..4
1781 * print "#{i}: "
1782 * for j in i*5...(i+1)*5
1783 * cont.call() if j == 17
1784 * printf "%3d", j
1785 * end
1786 * end
1787 * }
1788 * puts
1789 *
1790 * <em>produces:</em>
1791 *
1792 * 0: 0 1 2 3 4
1793 * 1: 5 6 7 8 9
1794 * 2: 10 11 12 13 14
1795 * 3: 15 16
1796 */
1797
1798/*
1799 * call-seq:
1800 * callcc {|cont| block } -> obj
1801 *
1802 * Generates a Continuation object, which it passes to
1803 * the associated block. You need to <code>require
1804 * 'continuation'</code> before using this method. Performing a
1805 * <em>cont</em><code>.call</code> will cause the #callcc
1806 * to return (as will falling through the end of the block). The
1807 * value returned by the #callcc is the value of the
1808 * block, or the value passed to <em>cont</em><code>.call</code>. See
1809 * class Continuation for more details. Also see
1810 * Kernel#throw for an alternative mechanism for
1811 * unwinding a call stack.
1812 */
1813
1814static VALUE
1815rb_callcc(VALUE self)
1816{
1817 volatile int called;
1818 volatile VALUE val = cont_capture(&called);
1819
1820 if (called) {
1821 return val;
1822 }
1823 else {
1824 return rb_yield(val);
1825 }
1826}
1827#ifdef RUBY_ASAN_ENABLED
1828/* callcc can't possibly work with ASAN; see bug #20273. Also this function
1829 * definition below avoids a "defined and not used" warning. */
1830MAYBE_UNUSED(static void notusing_callcc(void)) { rb_callcc(Qnil); }
1831# define rb_callcc rb_f_notimplement
1832#endif
1833
1834
1835static VALUE
1836make_passing_arg(int argc, const VALUE *argv)
1837{
1838 switch (argc) {
1839 case -1:
1840 return argv[0];
1841 case 0:
1842 return Qnil;
1843 case 1:
1844 return argv[0];
1845 default:
1846 return rb_ary_new4(argc, argv);
1847 }
1848}
1849
1850typedef VALUE e_proc(VALUE);
1851
1852NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));
1853
1854/*
1855 * call-seq:
1856 * cont.call(args, ...)
1857 * cont[args, ...]
1858 *
1859 * Invokes the continuation. The program continues from the end of
1860 * the #callcc block. If no arguments are given, the original #callcc
1861 * returns +nil+. If one argument is given, #callcc returns
1862 * it. Otherwise, an array containing <i>args</i> is returned.
1863 *
1864 * callcc {|cont| cont.call } #=> nil
1865 * callcc {|cont| cont.call 1 } #=> 1
1866 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1867 */
1868
1869static VALUE
1870rb_cont_call(int argc, VALUE *argv, VALUE contval)
1871{
1872 rb_context_t *cont = cont_ptr(contval);
1873 rb_thread_t *th = GET_THREAD();
1874
1875 if (cont_thread_value(cont) != th->self) {
1876 rb_raise(rb_eRuntimeError, "continuation called across threads");
1877 }
1878 if (cont->saved_ec.fiber_ptr) {
1879 if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
1880 rb_raise(rb_eRuntimeError, "continuation called across fiber");
1881 }
1882 }
1883
1884 cont->argc = argc;
1885 cont->value = make_passing_arg(argc, argv);
1886
1887 cont_restore_0(cont, &contval);
1888 UNREACHABLE_RETURN(Qnil);
1889}
1890
1891/*********/
1892/* fiber */
1893/*********/
1894
1895/*
1896 * Document-class: Fiber
1897 *
1898 * Fibers are primitives for implementing lightweight cooperative
1899 * concurrency in Ruby. Basically they are a means of creating code blocks
1900 * that can be paused and resumed, much like threads. The main difference
1901 * is that they are never preempted and that the scheduling must be done by
1902 * the programmer and not the VM.
1903 *
1904 * Unlike other stackless lightweight concurrency models, each fiber
1905 * comes with a stack. This enables the fiber to be paused from deeply
1906 * nested function calls within the fiber block. See the ruby(1)
1907 * manpage to configure the size of the fiber stack(s).
1908 *
1909 * When a fiber is created, it will not run automatically. Rather, it must
1910 * be explicitly asked to run using the Fiber#resume method.
1911 * The code running inside the fiber can give up control by calling
1912 * Fiber.yield, in which case it yields control back to the caller (the
1913 * caller of the Fiber#resume).
1914 *
1915 * Upon yielding or termination, the Fiber returns the value of the last
1916 * executed expression.
1917 *
1918 * For instance:
1919 *
1920 * fiber = Fiber.new do
1921 * Fiber.yield 1
1922 * 2
1923 * end
1924 *
1925 * puts fiber.resume
1926 * puts fiber.resume
1927 * puts fiber.resume
1928 *
1929 * <em>produces</em>
1930 *
1931 * 1
1932 * 2
1933 * FiberError: dead fiber called
1934 *
1935 * The Fiber#resume method accepts an arbitrary number of parameters.
1936 * If it is the first call to #resume, they will be passed as
1937 * block arguments. Otherwise they will be the return value of the
1938 * call to Fiber.yield.
1939 *
1940 * Example:
1941 *
1942 * fiber = Fiber.new do |first|
1943 * second = Fiber.yield first + 2
1944 * end
1945 *
1946 * puts fiber.resume 10
1947 * puts fiber.resume 1_000_000
1948 * puts fiber.resume "The fiber will be dead before I can cause trouble"
1949 *
1950 * <em>produces</em>
1951 *
1952 * 12
1953 * 1000000
1954 * FiberError: dead fiber called
1955 *
1956 * == Non-blocking Fibers
1957 *
1958 * The concept of <em>non-blocking fiber</em> was introduced in Ruby 3.0.
1959 * A non-blocking fiber, when reaching an operation that would normally block
1960 * the fiber (like <code>sleep</code>, or waiting for another process or I/O),
1961 * will yield control to other fibers and allow the <em>scheduler</em> to
1962 * handle blocking and waking up (resuming) this fiber when it can proceed.
1963 *
1964 * For a Fiber to behave as non-blocking, it needs to be created in Fiber.new with
1965 * <tt>blocking: false</tt> (which is the default), and Fiber.scheduler
1966 * should be set with Fiber.set_scheduler. If Fiber.scheduler is not set in
1967 * the current thread, blocking and non-blocking fibers' behavior is identical.
1968 *
1969 * Ruby doesn't provide a scheduler class: it is expected to be implemented by
1970 * the user and correspond to Fiber::Scheduler.
1971 *
1972 * There is also the Fiber.schedule method, which is expected to immediately perform
1973 * the given block in a non-blocking manner. Its actual implementation is up to
1974 * the scheduler.
1975 *
1976 */
1977
1978static const rb_data_type_t fiber_data_type = {
1979 "fiber",
1980 {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
1981 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1982};
1983
1984static VALUE
1985fiber_alloc(VALUE klass)
1986{
1987 return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1988}
1989
1990static rb_fiber_t*
1991fiber_t_alloc(VALUE fiber_value, unsigned int blocking)
1992{
1993 rb_fiber_t *fiber;
1994 rb_thread_t *th = GET_THREAD();
1995
1996 if (DATA_PTR(fiber_value) != 0) {
1997 rb_raise(rb_eRuntimeError, "cannot initialize twice");
1998 }
1999
2000 THREAD_MUST_BE_RUNNING(th);
2001 fiber = ZALLOC(rb_fiber_t);
2002 fiber->cont.self = fiber_value;
2003 fiber->cont.type = FIBER_CONTEXT;
2004 fiber->blocking = blocking;
2005 fiber->killed = 0;
2006 cont_init(&fiber->cont, th);
2007
2008 fiber->cont.saved_ec.fiber_ptr = fiber;
2009 rb_ec_clear_vm_stack(&fiber->cont.saved_ec);
2010
2011 fiber->prev = NULL;
2012
2013 /* fiber->status == 0 == CREATED
2014 * So that we don't need to set status: fiber_status_set(fiber, FIBER_CREATED); */
2015 VM_ASSERT(FIBER_CREATED_P(fiber));
2016
2017 DATA_PTR(fiber_value) = fiber;
2018
2019 return fiber;
2020}
2021
2022static rb_fiber_t *
2023root_fiber_alloc(rb_thread_t *th)
2024{
2025 VALUE fiber_value = fiber_alloc(rb_cFiber);
2026 rb_fiber_t *fiber = th->ec->fiber_ptr;
2027
2028 VM_ASSERT(DATA_PTR(fiber_value) == NULL);
2029 VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
2030 VM_ASSERT(FIBER_RESUMED_P(fiber));
2031
2032 th->root_fiber = fiber;
2033 DATA_PTR(fiber_value) = fiber;
2034 fiber->cont.self = fiber_value;
2035
2036 coroutine_initialize_main(&fiber->context);
2037
2038 return fiber;
2039}
2040
2041static inline rb_fiber_t*
2042fiber_current(void)
2043{
2044 rb_execution_context_t *ec = GET_EC();
2045 if (ec->fiber_ptr->cont.self == 0) {
2046 root_fiber_alloc(rb_ec_thread_ptr(ec));
2047 }
2048 return ec->fiber_ptr;
2049}
2050
2051static inline VALUE
2052current_fiber_storage(void)
2053{
2054 rb_execution_context_t *ec = GET_EC();
2055 return ec->storage;
2056}
2057
2058static inline VALUE
2059inherit_fiber_storage(void)
2060{
2061 return rb_obj_dup(current_fiber_storage());
2062}
2063
2064static inline void
2065fiber_storage_set(struct rb_fiber_struct *fiber, VALUE storage)
2066{
2067 fiber->cont.saved_ec.storage = storage;
2068}
2069
2070static inline VALUE
2071fiber_storage_get(rb_fiber_t *fiber, int allocate)
2072{
2073 VALUE storage = fiber->cont.saved_ec.storage;
2074 if (storage == Qnil && allocate) {
2075 storage = rb_hash_new();
2076 fiber_storage_set(fiber, storage);
2077 }
2078 return storage;
2079}
2080
2081static void
2082storage_access_must_be_from_same_fiber(VALUE self)
2083{
2084 rb_fiber_t *fiber = fiber_ptr(self);
2085 rb_fiber_t *current = fiber_current();
2086 if (fiber != current) {
2087 rb_raise(rb_eArgError, "Fiber storage can only be accessed from the Fiber it belongs to");
2088 }
2089}
2090
2097static VALUE
2098rb_fiber_storage_get(VALUE self)
2099{
2100 storage_access_must_be_from_same_fiber(self);
2101
2102 VALUE storage = fiber_storage_get(fiber_ptr(self), FALSE);
2103
2104 if (storage == Qnil) {
2105 return Qnil;
2106 }
2107 else {
2108 return rb_obj_dup(storage);
2109 }
2110}
2111
2112static int
2113fiber_storage_validate_each(VALUE key, VALUE value, VALUE _argument)
2114{
2115 Check_Type(key, T_SYMBOL);
2116
2117 return ST_CONTINUE;
2118}
2119
2120static void
2121fiber_storage_validate(VALUE value)
2122{
2123 // nil is an allowed value and will be lazily initialized.
2124 if (value == Qnil) return;
2125
2126 if (!RB_TYPE_P(value, T_HASH)) {
2127 rb_raise(rb_eTypeError, "storage must be a hash");
2128 }
2129
2130 if (RB_OBJ_FROZEN(value)) {
2131 rb_raise(rb_eFrozenError, "storage must not be frozen");
2132 }
2133
2134 rb_hash_foreach(value, fiber_storage_validate_each, Qundef);
2135}
2136
2159static VALUE
2160rb_fiber_storage_set(VALUE self, VALUE value)
2161{
2162 if (rb_warning_category_enabled_p(RB_WARN_CATEGORY_EXPERIMENTAL)) {
2164 "Fiber#storage= is experimental and may be removed in the future!");
2165 }
2166
2167 storage_access_must_be_from_same_fiber(self);
2168 fiber_storage_validate(value);
2169
2170 fiber_ptr(self)->cont.saved_ec.storage = rb_obj_dup(value);
2171 return value;
2172}
2173
2184static VALUE
2185rb_fiber_storage_aref(VALUE class, VALUE key)
2186{
2187 key = rb_to_symbol(key);
2188
2189 VALUE storage = fiber_storage_get(fiber_current(), FALSE);
2190 if (storage == Qnil) return Qnil;
2191
2192 return rb_hash_aref(storage, key);
2193}
2194
2205static VALUE
2206rb_fiber_storage_aset(VALUE class, VALUE key, VALUE value)
2207{
2208 key = rb_to_symbol(key);
2209
2210 VALUE storage = fiber_storage_get(fiber_current(), value != Qnil);
2211 if (storage == Qnil) return Qnil;
2212
2213 if (value == Qnil) {
2214 return rb_hash_delete(storage, key);
2215 }
2216 else {
2217 return rb_hash_aset(storage, key, value);
2218 }
2219}
2220
2221static VALUE
2222fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool, unsigned int blocking, VALUE storage)
2223{
2224 if (storage == Qundef || storage == Qtrue) {
2225 // The default, inherit storage (dup) from the current fiber:
2226 storage = inherit_fiber_storage();
2227 }
2228 else /* nil, hash, etc. */ {
2229 fiber_storage_validate(storage);
2230 storage = rb_obj_dup(storage);
2231 }
2232
2233 rb_fiber_t *fiber = fiber_t_alloc(self, blocking);
2234
2235 fiber->cont.saved_ec.storage = storage;
2236 fiber->first_proc = proc;
2237 fiber->stack.base = NULL;
2238 fiber->stack.pool = fiber_pool;
2239
2240 return self;
2241}
2242
2243static void
2244fiber_prepare_stack(rb_fiber_t *fiber)
2245{
2246 rb_context_t *cont = &fiber->cont;
2247 rb_execution_context_t *sec = &cont->saved_ec;
2248
2249 size_t vm_stack_size = 0;
2250 VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);
2251
2252 /* initialize cont */
2253 cont->saved_vm_stack.ptr = NULL;
2254 rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));
2255
2256 sec->tag = NULL;
2257 sec->local_storage = NULL;
2258 sec->local_storage_recursive_hash = Qnil;
2259 sec->local_storage_recursive_hash_for_trace = Qnil;
2260}
2261
2262static struct fiber_pool *
2263rb_fiber_pool_default(VALUE pool)
2264{
2265 return &shared_fiber_pool;
2266}
2267
2268VALUE rb_fiber_inherit_storage(struct rb_execution_context_struct *ec, struct rb_fiber_struct *fiber)
2269{
2270 VALUE storage = rb_obj_dup(ec->storage);
2271 fiber->cont.saved_ec.storage = storage;
2272 return storage;
2273}
2274
2275/* :nodoc: */
2276static VALUE
2277rb_fiber_initialize_kw(int argc, VALUE* argv, VALUE self, int kw_splat)
2278{
2279 VALUE pool = Qnil;
2280 VALUE blocking = Qfalse;
2281 VALUE storage = Qundef;
2282
2283 if (kw_splat != RB_NO_KEYWORDS) {
2284 VALUE options = Qnil;
2285 VALUE arguments[3] = {Qundef};
2286
2287 argc = rb_scan_args_kw(kw_splat, argc, argv, ":", &options);
2288 rb_get_kwargs(options, fiber_initialize_keywords, 0, 3, arguments);
2289
2290 if (!UNDEF_P(arguments[0])) {
2291 blocking = arguments[0];
2292 }
2293
2294 if (!UNDEF_P(arguments[1])) {
2295 pool = arguments[1];
2296 }
2297
2298 storage = arguments[2];
2299 }
2300
2301 return fiber_initialize(self, rb_block_proc(), rb_fiber_pool_default(pool), RTEST(blocking), storage);
2302}
2303
2304/*
2305 * call-seq:
2306 * Fiber.new(blocking: false, storage: true) { |*args| ... } -> fiber
2307 *
2308 * Creates a new Fiber. Initially, the fiber is not running and can be resumed
2309 * with #resume. Arguments to the first #resume call will be passed to the
2310 * block:
2311 *
2312 * f = Fiber.new do |initial|
2313 * current = initial
2314 * loop do
2315 * puts "current: #{current.inspect}"
2316 * current = Fiber.yield
2317 * end
2318 * end
2319 * f.resume(100) # prints: current: 100
2320 * f.resume(1, 2, 3) # prints: current: [1, 2, 3]
2321 * f.resume # prints: current: nil
2322 * # ... and so on ...
2323 *
2324 * If <tt>blocking: false</tt> is passed to <tt>Fiber.new</tt>, _and_ the current
2325 * thread has a Fiber.scheduler defined, the Fiber becomes non-blocking (see
2326 * "Non-blocking Fibers" section in class docs).
2327 *
2328 * If the <tt>storage</tt> is unspecified, the default is to inherit a copy of
2329 * the storage from the current fiber. This is the same as specifying
2330 * <tt>storage: true</tt>.
2331 *
2332 * Fiber[:x] = 1
2333 * Fiber.new do
2334 * Fiber[:x] # => 1
2335 * Fiber[:x] = 2
2336 * end.resume
2337 * Fiber[:x] # => 1
2338 *
2339 * If the given <tt>storage</tt> is <tt>nil</tt>, this function will lazily
2340 * initialize the internal storage, which starts as an empty hash.
2341 *
2342 * Fiber[:x] = "Hello World"
2343 * Fiber.new(storage: nil) do
2344 * Fiber[:x] # nil
2345 * end
2346 *
2347 * Otherwise, the given <tt>storage</tt> is used as the new fiber's storage,
2348 * and it must be an instance of Hash.
2349 *
2350 * Explicitly using <tt>storage: true</tt> is currently experimental and may
2351 * change in the future.
2352 */
2353static VALUE
2354rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
2355{
2356 return rb_fiber_initialize_kw(argc, argv, self, rb_keyword_given_p());
2357}
2358
2359VALUE
2360rb_fiber_new_storage(rb_block_call_func_t func, VALUE obj, VALUE storage)
2361{
2362 return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 0, storage);
2363}
2364
2365VALUE
2366rb_fiber_new(rb_block_call_func_t func, VALUE obj)
2367{
2368 return rb_fiber_new_storage(func, obj, Qtrue);
2369}
2370
2371static VALUE
2372rb_fiber_s_schedule_kw(int argc, VALUE* argv, int kw_splat)
2373{
2374 rb_thread_t * th = GET_THREAD();
2375 VALUE scheduler = th->scheduler;
2376 VALUE fiber = Qnil;
2377
2378 if (scheduler != Qnil) {
2379 fiber = rb_fiber_scheduler_fiber(scheduler, argc, argv, kw_splat);
2380 }
2381 else {
2382 rb_raise(rb_eRuntimeError, "No scheduler is available!");
2383 }
2384
2385 return fiber;
2386}
2387
2388/*
2389 * call-seq:
2390 * Fiber.schedule { |*args| ... } -> fiber
2391 *
2392 * The method is <em>expected</em> to immediately run the provided block of code in a
2393 * separate non-blocking fiber.
2394 *
2395 * puts "Go to sleep!"
2396 *
2397 * Fiber.set_scheduler(MyScheduler.new)
2398 *
2399 * Fiber.schedule do
2400 * puts "Going to sleep"
2401 * sleep(1)
2402 * puts "I slept well"
2403 * end
2404 *
2405 * puts "Wakey-wakey, sleepyhead"
2406 *
2407 * Assuming MyScheduler is properly implemented, this program will produce:
2408 *
2409 * Go to sleep!
2410 * Going to sleep
2411 * Wakey-wakey, sleepyhead
2412 * ...1 sec pause here...
2413 * I slept well
2414 *
2415 * ...that is, on the first blocking operation inside the Fiber (<tt>sleep(1)</tt>),
2416 * control is yielded to the outside code (the main fiber), and <em>at the end
2417 * of that execution</em>, the scheduler takes care of properly resuming all the
2418 * blocked fibers.
2419 *
2420 * Note that the behavior described above is how the method is <em>expected</em>
2421 * to behave; actual behavior is up to the current scheduler's implementation of
2422 * the Fiber::Scheduler#fiber method. Ruby doesn't require this method to
2423 * behave in any particular way.
2424 *
2425 * If the scheduler is not set, the method raises
2426 * <tt>RuntimeError (No scheduler is available!)</tt>.
2427 *
2428 */
2429static VALUE
2430rb_fiber_s_schedule(int argc, VALUE *argv, VALUE obj)
2431{
2432 return rb_fiber_s_schedule_kw(argc, argv, rb_keyword_given_p());
2433}
2434
2435/*
2436 * call-seq:
2437 * Fiber.scheduler -> obj or nil
2438 *
2439 * Returns the Fiber scheduler that was last set for the current thread with Fiber.set_scheduler.
2440 * Returns +nil+ if no scheduler is set (which is the default); in that case, non-blocking fibers
2441 * behave the same as blocking ones.
2442 * (See the "Non-blocking fibers" section in class docs for details about the scheduler concept.)
2443 *
2444 */
2445static VALUE
2446rb_fiber_s_scheduler(VALUE klass)
2447{
2448 return rb_fiber_scheduler_get();
2449}
2450
2451/*
2452 * call-seq:
2453 * Fiber.current_scheduler -> obj or nil
2454 *
2455 * Returns the Fiber scheduler that was last set for the current thread with Fiber.set_scheduler,
2456 * if and only if the current fiber is non-blocking.
2457 *
2458 */
2459static VALUE
2460rb_fiber_current_scheduler(VALUE klass)
2461{
2462 return rb_fiber_scheduler_current();
2463}
2464
2465/*
2466 * call-seq:
2467 * Fiber.set_scheduler(scheduler) -> scheduler
2468 *
2469 * Sets the Fiber scheduler for the current thread. If the scheduler is set, non-blocking
2470 * fibers (created by Fiber.new with <tt>blocking: false</tt>, or by Fiber.schedule)
2471 * call that scheduler's hook methods on potentially blocking operations, and the current
2472 * thread will call the scheduler's +close+ method on finalization (allowing the scheduler to
2473 * properly manage all non-finished fibers).
2474 *
2475 * +scheduler+ can be an object of any class corresponding to Fiber::Scheduler. Its
2476 * implementation is up to the user.
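 *
 * For example (+MyScheduler+ below stands for any user-provided Fiber::Scheduler
 * implementation; it is not part of the standard library):
 *
 *    Thread.new do
 *      Fiber.set_scheduler(MyScheduler.new)
 *      Fiber.schedule do
 *        sleep 1 # handled by the scheduler's hooks instead of blocking the thread
 *      end
 *    end.join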
2477 *
2478 * See also the "Non-blocking fibers" section in class docs.
2479 *
2480 */
2481static VALUE
2482rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)
2483{
2484 return rb_fiber_scheduler_set(scheduler);
2485}
2486
2487NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err));
2488
2489void
2490rb_fiber_start(rb_fiber_t *fiber)
2491{
2492 rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;
2493
2494 rb_proc_t *proc;
2495 enum ruby_tag_type state;
2496
2497 VM_ASSERT(th->ec == GET_EC());
2498 VM_ASSERT(FIBER_RESUMED_P(fiber));
2499
2500 if (fiber->blocking) {
2501 th->blocking += 1;
2502 }
2503
2504 EC_PUSH_TAG(th->ec);
2505 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2506 rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
2507 int argc;
2508 const VALUE *argv, args = cont->value;
2509 GetProcPtr(fiber->first_proc, proc);
2510 argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
2511 cont->value = Qnil;
2512 th->ec->errinfo = Qnil;
2513 th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
2514 th->ec->root_svar = Qfalse;
2515
2516 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
2517 cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, cont->kw_splat, VM_BLOCK_HANDLER_NONE);
2518 }
2519 EC_POP_TAG();
2520
2521 int need_interrupt = TRUE;
2522 VALUE err = Qfalse;
2523 if (state) {
2524 err = th->ec->errinfo;
2525 VM_ASSERT(FIBER_RESUMED_P(fiber));
2526
2527 if (state == TAG_RAISE) {
2528 // noop...
2529 }
2530 else if (state == TAG_FATAL && err == RUBY_FATAL_FIBER_KILLED) {
2531 need_interrupt = FALSE;
2532 err = Qfalse;
2533 }
2534 else if (state == TAG_FATAL) {
2535 rb_threadptr_pending_interrupt_enque(th, err);
2536 }
2537 else {
2538 err = rb_vm_make_jump_tag_but_local_jump(state, err);
2539 }
2540 }
2541
2542 rb_fiber_terminate(fiber, need_interrupt, err);
2543}
2544
2545// Set up a "root fiber", which is the fiber that every Ractor has.
2546void
2547rb_threadptr_root_fiber_setup(rb_thread_t *th)
2548{
2549 rb_fiber_t *fiber = ruby_mimcalloc(1, sizeof(rb_fiber_t));
2550 if (!fiber) {
2551 rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
2552 }
2553 fiber->cont.type = FIBER_CONTEXT;
2554 fiber->cont.saved_ec.fiber_ptr = fiber;
2555 fiber->cont.saved_ec.thread_ptr = th;
2556 fiber->blocking = 1;
2557 fiber->killed = 0;
2558 fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
2559 th->ec = &fiber->cont.saved_ec;
2560 cont_init_jit_cont(&fiber->cont);
2561}
2562
2563void
2564rb_threadptr_root_fiber_release(rb_thread_t *th)
2565{
2566 if (th->root_fiber) {
2567 /* ignore. A root fiber object will free th->ec */
2568 }
2569 else {
2570 rb_execution_context_t *ec = rb_current_execution_context(false);
2571
2572 VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
2573 VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
2574
2575 if (ec && th->ec == ec) {
2576 rb_ractor_set_current_ec(th->ractor, NULL);
2577 }
2578 fiber_free(th->ec->fiber_ptr);
2579 th->ec = NULL;
2580 }
2581}
2582
2583void
2584rb_threadptr_root_fiber_terminate(rb_thread_t *th)
2585{
2586 rb_fiber_t *fiber = th->ec->fiber_ptr;
2587
2588 fiber->status = FIBER_TERMINATED;
2589
2590 // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
2591 rb_ec_clear_vm_stack(th->ec);
2592}
2593
2594static inline rb_fiber_t*
2595return_fiber(bool terminate)
2596{
2597 rb_fiber_t *fiber = fiber_current();
2598 rb_fiber_t *prev = fiber->prev;
2599
2600 if (prev) {
2601 fiber->prev = NULL;
2602 prev->resuming_fiber = NULL;
2603 return prev;
2604 }
2605 else {
2606 if (!terminate) {
2607 rb_raise(rb_eFiberError, "attempt to yield on a not resumed fiber");
2608 }
2609
2610 rb_thread_t *th = GET_THREAD();
2611 rb_fiber_t *root_fiber = th->root_fiber;
2612
2613 VM_ASSERT(root_fiber != NULL);
2614
2615 // search resuming fiber
2616 for (fiber = root_fiber; fiber->resuming_fiber; fiber = fiber->resuming_fiber) {
2617 }
2618
2619 return fiber;
2620 }
2621}
2622
2623VALUE
2624rb_fiber_current(void)
2625{
2626 return fiber_current()->cont.self;
2627}
2628
2629// Prepare to execute next_fiber on the given thread.
2630static inline void
2631fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
2632{
2633 rb_fiber_t *fiber;
2634
2635 if (th->ec->fiber_ptr != NULL) {
2636 fiber = th->ec->fiber_ptr;
2637 }
2638 else {
2639 /* create root fiber */
2640 fiber = root_fiber_alloc(th);
2641 }
2642
2643 if (FIBER_CREATED_P(next_fiber)) {
2644 fiber_prepare_stack(next_fiber);
2645 }
2646
2647 VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
2648 VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));
2649
2650 if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);
2651
2652 fiber_status_set(next_fiber, FIBER_RESUMED);
2653 fiber_setcontext(next_fiber, fiber);
2654}
2655
2656static void
2657fiber_check_killed(rb_fiber_t *fiber)
2658{
2659 VM_ASSERT(fiber == fiber_current());
2660
2661 if (fiber->killed) {
2662 rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;
2663
2664 thread->ec->errinfo = RUBY_FATAL_FIBER_KILLED;
2665 EC_JUMP_TAG(thread->ec, RUBY_TAG_FATAL);
2666 }
2667}
2668
2669static inline VALUE
2670fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fiber_t *resuming_fiber, bool yielding)
2671{
2672 VALUE value;
2673 rb_context_t *cont = &fiber->cont;
2674 rb_thread_t *th = GET_THREAD();
2675
2676 /* make sure the root_fiber object is available */
2677 if (th->root_fiber == NULL) root_fiber_alloc(th);
2678
2679 if (th->ec->fiber_ptr == fiber) {
2680 /* ignore fiber context switch
2681 * because destination fiber is the same as current fiber
2682 */
2683 return make_passing_arg(argc, argv);
2684 }
2685
2686 if (cont_thread_value(cont) != th->self) {
2687 rb_raise(rb_eFiberError, "fiber called across threads");
2688 }
2689
2690 if (FIBER_TERMINATED_P(fiber)) {
2691 value = rb_exc_new2(rb_eFiberError, "dead fiber called");
2692
2693 if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
2694 rb_exc_raise(value);
2695 VM_UNREACHABLE(fiber_switch);
2696 }
2697 else {
2698 /* th->ec->fiber_ptr is also dead => switch to root fiber */
2699 /* (this means we're being called from rb_fiber_terminate, */
2700 /* and the terminated fiber's return_fiber() is already dead) */
2701 VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
2702
2703 cont = &th->root_fiber->cont;
2704 cont->argc = -1;
2705 cont->value = value;
2706
2707 fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
2708
2709 VM_UNREACHABLE(fiber_switch);
2710 }
2711 }
2712
2713 VM_ASSERT(FIBER_RUNNABLE_P(fiber));
2714
2715 rb_fiber_t *current_fiber = fiber_current();
2716
2717 VM_ASSERT(!current_fiber->resuming_fiber);
2718
2719 if (resuming_fiber) {
2720 current_fiber->resuming_fiber = resuming_fiber;
2721 fiber->prev = fiber_current();
2722 fiber->yielding = 0;
2723 }
2724
2725 VM_ASSERT(!current_fiber->yielding);
2726 if (yielding) {
2727 current_fiber->yielding = 1;
2728 }
2729
2730 if (current_fiber->blocking) {
2731 th->blocking -= 1;
2732 }
2733
2734 cont->argc = argc;
2735 cont->kw_splat = kw_splat;
2736 cont->value = make_passing_arg(argc, argv);
2737
2738 fiber_store(fiber, th);
2739
2740 // We cannot free the stack until the pthread is joined:
2741#ifndef COROUTINE_PTHREAD_CONTEXT
2742 if (resuming_fiber && FIBER_TERMINATED_P(fiber)) {
2743 fiber_stack_release(fiber);
2744 }
2745#endif
2746
2747 if (fiber_current()->blocking) {
2748 th->blocking += 1;
2749 }
2750
2751 RUBY_VM_CHECK_INTS(th->ec);
2752
2753 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
2754
2755 current_fiber = th->ec->fiber_ptr;
2756 value = current_fiber->cont.value;
2757
2758 fiber_check_killed(current_fiber);
2759
2760 if (current_fiber->cont.argc == -1) {
2761 // Fiber#raise will trigger this path.
2762 rb_exc_raise(value);
2763 }
2764
2765 return value;
2766}
2767
2768VALUE
2769rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
2770{
2771 return fiber_switch(fiber_ptr(fiber_value), argc, argv, RB_NO_KEYWORDS, NULL, false);
2772}
2773
2774/*
2775 * call-seq:
2776 * fiber.blocking? -> true or false
2777 *
2778 * Returns +true+ if +fiber+ is blocking and +false+ otherwise.
2779 * A fiber is non-blocking if it was created via passing <tt>blocking: false</tt>
2780 * to Fiber.new, or via Fiber.schedule.
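 *
 * For example:
 *
 *    Fiber.new(blocking: true) { }.blocking?  #=> true
 *    Fiber.new { }.blocking?                  #=> false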
2781 *
2782 * Note that, even if the method returns +false+, the fiber behaves differently
2783 * only if Fiber.scheduler is set in the current thread.
2784 *
2785 * See the "Non-blocking fibers" section in class docs for details.
2786 *
2787 */
2788VALUE
2789rb_fiber_blocking_p(VALUE fiber)
2790{
2791 return RBOOL(fiber_ptr(fiber)->blocking);
2792}
2793
2794static VALUE
2795fiber_blocking_yield(VALUE fiber_value)
2796{
2797 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2798 rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;
2799
2800 VM_ASSERT(fiber->blocking == 0);
2801
2802 // fiber->blocking is `unsigned int : 1`, so we use it as a boolean:
2803 fiber->blocking = 1;
2804
2805 // Once the fiber is blocking, and current, we increment the thread blocking state:
2806 th->blocking += 1;
2807
2808 return rb_yield(fiber_value);
2809}
2810
2811static VALUE
2812fiber_blocking_ensure(VALUE fiber_value)
2813{
2814 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2815 rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;
2816
2817 // We are no longer blocking:
2818 fiber->blocking = 0;
2819 th->blocking -= 1;
2820
2821 return Qnil;
2822}
2823
2824/*
2825 * call-seq:
2826 * Fiber.blocking{|fiber| ...} -> result
2827 *
2828 * Forces the fiber to be blocking for the duration of the block. Returns the
2829 * result of the block.
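 *
 * For example:
 *
 *    Fiber.new(blocking: false) do
 *      Fiber.current.blocking?     #=> false
 *      Fiber.blocking do |fiber|
 *        fiber.blocking?           #=> true
 *      end
 *    end.resume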
2830 *
2831 * See the "Non-blocking fibers" section in class docs for details.
2832 *
2833 */
2834VALUE
2835rb_fiber_blocking(VALUE class)
2836{
2837 VALUE fiber_value = rb_fiber_current();
2838 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2839
2840 // If we are already blocking, this is essentially a no-op:
2841 if (fiber->blocking) {
2842 return rb_yield(fiber_value);
2843 }
2844 else {
2845 return rb_ensure(fiber_blocking_yield, fiber_value, fiber_blocking_ensure, fiber_value);
2846 }
2847}
2848
2849/*
2850 * call-seq:
2851 * Fiber.blocking? -> false or 1
2852 *
2853 * Returns +false+ if the current fiber is non-blocking.
2854 * A fiber is non-blocking if it was created via passing <tt>blocking: false</tt>
2855 * to Fiber.new, or via Fiber.schedule.
2856 *
2857 * If the current Fiber is blocking, the method returns 1.
2858 * Future developments may allow for situations where larger integers
2859 * could be returned.
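 *
 * For example, on the main (blocking) fiber and in a non-blocking fiber:
 *
 *    Fiber.blocking?                                        #=> 1
 *    Fiber.new(blocking: false) { Fiber.blocking? }.resume  #=> false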
2860 *
2861 * Note that, even if the method returns +false+, Fiber behaves differently
2862 * only if Fiber.scheduler is set in the current thread.
2863 *
2864 * See the "Non-blocking fibers" section in class docs for details.
2865 *
2866 */
2867static VALUE
2868rb_fiber_s_blocking_p(VALUE klass)
2869{
2870 rb_thread_t *thread = GET_THREAD();
2871 unsigned blocking = thread->blocking;
2872
2873 if (blocking == 0)
2874 return Qfalse;
2875
2876 return INT2NUM(blocking);
2877}
2878
2879void
2880rb_fiber_close(rb_fiber_t *fiber)
2881{
2882 fiber_status_set(fiber, FIBER_TERMINATED);
2883}
2884
2885static void
2886rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE error)
2887{
2888 VALUE value = fiber->cont.value;
2889
2890 VM_ASSERT(FIBER_RESUMED_P(fiber));
2891 rb_fiber_close(fiber);
2892
2893 fiber->cont.machine.stack = NULL;
2894 fiber->cont.machine.stack_size = 0;
2895
2896 rb_fiber_t *next_fiber = return_fiber(true);
2897
2898 if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
2899
2900 if (RTEST(error))
2901 fiber_switch(next_fiber, -1, &error, RB_NO_KEYWORDS, NULL, false);
2902 else
2903 fiber_switch(next_fiber, 1, &value, RB_NO_KEYWORDS, NULL, false);
2904 ruby_stop(0);
2905}
2906
2907static VALUE
2908fiber_resume_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)
2909{
2910 rb_fiber_t *current_fiber = fiber_current();
2911
2912 if (argc == -1 && FIBER_CREATED_P(fiber)) {
2913 rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
2914 }
2915 else if (FIBER_TERMINATED_P(fiber)) {
2916 rb_raise(rb_eFiberError, "attempt to resume a terminated fiber");
2917 }
2918 else if (fiber == current_fiber) {
2919 rb_raise(rb_eFiberError, "attempt to resume the current fiber");
2920 }
2921 else if (fiber->prev != NULL) {
2922 rb_raise(rb_eFiberError, "attempt to resume a resumed fiber (double resume)");
2923 }
2924 else if (fiber->resuming_fiber) {
2925 rb_raise(rb_eFiberError, "attempt to resume a resuming fiber");
2926 }
2927 else if (fiber->prev == NULL &&
2928 (!fiber->yielding && fiber->status != FIBER_CREATED)) {
2929 rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");
2930 }
2931
2932 return fiber_switch(fiber, argc, argv, kw_splat, fiber, false);
2933}
2934
2935VALUE
2936rb_fiber_resume_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)
2937{
2938 return fiber_resume_kw(fiber_ptr(self), argc, argv, kw_splat);
2939}
2940
2941VALUE
2942rb_fiber_resume(VALUE self, int argc, const VALUE *argv)
2943{
2944 return fiber_resume_kw(fiber_ptr(self), argc, argv, RB_NO_KEYWORDS);
2945}
2946
2947VALUE
2948rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
2949{
2950 return fiber_switch(return_fiber(false), argc, argv, kw_splat, NULL, true);
2951}
2952
2953VALUE
2954rb_fiber_yield(int argc, const VALUE *argv)
2955{
2956 return fiber_switch(return_fiber(false), argc, argv, RB_NO_KEYWORDS, NULL, true);
2957}
2958
2959void
2960rb_fiber_reset_root_local_storage(rb_thread_t *th)
2961{
2962 if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
2963 th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
2964 }
2965}
2966
2967/*
2968 * call-seq:
2969 * fiber.alive? -> true or false
2970 *
2971 * Returns +true+ if the fiber can still be resumed (or transferred
2972 * to). After finishing execution of the fiber block, this method will
2973 * always return +false+.
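 *
 * For example:
 *
 *    fiber = Fiber.new { Fiber.yield }
 *    fiber.alive? #=> true
 *    fiber.resume
 *    fiber.resume
 *    fiber.alive? #=> false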
2974 */
2975VALUE
2976rb_fiber_alive_p(VALUE fiber_value)
2977{
2978 return RBOOL(!FIBER_TERMINATED_P(fiber_ptr(fiber_value)));
2979}
2980
2981/*
2982 * call-seq:
2983 * fiber.resume(args, ...) -> obj
2984 *
2985 * Resumes the fiber from the point at which the last Fiber.yield was
2986 * called, or starts running it if it is the first call to
2987 * #resume. Arguments passed to resume will be the value of the
2988 * Fiber.yield expression or will be passed as block parameters to
2989 * the fiber's block if this is the first #resume.
2990 *
2991 * Alternatively, when resume is called, it evaluates to the arguments passed
2992 * to the next Fiber.yield statement inside the fiber's block,
2993 * or to the block value if it runs to completion without any
2994 * Fiber.yield.
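 *
 * For example:
 *
 *    fiber = Fiber.new do |a, b|
 *      Fiber.yield a + b
 *      "done"
 *    end
 *
 *    fiber.resume(1, 2) #=> 3
 *    fiber.resume       #=> "done"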
2995 */
2996static VALUE
2997rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
2998{
2999 return rb_fiber_resume_kw(fiber, argc, argv, rb_keyword_given_p());
3000}
3001
3002/*
3003 * call-seq:
3004 * fiber.backtrace -> array
3005 * fiber.backtrace(start) -> array
3006 * fiber.backtrace(start, count) -> array
3007 * fiber.backtrace(start..end) -> array
3008 *
3009 * Returns the current execution stack of the fiber. +start+, +count+ and +end+ allow
3010 * you to select only parts of the backtrace.
3011 *
3012 * def level3
3013 * Fiber.yield
3014 * end
3015 *
3016 * def level2
3017 * level3
3018 * end
3019 *
3020 * def level1
3021 * level2
3022 * end
3023 *
3024 * f = Fiber.new { level1 }
3025 *
3026 * # It is empty before the fiber started
3027 * f.backtrace
3028 * #=> []
3029 *
3030 * f.resume
3031 *
3032 * f.backtrace
3033 * #=> ["test.rb:2:in `yield'", "test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'", "test.rb:13:in `block in <main>'"]
3034 * p f.backtrace(1) # start from the item 1
3035 * #=> ["test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'", "test.rb:13:in `block in <main>'"]
3036 * p f.backtrace(2, 2) # start from item 2, take 2
3037 * #=> ["test.rb:6:in `level2'", "test.rb:10:in `level1'"]
3038 * p f.backtrace(1..3) # take items from 1 to 3
3039 * #=> ["test.rb:2:in `level3'", "test.rb:6:in `level2'", "test.rb:10:in `level1'"]
3040 *
3041 * f.resume
3042 *
3043 * # It is nil after the fiber is finished
3044 * f.backtrace
3045 * #=> nil
3046 *
3047 */
3048static VALUE
3049rb_fiber_backtrace(int argc, VALUE *argv, VALUE fiber)
3050{
3051 return rb_vm_backtrace(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
3052}
3053
3054/*
3055 * call-seq:
3056 * fiber.backtrace_locations -> array
3057 * fiber.backtrace_locations(start) -> array
3058 * fiber.backtrace_locations(start, count) -> array
3059 * fiber.backtrace_locations(start..end) -> array
3060 *
3061 * Like #backtrace, but returns each line of the execution stack as a
3062 * Thread::Backtrace::Location. Accepts the same arguments as #backtrace.
3063 *
3064 * f = Fiber.new { Fiber.yield }
3065 * f.resume
3066 * loc = f.backtrace_locations.first
3067 * loc.label #=> "yield"
3068 * loc.path #=> "test.rb"
3069 * loc.lineno #=> 1
3070 *
3071 *
3072 */
3073static VALUE
3074rb_fiber_backtrace_locations(int argc, VALUE *argv, VALUE fiber)
3075{
3076 return rb_vm_backtrace_locations(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
3077}
3078
3079/*
3080 * call-seq:
3081 * fiber.transfer(args, ...) -> obj
3082 *
3083 * Transfer control to another fiber, resuming it from where it last
3084 * stopped or starting it if it was not resumed before. The calling
3085 * fiber will be suspended much like in a call to
3086 * Fiber.yield.
3087 *
3088 * The fiber which receives the transfer call treats it much like
3089 * a resume call. Arguments passed to transfer are treated like those
3090 * passed to resume.
3091 *
3092 * The two styles of control passing to and from a fiber (one is #resume and
3093 * Fiber::yield, another is #transfer to and from a fiber) can't be freely
3094 * mixed.
3095 *
3096 * * If the Fiber's lifecycle had started with transfer, it will never
3097 * be able to yield or be resumed; it can only
3098 * finish or transfer back. (It still can resume other fibers that
3099 * are allowed to be resumed.)
3100 * * If the Fiber's lifecycle had started with resume, it can yield
3101 * or transfer to another Fiber, but can receive control back only
3102 * in a way compatible with the way it was given away: if it had
3103 * transferred, it can only be transferred back, and if it had
3104 * yielded, it can only be resumed back. After that, it again can
3105 * transfer or yield.
3106 *
3107 * If those rules are broken, FiberError is raised.
3108 *
3109 * For an individual Fiber design, yield/resume is easier to use
3110 * (the Fiber just gives away control; it doesn't need to think
3111 * about who the control is given to), while transfer is more flexible
3112 * for complex cases, allowing arbitrary graphs of Fibers
3113 * dependent on each other to be built.
3114 *
3115 *
3116 * Example:
3117 *
3118 * manager = nil # For local var to be visible inside worker block
3119 *
3120 * # This fiber would be started with transfer
3121 * # It can't yield, and can't be resumed
3122 * worker = Fiber.new { |work|
3123 * puts "Worker: starts"
3124 * puts "Worker: Performed #{work.inspect}, transferring back"
3125 * # Fiber.yield # this would raise FiberError: attempt to yield on a not resumed fiber
3126 * # manager.resume # this would raise FiberError: attempt to resume a resumed fiber (double resume)
3127 * manager.transfer(work.capitalize)
3128 * }
3129 *
3130 * # This fiber would be started with resume
3131 * # It can yield or transfer, and can be transferred
3132 * # back or resumed
3133 * manager = Fiber.new {
3134 * puts "Manager: starts"
3135 * puts "Manager: transferring 'something' to worker"
3136 * result = worker.transfer('something')
3137 * puts "Manager: worker returned #{result.inspect}"
3138 * # worker.resume # this would raise FiberError: attempt to resume a transferring fiber
3139 * Fiber.yield # this is OK, the fiber transferred from and to, now it can yield
3140 * puts "Manager: finished"
3141 * }
3142 *
3143 * puts "Starting the manager"
3144 * manager.resume
3145 * puts "Resuming the manager"
3146 * # manager.transfer # this would raise FiberError: attempt to transfer to a yielding fiber
3147 * manager.resume
3148 *
3149 * <em>produces</em>
3150 *
3151 * Starting the manager
3152 * Manager: starts
3153 * Manager: transferring 'something' to worker
3154 * Worker: starts
3155 * Worker: Performed "something", transferring back
3156 * Manager: worker returned "Something"
3157 * Resuming the manager
3158 * Manager: finished
3159 *
3160 */
3161static VALUE
3162rb_fiber_m_transfer(int argc, VALUE *argv, VALUE self)
3163{
3164 return rb_fiber_transfer_kw(self, argc, argv, rb_keyword_given_p());
3165}
3166
3167static VALUE
3168fiber_transfer_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)
3169{
3170 if (fiber->resuming_fiber) {
3171 rb_raise(rb_eFiberError, "attempt to transfer to a resuming fiber");
3172 }
3173
3174 if (fiber->yielding) {
3175 rb_raise(rb_eFiberError, "attempt to transfer to a yielding fiber");
3176 }
3177
3178 return fiber_switch(fiber, argc, argv, kw_splat, NULL, false);
3179}
3180
3181VALUE
3182rb_fiber_transfer_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)
3183{
3184 return fiber_transfer_kw(fiber_ptr(self), argc, argv, kw_splat);
3185}
3186
3187/*
3188 * call-seq:
3189 * Fiber.yield(args, ...) -> obj
3190 *
3191 * Yields control back to the context that resumed the fiber, passing
3192 * along any arguments that were passed to it. The fiber will resume
3193 * processing at this point when #resume is called next.
3194 * Any arguments passed to the next #resume will be the value that
3195 * this Fiber.yield expression evaluates to.
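 *
 *  For example:
 *
 *    fiber = Fiber.new do |first|
 *      second = Fiber.yield(first + 1)
 *      second + 1
 *    end
 *
 *    fiber.resume(1) #=> 2
 *    fiber.resume(5) #=> 6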
3196 */
3197static VALUE
3198rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
3199{
3200 return rb_fiber_yield_kw(argc, argv, rb_keyword_given_p());
3201}
3202
3203static VALUE
3204fiber_raise(rb_fiber_t *fiber, VALUE exception)
3205{
3206 if (fiber == fiber_current()) {
3207 rb_exc_raise(exception);
3208 }
3209 else if (fiber->resuming_fiber) {
3210 return fiber_raise(fiber->resuming_fiber, exception);
3211 }
3212 else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {
3213 return fiber_transfer_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
3214 }
3215 else {
3216 return fiber_resume_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
3217 }
3218}
3219
3220VALUE
3221rb_fiber_raise(VALUE fiber, int argc, const VALUE *argv)
3222{
3223 VALUE exception = rb_make_exception(argc, argv);
3224
3225 return fiber_raise(fiber_ptr(fiber), exception);
3226}
3227
3228/*
3229 * call-seq:
3230 * fiber.raise -> obj
3231 * fiber.raise(string) -> obj
3232 * fiber.raise(exception [, string [, array]]) -> obj
3233 *
3234 * Raises an exception in the fiber at the point at which the last
3235 * +Fiber.yield+ was called. If the fiber has not been started or has
3236 * already run to completion, raises +FiberError+. If the fiber is
3237 * yielding, it is resumed. If it is transferring, it is transferred into.
3238 * But if it is resuming, raises +FiberError+.
3239 *
3240 * With no arguments, raises a +RuntimeError+. With a single +String+
3241 * argument, raises a +RuntimeError+ with the string as a message. Otherwise,
3242 * the first parameter should be the name of an +Exception+ class (or an
3243 * object that returns an +Exception+ object when sent an +exception+
3244 * message). The optional second parameter sets the message associated with
3245 * the exception, and the third parameter is an array of callback information.
3246 * Exceptions are caught by the +rescue+ clause of <code>begin...end</code>
3247 * blocks.
3248 *
3249 * Raises +FiberError+ if called on a Fiber belonging to another +Thread+.
3250 *
3251 * See Kernel#raise for more information.
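 *
 * For example:
 *
 *    fiber = Fiber.new { Fiber.yield }
 *    fiber.resume
 *    fiber.raise "oops"  # raises RuntimeError ("oops") at the Fiber.yield
 *                        # inside the fiber, and it propagates out of #raise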
3252 */
3253static VALUE
3254rb_fiber_m_raise(int argc, VALUE *argv, VALUE self)
3255{
3256 return rb_fiber_raise(self, argc, argv);
3257}
3258
3259/*
3260 * call-seq:
3261 * fiber.kill -> nil
3262 *
3263 * Terminates the fiber by raising an uncatchable exception.
3264 * It only terminates the given fiber and no other fiber, returning +nil+ to
3265 * another fiber if that fiber was calling #resume or #transfer.
3266 *
3267 * <tt>Fiber#kill</tt> only interrupts another fiber when it is in Fiber.yield.
3268 * If called on the current fiber then it raises that exception at the <tt>Fiber#kill</tt> call site.
3269 *
3270 * If the fiber has not been started, it transitions directly to the terminated state.
3271 *
3272 * If the fiber is already terminated, does nothing.
3273 *
3274 * Raises FiberError if called on a fiber belonging to another thread.
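 *
 * For example:
 *
 *    fiber = Fiber.new { Fiber.yield }
 *    fiber.resume        # runs until Fiber.yield
 *    fiber.kill          # terminates the fiber
 *    fiber.alive?        #=> false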
3275 */
3276static VALUE
3277rb_fiber_m_kill(VALUE self)
3278{
3279 rb_fiber_t *fiber = fiber_ptr(self);
3280
3281 if (fiber->killed) return Qfalse;
3282 fiber->killed = 1;
3283
3284 if (fiber->status == FIBER_CREATED) {
3285 fiber->status = FIBER_TERMINATED;
3286 }
3287 else if (fiber->status != FIBER_TERMINATED) {
3288 if (fiber_current() == fiber) {
3289 fiber_check_killed(fiber);
3290 }
3291 else {
3292 fiber_raise(fiber_ptr(self), Qnil);
3293 }
3294 }
3295
3296 return self;
3297}
3298
3299/*
3300 * call-seq:
3301 * Fiber.current -> fiber
3302 *
3303 * Returns the current fiber. If you are not running in the context of
3304 * a fiber, this method will return the root fiber.
3305 */
3306static VALUE
3307rb_fiber_s_current(VALUE klass)
3308{
3309 return rb_fiber_current();
3310}
3311
3312static VALUE
3313fiber_to_s(VALUE fiber_value)
3314{
3315 const rb_fiber_t *fiber = fiber_ptr(fiber_value);
3316 const rb_proc_t *proc;
3317 char status_info[0x20];
3318
3319 if (fiber->resuming_fiber) {
3320 snprintf(status_info, 0x20, " (%s by resuming)", fiber_status_name(fiber->status));
3321 }
3322 else {
3323 snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
3324 }
3325
3326 if (!rb_obj_is_proc(fiber->first_proc)) {
3327 VALUE str = rb_any_to_s(fiber_value);
3328 strlcat(status_info, ">", sizeof(status_info));
3329 rb_str_set_len(str, RSTRING_LEN(str)-1);
3330 rb_str_cat_cstr(str, status_info);
3331 return str;
3332 }
3333 GetProcPtr(fiber->first_proc, proc);
3334 return rb_block_to_s(fiber_value, &proc->block, status_info);
3335}
3336
3337#ifdef HAVE_WORKING_FORK
3338void
3339rb_fiber_atfork(rb_thread_t *th)
3340{
3341 if (th->root_fiber) {
3342 if (&th->root_fiber->cont.saved_ec != th->ec) {
3343 th->root_fiber = th->ec->fiber_ptr;
3344 }
3345 th->root_fiber->prev = 0;
3346 }
3347}
3348#endif
3349
3350#ifdef RB_EXPERIMENTAL_FIBER_POOL
3351static void
3352fiber_pool_free(void *ptr)
3353{
3354 struct fiber_pool * fiber_pool = ptr;
3355 RUBY_FREE_ENTER("fiber_pool");
3356
3357 fiber_pool_allocation_free(fiber_pool->allocations);
3358 ruby_xfree(fiber_pool);
3359
3360 RUBY_FREE_LEAVE("fiber_pool");
3361}
3362
3363static size_t
3364fiber_pool_memsize(const void *ptr)
3365{
3366 const struct fiber_pool * fiber_pool = ptr;
3367 size_t size = sizeof(*fiber_pool);
3368
3369 size += fiber_pool->count * fiber_pool->size;
3370
3371 return size;
3372}
3373
3374static const rb_data_type_t FiberPoolDataType = {
3375 "fiber_pool",
3376 {NULL, fiber_pool_free, fiber_pool_memsize,},
3377 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3378};
3379
3380static VALUE
3381fiber_pool_alloc(VALUE klass)
3382{
3383 struct fiber_pool *fiber_pool;
3384
3385 return TypedData_Make_Struct(klass, struct fiber_pool, &FiberPoolDataType, fiber_pool);
3386}
3387
3388static VALUE
3389rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
3390{
3391 rb_thread_t *th = GET_THREAD();
3392 VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
3393 struct fiber_pool * fiber_pool = NULL;
3394
3395 // Maybe these should be keyword arguments.
3396 rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);
3397
3398 if (NIL_P(size)) {
3399 size = SIZET2NUM(th->vm->default_params.fiber_machine_stack_size);
3400 }
3401
3402 if (NIL_P(count)) {
3403 count = INT2NUM(128);
3404 }
3405
3406 if (NIL_P(vm_stack_size)) {
3407 vm_stack_size = SIZET2NUM(th->vm->default_params.fiber_vm_stack_size);
3408 }
3409
3410 TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);
3411
3412 fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), NUM2SIZET(vm_stack_size));
3413
3414 return self;
3415}
3416#endif
3417
3418/*
3419 * Document-class: FiberError
3420 *
3421 * Raised when an invalid operation is attempted on a Fiber, in
3422 * particular when attempting to call/resume a dead fiber,
3423 * attempting to yield from the root fiber, or calling a fiber across
3424 * threads.
3425 *
3426 * fiber = Fiber.new{}
3427 * fiber.resume #=> nil
3428 * fiber.resume #=> FiberError: dead fiber called
3429 */
3430
3431void
3432Init_Cont(void)
3433{
3434 rb_thread_t *th = GET_THREAD();
3435 size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
3436 size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
3437 size_t stack_size = machine_stack_size + vm_stack_size;
3438
3439#ifdef _WIN32
3440 SYSTEM_INFO info;
3441 GetSystemInfo(&info);
3442 pagesize = info.dwPageSize;
3443#else /* not WIN32 */
3444 pagesize = sysconf(_SC_PAGESIZE);
3445#endif
3446 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
3447
3448 fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);
3449
3450 fiber_initialize_keywords[0] = rb_intern_const("blocking");
3451 fiber_initialize_keywords[1] = rb_intern_const("pool");
3452 fiber_initialize_keywords[2] = rb_intern_const("storage");
3453
3454 const char *fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
3455 if (fiber_shared_fiber_pool_free_stacks) {
3456 shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
3457
3458 if (shared_fiber_pool.free_stacks < 0) {
3459 rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a negative value is not allowed.");
3460 shared_fiber_pool.free_stacks = 0;
3461 }
3462
3463 if (shared_fiber_pool.free_stacks > 1) {
3464 rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a value greater than 1 is operating system specific, and may cause crashes.");
3465 }
3466 }
3467
3468 rb_cFiber = rb_define_class("Fiber", rb_cObject);
3469 rb_define_alloc_func(rb_cFiber, fiber_alloc);
3470 rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
3471 rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
3472 rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
3473 rb_define_singleton_method(rb_cFiber, "blocking", rb_fiber_blocking, 0);
3474 rb_define_singleton_method(rb_cFiber, "[]", rb_fiber_storage_aref, 1);
3475 rb_define_singleton_method(rb_cFiber, "[]=", rb_fiber_storage_aset, 2);
3476
3477 rb_define_method(rb_cFiber, "initialize", rb_fiber_initialize, -1);
3478 rb_define_method(rb_cFiber, "blocking?", rb_fiber_blocking_p, 0);
3479 rb_define_method(rb_cFiber, "storage", rb_fiber_storage_get, 0);
3480 rb_define_method(rb_cFiber, "storage=", rb_fiber_storage_set, 1);
3481 rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
3482 rb_define_method(rb_cFiber, "raise", rb_fiber_m_raise, -1);
3483 rb_define_method(rb_cFiber, "kill", rb_fiber_m_kill, 0);
3484 rb_define_method(rb_cFiber, "backtrace", rb_fiber_backtrace, -1);
3485 rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);
3486 rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
3487 rb_define_alias(rb_cFiber, "inspect", "to_s");
3488 rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
3489 rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
3490
3491 rb_define_singleton_method(rb_cFiber, "blocking?", rb_fiber_s_blocking_p, 0);
3492 rb_define_singleton_method(rb_cFiber, "scheduler", rb_fiber_s_scheduler, 0);
3493 rb_define_singleton_method(rb_cFiber, "set_scheduler", rb_fiber_set_scheduler, 1);
3494 rb_define_singleton_method(rb_cFiber, "current_scheduler", rb_fiber_current_scheduler, 0);
3495
3496 rb_define_singleton_method(rb_cFiber, "schedule", rb_fiber_s_schedule, -1);
3497
3498#ifdef RB_EXPERIMENTAL_FIBER_POOL
3499 /*
3500 * Document-class: Fiber::Pool
3501 * :nodoc: experimental
3502 */
3503 rb_cFiberPool = rb_define_class_under(rb_cFiber, "Pool", rb_cObject);
3504 rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
3505 rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
3506#endif
3507
3508 rb_provide("fiber.so");
3509}
3510
3511RUBY_SYMBOL_EXPORT_BEGIN
3512
3513void
3514ruby_Init_Continuation_body(void)
3515{
3516 rb_cContinuation = rb_define_class("Continuation", rb_cObject);
3517 rb_undef_alloc_func(rb_cContinuation);
3518 rb_undef_method(CLASS_OF(rb_cContinuation), "new");
3519 rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
3520 rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
3521 rb_define_global_function("callcc", rb_callcc, 0);
3522}
3523
3524RUBY_SYMBOL_EXPORT_END