#include "ruby/internal/config.h"

#ifdef NEED_MADVICE_PROTOTYPE_USING_CADDR_T
extern int madvise(caddr_t, size_t, int);
#endif

#include "eval_intern.h"
#include "internal/cont.h"
#include "internal/thread.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/proc.h"
#include "internal/sanitizers.h"
#include "internal/warnings.h"
#include "ractor_core.h"

static const int DEBUG = 0;

#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
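
/*
 * Illustrative aside (not part of cont.c): because pagesize is a power of
 * two, RB_PAGE_MASK clears the low bits, so sizes and addresses can be
 * rounded to page boundaries with plain bit arithmetic.
 */
static inline size_t
example_round_down_to_page(size_t n)
{
    return n & RB_PAGE_MASK;                      /* e.g. 5000 -> 4096 with 4 KiB pages */
}

static inline size_t
example_round_up_to_page(size_t n)
{
    return (n + RB_PAGE_SIZE - 1) & RB_PAGE_MASK; /* e.g. 5000 -> 8192 with 4 KiB pages */
}
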
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;
#ifdef RB_EXPERIMENTAL_FIBER_POOL
static VALUE rb_cFiberPool;
#endif

#define CAPTURE_JUST_VALID_VM_STACK 1

#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
#define FIBER_POOL_ALLOCATION_FREE
#define FIBER_POOL_INITIAL_SIZE 8
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
#else
#define FIBER_POOL_INITIAL_SIZE 32
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
#endif

#ifdef RB_EXPERIMENTAL_FIBER_POOL
#define FIBER_POOL_ALLOCATION_FREE
#endif

    CONTINUATION_CONTEXT = 0,

#ifdef CAPTURE_JUST_VALID_VM_STACK

#ifdef FIBER_POOL_ALLOCATION_FREE

#ifdef FIBER_POOL_ALLOCATION_FREE

#ifdef FIBER_POOL_ALLOCATION_FREE

    size_t initial_count;

    size_t vm_stack_size;

    enum context_type type;

#define FIBER_CREATED_P(fiber)    ((fiber)->status == FIBER_CREATED)
#define FIBER_RESUMED_P(fiber)    ((fiber)->status == FIBER_RESUMED)
#define FIBER_SUSPENDED_P(fiber)  ((fiber)->status == FIBER_SUSPENDED)
#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
#define FIBER_RUNNABLE_P(fiber)   (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))

    BITFIELD(enum fiber_status, status, 2);

    unsigned int yielding : 1;
    unsigned int blocking : 1;

    unsigned int killed : 1;
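
/*
 * Hedged aside (not from cont.c): the four fiber states fit exactly in the
 * 2-bit field declared above, and the one-bit flags pack into the same word.
 * A C11 static assertion makes that size assumption explicit.
 */
#include <assert.h>
static_assert(FIBER_TERMINATED < (1 << 2), "fiber_status must fit in the 2-bit field");
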
 
 
static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};

rb_free_shared_fiber_pool(void)

    while (allocations) {

static ID fiber_initialize_keywords[3] = {0};

#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif

#define ERRNOMSG strerror(errno)

fiber_pool_vacancy_pointer(void * base, size_t size)

    STACK_GROW_DIR_DETECTION;

        (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)

#if defined(COROUTINE_SANITIZE_ADDRESS)

    STACK_GROW_DIR_DETECTION;

    return (char*)stack->base + STACK_DIR_UPPER(RB_PAGE_SIZE, 0);

    return stack->size - RB_PAGE_SIZE;

    STACK_GROW_DIR_DETECTION;

    stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
    stack->available = stack->size;

    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(stack->current);

    return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);

    STACK_GROW_DIR_DETECTION;

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
    VM_ASSERT(stack->available >= offset);

    void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);

    stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
    stack->available -= offset;

    fiber_pool_stack_reset(&vacancy->stack);

    fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);

    vacancy->next = head;

#ifdef FIBER_POOL_ALLOCATION_FREE

        head->previous = vacancy;
        vacancy->previous = NULL;

#ifdef FIBER_POOL_ALLOCATION_FREE

        vacancy->next->previous = vacancy->previous;

    if (vacancy->previous) {
        vacancy->previous->next = vacancy->next;

        vacancy->stack.pool->vacancies = vacancy->next;
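
/*
 * Minimal sketch of the unlink pattern above (names are illustrative, not
 * cont.c's): removing a node from a doubly linked list whose head lives in
 * the pool touches at most three pointers.
 */
struct example_node { struct example_node *next, *previous; };

static void
example_unlink(struct example_node **head, struct example_node *node)
{
    if (node->next) node->next->previous = node->previous;

    if (node->previous) node->previous->next = node->next;
    else *head = node->next; /* node was the head of the list */
}
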
 
fiber_pool_vacancy_pop(struct fiber_pool * pool)

        fiber_pool_vacancy_remove(vacancy);

fiber_pool_vacancy_pop(struct fiber_pool * pool)

        pool->vacancies = vacancy->next;

    vacancy->stack.base = base;
    vacancy->stack.size = size;

    fiber_pool_vacancy_reset(vacancy);

    return fiber_pool_vacancy_push(vacancy, vacancies);

fiber_pool_allocate_memory(size_t * count, size_t stride)

        void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);

            *count = (*count) >> 1;

        size_t mmap_size = (*count)*stride;
        void * base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);

        if (base == MAP_FAILED) {

            *count = (*count) >> 1;

            ruby_annotate_mmap(base, mmap_size, "Ruby:fiber_pool_allocate_memory");

#if defined(MADV_FREE_REUSE)

            while (madvise(base, mmap_size, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
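
/*
 * Hedged sketch of the retry strategy above (not cont.c's code): if a large
 * mapping fails, halve the number of stacks and try again, giving up only
 * when even a single stack cannot be mapped. `stride` is the per-stack
 * footprint including its guard page.
 */
#include <sys/mman.h>

static void *
example_allocate_stacks(size_t *count, size_t stride)
{
    while (*count > 0) {
        void *base = mmap(NULL, (*count) * stride, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANON, -1, 0);
        if (base != MAP_FAILED) return base;
        *count >>= 1; /* back off: ask for half as many stacks */
    }
    return NULL;
}
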
 
        STACK_GROW_DIR_DETECTION;

        size_t stride = size + RB_PAGE_SIZE;

        void * base = fiber_pool_allocate_memory(&count, stride);

            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);

        allocation->base = base;
        allocation->size = size;
        allocation->stride = stride;
        allocation->count = count;
#ifdef FIBER_POOL_ALLOCATION_FREE
        allocation->used = 0;

            fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",

        for (size_t i = 0; i < count; i += 1) {
            void * base = (char*)allocation->base + (stride * i);
            void * page = (char*)base + STACK_DIR_UPPER(size, 0);

            if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
                VirtualFree(allocation->base, 0, MEM_RELEASE);
                rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
#elif defined(__wasi__)

            if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
                munmap(allocation->base, count*stride);
                rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);

            vacancies = fiber_pool_vacancy_initialize(

                (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),

#ifdef FIBER_POOL_ALLOCATION_FREE
            vacancies->stack.allocation = allocation;

#ifdef FIBER_POOL_ALLOCATION_FREE
        if (allocation->next) {
            allocation->next->previous = allocation;

        allocation->previous = NULL;
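
/*
 * Minimal sketch of the guard-page idea above, assuming POSIX mmap/mprotect
 * and a downward-growing stack: one page per stack is made inaccessible so
 * that an overflow faults immediately instead of silently corrupting the
 * neighbouring stack.
 */
#include <sys/mman.h>

static void *
example_guarded_stack(size_t usable_size, size_t page_size)
{
    size_t stride = usable_size + page_size;
    void *base = mmap(NULL, stride, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED) return NULL;

    /* The stack grows down towards base, so revoke access to the lowest page. */
    if (mprotect(base, page_size, PROT_NONE) < 0) {
        munmap(base, stride);
        return NULL;
    }

    return base;
}
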
 
fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)

    VM_ASSERT(vm_stack_size < size);

    fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;

#ifdef FIBER_POOL_ALLOCATION_FREE

    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(allocation->used == 0);

    if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", (void*)allocation, allocation->base, allocation->count);

    for (i = 0; i < allocation->count; i += 1) {
        void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);

        struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);

        fiber_pool_vacancy_remove(vacancy);

    VirtualFree(allocation->base, 0, MEM_RELEASE);

    munmap(allocation->base, allocation->stride * allocation->count);

    if (allocation->previous) {
        allocation->previous->next = allocation->next;

        allocation->pool->allocations = allocation->next;

    if (allocation->next) {
        allocation->next->previous = allocation->previous;

    allocation->pool->count -= allocation->count;

    ruby_xfree(allocation);

        if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);

            const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
            const size_t minimum = fiber_pool->initial_count;

            if (count > maximum) count = maximum;
            if (count < minimum) count = minimum;

        VM_ASSERT(vacancy->stack.base);

#if defined(COROUTINE_SANITIZE_ADDRESS)
        __asan_unpoison_memory_region(fiber_pool_stack_poison_base(&vacancy->stack), fiber_pool_stack_poison_size(&vacancy->stack));

#ifdef FIBER_POOL_ALLOCATION_FREE
        vacancy->stack.allocation->used += 1;

        fiber_pool_stack_reset(&vacancy->stack);

    return vacancy->stack;

    void * base = fiber_pool_stack_base(stack);
    size_t size = stack->available;

    VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));

    int advice = stack->pool->free_stacks >> 1;

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"] advice=%d\n", base, size, stack->base, stack->size, advice);

#elif VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
    if (!advice) advice = MADV_DONTNEED;

    madvise(base, size, advice);
#elif defined(MADV_FREE_REUSABLE)
    if (!advice) advice = MADV_FREE_REUSABLE;

    while (madvise(base, size, advice) == -1 && errno == EAGAIN);
#elif defined(MADV_FREE)
    if (!advice) advice = MADV_FREE;

    madvise(base, size, advice);
#elif defined(MADV_DONTNEED)
    if (!advice) advice = MADV_DONTNEED;

    madvise(base, size, advice);
#elif defined(POSIX_MADV_DONTNEED)
    if (!advice) advice = POSIX_MADV_DONTNEED;

    posix_madvise(base, size, advice);

    VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);

#if defined(COROUTINE_SANITIZE_ADDRESS)
    __asan_poison_memory_region(fiber_pool_stack_poison_base(stack), fiber_pool_stack_poison_size(stack));
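
/*
 * Hedged summary of the #elif cascade above (the flag names are real madvise
 * constants, but availability is OS-specific): prefer advice that releases
 * the pages while keeping the mapping, so a recycled stack needs no fresh
 * mmap. Roughly, MADV_FREE_REUSABLE (macOS) is tried before MADV_FREE
 * (Linux >= 4.5), then MADV_DONTNEED, then the posix_madvise fallback, with
 * VirtualAlloc(MEM_RESET) playing the same role on Windows.
 */
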
 
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);

    vacancy->stack = *stack;

    fiber_pool_vacancy_reset(vacancy);

    pool->vacancies = fiber_pool_vacancy_push(vacancy, pool->vacancies);

#ifdef FIBER_POOL_ALLOCATION_FREE

    allocation->used -= 1;

    if (allocation->used == 0) {
        fiber_pool_allocation_free(allocation);

    else if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);

    if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);

#ifdef RUBY_ASAN_ENABLED
    ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();

    rb_ractor_set_current_ec(th->ractor, th->ec = ec);

    if (th->vm->ractor.main_thread == th &&
        rb_signal_buff_size() > 0) {
        RUBY_VM_SET_TRAP_INTERRUPT(ec);

    VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);

    ec_switch(th, fiber);
    VM_ASSERT(th->ec->fiber_ptr == fiber);

#ifndef COROUTINE_DECL
# define COROUTINE_DECL COROUTINE

#if defined(COROUTINE_SANITIZE_ADDRESS)

    __sanitizer_finish_switch_fiber(to->fake_stack, (const void**)&from->stack_base, &from->stack_size);

    rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;

#ifdef COROUTINE_PTHREAD_CONTEXT
    ruby_thread_set_native(thread);

    fiber_restore_thread(thread, fiber);

    rb_fiber_start(fiber);

#ifndef COROUTINE_PTHREAD_CONTEXT
    VM_UNREACHABLE(fiber_entry);

fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)

    void * vm_stack = NULL;

    fiber->stack = fiber_pool_stack_acquire(fiber_pool);
    vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);

    coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);

    sec->machine.stack_start = fiber->stack.current;
    sec->machine.stack_maxsize = fiber->stack.available;

    fiber->context.argument = (void*)fiber;

    if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);

    if (fiber->stack.base) {
        fiber_pool_stack_release(&fiber->stack);
        fiber->stack.base = NULL;

    rb_ec_clear_vm_stack(ec);

    if (!ruby_vm_during_cleanup) {

        ASSERT_vm_locking_with_barrier();

    fiber_stack_release(fiber);

fiber_status_name(enum fiber_status s)

      case FIBER_CREATED: return "created";
      case FIBER_RESUMED: return "resumed";
      case FIBER_SUSPENDED: return "suspended";
      case FIBER_TERMINATED: return "terminated";

    VM_UNREACHABLE(fiber_status_name);

    VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);

    switch (fiber->status) {

        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);

      case FIBER_SUSPENDED:
        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);

      case FIBER_TERMINATED:

        VM_UNREACHABLE(fiber_verify);

fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)

    VM_ASSERT(!FIBER_TERMINATED_P(fiber));
    VM_ASSERT(fiber->status != s);

    if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");

NOINLINE(static VALUE cont_capture(volatile int *volatile stat));

#define THREAD_MUST_BE_RUNNING(th) do { \
        if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \

    return fiber->cont.saved_ec.thread_ptr;

    return cont->saved_ec.thread_ptr->self;

cont_compact(void *ptr)

        cont->self = rb_gc_location(cont->self);

    cont->value = rb_gc_location(cont->value);
    rb_execution_context_update(&cont->saved_ec);

    RUBY_MARK_ENTER("cont");

        rb_gc_mark_movable(cont->self);

    rb_gc_mark_movable(cont->value);

    rb_execution_context_mark(&cont->saved_ec);
    rb_gc_mark(cont_thread_value(cont));

    if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
        rb_gc_mark_locations(cont->saved_vm_stack.ptr,
                             cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);

        rb_gc_mark_locations(cont->saved_vm_stack.ptr,
                             cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);

    if (cont->machine.stack) {
        if (cont->type == CONTINUATION_CONTEXT) {

            rb_gc_mark_locations(cont->machine.stack,
                                 cont->machine.stack + cont->machine.stack_size);

    RUBY_MARK_LEAVE("cont");

    return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;

static void jit_cont_free(struct rb_jit_cont *cont);

    RUBY_FREE_ENTER("cont");

    if (cont->type == CONTINUATION_CONTEXT) {
        ruby_xfree(cont->saved_ec.vm_stack);
        RUBY_FREE_UNLESS_NULL(cont->machine.stack);

        coroutine_destroy(&fiber->context);
        fiber_stack_release_locked(fiber);

    RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);

    VM_ASSERT(cont->jit_cont != NULL);
    jit_cont_free(cont->jit_cont);

    RUBY_FREE_LEAVE("cont");

cont_memsize(const void *ptr)

    size = sizeof(*cont);
    if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
        size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);

        size_t n = cont->saved_ec.vm_stack_size;

        size += n * sizeof(*cont->saved_vm_stack.ptr);

    if (cont->machine.stack) {
        size += cont->machine.stack_size * sizeof(*cont->machine.stack);

    if (fiber->cont.self) {
        fiber->cont.self = rb_gc_location(fiber->cont.self);

        rb_execution_context_update(&fiber->cont.saved_ec);

    if (fiber->cont.self) {
        rb_gc_mark_movable(fiber->cont.self);

        rb_execution_context_mark(&fiber->cont.saved_ec);

fiber_compact(void *ptr)

    fiber->first_proc = rb_gc_location(fiber->first_proc);

    if (fiber->prev) rb_fiber_update_self(fiber->prev);

    cont_compact(&fiber->cont);
    fiber_verify(fiber);

fiber_mark(void *ptr)

    RUBY_MARK_ENTER("cont");
    fiber_verify(fiber);
    rb_gc_mark_movable(fiber->first_proc);
    if (fiber->prev) rb_fiber_mark_self(fiber->prev);
    cont_mark(&fiber->cont);
    RUBY_MARK_LEAVE("cont");

fiber_free(void *ptr)

    RUBY_FREE_ENTER("fiber");

    if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", (void *)fiber, fiber->stack.base);

    if (fiber->cont.saved_ec.local_storage) {
        rb_id_table_free(fiber->cont.saved_ec.local_storage);

    cont_free(&fiber->cont);
    RUBY_FREE_LEAVE("fiber");

fiber_memsize(const void *ptr)

    size_t size = sizeof(*fiber);

    const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);

    if (saved_ec->local_storage && fiber != th->root_fiber) {
        size += rb_id_table_memsize(saved_ec->local_storage);
        size += rb_obj_memsize_of(saved_ec->storage);

    size += cont_memsize(&fiber->cont);

    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
        size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
        cont->machine.stack_src = th->ec->machine.stack_end;

        size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
        cont->machine.stack_src = th->ec->machine.stack_start;

    if (cont->machine.stack) {

    FLUSH_REGISTER_WINDOWS;
    asan_unpoison_memory_region(cont->machine.stack_src, size, false);
    MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);

    {cont_mark, cont_free, cont_memsize, cont_compact},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY

    VM_ASSERT(th->status == THREAD_RUNNABLE);

    sec->machine.stack_end = NULL;

static rb_nativethread_lock_t jit_cont_lock;

    if (first_jit_cont == NULL) {
        cont->next = cont->prev = NULL;

        cont->next = first_jit_cont;
        first_jit_cont->prev = cont;

    first_jit_cont = cont;

    if (cont == first_jit_cont) {
        first_jit_cont = cont->next;
        if (first_jit_cont != NULL)
            first_jit_cont->prev = NULL;

        cont->prev->next = cont->next;
        if (cont->next != NULL)
            cont->next->prev = cont->prev;

rb_jit_cont_each_iseq(rb_iseq_callback callback, void *data)

    for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
        if (cont->ec->vm_stack == NULL)

        while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
            if (cfp->pc && cfp->iseq && imemo_type((VALUE)cfp->iseq) == imemo_iseq) {
                callback(cfp->iseq, data);

            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

rb_yjit_cancel_jit_return(void *leave_exit, void *leave_exception)

    for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
        if (cont->ec->vm_stack == NULL)

        while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
            if (cfp->jit_return && cfp->jit_return != leave_exception) {

            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

rb_jit_cont_finish(void)

    for (cont = first_jit_cont; cont != NULL; cont = next) {

    VM_ASSERT(cont->jit_cont == NULL);

    cont->jit_cont = jit_cont_new(&(cont->saved_ec));

    return &fiber->cont.saved_ec;

    cont_save_thread(cont, th);
    cont->saved_ec.thread_ptr = th;
    cont->saved_ec.local_storage = NULL;
    cont->saved_ec.local_storage_recursive_hash = Qnil;
    cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
    cont_init_jit_cont(cont);

cont_new(VALUE klass)

    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);

    cont->self = contval;
    cont_init(cont, th);

    return fiber->cont.self;

    return fiber->blocking;

rb_jit_cont_init(void)

    VALUE *p = ec->vm_stack;
    while (p < ec->cfp->sp) {
        fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
        rb_obj_info_dump(*p);

    while (cfp != end_of_cfp) {

            pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;

        fprintf(stderr, "%2d pc: %d\n", i++, pc);
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

cont_capture(volatile int *volatile stat)

    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th->ec);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
 
#ifdef CAPTURE_JUST_VALID_VM_STACK
    cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
    cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
    MEMCPY(cont->saved_vm_stack.ptr,
           ec->vm_stack,
           VALUE, cont->saved_vm_stack.slen);
    MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
           (VALUE*)ec->cfp,
           VALUE,
           cont->saved_vm_stack.clen);

    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
    MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);

    rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
    VM_ASSERT(cont->saved_ec.cfp != NULL);
    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {

        VAR_INITIALIZED(cont);
        value = cont->value;
 
    if (cont->type == CONTINUATION_CONTEXT) {

        if (sec->fiber_ptr != NULL) {
            fiber = sec->fiber_ptr;

        else if (th->root_fiber) {
            fiber = th->root_fiber;

        if (fiber && th->ec != &fiber->cont.saved_ec) {
            ec_switch(th, fiber);

        if (th->ec->trace_arg != sec->trace_arg) {

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
        if (th->ec->tag != sec->tag) {

            struct rb_vm_tag *lowest_common_ancestor = NULL;
            size_t num_tags = 0;
            size_t num_saved_tags = 0;
            for (struct rb_vm_tag *tag = th->ec->tag; tag != NULL; tag = tag->prev) {

            for (struct rb_vm_tag *tag = sec->tag; tag != NULL; tag = tag->prev) {

            size_t min_tags = num_tags <= num_saved_tags ? num_tags : num_saved_tags;

            while (num_tags > min_tags) {

            while (num_saved_tags > min_tags) {
                saved_tag = saved_tag->prev;

            while (min_tags > 0) {
                if (tag == saved_tag) {
                    lowest_common_ancestor = tag;

                saved_tag = saved_tag->prev;

            for (struct rb_vm_tag *tag = th->ec->tag; tag != lowest_common_ancestor; tag = tag->prev) {
                rb_vm_tag_jmpbuf_deinit(&tag->buf);
 
#ifdef CAPTURE_JUST_VALID_VM_STACK
        MEMCPY(th->ec->vm_stack,
               cont->saved_vm_stack.ptr,
               VALUE, cont->saved_vm_stack.slen);
        MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
               cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
               VALUE, cont->saved_vm_stack.clen);

        MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
 
        th->ec->cfp = sec->cfp;
        th->ec->raised_flag = sec->raised_flag;
        th->ec->tag = sec->tag;
        th->ec->root_lep = sec->root_lep;
        th->ec->root_svar = sec->root_svar;
        th->ec->errinfo = sec->errinfo;

        VM_ASSERT(th->ec->vm_stack != NULL);

    if (!FIBER_TERMINATED_P(old_fiber)) {
        STACK_GROW_DIR_DETECTION;
        SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
            old_fiber->cont.machine.stack = th->ec->machine.stack_end;

            old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
            old_fiber->cont.machine.stack = th->ec->machine.stack_start;

    old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
    old_fiber->cont.saved_ec.machine.stack_end = FIBER_TERMINATED_P(old_fiber) ? NULL : th->ec->machine.stack_end;

#if defined(COROUTINE_SANITIZE_ADDRESS)
    __sanitizer_start_switch_fiber(FIBER_TERMINATED_P(old_fiber) ? NULL : &old_fiber->context.fake_stack, new_fiber->context.stack_base, new_fiber->context.stack_size);

    struct coroutine_context * from = coroutine_transfer(&old_fiber->context, &new_fiber->context);

#if defined(COROUTINE_SANITIZE_ADDRESS)
    __sanitizer_finish_switch_fiber(old_fiber->context.fake_stack, NULL, NULL);

    fiber_restore_thread(th, old_fiber);

NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

    cont_restore_thread(cont);

#if (defined(_M_AMD64) && !defined(__MINGW64__)) || defined(_M_ARM64)

        _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
        bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;

    if (cont->machine.stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine.stack_src, cont->machine.stack,
               VALUE, cont->machine.stack_size);

    ruby_longjmp(cont->jmpbuf, 1);
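
/*
 * Toy model of the capture/restore protocol above, using plain <setjmp.h>
 * (cont.c goes through the ruby_setjmp/ruby_longjmp wrappers and restores
 * the saved machine stack first): setjmp returns 0 when the context is
 * captured and non-zero when control re-enters it via longjmp.
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf example_buf;

static void
example_jump_back(void)
{
    longjmp(example_buf, 1); /* resumes execution after the setjmp below */
}

static void
example_capture(void)
{
    if (setjmp(example_buf) == 0) {
        puts("captured");  /* first pass */
        example_jump_back();
    }
    else {
        puts("restored");  /* re-entered via longjmp */
    }
}
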
 
    if (cont->machine.stack_src) {
#define STACK_PAD_SIZE 1

#define STACK_PAD_SIZE 1024

        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {

#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine.stack_src;
            if (&space[0] > end) {

                cont_restore_0(cont, &space[0]);

#if !STACK_GROW_DIRECTION

#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
            if (&space[STACK_PAD_SIZE] < end) {

                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);

#if !STACK_GROW_DIRECTION

    cont_restore_1(cont);
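
/*
 * Toy model of the padding recursion above (illustrative only, assuming a
 * downward-growing stack): each call burns STACK_PAD_SIZE VALUEs of machine
 * stack, recursing until the live frame no longer overlaps the region that
 * cont_restore_1() will overwrite with the saved stack copy.
 */
static void
example_grow_past(rb_context_t *cont, volatile VALUE *target)
{
    volatile VALUE space[1024]; /* same job as space[STACK_PAD_SIZE] above */

    if (&space[0] > target) {
        example_grow_past(cont, target); /* frame still overlaps the target region */
    }

    cont_restore_1(cont); /* now safe to copy the saved machine stack back */
}
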
 
rb_callcc(VALUE self)

    volatile int called;
    volatile VALUE val = cont_capture(&called);

#ifdef RUBY_ASAN_ENABLED

MAYBE_UNUSED(static void notusing_callcc(void)) { rb_callcc(Qnil); }
# define rb_callcc rb_f_notimplement

make_passing_arg(int argc, const VALUE *argv)

NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));

rb_cont_call(int argc, VALUE *argv, VALUE contval)

    if (cont_thread_value(cont) != th->self) {

    if (cont->saved_ec.fiber_ptr) {
        if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {

    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);

    {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY

fiber_alloc(VALUE klass)

fiber_t_alloc(VALUE fiber_value, unsigned int blocking)

    THREAD_MUST_BE_RUNNING(th);

    fiber->cont.self = fiber_value;
    fiber->cont.type = FIBER_CONTEXT;
    fiber->blocking = blocking;

    cont_init(&fiber->cont, th);

    fiber->cont.saved_ec.fiber_ptr = fiber;
    rb_ec_clear_vm_stack(&fiber->cont.saved_ec);

    VM_ASSERT(FIBER_CREATED_P(fiber));

    VALUE fiber_value = fiber_alloc(rb_cFiber);

    VM_ASSERT(DATA_PTR(fiber_value) == NULL);
    VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
    VM_ASSERT(FIBER_RESUMED_P(fiber));

    th->root_fiber = fiber;

    fiber->cont.self = fiber_value;

    coroutine_initialize_main(&fiber->context);

    if (ec->fiber_ptr->cont.self == 0) {
        root_fiber_alloc(rb_ec_thread_ptr(ec));

    return ec->fiber_ptr;

current_fiber_storage(void)

inherit_fiber_storage(void)

    fiber->cont.saved_ec.storage = storage;

fiber_storage_get(rb_fiber_t *fiber, int allocate)

    VALUE storage = fiber->cont.saved_ec.storage;
    if (storage == Qnil && allocate) {
        storage = rb_hash_new();
        fiber_storage_set(fiber, storage);

storage_access_must_be_from_same_fiber(VALUE self)

    if (fiber != current) {
        rb_raise(rb_eArgError, "Fiber storage can only be accessed from the Fiber it belongs to");

rb_fiber_storage_get(VALUE self)

    storage_access_must_be_from_same_fiber(self);

    VALUE storage = fiber_storage_get(fiber_ptr(self), FALSE);

    if (storage == Qnil) {

fiber_storage_validate(VALUE value)

    if (value == Qnil) return;

          "Fiber#storage= is experimental and may be removed in the future!");

    storage_access_must_be_from_same_fiber(self);
    fiber_storage_validate(value);

    fiber_ptr(self)->cont.saved_ec.storage = rb_obj_dup(value);

    VALUE storage = fiber_storage_get(fiber_current(), FALSE);

    return rb_hash_aref(storage, key);

    VALUE storage = fiber_storage_get(fiber_current(), value != Qnil);

    if (value == Qnil) {
        return rb_hash_delete(storage, key);

        return rb_hash_aset(storage, key, value);

        storage = inherit_fiber_storage();

        fiber_storage_validate(storage);

    rb_fiber_t *fiber = fiber_t_alloc(self, blocking);

    fiber->cont.saved_ec.storage = storage;
    fiber->first_proc = proc;
    fiber->stack.base = NULL;

    size_t vm_stack_size = 0;
    VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);

    cont->saved_vm_stack.ptr = NULL;
    rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));

    sec->local_storage = NULL;
    sec->local_storage_recursive_hash = Qnil;
    sec->local_storage_recursive_hash_for_trace = Qnil;

rb_fiber_pool_default(VALUE pool)

    return &shared_fiber_pool;

    fiber->cont.saved_ec.storage = storage;

rb_fiber_initialize_kw(int argc, VALUE* argv, VALUE self, int kw_splat)

        rb_get_kwargs(options, fiber_initialize_keywords, 0, 3, arguments);

        if (!UNDEF_P(arguments[0])) {
            blocking = arguments[0];

        if (!UNDEF_P(arguments[1])) {
            pool = arguments[1];

        storage = arguments[2];

    return fiber_initialize(self, rb_block_proc(), rb_fiber_pool_default(pool), RTEST(blocking), storage);

rb_fiber_initialize(int argc, VALUE* argv, VALUE self)

    return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 0, storage);

    return rb_fiber_new_storage(func, obj, Qtrue);
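
/*
 * Hedged sketch of driving a fiber through the public C API implemented in
 * this file (rb_fiber_new, rb_fiber_resume and rb_fiber_yield are the real
 * entry points; the body and driver below are illustrative).
 */
static VALUE
example_fiber_body(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))
{
    VALUE one = INT2NUM(1);
    rb_fiber_yield(1, &one); /* suspend; the first resume returns 1 */
    return INT2NUM(2);       /* terminating makes the second resume return 2 */
}

static void
example_drive_fiber(void)
{
    VALUE fiber = rb_fiber_new(example_fiber_body, Qnil);
    VALUE first = rb_fiber_resume(fiber, 0, NULL); /* => INT2NUM(1) */
    VALUE last = rb_fiber_resume(fiber, 0, NULL);  /* => INT2NUM(2); the fiber is dead afterwards */
    (void)first;
    (void)last;
}
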
 
 
rb_fiber_s_schedule_kw(int argc, VALUE* argv, int kw_splat)

    VALUE scheduler = th->scheduler;

    if (scheduler != Qnil) {

rb_fiber_s_schedule(int argc, VALUE *argv, VALUE obj)

rb_fiber_s_scheduler(VALUE klass)

rb_fiber_current_scheduler(VALUE klass)

rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)

NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err));

    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;

    enum ruby_tag_type state;

    VM_ASSERT(th->ec == GET_EC());
    VM_ASSERT(FIBER_RESUMED_P(fiber));

    if (fiber->blocking) {

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {

        const VALUE *argv, args = cont->value;
        GetProcPtr(fiber->first_proc, proc);

        th->ec->errinfo = Qnil;
        th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
        th->ec->root_svar = Qfalse;

        cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, cont->kw_splat, VM_BLOCK_HANDLER_NONE);

    int need_interrupt = TRUE;

        err = th->ec->errinfo;
        VM_ASSERT(FIBER_RESUMED_P(fiber));

        if (state == TAG_RAISE) {

        else if (state == TAG_FATAL && err == RUBY_FATAL_FIBER_KILLED) {
            need_interrupt = FALSE;

        else if (state == TAG_FATAL) {
            rb_threadptr_pending_interrupt_enque(th, err);

            err = rb_vm_make_jump_tag_but_local_jump(state, err);

    rb_fiber_terminate(fiber, need_interrupt, err);

        rb_bug("%s", strerror(errno));

    fiber->cont.type = FIBER_CONTEXT;
    fiber->cont.saved_ec.fiber_ptr = fiber;
    fiber->cont.saved_ec.thread_ptr = th;
    fiber->blocking = 1;

    fiber_status_set(fiber, FIBER_RESUMED);
    th->ec = &fiber->cont.saved_ec;
    cont_init_jit_cont(&fiber->cont);

    if (th->root_fiber) {

        VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
        VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);

        if (ec && th->ec == ec) {
            rb_ractor_set_current_ec(th->ractor, NULL);

        fiber_free(th->ec->fiber_ptr);

    fiber->status = FIBER_TERMINATED;

    rb_ec_clear_vm_stack(th->ec);

return_fiber(bool terminate)

        prev->resuming_fiber = NULL;

            rb_raise(rb_eFiberError, "attempt to yield on a not resumed fiber");

        VM_ASSERT(root_fiber != NULL);

        for (fiber = root_fiber; fiber->resuming_fiber; fiber = fiber->resuming_fiber) {

rb_fiber_current(void)

    return fiber_current()->cont.self;

    if (th->ec->fiber_ptr != NULL) {
        fiber = th->ec->fiber_ptr;

        fiber = root_fiber_alloc(th);

    if (FIBER_CREATED_P(next_fiber)) {
        fiber_prepare_stack(next_fiber);

    VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
    VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));

    if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);

    fiber_status_set(next_fiber, FIBER_RESUMED);
    fiber_setcontext(next_fiber, fiber);

    VM_ASSERT(fiber == fiber_current());

    if (fiber->killed) {
        rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;

        thread->ec->errinfo = RUBY_FATAL_FIBER_KILLED;
        EC_JUMP_TAG(thread->ec, RUBY_TAG_FATAL);

    if (th->root_fiber == NULL) root_fiber_alloc(th);

    if (th->ec->fiber_ptr == fiber) {

        return make_passing_arg(argc, argv);

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");

    if (FIBER_TERMINATED_P(fiber)) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");

        if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {

            VM_UNREACHABLE(fiber_switch);

            VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));

            cont = &th->root_fiber->cont;

            cont->value = value;

            fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);

            VM_UNREACHABLE(fiber_switch);

    VM_ASSERT(FIBER_RUNNABLE_P(fiber));

    VM_ASSERT(!current_fiber->resuming_fiber);

    if (resuming_fiber) {
        current_fiber->resuming_fiber = resuming_fiber;
        fiber->prev = fiber_current();
        fiber->yielding = 0;

    VM_ASSERT(!current_fiber->yielding);

        current_fiber->yielding = 1;

    if (current_fiber->blocking) {

    cont->kw_splat = kw_splat;
    cont->value = make_passing_arg(argc, argv);

    fiber_store(fiber, th);

#ifndef COROUTINE_PTHREAD_CONTEXT
    if (resuming_fiber && FIBER_TERMINATED_P(fiber)) {

            fiber_stack_release(fiber);

    if (fiber_current()->blocking) {

    RUBY_VM_CHECK_INTS(th->ec);

    current_fiber = th->ec->fiber_ptr;
    value = current_fiber->cont.value;

    fiber_check_killed(current_fiber);

    if (current_fiber->cont.argc == -1) {

    return fiber_switch(fiber_ptr(fiber_value), argc, argv, RB_NO_KEYWORDS, NULL, false);

rb_fiber_blocking_p(VALUE fiber)

    return RBOOL(fiber_ptr(fiber)->blocking);

fiber_blocking_yield(VALUE fiber_value)

    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;

    VM_ASSERT(fiber->blocking == 0);

    fiber->blocking = 1;

fiber_blocking_ensure(VALUE fiber_value)

    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;

    fiber->blocking = 0;

rb_fiber_blocking(VALUE class)

    VALUE fiber_value = rb_fiber_current();

    if (fiber->blocking) {

        return rb_ensure(fiber_blocking_yield, fiber_value, fiber_blocking_ensure, fiber_value);

rb_fiber_s_blocking_p(VALUE klass)

    unsigned blocking = thread->blocking;

    fiber_status_set(fiber, FIBER_TERMINATED);

    VALUE value = fiber->cont.value;

    VM_ASSERT(FIBER_RESUMED_P(fiber));
    rb_fiber_close(fiber);

    fiber->cont.machine.stack = NULL;
    fiber->cont.machine.stack_size = 0;

    if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);

        fiber_switch(next_fiber, -1, &error, RB_NO_KEYWORDS, NULL, false);

        fiber_switch(next_fiber, 1, &value, RB_NO_KEYWORDS, NULL, false);

fiber_resume_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)

    if (argc == -1 && FIBER_CREATED_P(fiber)) {
        rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");

    else if (FIBER_TERMINATED_P(fiber)) {
        rb_raise(rb_eFiberError, "attempt to resume a terminated fiber");

    else if (fiber == current_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume the current fiber");

    else if (fiber->prev != NULL) {
        rb_raise(rb_eFiberError, "attempt to resume a resumed fiber (double resume)");

    else if (fiber->resuming_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume a resuming fiber");

    else if (fiber->prev == NULL &&
             (!fiber->yielding && fiber->status != FIBER_CREATED)) {
        rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");

    return fiber_switch(fiber, argc, argv, kw_splat, fiber, false);

rb_fiber_resume_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)

    return fiber_resume_kw(fiber_ptr(self), argc, argv, kw_splat);

    return fiber_resume_kw(fiber_ptr(self), argc, argv, RB_NO_KEYWORDS);

rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)

    return fiber_switch(return_fiber(false), argc, argv, kw_splat, NULL, true);

rb_fiber_yield(int argc, const VALUE *argv)

    return fiber_switch(return_fiber(false), argc, argv, RB_NO_KEYWORDS, NULL, true);

    if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
        th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;

    return RBOOL(!FIBER_TERMINATED_P(fiber_ptr(fiber_value)));

rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)

rb_fiber_backtrace(int argc, VALUE *argv, VALUE fiber)

    return rb_vm_backtrace(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);

rb_fiber_backtrace_locations(int argc, VALUE *argv, VALUE fiber)

    return rb_vm_backtrace_locations(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);

rb_fiber_m_transfer(int argc, VALUE *argv, VALUE self)

fiber_transfer_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)

    if (fiber->resuming_fiber) {
        rb_raise(rb_eFiberError, "attempt to transfer to a resuming fiber");

    if (fiber->yielding) {
        rb_raise(rb_eFiberError, "attempt to transfer to a yielding fiber");

    return fiber_switch(fiber, argc, argv, kw_splat, NULL, false);

rb_fiber_transfer_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)

    return fiber_transfer_kw(fiber_ptr(self), argc, argv, kw_splat);

rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)

    if (fiber == fiber_current()) {

    else if (fiber->resuming_fiber) {
        return fiber_raise(fiber->resuming_fiber, exception);

    else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {

    VALUE exception = rb_exception_setup(argc, argv);

    return fiber_raise(fiber_ptr(fiber), exception);

rb_fiber_m_raise(int argc, VALUE *argv, VALUE self)

    return rb_fiber_raise(self, argc, argv);

rb_fiber_m_kill(VALUE self)

    if (fiber->killed) return Qfalse;

    if (fiber->status == FIBER_CREATED) {
        fiber->status = FIBER_TERMINATED;

    else if (fiber->status != FIBER_TERMINATED) {
        if (fiber_current() == fiber) {
            fiber_check_killed(fiber);

            fiber_raise(fiber_ptr(self), Qnil);

rb_fiber_s_current(VALUE klass)

    return rb_fiber_current();

fiber_to_s(VALUE fiber_value)

    const rb_fiber_t *fiber = fiber_ptr(fiber_value);

    char status_info[0x20];

    if (fiber->resuming_fiber) {
        snprintf(status_info, 0x20, " (%s by resuming)", fiber_status_name(fiber->status));

        snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));

        strlcat(status_info, ">", sizeof(status_info));

    GetProcPtr(fiber->first_proc, proc);
    return rb_block_to_s(fiber_value, &proc->block, status_info);

#ifdef HAVE_WORKING_FORK

    if (th->root_fiber) {
        if (&th->root_fiber->cont.saved_ec != th->ec) {
            th->root_fiber = th->ec->fiber_ptr;

        th->root_fiber->prev = 0;

#ifdef RB_EXPERIMENTAL_FIBER_POOL

fiber_pool_free(void *ptr)

    RUBY_FREE_ENTER("fiber_pool");

    fiber_pool_allocation_free(fiber_pool->allocations);

    RUBY_FREE_LEAVE("fiber_pool");

fiber_pool_memsize(const void *ptr)

    size_t size = sizeof(*fiber_pool);

    {NULL, fiber_pool_free, fiber_pool_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY

fiber_pool_alloc(VALUE klass)

rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)

    rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);

        size = SIZET2NUM(th->vm->default_params.fiber_machine_stack_size);

    if (NIL_P(vm_stack_size)) {
        vm_stack_size = SIZET2NUM(th->vm->default_params.fiber_vm_stack_size);

    size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
    size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
    size_t stack_size = machine_stack_size + vm_stack_size;

    GetSystemInfo(&info);
    pagesize = info.dwPageSize;

    pagesize = sysconf(_SC_PAGESIZE);

    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);

    const char *fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
    if (fiber_shared_fiber_pool_free_stacks) {
        shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);

        if (shared_fiber_pool.free_stacks < 0) {
            rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a negative value is not allowed.");
            shared_fiber_pool.free_stacks = 0;

        if (shared_fiber_pool.free_stacks > 1) {
            rb_warn("Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a value greater than 1 is operating system specific, and may cause crashes.");

    rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);

#ifdef RB_EXPERIMENTAL_FIBER_POOL
    rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);

RUBY_SYMBOL_EXPORT_BEGIN

ruby_Init_Continuation_body(void)

RUBY_SYMBOL_EXPORT_END
 