12#include "eval_intern.h"
20#include "internal/thread.h"
23#include "ruby_atomic.h"
26static ID id_scheduler_close;
33static ID id_timeout_after;
34static ID id_kernel_sleep;
35static ID id_process_wait;
37static ID id_io_read, id_io_pread;
38static ID id_io_write, id_io_pwrite;
40static ID id_io_select;
43static ID id_address_resolve;
45static ID id_blocking_operation_wait;
46static ID id_fiber_interrupt;
48static ID id_fiber_schedule;
51static VALUE rb_cFiberSchedulerBlockingOperation;
60 RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED,
61 RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING,
62 RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED,
63 RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED
64} rb_fiber_blocking_operation_status_t;
67 void *(*function)(
void *);
81blocking_operation_memsize(
const void *ptr)
87 "Fiber::Scheduler::BlockingOperation",
91 blocking_operation_memsize,
100blocking_operation_alloc(
VALUE klass)
105 blocking_operation->function = NULL;
106 blocking_operation->data = NULL;
107 blocking_operation->unblock_function = NULL;
108 blocking_operation->data2 = NULL;
109 blocking_operation->flags = 0;
110 blocking_operation->state = NULL;
111 blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;
120get_blocking_operation(
VALUE obj)
124 return blocking_operation;
136blocking_operation_call(
VALUE self)
140 if (blocking_operation->status != RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) {
141 rb_raise(
rb_eRuntimeError,
"Blocking operation has already been executed!");
144 if (blocking_operation->function == NULL) {
145 rb_raise(
rb_eRuntimeError,
"Blocking operation has no function to execute!");
148 if (blocking_operation->state == NULL) {
153 blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING;
156 blocking_operation->state->result =
rb_nogvl(blocking_operation->function, blocking_operation->data,
157 blocking_operation->unblock_function, blocking_operation->data2,
158 blocking_operation->flags);
159 blocking_operation->state->saved_errno = rb_errno();
162 blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED;
180 return get_blocking_operation(self);
195 if (blocking_operation == NULL) {
199 if (blocking_operation->function == NULL || blocking_operation->state == NULL) {
204 rb_thread_resolve_unblock_function(&blocking_operation->unblock_function, &blocking_operation->data2, GET_THREAD());
207 rb_atomic_t expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;
208 if (
RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING) != expected) {
214 blocking_operation->state->result = blocking_operation->function(blocking_operation->data);
215 blocking_operation->state->saved_errno =
errno;
218 expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING;
219 if (
RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED) == expected) {
224 blocking_operation->state->saved_errno = EINTR;
236rb_fiber_scheduler_blocking_operation_new(
void *(*function)(
void *),
void *data,
240 VALUE self = blocking_operation_alloc(rb_cFiberSchedulerBlockingOperation);
243 blocking_operation->function = function;
244 blocking_operation->data = data;
245 blocking_operation->unblock_function = unblock_function;
246 blocking_operation->data2 = data2;
247 blocking_operation->flags = flags;
248 blocking_operation->state = state;
308Init_Fiber_Scheduler(
void)
332 id_blocking_operation_wait =
rb_intern_const(
"blocking_operation_wait");
341 rb_define_method(rb_cFiberSchedulerBlockingOperation,
"call", blocking_operation_call, 0);
344 rb_gc_register_mark_object(rb_cFiberSchedulerBlockingOperation);
358 rb_define_method(rb_cFiberScheduler,
"timeout_after", rb_fiber_scheduler_timeout_after, 3);
377 return thread->scheduler;
381verify_interface(
VALUE scheduler)
384 rb_raise(rb_eArgError,
"Scheduler must implement #block");
388 rb_raise(rb_eArgError,
"Scheduler must implement #unblock");
392 rb_raise(rb_eArgError,
"Scheduler must implement #kernel_sleep");
396 rb_raise(rb_eArgError,
"Scheduler must implement #io_wait");
400 rb_warn(
"Scheduler should implement #fiber_interrupt");
405fiber_scheduler_close(
VALUE scheduler)
411fiber_scheduler_close_ensure(
VALUE _thread)
414 thread->scheduler =
Qnil;
427 if (scheduler !=
Qnil) {
428 verify_interface(scheduler);
435 if (thread->scheduler !=
Qnil) {
437 rb_ensure(fiber_scheduler_close, thread->scheduler, fiber_scheduler_close_ensure, (
VALUE)thread);
440 thread->scheduler = scheduler;
442 return thread->scheduler;
446fiber_scheduler_current_for_threadptr(
rb_thread_t *thread)
450 if (thread->blocking == 0) {
451 return thread->scheduler;
462 return fiber_scheduler_current_for_threadptr(GET_THREAD());
468 return fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread));
473 return fiber_scheduler_current_for_threadptr(thread);
500 if (!UNDEF_P(result))
return result;
503 if (!UNDEF_P(result))
return result;
512 return rb_float_new((
double)timeout->tv_sec + (0.000001 * timeout->tv_usec));
532 return rb_funcall(scheduler, id_kernel_sleep, 1, timeout);
538 return rb_funcallv(scheduler, id_kernel_sleep, argc, argv);
552 if (!UNDEF_P(result))
return result;
590 VALUE arguments[] = {
591 timeout, exception, message
598rb_fiber_scheduler_timeout_afterv(
VALUE scheduler,
int argc,
VALUE * argv)
625 VALUE arguments[] = {
649 return rb_funcall(scheduler, id_block, 2, blocker, timeout);
671 enum ruby_tag_type state;
676 int saved_errno =
errno;
680 int saved_interrupt_mask = ec->interrupt_mask;
681 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
684 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
685 result =
rb_funcall(scheduler, id_unblock, 2, blocker, fiber);
689 ec->interrupt_mask = saved_interrupt_mask;
692 EC_JUMP_TAG(ec, state);
695 RUBY_VM_CHECK_INTS(ec);
722fiber_scheduler_io_wait(
VALUE _argument) {
725 return rb_funcallv(arguments[0], id_io_wait, 3, arguments + 1);
731 VALUE arguments[] = {
732 scheduler, io, events, timeout
736 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_wait, (
VALUE)&arguments);
738 return fiber_scheduler_io_wait((
VALUE)&arguments);
766 VALUE arguments[] = {
767 readables, writables, exceptables, timeout
810fiber_scheduler_io_read(
VALUE _argument) {
813 return rb_funcallv(arguments[0], id_io_read, 4, arguments + 1);
823 VALUE arguments[] = {
828 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_read, (
VALUE)&arguments);
830 return fiber_scheduler_io_read((
VALUE)&arguments);
849fiber_scheduler_io_pread(
VALUE _argument) {
852 return rb_funcallv(arguments[0], id_io_pread, 5, arguments + 1);
862 VALUE arguments[] = {
867 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_pread, (
VALUE)&arguments);
869 return fiber_scheduler_io_pread((
VALUE)&arguments);
902fiber_scheduler_io_write(
VALUE _argument) {
905 return rb_funcallv(arguments[0], id_io_write, 4, arguments + 1);
915 VALUE arguments[] = {
920 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_write, (
VALUE)&arguments);
922 return fiber_scheduler_io_write((
VALUE)&arguments);
942fiber_scheduler_io_pwrite(
VALUE _argument) {
945 return rb_funcallv(arguments[0], id_io_pwrite, 5, arguments + 1);
957 VALUE arguments[] = {
962 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_pwrite, (
VALUE)&arguments);
964 return fiber_scheduler_io_pwrite((
VALUE)&arguments);
971 VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);
975 rb_io_buffer_free_locked(buffer);
983 VALUE buffer = rb_io_buffer_new((
void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);
987 rb_io_buffer_free_locked(buffer);
995 VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);
999 rb_io_buffer_free_locked(buffer);
1007 VALUE buffer = rb_io_buffer_new((
void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);
1011 rb_io_buffer_free_locked(buffer);
1027 VALUE arguments[] = {io};
1067 VALUE arguments[] = {
1094 if (!
rb_respond_to(scheduler, id_blocking_operation_wait)) {
1099 VALUE blocking_operation = rb_fiber_scheduler_blocking_operation_new(function, data, unblock_function, data2, flags, state);
1101 VALUE result =
rb_funcall(scheduler, id_blocking_operation_wait, 1, blocking_operation);
1108 operation->function = NULL;
1109 operation->state = NULL;
1110 operation->data = NULL;
1111 operation->data2 = NULL;
1112 operation->unblock_function = NULL;
1115 if (current_status == RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) {
1133 VALUE arguments[] = {
1138 enum ruby_tag_type state;
1142 int saved_interrupt_mask = ec->interrupt_mask;
1143 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
1146 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1151 ec->interrupt_mask = saved_interrupt_mask;
1154 EC_JUMP_TAG(ec, state);
1157 RUBY_VM_CHECK_INTS(ec);
1195 if (blocking_operation == NULL) {
1201 switch (current_state) {
1202 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED:
1204 if (
RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) == current_state) {
1210 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING:
1212 if (
RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) != current_state) {
1218 if (unblock_function) {
1220 blocking_operation->unblock_function(blocking_operation->data2);
1225 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED:
1226 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED:
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
#define RUBY_ATOMIC_CAS(var, oldval, newval)
Atomic compare-and-swap.
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
VALUE rb_class_new(VALUE super)
Creates a new, anonymous class.
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
#define Qundef
Old name of RUBY_Qundef.
#define SIZET2NUM
Old name of RB_SIZE2NUM.
#define Qnil
Old name of RUBY_Qnil.
VALUE rb_eRuntimeError
RuntimeError exception.
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
VALUE rb_cObject
Object class.
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
VALUE rb_funcall_passing_block_kw(VALUE recv, ID mid, int argc, const VALUE *argv, int kw_splat)
Identical to rb_funcallv_passing_block(), except you can specify how to handle the last element of the given array.
void rb_unblock_function_t(void *)
This is the type of UBFs.
int rb_respond_to(VALUE obj, ID mid)
Queries if the object responds to the method.
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
VALUE rb_io_timeout(VALUE io)
Get the timeout associated with the specified io object.
@ RUBY_IO_READABLE
IO::READABLE
@ RUBY_IO_WRITABLE
IO::WRITABLE
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behaviour.
#define RB_UINT2NUM
Just another name of rb_uint2num_inline.
#define RB_INT2NUM
Just another name of rb_int2num_inline.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define OFFT2NUM
Converts a C's off_t into an instance of rb_cInteger.
#define PIDT2NUM
Converts a C's pid_t into an instance of rb_cInteger.
#define RUBY_DEFAULT_FREE
This is a value you can set to RData::dfree.
#define RUBY_TYPED_FREE_IMMEDIATELY
Macros to see if each corresponding flag is defined.
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
#define errno
Ractor-aware version of errno.
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void *(*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
Defer the execution of the passed function to the scheduler.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
VALUE rb_fiber_scheduler_io_pread_memory(VALUE scheduler, VALUE io, rb_off_t from, void *base, size_t size, size_t length)
Non-blocking pread from the passed IO using a native buffer.
VALUE rb_fiber_scheduler_make_timeout(struct timeval *timeout)
Converts the passed timeout to an expression that rb_fiber_scheduler_block() etc.
VALUE rb_fiber_scheduler_io_wait_readable(VALUE scheduler, VALUE io)
Non-blocking wait until the passed IO is ready for reading.
VALUE rb_fiber_scheduler_io_read_memory(VALUE scheduler, VALUE io, void *base, size_t size, size_t length)
Non-blocking read from the passed IO using a native buffer.
VALUE rb_fiber_scheduler_io_pwrite(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
Non-blocking write to the passed IO at the specified offset.
VALUE rb_fiber_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE *argv)
Identical to rb_fiber_scheduler_kernel_sleep(), except it can pass multiple arguments.
VALUE rb_fiber_scheduler_fiber_interrupt(VALUE scheduler, VALUE fiber, VALUE exception)
Interrupt a fiber by raising an exception.
VALUE rb_fiber_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)
Non-blocking version of rb_io_wait().
VALUE rb_fiber_scheduler_io_select(VALUE scheduler, VALUE readables, VALUE writables, VALUE exceptables, VALUE timeout)
Non-blocking version of IO.select.
VALUE rb_fiber_scheduler_io_read(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
Non-blocking read from the passed IO.
int rb_fiber_scheduler_blocking_operation_cancel(rb_fiber_scheduler_blocking_operation_t *blocking_operation)
Cancel a blocking operation.
VALUE rb_fiber_scheduler_io_selectv(VALUE scheduler, int argc, VALUE *argv)
Non-blocking version of IO.select, argv variant.
VALUE rb_fiber_scheduler_process_wait(VALUE scheduler, rb_pid_t pid, int flags)
Non-blocking waitpid.
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
int rb_fiber_scheduler_blocking_operation_execute(rb_fiber_scheduler_blocking_operation_t *blocking_operation)
Execute blocking operation from handle (GVL not required).
VALUE rb_fiber_scheduler_io_pread(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
Non-blocking read from the passed IO at the specified offset.
VALUE rb_fiber_scheduler_io_pwrite_memory(VALUE scheduler, VALUE io, rb_off_t from, const void *base, size_t size, size_t length)
Non-blocking pwrite to the passed IO using a native buffer.
VALUE rb_fiber_scheduler_io_write(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
Non-blocking write to the passed IO.
VALUE rb_fiber_scheduler_close(VALUE scheduler)
Closes the passed scheduler object.
rb_fiber_scheduler_blocking_operation_t * rb_fiber_scheduler_blocking_operation_extract(VALUE self)
Extract the blocking operation handle from a BlockingOperation Ruby object.
VALUE rb_fiber_scheduler_current_for_thread(VALUE thread)
Identical to rb_fiber_scheduler_current(), except it queries for that of the passed thread value instead.
VALUE rb_fiber_scheduler_kernel_sleep(VALUE scheduler, VALUE duration)
Non-blocking sleep.
VALUE rb_fiber_scheduler_address_resolve(VALUE scheduler, VALUE hostname)
Non-blocking DNS lookup.
VALUE rb_fiber_scheduler_yield(VALUE scheduler)
Yield to the scheduler, to be resumed on the next scheduling cycle.
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this function.
VALUE rb_fiber_scheduler_io_write_memory(VALUE scheduler, VALUE io, const void *base, size_t size, size_t length)
Non-blocking write to the passed IO using a native buffer.
VALUE rb_fiber_scheduler_current_for_threadptr(struct rb_thread_struct *thread)
Identical to rb_fiber_scheduler_current_for_thread(), except it expects a threadptr instead of a thread value.
VALUE rb_fiber_scheduler_io_wait_writable(VALUE scheduler, VALUE io)
Non-blocking wait until the passed IO is ready for writing.
VALUE rb_fiber_scheduler_io_close(VALUE scheduler, VALUE io)
Non-blocking close the given IO.
VALUE rb_fiber_scheduler_get(void)
Queries the current scheduler of the current thread that is calling this function.
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
VALUE rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
Create and schedule a non-blocking fiber.
@ RUBY_Qundef
Represents so-called undef.
This is the struct that holds necessary info for a struct.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.