// Ruby 4.1.0dev (2026-03-06 revision 9aca729140424bbf465c11ab8ab53e5cc6602c01)
// scheduler.c (9aca729140424bbf465c11ab8ab53e5cc6602c01)
1/**********************************************************************
2
3 scheduler.c
4
5 $Author$
6
7 Copyright (C) 2020 Samuel Grant Dawson Williams
8
9**********************************************************************/
10
11#include "vm_core.h"
12#include "eval_intern.h"
14#include "ruby/io.h"
15#include "ruby/io/buffer.h"
16
17#include "ruby/thread.h"
18
19// For `ruby_thread_has_gvl_p`:
20#include "internal/thread.h"
21
22// For atomic operations:
23#include "ruby_atomic.h"
24
25static ID id_close;
26static ID id_scheduler_close;
27
28static ID id_block;
29static ID id_unblock;
30
31static ID id_yield;
32
33static ID id_timeout_after;
34static ID id_kernel_sleep;
35static ID id_process_wait;
36
37static ID id_io_read, id_io_pread;
38static ID id_io_write, id_io_pwrite;
39static ID id_io_wait;
40static ID id_io_select;
41static ID id_io_close;
42
43static ID id_address_resolve;
44
45static ID id_blocking_operation_wait;
46static ID id_fiber_interrupt;
47
48static ID id_fiber_schedule;
49
50// Our custom blocking operation class
51static VALUE rb_cFiberSchedulerBlockingOperation;
52
53/*
54 * Custom blocking operation structure for blocking operations
55 * This replaces the use of Ruby procs to avoid use-after-free issues
56 * and provides a cleaner C API for native work pools.
57 */
58
// Lifecycle states for a blocking operation. The expected transitions are
// QUEUED -> EXECUTING -> COMPLETED; CANCELLED may replace either forward
// transition (see the CAS logic in rb_fiber_scheduler_blocking_operation_execute).
typedef enum {
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED, // Submitted but not started
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING, // Currently running
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED, // Finished (success/error)
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED // Cancelled
} rb_fiber_blocking_operation_status_t;
65
67 void *(*function)(void *);
68 void *data;
69
70 rb_unblock_function_t *unblock_function;
71 void *data2;
72
73 int flags;
75
76 // Execution status
77 volatile rb_atomic_t status;
78};
79
80static size_t
81blocking_operation_memsize(const void *ptr)
82{
84}
85
86static const rb_data_type_t blocking_operation_data_type = {
87 "Fiber::Scheduler::BlockingOperation",
88 {
89 NULL, // nothing to mark
91 blocking_operation_memsize,
92 },
93 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
94};
95
96/*
97 * Allocate a new blocking operation
98 */
99static VALUE
100blocking_operation_alloc(VALUE klass)
101{
102 rb_fiber_scheduler_blocking_operation_t *blocking_operation;
103 VALUE obj = TypedData_Make_Struct(klass, rb_fiber_scheduler_blocking_operation_t, &blocking_operation_data_type, blocking_operation);
104
105 blocking_operation->function = NULL;
106 blocking_operation->data = NULL;
107 blocking_operation->unblock_function = NULL;
108 blocking_operation->data2 = NULL;
109 blocking_operation->flags = 0;
110 blocking_operation->state = NULL;
111 blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;
112
113 return obj;
114}
115
116/*
117 * Get the blocking operation struct from a Ruby object
118 */
120get_blocking_operation(VALUE obj)
121{
122 rb_fiber_scheduler_blocking_operation_t *blocking_operation;
123 TypedData_Get_Struct(obj, rb_fiber_scheduler_blocking_operation_t, &blocking_operation_data_type, blocking_operation);
124 return blocking_operation;
125}
126
127/*
128 * Document-method: Fiber::Scheduler::BlockingOperation#call
129 *
130 * Execute the blocking operation. This method releases the GVL and calls
131 * the blocking function, then restores the errno value.
132 *
133 * Returns nil. The actual result is stored in the associated state object.
134 */
/*
 * Document-method: Fiber::Scheduler::BlockingOperation#call
 *
 * Execute the blocking operation. This method releases the GVL and calls
 * the blocking function, then restores the errno value.
 *
 * Returns nil. The actual result is stored in the associated state object.
 */
static VALUE
blocking_operation_call(VALUE self)
{
    rb_fiber_scheduler_blocking_operation_t *blocking_operation = get_blocking_operation(self);

    // Only a QUEUED operation may be started; re-invocation is an error.
    if (blocking_operation->status != RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) {
        rb_raise(rb_eRuntimeError, "Blocking operation has already been executed!");
    }

    if (blocking_operation->function == NULL) {
        rb_raise(rb_eRuntimeError, "Blocking operation has no function to execute!");
    }

    if (blocking_operation->state == NULL) {
        rb_raise(rb_eRuntimeError, "Blocking operation has no result object!");
    }

    // Mark as executing
    blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING;

    // Execute the blocking operation without GVL
    blocking_operation->state->result = rb_nogvl(blocking_operation->function, blocking_operation->data,
        blocking_operation->unblock_function, blocking_operation->data2,
        blocking_operation->flags);
    // rb_nogvl may clobber errno on reacquiring the GVL; capture via rb_errno().
    blocking_operation->state->saved_errno = rb_errno();

    // Mark as completed
    blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED;

    return Qnil;
}
166
167/*
168 * C API: Extract blocking operation struct from Ruby object (GVL required)
169 *
170 * This function safely extracts the opaque struct from a BlockingOperation VALUE
171 * while holding the GVL. The returned pointer can be passed to worker threads
172 * and used with rb_fiber_scheduler_blocking_operation_execute_opaque_nogvl.
173 *
174 * Returns the opaque struct pointer on success, NULL on error.
175 * Must be called while holding the GVL.
176 */
179{
180 return get_blocking_operation(self);
181}
182
183/*
184 * C API: Execute blocking operation from opaque struct (GVL not required)
185 *
186 * This function executes a blocking operation using the opaque struct pointer
187 * obtained from rb_fiber_scheduler_blocking_operation_extract.
188 * It can be called from native threads without holding the GVL.
189 *
190 * Returns 0 on success, -1 on error.
191 */
192int
194{
195 if (blocking_operation == NULL) {
196 return -1;
197 }
198
199 if (blocking_operation->function == NULL || blocking_operation->state == NULL) {
200 return -1; // Invalid blocking operation
201 }
202
203 // Resolve sentinel values for unblock_function and data2:
204 rb_thread_resolve_unblock_function(&blocking_operation->unblock_function, &blocking_operation->data2, GET_THREAD());
205
206 // Atomically check if we can transition from QUEUED to EXECUTING
207 rb_atomic_t expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;
208 if (RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING) != expected) {
209 // Already cancelled or in wrong state
210 return -1;
211 }
212
213 // Now we're executing - call the function
214 blocking_operation->state->result = blocking_operation->function(blocking_operation->data);
215 blocking_operation->state->saved_errno = errno;
216
217 // Atomically transition to completed (unless cancelled during execution)
218 expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING;
219 if (RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED) == expected) {
220 // Successfully completed
221 return 0;
222 } else {
223 // Was cancelled during execution
224 blocking_operation->state->saved_errno = EINTR;
225 return -1;
226 }
227}
228
229/*
230 * C API: Create a new blocking operation
231 *
232 * This creates a blocking operation that can be executed by native work pools.
233 * The blocking operation holds references to the function and data safely.
234 */
235VALUE
236rb_fiber_scheduler_blocking_operation_new(void *(*function)(void *), void *data,
237 rb_unblock_function_t *unblock_function, void *data2,
238 int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
239{
240 VALUE self = blocking_operation_alloc(rb_cFiberSchedulerBlockingOperation);
241 rb_fiber_scheduler_blocking_operation_t *blocking_operation = get_blocking_operation(self);
242
243 blocking_operation->function = function;
244 blocking_operation->data = data;
245 blocking_operation->unblock_function = unblock_function;
246 blocking_operation->data2 = data2;
247 blocking_operation->flags = flags;
248 blocking_operation->state = state;
249
250 return self;
251}
252
253/*
254 *
255 * Document-class: Fiber::Scheduler
256 *
257 * This is not an existing class, but documentation of the interface that Scheduler
258 * object should comply to in order to be used as argument to Fiber.scheduler and handle non-blocking
259 * fibers. See also the "Non-blocking fibers" section in Fiber class docs for explanations
260 * of some concepts.
261 *
262 * Scheduler's behavior and usage are expected to be as follows:
263 *
264 * * When the execution in the non-blocking Fiber reaches some blocking operation (like
265 * sleep, wait for a process, or a non-ready I/O), it calls some of the scheduler's
266 * hook methods, listed below.
267 * * Scheduler somehow registers what the current fiber is waiting on, and yields control
268 * to other fibers with Fiber.yield (so the fiber would be suspended while expecting its
269 * wait to end, and other fibers in the same thread can perform)
270 * * At the end of the current thread execution, the scheduler's method #scheduler_close is called
271 * * The scheduler runs into a wait loop, checking all the blocked fibers (which it has
272 * registered on hook calls) and resuming them when the awaited resource is ready
273 * (e.g. I/O ready or sleep time elapsed).
274 *
275 * This way concurrent execution will be achieved transparently for every
276 * individual Fiber's code.
277 *
278 * Scheduler implementations are provided by gems, like
279 * Async[https://github.com/socketry/async].
280 *
281 * Hook methods are:
282 *
283 * * #io_wait, #io_read, #io_write, #io_pread, #io_pwrite #io_select, and #io_close
284 * * #process_wait
285 * * #kernel_sleep
286 * * #timeout_after
287 * * #address_resolve
288 * * #block and #unblock
289 * * #blocking_operation_wait
290 * * #fiber_interrupt
291 * * #yield
292 * * (the list is expanded as Ruby developers make more methods having non-blocking calls)
293 *
294 * When not specified otherwise, the hook implementations are mandatory: if they are not
295 * implemented, the methods trying to call hook will fail. To provide backward compatibility,
296 * in the future hooks will be optional (if they are not implemented, due to the scheduler
297 * being created for the older Ruby version, the code which needs this hook will not fail,
298 * and will just behave in a blocking fashion).
299 *
300 * It is also strongly recommended that the scheduler implements the #fiber method, which is
301 * delegated to by Fiber.schedule.
302 *
303 * Sample _toy_ implementation of the scheduler can be found in Ruby's code, in
304 * <tt>test/fiber/scheduler.rb</tt>
305 *
306 */
// Intern all hook-method IDs used by the scheduler interface, and set up the
// hidden BlockingOperation class. Called once during VM boot.
void
Init_Fiber_Scheduler(void)
{
    id_close = rb_intern_const("close");
    id_scheduler_close = rb_intern_const("scheduler_close");

    id_block = rb_intern_const("block");
    id_unblock = rb_intern_const("unblock");
    id_yield = rb_intern_const("yield");

    id_timeout_after = rb_intern_const("timeout_after");
    id_kernel_sleep = rb_intern_const("kernel_sleep");
    id_process_wait = rb_intern_const("process_wait");

    id_io_read = rb_intern_const("io_read");
    id_io_pread = rb_intern_const("io_pread");
    id_io_write = rb_intern_const("io_write");
    id_io_pwrite = rb_intern_const("io_pwrite");

    id_io_wait = rb_intern_const("io_wait");
    id_io_select = rb_intern_const("io_select");
    id_io_close = rb_intern_const("io_close");

    id_address_resolve = rb_intern_const("address_resolve");

    id_blocking_operation_wait = rb_intern_const("blocking_operation_wait");
    id_fiber_interrupt = rb_intern_const("fiber_interrupt");

    // Note: Fiber.schedule delegates to the scheduler's #fiber method.
    id_fiber_schedule = rb_intern_const("fiber");

    // Define an anonymous BlockingOperation class for internal use only
    // This is completely hidden from Ruby code and cannot be instantiated directly
    rb_cFiberSchedulerBlockingOperation = rb_class_new(rb_cObject);
    rb_define_alloc_func(rb_cFiberSchedulerBlockingOperation, blocking_operation_alloc);
    rb_define_method(rb_cFiberSchedulerBlockingOperation, "call", blocking_operation_call, 0);

    // Register the anonymous class as a GC root so it doesn't get collected
    rb_gc_register_mark_object(rb_cFiberSchedulerBlockingOperation);

#if 0 /* for RDoc */
    rb_cFiberScheduler = rb_define_class_under(rb_cFiber, "Scheduler", rb_cObject);
    rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_close, 0);
    rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_process_wait, 2);
    rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_io_wait, 3);
    rb_define_method(rb_cFiberScheduler, "io_read", rb_fiber_scheduler_io_read, 4);
    rb_define_method(rb_cFiberScheduler, "io_write", rb_fiber_scheduler_io_write, 4);
    rb_define_method(rb_cFiberScheduler, "io_pread", rb_fiber_scheduler_io_pread, 5);
    rb_define_method(rb_cFiberScheduler, "io_pwrite", rb_fiber_scheduler_io_pwrite, 5);
    rb_define_method(rb_cFiberScheduler, "io_select", rb_fiber_scheduler_io_select, 4);
    rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_kernel_sleep, 1);
    rb_define_method(rb_cFiberScheduler, "address_resolve", rb_fiber_scheduler_address_resolve, 1);
    rb_define_method(rb_cFiberScheduler, "timeout_after", rb_fiber_scheduler_timeout_after, 3);
    rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_block, 2);
    rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_unblock, 2);
    rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler_fiber, -2);
    rb_define_method(rb_cFiberScheduler, "blocking_operation_wait", rb_fiber_scheduler_blocking_operation_wait, -2);
    rb_define_method(rb_cFiberScheduler, "yield", rb_fiber_scheduler_yield, 0);
    rb_define_method(rb_cFiberScheduler, "fiber_interrupt", rb_fiber_scheduler_fiber_interrupt, 2);
    rb_define_method(rb_cFiberScheduler, "io_close", rb_fiber_scheduler_io_close, 1);
#endif
}
368
369VALUE
371{
372 RUBY_ASSERT(ruby_thread_has_gvl_p());
373
374 rb_thread_t *thread = GET_THREAD();
375 RUBY_ASSERT(thread);
376
377 return thread->scheduler;
378}
379
380static void
381verify_interface(VALUE scheduler)
382{
383 if (!rb_respond_to(scheduler, id_block)) {
384 rb_raise(rb_eArgError, "Scheduler must implement #block");
385 }
386
387 if (!rb_respond_to(scheduler, id_unblock)) {
388 rb_raise(rb_eArgError, "Scheduler must implement #unblock");
389 }
390
391 if (!rb_respond_to(scheduler, id_kernel_sleep)) {
392 rb_raise(rb_eArgError, "Scheduler must implement #kernel_sleep");
393 }
394
395 if (!rb_respond_to(scheduler, id_io_wait)) {
396 rb_raise(rb_eArgError, "Scheduler must implement #io_wait");
397 }
398
399 if (!rb_respond_to(scheduler, id_fiber_interrupt)) {
400 rb_warn("Scheduler should implement #fiber_interrupt");
401 }
402}
403
// Thin wrapper giving rb_fiber_scheduler_close the VALUE(*)(VALUE) signature
// required by rb_ensure.
static VALUE
fiber_scheduler_close(VALUE scheduler)
{
    return rb_fiber_scheduler_close(scheduler);
}
409
// rb_ensure cleanup: detach the scheduler from the thread even if
// Scheduler#close raised. +_thread+ is an rb_thread_t* cast through VALUE.
static VALUE
fiber_scheduler_close_ensure(VALUE _thread)
{
    rb_thread_t *thread = (rb_thread_t*)_thread;
    thread->scheduler = Qnil;

    return Qnil;
}
418
419VALUE
421{
422 RUBY_ASSERT(ruby_thread_has_gvl_p());
423
424 rb_thread_t *thread = GET_THREAD();
425 RUBY_ASSERT(thread);
426
427 if (scheduler != Qnil) {
428 verify_interface(scheduler);
429 }
430
431 // We invoke Scheduler#close when setting it to something else, to ensure
432 // the previous scheduler runs to completion before changing the scheduler.
433 // That way, we do not need to consider interactions, e.g., of a Fiber from
434 // the previous scheduler with the new scheduler.
435 if (thread->scheduler != Qnil) {
436 // rb_fiber_scheduler_close(thread->scheduler);
437 rb_ensure(fiber_scheduler_close, thread->scheduler, fiber_scheduler_close_ensure, (VALUE)thread);
438 }
439
440 thread->scheduler = scheduler;
441
442 return thread->scheduler;
443}
444
445static VALUE
446fiber_scheduler_current_for_threadptr(rb_thread_t *thread)
447{
448 RUBY_ASSERT(thread);
449
450 if (thread->blocking == 0) {
451 return thread->scheduler;
452 }
453 else {
454 return Qnil;
455 }
456}
457
459{
460 RUBY_ASSERT(ruby_thread_has_gvl_p());
461
462 return fiber_scheduler_current_for_threadptr(GET_THREAD());
463}
464
465// This function is allowed to be called without holding the GVL.
467{
468 return fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread));
469}
470
472{
473 return fiber_scheduler_current_for_threadptr(thread);
474}
475
476/*
477 *
478 * Document-method: Fiber::Scheduler#close
479 *
480 * Called when the current thread exits. The scheduler is expected to implement this
481 * method in order to allow all waiting fibers to finalize their execution.
482 *
483 * The suggested pattern is to implement the main event loop in the #close method.
484 *
485 */
486VALUE
488{
489 RUBY_ASSERT(ruby_thread_has_gvl_p());
490
491 VALUE result;
492
493 // The reason for calling `scheduler_close` before calling `close` is for
494 // legacy schedulers which implement `close` and expect the user to call
495 // it. Subsequently, that method would call `Fiber.set_scheduler(nil)`
496 // which should call `scheduler_close`. If it were to call `close`, it
497 // would create an infinite loop.
498
499 result = rb_check_funcall(scheduler, id_scheduler_close, 0, NULL);
500 if (!UNDEF_P(result)) return result;
501
502 result = rb_check_funcall(scheduler, id_close, 0, NULL);
503 if (!UNDEF_P(result)) return result;
504
505 return Qnil;
506}
507
508VALUE
510{
511 if (timeout) {
512 return rb_float_new((double)timeout->tv_sec + (0.000001 * timeout->tv_usec));
513 }
514
515 return Qnil;
516}
517
518/*
519 * Document-method: Fiber::Scheduler#kernel_sleep
520 * call-seq: kernel_sleep(duration = nil)
521 *
522 * Invoked by Kernel#sleep and Thread::Mutex#sleep and is expected to provide
523 * an implementation of sleeping in a non-blocking way. Implementation might
524 * register the current fiber in some list of "which fiber wait until what
525 * moment", call Fiber.yield to pass control, and then in #close resume
526 * the fibers whose wait period has elapsed.
527 *
528 */
529VALUE
531{
532 return rb_funcall(scheduler, id_kernel_sleep, 1, timeout);
533}
534
// Variadic form of rb_fiber_scheduler_kernel_sleep: forwards argc/argv to the
// scheduler's #kernel_sleep hook unchanged.
VALUE
rb_fiber_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_funcallv(scheduler, id_kernel_sleep, argc, argv);
}
540
547VALUE
549{
550 // First try to call the scheduler's yield method, if it exists:
551 VALUE result = rb_check_funcall(scheduler, id_yield, 0, NULL);
552 if (!UNDEF_P(result)) return result;
553
554 // Otherwise, we can emulate yield by sleeping for 0 seconds:
555 return rb_fiber_scheduler_kernel_sleep(scheduler, RB_INT2NUM(0));
556}
557
558#if 0
559/*
560 * Document-method: Fiber::Scheduler#timeout_after
561 * call-seq: timeout_after(duration, exception_class, *exception_arguments, &block) -> result of block
562 *
563 * Invoked by Timeout.timeout to execute the given +block+ within the given
564 * +duration+. It can also be invoked directly by the scheduler or user code.
565 *
566 * Attempt to limit the execution time of a given +block+ to the given
567 * +duration+ if possible. When a non-blocking operation causes the +block+'s
568 * execution time to exceed the specified +duration+, that non-blocking
569 * operation should be interrupted by raising the specified +exception_class+
570 * constructed with the given +exception_arguments+.
571 *
572 * General execution timeouts are often considered risky. This implementation
573 * will only interrupt non-blocking operations. This is by design because it's
574 * expected that non-blocking operations can fail for a variety of
575 * unpredictable reasons, so applications should already be robust in handling
576 * these conditions and by implication timeouts.
577 *
578 * However, as a result of this design, if the +block+ does not invoke any
579 * non-blocking operations, it will be impossible to interrupt it. If you
580 * desire to provide predictable points for timeouts, consider adding
581 * <tt>sleep(0)</tt>.
582 *
583 * If the block is executed successfully, its result will be returned.
584 *
585 * The exception will typically be raised using Fiber#raise.
586 */
// Invoke the scheduler's optional #timeout_after hook with
// (duration, exception_class, message). Returns Qundef when unimplemented.
VALUE
rb_fiber_scheduler_timeout_after(VALUE scheduler, VALUE timeout, VALUE exception, VALUE message)
{
    VALUE arguments[] = {
        timeout, exception, message
    };

    return rb_check_funcall(scheduler, id_timeout_after, 3, arguments);
}
596
// Variadic form of rb_fiber_scheduler_timeout_after; returns Qundef when the
// hook is not implemented.
VALUE
rb_fiber_scheduler_timeout_afterv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_check_funcall(scheduler, id_timeout_after, argc, argv);
}
602#endif
603
604/*
605 * Document-method: Fiber::Scheduler#process_wait
606 * call-seq: process_wait(pid, flags)
607 *
608 * Invoked by Process::Status.wait in order to wait for a specified process.
609 * See that method description for arguments description.
610 *
611 * Suggested minimal implementation:
612 *
613 * Thread.new do
614 * Process::Status.wait(pid, flags)
615 * end.value
616 *
617 * This hook is optional: if it is not present in the current scheduler,
618 * Process::Status.wait will behave as a blocking method.
619 *
620 * Expected to return a Process::Status instance.
621 */
622VALUE
623rb_fiber_scheduler_process_wait(VALUE scheduler, rb_pid_t pid, int flags)
624{
625 VALUE arguments[] = {
626 PIDT2NUM(pid), RB_INT2NUM(flags)
627 };
628
629 return rb_check_funcall(scheduler, id_process_wait, 2, arguments);
630}
631
632/*
633 * Document-method: Fiber::Scheduler#block
634 * call-seq: block(blocker, timeout = nil)
635 *
636 * Invoked by methods like Thread.join, and by Thread::Mutex, to signify that current
637 * Fiber is blocked until further notice (e.g. #unblock) or until +timeout+ has
638 * elapsed.
639 *
640 * +blocker+ is what we are waiting on, informational only (for debugging and
641 * logging). There are no guarantee about its value.
642 *
643 * Expected to return boolean, specifying whether the blocking operation was
644 * successful or not.
645 */
// Invoke the scheduler's mandatory #block hook. +blocker+ is informational
// only; +timeout+ may be nil for an indefinite block.
VALUE
rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
{
    return rb_funcall(scheduler, id_block, 2, blocker, timeout);
}
651
652/*
653 * Document-method: Fiber::Scheduler#unblock
654 * call-seq: unblock(blocker, fiber)
655 *
656 * Invoked to wake up Fiber previously blocked with #block (for example, Thread::Mutex#lock
657 * calls #block and Thread::Mutex#unlock calls #unblock). The scheduler should use
658 * the +fiber+ parameter to understand which fiber is unblocked.
659 *
660 * +blocker+ is what was awaited for, but it is informational only (for debugging
661 * and logging), and it is not guaranteed to be the same value as the +blocker+ for
662 * #block.
663 *
664 */
// Invoke the scheduler's mandatory #unblock hook to wake +fiber+, which was
// previously suspended via #block. Preserves errno and defers pending
// interrupts across the user-code call; see inline comments for why.
VALUE
rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
{
    RUBY_ASSERT(rb_obj_is_fiber(fiber));

    VALUE result;
    enum ruby_tag_type state;

    // `rb_fiber_scheduler_unblock` can be called from points where `errno` is expected to be preserved. Therefore, we should save and restore it. For example `io_binwrite` calls `rb_fiber_scheduler_unblock` and if `errno` is reset to 0 by user code, it will break the error handling in `io_write`.
    //
    // If we explicitly preserve `errno` in `io_binwrite` and other similar functions (e.g. by returning it), this code is no longer needed. I hope in the future we will be able to remove it.
    int saved_errno = errno;

    // We must prevent interrupts while invoking the unblock method, because otherwise fibers can be left permanently blocked if an interrupt occurs during the execution of user code. See also `rb_fiber_scheduler_fiber_interrupt`.
    rb_execution_context_t *ec = GET_EC();
    int saved_interrupt_mask = ec->interrupt_mask;
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;

    // Protect the call so the interrupt mask is restored even if it raises:
    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        result = rb_funcall(scheduler, id_unblock, 2, blocker, fiber);
    }
    EC_POP_TAG();

    ec->interrupt_mask = saved_interrupt_mask;

    // Re-raise anything the hook raised, now that the mask is restored:
    if (state) {
        EC_JUMP_TAG(ec, state);
    }

    // Process any interrupts that became pending while they were masked:
    RUBY_VM_CHECK_INTS(ec);

    errno = saved_errno;

    return result;
}
701
702/*
703 * Document-method: Fiber::Scheduler#io_wait
704 * call-seq: io_wait(io, events, timeout)
705 *
706 * Invoked by IO#wait, IO#wait_readable, IO#wait_writable to ask whether the
707 * specified descriptor is ready for specified events within
708 * the specified +timeout+.
709 *
710 * +events+ is a bit mask of <tt>IO::READABLE</tt>, <tt>IO::WRITABLE</tt>, and
711 * <tt>IO::PRIORITY</tt>.
712 *
713 * Suggested implementation should register which Fiber is waiting for which
714 * resources and immediately calling Fiber.yield to pass control to other
715 * fibers. Then, in the #close method, the scheduler might dispatch all the
716 * I/O resources to fibers waiting for it.
717 *
718 * Expected to return the subset of events that are ready immediately.
719 *
720 */
// Trampoline: _argument is a VALUE[4] of {scheduler, io, events, timeout};
// forwards the last three to the scheduler's #io_wait hook.
static VALUE
fiber_scheduler_io_wait(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_wait, 3, arguments + 1);
}
727
728VALUE
729rb_fiber_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)
730{
731 VALUE arguments[] = {
732 scheduler, io, events, timeout
733 };
734
735 if (rb_respond_to(scheduler, id_fiber_interrupt)) {
736 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_wait, (VALUE)&arguments);
737 } else {
738 return fiber_scheduler_io_wait((VALUE)&arguments);
739 }
740}
741
742VALUE
747
748VALUE
753
754/*
755 * Document-method: Fiber::Scheduler#io_select
756 * call-seq: io_select(readables, writables, exceptables, timeout)
757 *
758 * Invoked by IO.select to ask whether the specified descriptors are ready for
759 * specified events within the specified +timeout+.
760 *
761 * Expected to return the 3-tuple of Array of IOs that are ready.
762 *
763 */
764VALUE rb_fiber_scheduler_io_select(VALUE scheduler, VALUE readables, VALUE writables, VALUE exceptables, VALUE timeout)
765{
766 VALUE arguments[] = {
767 readables, writables, exceptables, timeout
768 };
769
770 return rb_fiber_scheduler_io_selectv(scheduler, 4, arguments);
771}
772
774{
775 // I wondered about extracting argv, and checking if there is only a single
776 // IO instance, and instead calling `io_wait`. However, it would require a
777 // decent amount of work and it would be hard to preserve the exact
778 // semantics of IO.select.
779
780 return rb_check_funcall(scheduler, id_io_select, argc, argv);
781}
782
783/*
784 * Document-method: Fiber::Scheduler#io_read
785 * call-seq: io_read(io, buffer, length, offset) -> read length or -errno
786 *
787 * Invoked by IO#read or IO#Buffer.read to read +length+ bytes from +io+ into a
788 * specified +buffer+ (see IO::Buffer) at the given +offset+.
789 *
790 * The +length+ argument is the "minimum length to be read". If the IO buffer
791 * size is 8KiB, but the +length+ is +1024+ (1KiB), up to 8KiB might be read,
792 * but at least 1KiB will be. Generally, the only case where less data than
793 * +length+ will be read is if there is an error reading the data.
794 *
795 * Specifying a +length+ of 0 is valid and means try reading at least once and
796 * return any available data.
797 *
798 * Suggested implementation should try to read from +io+ in a non-blocking
799 * manner and call #io_wait if the +io+ is not ready (which will yield control
800 * to other fibers).
801 *
802 * See IO::Buffer for an interface available to return data.
803 *
804 * Expected to return number of bytes read, or, in case of an error,
805 * <tt>-errno</tt> (negated number corresponding to system's error code).
806 *
807 * The method should be considered _experimental_.
808 */
// Trampoline: _argument is a VALUE[5] of {scheduler, io, buffer, length,
// offset}; forwards the last four to the scheduler's #io_read hook.
static VALUE
fiber_scheduler_io_read(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_read, 4, arguments + 1);
}
815
816VALUE
817rb_fiber_scheduler_io_read(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
818{
819 if (!rb_respond_to(scheduler, id_io_read)) {
820 return RUBY_Qundef;
821 }
822
823 VALUE arguments[] = {
824 scheduler, io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
825 };
826
827 if (rb_respond_to(scheduler, id_fiber_interrupt)) {
828 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_read, (VALUE)&arguments);
829 } else {
830 return fiber_scheduler_io_read((VALUE)&arguments);
831 }
832}
833
834/*
835 * Document-method: Fiber::Scheduler#io_pread
836 * call-seq: io_pread(io, buffer, from, length, offset) -> read length or -errno
837 *
838 * Invoked by IO#pread or IO::Buffer#pread to read +length+ bytes from +io+
839 * at offset +from+ into a specified +buffer+ (see IO::Buffer) at the given
840 * +offset+.
841 *
842 * This method is semantically the same as #io_read, but it allows to specify
843 * the offset to read from and is often better for asynchronous IO on the same
844 * file.
845 *
846 * The method should be considered _experimental_.
847 */
// Trampoline: _argument is a VALUE[6] of {scheduler, io, buffer, from,
// length, offset}; forwards the last five to the scheduler's #io_pread hook.
static VALUE
fiber_scheduler_io_pread(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_pread, 5, arguments + 1);
}
854
855VALUE
856rb_fiber_scheduler_io_pread(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
857{
858 if (!rb_respond_to(scheduler, id_io_pread)) {
859 return RUBY_Qundef;
860 }
861
862 VALUE arguments[] = {
863 scheduler, io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
864 };
865
866 if (rb_respond_to(scheduler, id_fiber_interrupt)) {
867 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_pread, (VALUE)&arguments);
868 } else {
869 return fiber_scheduler_io_pread((VALUE)&arguments);
870 }
871}
872
873/*
874 * Document-method: Fiber::Scheduler#io_write
875 * call-seq: io_write(io, buffer, length, offset) -> written length or -errno
876 *
877 * Invoked by IO#write or IO::Buffer#write to write +length+ bytes to +io+ from
878 * from a specified +buffer+ (see IO::Buffer) at the given +offset+.
879 *
880 * The +length+ argument is the "minimum length to be written". If the IO
881 * buffer size is 8KiB, but the +length+ specified is 1024 (1KiB), at most 8KiB
882 * will be written, but at least 1KiB will be. Generally, the only case where
883 * less data than +length+ will be written is if there is an error writing the
884 * data.
885 *
886 * Specifying a +length+ of 0 is valid and means try writing at least once, as
887 * much data as possible.
888 *
889 * Suggested implementation should try to write to +io+ in a non-blocking
890 * manner and call #io_wait if the +io+ is not ready (which will yield control
891 * to other fibers).
892 *
893 * See IO::Buffer for an interface available to get data from buffer
894 * efficiently.
895 *
896 * Expected to return number of bytes written, or, in case of an error,
897 * <tt>-errno</tt> (negated number corresponding to system's error code).
898 *
899 * The method should be considered _experimental_.
900 */
// Trampoline: _argument is a VALUE[5] of {scheduler, io, buffer, length,
// offset}; forwards the last four to the scheduler's #io_write hook.
static VALUE
fiber_scheduler_io_write(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_write, 4, arguments + 1);
}
907
908VALUE
909rb_fiber_scheduler_io_write(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
910{
911 if (!rb_respond_to(scheduler, id_io_write)) {
912 return RUBY_Qundef;
913 }
914
915 VALUE arguments[] = {
916 scheduler, io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
917 };
918
919 if (rb_respond_to(scheduler, id_fiber_interrupt)) {
920 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_write, (VALUE)&arguments);
921 } else {
922 return fiber_scheduler_io_write((VALUE)&arguments);
923 }
924}
925
926/*
927 * Document-method: Fiber::Scheduler#io_pwrite
928 * call-seq: io_pwrite(io, buffer, from, length, offset) -> written length or -errno
929 *
 * Invoked by IO#pwrite or IO::Buffer#pwrite to write +length+ bytes from a
 * specified +buffer+ (see IO::Buffer), starting at the given +offset+, to
 * +io+ at offset +from+.
933 *
934 * This method is semantically the same as #io_write, but it allows to specify
935 * the offset to write to and is often better for asynchronous IO on the same
936 * file.
937 *
938 * The method should be considered _experimental_.
939 *
940 */
941static VALUE
942fiber_scheduler_io_pwrite(VALUE _argument) {
943 VALUE *arguments = (VALUE*)_argument;
944
945 return rb_funcallv(arguments[0], id_io_pwrite, 5, arguments + 1);
946}
947
948VALUE
949rb_fiber_scheduler_io_pwrite(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
950{
951
952
953 if (!rb_respond_to(scheduler, id_io_pwrite)) {
954 return RUBY_Qundef;
955 }
956
957 VALUE arguments[] = {
958 scheduler, io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
959 };
960
961 if (rb_respond_to(scheduler, id_fiber_interrupt)) {
962 return rb_thread_io_blocking_operation(io, fiber_scheduler_io_pwrite, (VALUE)&arguments);
963 } else {
964 return fiber_scheduler_io_pwrite((VALUE)&arguments);
965 }
966}
967
968VALUE
969rb_fiber_scheduler_io_read_memory(VALUE scheduler, VALUE io, void *base, size_t size, size_t length)
970{
971 VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);
972
973 VALUE result = rb_fiber_scheduler_io_read(scheduler, io, buffer, length, 0);
974
975 rb_io_buffer_free_locked(buffer);
976
977 return result;
978}
979
980VALUE
981rb_fiber_scheduler_io_write_memory(VALUE scheduler, VALUE io, const void *base, size_t size, size_t length)
982{
983 VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);
984
985 VALUE result = rb_fiber_scheduler_io_write(scheduler, io, buffer, length, 0);
986
987 rb_io_buffer_free_locked(buffer);
988
989 return result;
990}
991
992VALUE
993rb_fiber_scheduler_io_pread_memory(VALUE scheduler, VALUE io, rb_off_t from, void *base, size_t size, size_t length)
994{
995 VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);
996
997 VALUE result = rb_fiber_scheduler_io_pread(scheduler, io, from, buffer, length, 0);
998
999 rb_io_buffer_free_locked(buffer);
1000
1001 return result;
1002}
1003
1004VALUE
1005rb_fiber_scheduler_io_pwrite_memory(VALUE scheduler, VALUE io, rb_off_t from, const void *base, size_t size, size_t length)
1006{
1007 VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);
1008
1009 VALUE result = rb_fiber_scheduler_io_pwrite(scheduler, io, from, buffer, length, 0);
1010
1011 rb_io_buffer_free_locked(buffer);
1012
1013 return result;
1014}
1015
1016/*
1017 * Document-method: Fiber::Scheduler#io_close
1018 * call-seq: io_close(fd)
1019 *
1020 * Invoked by Ruby's core methods to notify scheduler that the IO object is closed. Note that
1021 * the method will receive an integer file descriptor of the closed object, not an object
1022 * itself.
1023 */
1024VALUE
1026{
1027 VALUE arguments[] = {io};
1028
1029 return rb_check_funcall(scheduler, id_io_close, 1, arguments);
1030}
1031
1032/*
1033 * Document-method: Fiber::Scheduler#address_resolve
1034 * call-seq: address_resolve(hostname) -> array_of_strings or nil
1035 *
1036 * Invoked by any method that performs a non-reverse DNS lookup. The most
 * notable method is Addrinfo.getaddrinfo, but there are many others.
1038 *
1039 * The method is expected to return an array of strings corresponding to ip
1040 * addresses the +hostname+ is resolved to, or +nil+ if it can not be resolved.
1041 *
1042 * Fairly exhaustive list of all possible call-sites:
1043 *
1044 * - Addrinfo.getaddrinfo
1045 * - Addrinfo.tcp
1046 * - Addrinfo.udp
1047 * - Addrinfo.ip
1048 * - Addrinfo.new
1049 * - Addrinfo.marshal_load
1050 * - SOCKSSocket.new
1051 * - TCPServer.new
1052 * - TCPSocket.new
1053 * - IPSocket.getaddress
1054 * - TCPSocket.gethostbyname
1055 * - UDPSocket#connect
1056 * - UDPSocket#bind
1057 * - UDPSocket#send
1058 * - Socket.getaddrinfo
1059 * - Socket.gethostbyname
1060 * - Socket.pack_sockaddr_in
1061 * - Socket.sockaddr_in
1062 * - Socket.unpack_sockaddr_in
1063 */
1064VALUE
1066{
1067 VALUE arguments[] = {
1068 hostname
1069 };
1070
1071 return rb_check_funcall(scheduler, id_address_resolve, 1, arguments);
1072}
1073
1074/*
1075 * Document-method: Fiber::Scheduler#blocking_operation_wait
1076 * call-seq: blocking_operation_wait(blocking_operation)
1077 *
1078 * Invoked by Ruby's core methods to run a blocking operation in a non-blocking way.
1079 * The blocking_operation is an opaque object that encapsulates the blocking operation
1080 * and responds to a <tt>#call</tt> method without any arguments.
1081 *
1082 * If the scheduler doesn't implement this method, or if the scheduler doesn't execute
1083 * the blocking operation, Ruby will fall back to the non-scheduler implementation.
1084 *
1085 * Minimal suggested implementation is:
1086 *
1087 * def blocking_operation_wait(blocking_operation)
1088 * Thread.new { blocking_operation.call }.join
1089 * end
1090 */
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void* (*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
{
    // Check if scheduler supports blocking_operation_wait before creating the object
    if (!rb_respond_to(scheduler, id_blocking_operation_wait)) {
        return Qundef;
    }

    // Create a new BlockingOperation with the blocking operation
    VALUE blocking_operation = rb_fiber_scheduler_blocking_operation_new(function, data, unblock_function, data2, flags, state);

    // Hand the operation to the scheduler; its #blocking_operation_wait is
    // expected to execute it (e.g. on a worker thread) and return the result.
    VALUE result = rb_funcall(scheduler, id_blocking_operation_wait, 1, blocking_operation);

    // Get the operation data to check if it was executed.
    // NOTE: the status must be read BEFORE the fields are invalidated below,
    // so we can still tell whether the scheduler ever ran the operation.
    rb_fiber_scheduler_blocking_operation_t *operation = get_blocking_operation(blocking_operation);
    rb_atomic_t current_status = RUBY_ATOMIC_LOAD(operation->status);

    // Invalidate the operation now that we're done with it, so a stale
    // reference retained by the scheduler cannot re-execute freed state:
    operation->function = NULL;
    operation->state = NULL;
    operation->data = NULL;
    operation->data2 = NULL;
    operation->unblock_function = NULL;

    // If the blocking operation was never executed, return Qundef to signal the caller to use rb_nogvl instead
    if (current_status == RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) {
        return Qundef;
    }

    return result;
}
1121
1122/*
1123 * Document-method: Fiber::Scheduler#fiber_interrupt
1124 * call-seq: fiber_interrupt(fiber, exception)
1125 *
1126 * Invoked by Ruby's core methods to notify the scheduler that the blocked fiber should be interrupted
1127 * with an exception. For example, IO#close uses this method to interrupt fibers that are performing
1128 * blocking IO operations.
1129 *
1130 */
1132{
1133 VALUE arguments[] = {
1134 fiber, exception
1135 };
1136
1137 VALUE result;
1138 enum ruby_tag_type state;
1139
1140 // We must prevent interrupts while invoking the fiber_interrupt method, because otherwise fibers can be left permanently blocked if an interrupt occurs during the execution of user code. See also `rb_fiber_scheduler_unblock`.
1141 rb_execution_context_t *ec = GET_EC();
1142 int saved_interrupt_mask = ec->interrupt_mask;
1143 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
1144
1145 EC_PUSH_TAG(ec);
1146 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1147 result = rb_check_funcall(scheduler, id_fiber_interrupt, 2, arguments);
1148 }
1149 EC_POP_TAG();
1150
1151 ec->interrupt_mask = saved_interrupt_mask;
1152
1153 if (state) {
1154 EC_JUMP_TAG(ec, state);
1155 }
1156
1157 RUBY_VM_CHECK_INTS(ec);
1158
1159 return result;
1160}
1161
1162/*
1163 * Document-method: Fiber::Scheduler#fiber
1164 * call-seq: fiber(&block)
1165 *
1166 * Implementation of the Fiber.schedule. The method is <em>expected</em> to immediately
1167 * run the given block of code in a separate non-blocking fiber, and to return that Fiber.
1168 *
1169 * Minimal suggested implementation is:
1170 *
1171 * def fiber(&block)
1172 * fiber = Fiber.new(blocking: false, &block)
1173 * fiber.resume
1174 * fiber
1175 * end
1176 */
1177VALUE
1178rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
1179{
1180 return rb_funcall_passing_block_kw(scheduler, id_fiber_schedule, argc, argv, kw_splat);
1181}
1182
1183/*
1184 * C API: Cancel a blocking operation
1185 *
1186 * This function cancels a blocking operation. If the operation is queued,
1187 * it just marks it as cancelled. If it's executing, it marks it as cancelled
1188 * and calls the unblock function to interrupt the operation.
1189 *
1190 * Returns 1 if unblock function was called, 0 if just marked cancelled, -1 on error.
1191 */
1192int
1194{
1195 if (blocking_operation == NULL) {
1196 return -1;
1197 }
1198
1199 rb_atomic_t current_state = RUBY_ATOMIC_LOAD(blocking_operation->status);
1200
1201 switch (current_state) {
1202 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED:
1203 // Work hasn't started - just mark as cancelled:
1204 if (RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) == current_state) {
1205 // Successfully cancelled before execution:
1206 return 0;
1207 }
1208 // Fall through if state changed between load and CAS
1209
1210 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING:
1211 // Work is running - mark cancelled AND call unblock function
1212 if (RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) != current_state) {
1213 // State changed between load and CAS - operation may have completed:
1214 return 0;
1215 }
1216 // Otherwise, we successfully marked it as cancelled, so we can call the unblock function:
1217 rb_unblock_function_t *unblock_function = blocking_operation->unblock_function;
1218 if (unblock_function) {
1219 RUBY_ASSERT(unblock_function != (rb_unblock_function_t *)-1 && "unblock_function is still sentinel value -1, should have been resolved earlier");
1220 blocking_operation->unblock_function(blocking_operation->data2);
1221 }
1222 // Cancelled during execution (unblock function called):
1223 return 1;
1224
1225 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED:
1226 case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED:
1227 // Already finished or cancelled:
1228 return 0;
1229 }
1230
1231 return 0;
1232}
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_ATOMIC_CAS(var, oldval, newval)
Atomic compare-and-swap.
Definition atomic.h:165
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_LOAD(var)
Atomic load.
Definition atomic.h:175
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
VALUE rb_class_new(VALUE super)
Creates a new, anonymous class.
Definition class.c:976
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition class.c:1627
#define Qundef
Old name of RUBY_Qundef.
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define Qnil
Old name of RUBY_Qnil.
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1416
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:466
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1117
VALUE rb_funcall_passing_block_kw(VALUE recv, ID mid, int argc, const VALUE *argv, int kw_splat)
Identical to rb_funcallv_passing_block(), except you can specify how to handle the last element of th...
Definition vm_eval.c:1187
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
int rb_respond_to(VALUE obj, ID mid)
Queries if the object responds to the method.
Definition vm_method.c:3457
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:686
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:285
VALUE rb_io_timeout(VALUE io)
Get the timeout associated with the specified io object.
Definition io.c:857
@ RUBY_IO_READABLE
IO::READABLE
Definition io.h:97
@ RUBY_IO_WRITABLE
IO::WRITABLE
Definition io.h:98
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
Definition thread.c:1593
#define RB_UINT2NUM
Just another name of rb_uint2num_inline.
Definition int.h:39
#define RB_INT2NUM
Just another name of rb_int2num_inline.
Definition int.h:37
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define OFFT2NUM
Converts a C's off_t into an instance of rb_cInteger.
Definition off_t.h:33
#define PIDT2NUM
Converts a C's pid_t into an instance of rb_cInteger.
Definition pid_t.h:28
#define RUBY_DEFAULT_FREE
This is a value you can set to RData::dfree.
Definition rdata.h:78
#define RUBY_TYPED_FREE_IMMEDIATELY
Macros to see if each corresponding flag is defined.
Definition rtypeddata.h:119
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:736
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:561
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
Scheduler APIs.
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void *(*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
Defer the execution of the passed function to the scheduler.
Definition scheduler.c:1091
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:458
VALUE rb_fiber_scheduler_io_pread_memory(VALUE scheduler, VALUE io, rb_off_t from, void *base, size_t size, size_t length)
Non-blocking pread from the passed IO using a native buffer.
Definition scheduler.c:993
VALUE rb_fiber_scheduler_make_timeout(struct timeval *timeout)
Converts the passed timeout to an expression that rb_fiber_scheduler_block() etc.
Definition scheduler.c:509
VALUE rb_fiber_scheduler_io_wait_readable(VALUE scheduler, VALUE io)
Non-blocking wait until the passed IO is ready for reading.
Definition scheduler.c:743
VALUE rb_fiber_scheduler_io_read_memory(VALUE scheduler, VALUE io, void *base, size_t size, size_t length)
Non-blocking read from the passed IO using a native buffer.
Definition scheduler.c:969
VALUE rb_fiber_scheduler_io_pwrite(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
Non-blocking write to the passed IO at the specified offset.
Definition scheduler.c:949
VALUE rb_fiber_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE *argv)
Identical to rb_fiber_scheduler_kernel_sleep(), except it can pass multiple arguments.
Definition scheduler.c:536
VALUE rb_fiber_scheduler_fiber_interrupt(VALUE scheduler, VALUE fiber, VALUE exception)
Interrupt a fiber by raising an exception.
Definition scheduler.c:1131
VALUE rb_fiber_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)
Non-blocking version of rb_io_wait().
Definition scheduler.c:729
VALUE rb_fiber_scheduler_io_select(VALUE scheduler, VALUE readables, VALUE writables, VALUE exceptables, VALUE timeout)
Non-blocking version of IO.select.
Definition scheduler.c:764
VALUE rb_fiber_scheduler_io_read(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
Non-blocking read from the passed IO.
Definition scheduler.c:817
int rb_fiber_scheduler_blocking_operation_cancel(rb_fiber_scheduler_blocking_operation_t *blocking_operation)
Cancel a blocking operation.
Definition scheduler.c:1193
VALUE rb_fiber_scheduler_io_selectv(VALUE scheduler, int argc, VALUE *argv)
Non-blocking version of IO.select, argv variant.
Definition scheduler.c:773
VALUE rb_fiber_scheduler_process_wait(VALUE scheduler, rb_pid_t pid, int flags)
Non-blocking waitpid.
Definition scheduler.c:623
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
Definition scheduler.c:647
int rb_fiber_scheduler_blocking_operation_execute(rb_fiber_scheduler_blocking_operation_t *blocking_operation)
Execute blocking operation from handle (GVL not required).
Definition scheduler.c:193
VALUE rb_fiber_scheduler_io_pread(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
Non-blocking read from the passed IO at the specified offset.
Definition scheduler.c:856
VALUE rb_fiber_scheduler_io_pwrite_memory(VALUE scheduler, VALUE io, rb_off_t from, const void *base, size_t size, size_t length)
Non-blocking pwrite to the passed IO using a native buffer.
Definition scheduler.c:1005
VALUE rb_fiber_scheduler_io_write(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
Non-blocking write to the passed IO.
Definition scheduler.c:909
VALUE rb_fiber_scheduler_close(VALUE scheduler)
Closes the passed scheduler object.
Definition scheduler.c:487
rb_fiber_scheduler_blocking_operation_t * rb_fiber_scheduler_blocking_operation_extract(VALUE self)
Extract the blocking operation handle from a BlockingOperationRuby object.
Definition scheduler.c:178
VALUE rb_fiber_scheduler_current_for_thread(VALUE thread)
Identical to rb_fiber_scheduler_current(), except it queries for that of the passed thread value inst...
Definition scheduler.c:466
VALUE rb_fiber_scheduler_kernel_sleep(VALUE scheduler, VALUE duration)
Non-blocking sleep.
Definition scheduler.c:530
VALUE rb_fiber_scheduler_address_resolve(VALUE scheduler, VALUE hostname)
Non-blocking DNS lookup.
Definition scheduler.c:1065
VALUE rb_fiber_scheduler_yield(VALUE scheduler)
Yield to the scheduler, to be resumed on the next scheduling cycle.
Definition scheduler.c:548
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
Definition scheduler.c:420
VALUE rb_fiber_scheduler_io_write_memory(VALUE scheduler, VALUE io, const void *base, size_t size, size_t length)
Non-blocking write to the passed IO using a native buffer.
Definition scheduler.c:981
VALUE rb_fiber_scheduler_current_for_threadptr(struct rb_thread_struct *thread)
Identical to rb_fiber_scheduler_current_for_thread(), except it expects a threadptr instead of a thre...
Definition scheduler.c:471
VALUE rb_fiber_scheduler_io_wait_writable(VALUE scheduler, VALUE io)
Non-blocking wait until the passed IO is ready for writing.
Definition scheduler.c:749
VALUE rb_fiber_scheduler_io_close(VALUE scheduler, VALUE io)
Non-blocking close the given IO.
Definition scheduler.c:1025
VALUE rb_fiber_scheduler_get(void)
Queries the current scheduler of the current thread that is calling this function.
Definition scheduler.c:370
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:666
VALUE rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
Create and schedule a non-blocking fiber.
Definition scheduler.c:1178
@ RUBY_Qundef
Represents so-called undef.
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:211
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40