Ruby 4.0.0dev (2025-12-22 revision cfb324e9d12d0d40a8f9052b97a860737b78224f)
scheduler.c (cfb324e9d12d0d40a8f9052b97a860737b78224f)
/**********************************************************************

  scheduler.c

  $Author$

  Copyright (C) 2020 Samuel Grant Dawson Williams

**********************************************************************/

#include "vm_core.h"
#include "eval_intern.h"
#include "ruby/fiber/scheduler.h"
#include "ruby/io.h"
#include "ruby/io/buffer.h"

#include "ruby/thread.h"

// For `ruby_thread_has_gvl_p`:
#include "internal/thread.h"

// For atomic operations:
#include "ruby_atomic.h"

static ID id_close;
static ID id_scheduler_close;

static ID id_block;
static ID id_unblock;

static ID id_yield;

static ID id_timeout_after;
static ID id_kernel_sleep;
static ID id_process_wait;

static ID id_io_read, id_io_pread;
static ID id_io_write, id_io_pwrite;
static ID id_io_wait;
static ID id_io_select;
static ID id_io_close;

static ID id_address_resolve;

static ID id_blocking_operation_wait;
static ID id_fiber_interrupt;

static ID id_fiber_schedule;

// Our custom blocking operation class
static VALUE rb_cFiberSchedulerBlockingOperation;

/*
 * Custom structure representing a blocking operation.
 * It replaces the use of Ruby procs to avoid use-after-free issues
 * and provides a cleaner C API for native work pools.
 */

typedef enum {
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED,    // Submitted but not started
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING, // Currently running
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED, // Finished (success or error)
    RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED  // Cancelled
} rb_fiber_blocking_operation_status_t;

struct rb_fiber_scheduler_blocking_operation {
    void *(*function)(void *);
    void *data;

    rb_unblock_function_t *unblock_function;
    void *data2;

    int flags;

    struct rb_fiber_scheduler_blocking_operation_state *state;

    // Execution status
    volatile rb_atomic_t status;
};

static void
blocking_operation_mark(void *ptr)
{
    // No Ruby objects to mark in our struct
}

static void
blocking_operation_free(void *ptr)
{
    rb_fiber_scheduler_blocking_operation_t *blocking_operation = (rb_fiber_scheduler_blocking_operation_t *)ptr;
    ruby_xfree(blocking_operation);
}

static size_t
blocking_operation_memsize(const void *ptr)
{
    return sizeof(rb_fiber_scheduler_blocking_operation_t);
}

static const rb_data_type_t blocking_operation_data_type = {
    "Fiber::Scheduler::BlockingOperation",
    {
        blocking_operation_mark,
        blocking_operation_free,
        blocking_operation_memsize,
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};

/*
 * Allocate a new blocking operation
 */
static VALUE
blocking_operation_alloc(VALUE klass)
{
    rb_fiber_scheduler_blocking_operation_t *blocking_operation;
    VALUE obj = TypedData_Make_Struct(klass, rb_fiber_scheduler_blocking_operation_t, &blocking_operation_data_type, blocking_operation);

    blocking_operation->function = NULL;
    blocking_operation->data = NULL;
    blocking_operation->unblock_function = NULL;
    blocking_operation->data2 = NULL;
    blocking_operation->flags = 0;
    blocking_operation->state = NULL;
    blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;

    return obj;
}

/*
 * Get the blocking operation struct from a Ruby object
 */
static rb_fiber_scheduler_blocking_operation_t *
get_blocking_operation(VALUE obj)
{
    rb_fiber_scheduler_blocking_operation_t *blocking_operation;
    TypedData_Get_Struct(obj, rb_fiber_scheduler_blocking_operation_t, &blocking_operation_data_type, blocking_operation);
    return blocking_operation;
}

/*
 * Document-method: Fiber::Scheduler::BlockingOperation#call
 *
 * Execute the blocking operation. This method releases the GVL, calls
 * the blocking function, and saves the resulting errno value in the
 * associated state object.
 *
 * Returns nil. The actual result is stored in the associated state object.
 */
static VALUE
blocking_operation_call(VALUE self)
{
    rb_fiber_scheduler_blocking_operation_t *blocking_operation = get_blocking_operation(self);

    if (blocking_operation->status != RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) {
        rb_raise(rb_eRuntimeError, "Blocking operation has already been executed!");
    }

    if (blocking_operation->function == NULL) {
        rb_raise(rb_eRuntimeError, "Blocking operation has no function to execute!");
    }

    if (blocking_operation->state == NULL) {
        rb_raise(rb_eRuntimeError, "Blocking operation has no result object!");
    }

    // Mark as executing
    blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING;

    // Execute the blocking operation without the GVL
    blocking_operation->state->result = rb_nogvl(blocking_operation->function, blocking_operation->data,
                                                 blocking_operation->unblock_function, blocking_operation->data2,
                                                 blocking_operation->flags);
    blocking_operation->state->saved_errno = rb_errno();

    // Mark as completed
    blocking_operation->status = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED;

    return Qnil;
}

/*
 * C API: Extract blocking operation struct from Ruby object (GVL required)
 *
 * This function safely extracts the opaque struct from a BlockingOperation VALUE
 * while holding the GVL. The returned pointer can be passed to worker threads
 * and used with rb_fiber_scheduler_blocking_operation_execute.
 *
 * Returns the opaque struct pointer on success, NULL on error.
 * Must be called while holding the GVL.
 */
rb_fiber_scheduler_blocking_operation_t *
rb_fiber_scheduler_blocking_operation_extract(VALUE self)
{
    return get_blocking_operation(self);
}

/*
 * C API: Execute blocking operation from opaque struct (GVL not required)
 *
 * This function executes a blocking operation using the opaque struct pointer
 * obtained from rb_fiber_scheduler_blocking_operation_extract.
 * It can be called from native threads without holding the GVL.
 *
 * Returns 0 on success, -1 on error.
 */
int
rb_fiber_scheduler_blocking_operation_execute(rb_fiber_scheduler_blocking_operation_t *blocking_operation)
{
    if (blocking_operation == NULL) {
        return -1;
    }

    if (blocking_operation->function == NULL || blocking_operation->state == NULL) {
        return -1; // Invalid blocking operation
    }

    // Resolve sentinel values for unblock_function and data2:
    rb_thread_resolve_unblock_function(&blocking_operation->unblock_function, &blocking_operation->data2, GET_THREAD());

    // Atomically check if we can transition from QUEUED to EXECUTING
    rb_atomic_t expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED;
    if (RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING) != expected) {
        // Already cancelled or in the wrong state
        return -1;
    }

    // Now we're executing - call the function
    blocking_operation->state->result = blocking_operation->function(blocking_operation->data);
    blocking_operation->state->saved_errno = errno;

    // Atomically transition to completed (unless cancelled during execution)
    expected = RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING;
    if (RUBY_ATOMIC_CAS(blocking_operation->status, expected, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED) == expected) {
        // Successfully completed
        return 0;
    } else {
        // Was cancelled during execution
        blocking_operation->state->saved_errno = EINTR;
        return -1;
    }
}

/*
 * C API: Create a new blocking operation
 *
 * This creates a blocking operation that can be executed by native work pools.
 * The blocking operation holds references to the function and data safely.
 */
VALUE
rb_fiber_scheduler_blocking_operation_new(void *(*function)(void *), void *data,
                                          rb_unblock_function_t *unblock_function, void *data2,
                                          int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
{
    VALUE self = blocking_operation_alloc(rb_cFiberSchedulerBlockingOperation);
    rb_fiber_scheduler_blocking_operation_t *blocking_operation = get_blocking_operation(self);

    blocking_operation->function = function;
    blocking_operation->data = data;
    blocking_operation->unblock_function = unblock_function;
    blocking_operation->data2 = data2;
    blocking_operation->flags = flags;
    blocking_operation->state = state;

    return self;
}

/*
 *
 * Document-class: Fiber::Scheduler
 *
 * This is not an existing class, but documentation of the interface that a Scheduler
 * object should comply with in order to be used as an argument to Fiber.set_scheduler and
 * handle non-blocking fibers. See also the "Non-blocking fibers" section in the Fiber class
 * docs for explanations of some concepts.
 *
 * The scheduler's behavior and usage are expected to be as follows:
 *
 * * When the execution in the non-blocking Fiber reaches some blocking operation (like
 *   sleep, wait for a process, or a non-ready I/O), it calls one of the scheduler's
 *   hook methods, listed below.
 * * The scheduler somehow registers what the current fiber is waiting on, and yields control
 *   to other fibers with Fiber.yield (so the fiber is suspended while waiting,
 *   and other fibers in the same thread can run).
 * * At the end of the current thread's execution, the scheduler's method #scheduler_close is called.
 * * The scheduler runs into a wait loop, checking all the blocked fibers (which it has
 *   registered on hook calls) and resuming them when the awaited resource is ready
 *   (e.g. I/O ready or sleep time elapsed).
 *
 * This way concurrent execution will be achieved transparently for every
 * individual Fiber's code.
 *
 * Scheduler implementations are provided by gems, like
 * Async[https://github.com/socketry/async].
 *
 * Hook methods are:
 *
 * * #io_wait, #io_read, #io_write, #io_pread, #io_pwrite, #io_select, and #io_close
 * * #process_wait
 * * #kernel_sleep
 * * #timeout_after
 * * #address_resolve
 * * #block and #unblock
 * * #blocking_operation_wait
 * * (the list is expanded as Ruby developers make more methods non-blocking)
 *
 * When not specified otherwise, the hook implementations are mandatory: if they are not
 * implemented, the methods trying to call the hook will fail. To provide backward compatibility,
 * in the future hooks will be optional (if they are not implemented, e.g. because the scheduler
 * was created for an older Ruby version, the code which needs the hook will not fail,
 * and will just behave in a blocking fashion).
 *
 * It is also strongly recommended that the scheduler implements the #fiber method, which is
 * delegated to by Fiber.schedule.
 *
 * A sample _toy_ implementation of the scheduler can be found in Ruby's code, in
 * <tt>test/fiber/scheduler.rb</tt>.
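 *
 * From the user's point of view, a scheduler is installed per thread and then drives every
 * fiber created with Fiber.schedule on that thread. A minimal usage sketch
 * (+SomeScheduler+ is a placeholder for any conforming implementation, e.g.
 * <tt>Async::Scheduler</tt>):
 *
 *     Thread.new do
 *       Fiber.set_scheduler(SomeScheduler.new)
 *
 *       Fiber.schedule do
 *         sleep 1                           # calls scheduler#kernel_sleep
 *       end
 *
 *       Fiber.schedule do
 *         TCPSocket.new("example.com", 80)  # may call #address_resolve and #io_wait
 *       end
 *     end.join                              # the scheduler is closed when the thread exits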
 *
 */
void
Init_Fiber_Scheduler(void)
{
    id_close = rb_intern_const("close");
    id_scheduler_close = rb_intern_const("scheduler_close");

    id_block = rb_intern_const("block");
    id_unblock = rb_intern_const("unblock");
    id_yield = rb_intern_const("yield");

    id_timeout_after = rb_intern_const("timeout_after");
    id_kernel_sleep = rb_intern_const("kernel_sleep");
    id_process_wait = rb_intern_const("process_wait");

    id_io_read = rb_intern_const("io_read");
    id_io_pread = rb_intern_const("io_pread");
    id_io_write = rb_intern_const("io_write");
    id_io_pwrite = rb_intern_const("io_pwrite");

    id_io_wait = rb_intern_const("io_wait");
    id_io_select = rb_intern_const("io_select");
    id_io_close = rb_intern_const("io_close");

    id_address_resolve = rb_intern_const("address_resolve");

    id_blocking_operation_wait = rb_intern_const("blocking_operation_wait");
    id_fiber_interrupt = rb_intern_const("fiber_interrupt");

    id_fiber_schedule = rb_intern_const("fiber");

    // Define an anonymous BlockingOperation class for internal use only.
    // It is completely hidden from Ruby code and cannot be instantiated directly.
    rb_cFiberSchedulerBlockingOperation = rb_class_new(rb_cObject);
    rb_define_alloc_func(rb_cFiberSchedulerBlockingOperation, blocking_operation_alloc);
    rb_define_method(rb_cFiberSchedulerBlockingOperation, "call", blocking_operation_call, 0);

    // Register the anonymous class as a GC root so it doesn't get collected:
    rb_gc_register_mark_object(rb_cFiberSchedulerBlockingOperation);

#if 0 /* for RDoc */
    rb_cFiberScheduler = rb_define_class_under(rb_cFiber, "Scheduler", rb_cObject);
    rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_close, 0);
    rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_process_wait, 2);
    rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_io_wait, 3);
    rb_define_method(rb_cFiberScheduler, "io_read", rb_fiber_scheduler_io_read, 4);
    rb_define_method(rb_cFiberScheduler, "io_write", rb_fiber_scheduler_io_write, 4);
    rb_define_method(rb_cFiberScheduler, "io_pread", rb_fiber_scheduler_io_pread, 5);
    rb_define_method(rb_cFiberScheduler, "io_pwrite", rb_fiber_scheduler_io_pwrite, 5);
    rb_define_method(rb_cFiberScheduler, "io_select", rb_fiber_scheduler_io_select, 4);
    rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_kernel_sleep, 1);
    rb_define_method(rb_cFiberScheduler, "address_resolve", rb_fiber_scheduler_address_resolve, 1);
    rb_define_method(rb_cFiberScheduler, "timeout_after", rb_fiber_scheduler_timeout_after, 3);
    rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_block, 2);
    rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_unblock, 2);
    rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler_fiber, -2);
    rb_define_method(rb_cFiberScheduler, "blocking_operation_wait", rb_fiber_scheduler_blocking_operation_wait, -2);
#endif
}

VALUE
rb_fiber_scheduler_get(void)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    rb_thread_t *thread = GET_THREAD();
    RUBY_ASSERT(thread);

    return thread->scheduler;
}

static void
verify_interface(VALUE scheduler)
{
    if (!rb_respond_to(scheduler, id_block)) {
        rb_raise(rb_eArgError, "Scheduler must implement #block");
    }

    if (!rb_respond_to(scheduler, id_unblock)) {
        rb_raise(rb_eArgError, "Scheduler must implement #unblock");
    }

    if (!rb_respond_to(scheduler, id_kernel_sleep)) {
        rb_raise(rb_eArgError, "Scheduler must implement #kernel_sleep");
    }

    if (!rb_respond_to(scheduler, id_io_wait)) {
        rb_raise(rb_eArgError, "Scheduler must implement #io_wait");
    }

    if (!rb_respond_to(scheduler, id_fiber_interrupt)) {
        rb_warn("Scheduler should implement #fiber_interrupt");
    }
}

static VALUE
fiber_scheduler_close(VALUE scheduler)
{
    return rb_fiber_scheduler_close(scheduler);
}

static VALUE
fiber_scheduler_close_ensure(VALUE _thread)
{
    rb_thread_t *thread = (rb_thread_t*)_thread;
    thread->scheduler = Qnil;

    return Qnil;
}

VALUE
rb_fiber_scheduler_set(VALUE scheduler)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    rb_thread_t *thread = GET_THREAD();
    RUBY_ASSERT(thread);

    if (scheduler != Qnil) {
        verify_interface(scheduler);
    }

    // We invoke Scheduler#close when setting it to something else, to ensure
    // the previous scheduler runs to completion before changing the scheduler.
    // That way, we do not need to consider interactions, e.g., of a Fiber from
    // the previous scheduler with the new scheduler.
    if (thread->scheduler != Qnil) {
        // rb_fiber_scheduler_close(thread->scheduler);
        rb_ensure(fiber_scheduler_close, thread->scheduler, fiber_scheduler_close_ensure, (VALUE)thread);
    }

    thread->scheduler = scheduler;

    return thread->scheduler;
}

static VALUE
fiber_scheduler_current_for_threadptr(rb_thread_t *thread)
{
    RUBY_ASSERT(thread);

    if (thread->blocking == 0) {
        return thread->scheduler;
    }
    else {
        return Qnil;
    }
}

VALUE
rb_fiber_scheduler_current(void)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    return fiber_scheduler_current_for_threadptr(GET_THREAD());
}

// This function is allowed to be called without holding the GVL.
VALUE
rb_fiber_scheduler_current_for_thread(VALUE thread)
{
    return fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread));
}

VALUE
rb_fiber_scheduler_current_for_threadptr(rb_thread_t *thread)
{
    return fiber_scheduler_current_for_threadptr(thread);
}

/*
 *
 * Document-method: Fiber::Scheduler#close
 *
 * Called when the current thread exits. The scheduler is expected to implement this
 * method in order to allow all waiting fibers to finalize their execution.
 *
 * The suggested pattern is to implement the main event loop in the #close method.
 *
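 * A minimal sketch, assuming the scheduler exposes a +run+ method that drives its own
 * event loop until no waiting fibers remain (+run+ is illustrative, not part of the
 * interface):
 *
 *     def close
 *       run
 *     end
 *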
 */
VALUE
rb_fiber_scheduler_close(VALUE scheduler)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    VALUE result;

    // The reason for calling `scheduler_close` before calling `close` is for
    // legacy schedulers which implement `close` and expect the user to call
    // it. Subsequently, that method would call `Fiber.set_scheduler(nil)`
    // which should call `scheduler_close`. If it were to call `close`, it
    // would create an infinite loop.

    result = rb_check_funcall(scheduler, id_scheduler_close, 0, NULL);
    if (!UNDEF_P(result)) return result;

    result = rb_check_funcall(scheduler, id_close, 0, NULL);
    if (!UNDEF_P(result)) return result;

    return Qnil;
}

VALUE
rb_fiber_scheduler_make_timeout(struct timeval *timeout)
{
    if (timeout) {
        return rb_float_new((double)timeout->tv_sec + (0.000001 * timeout->tv_usec));
    }

    return Qnil;
}

/*
 * Document-method: Fiber::Scheduler#kernel_sleep
 * call-seq: kernel_sleep(duration = nil)
 *
 * Invoked by Kernel#sleep and Mutex#sleep and is expected to provide
 * an implementation of sleeping in a non-blocking way. An implementation might
 * register the current fiber in some list of "which fiber waits until what
 * moment", call Fiber.yield to pass control, and then in #close resume
 * the fibers whose wait period has elapsed.
 *
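 * A toy sketch along those lines (+@waiting+ and +current_time+ are illustrative
 * details of the scheduler, not part of the interface):
 *
 *     def kernel_sleep(duration = nil)
 *       if duration
 *         @waiting[Fiber.current] = current_time + duration
 *       end
 *
 *       Fiber.yield # the run loop in #close resumes us once the time has elapsed
 *     end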
 */
VALUE
rb_fiber_scheduler_kernel_sleep(VALUE scheduler, VALUE timeout)
{
    return rb_funcall(scheduler, id_kernel_sleep, 1, timeout);
}

VALUE
rb_fiber_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_funcallv(scheduler, id_kernel_sleep, argc, argv);
}

VALUE
rb_fiber_scheduler_yield(VALUE scheduler)
{
    // First try to call the scheduler's yield method, if it exists:
    VALUE result = rb_check_funcall(scheduler, id_yield, 0, NULL);
    if (!UNDEF_P(result)) return result;

    // Otherwise, we can emulate yield by sleeping for 0 seconds:
    return rb_fiber_scheduler_kernel_sleep(scheduler, RB_INT2NUM(0));
}

#if 0
/*
 * Document-method: Fiber::Scheduler#timeout_after
 * call-seq: timeout_after(duration, exception_class, *exception_arguments, &block) -> result of block
 *
 * Invoked by Timeout.timeout to execute the given +block+ within the given
 * +duration+. It can also be invoked directly by the scheduler or user code.
 *
 * Attempt to limit the execution time of a given +block+ to the given
 * +duration+ if possible. When a non-blocking operation causes the +block+'s
 * execution time to exceed the specified +duration+, that non-blocking
 * operation should be interrupted by raising the specified +exception_class+
 * constructed with the given +exception_arguments+.
 *
 * General execution timeouts are often considered risky. This implementation
 * will only interrupt non-blocking operations. This is by design, because it's
 * expected that non-blocking operations can fail for a variety of
 * unpredictable reasons, so applications should already be robust in handling
 * these conditions and, by implication, timeouts.
 *
 * However, as a result of this design, if the +block+ does not invoke any
 * non-blocking operations, it will be impossible to interrupt it. If you
 * desire to provide predictable points for timeouts, consider adding
 * +sleep(0)+.
 *
 * If the block is executed successfully, its result will be returned.
 *
 * The exception will typically be raised using Fiber#raise.
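 *
 * A rough sketch (+schedule_timer+ and +cancel_timer+ are hypothetical helpers of the
 * scheduler, not part of the interface):
 *
 *     def timeout_after(duration, exception_class, *exception_arguments, &block)
 *       fiber = Fiber.current
 *
 *       timer = schedule_timer(duration) do
 *         fiber.raise(exception_class, *exception_arguments) if fiber.alive?
 *       end
 *
 *       yield duration
 *     ensure
 *       cancel_timer(timer)
 *     end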
 */
VALUE
rb_fiber_scheduler_timeout_after(VALUE scheduler, VALUE timeout, VALUE exception, VALUE message)
{
    VALUE arguments[] = {
        timeout, exception, message
    };

    return rb_check_funcall(scheduler, id_timeout_after, 3, arguments);
}

VALUE
rb_fiber_scheduler_timeout_afterv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_check_funcall(scheduler, id_timeout_after, argc, argv);
}
#endif

/*
 * Document-method: Fiber::Scheduler#process_wait
 * call-seq: process_wait(pid, flags)
 *
 * Invoked by Process::Status.wait in order to wait for a specified process.
 * See that method's description for a description of the arguments.
 *
 * Suggested minimal implementation:
 *
 *     Thread.new do
 *       Process::Status.wait(pid, flags)
 *     end.value
 *
 * This hook is optional: if it is not present in the current scheduler,
 * Process::Status.wait will behave as a blocking method.
 *
 * Expected to return a Process::Status instance.
 */
VALUE
rb_fiber_scheduler_process_wait(VALUE scheduler, rb_pid_t pid, int flags)
{
    VALUE arguments[] = {
        PIDT2NUM(pid), RB_INT2NUM(flags)
    };

    return rb_check_funcall(scheduler, id_process_wait, 2, arguments);
}

/*
 * Document-method: Fiber::Scheduler#block
 * call-seq: block(blocker, timeout = nil)
 *
 * Invoked by methods like Thread.join, and by Mutex, to signify that the current
 * Fiber is blocked until further notice (e.g. #unblock) or until +timeout+ has
 * elapsed.
 *
 * +blocker+ is what we are waiting on, informational only (for debugging and
 * logging). There is no guarantee about its value.
 *
 * Expected to return a boolean, specifying whether the blocking operation was
 * successful or not.
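 *
 * A toy sketch (+@blocked+, +@waiting+ and +current_time+ are illustrative details of
 * the scheduler, not part of the interface):
 *
 *     def block(blocker, timeout = nil)
 *       @blocked[Fiber.current] = blocker
 *       @waiting[Fiber.current] = current_time + timeout if timeout
 *
 *       Fiber.yield # resumed by #unblock, or by the run loop once the timeout elapsed
 *     ensure
 *       @blocked.delete(Fiber.current)
 *       @waiting.delete(Fiber.current)
 *     end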
 */
VALUE
rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
{
    return rb_funcall(scheduler, id_block, 2, blocker, timeout);
}

/*
 * Document-method: Fiber::Scheduler#unblock
 * call-seq: unblock(blocker, fiber)
 *
 * Invoked to wake up a Fiber previously blocked with #block (for example, Mutex#lock
 * calls #block and Mutex#unlock calls #unblock). The scheduler should use
 * the +fiber+ parameter to understand which fiber is unblocked.
 *
 * +blocker+ is what was awaited, but it is informational only (for debugging
 * and logging), and it is not guaranteed to be the same value as the +blocker+ for
 * #block.
 *
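 * Note that this method may be called from another thread, so a real implementation
 * has to be thread-safe. A toy sketch (+@ready+ and +@selector+ are illustrative
 * details of the scheduler, not part of the interface):
 *
 *     def unblock(blocker, fiber)
 *       @ready << fiber     # the run loop will resume this fiber
 *       @selector&.wakeup   # interrupt a pending wait in the event loop, if any
 *     end
 *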
 */
VALUE
rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
{
    RUBY_ASSERT(rb_obj_is_fiber(fiber));

    VALUE result;
    enum ruby_tag_type state;

    // `rb_fiber_scheduler_unblock` can be called from points where `errno` is expected to be preserved. Therefore, we should save and restore it. For example `io_binwrite` calls `rb_fiber_scheduler_unblock` and if `errno` is reset to 0 by user code, it will break the error handling in `io_write`.
    //
    // If we explicitly preserve `errno` in `io_binwrite` and other similar functions (e.g. by returning it), this code is no longer needed. I hope in the future we will be able to remove it.
    int saved_errno = errno;

    // We must prevent interrupts while invoking the unblock method, because otherwise fibers can be left permanently blocked if an interrupt occurs during the execution of user code. See also `rb_fiber_scheduler_fiber_interrupt`.
    rb_execution_context_t *ec = GET_EC();
    int saved_interrupt_mask = ec->interrupt_mask;
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        result = rb_funcall(scheduler, id_unblock, 2, blocker, fiber);
    }
    EC_POP_TAG();

    ec->interrupt_mask = saved_interrupt_mask;

    if (state) {
        EC_JUMP_TAG(ec, state);
    }

    RUBY_VM_CHECK_INTS(ec);

    errno = saved_errno;

    return result;
}

/*
 * Document-method: Fiber::Scheduler#io_wait
 * call-seq: io_wait(io, events, timeout)
 *
 * Invoked by IO#wait, IO#wait_readable, and IO#wait_writable to ask whether the
 * specified descriptor is ready for the specified events within
 * the specified +timeout+.
 *
 * +events+ is a bit mask of <tt>IO::READABLE</tt>, <tt>IO::WRITABLE</tt>, and
 * <tt>IO::PRIORITY</tt>.
 *
 * A suggested implementation should register which Fiber is waiting for which
 * resources and immediately call Fiber.yield to pass control to other
 * fibers. Then, in the #close method, the scheduler might dispatch all the
 * I/O resources to the fibers waiting for them.
 *
 * Expected to return the subset of events that are ready immediately.
 *
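 * A toy sketch following that suggestion (+@readable+ and +@writable+ are illustrative
 * tables maintained by the scheduler's run loop, not part of the interface):
 *
 *     def io_wait(io, events, timeout)
 *       fiber = Fiber.current
 *       @readable[io] = fiber if events.anybits?(IO::READABLE)
 *       @writable[io] = fiber if events.anybits?(IO::WRITABLE)
 *
 *       Fiber.yield # the run loop resumes us with the subset of ready events
 *     ensure
 *       @readable.delete(io)
 *       @writable.delete(io)
 *     end
 *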
 */
static VALUE
fiber_scheduler_io_wait(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_wait, 3, arguments + 1);
}

VALUE
rb_fiber_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)
{
    VALUE arguments[] = {
        scheduler, io, events, timeout
    };

    if (rb_respond_to(scheduler, id_fiber_interrupt)) {
        return rb_thread_io_blocking_operation(io, fiber_scheduler_io_wait, (VALUE)&arguments);
    } else {
        return fiber_scheduler_io_wait((VALUE)&arguments);
    }
}

VALUE
rb_fiber_scheduler_io_wait_readable(VALUE scheduler, VALUE io)
{
    return rb_fiber_scheduler_io_wait(scheduler, io, RB_UINT2NUM(RUBY_IO_READABLE), rb_io_timeout(io));
}

VALUE
rb_fiber_scheduler_io_wait_writable(VALUE scheduler, VALUE io)
{
    return rb_fiber_scheduler_io_wait(scheduler, io, RB_UINT2NUM(RUBY_IO_WRITABLE), rb_io_timeout(io));
}

/*
 * Document-method: Fiber::Scheduler#io_select
 * call-seq: io_select(readables, writables, exceptables, timeout)
 *
 * Invoked by IO.select to ask whether the specified descriptors are ready for
 * the specified events within the specified +timeout+.
 *
 * Expected to return a 3-tuple of arrays of the IOs that are ready.
 *
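 * A simple conforming implementation can delegate the whole call to a helper thread,
 * so that the scheduler's own event loop is never blocked, for example:
 *
 *     def io_select(...)
 *       Thread.new { IO.select(...) }.value
 *     end
 *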
 */
VALUE rb_fiber_scheduler_io_select(VALUE scheduler, VALUE readables, VALUE writables, VALUE exceptables, VALUE timeout)
{
    VALUE arguments[] = {
        readables, writables, exceptables, timeout
    };

    return rb_fiber_scheduler_io_selectv(scheduler, 4, arguments);
}

VALUE rb_fiber_scheduler_io_selectv(VALUE scheduler, int argc, VALUE *argv)
{
    // I wondered about extracting argv, and checking if there is only a single
    // IO instance, and instead calling `io_wait`. However, it would require a
    // decent amount of work and it would be hard to preserve the exact
    // semantics of IO.select.

    return rb_check_funcall(scheduler, id_io_select, argc, argv);
}

/*
 * Document-method: Fiber::Scheduler#io_read
 * call-seq: io_read(io, buffer, length, offset) -> read length or -errno
 *
 * Invoked by IO#read or IO::Buffer#read to read +length+ bytes from +io+ into a
 * specified +buffer+ (see IO::Buffer) at the given +offset+.
 *
 * The +length+ argument is the "minimum length to be read". If the IO buffer
 * size is 8KiB, but the +length+ is +1024+ (1KiB), up to 8KiB might be read,
 * but at least 1KiB will be. Generally, the only case where less data than
 * +length+ will be read is if there is an error reading the data.
 *
 * Specifying a +length+ of 0 is valid and means try reading at least once and
 * return any available data.
 *
 * A suggested implementation should try to read from +io+ in a non-blocking
 * manner and call #io_wait if the +io+ is not ready (which will yield control
 * to other fibers).
 *
 * See IO::Buffer for an interface available to return data.
 *
 * Expected to return the number of bytes read, or, in case of an error,
 * <tt>-errno</tt> (a negated number corresponding to the system's error code).
 *
 * The method should be considered _experimental_.
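 *
 * A simplified sketch of the suggested approach (it ignores the <tt>length == 0</tt>
 * case and assumes the buffer is large enough for the requested read):
 *
 *     def io_read(io, buffer, length, offset)
 *       total = 0
 *
 *       while total < length
 *         case chunk = io.read_nonblock(buffer.size - (offset + total), exception: false)
 *         when :wait_readable then io_wait(io, IO::READABLE, nil)
 *         when nil            then break # EOF
 *         else
 *           buffer.set_string(chunk, offset + total)
 *           total += chunk.bytesize
 *         end
 *       end
 *
 *       total
 *     rescue SystemCallError => error
 *       -error.errno
 *     end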
 */
static VALUE
fiber_scheduler_io_read(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_read, 4, arguments + 1);
}

VALUE
rb_fiber_scheduler_io_read(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
{
    if (!rb_respond_to(scheduler, id_io_read)) {
        return RUBY_Qundef;
    }

    VALUE arguments[] = {
        scheduler, io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
    };

    if (rb_respond_to(scheduler, id_fiber_interrupt)) {
        return rb_thread_io_blocking_operation(io, fiber_scheduler_io_read, (VALUE)&arguments);
    } else {
        return fiber_scheduler_io_read((VALUE)&arguments);
    }
}

/*
 * Document-method: Fiber::Scheduler#io_pread
 * call-seq: io_pread(io, buffer, from, length, offset) -> read length or -errno
 *
 * Invoked by IO#pread or IO::Buffer#pread to read +length+ bytes from +io+
 * at offset +from+ into a specified +buffer+ (see IO::Buffer) at the given
 * +offset+.
 *
 * This method is semantically the same as #io_read, but it allows specifying
 * the offset to read from and is often better for asynchronous IO on the same
 * file.
 *
 * The method should be considered _experimental_.
 */
static VALUE
fiber_scheduler_io_pread(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_pread, 5, arguments + 1);
}

VALUE
rb_fiber_scheduler_io_pread(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
{
    if (!rb_respond_to(scheduler, id_io_pread)) {
        return RUBY_Qundef;
    }

    VALUE arguments[] = {
        scheduler, io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
    };

    if (rb_respond_to(scheduler, id_fiber_interrupt)) {
        return rb_thread_io_blocking_operation(io, fiber_scheduler_io_pread, (VALUE)&arguments);
    } else {
        return fiber_scheduler_io_pread((VALUE)&arguments);
    }
}

/*
 * Document-method: Fiber::Scheduler#io_write
 * call-seq: io_write(io, buffer, length, offset) -> written length or -errno
 *
 * Invoked by IO#write or IO::Buffer#write to write +length+ bytes to +io+ from
 * a specified +buffer+ (see IO::Buffer) at the given +offset+.
 *
 * The +length+ argument is the "minimum length to be written". If the IO
 * buffer size is 8KiB, but the +length+ specified is 1024 (1KiB), at most 8KiB
 * will be written, but at least 1KiB will be. Generally, the only case where
 * less data than +length+ will be written is if there is an error writing the
 * data.
 *
 * Specifying a +length+ of 0 is valid and means try writing at least once, as
 * much data as possible.
 *
 * A suggested implementation should try to write to +io+ in a non-blocking
 * manner and call #io_wait if the +io+ is not ready (which will yield control
 * to other fibers).
 *
 * See IO::Buffer for an interface available to get data from the buffer
 * efficiently.
 *
 * Expected to return the number of bytes written, or, in case of an error,
 * <tt>-errno</tt> (a negated number corresponding to the system's error code).
 *
 * The method should be considered _experimental_.
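 *
 * A simplified sketch of the suggested approach (it ignores the <tt>length == 0</tt>
 * case; writing more than +length+ bytes is allowed):
 *
 *     def io_write(io, buffer, length, offset)
 *       total = 0
 *
 *       while total < length
 *         case written = io.write_nonblock(buffer.get_string(offset + total), exception: false)
 *         when :wait_writable then io_wait(io, IO::WRITABLE, nil)
 *         else
 *           total += written
 *         end
 *       end
 *
 *       total
 *     rescue SystemCallError => error
 *       -error.errno
 *     end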
 */
static VALUE
fiber_scheduler_io_write(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_write, 4, arguments + 1);
}

VALUE
rb_fiber_scheduler_io_write(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
{
    if (!rb_respond_to(scheduler, id_io_write)) {
        return RUBY_Qundef;
    }

    VALUE arguments[] = {
        scheduler, io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
    };

    if (rb_respond_to(scheduler, id_fiber_interrupt)) {
        return rb_thread_io_blocking_operation(io, fiber_scheduler_io_write, (VALUE)&arguments);
    } else {
        return fiber_scheduler_io_write((VALUE)&arguments);
    }
}

/*
 * Document-method: Fiber::Scheduler#io_pwrite
 * call-seq: io_pwrite(io, buffer, from, length, offset) -> written length or -errno
 *
 * Invoked by IO#pwrite or IO::Buffer#pwrite to write +length+ bytes to +io+
 * at offset +from+, taken from a specified +buffer+ (see IO::Buffer) at the
 * given +offset+.
 *
 * This method is semantically the same as #io_write, but it allows specifying
 * the offset to write to and is often better for asynchronous IO on the same
 * file.
 *
 * The method should be considered _experimental_.
 *
 */
static VALUE
fiber_scheduler_io_pwrite(VALUE _argument) {
    VALUE *arguments = (VALUE*)_argument;

    return rb_funcallv(arguments[0], id_io_pwrite, 5, arguments + 1);
}

VALUE
rb_fiber_scheduler_io_pwrite(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
{
    if (!rb_respond_to(scheduler, id_io_pwrite)) {
        return RUBY_Qundef;
    }

    VALUE arguments[] = {
        scheduler, io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
    };

    if (rb_respond_to(scheduler, id_fiber_interrupt)) {
        return rb_thread_io_blocking_operation(io, fiber_scheduler_io_pwrite, (VALUE)&arguments);
    } else {
        return fiber_scheduler_io_pwrite((VALUE)&arguments);
    }
}

VALUE
rb_fiber_scheduler_io_read_memory(VALUE scheduler, VALUE io, void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);

    VALUE result = rb_fiber_scheduler_io_read(scheduler, io, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_write_memory(VALUE scheduler, VALUE io, const void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);

    VALUE result = rb_fiber_scheduler_io_write(scheduler, io, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_pread_memory(VALUE scheduler, VALUE io, rb_off_t from, void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);

    VALUE result = rb_fiber_scheduler_io_pread(scheduler, io, from, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_pwrite_memory(VALUE scheduler, VALUE io, rb_off_t from, const void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);

    VALUE result = rb_fiber_scheduler_io_pwrite(scheduler, io, from, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_close(VALUE scheduler, VALUE io)
{
    VALUE arguments[] = {io};

    return rb_check_funcall(scheduler, id_io_close, 1, arguments);
}

/*
 * Document-method: Fiber::Scheduler#address_resolve
 * call-seq: address_resolve(hostname) -> array_of_strings or nil
 *
 * Invoked by any method that performs a non-reverse DNS lookup. The most
 * notable method is Addrinfo.getaddrinfo, but there are many others.
 *
 * The method is expected to return an array of strings corresponding to the IP
 * addresses the +hostname+ is resolved to, or +nil+ if it cannot be resolved.
 *
 * A fairly exhaustive list of all possible call-sites:
 *
 * - Addrinfo.getaddrinfo
 * - Addrinfo.tcp
 * - Addrinfo.udp
 * - Addrinfo.ip
 * - Addrinfo.new
 * - Addrinfo.marshal_load
 * - SOCKSSocket.new
 * - TCPServer.new
 * - TCPSocket.new
 * - IPSocket.getaddress
 * - TCPSocket.gethostbyname
 * - UDPSocket#connect
 * - UDPSocket#bind
 * - UDPSocket#send
 * - Socket.getaddrinfo
 * - Socket.gethostbyname
 * - Socket.pack_sockaddr_in
 * - Socket.sockaddr_in
 * - Socket.unpack_sockaddr_in
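 *
 * A minimal sketch using the +resolv+ standard library (one possible approach; the
 * helper thread keeps the resolver's own blocking calls off the event loop):
 *
 *     def address_resolve(hostname)
 *       require 'resolv'
 *       Thread.new { Resolv.getaddresses(hostname) }.value
 *     end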
 */
VALUE
rb_fiber_scheduler_address_resolve(VALUE scheduler, VALUE hostname)
{
    VALUE arguments[] = {
        hostname
    };

    return rb_check_funcall(scheduler, id_address_resolve, 1, arguments);
}

/*
 * Document-method: Fiber::Scheduler#blocking_operation_wait
 * call-seq: blocking_operation_wait(blocking_operation)
 *
 * Invoked by Ruby's core methods to run a blocking operation in a non-blocking way.
 * The blocking_operation is a Fiber::Scheduler::BlockingOperation that encapsulates the blocking operation.
 *
 * If the scheduler doesn't implement this method, or if the scheduler doesn't execute
 * the blocking operation, Ruby will fall back to the non-scheduler implementation.
 *
 * Minimal suggested implementation is:
 *
 *     def blocking_operation_wait(blocking_operation)
 *       Thread.new { blocking_operation.call }.join
 *     end
 */
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void* (*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
{
    // Check if scheduler supports blocking_operation_wait before creating the object
    if (!rb_respond_to(scheduler, id_blocking_operation_wait)) {
        return Qundef;
    }

    // Create a new BlockingOperation with the blocking operation
    VALUE blocking_operation = rb_fiber_scheduler_blocking_operation_new(function, data, unblock_function, data2, flags, state);

    VALUE result = rb_funcall(scheduler, id_blocking_operation_wait, 1, blocking_operation);

    // Get the operation data to check if it was executed
    rb_fiber_scheduler_blocking_operation_t *operation = get_blocking_operation(blocking_operation);
    rb_atomic_t current_status = RUBY_ATOMIC_LOAD(operation->status);

    // Invalidate the operation now that we're done with it
    operation->function = NULL;
    operation->state = NULL;
    operation->data = NULL;
    operation->data2 = NULL;
    operation->unblock_function = NULL;

    // If the blocking operation was never executed, return Qundef to signal the caller to use rb_nogvl instead
    if (current_status == RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED) {
        return Qundef;
    }

    return result;
}

VALUE
rb_fiber_scheduler_fiber_interrupt(VALUE scheduler, VALUE fiber, VALUE exception)
{
    VALUE arguments[] = {
        fiber, exception
    };

    VALUE result;
    enum ruby_tag_type state;

    // We must prevent interrupts while invoking the fiber_interrupt method, because otherwise fibers can be left permanently blocked if an interrupt occurs during the execution of user code. See also `rb_fiber_scheduler_unblock`.
    rb_execution_context_t *ec = GET_EC();
    int saved_interrupt_mask = ec->interrupt_mask;
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        result = rb_check_funcall(scheduler, id_fiber_interrupt, 2, arguments);
    }
    EC_POP_TAG();

    ec->interrupt_mask = saved_interrupt_mask;

    if (state) {
        EC_JUMP_TAG(ec, state);
    }

    RUBY_VM_CHECK_INTS(ec);

    return result;
}

/*
 * Document-method: Fiber::Scheduler#fiber
 * call-seq: fiber(&block)
 *
 * Implementation of Fiber.schedule. The method is <em>expected</em> to immediately
 * run the given block of code in a separate non-blocking fiber, and to return that Fiber.
 *
 * Minimal suggested implementation is:
 *
 *     def fiber(&block)
 *       fiber = Fiber.new(blocking: false, &block)
 *       fiber.resume
 *       fiber
 *     end
 */
VALUE
rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
{
    return rb_funcall_passing_block_kw(scheduler, id_fiber_schedule, argc, argv, kw_splat);
}

/*
 * C API: Cancel a blocking operation
 *
 * This function cancels a blocking operation. If the operation is queued,
 * it just marks it as cancelled. If it's executing, it marks it as cancelled
 * and calls the unblock function to interrupt the operation.
 *
 * Returns 1 if the unblock function was called, 0 if the operation was just marked
 * cancelled, and -1 on error.
 */
int
rb_fiber_scheduler_blocking_operation_cancel(rb_fiber_scheduler_blocking_operation_t *blocking_operation)
{
    if (blocking_operation == NULL) {
        return -1;
    }

    rb_atomic_t current_state = RUBY_ATOMIC_LOAD(blocking_operation->status);

    switch (current_state) {
      case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_QUEUED:
        // Work hasn't started - just mark as cancelled:
        if (RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) == current_state) {
            // Successfully cancelled before execution:
            return 0;
        }
        // Fall through if state changed between load and CAS

      case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_EXECUTING:
        // Work is running - mark cancelled AND call unblock function
        if (RUBY_ATOMIC_CAS(blocking_operation->status, current_state, RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED) != current_state) {
            // State changed between load and CAS - operation may have completed:
            return 0;
        }
        // Otherwise, we successfully marked it as cancelled, so we can call the unblock function:
        rb_unblock_function_t *unblock_function = blocking_operation->unblock_function;
        if (unblock_function) {
            RUBY_ASSERT(unblock_function != (rb_unblock_function_t *)-1 && "unblock_function is still sentinel value -1, should have been resolved earlier");
            blocking_operation->unblock_function(blocking_operation->data2);
        }
        // Cancelled during execution (unblock function called):
        return 1;

      case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_COMPLETED:
      case RB_FIBER_SCHEDULER_BLOCKING_OPERATION_STATUS_CANCELLED:
        // Already finished or cancelled:
        return 0;
    }

    return 0;
}