Ruby 3.5.0dev (2025-02-22 revision 412997300569c1853c09813e4924b6df3d7e8669)
scheduler.c (412997300569c1853c09813e4924b6df3d7e8669)
/**********************************************************************

  scheduler.c

  $Author$

  Copyright (C) 2020 Samuel Grant Dawson Williams

**********************************************************************/

#include "vm_core.h"
#include "ruby/fiber/scheduler.h"
#include "ruby/io.h"
#include "ruby/io/buffer.h"

#include "ruby/thread.h"

// For `ruby_thread_has_gvl_p`.
#include "internal/thread.h"

static ID id_close;
static ID id_scheduler_close;

static ID id_block;
static ID id_unblock;

static ID id_timeout_after;
static ID id_kernel_sleep;
static ID id_process_wait;

static ID id_io_read, id_io_pread;
static ID id_io_write, id_io_pwrite;
static ID id_io_wait;
static ID id_io_select;
static ID id_io_close;

static ID id_address_resolve;

static ID id_blocking_operation_wait;

static ID id_fiber_schedule;

/*
 * Document-class: Fiber::Scheduler
 *
 * This is not an existing class, but documentation of the interface that a Scheduler
 * object should comply with in order to be used as an argument to Fiber.set_scheduler
 * and handle non-blocking fibers. See also the "Non-blocking fibers" section in Fiber
 * class docs for explanations of some concepts.
 *
 * Scheduler's behavior and usage are expected to be as follows:
 *
 * * When the execution in the non-blocking Fiber reaches some blocking operation (like
 *   sleep, wait for a process, or a non-ready I/O), it calls some of the scheduler's
 *   hook methods, listed below.
 * * The scheduler registers what the current fiber is waiting on, and yields control
 *   to other fibers with Fiber.yield (so the fiber is suspended while it waits, and
 *   other fibers in the same thread can run).
 * * At the end of the current thread's execution, the scheduler's method #scheduler_close is called.
 * * The scheduler enters a wait loop, checking all the blocked fibers (which it has
 *   registered on hook calls) and resuming them when the awaited resource is ready
 *   (e.g. I/O ready or sleep time elapsed).
 *
 * This way concurrent execution will be achieved transparently for every
 * individual Fiber's code.
 *
 * Scheduler implementations are provided by gems, like
 * Async[https://github.com/socketry/async].
 *
 * Hook methods are:
 *
 * * #io_wait, #io_read, #io_write, #io_pread, #io_pwrite, #io_select, and #io_close
 * * #process_wait
 * * #kernel_sleep
 * * #timeout_after
 * * #address_resolve
 * * #block and #unblock
 * * #blocking_operation_wait
 * * (the list is expected to grow as more Ruby methods gain non-blocking implementations)
 *
 * When not specified otherwise, the hook implementations are mandatory: if they are not
 * implemented, the methods trying to call the hook will fail. To provide backward
 * compatibility, in the future hooks will be optional (if they are not implemented, due
 * to the scheduler being created for an older Ruby version, the code which needs the
 * hook will not fail, and will just behave in a blocking fashion).
 *
 * It is also strongly recommended that the scheduler implements the #fiber method, which
 * is delegated to by Fiber.schedule.
 *
 * A sample _toy_ implementation of the scheduler can be found in Ruby's code, in
 * <tt>test/fiber/scheduler.rb</tt>.
 *
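 * The outline below is a hypothetical skeleton (hook bodies omitted) showing the
 * minimal shape such an object must have; Fiber.set_scheduler rejects objects that
 * do not respond to #block, #unblock, #kernel_sleep and #io_wait:
 *
 *     class MyScheduler
 *       # Mandatory hooks (each needs a real implementation):
 *       def io_wait(io, events, timeout); end
 *       def block(blocker, timeout = nil); end
 *       def unblock(blocker, fiber); end
 *       def kernel_sleep(duration = nil); end
 *
 *       # Invoked automatically when the owning thread exits:
 *       def close; end
 *
 *       # Strongly recommended; delegated to by Fiber.schedule:
 *       def fiber(&block)
 *         Fiber.new(blocking: false, &block).tap(&:resume)
 *       end
 *     end
 *
 *     Fiber.set_scheduler(MyScheduler.new)
 *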
 */
void
Init_Fiber_Scheduler(void)
{
    id_close = rb_intern_const("close");
    id_scheduler_close = rb_intern_const("scheduler_close");

    id_block = rb_intern_const("block");
    id_unblock = rb_intern_const("unblock");

    id_timeout_after = rb_intern_const("timeout_after");
    id_kernel_sleep = rb_intern_const("kernel_sleep");
    id_process_wait = rb_intern_const("process_wait");

    id_io_read = rb_intern_const("io_read");
    id_io_pread = rb_intern_const("io_pread");
    id_io_write = rb_intern_const("io_write");
    id_io_pwrite = rb_intern_const("io_pwrite");

    id_io_wait = rb_intern_const("io_wait");
    id_io_select = rb_intern_const("io_select");
    id_io_close = rb_intern_const("io_close");

    id_address_resolve = rb_intern_const("address_resolve");

    id_blocking_operation_wait = rb_intern_const("blocking_operation_wait");

    id_fiber_schedule = rb_intern_const("fiber");

#if 0 /* for RDoc */
    rb_cFiberScheduler = rb_define_class_under(rb_cFiber, "Scheduler", rb_cObject);
    rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_close, 0);
    rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_process_wait, 2);
    rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_io_wait, 3);
    rb_define_method(rb_cFiberScheduler, "io_read", rb_fiber_scheduler_io_read, 4);
    rb_define_method(rb_cFiberScheduler, "io_write", rb_fiber_scheduler_io_write, 4);
    rb_define_method(rb_cFiberScheduler, "io_pread", rb_fiber_scheduler_io_pread, 5);
    rb_define_method(rb_cFiberScheduler, "io_pwrite", rb_fiber_scheduler_io_pwrite, 5);
    rb_define_method(rb_cFiberScheduler, "io_select", rb_fiber_scheduler_io_select, 4);
    rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_kernel_sleep, 1);
    rb_define_method(rb_cFiberScheduler, "address_resolve", rb_fiber_scheduler_address_resolve, 1);
    rb_define_method(rb_cFiberScheduler, "timeout_after", rb_fiber_scheduler_timeout_after, 3);
    rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_block, 2);
    rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_unblock, 2);
    rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler, -2);
    rb_define_method(rb_cFiberScheduler, "blocking_operation_wait", rb_fiber_scheduler_blocking_operation_wait, -2);
#endif
}

VALUE
rb_fiber_scheduler_get(void)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    rb_thread_t *thread = GET_THREAD();
    RUBY_ASSERT(thread);

    return thread->scheduler;
}

static void
verify_interface(VALUE scheduler)
{
    if (!rb_respond_to(scheduler, id_block)) {
        rb_raise(rb_eArgError, "Scheduler must implement #block");
    }

    if (!rb_respond_to(scheduler, id_unblock)) {
        rb_raise(rb_eArgError, "Scheduler must implement #unblock");
    }

    if (!rb_respond_to(scheduler, id_kernel_sleep)) {
        rb_raise(rb_eArgError, "Scheduler must implement #kernel_sleep");
    }

    if (!rb_respond_to(scheduler, id_io_wait)) {
        rb_raise(rb_eArgError, "Scheduler must implement #io_wait");
    }
}

static VALUE
fiber_scheduler_close(VALUE scheduler)
{
    return rb_fiber_scheduler_close(scheduler);
}

static VALUE
fiber_scheduler_close_ensure(VALUE _thread)
{
    rb_thread_t *thread = (rb_thread_t*)_thread;
    thread->scheduler = Qnil;

    return Qnil;
}

VALUE
rb_fiber_scheduler_set(VALUE scheduler)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    rb_thread_t *thread = GET_THREAD();
    RUBY_ASSERT(thread);

    if (scheduler != Qnil) {
        verify_interface(scheduler);
    }

    // We invoke Scheduler#close when setting it to something else, to ensure
    // the previous scheduler runs to completion before changing the scheduler.
    // That way, we do not need to consider interactions, e.g., of a Fiber from
    // the previous scheduler with the new scheduler.
    if (thread->scheduler != Qnil) {
        // rb_fiber_scheduler_close(thread->scheduler);
        rb_ensure(fiber_scheduler_close, thread->scheduler, fiber_scheduler_close_ensure, (VALUE)thread);
    }

    thread->scheduler = scheduler;

    return thread->scheduler;
}

static VALUE
rb_fiber_scheduler_current_for_threadptr(rb_thread_t *thread)
{
    RUBY_ASSERT(thread);

    if (thread->blocking == 0) {
        return thread->scheduler;
    }
    else {
        return Qnil;
    }
}

VALUE
rb_fiber_scheduler_current(void)
{
    return rb_fiber_scheduler_current_for_threadptr(GET_THREAD());
}

VALUE rb_fiber_scheduler_current_for_thread(VALUE thread)
{
    return rb_fiber_scheduler_current_for_threadptr(rb_thread_ptr(thread));
}

/*
 *
 * Document-method: Fiber::Scheduler#close
 *
 * Called when the current thread exits. The scheduler is expected to implement this
 * method in order to allow all waiting fibers to finalize their execution.
 *
 * The suggested pattern is to implement the main event loop in the #close method.
 *
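 * A hedged sketch, assuming the scheduler tracks runnable fibers in +@ready+ and
 * pending waits in +@waiting+ (both hypothetical, maintained by the other hooks),
 * and that +run_once+ performs one iteration of the event loop:
 *
 *     def close
 *       run_once until @ready.empty? && @waiting.empty?
 *     end
 *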
 */
VALUE
rb_fiber_scheduler_close(VALUE scheduler)
{
    RUBY_ASSERT(ruby_thread_has_gvl_p());

    VALUE result;

    // The reason for calling `scheduler_close` before calling `close` is for
    // legacy schedulers which implement `close` and expect the user to call
    // it. Subsequently, that method would call `Fiber.set_scheduler(nil)`
    // which should call `scheduler_close`. If it were to call `close`, it
    // would create an infinite loop.

    result = rb_check_funcall(scheduler, id_scheduler_close, 0, NULL);
    if (!UNDEF_P(result)) return result;

    result = rb_check_funcall(scheduler, id_close, 0, NULL);
    if (!UNDEF_P(result)) return result;

    return Qnil;
}

VALUE
rb_fiber_scheduler_make_timeout(struct timeval *timeout)
{
    if (timeout) {
        return rb_float_new((double)timeout->tv_sec + (0.000001f * timeout->tv_usec));
    }

    return Qnil;
}

/*
 * Document-method: Fiber::Scheduler#kernel_sleep
 * call-seq: kernel_sleep(duration = nil)
 *
 * Invoked by Kernel#sleep and Mutex#sleep and is expected to provide
 * an implementation of sleeping in a non-blocking way. An implementation might
 * register the current fiber in some list of "which fiber waits until what
 * moment", call Fiber.yield to pass control, and then in #close resume
 * the fibers whose wait period has elapsed.
 *
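 * A minimal sketch (illustrative only), delegating to #block so the event loop
 * wakes the fiber once +duration+ has elapsed:
 *
 *     def kernel_sleep(duration = nil)
 *       block(:sleep, duration)
 *       true
 *     end
 *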
 */
VALUE
rb_fiber_scheduler_kernel_sleep(VALUE scheduler, VALUE timeout)
{
    return rb_funcall(scheduler, id_kernel_sleep, 1, timeout);
}

VALUE
rb_fiber_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_funcallv(scheduler, id_kernel_sleep, argc, argv);
}

#if 0
/*
 * Document-method: Fiber::Scheduler#timeout_after
 * call-seq: timeout_after(duration, exception_class, *exception_arguments, &block) -> result of block
 *
 * Invoked by Timeout.timeout to execute the given +block+ within the given
 * +duration+. It can also be invoked directly by the scheduler or user code.
 *
 * Attempt to limit the execution time of a given +block+ to the given
 * +duration+ if possible. When a non-blocking operation causes the +block+'s
 * execution time to exceed the specified +duration+, that non-blocking
 * operation should be interrupted by raising the specified +exception_class+
 * constructed with the given +exception_arguments+.
 *
 * General execution timeouts are often considered risky. This implementation
 * will only interrupt non-blocking operations. This is by design because it's
 * expected that non-blocking operations can fail for a variety of
 * unpredictable reasons, so applications should already be robust in handling
 * these conditions and by implication timeouts.
 *
 * However, as a result of this design, if the +block+ does not invoke any
 * non-blocking operations, it will be impossible to interrupt it. If you
 * desire to provide predictable points for timeouts, consider adding
 * +sleep(0)+.
 *
 * If the block is executed successfully, its result will be returned.
 *
 * The exception will typically be raised using Fiber#raise.
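 *
 * An illustrative sketch; +schedule_timer+ and +cancel_timer+ are hypothetical
 * helpers backed by the scheduler's own timer list:
 *
 *     def timeout_after(duration, exception_class, *arguments)
 *       fiber = Fiber.current
 *       timer = schedule_timer(duration) do
 *         fiber.raise(exception_class, *arguments) if fiber.alive?
 *       end
 *       begin
 *         yield duration
 *       ensure
 *         cancel_timer(timer)
 *       end
 *     end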
 */
VALUE
rb_fiber_scheduler_timeout_after(VALUE scheduler, VALUE timeout, VALUE exception, VALUE message)
{
    VALUE arguments[] = {
        timeout, exception, message
    };

    return rb_check_funcall(scheduler, id_timeout_after, 3, arguments);
}

VALUE
rb_fiber_scheduler_timeout_afterv(VALUE scheduler, int argc, VALUE * argv)
{
    return rb_check_funcall(scheduler, id_timeout_after, argc, argv);
}
#endif

/*
 * Document-method: Fiber::Scheduler#process_wait
 * call-seq: process_wait(pid, flags)
 *
 * Invoked by Process::Status.wait in order to wait for a specified process.
 * See that method's description for the arguments.
 *
 * Suggested minimal implementation:
 *
 *     Thread.new do
 *       Process::Status.wait(pid, flags)
 *     end.value
 *
 * This hook is optional: if it is not present in the current scheduler,
 * Process::Status.wait will behave as a blocking method.
 *
 * Expected to return a Process::Status instance.
 */
VALUE
rb_fiber_scheduler_process_wait(VALUE scheduler, rb_pid_t pid, int flags)
{
    VALUE arguments[] = {
        PIDT2NUM(pid), RB_INT2NUM(flags)
    };

    return rb_check_funcall(scheduler, id_process_wait, 2, arguments);
}

/*
 * Document-method: Fiber::Scheduler#block
 * call-seq: block(blocker, timeout = nil)
 *
 * Invoked by methods like Thread.join, and by Mutex, to signify that the
 * current Fiber is blocked until further notice (e.g. #unblock) or until
 * +timeout+ has elapsed.
 *
 * +blocker+ is what we are waiting on, and is informational only (for
 * debugging and logging); there is no guarantee about its value.
 *
 * Expected to return a boolean, specifying whether the blocking operation was
 * successful or not.
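 *
 * An illustrative sketch: remember the blocked fiber, suspend it with
 * Fiber.yield, and rely on #unblock (or the event loop's timeout handling,
 * omitted here) to make it runnable again. +@blocked+ is a hypothetical
 * bookkeeping structure:
 *
 *     def block(blocker, timeout = nil)
 *       @blocked[Fiber.current] = blocker
 *       Fiber.yield # suspended until the event loop resumes this fiber
 *       true
 *     end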
 */
VALUE
rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
{
    return rb_funcall(scheduler, id_block, 2, blocker, timeout);
}

/*
 * Document-method: Fiber::Scheduler#unblock
 * call-seq: unblock(blocker, fiber)
 *
 * Invoked to wake up a Fiber previously blocked with #block (for example, Mutex#lock
 * calls #block and Mutex#unlock calls #unblock). The scheduler should use
 * the +fiber+ parameter to understand which fiber is unblocked.
 *
 * +blocker+ is what was awaited, but it is informational only (for debugging
 * and logging), and it is not guaranteed to be the same value as the +blocker+
 * passed to #block.
 *
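 * An illustrative sketch: mark the fiber as runnable and let the event loop
 * resume it later. Note that #unblock may be called from another thread, so
 * the ready list must be thread-safe (+@ready+ is a hypothetical
 * Thread::Queue):
 *
 *     def unblock(blocker, fiber)
 *       @ready << fiber # resumed later by the event loop, not here
 *     end
 *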
 */
VALUE
rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
{
    RUBY_ASSERT(rb_obj_is_fiber(fiber));

    // `rb_fiber_scheduler_unblock` can be called from points where `errno` is
    // expected to be preserved. Therefore, we should save and restore it. For
    // example `io_binwrite` calls `rb_fiber_scheduler_unblock` and if `errno`
    // is reset to 0 by user code, it will break the error handling in
    // `io_write`.
    // If we explicitly preserve `errno` in `io_binwrite` and other similar
    // functions (e.g. by returning it), this code is no longer needed. I hope
    // in the future we will be able to remove it.
    int saved_errno = errno;

    VALUE result = rb_funcall(scheduler, id_unblock, 2, blocker, fiber);

    errno = saved_errno;

    return result;
}

/*
 * Document-method: Fiber::Scheduler#io_wait
 * call-seq: io_wait(io, events, timeout)
 *
 * Invoked by IO#wait, IO#wait_readable, and IO#wait_writable to ask whether the
 * specified descriptor is ready for the specified events within
 * the specified +timeout+.
 *
 * +events+ is a bit mask of <tt>IO::READABLE</tt>, <tt>IO::WRITABLE</tt>, and
 * <tt>IO::PRIORITY</tt>.
 *
 * A suggested implementation should register which Fiber is waiting for which
 * resources and immediately call Fiber.yield to pass control to other
 * fibers. Then, in the #close method, the scheduler might dispatch all the
 * I/O resources to fibers waiting for them.
 *
 * Expected to return the subset of events that are ready immediately.
 *
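 * An illustrative sketch; +register+ is a hypothetical helper that adds the
 * descriptor to the scheduler's selector, and the event loop resumes the fiber
 * with the events that became ready:
 *
 *     def io_wait(io, events, timeout)
 *       register(io, events, Fiber.current)
 *       Fiber.yield # the event loop passes the ready events to #resume
 *     end
 *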
 */
VALUE
rb_fiber_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)
{
    return rb_funcall(scheduler, id_io_wait, 3, io, events, timeout);
}

VALUE
rb_fiber_scheduler_io_wait_readable(VALUE scheduler, VALUE io)
{
    return rb_fiber_scheduler_io_wait(scheduler, io, RB_UINT2NUM(RUBY_IO_READABLE), rb_io_timeout(io));
}

VALUE
rb_fiber_scheduler_io_wait_writable(VALUE scheduler, VALUE io)
{
    return rb_fiber_scheduler_io_wait(scheduler, io, RB_UINT2NUM(RUBY_IO_WRITABLE), rb_io_timeout(io));
}

/*
 * Document-method: Fiber::Scheduler#io_select
 * call-seq: io_select(readables, writables, exceptables, timeout)
 *
 * Invoked by IO.select to ask whether the specified descriptors are ready for
 * the specified events within the specified +timeout+.
 *
 * Expected to return a 3-tuple of arrays of the IOs that are ready.
 *
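 * A simple (if heavyweight) sketch is to run the blocking IO.select off the
 * event loop, on a helper thread:
 *
 *     def io_select(readables, writables, exceptables, timeout)
 *       Thread.new do
 *         IO.select(readables, writables, exceptables, timeout)
 *       end.value
 *     end
 *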
 */
VALUE rb_fiber_scheduler_io_select(VALUE scheduler, VALUE readables, VALUE writables, VALUE exceptables, VALUE timeout)
{
    VALUE arguments[] = {
        readables, writables, exceptables, timeout
    };

    return rb_fiber_scheduler_io_selectv(scheduler, 4, arguments);
}

VALUE rb_fiber_scheduler_io_selectv(VALUE scheduler, int argc, VALUE *argv)
{
    // I wondered about extracting argv, and checking if there is only a single
    // IO instance, and instead calling `io_wait`. However, it would require a
    // decent amount of work and it would be hard to preserve the exact
    // semantics of IO.select.

    return rb_check_funcall(scheduler, id_io_select, argc, argv);
}

/*
 * Document-method: Fiber::Scheduler#io_read
 * call-seq: io_read(io, buffer, length, offset) -> read length or -errno
 *
 * Invoked by IO#read or IO::Buffer#read to read +length+ bytes from +io+ into a
 * specified +buffer+ (see IO::Buffer) at the given +offset+.
 *
 * The +length+ argument is the "minimum length to be read". If the IO buffer
 * size is 8KiB, but the +length+ is +1024+ (1KiB), up to 8KiB might be read,
 * but at least 1KiB will be. Generally, the only case where less data than
 * +length+ will be read is if there is an error reading the data.
 *
 * Specifying a +length+ of 0 is valid and means to try reading at least once
 * and return any available data.
 *
 * A suggested implementation should try to read from +io+ in a non-blocking
 * manner and call #io_wait if the +io+ is not ready (which will yield control
 * to other fibers).
 *
 * See IO::Buffer for an interface available to return data.
 *
 * Expected to return the number of bytes read, or, in case of an error,
 * <tt>-errno</tt> (negated number corresponding to the system's error code).
 *
 * The method should be considered _experimental_.
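 *
 * A blocking fallback sketch (error handling omitted; a real scheduler would
 * combine +read_nonblock+ with #io_wait instead of a helper thread):
 *
 *     def io_read(io, buffer, length, offset)
 *       Thread.new { buffer.read(io, length, offset) }.value
 *     end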
 */
VALUE
rb_fiber_scheduler_io_read(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_read, 4, arguments);
}

/*
 * Document-method: Fiber::Scheduler#io_pread
 * call-seq: io_pread(io, buffer, from, length, offset) -> read length or -errno
 *
 * Invoked by IO#pread or IO::Buffer#pread to read +length+ bytes from +io+
 * at offset +from+ into a specified +buffer+ (see IO::Buffer) at the given
 * +offset+.
 *
 * This method is semantically the same as #io_read, but it allows specifying
 * the offset to read from and is often better for asynchronous IO on the same
 * file.
 *
 * The method should be considered _experimental_.
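 *
 * A blocking fallback sketch, mirroring the #io_read example:
 *
 *     def io_pread(io, buffer, from, length, offset)
 *       Thread.new { buffer.pread(io, from, length, offset) }.value
 *     end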
 */
VALUE
rb_fiber_scheduler_io_pread(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_pread, 5, arguments);
}

/*
 * Document-method: Fiber::Scheduler#io_write
 * call-seq: io_write(io, buffer, length, offset) -> written length or -errno
 *
 * Invoked by IO#write or IO::Buffer#write to write +length+ bytes to +io+ from
 * a specified +buffer+ (see IO::Buffer) at the given +offset+.
 *
 * The +length+ argument is the "minimum length to be written". If the IO
 * buffer size is 8KiB, but the +length+ specified is 1024 (1KiB), at most 8KiB
 * will be written, but at least 1KiB will be. Generally, the only case where
 * less data than +length+ will be written is if there is an error writing the
 * data.
 *
 * Specifying a +length+ of 0 is valid and means to try writing at least once,
 * as much data as possible.
 *
 * A suggested implementation should try to write to +io+ in a non-blocking
 * manner and call #io_wait if the +io+ is not ready (which will yield control
 * to other fibers).
 *
 * See IO::Buffer for an interface available to get data from the buffer
 * efficiently.
 *
 * Expected to return the number of bytes written, or, in case of an error,
 * <tt>-errno</tt> (negated number corresponding to the system's error code).
 *
 * The method should be considered _experimental_.
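 *
 * A blocking fallback sketch, symmetrical to the #io_read example:
 *
 *     def io_write(io, buffer, length, offset)
 *       Thread.new { buffer.write(io, length, offset) }.value
 *     end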
 */
VALUE
rb_fiber_scheduler_io_write(VALUE scheduler, VALUE io, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_write, 4, arguments);
}

/*
 * Document-method: Fiber::Scheduler#io_pwrite
 * call-seq: io_pwrite(io, buffer, from, length, offset) -> written length or -errno
 *
 * Invoked by IO#pwrite or IO::Buffer#pwrite to write +length+ bytes to +io+
 * at offset +from+ from a specified +buffer+ (see IO::Buffer) at the given
 * +offset+.
 *
 * This method is semantically the same as #io_write, but it allows specifying
 * the offset to write to and is often better for asynchronous IO on the same
 * file.
 *
 * The method should be considered _experimental_.
 *
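 * A blocking fallback sketch, mirroring the #io_write example:
 *
 *     def io_pwrite(io, buffer, from, length, offset)
 *       Thread.new { buffer.pwrite(io, from, length, offset) }.value
 *     end
 *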
 */
VALUE
rb_fiber_scheduler_io_pwrite(VALUE scheduler, VALUE io, rb_off_t from, VALUE buffer, size_t length, size_t offset)
{
    VALUE arguments[] = {
        io, buffer, OFFT2NUM(from), SIZET2NUM(length), SIZET2NUM(offset)
    };

    return rb_check_funcall(scheduler, id_io_pwrite, 5, arguments);
}

VALUE
rb_fiber_scheduler_io_read_memory(VALUE scheduler, VALUE io, void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);

    VALUE result = rb_fiber_scheduler_io_read(scheduler, io, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_write_memory(VALUE scheduler, VALUE io, const void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);

    VALUE result = rb_fiber_scheduler_io_write(scheduler, io, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_pread_memory(VALUE scheduler, VALUE io, rb_off_t from, void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new(base, size, RB_IO_BUFFER_LOCKED);

    VALUE result = rb_fiber_scheduler_io_pread(scheduler, io, from, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_pwrite_memory(VALUE scheduler, VALUE io, rb_off_t from, const void *base, size_t size, size_t length)
{
    VALUE buffer = rb_io_buffer_new((void*)base, size, RB_IO_BUFFER_LOCKED|RB_IO_BUFFER_READONLY);

    VALUE result = rb_fiber_scheduler_io_pwrite(scheduler, io, from, buffer, length, 0);

    rb_io_buffer_free_locked(buffer);

    return result;
}

VALUE
rb_fiber_scheduler_io_close(VALUE scheduler, VALUE io)
{
    VALUE arguments[] = {io};

    return rb_check_funcall(scheduler, id_io_close, 1, arguments);
}

/*
 * Document-method: Fiber::Scheduler#address_resolve
 * call-seq: address_resolve(hostname) -> array_of_strings or nil
 *
 * Invoked by any method that performs a non-reverse DNS lookup. The most
 * notable method is Addrinfo.getaddrinfo, but there are many others.
 *
 * The method is expected to return an array of strings corresponding to the IP
 * addresses the +hostname+ resolves to, or +nil+ if it cannot be resolved.
 * A sketch follows the list of call sites below.
 *
 * Fairly exhaustive list of all possible call-sites:
 *
 * - Addrinfo.getaddrinfo
 * - Addrinfo.tcp
 * - Addrinfo.udp
 * - Addrinfo.ip
 * - Addrinfo.new
 * - Addrinfo.marshal_load
 * - SOCKSSocket.new
 * - TCPServer.new
 * - TCPSocket.new
 * - IPSocket.getaddress
 * - TCPSocket.gethostbyname
 * - UDPSocket#connect
 * - UDPSocket#bind
 * - UDPSocket#send
 * - Socket.getaddrinfo
 * - Socket.gethostbyname
 * - Socket.pack_sockaddr_in
 * - Socket.sockaddr_in
 * - Socket.unpack_sockaddr_in
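 *
 * A hedged sketch using the pure-Ruby resolver on a helper thread (Resolv does
 * not call back into this hook, whereas Addrinfo.getaddrinfo would):
 *
 *     def address_resolve(hostname)
 *       Thread.new do
 *         require "resolv"
 *         Resolv.getaddresses(hostname)
 *       end.value
 *     end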
 */
VALUE
rb_fiber_scheduler_address_resolve(VALUE scheduler, VALUE hostname)
{
    VALUE arguments[] = {
        hostname
    };

    return rb_check_funcall(scheduler, id_address_resolve, 1, arguments);
}

struct rb_blocking_operation_wait_arguments {
    void *(*function)(void *);
    void *data;
    rb_unblock_function_t *unblock_function;
    void *data2;
    int flags;

    struct rb_fiber_scheduler_blocking_operation_state *state;
};

static VALUE
rb_fiber_scheduler_blocking_operation_wait_proc(RB_BLOCK_CALL_FUNC_ARGLIST(value, _arguments))
{
    struct rb_blocking_operation_wait_arguments *arguments = (struct rb_blocking_operation_wait_arguments*)_arguments;

    if (arguments->state == NULL) {
        rb_raise(rb_eRuntimeError, "Blocking function was already invoked!");
    }

    arguments->state->result = rb_nogvl(arguments->function, arguments->data, arguments->unblock_function, arguments->data2, arguments->flags);
    arguments->state->saved_errno = rb_errno();

    // Make sure it's only invoked once.
    arguments->state = NULL;

    return Qnil;
}

/*
 * Document-method: Fiber::Scheduler#blocking_operation_wait
 * call-seq: blocking_operation_wait(work)
 *
 * Invoked by Ruby's core methods to run a blocking operation in a non-blocking way.
 *
 * Minimal suggested implementation is:
 *
 *     def blocking_operation_wait(work)
 *       Thread.new(&work).join
 *     end
 */
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void* (*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
{
    struct rb_blocking_operation_wait_arguments arguments = {
        .function = function,
        .data = data,
        .unblock_function = unblock_function,
        .data2 = data2,
        .flags = flags,
        .state = state
    };

    VALUE proc = rb_proc_new(rb_fiber_scheduler_blocking_operation_wait_proc, (VALUE)&arguments);

    return rb_check_funcall(scheduler, id_blocking_operation_wait, 1, &proc);
}

/*
 * Document-method: Fiber::Scheduler#fiber
 * call-seq: fiber(&block)
 *
 * Implementation of Fiber.schedule. The method is <em>expected</em> to immediately
 * run the given block of code in a separate non-blocking fiber, and to return that Fiber.
 *
 * Minimal suggested implementation is:
 *
 *     def fiber(&block)
 *       fiber = Fiber.new(blocking: false, &block)
 *       fiber.resume
 *       fiber
 *     end
 */
VALUE
rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
{
    return rb_funcall_passing_block_kw(scheduler, id_fiber_schedule, argc, argv, kw_splat);
}