static void timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags);
static bool
timer_thread_cancel_waiting(rb_thread_t *th)
{
    bool canceled = false;

    if (th->sched.waiting_reason.flags) {
        // ...
        canceled = true;
        ccan_list_del_init(&th->sched.waiting_reason.node);
        timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);
        th->sched.waiting_reason.flags = thread_sched_waiting_none;
        // ...
    }

    return canceled;
}
static void
ubf_event_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    VM_ASSERT(th->nt == NULL || !th_has_dedicated_nt(th));

    // clear the unblock function; the interrupt is already in flight
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    thread_sched_lock(sched, th);
    {
        bool canceled = timer_thread_cancel_waiting(th);

        if (sched->running == th) {
            RUBY_DEBUG_LOG("not waiting yet");
        }
        else if (canceled) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            RUBY_DEBUG_LOG("already not waiting");
        }
    }
    thread_sched_unlock(sched, th);
}
static bool timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel, uint32_t event_serial);
static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    VM_ASSERT(!th_has_dedicated_nt(th)); // only M:N (shared NT) threads wait here

    volatile bool timedout = false, need_cancel = false;

    uint32_t event_serial = ++th->event_serial;

    if (ubf_set(th, ubf_event_waiting, (void *)th)) {
        thread_sched_lock(sched, th);
        {
            if (timer_thread_register_waiting(th, fd, events, rel, event_serial)) {
                RUBY_DEBUG_LOG("wait fd:%d", fd);

                RB_VM_SAVE_MACHINE_CONTEXT(th);

                if (th->sched.waiting_reason.flags == thread_sched_waiting_none) {
                    // already woken up before going to sleep
                }
                else if (RUBY_VM_INTERRUPTED(th->ec)) {
                    // interrupted: do not sleep, drop the registration below
                    need_cancel = true;
                }
                else {
                    RUBY_DEBUG_LOG("sleep");

                    th->status = THREAD_STOPPED_FOREVER;
                    thread_sched_wakeup_next_thread(sched, th, true);
                    thread_sched_wait_running_turn(sched, th, true);

                    RUBY_DEBUG_LOG("wakeup");
                }

                // data.result == 0 means no I/O event was delivered, i.e. a timeout
                timedout = th->sched.waiting_reason.data.result == 0;

                if (need_cancel) {
                    timer_thread_cancel_waiting(th);
                }

                th->status = THREAD_RUNNABLE;
            }
            else {
                RUBY_DEBUG_LOG("can not wait fd:%d", fd);
            }
        }
        thread_sched_unlock(sched, th);
    }

    VM_ASSERT(sched->running == th);

    return timedout;
}
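/* Hypothetical usage sketch (disabled; example_wait_readable is not part of
 * this file): an M:N thread that wants to block until fd becomes readable,
 * with an optional relative timeout, parks itself via
 * thread_sched_wait_events and treats a true return value as "timed out". */
#if 0
static bool
example_wait_readable(rb_thread_t *th, int fd, rb_hrtime_t *rel)
{
    struct rb_thread_sched *sched = TH_SCHED(th);
    // true => the wait ended because the timeout expired (data.result == 0)
    return thread_sched_wait_events(sched, th, fd, thread_sched_waiting_io_read, rel);
}
#endif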
static int
get_sysconf_page_size(void)
{
    static long page_size = 0;

    if (UNLIKELY(page_size == 0)) {
        page_size = sysconf(_SC_PAGESIZE);
        VM_ASSERT(page_size < INT_MAX);
    }
    return (int)page_size;
}
#define MSTACK_CHUNK_SIZE (512 * 1024 * 1024) /* 512 MiB */
#define MSTACK_PAGE_SIZE get_sysconf_page_size()
#define MSTACK_CHUNK_PAGE_NUM (MSTACK_CHUNK_SIZE / MSTACK_PAGE_SIZE - 1)
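/* Worked example (illustrative only; assumes a 4 KiB page size and 1 MiB
 * defaults for both the VM stack and the machine stack; the real values come
 * from sysconf(_SC_PAGESIZE) and vm->default_params):
 *
 *   MSTACK_CHUNK_PAGE_NUM  = 512 MiB / 4 KiB - 1                  = 131071 pages
 *   nt_thread_stack_size() = roundup(1 MiB + 1 MiB + 4 KiB, 4 KiB) = 2 MiB + 4 KiB
 *   stack_count            = (131071 - 1) * 4 KiB / (2 MiB + 4 KiB) ~= 255
 *
 * so a single mmap'ed chunk can host roughly 255 M:N thread stack slots
 * after reserving one page for the chunk header.
 */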
static struct nt_stack_chunk_header {
    struct nt_stack_chunk_header *prev_chunk;
    struct nt_stack_chunk_header *prev_free_chunk;

    uint16_t start_page;
    uint16_t stack_count;
    uint16_t uninitialized_stack_count;

    uint16_t free_stack_pos;
    uint16_t free_stack[];
} *nt_stack_chunks = NULL,
  *nt_free_stack_chunks = NULL;
struct nt_machine_stack_footer {
    struct nt_stack_chunk_header *ch;
    size_t index;
};

static rb_nativethread_lock_t nt_machine_stack_lock = RB_NATIVETHREAD_LOCK_INIT;
static size_t
nt_thread_stack_size(void)
{
    static size_t msz;
    if (LIKELY(msz > 0)) return msz;

    rb_vm_t *vm = GET_VM();
    int sz = (int)(vm->default_params.thread_vm_stack_size + vm->default_params.thread_machine_stack_size + MSTACK_PAGE_SIZE);
    int page_num = roomof(sz, MSTACK_PAGE_SIZE);
    msz = (size_t)page_num * MSTACK_PAGE_SIZE;
    return msz;
}
static struct nt_stack_chunk_header *
nt_alloc_thread_stack_chunk(void)
{
    int mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
    mmap_flags |= MAP_STACK;
#endif

    const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
    if (m == MAP_FAILED) {
        return NULL;
    }

    ruby_annotate_mmap(m, MSTACK_CHUNK_SIZE, "Ruby:nt_alloc_thread_stack_chunk");

    size_t msz = nt_thread_stack_size();
    int header_page_cnt = 1;
    int stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    int ch_size = sizeof(struct nt_stack_chunk_header) + sizeof(uint16_t) * stack_count;

    if (ch_size > MSTACK_PAGE_SIZE * header_page_cnt) {
        // the header (with its flexible free_stack[] array) needs more than one page
        header_page_cnt = (ch_size + MSTACK_PAGE_SIZE - 1) / MSTACK_PAGE_SIZE;
        stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    }

    VM_ASSERT(stack_count <= UINT16_MAX);

    struct nt_stack_chunk_header *ch = (struct nt_stack_chunk_header *)m;

    ch->start_page = header_page_cnt;
    ch->prev_chunk = nt_stack_chunks;
    ch->prev_free_chunk = nt_free_stack_chunks;
    ch->uninitialized_stack_count = ch->stack_count = (uint16_t)stack_count;
    ch->free_stack_pos = 0;

    RUBY_DEBUG_LOG("ch:%p start_page:%d stack_cnt:%d stack_size:%d", ch, (int)ch->start_page, (int)ch->stack_count, (int)msz);

    return ch;
}
static void *
nt_stack_chunk_get_stack_start(struct nt_stack_chunk_header *ch, size_t idx)
{
    const char *m = (char *)ch;
    return (void *)(m + ch->start_page * MSTACK_PAGE_SIZE + idx * nt_thread_stack_size());
}
static struct nt_machine_stack_footer *
nt_stack_chunk_get_msf(const rb_vm_t *vm, const char *mstack)
{
    const size_t msz = vm->default_params.thread_machine_stack_size;
    return (struct nt_machine_stack_footer *)&mstack[msz - sizeof(struct nt_machine_stack_footer)];
}
static void *
nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, size_t idx, void **vm_stack, void **machine_stack)
{
    const char *vstack, *mstack;
    const char *guard_page;
    vstack = nt_stack_chunk_get_stack_start(ch, idx);
    guard_page = vstack + vm->default_params.thread_vm_stack_size;
    mstack = guard_page + MSTACK_PAGE_SIZE;

    struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(vm, mstack);
    msf->ch = ch;
    msf->index = idx;

    RUBY_DEBUG_LOG("msf:%p vstack:%p-%p guard_page:%p-%p mstack:%p-%p", msf,
                   vstack, (void *)(guard_page-1),
                   guard_page, (void *)(mstack-1),
                   mstack, (void *)(msf));

    *vm_stack = (void *)vstack;
    *machine_stack = (void *)mstack;

    return (void *)guard_page;
}
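/* Illustrative layout of one stack slot as carved out above (low to high
 * addresses); the concrete sizes are taken from vm->default_params:
 *
 *   vstack                        guard_page             mstack
 *   |<-- thread_vm_stack_size -->|<-- 1 page, PROT_NONE -->|<-- machine stack ... -->| footer |
 *
 * The struct nt_machine_stack_footer occupies the topmost bytes of the
 * machine-stack area, so nt_free_stack() can recover the owning chunk
 * (msf->ch) and slot index (msf->index) from the machine stack pointer alone.
 */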
static void
nt_stack_chunk_dump(void)
{
    struct nt_stack_chunk_header *ch;
    int i;

    fprintf(stderr, "** nt_stack_chunks\n");
    ch = nt_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }

    fprintf(stderr, "** nt_free_stack_chunks\n");
    ch = nt_free_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_free_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }
}
static int
nt_guard_page(const char *p, size_t len)
{
    if (mprotect((void *)p, len, PROT_NONE) != -1) {
        return 0;
    }
    else {
        return errno;
    }
}
static int
nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
{
    int err = 0;

    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
      retry:
        if (nt_free_stack_chunks) {
            struct nt_stack_chunk_header *ch = nt_free_stack_chunks;
            if (ch->free_stack_pos > 0) {
                // reuse a previously freed slot
                RUBY_DEBUG_LOG("free_stack_pos:%d", ch->free_stack_pos);
                nt_stack_chunk_get_stack(vm, ch, ch->free_stack[--ch->free_stack_pos], vm_stack, machine_stack);
            }
            else if (ch->uninitialized_stack_count > 0) {
                // carve out a fresh slot and protect its guard page
                RUBY_DEBUG_LOG("uninitialized_stack_count:%d", ch->uninitialized_stack_count);
                size_t idx = ch->stack_count - ch->uninitialized_stack_count--;
                void *guard_page = nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
                err = nt_guard_page(guard_page, MSTACK_PAGE_SIZE);
            }
            else {
                // the chunk is exhausted: drop it from the free list and retry
                nt_free_stack_chunks = ch->prev_free_chunk;
                ch->prev_free_chunk = NULL;
                goto retry;
            }
        }
        else {
            // no chunk with room: map a new one and retry
            struct nt_stack_chunk_header *p = nt_alloc_thread_stack_chunk();
            if (p == NULL) {
                err = errno;
            }
            else {
                nt_free_stack_chunks = nt_stack_chunks = p;
                goto retry;
            }
        }
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);

    return err;
}
static void
nt_madvise_free_or_dontneed(void *addr, size_t len)
{
    // both MADV_FREE and MADV_DONTNEED are best-effort hints; errors are ignored
#if defined(MADV_FREE)
    int r = madvise(addr, len, MADV_FREE);
    // return on success, or else fall back to MADV_DONTNEED
    if (r == 0) return;
#endif
#if defined(MADV_DONTNEED)
    madvise(addr, len, MADV_DONTNEED);
#endif
}
static void
nt_free_stack(void *mstack)
{
    if (!mstack) return;

    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
        struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(GET_VM(), mstack);
        struct nt_stack_chunk_header *ch = msf->ch;
        int idx = (int)msf->index;
        void *stack = nt_stack_chunk_get_stack_start(ch, idx);

        RUBY_DEBUG_LOG("stack:%p mstack:%p ch:%p index:%d", stack, mstack, ch, idx);

        if (ch->prev_free_chunk == NULL) {
            ch->prev_free_chunk = nt_free_stack_chunks;
            nt_free_stack_chunks = ch;
        }
        ch->free_stack[ch->free_stack_pos++] = idx;

        // release the physical pages of the slot
        nt_madvise_free_or_dontneed(stack, nt_thread_stack_size());
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);
}
static int
native_thread_check_and_create_shared(rb_vm_t *vm)
{
    bool need_to_make = false;

    rb_native_mutex_lock(&vm->ractor.sched.lock);
    {
        unsigned int schedulable_ractor_cnt = vm->ractor.cnt;

        // the main ractor does not need an SNT if it does not run M:N threads
        if (!vm->ractor.main_ractor->threads.sched.enable_mn_threads) {
            schedulable_ractor_cnt--;
        }

        unsigned int snt_cnt = vm->ractor.sched.snt_cnt;
        if (((int)snt_cnt < MINIMUM_SNT) ||
            (snt_cnt < schedulable_ractor_cnt &&
             snt_cnt < vm->ractor.sched.max_cpu)) {

            RUBY_DEBUG_LOG("added snt:%u dnt:%u ractor_cnt:%u grq_cnt:%u",
                           vm->ractor.sched.snt_cnt,
                           vm->ractor.sched.dnt_cnt,
                           vm->ractor.cnt,
                           vm->ractor.sched.grq_cnt);

            vm->ractor.sched.snt_cnt++;
            need_to_make = true;
        }
        else {
            RUBY_DEBUG_LOG("snt:%d ractor_cnt:%d", (int)vm->ractor.sched.snt_cnt, (int)vm->ractor.cnt);
        }
    }
    rb_native_mutex_unlock(&vm->ractor.sched.lock);

    if (need_to_make) {
        struct rb_native_thread *nt = native_thread_alloc();
        nt->vm = vm;
        return native_thread_create0(nt);
    }
    else {
        return 0;
    }
}
# define co_start ruby_coroutine_start
// ...

static COROUTINE
co_start(struct coroutine_context *from, struct coroutine_context *self)
{
#ifdef RUBY_ASAN_ENABLED
    __sanitizer_finish_switch_fiber(self->fake_stack,
                                    (const void**)&from->stack_base, &from->stack_size);
#endif

    rb_thread_t *th = (rb_thread_t *)self->argument;
    struct rb_thread_sched *sched = TH_SCHED(th);

    VM_ASSERT(th->nt != NULL);
    VM_ASSERT(th == sched->running);
    VM_ASSERT(sched->lock_owner == NULL);

    thread_sched_set_locked(sched, th);
    thread_sched_add_running_thread(TH_SCHED(th), th);
    thread_sched_unlock(sched, th);

    call_thread_start_func_2(th);

    thread_sched_lock(sched, NULL);

    RUBY_DEBUG_LOG("terminated th:%d", (int)th->serial);

    // the thread has terminated; hand the native thread over

    struct rb_native_thread *nt = th->nt;
    bool is_dnt = th_has_dedicated_nt(th);
    native_thread_assign(NULL, th);
    rb_ractor_set_current_ec(th->ractor, NULL);

    if (is_dnt) {
        // the thread owns a dedicated native thread: return to its nt_context
        th->sched.finished = true;
        coroutine_transfer0(self, nt->nt_context, true);
    }
    else {
        rb_thread_t *next_th = sched->running;

        if (next_th && !next_th->nt) {
            // switch directly to the next ready thread
            thread_sched_set_unlocked(sched, NULL);
            th->sched.finished = true;
            thread_sched_switch0(th->sched.context, next_th, nt, true);
        }
        else {
            // no suitable next thread: return to the native thread context
            th->sched.finished = true;
            coroutine_transfer0(self, nt->nt_context, true);
        }
    }

    rb_bug("unreachable");
}
static int
native_thread_create_shared(rb_thread_t *th)
{
    // setup stacks
    rb_vm_t *vm = th->vm;
    void *vm_stack = NULL, *machine_stack = NULL;
    int err = nt_alloc_stack(vm, &vm_stack, &machine_stack);
    if (err) return err;

    VM_ASSERT(vm_stack < machine_stack);

    // setup vm stack
    size_t vm_stack_words = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_words);

    // setup machine stack
    size_t machine_stack_size = vm->default_params.thread_machine_stack_size - sizeof(struct nt_machine_stack_footer);
    th->ec->machine.stack_start = (void *)((uintptr_t)machine_stack + machine_stack_size);
    th->ec->machine.stack_maxsize = machine_stack_size;
    th->sched.context_stack = machine_stack;

    // setup coroutine context
    th->sched.context = ruby_xmalloc(sizeof(struct coroutine_context));
    coroutine_initialize(th->sched.context, co_start, machine_stack, machine_stack_size);
    th->sched.context->argument = th;

    RUBY_DEBUG_LOG("th:%u vm_stack:%p machine_stack:%p", rb_th_serial(th), vm_stack, machine_stack);
    thread_sched_to_ready(TH_SCHED(th), th);

    // create a shared native thread if we are short of them
    return native_thread_check_and_create_shared(th->vm);
}
#else // USE_MN_THREADS

static int
native_thread_create_shared(rb_thread_t *th)
{
    rb_bug("unreachable");
}

static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    rb_bug("unreachable");
}

#endif // USE_MN_THREADS
#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
static bool
fd_readable_nonblock(int fd)
{
    struct pollfd pfd = {
        .fd = fd,
        .events = POLLIN,
    };
    return poll(&pfd, 1, 0) != 0;
}

static bool
fd_writable_nonblock(int fd)
{
    struct pollfd pfd = {
        .fd = fd,
        .events = POLLOUT,
    };
    return poll(&pfd, 1, 0) != 0;
}
static void
verify_waiting_list(void)
{
    // the waiting list must be sorted by absolute timeout, with "no timeout"
    // (timeout == 0) entries at the tail: [t1 <= t2 <= ..., 0, 0, 0]
    struct rb_thread_sched_waiting *w, *prev_w = NULL;

    ccan_list_for_each(&timer_th.waiting, w, node) {
        if (prev_w) {
            rb_hrtime_t timeout = w->data.timeout;
            rb_hrtime_t prev_timeout = prev_w->data.timeout;
            VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
        }
        prev_w = w;
    }
}
static enum thread_sched_waiting_flag
kqueue_translate_filter_to_flags(int16_t filter)
{
    switch (filter) {
      case EVFILT_READ:
        return thread_sched_waiting_io_read;
      case EVFILT_WRITE:
        return thread_sched_waiting_io_write;
      case EVFILT_TIMER:
        return thread_sched_waiting_timeout;
      default:
        rb_bug("kevent filter:%d not supported", filter);
    }
}
static int
kqueue_wait(rb_vm_t *vm)
{
    struct timespec calculated_timeout;
    struct timespec *timeout = NULL;
    int timeout_ms = timer_thread_set_timeout(vm);

    if (timeout_ms >= 0) {
        calculated_timeout.tv_sec = timeout_ms / 1000;
        calculated_timeout.tv_nsec = (timeout_ms % 1000) * 1000000;
        timeout = &calculated_timeout;
    }

    return kevent(timer_th.event_fd, NULL, 0, timer_th.finished_events, KQUEUE_EVENTS_MAX, timeout);
}
static void
kqueue_create(void)
{
    if ((timer_th.event_fd = kqueue()) == -1) rb_bug("kqueue creation failed (errno:%d)", errno);

    int flags = fcntl(timer_th.event_fd, F_GETFD);
    if (flags == -1) {
        rb_bug("kqueue GETFD failed (errno:%d)", errno);
    }

    flags |= FD_CLOEXEC;
    if (fcntl(timer_th.event_fd, F_SETFD, flags) == -1) {
        rb_bug("kqueue SETFD failed (errno:%d)", errno);
    }
}
static void
kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
{
    if (flags) {
        struct kevent ke[2];
        int num_events = 0;

        if (flags & thread_sched_waiting_io_read) {
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (flags & thread_sched_waiting_io_write) {
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
            perror("kevent");
            rb_bug("unregister/kevent fails. errno:%d", errno);
        }
    }
}
static bool
kqueue_already_registered(int fd)
{
    struct rb_thread_sched_waiting *w, *found_w = NULL;

    ccan_list_for_each(&timer_th.waiting, w, node) {
        if (w->flags && w->data.fd == fd) {
            found_w = w;
            break;
        }
    }
    return found_w != NULL;
}
// returns false if the fd is not waitable or no wait is needed
static bool
timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel, uint32_t event_serial)
{
    RUBY_DEBUG_LOG("th:%u fd:%d flag:%d rel:%lu", rb_th_serial(th), fd, flags, rel ? (unsigned long)*rel : 0);

    VM_ASSERT(th == NULL || TH_SCHED(th)->running == th);
    VM_ASSERT(flags != 0);

    rb_hrtime_t abs = 0; // 0 means "no timeout"

    if (rel && *rel > 0) {
        flags |= thread_sched_waiting_timeout;
    }

#if HAVE_SYS_EVENT_H
    struct kevent ke[2];
    int num_events = 0;
#else
    uint32_t epoll_events = 0;
#endif

    if (flags & thread_sched_waiting_timeout) {
        VM_ASSERT(rel != NULL);
        abs = rb_hrtime_add(rb_hrtime_now(), *rel);
    }

    if (flags & thread_sched_waiting_io_read) {
        if (!(flags & thread_sched_waiting_io_force) && fd_readable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_readable_nonblock");
            return false; // already readable; no need to wait
        }
        else {
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLIN;
#endif
        }
    }

    if (flags & thread_sched_waiting_io_write) {
        if (!(flags & thread_sched_waiting_io_force) && fd_writable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_writable_nonblock");
            return false; // already writable; no need to wait
        }
        else {
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLOUT;
#endif
        }
    }

    // register with the event backend

#if HAVE_SYS_EVENT_H
    if (num_events > 0) {
        if (kqueue_already_registered(fd)) {
            // ...
            return false;
        }

        if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
            RUBY_DEBUG_LOG("failed (%d)", errno);
            // ...
            rb_bug("register/kevent failed(fd:%d, errno:%d)", fd, errno);
        }
        RUBY_DEBUG_LOG("kevent(add, fd:%d) success", fd);
    }
#else
    if (epoll_events) {
        struct epoll_event event = {
            .events = epoll_events,
            .data = {
                .ptr = (void *)th,
            },
        };
        if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_ADD, fd, &event) == -1) {
            RUBY_DEBUG_LOG("failed (%d)", errno);
            // ...
            rb_bug("register/epoll_ctl failed(fd:%d, errno:%d)", fd, errno);
        }
        RUBY_DEBUG_LOG("epoll_ctl(add, fd:%d, events:%d) success", fd, epoll_events);
    }
#endif

    if (th) {
        VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

        // record what this thread is waiting for
        th->sched.waiting_reason.flags = flags;
        th->sched.waiting_reason.data.timeout = abs;
        th->sched.waiting_reason.data.fd = fd;
        th->sched.waiting_reason.data.result = 0;
        th->sched.waiting_reason.data.event_serial = event_serial;

        if (abs == 0) { // no timeout
            VM_ASSERT(!(flags & thread_sched_waiting_timeout));
            ccan_list_add_tail(&timer_th.waiting, &th->sched.waiting_reason.node);
        }
        else {
            RUBY_DEBUG_LOG("abs:%lu", (unsigned long)abs);
            VM_ASSERT(flags & thread_sched_waiting_timeout);

            // insert into the waiting list, keeping it sorted by absolute timeout
            struct rb_thread_sched_waiting *w, *prev_w = NULL;

            ccan_list_for_each(&timer_th.waiting, w, node) {
                if ((w->flags & thread_sched_waiting_timeout) &&
                    w->data.timeout < abs) {
                    prev_w = w;
                }
                else {
                    break;
                }
            }

            if (prev_w) {
                ccan_list_add_after(&timer_th.waiting, &prev_w->node, &th->sched.waiting_reason.node);
            }
            else {
                ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);
            }

            verify_waiting_list();

            // let the timer thread recompute its timeout
            timer_thread_wakeup_force();
        }
    }

    return true;
}
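/* Waiting-list shape after several registrations (illustrative values):
 * timeout entries stay sorted in ascending order of absolute deadline and
 * no-timeout entries (timeout == 0) are appended at the tail, e.g.
 *
 *   timer_th.waiting: [abs:100] -> [abs:250] -> [abs:900] -> [0] -> [0]
 *
 * which is exactly the invariant verify_waiting_list() asserts. */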
static void
timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags)
{
    if (!(th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write))) {
        return; // nothing was registered with the event backend
    }

    RUBY_DEBUG_LOG("th:%u fd:%d", rb_th_serial(th), fd);

#if HAVE_SYS_EVENT_H
    kqueue_unregister_waiting(fd, flags);
#else
    if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_DEL, fd, NULL) == -1) {
        // ...
        rb_bug("unregister/epoll_ctl fails. errno:%d", errno);
    }
#endif
}
static void
timer_thread_setup_mn(void)
{
#if HAVE_SYS_EVENT_H
    kqueue_create();
    RUBY_DEBUG_LOG("kqueue_fd:%d", timer_th.event_fd);
#else
    if ((timer_th.event_fd = epoll_create1(EPOLL_CLOEXEC)) == -1) rb_bug("epoll_create (errno:%d)", errno);
    RUBY_DEBUG_LOG("epoll_fd:%d", timer_th.event_fd);
#endif
    RUBY_DEBUG_LOG("comm_fds:%d/%d", timer_th.comm_fds[0], timer_th.comm_fds[1]);

    timer_thread_register_waiting(NULL, timer_th.comm_fds[0], thread_sched_waiting_io_read | thread_sched_waiting_io_force, NULL, 0);
}
static int
event_wait(rb_vm_t *vm)
{
#if HAVE_SYS_EVENT_H
    int r = kqueue_wait(vm);
#else
    int r = epoll_wait(timer_th.event_fd, timer_th.finished_events, EPOLL_EVENTS_MAX, timer_thread_set_timeout(vm));
#endif
    return r;
}
static void
timer_thread_polling(rb_vm_t *vm)
{
    int r = event_wait(vm);

    RUBY_DEBUG_LOG("r:%d errno:%d", r, errno);

    switch (r) {
      case 0: // timeout
        RUBY_DEBUG_LOG("timeout%s", "");

        ractor_sched_lock(vm, NULL);
        {
            // check time slices of running threads
            timer_thread_check_timeslice(vm);

            // wake a worker if the global run queue is not empty
            if (vm->ractor.sched.grq_cnt > 0) {
                RUBY_DEBUG_LOG("GRQ cnt: %u", vm->ractor.sched.grq_cnt);
                rb_native_cond_signal(&vm->ractor.sched.cond);
            }
        }
        ractor_sched_unlock(vm, NULL);

        // check shared native thread shortage
        native_thread_check_and_create_shared(vm);
        break;

      case -1: // error
        switch (errno) {
          case EINTR:
            break; // simply retry
          default:
            perror("event_wait");
            rb_bug("event_wait errno:%d", errno);
        }
        break;

      default:
        RUBY_DEBUG_LOG("%d event(s)", r);

#if HAVE_SYS_EVENT_H
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].udata;
            int fd = (int)timer_th.finished_events[i].ident;
            int16_t filter = timer_th.finished_events[i].filter;

            if (th == NULL) {
                // wakeup via the communication pipe
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wake up the thread that registered this fd
                struct rb_thread_sched *sched = TH_SCHED(th);

                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s",
                               rb_th_serial(th),
                               (filter == EVFILT_READ)  ? "read/"  : "",
                               (filter == EVFILT_WRITE) ? "write/" : "");

                thread_sched_lock(sched, th);
                {
                    if (th->sched.waiting_reason.flags) {
                        // remove from the waiting list and the event backend
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, fd, kqueue_translate_filter_to_flags(filter));

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = filter;
                        uint32_t event_serial = th->sched.waiting_reason.data.event_serial;

                        timer_thread_wakeup_thread_locked(sched, th, event_serial);
                    }
                    // else: already woken up elsewhere
                }
                thread_sched_unlock(sched, th);
            }
        }
#else // HAVE_SYS_EPOLL_H
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].data.ptr;

            if (th == NULL) {
                // wakeup via the communication pipe
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wake up the thread that registered this fd
                struct rb_thread_sched *sched = TH_SCHED(th);
                uint32_t events = timer_th.finished_events[i].events;

                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s%s%s%s%s",
                               rb_th_serial(th),
                               (events & EPOLLIN)    ? "in/"    : "",
                               (events & EPOLLOUT)   ? "out/"   : "",
                               (events & EPOLLRDHUP) ? "RDHUP/" : "",
                               (events & EPOLLPRI)   ? "pri/"   : "",
                               (events & EPOLLERR)   ? "err/"   : "",
                               (events & EPOLLHUP)   ? "hup/"   : "");

                thread_sched_lock(sched, th);
                {
                    if (th->sched.waiting_reason.flags) {
                        // remove from the waiting list and the event backend
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = (int)events;
                        uint32_t event_serial = th->sched.waiting_reason.data.event_serial;

                        timer_thread_wakeup_thread_locked(sched, th, event_serial);
                    }
                    // else: already woken up elsewhere
                }
                thread_sched_unlock(sched, th);
            }
        }
#endif
    }
}
#else // no epoll/kqueue backend available

static void
timer_thread_setup_mn(void)
{
    // do nothing
}

static void
timer_thread_polling(rb_vm_t *vm)
{
    int timeout = timer_thread_set_timeout(vm);

    struct pollfd pfd = {
        .fd = timer_th.comm_fds[0],
        .events = POLLIN,
    };

    int r = poll(&pfd, 1, timeout);

    switch (r) {
      case 0: // timeout
        timer_thread_check_timeslice(vm);
        break;
      case -1: // error
        switch (errno) {
          case EINTR:
            break; // simply retry
          default:
            perror("poll");
            rb_bug("poll errno:%d", errno);
        }
        break;
      case 1:
        consume_communication_pipe(timer_th.comm_fds[0]);
        break;
      default:
        rb_bug("unreachable");
    }
}