static void timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags);

static bool
timer_thread_cancel_waiting(rb_thread_t *th)
{
    bool canceled = false;

    if (th->sched.waiting_reason.flags) {
        rb_native_mutex_lock(&timer_th.waiting_lock);
        {
            if (th->sched.waiting_reason.flags) {
                canceled = true;
                ccan_list_del_init(&th->sched.waiting_reason.node);
                if (th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write)) {
                    timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);
                }
                th->sched.waiting_reason.flags = thread_sched_waiting_none;
            }
        }
        rb_native_mutex_unlock(&timer_th.waiting_lock);
    }

    return canceled;
}

static void
ubf_event_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    VM_ASSERT(th->nt == NULL || !th_has_dedicated_nt(th));

    // clear the unblock function; the caller already holds th->interrupt_lock
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    bool canceled = timer_thread_cancel_waiting(th);

    thread_sched_lock(sched, th);
    {
        if (sched->running == th) {
            RUBY_DEBUG_LOG("not waiting yet");
        }
        else if (canceled) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            RUBY_DEBUG_LOG("already not waiting");
        }
    }
    thread_sched_unlock(sched, th);
}

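/*
 * Note: ubf_event_waiting() above is installed as the unblocking function
 * while an MN thread is parked in thread_sched_wait_events() below (see the
 * setup_ubf(th, ubf_event_waiting, ...) call there). When an interrupt
 * arrives it cancels the thread's registration with the timer thread and, if
 * the cancellation succeeded, puts the thread back on the ready queue.
 */
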
static bool timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel);

// return true if timed out
static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    VM_ASSERT(!th_has_dedicated_nt(th)); // runs on a shared native thread

    volatile bool timedout = false, need_cancel = false;

    if (timer_thread_register_waiting(th, fd, events, rel)) {
        RUBY_DEBUG_LOG("wait fd:%d", fd);

        RB_VM_SAVE_MACHINE_CONTEXT(th);
        setup_ubf(th, ubf_event_waiting, (void *)th);

        RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

        thread_sched_lock(sched, th);
        {
            if (th->sched.waiting_reason.flags == thread_sched_waiting_none) {
                // already awaken
            }
            else if (RUBY_VM_INTERRUPTED(th->ec)) {
                need_cancel = true;
            }
            else {
                RUBY_DEBUG_LOG("sleep");

                th->status = THREAD_STOPPED_FOREVER;
                thread_sched_wakeup_next_thread(sched, th, true);
                thread_sched_wait_running_turn(sched, th, true);

                RUBY_DEBUG_LOG("wakeup");
            }

            timedout = th->sched.waiting_reason.data.result == 0;
        }
        thread_sched_unlock(sched, th);

        if (need_cancel) {
            timer_thread_cancel_waiting(th);
        }

        setup_ubf(th, NULL, NULL);

        th->status = THREAD_RUNNABLE;
    }
    else {
        RUBY_DEBUG_LOG("can not wait fd:%d", fd);
        return false;
    }

    VM_ASSERT(sched->running == th);

    return timedout;
}

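/*
 * Usage sketch (illustrative only; the caller and the constant are
 * assumptions, e.g. RB_HRTIME_PER_MSEC from hrtime.h): a thread running on a
 * shared native thread blocks on an fd with a relative timeout like this:
 *
 *   rb_hrtime_t rel = 100 * RB_HRTIME_PER_MSEC; // hypothetical 100ms
 *   bool timedout = thread_sched_wait_events(TH_SCHED(th), th, fd,
 *                                            thread_sched_waiting_io_read, &rel);
 *
 * The return value only reports whether the wait timed out (result == 0);
 * the readiness details delivered by the timer thread are left in
 * th->sched.waiting_reason.data.result.
 */
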
static int
get_sysconf_page_size(void)
{
    static long page_size = 0;

    if (UNLIKELY(page_size == 0)) {
        page_size = sysconf(_SC_PAGESIZE);
        VM_ASSERT(page_size < INT_MAX);
    }
    return (int)page_size;
}

#define MSTACK_CHUNK_SIZE (512 * 1024 * 1024)
#define MSTACK_PAGE_SIZE get_sysconf_page_size()
#define MSTACK_CHUNK_PAGE_NUM (MSTACK_CHUNK_SIZE / MSTACK_PAGE_SIZE - 1)

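/*
 * Chunk layout (as implied by the allocation code below): each 512MB chunk
 * starts with `header_page_cnt` pages holding the nt_stack_chunk_header and
 * its free_stack index array, followed by `stack_count` fixed-size slots.
 * Each slot is laid out as
 *
 *   [ VM stack | guard page | machine stack (footer at its top) ]
 *
 * and is nt_thread_stack_size() bytes long.
 */
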
static struct nt_stack_chunk_header {
    struct nt_stack_chunk_header *prev_chunk;
    struct nt_stack_chunk_header *prev_free_chunk;

    uint16_t start_page;
    uint16_t stack_count;
    uint16_t uninitialized_stack_count;

    uint16_t free_stack_pos;
    uint16_t free_stack[];
} *nt_stack_chunks = NULL,
  *nt_free_stack_chunks = NULL;

struct nt_machine_stack_footer {
    struct nt_stack_chunk_header *ch;
    size_t index;
};

static rb_nativethread_lock_t nt_machine_stack_lock = RB_NATIVETHREAD_LOCK_INIT;

static size_t
nt_thread_stack_size(void)
{
    static size_t msz;
    if (LIKELY(msz > 0)) return msz;

    rb_vm_t *vm = GET_VM();
    int sz = (int)(vm->default_params.thread_vm_stack_size + vm->default_params.thread_machine_stack_size + MSTACK_PAGE_SIZE);
    int page_num = roomof(sz, MSTACK_PAGE_SIZE);
    msz = (size_t)page_num * MSTACK_PAGE_SIZE;
    return msz;
}

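/*
 * Example of the slot-size arithmetic above (illustrative numbers, not
 * necessarily the build defaults): with a 1 MiB VM stack, a 1 MiB machine
 * stack and 4 KiB pages, sz = 2 MiB + 4 KiB, roomof() rounds that to 513
 * pages, so each slot occupies 2 MiB + 4 KiB.
 */
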
static struct nt_stack_chunk_header *
nt_alloc_thread_stack_chunk(void)
{
    int mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
    mmap_flags |= MAP_STACK;
#endif

    const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
    if (m == MAP_FAILED) {
        return NULL;
    }

    ruby_annotate_mmap(m, MSTACK_CHUNK_SIZE, "Ruby:nt_alloc_thread_stack_chunk");

    size_t msz = nt_thread_stack_size();
    int header_page_cnt = 1;
    int stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    int ch_size = sizeof(struct nt_stack_chunk_header) + sizeof(uint16_t) * stack_count;

    if (ch_size > MSTACK_PAGE_SIZE * header_page_cnt) {
        header_page_cnt = (ch_size + MSTACK_PAGE_SIZE - 1) / MSTACK_PAGE_SIZE;
        stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    }

    VM_ASSERT(stack_count <= UINT16_MAX);

    struct nt_stack_chunk_header *ch = (struct nt_stack_chunk_header *)m;

    ch->start_page = header_page_cnt;
    ch->prev_chunk = nt_stack_chunks;
    ch->prev_free_chunk = nt_free_stack_chunks;
    ch->uninitialized_stack_count = ch->stack_count = (uint16_t)stack_count;
    ch->free_stack_pos = 0;

    RUBY_DEBUG_LOG("ch:%p start_page:%d stack_cnt:%d stack_size:%d", ch, (int)ch->start_page, (int)ch->stack_count, (int)msz);

    return ch;
}

static void *
nt_stack_chunk_get_stack_start(struct nt_stack_chunk_header *ch, size_t idx)
{
    const char *m = (char *)ch;
    return (void *)(m + ch->start_page * MSTACK_PAGE_SIZE + idx * nt_thread_stack_size());
}

static struct nt_machine_stack_footer *
nt_stack_chunk_get_msf(const rb_vm_t *vm, const char *mstack)
{
    const size_t msz = vm->default_params.thread_machine_stack_size;
    return (struct nt_machine_stack_footer *)&mstack[msz - sizeof(struct nt_machine_stack_footer)];
}

static void *
nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, size_t idx, void **vm_stack, void **machine_stack)
{
    const char *vstack, *mstack;
    const char *guard_page;
    vstack = nt_stack_chunk_get_stack_start(ch, idx);
    guard_page = vstack + vm->default_params.thread_vm_stack_size;
    mstack = guard_page + MSTACK_PAGE_SIZE;

    struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(vm, mstack);
    msf->ch = ch;
    msf->index = idx;

    RUBY_DEBUG_LOG("msf:%p vstack:%p-%p guard_page:%p-%p mstack:%p-%p", msf,
                   vstack, (void *)(guard_page-1),
                   guard_page, (void *)(mstack-1),
                   mstack, (void *)(msf));

    *vm_stack = (void *)vstack;
    *machine_stack = (void *)mstack;

    return (void *)guard_page;
}

RBIMPL_ATTR_MAYBE_UNUSED()
static void
nt_stack_chunk_dump(void)
{
    struct nt_stack_chunk_header *ch;
    int i;

    fprintf(stderr, "** nt_stack_chunks\n");
    ch = nt_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }

    fprintf(stderr, "** nt_free_stack_chunks\n");
    ch = nt_free_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_free_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }
}

static int
nt_guard_page(const char *p, size_t len)
{
    if (mprotect((void *)p, len, PROT_NONE) != -1) {
        return 0;
    }
    else {
        return errno;
    }
}

static int
nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
{
    int err = 0;

    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
      retry:
        if (nt_free_stack_chunks) {
            struct nt_stack_chunk_header *ch = nt_free_stack_chunks;
            if (ch->free_stack_pos > 0) {
                RUBY_DEBUG_LOG("free_stack_pos:%d", ch->free_stack_pos);
                nt_stack_chunk_get_stack(vm, ch, ch->free_stack[--ch->free_stack_pos], vm_stack, machine_stack);
            }
            else if (ch->uninitialized_stack_count > 0) {
                RUBY_DEBUG_LOG("uninitialized_stack_count:%d", ch->uninitialized_stack_count);
                size_t idx = ch->stack_count - ch->uninitialized_stack_count--;
                void *guard_page = nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
                err = nt_guard_page(guard_page, MSTACK_PAGE_SIZE);
            }
            else {
                nt_free_stack_chunks = ch->prev_free_chunk;
                ch->prev_free_chunk = NULL;
                goto retry;
            }
        }
        else {
            struct nt_stack_chunk_header *p = nt_alloc_thread_stack_chunk();
            if (p == NULL) {
                err = errno;
            }
            else {
                nt_free_stack_chunks = nt_stack_chunks = p;
                goto retry;
            }
        }
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);

    return err;
}

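/*
 * Allocation order (inferred from the control flow above): reuse a slot from
 * the current chunk's free_stack first, then hand out an uninitialized slot
 * (protecting its guard page on first use), and only when the free-chunk
 * list is exhausted mmap a new chunk and retry.
 */
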
static void
nt_madvise_free_or_dontneed(void *addr, size_t len)
{
    // Best-effort: madvise failures are ignored (MADV_FREE may simply be
    // unsupported by the running kernel).
#if defined(MADV_FREE)
    int r = madvise(addr, len, MADV_FREE);
    // Return on success, or else try MADV_DONTNEED
    if (r == 0) return;
#endif
#if defined(MADV_DONTNEED)
    madvise(addr, len, MADV_DONTNEED);
#endif
}

static void
nt_free_stack(void *mstack)
{
    if (!mstack) return;

    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
        struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(GET_VM(), mstack);
        struct nt_stack_chunk_header *ch = msf->ch;
        int idx = (int)msf->index;
        void *stack = nt_stack_chunk_get_stack_start(ch, idx);

        RUBY_DEBUG_LOG("stack:%p mstack:%p ch:%p index:%d", stack, mstack, ch, idx);

        if (ch->prev_free_chunk == NULL) {
            ch->prev_free_chunk = nt_free_stack_chunks;
            nt_free_stack_chunks = ch;
        }
        ch->free_stack[ch->free_stack_pos++] = idx;

        // release the pages back to the kernel
        nt_madvise_free_or_dontneed(stack, nt_thread_stack_size());
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);
}

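/*
 * Note: freeing a stack never unmaps it. The slot index goes back onto the
 * chunk's free_stack and its pages are released with
 * madvise(MADV_FREE/MADV_DONTNEED), so the address range and its guard page
 * stay mapped and ready for reuse.
 */
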
static int
native_thread_check_and_create_shared(rb_vm_t *vm)
{
    bool need_to_make = false;

    rb_native_mutex_lock(&vm->ractor.sched.lock);
    {
        unsigned int snt_cnt = vm->ractor.sched.snt_cnt;
        if (!vm->ractor.main_ractor->threads.sched.enable_mn_threads) snt_cnt++; // the main ractor does not need an SNT

        if (((int)snt_cnt < MINIMUM_SNT) ||
            (snt_cnt < vm->ractor.cnt &&
             snt_cnt < vm->ractor.sched.max_cpu)) {

            RUBY_DEBUG_LOG("added snt:%u dnt:%u ractor_cnt:%u grq_cnt:%u",
                           vm->ractor.sched.snt_cnt,
                           vm->ractor.sched.dnt_cnt,
                           vm->ractor.cnt,
                           vm->ractor.sched.grq_cnt);

            vm->ractor.sched.snt_cnt++;
            need_to_make = true;
        }
        else {
            RUBY_DEBUG_LOG("snt:%d ractor_cnt:%d", (int)vm->ractor.sched.snt_cnt, (int)vm->ractor.cnt);
        }
    }
    rb_native_mutex_unlock(&vm->ractor.sched.lock);

    if (need_to_make) {
        struct rb_native_thread *nt = native_thread_alloc();
        nt->vm = vm;
        return native_thread_create0(nt);
    }
    else {
        return 0;
    }
}

static COROUTINE
co_start(struct coroutine_context *from, struct coroutine_context *self)
{
#ifdef RUBY_ASAN_ENABLED
    __sanitizer_finish_switch_fiber(self->fake_stack,
                                    (const void**)&from->stack_base, &from->stack_size);
#endif

    rb_thread_t *th = (rb_thread_t *)self->argument;
    struct rb_thread_sched *sched = TH_SCHED(th);

    VM_ASSERT(th->nt != NULL);
    VM_ASSERT(th == sched->running);
    VM_ASSERT(sched->lock_owner == NULL);

    thread_sched_set_lock_owner(sched, th);
    thread_sched_add_running_thread(TH_SCHED(th), th);
    thread_sched_unlock(sched, th);
    {
        RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
        call_thread_start_func_2(th);
    }
    thread_sched_lock(sched, NULL);

    RUBY_DEBUG_LOG("terminated th:%d", (int)th->serial);

    // the thread is terminated

    struct rb_native_thread *nt = th->nt;
    bool is_dnt = th_has_dedicated_nt(th);
    native_thread_assign(NULL, th);
    rb_ractor_set_current_ec(th->ractor, NULL);

    if (is_dnt) {
        // the thread has a dedicated native thread: just return to its nt_context
        th->sched.finished = true;
        coroutine_transfer0(self, nt->nt_context, true);
    }
    else {
        rb_vm_t *vm = th->vm;
        bool has_ready_ractor = vm->ractor.sched.grq_cnt > 0; // at least this ractor is not queued
        rb_thread_t *next_th = sched->running;

        if (!has_ready_ractor && next_th && !next_th->nt) {
            // switch directly to the next ready thread
            thread_sched_set_lock_owner(sched, NULL);
            th->sched.finished = true;
            thread_sched_switch0(th->sched.context, next_th, nt, true);
        }
        else {
            // return to the native thread so it can pick up the next ractor
            th->sched.finished = true;
            coroutine_transfer0(self, nt->nt_context, true);
        }
    }

    rb_bug("unreachable");
}

static int
native_thread_create_shared(rb_thread_t *th)
{
    // setup coroutine
    rb_vm_t *vm = th->vm;
    void *vm_stack = NULL, *machine_stack = NULL;
    int err = nt_alloc_stack(vm, &vm_stack, &machine_stack);
    if (err) return err;

    VM_ASSERT(vm_stack < machine_stack);

    // setup vm stack
    size_t vm_stack_words = th->vm->default_params.thread_vm_stack_size/sizeof(VALUE);
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_words);

    // setup machine stack
    size_t machine_stack_size = vm->default_params.thread_machine_stack_size - sizeof(struct nt_machine_stack_footer);
    th->ec->machine.stack_start = (void *)((uintptr_t)machine_stack + machine_stack_size);
    th->ec->machine.stack_maxsize = machine_stack_size;
    th->sched.context_stack = machine_stack;

    th->sched.context = ruby_xmalloc(sizeof(struct coroutine_context));
    coroutine_initialize(th->sched.context, co_start, machine_stack, machine_stack_size);
    th->sched.context->argument = th;

    RUBY_DEBUG_LOG("th:%u vm_stack:%p machine_stack:%p", rb_th_serial(th), vm_stack, machine_stack);
    thread_sched_to_ready(TH_SCHED(th), th);

    // setup nt
    return native_thread_check_and_create_shared(th->vm);
}

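/*
 * Summary (based on the code above): in MN mode a new Ruby thread gets a
 * coroutine whose VM stack and machine stack both live in a shared stack
 * chunk; the thread is then queued as ready, and
 * native_thread_check_and_create_shared() grows the pool of shared native
 * threads (SNTs) on demand instead of creating one native thread per Ruby
 * thread.
 */
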
#else // USE_MN_THREADS

static int
native_thread_create_shared(rb_thread_t *th)
{
    rb_bug("unreachable");
}

static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    rb_bug("unreachable");
}

#endif // USE_MN_THREADS

#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS

static bool
fd_readable_nonblock(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN, };
    return poll(&pfd, 1, 0) != 0;
}

static bool
fd_writable_nonblock(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLOUT, };
    return poll(&pfd, 1, 0) != 0;
}

static void
verify_waiting_list(void)
{
#if VM_CHECK_MODE > 0
    // the waiting list must be ordered by timeout: [t1 <= t2 <= ..., 0, 0, ...]
    struct rb_thread_sched_waiting *w, *prev_w = NULL;

    ccan_list_for_each(&timer_th.waiting, w, node) {
        if (prev_w) {
            rb_hrtime_t timeout = w->data.timeout;
            rb_hrtime_t prev_timeout = prev_w->data.timeout;
            VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
        }
        prev_w = w;
    }
#endif
}

#if HAVE_SYS_EVENT_H // kqueue helpers

static enum thread_sched_waiting_flag
kqueue_translate_filter_to_flags(int16_t filter)
{
    switch (filter) {
      case EVFILT_READ:
        return thread_sched_waiting_io_read;
      case EVFILT_WRITE:
        return thread_sched_waiting_io_write;
      case EVFILT_TIMER:
        return thread_sched_waiting_timeout;
      default:
        rb_bug("kevent filter:%d not supported", filter);
    }
}

static int
kqueue_wait(rb_vm_t *vm)
{
    struct timespec calculated_timeout;
    struct timespec *timeout = NULL;
    int timeout_ms = timer_thread_set_timeout(vm);

    if (timeout_ms >= 0) {
        calculated_timeout.tv_sec = timeout_ms / 1000;
        calculated_timeout.tv_nsec = (timeout_ms % 1000) * 1000000;
        timeout = &calculated_timeout;
    }

    return kevent(timer_th.event_fd, NULL, 0, timer_th.finished_events, KQUEUE_EVENTS_MAX, timeout);
}

static void
kqueue_create(void)
{
    if ((timer_th.event_fd = kqueue()) == -1) rb_bug("kqueue creation failed (errno:%d)", errno);

    int flags = fcntl(timer_th.event_fd, F_GETFD);
    if (flags == -1) {
        rb_bug("kqueue GETFD failed (errno:%d)", errno);
    }

    flags |= FD_CLOEXEC;
    if (fcntl(timer_th.event_fd, F_SETFD, flags) == -1) {
        rb_bug("kqueue SETFD failed (errno:%d)", errno);
    }
}

static void
kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
{
    if (flags) {
        struct kevent ke[2];
        int num_events = 0;

        if (flags & thread_sched_waiting_io_read) {
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (flags & thread_sched_waiting_io_write) {
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
            perror("kevent");
            rb_bug("unregister/kevent fails. errno:%d", errno);
        }
    }
}

static bool
kqueue_already_registered(int fd)
{
    struct rb_thread_sched_waiting *w, *found_w = NULL;

    ccan_list_for_each(&timer_th.waiting, w, node) {
        // like EEXIST from epoll_ctl, but keyed on the fd (rather than flags) for simplicity
        if (w->flags && w->data.fd == fd) {
            found_w = w;
            break;
        }
    }
    return found_w != NULL;
}

#endif // HAVE_SYS_EVENT_H

// return false if the fd is not waitable or does not need to wait.
static bool
timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel)
{
    RUBY_DEBUG_LOG("th:%u fd:%d flag:%d rel:%lu", rb_th_serial(th), fd, flags, rel ? (unsigned long)*rel : 0);

    VM_ASSERT(th == NULL || TH_SCHED(th)->running == th);
    VM_ASSERT(flags != 0);

    rb_hrtime_t abs = 0; // 0 means no timeout

    if (rel) {
        if (*rel > 0) {
            flags |= thread_sched_waiting_timeout;
        }
        else {
            return false;
        }
    }

    if (rel && *rel > 0) {
        flags |= thread_sched_waiting_timeout;
    }

#if HAVE_SYS_EVENT_H
    struct kevent ke[2];
    int num_events = 0;
#else
    uint32_t epoll_events = 0;
#endif
    if (flags & thread_sched_waiting_timeout) {
        VM_ASSERT(rel != NULL);
        abs = rb_hrtime_add(rb_hrtime_now(), *rel);
    }

    if (flags & thread_sched_waiting_io_read) {
        if (!(flags & thread_sched_waiting_io_force) && fd_readable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_readable_nonblock");
            return false;
        }
        else {
            VM_ASSERT(fd >= 0);
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLIN;
#endif
        }
    }

    if (flags & thread_sched_waiting_io_write) {
        if (!(flags & thread_sched_waiting_io_force) && fd_writable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_writable_nonblock");
            return false;
        }
        else {
            VM_ASSERT(fd >= 0);
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLOUT;
#endif
        }
    }

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
#if HAVE_SYS_EVENT_H
        if (num_events > 0) {
            if (kqueue_already_registered(fd)) {
                rb_native_mutex_unlock(&timer_th.waiting_lock);
                return false;
            }

            if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
                RUBY_DEBUG_LOG("failed (%d)", errno);
                switch (errno) {
                  case EBADF:
                    // the fd is closed?
                  case EINTR:
                    // signal received?
                  default:
                    perror("kevent");
                    rb_bug("register/kevent failed(fd:%d, errno:%d)", fd, errno);
                }
            }
            RUBY_DEBUG_LOG("kevent(add, fd:%d) success", fd);
        }
#else
        if (epoll_events) {
            struct epoll_event event = {
                .events = epoll_events,
                .data = { .ptr = (void *)th, },
            };
            if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_ADD, fd, &event) == -1) {
                RUBY_DEBUG_LOG("failed (%d)", errno);
                switch (errno) {
                  case EBADF:
                    // the fd is closed?
                  case EPERM:
                    // the fd does not support epoll
                  case EEXIST:
                    // the fd is already registered by another thread
                    rb_native_mutex_unlock(&timer_th.waiting_lock);
                    return false;
                  default:
                    perror("epoll_ctl");
                    rb_bug("register/epoll_ctl failed(fd:%d, errno:%d)", fd, errno);
                }
            }
            RUBY_DEBUG_LOG("epoll_ctl(add, fd:%d, events:%d) success", fd, epoll_events);
        }
#endif

        if (th) {
            VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

            // setup waiting information
            th->sched.waiting_reason.flags = flags;
            th->sched.waiting_reason.data.timeout = abs;
            th->sched.waiting_reason.data.fd = fd;
            th->sched.waiting_reason.data.result = 0;

            if (abs == 0) { // no timeout
                VM_ASSERT(!(flags & thread_sched_waiting_timeout));
                ccan_list_add_tail(&timer_th.waiting, &th->sched.waiting_reason.node);
            }
            else {
                RUBY_DEBUG_LOG("abs:%lu", (unsigned long)abs);
                VM_ASSERT(flags & thread_sched_waiting_timeout);

                // insert into the timeout-sorted part of the waiting list
                struct rb_thread_sched_waiting *w, *prev_w = NULL;

                ccan_list_for_each(&timer_th.waiting, w, node) {
                    if ((w->flags & thread_sched_waiting_timeout) &&
                        w->data.timeout < abs) {
                        prev_w = w;
                    }
                    else {
                        break;
                    }
                }

                if (prev_w) {
                    ccan_list_add_after(&timer_th.waiting, &prev_w->node, &th->sched.waiting_reason.node);
                }
                else {
                    ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);
                }

                verify_waiting_list();

                // update the timer thread's next timeout
                timer_thread_wakeup();
            }
        }
        else {
            VM_ASSERT(abs == 0);
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);

    return true;
}

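/*
 * Note on timer_th.waiting (inferred from the insertion code above): entries
 * with a timeout are kept at the front, sorted by absolute deadline, and
 * entries without a timeout (abs == 0) are appended at the tail, so the
 * timer thread can derive its next wakeup from the list head alone.
 */
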
static void
timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags)
{
    RUBY_DEBUG_LOG("th:%u fd:%d", rb_th_serial(th), fd);
#if HAVE_SYS_EVENT_H
    kqueue_unregister_waiting(fd, flags);
#else
    if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_DEL, fd, NULL) == -1) {
        switch (errno) {
          case EBADF:
            // the fd is probably already closed; ignore
            break;
          default:
            perror("epoll_ctl");
            rb_bug("unregister/epoll_ctl fails. errno:%d", errno);
        }
    }
#endif
}

static void
timer_thread_setup_mn(void)
{
#if HAVE_SYS_EVENT_H // kqueue
    kqueue_create();
    RUBY_DEBUG_LOG("kqueue_fd:%d", timer_th.event_fd);
#else // epoll
    if ((timer_th.event_fd = epoll_create1(EPOLL_CLOEXEC)) == -1) rb_bug("epoll_create (errno:%d)", errno);
    RUBY_DEBUG_LOG("epoll_fd:%d", timer_th.event_fd);
#endif
    RUBY_DEBUG_LOG("comm_fds:%d/%d", timer_th.comm_fds[0], timer_th.comm_fds[1]);

    timer_thread_register_waiting(NULL, timer_th.comm_fds[0], thread_sched_waiting_io_read | thread_sched_waiting_io_force, NULL);
}

static int
event_wait(rb_vm_t *vm)
{
#if HAVE_SYS_EVENT_H
    int r = kqueue_wait(vm);
#else
    int r = epoll_wait(timer_th.event_fd, timer_th.finished_events, EPOLL_EVENTS_MAX, timer_thread_set_timeout(vm));
#endif
    return r;
}

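/*
 * Timer thread responsibilities, summarized from the handlers below:
 *   - on timeout: hand out time slices to running native threads, signal the
 *     global run queue (GRQ) when ractors are queued, and create additional
 *     shared native threads if there is a shortage;
 *   - on fd events: wake up the Ruby thread registered for that fd, or drain
 *     the internal communication pipe used to wake the timer thread itself.
 */
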
static void
timer_thread_polling(rb_vm_t *vm)
{
    int r = event_wait(vm);

    RUBY_DEBUG_LOG("r:%d errno:%d", r, errno);

    switch (r) {
      case 0: // timeout
        RUBY_DEBUG_LOG("timeout%s", "");

        ractor_sched_lock(vm, NULL);
        {
            // timeslice
            timer_thread_check_timeslice(vm);

            // lazy GRQ dequeue
            if (vm->ractor.sched.grq_cnt > 0) {
                RUBY_DEBUG_LOG("GRQ cnt: %u", vm->ractor.sched.grq_cnt);
                rb_native_cond_signal(&vm->ractor.sched.cond);
            }
        }
        ractor_sched_unlock(vm, NULL);

        // check NT shortage
        native_thread_check_and_create_shared(vm);
        break;

      case -1:
        switch (errno) {
          case EINTR:
            // simply retry
            break;
          default:
            perror("event_wait");
            rb_bug("event_wait errno:%d", errno);
        }
        break;

      default:
        RUBY_DEBUG_LOG("%d event(s)", r);

#if HAVE_SYS_EVENT_H
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].udata;
            int fd = (int)timer_th.finished_events[i].ident;
            int16_t filter = timer_th.finished_events[i].filter;

            if (th == NULL) {
                // wakeup request for the timer thread itself
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wakeup a specific thread by IO
                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s",
                               rb_th_serial(th),
                               (filter == EVFILT_READ) ? "read/" : "",
                               (filter == EVFILT_WRITE) ? "write/" : "");

                rb_native_mutex_lock(&timer_th.waiting_lock);
                {
                    if (th->sched.waiting_reason.flags) {
                        // delete from the waiting chain
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, fd, kqueue_translate_filter_to_flags(filter));

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = filter;

                        timer_thread_wakeup_thread(th);
                    }
                    else {
                        // already released
                    }
                }
                rb_native_mutex_unlock(&timer_th.waiting_lock);
            }
        }
#else // epoll
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].data.ptr;

            if (th == NULL) {
                // wakeup request for the timer thread itself
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wakeup a specific thread by IO
                uint32_t events = timer_th.finished_events[i].events;

                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s%s%s%s%s",
                               rb_th_serial(th),
                               (events & EPOLLIN)    ? "in/" : "",
                               (events & EPOLLOUT)   ? "out/" : "",
                               (events & EPOLLRDHUP) ? "RDHUP/" : "",
                               (events & EPOLLPRI)   ? "pri/" : "",
                               (events & EPOLLERR)   ? "err/" : "",
                               (events & EPOLLHUP)   ? "hup/" : "");

                rb_native_mutex_lock(&timer_th.waiting_lock);
                {
                    if (th->sched.waiting_reason.flags) {
                        // delete from the waiting chain
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = (int)events;

                        timer_thread_wakeup_thread(th);
                    }
                    else {
                        // already released
                    }
                }
                rb_native_mutex_unlock(&timer_th.waiting_lock);
            }
        }
#endif
    }
}

#else // HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H: fall back to poll(2)

static void
timer_thread_setup_mn(void)
{
    // do nothing
}

static void
timer_thread_polling(rb_vm_t *vm)
{
    int timeout = timer_thread_set_timeout(vm);

    struct pollfd pfd = {
        .fd = timer_th.comm_fds[0],
        .events = POLLIN,
    };

    int r = poll(&pfd, 1, timeout);

    switch (r) {
      case 0: // timeout
        rb_native_mutex_lock(&vm->ractor.sched.lock);
        {
            // timeslice
            timer_thread_check_timeslice(vm);
        }
        rb_native_mutex_unlock(&vm->ractor.sched.lock);
        break;

      case -1: // error
        switch (errno) {
          case EINTR:
            // ignore
            break;
          default:
            perror("poll");
            rb_bug("poll errno:%d", errno);
            break;
        }
        break;

      case 1:
        consume_communication_pipe(timer_th.comm_fds[0]);
        break;

      default:
        rb_bug("unreachable");
    }
}

#endif // (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS