static void timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags);
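// Cancel a pending wait registration for `th`, if any. Returns true when the
// thread was still on the timer thread's waiting list and has been removed.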
static bool
timer_thread_cancel_waiting(rb_thread_t *th)
{
    bool canceled = false;

    if (th->sched.waiting_reason.flags) {
        rb_native_mutex_lock(&timer_th.waiting_lock);
        {
            if (th->sched.waiting_reason.flags) { // re-check under the lock
                canceled = true;
                ccan_list_del_init(&th->sched.waiting_reason.node);
                if (th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write)) {
                    timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);
                }
                th->sched.waiting_reason.flags = thread_sched_waiting_none;
            }
        }
        rb_native_mutex_unlock(&timer_th.waiting_lock);
    }

    return canceled;
}
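// Unblocking function (UBF) installed while a thread is parked on the timer
// thread: it cancels the pending wait registration and, if the thread is not
// running yet, puts it back on the ready queue.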
static void
ubf_event_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    VM_ASSERT(th->nt == NULL || !th_has_dedicated_nt(th));

    // safe because th->interrupt_lock is held by the caller
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    bool canceled = timer_thread_cancel_waiting(th);

    thread_sched_lock(sched, th);
    {
        if (sched->running == th) {
            RUBY_DEBUG_LOG("not waiting yet");
        }
        else if (canceled) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            RUBY_DEBUG_LOG("already not waiting");
        }
    }
    thread_sched_unlock(sched, th);
}
static bool timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel);
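// Park the current thread until `fd` becomes ready for `events` and/or the
// relative timeout `rel` expires, without blocking the underlying native
// thread. Returns true if the wait ended by timeout. Callers must be running
// on a shared native thread (see the VM_ASSERT below).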
static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    VM_ASSERT(!th_has_dedicated_nt(th)); // on SNT

    volatile bool timedout = false, need_cancel = false;

    if (timer_thread_register_waiting(th, fd, events, rel)) {
        RUBY_DEBUG_LOG("wait fd:%d", fd);

        RB_VM_SAVE_MACHINE_CONTEXT(th);
        setup_ubf(th, ubf_event_waiting, (void *)th);

        thread_sched_lock(sched, th);
        {
            if (th->sched.waiting_reason.flags == thread_sched_waiting_none) {
                // already awakened
            }
            else if (RUBY_VM_INTERRUPTED(th->ec)) {
                need_cancel = true;
            }
            else {
                RUBY_DEBUG_LOG("sleep");

                th->status = THREAD_STOPPED_FOREVER;
                thread_sched_wakeup_next_thread(sched, th, true);
                thread_sched_wait_running_turn(sched, th, true);

                RUBY_DEBUG_LOG("wakeup");
            }

            timedout = th->sched.waiting_reason.data.result == 0;
        }
        thread_sched_unlock(sched, th);

        if (need_cancel) {
            timer_thread_cancel_waiting(th);
        }

        setup_ubf(th, NULL, NULL);

        th->status = THREAD_RUNNABLE;
    }
    else {
        RUBY_DEBUG_LOG("can not wait fd:%d", fd);
        return false;
    }

    VM_ASSERT(sched->running == th);

    return timedout;
}
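/*
 * Illustrative usage sketch (not part of this file): an I/O primitive running
 * on an MN thread could park itself until `fd` becomes readable or 100ms pass.
 * The local names below are hypothetical; only thread_sched_wait_events()
 * above is real.
 *
 *   rb_hrtime_t rel = 100 * RB_HRTIME_PER_MSEC;
 *   bool timedout = thread_sched_wait_events(TH_SCHED(th), th, fd,
 *                                            thread_sched_waiting_io_read, &rel);
 */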
static int
get_sysconf_page_size(void)
{
    static long page_size = 0;

    if (UNLIKELY(page_size == 0)) {
        page_size = sysconf(_SC_PAGESIZE);
        VM_ASSERT(page_size < INT_MAX);
    }
    return (int)page_size;
}
#define MSTACK_CHUNK_SIZE (512 * 1024 * 1024)
#define MSTACK_PAGE_SIZE get_sysconf_page_size()
#define MSTACK_CHUNK_PAGE_NUM (MSTACK_CHUNK_SIZE / MSTACK_PAGE_SIZE - 1)
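/*
 * Illustrative capacity arithmetic (assuming 4 KiB pages and the common
 * 64-bit defaults of a 1 MiB VM stack and a 1 MiB machine stack; both are
 * configurable): nt_thread_stack_size() rounds 1 MiB + 1 MiB + 4 KiB up to
 * 513 pages (2,101,248 bytes) per slot. A 512 MiB chunk has 131,072 pages;
 * MSTACK_CHUNK_PAGE_NUM holds one back and the chunk header occupies at least
 * one more, so one mmap'ed chunk serves roughly
 * 131,070 * 4 KiB / 2,101,248 B ≈ 255 thread stacks.
 */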
static struct nt_stack_chunk_header {
    struct nt_stack_chunk_header *prev_chunk;
    struct nt_stack_chunk_header *prev_free_chunk;

    uint16_t start_page;
    uint16_t stack_count;
    uint16_t uninitialized_stack_count;

    uint16_t free_stack_pos;
    uint16_t free_stack[];
} *nt_stack_chunks = NULL,
  *nt_free_stack_chunks = NULL;
struct nt_machine_stack_footer {
    struct nt_stack_chunk_header *ch;
    size_t index;
};

static rb_nativethread_lock_t nt_machine_stack_lock = RB_NATIVETHREAD_LOCK_INIT;
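// Size of one stack slot in a chunk: VM stack + machine stack + one guard
// page, rounded up to whole pages. Cached after the first call.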
static size_t
nt_thread_stack_size(void)
{
    static size_t msz;
    if (LIKELY(msz > 0)) return msz;

    rb_vm_t *vm = GET_VM();
    int sz = (int)(vm->default_params.thread_vm_stack_size + vm->default_params.thread_machine_stack_size + MSTACK_PAGE_SIZE);
    int page_num = roomof(sz, MSTACK_PAGE_SIZE);
    msz = (size_t)page_num * MSTACK_PAGE_SIZE;

    return msz;
}
static struct nt_stack_chunk_header *
nt_alloc_thread_stack_chunk(void)
{
    int mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
    mmap_flags |= MAP_STACK;
#endif

    const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
    if (m == MAP_FAILED) {
        return NULL;
    }

    ruby_annotate_mmap(m, MSTACK_CHUNK_SIZE, "Ruby:nt_alloc_thread_stack_chunk");

    size_t msz = nt_thread_stack_size();
    int header_page_cnt = 1;
    int stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    int ch_size = sizeof(struct nt_stack_chunk_header) + sizeof(uint16_t) * stack_count;

    if (ch_size > MSTACK_PAGE_SIZE * header_page_cnt) {
        header_page_cnt = (ch_size + MSTACK_PAGE_SIZE - 1) / MSTACK_PAGE_SIZE;
        stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    }

    VM_ASSERT(stack_count <= UINT16_MAX);

    struct nt_stack_chunk_header *ch = (struct nt_stack_chunk_header *)m;

    ch->start_page = header_page_cnt;
    ch->prev_chunk = nt_stack_chunks;
    ch->prev_free_chunk = nt_free_stack_chunks;
    ch->uninitialized_stack_count = ch->stack_count = (uint16_t)stack_count;
    ch->free_stack_pos = 0;

    RUBY_DEBUG_LOG("ch:%p start_page:%d stack_cnt:%d stack_size:%d", ch, (int)ch->start_page, (int)ch->stack_count, (int)msz);

    return ch;
}
static void *
nt_stack_chunk_get_stack_start(struct nt_stack_chunk_header *ch, size_t idx)
{
    const char *m = (char *)ch;
    return (void *)(m + ch->start_page * MSTACK_PAGE_SIZE + idx * nt_thread_stack_size());
}
static struct nt_machine_stack_footer *
nt_stack_chunk_get_msf(const rb_vm_t *vm, const char *mstack)
{
    const size_t msz = vm->default_params.thread_machine_stack_size;
    return (struct nt_machine_stack_footer *)&mstack[msz - sizeof(struct nt_machine_stack_footer)];
}
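/*
 * Layout of one stack slot inside a chunk (low to high addresses):
 *
 *   [ VM stack | guard page | machine stack ... nt_machine_stack_footer ]
 *
 * The footer sits at the top of the machine stack area and records which
 * chunk/slot the stack came from so nt_free_stack() can return it.
 */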
static void *
nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, size_t idx, void **vm_stack, void **machine_stack)
{
    const char *vstack, *mstack;
    const char *guard_page;
    vstack = nt_stack_chunk_get_stack_start(ch, idx);
    guard_page = vstack + vm->default_params.thread_vm_stack_size;
    mstack = guard_page + MSTACK_PAGE_SIZE;

    struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(vm, mstack);
    msf->ch = ch;
    msf->index = idx;

    RUBY_DEBUG_LOG("msf:%p vstack:%p-%p guard_page:%p-%p mstack:%p-%p", msf,
                   vstack, (void *)(guard_page-1),
                   guard_page, (void *)(mstack-1),
                   mstack, (void *)(msf));

    *vm_stack = (void *)vstack;
    *machine_stack = (void *)mstack;

    return (void *)guard_page;
}
RBIMPL_ATTR_MAYBE_UNUSED()
static void
nt_stack_chunk_dump(void)
{
    struct nt_stack_chunk_header *ch;
    int i;

    fprintf(stderr, "** nt_stack_chunks\n");
    ch = nt_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }

    fprintf(stderr, "** nt_free_stack_chunks\n");
    ch = nt_free_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_free_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }
}
static int
nt_guard_page(const char *p, size_t len)
{
    if (mprotect((void *)p, len, PROT_NONE) != -1) {
        return 0;
    }
    else {
        return errno;
    }
}
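// Hand out one VM stack / machine stack pair. Preference order: a previously
// freed slot from the free list, then an uninitialized slot of the current
// chunk (whose guard page is protected here), then a freshly mmap'ed chunk.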
static int
nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
{
    int err = 0;

    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
      retry:
        if (nt_free_stack_chunks) {
            struct nt_stack_chunk_header *ch = nt_free_stack_chunks;
            if (ch->free_stack_pos > 0) {
                RUBY_DEBUG_LOG("free_stack_pos:%d", ch->free_stack_pos);
                nt_stack_chunk_get_stack(vm, ch, ch->free_stack[--ch->free_stack_pos], vm_stack, machine_stack);
            }
            else if (ch->uninitialized_stack_count > 0) {
                RUBY_DEBUG_LOG("uninitialized_stack_count:%d", ch->uninitialized_stack_count);

                size_t idx = ch->stack_count - ch->uninitialized_stack_count--;
                void *guard_page = nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
                err = nt_guard_page(guard_page, MSTACK_PAGE_SIZE);
            }
            else {
                nt_free_stack_chunks = ch->prev_free_chunk;
                ch->prev_free_chunk = NULL;
                goto retry;
            }
        }
        else {
            struct nt_stack_chunk_header *p = nt_alloc_thread_stack_chunk();
            if (p == NULL) {
                err = errno;
            }
            else {
                nt_free_stack_chunks = nt_stack_chunks = p;
                goto retry;
            }
        }
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);

    return err;
}
static void
nt_madvise_free_or_dontneed(void *addr, size_t len)
{
    // Prefer MADV_FREE; fall back to MADV_DONTNEED if it is unavailable or fails.
#if defined(MADV_FREE)
    int r = madvise(addr, len, MADV_FREE);
    if (r == 0) return;
#endif
#if defined(MADV_DONTNEED)
    madvise(addr, len, MADV_DONTNEED);
#endif
}
static void
nt_free_stack(void *mstack)
{
    if (!mstack) return;

    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
        struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(GET_VM(), mstack);
        struct nt_stack_chunk_header *ch = msf->ch;
        int idx = (int)msf->index;
        void *stack = nt_stack_chunk_get_stack_start(ch, idx);

        RUBY_DEBUG_LOG("stack:%p mstack:%p ch:%p index:%d", stack, mstack, ch, idx);

        if (ch->prev_free_chunk == NULL) {
            ch->prev_free_chunk = nt_free_stack_chunks;
            nt_free_stack_chunks = ch;
        }
        ch->free_stack[ch->free_stack_pos++] = idx;

        // the stack pages are no longer needed; let the kernel reclaim them
        nt_madvise_free_or_dontneed(stack, nt_thread_stack_size());
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);
}
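// Check whether another shared native thread (SNT) is needed for the current
// number of ractors, and create one if so. Called from the timer thread's
// timeout handling and after a new MN thread is made ready.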
static int
native_thread_check_and_create_shared(rb_vm_t *vm)
{
    bool need_to_make = false;

    rb_native_mutex_lock(&vm->ractor.sched.lock);
    {
        unsigned int snt_cnt = vm->ractor.sched.snt_cnt;
        if (!vm->ractor.main_ractor->threads.sched.enable_mn_threads) snt_cnt++; // the main ractor is served by a DNT

        if (((int)snt_cnt < MINIMUM_SNT) ||
            (snt_cnt < vm->ractor.cnt &&
             snt_cnt < vm->ractor.sched.max_cpu)) {

            RUBY_DEBUG_LOG("added snt:%u dnt:%u ractor_cnt:%u grq_cnt:%u",
                           vm->ractor.sched.snt_cnt,
                           vm->ractor.sched.dnt_cnt,
                           vm->ractor.cnt,
                           vm->ractor.sched.grq_cnt);

            need_to_make = true;
            vm->ractor.sched.snt_cnt++;
        }
        else {
            RUBY_DEBUG_LOG("snt:%d ractor_cnt:%d", (int)vm->ractor.sched.snt_cnt, (int)vm->ractor.cnt);
        }
    }
    rb_native_mutex_unlock(&vm->ractor.sched.lock);

    if (need_to_make) {
        struct rb_native_thread *nt = native_thread_alloc();
        nt->vm = vm;
        return native_thread_create0(nt);
    }
    else {
        return 0;
    }
}
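// Coroutine entry point for a Ruby thread running on a shared native thread:
// it runs the thread body and then either returns control to the native
// thread's scheduler context or switches directly to the next ready thread.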
# define co_start ruby_coroutine_start

static COROUTINE
co_start(struct coroutine_context *from, struct coroutine_context *self)
{
#ifdef RUBY_ASAN_ENABLED
    __sanitizer_finish_switch_fiber(self->fake_stack,
                                    (const void**)&from->stack_base, &from->stack_size);
#endif

    rb_thread_t *th = (rb_thread_t *)self->argument;
    struct rb_thread_sched *sched = TH_SCHED(th);

    VM_ASSERT(th->nt != NULL);
    VM_ASSERT(th == sched->running);
    VM_ASSERT(sched->lock_owner == NULL);

    thread_sched_set_lock_owner(sched, th);
    thread_sched_add_running_thread(TH_SCHED(th), th);
    thread_sched_unlock(sched, th);

    call_thread_start_func_2(th);

    thread_sched_lock(sched, NULL);

    RUBY_DEBUG_LOG("terminated th:%d", (int)th->serial);

    // the thread has terminated; drop its native thread association
    struct rb_native_thread *nt = th->nt;
    bool is_dnt = th_has_dedicated_nt(th);
    native_thread_assign(NULL, th);
    rb_ractor_set_current_ec(th->ractor, NULL);

    if (is_dnt) {
        // dedicated native thread: return to its nt_context
        th->sched.finished = true;
        coroutine_transfer0(self, nt->nt_context, true);
    }
    else {
        rb_vm_t *vm = th->vm;
        bool has_ready_ractor = vm->ractor.sched.grq_cnt > 0;
        rb_thread_t *next_th = sched->running;

        if (!has_ready_ractor && next_th && !next_th->nt) {
            // switch directly to the next ready thread on this scheduler
            thread_sched_set_lock_owner(sched, NULL);
            th->sched.finished = true;
            thread_sched_switch0(th->sched.context, next_th, nt, true);
        }
        else {
            // let the native thread pick the next ractor
            th->sched.finished = true;
            coroutine_transfer0(self, nt->nt_context, true);
        }
    }

    rb_bug("unreachable");
}
// Set up an MN (shared native thread) Ruby thread: allocate a stack slot,
// initialize its VM stack and coroutine context, enqueue it as ready, and
// make sure enough shared native threads exist to run it.
static int
native_thread_create_shared(rb_thread_t *th)
{
    // setup coroutine
    rb_vm_t *vm = th->vm;
    void *vm_stack = NULL, *machine_stack = NULL;
    int err = nt_alloc_stack(vm, &vm_stack, &machine_stack);
    if (err) return err;

    VM_ASSERT(vm_stack < machine_stack);

    // setup vm stack
    size_t vm_stack_words = th->vm->default_params.thread_vm_stack_size/sizeof(VALUE);
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_words);

    // setup machine stack
    size_t machine_stack_size = vm->default_params.thread_machine_stack_size - sizeof(struct nt_machine_stack_footer);
    th->ec->machine.stack_start = (void *)((uintptr_t)machine_stack + machine_stack_size);
    th->ec->machine.stack_maxsize = machine_stack_size;
    th->sched.context_stack = machine_stack;

    th->sched.context = ruby_xmalloc(sizeof(struct coroutine_context));
    coroutine_initialize(th->sched.context, co_start, machine_stack, machine_stack_size);
    th->sched.context->argument = th;

    RUBY_DEBUG_LOG("th:%u vm_stack:%p machine_stack:%p", rb_th_serial(th), vm_stack, machine_stack);
    thread_sched_to_ready(TH_SCHED(th), th);

    // setup nt
    return native_thread_check_and_create_shared(th->vm);
}
#else // USE_MN_THREADS == 0

static int
native_thread_create_shared(rb_thread_t *th)
{
    rb_bug("unreachable");
}

static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    rb_bug("unreachable");
}

#endif // USE_MN_THREADS
// Event waiting backend for the timer thread: kqueue (HAVE_SYS_EVENT_H) or
// epoll (HAVE_SYS_EPOLL_H) when MN threads are enabled, otherwise a plain
// poll(2) loop further below.
#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS

static bool
fd_readable_nonblock(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN, };
    return poll(&pfd, 1, 0) != 0;
}

static bool
fd_writable_nonblock(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLOUT, };
    return poll(&pfd, 1, 0) != 0;
}
static void
verify_waiting_list(void)
{
    // expected order: sorted timeouts first, then entries with no timeout (0)
    struct rb_thread_sched_waiting *w, *prev_w = NULL;

    ccan_list_for_each(&timer_th.waiting, w, node) {
        if (prev_w) {
            rb_hrtime_t timeout = w->data.timeout;
            rb_hrtime_t prev_timeout = prev_w->data.timeout;
            VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
        }
        prev_w = w;
    }
}
static enum thread_sched_waiting_flag
kqueue_translate_filter_to_flags(int16_t filter)
{
    switch (filter) {
      case EVFILT_READ:
        return thread_sched_waiting_io_read;
      case EVFILT_WRITE:
        return thread_sched_waiting_io_write;
      case EVFILT_TIMER:
        return thread_sched_waiting_timeout;
      default:
        rb_bug("kevent filter:%d not supported", filter);
    }
}
static int
kqueue_wait(rb_vm_t *vm)
{
    struct timespec calculated_timeout;
    struct timespec *timeout = NULL;
    int timeout_ms = timer_thread_set_timeout(vm);

    if (timeout_ms >= 0) {
        calculated_timeout.tv_sec = timeout_ms / 1000;
        calculated_timeout.tv_nsec = (timeout_ms % 1000) * 1000000;
        timeout = &calculated_timeout;
    }

    return kevent(timer_th.event_fd, NULL, 0, timer_th.finished_events, KQUEUE_EVENTS_MAX, timeout);
}
static void
kqueue_create(void)
{
    if ((timer_th.event_fd = kqueue()) == -1) rb_bug("kqueue creation failed (errno:%d)", errno);

    int flags = fcntl(timer_th.event_fd, F_GETFD);
    if (flags == -1) {
        rb_bug("kqueue GETFD failed (errno:%d)", errno);
    }

    flags |= FD_CLOEXEC;
    if (fcntl(timer_th.event_fd, F_SETFD, flags) == -1) {
        rb_bug("kqueue SETFD failed (errno:%d)", errno);
    }
}
static void
kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
{
    if (flags) {
        struct kevent ke[2];
        int num_events = 0;

        if (flags & thread_sched_waiting_io_read) {
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (flags & thread_sched_waiting_io_write) {
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
            perror("kevent");
            rb_bug("unregister/kevent fails. errno:%d", errno);
        }
    }
}
static bool
kqueue_already_registered(int fd)
{
    struct rb_thread_sched_waiting *w, *found_w = NULL;

    ccan_list_for_each(&timer_th.waiting, w, node) {
        if (w->flags && w->data.fd == fd) {
            found_w = w;
            break;
        }
    }
    return found_w != NULL;
}
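// Register `th` (or the timer thread itself when th == NULL) as waiting for
// fd readiness and/or a timeout. Returns false when no wait is needed (the fd
// is already ready, or it cannot be registered); returns true after queuing
// the entry on timer_th.waiting.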
static bool
timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel)
{
    RUBY_DEBUG_LOG("th:%u fd:%d flag:%d rel:%lu", rb_th_serial(th), fd, flags, rel ? (unsigned long)*rel : 0);

    VM_ASSERT(th == NULL || TH_SCHED(th)->running == th);
    VM_ASSERT(flags != 0);

    rb_hrtime_t abs = 0; // 0 means no timeout

    if (rel && *rel > 0) {
        flags |= thread_sched_waiting_timeout;
    }

#if HAVE_SYS_EVENT_H
    struct kevent ke[2];
    int num_events = 0;
#else
    uint32_t epoll_events = 0;
#endif

    if (flags & thread_sched_waiting_timeout) {
        VM_ASSERT(rel != NULL);
        abs = rb_hrtime_add(rb_hrtime_now(), *rel);
    }

    if (flags & thread_sched_waiting_io_read) {
        if (!(flags & thread_sched_waiting_io_force) && fd_readable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_readable_nonblock");
            return false;
        }
        else {
            VM_ASSERT(fd >= 0);
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLIN;
#endif
        }
    }

    if (flags & thread_sched_waiting_io_write) {
        if (!(flags & thread_sched_waiting_io_force) && fd_writable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_writable_nonblock");
            return false;
        }
        else {
            VM_ASSERT(fd >= 0);
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLOUT;
#endif
        }
    }

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
#if HAVE_SYS_EVENT_H
        if (num_events > 0) {
            if (kqueue_already_registered(fd)) {
                rb_native_mutex_unlock(&timer_th.waiting_lock);
                return false;
            }

            if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
                RUBY_DEBUG_LOG("failed (%d)", errno);
                rb_bug("register/kevent failed(fd:%d, errno:%d)", fd, errno);
            }
            RUBY_DEBUG_LOG("kevent(add, fd:%d) success", fd);
        }
#else
        if (epoll_events) {
            struct epoll_event event = {
                .events = epoll_events,
                .data = { .ptr = (void *)th, },
            };
            if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_ADD, fd, &event) == -1) {
                RUBY_DEBUG_LOG("failed (%d)", errno);
                rb_bug("register/epoll_ctl failed(fd:%d, errno:%d)", fd, errno);
            }
            RUBY_DEBUG_LOG("epoll_ctl(add, fd:%d, events:%d) success", fd, epoll_events);
        }
#endif

        if (th) {
            VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

            // setup waiting information
            th->sched.waiting_reason.flags = flags;
            th->sched.waiting_reason.data.timeout = abs;
            th->sched.waiting_reason.data.fd = fd;
            th->sched.waiting_reason.data.result = 0;

            if (abs == 0) { // no timeout
                VM_ASSERT(!(flags & thread_sched_waiting_timeout));
                ccan_list_add_tail(&timer_th.waiting, &th->sched.waiting_reason.node);
            }
            else {
                RUBY_DEBUG_LOG("abs:%lu", (unsigned long)abs);
                VM_ASSERT(flags & thread_sched_waiting_timeout);

                // insert into the timeout-sorted prefix of the waiting list
                struct rb_thread_sched_waiting *w, *prev_w = NULL;

                ccan_list_for_each(&timer_th.waiting, w, node) {
                    if ((w->flags & thread_sched_waiting_timeout) &&
                        w->data.timeout < abs) {
                        prev_w = w;
                    }
                    else {
                        break;
                    }
                }

                if (prev_w) {
                    ccan_list_add_after(&timer_th.waiting, &prev_w->node, &th->sched.waiting_reason.node);
                }
                else {
                    ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);
                }

                verify_waiting_list();

                // wake the timer thread so it can recompute its timeout
                timer_thread_wakeup();
            }
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);

    return true;
}
static void
timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags)
{
    RUBY_DEBUG_LOG("th:%u fd:%d", rb_th_serial(th), fd);

#if HAVE_SYS_EVENT_H
    kqueue_unregister_waiting(fd, flags);
#else
    if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_DEL, fd, NULL) == -1) {
        switch (errno) {
          case EBADF:
            // the fd is already closed; nothing to do
            break;
          default:
            perror("epoll_ctl");
            rb_bug("unregister/epoll_ctl fails. errno:%d", errno);
        }
    }
#endif
}
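// Create the kqueue/epoll instance used by the timer thread and register the
// read end of the communication pipe so the timer thread can be woken up.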
static void
timer_thread_setup_mn(void)
{
#if HAVE_SYS_EVENT_H
    kqueue_create();
    RUBY_DEBUG_LOG("kqueue_fd:%d", timer_th.event_fd);
#else
    if ((timer_th.event_fd = epoll_create1(EPOLL_CLOEXEC)) == -1) rb_bug("epoll_create (errno:%d)", errno);
    RUBY_DEBUG_LOG("epoll_fd:%d", timer_th.event_fd);
#endif
    RUBY_DEBUG_LOG("comm_fds:%d/%d", timer_th.comm_fds[0], timer_th.comm_fds[1]);

    timer_thread_register_waiting(NULL, timer_th.comm_fds[0], thread_sched_waiting_io_read | thread_sched_waiting_io_force, NULL);
}
static int
event_wait(rb_vm_t *vm)
{
#if HAVE_SYS_EVENT_H
    int r = kqueue_wait(vm);
#else
    int r = epoll_wait(timer_th.event_fd, timer_th.finished_events, EPOLL_EVENTS_MAX, timer_thread_set_timeout(vm));
#endif
    return r;
}
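// Timer thread main step: wait for events, then either handle a timeout tick
// (timeslice accounting, SNT creation), retry on EINTR, or wake up the Ruby
// threads whose fds became ready.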
static void
timer_thread_polling(rb_vm_t *vm)
{
    int r = event_wait(vm);

    RUBY_DEBUG_LOG("r:%d errno:%d", r, errno);

    switch (r) {
      case 0: // timeout
        RUBY_DEBUG_LOG("timeout%s", "");

        ractor_sched_lock(vm, NULL);
        {
            // timeslice check
            timer_thread_check_timeslice(vm);

            if (vm->ractor.sched.grq_cnt > 0) {
                RUBY_DEBUG_LOG("GRQ cnt: %u", vm->ractor.sched.grq_cnt);
            }
        }
        ractor_sched_unlock(vm, NULL);

        // spawn another shared native thread if needed
        native_thread_check_and_create_shared(vm);
        break;

      case -1:
        switch (errno) {
          case EINTR:
            // simply retry
            break;
          default:
            perror("event_wait");
            rb_bug("event_wait errno:%d", errno);
        }
        break;

      default:
        RUBY_DEBUG_LOG("%d event(s)", r);

#if HAVE_SYS_EVENT_H
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].udata;
            int fd = (int)timer_th.finished_events[i].ident;
            int16_t filter = timer_th.finished_events[i].filter;

            if (th == NULL) {
                // wakeup request via the communication pipe
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wake up the thread waiting on this fd
                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s",
                               rb_th_serial(th),
                               (filter == EVFILT_READ) ? "read/" : "",
                               (filter == EVFILT_WRITE) ? "write/" : "");

                rb_native_mutex_lock(&timer_th.waiting_lock);
                {
                    if (th->sched.waiting_reason.flags) {
                        // delete from the waiting list
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, fd, kqueue_translate_filter_to_flags(filter));

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = filter;

                        timer_thread_wakeup_thread(th);
                    }
                    else {
                        // already awakened by another path
                    }
                }
                rb_native_mutex_unlock(&timer_th.waiting_lock);
            }
        }
#else // epoll
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].data.ptr;

            if (th == NULL) {
                // wakeup request via the communication pipe
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wake up the thread waiting on this fd
                uint32_t events = timer_th.finished_events[i].events;

                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s%s%s%s%s",
                               rb_th_serial(th),
                               (events & EPOLLIN)    ? "in/" : "",
                               (events & EPOLLOUT)   ? "out/" : "",
                               (events & EPOLLRDHUP) ? "RDHUP/" : "",
                               (events & EPOLLPRI)   ? "pri/" : "",
                               (events & EPOLLERR)   ? "err/" : "",
                               (events & EPOLLHUP)   ? "hup/" : "");

                rb_native_mutex_lock(&timer_th.waiting_lock);
                {
                    if (th->sched.waiting_reason.flags) {
                        // delete from the waiting list
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = (int)events;

                        timer_thread_wakeup_thread(th);
                    }
                    else {
                        // already awakened by another path
                    }
                }
                rb_native_mutex_unlock(&timer_th.waiting_lock);
            }
        }
#endif
    }
}

#else // no epoll/kqueue available
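// Fallback when neither epoll nor kqueue is available (or MN threads are
// disabled): the timer thread just poll(2)s the communication pipe.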
static void
timer_thread_setup_mn(void)
{
    // nothing to prepare for the poll(2) fallback
}

static void
timer_thread_polling(rb_vm_t *vm)
{
    int timeout = timer_thread_set_timeout(vm);

    struct pollfd pfd = {
        .fd = timer_th.comm_fds[0],
        .events = POLLIN,
    };

    int r = poll(&pfd, 1, timeout);

    switch (r) {
      case 0: // timeout
        timer_thread_check_timeslice(vm);
        break;
      case -1: // error
        if (errno != EINTR) rb_bug("poll errno:%d", errno);
        break;
      case 1:
        consume_communication_pipe(timer_th.comm_fds[0]);
        break;
      default:
        rb_bug("unreachable");
    }
}

#endif // (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS