Ruby 3.4.0dev (2024-11-05 revision e440268d51fe02b303e3817a7a733a0dac1c5091)
vm_insnhelper.h
#ifndef RUBY_INSNHELPER_H
#define RUBY_INSNHELPER_H
/**********************************************************************

  insnhelper.h - helper macros to implement each instruction

  $Author$
  created at: 04/01/01 15:50:34 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_invalidations;
RUBY_EXTERN rb_serial_t ruby_vm_constant_cache_misses;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;

#if USE_YJIT || USE_RJIT // We want vm_insns_count on any JIT-enabled build.
// Increment vm_insns_count for --yjit-stats. We increment this even when
// --yjit or --yjit-stats is not used because branching to skip it is slower.
// We also don't use ATOMIC_INC for performance, allowing inaccuracy on Ractors.
#define JIT_COLLECT_USAGE_INSN(insn) rb_vm_insns_count++
#else
#define JIT_COLLECT_USAGE_INSN(insn) // none
#endif

#if VM_COLLECT_USAGE_DETAILS
#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))
#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#else
#define COLLECT_USAGE_INSN(insn) JIT_COLLECT_USAGE_INSN(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) // none
#define COLLECT_USAGE_REGISTER(reg, s) // none
#endif

/**********************************************************/
/* deal with stack */
/**********************************************************/

#define PUSH(x) (SET_SV(x), INC_SP(1))
#define TOPN(n) (*(GET_SP()-(n)-1))
#define POPN(n) (DEC_SP(n))
#define POP() (DEC_SP(1))
#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n))
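
/* Illustrative sketch (not part of the original header): a typical
 * instruction body reads its operands from the top of the stack, pops
 * them, and pushes the result:
 *
 *     VALUE obj  = TOPN(0);   // topmost stack value
 *     VALUE recv = TOPN(1);   // the value just below it
 *     POPN(2);                // drop both operands
 *     PUSH(result);           // push the computed result
 *
 * where `result` stands for whatever value the instruction computed. */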

/**********************************************************/
/* deal with registers */
/**********************************************************/

#define VM_REG_CFP (reg_cfp)
#define VM_REG_PC (VM_REG_CFP->pc)
#define VM_REG_SP (VM_REG_CFP->sp)
#define VM_REG_EP (VM_REG_CFP->ep)

#define RESTORE_REGS() do { \
    VM_REG_CFP = ec->cfp; \
} while (0)
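
/* Note (added): `reg_cfp` is the control frame pointer cached in a local
 * variable of the interpreter loop. A callee may push or pop frames and
 * thereby change `ec->cfp`, so code that re-enters the VM re-synchronizes
 * the cached copy afterwards, roughly:
 *
 *     val = some_call_that_may_reenter_the_vm(ec, ...);  // hypothetical
 *     RESTORE_REGS();  // reload reg_cfp from ec->cfp
 */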

typedef enum call_type {
    CALL_PUBLIC,
    CALL_FCALL,
    CALL_VCALL,
    CALL_PUBLIC_KW,
    CALL_FCALL_KW
} call_type;

struct rb_forwarding_call_data {
    struct rb_call_data cd;
    CALL_INFO caller_ci;
};

#if VM_COLLECT_USAGE_DETAILS
enum vm_regan_regtype {
    VM_REGAN_PC = 0,
    VM_REGAN_SP = 1,
    VM_REGAN_EP = 2,
    VM_REGAN_CFP = 3,
    VM_REGAN_SELF = 4,
    VM_REGAN_ISEQ = 5
};
enum vm_regan_acttype {
    VM_REGAN_ACT_GET = 0,
    VM_REGAN_ACT_SET = 1
};

#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \
    (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v))
#else
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) (v)
#endif

/* PC */
#define GET_PC() (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC))
#define SET_PC(x) (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x))))
#define GET_CURRENT_INSN() (*GET_PC())
#define GET_OPERAND(n) (GET_PC()[(n)])
#define ADD_PC(n) (SET_PC(VM_REG_PC + (n)))
#define JUMP(dst) (SET_PC(VM_REG_PC + (dst)))
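
/* Illustrative sketch (not part of the original header): ADD_PC advances
 * past the current instruction's operands, while JUMP applies a relative
 * branch offset. A conditional branch might look like:
 *
 *     VALUE flag = TOPN(0);
 *     POPN(1);
 *     if (RTEST(flag)) {
 *         JUMP(dst);  // `dst` is a relative-offset operand
 *     }
 */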

/* frame pointer, environment pointer */
#define GET_CFP() (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP))
#define GET_EP() (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP))
#define SET_EP(x) (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x))))
#define GET_LEP() (VM_EP_LEP(GET_EP()))

/* SP */
#define GET_SP() (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP))
#define SET_SP(x) (VM_REG_SP = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x) (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x) (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x) (*GET_SP() = rb_ractor_confirm_belonging(x))
  /* set current stack value as x */

/* instruction sequence C struct */
#define GET_ISEQ() (GET_CFP()->iseq)

/**********************************************************/
/* deal with variables */
/**********************************************************/

#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))

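/* Note (added): the specval slot holds the parent environment pointer with
 * tag bits in its low two bits, which GET_PREV_EP masks off with ~0x03.
 * Walking `level` lexical scopes upward might look like this sketch:
 *
 *     const VALUE *ep = GET_EP();
 *     for (int i = 0; i < level; i++) {
 *         ep = GET_PREV_EP(ep);
 *     }
 */
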
/**********************************************************/
/* deal with values */
/**********************************************************/

#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self))

/**********************************************************/
/* deal with control flow 2: method/iterator */
/**********************************************************/

/* set fastpath when the cached method is *NOT* protected,
 * because the inline method cache does not care about the receiver.
 */

static inline void
CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
    if (LIKELY(enabled)) {
        vm_cc_call_set(cc, func);
    }
}
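
/* Illustrative sketch (not part of the original header): call-setup code
 * can install a specialized handler on the call cache once the call shape
 * is known, e.g.
 *
 *     CC_SET_FASTPATH(cc, vm_call_iseq_setup,
 *                     vm_call_iseq_optimizable_p(ci, cc));
 *
 * The third argument gates caching: when false, the generic slow path is
 * kept. (`vm_call_iseq_setup` names a handler in vm_insnhelper.c.)
 */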

#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])

/**********************************************************/
/* deal with control flow 3: exception */
/**********************************************************/


/**********************************************************/
/* deal with stack canary */
/**********************************************************/

#if VM_CHECK_MODE > 0
#define SETUP_CANARY(cond) \
    VALUE *canary = 0; \
    if (cond) { \
        canary = GET_SP(); \
        SET_SV(vm_stack_canary); \
    } \
    else { \
        SET_SV(Qfalse); /* cleanup */ \
    }
#define CHECK_CANARY(cond, insn) \
    if (cond) { \
        if (*canary == vm_stack_canary) { \
            *canary = Qfalse; /* cleanup */ \
        } \
        else { \
            rb_vm_canary_is_found_dead(insn, *canary); \
        } \
    }
#else
#define SETUP_CANARY(cond) if (cond) {} else {}
#define CHECK_CANARY(cond, insn) if (cond) {(void)(insn);}
#endif
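
/* Illustrative sketch (not part of the original header): under
 * VM_CHECK_MODE, an instruction can plant a canary in the unused stack
 * slot above SP and verify it afterwards, roughly:
 *
 *     SETUP_CANARY(true);
 *     // ... work that must not write past the stack pointer ...
 *     CHECK_CANARY(true, BIN(nop));  // BIN(nop) stands in for the
 *                                    // current instruction's identifier
 */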

/**********************************************************/
/* others */
/**********************************************************/

#define CALL_SIMPLE_METHOD() do { \
    rb_snum_t insn_width = attr_width_opt_send_without_block(0); \
    ADD_PC(-insn_width); \
    DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)
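
/* Note (added): CALL_SIMPLE_METHOD rewinds the PC by the width of the
 * current specialized instruction and re-dispatches it as the generic
 * opt_send_without_block; this is the fallback taken when a specialized
 * instruction's fast-path check fails. */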

#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)

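/* Note (added): the throw_obj and catch_frame fields of vm_throw_data are
 * const-qualified, so THROW_DATA_NEW below initializes them through casts
 * right after allocation. */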
static inline struct vm_throw_data *
THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, int st)
{
    struct vm_throw_data *obj = IMEMO_NEW(struct vm_throw_data, imemo_throw_data, 0);
    *((VALUE *)&obj->throw_obj) = val;
    *((struct rb_control_frame_struct **)&obj->catch_frame) = (struct rb_control_frame_struct *)cf;
    obj->throw_state = st;

    return obj;
}

static inline VALUE
THROW_DATA_VAL(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_obj;
}

static inline const rb_control_frame_t *
THROW_DATA_CATCH_FRAME(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->catch_frame;
}

static inline int
THROW_DATA_STATE(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_state;
}

static inline int
THROW_DATA_CONSUMED_P(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->flags & THROW_DATA_CONSUMED;
}

static inline void
THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data *obj, const rb_control_frame_t *cfp)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->catch_frame = cfp;
}

static inline void
THROW_DATA_STATE_SET(struct vm_throw_data *obj, int st)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->throw_state = st;
}

static inline void
THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
{
    if (THROW_DATA_P(obj) &&
        THROW_DATA_STATE(obj) == TAG_BREAK) {
        obj->flags |= THROW_DATA_CONSUMED;
    }
}

#define IS_ARGS_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci) (vm_ci_flag(ci) & VM_CALL_KWARG)
#define IS_ARGS_KW_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
#define IS_ARGS_KW_SPLAT_MUT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)
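
/* Note (added): these macros test the call-info flag word. For example, a
 * call site like `f(*a, **b)` satisfies both IS_ARGS_SPLAT(ci) and
 * IS_ARGS_KW_SPLAT(ci). */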

static inline bool
vm_call_cacheable(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    return !(vm_ci_flag(ci) & VM_CALL_FORWARDING) &&
        ((vm_ci_flag(ci) & VM_CALL_FCALL) ||
         METHOD_ENTRY_VISI(vm_cc_cme(cc)) != METHOD_VISI_PROTECTED);
}

/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
 * can be used as a fastpath. */
static inline bool
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && vm_call_cacheable(ci, cc);
}
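
/* Note (added): callers use this predicate to decide whether to install a
 * specialized ISeq-call handler via CC_SET_FASTPATH; splat or keyword
 * arguments force the generic argument-setup path, so such calls stay on
 * the slow path. */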

#endif /* RUBY_INSNHELPER_H */