Ruby 4.1.0dev (2026-03-23 revision f8459601271ebbc5e1efb101387da955ed1faabb)
arena.h
1#ifndef PRISM_INTERNAL_ARENA_H
2#define PRISM_INTERNAL_ARENA_H
3
8
9#include "prism/arena.h"
10
11#include <stddef.h>
12#include <string.h>
13
14/*
15 * A single block of memory in the arena. Blocks are linked via prev pointers so
16 * they can be freed by walking the chain.
17 */
18typedef struct pm_arena_block {
19 /* The previous block in the chain (for freeing). */
20 struct pm_arena_block *prev;
21
22 /* The total usable bytes in data[]. */
23 size_t capacity;
24
25 /* The number of bytes consumed so far. */
26 size_t used;
27
28 /* The block's data. */
29 char data[PM_FLEX_ARRAY_LENGTH];
31
/*
 * A bump allocator. Allocations are made by bumping a pointer within the
 * current block. When a block is full, a new block is allocated and linked to
 * the previous one. All blocks are freed at once by walking the chain.
 */
struct pm_arena_t {
    /* The active block to allocate from, or NULL before the first allocation
     * (pm_arena_alloc checks for NULL and falls through to the slow path). */
    pm_arena_block_t *current;

    /* The number of blocks allocated so far. */
    size_t block_count;
};
44
/*
 * Free all blocks in the arena by walking the prev chain. After this call, all
 * pointers returned by pm_arena_alloc, pm_arena_zalloc, and pm_arena_memdup
 * are invalid.
 */
void pm_arena_cleanup(pm_arena_t *arena);
50
/*
 * Ensure the arena has at least `capacity` bytes available in its current
 * block, allocating a new block if necessary. This allows callers to
 * pre-size the arena to avoid repeated small block allocations.
 */
void pm_arena_reserve(pm_arena_t *arena, size_t capacity);
57
/*
 * Slow path for pm_arena_alloc: allocate a new block and return a pointer to
 * the first `size` bytes. Do not call directly — use pm_arena_alloc instead.
 *
 * NOTE(review): this takes no alignment argument — presumably a fresh block's
 * data[] is suitably aligned for any request; confirm against the definition
 * in arena.c.
 */
void * pm_arena_alloc_slow(pm_arena_t *arena, size_t size);
63
64/*
65 * Allocate memory from the arena. The returned memory is NOT zeroed. This
66 * function is infallible — it aborts on allocation failure.
67 *
68 * The fast path (bump pointer within the current block) is inlined at each
69 * call site. The slow path (new block allocation) is out-of-line.
70 */
71static PRISM_FORCE_INLINE void *
72pm_arena_alloc(pm_arena_t *arena, size_t size, size_t alignment) {
73 if (arena->current != NULL) {
74 size_t used_aligned = (arena->current->used + alignment - 1) & ~(alignment - 1);
75 size_t needed = used_aligned + size;
76
77 if (used_aligned >= arena->current->used && needed >= used_aligned && needed <= arena->current->capacity) {
78 arena->current->used = needed;
79 return arena->current->data + used_aligned;
80 }
81 }
82
83 return pm_arena_alloc_slow(arena, size);
84}
85
86/*
87 * Allocate zero-initialized memory from the arena. This function is infallible
88 * — it aborts on allocation failure.
89 */
90static PRISM_INLINE void *
91pm_arena_zalloc(pm_arena_t *arena, size_t size, size_t alignment) {
92 void *ptr = pm_arena_alloc(arena, size, alignment);
93 memset(ptr, 0, size);
94 return ptr;
95}
96
97/*
98 * Allocate memory from the arena and copy the given data into it. This is a
99 * convenience wrapper around pm_arena_alloc + memcpy.
100 */
101static PRISM_INLINE void *
102pm_arena_memdup(pm_arena_t *arena, const void *src, size_t size, size_t alignment) {
103 void *dst = pm_arena_alloc(arena, size, alignment);
104 memcpy(dst, src, size);
105 return dst;
106}
107
108#endif
A bump allocator for the prism parser.
#define PM_FLEX_ARRAY_LENGTH
A helper macro for defining a flexible array member.
Definition flex_array.h:12
#define PRISM_FORCE_INLINE
Force a function to be inlined at every call site.
#define PRISM_INLINE
Old Visual Studio versions do not support the inline keyword, so we need to define it to be __inline.
Definition inline.h:12