cryprot_core/alloc.rs

//! Allocation utilities for efficiently allocating zeroed memory.
use std::{
    alloc::{Layout, handle_alloc_error},
    fmt::Debug,
    mem,
    ops::{Deref, DerefMut},
    ptr::{self, NonNull},
    slice,
};

use bytemuck::Zeroable;

/// An owned memory buffer that is allocated with transparent huge pages.
///
/// Using [`HugePageMemory::zeroed`], you can quickly allocate a buffer of
/// `len` elements of type `T` that is backed by transparent huge pages on Linux
/// systems. Note that the allocation might be larger than requested to align to
/// page boundaries. On non-Linux systems, the memory is allocated with the
/// global allocator and normal page size (equivalent to [`Vec`]).
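///
/// # Example
///
/// A minimal usage sketch, assuming this module is exposed as
/// `cryprot_core::alloc`:
///
/// ```no_run
/// use cryprot_core::alloc::{HUGE_PAGE_SIZE, HugePageMemory};
///
/// // Allocate 2 MiB of zeroed bytes, ideally backed by transparent huge pages.
/// let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE);
/// assert_eq!(HUGE_PAGE_SIZE, mem.len());
/// mem[42] = 1; // Deref/DerefMut expose the buffer as a slice.
/// ```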
pub struct HugePageMemory<T> {
    ptr: NonNull<T>,
    /// Number of initialized elements exposed via `Deref`.
    len: usize,
    /// Number of elements the underlying allocation can hold.
    capacity: usize,
}

/// Size of a transparent huge page (2 MiB) on common Linux configurations.
pub const HUGE_PAGE_SIZE: usize = 2 * 1024 * 1024;

impl<T> HugePageMemory<T> {
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Sets the length of the `HugePageMemory`.
    ///
    /// # Panics
    /// Panics if `new_len > self.capacity()`.
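    ///
    /// # Example
    ///
    /// A short sketch of shrinking the visible length in place (module path
    /// `cryprot_core::alloc` is assumed):
    ///
    /// ```no_run
    /// use cryprot_core::alloc::{HUGE_PAGE_SIZE, HugePageMemory};
    ///
    /// let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE);
    /// mem.set_len(HUGE_PAGE_SIZE / 2);
    /// assert_eq!(HUGE_PAGE_SIZE / 2, mem.len());
    /// ```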
    #[inline]
    pub fn set_len(&mut self, new_len: usize) {
        assert!(new_len <= self.capacity());
        // SAFETY:
        // new_len <= self.capacity
        // self[len..new_len] is initialized either because of Self::zeroed
        // or with data written to it.
        #[allow(unused_unsafe)]
        unsafe {
            self.len = new_len;
        }
    }
}

#[cfg(target_os = "linux")]
impl<T: Zeroable> HugePageMemory<T> {
    /// Allocate a buffer of `len` elements that is backed by transparent huge
    /// pages when possible.
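    ///
    /// # Example
    ///
    /// A brief sketch (module path `cryprot_core::alloc` is assumed):
    ///
    /// ```no_run
    /// use cryprot_core::alloc::HugePageMemory;
    ///
    /// let mem = HugePageMemory::<u8>::zeroed(4096);
    /// assert!(mem.iter().all(|&b| b == 0));
    /// ```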
    pub fn zeroed(len: usize) -> Self {
        let layout = Self::layout(len);
        let size = layout.size();
        // Track the capacity in elements so it is comparable to `len`.
        let capacity = size / mem::size_of::<T>();
        let ptr = unsafe {
            // allocate anonymous, zero-initialized memory using mmap
            let ptr = libc::mmap(
                ptr::null_mut(),
                size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            );
            if ptr == libc::MAP_FAILED {
                handle_alloc_error(layout)
            }
            // Ask the kernel to back the mapping with transparent huge pages.
            #[cfg(not(miri))]
            if libc::madvise(ptr, size, libc::MADV_HUGEPAGE) != 0 {
                let err = std::io::Error::last_os_error();
                match err.raw_os_error() {
                    Some(
                        // ENOMEM - Not enough memory/resources available
                        libc::ENOMEM
                        // EINVAL - Invalid arguments (shouldn't happen with our layout)
                        | libc::EINVAL) => {
                        libc::munmap(ptr, size);
                        handle_alloc_error(layout);
                    }
                    // Other errors (e.g., EACCES, EAGAIN): we simply don't get huge pages.
                    _ => {
                        tracing::warn!("Failed to enable huge pages: {}", err);
                    }
                }
            }
            NonNull::new_unchecked(ptr.cast())
        };

        Self { ptr, len, capacity }
    }
}

impl<T: Zeroable + Clone> HugePageMemory<T> {
    /// Grows the `HugePageMemory` to at least `new_size` zeroed elements.
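    ///
    /// # Example
    ///
    /// A sketch of growing a buffer while keeping existing data (module path
    /// `cryprot_core::alloc` is assumed):
    ///
    /// ```no_run
    /// use cryprot_core::alloc::{HUGE_PAGE_SIZE, HugePageMemory};
    ///
    /// let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE);
    /// mem[0] = 1;
    /// mem.grow_zeroed(2 * HUGE_PAGE_SIZE);
    /// assert_eq!(2 * HUGE_PAGE_SIZE, mem.len());
    /// assert_eq!(1, mem[0]); // existing data is preserved
    /// ```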
    pub fn grow_zeroed(&mut self, new_size: usize) {
        // If the new size fits into the current capacity, just update the length.
        if new_size <= self.capacity() {
            self.set_len(new_size);
            return;
        }

        #[cfg(target_os = "linux")]
        {
            self.grow_with_mremap(new_size);
        }

        #[cfg(not(target_os = "linux"))]
        {
            self.grow_with_mmap(new_size);
        }
    }

    /// Grow implementation using mremap (Linux-specific).
    #[cfg(target_os = "linux")]
    fn grow_with_mremap(&mut self, new_size: usize) {
        // An empty buffer (e.g. `Self::default()`) owns no mapping that mremap
        // could grow, so allocate a fresh one instead.
        if self.capacity == 0 {
            *self = Self::zeroed(new_size);
            return;
        }

        // Calculate the new layout and the byte sizes passed to the syscalls.
        let new_layout = Self::layout(new_size);
        let new_byte_size = new_layout.size();
        let new_capacity = new_byte_size / mem::size_of::<T>();
        let old_byte_size = self.capacity * mem::size_of::<T>();

        let new_ptr = unsafe {
            let remapped_ptr = libc::mremap(
                self.ptr.as_ptr().cast(),
                old_byte_size,
                new_byte_size,
                libc::MREMAP_MAYMOVE,
            );

            if remapped_ptr == libc::MAP_FAILED {
                libc::munmap(self.ptr.as_ptr().cast(), old_byte_size);
                handle_alloc_error(new_layout);
            }

            // Successfully remapped; newly mapped pages are zeroed by the kernel.
            #[cfg(not(miri))]
            if libc::madvise(remapped_ptr, new_byte_size, libc::MADV_HUGEPAGE) != 0 {
                let err = std::io::Error::last_os_error();
                tracing::warn!("Failed to enable huge pages after mremap: {}", err);
            }

            NonNull::new_unchecked(remapped_ptr.cast())
        };

        // Update the struct with the new pointer, capacity, and length.
        self.ptr = new_ptr;
        self.capacity = new_capacity;
        self.set_len(new_size);
    }

    /// Fallback grow implementation that allocates a new buffer and copies the
    /// existing elements over.
    #[allow(dead_code)]
    fn grow_with_mmap(&mut self, new_size: usize) {
        let mut new = Self::zeroed(new_size);
        new[..self.len()].clone_from_slice(self);
        *self = new;
    }
}

#[cfg(target_os = "linux")]
impl<T> HugePageMemory<T> {
    /// Layout for `len` elements of `T`, with the size rounded up to a multiple
    /// of the huge page size.
    fn layout(len: usize) -> Layout {
        let size = len.checked_mul(mem::size_of::<T>()).expect("alloc too large");
        let align = mem::align_of::<T>().max(HUGE_PAGE_SIZE);
        let layout = Layout::from_size_align(size, align).expect("alloc too large");
        layout.pad_to_align()
    }
}

#[cfg(target_os = "linux")]
impl<T> Drop for HugePageMemory<T> {
    #[inline]
    fn drop(&mut self) {
        // Empty buffers (e.g. `Self::default()`) own no mapping.
        if self.capacity == 0 {
            return;
        }
        unsafe {
            libc::munmap(self.ptr.as_ptr().cast(), self.capacity * mem::size_of::<T>());
        }
    }
}

// Fallback implementation on non-Linux systems.
#[cfg(not(target_os = "linux"))]
impl<T: Zeroable> HugePageMemory<T> {
    pub fn zeroed(len: usize) -> Self {
        let v = allocate_zeroed_vec(len);
        assert_eq!(v.len(), v.capacity());
        let ptr = NonNull::new(v.leak().as_mut_ptr()).expect("not null");
        Self {
            ptr,
            len,
            capacity: len,
        }
    }
}

#[cfg(not(target_os = "linux"))]
impl<T> Drop for HugePageMemory<T> {
    fn drop(&mut self) {
        // Reconstruct the Vec so the global allocator frees the buffer.
        unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), self.len, self.capacity) };
    }
}

impl<T> Deref for HugePageMemory<T> {
    type Target = [T];

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `ptr` is valid for `len` initialized elements (or dangling and
        // unused for `len == 0`).
        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }
}

impl<T> DerefMut for HugePageMemory<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: as in `deref`, and `&mut self` guarantees exclusive access.
        unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
    }
}

impl<T> Default for HugePageMemory<T> {
    fn default() -> Self {
        Self {
            ptr: NonNull::dangling(),
            len: 0,
            capacity: 0,
        }
    }
}

impl<T: Debug> Debug for HugePageMemory<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

// SAFETY: HugePageMemory owns its allocation, so it can be sent or shared
// between threads whenever T itself can.
unsafe impl<T: Send> Send for HugePageMemory<T> {}
unsafe impl<T: Sync> Sync for HugePageMemory<T> {}

/// Allocate a zeroed [`Vec`].
///
/// This function has less strict bounds than [`<Vec as
/// Buf>::zeroed`](`super::buf::Buf::zeroed`).
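///
/// # Example
///
/// A small usage sketch (module path `cryprot_core::alloc` is assumed):
///
/// ```no_run
/// use cryprot_core::alloc::allocate_zeroed_vec;
///
/// let v: Vec<u64> = allocate_zeroed_vec(1024);
/// assert_eq!(1024, v.len());
/// assert!(v.iter().all(|&x| x == 0));
/// ```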
pub fn allocate_zeroed_vec<T: Zeroable>(len: usize) -> Vec<T> {
    let layout = Layout::array::<T>(len).expect("len too large");
    if layout.size() == 0 {
        // The global allocator does not support zero-sized allocations
        // (len == 0 or T is zero-sized), but a Vec of that length needs none.
        let mut v = Vec::with_capacity(len);
        v.resize_with(len, T::zeroed);
        return v;
    }
    unsafe {
        let zeroed = std::alloc::alloc_zeroed(layout);
        if zeroed.is_null() {
            handle_alloc_error(layout);
        }
        // Safety (see https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#method.from_raw_parts):
        // - zeroed ptr was allocated via the global allocator
        // - zeroed was allocated with the exact alignment of T
        // - size of T times capacity (len) is equal to size of allocation
        // - length values are initialized because of alloc_zeroed and T: Zeroable
        // - allocated size is less than isize::MAX ensured by Layout construction,
        //   otherwise panic
        Vec::from_raw_parts(zeroed as *mut T, len, len)
    }
}

#[cfg(test)]
mod tests {
    use super::{HUGE_PAGE_SIZE, HugePageMemory};

    #[test]
    fn test_huge_page_memory() {
        let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE + HUGE_PAGE_SIZE / 2);
        #[cfg(not(miri))] // miri is too slow for this
        for b in mem.iter() {
            assert_eq!(0, *b);
        }
        assert!(mem[0] == 0);
        assert!(mem[mem.len() - 1] == 0);
        mem[42] = 5;
        mem.set_len(HUGE_PAGE_SIZE);
        assert_eq!(HUGE_PAGE_SIZE, mem.len());
    }

    #[test]
    fn test_set_len_correct_dealloc() {
        let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE);
        mem.set_len(HUGE_PAGE_SIZE / 2);
    }

    #[test]
    #[should_panic]
    fn test_set_len_panics() {
        let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE);
        mem.set_len(HUGE_PAGE_SIZE + 1);
    }

    #[test]
    fn test_grow() {
        let mut mem = HugePageMemory::<u8>::zeroed(HUGE_PAGE_SIZE);
        assert_eq!(0, mem[0]);
        mem[0] = 1;
        mem.grow_zeroed(2 * HUGE_PAGE_SIZE);
        assert_eq!(2 * HUGE_PAGE_SIZE, mem.len());
        assert_eq!(2 * HUGE_PAGE_SIZE, mem.capacity());
        assert_eq!(1, mem[0]);
        assert_eq!(0, mem[HUGE_PAGE_SIZE + 1]);
    }
}