vector/internal_telemetry/allocations/allocator/tracing_allocator.rs

use std::{
    alloc::{GlobalAlloc, Layout},
    sync::atomic::Ordering,
};

use super::{
    token::{AllocationGroupId, try_with_suspended_allocation_group},
    tracer::Tracer,
};
use crate::internal_telemetry::allocations::TRACK_ALLOCATIONS;

/// A tracing allocator that groups allocation events by allocation group.
///
/// This allocator can only be used when specified via `#[global_allocator]`.
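///
/// # Example
///
/// A minimal sketch of installing it as the global allocator; `MyTracer` is a hypothetical
/// stand-in for any type implementing the [`Tracer`] trait:
///
/// ```ignore
/// #[global_allocator]
/// static ALLOCATOR: GroupedTraceableAllocator<std::alloc::System, MyTracer> =
///     GroupedTraceableAllocator::new(std::alloc::System, MyTracer);
/// ```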
pub struct GroupedTraceableAllocator<A, T> {
    allocator: A,
    tracer: T,
}

impl<A, T> GroupedTraceableAllocator<A, T> {
    /// Creates a new `GroupedTraceableAllocator` that wraps the given allocator and tracer.
    #[must_use]
    pub const fn new(allocator: A, tracer: T) -> Self {
        Self { allocator, tracer }
    }
}

unsafe impl<A: GlobalAlloc, T: Tracer> GlobalAlloc for GroupedTraceableAllocator<A, T> {
    #[inline]
    unsafe fn alloc(&self, object_layout: Layout) -> *mut u8 {
        unsafe {
            if !TRACK_ALLOCATIONS.load(Ordering::Relaxed) {
                return self.allocator.alloc(object_layout);
            }

            // Allocate our wrapped layout and make sure the allocation succeeded.
            let (actual_layout, offset_to_group_id) = get_wrapped_layout(object_layout);
            let actual_ptr = self.allocator.alloc(actual_layout);
            if actual_ptr.is_null() {
                return actual_ptr;
            }

            let group_id_ptr = actual_ptr.add(offset_to_group_id).cast::<u8>();

            let object_size = object_layout.size();

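            // Record the group ID in the wrapper and emit the allocation event. The closure
            // runs with the active allocation group suspended, so any allocations made by the
            // tracer itself are not traced in turn (which would recurse back into `alloc`).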
            try_with_suspended_allocation_group(
                #[inline(always)]
                |group_id| {
                    group_id_ptr.write(group_id.as_raw());
                    self.tracer.trace_allocation(object_size, group_id);
                },
            );
            actual_ptr
        }
    }

    #[inline]
    unsafe fn dealloc(&self, object_ptr: *mut u8, object_layout: Layout) {
        unsafe {
            if !TRACK_ALLOCATIONS.load(Ordering::Relaxed) {
                self.allocator.dealloc(object_ptr, object_layout);
                return;
            }
            // Regenerate the wrapped layout so we know where we have to look: the layout we've
            // been given describes the requested allocation, not the wrapped layout that was
            // actually allocated.
            let (wrapped_layout, offset_to_group_id) = get_wrapped_layout(object_layout);

            let raw_group_id = object_ptr.add(offset_to_group_id).cast::<u8>().read();

            // Deallocate before tracking, just to make sure we're reclaiming memory as soon as possible.
            self.allocator.dealloc(object_ptr, wrapped_layout);

            let object_size = object_layout.size();
            let source_group_id = AllocationGroupId::from_raw(raw_group_id);

            try_with_suspended_allocation_group(
                #[inline(always)]
                |_| {
                    self.tracer.trace_deallocation(object_size, source_group_id);
                },
            );
        }
    }
}

#[inline(always)]
fn get_wrapped_layout(object_layout: Layout) -> (Layout, usize) {
    static HEADER_LAYOUT: Layout = Layout::new::<u8>();

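    // The wrapped allocation produced here looks like this in memory, with the pointer
    // returned from `alloc` pointing at the start of the object bytes:
    //
    //   [ object bytes ][ group ID (1 byte) ][ padding to the object's alignment ]
    //   ^               ^
    //   returned ptr    offset_to_group_id
    //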
    // We generate a new allocation layout that gives us a location to store the active allocation group ID
    // alongside the requested allocation (trailing it in memory), which lets us always attempt to retrieve
    // it on the deallocation path.
    let (actual_layout, offset_to_group_id) = object_layout
        .extend(HEADER_LAYOUT)
        .expect("wrapping requested layout resulted in overflow");

    (actual_layout.pad_to_align(), offset_to_group_id)
}
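
// Illustrative sanity check of the layout math (a sketch; assumes a 4-byte, 4-aligned object):
// the one-byte group ID header should land at offset 4, and the wrapped layout should be
// padded back out to 8 bytes.
#[cfg(test)]
mod tests {
    use std::alloc::Layout;

    use super::get_wrapped_layout;

    #[test]
    fn wrapped_layout_places_group_id_after_object() {
        let object_layout = Layout::new::<u32>();
        let (wrapped_layout, offset_to_group_id) = get_wrapped_layout(object_layout);

        // The group ID byte lands immediately after the 4-byte object...
        assert_eq!(offset_to_group_id, 4);
        // ...and the total size is padded back up to a multiple of the object's alignment.
        assert_eq!(wrapped_layout.size(), 8);
        assert_eq!(wrapped_layout.align(), 4);
    }
}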