vector/internal_telemetry/allocations/allocator/tracing_allocator.rs

use std::{
    alloc::{GlobalAlloc, Layout},
    sync::atomic::Ordering,
};

use crate::internal_telemetry::allocations::TRACK_ALLOCATIONS;

use super::{
    token::{try_with_suspended_allocation_group, AllocationGroupId},
    tracer::Tracer,
};

/// A tracing allocator that groups allocation events by allocation group.
///
/// This allocator can only be used when specified via `#[global_allocator]`.
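///
/// A minimal usage sketch, assuming a hypothetical `MyTracer` type that implements `Tracer`
/// and wrapping the standard `System` allocator:
///
/// ```rust,ignore
/// #[global_allocator]
/// static ALLOCATOR: GroupedTraceableAllocator<std::alloc::System, MyTracer> =
///     GroupedTraceableAllocator::new(std::alloc::System, MyTracer);
/// ```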
pub struct GroupedTraceableAllocator<A, T> {
    allocator: A,
    tracer: T,
}

impl<A, T> GroupedTraceableAllocator<A, T> {
    /// Creates a new `GroupedTraceableAllocator` that wraps the given allocator and tracer.
    #[must_use]
    pub const fn new(allocator: A, tracer: T) -> Self {
        Self { allocator, tracer }
    }
}

unsafe impl<A: GlobalAlloc, T: Tracer> GlobalAlloc for GroupedTraceableAllocator<A, T> {
    #[inline]
    unsafe fn alloc(&self, object_layout: Layout) -> *mut u8 {
        unsafe {
            if !TRACK_ALLOCATIONS.load(Ordering::Relaxed) {
                return self.allocator.alloc(object_layout);
            }

            // Allocate our wrapped layout and make sure the allocation succeeded.
            let (actual_layout, offset_to_group_id) = get_wrapped_layout(object_layout);
            let actual_ptr = self.allocator.alloc(actual_layout);
            if actual_ptr.is_null() {
                return actual_ptr;
            }

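            // The group ID slot lives in the trailer that `get_wrapped_layout` reserved past the
            // end of the object, so the caller still receives a pointer to the requested layout.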
            let group_id_ptr = actual_ptr.add(offset_to_group_id).cast::<u8>();

            let object_size = object_layout.size();

            try_with_suspended_allocation_group(
                #[inline(always)]
                |group_id| {
                    group_id_ptr.write(group_id.as_raw());
                    self.tracer.trace_allocation(object_size, group_id);
                },
            );
            actual_ptr
        }
    }

    #[inline]
    unsafe fn dealloc(&self, object_ptr: *mut u8, object_layout: Layout) {
        unsafe {
            if !TRACK_ALLOCATIONS.load(Ordering::Relaxed) {
                self.allocator.dealloc(object_ptr, object_layout);
                return;
            }
            // Regenerate the wrapped layout so we know where we have to look, as the pointer we've been given
            // relates to the requested layout, not the wrapped layout that was actually allocated.
            let (wrapped_layout, offset_to_group_id) = get_wrapped_layout(object_layout);

            let raw_group_id = object_ptr.add(offset_to_group_id).cast::<u8>().read();

            // Deallocate before tracking, just to make sure we're reclaiming memory as soon as possible.
            self.allocator.dealloc(object_ptr, wrapped_layout);

            let object_size = object_layout.size();
            let source_group_id = AllocationGroupId::from_raw(raw_group_id);

            try_with_suspended_allocation_group(
                #[inline(always)]
                |_| {
                    self.tracer.trace_deallocation(object_size, source_group_id);
                },
            );
        }
    }
}

#[inline(always)]
fn get_wrapped_layout(object_layout: Layout) -> (Layout, usize) {
    static HEADER_LAYOUT: Layout = Layout::new::<u8>();

    // We generate a new allocation layout that gives us a location to store the active allocation group ID
    // immediately after the requested allocation, which lets us always attempt to retrieve it on the
    // deallocation path.
    let (actual_layout, offset_to_group_id) = object_layout
        .extend(HEADER_LAYOUT)
        .expect("wrapping requested layout resulted in overflow");

    (actual_layout.pad_to_align(), offset_to_group_id)
}
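
// A small, illustrative sanity check of the layout math above; the concrete numbers assume an
// 8-byte, 8-aligned request, for which `extend` places the one-byte group ID header at offset 8
// and `pad_to_align` rounds the wrapped size up to 16.
#[cfg(test)]
mod tests {
    use std::alloc::Layout;

    use super::get_wrapped_layout;

    #[test]
    fn wrapped_layout_places_group_id_after_object() {
        // An 8-byte object with 8-byte alignment, stated explicitly so the expected values
        // do not depend on the target's type alignments.
        let object_layout = Layout::from_size_align(8, 8).expect("valid layout");
        let (wrapped_layout, offset_to_group_id) = get_wrapped_layout(object_layout);

        // The group ID byte sits immediately after the object...
        assert_eq!(offset_to_group_id, 8);
        // ...and padding rounds the combined size up so the original alignment is preserved.
        assert_eq!(wrapped_layout.size(), 16);
        assert_eq!(wrapped_layout.align(), 8);
    }
}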