use std::{
    alloc::{GlobalAlloc, Layout},
    sync::atomic::Ordering,
};

use crate::internal_telemetry::allocations::TRACK_ALLOCATIONS;

use super::{
    token::{try_with_suspended_allocation_group, AllocationGroupId},
    tracer::Tracer,
};

/// A tracing allocator that groups allocation events by allocation group.
///
/// This allocator can only be used when specified via `#[global_allocator]`.
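///
/// # Example
///
/// A minimal sketch of how the allocator might be installed, assuming a hypothetical `MyTracer`
/// type that implements [`Tracer`]:
///
/// ```ignore
/// use std::alloc::System;
///
/// #[global_allocator]
/// static ALLOCATOR: GroupedTraceableAllocator<System, MyTracer> =
///     GroupedTraceableAllocator::new(System, MyTracer);
/// ```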
pub struct GroupedTraceableAllocator<A, T> {
    allocator: A,
    tracer: T,
}

impl<A, T> GroupedTraceableAllocator<A, T> {
    /// Creates a new `GroupedTraceableAllocator` that wraps the given allocator and tracer.
    #[must_use]
    pub const fn new(allocator: A, tracer: T) -> Self {
        Self { allocator, tracer }
    }
}

unsafe impl<A: GlobalAlloc, T: Tracer> GlobalAlloc for GroupedTraceableAllocator<A, T> {
    #[inline]
    unsafe fn alloc(&self, object_layout: Layout) -> *mut u8 {
        if !TRACK_ALLOCATIONS.load(Ordering::Relaxed) {
            return self.allocator.alloc(object_layout);
        }

        // Allocate our wrapped layout and make sure the allocation succeeded.
        let (actual_layout, offset_to_group_id) = get_wrapped_layout(object_layout);
        let actual_ptr = self.allocator.alloc(actual_layout);
        if actual_ptr.is_null() {
            return actual_ptr;
        }

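        // The wrapped layout reserves a trailing byte for the allocation group ID; grab a pointer
        // to it so the active group ID can be written there when we trace the allocation.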
        let group_id_ptr = actual_ptr.add(offset_to_group_id).cast::<u8>();

        let object_size = object_layout.size();

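        // Trace the allocation with the active allocation group suspended, so that any allocations
        // made by the tracer itself aren't tracked recursively.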
        try_with_suspended_allocation_group(
            #[inline(always)]
            |group_id| {
                group_id_ptr.write(group_id.as_raw());
                self.tracer.trace_allocation(object_size, group_id);
            },
        );
        actual_ptr
    }

    #[inline]
    unsafe fn dealloc(&self, object_ptr: *mut u8, object_layout: Layout) {
        if !TRACK_ALLOCATIONS.load(Ordering::Relaxed) {
            self.allocator.dealloc(object_ptr, object_layout);
            return;
        }
        // Regenerate the wrapped layout so we know where to look, as the pointer we were given refers to the
        // requested layout, not the wrapped layout that was actually allocated.
        let (wrapped_layout, offset_to_group_id) = get_wrapped_layout(object_layout);

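        // Read back the group ID byte stored just past the object when the allocation was traced.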
        let raw_group_id = object_ptr.add(offset_to_group_id).cast::<u8>().read();

        // Deallocate before tracking, just to make sure we're reclaiming memory as soon as possible.
        self.allocator.dealloc(object_ptr, wrapped_layout);

        let object_size = object_layout.size();
        let source_group_id = AllocationGroupId::from_raw(raw_group_id);

        try_with_suspended_allocation_group(
            #[inline(always)]
            |_| {
                self.tracer.trace_deallocation(object_size, source_group_id);
            },
        );
    }
}

#[inline(always)]
fn get_wrapped_layout(object_layout: Layout) -> (Layout, usize) {
    static HEADER_LAYOUT: Layout = Layout::new::<u8>();

    // We generate a new allocation layout that reserves space for the active allocation group ID immediately
    // after the requested allocation, which lets us always attempt to retrieve it on the deallocation path.
    let (actual_layout, offset_to_group_id) = object_layout
        .extend(HEADER_LAYOUT)
        .expect("wrapping requested layout resulted in overflow");

    (actual_layout.pad_to_align(), offset_to_group_id)
}