vector/internal_telemetry/allocations/allocator/token.rs
use std::{
    cell::RefCell,
    num::NonZeroU8,
    sync::atomic::{AtomicU8, Ordering},
};

use tracing::Span;

use super::stack::GroupStack;
use super::tracing::WithAllocationGroup;

thread_local! {
    /// The stack of allocation groups currently active on this thread.
    ///
    /// Any allocations which occur on this thread will be associated with whichever group is at
    /// the top of the stack at the time of the allocation.
    pub(crate) static LOCAL_ALLOCATION_GROUP_STACK: RefCell<GroupStack<256>> =
        const { RefCell::new(GroupStack::new()) };
}

/// The identifier that uniquely identifies an allocation group.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct AllocationGroupId(NonZeroU8);

impl AllocationGroupId {
    /// The group ID used for allocations which are not made within a registered allocation group.
    // Group IDs start at 1. The value 0 is reserved for handling runtime allocation edge cases.
    pub const ROOT: Self = AllocationGroupId::from_raw(1);

    /// Creates a group ID from its raw integer value.
    ///
    /// Callers must ensure that `raw_group_id` is nonzero.
    pub(super) const fn from_raw(raw_group_id: u8) -> Self {
        // SAFETY: All call sites start from `ROOT` (1) and only ever increment, so `raw_group_id`
        // is always nonzero.
        Self(unsafe { NonZeroU8::new_unchecked(raw_group_id) })
    }

    /// Gets the integer representation of this group ID.
    #[must_use]
    pub const fn as_raw(self) -> u8 {
        self.0.get()
    }

    /// Registers an allocation group ID.
    ///
    /// This group ID uniquely identifies a given allocation group, and is the means by which
    /// allocator events are distinguished between the various allocation groups.
    ///
    /// Group IDs must be attached to a [`Span`][tracing::Span] in order to become active,
    /// associating allocations and deallocations within an active span as being attached to the
    /// given allocation group.
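    ///
    /// # Example
    ///
    /// A minimal sketch of registering a new group; how the type is imported depends on the
    /// crate's re-exports, so the usage below is illustrative only:
    ///
    /// ```ignore
    /// // Returns `None` once the fixed pool of group IDs has been exhausted.
    /// if let Some(group_id) = AllocationGroupId::register() {
    ///     assert_ne!(group_id, AllocationGroupId::ROOT);
    /// }
    /// ```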
    pub fn register() -> Option<AllocationGroupId> {
        static GROUP_ID: AtomicU8 = AtomicU8::new(AllocationGroupId::ROOT.0.get() + 1);

        // `checked_add` stops the counter at `u8::MAX`, so it can never wrap back around to zero
        // (which would be an invalid group ID) or hand out the same ID twice.
        GROUP_ID
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |id| id.checked_add(1))
            .ok()
            .map(AllocationGroupId::from_raw)
    }

    /// Attaches this allocation group to a [`Span`][tracing::Span].
    ///
    /// When the span is entered or exited, the allocation group will also transition from inactive
    /// to active, and vice versa. In effect, all allocations that occur while the span is entered
    /// will be associated with the allocation group.
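    ///
    /// # Example
    ///
    /// A rough sketch of attaching a group to a span and entering it; this assumes a subscriber
    /// with the allocation-group layer installed, which is not shown here:
    ///
    /// ```ignore
    /// let span = tracing::info_span!("component");
    ///
    /// if let Some(group_id) = AllocationGroupId::register() {
    ///     group_id.attach_to_span(&span);
    /// }
    ///
    /// // While the guard is live, allocations on this thread are attributed to the group.
    /// let _entered = span.enter();
    /// ```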
    pub fn attach_to_span(self, span: &Span) {
        tracing::dispatcher::get_default(move |dispatch| {
            if let Some(id) = span.id() {
                if let Some(ctx) = dispatch.downcast_ref::<WithAllocationGroup>() {
                    (ctx.with_allocation_group)(dispatch, &id, AllocationGroupToken::from(self));
                }
            }
        });
    }
}

/// A token that allows controlling when an allocation group is active or inactive.
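///
/// # Example
///
/// A small sketch of manually activating a group on the current thread; normally the tracing
/// layer drives this when the attached span is entered and exited, and `group_id` below is
/// assumed to come from [`AllocationGroupId::register`]:
///
/// ```ignore
/// let token = AllocationGroupToken::from(group_id);
///
/// token.enter(); // allocations on this thread are now attributed to `group_id`
/// let buffer = vec![0u8; 1024];
/// token.exit(); // attribution reverts to whichever group was active before
/// ```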
pub struct AllocationGroupToken {
    id: AllocationGroupId,
}

impl AllocationGroupToken {
    /// Activates this allocation group on the current thread by pushing it onto the thread-local
    /// group stack, making it the group that subsequent allocations are attributed to.
    pub fn enter(&self) {
        _ = LOCAL_ALLOCATION_GROUP_STACK.try_with(|stack| stack.borrow_mut().push(self.id));
    }

    /// Deactivates this allocation group on the current thread by popping the group stack,
    /// restoring whichever group was active before `enter` was called.
    pub fn exit(&self) {
        _ = LOCAL_ALLOCATION_GROUP_STACK.try_with(|stack| stack.borrow_mut().pop());
    }
}

impl From<AllocationGroupId> for AllocationGroupToken {
    fn from(group_id: AllocationGroupId) -> Self {
        Self { id: group_id }
    }
}

/// Calls `f` after suspending the active allocation group, if it was not already suspended.
///
/// If the active allocation group is not currently suspended, then `f` is called, after suspending
/// it, with the ID of the suspended allocation group. If any other call to
/// `try_with_suspended_allocation_group` happens while this call is on the stack, `f` in those
/// calls will itself not be called.
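///
/// # Example
///
/// A rough sketch of how an allocator hook might report an allocation without re-entering the
/// tracking logic; `track_allocation` is a hypothetical helper, not part of this module:
///
/// ```ignore
/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
///     let ptr = self.inner.alloc(layout);
///     try_with_suspended_allocation_group(|group_id| {
///         // Only runs if no other (de)allocation on this thread is already being tracked.
///         track_allocation(group_id, layout.size());
///     });
///     ptr
/// }
/// ```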
#[inline(always)]
pub(super) fn try_with_suspended_allocation_group<F>(f: F)
where
    F: FnOnce(AllocationGroupId),
{
    let _result = LOCAL_ALLOCATION_GROUP_STACK.try_with(
        #[inline(always)]
        |group_stack| {
            // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, which allows callers
            // to skip trying to run `f` if they cannot mutably borrow the current allocation
            // group. As `try_borrow_mut` will only let one mutable borrow happen at a time, the
            // tracker logic is never reentrant.
            if let Ok(stack) = group_stack.try_borrow_mut() {
                f(stack.current());
            }
        },
    );
}

/// Calls `f` after suspending the active allocation group.
///
/// In contrast to `try_with_suspended_allocation_group`, this function will always call `f` after
/// attempting to suspend the active allocation group, even if it was already suspended.
///
/// In practice, this function is primarily useful for "run this function and don't trace any
/// (de)allocations at all", while `try_with_suspended_allocation_group` is primarily useful for
/// "run this function if nobody else is tracing a (de)allocation right now".
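///
/// # Example
///
/// A sketch of running internal bookkeeping without its allocations being tracked on this thread;
/// `flush_internal_metrics` stands in for arbitrary internal work and is not part of this module:
///
/// ```ignore
/// with_suspended_allocation_group(|| {
///     // Allocations made in here are effectively ignored by the tracker on this thread.
///     flush_internal_metrics();
/// });
/// ```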
#[inline(always)]
pub(super) fn with_suspended_allocation_group<F>(f: F)
where
    F: FnOnce(),
{
    let _result = LOCAL_ALLOCATION_GROUP_STACK.try_with(
        #[inline(always)]
        |group_stack| {
            // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, as `try_borrow_mut`
            // will only let one mutable borrow happen at a time. As we simply want to ensure that
            // the allocation group is suspended, we don't care what the return value is: calling
            // `try_borrow_mut` and holding on to the result until the end of the scope is
            // sufficient to either suspend the allocation group or know that it's already
            // suspended and will stay that way until we're done in this function.
            let _result = group_stack.try_borrow_mut();
            f();
        },
    );
}