vector/internal_telemetry/allocations/allocator/token.rs
use std::{
    cell::RefCell,
    num::NonZeroU8,
    sync::atomic::{AtomicU8, Ordering},
};

use tracing::Span;

use super::{stack::GroupStack, tracing::WithAllocationGroup};

thread_local! {
    /// The allocation group stack for the current thread.
    ///
    /// Any allocations which occur on this thread will be associated with whichever allocation
    /// group is at the top of the stack at the time of the allocation.
    pub(crate) static LOCAL_ALLOCATION_GROUP_STACK: RefCell<GroupStack<256>> =
        const { RefCell::new(GroupStack::new()) };
}

/// The identifier that uniquely identifies an allocation group.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct AllocationGroupId(NonZeroU8);

impl AllocationGroupId {
    /// The group ID used for allocations which are not made within a registered allocation group.
    // Group IDs start at 1. The value 0 is reserved for handling runtime allocation edge cases.
    pub const ROOT: Self = AllocationGroupId::from_raw(1);

    /// Creates an `AllocationGroupId` from the given raw group ID.
    ///
    /// Callers must ensure that `raw_group_id` is non-zero.
    pub(super) const fn from_raw(raw_group_id: u8) -> Self {
        // SAFETY: Callers are required to pass a non-zero group ID, and all group IDs handed out
        // by this module start at 1.
        unsafe { Self(NonZeroU8::new_unchecked(raw_group_id)) }
    }

    /// Gets the integer representation of this group ID.
    #[must_use]
    pub const fn as_raw(self) -> u8 {
        self.0.get()
    }

    /// Registers an allocation group ID.
    ///
    /// This group ID uniquely identifies a given allocation group, and is the means by which to
    /// distinguish allocator events between various allocation groups.
    ///
    /// Group IDs must be attached to a [`Span`][tracing::Span] in order to become active,
    /// associating allocations and deallocations within an active span as being attached to the
    /// given allocation group.
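    ///
    /// # Example
    ///
    /// A minimal sketch of registering a new group ID; the exhaustion handling shown here is an
    /// assumption, not something this module prescribes:
    ///
    /// ```ignore
    /// let group_id = AllocationGroupId::register()
    ///     .expect("ran out of allocation group IDs");
    /// ```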
    pub fn register() -> Option<AllocationGroupId> {
        static GROUP_ID: AtomicU8 = AtomicU8::new(AllocationGroupId::ROOT.0.get() + 1);

        // Increment the counter without ever wrapping past `u8::MAX`: wrapping would eventually
        // hand out a raw group ID of zero, violating the `NonZeroU8` invariant. Once the counter
        // is exhausted, `None` is returned for all further registrations.
        GROUP_ID
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |id| id.checked_add(1))
            .ok()
            .map(AllocationGroupId::from_raw)
    }

    /// Attaches this allocation group to a [`Span`][tracing::Span].
    ///
    /// When the span is entered or exited, the allocation group will also transition from inactive to active, and vice
    /// versa. In effect, all allocations that occur while the span is entered will be associated with the allocation
    /// group.
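    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the allocation-tracking `tracing` subscriber is installed and
    /// the span name is arbitrary:
    ///
    /// ```ignore
    /// let span = tracing::info_span!("component");
    /// group_id.attach_to_span(&span);
    ///
    /// let _entered = span.enter();
    /// // Allocations made while the span is entered are attributed to `group_id`.
    /// ```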
    pub fn attach_to_span(self, span: &Span) {
        tracing::dispatcher::get_default(move |dispatch| {
            if let Some(id) = span.id()
                && let Some(ctx) = dispatch.downcast_ref::<WithAllocationGroup>()
            {
                (ctx.with_allocation_group)(dispatch, &id, AllocationGroupToken::from(self));
            }
        });
    }
}

/// A token that allows controlling when an allocation group is active or inactive.
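///
/// # Example
///
/// A minimal sketch of manually entering and exiting a group; in practice this is driven by the
/// `tracing` span integration rather than called directly:
///
/// ```ignore
/// let token = AllocationGroupToken::from(group_id);
/// token.enter();
/// // ... allocations here are attributed to the group ...
/// token.exit();
/// ```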
pub struct AllocationGroupToken {
    id: AllocationGroupId,
}

impl AllocationGroupToken {
    /// Pushes this allocation group onto the current thread's group stack, making it the active
    /// allocation group.
    pub fn enter(&self) {
        _ = LOCAL_ALLOCATION_GROUP_STACK.try_with(|stack| stack.borrow_mut().push(self.id));
    }

    /// Pops the active allocation group off of the current thread's group stack, restoring the
    /// previously active group.
    pub fn exit(&self) {
        _ = LOCAL_ALLOCATION_GROUP_STACK.try_with(|stack| stack.borrow_mut().pop());
    }
}

impl From<AllocationGroupId> for AllocationGroupToken {
    fn from(group_id: AllocationGroupId) -> Self {
        Self { id: group_id }
    }
}

/// Calls `f` after suspending the active allocation group, if it was not already suspended.
///
/// If the active allocation group is not currently suspended, then `f` is called, after suspending it, with the ID of
/// the suspended allocation group. If any other call to `try_with_suspended_allocation_group` happens while this
/// method call is on the stack, `f` in those calls will itself not be called.
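///
/// # Example
///
/// A minimal sketch of how an allocator hook might use this helper; `record_allocation` and
/// `object_size` are hypothetical stand-ins for the real tracker:
///
/// ```ignore
/// try_with_suspended_allocation_group(|group_id| {
///     // Skipped entirely if the tracker is already running on this thread.
///     record_allocation(group_id, object_size);
/// });
/// ```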
#[inline(always)]
pub(super) fn try_with_suspended_allocation_group<F>(f: F)
where
    F: FnOnce(AllocationGroupId),
{
    let _result = LOCAL_ALLOCATION_GROUP_STACK.try_with(
        #[inline(always)]
        |group_stack| {
            // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, which allows callers to skip trying to run
            // `f` if they cannot mutably borrow the current allocation group. As `try_borrow_mut` will only let one
            // mutable borrow happen at a time, the tracker logic is never reentrant.
            if let Ok(stack) = group_stack.try_borrow_mut() {
                f(stack.current());
            }
        },
    );
}

/// Calls `f` after suspending the active allocation group.
///
/// In contrast to `try_with_suspended_allocation_group`, this method will always call `f` after attempting to suspend
/// the active allocation group, even if it was already suspended.
///
/// In practice, this method is primarily useful for "run this function and don't trace any (de)allocations at all" while
/// `try_with_suspended_allocation_group` is primarily useful for "run this function if nobody else is tracing
/// a (de)allocation right now".
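///
/// # Example
///
/// A minimal sketch; `flush_internal_metrics` is a hypothetical stand-in for work whose
/// (de)allocations should not be traced:
///
/// ```ignore
/// with_suspended_allocation_group(|| {
///     // (De)allocations made here are not attributed to any allocation group.
///     flush_internal_metrics();
/// });
/// ```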
#[inline(always)]
pub(super) fn with_suspended_allocation_group<F>(f: F)
where
    F: FnOnce(),
{
    let _result = LOCAL_ALLOCATION_GROUP_STACK.try_with(
        #[inline(always)]
        |group_stack| {
            // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, as `try_borrow_mut` will only let one
            // mutable borrow happen at a time. As we simply want to ensure that the allocation group is suspended, we
            // don't care what the return value is: calling `try_borrow_mut` and holding on to the result until the end
            // of the scope is sufficient to either suspend the allocation group or know that it's already suspended and
            // will stay that way until we're done in this method.
            let _result = group_stack.try_borrow_mut();
            f();
        },
    );
}