// vector_buffers/topology/channel/sender.rs
use std::{sync::Arc, time::Instant};

use async_recursion::async_recursion;
use derivative::Derivative;
use tokio::sync::Mutex;
use tracing::Span;
use vector_common::internal_event::{register, InternalEventHandle, Registered};

use super::limited_queue::LimitedSender;
use crate::{
    buffer_usage_data::BufferUsageHandle,
    internal_events::BufferSendDuration,
    variants::disk_v2::{self, ProductionFilesystem},
    Bufferable, WhenFull,
};
16
/// Adapter over the concrete buffer implementations a sender can be backed by.
#[derive(Clone, Debug)]
pub enum SenderAdapter<T: Bufferable> {
    /// In-memory buffer, backed by a bounded channel sender.
    InMemory(LimitedSender<T>),

    /// Disk v2 buffer, backed by a writer shared behind an async mutex so the
    /// adapter stays `Clone`.
    DiskV2(Arc<Mutex<disk_v2::BufferWriter<T, ProductionFilesystem>>>),
}
26
27impl<T: Bufferable> From<LimitedSender<T>> for SenderAdapter<T> {
28 fn from(v: LimitedSender<T>) -> Self {
29 Self::InMemory(v)
30 }
31}
32
33impl<T: Bufferable> From<disk_v2::BufferWriter<T, ProductionFilesystem>> for SenderAdapter<T> {
34 fn from(v: disk_v2::BufferWriter<T, ProductionFilesystem>) -> Self {
35 Self::DiskV2(Arc::new(Mutex::new(v)))
36 }
37}
38
39impl<T> SenderAdapter<T>
40where
41 T: Bufferable,
42{
43 pub(crate) async fn send(&mut self, item: T) -> crate::Result<()> {
44 match self {
45 Self::InMemory(tx) => tx.send(item).await.map_err(Into::into),
46 Self::DiskV2(writer) => {
47 let mut writer = writer.lock().await;
48
49 writer.write_record(item).await.map(|_| ()).map_err(|e| {
50 error!("Disk buffer writer has encountered an unrecoverable error.");
56
57 e.into()
58 })
59 }
60 }
61 }
62
63 pub(crate) async fn try_send(&mut self, item: T) -> crate::Result<Option<T>> {
64 match self {
65 Self::InMemory(tx) => tx
66 .try_send(item)
67 .map(|()| None)
68 .or_else(|e| Ok(Some(e.into_inner()))),
69 Self::DiskV2(writer) => {
70 let mut writer = writer.lock().await;
71
72 writer.try_write_record(item).await.map_err(|e| {
73 error!("Disk buffer writer has encountered an unrecoverable error.");
79
80 e.into()
81 })
82 }
83 }
84 }
85
86 pub(crate) async fn flush(&mut self) -> crate::Result<()> {
87 match self {
88 Self::InMemory(_) => Ok(()),
89 Self::DiskV2(writer) => {
90 let mut writer = writer.lock().await;
91 writer.flush().await.map_err(|e| {
92 error!("Disk buffer writer has encountered an unrecoverable error.");
94
95 e.into()
96 })
97 }
98 }
99 }
100
101 pub fn capacity(&self) -> Option<usize> {
102 match self {
103 Self::InMemory(tx) => Some(tx.available_capacity()),
104 Self::DiskV2(_) => None,
105 }
106 }
107}
108
/// A buffer sender wrapping an underlying channel/writer, with optional
/// overflow routing and usage/duration instrumentation.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BufferSender<T: Bufferable> {
    // Primary destination for sent items.
    base: SenderAdapter<T>,
    // Secondary sender used when `when_full` is `Overflow` and `base` is full.
    overflow: Option<Box<BufferSender<T>>>,
    // Policy applied when `base` has no capacity.
    when_full: WhenFull,
    // Optional handle for tracking received/dropped event counts and sizes.
    instrumentation: Option<BufferUsageHandle>,
    // Registered handle for emitting send-duration metrics; excluded from
    // the derived `Debug` output.
    #[derivative(Debug = "ignore")]
    send_duration: Option<Registered<BufferSendDuration>>,
}
141
142impl<T: Bufferable> BufferSender<T> {
143 pub fn new(base: SenderAdapter<T>, when_full: WhenFull) -> Self {
145 Self {
146 base,
147 overflow: None,
148 when_full,
149 instrumentation: None,
150 send_duration: None,
151 }
152 }
153
154 pub fn with_overflow(base: SenderAdapter<T>, overflow: BufferSender<T>) -> Self {
156 Self {
157 base,
158 overflow: Some(Box::new(overflow)),
159 when_full: WhenFull::Overflow,
160 instrumentation: None,
161 send_duration: None,
162 }
163 }
164
165 #[cfg(test)]
170 pub fn switch_to_overflow(&mut self, overflow: BufferSender<T>) {
171 self.overflow = Some(Box::new(overflow));
172 self.when_full = WhenFull::Overflow;
173 }
174
175 pub fn with_usage_instrumentation(&mut self, handle: BufferUsageHandle) {
177 self.instrumentation = Some(handle);
178 }
179
180 pub fn with_send_duration_instrumentation(&mut self, stage: usize, span: &Span) {
182 let _enter = span.enter();
183 self.send_duration = Some(register(BufferSendDuration { stage }));
184 }
185}
186
187impl<T: Bufferable> BufferSender<T> {
188 #[cfg(test)]
189 pub(crate) fn get_base_ref(&self) -> &SenderAdapter<T> {
190 &self.base
191 }
192
193 #[cfg(test)]
194 pub(crate) fn get_overflow_ref(&self) -> Option<&BufferSender<T>> {
195 self.overflow.as_ref().map(AsRef::as_ref)
196 }
197
198 #[async_recursion]
199 pub async fn send(&mut self, item: T, send_reference: Option<Instant>) -> crate::Result<()> {
200 let item_sizing = self
201 .instrumentation
202 .as_ref()
203 .map(|_| (item.event_count(), item.size_of()));
204
205 let mut sent_to_base = true;
206 let mut was_dropped = false;
207 match self.when_full {
208 WhenFull::Block => self.base.send(item).await?,
209 WhenFull::DropNewest => {
210 if self.base.try_send(item).await?.is_some() {
211 was_dropped = true;
212 }
213 }
214 WhenFull::Overflow => {
215 if let Some(item) = self.base.try_send(item).await? {
216 sent_to_base = false;
217 self.overflow
218 .as_mut()
219 .unwrap_or_else(|| unreachable!("overflow must exist"))
220 .send(item, send_reference)
221 .await?;
222 }
223 }
224 }
225
226 if sent_to_base || was_dropped {
227 if let (Some(send_duration), Some(send_reference)) =
228 (self.send_duration.as_ref(), send_reference)
229 {
230 send_duration.emit(send_reference.elapsed());
231 }
232 }
233
234 if let Some(instrumentation) = self.instrumentation.as_ref() {
235 if let Some((item_count, item_size)) = item_sizing {
236 if sent_to_base {
237 instrumentation.increment_received_event_count_and_byte_size(
238 item_count as u64,
239 item_size as u64,
240 );
241 }
242
243 if was_dropped {
244 instrumentation.increment_dropped_event_count_and_byte_size(
245 item_count as u64,
246 item_size as u64,
247 true,
248 );
249 }
250 }
251 }
252
253 Ok(())
254 }
255
256 #[async_recursion]
257 pub async fn flush(&mut self) -> crate::Result<()> {
258 self.base.flush().await?;
259 if let Some(overflow) = self.overflow.as_mut() {
260 overflow.flush().await?;
261 }
262
263 Ok(())
264 }
265}