// vector_buffers/topology/channel/sender.rs
use std::{sync::Arc, time::Instant};
2
3use async_recursion::async_recursion;
4use derivative::Derivative;
5use tokio::sync::Mutex;
6use tracing::Span;
7use vector_common::internal_event::{InternalEventHandle, Registered, register};
8
9use super::limited_queue::LimitedSender;
10use crate::{
11 Bufferable, WhenFull,
12 buffer_usage_data::BufferUsageHandle,
13 internal_events::BufferSendDuration,
14 variants::disk_v2::{self, ProductionFilesystem},
15};
16
/// Adapter over the concrete mechanism used to push items into a buffer.
///
/// Wraps either an in-memory bounded queue sender or a disk (v2) buffer writer,
/// giving `BufferSender` a single type to send through.
#[derive(Clone, Debug)]
pub enum SenderAdapter<T: Bufferable> {
    /// In-memory buffer, backed by a bounded queue sender.
    InMemory(LimitedSender<T>),

    /// Disk v2 buffer, backed by a writer shared behind an async mutex so the
    /// adapter can be cloned across senders.
    DiskV2(Arc<Mutex<disk_v2::BufferWriter<T, ProductionFilesystem>>>),
}
26
27impl<T: Bufferable> From<LimitedSender<T>> for SenderAdapter<T> {
28 fn from(v: LimitedSender<T>) -> Self {
29 Self::InMemory(v)
30 }
31}
32
33impl<T: Bufferable> From<disk_v2::BufferWriter<T, ProductionFilesystem>> for SenderAdapter<T> {
34 fn from(v: disk_v2::BufferWriter<T, ProductionFilesystem>) -> Self {
35 Self::DiskV2(Arc::new(Mutex::new(v)))
36 }
37}
38
39impl<T> SenderAdapter<T>
40where
41 T: Bufferable,
42{
43 pub(crate) async fn send(&mut self, item: T) -> crate::Result<()> {
44 match self {
45 Self::InMemory(tx) => tx.send(item).await.map_err(Into::into),
46 Self::DiskV2(writer) => {
47 let mut writer = writer.lock().await;
48
49 writer.write_record(item).await.map(|_| ()).map_err(|e| {
50 error!("Disk buffer writer has encountered an unrecoverable error.");
56
57 e.into()
58 })
59 }
60 }
61 }
62
63 pub(crate) async fn try_send(&mut self, item: T) -> crate::Result<Option<T>> {
64 match self {
65 Self::InMemory(tx) => tx
66 .try_send(item)
67 .map(|()| None)
68 .or_else(|e| Ok(Some(e.into_inner()))),
69 Self::DiskV2(writer) => {
70 let mut writer = writer.lock().await;
71
72 writer.try_write_record(item).await.map_err(|e| {
73 error!("Disk buffer writer has encountered an unrecoverable error.");
79
80 e.into()
81 })
82 }
83 }
84 }
85
86 pub(crate) async fn flush(&mut self) -> crate::Result<()> {
87 match self {
88 Self::InMemory(_) => Ok(()),
89 Self::DiskV2(writer) => {
90 let mut writer = writer.lock().await;
91 writer.flush().await.map_err(|e| {
92 error!("Disk buffer writer has encountered an unrecoverable error.");
94
95 e.into()
96 })
97 }
98 }
99 }
100
101 pub fn capacity(&self) -> Option<usize> {
102 match self {
103 Self::InMemory(tx) => Some(tx.available_capacity()),
104 Self::DiskV2(_) => None,
105 }
106 }
107}
108
/// A buffer sender that applies the configured "when full" policy (block,
/// drop newest, or overflow to another sender) and optionally reports usage
/// and send-duration metrics.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct BufferSender<T: Bufferable> {
    // Primary buffer that items are sent to first.
    base: SenderAdapter<T>,
    // Next-stage sender used when `when_full` is `Overflow` and `base` is full.
    overflow: Option<Box<BufferSender<T>>>,
    // What to do when `base` has no capacity.
    when_full: WhenFull,
    // Optional handle for reporting received/dropped event counts and byte sizes.
    instrumentation: Option<BufferUsageHandle>,
    // Optional registered metric for send duration; excluded from `Debug` output.
    #[derivative(Debug = "ignore")]
    send_duration: Option<Registered<BufferSendDuration>>,
}
141
142impl<T: Bufferable> BufferSender<T> {
143 pub fn new(base: SenderAdapter<T>, when_full: WhenFull) -> Self {
145 Self {
146 base,
147 overflow: None,
148 when_full,
149 instrumentation: None,
150 send_duration: None,
151 }
152 }
153
154 pub fn with_overflow(base: SenderAdapter<T>, overflow: BufferSender<T>) -> Self {
156 Self {
157 base,
158 overflow: Some(Box::new(overflow)),
159 when_full: WhenFull::Overflow,
160 instrumentation: None,
161 send_duration: None,
162 }
163 }
164
165 #[cfg(test)]
170 pub fn switch_to_overflow(&mut self, overflow: BufferSender<T>) {
171 self.overflow = Some(Box::new(overflow));
172 self.when_full = WhenFull::Overflow;
173 }
174
175 pub fn with_usage_instrumentation(&mut self, handle: BufferUsageHandle) {
177 self.instrumentation = Some(handle);
178 }
179
180 pub fn with_send_duration_instrumentation(&mut self, stage: usize, span: &Span) {
182 let _enter = span.enter();
183 self.send_duration = Some(register(BufferSendDuration { stage }));
184 }
185}
186
impl<T: Bufferable> BufferSender<T> {
    // Test-only accessor for the primary adapter.
    #[cfg(test)]
    pub(crate) fn get_base_ref(&self) -> &SenderAdapter<T> {
        &self.base
    }

    // Test-only accessor for the overflow sender, if one is configured.
    #[cfg(test)]
    pub(crate) fn get_overflow_ref(&self) -> Option<&BufferSender<T>> {
        self.overflow.as_ref().map(AsRef::as_ref)
    }

    /// Sends `item` according to the configured "when full" behavior, recording
    /// instrumentation (received/dropped counts, send duration) when configured.
    ///
    /// Recurses into the overflow sender when the base buffer is full, hence
    /// `#[async_recursion]`.
    ///
    /// # Errors
    ///
    /// Returns an error if the base (or overflow) buffer fails to accept the item.
    #[async_recursion]
    pub async fn send(&mut self, item: T, send_reference: Option<Instant>) -> crate::Result<()> {
        // Size the item up front, and only when instrumentation is enabled:
        // once sent/overflowed, the item is no longer ours to measure.
        let item_sizing = self
            .instrumentation
            .as_ref()
            .map(|_| (item.event_count(), item.size_of()));

        let mut was_dropped = false;

        if let Some(instrumentation) = self.instrumentation.as_ref()
            && let Some((item_count, item_size)) = item_sizing
        {
            instrumentation
                .increment_received_event_count_and_byte_size(item_count as u64, item_size as u64);
        }
        match self.when_full {
            // Block: wait for capacity in the base buffer.
            WhenFull::Block => self.base.send(item).await?,
            // DropNewest: if the base buffer is full, the item is discarded.
            WhenFull::DropNewest => {
                if self.base.try_send(item).await?.is_some() {
                    was_dropped = true;
                }
            }
            // Overflow: if the base buffer is full, forward the item to the
            // next-stage sender. NOTE(review): this still sets `was_dropped`,
            // so the item counts as dropped from *this* stage even though it
            // was forwarded — confirm this is the intended metric semantics.
            WhenFull::Overflow => {
                if let Some(item) = self.base.try_send(item).await? {
                    was_dropped = true;
                    self.overflow
                        .as_mut()
                        .unwrap_or_else(|| unreachable!("overflow must exist"))
                        .send(item, send_reference)
                        .await?;
                }
            }
        }

        if let Some(instrumentation) = self.instrumentation.as_ref()
            && let Some((item_count, item_size)) = item_sizing
            && was_dropped
        {
            // Third argument is a flag whose meaning is defined by
            // `BufferUsageHandle` — presumably "intentional drop"; verify there.
            instrumentation.increment_dropped_event_count_and_byte_size(
                item_count as u64,
                item_size as u64,
                true,
            );
        }
        // Emit send duration measured from the caller-supplied reference point.
        if let Some(send_duration) = self.send_duration.as_ref()
            && let Some(send_reference) = send_reference
        {
            send_duration.emit(send_reference.elapsed());
        }

        Ok(())
    }

    /// Flushes the base buffer and then any overflow sender, recursively.
    ///
    /// # Errors
    ///
    /// Returns an error if any stage fails to flush.
    #[async_recursion]
    pub async fn flush(&mut self) -> crate::Result<()> {
        self.base.flush().await?;
        if let Some(overflow) = self.overflow.as_mut() {
            overflow.flush().await?;
        }

        Ok(())
    }
}