vector/sinks/datadog/traces/apm_stats/flusher.rs

use std::{
    io::Write,
    sync::{Arc, Mutex},
};

use bytes::Bytes;
use snafu::ResultExt;
use tokio::sync::oneshot::{Receiver, Sender};
use vector_lib::{finalization::EventFinalizers, request_metadata::RequestMetadata};

use super::{
    BUCKET_DURATION_NANOSECONDS, DDTracesMetadata, DatadogTracesEndpoint,
    DatadogTracesEndpointConfiguration, RequestBuilderError, StatsPayload, aggregation::Aggregator,
    build_request,
};
use crate::{
    http::{BuildRequestSnafu, HttpClient},
    internal_events::DatadogTracesAPMStatsError,
    sinks::util::{Compression, Compressor},
};

/// Flushes cached APM stats buckets to Datadog on a 10-second interval.
/// When the sink signals this thread that it is shutting down, all remaining
/// buckets are flushed before the thread exits.
///
/// # Arguments
///
/// * `tripwire`                 - Receiver that the sink signals when shutting down.
/// * `client`                   - HttpClient to use in sending the stats payloads.
/// * `compression`              - Compression to use when creating the HTTP requests.
/// * `endpoint_configuration`   - Endpoint configuration to use when creating the HTTP requests.
/// * `aggregator`               - The Aggregator object containing cached stats buckets.
pub async fn flush_apm_stats_thread(
    mut tripwire: Receiver<Sender<()>>,
    client: HttpClient,
    compression: Compression,
    endpoint_configuration: DatadogTracesEndpointConfiguration,
    aggregator: Arc<Mutex<Aggregator>>,
) {
    let sender = ApmStatsSender {
        client,
        compression,
        endpoint_configuration,
        aggregator,
    };

    // flush on the same interval as the stats buckets
    let mut interval =
        tokio::time::interval(std::time::Duration::from_nanos(BUCKET_DURATION_NANOSECONDS));

    debug!("Starting APM stats flushing thread.");

    loop {
        tokio::select! {
            _ = interval.tick() => {
                // flush the oldest bucket from the cache to Datadog
                sender.flush_apm_stats(false).await;
            },
            signal = &mut tripwire => match signal {
                // sink has signaled us that the process is shutting down
                Ok(sink_shutdown_ack_sender) => {
                    debug!("APM stats flushing thread received exit condition. Flushing remaining stats before exiting.");
                    sender.flush_apm_stats(true).await;

                    // signal the sink (which tripped the tripwire) that we are done flushing
                    _ = sink_shutdown_ack_sender.send(());
                    break;
                }
                Err(_) => {
                    error!(message = "Tokio Sender unexpectedly dropped.");
                    break;
                },
            }
        }
    }
}

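/// Bundles the pieces needed to build and send APM stats requests: the HTTP
/// client, compression setting, endpoint configuration, and the shared
/// aggregator holding the cached stats buckets.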
struct ApmStatsSender {
    client: HttpClient,
    compression: Compression,
    endpoint_configuration: DatadogTracesEndpointConfiguration,
    aggregator: Arc<Mutex<Aggregator>>,
}

impl ApmStatsSender {
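    /// Flushes stats buckets from the aggregator (all remaining buckets when
    /// `force` is true) and sends them to Datadog. Send failures are emitted
    /// as a `DatadogTracesAPMStatsError` internal event rather than returned.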
    async fn flush_apm_stats(&self, force: bool) {
        // explicit scope to minimize duration that the Aggregator is locked.
        if let Some((payload, api_key)) = {
            let mut aggregator = self.aggregator.lock().unwrap();
            let client_stats_payloads = aggregator.flush(force);

            if client_stats_payloads.is_empty() {
                // no sense proceeding if no payloads to flush
                None
            } else {
                let payload = StatsPayload {
                    agent_hostname: aggregator.get_agent_hostname(),
                    agent_env: aggregator.get_agent_env(),
                    stats: client_stats_payloads,
                    agent_version: aggregator.get_agent_version(),
                    client_computed: false,
                };

                Some((payload, aggregator.get_api_key()))
            }
        } && let Err(error) = self.compress_and_send(payload, api_key).await
        {
            emit!(DatadogTracesAPMStatsError { error });
        }
    }

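    /// Encodes and compresses the stats payload, builds the HTTP request for
    /// the configured endpoint, and sends it with the HTTP client.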
    async fn compress_and_send(
        &self,
        payload: StatsPayload,
        api_key: Arc<str>,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let (metadata, compressed_payload) = self.build_apm_stats_request_data(api_key, payload)?;

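        // APM stats requests are assembled here rather than by the sink's request
        // builder, so default request metadata is supplied to `build_request`.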
        let request_metadata = RequestMetadata::default();
        let trace_api_request = build_request(
            (metadata, request_metadata),
            compressed_payload,
            self.compression,
            &self.endpoint_configuration,
        );

        let http_request = trace_api_request
            .into_http_request()
            .context(BuildRequestSnafu)?;

        self.client.send(http_request).await?;

        Ok(())
    }

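    /// MessagePack-encodes the stats payload, records the request metadata
    /// (API key, endpoint, uncompressed size, content type), and compresses
    /// the encoded bytes using the configured compression.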
    fn build_apm_stats_request_data(
        &self,
        api_key: Arc<str>,
        payload: StatsPayload,
    ) -> Result<(DDTracesMetadata, Bytes), RequestBuilderError> {
        let encoded_payload =
            rmp_serde::to_vec_named(&payload).map_err(|e| RequestBuilderError::FailedToBuild {
                message: "Encoding failed.",
                reason: e.to_string(),
                dropped_events: 0,
            })?;
        let uncompressed_size = encoded_payload.len();
        let metadata = DDTracesMetadata {
            api_key,
            endpoint: DatadogTracesEndpoint::APMStats,
            finalizers: EventFinalizers::default(),
            uncompressed_size,
            content_type: "application/msgpack".to_string(),
        };

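        // compress the encoded bytes with the configured compression
        // (a plain pass-through when compression is disabled)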
        let mut compressor = Compressor::from(self.compression);
        match compressor.write_all(&encoded_payload) {
            Ok(()) => {
                let bytes = compressor.into_inner().freeze();

                Ok((metadata, bytes))
            }
            Err(e) => Err(RequestBuilderError::FailedToBuild {
                message: "Compression failed.",
                reason: e.to_string(),
                dropped_events: 0,
            }),
        }
    }
}