1use std::{collections::HashMap, future::ready, task::Poll};
2
3use bytes::{Bytes, BytesMut};
4use futures::{future::BoxFuture, stream, SinkExt};
5use tower::Service;
6use vector_lib::configurable::configurable_component;
7use vector_lib::{
8 event::metric::{MetricSketch, MetricTags, Quantile},
9 ByteSizeOf, EstimatedJsonEncodedSizeOf,
10};
11
12use crate::{
13 config::{AcknowledgementsConfig, Input, SinkConfig, SinkContext},
14 event::{
15 metric::{Metric, MetricValue, Sample, StatisticKind},
16 Event, KeyString,
17 },
18 http::HttpClient,
19 internal_events::InfluxdbEncodingError,
20 sinks::{
21 influxdb::{
22 encode_timestamp, healthcheck, influx_line_protocol, influxdb_settings, Field,
23 InfluxDb1Settings, InfluxDb2Settings, ProtocolVersion,
24 },
25 util::{
26 buffer::metrics::{MetricNormalize, MetricNormalizer, MetricSet, MetricsBuffer},
27 encode_namespace,
28 http::{HttpBatchService, HttpRetryLogic},
29 statistic::{validate_quantiles, DistributionStatistic},
30 BatchConfig, EncodedEvent, SinkBatchSettings, TowerRequestConfig,
31 },
32 Healthcheck, VectorSink,
33 },
34 tls::{TlsConfig, TlsSettings},
35};
36
/// Tower `Service` that encodes batches of metrics into InfluxDB line
/// protocol and forwards them through the shared HTTP batch transport.
#[derive(Clone)]
struct InfluxDbSvc {
    // Full sink configuration; `default_namespace`, `tags`, and `quantiles`
    // are consulted again at encode time in `call`.
    config: InfluxDbConfig,
    // Line-protocol flavor (v1 vs. v2), derived from whichever settings
    // block (`influxdb1_settings` / `influxdb2_settings`) was configured.
    protocol_version: ProtocolVersion,
    // Underlying HTTP service that builds and issues the write requests.
    inner: HttpBatchService<BoxFuture<'static, crate::Result<hyper::Request<Bytes>>>>,
}
43
/// Default batching parameters for the InfluxDB metrics sink.
#[derive(Clone, Copy, Debug, Default)]
pub struct InfluxDbDefaultBatchSettings;
46
impl SinkBatchSettings for InfluxDbDefaultBatchSettings {
    // Flush after 20 events or 1 second, whichever comes first; no byte cap.
    const MAX_EVENTS: Option<usize> = Some(20);
    const MAX_BYTES: Option<usize> = None;
    const TIMEOUT_SECS: f64 = 1.0;
}
52
#[configurable_component(sink("influxdb_metrics", "Deliver metric event data to InfluxDB."))]
#[derive(Clone, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct InfluxDbConfig {
    /// A prefix that is prepended to the name of any metric that does not
    /// already carry its own namespace.
    #[serde(alias = "namespace")]
    #[configurable(metadata(docs::examples = "service"))]
    pub default_namespace: Option<String>,

    /// The endpoint to send data to.
    #[configurable(metadata(docs::examples = "http://localhost:8086/"))]
    pub endpoint: String,

    /// Settings for InfluxDB v1; mutually exclusive with `influxdb2_settings`.
    #[serde(flatten)]
    pub influxdb1_settings: Option<InfluxDb1Settings>,

    /// Settings for InfluxDB v2; mutually exclusive with `influxdb1_settings`.
    #[serde(flatten)]
    pub influxdb2_settings: Option<InfluxDb2Settings>,

    #[configurable(derived)]
    #[serde(default)]
    pub batch: BatchConfig<InfluxDbDefaultBatchSettings>,

    #[configurable(derived)]
    #[serde(default)]
    pub request: TowerRequestConfig,

    /// Extra tags added to every metric; on key collision these override the
    /// metric's own tags (see `merge_tags`).
    #[configurable(metadata(docs::additional_props_description = "A tag key/value pair."))]
    #[configurable(metadata(docs::examples = "example_tags()"))]
    pub tags: Option<HashMap<String, String>>,

    #[configurable(derived)]
    pub tls: Option<TlsConfig>,

    /// Quantiles emitted for distribution metrics with summary statistics.
    #[serde(default = "default_summary_quantiles")]
    pub quantiles: Vec<f64>,

    #[configurable(derived)]
    #[serde(
        default,
        deserialize_with = "crate::serde::bool_or_struct",
        skip_serializing_if = "crate::serde::is_default"
    )]
    acknowledgements: AcknowledgementsConfig,
}
106
/// Default quantiles reported for summary-style distribution metrics.
pub fn default_summary_quantiles() -> Vec<f64> {
    [0.5, 0.75, 0.9, 0.95, 0.99].to_vec()
}
110
/// Example value used for the generated `tags` documentation.
pub fn example_tags() -> HashMap<String, String> {
    let mut tags = HashMap::new();
    tags.insert("region".to_string(), "us-west-1".to_string());
    tags
}
114
115impl_generate_config_from_default!(InfluxDbConfig);
116
117#[async_trait::async_trait]
118#[typetag::serde(name = "influxdb_metrics")]
119impl SinkConfig for InfluxDbConfig {
120 async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> {
121 let tls_settings = TlsSettings::from_options(self.tls.as_ref())?;
122 let client = HttpClient::new(tls_settings, cx.proxy())?;
123 let healthcheck = healthcheck(
124 self.clone().endpoint,
125 self.clone().influxdb1_settings,
126 self.clone().influxdb2_settings,
127 client.clone(),
128 )?;
129 validate_quantiles(&self.quantiles)?;
130 let sink = InfluxDbSvc::new(self.clone(), client)?;
131 Ok((sink, healthcheck))
132 }
133
134 fn input(&self) -> Input {
135 Input::metric()
136 }
137
138 fn acknowledgements(&self) -> &AcknowledgementsConfig {
139 &self.acknowledgements
140 }
141}
142
impl InfluxDbSvc {
    /// Assembles the complete metrics sink: resolves the v1/v2 settings,
    /// builds the HTTP batch service, and wires batching, retry logic, and
    /// metric normalization around it.
    pub fn new(config: InfluxDbConfig, client: HttpClient) -> crate::Result<VectorSink> {
        // Exactly one of the v1/v2 settings blocks must be present; this
        // also determines the protocol version and auth token below.
        let settings = influxdb_settings(
            config.influxdb1_settings.clone(),
            config.influxdb2_settings.clone(),
        )?;

        let endpoint = config.endpoint.clone();
        let token = settings.token();
        let protocol_version = settings.protocol_version();

        let batch = config.batch.into_batch_settings()?;
        let request = config.request.into_settings();

        let uri = settings.write_uri(endpoint)?;

        let http_service = HttpBatchService::new(client, create_build_request(uri, token.inner()));

        let influxdb_http_service = InfluxDbSvc {
            config,
            protocol_version,
            inner: http_service,
        };
        // Normalizer state is captured by the closure below and shared
        // across all events flowing through the sink.
        let mut normalizer = MetricNormalizer::<InfluxMetricNormalize>::default();

        let sink = request
            .batch_sink(
                HttpRetryLogic::default(),
                influxdb_http_service,
                MetricsBuffer::new(batch.size),
                batch.timeout,
            )
            .with_flat_map(move |event: Event| {
                stream::iter({
                    // Record sizes before the event is consumed so the
                    // EncodedEvent can carry accurate byte accounting.
                    let byte_size = event.size_of();
                    let json_size = event.estimated_json_encoded_size_of();

                    // Normalization may drop the metric (e.g. the first
                    // absolute-to-incremental observation), yielding None.
                    normalizer
                        .normalize(event.into_metric())
                        .map(|metric| Ok(EncodedEvent::new(metric, byte_size, json_size)))
                })
            })
            .sink_map_err(|error| error!(message = "Fatal influxdb sink error.", %error));

        // The event-sink adapter is deprecated but still the integration
        // point for this batch-sink construction.
        #[allow(deprecated)]
        Ok(VectorSink::from_event_sink(sink))
    }
}
191
impl Service<Vec<Metric>> for InfluxDbSvc {
    type Response = http::Response<Bytes>;
    type Error = crate::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    // Readiness is delegated entirely to the inner HTTP batch service.
    fn poll_ready(&mut self, cx: &mut std::task::Context) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    /// Encodes the batch into line protocol and hands the frozen body to the
    /// inner HTTP service for delivery.
    fn call(&mut self, items: Vec<Metric>) -> Self::Future {
        let input = encode_events(
            self.protocol_version,
            items,
            self.config.default_namespace.as_deref(),
            self.config.tags.as_ref(),
            &self.config.quantiles,
        );
        let body = input.freeze();

        self.inner.call(body)
    }
}
216
217fn create_build_request(
218 uri: http::Uri,
219 token: &str,
220) -> impl Fn(Bytes) -> BoxFuture<'static, crate::Result<hyper::Request<Bytes>>>
221 + Sync
222 + Send
223 + 'static
224 + use<> {
225 let auth = format!("Token {token}");
226 move |body| {
227 Box::pin(ready(
228 hyper::Request::post(uri.clone())
229 .header("Content-Type", "text/plain")
230 .header("Authorization", auth.clone())
231 .body(body)
232 .map_err(Into::into),
233 ))
234 }
235}
236
237fn merge_tags(event: &Metric, tags: Option<&HashMap<String, String>>) -> Option<MetricTags> {
238 match (event.tags().cloned(), tags) {
239 (Some(mut event_tags), Some(config_tags)) => {
240 event_tags.extend(config_tags.iter().map(|(k, v)| (k.clone(), v.clone())));
241 Some(event_tags)
242 }
243 (Some(event_tags), None) => Some(event_tags),
244 (None, Some(config_tags)) => Some(
245 config_tags
246 .iter()
247 .map(|(k, v)| (k.clone(), v.clone()))
248 .collect(),
249 ),
250 (None, None) => None,
251 }
252}
253
/// Normalization policy for metrics headed to InfluxDB.
#[derive(Default)]
pub struct InfluxMetricNormalize;
256
impl MetricNormalize for InfluxMetricNormalize {
    fn normalize(&mut self, state: &mut MetricSet, metric: Metric) -> Option<Metric> {
        match (metric.kind(), &metric.value()) {
            // Counters are emitted as incremental deltas.
            (_, MetricValue::Counter { .. }) => state.make_incremental(metric),
            // Gauges are emitted as absolute values.
            (_, MetricValue::Gauge { .. }) => state.make_absolute(metric),
            // All other metric types pass through unchanged.
            _ => Some(metric),
        }
    }
}
270
/// Encodes a batch of metrics into InfluxDB line protocol.
///
/// Each metric becomes one line; a `metric_type` tag is always set
/// (overriding any pre-existing tag of that name). Metrics that fail to
/// encode are skipped and reported via `InfluxdbEncodingError`, so the rest
/// of the batch is still produced.
fn encode_events(
    protocol_version: ProtocolVersion,
    events: Vec<Metric>,
    default_namespace: Option<&str>,
    tags: Option<&HashMap<String, String>>,
    quantiles: &[f64],
) -> BytesMut {
    let mut output = BytesMut::new();
    // Total batch size, reported alongside any encoding error.
    let count = events.len();

    for event in events.into_iter() {
        // The metric's own namespace wins over the configured default.
        let fullname = encode_namespace(event.namespace().or(default_namespace), '.', event.name());
        let ts = encode_timestamp(event.timestamp());
        let tags = merge_tags(&event, tags);
        let (metric_type, fields) = get_type_and_fields(event.value(), quantiles);

        let mut unwrapped_tags = tags.unwrap_or_default();
        unwrapped_tags.replace("metric_type".to_owned(), metric_type.to_owned());

        if let Err(error_message) = influx_line_protocol(
            protocol_version,
            &fullname,
            Some(unwrapped_tags),
            fields,
            ts,
            &mut output,
        ) {
            emit!(InfluxdbEncodingError {
                error_message,
                count,
            });
        };
    }

    // Drop the trailing newline appended after the final line.
    if !output.is_empty() {
        output.truncate(output.len() - 1);
    }
    output
}
311
/// Maps a metric value onto its `metric_type` tag and line-protocol fields.
///
/// Returns `None` fields only for distributions whose samples produce no
/// statistics (empty or zero-count sample sets).
fn get_type_and_fields(
    value: &MetricValue,
    quantiles: &[f64],
) -> (&'static str, Option<HashMap<KeyString, Field>>) {
    match value {
        MetricValue::Counter { value } => ("counter", Some(to_fields(*value))),
        MetricValue::Gauge { value } => ("gauge", Some(to_fields(*value))),
        // Sets are reduced to their cardinality.
        MetricValue::Set { values } => ("set", Some(to_fields(values.len() as f64))),
        MetricValue::AggregatedHistogram {
            buckets,
            count,
            sum,
        } => {
            // One `bucket_<limit>` field per bucket, plus count and sum.
            let mut fields: HashMap<KeyString, Field> = buckets
                .iter()
                .map(|sample| {
                    (
                        format!("bucket_{}", sample.upper_limit).into(),
                        Field::UnsignedInt(sample.count),
                    )
                })
                .collect();
            fields.insert("count".into(), Field::UnsignedInt(*count));
            fields.insert("sum".into(), Field::Float(*sum));

            ("histogram", Some(fields))
        }
        MetricValue::AggregatedSummary {
            quantiles,
            count,
            sum,
        } => {
            // One `quantile_<q>` field per quantile, plus count and sum.
            let mut fields: HashMap<KeyString, Field> = quantiles
                .iter()
                .map(|quantile| {
                    (
                        format!("quantile_{}", quantile.quantile).into(),
                        Field::Float(quantile.value),
                    )
                })
                .collect();
            fields.insert("count".into(), Field::UnsignedInt(*count));
            fields.insert("sum".into(), Field::Float(*sum));

            ("summary", Some(fields))
        }
        MetricValue::Distribution { samples, statistic } => {
            // Histogram-style distributions report only the 95th percentile;
            // summary-style distributions use the configured quantiles.
            let quantiles = match statistic {
                StatisticKind::Histogram => &[0.95] as &[_],
                StatisticKind::Summary => quantiles,
            };
            let fields = encode_distribution(samples, quantiles);
            ("distribution", fields)
        }
        MetricValue::Sketch { sketch } => match sketch {
            MetricSketch::AgentDDSketch(ddsketch) => {
                // Sketches report a fixed quantile set plus basic summary
                // statistics; missing values fall back to defaults (0.0 for
                // quantiles/sum/avg, extreme float bounds for min/max).
                let mut fields = [0.5, 0.75, 0.9, 0.99]
                    .iter()
                    .map(|q| {
                        let quantile = Quantile {
                            quantile: *q,
                            value: ddsketch.quantile(*q).unwrap_or(0.0),
                        };
                        (
                            quantile.to_percentile_string().into(),
                            Field::Float(quantile.value),
                        )
                    })
                    .collect::<HashMap<KeyString, _>>();
                fields.insert(
                    "count".into(),
                    Field::UnsignedInt(u64::from(ddsketch.count())),
                );
                fields.insert(
                    "min".into(),
                    Field::Float(ddsketch.min().unwrap_or(f64::MAX)),
                );
                fields.insert(
                    "max".into(),
                    Field::Float(ddsketch.max().unwrap_or(f64::MIN)),
                );
                fields.insert("sum".into(), Field::Float(ddsketch.sum().unwrap_or(0.0)));
                fields.insert("avg".into(), Field::Float(ddsketch.avg().unwrap_or(0.0)));

                ("sketch", Some(fields))
            }
        },
    }
}
403
404fn encode_distribution(samples: &[Sample], quantiles: &[f64]) -> Option<HashMap<KeyString, Field>> {
405 let statistic = DistributionStatistic::from_samples(samples, quantiles)?;
406
407 Some(
408 [
409 ("min".into(), Field::Float(statistic.min)),
410 ("max".into(), Field::Float(statistic.max)),
411 ("median".into(), Field::Float(statistic.median)),
412 ("avg".into(), Field::Float(statistic.avg)),
413 ("sum".into(), Field::Float(statistic.sum)),
414 ("count".into(), Field::Float(statistic.count as f64)),
415 ]
416 .into_iter()
417 .chain(
418 statistic
419 .quantiles
420 .iter()
421 .map(|&(p, val)| (format!("quantile_{p:.2}").into(), Field::Float(val))),
422 )
423 .collect(),
424 )
425}
426
427fn to_fields(value: f64) -> HashMap<KeyString, Field> {
428 [("value".into(), Field::Float(value))]
429 .into_iter()
430 .collect()
431}
432
// Unit tests for config parsing and line-protocol encoding. Timestamps and
// tag fixtures come from `sinks::influxdb::test_util`.
#[cfg(test)]
mod tests {
    use indoc::indoc;
    use similar_asserts::assert_eq;

    use super::*;
    use crate::{
        event::metric::{Metric, MetricKind, MetricValue, StatisticKind},
        sinks::influxdb::test_util::{assert_fields, split_line_protocol, tags, ts},
    };

    #[test]
    fn generate_config() {
        crate::test_util::test_generate_config::<InfluxDbConfig>();
    }

    // `namespace` is accepted as an alias for `default_namespace`.
    #[test]
    fn test_config_with_tags() {
        let config = indoc! {r#"
            namespace = "vector"
            endpoint = "http://localhost:9999"
            tags = {region="us-west-1"}
        "#};

        toml::from_str::<InfluxDbConfig>(config).unwrap();
    }

    #[test]
    fn test_encode_counter() {
        let events = vec![
            Metric::new(
                "total",
                MetricKind::Incremental,
                MetricValue::Counter { value: 1.5 },
            )
            .with_namespace(Some("ns"))
            .with_timestamp(Some(ts())),
            Metric::new(
                "check",
                MetricKind::Incremental,
                MetricValue::Counter { value: 1.0 },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, Some("vector"), None, &[]);
        assert_eq!(
            line_protocols,
            "ns.total,metric_type=counter value=1.5 1542182950000000011\n\
             ns.check,metric_type=counter,normal_tag=value,true_tag=true value=1 1542182950000000011"
        );
    }

    #[test]
    fn test_encode_gauge() {
        let events = vec![Metric::new(
            "meter",
            MetricKind::Incremental,
            MetricValue::Gauge { value: -1.5 },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(
            line_protocols,
            "ns.meter,metric_type=gauge,normal_tag=value,true_tag=true value=-1.5 1542182950000000011"
        );
    }

    // Sets are encoded as their cardinality.
    #[test]
    fn test_encode_set() {
        let events = vec![Metric::new(
            "users",
            MetricKind::Incremental,
            MetricValue::Set {
                values: vec!["alice".into(), "bob".into()].into_iter().collect(),
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(
            line_protocols,
            "ns.users,metric_type=set,normal_tag=value,true_tag=true value=2 1542182950000000011"
        );
    }

    // v1 renders unsigned integer fields with the `i` suffix.
    #[test]
    fn test_encode_histogram_v1() {
        let events = vec![Metric::new(
            "requests",
            MetricKind::Absolute,
            MetricValue::AggregatedHistogram {
                buckets: vector_lib::buckets![1.0 => 1, 2.1 => 2, 3.0 => 3],
                count: 6,
                sum: 12.5,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V1, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol1.0);
        assert_eq!(
            "metric_type=histogram,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "bucket_1=1i",
                "bucket_2.1=2i",
                "bucket_3=3i",
                "count=6i",
                "sum=12.5",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    // v2 renders unsigned integer fields with the `u` suffix.
    #[test]
    fn test_encode_histogram() {
        let events = vec![Metric::new(
            "requests",
            MetricKind::Absolute,
            MetricValue::AggregatedHistogram {
                buckets: vector_lib::buckets![1.0 => 1, 2.1 => 2, 3.0 => 3],
                count: 6,
                sum: 12.5,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol1.0);
        assert_eq!(
            "metric_type=histogram,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "bucket_1=1u",
                "bucket_2.1=2u",
                "bucket_3=3u",
                "count=6u",
                "sum=12.5",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    #[test]
    fn test_encode_summary_v1() {
        let events = vec![Metric::new(
            "requests_sum",
            MetricKind::Absolute,
            MetricValue::AggregatedSummary {
                quantiles: vector_lib::quantiles![0.01 => 1.5, 0.5 => 2.0, 0.99 => 3.0],
                count: 6,
                sum: 12.0,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V1, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests_sum", line_protocol1.0);
        assert_eq!(
            "metric_type=summary,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "count=6i",
                "quantile_0.01=1.5",
                "quantile_0.5=2",
                "quantile_0.99=3",
                "sum=12",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    #[test]
    fn test_encode_summary() {
        let events = vec![Metric::new(
            "requests_sum",
            MetricKind::Absolute,
            MetricValue::AggregatedSummary {
                quantiles: vector_lib::quantiles![0.01 => 1.5, 0.5 => 2.0, 0.99 => 3.0],
                count: 6,
                sum: 12.0,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests_sum", line_protocol1.0);
        assert_eq!(
            "metric_type=summary,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "count=6u",
                "quantile_0.01=1.5",
                "quantile_0.5=2",
                "quantile_0.99=3",
                "sum=12",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    // Histogram-style distributions report the fixed 0.95 quantile only.
    #[test]
    fn test_encode_distribution() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: vector_lib::samples![1.0 => 3, 2.0 => 3, 3.0 => 2],
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
            Metric::new(
                "dense_stats",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: (0..20)
                        .map(|v| Sample {
                            value: f64::from(v),
                            rate: 1,
                        })
                        .collect(),
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_timestamp(Some(ts())),
            Metric::new(
                "sparse_stats",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: (1..5)
                        .map(|v| Sample {
                            value: f64::from(v),
                            rate: v,
                        })
                        .collect(),
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 3);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol1.0);
        assert_eq!(
            "metric_type=distribution,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "avg=1.875",
                "count=8",
                "max=3",
                "median=2",
                "min=1",
                "quantile_0.95=3",
                "sum=15",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);

        let line_protocol2 = split_line_protocol(line_protocols[1]);
        assert_eq!("ns.dense_stats", line_protocol2.0);
        assert_eq!("metric_type=distribution", line_protocol2.1);
        assert_fields(
            line_protocol2.2.to_string(),
            [
                "avg=9.5",
                "count=20",
                "max=19",
                "median=9",
                "min=0",
                "quantile_0.95=18",
                "sum=190",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol2.3);

        let line_protocol3 = split_line_protocol(line_protocols[2]);
        assert_eq!("ns.sparse_stats", line_protocol3.0);
        assert_eq!("metric_type=distribution", line_protocol3.1);
        assert_fields(
            line_protocol3.2.to_string(),
            [
                "avg=3",
                "count=10",
                "max=4",
                "median=3",
                "min=1",
                "quantile_0.95=4",
                "sum=30",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol3.3);
    }

    // Distributions with no samples produce no output at all.
    #[test]
    fn test_encode_distribution_empty_stats() {
        let events = vec![Metric::new(
            "requests",
            MetricKind::Incremental,
            MetricValue::Distribution {
                samples: vec![],
                statistic: StatisticKind::Histogram,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(line_protocols.len(), 0);
    }

    // Samples whose rates are all zero are treated like an empty set.
    #[test]
    fn test_encode_distribution_zero_counts_stats() {
        let events = vec![Metric::new(
            "requests",
            MetricKind::Incremental,
            MetricValue::Distribution {
                samples: vector_lib::samples![1.0 => 0, 2.0 => 0],
                statistic: StatisticKind::Histogram,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(line_protocols.len(), 0);
    }

    // Summary-style distributions use the configured quantile list.
    #[test]
    fn test_encode_distribution_summary() {
        let events = vec![Metric::new(
            "requests",
            MetricKind::Incremental,
            MetricValue::Distribution {
                samples: vector_lib::samples![1.0 => 3, 2.0 => 3, 3.0 => 2],
                statistic: StatisticKind::Summary,
            },
        )
        .with_namespace(Some("ns"))
        .with_tags(Some(tags()))
        .with_timestamp(Some(ts()))];

        let line_protocols = encode_events(
            ProtocolVersion::V2,
            events,
            None,
            None,
            &default_summary_quantiles(),
        );
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol.0);
        assert_eq!(
            "metric_type=distribution,normal_tag=value,true_tag=true",
            line_protocol.1
        );
        assert_fields(
            line_protocol.2.to_string(),
            [
                "avg=1.875",
                "count=8",
                "max=3",
                "median=2",
                "min=1",
                "sum=15",
                "quantile_0.50=2",
                "quantile_0.75=2",
                "quantile_0.90=3",
                "quantile_0.95=3",
                "quantile_0.99=3",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol.3);
    }

    // Sink-level tags are merged into every metric's tag set.
    #[test]
    fn test_encode_with_some_tags() {
        crate::test_util::trace_init();

        let events = vec![
            Metric::new(
                "cpu",
                MetricKind::Absolute,
                MetricValue::Gauge { value: 2.5 },
            )
            .with_namespace(Some("vector"))
            .with_timestamp(Some(ts())),
            Metric::new(
                "mem",
                MetricKind::Absolute,
                MetricValue::Gauge { value: 1000.0 },
            )
            .with_namespace(Some("vector"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let mut tags = HashMap::new();
        tags.insert("host".to_owned(), "local".to_owned());
        tags.insert("datacenter".to_owned(), "us-east".to_owned());

        let line_protocols = encode_events(
            ProtocolVersion::V1,
            events,
            Some("ns"),
            Some(tags).as_ref(),
            &[],
        );
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 2);
        assert_eq!(
            line_protocols[0],
            "vector.cpu,datacenter=us-east,host=local,metric_type=gauge value=2.5 1542182950000000011"
        );
        assert_eq!(
            line_protocols[1],
            "vector.mem,datacenter=us-east,host=local,metric_type=gauge,normal_tag=value,true_tag=true value=1000 1542182950000000011"
        );
    }
}
935
// Integration tests against live InfluxDB v1/v2 instances; gated behind the
// `influxdb-integration-tests` feature and require the test containers from
// the repository's integration environment.
#[cfg(feature = "influxdb-integration-tests")]
#[cfg(test)]
mod integration_tests {
    use chrono::{SecondsFormat, Utc};
    use futures::stream;
    use similar_asserts::assert_eq;
    use vector_lib::metric_tags;

    use crate::{
        config::{SinkConfig, SinkContext},
        event::{
            metric::{Metric, MetricKind, MetricValue},
            Event,
        },
        http::HttpClient,
        sinks::influxdb::{
            metrics::{default_summary_quantiles, InfluxDbConfig, InfluxDbSvc},
            test_util::{
                address_v1, address_v2, cleanup_v1, format_timestamp, onboarding_v1, onboarding_v2,
                query_v1, BUCKET, ORG, TOKEN,
            },
            InfluxDb1Settings, InfluxDb2Settings,
        },
        test_util::components::{run_and_assert_sink_compliance, HTTP_SINK_TAGS},
        tls::{self, TlsConfig},
    };

    #[tokio::test]
    async fn inserts_metrics_v1_over_https() {
        insert_metrics_v1(
            address_v1(true).as_str(),
            Some(TlsConfig {
                ca_file: Some(tls::TEST_PEM_CA_PATH.into()),
                ..Default::default()
            }),
        )
        .await
    }

    #[tokio::test]
    async fn inserts_metrics_v1_over_http() {
        insert_metrics_v1(address_v1(false).as_str(), None).await
    }

    // Shared body for the v1 tests: writes counters, then queries them back
    // and verifies names, tags, values, and timestamps.
    async fn insert_metrics_v1(url: &str, tls: Option<TlsConfig>) {
        crate::test_util::trace_init();
        let database = onboarding_v1(url).await;

        let cx = SinkContext::default();

        let config = InfluxDbConfig {
            endpoint: url.to_string(),
            influxdb1_settings: Some(InfluxDb1Settings {
                consistency: None,
                database: database.clone(),
                retention_policy_name: Some("autogen".to_string()),
                username: None,
                password: None,
            }),
            influxdb2_settings: None,
            batch: Default::default(),
            request: Default::default(),
            tls,
            quantiles: default_summary_quantiles(),
            tags: None,
            default_namespace: None,
            acknowledgements: Default::default(),
        };

        let events: Vec<_> = (0..10).map(create_event).collect();
        let (sink, _) = config.build(cx).await.expect("error when building config");
        run_and_assert_sink_compliance(sink, stream::iter(events.clone()), &HTTP_SINK_TAGS).await;

        let res = query_v1_json(url, &format!("show series on {database}")).await;

        assert_eq!(
            res["results"][0]["series"][0]["values"]
                .as_array()
                .unwrap()
                .len(),
            events.len()
        );

        for event in events {
            let metric = event.into_metric();
            let name = format!("{}.{}", metric.namespace().unwrap(), metric.name());
            let value = match metric.value() {
                MetricValue::Counter { value } => *value,
                _ => unreachable!(),
            };
            let timestamp = format_timestamp(metric.timestamp().unwrap(), SecondsFormat::Nanos);
            let res = query_v1_json(url, &format!("select * from {database}..\"{name}\"")).await;

            assert_eq!(
                res,
                serde_json::json! {
                    {"results": [{
                        "statement_id": 0,
                        "series": [{
                            "name": name,
                            "columns": ["time", "metric_type", "production", "region", "value"],
                            "values": [[timestamp, "counter", "true", "us-west-1", value as isize]]
                        }]
                    }]}
                }
            );
        }

        cleanup_v1(url, &database).await;
    }

    // Runs an InfluxQL query and parses the JSON response body.
    async fn query_v1_json(url: &str, query: &str) -> serde_json::Value {
        let string = query_v1(url, query)
            .await
            .text()
            .await
            .expect("Fetching text from InfluxDB query failed");
        serde_json::from_str(&string).expect("Error when parsing InfluxDB response JSON")
    }

    #[tokio::test]
    async fn influxdb2_metrics_put_data() {
        crate::test_util::trace_init();
        let endpoint = address_v2();
        onboarding_v2(&endpoint).await;

        let cx = SinkContext::default();

        let config = InfluxDbConfig {
            endpoint,
            influxdb1_settings: None,
            influxdb2_settings: Some(InfluxDb2Settings {
                org: ORG.to_string(),
                bucket: BUCKET.to_string(),
                token: TOKEN.to_string().into(),
            }),
            quantiles: default_summary_quantiles(),
            batch: Default::default(),
            request: Default::default(),
            tags: None,
            tls: None,
            default_namespace: None,
            acknowledgements: Default::default(),
        };

        // Unique metric name per run so reruns don't collide in the bucket.
        let metric = format!(
            "counter-{}",
            Utc::now()
                .timestamp_nanos_opt()
                .expect("Timestamp out of range")
        );
        let mut events = Vec::new();
        for i in 0..10 {
            let event = Event::Metric(
                Metric::new(
                    metric.clone(),
                    MetricKind::Incremental,
                    MetricValue::Counter { value: i as f64 },
                )
                .with_namespace(Some("ns"))
                .with_tags(Some(metric_tags!(
                    "region" => "us-west-1",
                    "production" => "true",
                ))),
            );
            events.push(event);
        }

        let client = HttpClient::new(None, cx.proxy()).unwrap();
        let sink = InfluxDbSvc::new(config, client).unwrap();
        run_and_assert_sink_compliance(sink, stream::iter(events), &HTTP_SINK_TAGS).await;

        let mut body = std::collections::HashMap::new();
        body.insert("query", format!("from(bucket:\"my-bucket\") |> range(start: 0) |> filter(fn: (r) => r._measurement == \"ns.{metric}\")"));
        body.insert("type", "flux".to_owned());

        let client = reqwest::Client::builder()
            .danger_accept_invalid_certs(true)
            .build()
            .unwrap();

        let res = client
            .post(format!("{}/api/v2/query?org=my-org", address_v2()))
            .json(&body)
            .header("accept", "application/json")
            .header("Authorization", "Token my-token")
            .send()
            .await
            .unwrap();
        let string = res.text().await.unwrap();

        // Flux responses are annotated CSV: first line is the header row.
        let lines = string.split('\n').collect::<Vec<&str>>();
        let header = lines[0].split(',').collect::<Vec<&str>>();
        let record = lines[1].split(',').collect::<Vec<&str>>();

        assert_eq!(
            record[header
                .iter()
                .position(|&r| r.trim() == "metric_type")
                .unwrap()]
            .trim(),
            "counter"
        );
        assert_eq!(
            record[header
                .iter()
                .position(|&r| r.trim() == "production")
                .unwrap()]
            .trim(),
            "true"
        );
        assert_eq!(
            record[header.iter().position(|&r| r.trim() == "region").unwrap()].trim(),
            "us-west-1"
        );
        assert_eq!(
            record[header
                .iter()
                .position(|&r| r.trim() == "_measurement")
                .unwrap()]
            .trim(),
            format!("ns.{}", metric)
        );
        assert_eq!(
            record[header.iter().position(|&r| r.trim() == "_field").unwrap()].trim(),
            "value"
        );
        assert_eq!(
            record[header.iter().position(|&r| r.trim() == "_value").unwrap()].trim(),
            "45"
        );
    }

    // Incremental counters tagged and timestamped for the v1 round-trip test.
    fn create_event(i: i32) -> Event {
        Event::Metric(
            Metric::new(
                format!("counter-{i}"),
                MetricKind::Incremental,
                MetricValue::Counter { value: i as f64 },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(metric_tags!(
                "region" => "us-west-1",
                "production" => "true",
            )))
            .with_timestamp(Some(Utc::now())),
        )
    }
}