1use std::{collections::HashMap, future::ready, task::Poll};
2
3use bytes::{Bytes, BytesMut};
4use futures::{SinkExt, future::BoxFuture, stream};
5use tower::Service;
6use vector_lib::{
7 ByteSizeOf, EstimatedJsonEncodedSizeOf,
8 configurable::configurable_component,
9 event::metric::{MetricSketch, MetricTags, Quantile},
10};
11
12use crate::{
13 config::{AcknowledgementsConfig, Input, SinkConfig, SinkContext},
14 event::{
15 Event, KeyString,
16 metric::{Metric, MetricValue, Sample, StatisticKind},
17 },
18 http::HttpClient,
19 internal_events::InfluxdbEncodingError,
20 sinks::{
21 Healthcheck, VectorSink,
22 influxdb::{
23 Field, InfluxDb1Settings, InfluxDb2Settings, ProtocolVersion, encode_timestamp,
24 healthcheck, influx_line_protocol, influxdb_settings,
25 },
26 util::{
27 BatchConfig, EncodedEvent, SinkBatchSettings, TowerRequestConfig,
28 buffer::metrics::{MetricNormalize, MetricNormalizer, MetricSet, MetricsBuffer},
29 encode_namespace,
30 http::{HttpBatchService, HttpRetryLogic},
31 statistic::{DistributionStatistic, validate_quantiles},
32 },
33 },
34 tls::{TlsConfig, TlsSettings},
35};
36
/// Tower service wrapper that encodes batched metrics into InfluxDB line
/// protocol and forwards the encoded payload to an HTTP batch service.
#[derive(Clone)]
struct InfluxDbSvc {
    // Full sink configuration; consulted at encode time for the default
    // namespace, extra tags, and summary quantiles.
    config: InfluxDbConfig,
    // Line-protocol flavor (v1 or v2) resolved from the settings.
    protocol_version: ProtocolVersion,
    // Underlying HTTP service that performs the actual request.
    inner: HttpBatchService<BoxFuture<'static, crate::Result<hyper::Request<Bytes>>>>,
}
43
/// Marker type carrying the default batch settings for this sink.
#[derive(Clone, Copy, Debug, Default)]
pub struct InfluxDbDefaultBatchSettings;
46
impl SinkBatchSettings for InfluxDbDefaultBatchSettings {
    // Flush after 20 events or 1 second, whichever comes first; no byte cap.
    const MAX_EVENTS: Option<usize> = Some(20);
    const MAX_BYTES: Option<usize> = None;
    const TIMEOUT_SECS: f64 = 1.0;
}
52
// Configuration for the `influxdb_metrics` sink.
#[configurable_component(sink("influxdb_metrics", "Deliver metric event data to InfluxDB."))]
#[derive(Clone, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct InfluxDbConfig {
    /// A prefix applied to the name of metrics that do not carry their own
    /// namespace (accepts the legacy `namespace` key as an alias).
    #[serde(alias = "namespace")]
    #[configurable(metadata(docs::examples = "service"))]
    pub default_namespace: Option<String>,

    /// The endpoint to send data to.
    #[configurable(metadata(docs::examples = "http://localhost:8086/"))]
    pub endpoint: String,

    /// Settings for the InfluxDB v1 API (flattened into this config).
    #[serde(flatten)]
    pub influxdb1_settings: Option<InfluxDb1Settings>,

    /// Settings for the InfluxDB v2 API (flattened into this config).
    #[serde(flatten)]
    pub influxdb2_settings: Option<InfluxDb2Settings>,

    #[configurable(derived)]
    #[serde(default)]
    pub batch: BatchConfig<InfluxDbDefaultBatchSettings>,

    #[configurable(derived)]
    #[serde(default)]
    pub request: TowerRequestConfig,

    /// Extra tags merged into every metric; these override event tags with
    /// the same key (see `merge_tags`).
    #[configurable(metadata(docs::additional_props_description = "A tag key/value pair."))]
    #[configurable(metadata(docs::examples = "example_tags()"))]
    pub tags: Option<HashMap<String, String>>,

    #[configurable(derived)]
    pub tls: Option<TlsConfig>,

    /// Quantiles reported for distributions with `Summary` statistics;
    /// defaults to `default_summary_quantiles()`.
    #[serde(default = "default_summary_quantiles")]
    pub quantiles: Vec<f64>,

    #[configurable(derived)]
    #[serde(
        default,
        deserialize_with = "crate::serde::bool_or_struct",
        skip_serializing_if = "crate::serde::is_default"
    )]
    acknowledgements: AcknowledgementsConfig,
}
106
/// The default quantiles reported for aggregated summaries and for
/// distributions with `Summary` statistics.
pub fn default_summary_quantiles() -> Vec<f64> {
    [0.5, 0.75, 0.9, 0.95, 0.99].to_vec()
}
110
/// Example value for the `tags` option, referenced by the config docs.
pub fn example_tags() -> HashMap<String, String> {
    let mut tags = HashMap::new();
    tags.insert("region".to_string(), "us-west-1".to_string());
    tags
}
114
115impl_generate_config_from_default!(InfluxDbConfig);
116
117#[async_trait::async_trait]
118#[typetag::serde(name = "influxdb_metrics")]
119impl SinkConfig for InfluxDbConfig {
120 async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> {
121 let tls_settings = TlsSettings::from_options(self.tls.as_ref())?;
122 let client = HttpClient::new(tls_settings, cx.proxy())?;
123 let healthcheck = healthcheck(
124 self.clone().endpoint,
125 self.clone().influxdb1_settings,
126 self.clone().influxdb2_settings,
127 client.clone(),
128 )?;
129 validate_quantiles(&self.quantiles)?;
130 let sink = InfluxDbSvc::new(self.clone(), client)?;
131 Ok((sink, healthcheck))
132 }
133
134 fn input(&self) -> Input {
135 Input::metric()
136 }
137
138 fn acknowledgements(&self) -> &AcknowledgementsConfig {
139 &self.acknowledgements
140 }
141}
142
impl InfluxDbSvc {
    /// Wires the full sink pipeline: metric normalization, buffering into
    /// batches, line-protocol encoding (via the `Service` impl below), and
    /// HTTP delivery with retries.
    ///
    /// # Errors
    /// Fails if the v1/v2 settings cannot be resolved, batch settings are
    /// invalid, or the write URI cannot be constructed from the endpoint.
    pub fn new(config: InfluxDbConfig, client: HttpClient) -> crate::Result<VectorSink> {
        // Resolves which protocol's settings (v1 or v2) are in effect.
        let settings = influxdb_settings(
            config.influxdb1_settings.clone(),
            config.influxdb2_settings.clone(),
        )?;

        let endpoint = config.endpoint.clone();
        let token = settings.token();
        let protocol_version = settings.protocol_version();

        let batch = config.batch.into_batch_settings()?;
        let request = config.request.into_settings();

        let uri = settings.write_uri(endpoint)?;

        let http_service = HttpBatchService::new(client, create_build_request(uri, token.inner()));

        let influxdb_http_service = InfluxDbSvc {
            config,
            protocol_version,
            inner: http_service,
        };
        // Moved into the `with_flat_map` closure below; carries state across
        // events so counters/gauges are normalized consistently.
        let mut normalizer = MetricNormalizer::<InfluxMetricNormalize>::default();

        let sink = request
            .batch_sink(
                HttpRetryLogic::default(),
                influxdb_http_service,
                MetricsBuffer::new(batch.size),
                batch.timeout,
            )
            .with_flat_map(move |event: Event| {
                stream::iter({
                    // Sizes must be captured before the event is consumed by
                    // `into_metric()`.
                    let byte_size = event.size_of();
                    let json_size = event.estimated_json_encoded_size_of();

                    // `normalize` may drop the metric (returns `None`), e.g.
                    // when state is insufficient to convert it.
                    normalizer
                        .normalize(event.into_metric())
                        .map(|metric| Ok(EncodedEvent::new(metric, byte_size, json_size)))
                })
            })
            .sink_map_err(|error| error!(message = "Fatal influxdb sink error.", %error, internal_log_rate_limit = false));

        #[allow(deprecated)]
        Ok(VectorSink::from_event_sink(sink))
    }
}
191
192impl Service<Vec<Metric>> for InfluxDbSvc {
193 type Response = http::Response<Bytes>;
194 type Error = crate::Error;
195 type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
196
197 fn poll_ready(&mut self, cx: &mut std::task::Context) -> Poll<Result<(), Self::Error>> {
199 self.inner.poll_ready(cx)
200 }
201
202 fn call(&mut self, items: Vec<Metric>) -> Self::Future {
204 let input = encode_events(
205 self.protocol_version,
206 items,
207 self.config.default_namespace.as_deref(),
208 self.config.tags.as_ref(),
209 &self.config.quantiles,
210 );
211 let body = input.freeze();
212
213 self.inner.call(body)
214 }
215}
216
217fn create_build_request(
218 uri: http::Uri,
219 token: &str,
220) -> impl Fn(Bytes) -> BoxFuture<'static, crate::Result<hyper::Request<Bytes>>>
221+ Sync
222+ Send
223+ 'static
224+ use<> {
225 let auth = format!("Token {token}");
226 move |body| {
227 Box::pin(ready(
228 hyper::Request::post(uri.clone())
229 .header("Content-Type", "text/plain")
230 .header("Authorization", auth.clone())
231 .body(body)
232 .map_err(Into::into),
233 ))
234 }
235}
236
237fn merge_tags(event: &Metric, tags: Option<&HashMap<String, String>>) -> Option<MetricTags> {
238 match (event.tags().cloned(), tags) {
239 (Some(mut event_tags), Some(config_tags)) => {
240 event_tags.extend(config_tags.iter().map(|(k, v)| (k.clone(), v.clone())));
241 Some(event_tags)
242 }
243 (Some(event_tags), None) => Some(event_tags),
244 (None, Some(config_tags)) => Some(
245 config_tags
246 .iter()
247 .map(|(k, v)| (k.clone(), v.clone()))
248 .collect(),
249 ),
250 (None, None) => None,
251 }
252}
253
/// Normalization policy for metrics headed to InfluxDB (see impl below).
#[derive(Default)]
pub struct InfluxMetricNormalize;
256
257impl MetricNormalize for InfluxMetricNormalize {
258 fn normalize(&mut self, state: &mut MetricSet, metric: Metric) -> Option<Metric> {
259 match (metric.kind(), &metric.value()) {
260 (_, MetricValue::Counter { .. }) => state.make_incremental(metric),
263 (_, MetricValue::Gauge { .. }) => state.make_absolute(metric),
265 _ => Some(metric),
267 }
268 }
269}
270
/// Encodes a batch of metrics into InfluxDB line protocol.
///
/// Each metric becomes one line: namespaced name, merged tags (plus a
/// `metric_type` tag), type-specific fields, and a timestamp. Metrics that
/// fail to encode emit an `InfluxdbEncodingError` and are skipped; the rest
/// of the batch is still encoded.
fn encode_events(
    protocol_version: ProtocolVersion,
    events: Vec<Metric>,
    default_namespace: Option<&str>,
    tags: Option<&HashMap<String, String>>,
    quantiles: &[f64],
) -> BytesMut {
    let mut output = BytesMut::new();
    // Total batch size, reported alongside any encoding error below.
    let count = events.len();

    for event in events.into_iter() {
        // The event's own namespace wins over the configured default.
        let fullname = encode_namespace(event.namespace().or(default_namespace), '.', event.name());
        let ts = encode_timestamp(event.timestamp());
        let tags = merge_tags(&event, tags);
        let (metric_type, fields) = get_type_and_fields(event.value(), quantiles);

        // `metric_type` overrides any same-named tag from event/config.
        let mut unwrapped_tags = tags.unwrap_or_default();
        unwrapped_tags.replace("metric_type".to_owned(), metric_type.to_owned());

        if let Err(error_message) = influx_line_protocol(
            protocol_version,
            &fullname,
            Some(unwrapped_tags),
            fields,
            ts,
            &mut output,
        ) {
            emit!(InfluxdbEncodingError {
                error_message,
                count,
            });
        };
    }

    // Each record is followed by a separator byte; drop the trailing one so
    // the payload does not end with a dangling newline.
    if !output.is_empty() {
        output.truncate(output.len() - 1);
    }
    output
}
311
/// Maps a metric value to its `metric_type` tag value and the set of fields
/// to write for it.
///
/// Returns `None` for the fields only when a distribution yields no
/// statistics (e.g. no samples — see `encode_distribution`).
fn get_type_and_fields(
    value: &MetricValue,
    quantiles: &[f64],
) -> (&'static str, Option<HashMap<KeyString, Field>>) {
    match value {
        MetricValue::Counter { value } => ("counter", Some(to_fields(*value))),
        MetricValue::Gauge { value } => ("gauge", Some(to_fields(*value))),
        // Sets are reported as their cardinality.
        MetricValue::Set { values } => ("set", Some(to_fields(values.len() as f64))),
        MetricValue::AggregatedHistogram {
            buckets,
            count,
            sum,
        } => {
            // One `bucket_<upper_limit>` field per bucket, plus count/sum.
            let mut fields: HashMap<KeyString, Field> = buckets
                .iter()
                .map(|sample| {
                    (
                        format!("bucket_{}", sample.upper_limit).into(),
                        Field::UnsignedInt(sample.count),
                    )
                })
                .collect();
            fields.insert("count".into(), Field::UnsignedInt(*count));
            fields.insert("sum".into(), Field::Float(*sum));

            ("histogram", Some(fields))
        }
        MetricValue::AggregatedSummary {
            quantiles,
            count,
            sum,
        } => {
            // One `quantile_<q>` field per pre-aggregated quantile.
            let mut fields: HashMap<KeyString, Field> = quantiles
                .iter()
                .map(|quantile| {
                    (
                        format!("quantile_{}", quantile.quantile).into(),
                        Field::Float(quantile.value),
                    )
                })
                .collect();
            fields.insert("count".into(), Field::UnsignedInt(*count));
            fields.insert("sum".into(), Field::Float(*sum));

            ("summary", Some(fields))
        }
        MetricValue::Distribution { samples, statistic } => {
            // Histogram-style distributions report only p95; summary-style
            // ones use the configured quantiles.
            let quantiles = match statistic {
                StatisticKind::Histogram => &[0.95] as &[_],
                StatisticKind::Summary => quantiles,
            };
            let fields = encode_distribution(samples, quantiles);
            ("distribution", fields)
        }
        MetricValue::Sketch { sketch } => match sketch {
            MetricSketch::AgentDDSketch(ddsketch) => {
                // Fixed quantile set for sketches; missing values fall back
                // to 0.0 (and min/max to the widest float bounds).
                let mut fields = [0.5, 0.75, 0.9, 0.99]
                    .iter()
                    .map(|q| {
                        let quantile = Quantile {
                            quantile: *q,
                            value: ddsketch.quantile(*q).unwrap_or(0.0),
                        };
                        (
                            quantile.to_percentile_string().into(),
                            Field::Float(quantile.value),
                        )
                    })
                    .collect::<HashMap<KeyString, _>>();
                fields.insert(
                    "count".into(),
                    Field::UnsignedInt(u64::from(ddsketch.count())),
                );
                fields.insert(
                    "min".into(),
                    Field::Float(ddsketch.min().unwrap_or(f64::MAX)),
                );
                fields.insert(
                    "max".into(),
                    Field::Float(ddsketch.max().unwrap_or(f64::MIN)),
                );
                fields.insert("sum".into(), Field::Float(ddsketch.sum().unwrap_or(0.0)));
                fields.insert("avg".into(), Field::Float(ddsketch.avg().unwrap_or(0.0)));

                ("sketch", Some(fields))
            }
        },
    }
}
403
404fn encode_distribution(samples: &[Sample], quantiles: &[f64]) -> Option<HashMap<KeyString, Field>> {
405 let statistic = DistributionStatistic::from_samples(samples, quantiles)?;
406
407 Some(
408 [
409 ("min".into(), Field::Float(statistic.min)),
410 ("max".into(), Field::Float(statistic.max)),
411 ("median".into(), Field::Float(statistic.median)),
412 ("avg".into(), Field::Float(statistic.avg)),
413 ("sum".into(), Field::Float(statistic.sum)),
414 ("count".into(), Field::Float(statistic.count as f64)),
415 ]
416 .into_iter()
417 .chain(
418 statistic
419 .quantiles
420 .iter()
421 .map(|&(p, val)| (format!("quantile_{p:.2}").into(), Field::Float(val))),
422 )
423 .collect(),
424 )
425}
426
427fn to_fields(value: f64) -> HashMap<KeyString, Field> {
428 [("value".into(), Field::Float(value))]
429 .into_iter()
430 .collect()
431}
432
#[cfg(test)]
mod tests {
    // Unit tests covering config deserialization and line-protocol encoding
    // for every metric value type, in both v1 (`i` integers) and v2 (`u`
    // unsigned) flavors.
    use indoc::indoc;
    use similar_asserts::assert_eq;

    use super::*;
    use crate::{
        event::metric::{Metric, MetricKind, MetricValue, StatisticKind},
        sinks::influxdb::test_util::{assert_fields, split_line_protocol, tags, ts},
    };

    #[test]
    fn generate_config() {
        crate::test_util::test_generate_config::<InfluxDbConfig>();
    }

    // The legacy `namespace` key must still deserialize (alias of
    // `default_namespace`).
    #[test]
    fn test_config_with_tags() {
        let config = indoc! {r#"
            namespace = "vector"
            endpoint = "http://localhost:9999"
            tags = {region="us-west-1"}
        "#};

        toml::from_str::<InfluxDbConfig>(config).unwrap();
    }

    // Event namespaces ("ns") take precedence over the configured default
    // ("vector"); records are newline-joined with no trailing newline.
    #[test]
    fn test_encode_counter() {
        let events = vec![
            Metric::new(
                "total",
                MetricKind::Incremental,
                MetricValue::Counter { value: 1.5 },
            )
            .with_namespace(Some("ns"))
            .with_timestamp(Some(ts())),
            Metric::new(
                "check",
                MetricKind::Incremental,
                MetricValue::Counter { value: 1.0 },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, Some("vector"), None, &[]);
        assert_eq!(
            line_protocols,
            "ns.total,metric_type=counter value=1.5 1542182950000000011\n\
             ns.check,metric_type=counter,normal_tag=value,true_tag=true value=1 1542182950000000011"
        );
    }

    #[test]
    fn test_encode_gauge() {
        let events = vec![
            Metric::new(
                "meter",
                MetricKind::Incremental,
                MetricValue::Gauge { value: -1.5 },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(
            line_protocols,
            "ns.meter,metric_type=gauge,normal_tag=value,true_tag=true value=-1.5 1542182950000000011"
        );
    }

    // Sets encode as their cardinality (two distinct members -> value=2).
    #[test]
    fn test_encode_set() {
        let events = vec![
            Metric::new(
                "users",
                MetricKind::Incremental,
                MetricValue::Set {
                    values: vec!["alice".into(), "bob".into()].into_iter().collect(),
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(
            line_protocols,
            "ns.users,metric_type=set,normal_tag=value,true_tag=true value=2 1542182950000000011"
        );
    }

    // v1 renders unsigned counts with the `i` (integer) suffix.
    #[test]
    fn test_encode_histogram_v1() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Absolute,
                MetricValue::AggregatedHistogram {
                    buckets: vector_lib::buckets![1.0 => 1, 2.1 => 2, 3.0 => 3],
                    count: 6,
                    sum: 12.5,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V1, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol1.0);
        assert_eq!(
            "metric_type=histogram,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "bucket_1=1i",
                "bucket_2.1=2i",
                "bucket_3=3i",
                "count=6i",
                "sum=12.5",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    // v2 renders unsigned counts with the `u` (unsigned) suffix.
    #[test]
    fn test_encode_histogram() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Absolute,
                MetricValue::AggregatedHistogram {
                    buckets: vector_lib::buckets![1.0 => 1, 2.1 => 2, 3.0 => 3],
                    count: 6,
                    sum: 12.5,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol1.0);
        assert_eq!(
            "metric_type=histogram,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "bucket_1=1u",
                "bucket_2.1=2u",
                "bucket_3=3u",
                "count=6u",
                "sum=12.5",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    #[test]
    fn test_encode_summary_v1() {
        let events = vec![
            Metric::new(
                "requests_sum",
                MetricKind::Absolute,
                MetricValue::AggregatedSummary {
                    quantiles: vector_lib::quantiles![0.01 => 1.5, 0.5 => 2.0, 0.99 => 3.0],
                    count: 6,
                    sum: 12.0,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V1, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests_sum", line_protocol1.0);
        assert_eq!(
            "metric_type=summary,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "count=6i",
                "quantile_0.01=1.5",
                "quantile_0.5=2",
                "quantile_0.99=3",
                "sum=12",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    #[test]
    fn test_encode_summary() {
        let events = vec![
            Metric::new(
                "requests_sum",
                MetricKind::Absolute,
                MetricValue::AggregatedSummary {
                    quantiles: vector_lib::quantiles![0.01 => 1.5, 0.5 => 2.0, 0.99 => 3.0],
                    count: 6,
                    sum: 12.0,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests_sum", line_protocol1.0);
        assert_eq!(
            "metric_type=summary,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "count=6u",
                "quantile_0.01=1.5",
                "quantile_0.5=2",
                "quantile_0.99=3",
                "sum=12",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);
    }

    // Histogram-style distributions report only the fixed p95 quantile.
    #[test]
    fn test_encode_distribution() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: vector_lib::samples![1.0 => 3, 2.0 => 3, 3.0 => 2],
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
            Metric::new(
                "dense_stats",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: (0..20)
                        .map(|v| Sample {
                            value: f64::from(v),
                            rate: 1,
                        })
                        .collect(),
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_timestamp(Some(ts())),
            Metric::new(
                "sparse_stats",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: (1..5)
                        .map(|v| Sample {
                            value: f64::from(v),
                            rate: v,
                        })
                        .collect(),
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 3);

        let line_protocol1 = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol1.0);
        assert_eq!(
            "metric_type=distribution,normal_tag=value,true_tag=true",
            line_protocol1.1
        );
        assert_fields(
            line_protocol1.2.to_string(),
            [
                "avg=1.875",
                "count=8",
                "max=3",
                "median=2",
                "min=1",
                "quantile_0.95=3",
                "sum=15",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol1.3);

        let line_protocol2 = split_line_protocol(line_protocols[1]);
        assert_eq!("ns.dense_stats", line_protocol2.0);
        assert_eq!("metric_type=distribution", line_protocol2.1);
        assert_fields(
            line_protocol2.2.to_string(),
            [
                "avg=9.5",
                "count=20",
                "max=19",
                "median=9",
                "min=0",
                "quantile_0.95=18",
                "sum=190",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol2.3);

        let line_protocol3 = split_line_protocol(line_protocols[2]);
        assert_eq!("ns.sparse_stats", line_protocol3.0);
        assert_eq!("metric_type=distribution", line_protocol3.1);
        assert_fields(
            line_protocol3.2.to_string(),
            [
                "avg=3",
                "count=10",
                "max=4",
                "median=3",
                "min=1",
                "quantile_0.95=4",
                "sum=30",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol3.3);
    }

    // A distribution with no samples produces no output at all.
    #[test]
    fn test_encode_distribution_empty_stats() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: vec![],
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(line_protocols.len(), 0);
    }

    // All-zero sample rates likewise yield no statistics and no output.
    #[test]
    fn test_encode_distribution_zero_counts_stats() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: vector_lib::samples![1.0 => 0, 2.0 => 0],
                    statistic: StatisticKind::Histogram,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(ProtocolVersion::V2, events, None, None, &[]);
        assert_eq!(line_protocols.len(), 0);
    }

    // Summary-style distributions use the configured quantile list.
    #[test]
    fn test_encode_distribution_summary() {
        let events = vec![
            Metric::new(
                "requests",
                MetricKind::Incremental,
                MetricValue::Distribution {
                    samples: vector_lib::samples![1.0 => 3, 2.0 => 3, 3.0 => 2],
                    statistic: StatisticKind::Summary,
                },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let line_protocols = encode_events(
            ProtocolVersion::V2,
            events,
            None,
            None,
            &default_summary_quantiles(),
        );
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 1);

        let line_protocol = split_line_protocol(line_protocols[0]);
        assert_eq!("ns.requests", line_protocol.0);
        assert_eq!(
            "metric_type=distribution,normal_tag=value,true_tag=true",
            line_protocol.1
        );
        assert_fields(
            line_protocol.2.to_string(),
            [
                "avg=1.875",
                "count=8",
                "max=3",
                "median=2",
                "min=1",
                "sum=15",
                "quantile_0.50=2",
                "quantile_0.75=2",
                "quantile_0.90=3",
                "quantile_0.95=3",
                "quantile_0.99=3",
            ]
            .to_vec(),
        );
        assert_eq!("1542182950000000011", line_protocol.3);
    }

    // Config-level tags are merged into every event's tag set.
    #[test]
    fn test_encode_with_some_tags() {
        crate::test_util::trace_init();

        let events = vec![
            Metric::new(
                "cpu",
                MetricKind::Absolute,
                MetricValue::Gauge { value: 2.5 },
            )
            .with_namespace(Some("vector"))
            .with_timestamp(Some(ts())),
            Metric::new(
                "mem",
                MetricKind::Absolute,
                MetricValue::Gauge { value: 1000.0 },
            )
            .with_namespace(Some("vector"))
            .with_tags(Some(tags()))
            .with_timestamp(Some(ts())),
        ];

        let mut tags = HashMap::new();
        tags.insert("host".to_owned(), "local".to_owned());
        tags.insert("datacenter".to_owned(), "us-east".to_owned());

        let line_protocols = encode_events(
            ProtocolVersion::V1,
            events,
            Some("ns"),
            Some(tags).as_ref(),
            &[],
        );
        let line_protocols =
            String::from_utf8(line_protocols.freeze().as_ref().to_owned()).unwrap();
        let line_protocols: Vec<&str> = line_protocols.split('\n').collect();
        assert_eq!(line_protocols.len(), 2);
        assert_eq!(
            line_protocols[0],
            "vector.cpu,datacenter=us-east,host=local,metric_type=gauge value=2.5 1542182950000000011"
        );
        assert_eq!(
            line_protocols[1],
            "vector.mem,datacenter=us-east,host=local,metric_type=gauge,normal_tag=value,true_tag=true value=1000 1542182950000000011"
        );
    }
}
953
#[cfg(feature = "influxdb-integration-tests")]
#[cfg(test)]
mod integration_tests {
    // Integration tests that write to live InfluxDB v1/v2 instances
    // (addresses supplied by `test_util`) and read the data back.
    use chrono::{SecondsFormat, Utc};
    use futures::stream;
    use similar_asserts::assert_eq;
    use vector_lib::metric_tags;

    use crate::{
        config::{SinkConfig, SinkContext},
        event::{
            Event,
            metric::{Metric, MetricKind, MetricValue},
        },
        http::HttpClient,
        sinks::influxdb::{
            InfluxDb1Settings, InfluxDb2Settings,
            metrics::{InfluxDbConfig, InfluxDbSvc, default_summary_quantiles},
            test_util::{
                BUCKET, ORG, TOKEN, address_v1, address_v2, cleanup_v1, format_timestamp,
                onboarding_v1, onboarding_v2, query_v1,
            },
        },
        test_util::components::{HTTP_SINK_TAGS, run_and_assert_sink_compliance},
        tls::{self, TlsConfig},
    };

    #[tokio::test]
    async fn inserts_metrics_v1_over_https() {
        insert_metrics_v1(
            address_v1(true).as_str(),
            Some(TlsConfig {
                ca_file: Some(tls::TEST_PEM_CA_PATH.into()),
                ..Default::default()
            }),
        )
        .await
    }

    #[tokio::test]
    async fn inserts_metrics_v1_over_http() {
        insert_metrics_v1(address_v1(false).as_str(), None).await
    }

    // Shared body for the v1 tests: writes ten counters, then verifies the
    // series and each stored point via the query API.
    async fn insert_metrics_v1(url: &str, tls: Option<TlsConfig>) {
        crate::test_util::trace_init();
        let database = onboarding_v1(url).await;

        let cx = SinkContext::default();

        let config = InfluxDbConfig {
            endpoint: url.to_string(),
            influxdb1_settings: Some(InfluxDb1Settings {
                consistency: None,
                database: database.clone(),
                retention_policy_name: Some("autogen".to_string()),
                username: None,
                password: None,
            }),
            influxdb2_settings: None,
            batch: Default::default(),
            request: Default::default(),
            tls,
            quantiles: default_summary_quantiles(),
            tags: None,
            default_namespace: None,
            acknowledgements: Default::default(),
        };

        let events: Vec<_> = (0..10).map(create_event).collect();
        let (sink, _) = config.build(cx).await.expect("error when building config");
        run_and_assert_sink_compliance(sink, stream::iter(events.clone()), &HTTP_SINK_TAGS).await;

        let res = query_v1_json(url, &format!("show series on {database}")).await;

        // One series per distinct metric name/tag set.
        assert_eq!(
            res["results"][0]["series"][0]["values"]
                .as_array()
                .unwrap()
                .len(),
            events.len()
        );

        for event in events {
            let metric = event.into_metric();
            let name = format!("{}.{}", metric.namespace().unwrap(), metric.name());
            let value = match metric.value() {
                MetricValue::Counter { value } => *value,
                _ => unreachable!(),
            };
            let timestamp = format_timestamp(metric.timestamp().unwrap(), SecondsFormat::Nanos);
            let res = query_v1_json(url, &format!("select * from {database}..\"{name}\"")).await;

            assert_eq!(
                res,
                serde_json::json! {
                    {"results": [{
                        "statement_id": 0,
                        "series": [{
                            "name": name,
                            "columns": ["time", "metric_type", "production", "region", "value"],
                            "values": [[timestamp, "counter", "true", "us-west-1", value as isize]]
                        }]
                    }]}
                }
            );
        }

        cleanup_v1(url, &database).await;
    }

    // Runs an InfluxQL query and parses the response body as JSON.
    async fn query_v1_json(url: &str, query: &str) -> serde_json::Value {
        let string = query_v1(url, query)
            .await
            .text()
            .await
            .expect("Fetching text from InfluxDB query failed");
        serde_json::from_str(&string).expect("Error when parsing InfluxDB response JSON")
    }

    #[tokio::test]
    async fn influxdb2_metrics_put_data() {
        crate::test_util::trace_init();
        let endpoint = address_v2();
        onboarding_v2(&endpoint).await;

        let cx = SinkContext::default();

        let config = InfluxDbConfig {
            endpoint,
            influxdb1_settings: None,
            influxdb2_settings: Some(InfluxDb2Settings {
                org: ORG.to_string(),
                bucket: BUCKET.to_string(),
                token: TOKEN.to_string().into(),
            }),
            quantiles: default_summary_quantiles(),
            batch: Default::default(),
            request: Default::default(),
            tags: None,
            tls: None,
            default_namespace: None,
            acknowledgements: Default::default(),
        };

        // Unique metric name per run so reruns don't collide in the bucket.
        let metric = format!(
            "counter-{}",
            Utc::now()
                .timestamp_nanos_opt()
                .expect("Timestamp out of range")
        );
        let mut events = Vec::new();
        for i in 0..10 {
            let event = Event::Metric(
                Metric::new(
                    metric.clone(),
                    MetricKind::Incremental,
                    MetricValue::Counter { value: i as f64 },
                )
                .with_namespace(Some("ns"))
                .with_tags(Some(metric_tags!(
                    "region" => "us-west-1",
                    "production" => "true",
                ))),
            );
            events.push(event);
        }

        let client = HttpClient::new(None, cx.proxy()).unwrap();
        let sink = InfluxDbSvc::new(config, client).unwrap();
        run_and_assert_sink_compliance(sink, stream::iter(events), &HTTP_SINK_TAGS).await;

        let mut body = std::collections::HashMap::new();
        body.insert("query", format!("from(bucket:\"my-bucket\") |> range(start: 0) |> filter(fn: (r) => r._measurement == \"ns.{metric}\")"));
        body.insert("type", "flux".to_owned());

        let client = reqwest::Client::builder()
            .danger_accept_invalid_certs(true)
            .build()
            .unwrap();

        let res = client
            .post(format!("{}/api/v2/query?org=my-org", address_v2()))
            .json(&body)
            .header("accept", "application/json")
            .header("Authorization", "Token my-token")
            .send()
            .await
            .unwrap();
        let string = res.text().await.unwrap();

        // Flux responses are CSV; row 0 is the header, row 1 the first record.
        let lines = string.split('\n').collect::<Vec<&str>>();
        let header = lines[0].split(',').collect::<Vec<&str>>();
        let record = lines[1].split(',').collect::<Vec<&str>>();

        assert_eq!(
            record[header
                .iter()
                .position(|&r| r.trim() == "metric_type")
                .unwrap()]
            .trim(),
            "counter"
        );
        assert_eq!(
            record[header
                .iter()
                .position(|&r| r.trim() == "production")
                .unwrap()]
            .trim(),
            "true"
        );
        assert_eq!(
            record[header.iter().position(|&r| r.trim() == "region").unwrap()].trim(),
            "us-west-1"
        );
        assert_eq!(
            record[header
                .iter()
                .position(|&r| r.trim() == "_measurement")
                .unwrap()]
            .trim(),
            format!("ns.{}", metric)
        );
        assert_eq!(
            record[header.iter().position(|&r| r.trim() == "_field").unwrap()].trim(),
            "value"
        );
        // Incremental counters aggregate: 0+1+...+9 = 45.
        assert_eq!(
            record[header.iter().position(|&r| r.trim() == "_value").unwrap()].trim(),
            "45"
        );
    }

    // Builds one incremental counter event tagged for the v1 tests.
    fn create_event(i: i32) -> Event {
        Event::Metric(
            Metric::new(
                format!("counter-{i}"),
                MetricKind::Incremental,
                MetricValue::Counter { value: i as f64 },
            )
            .with_namespace(Some("ns"))
            .with_tags(Some(metric_tags!(
                "region" => "us-west-1",
                "production" => "true",
            )))
            .with_timestamp(Some(Utc::now())),
        )
    }
}