vector/sinks/util/retries.rs

use std::{
    borrow::Cow,
    cmp,
    future::Future,
    pin::Pin,
    task::{Context, Poll},
    time::Duration,
};

use futures::FutureExt;
use tokio::time::{Sleep, sleep};
use tower::{retry::Policy, timeout::error::Elapsed};
use vector_lib::configurable::configurable_component;

use crate::Error;

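/// The action a [`RetryLogic`] implementation decides on after inspecting a response.
///
/// A sketch of constructing a partial retry (the `Vec<String>` request type here is
/// purely illustrative):
///
/// ```ignore
/// // Re-send only the first half of the original request on the next attempt.
/// let action: RetryAction<Vec<String>> = RetryAction::RetryPartial(Box::new(|mut req| {
///     req.truncate(req.len() / 2);
///     req
/// }));
/// ```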
pub enum RetryAction<Request = ()> {
    /// Indicate that this request should be retried with a reason
    Retry(Cow<'static, str>),
    /// Indicate that a portion of this request should be retried with a generic function
    RetryPartial(Box<dyn Fn(Request) -> Request + Send + Sync>),
    /// Indicate that this request should not be retried with a reason
    DontRetry(Cow<'static, str>),
    /// Indicate that this request should not be retried but the request was successful
    Successful,
}

pub trait RetryLogic: Clone + Send + Sync + 'static {
    type Error: std::error::Error + Send + Sync + 'static;
    type Request;
    type Response;

    /// When the Service call returns an `Err` response, this function allows
    /// implementors to specify what kinds of errors can be retried.
    fn is_retriable_error(&self, error: &Self::Error) -> bool;

    /// When the Service call returns an `Ok` response, this function allows
    /// implementors to specify additional logic that determines whether the success
    /// response is actually an error. This is particularly useful when the downstream
    /// service of a sink returns a transport-protocol-level success but carries error
    /// data in the response body: for example, an HTTP 200 status whose body contains
    /// a list of errors encountered while processing.
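    ///
    /// A minimal sketch (the `errors` field on the response type below is hypothetical,
    /// not part of this module):
    ///
    /// ```ignore
    /// fn should_retry_response(&self, response: &Self::Response) -> RetryAction<Self::Request> {
    ///     if response.errors.is_empty() {
    ///         RetryAction::Successful
    ///     } else {
    ///         RetryAction::Retry(format!("{} items failed", response.errors.len()).into())
    ///     }
    /// }
    /// ```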
    fn should_retry_response(&self, _response: &Self::Response) -> RetryAction<Self::Request> {
        // By default, treat the response as successful.
        RetryAction::Successful
    }

    /// Optional hook run when an error is determined to be retriable.
    fn on_retriable_error(&self, _error: &Self::Error) {}
}

/// The jitter mode to use for retry backoff behavior.
#[configurable_component]
#[derive(Clone, Copy, Debug, Default)]
pub enum JitterMode {
    /// No jitter.
    None,

    /// Full jitter.
    ///
    /// The random delay is anywhere from 0 up to the maximum current delay calculated by the backoff
    /// strategy.
    ///
    /// Incorporating full jitter into your backoff strategy can greatly reduce the likelihood
    /// of creating accidental denial of service (DoS) conditions against your own systems when
    /// many clients are recovering from a failure state.
    #[default]
    Full,
}

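/// A retry policy in which the delay between attempts grows along a Fibonacci
/// sequence: it starts at `initial_backoff`, is capped at `max_duration`, and can
/// optionally have full jitter applied to each computed delay.
///
/// A minimal usage sketch (the `logic` and `inner` values are illustrative):
///
/// ```ignore
/// let policy = FibonacciRetryPolicy::new(
///     5,
///     Duration::from_secs(1),
///     Duration::from_secs(30),
///     logic,
///     JitterMode::Full,
/// );
/// let svc = tower::ServiceBuilder::new()
///     .layer(tower::retry::RetryLayer::new(policy))
///     .service(inner);
/// ```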
#[derive(Debug, Clone)]
pub struct FibonacciRetryPolicy<L> {
    remaining_attempts: usize,
    previous_duration: Duration,
    current_duration: Duration,
    jitter_mode: JitterMode,
    current_jitter_duration: Duration,
    max_duration: Duration,
    logic: L,
}

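/// The delay future returned by [`FibonacciRetryPolicy::retry`]; the tower retry
/// middleware awaits it before re-dispatching the cloned request.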
pub struct RetryPolicyFuture {
    delay: Pin<Box<Sleep>>,
}

impl<L: RetryLogic> FibonacciRetryPolicy<L> {
    pub fn new(
        remaining_attempts: usize,
        initial_backoff: Duration,
        max_duration: Duration,
        logic: L,
        jitter_mode: JitterMode,
    ) -> Self {
        FibonacciRetryPolicy {
            remaining_attempts,
            previous_duration: Duration::from_secs(0),
            current_duration: initial_backoff,
            jitter_mode,
            current_jitter_duration: Self::add_full_jitter(initial_backoff),
            max_duration,
            logic,
        }
    }

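    // Full jitter: pick a random delay between 1 millisecond and `d` (inclusive).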
    fn add_full_jitter(d: Duration) -> Duration {
        let jitter = (rand::random::<u64>() % (d.as_millis() as u64)) + 1;
        Duration::from_millis(jitter)
    }

    const fn backoff(&self) -> Duration {
        match self.jitter_mode {
            JitterMode::None => self.current_duration,
            JitterMode::Full => self.current_jitter_duration,
        }
    }

    fn advance(&mut self) {
        let sum = self
            .previous_duration
            .checked_add(self.current_duration)
            .unwrap_or(Duration::MAX);
        let next_duration = cmp::min(sum, self.max_duration);
        self.remaining_attempts = self.remaining_attempts.saturating_sub(1);
        self.previous_duration = self.current_duration;
        self.current_duration = next_duration;
        self.current_jitter_duration = Self::add_full_jitter(next_duration);
    }

    fn build_retry(&mut self) -> RetryPolicyFuture {
        self.advance();
        let delay = Box::pin(sleep(self.backoff()));

        debug!(message = "Retrying request.", delay_ms = %self.backoff().as_millis());
        RetryPolicyFuture { delay }
    }
}

impl<Req, Res, L> Policy<Req, Res, Error> for FibonacciRetryPolicy<L>
where
    Req: Clone + Send + 'static,
    L: RetryLogic<Request = Req, Response = Res>,
{
    type Future = RetryPolicyFuture;

    // NOTE: In the error cases, the `Error` and `EventsDropped` internal events are
    // emitted by the driver, so we only need to log here.
    fn retry(&mut self, req: &mut Req, result: &mut Result<Res, Error>) -> Option<Self::Future> {
        match result {
            Ok(response) => match self.logic.should_retry_response(response) {
                RetryAction::Retry(reason) => {
                    if self.remaining_attempts == 0 {
                        error!(
                            message = "OK/retry response but retries exhausted; dropping the request.",
                            reason = ?reason,
                        );
                        return None;
                    }

                    warn!(message = "Retrying after response.", reason = %reason);
                    Some(self.build_retry())
                }
                RetryAction::RetryPartial(modify_request) => {
                    if self.remaining_attempts == 0 {
                        error!(
                            message =
                                "OK/retry response but retries exhausted; dropping the request.",
                        );
                        return None;
                    }
                    *req = modify_request(req.clone());
                    warn!("OK/retrying partial after response.");
                    Some(self.build_retry())
                }
                RetryAction::DontRetry(reason) => {
                    error!(message = "Not retriable; dropping the request.", ?reason);
                    None
                }

                RetryAction::Successful => None,
            },
            Err(error) => {
                if self.remaining_attempts == 0 {
                    error!(message = "Retries exhausted; dropping the request.", %error);
                    return None;
                }

                if let Some(expected) = error.downcast_ref::<L::Error>() {
                    if self.logic.is_retriable_error(expected) {
                        self.logic.on_retriable_error(expected);
                        warn!(message = "Retrying after error.", error = %expected);
                        Some(self.build_retry())
                    } else {
                        error!(
                            message = "Non-retriable error; dropping the request.",
                            %error,
                        );
                        None
                    }
                } else if error.downcast_ref::<Elapsed>().is_some() {
                    warn!(
                        "Request timed out. If this happens often while the events are actually reaching their destination, try decreasing `batch.max_bytes` and/or using `compression` if applicable. Alternatively `request.timeout_secs` can be increased."
                    );
                    Some(self.build_retry())
                } else {
                    error!(
                        message = "Unexpected error type; dropping the request.",
                        %error
                    );
                    None
                }
            }
        }
    }

    fn clone_request(&mut self, request: &Req) -> Option<Req> {
        Some(request.clone())
    }
}

// Safety: `RetryPolicyFuture`'s only field is a `Pin<Box<Sleep>>`, which is itself
// `Unpin`, and we use no unsafe pin projections, therefore this is safe.
impl Unpin for RetryPolicyFuture {}

impl Future for RetryPolicyFuture {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        std::task::ready!(self.delay.poll_unpin(cx));
        Poll::Ready(())
    }
}

impl<Request> RetryAction<Request> {
    pub const fn is_retryable(&self) -> bool {
        matches!(self, RetryAction::Retry(_) | RetryAction::RetryPartial(_))
    }

    pub const fn is_not_retryable(&self) -> bool {
        matches!(self, RetryAction::DontRetry(_))
    }

    pub const fn is_successful(&self) -> bool {
        matches!(self, RetryAction::Successful)
    }
}

#[cfg(test)]
mod tests {
    use std::{fmt, time::Duration};

    use tokio::time;
    use tokio_test::{assert_pending, assert_ready_err, assert_ready_ok, task};
    use tower::retry::RetryLayer;
    use tower_test::{assert_request_eq, mock};

    use super::*;
    use crate::test_util::trace_init;

    #[tokio::test]
    async fn service_error_retry() {
        trace_init();

        time::pause();

        let policy = FibonacciRetryPolicy::new(
            5,
            Duration::from_secs(1),
            Duration::from_secs(10),
            SvcRetryLogic,
            JitterMode::None,
        );

        let (mut svc, mut handle) = mock::spawn_layer(RetryLayer::new(policy));

        assert_ready_ok!(svc.poll_ready());

        let fut = svc.call("hello");
        let mut fut = task::spawn(fut);

        assert_request_eq!(handle, "hello").send_error(Error(true));

        assert_pending!(fut.poll());

        time::advance(Duration::from_secs(2)).await;
        assert_pending!(fut.poll());

        assert_request_eq!(handle, "hello").send_response("world");
        assert_eq!(fut.await.unwrap(), "world");
    }

    #[tokio::test]
    async fn service_error_no_retry() {
        trace_init();

        let policy = FibonacciRetryPolicy::new(
            5,
            Duration::from_secs(1),
            Duration::from_secs(10),
            SvcRetryLogic,
            JitterMode::None,
        );

        let (mut svc, mut handle) = mock::spawn_layer(RetryLayer::new(policy));

        assert_ready_ok!(svc.poll_ready());

        let mut fut = task::spawn(svc.call("hello"));
        assert_request_eq!(handle, "hello").send_error(Error(false));
        assert_ready_err!(fut.poll());
    }

    #[tokio::test]
    async fn timeout_error() {
        trace_init();

        time::pause();

        let policy = FibonacciRetryPolicy::new(
            5,
            Duration::from_secs(1),
            Duration::from_secs(10),
            SvcRetryLogic,
            JitterMode::None,
        );

        let (mut svc, mut handle) = mock::spawn_layer(RetryLayer::new(policy));

        assert_ready_ok!(svc.poll_ready());

        let mut fut = task::spawn(svc.call("hello"));
        assert_request_eq!(handle, "hello").send_error(Elapsed::new());
        assert_pending!(fut.poll());

        time::advance(Duration::from_secs(2)).await;
        assert_pending!(fut.poll());

        assert_request_eq!(handle, "hello").send_response("world");
        assert_eq!(fut.await.unwrap(), "world");
    }

    #[test]
    fn backoff_grows_to_max() {
        let mut policy = FibonacciRetryPolicy::new(
            10,
            Duration::from_secs(1),
            Duration::from_secs(10),
            SvcRetryLogic,
            JitterMode::None,
        );
        assert_eq!(Duration::from_secs(1), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(1), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(2), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(3), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(5), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(8), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(10), policy.backoff());

        policy.advance();
        assert_eq!(Duration::from_secs(10), policy.backoff());
    }

    #[test]
    fn backoff_grows_to_max_with_jitter() {
        let max_duration = Duration::from_secs(10);
        let mut policy = FibonacciRetryPolicy::new(
            10,
            Duration::from_secs(1),
            max_duration,
            SvcRetryLogic,
            JitterMode::Full,
        );

        let expected_fib = [1, 1, 2, 3, 5, 8];

        for (i, &exp_fib_secs) in expected_fib.iter().enumerate() {
            let backoff = policy.backoff();
            let upper_bound = Duration::from_secs(exp_fib_secs);

            // Check if the backoff is within the expected range, considering the jitter
            assert!(
                !backoff.is_zero() && backoff <= upper_bound,
                "Attempt {}: Expected backoff to be within 0 and {:?}, got {:?}",
                i + 1,
                upper_bound,
                backoff
            );

            policy.advance();
        }

        // Once the max backoff is reached, it should not exceed the max backoff.
        for _ in 0..4 {
            let backoff = policy.backoff();
            assert!(
                !backoff.is_zero() && backoff <= max_duration,
                "Expected backoff to not exceed {max_duration:?}, got {backoff:?}"
            );

            policy.advance();
        }
    }

    #[derive(Debug, Clone)]
    struct SvcRetryLogic;

    impl RetryLogic for SvcRetryLogic {
        type Error = Error;
        type Request = &'static str;
        type Response = &'static str;

        fn is_retriable_error(&self, error: &Self::Error) -> bool {
            error.0
        }
    }

    #[derive(Debug)]
    struct Error(bool);

    impl fmt::Display for Error {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "error")
        }
    }

    impl std::error::Error for Error {}
}