vector_buffers/variants/disk_v2/
common.rs

use std::{
    path::{Path, PathBuf},
    time::Duration,
};

use crc32fast::Hasher;
use snafu::Snafu;

use super::{
    io::{Filesystem, ProductionFilesystem},
    ledger::LEDGER_LEN,
    record::RECORD_HEADER_LEN,
};

// We don't want data files to be bigger than 128MB, but we might end up overshooting slightly.
pub const DEFAULT_MAX_DATA_FILE_SIZE: usize = 128 * 1024 * 1024;

// We allow records to be as large as a data file.
pub const DEFAULT_MAX_RECORD_SIZE: usize = DEFAULT_MAX_DATA_FILE_SIZE;

// The maximum record size has to be bigger than the record header itself, since we count the record header towards
// sizing/space usage, and so on. We also use the overaligned version here to make sure we're similarly accounting for
// what `rkyv` will do when we serialize a record.
pub const MINIMUM_MAX_RECORD_SIZE: usize = align16(RECORD_HEADER_LEN + 1);

// We want to ensure a reasonable time before we `fsync`/flush to disk, and 500ms should provide that for non-critical
// workloads.
//
// Practically, this is far more definitive than `disk_v1`, which does not reliably `fsync` at all, at least with how
// we have it configured.
pub const DEFAULT_FLUSH_INTERVAL: Duration = Duration::from_millis(500);

// Using 256KB as it aligns nicely with the I/O size exposed by major cloud providers.  This may not
// be the underlying block size used by the OS, but it still aligns well with what will happen on
// the "backend" for cloud providers, which is simply a useful default for when we want to look at
// buffer throughput and estimate how many IOPS will be consumed, etc.
pub const DEFAULT_WRITE_BUFFER_SIZE: usize = 256 * 1024;

// We specifically limit ourselves to a small range of file IDs in tests, because it lets us more quickly
// create/consume the file IDs so we can test edge cases like file ID rollover and "writer is
// waiting to open a file that the reader is still on".
#[cfg(not(test))]
pub const MAX_FILE_ID: u16 = u16::MAX;
#[cfg(test)]
pub const MAX_FILE_ID: u16 = 6;

// The alignment used by the record serializer.
const SERIALIZER_ALIGNMENT: usize = 16;
const MAX_ALIGNABLE_AMOUNT: usize = usize::MAX - SERIALIZER_ALIGNMENT;

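/// Creates a new hasher for computing record checksums.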
pub(crate) fn create_crc32c_hasher() -> Hasher {
    crc32fast::Hasher::new()
}

/// Aligns the given amount to 16.
///
/// This is required due to the overalignment used in record serialization, such that we can correctly determine minimum
/// on-disk sizes for various elements, and account for those in size limits, etc.
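///
/// For example, `align16(17)` returns 32, while `align16(32)` returns 32 unchanged.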
pub(crate) const fn align16(amount: usize) -> usize {
    // The amount must be less than `MAX_ALIGNABLE_AMOUNT`, otherwise we'll overflow trying to align it, ending up with a
    // nonsensical value.
    assert!(
        amount <= MAX_ALIGNABLE_AMOUNT,
        "`amount` must be less than `MAX_ALIGNABLE_AMOUNT`"
    );

    amount.div_ceil(SERIALIZER_ALIGNMENT) * SERIALIZER_ALIGNMENT
}

/// Gets the maximum possible data file size given the type-level numerical limits and buffer invariants.
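///
/// This works out to just under `u64::MAX / 2` bytes, since two maximally-sized data files plus the ledger state must
/// not overflow a `u64`.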
fn get_maximum_data_file_size() -> u64 {
    let ledger_len: u64 = LEDGER_LEN
        .try_into()
        .expect("Ledger length must fit within a `u64`.");
    (u64::MAX - ledger_len) / 2
}

/// Gets the minimum buffer size for the given maximum data file size.
///
/// This ensures that we are allowed to store enough bytes on-disk, as the buffer design requires being able to always
/// write to a minimum number of data files, etc. This also ensures that we're accounting for non-data file disk usage
/// so that we do not overrun the specified maximum buffer size when considering the sum total of files placed on disk.
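///
/// As an example, with a maximum data file size of 128MB, the minimum buffer size is 256MB plus the length of the
/// ledger state.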
fn get_minimum_buffer_size(max_data_file_size: u64) -> Option<u64> {
    // We're doing this fallible conversion back-and-forth because we have to interoperate with `u64` and `usize`, and
    // we need to ensure we're not getting values that can't be represented correctly in both types, as well as ensuring
    // we're not implicitly overflowing and generating nonsensical numbers.
    let ledger_len: u64 = LEDGER_LEN
        .try_into()
        .expect("Ledger length must fit within a `u64`.");

    // We always need to be able to allocate two data files, so the buffer size has to be at least as big as 2x data
    // files at their maximum allowed size, plus an allowance for the size of the ledger state itself.
    max_data_file_size
        .checked_mul(2)
        .and_then(|doubled| doubled.checked_add(ledger_len))
}

#[derive(Debug, Snafu)]
pub enum BuildError {
    #[snafu(display("parameter '{}' was invalid: {}", param_name, reason))]
    InvalidParameter {
        param_name: &'static str,
        reason: String,
    },
}

/// Buffer configuration.
#[derive(Clone, Debug)]
pub struct DiskBufferConfig<FS> {
    /// Directory where this buffer will write its files.
    ///
    /// Must be unique from all other buffers, whether within the same process or other Vector
    /// processes on the machine.
    pub(crate) data_dir: PathBuf,

    /// Maximum size, in bytes, that the buffer can consume.
    ///
    /// The actual maximum on-disk buffer size is this amount rounded up to the next multiple of
    /// `max_data_file_size`, but internally, the next multiple of `max_data_file_size` when
    /// rounding this amount _down_ is what gets used as the maximum buffer size.
    ///
    /// This ensures that we never use more than the documented "rounded to the next multiple"
    /// amount, as we must account for one full data file's worth of extra data.
    pub(crate) max_buffer_size: u64,

    /// Maximum size, in bytes, to target for each individual data file.
    ///
    /// This value is not strictly obeyed because we cannot know, ahead of encoding/serializing, if the
    /// free space a data file has is enough to hold the write.  In other words, we never attempt to
    /// write to a data file if it is as large or larger than this value, but we may write a record
    /// that causes a data file to exceed this value by as much as `max_record_size`.
    pub(crate) max_data_file_size: u64,

    /// Maximum size, in bytes, of an encoded record.
    ///
    /// Any record which, when encoded and serialized, is larger than this amount will not be written
    /// to the buffer.
    pub(crate) max_record_size: usize,

    /// Size, in bytes, of the writer's internal buffer.
    ///
    /// This buffer is used to coalesce writes to the underlying data file where possible, which in
    /// turn reduces the number of syscalls needed to issue writes to the underlying data file.
    pub(crate) write_buffer_size: usize,

    /// Flush interval for ledger and data files.
    ///
    /// While data is asynchronously flushed by the OS, and the reader/writer can perform a
    /// "hard" flush (aka `fsync`/`fdatasync`), the flush interval effectively controls the
    /// acceptable window of time for data loss.
    ///
    /// In the event that data had not yet been durably written to disk, and Vector crashed, the
    /// amount of data written since the last flush would be lost.
    pub(crate) flush_interval: Duration,

    /// Filesystem implementation for opening data files.
    ///
    /// We allow parameterizing the filesystem implementation for ease of testing.  The "filesystem"
    /// implementation essentially defines how we open and delete data files, as well as the type of
    /// the data file objects we get when opening a data file.
    pub(crate) filesystem: FS,
}

/// Builder for [`DiskBufferConfig`].
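///
/// A minimal usage sketch (path and sizes are purely illustrative):
///
/// ```text
/// let config = DiskBufferConfigBuilder::from_path("/var/lib/vector/buffers/example")
///     .max_buffer_size(1024 * 1024 * 1024)
///     .max_data_file_size(128 * 1024 * 1024)
///     .build()?;
/// ```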
#[derive(Clone, Debug)]
pub struct DiskBufferConfigBuilder<FS = ProductionFilesystem>
where
    FS: Filesystem,
{
    pub(crate) data_dir: PathBuf,
    pub(crate) max_buffer_size: Option<u64>,
    pub(crate) max_data_file_size: Option<u64>,
    pub(crate) max_record_size: Option<usize>,
    pub(crate) write_buffer_size: Option<usize>,
    pub(crate) flush_interval: Option<Duration>,
    pub(crate) filesystem: FS,
}

impl DiskBufferConfigBuilder {
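    /// Creates a builder for a buffer rooted at the given data directory.
    ///
    /// All other parameters are left unset and fall back to their defaults when `build` is called.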
    pub fn from_path<P>(data_dir: P) -> DiskBufferConfigBuilder
    where
        P: AsRef<Path>,
    {
        DiskBufferConfigBuilder {
            data_dir: data_dir.as_ref().to_path_buf(),
            max_buffer_size: None,
            max_data_file_size: None,
            max_record_size: None,
            write_buffer_size: None,
            flush_interval: None,
            filesystem: ProductionFilesystem,
        }
    }
}

impl<FS> DiskBufferConfigBuilder<FS>
where
    FS: Filesystem,
{
    /// Sets the maximum size, in bytes, that the buffer can consume.
    ///
    /// The actual maximum on-disk buffer size is this amount rounded up to the next multiple of
    /// `max_data_file_size`, but internally, the next multiple of `max_data_file_size` when
    /// rounding this amount _down_ is what gets used as the maximum buffer size.
    ///
    /// This ensures that we never use more than the documented "rounded to the next multiple"
    /// amount, as we must account for one full data file's worth of extra data.
    ///
    /// Defaults to `u64::MAX`, or effectively no limit.  Due to the internal design of the
    /// buffer, the effective maximum limit is around `max_data_file_size` * 2^16.
    #[allow(dead_code)]
    pub fn max_buffer_size(mut self, amount: u64) -> Self {
        self.max_buffer_size = Some(amount);
        self
    }

    /// Sets the maximum size, in bytes, to target for each individual data file.
    ///
    /// This value is not strictly obeyed because we cannot know, ahead of encoding/serializing, if the
    /// free space a data file has is enough to hold the write.  In other words, we never attempt to
    /// write to a data file if it is as large or larger than this value, but we may write a record
    /// that causes a data file to exceed this value by as much as `max_record_size`.
    ///
    /// Defaults to 128MB.
    #[allow(dead_code)]
    pub fn max_data_file_size(mut self, amount: u64) -> Self {
        self.max_data_file_size = Some(amount);
        self
    }

    /// Sets the maximum size, in bytes, of an encoded record.
    ///
    /// Any record which, when encoded and serialized, is larger than this amount will not be written
    /// to the buffer.
    ///
    /// Defaults to 128MB.
    #[allow(dead_code)]
    pub fn max_record_size(mut self, amount: usize) -> Self {
        self.max_record_size = Some(amount);
        self
    }

    /// Sets the size, in bytes, of the writer's internal buffer.
    ///
    /// This buffer is used to coalesce writes to the underlying data file where possible, which in
    /// turn reduces the number of syscalls needed to issue writes to the underlying data file.
    ///
    /// Defaults to 256KB.
    #[allow(dead_code)]
    pub fn write_buffer_size(mut self, amount: usize) -> Self {
        self.write_buffer_size = Some(amount);
        self
    }

    /// Sets the flush interval for ledger and data files.
    ///
    /// While data is asynchronously flushed by the OS, and the reader/writer can perform a
    /// "hard" flush (aka `fsync`/`fdatasync`), the flush interval effectively controls the
    /// acceptable window of time for data loss.
    ///
    /// In the event that data had not yet been durably written to disk, and Vector crashed, the
    /// amount of data written since the last flush would be lost.
    ///
    /// Defaults to 500ms.
    #[allow(dead_code)]
    pub fn flush_interval(mut self, interval: Duration) -> Self {
        self.flush_interval = Some(interval);
        self
    }

    /// Sets the filesystem implementation used for opening data files.
    ///
    /// We allow parameterizing the filesystem implementation for ease of testing.  The "filesystem"
    /// implementation essentially defines how we open and delete data files, as well as the type of
    /// the data file objects we get when opening a data file.
    ///
    /// Defaults to a Tokio-backed implementation.
    #[allow(dead_code)]
    pub fn filesystem<FS2>(self, filesystem: FS2) -> DiskBufferConfigBuilder<FS2>
    where
        FS2: Filesystem,
    {
        DiskBufferConfigBuilder {
            data_dir: self.data_dir,
            max_buffer_size: self.max_buffer_size,
            max_data_file_size: self.max_data_file_size,
            max_record_size: self.max_record_size,
            write_buffer_size: self.write_buffer_size,
            flush_interval: self.flush_interval,
            filesystem,
        }
    }

    /// Consumes this builder and constructs a `DiskBufferConfig`.
    pub fn build(self) -> Result<DiskBufferConfig<FS>, BuildError> {
        let max_buffer_size = self.max_buffer_size.unwrap_or(u64::MAX);
        let max_data_file_size = self.max_data_file_size.unwrap_or_else(|| {
            u64::try_from(DEFAULT_MAX_DATA_FILE_SIZE)
                .expect("Default maximum data file size should never be greater than 2^64 bytes.")
        });
        let max_record_size = self.max_record_size.unwrap_or(DEFAULT_MAX_RECORD_SIZE);
        let write_buffer_size = self.write_buffer_size.unwrap_or(DEFAULT_WRITE_BUFFER_SIZE);
        let flush_interval = self.flush_interval.unwrap_or(DEFAULT_FLUSH_INTERVAL);
        let filesystem = self.filesystem;

        // Validate the input parameters.
        if max_data_file_size == 0 {
            return Err(BuildError::InvalidParameter {
                param_name: "max_data_file_size",
                reason: "cannot be zero".to_string(),
            });
        }

        let data_file_size_mechanical_limit = get_maximum_data_file_size();
        if max_data_file_size > data_file_size_mechanical_limit {
            return Err(BuildError::InvalidParameter {
                param_name: "max_data_file_size",
                reason: format!("cannot be greater than {data_file_size_mechanical_limit} bytes"),
            });
        }

        let Some(minimum_buffer_size) = get_minimum_buffer_size(max_data_file_size) else {
            unreachable!("maximum data file size should be correctly limited at this point")
        };

        if max_buffer_size < minimum_buffer_size {
            return Err(BuildError::InvalidParameter {
                param_name: "max_buffer_size",
                reason: format!("must be greater than or equal to {minimum_buffer_size} bytes"),
            });
        }

        if max_record_size == 0 {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: "cannot be zero".to_string(),
            });
        }

        if max_record_size < MINIMUM_MAX_RECORD_SIZE {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: format!("must be greater than or equal to {MINIMUM_MAX_RECORD_SIZE} bytes"),
            });
        }

        let Ok(max_record_size_converted) = u64::try_from(max_record_size) else {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: "must be less than 2^64 bytes".to_string(),
            });
        };

        if max_record_size_converted > max_data_file_size {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: "must be less than or equal to `max_data_file_size`".to_string(),
            });
        }

        if write_buffer_size == 0 {
            return Err(BuildError::InvalidParameter {
                param_name: "write_buffer_size",
                reason: "cannot be zero".to_string(),
            });
        }

        // Users configure the `max_size` of their disk buffers, which translates to the `max_buffer_size` field here,
        // and represents the maximum desired size of a disk buffer in terms of on-disk usage. In order to meet this
        // request, we do a few things internally and also enforce a lower bound on `max_buffer_size` to ensure we can
        // commit to respecting the communicated maximum buffer size.
        //
        // Internally, we track the current buffer size as a function of the sum of the size of all unacknowledged
        // records.  This means, simply, that if 100 records are written that consume 1KB a piece, our current buffer
        // size should be around 100KB, and as those records are read and acknowledged, the current buffer size would
        // drop by 1KB for each of them until eventually it went back down to zero.
        //
        // One of the design invariants around data files is that they are written to until they reach the maximum data
        // file size, such that they are guaranteed to never be greater in size than `max_data_file_size`. This is
        // coupled with the fact that a data file cannot be deleted from disk until all records written to it have been
        // read _and_ acknowledged.
        //
        // Together, this means that we need to set a lower bound of 2*`max_data_file_size` for `max_buffer_size`.
        //
        // First, given the "data file keeps getting written to until we reach its max size" invariant, we know that in
        // order to commit to the on-disk buffer size not exceeding `max_buffer_size`, the value must be at least as
        // much as a single full data file, aka `max_data_file_size`.
        //
        // Secondly, we also want to ensure that the writer can make progress as the reader makes progress. If the
        // maximum buffer size was equal to the maximum data file size, the writer would be stalled as soon as the data
        // file reached the maximum size, until the reader was able to fully read and acknowledge all records, and thus
        // delete the data file from disk. If we instead require that the maximum buffer size exceeds
        // `max_data_file_size`, this allows us to open the next data file and start writing to it up until the maximum
        // buffer size.
        //
        // Since we could essentially read and acknowledge all but the last remaining record in a data file, this would
        // imply we gave the writer the ability to write that much more data, which means we would need at least double
        // the maximum data file size in order to support the writer being able to make progress in the aforementioned
        // situation.
        //
        // Finally, we come to this calculation. Since the logic dictates that we essentially require at least one extra
        // data file past the minimum of one, we need to use an _internal_ maximum buffer size of `max_buffer_size` -
        // `max_data_file_size`, so that as the reader makes progress, the writer is never led to believe it can create
        // another data file such that the number of active data files, multiplied by `max_data_file_size`, would exceed
        // `max_buffer_size`.
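        //
        // As a concrete (illustrative) example: with a configured `max_buffer_size` of 1,024MB and a
        // `max_data_file_size` of 128MB, the internal maximum buffer size becomes 896MB, which leaves room for one
        // full in-progress data file while keeping total on-disk usage at or below the configured 1,024MB.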
        let max_buffer_size = max_buffer_size - max_data_file_size;

        Ok(DiskBufferConfig {
            data_dir: self.data_dir,
            max_buffer_size,
            max_data_file_size,
            max_record_size,
            write_buffer_size,
            flush_interval,
            filesystem,
        })
    }
}

#[cfg(test)]
mod tests {
    use proptest::{prop_assert, proptest, test_runner::Config};

    use crate::variants::disk_v2::common::MAX_ALIGNABLE_AMOUNT;

    use super::{
        align16, BuildError, DiskBufferConfigBuilder, MINIMUM_MAX_RECORD_SIZE, SERIALIZER_ALIGNMENT,
    };

    #[test]
    #[should_panic(expected = "`amount` must be less than `MAX_ALIGNABLE_AMOUNT`")]
    fn test_align16_too_large() {
        // We forcefully panic if the input to `align16` is too large to align without overflow, primarily because
        // that's a huge amount even on 32-bit systems. In non-test code, we only use `align16` in a const context,
        // so it would panic during compilation rather than at runtime.
        align16(MAX_ALIGNABLE_AMOUNT + 1);
    }

    proptest! {
        #![proptest_config(Config::with_cases(1000))]
        #[test]
        fn test_align16(input in 0..MAX_ALIGNABLE_AMOUNT) {
            // You may think to yourself: "this test seems excessive and not necessary", but, au contraire! Our
            // algorithm depends on integer division rounding towards zero, which is an invariant provided to Rust by
            // way of LLVM itself. In order to avoid weird surprises down the line if that invariant changes, including
            // a future where we, or others, potentially compile Vector with an alternative compiler that does not
            // round towards zero... we're being extra careful and hedging our bet by having such a property test.

            // Make sure we're actually aligned.
            let aligned = align16(input);
            prop_assert!(aligned % SERIALIZER_ALIGNMENT == 0);

            // Make sure we're not overaligned, too.
            let delta = if aligned >= input {
                aligned - input
            } else {
                panic!("`aligned` must never be less than `input` in this test; inputs are crafted to obey `MAX_ALIGNABLE_AMOUNT`");
            };

            prop_assert!(delta < SERIALIZER_ALIGNMENT, "`align16` returned an overaligned result: input={} aligned={} delta={}", input, aligned, delta);
        }
    }

    #[test]
    fn basic_rejections() {
        // Maximum data file size cannot be zero.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size(0)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_data_file_size",
                "invalid parameter should have been `max_data_file_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum data file size cannot be greater than u64::MAX / 2, since we multiply it by 2 when calculating the
        // lower bound for the maximum buffer size.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size((u64::MAX / 2) + 1)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_data_file_size",
                "invalid parameter should have been `max_data_file_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum buffer size cannot be zero.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_buffer_size(0)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_buffer_size",
                "invalid parameter should have been `max_buffer_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum buffer size cannot be less than 2x the maximum data file size.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size(10000)
            .max_record_size(100)
            .max_buffer_size(19999)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_buffer_size",
                "invalid parameter should have been `max_buffer_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum record size cannot be zero.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_record_size(0)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_record_size",
                "invalid parameter should have been `max_record_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum record size cannot be less than `MINIMUM_MAX_RECORD_SIZE`.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_record_size(MINIMUM_MAX_RECORD_SIZE - 1)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_record_size",
                "invalid parameter should have been `max_record_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum record size cannot be greater than the maximum data file size.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size(123_456)
            .max_record_size(123_457)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_record_size",
                "invalid parameter should have been `max_record_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }
    }

    proptest! {
        #![proptest_config(Config::with_cases(10000))]
        #[test]
        fn ensure_max_buffer_size_lower_bound(max_buffer_size in 1..u64::MAX, max_record_data_file_size in 1..u64::MAX) {
            let max_data_file_size = max_record_data_file_size;
            let max_record_size = usize::try_from(max_record_data_file_size)
                .expect("Maximum record size, and data file size, must fit within a `usize`.");

            let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
                .max_buffer_size(max_buffer_size)
                .max_data_file_size(max_data_file_size)
                .max_record_size(max_record_size)
                .build();

            // We don't necessarily care about the error cases here. What we do care about is making sure that, when
            // the generated configuration is valid, the calculated maximum buffer size meets our expectation of being
            // at least `max_data_file_size`, and exactly `max_data_file_size` less than the input maximum buffer size.
            if let Ok(config) = result {
                prop_assert!(config.max_buffer_size >= max_data_file_size, "calculated max buffer size must always be greater than or equal to `max_data_file_size`");
                prop_assert!(config.max_buffer_size + max_data_file_size == max_buffer_size, "calculated max buffer size must always be exactly `max_data_file_size` less than the input max buffer size");
            }
        }
    }
}