vector_buffers/variants/disk_v2/
common.rs

use std::{
    path::{Path, PathBuf},
    time::Duration,
};

use crc32fast::Hasher;
use snafu::Snafu;

use super::{
    io::{Filesystem, ProductionFilesystem},
    ledger::LEDGER_LEN,
    record::RECORD_HEADER_LEN,
};

// We don't want data files to be bigger than 128MB, but we might end up overshooting slightly.
pub const DEFAULT_MAX_DATA_FILE_SIZE: usize = 128 * 1024 * 1024;

// We allow records to be as large as a data file.
pub const DEFAULT_MAX_RECORD_SIZE: usize = DEFAULT_MAX_DATA_FILE_SIZE;

// The maximum record size has to be bigger than the record header itself, since we count the record header towards
// sizing/space usage, and so on. We also use the overaligned version here to make sure we're similarly accounting for
// what `rkyv` will do when we serialize a record.
pub const MINIMUM_MAX_RECORD_SIZE: usize = align16(RECORD_HEADER_LEN + 1);

// We want to ensure a reasonable time before we `fsync`/flush to disk, and 500ms should provide that for non-critical
// workloads.
//
// Practically, this is far more definitive than `disk_v1`, which is not guaranteed to `fsync` at all, at least with
// how we have it configured.
pub const DEFAULT_FLUSH_INTERVAL: Duration = Duration::from_millis(500);

// Using 256KB as it aligns nicely with the I/O size exposed by major cloud providers.  This may not
// be the underlying block size used by the OS, but it still aligns well with what will happen on
// the "backend" for cloud providers, which is simply a useful default for when we want to look at
// buffer throughput and estimate how many IOPS will be consumed, etc.
pub const DEFAULT_WRITE_BUFFER_SIZE: usize = 256 * 1024;

// We specifically limit ourselves to a small range of file IDs in test (0 through `MAX_FILE_ID`), because it lets us
// more quickly create/consume the file IDs so we can test edge cases like file ID rollover and "writer is
// waiting to open file that reader is still on".
#[cfg(not(test))]
pub const MAX_FILE_ID: u16 = u16::MAX;
#[cfg(test)]
pub const MAX_FILE_ID: u16 = 6;

// The alignment used by the record serializer.
const SERIALIZER_ALIGNMENT: usize = 16;
const MAX_ALIGNABLE_AMOUNT: usize = usize::MAX - SERIALIZER_ALIGNMENT;

pub(crate) fn create_crc32c_hasher() -> Hasher {
    crc32fast::Hasher::new()
}

/// Aligns the given amount to 16.
///
/// This is required due to the overalignment used in record serialization, such that we can correctly determine minimum
/// on-disk sizes for various elements, and account for those in size limits, etc.
pub(crate) const fn align16(amount: usize) -> usize {
    // The amount must be less than `MAX_ALIGNABLE_AMOUNT`, otherwise we'd overflow trying to align it and end up with
    // a nonsensical value.
    assert!(
        amount <= MAX_ALIGNABLE_AMOUNT,
        "`amount` must be less than `MAX_ALIGNABLE_AMOUNT`"
    );

    amount.div_ceil(SERIALIZER_ALIGNMENT) * SERIALIZER_ALIGNMENT
}
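
// Illustrative compile-time checks of the rounding behavior (examples only, no runtime cost): values round up to the
// next multiple of 16, and already-aligned values are left unchanged.
const _: () = assert!(align16(17) == 32);
const _: () = assert!(align16(32) == 32);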

/// Gets the maximum possible data file size given the type-level numerical limits and buffer invariants.
fn get_maximum_data_file_size() -> u64 {
    let ledger_len: u64 = LEDGER_LEN
        .try_into()
        .expect("Ledger length must fit within a `u64`.");
    (u64::MAX - ledger_len) / 2
}
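
// Capping `max_data_file_size` at this limit also guarantees that the `2 * max_data_file_size + LEDGER_LEN`
// calculation in `get_minimum_buffer_size` below cannot overflow a `u64`, which is why the `None` case there is
// treated as unreachable during validation.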

/// Gets the minimum buffer size for the given maximum data file size.
///
/// This ensures that we are allowed to store enough bytes on-disk, as the buffer design requires being able to always
/// write to a minimum number of data files, etc. This also ensures that we're accounting for non-data file disk usage
/// so that we do not overrun the specified maximum buffer size when considering the sum total of files placed on disk.
fn get_minimum_buffer_size(max_data_file_size: u64) -> Option<u64> {
    // We're doing this fallible conversion back-and-forth because we have to interoperate with `u64` and `usize`, and
    // we need to ensure we're not getting values that can't be represented correctly in both types, as well as ensuring
    // we're not implicitly overflowing and generating nonsensical numbers.
    let ledger_len = LEDGER_LEN
        .try_into()
        .expect("Ledger length must fit within a `u64`.");

    // We always need to be able to allocate two data files, so the buffer size has to be at least as big as 2x data
    // files at their maximum allowed size, plus an allowance for the size of the ledger state itself.
    max_data_file_size
        .checked_mul(2)
        .and_then(|doubled| doubled.checked_add(ledger_len))
}
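
// For a rough sense of scale (illustrative only): with the default 128 MiB `max_data_file_size`, the minimum allowed
// buffer size works out to 2 * 128 MiB plus `LEDGER_LEN`, i.e. just over 256 MiB.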

#[derive(Debug, Snafu)]
pub enum BuildError {
    #[snafu(display("parameter '{}' was invalid: {}", param_name, reason))]
    InvalidParameter {
        param_name: &'static str,
        reason: String,
    },
}

/// Buffer configuration.
#[derive(Clone, Debug)]
pub struct DiskBufferConfig<FS> {
    /// Directory where this buffer will write its files.
    ///
    /// Must be unique from all other buffers, whether within the same process or other Vector
    /// processes on the machine.
    pub(crate) data_dir: PathBuf,

    /// Maximum size, in bytes, that the buffer can consume.
    ///
    /// The actual maximum on-disk buffer size is this amount rounded up to the next multiple of
    /// `max_data_file_size`, but internally, the next multiple of `max_data_file_size` when
    /// rounding this amount _down_ is what gets used as the maximum buffer size.
    ///
    /// This ensures that we never use more than the documented "rounded to the next multiple"
    /// amount, as we must account for one full data file's worth of extra data.
    pub(crate) max_buffer_size: u64,

    /// Maximum size, in bytes, to target for each individual data file.
    ///
    /// This value is not strictly obeyed because we cannot know, ahead of encoding/serializing, whether the
    /// free space a data file has is enough to hold the write.  In other words, we never attempt to
    /// write to a data file if it is as large or larger than this value, but we may write a record
    /// that causes a data file to exceed this value by as much as `max_record_size`.
    pub(crate) max_data_file_size: u64,

    /// Maximum size, in bytes, of an encoded record.
    ///
    /// Any record which, when encoded and serialized, is larger than this amount will not be written
    /// to the buffer.
    pub(crate) max_record_size: usize,

    /// Size, in bytes, of the writer's internal buffer.
    ///
    /// This buffer is used to coalesce writes to the underlying data file where possible, which in
    /// turn reduces the number of syscalls needed to issue writes to the underlying data file.
    pub(crate) write_buffer_size: usize,

    /// Flush interval for ledger and data files.
    ///
    /// While data is asynchronously flushed by the OS, and the reader/writer can proceed with a
    /// "hard" flush (aka `fsync`/`fdatasync`), the flush interval effectively controls the
    /// acceptable window of time for data loss.
    ///
    /// In the event that data had not yet been durably written to disk, and Vector crashed, the
    /// amount of data written since the last flush would be lost.
    pub(crate) flush_interval: Duration,

    /// Filesystem implementation for opening data files.
    ///
    /// We allow parameterizing the filesystem implementation for ease of testing.  The "filesystem"
    /// implementation essentially defines how we open and delete data files, as well as the type of
    /// the data file objects we get when opening a data file.
    pub(crate) filesystem: FS,
}

/// Builder for [`DiskBufferConfig`].
#[derive(Clone, Debug)]
pub struct DiskBufferConfigBuilder<FS = ProductionFilesystem>
where
    FS: Filesystem,
{
    pub(crate) data_dir: PathBuf,
    pub(crate) max_buffer_size: Option<u64>,
    pub(crate) max_data_file_size: Option<u64>,
    pub(crate) max_record_size: Option<usize>,
    pub(crate) write_buffer_size: Option<usize>,
    pub(crate) flush_interval: Option<Duration>,
    pub(crate) filesystem: FS,
}

impl DiskBufferConfigBuilder {
    /// Creates a `DiskBufferConfigBuilder` rooted at the given data directory, with all other
    /// parameters left unset so that they fall back to their defaults at build time.
    pub fn from_path<P>(data_dir: P) -> DiskBufferConfigBuilder
    where
        P: AsRef<Path>,
    {
        DiskBufferConfigBuilder {
            data_dir: data_dir.as_ref().to_path_buf(),
            max_buffer_size: None,
            max_data_file_size: None,
            max_record_size: None,
            write_buffer_size: None,
            flush_interval: None,
            filesystem: ProductionFilesystem,
        }
    }
}
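
// A minimal usage sketch (illustrative only; the path and size below are hypothetical, not defaults):
//
//     let config = DiskBufferConfigBuilder::from_path("/var/lib/vector/example-buffer")
//         .max_buffer_size(1024 * 1024 * 1024)
//         .build()
//         .expect("buffer configuration should be valid");
//
// Any parameter not set explicitly falls back to the defaults defined at the top of this module.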

impl<FS> DiskBufferConfigBuilder<FS>
where
    FS: Filesystem,
{
    /// Sets the maximum size, in bytes, that the buffer can consume.
    ///
    /// The actual maximum on-disk buffer size is this amount rounded up to the next multiple of
    /// `max_data_file_size`, but internally, the next multiple of `max_data_file_size` when
    /// rounding this amount _down_ is what gets used as the maximum buffer size.
    ///
    /// This ensures that we never use more than the documented "rounded to the next multiple"
    /// amount, as we must account for one full data file's worth of extra data.
    ///
    /// Defaults to `u64::MAX`, or effectively no limit.  Due to the internal design of the
    /// buffer, the effective maximum limit is around `max_data_file_size` * 2^16.
    pub fn max_buffer_size(mut self, amount: u64) -> Self {
        self.max_buffer_size = Some(amount);
        self
    }

    /// Sets the maximum size, in bytes, to target for each individual data file.
    ///
    /// This value is not strictly obeyed because we cannot know, ahead of encoding/serializing, whether the
    /// free space a data file has is enough to hold the write.  In other words, we never attempt to
    /// write to a data file if it is as large or larger than this value, but we may write a record
    /// that causes a data file to exceed this value by as much as `max_record_size`.
    ///
    /// Defaults to 128MB.
    #[allow(dead_code)]
    pub fn max_data_file_size(mut self, amount: u64) -> Self {
        self.max_data_file_size = Some(amount);
        self
    }

    /// Sets the maximum size, in bytes, of an encoded record.
    ///
    /// Any record which, when encoded and serialized, is larger than this amount will not be written
    /// to the buffer.
    ///
    /// Defaults to 128MB.
    #[allow(dead_code)]
    pub fn max_record_size(mut self, amount: usize) -> Self {
        self.max_record_size = Some(amount);
        self
    }

    /// Sets the size, in bytes, of the writer's internal buffer.
    ///
    /// This buffer is used to coalesce writes to the underlying data file where possible, which in
    /// turn reduces the number of syscalls needed to issue writes to the underlying data file.
    ///
    /// Defaults to 256KB.
    #[allow(dead_code)]
    pub fn write_buffer_size(mut self, amount: usize) -> Self {
        self.write_buffer_size = Some(amount);
        self
    }

    /// Sets the flush interval for ledger and data files.
    ///
    /// While data is asynchronously flushed by the OS, and the reader/writer can proceed with a
    /// "hard" flush (aka `fsync`/`fdatasync`), the flush interval effectively controls the
    /// acceptable window of time for data loss.
    ///
    /// In the event that data had not yet been durably written to disk, and Vector crashed, the
    /// amount of data written since the last flush would be lost.
    ///
    /// Defaults to 500ms.
    #[allow(dead_code)]
    pub fn flush_interval(mut self, interval: Duration) -> Self {
        self.flush_interval = Some(interval);
        self
    }

    /// Sets the filesystem implementation used for opening data files.
    ///
    /// We allow parameterizing the filesystem implementation for ease of testing.  The "filesystem"
    /// implementation essentially defines how we open and delete data files, as well as the type of
    /// the data file objects we get when opening a data file.
    ///
    /// Defaults to a Tokio-backed implementation.
    #[allow(dead_code)]
    pub fn filesystem<FS2>(self, filesystem: FS2) -> DiskBufferConfigBuilder<FS2>
    where
        FS2: Filesystem,
    {
        DiskBufferConfigBuilder {
            data_dir: self.data_dir,
            max_buffer_size: self.max_buffer_size,
            max_data_file_size: self.max_data_file_size,
            max_record_size: self.max_record_size,
            write_buffer_size: self.write_buffer_size,
            flush_interval: self.flush_interval,
            filesystem,
        }
    }

    /// Consumes this builder and constructs a `DiskBufferConfig`.
    pub fn build(self) -> Result<DiskBufferConfig<FS>, BuildError> {
        let max_buffer_size = self.max_buffer_size.unwrap_or(u64::MAX);
        let max_data_file_size = self.max_data_file_size.unwrap_or_else(|| {
            u64::try_from(DEFAULT_MAX_DATA_FILE_SIZE)
                .expect("Default maximum data file size should never be greater than 2^64 bytes.")
        });
        let max_record_size = self.max_record_size.unwrap_or(DEFAULT_MAX_RECORD_SIZE);
        let write_buffer_size = self.write_buffer_size.unwrap_or(DEFAULT_WRITE_BUFFER_SIZE);
        let flush_interval = self.flush_interval.unwrap_or(DEFAULT_FLUSH_INTERVAL);
        let filesystem = self.filesystem;

        // Validate the input parameters.
        if max_data_file_size == 0 {
            return Err(BuildError::InvalidParameter {
                param_name: "max_data_file_size",
                reason: "cannot be zero".to_string(),
            });
        }

        let data_file_size_mechanical_limit = get_maximum_data_file_size();
        if max_data_file_size > data_file_size_mechanical_limit {
            return Err(BuildError::InvalidParameter {
                param_name: "max_data_file_size",
                reason: format!("cannot be greater than {data_file_size_mechanical_limit} bytes"),
            });
        }

        let Some(minimum_buffer_size) = get_minimum_buffer_size(max_data_file_size) else {
            unreachable!("maximum data file size should be correctly limited at this point")
        };

        if max_buffer_size < minimum_buffer_size {
            return Err(BuildError::InvalidParameter {
                param_name: "max_buffer_size",
                reason: format!("must be greater than or equal to {minimum_buffer_size} bytes"),
            });
        }

        if max_record_size == 0 {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: "cannot be zero".to_string(),
            });
        }

        if max_record_size < MINIMUM_MAX_RECORD_SIZE {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: format!("must be greater than or equal to {MINIMUM_MAX_RECORD_SIZE} bytes"),
            });
        }

        let Ok(max_record_size_converted) = u64::try_from(max_record_size) else {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: "must be less than 2^64 bytes".to_string(),
            });
        };

        if max_record_size_converted > max_data_file_size {
            return Err(BuildError::InvalidParameter {
                param_name: "max_record_size",
                reason: "must be less than or equal to `max_data_file_size`".to_string(),
            });
        }

        if write_buffer_size == 0 {
            return Err(BuildError::InvalidParameter {
                param_name: "write_buffer_size",
                reason: "cannot be zero".to_string(),
            });
        }

        // Users configure the `max_size` of their disk buffers, which translates to the `max_buffer_size` field here,
        // and represents the maximum desired size of a disk buffer in terms of on-disk usage. In order to meet this
        // request, we do a few things internally and also enforce a lower bound on `max_buffer_size` to ensure we can
        // commit to respecting the communicated maximum buffer size.
        //
        // Internally, we track the current buffer size as a function of the sum of the size of all unacknowledged
        // records.  This means, simply, that if 100 records are written that consume 1KB apiece, our current buffer
        // size should be around 100KB, and as those records are read and acknowledged, the current buffer size would
        // drop by 1KB for each of them until eventually it went back down to zero.
        //
        // One of the design invariants around data files is that they are written to until they reach the maximum data
        // file size, such that they are guaranteed to never be greater in size than `max_data_file_size`. This is
        // coupled with the fact that a data file cannot be deleted from disk until all records written to it have been
        // read _and_ acknowledged.
        //
        // Together, this means that we need to set a lower bound of 2*`max_data_file_size` for `max_buffer_size`.
        //
        // First, given the "data file keeps getting written to until we reach its max size" invariant, we know that in
        // order to commit to the on-disk buffer size not exceeding `max_buffer_size`, the value must be at least as
        // much as a single full data file, aka `max_data_file_size`.
        //
        // Secondly, we also want to ensure that the writer can make progress as the reader makes progress. If the
        // maximum buffer size was equal to the maximum data file size, the writer would be stalled as soon as the data
        // file reached the maximum size, until the reader was able to fully read and acknowledge all records, and thus
        // delete the data file from disk. If we instead require that the maximum buffer size exceeds
        // `max_data_file_size`, this allows us to open the next data file and start writing to it up until the maximum
        // buffer size.
        //
        // Since we could essentially read and acknowledge all but the last remaining record in a data file, this would
        // imply we gave the writer the ability to write that much more data, which means we would need at least double
        // the maximum data file size in order to support the writer being able to make progress in the aforementioned
        // situation.
        //
        // Finally, we come to this calculation. Since the logic dictates that we essentially require at least one extra
        // data file past the minimum of one, we need to use an _internal_ maximum buffer size of `max_buffer_size` -
        // `max_data_file_size`, so that as the reader makes progress, the writer is never led to believe it can create
        // another data file such that the number of active data files, multiplied by `max_data_file_size`, would exceed
        // `max_buffer_size`.
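        //
        // As a rough illustration (the buffer size here is hypothetical, not a default): with a configured
        // `max_buffer_size` of 1 GiB and the default 128 MiB `max_data_file_size`, the internal limit becomes
        // 896 MiB. That 128 MiB of headroom covers the extra data file the writer may open while the reader
        // finishes draining the oldest one, keeping total on-disk usage within the configured 1 GiB.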
        let max_buffer_size = max_buffer_size - max_data_file_size;

        Ok(DiskBufferConfig {
            data_dir: self.data_dir,
            max_buffer_size,
            max_data_file_size,
            max_record_size,
            write_buffer_size,
            flush_interval,
            filesystem,
        })
    }
}

#[cfg(test)]
mod tests {
    use proptest::{prop_assert, proptest, test_runner::Config};

    use super::{
        BuildError, DiskBufferConfigBuilder, MINIMUM_MAX_RECORD_SIZE, SERIALIZER_ALIGNMENT, align16,
    };
    use crate::variants::disk_v2::common::MAX_ALIGNABLE_AMOUNT;

    #[test]
    #[should_panic(expected = "`amount` must be less than `MAX_ALIGNABLE_AMOUNT`")]
    fn test_align16_too_large() {
        // We forcefully panic if the input to `align16` is too large to align without overflowing, primarily because
        // that's a huge amount even on 32-bit systems. In non-test code, we only use `align16` in a const context, so
        // it will panic during compilation, not at runtime.
        align16(MAX_ALIGNABLE_AMOUNT + 1);
    }

    proptest! {
        #![proptest_config(Config::with_cases(1000))]
        #[test]
        fn test_align16(input in 0..MAX_ALIGNABLE_AMOUNT) {
            // You may think to yourself: "this test seems excessive and not necessary", but, au contraire! Our
            // algorithm depends on integer division rounding towards zero, which is an invariant provided to Rust by
            // way of LLVM itself. In order to avoid weird surprises down the line if that invariant changes, including
            // a future where we, or others, potentially compile Vector with an alternative compiler that does not
            // round towards zero... we're being extra careful and hedging our bet by having such a property test.

            // Make sure we're actually aligned.
            let aligned = align16(input);
            prop_assert!(aligned.is_multiple_of(SERIALIZER_ALIGNMENT));

            // Make sure we're not overaligned, too.
            let delta = if aligned >= input {
                aligned - input
            } else {
                panic!("`aligned` must never be less than `input` in this test; inputs are crafted to obey `MAX_ALIGNABLE_AMOUNT`");
            };

            prop_assert!(delta < SERIALIZER_ALIGNMENT, "`align16` returned overaligned input: input={} aligned={} delta={}", input, aligned, delta);
        }
    }

    #[test]
    fn basic_rejections() {
        // Maximum data file size cannot be zero.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size(0)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_data_file_size",
                "invalid parameter should have been `max_data_file_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum data file size cannot be greater than (u64::MAX - LEDGER_LEN) / 2, since we multiply it by 2 (and
        // add the ledger length) when calculating the lower bound for the maximum buffer size.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size((u64::MAX / 2) + 1)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_data_file_size",
                "invalid parameter should have been `max_data_file_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum buffer size cannot be zero.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_buffer_size(0)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_buffer_size",
                "invalid parameter should have been `max_buffer_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum buffer size cannot be less than 2x the maximum data file size.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size(10000)
            .max_record_size(100)
            .max_buffer_size(19999)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_buffer_size",
                "invalid parameter should have been `max_buffer_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum record size cannot be zero.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_record_size(0)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_record_size",
                "invalid parameter should have been `max_record_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum record size cannot be less than `MINIMUM_MAX_RECORD_SIZE`.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_record_size(MINIMUM_MAX_RECORD_SIZE - 1)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_record_size",
                "invalid parameter should have been `max_record_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }

        // Maximum record size cannot be greater than maximum data file size.
        let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
            .max_data_file_size(123_456)
            .max_record_size(123_457)
            .build();

        match result {
            Err(BuildError::InvalidParameter { param_name, .. }) => assert_eq!(
                param_name, "max_record_size",
                "invalid parameter should have been `max_record_size`"
            ),
            _ => panic!("expected invalid parameter error"),
        }
    }

    proptest! {
        #![proptest_config(Config::with_cases(10000))]
        #[test]
        fn ensure_max_buffer_size_lower_bound(max_buffer_size in 1..u64::MAX, max_record_data_file_size in 1..u64::MAX) {
            let max_data_file_size = max_record_data_file_size;
            let max_record_size = usize::try_from(max_record_data_file_size)
                .expect("Maximum record size, and data file size, must fit within a `usize`.");

            let result = DiskBufferConfigBuilder::from_path("/tmp/dummy/path")
                .max_buffer_size(max_buffer_size)
                .max_data_file_size(max_data_file_size)
                .max_record_size(max_record_size)
                .build();

            // We don't necessarily care about the error cases here; what we do care about is making sure that, when
            // the generated configuration is theoretically valid, the calculated maximum buffer size actually meets
            // our expectation of being at least `max_data_file_size`, and exactly `max_data_file_size` less than the
            // input maximum buffer size.
            if let Ok(config) = result {
                prop_assert!(config.max_buffer_size >= max_data_file_size, "calculated max buffer size must always be greater than or equal to `max_data_file_size`");
                prop_assert!(config.max_buffer_size + max_data_file_size == max_buffer_size, "calculated max buffer size must always be exactly `max_data_file_size` less than the input max buffer size");
            }
        }
    }
583}