//! Different aggregators for output.
//!
//! An aggregator is something that "controls the order" of the output. Aggregators can either save
//! all the items they're given and output them once the search is finished, or write them straight
//! to the output stream.
//!
//! Aggregators must be shareable across threads, as the search will be multi-threaded. This is why
//! an Aggregator must make sure that its data is protected by a mutex or similar.

use super::{super::LogResult, formats::Format, sorting::Sorting};
use std::{
    io::Write,
    sync::Mutex,
};

pub trait Aggregator: Sync {
    fn push_item(&self, item: LogResult, format: &dyn Format, stream: &mut dyn Write);

    // When the `unsized_locals` feature is stable, we could rewrite this to `finish(self, ...)`.
    fn finish(self: Box<Self>, format: &dyn Format, stream: &mut dyn Write);
}

/// An aggregator that just pushes each item straight through to the output stream, without any
/// sorting whatsoever.
pub struct WriteThrough;

impl Aggregator for WriteThrough {
    fn push_item(&self, item: LogResult, format: &dyn Format, stream: &mut dyn Write) {
        let text = format.format_result(&item);
        stream.write_all(text.as_bytes()).unwrap();
        stream.flush().unwrap();
    }

    fn finish(self: Box<Self>, _: &dyn Format, _: &mut dyn Write) {}
}

/// An aggregator that keeps all found logs in memory and sorts them before outputting them.
#[derive(Debug)]
pub struct SortedOutput {
    sorting: Sorting,
    items: Mutex<Vec<LogResult>>,
}

impl SortedOutput {
    pub fn new(sorting: Sorting) -> Self {
        SortedOutput {
            sorting,
            items: Mutex::new(vec![]),
        }
    }
}

impl Aggregator for SortedOutput {
    fn push_item(&self, item: LogResult, _: &dyn Format, _: &mut dyn Write) {
        self.items.lock().unwrap().push(item)
    }

    fn finish(self: Box<Self>, format: &dyn Format, stream: &mut dyn Write) {
        let SortedOutput { sorting, items } = *self;
        let mut items = items.into_inner().unwrap();
        items.sort_unstable_by(|a, b| sorting.cmp(a, b));

        for item in items {
            let text = format.format_result(&item);
            stream.write_all(text.as_bytes()).unwrap();
        }
        stream.flush().unwrap();
    }
}
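
// ---------------------------------------------------------------------------
// Sketch (test only, not used by the search pipeline): illustrates the
// buffer-then-sort pattern behind `SortedOutput` and why the mutex matters.
// Plain `String`s stand in for `LogResult` and lexicographic ordering stands
// in for `Sorting`, so the example compiles on its own; both stand-ins are
// assumptions made purely for illustration.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod buffered_aggregation_sketch {
    use std::sync::Mutex;
    use std::thread;

    #[test]
    fn concurrent_pushes_are_sorted_on_finish() {
        let items: Mutex<Vec<String>> = Mutex::new(vec![]);

        // Simulate multi-threaded search workers pushing results in an
        // arbitrary order; the mutex makes the shared buffer safe to use
        // from several threads at once.
        thread::scope(|scope| {
            for chunk in [["delta", "alpha"], ["charlie", "bravo"]] {
                let items = &items;
                scope.spawn(move || {
                    for item in chunk {
                        items.lock().unwrap().push(item.to_string());
                    }
                });
            }
        });

        // The "finish" step: take ownership of the buffer and sort it once,
        // making the final output deterministic regardless of the order in
        // which the workers delivered their results.
        let mut items = items.into_inner().unwrap();
        items.sort_unstable();
        assert_eq!(items, vec!["alpha", "bravo", "charlie", "delta"]);
    }
}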