path: root/src/output/aggregators.rs
Diffstat (limited to 'src/output/aggregators.rs')
-rw-r--r--  src/output/aggregators.rs  32
1 file changed, 32 insertions, 0 deletions
diff --git a/src/output/aggregators.rs b/src/output/aggregators.rs
new file mode 100644
index 0000000..9934fb3
--- /dev/null
+++ b/src/output/aggregators.rs
@@ -0,0 +1,32 @@
+//! Different aggregators for output.
+//!
+//! An aggregator is something that "controls the order" of the output. Aggregators can either save
+//! all items they are given and output them once the search is finished, or write them straight
+//! to the output stream.
+//!
+//! Aggregators must be shareable across threads, as the search will be multi-threaded. This is why
+//! an aggregator has to protect any internal state with a mutex or a similar synchronization primitive.
+use super::{super::LogResult, formats::Format};
+
+use std::{io::Write, sync::Mutex};
+
+pub trait Aggregator: Sync {
+    fn push_item(&self, item: &LogResult, format: &Format, stream: &mut dyn Write);
+    fn finish(self, format: &Format, stream: &mut dyn Write);
+}
+
+
+/// An aggregator that just pushes each item straight through to the output stream, without any
+/// sorting or buffering.
+pub struct WriteThrough;
+
+
+impl Aggregator for WriteThrough {
+    fn push_item(&self, item: &LogResult, format: &Format, stream: &mut dyn Write) {
+        // Format the item and write it to the output stream immediately.
+        let text = format.format_result(item);
+        stream.write_all(text.as_bytes()).unwrap();
+    }
+
+    fn finish(self, _format: &Format, _stream: &mut dyn Write) {}
+}