author     Daniel <kingdread@gmx.de>  2020-05-14 16:13:28 +0200
committer  Daniel <kingdread@gmx.de>  2020-05-14 16:13:28 +0200
commit     c973f2ddb47721254f6f5a81e4c45f0c72d793fd (patch)
tree       4aa7c00b87eec09ee1faca957be508efe3dbbb65 /src/output/aggregators.rs
parent     331d6b1762d1d9431b210fc98a495d56ad7a1cd1 (diff)
parent     e2d23d4b76000263e9f939637353bbc4bb9289fd (diff)
Merge branch 'sorted-output'
Diffstat (limited to 'src/output/aggregators.rs')
-rw-r--r--  src/output/aggregators.rs  |  48
1 file changed, 41 insertions(+), 7 deletions(-)
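The merge reworks the Aggregator trait: push_item now takes the LogResult by value, and finish consumes the aggregator through a boxed receiver. Below is a minimal, self-contained sketch (not part of this commit, using simplified stand-in types) of why finish(self: Box<Self>) is used rather than finish(self):

// Consuming a plain `self` through a `Box<dyn Aggregator>` would need the
// unstable `unsized_locals` feature; a `Box<Self>` receiver keeps the method
// callable on a trait object while still consuming the aggregator.
trait Aggregator {
    fn finish(self: Box<Self>);
}

struct WriteThrough;

impl Aggregator for WriteThrough {
    fn finish(self: Box<Self>) {
        println!("done");
    }
}

fn main() {
    let aggregator: Box<dyn Aggregator> = Box::new(WriteThrough);
    aggregator.finish(); // consumes the box; `fn finish(self)` would not compile here
}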
diff --git a/src/output/aggregators.rs b/src/output/aggregators.rs
index 1b04af3..83171eb 100644
--- a/src/output/aggregators.rs
+++ b/src/output/aggregators.rs
@@ -6,13 +6,14 @@
//!
//! Aggregators must be shareable across threads, as the search will be multi-threaded. This is why
//! an Aggregator must make sure that the data is protected by a mutex or similar.
-use super::{super::LogResult, formats::Format};
+use super::{super::LogResult, formats::Format, sorting::Sorting};
-use std::io::Write;
+use std::{io::Write, sync::Mutex};
pub trait Aggregator: Sync {
- fn push_item(&self, item: &LogResult, format: &dyn Format, stream: &mut dyn Write);
- fn finish(self, format: &dyn Format, stream: &mut dyn Write);
+ fn push_item(&self, item: LogResult, format: &dyn Format, stream: &mut dyn Write);
+ // When the `unsized_locals` feature is stable, we could rewrite this to finish(self, ...).
+ fn finish(self: Box<Self>, format: &dyn Format, stream: &mut dyn Write);
}
/// An aggregator that just pushes through each item to the output stream without any sorting or
@@ -20,11 +21,44 @@ pub trait Aggregator: Sync {
pub struct WriteThrough;
impl Aggregator for WriteThrough {
- fn push_item(&self, item: &LogResult, format: &dyn Format, stream: &mut dyn Write) {
- let text = format.format_result(item);
+ fn push_item(&self, item: LogResult, format: &dyn Format, stream: &mut dyn Write) {
+ let text = format.format_result(&item);
stream.write_all(text.as_bytes()).unwrap();
stream.flush().unwrap();
}
- fn finish(self, _: &dyn Format, _: &mut dyn Write) {}
+ fn finish(self: Box<Self>, _: &dyn Format, _: &mut dyn Write) {}
+}
+
+/// An aggregator that keeps all found logs in memory and sorts them before outputting them.
+#[derive(Debug)]
+pub struct SortedOutput {
+ sorting: Sorting,
+ items: Mutex<Vec<LogResult>>,
+}
+
+impl SortedOutput {
+ pub fn new(sorting: Sorting) -> Self {
+ SortedOutput {
+ sorting,
+ items: Mutex::new(vec![]),
+ }
+ }
+}
+
+impl Aggregator for SortedOutput {
+ fn push_item(&self, item: LogResult, _: &dyn Format, _: &mut dyn Write) {
+ self.items.lock().unwrap().push(item)
+ }
+
+ fn finish(self: Box<Self>, format: &dyn Format, stream: &mut dyn Write) {
+ let SortedOutput { sorting, items } = *self;
+ let mut items = items.into_inner().unwrap();
+ items.sort_unstable_by(|a, b| sorting.cmp(a, b));
+ for item in items {
+ let text = format.format_result(&item);
+ stream.write_all(text.as_bytes()).unwrap();
+ }
+ stream.flush().unwrap();
+ }
}
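
A self-contained sketch of the usage pattern this trait implies: search threads share the aggregator by reference and push results through the interior Mutex, then finish consumes the boxed aggregator to sort and emit everything at once. LogResult, Format, and Sorting are replaced by simple stand-ins here, and the driver code is an assumption, not part of this commit:

use std::io::Write;
use std::sync::Mutex;
use std::thread;

// Stand-in for the crate's LogResult; the real type carries log metadata.
type LogResult = String;

// Simplified version of the trait from this commit (Format parameter omitted).
trait Aggregator: Sync {
    fn push_item(&self, item: LogResult, stream: &mut dyn Write);
    fn finish(self: Box<Self>, stream: &mut dyn Write);
}

// Mirrors SortedOutput: collect under a Mutex, sort once at the end.
struct SortedOutput {
    items: Mutex<Vec<LogResult>>,
}

impl Aggregator for SortedOutput {
    fn push_item(&self, item: LogResult, _: &mut dyn Write) {
        // &self plus interior mutability lets multiple threads push concurrently.
        self.items.lock().unwrap().push(item);
    }

    fn finish(self: Box<Self>, stream: &mut dyn Write) {
        let mut items = self.items.into_inner().unwrap();
        items.sort_unstable(); // the real code sorts with Sorting::cmp
        for item in items {
            writeln!(stream, "{}", item).unwrap();
        }
        stream.flush().unwrap();
    }
}

fn main() {
    let aggregator = SortedOutput { items: Mutex::new(vec![]) };

    // Simulate the multi-threaded search pushing results in arbitrary order
    // (scoped threads from std 1.63+ are used here for brevity).
    thread::scope(|s| {
        for chunk in [vec!["boss-b", "boss-d"], vec!["boss-c", "boss-a"]] {
            let agg = &aggregator;
            s.spawn(move || {
                for log in chunk {
                    agg.push_item(log.to_string(), &mut std::io::sink());
                }
            });
        }
    });

    // Once the search is done, the boxed aggregator is consumed and emits sorted output.
    Box::new(aggregator).finish(&mut std::io::stdout());
}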