path: root/src/output/aggregators.rs
author    Daniel <kingdread@gmx.de>  2020-05-13 13:49:43 +0200
committer Daniel <kingdread@gmx.de>  2020-05-13 13:49:43 +0200
commit    fb2a6088dcc7b57a2c1ac93ec6a8fbcc52584734 (patch)
tree      66979361f60dad90f6c9b0eff7c5bf20404357e8 /src/output/aggregators.rs
parent    331d6b1762d1d9431b210fc98a495d56ad7a1cd1 (diff)
first attempt at sorting output
This does not work yet, as we cannot call .finish() on a dyn Aggregator; that still needs to be adjusted. However, this commit provides the basic infrastructure for producing sorted output, including the required command line parsing.
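
For context, a minimal sketch (not part of this commit) of the .finish() problem: a method that takes self by value cannot be called through a trait object, because the value behind the object has no statically known size (E0161). One possible adjustment, assuming the aggregator is used as a Box<dyn Aggregator>, is a self: Box<Self> receiver. All names below are illustrative stand-ins, not the crate's real types.

#[allow(dead_code)]
trait ByValue {
    // Takes `self` by value; calling this through `dyn ByValue` is rejected,
    // since a value of statically unknown size cannot be moved out of the
    // trait object.
    fn finish(self);
}

trait Boxed {
    // A `self: Box<Self>` receiver stays callable on `Box<dyn Boxed>`.
    fn finish(self: Box<Self>);
}

struct Dummy;

impl Boxed for Dummy {
    fn finish(self: Box<Self>) {
        println!("finished");
    }
}

fn main() {
    let agg: Box<dyn Boxed> = Box::new(Dummy);
    agg.finish(); // compiles: dispatch goes through the box
}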
Diffstat (limited to 'src/output/aggregators.rs')
-rw-r--r--  src/output/aggregators.rs | 46
1 file changed, 41 insertions(+), 5 deletions(-)
diff --git a/src/output/aggregators.rs b/src/output/aggregators.rs
index 1b04af3..4fa2558 100644
--- a/src/output/aggregators.rs
+++ b/src/output/aggregators.rs
@@ -6,12 +6,15 @@
 //!
 //! Aggregators must be shareable across threads, as the search will be multi-threaded. This is why
 //! an Aggregator must make sure that the data is protected by a mutex or similar.
-use super::{super::LogResult, formats::Format};
+use super::{super::LogResult, formats::Format, sorting::Sorting};
-use std::io::Write;
+use std::{
+    io::Write,
+    sync::Mutex,
+};
 
 pub trait Aggregator: Sync {
-    fn push_item(&self, item: &LogResult, format: &dyn Format, stream: &mut dyn Write);
+    fn push_item(&self, item: LogResult, format: &dyn Format, stream: &mut dyn Write);
 
     fn finish(self, format: &dyn Format, stream: &mut dyn Write);
 }
@@ -20,11 +23,44 @@ pub trait Aggregator: Sync {
 pub struct WriteThrough;
 
 impl Aggregator for WriteThrough {
-    fn push_item(&self, item: &LogResult, format: &dyn Format, stream: &mut dyn Write) {
-        let text = format.format_result(item);
+    fn push_item(&self, item: LogResult, format: &dyn Format, stream: &mut dyn Write) {
+        let text = format.format_result(&item);
         stream.write_all(text.as_bytes()).unwrap();
         stream.flush().unwrap();
     }
 
     fn finish(self, _: &dyn Format, _: &mut dyn Write) {}
 }
+
+/// An aggregator that keeps all found logs in memory and sorts them before outputting them.
+#[derive(Debug)]
+pub struct SortedOutput {
+    sorting: Sorting,
+    items: Mutex<Vec<LogResult>>,
+}
+
+impl SortedOutput {
+    pub fn new(sorting: Sorting) -> Self {
+        SortedOutput {
+            sorting,
+            items: Mutex::new(vec![]),
+        }
+    }
+}
+
+impl Aggregator for SortedOutput {
+    fn push_item(&self, item: LogResult, _: &dyn Format, _: &mut dyn Write) {
+        self.items.lock().unwrap().push(item)
+    }
+
+    fn finish(self, format: &dyn Format, stream: &mut dyn Write) {
+        let SortedOutput { sorting, items } = self;
+        let mut items = items.into_inner().unwrap();
+        items.sort_unstable_by(|a, b| sorting.cmp(a, b));
+        for item in items {
+            let text = format.format_result(&item);
+            stream.write_all(text.as_bytes()).unwrap();
+        }
+        stream.flush().unwrap();
+    }
+}
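
As the module docs above note, aggregators are shared across the search threads, which is why SortedOutput keeps its buffer behind a Mutex and only sorts once it is finished. A rough usage sketch under that assumption, with plain integers standing in for LogResult and the real Sorting and Format types omitted:

use std::sync::{Arc, Mutex};
use std::thread;

// Simplified stand-in for an aggregator: items are buffered behind a Mutex
// so that `&self` can be shared across worker threads.
struct Collector {
    items: Mutex<Vec<u32>>,
}

impl Collector {
    fn push_item(&self, item: u32) {
        self.items.lock().unwrap().push(item);
    }

    // Consumes the collector, sorts the buffered items and "outputs" them.
    fn finish(self) {
        let mut items = self.items.into_inner().unwrap();
        items.sort_unstable();
        println!("{:?}", items);
    }
}

fn main() {
    let collector = Arc::new(Collector { items: Mutex::new(Vec::new()) });
    let handles: Vec<_> = (0..4u32)
        .map(|worker| {
            let collector = Arc::clone(&collector);
            thread::spawn(move || collector.push_item(worker))
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // All workers have finished, so this is the only remaining owner.
    let collector = Arc::try_unwrap(collector).ok().expect("no other owners");
    collector.finish();
}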