From f81eb4882a015ff19cb7209980ca3c7dd33bbae1 Mon Sep 17 00:00:00 2001
From: Daniel Schadt
Date: Mon, 16 Jan 2023 20:58:53 +0100
Subject: use a single thread to write out files

It seems like this does not make the encoding slower, and the main point is
that we might want to support SQLite storage for the tiles, in which case it
might be good to have only one writer.

Even with the FS-based approach, maybe it's good to have a single thread
responsible for writing everything, and not hammer the OS with 16 write
requests at once.
---
 src/layer.rs    | 16 +++++++++++++++-
 src/main.rs     |  2 +-
 src/renderer.rs | 41 ++++++++++++++++++++++++++++++-----------
 3 files changed, 46 insertions(+), 13 deletions(-)
(limited to 'src')

diff --git a/src/layer.rs b/src/layer.rs
index ff69f7d..2726c81 100644
--- a/src/layer.rs
+++ b/src/layer.rs
@@ -3,7 +3,11 @@
 //! This supports OSM-style "tiled" images, but not all of the tiles have to be present. If a tile
 //! is not present, a default pixel is returned. The tile is allocated with the first call to a
 //! mutating operation.
-use std::{fs::File, io::BufWriter, path::Path};
+use std::{
+    fs::File,
+    io::{BufWriter, Write},
+    path::Path,
+};
 
 use color_eyre::eyre::Result;
 use fnv::FnvHashMap;
@@ -119,6 +123,10 @@ where
 
 pub fn compress_png<P: AsRef<Path>>(image: &RgbaImage, path: P) -> Result<()> {
     let outstream = BufWriter::new(File::create(path)?);
+    compress_png_stream(image, outstream)
+}
+
+pub fn compress_png_stream<W: Write>(image: &RgbaImage, outstream: W) -> Result<()> {
     let encoder =
         PngEncoder::new_with_quality(outstream, CompressionType::Best, FilterType::Adaptive);
 
@@ -127,6 +135,12 @@ pub fn compress_png<P: AsRef<Path>>(image: &RgbaImage, path: P) -> Result<()> {
     Ok(())
 }
 
+pub fn compress_png_as_bytes(image: &RgbaImage) -> Result<Vec<u8>> {
+    let mut buffer = Vec::new();
+    compress_png_stream(image, &mut buffer)?;
+    Ok(buffer)
+}
+
 fn zero_pixel<P: Pixel>() -> P {
     let zeroes = vec![Zero::zero(); P::CHANNEL_COUNT as usize];
     *P::from_slice(&zeroes)
diff --git a/src/main.rs b/src/main.rs
index e2dacb7..a508fdc 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -123,7 +123,7 @@ fn ensure_output_directory<P: AsRef<Path>>(path: P) -> Result<()> {
     let metadata = fs::metadata(path);
     match metadata {
         Err(e) if e.kind() == ErrorKind::NotFound => {
-            let parent = path.parent().unwrap_or(Path::new("/"));
+            let parent = path.parent().unwrap_or_else(|| Path::new("/"));
             fs::create_dir(path)
                 .context(format!("Could not create output directory at {parent:?}"))?
         }
diff --git a/src/renderer.rs b/src/renderer.rs
index 2f29828..de65da6 100644
--- a/src/renderer.rs
+++ b/src/renderer.rs
@@ -1,6 +1,9 @@
-use std::{fs, path::Path};
+use std::{fs, mem, path::Path, sync::mpsc, thread};
 
-use color_eyre::eyre::{bail, Result};
+use color_eyre::{
+    eyre::{bail, Result},
+    Report,
+};
 use image::{ImageBuffer, Luma, Pixel, RgbaImage};
 use nalgebra::{vector, Vector2};
 use rayon::iter::ParallelIterator;
@@ -143,22 +146,38 @@ pub fn lazy_colorization<P: AsRef<Path>, F: Fn(usize) + Send + Sync>(
         return Ok(());
     }
 
-    layer
-        .into_parallel_tiles()
-        .try_for_each(|(tile_x, tile_y, tile)| {
-            let colorized = colorize_tile(&tile, max.into());
+    type Job = (u64, u64, Vec<u8>);
+    let (tx, rx) = mpsc::sync_channel::<Job>(30);
+
+    thread::scope(|s| {
+        let saver = s.spawn(move || loop {
+            let Ok((tile_x, tile_y, data)) = rx.recv() else { return Ok(()) };
             let folder = base_dir.join(tile_x.to_string());
             let metadata = folder.metadata();
             match metadata {
-                Err(_) => fs::create_dir_all(&folder)?,
+                Err(_) => fs::create_dir(&folder)?,
                 Ok(m) if !m.is_dir() => bail!("Output path is not a directory"),
                 _ => {}
             }
             let file = folder.join(format!("{tile_y}.png"));
-            layer::compress_png(&colorized, file)?;
-            progress_callback(1);
-            Ok(())
-        })?;
+            fs::write(file, data)?;
+        });
+
+        layer
+            .into_parallel_tiles()
+            .try_for_each(|(tile_x, tile_y, tile)| {
+                let colorized = colorize_tile(&tile, max.into());
+                let data = layer::compress_png_as_bytes(&colorized)?;
+                tx.send((tile_x, tile_y, data))?;
+                progress_callback(1);
+                Ok::<(), Report>(())
+            })?;
+
+        mem::drop(tx);
+        saver.join().unwrap()?;
+        Ok::<_, Report>(())
+    })?;
+
 
     Ok(())
 }
-- 
cgit v1.2.3
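
Editor's note, not part of the commit: the sketch below only illustrates the SQLite
direction mentioned in the commit message. Because all tile data is now funnelled
through a single saver thread, that thread could just as well write rows into a
database instead of PNG files. The rusqlite crate and the tiles(x, y, data) table
are assumptions for illustration, not code from this repository.

    // Hypothetical single-writer loop targeting SQLite (assumed rusqlite crate and schema).
    use std::sync::mpsc::Receiver;

    use rusqlite::{params, Connection};

    fn save_tiles(rx: Receiver<(u64, u64, Vec<u8>)>) -> rusqlite::Result<()> {
        let conn = Connection::open("tiles.sqlite")?;
        // Assumed schema: one row per tile, keyed by its tile coordinates.
        conn.execute(
            "CREATE TABLE IF NOT EXISTS tiles (x INTEGER, y INTEGER, data BLOB)",
            [],
        )?;
        // Same consumer pattern as the saver thread in the patch: drain the channel
        // until every producer has dropped its sender, then finish.
        while let Ok((tile_x, tile_y, data)) = rx.recv() {
            conn.execute(
                "INSERT INTO tiles (x, y, data) VALUES (?1, ?2, ?3)",
                params![tile_x as i64, tile_y as i64, data],
            )?;
        }
        Ok(())
    }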