author     metamuffin <metamuffin@disroot.org>  2023-10-02 11:00:54 +0200
committer  metamuffin <metamuffin@disroot.org>  2023-10-02 11:00:54 +0200
commit     aa0695aed60ac8568258aefb9d2ff5e9296111ae (patch)
tree       f9dc85c46af68ae8eab48b788309c81ac1aad87c /base
parent     f0647823b2e430cb42fe176c4e637f09fd69c276 (diff)
named cache and better error reporting
Diffstat (limited to 'base')
-rw-r--r--  base/src/cache.rs  21
1 file changed, 14 insertions, 7 deletions
diff --git a/base/src/cache.rs b/base/src/cache.rs
index d840c00..7705a14 100644
--- a/base/src/cache.rs
+++ b/base/src/cache.rs
@@ -1,5 +1,5 @@
 use crate::{AssetLocationExt, CONF};
-use anyhow::anyhow;
+use anyhow::{anyhow, Context};
 use base64::Engine;
 use bincode::{Decode, Encode};
 use jellycommon::AssetLocation;
@@ -27,7 +27,8 @@ pub fn cache_location(seed: &[&str]) -> (usize, AssetLocation) {
     let d = d.finalize();
     let n = d[0] as usize | (d[1] as usize) << 8 | (d[2] as usize) << 16 | (d[3] as usize) << 24;
     let fname = base64::engine::general_purpose::URL_SAFE.encode(d);
-    let fname = &fname[..22]; // about 128 bits
+    let fname = &fname[..22];
+    let fname = format!("{}-{}", seed[0], fname); // about 128 bits
     (n, AssetLocation::Cache(fname.into()))
 }
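
The heart of the change is the cache-file naming scheme in the hunk above: the truncated digest now gets the first seed component as a human-readable prefix. Below is a minimal sketch of the resulting scheme, assuming the digest is a SHA-256 over the seed strings (the hashing itself sits above the hunk and is not shown) and with a plain String standing in for AssetLocation:

use base64::Engine;
use sha2::{Digest, Sha256};

// Sketch only: the real function returns (usize, AssetLocation).
fn cache_location(seed: &[&str]) -> (usize, String) {
    let mut d = Sha256::new();
    for s in seed {
        d.update(s.as_bytes());
    }
    let d = d.finalize();
    // Bucket index from the first four digest bytes (little-endian).
    let n = d[0] as usize | (d[1] as usize) << 8 | (d[2] as usize) << 16 | (d[3] as usize) << 24;
    // 22 URL-safe base64 characters keep roughly 128 bits of the digest.
    let fname = base64::engine::general_purpose::URL_SAFE.encode(d);
    let fname = &fname[..22];
    // New in this commit: prefix the hash with seed[0], so cache entries
    // are identifiable on disk instead of being opaque hash names.
    (n, format!("{}-{}", seed[0], fname))
}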
@@ -46,9 +47,13 @@ where
     let (bucket, location) = cache_location(seed);
     // we need a lock even if it exists, since somebody might still be in the process of writing.
     let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT].lock();
-    let exists = tokio::fs::try_exists(location.path()).await?;
+    let exists = tokio::fs::try_exists(location.path())
+        .await
+        .context("unable to test for cache file existence")?;
     if !exists {
-        let f = tokio::fs::File::create(location.path()).await?;
+        let f = tokio::fs::File::create(location.path())
+            .await
+            .context("creating new cache file")?;
         generate(f).await?;
     }
     drop(_guard);
@@ -64,7 +69,7 @@ where
     let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT].blocking_lock();
     let exists = location.path().exists();
     if !exists {
-        let f = std::fs::File::create(location.path())?;
+        let f = std::fs::File::create(location.path()).context("creating new cache file")?;
         generate(f)?;
     }
     drop(_guard);
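
Both cache_file variants (async above, blocking just before this note) follow the pattern the in-code comment describes: take the per-bucket lock before checking for the file, so a concurrent caller can never read a half-written cache entry, and the new .context() calls name which filesystem step failed. A condensed sketch of that flow follows; the lock-table layout, the bucket count, and the ensure_cache_file wrapper are assumptions for illustration, only the names CACHE_GENERATION_LOCKS and CACHE_GENERATION_BUCKET_COUNT come from the diff:

use anyhow::Context;
use once_cell::sync::Lazy;
use tokio::sync::Mutex;

const CACHE_GENERATION_BUCKET_COUNT: usize = 64; // assumed bucket count
static CACHE_GENERATION_LOCKS: Lazy<Vec<Mutex<()>>> =
    Lazy::new(|| (0..CACHE_GENERATION_BUCKET_COUNT).map(|_| Mutex::new(())).collect());

// Hypothetical wrapper mirroring the async cache_file flow.
async fn ensure_cache_file(
    bucket: usize,                // from cache_location(seed)
    path: std::path::PathBuf,     // location.path() in the real code
    generate: impl FnOnce(tokio::fs::File) -> anyhow::Result<()>,
) -> anyhow::Result<()> {
    // Hold the bucket lock across both the existence check and generation,
    // so nobody observes a cache file that is still being written.
    let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT]
        .lock()
        .await;
    let exists = tokio::fs::try_exists(&path)
        .await
        .context("unable to test for cache file existence")?;
    if !exists {
        let f = tokio::fs::File::create(&path)
            .await
            .context("creating new cache file")?;
        // The real generate is async; a synchronous closure keeps the sketch short.
        generate(f)?;
    }
    drop(_guard);
    Ok(())
}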
@@ -101,11 +106,13 @@ where
     let location = cache_file(seed, move |mut file| {
         let object = generate()?;
-        bincode::encode_into_std_write(&object, &mut file, bincode::config::standard())?;
+        bincode::encode_into_std_write(&object, &mut file, bincode::config::standard())
+            .context("encoding cache object")?;
         Ok(())
     })?;
     let mut file = std::fs::File::open(location.path())?;
-    let object = bincode::decode_from_std_read::<T, _, _>(&mut file, bincode::config::standard())?;
+    let object = bincode::decode_from_std_read::<T, _, _>(&mut file, bincode::config::standard())
+        .context("decoding cache object")?;
     let object = Arc::new(object);
     let size = file.stream_position()? as usize; // this is an approximation mainly since varint is used in bincode
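
The last hunk adds the same kind of context to the bincode round trip in the typed object cache; the enclosing function's signature sits above the hunk and is not shown. A stand-alone sketch of that round trip, with a hypothetical cache_object_sketch helper and a plain path instead of the real cache_file/AssetLocation plumbing:

use std::io::Seek;
use std::sync::Arc;

use anyhow::Context;
use bincode::{Decode, Encode};

// Sketch only: write the object with bincode, read it back, and report the
// approximate on-disk size.
fn cache_object_sketch<T>(
    path: &std::path::Path,
    generate: impl FnOnce() -> anyhow::Result<T>,
) -> anyhow::Result<(Arc<T>, usize)>
where
    T: Encode + Decode,
{
    // Write side: produce the object and encode it into the cache file.
    let mut file = std::fs::File::create(path)?;
    let object = generate()?;
    bincode::encode_into_std_write(&object, &mut file, bincode::config::standard())
        .context("encoding cache object")?;

    // Read side: decode it back; the stream position is only an approximate
    // size because bincode's standard config uses varint encoding.
    let mut file = std::fs::File::open(path)?;
    let object = bincode::decode_from_std_read::<T, _, _>(&mut file, bincode::config::standard())
        .context("decoding cache object")?;
    let size = file.stream_position()? as usize;
    Ok((Arc::new(object), size))
}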