-rw-r--r--  base/src/cache.rs       | 60
-rw-r--r--  base/src/lib.rs         | 61
-rw-r--r--  server/src/import.rs    |  2
-rw-r--r--  transcoder/src/image.rs |  2
4 files changed, 65 insertions, 60 deletions
diff --git a/base/src/cache.rs b/base/src/cache.rs
new file mode 100644
index 0000000..2fa2680
--- /dev/null
+++ b/base/src/cache.rs
@@ -0,0 +1,60 @@
+use crate::AssetLocationExt;
+use base64::Engine;
+use jellycommon::AssetLocation;
+use std::{future::Future, sync::LazyLock};
+use tokio::sync::Mutex;
+
+pub fn cache_location(seed: &[&str]) -> (usize, AssetLocation) {
+    use sha2::Digest;
+    let mut d = sha2::Sha512::new();
+    for s in seed {
+        d.update(s.as_bytes());
+        d.update(b"\0");
+    }
+    let d = d.finalize();
+    let n = d[0] as usize | (d[1] as usize) << 8 | (d[2] as usize) << 16 | (d[3] as usize) << 24;
+    let fname = base64::engine::general_purpose::URL_SAFE.encode(d);
+    let fname = &fname[..22]; // about 128 bits
+    (n, AssetLocation::Cache(fname.into()))
+}
+
+const CACHE_GENERATION_BUCKET_COUNT: usize = 1024;
+pub static CACHE_GENERATION_LOCKS: LazyLock<[Mutex<()>; CACHE_GENERATION_BUCKET_COUNT]> =
+    LazyLock::new(|| [(); CACHE_GENERATION_BUCKET_COUNT].map(|_| Mutex::new(())));
+
+pub async fn async_cache_file<Fun, Fut>(
+    seed: &[&str],
+    generate: Fun,
+) -> Result<AssetLocation, anyhow::Error>
+where
+    Fun: FnOnce(tokio::fs::File) -> Fut,
+    Fut: Future<Output = Result<(), anyhow::Error>>,
+{
+    let (bucket, location) = cache_location(seed);
+    // we need the lock even if the file exists, since somebody might still be in the process of writing it.
+    let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT].lock().await;
+    let exists = tokio::fs::try_exists(location.path()).await?;
+    if !exists {
+        let f = tokio::fs::File::create(location.path()).await?;
+        generate(f).await?;
+    }
+    drop(_guard);
+    Ok(location)
+}
+
+pub fn cache_file<Fun>(seed: &[&str], mut generate: Fun) -> Result<AssetLocation, anyhow::Error>
+where
+    Fun: FnMut(std::fs::File) -> Result<(), anyhow::Error>,
+{
+    let (bucket, location) = cache_location(seed);
+    // we need the lock even if the file exists, since somebody might still be in the process of writing it.
+    let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT].blocking_lock();
+    let exists = location.path().exists();
+    if !exists {
+        let f = std::fs::File::create(location.path())?;
+        generate(f)?;
+    }
+    drop(_guard);
+    Ok(location)
+}
+
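
For orientation, a minimal sketch of how a caller might use the new async helper. The function name render_thumbnail, its arguments, and the placeholder byte payload are illustrative assumptions and not part of this commit; only async_cache_file and AssetLocation come from the module added above.

    use jellybase::cache::async_cache_file;
    use jellycommon::AssetLocation;
    use tokio::io::AsyncWriteExt;

    // Hypothetical call site: the closure only runs on a cache miss; on a hit the
    // existing cache entry for the same seed is returned unchanged.
    async fn render_thumbnail(node_id: &str, width: u32) -> anyhow::Result<AssetLocation> {
        async_cache_file(&["thumb", node_id, &width.to_string()], |mut file| async move {
            let pixels = vec![0u8; 16]; // stand-in for the real (expensive) rendering work
            file.write_all(&pixels).await?;
            anyhow::Ok(())
        })
        .await
    }

The seed strings are hashed by cache_location into both the cache file name and a bucket index into the 1024 generation mutexes, so concurrent requests for the same seed serialize on generation instead of writing the same file twice.
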
diff --git a/base/src/lib.rs b/base/src/lib.rs
index 5a98be5..f130a8c 100644
--- a/base/src/lib.rs
+++ b/base/src/lib.rs
@@ -5,11 +5,10 @@
*/
#![feature(lazy_cell)]
pub mod permission;
+pub mod cache;
-use base64::Engine;
-use jellycommon::{AssetLocation, config::GlobalConfig};
-use std::{fs::File, future::Future, path::PathBuf, sync::LazyLock};
-use tokio::sync::Mutex;
+use jellycommon::{config::GlobalConfig, AssetLocation};
+use std::{fs::File, path::PathBuf, sync::LazyLock};
pub static CONF: LazyLock<GlobalConfig> = LazyLock::new(|| {
    serde_json::from_reader(
@@ -23,60 +22,6 @@ pub static CONF: LazyLock<GlobalConfig> = LazyLock::new(|| {
    .unwrap()
});
-pub fn cache_location(seed: &[&str]) -> (usize, AssetLocation) {
-    use sha2::Digest;
-    let mut d = sha2::Sha512::new();
-    for s in seed {
-        d.update(s.as_bytes());
-        d.update(b"\0");
-    }
-    let d = d.finalize();
-    let n = d[0] as usize | (d[1] as usize) << 8 | (d[2] as usize) << 16 | (d[3] as usize) << 24;
-    let fname = base64::engine::general_purpose::URL_SAFE.encode(d);
-    let fname = &fname[..22]; // about 128 bits
-    (n, AssetLocation::Cache(fname.into()))
-}
-
-const CACHE_GENERATION_BUCKET_COUNT: usize = 1024;
-pub static CACHE_GENERATION_LOCKS: LazyLock<[Mutex<()>; CACHE_GENERATION_BUCKET_COUNT]> =
-    LazyLock::new(|| [(); CACHE_GENERATION_BUCKET_COUNT].map(|_| Mutex::new(())));
-
-pub async fn async_cache_file<Fun, Fut>(
-    seed: &[&str],
-    generate: Fun,
-) -> Result<AssetLocation, anyhow::Error>
-where
-    Fun: FnOnce(tokio::fs::File) -> Fut,
-    Fut: Future<Output = Result<(), anyhow::Error>>,
-{
-    let (bucket, location) = cache_location(seed);
-    // we need a lock even if it exists since somebody might be still in the process of writing.
-    let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT].lock();
-    let exists = tokio::fs::try_exists(location.path()).await?;
-    if !exists {
-        let f = tokio::fs::File::create(location.path()).await?;
-        generate(f).await?;
-    }
-    drop(_guard);
-    Ok(location)
-}
-
-pub fn cache_file<Fun>(seed: &[&str], mut generate: Fun) -> Result<AssetLocation, anyhow::Error>
-where
-    Fun: FnMut(std::fs::File) -> Result<(), anyhow::Error>,
-{
-    let (bucket, location) = cache_location(seed);
-    // we need a lock even if it exists since somebody might be still in the process of writing.
-    let _guard = CACHE_GENERATION_LOCKS[bucket % CACHE_GENERATION_BUCKET_COUNT].lock();
-    let exists = location.path().exists();
-    if !exists {
-        let f = std::fs::File::create(location.path())?;
-        generate(f)?;
-    }
-    drop(_guard);
-    Ok(location)
-}
-
pub trait AssetLocationExt {
fn path(&self) -> PathBuf;
}
diff --git a/server/src/import.rs b/server/src/import.rs
index 8d8198a..d3919b5 100644
--- a/server/src/import.rs
+++ b/server/src/import.rs
@@ -7,7 +7,7 @@ use crate::{database::Database, federation::Federation, CONF};
use anyhow::{anyhow, bail, Context, Ok};
use async_recursion::async_recursion;
use futures::{stream::FuturesUnordered, StreamExt, TryFutureExt};
-use jellybase::async_cache_file;
+use jellybase::cache::async_cache_file;
use jellyclient::Session;
use jellycommon::{AssetLocation, MediaSource, Node, NodePrivate, RemoteImportOptions};
use log::{debug, error, info};
diff --git a/transcoder/src/image.rs b/transcoder/src/image.rs
index 3865348..a9b1533 100644
--- a/transcoder/src/image.rs
+++ b/transcoder/src/image.rs
@@ -1,6 +1,6 @@
use anyhow::Context;
use image::{imageops::FilterType, ImageFormat};
-use jellybase::{async_cache_file, AssetLocationExt};
+use jellybase::{cache::async_cache_file, AssetLocationExt};
use jellycommon::AssetLocation;
use log::{debug, info};
use rgb::FromSlice;
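
A similar sketch, under the same caveats, for the blocking variant; cached_report and its payload are made up for illustration. Because cache_file now takes the bucket mutex with blocking_lock, which panics when called from inside an async context, a caller running on the tokio runtime would need to go through something like tokio::task::spawn_blocking.

    use jellybase::cache::cache_file;
    use jellycommon::AssetLocation;
    use std::io::Write;

    // Hypothetical synchronous caller: the file is generated once, and later calls
    // with the same seed return the cached location without regenerating it.
    fn cached_report(key: &str) -> anyhow::Result<AssetLocation> {
        cache_file(&["report", key], |mut file| {
            file.write_all(b"generated once, then served from the cache\n")?;
            anyhow::Ok(())
        })
    }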