author     metamuffin <metamuffin@disroot.org>  2025-05-27 18:27:47 +0200
committer  metamuffin <metamuffin@disroot.org>  2025-05-27 18:28:01 +0200
commit     520f93f4fb6959d602b9e45d6bb47a06dcf60219 (patch)
tree       b104a5ae592a724930905ff3326c07c7055a9bf8
parent     a05c56befe3328448fcc497746986b9e13bfab18 (diff)
more cache code
-rw-r--r--  src/modules/cache.rs  55
1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/src/modules/cache.rs b/src/modules/cache.rs
index f5cd72d..34db902 100644
--- a/src/modules/cache.rs
+++ b/src/modules/cache.rs
@@ -18,12 +18,14 @@ use super::{Node, NodeContext, NodeKind, NodeRequest, NodeResponse};
use crate::{config::DynNode, error::ServiceError};
use anyhow::Result;
use bytes::Bytes;
+use headers::{CacheControl, HeaderMapExt};
use http::Response;
use http_body_util::{BodyExt, Full};
+use log::debug;
use serde::Deserialize;
use serde_yml::Value;
use sha2::{Digest, Sha256};
-use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc};
+use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc, time::Duration};
use tokio::sync::RwLock;
pub struct CacheKind;
@@ -57,21 +59,24 @@ impl Node for Cache {
request: NodeRequest,
) -> Pin<Box<dyn Future<Output = Result<NodeResponse, ServiceError>> + Send + Sync + 'a>> {
Box::pin(async move {
+ let allow_cache = request.method().is_safe();
+
+ if !allow_cache {
+ return self.config.next.handle(context, request).await;
+ }
+
// not very fast
let mut hasher = Sha256::new();
hasher.update(request.method().as_str().len().to_be_bytes());
hasher.update(request.method().as_str());
hasher.update(request.uri().path().len().to_be_bytes());
hasher.update(request.uri().path());
- hasher.update(if request.uri().query().is_some() {
- [1]
- } else {
- [0]
- });
+ hasher.update([request.uri().query().is_some() as u8]);
if let Some(q) = request.uri().query() {
hasher.update(q.len().to_be_bytes());
hasher.update(q);
}
+ // TODO which headers are important to caching?
hasher.update(request.headers().len().to_be_bytes());
for (k, v) in request.headers() {
hasher.update(k.as_str().len().to_be_bytes());
@@ -82,27 +87,43 @@ impl Node for Cache {
let key: [u8; 32] = hasher.finalize().into();
if let Some(resp) = self.entries.read().await.get(&key) {
+ debug!("hit");
return Ok(resp
.to_owned()
.map(|b| Full::new(b).map_err(|e| match e {}).boxed()));
}
+ debug!("miss");
let response = self.config.next.handle(context, request).await?;
- let h = response.headers().to_owned();
- let s = response.status().to_owned();
- let body = response.collect().await?.to_bytes();
+ let cache_control = response
+ .headers()
+ .typed_get::<CacheControl>()
+ .unwrap_or(CacheControl::new().with_max_age(Duration::from_secs(120))); // TODO what is the correct default?
- let mut r1 = Response::new(Full::new(body.clone()).map_err(|e| match e {}).boxed());
- *r1.headers_mut() = h.clone();
- *r1.status_mut() = s;
+ let allow_store = !cache_control.no_store() && !cache_control.no_cache();
- let mut r2 = Response::new(body);
- *r2.headers_mut() = h;
- *r2.status_mut() = s;
- self.entries.write().await.insert(key, r2);
+ let response = if allow_store {
+ debug!("store");
+ let h = response.headers().to_owned();
+ let s = response.status().to_owned();
+ let body = response.collect().await?.to_bytes();
+
+ let mut r1 = Response::new(Full::new(body.clone()).map_err(|e| match e {}).boxed());
+ *r1.headers_mut() = h.clone();
+ *r1.status_mut() = s;
+
+ let mut r2 = Response::new(body);
+ *r2.headers_mut() = h;
+ *r2.status_mut() = s;
+ self.entries.write().await.insert(key, r2);
+ r1
+ } else {
+ debug!("no store");
+ response
+ };
- Ok(r1)
+ Ok(response)
})
}
}
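
Aside (not part of the commit): a minimal, standalone sketch of the store decision this diff introduces, using the same headers-crate API it imports (CacheControl and HeaderMapExt::typed_get). The may_store helper name is made up for illustration, and the 120-second fallback simply mirrors the placeholder default from the diff; the commit's open TODO about the correct default still stands.

use std::time::Duration;

use headers::{CacheControl, HeaderMapExt};
use http::Response;

/// Hypothetical helper: returns true when a response may be inserted into the cache.
fn may_store<B>(response: &Response<B>) -> bool {
    let cache_control = response
        .headers()
        .typed_get::<CacheControl>()
        // Assumed fallback when upstream sends no Cache-Control header
        // (the diff's placeholder value of 120 seconds).
        .unwrap_or_else(|| CacheControl::new().with_max_age(Duration::from_secs(120)));
    // Skip storing when the upstream forbids it.
    !cache_control.no_store() && !cache_control.no_cache()
}

fn main() {
    let cacheable = Response::builder()
        .header("cache-control", "max-age=300")
        .body(())
        .unwrap();
    let uncacheable = Response::builder()
        .header("cache-control", "no-store")
        .body(())
        .unwrap();
    assert!(may_store(&cacheable));
    assert!(!may_store(&uncacheable));
}

Note that max-age is parsed but not enforced as an expiry here, matching the diff, which inserts entries into the in-memory map without a TTL.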