summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authormetamuffin <metamuffin@disroot.org>2024-08-24 01:33:49 +0200
committermetamuffin <metamuffin@disroot.org>2024-08-24 01:33:49 +0200
commitecb3378d29a4f53604a9287ef7027a299f3d45d5 (patch)
tree3ac242dac925271ec1e655c9db76e8afb8db9742
parent0faa378e1ff9cf9ce0b5b08dac8520a8db49bf2d (diff)
downloadgnix-ecb3378d29a4f53604a9287ef7027a299f3d45d5.tar
gnix-ecb3378d29a4f53604a9287ef7027a299f3d45d5.tar.bz2
gnix-ecb3378d29a4f53604a9287ef7027a299f3d45d5.tar.zst
a bit of doc, stub cache impl
-rw-r--r--src/modules/cache.rs72
-rw-r--r--src/modules/loadbalance.rs4
-rw-r--r--src/modules/mod.rs1
3 files changed, 77 insertions, 0 deletions
diff --git a/src/modules/cache.rs b/src/modules/cache.rs
new file mode 100644
index 0000000..d306ed1
--- /dev/null
+++ b/src/modules/cache.rs
@@ -0,0 +1,72 @@
+//! Response caching module
+//!
+//! Design considerations for a full implementation (none implemented yet):
+//! - check cache header
+//! - ignore responses that get too large
+//! - ignore requests with body (or too large body)
+//! - LRU cache pruning
+//! - different backends:
+//! - in memory (HashMap)
+//! - on disk (redb? filesystem?)
+//! - external db?
+use super::{Node, NodeContext, NodeKind, NodeRequest, NodeResponse};
+use crate::{config::DynNode, error::ServiceError};
+use anyhow::Result;
+use bytes::Bytes;
+use http::Response;
+use http_body_util::BodyExt;
+use serde::Deserialize;
+use serde_yaml::Value;
+use sha2::{Digest, Sha256};
+use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc};
+
+pub struct CacheKind;
+
+#[derive(Deserialize)]
+struct CacheConfig {
+ next: DynNode,
+}
+
+struct Cache {
+ entries: HashMap<[u8; 32], Response<Bytes>>,
+ config: CacheConfig,
+}
+
+impl NodeKind for CacheKind {
+ fn name(&self) -> &'static str {
+ "cache"
+ }
+ fn instanciate(&self, config: Value) -> Result<Arc<dyn Node>> {
+ let config = serde_yaml::from_value::<CacheConfig>(config)?;
+ Ok(Arc::new(Cache {
+ config,
+ entries: HashMap::new(),
+ }))
+ }
+}
+impl Node for Cache {
+ fn handle<'a>(
+ &'a self,
+ context: &'a mut NodeContext,
+ request: NodeRequest,
+ ) -> Pin<Box<dyn Future<Output = Result<NodeResponse, ServiceError>> + Send + Sync + 'a>> {
+ Box::pin(async move {
+            // TODO: key derivation is a placeholder — it hashes only method + path + query and ignores request headers (e.g. Vary, Cache-Control), so it is not a correct cache key yet.
+ let mut hasher = Sha256::new();
+ hasher.update(request.method().as_str());
+ hasher.update(request.uri().path());
+ if let Some(q) = request.uri().query() {
+ hasher.update(q);
+ }
+ let key: [u8; 32] = hasher.finalize().try_into().unwrap();
+
+ if let Some(resp) = self.entries.get(&key) {
+                // TODO: serve the hit once storage works, roughly: Ok(resp.to_owned().map(|b| b.map_err(|e| match e {}).boxed())) — blocked on making `entries` writable (needs interior mutability behind &self).
+ }
+
+ let response = self.config.next.handle(context, request).await?;
+
+ Ok(response)
+ })
+ }
+}
diff --git a/src/modules/loadbalance.rs b/src/modules/loadbalance.rs
index 168db33..5358b03 100644
--- a/src/modules/loadbalance.rs
+++ b/src/modules/loadbalance.rs
@@ -1,3 +1,7 @@
+//! Load balancing module
+//!
+//! Given a set of handlers, the handler that is the least busy will handle the next request.
+//! Current implementation does not scale well for many handlers.
use super::{Node, NodeContext, NodeKind, NodeRequest, NodeResponse};
use crate::{config::DynNode, error::ServiceError};
use anyhow::Result;
diff --git a/src/modules/mod.rs b/src/modules/mod.rs
index e0f74ec..2494771 100644
--- a/src/modules/mod.rs
+++ b/src/modules/mod.rs
@@ -21,6 +21,7 @@ pub mod proxy;
pub mod redirect;
pub mod switch;
pub mod loadbalance;
+pub mod cache;
pub type NodeRequest = Request<Incoming>;
pub type NodeResponse = Response<BoxBody<Bytes, ServiceError>>;