From ecb3378d29a4f53604a9287ef7027a299f3d45d5 Mon Sep 17 00:00:00 2001
From: metamuffin
Date: Sat, 24 Aug 2024 01:33:49 +0200
Subject: a bit of doc, stub cache impl

---
 src/modules/cache.rs       | 72 ++++++++++++++++++++++++++++++++++++++++++++++
 src/modules/loadbalance.rs |  4 +++
 src/modules/mod.rs         |  1 +
 3 files changed, 77 insertions(+)
 create mode 100644 src/modules/cache.rs

(limited to 'src')

diff --git a/src/modules/cache.rs b/src/modules/cache.rs
new file mode 100644
index 0000000..d306ed1
--- /dev/null
+++ b/src/modules/cache.rs
@@ -0,0 +1,72 @@
+//! Response caching module
+//!
+//! Considerations:
+//! - check cache header
+//! - ignore responses that get too large
+//! - ignore requests with body (or too large body)
+//! - LRU cache pruning
+//! - different backends:
+//!   - in memory (HashMap)
+//!   - on disk (redb? filesystem?)
+//!   - external db?
+use super::{Node, NodeContext, NodeKind, NodeRequest, NodeResponse};
+use crate::{config::DynNode, error::ServiceError};
+use anyhow::Result;
+use bytes::Bytes;
+use http::Response;
+use http_body_util::BodyExt;
+use serde::Deserialize;
+use serde_yaml::Value;
+use sha2::{Digest, Sha256};
+use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc};
+
+pub struct CacheKind;
+
+#[derive(Deserialize)]
+struct CacheConfig {
+    next: DynNode,
+}
+
+struct Cache {
+    entries: HashMap<[u8; 32], Response<Bytes>>,
+    config: CacheConfig,
+}
+
+impl NodeKind for CacheKind {
+    fn name(&self) -> &'static str {
+        "cache"
+    }
+    fn instanciate(&self, config: Value) -> Result<Arc<dyn Node>> {
+        let config = serde_yaml::from_value::<CacheConfig>(config)?;
+        Ok(Arc::new(Cache {
+            config,
+            entries: HashMap::new(),
+        }))
+    }
+}
+impl Node for Cache {
+    fn handle<'a>(
+        &'a self,
+        context: &'a mut NodeContext,
+        request: NodeRequest,
+    ) -> Pin<Box<dyn Future<Output = Result<NodeResponse, ServiceError>> + Send + Sync + 'a>> {
+        Box::pin(async move {
+            // totally wrong...
+            let mut hasher = Sha256::new();
+            hasher.update(request.method().as_str());
+            hasher.update(request.uri().path());
+            if let Some(q) = request.uri().query() {
+                hasher.update(q);
+            }
+            let key: [u8; 32] = hasher.finalize().try_into().unwrap();
+
+            if let Some(resp) = self.entries.get(&key) {
+                // Ok(resp.to_owned().map(|b| b.map_err(|e| match e {}).boxed()))
+            }
+
+            let response = self.config.next.handle(context, request).await?;
+
+            Ok(response)
+        })
+    }
+}
diff --git a/src/modules/loadbalance.rs b/src/modules/loadbalance.rs
index 168db33..5358b03 100644
--- a/src/modules/loadbalance.rs
+++ b/src/modules/loadbalance.rs
@@ -1,3 +1,7 @@
+//! Load balancing module
+//!
+//! Given a set of handlers, the handler that is the least busy will handle the next request.
+//! Current implementation does not scale well for many handlers.
 use super::{Node, NodeContext, NodeKind, NodeRequest, NodeResponse};
 use crate::{config::DynNode, error::ServiceError};
 use anyhow::Result;
diff --git a/src/modules/mod.rs b/src/modules/mod.rs
index e0f74ec..2494771 100644
--- a/src/modules/mod.rs
+++ b/src/modules/mod.rs
@@ -21,6 +21,7 @@ pub mod proxy;
 pub mod redirect;
 pub mod switch;
 pub mod loadbalance;
+pub mod cache;
 
 pub type NodeRequest = Request;
 pub type NodeResponse = Response>;
--
cgit v1.2.3-70-g09d2
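
For reference, a minimal sketch of the hit/store path that the stub above leaves open, assuming only the sha2 dependency the module already uses; the ResponseCache and CachedResponse names are hypothetical and not part of the repository. It keeps the same keying idea as the commit (SHA-256 over method, path, and query) but puts the map behind a Mutex so a handler that only has &self can insert, and replays a clone of the buffered body on a hit.

// Hypothetical sketch, not the project's API: an in-memory response cache
// keyed by SHA-256(method + path + query), with interior mutability so it
// can be populated from a `&self` handler.
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::sync::Mutex;

// A cached response reduced to the parts needed to replay it.
struct CachedResponse {
    status: u16,
    body: Vec<u8>,
}

struct ResponseCache {
    entries: Mutex<HashMap<[u8; 32], CachedResponse>>,
}

impl ResponseCache {
    fn new() -> Self {
        Self { entries: Mutex::new(HashMap::new()) }
    }

    // Same keying scheme as the stub: hash method, path, and query.
    fn key(method: &str, path: &str, query: Option<&str>) -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(method);
        hasher.update(path);
        if let Some(q) = query {
            hasher.update(q);
        }
        let mut key = [0u8; 32];
        key.copy_from_slice(&hasher.finalize());
        key
    }

    // On a hit, hand back a clone of the buffered response so the entry stays cached.
    fn get(&self, key: &[u8; 32]) -> Option<CachedResponse> {
        self.entries.lock().unwrap().get(key).map(|r| CachedResponse {
            status: r.status,
            body: r.body.clone(),
        })
    }

    // After the downstream handler produced a response, buffer it and store it.
    fn put(&self, key: [u8; 32], status: u16, body: Vec<u8>) {
        self.entries.lock().unwrap().insert(key, CachedResponse { status, body });
    }
}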
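
The loadbalance doc comment added above describes handing each request to the least busy handler. As an illustration of that idea, and of why a linear scan stops scaling for many handlers, here is a hypothetical sketch that does not use the repository's actual Node/DynNode types.

use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical backend handle; in the real module this would wrap a handler node.
struct Backend {
    name: &'static str,
    in_flight: AtomicUsize,
}

// "Least busy" selection: scan all backends and take the one with the fewest
// in-flight requests. This is an O(n) scan per request, which is the scaling
// limitation the doc comment mentions for large handler sets.
fn pick_least_busy(backends: &[Backend]) -> &Backend {
    backends
        .iter()
        .min_by_key(|b| b.in_flight.load(Ordering::Relaxed))
        .expect("at least one backend configured")
}

// Callers would increment `in_flight` before forwarding a request to the
// chosen backend and decrement it once the response has completed.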