//! Load balancing module
//!
//! Given a set of handlers, the handler that is the least busy will handle the next request.
//! Current implementation does not scale well for many handlers.
use super::{Node, NodeContext, NodeKind, NodeRequest, NodeResponse};
use crate::{config::DynNode, error::ServiceError};
use anyhow::Result;
use serde::Deserialize;
use serde_yml::Value;
use std::{
future::Future,
pin::Pin,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// Factory for [`LoadBalance`] nodes; registered under the name `"loadbalance"`.
pub struct LoadBalanceKind;
/// Deserialized YAML configuration: the list of downstream nodes to balance across.
#[derive(Deserialize)]
struct LoadBalanceConfig(Vec<DynNode>);
/// Least-busy load balancer. `load[i]` counts requests currently in flight on
/// `config.0[i]`; the two vectors are built together and always the same length.
struct LoadBalance {
    // In-flight request count per downstream node (index-aligned with `config.0`).
    load: Vec<AtomicUsize>,
    config: LoadBalanceConfig,
}
impl NodeKind for LoadBalanceKind {
    /// Identifier this kind is referred to by in configuration files.
    fn name(&self) -> &'static str {
        "loadbalance"
    }

    /// Build a [`LoadBalance`] node from its YAML configuration: a sequence of
    /// downstream nodes, each paired with a zero-initialized in-flight counter.
    ///
    /// # Errors
    /// Returns an error if `config` does not deserialize into a list of nodes.
    fn instanciate(&self, config: Value) -> Result<Arc<dyn Node>> {
        let parsed: LoadBalanceConfig = serde_yml::from_value(config)?;
        // One counter per downstream node, index-aligned with `parsed.0`.
        let counters = (0..parsed.0.len())
            .map(|_| AtomicUsize::new(0))
            .collect();
        Ok(Arc::new(LoadBalance {
            load: counters,
            config: parsed,
        }))
    }
}
impl Node for LoadBalance {
fn handle<'a>(
&'a self,
context: &'a mut NodeContext,
request: NodeRequest,
) -> Pin<Box<dyn Future<Output = Result<NodeResponse, ServiceError>> + Send + Sync + 'a>> {
Box::pin(async move {
let index = self
.load
.iter()
.enumerate()
.min_by_key(|(_, k)| k.load(Ordering::Relaxed))
.map(|(i, _)| i)
.ok_or(ServiceError::CustomStatic("zero routes to balance load"))?;
self.load[index].fetch_add(1, Ordering::Relaxed);
let resp = self.config.0[index].handle(context, request).await;
self.load[index].fetch_sub(1, Ordering::Relaxed);
resp
})
}
}