lemmy/server/src/rate_limit/rate_limiter.rs
Federation async (#848)
* Asyncify more

* I guess these changed

* Clean PR a bit

* Convert more away from failure error

* Config changes for testing federation

* It was DNS

So actix-web's client relies on Trust-DNS Resolver to figure out
where to send data, but Trust-DNS Resolver doesn't seem to play
nicely with Docker, which expressed itself as the name failing to
resolve to an IP address _the first time_ a request was made. The
fix was literally to make the request again (which I limited to 3
attempts total, without exceeding the request timeout overall).

* Only retry for ConnectError

Since Trust-DNS Resolver was causing ConnectError::Timeout, this
change limits the retry to only that error, returning immediately
for any other error (see the sketch below).
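
A minimal sketch of that retry shape, independent of the actual awc
client code in this PR (the helper name `retry_request`, the
`MAX_RETRIES` constant, and the `is_retryable` predicate are
illustrative, not names from the PR):

use std::future::Future;

/// Retry an async operation up to 3 attempts total, but only when the
/// error is one the caller marks as transient (e.g. a connect timeout);
/// any other error is returned immediately.
async fn retry_request<F, Fut, T, E>(
  mut do_request: F,
  is_retryable: impl Fn(&E) -> bool,
) -> Result<T, E>
where
  F: FnMut() -> Fut,
  Fut: Future<Output = Result<T, E>>,
{
  const MAX_RETRIES: u32 = 3; // total attempts, matching the limit above
  let mut attempt = 1;
  loop {
    match do_request().await {
      Ok(res) => return Ok(res),
      // Transient error with attempts remaining: try again.
      Err(e) if is_retryable(&e) && attempt < MAX_RETRIES => attempt += 1,
      // Non-retryable error, or retries exhausted: give up.
      Err(e) => return Err(e),
    }
  }
}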

* Use http sig norm 0.4.0-alpha for actix-web 3.0 support

* Blocking function, retry http requests

* cargo +nightly fmt

* Only create one pictrs dir

* Don't yarn build

* cargo +nightly fmt

use super::IPAddr;
use crate::{api::APIError, LemmyError};
use log::debug;
use std::{collections::HashMap, time::SystemTime};
use strum::IntoEnumIterator;

#[derive(Debug, Clone)]
pub struct RateLimitBucket {
  last_checked: SystemTime,
  /// Tokens currently available; -2.0 is a sentinel meaning "not yet initialized".
  allowance: f64,
}

// `EnumIter` (a strum_macros derive) lets `RateLimitType::iter()` walk every variant.
#[derive(Eq, PartialEq, Hash, Debug, EnumIter, Copy, Clone)]
pub enum RateLimitType {
  Message,
  Register,
  Post,
}

/// Rate limiting based on rate type and IP addr
#[derive(Debug, Clone)]
pub struct RateLimiter {
  pub buckets: HashMap<RateLimitType, HashMap<IPAddr, RateLimitBucket>>,
}

impl Default for RateLimiter {
  fn default() -> Self {
    Self {
      buckets: HashMap::new(),
    }
  }
}

impl RateLimiter {
  /// Ensure a bucket exists for this IP under every rate limit type,
  /// seeding new buckets with the -2.0 "uninitialized" sentinel.
  fn insert_ip(&mut self, ip: &str) {
    for rate_limit_type in RateLimitType::iter() {
      if self.buckets.get(&rate_limit_type).is_none() {
        self.buckets.insert(rate_limit_type, HashMap::new());
      }

      if let Some(bucket) = self.buckets.get_mut(&rate_limit_type) {
        if bucket.get(ip).is_none() {
          bucket.insert(
            ip.to_string(),
            RateLimitBucket {
              last_checked: SystemTime::now(),
              allowance: -2f64,
            },
          );
        }
      }
    }
  }

  /// Token-bucket check: each call spends one token; tokens refill at
  /// `rate / per` per second. With `check_only` set, the state is
  /// inspected but no token is spent.
  #[allow(clippy::float_cmp)]
  pub(super) fn check_rate_limit_full(
    &mut self,
    type_: RateLimitType,
    ip: &str,
    rate: i32,
    per: i32,
    check_only: bool,
  ) -> Result<(), LemmyError> {
    self.insert_ip(ip);
    if let Some(bucket) = self.buckets.get_mut(&type_) {
      if let Some(rate_limit) = bucket.get_mut(ip) {
        let current = SystemTime::now();
        let time_passed = current.duration_since(rate_limit.last_checked)?.as_secs() as f64;

        // The initial value: a fresh bucket starts with a full allowance.
        if rate_limit.allowance == -2f64 {
          rate_limit.allowance = rate as f64;
        };

        // Refill tokens for the elapsed time (clamped to the cap on spending calls).
        rate_limit.last_checked = current;
        rate_limit.allowance += time_passed * (rate as f64 / per as f64);
        if !check_only && rate_limit.allowance > rate as f64 {
          rate_limit.allowance = rate as f64;
        }

        if rate_limit.allowance < 1.0 {
          // Fewer than one token left: reject the request.
          debug!(
            "Rate limited IP: {}, time_passed: {}, allowance: {}",
            ip, time_passed, rate_limit.allowance
          );
          Err(
            APIError {
              message: format!("Too many requests. {} per {} seconds", rate, per),
            }
            .into(),
          )
        } else {
          if !check_only {
            rate_limit.allowance -= 1.0;
          }
          Ok(())
        }
      } else {
        Ok(())
      }
    } else {
      Ok(())
    }
  }
}
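
For context, a hedged usage sketch as a module-level test (the IP and
the 3-per-hour numbers are illustrative, not Lemmy's configured
limits): with rate = 3 and per = 3600, a fresh bucket starts with 3
tokens and refills at 3/3600 tokens per second, so a fourth immediate
request is rejected.

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn three_per_hour_then_blocked() {
    let mut limiter = RateLimiter::default();
    // The first 3 checks succeed, each spending one token.
    for _ in 0..3 {
      assert!(limiter
        .check_rate_limit_full(RateLimitType::Register, "10.0.0.1", 3, 3600, false)
        .is_ok());
    }
    // The 4th is rejected until enough refill time (~1200 s per token) passes.
    assert!(limiter
      .check_rate_limit_full(RateLimitType::Register, "10.0.0.1", 3, 3600, false)
      .is_err());
  }
}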