Merge pull request #123 from modrinth/fix-ratelimit
Bump ratelimit to 200 RPM, allow specified IPs to have lax ratelimit …
Commit e2183c2214

.env (3 changed lines)
@@ -1,7 +1,6 @@
 DEBUG=true
 RUST_LOG=info,sqlx::query=warn
 
-CORS_ORIGINS='["http://localhost:3000","https://modrinth.com"]'
 CDN_URL=https://cdn.modrinth.com
 
 DATABASE_URL=postgresql://labrinth:labrinth@localhost/labrinth
@@ -32,3 +31,5 @@ VERSION_INDEX_INTERVAL=1800
 
 GITHUB_CLIENT_ID=3acffb2e808d16d4b226
 GITHUB_CLIENT_SECRET=none
+
+RATE_LIMIT_IGNORE_IPS='[]'
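The dropped CORS_ORIGINS variable reflects the src/main.rs change below, where the per-origin allow list is replaced by a wildcard CORS header. The new RATE_LIMIT_IGNORE_IPS holds a JSON array of client IPs that get the relaxed rate limit; a deployment would list the frontend renderer's address, for example (the address below is a documentation placeholder, not from the PR):

    RATE_LIMIT_IGNORE_IPS='["203.0.113.7"]'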

.github/workflows/docker-compile-master.yml (vendored, 12 changed lines)
@@ -7,12 +7,22 @@ on:
 env:
   CARGO_TERM_COLOR: always
   SQLX_OFFLINE: true
 
 jobs:
   build:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
+    -
+      name: Set up Docker Buildx
+      uses: docker/setup-buildx-action@v1
+    -
+      name: Cache Docker layers
+      uses: actions/cache@v2
+      with:
+        path: /tmp/.buildx-cache
+        key: ${{ runner.os }}-buildx-${{ github.sha }}
+        restore-keys: |
+          ${{ runner.os }}-buildx-
     - name: Build and push Docker images
       uses: docker/build-push-action@v1
       with:
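The Buildx and cache steps follow the standard actions/cache pattern for Docker layer caching: the key embeds the exact commit SHA, so every run saves a fresh cache entry, while the restore-keys prefix lets a run seed from the newest cache of any earlier commit when there is no exact hit. The same pair of steps is added to the tag and branch workflows below.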

.github/workflows/docker-compile-tag.yml (vendored, 11 changed lines)
@@ -14,6 +14,17 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
+    -
+      name: Set up Docker Buildx
+      uses: docker/setup-buildx-action@v1
+    -
+      name: Cache Docker layers
+      uses: actions/cache@v2
+      with:
+        path: /tmp/.buildx-cache
+        key: ${{ runner.os }}-buildx-${{ github.sha }}
+        restore-keys: |
+          ${{ runner.os }}-buildx-
     - name: Build and push Docker images
       uses: docker/build-push-action@v1
       with:

.github/workflows/docker-compile.yml (vendored, 16 changed lines)
@@ -1,22 +1,28 @@
 name: Docker image build
 
 on:
-  push:
-    branches-ignore:
-      - master
   pull_request:
 env:
   CARGO_TERM_COLOR: always
   SQLX_OFFLINE: true
 
 jobs:
   build:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
-    - name: Build and push Docker images
+    - name: Set up Docker Buildx
+      uses: docker/setup-buildx-action@v1
+    - name: Cache Docker layers
+      uses: actions/cache@v2
+      with:
+        path: /tmp/.buildx-cache
+        key: ${{ runner.os }}-buildx-${{ github.sha }}
+        restore-keys: |
+          ${{ runner.os }}-buildx-
+    - name: Build Docker images
       uses: docker/build-push-action@v1
       with:
+        push: false
         username: ${{ github.actor }}
         password: ${{ secrets.GITHUB_TOKEN }}
         registry: docker.pkg.github.com
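Note the trigger change in this last workflow: the push trigger for non-master branches is dropped in favor of pull_request only, and push: false means branch and PR builds compile the image without publishing it. Publishing stays in the master and tag workflows.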

Cargo.lock (generated, 1358 changed lines): diff suppressed because it is too large.

Cargo.toml (18 changed lines)
@@ -17,6 +17,7 @@ actix-rt = "1.1.1"
 actix-files = "0.4.0"
 actix-multipart = "0.3.0"
 actix-cors = "0.4.1"
+actix-ratelimit = "0.3.0"
 
 meilisearch-sdk = "0.4.0"
 reqwest = { version = "0.10.8", features = ["json"] }
@@ -42,19 +43,4 @@ futures-timer = "3.0.2"
 rust-s3 = "0.26.1"
 async-trait = "0.1.41"
 
-[dependencies.sqlx]
-git = "https://github.com/launchbadge/sqlx/"
-branch = "master"
-default-features = false
-features = ["runtime-actix", "postgres", "chrono", "offline", "macros", "migrate"]
-
-[dependencies.sqlx-macros]
-git = "https://github.com/launchbadge/sqlx/"
-branch = "master"
-default-features = false
-features = ["runtime-actix", "postgres", "chrono", "offline"]
-
-[dependencies.actix-ratelimit]
-# Temp until actix-ratelimit bumps version on cargo
-git = "https://github.com/TerminalWitchcraft/actix-ratelimit"
-rev = "870822067dfeae7cc0304352d81c4cb79ee27f5a"
+sqlx = { version = "0.4.2", features = ["runtime-actix-rustls", "postgres", "chrono", "offline", "macros", "migrate"] }
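With actix-ratelimit 0.3.0 and sqlx 0.4.2 now published on crates.io, the temporary git pins (including the TerminalWitchcraft/actix-ratelimit rev) can be dropped, and sqlx switches from the git master's runtime-actix feature to the released runtime-actix-rustls. Since the separate sqlx-macros git dependency is gone, the ID-generation module's import hunk that follows pulls the Type derive through the sqlx facade instead.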

@@ -1,6 +1,6 @@
 use super::DatabaseError;
 use crate::models::ids::random_base62_rng;
-use sqlx_macros::Type;
+use sqlx::sqlx_macros::Type;
 
 const ID_RETRY_COUNT: usize = 20;
 

src/main.rs (54 changed lines)
@@ -1,10 +1,12 @@
 use crate::file_hosting::S3Host;
 use actix_cors::Cors;
+use actix_ratelimit::errors::ARError;
 use actix_ratelimit::{MemoryStore, MemoryStoreActor, RateLimiter};
 use actix_web::{http, web, App, HttpServer};
 use env_logger::Env;
 use gumdrop::Options;
 use log::{error, info, warn};
+use rand::Rng;
 use search::indexing::index_mods;
 use search::indexing::IndexingSettings;
 use std::sync::Arc;
@@ -234,32 +236,52 @@ async fn main() -> std::io::Result<()> {
         pepper: crate::models::ids::Base62Id(crate::models::ids::random_base62(11)).to_string(),
     };
 
-    let allowed_origins = dotenv::var("CORS_ORIGINS")
-        .ok()
-        .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
-        .unwrap_or_else(|| vec![String::from("http://localhost")]);
-
     let store = MemoryStore::new();
 
     info!("Starting Actix HTTP server!");
 
     // Init App
     HttpServer::new(move || {
-        let mut cors = Cors::new()
-            .allowed_methods(vec!["GET", "POST", "DELETE", "PATCH", "PUT"])
-            .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
-            .allowed_header(http::header::CONTENT_TYPE)
-            .max_age(3600);
-        for allowed_origin in &allowed_origins {
-            cors = cors.allowed_origin(allowed_origin);
-        }
-
         App::new()
-            .wrap(cors.finish())
             .wrap(
-                RateLimiter::new(MemoryStoreActor::from(store.clone()).start())
-                    .with_interval(std::time::Duration::from_secs(60))
-                    .with_max_requests(100),
+                Cors::new()
+                    .allowed_methods(vec!["GET", "POST", "DELETE", "PATCH", "PUT"])
+                    .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
+                    .allowed_header(http::header::CONTENT_TYPE)
+                    .send_wildcard()
+                    .max_age(3600)
+                    .finish(),
+            )
+            .wrap(
+                // This is a hacky workaround to allowing the frontend server-side renderer to have
+                // an unlimited rate limit, since there is no current way with this library to
+                // have dynamic rate-limit max requests
+                RateLimiter::new(MemoryStoreActor::from(store.clone()).start())
+                    .with_identifier(|req| {
+                        let connection_info = req.connection_info();
+                        let ip = String::from(
+                            connection_info
+                                .remote_addr()
+                                .ok_or(ARError::IdentificationError)?,
+                        );
+
+                        let ignore_ips = dotenv::var("RATE_LIMIT_IGNORE_IPS")
+                            .ok()
+                            .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
+                            .unwrap_or_else(Vec::new);
+
+                        if ignore_ips.contains(&ip) {
+                            // At an even distribution of numbers, this will allow at the most
+                            // 3000 requests per minute from the frontend, which is reasonable
+                            // (50 requests per second)
+                            let random = rand::thread_rng().gen_range(1, 15);
+                            return Ok(format!("{}-{}", ip, random));
+                        }
+
+                        Ok(ip)
+                    })
+                    .with_interval(std::time::Duration::from_secs(60))
+                    .with_max_requests(200),
             )
             .data(pool.clone())
             .data(file_host.clone())
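The identifier trick above is the heart of the PR: actix-ratelimit counts requests per identifier string, so returning a randomized suffix for whitelisted IPs spreads their traffic across several independent counters, each with its own 200-requests-per-minute budget. A minimal standalone sketch of the same idea, with hypothetical names (rate_limit_key, buckets) and the rand 0.7 two-argument gen_range the PR uses:

    use rand::Rng;

    // Sketch only: a limiter keyed on the returned string gives ordinary
    // clients one bucket per IP, while ignored IPs are spread across
    // `buckets` counters, multiplying their effective budget.
    fn rate_limit_key(ip: &str, ignore_ips: &[String], buckets: u32) -> String {
        if ignore_ips.iter().any(|i| i.as_str() == ip) {
            // rand 0.7's gen_range(low, high) samples low..high (high exclusive),
            // mirroring the committed gen_range(1, 15).
            let bucket = rand::thread_rng().gen_range(1, buckets);
            return format!("{}-{}", ip, bucket);
        }
        ip.to_string()
    }

Two observations on the committed version: gen_range(1, 15) has an exclusive upper bound, so it yields 14 distinct suffixes and a ceiling of 14 × 200 = 2800 requests per minute rather than the 3000 the comment states (gen_range(1, 16) would match it), and the closure re-reads RATE_LIMIT_IGNORE_IPS from dotenv on every request, which keeps the list hot-reloadable at the cost of a parse per call.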
@@ -305,12 +327,12 @@ fn check_env_vars() -> bool {
         }
     }
 
-    if dotenv::var("CORS_ORIGINS")
+    if dotenv::var("RATE_LIMIT_IGNORE_IPS")
        .ok()
         .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
         .is_none()
     {
-        warn!("Variable `CORS_ORIGINS` missing in dotenv or not a json array of strings");
+        warn!("Variable `RATE_LIMIT_IGNORE_IPS` missing in dotenv or not a json array of strings");
         failed |= true;
     }
 
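Both this startup check and the limiter closure repeat the same dotenv-plus-serde_json dance. A tiny helper would keep the two in sync; hypothetical, not part of the PR:

    // Hypothetical consolidation of the repeated "JSON string-array env var"
    // parsing used by check_env_vars and by the limiter's identifier closure.
    fn json_array_var(name: &str) -> Option<Vec<String>> {
        dotenv::var(name)
            .ok()
            .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
    }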
@@ -7,6 +7,7 @@ use crate::{database, Pepper};
 use actix_web::{delete, get, patch, web, HttpRequest, HttpResponse};
 use serde::{Deserialize, Serialize};
 use sqlx::PgPool;
+use std::borrow::Borrow;
 use std::sync::Arc;
 
 // TODO: this needs filtering, and a better response type

@@ -633,6 +634,7 @@ pub struct DownloadRedirect {
 }
 
 // under /api/v1/version_file/{hash}/download
+#[allow(clippy::await_holding_refcell_ref)]
 #[get("{version_id}/download")]
 pub async fn download_version(
     req: HttpRequest,

@@ -659,7 +661,7 @@ pub async fn download_version(
 
     if let Some(id) = result {
         let real_ip = req.connection_info();
-        let ip_option = real_ip.realip_remote_addr();
+        let ip_option = real_ip.borrow().remote_addr();
 
         if let Some(ip) = ip_option {
             let hash = sha1::Sha1::from(format!("{}{}", ip, pepper.pepper)).hexdigest();
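The remote_addr change is worth flagging: realip_remote_addr() honors Forwarded/X-Forwarded-For headers, while remote_addr() is the peer address of the TCP connection, so behind a reverse proxy the download counter now sees the proxy's IP unless middleware restores the client address. The switch does, however, make this handler consistent with the rate limiter's identifier closure, which also reads remote_addr(). The hashing itself pairs the IP with the random per-process pepper generated in main.rs, so raw addresses are never persisted; a standalone sketch of that step (function name hypothetical, sha1 crate 0.6 API):

    // Download-dedup key as computed above: SHA-1 of the client IP concatenated
    // with the random per-process pepper, so the stored value cannot be
    // reversed to an address without knowing the pepper.
    fn download_dedup_key(ip: &str, pepper: &str) -> String {
        sha1::Sha1::from(format!("{}{}", ip, pepper)).hexdigest()
    }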