Merge pull request #123 from modrinth/fix-ratelimit

Bump ratelimit to 200 RPM, allow specified IPs to have lax ratelimit …
Geometrically, 2021-01-01 11:19:14 -07:00, committed by GitHub
commit e2183c2214
9 changed files with 1020 additions and 464 deletions

.env (3 lines changed)

@@ -1,7 +1,6 @@
 DEBUG=true
 RUST_LOG=info,sqlx::query=warn
-CORS_ORIGINS='["http://localhost:3000","https://modrinth.com"]'
 CDN_URL=https://cdn.modrinth.com
 DATABASE_URL=postgresql://labrinth:labrinth@localhost/labrinth
@@ -32,3 +31,5 @@ VERSION_INDEX_INTERVAL=1800
 GITHUB_CLIENT_ID=3acffb2e808d16d4b226
 GITHUB_CLIENT_SECRET=none
+
+RATE_LIMIT_IGNORE_IPS='[]'
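The new RATE_LIMIT_IGNORE_IPS variable takes a JSON array of IP strings that should receive the relaxed limit. A minimal sketch of how the server reads it (the helper wrapper is illustrative; the parsing chain matches the main.rs change below):

```rust
// Parse RATE_LIMIT_IGNORE_IPS, e.g. '["127.0.0.1"]', into a list of IPs.
// A missing or malformed value falls back to an empty list (no lax IPs).
fn rate_limit_ignore_ips() -> Vec<String> {
    dotenv::var("RATE_LIMIT_IGNORE_IPS")
        .ok()
        .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
        .unwrap_or_else(Vec::new)
}
```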


@@ -7,12 +7,22 @@ on:
 env:
   CARGO_TERM_COLOR: always
   SQLX_OFFLINE: true
 jobs:
   build:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
       - name: Build and push Docker images
         uses: docker/build-push-action@v1
         with:


@@ -14,6 +14,17 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
       - name: Build and push Docker images
         uses: docker/build-push-action@v1
         with:


@@ -1,22 +1,28 @@
 name: Docker image build
 on:
   push:
     branches-ignore:
       - master
   pull_request:
 env:
   CARGO_TERM_COLOR: always
   SQLX_OFFLINE: true
 jobs:
   build:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
-      - name: Build and push Docker images
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+      - name: Build Docker images
         uses: docker/build-push-action@v1
         with:
           push: false
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: docker.pkg.github.com

Cargo.lock (generated, 1358 lines changed)

File diff suppressed because it is too large

Cargo.toml

@@ -17,6 +17,7 @@ actix-rt = "1.1.1"
 actix-files = "0.4.0"
 actix-multipart = "0.3.0"
 actix-cors = "0.4.1"
+actix-ratelimit = "0.3.0"
 meilisearch-sdk = "0.4.0"
 reqwest = { version = "0.10.8", features = ["json"] }

@@ -42,19 +43,4 @@ futures-timer = "3.0.2"
 rust-s3 = "0.26.1"
 async-trait = "0.1.41"
-
-[dependencies.sqlx]
-git = "https://github.com/launchbadge/sqlx/"
-branch = "master"
-default-features = false
-features = ["runtime-actix", "postgres", "chrono", "offline", "macros", "migrate"]
-
-[dependencies.sqlx-macros]
-git = "https://github.com/launchbadge/sqlx/"
-branch = "master"
-default-features = false
-features = ["runtime-actix", "postgres", "chrono", "offline"]
-
-[dependencies.actix-ratelimit]
-# Temp until actix-ratelimit bumps version on cargo
-git = "https://github.com/TerminalWitchcraft/actix-ratelimit"
-rev = "870822067dfeae7cc0304352d81c4cb79ee27f5a"
+sqlx = { version = "0.4.2", features = ["runtime-actix-rustls", "postgres", "chrono", "offline", "macros", "migrate"] }
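The pinned git builds of sqlx and sqlx-macros (and the git actix-ratelimit, kept only until 0.3.0 reached crates.io) are replaced by released versions with the same feature set. The `offline` feature pairs with the SQLX_OFFLINE=true set in the workflows above: compile-time query checks run against checked-in metadata (sqlx-data.json) instead of a live DATABASE_URL. A hedged illustration; the table and column names here are hypothetical:

```rust
// With the `macros` feature, query! is checked at compile time; with `offline`
// and SQLX_OFFLINE=true, the check uses sqlx-data.json rather than a database.
async fn mod_exists(pool: &sqlx::PgPool, id: i64) -> Result<bool, sqlx::Error> {
    let row = sqlx::query!("SELECT id FROM mods WHERE id = $1", id)
        .fetch_optional(pool)
        .await?;
    Ok(row.is_some())
}
```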


@@ -1,6 +1,6 @@
 use super::DatabaseError;
 use crate::models::ids::random_base62_rng;
-use sqlx_macros::Type;
+use sqlx::sqlx_macros::Type;

 const ID_RETRY_COUNT: usize = 20;
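The import moves to the path re-exported by the released sqlx crate. For context, this `Type` derive is what the database ID newtypes use; a hypothetical sketch (the struct name and attribute are illustrative, not part of this diff):

```rust
use sqlx::sqlx_macros::Type;

// Hypothetical ID newtype: `transparent` encodes/decodes it as the inner i64.
#[derive(Type)]
#[sqlx(transparent)]
pub struct ModId(pub i64);
```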

src/main.rs

@@ -1,10 +1,12 @@
 use crate::file_hosting::S3Host;
 use actix_cors::Cors;
+use actix_ratelimit::errors::ARError;
 use actix_ratelimit::{MemoryStore, MemoryStoreActor, RateLimiter};
 use actix_web::{http, web, App, HttpServer};
 use env_logger::Env;
 use gumdrop::Options;
 use log::{error, info, warn};
+use rand::Rng;
 use search::indexing::index_mods;
 use search::indexing::IndexingSettings;
 use std::sync::Arc;
@@ -234,32 +236,52 @@ async fn main() -> std::io::Result<()> {
         pepper: crate::models::ids::Base62Id(crate::models::ids::random_base62(11)).to_string(),
     };

-    let allowed_origins = dotenv::var("CORS_ORIGINS")
-        .ok()
-        .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
-        .unwrap_or_else(|| vec![String::from("http://localhost")]);
-
     let store = MemoryStore::new();

     info!("Starting Actix HTTP server!");

     // Init App
     HttpServer::new(move || {
-        let mut cors = Cors::new()
-            .allowed_methods(vec!["GET", "POST", "DELETE", "PATCH", "PUT"])
-            .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
-            .allowed_header(http::header::CONTENT_TYPE)
-            .max_age(3600);
-
-        for allowed_origin in &allowed_origins {
-            cors = cors.allowed_origin(allowed_origin);
-        }
-
         App::new()
-            .wrap(cors.finish())
+            .wrap(
+                Cors::new()
+                    .allowed_methods(vec!["GET", "POST", "DELETE", "PATCH", "PUT"])
+                    .allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
+                    .allowed_header(http::header::CONTENT_TYPE)
+                    .send_wildcard()
+                    .max_age(3600)
+                    .finish(),
+            )
             .wrap(
+                // This is a hacky workaround to give the frontend's server-side renderer an
+                // effectively unlimited rate limit, since this library currently has no way
+                // to set a dynamic per-client maximum.
                 RateLimiter::new(MemoryStoreActor::from(store.clone()).start())
+                    .with_identifier(|req| {
+                        let connection_info = req.connection_info();
+                        let ip = String::from(
+                            connection_info
+                                .remote_addr()
+                                .ok_or(ARError::IdentificationError)?,
+                        );
+
+                        let ignore_ips = dotenv::var("RATE_LIMIT_IGNORE_IPS")
+                            .ok()
+                            .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
+                            .unwrap_or_else(Vec::new);
+
+                        if ignore_ips.contains(&ip) {
+                            // With suffixes spread evenly over 1..=14, this allows at most
+                            // 14 × 200 = 2800 requests per minute (about 46 per second)
+                            // from the frontend, which is reasonable.
+                            let random = rand::thread_rng().gen_range(1, 15);
+                            return Ok(format!("{}-{}", ip, random));
+                        }
+
+                        Ok(ip)
+                    })
                     .with_interval(std::time::Duration::from_secs(60))
-                    .with_max_requests(100),
+                    .with_max_requests(200),
             )
             .data(pool.clone())
             .data(file_host.clone())
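Pulled out of the middleware, the keying trick amounts to the following (a standalone sketch; the function name is mine, the logic mirrors the closure above):

```rust
use rand::Rng;

// Ignored IPs are scattered across 14 random buckets, so each such client gets
// up to 14 × 200 = 2800 requests per minute; everyone else is keyed by bare IP
// and capped at 200 per minute.
fn rate_limit_key(ip: &str, ignore_ips: &[String]) -> String {
    if ignore_ips.iter().any(|i| i == ip) {
        // rand 0.7's two-argument gen_range excludes the upper bound: 1..=14.
        let bucket = rand::thread_rng().gen_range(1, 15);
        format!("{}-{}", ip, bucket)
    } else {
        ip.to_string()
    }
}
```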
@@ -305,12 +327,12 @@ fn check_env_vars() -> bool {
         }
     }

-    if dotenv::var("CORS_ORIGINS")
+    if dotenv::var("RATE_LIMIT_IGNORE_IPS")
         .ok()
         .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
         .is_none()
     {
-        warn!("Variable `CORS_ORIGINS` missing in dotenv or not a json array of strings");
+        warn!("Variable `RATE_LIMIT_IGNORE_IPS` missing in dotenv or not a json array of strings");
         failed |= true;
     }


@@ -7,6 +7,7 @@ use crate::{database, Pepper};
 use actix_web::{delete, get, patch, web, HttpRequest, HttpResponse};
 use serde::{Deserialize, Serialize};
 use sqlx::PgPool;
+use std::borrow::Borrow;
 use std::sync::Arc;

 // TODO: this needs filtering, and a better response type
@@ -633,6 +634,7 @@ pub struct DownloadRedirect {
 }

 // under /api/v1/version_file/{hash}/download
+#[allow(clippy::await_holding_refcell_ref)]
 #[get("{version_id}/download")]
 pub async fn download_version(
     req: HttpRequest,
@@ -659,7 +661,7 @@
     if let Some(id) = result {
         let real_ip = req.connection_info();
-        let ip_option = real_ip.realip_remote_addr();
+        let ip_option = real_ip.borrow().remote_addr();

         if let Some(ip) = ip_option {
            let hash = sha1::Sha1::from(format!("{}{}", ip, pepper.pepper)).hexdigest();