Upgrade to Actix V2, bump SQLX version, code cleanup, integrate ratelimiter (#288)

* Upgrade to Actix V2, bump SQLX version, code cleanup, integrate ratelimiter

* Add pack file path validation

* Fix compilation error caused by incorrect merge
Geometrically committed on 2022-02-05 23:08:30 -07:00 (committed by GitHub)
parent 6a89646e66, commit 6bf5dbabee
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
27 changed files with 1417 additions and 1649 deletions

.env (2 changed lines)

@@ -37,6 +37,6 @@ VERSION_INDEX_INTERVAL=1800
 GITHUB_CLIENT_ID=none
 GITHUB_CLIENT_SECRET=none
-RATE_LIMIT_IGNORE_IPS='[]'
+RATE_LIMIT_IGNORE_IPS='["127.0.0.1"]'
 WHITELISTED_MODPACK_DOMAINS='["cdn.modrinth.com", "edge.forgecdn.net", "github.com", "raw.githubusercontent.com"]'
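For context: both variables hold JSON string arrays, which the repo's `parse_strings_from_var` helper (in `crate::util::env`) turns into a `Vec<String>`. A minimal sketch of that pattern, assuming serde_json; the helper's exact body is not shown in this diff and is reconstructed here by assumption:

// Hypothetical reconstruction of crate::util::env::parse_strings_from_var.
fn parse_strings_from_var(var: &str) -> Option<Vec<String>> {
    std::env::var(var)
        .ok()
        .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
}

fn main() {
    std::env::set_var("RATE_LIMIT_IGNORE_IPS", r#"["127.0.0.1"]"#);
    let ips = parse_strings_from_var("RATE_LIMIT_IGNORE_IPS").unwrap_or_default();
    assert_eq!(ips, vec!["127.0.0.1".to_string()]);
}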

Cargo.lock (generated, 1897 changed lines): diff suppressed because it is too large.

Cargo.toml

@@ -12,15 +12,15 @@ path = "src/main.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-actix-web = "3.3.2"
-actix-rt = "1.1.1"
-actix-files = "0.5.0"
-actix-multipart = "0.3.0"
-actix-cors = "0.5.4"
-actix-ratelimit = "0.3.0"
+actix = "0.12.0"
+actix-web = "4.0.0-rc.2"
+actix-rt = "2.6.0"
+tokio-stream = "0.1.8"
+actix-multipart = "0.4.0-beta.13"
+actix-cors = "0.6.0-beta.8"
 meilisearch-sdk = "0.6.0"
-reqwest = { version = "0.10.8", features = ["json"] }
+reqwest = { version = "0.11.9", features = ["json"] }
 yaserde = "0.6.0"
 yaserde_derive = "0.6.0"
@@ -54,9 +54,8 @@ futures-timer = "3.0.2"
 rust-s3 = "0.26.1"
 async-trait = "0.1.41"
-sqlx = { version = "0.4.2", features = ["runtime-actix-rustls", "postgres", "chrono", "offline", "macros", "migrate"] }
-sentry = { version = "0.22.0", features = ["log"] }
-sentry-actix = "0.22.0"
-bytes = "0.5.6"
+sqlx = { version = "0.5.10", features = ["runtime-actix-rustls", "postgres", "chrono", "offline", "macros", "migrate"] }
+bytes = "1.1.0"
+dashmap = "4.0.2"

build.rs

@@ -1,58 +1,3 @@
-use std::fs;
-use std::path::{Path, PathBuf};
-
 fn main() {
-    let dir = std::env::var("OUT_DIR").unwrap();
-    let mut target = PathBuf::from(dir);
-    target.pop();
-    target.pop();
-    target.pop();
-    target.push("migrations");
-
-    println!("Output: {}", target.to_str().unwrap());
-    copy("migrations", target).unwrap();
-}
-
-pub fn copy<U: AsRef<Path>, V: AsRef<Path>>(from: U, to: V) -> Result<(), std::io::Error> {
-    let mut stack = vec![PathBuf::from(from.as_ref())];
-
-    let output_root = PathBuf::from(to.as_ref());
-    let input_root = PathBuf::from(from.as_ref()).components().count();
-
-    while let Some(working_path) = stack.pop() {
-        println!("process: {:?}", &working_path);
-
-        // Generate a relative path
-        let src: PathBuf = working_path.components().skip(input_root).collect();
-
-        // Create a destination if missing
-        let dest = if src.components().count() == 0 {
-            output_root.clone()
-        } else {
-            output_root.join(&src)
-        };
-        if fs::metadata(&dest).is_err() {
-            println!(" mkdir: {:?}", dest);
-            fs::create_dir_all(&dest)?;
-        }
-
-        for entry in fs::read_dir(working_path)? {
-            let entry = entry?;
-            let path = entry.path();
-            if path.is_dir() {
-                stack.push(path);
-            } else {
-                match path.file_name() {
-                    Some(filename) => {
-                        let dest_path = dest.join(filename);
-                        println!("  copy: {:?} -> {:?}", &path, &dest_path);
-                        fs::copy(&path, &dest_path)?;
-                    }
-                    None => {
-                        println!("failed: {:?}", path);
-                    }
-                }
-            }
-        }
-    }
-
-    Ok(())
-}
+    println!("cargo:rerun-if-changed=migrations");
+}

sqlx-data.json

@@ -1209,28 +1209,6 @@
       ]
     }
   },
-  "49b2829b22f6ca82b3f62ea7962d8af22098cfa5a1fc1e06312bf1d3df382280": {
-    "query": "\n INSERT INTO categories (category, project_type, icon)\n VALUES ($1, $2, $3)\n ON CONFLICT (category, project_type, icon) DO NOTHING\n RETURNING id\n ",
-    "describe": {
-      "columns": [
-        {
-          "ordinal": 0,
-          "name": "id",
-          "type_info": "Int4"
-        }
-      ],
-      "parameters": {
-        "Left": [
-          "Varchar",
-          "Int4",
-          "Varchar"
-        ]
-      },
-      "nullable": [
-        false
-      ]
-    }
-  },
   "4a4b4166248877eefcd63603945fdcd392f76812bdec7c70f8ffeb06ee7e737f": {
     "query": "\n SELECT m.id FROM mods m\n INNER JOIN team_members tm ON tm.team_id = m.team_id\n WHERE tm.user_id = $1 AND tm.role = $2\n ",
     "describe": {
@@ -2190,68 +2168,6 @@
       ]
     }
   },
-  "7367664a589891e560eef5f80bb82220b438e3344eb60074c6b3174a02a72f4e": {
-    "query": "\n SELECT r.id, rt.name, r.mod_id, r.version_id, r.user_id, r.body, r.reporter, r.created\n FROM reports r\n INNER JOIN report_types rt ON rt.id = r.report_type_id\n WHERE r.id = ANY($1)\n ",
-    "describe": {
-      "columns": [
-        {
-          "ordinal": 0,
-          "name": "id",
-          "type_info": "Int8"
-        },
-        {
-          "ordinal": 1,
-          "name": "name",
-          "type_info": "Varchar"
-        },
-        {
-          "ordinal": 2,
-          "name": "mod_id",
-          "type_info": "Int8"
-        },
-        {
-          "ordinal": 3,
-          "name": "version_id",
-          "type_info": "Int8"
-        },
-        {
-          "ordinal": 4,
-          "name": "user_id",
-          "type_info": "Int8"
-        },
-        {
-          "ordinal": 5,
-          "name": "body",
-          "type_info": "Varchar"
-        },
-        {
-          "ordinal": 6,
-          "name": "reporter",
-          "type_info": "Int8"
-        },
-        {
-          "ordinal": 7,
-          "name": "created",
-          "type_info": "Timestamptz"
-        }
-      ],
-      "parameters": {
-        "Left": [
-          "Int8Array"
-        ]
-      },
-      "nullable": [
-        false,
-        false,
-        true,
-        true,
-        true,
-        false,
-        false,
-        false
-      ]
-    }
-  },
   "73bdd6c9e7cd8c1ed582261aebdee0f8fd2734e712ef288a2608564c918009cb": {
     "query": "\n DELETE FROM versions WHERE id = $1\n ",
     "describe": {
@@ -2314,6 +2230,28 @@
       ]
     }
   },
+  "7795938e2b23d06b32dc6d79f6b2b8e7ed24bbf4fa61cb3000259ba3d2ecbc6f": {
+    "query": "\n INSERT INTO categories (category, project_type, icon)\n VALUES ($1, $2, $3)\n ON CONFLICT (category) DO NOTHING\n RETURNING id\n ",
+    "describe": {
+      "columns": [
+        {
+          "ordinal": 0,
+          "name": "id",
+          "type_info": "Int4"
+        }
+      ],
+      "parameters": {
+        "Left": [
+          "Varchar",
+          "Int4",
+          "Varchar"
+        ]
+      },
+      "nullable": [
+        false
+      ]
+    }
+  },
   "78a60cf0febcc6e35b8ffe38f2c021c13ab660c81c4775bbb26004d30242a1a8": {
     "query": "\n SELECT gv.id id, gv.version version_, gv.type type_, gv.created created, gv.major major FROM game_versions gv\n WHERE major = $1\n ORDER BY created DESC\n ",
     "describe": {
@@ -2614,6 +2552,42 @@
       "nullable": []
     }
   },
+  "885ca4b21e05079d30dcf3b65619c95b544a10b7b08c3184278a0fe5ebc44b86": {
+    "query": "\n SELECT c.id id, c.category category, c.icon icon, pt.name project_type\n FROM categories c\n INNER JOIN project_types pt ON c.project_type = pt.id\n ORDER BY c.id\n ",
+    "describe": {
+      "columns": [
+        {
+          "ordinal": 0,
+          "name": "id",
+          "type_info": "Int4"
+        },
+        {
+          "ordinal": 1,
+          "name": "category",
+          "type_info": "Varchar"
+        },
+        {
+          "ordinal": 2,
+          "name": "icon",
+          "type_info": "Varchar"
+        },
+        {
+          "ordinal": 3,
+          "name": "project_type",
+          "type_info": "Varchar"
+        }
+      ],
+      "parameters": {
+        "Left": []
+      },
+      "nullable": [
+        false,
+        false,
+        false,
+        false
+      ]
+    }
+  },
   "89310b2bc5f020744a9a42dae6f15dfebc1544cdd754939f0d09714353f2aa7c": {
     "query": "\n SELECT id, team_id, role, permissions, accepted\n FROM team_members\n WHERE user_id = $1\n ",
     "describe": {
@@ -4133,6 +4107,27 @@
      "nullable": []
     }
   },
+  "bc605f80a615c7d0ca9c8207f8b0c5dc1b8f2ad0f9b3346a00078d59e5e3e253": {
+    "query": "\n INSERT INTO loaders (loader, icon)\n VALUES ($1, $2)\n ON CONFLICT (loader) DO NOTHING\n RETURNING id\n ",
+    "describe": {
+      "columns": [
+        {
+          "ordinal": 0,
+          "name": "id",
+          "type_info": "Int4"
+        }
+      ],
+      "parameters": {
+        "Left": [
+          "Varchar",
+          "Varchar"
+        ]
+      },
+      "nullable": [
+        false
+      ]
+    }
+  },
   "bc91841f9672608a28bd45a862919f2bd34fac0b3479e3b4b67a9f6bea2a562a": {
     "query": "\n UPDATE mods\n SET issues_url = $1\n WHERE (id = $2)\n ",
     "describe": {
@@ -4570,27 +4565,6 @@
      "nullable": []
     }
   },
-  "cc8eeb14e2069b9e4f92b224d42b283e569258d61be3cc3b3f7564f0dadac89b": {
-    "query": "\n INSERT INTO loaders (loader, icon)\n VALUES ($1, $2)\n ON CONFLICT (loader, icon) DO NOTHING\n RETURNING id\n ",
-    "describe": {
-      "columns": [
-        {
-          "ordinal": 0,
-          "name": "id",
-          "type_info": "Int4"
-        }
-      ],
-      "parameters": {
-        "Left": [
-          "Varchar",
-          "Varchar"
-        ]
-      },
-      "nullable": [
-        false
-      ]
-    }
-  },
   "ccd913bb2f3006ffe881ce2fc4ef1e721d18fe2eed6ac62627046c955129610c": {
     "query": "SELECT EXISTS(SELECT 1 FROM files WHERE id=$1)",
     "describe": {
@@ -4982,37 +4956,63 @@
       ]
     }
   },
-  "d7744589d9e20c48f6f726a8a540822c1e521b791ebc2fee86a1108d442aedb8": {
-    "query": "\n SELECT c.id id, c.category category, c.icon icon, pt.name project_type\n FROM categories c\n INNER JOIN project_types pt ON c.project_type = pt.id\n ",
-    "describe": {
-      "columns": [
-        {
-          "ordinal": 0,
-          "name": "id",
-          "type_info": "Int4"
-        },
-        {
-          "ordinal": 1,
-          "name": "category",
-          "type_info": "Varchar"
-        },
-        {
-          "ordinal": 2,
-          "name": "icon",
-          "type_info": "Varchar"
-        },
-        {
-          "ordinal": 3,
-          "name": "project_type",
-          "type_info": "Varchar"
-        }
-      ],
-      "parameters": {
-        "Left": []
-      },
-      "nullable": [
-        false,
-        false,
-        false,
-        false
-      ]
+  "d7127fd7f257cc7779841108c75f6fd8b20f9619bef1cacd0fbaf011cf0b25b3": {
+    "query": "\n SELECT r.id, rt.name, r.mod_id, r.version_id, r.user_id, r.body, r.reporter, r.created\n FROM reports r\n INNER JOIN report_types rt ON rt.id = r.report_type_id\n WHERE r.id = ANY($1)\n ORDER BY r.created DESC\n ",
+    "describe": {
+      "columns": [
+        {
+          "ordinal": 0,
+          "name": "id",
+          "type_info": "Int8"
+        },
+        {
+          "ordinal": 1,
+          "name": "name",
+          "type_info": "Varchar"
+        },
+        {
+          "ordinal": 2,
+          "name": "mod_id",
+          "type_info": "Int8"
+        },
+        {
+          "ordinal": 3,
+          "name": "version_id",
+          "type_info": "Int8"
+        },
+        {
+          "ordinal": 4,
+          "name": "user_id",
+          "type_info": "Int8"
+        },
+        {
+          "ordinal": 5,
+          "name": "body",
+          "type_info": "Varchar"
+        },
+        {
+          "ordinal": 6,
+          "name": "reporter",
+          "type_info": "Int8"
+        },
+        {
+          "ordinal": 7,
+          "name": "created",
+          "type_info": "Timestamptz"
+        }
+      ],
+      "parameters": {
+        "Left": [
+          "Int8Array"
+        ]
+      },
+      "nullable": [
+        false,
+        false,
+        true,
+        true,
+        true,
+        false,
+        false,
+        false
+      ]

src/database/models/categories.rs

@@ -141,6 +141,7 @@ impl Category {
             SELECT c.id id, c.category category, c.icon icon, pt.name project_type
             FROM categories c
             INNER JOIN project_types pt ON c.project_type = pt.id
+            ORDER BY c.id
             "
         )
         .fetch_many(exec)
@@ -162,8 +163,6 @@ impl Category {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM categories
@@ -227,7 +226,7 @@ impl<'a> CategoryBuilder<'a> {
             "
             INSERT INTO categories (category, project_type, icon)
             VALUES ($1, $2, $3)
-            ON CONFLICT (category, project_type, icon) DO NOTHING
+            ON CONFLICT (category) DO NOTHING
             RETURNING id
             ",
             self.name,
@@ -336,8 +335,6 @@ impl Loader {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM loaders
@@ -398,7 +395,7 @@ impl<'a> LoaderBuilder<'a> {
             "
             INSERT INTO loaders (loader, icon)
             VALUES ($1, $2)
-            ON CONFLICT (loader, icon) DO NOTHING
+            ON CONFLICT (loader) DO NOTHING
             RETURNING id
             ",
             self.name,
@@ -597,8 +594,6 @@ impl GameVersion {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM game_versions
@@ -761,8 +756,6 @@ impl License {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM licenses
@@ -909,8 +902,6 @@ impl DonationPlatform {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM donation_platforms
@@ -1046,8 +1037,6 @@ impl ReportType {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM report_types
@@ -1199,8 +1188,6 @@ impl ProjectType {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
-
         let result = sqlx::query!(
             "
             DELETE FROM project_types
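Context for the repeated `use sqlx::Done;` removals: sqlx 0.5 dropped the `Done` trait, and `execute()` now returns a driver-specific result type (`PgQueryResult` for Postgres) with `rows_affected()` as an inherent method. A small sketch of the post-upgrade pattern, assuming a `PgPool` (hypothetical helper, not from this PR):

use sqlx::PgPool;

// sqlx 0.5: rows_affected() needs no extra trait import.
async fn remove_category(pool: &PgPool, name: &str) -> Result<bool, sqlx::Error> {
    let result = sqlx::query("DELETE FROM categories WHERE category = $1")
        .bind(name)
        .execute(pool)
        .await?;
    Ok(result.rows_affected() > 0)
}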

src/database/models/report_item.rs

@@ -100,20 +100,21 @@ impl Report {
             FROM reports r
             INNER JOIN report_types rt ON rt.id = r.report_type_id
             WHERE r.id = ANY($1)
+            ORDER BY r.created DESC
             ",
             &report_ids_parsed
         )
         .fetch_many(exec)
         .try_filter_map(|e| async {
-            Ok(e.right().map(|row| QueryReport {
-                id: ReportId(row.id),
-                report_type: row.name,
-                project_id: row.mod_id.map(ProjectId),
-                version_id: row.version_id.map(VersionId),
-                user_id: row.user_id.map(UserId),
-                body: row.body,
-                reporter: UserId(row.reporter),
-                created: row.created,
+            Ok(e.right().map(|x| QueryReport {
+                id: ReportId(x.id),
+                report_type: x.name,
+                project_id: x.mod_id.map(ProjectId),
+                version_id: x.version_id.map(VersionId),
+                user_id: x.user_id.map(UserId),
+                body: x.body,
+                reporter: UserId(x.reporter),
+                created: x.created,
             }))
         })
         .try_collect::<Vec<QueryReport>>()

src/database/models/team_item.rs

@@ -403,7 +403,6 @@ impl TeamMember {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use sqlx::Done;
         let result = sqlx::query!(
             "
             DELETE FROM team_members

src/database/postgres_database.rs

@@ -1,10 +1,7 @@
 use log::info;
-use sqlx::migrate::{Migrate, MigrateDatabase, Migrator};
+use sqlx::migrate::MigrateDatabase;
 use sqlx::postgres::{PgPool, PgPoolOptions};
 use sqlx::{Connection, PgConnection, Postgres};
-use std::path::Path;
-
-const MIGRATION_FOLDER: &str = "migrations";
 
 pub async fn connect() -> Result<PgPool, sqlx::Error> {
     info!("Initializing database connection");
@@ -34,31 +31,14 @@ pub async fn check_for_migrations() -> Result<(), sqlx::Error> {
         info!("Creating database...");
         Postgres::create_database(uri).await?;
     }
 
     info!("Applying migrations...");
-    run_migrations(uri).await?;
-
-    Ok(())
-}
-
-pub async fn run_migrations(uri: &str) -> Result<(), sqlx::Error> {
-    let migrator = Migrator::new(Path::new(MIGRATION_FOLDER)).await?;
     let mut conn: PgConnection = PgConnection::connect(uri).await?;
-
-    conn.ensure_migrations_table().await?;
-
-    let (version, dirty) = conn.version().await?.unwrap_or((0, false));
-
-    if dirty {
-        panic!("The database is dirty! Please check your database status.");
-    }
-
-    for migration in migrator.iter() {
-        if migration.version > version {
-            let _elapsed = conn.apply(migration).await?;
-        } else {
-            conn.validate(migration).await?;
-        }
-    }
+    sqlx::migrate!()
+        .run(&mut conn)
+        .await
+        .expect("Error while running database migrations!");
 
     Ok(())
 }
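The new flow leans on sqlx 0.5's `sqlx::migrate!()` macro, which embeds the `migrations/` directory into the binary at compile time; that is also why build.rs now only emits `cargo:rerun-if-changed=migrations` instead of copying the folder next to the executable. A minimal standalone sketch of the same flow (assuming the actix-rt runtime from this crate and a `DATABASE_URL` env var):

use sqlx::{Connection, PgConnection};

// Sketch: sqlx::migrate!() defaults to ./migrations and bakes the SQL
// files in at compile time, so nothing needs to exist on disk at runtime.
#[actix_rt::main]
async fn main() -> Result<(), sqlx::Error> {
    let uri = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    let mut conn: PgConnection = PgConnection::connect(&uri).await?;
    sqlx::migrate!()
        .run(&mut conn)
        .await
        .expect("Error while running database migrations!");
    Ok(())
}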

src/file_hosting/mock.rs

@@ -1,6 +1,6 @@
 use super::{DeleteFileData, FileHost, FileHostingError, UploadFileData};
 use async_trait::async_trait;
-use bytes::{Buf, Bytes};
+use bytes::Bytes;
 use sha2::Digest;
 
 pub struct MockHost(());
@@ -22,10 +22,10 @@ impl FileHost for MockHost {
         let path = std::path::Path::new(&dotenv::var("MOCK_FILE_PATH").unwrap())
             .join(file_name.replace("../", ""));
 
         std::fs::create_dir_all(path.parent().ok_or(FileHostingError::InvalidFilename)?)?;
-        let content_sha1 = sha1::Sha1::from(file_bytes.bytes()).hexdigest();
-        let content_sha512 = format!("{:x}", sha2::Sha512::digest(file_bytes.bytes()));
+        let content_sha1 = sha1::Sha1::from(&*file_bytes).hexdigest();
+        let content_sha512 = format!("{:x}", sha2::Sha512::digest(&*file_bytes));
 
-        std::fs::write(path, file_bytes.bytes())?;
+        std::fs::write(path, &*file_bytes)?;
 
         Ok(UploadFileData {
             file_id: String::from("MOCK_FILE_ID"),
             file_name: file_name.to_string(),

src/file_hosting/s3_host.rs

@@ -1,6 +1,6 @@
 use crate::file_hosting::{DeleteFileData, FileHost, FileHostingError, UploadFileData};
 use async_trait::async_trait;
-use bytes::{Buf, Bytes};
+use bytes::Bytes;
 use s3::bucket::Bucket;
 use s3::creds::Credentials;
 use s3::region::Region;
@@ -42,14 +42,10 @@ impl FileHost for S3Host {
         file_bytes: Bytes,
     ) -> Result<UploadFileData, FileHostingError> {
         let content_sha1 = sha1::Sha1::from(&file_bytes).hexdigest();
-        let content_sha512 = format!("{:x}", sha2::Sha512::digest(file_bytes.bytes()));
+        let content_sha512 = format!("{:x}", sha2::Sha512::digest(&*file_bytes));
 
         self.bucket
-            .put_object_with_content_type(
-                format!("/{}", file_name),
-                file_bytes.bytes(),
-                content_type,
-            )
+            .put_object_with_content_type(format!("/{}", file_name), &*file_bytes, content_type)
             .await?;
 
         Ok(UploadFileData {

src/main.rs

@@ -1,13 +1,13 @@
 use crate::file_hosting::S3Host;
+use crate::ratelimit::errors::ARError;
+use crate::ratelimit::memory::{MemoryStore, MemoryStoreActor};
+use crate::ratelimit::middleware::RateLimiter;
 use crate::util::env::{parse_strings_from_var, parse_var};
 use actix_cors::Cors;
-use actix_ratelimit::errors::ARError;
-use actix_ratelimit::{MemoryStore, MemoryStoreActor, RateLimiter};
 use actix_web::{http, web, App, HttpServer};
 use env_logger::Env;
 use gumdrop::Options;
 use log::{error, info, warn};
-use rand::Rng;
 use search::indexing::index_projects;
 use search::indexing::IndexingSettings;
 use std::sync::atomic::Ordering;
@@ -17,6 +17,7 @@ mod database;
 mod file_hosting;
 mod health;
 mod models;
+mod ratelimit;
 mod routes;
 mod scheduler;
 mod search;
@@ -90,14 +91,6 @@ async fn main() -> std::io::Result<()> {
         info!("Skipping initial indexing");
     }
 
-    // DSN is from SENTRY_DSN env variable.
-    // Has no effect if not set.
-    let sentry = sentry::init(());
-    if sentry.is_enabled() {
-        info!("Enabled Sentry integration");
-        std::env::set_var("RUST_BACKTRACE", "1");
-    }
-
    database::check_for_migrations()
        .await
        .expect("An error occurred while running migrations.");
@@ -266,37 +259,28 @@ async fn main() -> std::io::Result<()> {
                             header.to_str().map_err(|_| ARError::IdentificationError)?
                         } else {
                             connection_info
-                                .remote_addr()
+                                .peer_addr()
                                 .ok_or(ARError::IdentificationError)?
                         }
                     } else {
                         connection_info
-                            .remote_addr()
+                            .peer_addr()
                            .ok_or(ARError::IdentificationError)?
                    });
 
-                    let ignore_ips =
-                        parse_strings_from_var("RATE_LIMIT_IGNORE_IPS").unwrap_or_default();
-
-                    if ignore_ips.contains(&ip) {
-                        // At an even distribution of numbers, this will allow at the most
-                        // 18000 requests per minute from the frontend, which is reasonable
-                        // (300 requests per second)
-                        let random = rand::thread_rng().gen_range(1, 30);
-                        return Ok(format!("{}-{}", ip, random));
-                    }
-
                    Ok(ip)
                })
                .with_interval(std::time::Duration::from_secs(60))
-                .with_max_requests(300),
+                .with_max_requests(300)
+                .with_ignore_ips(
+                    parse_strings_from_var("RATE_LIMIT_IGNORE_IPS").unwrap_or_default(),
+                ),
            )
-            .wrap(sentry_actix::Sentry::new())
-            .data(pool.clone())
-            .data(file_host.clone())
-            .data(indexing_queue.clone())
-            .data(search_config.clone())
-            .data(ip_salt.clone())
+            .app_data(pool.clone())
+            .app_data(file_host.clone())
+            .app_data(indexing_queue.clone())
+            .app_data(search_config.clone())
+            .app_data(ip_salt.clone())
            .configure(routes::v1_config)
            .configure(routes::v2_config)
            .service(routes::index_get)
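Assembled from the hunks above, the new wiring looks roughly like this (a sketch, not the full App from main.rs; the CF-Connecting-IP identifier closure, CORS, app data, and routes are omitted, and it assumes running inside this crate):

use crate::ratelimit::memory::{MemoryStore, MemoryStoreActor};
use crate::ratelimit::middleware::RateLimiter;
use actix_web::{web, App, HttpResponse, HttpServer};
use std::time::Duration;

// Sketch of the vendored ratelimiter under actix-web 4. The store actor is
// started once and its Addr is cloned into each worker's App.
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    let limiter = MemoryStoreActor::from(MemoryStore::new()).start();
    HttpServer::new(move || {
        App::new()
            .wrap(
                RateLimiter::new(limiter.clone())
                    .with_interval(Duration::from_secs(60))
                    .with_max_requests(300)
                    .with_ignore_ips(vec!["127.0.0.1".to_string()]),
            )
            .route("/", web::get().to(|| async { HttpResponse::Ok().body("ok") }))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}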

src/ratelimit/errors.rs (new file, 45 lines)

@@ -0,0 +1,45 @@
//! Errors that can occur during middleware processing stage
use actix_web::ResponseError;
use log::*;
use thiserror::Error;
/// Custom error type. Useful for logging and debugging different kinds of errors.
/// This type can be converted to Actix Error, which defaults to
/// InternalServerError
///
#[derive(Debug, Error)]
pub enum ARError {
/// Read/Write error on store
#[error("read/write operatiion failed: {0}")]
ReadWriteError(String),
/// Identifier error
#[error("client identification failed")]
IdentificationError,
/// Limited Error
#[error("You are being ratelimited. Please wait {reset} seconds. {remaining}/{max_requests} remaining.")]
LimitedError {
max_requests: usize,
remaining: usize,
reset: u64,
},
}
impl ResponseError for ARError {
fn error_response(&self) -> actix_web::web::HttpResponse {
match self {
Self::LimitedError {
max_requests,
remaining,
reset,
} => {
let mut response = actix_web::web::HttpResponse::TooManyRequests();
response.insert_header(("x-ratelimit-limit", max_requests.to_string()));
response.insert_header(("x-ratelimit-remaining", remaining.to_string()));
response.insert_header(("x-ratelimit-reset", reset.to_string()));
response.body(self.to_string())
}
_ => actix_web::web::HttpResponse::build(self.status_code()).body(self.to_string()),
}
}
}

src/ratelimit/memory.rs (new file, 252 lines)

@@ -0,0 +1,252 @@
//! In memory store for rate limiting
use actix::prelude::*;
use dashmap::DashMap;
use futures::future::{self};
use log::*;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use crate::ratelimit::errors::ARError;
use crate::ratelimit::{ActorMessage, ActorResponse};
/// Type used to create a concurrent hashmap store
#[derive(Clone)]
pub struct MemoryStore {
inner: Arc<DashMap<String, (usize, Duration)>>,
}
impl MemoryStore {
/// Create a new hashmap
///
/// # Example
/// ```rust
/// use actix_ratelimit::MemoryStore;
///
/// let store = MemoryStore::new();
/// ```
pub fn new() -> Self {
debug!("Creating new MemoryStore");
MemoryStore {
inner: Arc::new(DashMap::<String, (usize, Duration)>::new()),
}
}
#[allow(dead_code)]
/// Create a new hashmap with the provided capacity
pub fn with_capacity(capacity: usize) -> Self {
debug!("Creating new MemoryStore");
MemoryStore {
inner: Arc::new(DashMap::<String, (usize, Duration)>::with_capacity(
capacity,
)),
}
}
}
/// Actor for memory store
pub struct MemoryStoreActor {
inner: Arc<DashMap<String, (usize, Duration)>>,
}
impl From<MemoryStore> for MemoryStoreActor {
fn from(store: MemoryStore) -> Self {
MemoryStoreActor { inner: store.inner }
}
}
impl MemoryStoreActor {
/// Starts the memory actor and returns its address
pub fn start(self) -> Addr<Self> {
debug!("Started memory store");
Supervisor::start(|_| self)
}
}
impl Actor for MemoryStoreActor {
type Context = Context<Self>;
}
impl Supervised for MemoryStoreActor {
fn restarting(&mut self, _: &mut Self::Context) {
debug!("Restarting memory store");
}
}
impl Handler<ActorMessage> for MemoryStoreActor {
type Result = ActorResponse;
fn handle(&mut self, msg: ActorMessage, ctx: &mut Self::Context) -> Self::Result {
match msg {
ActorMessage::Set { key, value, expiry } => {
debug!("Inserting key {} with expiry {}", &key, &expiry.as_secs());
let future_key = String::from(&key);
let now = SystemTime::now();
let now = now.duration_since(UNIX_EPOCH).unwrap();
self.inner.insert(key, (value, now + expiry));
ctx.notify_later(ActorMessage::Remove(future_key), expiry);
ActorResponse::Set(Box::pin(future::ready(Ok(()))))
}
ActorMessage::Update { key, value } => match self.inner.get_mut(&key) {
Some(mut c) => {
let val_mut: &mut (usize, Duration) = c.value_mut();
if val_mut.0 > value {
val_mut.0 -= value;
} else {
val_mut.0 = 0;
}
let new_val = val_mut.0;
ActorResponse::Update(Box::pin(future::ready(Ok(new_val))))
}
None => {
return ActorResponse::Update(Box::pin(future::ready(Err(
ARError::ReadWriteError("memory store: read failed!".to_string()),
))))
}
},
ActorMessage::Get(key) => {
if self.inner.contains_key(&key) {
let val = match self.inner.get(&key) {
Some(c) => c,
None => {
return ActorResponse::Get(Box::pin(future::ready(Err(
ARError::ReadWriteError("memory store: read failed!".to_string()),
))))
}
};
let val = val.value().0;
ActorResponse::Get(Box::pin(future::ready(Ok(Some(val)))))
} else {
ActorResponse::Get(Box::pin(future::ready(Ok(None))))
}
}
ActorMessage::Expire(key) => {
let c = match self.inner.get(&key) {
Some(d) => d,
None => {
return ActorResponse::Expire(Box::pin(future::ready(Err(
ARError::ReadWriteError("memory store: read failed!".to_string()),
))))
}
};
let dur = c.value().1;
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let res = dur.checked_sub(now).unwrap_or_else(|| Duration::new(0, 0));
ActorResponse::Expire(Box::pin(future::ready(Ok(res))))
}
ActorMessage::Remove(key) => {
debug!("Removing key: {}", &key);
let val = match self.inner.remove::<String>(&key) {
Some(c) => c,
None => {
return ActorResponse::Remove(Box::pin(future::ready(Err(
ARError::ReadWriteError("memory store: remove failed!".to_string()),
))))
}
};
let val = val.1;
ActorResponse::Remove(Box::pin(future::ready(Ok(val.0))))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[actix_rt::test]
async fn test_set() {
let store = MemoryStore::new();
let addr = MemoryStoreActor::from(store.clone()).start();
let res = addr
.send(ActorMessage::Set {
key: "hello".to_string(),
value: 30usize,
expiry: Duration::from_secs(5),
})
.await;
let res = res.expect("Failed to send msg");
match res {
ActorResponse::Set(c) => match c.await {
Ok(()) => {}
Err(e) => panic!("Shouldn't happen {}", &e),
},
_ => panic!("Shouldn't happen!"),
}
}
#[actix_rt::test]
async fn test_get() {
let store = MemoryStore::new();
let addr = MemoryStoreActor::from(store.clone()).start();
let expiry = Duration::from_secs(5);
let res = addr
.send(ActorMessage::Set {
key: "hello".to_string(),
value: 30usize,
expiry: expiry,
})
.await;
let res = res.expect("Failed to send msg");
match res {
ActorResponse::Set(c) => match c.await {
Ok(()) => {}
Err(e) => panic!("Shouldn't happen {}", &e),
},
_ => panic!("Shouldn't happen!"),
}
let res2 = addr.send(ActorMessage::Get("hello".to_string())).await;
let res2 = res2.expect("Failed to send msg");
match res2 {
ActorResponse::Get(c) => match c.await {
Ok(d) => {
let d = d.unwrap();
assert_eq!(d, 30usize);
}
Err(e) => panic!("Shouldn't happen {}", &e),
},
_ => panic!("Shouldn't happen!"),
};
}
#[actix_rt::test]
async fn test_expiry() {
let store = MemoryStore::new();
let addr = MemoryStoreActor::from(store.clone()).start();
let expiry = Duration::from_secs(3);
let res = addr
.send(ActorMessage::Set {
key: "hello".to_string(),
value: 30usize,
expiry: expiry,
})
.await;
let res = res.expect("Failed to send msg");
match res {
ActorResponse::Set(c) => match c.await {
Ok(()) => {}
Err(e) => panic!("Shouldn't happen {}", &e),
},
_ => panic!("Shouldn't happen!"),
}
assert_eq!(addr.connected(), true);
let res3 = addr.send(ActorMessage::Expire("hello".to_string())).await;
let res3 = res3.expect("Failed to send msg");
match res3 {
ActorResponse::Expire(c) => match c.await {
Ok(dur) => {
let now = Duration::from_secs(3);
if dur > now {
panic!("Expiry is invalid!");
} else if dur > now + Duration::from_secs(4) {
panic!("Expiry is invalid!");
}
}
Err(e) => {
panic!("Shouldn't happen: {}", &e);
}
},
_ => panic!("Shouldn't happen!"),
};
}
}

src/ratelimit/middleware.rs (new file, 279 lines)

@@ -0,0 +1,279 @@
//! RateLimiter middleware for actix application
use crate::ratelimit::errors::ARError;
use crate::ratelimit::{ActorMessage, ActorResponse};
use actix::dev::*;
use actix_web::{
dev::{Service, ServiceRequest, ServiceResponse, Transform},
error::Error as AWError,
http::header::{HeaderName, HeaderValue},
};
use futures::future::{ok, Ready};
use log::*;
use std::{
cell::RefCell,
future::Future,
ops::Fn,
pin::Pin,
rc::Rc,
task::{Context, Poll},
time::Duration,
};
/// Type that implements the ratelimit middleware.
///
/// This accepts _interval_ which specifies the
/// window size, _max_requests_ which specifies the maximum number of requests in that window, and
/// _store_ which is essentially a data store used to store client access information. Entry is removed from
/// the store after _interval_.
///
/// # Example
/// ```rust
/// # use std::time::Duration;
/// use actix_ratelimit::{MemoryStore, MemoryStoreActor};
/// use actix_ratelimit::RateLimiter;
///
/// #[actix_rt::main]
/// async fn main() {
/// let store = MemoryStore::new();
/// let ratelimiter = RateLimiter::new(
/// MemoryStoreActor::from(store.clone()).start())
/// .with_interval(Duration::from_secs(60))
/// .with_max_requests(100);
/// }
/// ```
pub struct RateLimiter<T>
where
T: Handler<ActorMessage> + Send + Sync + 'static,
T::Context: ToEnvelope<T, ActorMessage>,
{
interval: Duration,
max_requests: usize,
store: Addr<T>,
identifier: Rc<Box<dyn Fn(&ServiceRequest) -> Result<String, ARError>>>,
ignore_ips: Vec<String>,
}
impl<T> RateLimiter<T>
where
T: Handler<ActorMessage> + Send + Sync + 'static,
<T as Actor>::Context: ToEnvelope<T, ActorMessage>,
{
/// Creates a new instance of `RateLimiter` with the provided address of `StoreActor`.
pub fn new(store: Addr<T>) -> Self {
let identifier = |req: &ServiceRequest| {
let connection_info = req.connection_info();
let ip = connection_info
.peer_addr()
.ok_or(ARError::IdentificationError)?;
Ok(String::from(ip))
};
RateLimiter {
interval: Duration::from_secs(0),
max_requests: 0,
store,
identifier: Rc::new(Box::new(identifier)),
ignore_ips: Vec::new(),
}
}
/// Specify the interval. The counter for a client is reset after this interval
pub fn with_interval(mut self, interval: Duration) -> Self {
self.interval = interval;
self
}
/// Specify the maximum number of requests allowed in the given interval.
pub fn with_max_requests(mut self, max_requests: usize) -> Self {
self.max_requests = max_requests;
self
}
/// Sets IPs that should be ignored by the ratelimiter
pub fn with_ignore_ips(mut self, ignore_ips: Vec<String>) -> Self {
self.ignore_ips = ignore_ips;
self
}
/// Function to get the identifier for the client request
pub fn with_identifier<F: Fn(&ServiceRequest) -> Result<String, ARError> + 'static>(
mut self,
identifier: F,
) -> Self {
self.identifier = Rc::new(Box::new(identifier));
self
}
}
impl<T, S, B> Transform<S, ServiceRequest> for RateLimiter<T>
where
T: Handler<ActorMessage> + Send + Sync + 'static,
T::Context: ToEnvelope<T, ActorMessage>,
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = AWError> + 'static,
S::Future: 'static,
B: 'static,
{
type Response = ServiceResponse<B>;
type Error = S::Error;
type Transform = RateLimitMiddleware<S, T>;
type InitError = ();
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ok(RateLimitMiddleware {
service: Rc::new(RefCell::new(service)),
store: self.store.clone(),
max_requests: self.max_requests,
interval: self.interval.as_secs(),
identifier: self.identifier.clone(),
ignore_ips: self.ignore_ips.clone(),
})
}
}
/// Service factory for RateLimiter
pub struct RateLimitMiddleware<S, T>
where
S: 'static,
T: Handler<ActorMessage> + 'static,
{
service: Rc<RefCell<S>>,
store: Addr<T>,
// Exists here for the sole purpose of knowing the max_requests and interval from RateLimiter
max_requests: usize,
interval: u64,
identifier: Rc<Box<dyn Fn(&ServiceRequest) -> Result<String, ARError> + 'static>>,
ignore_ips: Vec<String>,
}
impl<T, S, B> Service<ServiceRequest> for RateLimitMiddleware<S, T>
where
T: Handler<ActorMessage> + 'static,
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = AWError> + 'static,
S::Future: 'static,
B: 'static,
T::Context: ToEnvelope<T, ActorMessage>,
{
type Response = ServiceResponse<B>;
type Error = S::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.borrow_mut().poll_ready(cx)
}
fn call(&self, req: ServiceRequest) -> Self::Future {
let store = self.store.clone();
let srv = self.service.clone();
let max_requests = self.max_requests;
let interval = Duration::from_secs(self.interval);
let identifier = self.identifier.clone();
let ignore_ips = self.ignore_ips.clone();
Box::pin(async move {
let identifier: String = (identifier)(&req)?;
if ignore_ips.contains(&identifier) {
let fut = srv.call(req);
let res = fut.await?;
return Ok(res);
}
let remaining: ActorResponse = store
.send(ActorMessage::Get(String::from(&identifier)))
.await
.map_err(|_| ARError::IdentificationError)?;
match remaining {
ActorResponse::Get(opt) => {
let opt = opt.await?;
if let Some(c) = opt {
// Existing entry in store
let expiry = store
.send(ActorMessage::Expire(String::from(&identifier)))
.await
.map_err(|_| ARError::ReadWriteError("Setting timeout".to_string()))?;
let reset: Duration = match expiry {
ActorResponse::Expire(dur) => dur.await?,
_ => unreachable!(),
};
if c == 0 {
info!("Limit exceeded for client: {}", &identifier);
Err(ARError::LimitedError {
max_requests,
remaining: c,
reset: reset.as_secs(),
}
.into())
} else {
// Decrement value
let res: ActorResponse = store
.send(ActorMessage::Update {
key: identifier,
value: 1,
})
.await
.map_err(|_| {
ARError::ReadWriteError("Decrementing ratelimit".to_string())
})?;
let updated_value: usize = match res {
ActorResponse::Update(c) => c.await?,
_ => unreachable!(),
};
// Execute the request
let fut = srv.call(req);
let mut res = fut.await?;
let headers = res.headers_mut();
// Safe unwraps, since usize is always convertible to string
headers.insert(
HeaderName::from_static("x-ratelimit-limit"),
HeaderValue::from_str(max_requests.to_string().as_str())?,
);
headers.insert(
HeaderName::from_static("x-ratelimit-remaining"),
HeaderValue::from_str(updated_value.to_string().as_str())?,
);
headers.insert(
HeaderName::from_static("x-ratelimit-reset"),
HeaderValue::from_str(reset.as_secs().to_string().as_str())?,
);
Ok(res)
}
} else {
// New client, create entry in store
let current_value = max_requests - 1;
let res = store
.send(ActorMessage::Set {
key: String::from(&identifier),
value: current_value,
expiry: interval,
})
.await
.map_err(|_| {
ARError::ReadWriteError("Creating store entry".to_string())
})?;
match res {
ActorResponse::Set(c) => c.await?,
_ => unreachable!(),
}
let fut = srv.call(req);
let mut res = fut.await?;
let headers = res.headers_mut();
// Safe unwraps, since usize is always convertible to string
headers.insert(
HeaderName::from_static("x-ratelimit-limit"),
HeaderValue::from_str(max_requests.to_string().as_str()).unwrap(),
);
headers.insert(
HeaderName::from_static("x-ratelimit-remaining"),
HeaderValue::from_str(current_value.to_string().as_str()).unwrap(),
);
headers.insert(
HeaderName::from_static("x-ratelimit-reset"),
HeaderValue::from_str(interval.as_secs().to_string().as_str()).unwrap(),
);
Ok(res)
}
}
_ => {
unreachable!();
}
}
})
}
}

src/ratelimit/mod.rs (new file, 64 lines)

@@ -0,0 +1,64 @@
use std::future::Future;
use std::marker::Send;
use std::pin::Pin;
use std::time::Duration;
use crate::ratelimit::errors::ARError;
use actix::dev::*;
pub mod errors;
pub mod memory;
/// The code for this module was directly taken from https://github.com/TerminalWitchcraft/actix-ratelimit
/// with some modifications including upgrading it to Actix 4!
pub mod middleware;
/// Represents message that can be handled by a `StoreActor`
pub enum ActorMessage {
/// Get the remaining count based on the provided identifier
Get(String),
/// Set the count of the client identified by `key` to `value` valid for `expiry`
Set {
key: String,
value: usize,
expiry: Duration,
},
/// Change the value of count for the client identified by `key` by `value`
Update { key: String, value: usize },
/// Get the expiration time for the client.
Expire(String),
/// Remove the client from the store
Remove(String),
}
impl Message for ActorMessage {
type Result = ActorResponse;
}
/// Wrapper type for the `Pin<Box<dyn Future>>` returned by store actors
pub type Output<T> = Pin<Box<dyn Future<Output = Result<T, ARError>> + Send>>;
/// Represents data returned in response to `Messages` by a `StoreActor`
pub enum ActorResponse {
/// Returned in response to [Messages::Get](enum.Messages.html)
Get(Output<Option<usize>>),
/// Returned in response to [Messages::Set](enum.Messages.html)
Set(Output<()>),
/// Returned in response to [Messages::Update](enum.Messages.html)
Update(Output<usize>),
/// Returned in response to [Messages::Expire](enum.Messages.html)
Expire(Output<Duration>),
/// Returned in response to [Messages::Remove](enum.Messages.html)
Remove(Output<usize>),
}
impl<A, M> MessageResponse<A, M> for ActorResponse
where
A: Actor,
M: actix::Message<Result = ActorResponse>,
{
fn handle(self, _: &mut A::Context, tx: Option<OneshotSender<Self>>) {
if let Some(tx) = tx {
let _ = tx.send(self);
}
}
}


@@ -117,7 +117,7 @@ pub async fn init(
     );
 
     Ok(HttpResponse::TemporaryRedirect()
-        .header("Location", &*url)
+        .append_header(("Location", &*url))
         .json(AuthorizationInit { url }))
 }
@@ -235,7 +235,7 @@ pub async fn auth_callback(
         };
 
         Ok(HttpResponse::TemporaryRedirect()
-            .header("Location", &*redirect_url)
+            .append_header(("Location", &*redirect_url))
            .json(AuthorizationInit { url: redirect_url }))
     } else {
         Err(AuthorizationError::InvalidCredentialsError)

src/routes/maven.rs

@@ -53,11 +53,12 @@ pub struct MavenPom {
 #[get("maven/modrinth/{id}/maven-metadata.xml")]
 pub async fn maven_metadata(
     req: HttpRequest,
-    web::Path((project_id,)): web::Path<(String,)>,
+    params: web::Path<(String,)>,
     pool: web::Data<PgPool>,
 ) -> Result<HttpResponse, ApiError> {
+    let project_id = params.into_inner().0;
     let project_data =
-        database::models::Project::get_full_from_slug_or_project_id(&project_id, &**pool).await?;
+        database::models::Project::get_full_from_slug_or_project_id(&*project_id, &**pool).await?;
 
     let data = if let Some(data) = project_data {
         data
@@ -142,9 +143,10 @@ fn find_file<'a>(
 #[get("maven/modrinth/{id}/{versionnum}/{file}")]
 pub async fn version_file(
     req: HttpRequest,
-    web::Path((project_id, vnum, file)): web::Path<(String, String, String)>,
+    params: web::Path<(String, String, String)>,
     pool: web::Data<PgPool>,
 ) -> Result<HttpResponse, ApiError> {
+    let (project_id, vnum, file) = params.into_inner();
     let project_data =
         database::models::Project::get_full_from_slug_or_project_id(&project_id, &**pool).await?;
 
@@ -200,7 +202,7 @@ pub async fn version_file(
             .body(yaserde::ser::to_string(&respdata).map_err(ApiError::XmlError)?));
     } else if let Some(selected_file) = find_file(&project_id, &project, &version, &file) {
         return Ok(HttpResponse::TemporaryRedirect()
-            .header("location", &*selected_file.url)
+            .append_header(("location", &*selected_file.url))
             .body(""));
     }
 
@@ -210,9 +212,10 @@ pub async fn version_file(
 #[get("maven/modrinth/{id}/{versionnum}/{file}.sha1")]
 pub async fn version_file_sha1(
     req: HttpRequest,
-    web::Path((project_id, vnum, file)): web::Path<(String, String, String)>,
+    params: web::Path<(String, String, String)>,
     pool: web::Data<PgPool>,
 ) -> Result<HttpResponse, ApiError> {
+    let (project_id, vnum, file) = params.into_inner();
     let project_data =
         database::models::Project::get_full_from_slug_or_project_id(&project_id, &**pool).await?;
 
@@ -260,9 +263,10 @@ pub async fn version_file_sha1(
 #[get("maven/modrinth/{id}/{versionnum}/{file}.sha512")]
 pub async fn version_file_sha512(
     req: HttpRequest,
-    web::Path((project_id, vnum, file)): web::Path<(String, String, String)>,
+    params: web::Path<(String, String, String)>,
     pool: web::Data<PgPool>,
 ) -> Result<HttpResponse, ApiError> {
+    let (project_id, vnum, file) = params.into_inner();
     let project_data =
         database::models::Project::get_full_from_slug_or_project_id(&project_id, &**pool).await?;

src/routes/project_creation.rs

@@ -323,9 +323,7 @@ pub async fn project_create_inner(
             )))
         })?;
 
-        let content_disposition = field.content_disposition().ok_or_else(|| {
-            CreateError::MissingValueError(String::from("Missing content disposition"))
-        })?;
+        let content_disposition = field.content_disposition();
 
         let name = content_disposition
             .get_name()
             .ok_or_else(|| CreateError::MissingValueError(String::from("Missing content name")))?;
@@ -409,9 +407,7 @@ pub async fn project_create_inner(
     while let Some(item) = payload.next().await {
         let mut field: Field = item.map_err(CreateError::MultipartError)?;
 
-        let content_disposition = field.content_disposition().ok_or_else(|| {
-            CreateError::MissingValueError("Missing content disposition".to_string())
-        })?;
+        let content_disposition = field.content_disposition().clone();
 
         let name = content_disposition
             .get_name()


@@ -254,7 +254,7 @@ pub async fn download_version(
     if let Some(id) = result {
         let real_ip = req.connection_info();
-        let ip_option = real_ip.borrow().remote_addr();
+        let ip_option = real_ip.borrow().peer_addr();
 
         if let Some(ip) = ip_option {
             let hash = sha1::Sha1::from(format!("{}{}", ip, pepper.pepper)).hexdigest();
@@ -312,7 +312,7 @@ pub async fn download_version(
             }
         }
 
         Ok(HttpResponse::TemporaryRedirect()
-            .header("Location", &*id.url)
+            .append_header(("Location", &*id.url))
             .json(DownloadRedirect { url: id.url }))
     } else {
         Ok(HttpResponse::NotFound().body(""))

src/routes/version_creation.rs

@@ -106,9 +106,7 @@ async fn version_create_inner(
     while let Some(item) = payload.next().await {
         let mut field: Field = item.map_err(CreateError::MultipartError)?;
 
-        let content_disposition = field.content_disposition().ok_or_else(|| {
-            CreateError::MissingValueError("Missing content disposition".to_string())
-        })?;
+        let content_disposition = field.content_disposition().clone();
 
         let name = content_disposition
             .get_name()
             .ok_or_else(|| CreateError::MissingValueError("Missing content name".to_string()))?;
@@ -511,9 +509,7 @@ async fn upload_file_to_version_inner(
     while let Some(item) = payload.next().await {
         let mut field: Field = item.map_err(CreateError::MultipartError)?;
 
-        let content_disposition = field.content_disposition().ok_or_else(|| {
-            CreateError::MissingValueError("Missing content disposition".to_string())
-        })?;
+        let content_disposition = field.content_disposition().clone();
 
         let name = content_disposition
             .get_name()
             .ok_or_else(|| CreateError::MissingValueError("Missing content name".to_string()))?;
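The removed `ok_or_else` reflects an API change: in actix-multipart 0.4, `Field::content_disposition()` returns `&ContentDisposition` directly rather than an `Option`, so the "Missing content disposition" error can no longer occur and only the name lookup stays fallible. A small sketch (hypothetical helper, not from this PR):

use actix_multipart::Field;

// actix-multipart 0.4: content_disposition() is infallible; only the
// name can still be absent.
fn field_name(field: &Field) -> Option<String> {
    field.content_disposition().get_name().map(String::from)
}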


@@ -105,7 +105,7 @@ pub async fn download_version(
         transaction.commit().await?;
 
         Ok(HttpResponse::TemporaryRedirect()
-            .header("Location", &*id.url)
+            .append_header(("Location", &*id.url))
             .json(DownloadRedirect { url: id.url }))
     } else {
         Ok(HttpResponse::NotFound().body(""))
@@ -128,10 +128,10 @@ async fn download_version_inner(
         if let Some(header) = req.headers().get("CF-Connecting-IP") {
             header.to_str().ok()
         } else {
-            real_ip.borrow().remote_addr()
+            real_ip.borrow().peer_addr()
         }
     } else {
-        real_ip.borrow().remote_addr()
+        real_ip.borrow().peer_addr()
     };
 
     if let Some(ip) = ip_option {

src/scheduler.rs

@@ -18,8 +18,10 @@ impl Scheduler {
         F: FnMut() -> R + Send + 'static,
         R: std::future::Future<Output = ()> + Send + 'static,
     {
-        let future = time::interval(interval).for_each_concurrent(2, move |_| task());
-        self.arbiter.send(future);
+        let future =
+            IntervalStream::new(time::interval(interval)).for_each_concurrent(2, move |_| task());
+        self.arbiter.spawn(future);
     }
 }
@@ -72,6 +74,7 @@ pub enum VersionIndexingError {
 use crate::util::env::parse_var;
 use serde::Deserialize;
+use tokio_stream::wrappers::IntervalStream;
 
 #[derive(Deserialize)]
 struct InputFormat<'a> {
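Background for this hunk: tokio 1.x removed the `Stream` implementation from `time::Interval`, so `StreamExt::for_each_concurrent` no longer applies to it directly; `tokio_stream::wrappers::IntervalStream` restores it. A minimal sketch:

use futures::StreamExt;
use std::time::Duration;
use tokio_stream::wrappers::IntervalStream;

// Wrap tokio's Interval in IntervalStream to get a Stream again, then
// drive the ticks with at most two tasks in flight at once.
#[tokio::main]
async fn main() {
    IntervalStream::new(tokio::time::interval(Duration::from_millis(100)))
        .take(3) // stop after three ticks so the example terminates
        .for_each_concurrent(2, |_instant| async {
            println!("tick");
        })
        .await;
}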

src/util/auth.rs

@@ -3,7 +3,7 @@ use crate::database::models;
 use crate::database::models::project_item::QueryProject;
 use crate::models::users::{Role, User, UserId};
 use crate::routes::ApiError;
-use actix_web::http::HeaderMap;
+use actix_web::http::header::HeaderMap;
 use actix_web::web;
 use serde::{Deserialize, Serialize};
 use sqlx::PgPool;

src/util/ext.rs

@@ -16,7 +16,7 @@ pub fn project_file_type(ext: &str) -> Option<&str> {
     match ext {
         "jar" => Some("application/java-archive"),
         "zip" => Some("application/zip"),
-        "mrpack" => Some("application/zip"),
+        "mrpack" => Some("application/x-modrinth-modpack+zip"),
         _ => None,
     }
 }

src/validate/mod.rs

@@ -22,7 +22,7 @@ pub enum ValidationError {
     #[error("Invalid Input: {0}")]
     InvalidInputError(std::borrow::Cow<'static, str>),
     #[error("Error while managing threads")]
-    BlockingError,
+    BlockingError(#[from] actix_web::error::BlockingError),
 }
 
 #[derive(Eq, PartialEq)]
@@ -67,7 +67,7 @@ pub async fn validate_file(
     game_versions: Vec<GameVersion>,
     all_game_versions: Vec<crate::database::models::categories::GameVersion>,
 ) -> Result<ValidationResult, ValidationError> {
-    let res = actix_web::web::block(move || {
+    actix_web::web::block(move || {
         let reader = std::io::Cursor::new(data);
         let mut zip = zip::ZipArchive::new(reader)?;
@@ -103,15 +103,7 @@ pub async fn validate_file(
             Ok(ValidationResult::Pass)
         }
     })
-    .await;
-
-    match res {
-        Ok(x) => Ok(x),
-        Err(err) => match err {
-            actix_web::error::BlockingError::Canceled => Err(ValidationError::BlockingError),
-            actix_web::error::BlockingError::Error(err) => Err(err),
-        },
-    }
+    .await?
 }
 
 fn game_version_supported(
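In actix-web 4, `web::block` resolves to `Result<R, BlockingError>` where `BlockingError` is no longer generic over the closure's error type (the old `Canceled`/`Error(E)` split is gone). The closure's own `Result` therefore passes through unchanged, and the cancellation case converts via the new `#[from]` variant. A small sketch of the same pattern (hypothetical types, not from this PR):

use thiserror::Error;

#[derive(Debug, Error)]
enum MyError {
    #[error("bad input")]
    BadInput,
    // actix-web 4's BlockingError carries no inner error.
    #[error("error while managing threads")]
    Blocking(#[from] actix_web::error::BlockingError),
}

// The closure returns Result<usize, MyError>; `.await?` strips only the
// BlockingError layer, leaving the inner Result as the function's value.
async fn byte_count(data: Vec<u8>) -> Result<usize, MyError> {
    actix_web::web::block(move || {
        if data.is_empty() {
            return Err(MyError::BadInput);
        }
        Ok(data.len())
    })
    .await?
}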

src/validate/pack.rs

@@ -4,6 +4,7 @@ use crate::util::validate::validation_errors_to_string;
 use crate::validate::{SupportedGameVersions, ValidationError, ValidationResult};
 use serde::{Deserialize, Serialize};
 use std::io::{Cursor, Read};
+use std::path::Component;
 use validator::Validate;
 use zip::ZipArchive;
@@ -155,6 +156,22 @@ impl super::Validator for PackValidator {
                     "All pack files must provide a SHA1 hash!".into(),
                 ));
             }
+
+            let path = std::path::Path::new(file.path)
+                .components()
+                .next()
+                .ok_or_else(|| {
+                    ValidationError::InvalidInputError("Invalid pack file path!".into())
+                })?;
+
+            match path {
+                Component::CurDir | Component::Normal(_) => {}
+                _ => {
+                    return Err(ValidationError::InvalidInputError(
+                        "Invalid pack file path!".into(),
+                    ))
+                }
+            };
         }
 
         Ok(ValidationResult::Pass)
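The new check inspects only the first component of each pack file path, which is enough to reject absolute paths and paths that begin by escaping the pack root. A quick illustration of how `Component` classifies typical paths (a hypothetical test, not part of the PR):

use std::path::{Component, Path};

// Mirrors the validator's first-component rule.
fn first_component_ok(p: &str) -> bool {
    matches!(
        Path::new(p).components().next(),
        Some(Component::CurDir) | Some(Component::Normal(_))
    )
}

fn main() {
    assert!(first_component_ok("overrides/config/mod.toml")); // Normal
    assert!(first_component_ok("./overrides/options.txt")); // CurDir
    assert!(!first_component_ok("/etc/passwd")); // RootDir rejected
    assert!(!first_component_ok("../outside.txt")); // ParentDir rejected
}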