Allow multiple labrinth instances (#3360)
* Move a lot of scheduled tasks to be runnable from the command-line * Use pubsub to handle sockets connected to multiple Labrinths * Clippy fix * Fix build and merge some stuff * Fix build fmt : --------- Signed-off-by: Jai Agrawal <18202329+Geometrically@users.noreply.github.com> Co-authored-by: Jai A <jaiagr+gpg@pm.me> Co-authored-by: Jai Agrawal <18202329+Geometrically@users.noreply.github.com>
This commit is contained in:
parent
84a9438a70
commit
c998d2566e
1
Cargo.lock
generated
1
Cargo.lock
generated
@ -4522,6 +4522,7 @@ dependencies = [
|
|||||||
"bytes 1.7.2",
|
"bytes 1.7.2",
|
||||||
"censor",
|
"censor",
|
||||||
"chrono",
|
"chrono",
|
||||||
|
"clap",
|
||||||
"clickhouse",
|
"clickhouse",
|
||||||
"color-thief",
|
"color-thief",
|
||||||
"console-subscriber",
|
"console-subscriber",
|
||||||
|
|||||||
14
apps/labrinth/.sqlx/query-41ec8301348dc912d0e5a16def1179cc9c02b1c0364319e76454dff713abdd45.json
generated
Normal file
14
apps/labrinth/.sqlx/query-41ec8301348dc912d0e5a16def1179cc9c02b1c0364319e76454dff713abdd45.json
generated
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"db_name": "PostgreSQL",
|
||||||
|
"query": "\n UPDATE versions\n SET status = requested_status\n WHERE status = $1 AND date_published < CURRENT_DATE AND requested_status IS NOT NULL\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Left": [
|
||||||
|
"Text"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "41ec8301348dc912d0e5a16def1179cc9c02b1c0364319e76454dff713abdd45"
|
||||||
|
}
|
||||||
14
apps/labrinth/.sqlx/query-4ce906f3bec42a2d4b9ed8b8481bd168aaa2f791305f30adbf3b002ba39da7fa.json
generated
Normal file
14
apps/labrinth/.sqlx/query-4ce906f3bec42a2d4b9ed8b8481bd168aaa2f791305f30adbf3b002ba39da7fa.json
generated
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"db_name": "PostgreSQL",
|
||||||
|
"query": "\n UPDATE mods\n SET status = requested_status\n WHERE status = $1 AND approved < CURRENT_DATE AND requested_status IS NOT NULL\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Left": [
|
||||||
|
"Text"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "4ce906f3bec42a2d4b9ed8b8481bd168aaa2f791305f30adbf3b002ba39da7fa"
|
||||||
|
}
|
||||||
@ -1,14 +0,0 @@
|
|||||||
{
|
|
||||||
"db_name": "PostgreSQL",
|
|
||||||
"query": "\n UPDATE mods\n SET status = requested_status\n WHERE status = $1 AND approved < CURRENT_DATE AND requested_status IS NOT NULL\n ",
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"nullable": []
|
|
||||||
},
|
|
||||||
"hash": "b971cecafab7046c5952447fd78a6e45856841256d812ce9ae3c07f903c5cc62"
|
|
||||||
}
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
{
|
|
||||||
"db_name": "PostgreSQL",
|
|
||||||
"query": "\n UPDATE users\n SET badges = $1\n WHERE (id = $2)\n ",
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Int8",
|
|
||||||
"Int8"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"nullable": []
|
|
||||||
},
|
|
||||||
"hash": "bd26a27ce80ca796ae19bc709c92800a0a43dfef4a37a5725403d33ccb20d908"
|
|
||||||
}
|
|
||||||
@ -1,14 +0,0 @@
|
|||||||
{
|
|
||||||
"db_name": "PostgreSQL",
|
|
||||||
"query": "\n UPDATE versions\n SET status = requested_status\n WHERE status = $1 AND date_published < CURRENT_DATE AND requested_status IS NOT NULL\n ",
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"parameters": {
|
|
||||||
"Left": [
|
|
||||||
"Text"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"nullable": []
|
|
||||||
},
|
|
||||||
"hash": "c8a27a122160a0896914c786deef9e8193eb240501d30d5ffb4129e2103efd3d"
|
|
||||||
}
|
|
||||||
15
apps/labrinth/.sqlx/query-f2525e9be3b90fc0c42c8333ca795ff0b6eb1d3c4350d8e025d39d927d4547fc.json
generated
Normal file
15
apps/labrinth/.sqlx/query-f2525e9be3b90fc0c42c8333ca795ff0b6eb1d3c4350d8e025d39d927d4547fc.json
generated
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"db_name": "PostgreSQL",
|
||||||
|
"query": "\n UPDATE users\n SET badges = $1\n WHERE (id = $2)\n ",
|
||||||
|
"describe": {
|
||||||
|
"columns": [],
|
||||||
|
"parameters": {
|
||||||
|
"Left": [
|
||||||
|
"Int8",
|
||||||
|
"Int8"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"nullable": []
|
||||||
|
},
|
||||||
|
"hash": "f2525e9be3b90fc0c42c8333ca795ff0b6eb1d3c4350d8e025d39d927d4547fc"
|
||||||
|
}
|
||||||
@ -131,6 +131,8 @@ json-patch = "*"
|
|||||||
|
|
||||||
ariadne = { path = "../../packages/ariadne" }
|
ariadne = { path = "../../packages/ariadne" }
|
||||||
|
|
||||||
|
clap = { version = "4.5", features = ["derive"] }
|
||||||
|
|
||||||
[target.'cfg(not(target_env = "msvc"))'.dependencies]
|
[target.'cfg(not(target_env = "msvc"))'.dependencies]
|
||||||
tikv-jemallocator = { version = "0.6.0", features = ["profiling", "unprefixed_malloc_on_supported_platforms"] }
|
tikv-jemallocator = { version = "0.6.0", features = ["profiling", "unprefixed_malloc_on_supported_platforms"] }
|
||||||
tikv-jemalloc-ctl = { version = "0.6.0", features = ["stats"] }
|
tikv-jemalloc-ctl = { version = "0.6.0", features = ["stats"] }
|
||||||
|
|||||||
278
apps/labrinth/src/background_task.rs
Normal file
278
apps/labrinth/src/background_task.rs
Normal file
@ -0,0 +1,278 @@
|
|||||||
|
use crate::database::redis::RedisPool;
|
||||||
|
use crate::queue::payouts::process_payout;
|
||||||
|
use crate::search;
|
||||||
|
use crate::search::indexing::index_projects;
|
||||||
|
use clap::ValueEnum;
|
||||||
|
use sqlx::Postgres;
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
#[derive(ValueEnum, Debug, Copy, Clone, PartialEq, Eq)]
|
||||||
|
#[clap(rename_all = "kebab_case")]
|
||||||
|
pub enum BackgroundTask {
|
||||||
|
IndexSearch,
|
||||||
|
ReleaseScheduled,
|
||||||
|
UpdateVersions,
|
||||||
|
Payouts,
|
||||||
|
IndexBilling,
|
||||||
|
IndexSubscriptions,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BackgroundTask {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
pool: sqlx::Pool<Postgres>,
|
||||||
|
redis_pool: RedisPool,
|
||||||
|
search_config: search::SearchConfig,
|
||||||
|
clickhouse: clickhouse::Client,
|
||||||
|
stripe_client: stripe::Client,
|
||||||
|
) {
|
||||||
|
use BackgroundTask::*;
|
||||||
|
match self {
|
||||||
|
IndexSearch => index_search(pool, redis_pool, search_config).await,
|
||||||
|
ReleaseScheduled => release_scheduled(pool).await,
|
||||||
|
UpdateVersions => update_versions(pool, redis_pool).await,
|
||||||
|
Payouts => payouts(pool, clickhouse).await,
|
||||||
|
IndexBilling => {
|
||||||
|
crate::routes::internal::billing::index_billing(
|
||||||
|
stripe_client,
|
||||||
|
pool,
|
||||||
|
redis_pool,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
IndexSubscriptions => {
|
||||||
|
crate::routes::internal::billing::index_subscriptions(
|
||||||
|
pool, redis_pool,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn index_search(
|
||||||
|
pool: sqlx::Pool<Postgres>,
|
||||||
|
redis_pool: RedisPool,
|
||||||
|
search_config: search::SearchConfig,
|
||||||
|
) {
|
||||||
|
info!("Indexing local database");
|
||||||
|
let result = index_projects(pool, redis_pool, &search_config).await;
|
||||||
|
if let Err(e) = result {
|
||||||
|
warn!("Local project indexing failed: {:?}", e);
|
||||||
|
}
|
||||||
|
info!("Done indexing local database");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn release_scheduled(pool: sqlx::Pool<Postgres>) {
|
||||||
|
info!("Releasing scheduled versions/projects!");
|
||||||
|
|
||||||
|
let projects_results = sqlx::query!(
|
||||||
|
"
|
||||||
|
UPDATE mods
|
||||||
|
SET status = requested_status
|
||||||
|
WHERE status = $1 AND approved < CURRENT_DATE AND requested_status IS NOT NULL
|
||||||
|
",
|
||||||
|
crate::models::projects::ProjectStatus::Scheduled.as_str(),
|
||||||
|
)
|
||||||
|
.execute(&pool)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
if let Err(e) = projects_results {
|
||||||
|
warn!("Syncing scheduled releases for projects failed: {:?}", e);
|
||||||
|
}
|
||||||
|
|
||||||
|
let versions_results = sqlx::query!(
|
||||||
|
"
|
||||||
|
UPDATE versions
|
||||||
|
SET status = requested_status
|
||||||
|
WHERE status = $1 AND date_published < CURRENT_DATE AND requested_status IS NOT NULL
|
||||||
|
",
|
||||||
|
crate::models::projects::VersionStatus::Scheduled.as_str(),
|
||||||
|
)
|
||||||
|
.execute(&pool)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
if let Err(e) = versions_results {
|
||||||
|
warn!("Syncing scheduled releases for versions failed: {:?}", e);
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("Finished releasing scheduled versions/projects");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn update_versions(
|
||||||
|
pool: sqlx::Pool<Postgres>,
|
||||||
|
redis_pool: RedisPool,
|
||||||
|
) {
|
||||||
|
info!("Indexing game versions list from Mojang");
|
||||||
|
let result = version_updater::update_versions(&pool, &redis_pool).await;
|
||||||
|
if let Err(e) = result {
|
||||||
|
warn!("Version update failed: {}", e);
|
||||||
|
}
|
||||||
|
info!("Done indexing game versions");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn payouts(
|
||||||
|
pool: sqlx::Pool<Postgres>,
|
||||||
|
clickhouse: clickhouse::Client,
|
||||||
|
) {
|
||||||
|
info!("Started running payouts");
|
||||||
|
let result = process_payout(&pool, &clickhouse).await;
|
||||||
|
if let Err(e) = result {
|
||||||
|
warn!("Payouts run failed: {:?}", e);
|
||||||
|
}
|
||||||
|
info!("Done running payouts");
|
||||||
|
}
|
||||||
|
|
||||||
|
mod version_updater {
|
||||||
|
use crate::database::models::legacy_loader_fields::MinecraftGameVersion;
|
||||||
|
use crate::database::redis::RedisPool;
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use sqlx::Postgres;
|
||||||
|
use thiserror::Error;
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct InputFormat<'a> {
|
||||||
|
// latest: LatestFormat,
|
||||||
|
versions: Vec<VersionFormat<'a>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct VersionFormat<'a> {
|
||||||
|
id: String,
|
||||||
|
#[serde(rename = "type")]
|
||||||
|
type_: std::borrow::Cow<'a, str>,
|
||||||
|
#[serde(rename = "releaseTime")]
|
||||||
|
release_time: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Error, Debug)]
|
||||||
|
pub enum VersionIndexingError {
|
||||||
|
#[error("Network error while updating game versions list: {0}")]
|
||||||
|
NetworkError(#[from] reqwest::Error),
|
||||||
|
#[error("Database error while updating game versions list: {0}")]
|
||||||
|
DatabaseError(#[from] crate::database::models::DatabaseError),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn update_versions(
|
||||||
|
pool: &sqlx::Pool<Postgres>,
|
||||||
|
redis: &RedisPool,
|
||||||
|
) -> Result<(), VersionIndexingError> {
|
||||||
|
let input = reqwest::get(
|
||||||
|
"https://piston-meta.mojang.com/mc/game/version_manifest_v2.json",
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
.json::<InputFormat>()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut skipped_versions_count = 0u32;
|
||||||
|
|
||||||
|
// A list of version names that contains spaces.
|
||||||
|
// Generated using the command
|
||||||
|
// ```sh
|
||||||
|
// curl https://launchermeta.mojang.com/mc/game/version_manifest.json \
|
||||||
|
// | jq '[.versions[].id | select(contains(" "))]'
|
||||||
|
// ```
|
||||||
|
const HALL_OF_SHAME: [(&str, &str); 12] = [
|
||||||
|
("1.14.2 Pre-Release 4", "1.14.2-pre4"),
|
||||||
|
("1.14.2 Pre-Release 3", "1.14.2-pre3"),
|
||||||
|
("1.14.2 Pre-Release 2", "1.14.2-pre2"),
|
||||||
|
("1.14.2 Pre-Release 1", "1.14.2-pre1"),
|
||||||
|
("1.14.1 Pre-Release 2", "1.14.1-pre2"),
|
||||||
|
("1.14.1 Pre-Release 1", "1.14.1-pre1"),
|
||||||
|
("1.14 Pre-Release 5", "1.14-pre5"),
|
||||||
|
("1.14 Pre-Release 4", "1.14-pre4"),
|
||||||
|
("1.14 Pre-Release 3", "1.14-pre3"),
|
||||||
|
("1.14 Pre-Release 2", "1.14-pre2"),
|
||||||
|
("1.14 Pre-Release 1", "1.14-pre1"),
|
||||||
|
("3D Shareware v1.34", "3D-Shareware-v1.34"),
|
||||||
|
];
|
||||||
|
|
||||||
|
lazy_static::lazy_static! {
|
||||||
|
/// Mojank for some reason has versions released at the same DateTime. This hardcodes them to fix this,
|
||||||
|
/// as most of our ordering logic is with DateTime
|
||||||
|
static ref HALL_OF_SHAME_2: [(&'static str, DateTime<Utc>); 4] = [
|
||||||
|
(
|
||||||
|
"1.4.5",
|
||||||
|
chrono::DateTime::parse_from_rfc3339("2012-12-19T22:00:00+00:00")
|
||||||
|
.unwrap()
|
||||||
|
.into(),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"1.4.6",
|
||||||
|
chrono::DateTime::parse_from_rfc3339("2012-12-19T22:00:01+00:00")
|
||||||
|
.unwrap()
|
||||||
|
.into(),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"1.6.3",
|
||||||
|
chrono::DateTime::parse_from_rfc3339("2013-09-13T10:54:41+00:00")
|
||||||
|
.unwrap()
|
||||||
|
.into(),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"13w37b",
|
||||||
|
chrono::DateTime::parse_from_rfc3339("2013-09-13T10:54:42+00:00")
|
||||||
|
.unwrap()
|
||||||
|
.into(),
|
||||||
|
),
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
for version in input.versions.into_iter() {
|
||||||
|
let mut name = version.id;
|
||||||
|
if !name
|
||||||
|
.chars()
|
||||||
|
.all(|c| c.is_ascii_alphanumeric() || "-_.".contains(c))
|
||||||
|
{
|
||||||
|
if let Some((_, alternate)) =
|
||||||
|
HALL_OF_SHAME.iter().find(|(version, _)| name == *version)
|
||||||
|
{
|
||||||
|
name = String::from(*alternate);
|
||||||
|
} else {
|
||||||
|
// We'll deal with these manually
|
||||||
|
skipped_versions_count += 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let type_ = match &*version.type_ {
|
||||||
|
"release" => "release",
|
||||||
|
"snapshot" => "snapshot",
|
||||||
|
"old_alpha" => "alpha",
|
||||||
|
"old_beta" => "beta",
|
||||||
|
_ => "other",
|
||||||
|
};
|
||||||
|
|
||||||
|
MinecraftGameVersion::builder()
|
||||||
|
.version(&name)?
|
||||||
|
.version_type(type_)?
|
||||||
|
.created(
|
||||||
|
if let Some((_, alternate)) = HALL_OF_SHAME_2
|
||||||
|
.iter()
|
||||||
|
.find(|(version, _)| name == *version)
|
||||||
|
{
|
||||||
|
alternate
|
||||||
|
} else {
|
||||||
|
&version.release_time
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.insert(pool, redis)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
if skipped_versions_count > 0 {
|
||||||
|
// This will currently always trigger due to 1.14 pre releases
|
||||||
|
// and the shareware april fools update. We could set a threshold
|
||||||
|
// that accounts for those versions and update it whenever we
|
||||||
|
// manually fix another version.
|
||||||
|
warn!(
|
||||||
|
"Skipped {} game versions; check for new versions and add them manually",
|
||||||
|
skipped_versions_count
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -19,6 +19,7 @@ const ACTUAL_EXPIRY: i64 = 60 * 30; // 30 minutes
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct RedisPool {
|
pub struct RedisPool {
|
||||||
|
pub url: String,
|
||||||
pub pool: deadpool_redis::Pool,
|
pub pool: deadpool_redis::Pool,
|
||||||
meta_namespace: String,
|
meta_namespace: String,
|
||||||
}
|
}
|
||||||
@ -33,9 +34,8 @@ impl RedisPool {
|
|||||||
// testing pool uses a hashmap to mimic redis behaviour for very small data sizes (ie: tests)
|
// testing pool uses a hashmap to mimic redis behaviour for very small data sizes (ie: tests)
|
||||||
// PANICS: production pool will panic if redis url is not set
|
// PANICS: production pool will panic if redis url is not set
|
||||||
pub fn new(meta_namespace: Option<String>) -> Self {
|
pub fn new(meta_namespace: Option<String>) -> Self {
|
||||||
let redis_pool = Config::from_url(
|
let url = dotenvy::var("REDIS_URL").expect("Redis URL not set");
|
||||||
dotenvy::var("REDIS_URL").expect("Redis URL not set"),
|
let pool = Config::from_url(url.clone())
|
||||||
)
|
|
||||||
.builder()
|
.builder()
|
||||||
.expect("Error building Redis pool")
|
.expect("Error building Redis pool")
|
||||||
.max_size(
|
.max_size(
|
||||||
@ -49,7 +49,8 @@ impl RedisPool {
|
|||||||
.expect("Redis connection failed");
|
.expect("Redis connection failed");
|
||||||
|
|
||||||
RedisPool {
|
RedisPool {
|
||||||
pool: redis_pool,
|
url,
|
||||||
|
pool,
|
||||||
meta_namespace: meta_namespace.unwrap_or("".to_string()),
|
meta_namespace: meta_namespace.unwrap_or("".to_string()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -17,15 +17,14 @@ use governor::middleware::StateInformationMiddleware;
|
|||||||
use governor::{Quota, RateLimiter};
|
use governor::{Quota, RateLimiter};
|
||||||
use util::cors::default_cors;
|
use util::cors::default_cors;
|
||||||
|
|
||||||
|
use crate::background_task::update_versions;
|
||||||
use crate::queue::moderation::AutomatedModerationQueue;
|
use crate::queue::moderation::AutomatedModerationQueue;
|
||||||
|
use crate::util::env::{parse_strings_from_var, parse_var};
|
||||||
use crate::util::ratelimit::KeyedRateLimiter;
|
use crate::util::ratelimit::KeyedRateLimiter;
|
||||||
use crate::{
|
use sync::friends::handle_pubsub;
|
||||||
queue::payouts::process_payout,
|
|
||||||
search::indexing::index_projects,
|
|
||||||
util::env::{parse_strings_from_var, parse_var},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub mod auth;
|
pub mod auth;
|
||||||
|
pub mod background_task;
|
||||||
pub mod clickhouse;
|
pub mod clickhouse;
|
||||||
pub mod database;
|
pub mod database;
|
||||||
pub mod file_hosting;
|
pub mod file_hosting;
|
||||||
@ -34,6 +33,7 @@ pub mod queue;
|
|||||||
pub mod routes;
|
pub mod routes;
|
||||||
pub mod scheduler;
|
pub mod scheduler;
|
||||||
pub mod search;
|
pub mod search;
|
||||||
|
pub mod sync;
|
||||||
pub mod util;
|
pub mod util;
|
||||||
pub mod validate;
|
pub mod validate;
|
||||||
|
|
||||||
@ -61,6 +61,7 @@ pub struct LabrinthConfig {
|
|||||||
pub stripe_client: stripe::Client,
|
pub stripe_client: stripe::Client,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn app_setup(
|
pub fn app_setup(
|
||||||
pool: sqlx::Pool<Postgres>,
|
pool: sqlx::Pool<Postgres>,
|
||||||
redis_pool: RedisPool,
|
redis_pool: RedisPool,
|
||||||
@ -68,6 +69,8 @@ pub fn app_setup(
|
|||||||
clickhouse: &mut Client,
|
clickhouse: &mut Client,
|
||||||
file_host: Arc<dyn file_hosting::FileHost + Send + Sync>,
|
file_host: Arc<dyn file_hosting::FileHost + Send + Sync>,
|
||||||
maxmind: Arc<queue::maxmind::MaxMindIndexer>,
|
maxmind: Arc<queue::maxmind::MaxMindIndexer>,
|
||||||
|
stripe_client: stripe::Client,
|
||||||
|
enable_background_tasks: bool,
|
||||||
) -> LabrinthConfig {
|
) -> LabrinthConfig {
|
||||||
info!(
|
info!(
|
||||||
"Starting Labrinth on {}",
|
"Starting Labrinth on {}",
|
||||||
@ -109,12 +112,12 @@ pub fn app_setup(
|
|||||||
async move {}
|
async move {}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
if enable_background_tasks {
|
||||||
// The interval in seconds at which the local database is indexed
|
// The interval in seconds at which the local database is indexed
|
||||||
// for searching. Defaults to 1 hour if unset.
|
// for searching. Defaults to 1 hour if unset.
|
||||||
let local_index_interval = std::time::Duration::from_secs(
|
let local_index_interval = Duration::from_secs(
|
||||||
parse_var("LOCAL_INDEX_INTERVAL").unwrap_or(3600),
|
parse_var("LOCAL_INDEX_INTERVAL").unwrap_or(3600),
|
||||||
);
|
);
|
||||||
|
|
||||||
let pool_ref = pool.clone();
|
let pool_ref = pool.clone();
|
||||||
let search_config_ref = search_config.clone();
|
let search_config_ref = search_config.clone();
|
||||||
let redis_pool_ref = redis_pool.clone();
|
let redis_pool_ref = redis_pool.clone();
|
||||||
@ -123,74 +126,83 @@ pub fn app_setup(
|
|||||||
let redis_pool_ref = redis_pool_ref.clone();
|
let redis_pool_ref = redis_pool_ref.clone();
|
||||||
let search_config_ref = search_config_ref.clone();
|
let search_config_ref = search_config_ref.clone();
|
||||||
async move {
|
async move {
|
||||||
info!("Indexing local database");
|
background_task::index_search(
|
||||||
let result = index_projects(
|
|
||||||
pool_ref,
|
pool_ref,
|
||||||
redis_pool_ref.clone(),
|
redis_pool_ref,
|
||||||
&search_config_ref,
|
search_config_ref,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
if let Err(e) = result {
|
|
||||||
warn!("Local project indexing failed: {:?}", e);
|
|
||||||
}
|
|
||||||
info!("Done indexing local database");
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
// Changes statuses of scheduled projects/versions
|
// Changes statuses of scheduled projects/versions
|
||||||
let pool_ref = pool.clone();
|
let pool_ref = pool.clone();
|
||||||
// TODO: Clear cache when these are run
|
// TODO: Clear cache when these are run
|
||||||
scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
|
scheduler.run(Duration::from_secs(60 * 5), move || {
|
||||||
let pool_ref = pool_ref.clone();
|
let pool_ref = pool_ref.clone();
|
||||||
info!("Releasing scheduled versions/projects!");
|
|
||||||
|
|
||||||
async move {
|
async move {
|
||||||
let projects_results = sqlx::query!(
|
background_task::release_scheduled(pool_ref).await;
|
||||||
"
|
|
||||||
UPDATE mods
|
|
||||||
SET status = requested_status
|
|
||||||
WHERE status = $1 AND approved < CURRENT_DATE AND requested_status IS NOT NULL
|
|
||||||
",
|
|
||||||
crate::models::projects::ProjectStatus::Scheduled.as_str(),
|
|
||||||
)
|
|
||||||
.execute(&pool_ref)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
if let Err(e) = projects_results {
|
|
||||||
warn!("Syncing scheduled releases for projects failed: {:?}", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
let versions_results = sqlx::query!(
|
|
||||||
"
|
|
||||||
UPDATE versions
|
|
||||||
SET status = requested_status
|
|
||||||
WHERE status = $1 AND date_published < CURRENT_DATE AND requested_status IS NOT NULL
|
|
||||||
",
|
|
||||||
crate::models::projects::VersionStatus::Scheduled.as_str(),
|
|
||||||
)
|
|
||||||
.execute(&pool_ref)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
if let Err(e) = versions_results {
|
|
||||||
warn!("Syncing scheduled releases for versions failed: {:?}", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Finished releasing scheduled versions/projects");
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
scheduler::schedule_versions(
|
let version_index_interval = Duration::from_secs(
|
||||||
&mut scheduler,
|
parse_var("VERSION_INDEX_INTERVAL").unwrap_or(1800),
|
||||||
pool.clone(),
|
|
||||||
redis_pool.clone(),
|
|
||||||
);
|
);
|
||||||
|
let pool_ref = pool.clone();
|
||||||
|
let redis_pool_ref = redis_pool.clone();
|
||||||
|
scheduler.run(version_index_interval, move || {
|
||||||
|
let pool_ref = pool_ref.clone();
|
||||||
|
let redis = redis_pool_ref.clone();
|
||||||
|
async move {
|
||||||
|
update_versions(pool_ref, redis).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let pool_ref = pool.clone();
|
||||||
|
let client_ref = clickhouse.clone();
|
||||||
|
scheduler.run(Duration::from_secs(60 * 60 * 6), move || {
|
||||||
|
let pool_ref = pool_ref.clone();
|
||||||
|
let client_ref = client_ref.clone();
|
||||||
|
async move {
|
||||||
|
background_task::payouts(pool_ref, client_ref).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let pool_ref = pool.clone();
|
||||||
|
let redis_ref = redis_pool.clone();
|
||||||
|
let stripe_client_ref = stripe_client.clone();
|
||||||
|
actix_rt::spawn(async move {
|
||||||
|
loop {
|
||||||
|
routes::internal::billing::index_billing(
|
||||||
|
stripe_client_ref.clone(),
|
||||||
|
pool_ref.clone(),
|
||||||
|
redis_ref.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
tokio::time::sleep(Duration::from_secs(60 * 5)).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let pool_ref = pool.clone();
|
||||||
|
let redis_ref = redis_pool.clone();
|
||||||
|
actix_rt::spawn(async move {
|
||||||
|
loop {
|
||||||
|
routes::internal::billing::index_subscriptions(
|
||||||
|
pool_ref.clone(),
|
||||||
|
redis_ref.clone(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
tokio::time::sleep(Duration::from_secs(60 * 5)).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
let session_queue = web::Data::new(AuthQueue::new());
|
let session_queue = web::Data::new(AuthQueue::new());
|
||||||
|
|
||||||
let pool_ref = pool.clone();
|
let pool_ref = pool.clone();
|
||||||
let redis_ref = redis_pool.clone();
|
let redis_ref = redis_pool.clone();
|
||||||
let session_queue_ref = session_queue.clone();
|
let session_queue_ref = session_queue.clone();
|
||||||
scheduler.run(std::time::Duration::from_secs(60 * 30), move || {
|
scheduler.run(Duration::from_secs(60 * 30), move || {
|
||||||
let pool_ref = pool_ref.clone();
|
let pool_ref = pool_ref.clone();
|
||||||
let redis_ref = redis_ref.clone();
|
let redis_ref = redis_ref.clone();
|
||||||
let session_queue_ref = session_queue_ref.clone();
|
let session_queue_ref = session_queue_ref.clone();
|
||||||
@ -208,7 +220,7 @@ pub fn app_setup(
|
|||||||
let reader = maxmind.clone();
|
let reader = maxmind.clone();
|
||||||
{
|
{
|
||||||
let reader_ref = reader;
|
let reader_ref = reader;
|
||||||
scheduler.run(std::time::Duration::from_secs(60 * 60 * 24), move || {
|
scheduler.run(Duration::from_secs(60 * 60 * 24), move || {
|
||||||
let reader_ref = reader_ref.clone();
|
let reader_ref = reader_ref.clone();
|
||||||
|
|
||||||
async move {
|
async move {
|
||||||
@ -232,7 +244,7 @@ pub fn app_setup(
|
|||||||
let analytics_queue_ref = analytics_queue.clone();
|
let analytics_queue_ref = analytics_queue.clone();
|
||||||
let pool_ref = pool.clone();
|
let pool_ref = pool.clone();
|
||||||
let redis_ref = redis_pool.clone();
|
let redis_ref = redis_pool.clone();
|
||||||
scheduler.run(std::time::Duration::from_secs(15), move || {
|
scheduler.run(Duration::from_secs(15), move || {
|
||||||
let client_ref = client_ref.clone();
|
let client_ref = client_ref.clone();
|
||||||
let analytics_queue_ref = analytics_queue_ref.clone();
|
let analytics_queue_ref = analytics_queue_ref.clone();
|
||||||
let pool_ref = pool_ref.clone();
|
let pool_ref = pool_ref.clone();
|
||||||
@ -251,51 +263,6 @@ pub fn app_setup(
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
|
||||||
let pool_ref = pool.clone();
|
|
||||||
let client_ref = clickhouse.clone();
|
|
||||||
scheduler.run(std::time::Duration::from_secs(60 * 60 * 6), move || {
|
|
||||||
let pool_ref = pool_ref.clone();
|
|
||||||
let client_ref = client_ref.clone();
|
|
||||||
|
|
||||||
async move {
|
|
||||||
info!("Started running payouts");
|
|
||||||
let result = process_payout(&pool_ref, &client_ref).await;
|
|
||||||
if let Err(e) = result {
|
|
||||||
warn!("Payouts run failed: {:?}", e);
|
|
||||||
}
|
|
||||||
info!("Done running payouts");
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
let stripe_client =
|
|
||||||
stripe::Client::new(dotenvy::var("STRIPE_API_KEY").unwrap());
|
|
||||||
{
|
|
||||||
let pool_ref = pool.clone();
|
|
||||||
let redis_ref = redis_pool.clone();
|
|
||||||
let stripe_client_ref = stripe_client.clone();
|
|
||||||
|
|
||||||
actix_rt::spawn(async move {
|
|
||||||
routes::internal::billing::task(
|
|
||||||
stripe_client_ref,
|
|
||||||
pool_ref,
|
|
||||||
redis_ref,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
let pool_ref = pool.clone();
|
|
||||||
let redis_ref = redis_pool.clone();
|
|
||||||
|
|
||||||
actix_rt::spawn(async move {
|
|
||||||
routes::internal::billing::subscription_task(pool_ref, redis_ref)
|
|
||||||
.await;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
let ip_salt = Pepper {
|
let ip_salt = Pepper {
|
||||||
pepper: ariadne::ids::Base62Id(ariadne::ids::random_base62(11))
|
pepper: ariadne::ids::Base62Id(ariadne::ids::random_base62(11))
|
||||||
.to_string(),
|
.to_string(),
|
||||||
@ -304,6 +271,16 @@ pub fn app_setup(
|
|||||||
let payouts_queue = web::Data::new(PayoutsQueue::new());
|
let payouts_queue = web::Data::new(PayoutsQueue::new());
|
||||||
let active_sockets = web::Data::new(ActiveSockets::default());
|
let active_sockets = web::Data::new(ActiveSockets::default());
|
||||||
|
|
||||||
|
{
|
||||||
|
let pool = pool.clone();
|
||||||
|
let redis_client = redis::Client::open(redis_pool.url.clone()).unwrap();
|
||||||
|
let sockets = active_sockets.clone();
|
||||||
|
actix_rt::spawn(async move {
|
||||||
|
let pubsub = redis_client.get_async_pubsub().await.unwrap();
|
||||||
|
handle_pubsub(pubsub, pool, sockets).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
LabrinthConfig {
|
LabrinthConfig {
|
||||||
pool,
|
pool,
|
||||||
redis_pool,
|
redis_pool,
|
||||||
|
|||||||
@ -1,5 +1,7 @@
|
|||||||
use actix_web::{App, HttpServer};
|
use actix_web::{App, HttpServer};
|
||||||
use actix_web_prom::PrometheusMetricsBuilder;
|
use actix_web_prom::PrometheusMetricsBuilder;
|
||||||
|
use clap::Parser;
|
||||||
|
use labrinth::background_task::BackgroundTask;
|
||||||
use labrinth::database::redis::RedisPool;
|
use labrinth::database::redis::RedisPool;
|
||||||
use labrinth::file_hosting::S3Host;
|
use labrinth::file_hosting::S3Host;
|
||||||
use labrinth::search;
|
use labrinth::search;
|
||||||
@ -23,8 +25,23 @@ pub struct Pepper {
|
|||||||
pub pepper: String,
|
pub pepper: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
#[command(version)]
|
||||||
|
struct Args {
|
||||||
|
/// Don't run regularly scheduled background tasks. This means the tasks should be run
|
||||||
|
/// manually with --run-background-task.
|
||||||
|
#[arg(long)]
|
||||||
|
no_background_tasks: bool,
|
||||||
|
|
||||||
|
/// Run a single background task and then exit. Perfect for cron jobs.
|
||||||
|
#[arg(long, value_enum, id = "task")]
|
||||||
|
run_background_task: Option<BackgroundTask>,
|
||||||
|
}
|
||||||
|
|
||||||
#[actix_rt::main]
|
#[actix_rt::main]
|
||||||
async fn main() -> std::io::Result<()> {
|
async fn main() -> std::io::Result<()> {
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
dotenvy::dotenv().ok();
|
dotenvy::dotenv().ok();
|
||||||
console_subscriber::init();
|
console_subscriber::init();
|
||||||
|
|
||||||
@ -44,10 +61,12 @@ async fn main() -> std::io::Result<()> {
|
|||||||
std::env::set_var("RUST_BACKTRACE", "1");
|
std::env::set_var("RUST_BACKTRACE", "1");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if args.run_background_task.is_none() {
|
||||||
info!(
|
info!(
|
||||||
"Starting Labrinth on {}",
|
"Starting Labrinth on {}",
|
||||||
dotenvy::var("BIND_ADDR").unwrap()
|
dotenvy::var("BIND_ADDR").unwrap()
|
||||||
);
|
);
|
||||||
|
}
|
||||||
|
|
||||||
database::check_for_migrations()
|
database::check_for_migrations()
|
||||||
.await
|
.await
|
||||||
@ -91,6 +110,18 @@ async fn main() -> std::io::Result<()> {
|
|||||||
info!("Initializing clickhouse connection");
|
info!("Initializing clickhouse connection");
|
||||||
let mut clickhouse = clickhouse::init_client().await.unwrap();
|
let mut clickhouse = clickhouse::init_client().await.unwrap();
|
||||||
|
|
||||||
|
let search_config = search::SearchConfig::new(None);
|
||||||
|
|
||||||
|
let stripe_client =
|
||||||
|
stripe::Client::new(dotenvy::var("STRIPE_API_KEY").unwrap());
|
||||||
|
|
||||||
|
if let Some(task) = args.run_background_task {
|
||||||
|
info!("Running task {task:?} and exiting");
|
||||||
|
task.run(pool, redis_pool, search_config, clickhouse, stripe_client)
|
||||||
|
.await;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
let maxmind_reader =
|
let maxmind_reader =
|
||||||
Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());
|
Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());
|
||||||
|
|
||||||
@ -115,8 +146,6 @@ async fn main() -> std::io::Result<()> {
|
|||||||
labrinth::routes::debug::jemalloc_mmeory_stats(&prometheus.registry)
|
labrinth::routes::debug::jemalloc_mmeory_stats(&prometheus.registry)
|
||||||
.expect("Failed to register jemalloc metrics");
|
.expect("Failed to register jemalloc metrics");
|
||||||
|
|
||||||
let search_config = search::SearchConfig::new(None);
|
|
||||||
|
|
||||||
let labrinth_config = labrinth::app_setup(
|
let labrinth_config = labrinth::app_setup(
|
||||||
pool.clone(),
|
pool.clone(),
|
||||||
redis_pool.clone(),
|
redis_pool.clone(),
|
||||||
@ -124,6 +153,8 @@ async fn main() -> std::io::Result<()> {
|
|||||||
&mut clickhouse,
|
&mut clickhouse,
|
||||||
file_host.clone(),
|
file_host.clone(),
|
||||||
maxmind_reader.clone(),
|
maxmind_reader.clone(),
|
||||||
|
stripe_client,
|
||||||
|
!args.no_background_tasks,
|
||||||
);
|
);
|
||||||
|
|
||||||
info!("Starting Actix HTTP server!");
|
info!("Starting Actix HTTP server!");
|
||||||
|
|||||||
@ -2091,8 +2091,7 @@ async fn get_or_create_customer(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn subscription_task(pool: PgPool, redis: RedisPool) {
|
pub async fn index_subscriptions(pool: PgPool, redis: RedisPool) {
|
||||||
loop {
|
|
||||||
info!("Indexing subscriptions");
|
info!("Indexing subscriptions");
|
||||||
|
|
||||||
let res = async {
|
let res = async {
|
||||||
@ -2159,8 +2158,7 @@ pub async fn subscription_task(pool: PgPool, redis: RedisPool) {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let product_price = if let Some(product_price) =
|
let product_price = if let Some(product_price) = subscription_prices
|
||||||
subscription_prices
|
|
||||||
.iter()
|
.iter()
|
||||||
.find(|x| x.id == subscription.price_id)
|
.find(|x| x.id == subscription.price_id)
|
||||||
{
|
{
|
||||||
@ -2259,26 +2257,25 @@ pub async fn subscription_task(pool: PgPool, redis: RedisPool) {
|
|||||||
};
|
};
|
||||||
|
|
||||||
if let Err(e) = res.await {
|
if let Err(e) = res.await {
|
||||||
warn!("Error indexing billing queue: {:?}", e);
|
warn!("Error indexing subscriptions: {:?}", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Done indexing billing queue");
|
info!("Done indexing subscriptions");
|
||||||
|
|
||||||
tokio::time::sleep(std::time::Duration::from_secs(60 * 5)).await;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn task(
|
pub async fn index_billing(
|
||||||
stripe_client: stripe::Client,
|
stripe_client: stripe::Client,
|
||||||
pool: PgPool,
|
pool: PgPool,
|
||||||
redis: RedisPool,
|
redis: RedisPool,
|
||||||
) {
|
) {
|
||||||
loop {
|
|
||||||
info!("Indexing billing queue");
|
info!("Indexing billing queue");
|
||||||
let res = async {
|
let res = async {
|
||||||
// If a charge is open and due or has been attempted more than two days ago, it should be processed
|
// If a charge is open and due or has been attempted more than two days ago, it should be processed
|
||||||
let charges_to_do =
|
let charges_to_do =
|
||||||
crate::database::models::charge_item::ChargeItem::get_chargeable(&pool).await?;
|
crate::database::models::charge_item::ChargeItem::get_chargeable(
|
||||||
|
&pool,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let prices = product_item::ProductPriceItem::get_many(
|
let prices = product_item::ProductPriceItem::get_many(
|
||||||
&charges_to_do
|
&charges_to_do
|
||||||
@ -2306,14 +2303,17 @@ pub async fn task(
|
|||||||
let mut transaction = pool.begin().await?;
|
let mut transaction = pool.begin().await?;
|
||||||
|
|
||||||
for mut charge in charges_to_do {
|
for mut charge in charges_to_do {
|
||||||
let product_price =
|
let product_price = if let Some(price) =
|
||||||
if let Some(price) = prices.iter().find(|x| x.id == charge.price_id) {
|
prices.iter().find(|x| x.id == charge.price_id)
|
||||||
|
{
|
||||||
price
|
price
|
||||||
} else {
|
} else {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let user = if let Some(user) = users.iter().find(|x| x.id == charge.user_id) {
|
let user = if let Some(user) =
|
||||||
|
users.iter().find(|x| x.id == charge.user_id)
|
||||||
|
{
|
||||||
user
|
user
|
||||||
} else {
|
} else {
|
||||||
continue;
|
continue;
|
||||||
@ -2325,7 +2325,10 @@ pub async fn task(
|
|||||||
if let Some(ref interval) = charge.subscription_interval {
|
if let Some(ref interval) = charge.subscription_interval {
|
||||||
intervals.get(interval)
|
intervals.get(interval)
|
||||||
} else {
|
} else {
|
||||||
warn!("Could not find subscription for charge {:?}", charge.id);
|
warn!(
|
||||||
|
"Could not find subscription for charge {:?}",
|
||||||
|
charge.id
|
||||||
|
);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2342,11 +2345,16 @@ pub async fn task(
|
|||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let customer =
|
let customer = stripe::Customer::retrieve(
|
||||||
stripe::Customer::retrieve(&stripe_client, &customer_id, &[]).await?;
|
&stripe_client,
|
||||||
|
&customer_id,
|
||||||
|
&[],
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let currency =
|
let currency = match Currency::from_str(
|
||||||
match Currency::from_str(&product_price.currency_code.to_lowercase()) {
|
&product_price.currency_code.to_lowercase(),
|
||||||
|
) {
|
||||||
Ok(x) => x,
|
Ok(x) => x,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
warn!(
|
warn!(
|
||||||
@ -2357,7 +2365,8 @@ pub async fn task(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut intent = CreatePaymentIntent::new(*price as i64, currency);
|
let mut intent =
|
||||||
|
CreatePaymentIntent::new(*price as i64, currency);
|
||||||
|
|
||||||
let mut metadata = HashMap::new();
|
let mut metadata = HashMap::new();
|
||||||
metadata.insert(
|
metadata.insert(
|
||||||
@ -2382,11 +2391,13 @@ pub async fn task(
|
|||||||
{
|
{
|
||||||
intent.payment_method = Some(payment_method);
|
intent.payment_method = Some(payment_method);
|
||||||
intent.confirm = Some(true);
|
intent.confirm = Some(true);
|
||||||
intent.off_session = Some(PaymentIntentOffSession::Exists(true));
|
intent.off_session =
|
||||||
|
Some(PaymentIntentOffSession::Exists(true));
|
||||||
|
|
||||||
charge.status = ChargeStatus::Processing;
|
charge.status = ChargeStatus::Processing;
|
||||||
|
|
||||||
stripe::PaymentIntent::create(&stripe_client, intent).await?;
|
stripe::PaymentIntent::create(&stripe_client, intent)
|
||||||
|
.await?;
|
||||||
} else {
|
} else {
|
||||||
charge.status = ChargeStatus::Failed;
|
charge.status = ChargeStatus::Failed;
|
||||||
charge.last_attempt = Some(Utc::now());
|
charge.last_attempt = Some(Utc::now());
|
||||||
@ -2407,7 +2418,4 @@ pub async fn task(
|
|||||||
}
|
}
|
||||||
|
|
||||||
info!("Done indexing billing queue");
|
info!("Done indexing billing queue");
|
||||||
|
|
||||||
tokio::time::sleep(std::time::Duration::from_secs(60 * 5)).await;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -9,6 +9,10 @@ use crate::queue::socket::{
|
|||||||
ActiveSocket, ActiveSockets, SocketId, TunnelSocketType,
|
ActiveSocket, ActiveSockets, SocketId, TunnelSocketType,
|
||||||
};
|
};
|
||||||
use crate::routes::ApiError;
|
use crate::routes::ApiError;
|
||||||
|
use crate::sync::friends::{RedisFriendsMessage, FRIENDS_CHANNEL_NAME};
|
||||||
|
use crate::sync::status::{
|
||||||
|
get_user_status, push_back_user_expiry, replace_user_status,
|
||||||
|
};
|
||||||
use actix_web::web::{Data, Payload};
|
use actix_web::web::{Data, Payload};
|
||||||
use actix_web::{get, web, HttpRequest, HttpResponse};
|
use actix_web::{get, web, HttpRequest, HttpResponse};
|
||||||
use actix_ws::Message;
|
use actix_ws::Message;
|
||||||
@ -19,10 +23,15 @@ use ariadne::networking::message::{
|
|||||||
use ariadne::users::UserStatus;
|
use ariadne::users::UserStatus;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
use either::Either;
|
use either::Either;
|
||||||
|
use futures_util::future::select;
|
||||||
use futures_util::{StreamExt, TryStreamExt};
|
use futures_util::{StreamExt, TryStreamExt};
|
||||||
|
use redis::AsyncCommands;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use sqlx::PgPool;
|
use sqlx::PgPool;
|
||||||
|
use std::pin::pin;
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::Ordering;
|
||||||
|
use tokio::sync::oneshot::error::TryRecvError;
|
||||||
|
use tokio::time::{sleep, Duration};
|
||||||
|
|
||||||
pub fn config(cfg: &mut web::ServiceConfig) {
|
pub fn config(cfg: &mut web::ServiceConfig) {
|
||||||
cfg.service(ws_init);
|
cfg.service(ws_init);
|
||||||
@ -62,6 +71,7 @@ pub async fn ws_init(
|
|||||||
}
|
}
|
||||||
|
|
||||||
let user = User::from_full(db_user);
|
let user = User::from_full(db_user);
|
||||||
|
let user_id = user.id;
|
||||||
|
|
||||||
let (res, mut session, msg_stream) = match actix_ws::handle(&req, body) {
|
let (res, mut session, msg_stream) = match actix_ws::handle(&req, body) {
|
||||||
Ok(x) => x,
|
Ok(x) => x,
|
||||||
@ -79,19 +89,32 @@ pub async fn ws_init(
|
|||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let friend_statuses = if !friends.is_empty() {
|
let friend_statuses = if !friends.is_empty() {
|
||||||
friends
|
let db = db.clone();
|
||||||
.iter()
|
let redis = redis.clone();
|
||||||
.filter_map(|x| {
|
tokio_stream::iter(friends.iter())
|
||||||
db.get_status(
|
.map(|x| {
|
||||||
if x.user_id == user.id.into() {
|
let db = db.clone();
|
||||||
|
let redis = redis.clone();
|
||||||
|
async move {
|
||||||
|
async move {
|
||||||
|
get_user_status(
|
||||||
|
if x.user_id == user_id.into() {
|
||||||
x.friend_id
|
x.friend_id
|
||||||
} else {
|
} else {
|
||||||
x.user_id
|
x.user_id
|
||||||
}
|
}
|
||||||
.into(),
|
.into(),
|
||||||
|
&db,
|
||||||
|
&redis,
|
||||||
)
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
.buffer_unordered(16)
|
||||||
|
.filter_map(|x| x)
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
|
.await
|
||||||
} else {
|
} else {
|
||||||
Vec::new()
|
Vec::new()
|
||||||
};
|
};
|
||||||
@ -116,20 +139,42 @@ pub async fn ws_init(
|
|||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
tracing::info!("Connection {socket_id} opened by {}", user.id);
|
tracing::info!("Connection {socket_id} opened by {}", user.id);
|
||||||
|
|
||||||
broadcast_friends(
|
replace_user_status(None, Some(&status), &redis).await?;
|
||||||
user.id,
|
broadcast_friends_message(
|
||||||
ServerToClientMessage::StatusUpdate { status },
|
&redis,
|
||||||
&pool,
|
RedisFriendsMessage::StatusUpdate { status },
|
||||||
&db,
|
|
||||||
Some(friends),
|
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
let (shutdown_sender, mut shutdown_receiver) =
|
||||||
|
tokio::sync::oneshot::channel::<()>();
|
||||||
|
|
||||||
|
{
|
||||||
|
let db = db.clone();
|
||||||
|
let redis = redis.clone();
|
||||||
|
actix_web::rt::spawn(async move {
|
||||||
|
while shutdown_receiver.try_recv() == Err(TryRecvError::Empty) {
|
||||||
|
sleep(Duration::from_secs(30)).await;
|
||||||
|
if let Some(socket) = db.sockets.get(&socket_id) {
|
||||||
|
let _ = socket.socket.clone().ping(&[]).await;
|
||||||
|
}
|
||||||
|
let _ = push_back_user_expiry(user_id, &redis).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
let mut stream = msg_stream.into_stream();
|
let mut stream = msg_stream.into_stream();
|
||||||
|
|
||||||
actix_web::rt::spawn(async move {
|
actix_web::rt::spawn(async move {
|
||||||
// receive messages from websocket
|
loop {
|
||||||
while let Some(msg) = stream.next().await {
|
let next = pin!(stream.next());
|
||||||
|
let timeout = pin!(sleep(Duration::from_secs(30)));
|
||||||
|
let futures_util::future::Either::Left((Some(msg), _)) =
|
||||||
|
select(next, timeout).await
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
let message = match msg {
|
let message = match msg {
|
||||||
Ok(Message::Text(text)) => {
|
Ok(Message::Text(text)) => {
|
||||||
ClientToServerMessage::deserialize(Either::Left(&text))
|
ClientToServerMessage::deserialize(Either::Left(&text))
|
||||||
@ -139,10 +184,7 @@ pub async fn ws_init(
|
|||||||
ClientToServerMessage::deserialize(Either::Right(&bytes))
|
ClientToServerMessage::deserialize(Either::Right(&bytes))
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(Message::Close(_)) => {
|
Ok(Message::Close(_)) => break,
|
||||||
let _ = close_socket(socket_id, &pool, &db).await;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Message::Ping(msg)) => {
|
Ok(Message::Ping(msg)) => {
|
||||||
if let Some(socket) = db.sockets.get(&socket_id) {
|
if let Some(socket) = db.sockets.get(&socket_id) {
|
||||||
@ -162,8 +204,7 @@ pub async fn ws_init(
|
|||||||
#[cfg(debug_assertions)]
|
#[cfg(debug_assertions)]
|
||||||
if !message.is_binary() {
|
if !message.is_binary() {
|
||||||
tracing::info!(
|
tracing::info!(
|
||||||
"Received message from {socket_id}: {:?}",
|
"Received message from {socket_id}: {message:?}"
|
||||||
message
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -172,6 +213,8 @@ pub async fn ws_init(
|
|||||||
if let Some(mut pair) = db.sockets.get_mut(&socket_id) {
|
if let Some(mut pair) = db.sockets.get_mut(&socket_id) {
|
||||||
let ActiveSocket { status, .. } = pair.value_mut();
|
let ActiveSocket { status, .. } = pair.value_mut();
|
||||||
|
|
||||||
|
let old_status = status.clone();
|
||||||
|
|
||||||
if status
|
if status
|
||||||
.profile_name
|
.profile_name
|
||||||
.as_ref()
|
.as_ref()
|
||||||
@ -188,14 +231,17 @@ pub async fn ws_init(
|
|||||||
// We drop the pair to avoid holding the lock for too long
|
// We drop the pair to avoid holding the lock for too long
|
||||||
drop(pair);
|
drop(pair);
|
||||||
|
|
||||||
let _ = broadcast_friends(
|
let _ = replace_user_status(
|
||||||
user.id,
|
Some(&old_status),
|
||||||
ServerToClientMessage::StatusUpdate {
|
Some(&user_status),
|
||||||
|
&redis,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
let _ = broadcast_friends_message(
|
||||||
|
&redis,
|
||||||
|
RedisFriendsMessage::StatusUpdate {
|
||||||
status: user_status,
|
status: user_status,
|
||||||
},
|
},
|
||||||
&pool,
|
|
||||||
&db,
|
|
||||||
None,
|
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
@ -247,12 +293,11 @@ pub async fn ws_init(
|
|||||||
};
|
};
|
||||||
match tunnel_socket.socket_type {
|
match tunnel_socket.socket_type {
|
||||||
TunnelSocketType::Listening => {
|
TunnelSocketType::Listening => {
|
||||||
let _ = broadcast_friends(
|
let _ = broadcast_to_local_friends(
|
||||||
user.id,
|
user.id,
|
||||||
ServerToClientMessage::FriendSocketStoppedListening { user: user.id },
|
ServerToClientMessage::FriendSocketStoppedListening { user: user.id },
|
||||||
&pool,
|
&pool,
|
||||||
&db,
|
&db,
|
||||||
None,
|
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
@ -308,25 +353,48 @@ pub async fn ws_init(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let _ = close_socket(socket_id, &pool, &db).await;
|
let _ = shutdown_sender.send(());
|
||||||
|
let _ = close_socket(socket_id, &pool, &db, &redis).await;
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok(res)
|
Ok(res)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn broadcast_friends(
|
pub async fn broadcast_friends_message(
|
||||||
|
redis: &RedisPool,
|
||||||
|
message: RedisFriendsMessage,
|
||||||
|
) -> Result<(), crate::database::models::DatabaseError> {
|
||||||
|
let _: () = redis
|
||||||
|
.pool
|
||||||
|
.get()
|
||||||
|
.await?
|
||||||
|
.publish(FRIENDS_CHANNEL_NAME, message)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn broadcast_to_local_friends(
|
||||||
user_id: UserId,
|
user_id: UserId,
|
||||||
message: ServerToClientMessage,
|
message: ServerToClientMessage,
|
||||||
pool: &PgPool,
|
pool: &PgPool,
|
||||||
sockets: &ActiveSockets,
|
sockets: &ActiveSockets,
|
||||||
friends: Option<Vec<FriendItem>>,
|
) -> Result<(), crate::database::models::DatabaseError> {
|
||||||
|
broadcast_to_known_local_friends(
|
||||||
|
user_id,
|
||||||
|
message,
|
||||||
|
sockets,
|
||||||
|
FriendItem::get_user_friends(user_id.into(), Some(true), pool).await?,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn broadcast_to_known_local_friends(
|
||||||
|
user_id: UserId,
|
||||||
|
message: ServerToClientMessage,
|
||||||
|
sockets: &ActiveSockets,
|
||||||
|
friends: Vec<FriendItem>,
|
||||||
) -> Result<(), crate::database::models::DatabaseError> {
|
) -> Result<(), crate::database::models::DatabaseError> {
|
||||||
// FIXME Probably shouldn't be using database errors for this. Maybe ApiError?
|
// FIXME Probably shouldn't be using database errors for this. Maybe ApiError?
|
||||||
let friends = if let Some(friends) = friends {
|
|
||||||
friends
|
|
||||||
} else {
|
|
||||||
FriendItem::get_user_friends(user_id.into(), Some(true), pool).await?
|
|
||||||
};
|
|
||||||
|
|
||||||
for friend in friends {
|
for friend in friends {
|
||||||
let friend_id = if friend.user_id == user_id.into() {
|
let friend_id = if friend.user_id == user_id.into() {
|
||||||
@ -387,6 +455,7 @@ pub async fn close_socket(
|
|||||||
id: SocketId,
|
id: SocketId,
|
||||||
pool: &PgPool,
|
pool: &PgPool,
|
||||||
db: &ActiveSockets,
|
db: &ActiveSockets,
|
||||||
|
redis: &RedisPool,
|
||||||
) -> Result<(), crate::database::models::DatabaseError> {
|
) -> Result<(), crate::database::models::DatabaseError> {
|
||||||
if let Some((_, socket)) = db.sockets.remove(&id) {
|
if let Some((_, socket)) = db.sockets.remove(&id) {
|
||||||
let user_id = socket.status.user_id;
|
let user_id = socket.status.user_id;
|
||||||
@ -397,12 +466,10 @@ pub async fn close_socket(
|
|||||||
|
|
||||||
let _ = socket.socket.close(None).await;
|
let _ = socket.socket.close(None).await;
|
||||||
|
|
||||||
broadcast_friends(
|
replace_user_status(Some(&socket.status), None, redis).await?;
|
||||||
user_id,
|
broadcast_friends_message(
|
||||||
ServerToClientMessage::UserOffline { id: user_id },
|
redis,
|
||||||
pool,
|
RedisFriendsMessage::UserOffline { user: user_id },
|
||||||
db,
|
|
||||||
None,
|
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
@ -414,14 +481,13 @@ pub async fn close_socket(
|
|||||||
};
|
};
|
||||||
match tunnel_socket.socket_type {
|
match tunnel_socket.socket_type {
|
||||||
TunnelSocketType::Listening => {
|
TunnelSocketType::Listening => {
|
||||||
let _ = broadcast_friends(
|
let _ = broadcast_to_local_friends(
|
||||||
user_id,
|
user_id,
|
||||||
ServerToClientMessage::SocketClosed {
|
ServerToClientMessage::SocketClosed {
|
||||||
socket: owned_socket,
|
socket: owned_socket,
|
||||||
},
|
},
|
||||||
pool,
|
pool,
|
||||||
db,
|
db,
|
||||||
None,
|
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
|
|||||||
@ -95,6 +95,8 @@ pub enum ApiError {
|
|||||||
Database(#[from] crate::database::models::DatabaseError),
|
Database(#[from] crate::database::models::DatabaseError),
|
||||||
#[error("Database Error: {0}")]
|
#[error("Database Error: {0}")]
|
||||||
SqlxDatabase(#[from] sqlx::Error),
|
SqlxDatabase(#[from] sqlx::Error),
|
||||||
|
#[error("Database Error: {0}")]
|
||||||
|
RedisDatabase(#[from] redis::RedisError),
|
||||||
#[error("Clickhouse Error: {0}")]
|
#[error("Clickhouse Error: {0}")]
|
||||||
Clickhouse(#[from] clickhouse::error::Error),
|
Clickhouse(#[from] clickhouse::error::Error),
|
||||||
#[error("Internal server error: {0}")]
|
#[error("Internal server error: {0}")]
|
||||||
@ -148,8 +150,9 @@ impl ApiError {
|
|||||||
crate::models::error::ApiError {
|
crate::models::error::ApiError {
|
||||||
error: match self {
|
error: match self {
|
||||||
ApiError::Env(..) => "environment_error",
|
ApiError::Env(..) => "environment_error",
|
||||||
ApiError::SqlxDatabase(..) => "database_error",
|
|
||||||
ApiError::Database(..) => "database_error",
|
ApiError::Database(..) => "database_error",
|
||||||
|
ApiError::SqlxDatabase(..) => "database_error",
|
||||||
|
ApiError::RedisDatabase(..) => "database_error",
|
||||||
ApiError::Authentication(..) => "unauthorized",
|
ApiError::Authentication(..) => "unauthorized",
|
||||||
ApiError::CustomAuthentication(..) => "unauthorized",
|
ApiError::CustomAuthentication(..) => "unauthorized",
|
||||||
ApiError::Xml(..) => "xml_error",
|
ApiError::Xml(..) => "xml_error",
|
||||||
@ -186,6 +189,7 @@ impl actix_web::ResponseError for ApiError {
|
|||||||
ApiError::Env(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
ApiError::Env(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
ApiError::Database(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
ApiError::Database(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
ApiError::SqlxDatabase(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
ApiError::SqlxDatabase(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
|
ApiError::RedisDatabase(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
ApiError::Clickhouse(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
ApiError::Clickhouse(..) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||||
ApiError::Authentication(..) => StatusCode::UNAUTHORIZED,
|
ApiError::Authentication(..) => StatusCode::UNAUTHORIZED,
|
||||||
ApiError::CustomAuthentication(..) => StatusCode::UNAUTHORIZED,
|
ApiError::CustomAuthentication(..) => StatusCode::UNAUTHORIZED,
|
||||||
|
|||||||
@ -5,8 +5,12 @@ use crate::models::pats::Scopes;
|
|||||||
use crate::models::users::UserFriend;
|
use crate::models::users::UserFriend;
|
||||||
use crate::queue::session::AuthQueue;
|
use crate::queue::session::AuthQueue;
|
||||||
use crate::queue::socket::ActiveSockets;
|
use crate::queue::socket::ActiveSockets;
|
||||||
use crate::routes::internal::statuses::send_message_to_user;
|
use crate::routes::internal::statuses::{
|
||||||
|
broadcast_friends_message, send_message_to_user,
|
||||||
|
};
|
||||||
use crate::routes::ApiError;
|
use crate::routes::ApiError;
|
||||||
|
use crate::sync::friends::RedisFriendsMessage;
|
||||||
|
use crate::sync::status::get_user_status;
|
||||||
use actix_web::{delete, get, post, web, HttpRequest, HttpResponse};
|
use actix_web::{delete, get, post, web, HttpRequest, HttpResponse};
|
||||||
use ariadne::networking::message::ServerToClientMessage;
|
use ariadne::networking::message::ServerToClientMessage;
|
||||||
use chrono::Utc;
|
use chrono::Utc;
|
||||||
@ -76,14 +80,16 @@ pub async fn add_friend(
|
|||||||
user_id: UserId,
|
user_id: UserId,
|
||||||
friend_id: UserId,
|
friend_id: UserId,
|
||||||
sockets: &ActiveSockets,
|
sockets: &ActiveSockets,
|
||||||
|
redis: &RedisPool,
|
||||||
) -> Result<(), ApiError> {
|
) -> Result<(), ApiError> {
|
||||||
if let Some(friend_status) = sockets.get_status(user_id.into())
|
if let Some(friend_status) =
|
||||||
|
get_user_status(user_id.into(), sockets, redis).await
|
||||||
{
|
{
|
||||||
send_message_to_user(
|
broadcast_friends_message(
|
||||||
sockets,
|
redis,
|
||||||
friend_id.into(),
|
RedisFriendsMessage::DirectStatusUpdate {
|
||||||
&ServerToClientMessage::StatusUpdate {
|
to_user: friend_id.into(),
|
||||||
status: friend_status.clone(),
|
status: friend_status,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
@ -92,8 +98,10 @@ pub async fn add_friend(
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
send_friend_status(friend.user_id, friend.friend_id, &db).await?;
|
send_friend_status(friend.user_id, friend.friend_id, &db, &redis)
|
||||||
send_friend_status(friend.friend_id, friend.user_id, &db).await?;
|
.await?;
|
||||||
|
send_friend_status(friend.friend_id, friend.user_id, &db, &redis)
|
||||||
|
.await?;
|
||||||
} else {
|
} else {
|
||||||
if friend.id == user.id.into() {
|
if friend.id == user.id.into() {
|
||||||
return Err(ApiError::InvalidInput(
|
return Err(ApiError::InvalidInput(
|
||||||
|
|||||||
@ -36,181 +36,4 @@ impl Drop for Scheduler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
use tracing::{info, warn};
|
|
||||||
|
|
||||||
pub fn schedule_versions(
|
|
||||||
scheduler: &mut Scheduler,
|
|
||||||
pool: sqlx::Pool<sqlx::Postgres>,
|
|
||||||
redis: RedisPool,
|
|
||||||
) {
|
|
||||||
let version_index_interval = std::time::Duration::from_secs(
|
|
||||||
parse_var("VERSION_INDEX_INTERVAL").unwrap_or(1800),
|
|
||||||
);
|
|
||||||
|
|
||||||
scheduler.run(version_index_interval, move || {
|
|
||||||
let pool_ref = pool.clone();
|
|
||||||
let redis = redis.clone();
|
|
||||||
async move {
|
|
||||||
info!("Indexing game versions list from Mojang");
|
|
||||||
let result = update_versions(&pool_ref, &redis).await;
|
|
||||||
if let Err(e) = result {
|
|
||||||
warn!("Version update failed: {}", e);
|
|
||||||
}
|
|
||||||
info!("Done indexing game versions");
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
use thiserror::Error;
|
|
||||||
|
|
||||||
#[derive(Error, Debug)]
|
|
||||||
pub enum VersionIndexingError {
|
|
||||||
#[error("Network error while updating game versions list: {0}")]
|
|
||||||
NetworkError(#[from] reqwest::Error),
|
|
||||||
#[error("Database error while updating game versions list: {0}")]
|
|
||||||
DatabaseError(#[from] crate::database::models::DatabaseError),
|
|
||||||
}
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
database::{
|
|
||||||
models::legacy_loader_fields::MinecraftGameVersion, redis::RedisPool,
|
|
||||||
},
|
|
||||||
util::env::parse_var,
|
|
||||||
};
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tokio_stream::wrappers::IntervalStream;
|
use tokio_stream::wrappers::IntervalStream;
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
struct InputFormat<'a> {
|
|
||||||
// latest: LatestFormat,
|
|
||||||
versions: Vec<VersionFormat<'a>>,
|
|
||||||
}
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
struct VersionFormat<'a> {
|
|
||||||
id: String,
|
|
||||||
#[serde(rename = "type")]
|
|
||||||
type_: std::borrow::Cow<'a, str>,
|
|
||||||
#[serde(rename = "releaseTime")]
|
|
||||||
release_time: DateTime<Utc>,
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn update_versions(
|
|
||||||
pool: &sqlx::Pool<sqlx::Postgres>,
|
|
||||||
redis: &RedisPool,
|
|
||||||
) -> Result<(), VersionIndexingError> {
|
|
||||||
let input = reqwest::get(
|
|
||||||
"https://piston-meta.mojang.com/mc/game/version_manifest_v2.json",
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.json::<InputFormat>()
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let mut skipped_versions_count = 0u32;
|
|
||||||
|
|
||||||
// A list of version names that contains spaces.
|
|
||||||
// Generated using the command
|
|
||||||
// ```sh
|
|
||||||
// curl https://launchermeta.mojang.com/mc/game/version_manifest.json \
|
|
||||||
// | jq '[.versions[].id | select(contains(" "))]'
|
|
||||||
// ```
|
|
||||||
const HALL_OF_SHAME: [(&str, &str); 12] = [
|
|
||||||
("1.14.2 Pre-Release 4", "1.14.2-pre4"),
|
|
||||||
("1.14.2 Pre-Release 3", "1.14.2-pre3"),
|
|
||||||
("1.14.2 Pre-Release 2", "1.14.2-pre2"),
|
|
||||||
("1.14.2 Pre-Release 1", "1.14.2-pre1"),
|
|
||||||
("1.14.1 Pre-Release 2", "1.14.1-pre2"),
|
|
||||||
("1.14.1 Pre-Release 1", "1.14.1-pre1"),
|
|
||||||
("1.14 Pre-Release 5", "1.14-pre5"),
|
|
||||||
("1.14 Pre-Release 4", "1.14-pre4"),
|
|
||||||
("1.14 Pre-Release 3", "1.14-pre3"),
|
|
||||||
("1.14 Pre-Release 2", "1.14-pre2"),
|
|
||||||
("1.14 Pre-Release 1", "1.14-pre1"),
|
|
||||||
("3D Shareware v1.34", "3D-Shareware-v1.34"),
|
|
||||||
];
|
|
||||||
|
|
||||||
lazy_static::lazy_static! {
|
|
||||||
/// Mojank for some reason has versions released at the same DateTime. This hardcodes them to fix this,
|
|
||||||
/// as most of our ordering logic is with DateTime
|
|
||||||
static ref HALL_OF_SHAME_2: [(&'static str, chrono::DateTime<chrono::Utc>); 4] = [
|
|
||||||
(
|
|
||||||
"1.4.5",
|
|
||||||
chrono::DateTime::parse_from_rfc3339("2012-12-19T22:00:00+00:00")
|
|
||||||
.unwrap()
|
|
||||||
.into(),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"1.4.6",
|
|
||||||
chrono::DateTime::parse_from_rfc3339("2012-12-19T22:00:01+00:00")
|
|
||||||
.unwrap()
|
|
||||||
.into(),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"1.6.3",
|
|
||||||
chrono::DateTime::parse_from_rfc3339("2013-09-13T10:54:41+00:00")
|
|
||||||
.unwrap()
|
|
||||||
.into(),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
"13w37b",
|
|
||||||
chrono::DateTime::parse_from_rfc3339("2013-09-13T10:54:42+00:00")
|
|
||||||
.unwrap()
|
|
||||||
.into(),
|
|
||||||
),
|
|
||||||
];
|
|
||||||
}
|
|
||||||
|
|
||||||
for version in input.versions.into_iter() {
|
|
||||||
let mut name = version.id;
|
|
||||||
if !name
|
|
||||||
.chars()
|
|
||||||
.all(|c| c.is_ascii_alphanumeric() || "-_.".contains(c))
|
|
||||||
{
|
|
||||||
if let Some((_, alternate)) =
|
|
||||||
HALL_OF_SHAME.iter().find(|(version, _)| name == *version)
|
|
||||||
{
|
|
||||||
name = String::from(*alternate);
|
|
||||||
} else {
|
|
||||||
// We'll deal with these manually
|
|
||||||
skipped_versions_count += 1;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let type_ = match &*version.type_ {
|
|
||||||
"release" => "release",
|
|
||||||
"snapshot" => "snapshot",
|
|
||||||
"old_alpha" => "alpha",
|
|
||||||
"old_beta" => "beta",
|
|
||||||
_ => "other",
|
|
||||||
};
|
|
||||||
|
|
||||||
MinecraftGameVersion::builder()
|
|
||||||
.version(&name)?
|
|
||||||
.version_type(type_)?
|
|
||||||
.created(
|
|
||||||
if let Some((_, alternate)) =
|
|
||||||
HALL_OF_SHAME_2.iter().find(|(version, _)| name == *version)
|
|
||||||
{
|
|
||||||
alternate
|
|
||||||
} else {
|
|
||||||
&version.release_time
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.insert(pool, redis)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if skipped_versions_count > 0 {
|
|
||||||
// This will currently always trigger due to 1.14 pre releases
|
|
||||||
// and the shareware april fools update. We could set a threshold
|
|
||||||
// that accounts for those versions and update it whenever we
|
|
||||||
// manually fix another version.
|
|
||||||
warn!(
|
|
||||||
"Skipped {} game versions; check for new versions and add them manually",
|
|
||||||
skipped_versions_count
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|||||||
87
apps/labrinth/src/sync/friends.rs
Normal file
87
apps/labrinth/src/sync/friends.rs
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
use crate::queue::socket::ActiveSockets;
|
||||||
|
use crate::routes::internal::statuses::{
|
||||||
|
broadcast_to_local_friends, send_message_to_user,
|
||||||
|
};
|
||||||
|
use actix_web::web::Data;
|
||||||
|
use ariadne::ids::UserId;
|
||||||
|
use ariadne::networking::message::ServerToClientMessage;
|
||||||
|
use ariadne::users::UserStatus;
|
||||||
|
use redis::aio::PubSub;
|
||||||
|
use redis::{RedisWrite, ToRedisArgs};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use sqlx::PgPool;
|
||||||
|
use tokio_stream::StreamExt;
|
||||||
|
|
||||||
|
pub const FRIENDS_CHANNEL_NAME: &str = "friends";
|
||||||
|
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(tag = "type", rename_all = "snake_case")]
|
||||||
|
pub enum RedisFriendsMessage {
|
||||||
|
StatusUpdate { status: UserStatus },
|
||||||
|
UserOffline { user: UserId },
|
||||||
|
DirectStatusUpdate { to_user: UserId, status: UserStatus },
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ToRedisArgs for RedisFriendsMessage {
|
||||||
|
fn write_redis_args<W>(&self, out: &mut W)
|
||||||
|
where
|
||||||
|
W: ?Sized + RedisWrite,
|
||||||
|
{
|
||||||
|
out.write_arg(&serde_json::to_vec(&self).unwrap())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_pubsub(
|
||||||
|
mut pubsub: PubSub,
|
||||||
|
pool: PgPool,
|
||||||
|
sockets: Data<ActiveSockets>,
|
||||||
|
) {
|
||||||
|
pubsub.subscribe(FRIENDS_CHANNEL_NAME).await.unwrap();
|
||||||
|
let mut stream = pubsub.into_on_message();
|
||||||
|
while let Some(message) = stream.next().await {
|
||||||
|
if message.get_channel_name() != FRIENDS_CHANNEL_NAME {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let payload = serde_json::from_slice(message.get_payload_bytes());
|
||||||
|
|
||||||
|
let pool = pool.clone();
|
||||||
|
let sockets = sockets.clone();
|
||||||
|
actix_rt::spawn(async move {
|
||||||
|
match payload {
|
||||||
|
Ok(RedisFriendsMessage::StatusUpdate { status }) => {
|
||||||
|
let _ = broadcast_to_local_friends(
|
||||||
|
status.user_id,
|
||||||
|
ServerToClientMessage::StatusUpdate { status },
|
||||||
|
&pool,
|
||||||
|
&sockets,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(RedisFriendsMessage::UserOffline { user }) => {
|
||||||
|
let _ = broadcast_to_local_friends(
|
||||||
|
user,
|
||||||
|
ServerToClientMessage::UserOffline { id: user },
|
||||||
|
&pool,
|
||||||
|
&sockets,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(RedisFriendsMessage::DirectStatusUpdate {
|
||||||
|
to_user,
|
||||||
|
status,
|
||||||
|
}) => {
|
||||||
|
let _ = send_message_to_user(
|
||||||
|
&sockets,
|
||||||
|
to_user,
|
||||||
|
&ServerToClientMessage::StatusUpdate { status },
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(_) => {}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
2
apps/labrinth/src/sync/mod.rs
Normal file
2
apps/labrinth/src/sync/mod.rs
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
//! Cross-instance synchronization: shares real-time presence state between
//! multiple Labrinth instances through Redis.

/// Redis pub/sub fan-out of friend-presence messages to local sockets.
pub mod friends;
/// Redis-backed storage and expiry of per-user online status.
pub mod status;
|
||||||
71
apps/labrinth/src/sync/status.rs
Normal file
71
apps/labrinth/src/sync/status.rs
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
use crate::database::redis::RedisPool;
|
||||||
|
use crate::queue::socket::ActiveSockets;
|
||||||
|
use ariadne::ids::UserId;
|
||||||
|
use ariadne::users::UserStatus;
|
||||||
|
use redis::AsyncCommands;
|
||||||
|
|
||||||
|
/// TTL applied to each user's status set in Redis; an entry vanishes unless
/// refreshed within this window (see `push_back_user_expiry`).
const EXPIRY_TIME_SECONDS: i64 = 60;
|
||||||
|
|
||||||
|
pub async fn get_user_status(
|
||||||
|
user: UserId,
|
||||||
|
local_sockets: &ActiveSockets,
|
||||||
|
redis: &RedisPool,
|
||||||
|
) -> Option<UserStatus> {
|
||||||
|
if let Some(friend_status) = local_sockets.get_status(user) {
|
||||||
|
return Some(friend_status);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Ok(mut conn) = redis.pool.get().await {
|
||||||
|
if let Ok(mut statuses) =
|
||||||
|
conn.sscan::<_, String>(get_field_name(user)).await
|
||||||
|
{
|
||||||
|
if let Some(status_json) = statuses.next_item().await {
|
||||||
|
return serde_json::from_str::<UserStatus>(&status_json).ok();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn replace_user_status(
|
||||||
|
old_status: Option<&UserStatus>,
|
||||||
|
new_status: Option<&UserStatus>,
|
||||||
|
redis: &RedisPool,
|
||||||
|
) -> Result<(), redis::RedisError> {
|
||||||
|
let Some(user) = new_status.or(old_status).map(|x| x.user_id) else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Ok(mut conn) = redis.pool.get().await {
|
||||||
|
let field_name = get_field_name(user);
|
||||||
|
let mut pipe = redis::pipe();
|
||||||
|
pipe.atomic();
|
||||||
|
if let Some(status) = old_status {
|
||||||
|
pipe.srem(&field_name, serde_json::to_string(&status).unwrap())
|
||||||
|
.ignore();
|
||||||
|
}
|
||||||
|
if let Some(status) = new_status {
|
||||||
|
pipe.sadd(&field_name, serde_json::to_string(&status).unwrap())
|
||||||
|
.ignore();
|
||||||
|
pipe.expire(&field_name, EXPIRY_TIME_SECONDS).ignore();
|
||||||
|
}
|
||||||
|
return pipe.query_async(&mut conn).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn push_back_user_expiry(
|
||||||
|
user: UserId,
|
||||||
|
redis: &RedisPool,
|
||||||
|
) -> Result<(), redis::RedisError> {
|
||||||
|
if let Ok(mut conn) = redis.pool.get().await {
|
||||||
|
return conn.expire(get_field_name(user), EXPIRY_TIME_SECONDS).await;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_field_name(user: UserId) -> String {
|
||||||
|
format!("user_status:{}", user)
|
||||||
|
}
|
||||||
@ -35,6 +35,9 @@ pub async fn setup(db: &database::TemporaryDatabase) -> LabrinthConfig {
|
|||||||
let maxmind_reader =
|
let maxmind_reader =
|
||||||
Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());
|
Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());
|
||||||
|
|
||||||
|
let stripe_client =
|
||||||
|
stripe::Client::new(dotenvy::var("STRIPE_API_KEY").unwrap());
|
||||||
|
|
||||||
labrinth::app_setup(
|
labrinth::app_setup(
|
||||||
pool.clone(),
|
pool.clone(),
|
||||||
redis_pool.clone(),
|
redis_pool.clone(),
|
||||||
@ -42,6 +45,8 @@ pub async fn setup(db: &database::TemporaryDatabase) -> LabrinthConfig {
|
|||||||
&mut clickhouse,
|
&mut clickhouse,
|
||||||
file_host.clone(),
|
file_host.clone(),
|
||||||
maxmind_reader,
|
maxmind_reader,
|
||||||
|
stripe_client,
|
||||||
|
false,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user