Fix cache stampede issues + generalize cache (#884)

* caching changes

* fix cache stampede issues

* Use pub/sub for better DB fetches

* remove pubsub

* remove debugs

* Fix caches not working

* fix search indexing removal
Geometrically 2024-03-26 21:15:50 -07:00 committed by GitHub
parent decfcb6c27
commit a0aa350a08
48 changed files with 1540 additions and 1655 deletions
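
The stampede fix in this commit comes down to a per-key Redis lock: the first caller to miss the cache takes the lock and queries Postgres, while every other caller for the same key polls the lock and then re-reads the freshly written cache entry instead of issuing its own query (see the new get_cached_keys helpers in the Redis pool diff further down). A minimal, self-contained sketch of that pattern (assuming Redis 7 or newer for SET ... NX GET, with a hypothetical fetch_from_db standing in for the real queries):

use deadpool_redis::Connection;
use redis::cmd;
use std::time::Duration;

// Hypothetical stand-in for the real Postgres query; not part of this PR.
async fn fetch_from_db(_key: &str) -> Result<String, Box<dyn std::error::Error>> {
    Ok("fresh value".to_string())
}

async fn get_or_fetch(
    mut conn: Connection,
    key: &str,
) -> Result<String, Box<dyn std::error::Error>> {
    // 1. Serve from the cache when possible.
    if let Some(hit) = cmd("GET")
        .arg(key)
        .query_async::<_, Option<String>>(&mut conn)
        .await?
    {
        return Ok(hit);
    }

    // 2. Try to take a short-lived lock so only one caller queries the database.
    //    SET ... NX EX 60 GET returns the previous value, or nil if we won the lock.
    let lock_key = format!("{key}/lock");
    let previous_holder: Option<String> = cmd("SET")
        .arg(&lock_key)
        .arg("1")
        .arg("NX")
        .arg("EX")
        .arg(60)
        .arg("GET")
        .query_async(&mut conn)
        .await?;

    if previous_holder.is_none() {
        // We hold the lock: fetch, fill the cache, release the lock.
        let value = fetch_from_db(key).await?;
        cmd("SET")
            .arg(key)
            .arg(&value)
            .arg("EX")
            .arg(1800)
            .query_async::<_, ()>(&mut conn)
            .await?;
        cmd("DEL").arg(&lock_key).query_async::<_, ()>(&mut conn).await?;
        return Ok(value);
    }

    // 3. Someone else is already fetching: poll until their lock disappears,
    //    then read what they cached, giving up after roughly five seconds.
    let mut interval = tokio::time::interval(Duration::from_millis(100));
    for _ in 0..50 {
        interval.tick().await;
        let lock: Option<String> = cmd("GET").arg(&lock_key).query_async(&mut conn).await?;
        if lock.is_none() {
            if let Some(hit) = cmd("GET")
                .arg(key)
                .query_async::<_, Option<String>>(&mut conn)
                .await?
            {
                return Ok(hit);
            }
        }
    }
    Err("timed out waiting for another caller to fill the cache".into())
}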

View File

@ -0,0 +1,46 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "version_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d"
}

View File

@ -48,5 +48,5 @@
false
]
},
"hash": "603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77"
"hash": "04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433"
}

View File

@ -0,0 +1,46 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "mod_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8"
}

View File

@ -42,5 +42,5 @@
true
]
},
"hash": "2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75"
"hash": "10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51"
}

View File

@ -55,5 +55,5 @@
true
]
},
"hash": "4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849"
"hash": "28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163"
}

View File

@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1)\n ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC;\n ",
"query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1);\n ",
"describe": {
"columns": [
{
@ -90,5 +90,5 @@
true
]
},
"hash": "8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301"
"hash": "32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6"
}

View File

@ -30,5 +30,5 @@
null
]
},
"hash": "6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538"
"hash": "34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff"
}

View File

@ -67,5 +67,5 @@
null
]
},
"hash": "0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816"
"hash": "3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4"
}

View File

@ -43,5 +43,5 @@
false
]
},
"hash": "8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47"
"hash": "4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc"
}

View File

@ -48,5 +48,5 @@
true
]
},
"hash": "2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95"
"hash": "43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af"
}

View File

@ -72,5 +72,5 @@
null
]
},
"hash": "f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0"
"hash": "4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4"
}

View File

@ -42,5 +42,5 @@
false
]
},
"hash": "b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64"
"hash": "623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3"
}

View File

@ -72,5 +72,5 @@
true
]
},
"hash": "5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a"
"hash": "64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643"
}

View File

@ -18,5 +18,5 @@
false
]
},
"hash": "21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64"
"hash": "6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842"
}

View File

@ -91,5 +91,5 @@
false
]
},
"hash": "c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514"
"hash": "74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84"
}

View File

@ -32,5 +32,5 @@
false
]
},
"hash": "1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec"
"hash": "7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab"
}

View File

@ -48,5 +48,5 @@
true
]
},
"hash": "ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c"
"hash": "7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a"
}

View File

@ -54,5 +54,5 @@
false
]
},
"hash": "5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478"
"hash": "887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c"
}

View File

@ -169,5 +169,5 @@
null
]
},
"hash": "2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d"
"hash": "92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b"
}

View File

@ -61,5 +61,5 @@
true
]
},
"hash": "e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d"
"hash": "a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517"
}

View File

@ -151,5 +151,5 @@
true
]
},
"hash": "5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0"
"hash": "a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785"
}

View File

@ -72,5 +72,5 @@
false
]
},
"hash": "c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345"
"hash": "a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b"
}

View File

@ -54,5 +54,5 @@
true
]
},
"hash": "e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823"
"hash": "b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198"
}

View File

@ -60,5 +60,5 @@
false
]
},
"hash": "bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50"
"hash": "c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c"
}

View File

@ -48,5 +48,5 @@
true
]
},
"hash": "99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0"
"hash": "d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51"
}

View File

@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "mod_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681"
}

View File

@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "version_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2"
}

View File

@ -55,5 +55,5 @@
false
]
},
"hash": "7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b"
"hash": "f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84"
}

View File

@ -4,6 +4,8 @@ use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;
use crate::models::collections::CollectionStatus;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
const COLLECTIONS_NAMESPACE: &str = "collections";
@ -155,40 +157,12 @@ impl Collection {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if collection_ids.is_empty() {
return Ok(Vec::new());
}
let mut found_collections = Vec::new();
let mut remaining_collections: Vec<CollectionId> = collection_ids.to_vec();
if !collection_ids.is_empty() {
let collections = redis
.multi_get::<String>(
let val = redis
.get_cached_keys(
COLLECTIONS_NAMESPACE,
collection_ids.iter().map(|x| x.0.to_string()),
)
.await?;
for collection in collections {
if let Some(collection) =
collection.and_then(|x| serde_json::from_str::<Collection>(&x).ok())
{
remaining_collections.retain(|x| collection.id.0 != x.0);
found_collections.push(collection);
continue;
}
}
}
if !remaining_collections.is_empty() {
let collection_ids_parsed: Vec<i64> =
remaining_collections.iter().map(|x| x.0).collect();
let db_collections: Vec<Collection> = sqlx::query!(
&collection_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|collection_ids| async move {
let collections = sqlx::query!(
"
SELECT c.id id, c.name name, c.description description,
c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,
@ -199,15 +173,12 @@ impl Collection {
WHERE c.id = ANY($1)
GROUP BY c.id;
",
&collection_ids_parsed,
&collection_ids,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|m| {
let id = m.id;
Collection {
id: CollectionId(id),
.fetch(exec)
.try_fold(DashMap::new(), |acc, m| {
let collection = Collection {
id: CollectionId(m.id),
user_id: UserId(m.user_id),
name: m.name.clone(),
description: m.description.clone(),
@ -222,26 +193,19 @@ impl Collection {
.into_iter()
.map(ProjectId)
.collect(),
}
}))
};
acc.insert(m.id, collection);
async move { Ok(acc) }
})
.try_collect::<Vec<Collection>>()
.await?;
for collection in db_collections {
redis
.set_serialized_to_json(
COLLECTIONS_NAMESPACE,
collection.id.0,
&collection,
None,
Ok(collections)
},
)
.await?;
found_collections.push(collection);
}
}
Ok(found_collections)
Ok(val)
}
pub async fn clear_cache(id: CollectionId, redis: &RedisPool) -> Result<(), DatabaseError> {

View File

@ -2,6 +2,7 @@ use super::ids::*;
use crate::database::redis::RedisPool;
use crate::{database::models::DatabaseError, models::images::ImageContext};
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
const IMAGES_NAMESPACE: &str = "images";
@ -180,46 +181,23 @@ impl Image {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if image_ids.is_empty() {
return Ok(Vec::new());
}
let mut found_images = Vec::new();
let mut remaining_ids = image_ids.to_vec();
let image_ids = image_ids.iter().map(|x| x.0).collect::<Vec<_>>();
if !image_ids.is_empty() {
let images = redis
.multi_get::<String>(IMAGES_NAMESPACE, image_ids.iter().map(|x| x.to_string()))
.await?;
for image in images {
if let Some(image) = image.and_then(|x| serde_json::from_str::<Image>(&x).ok()) {
remaining_ids.retain(|x| image.id.0 != x.0);
found_images.push(image);
continue;
}
}
}
if !remaining_ids.is_empty() {
let db_images: Vec<Image> = sqlx::query!(
let val = redis.get_cached_keys(
IMAGES_NAMESPACE,
&image_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|image_ids| async move {
let images = sqlx::query!(
"
SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id
FROM uploaded_images
WHERE id = ANY($1)
GROUP BY id;
",
&remaining_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
&image_ids,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|i| {
let id = i.id;
Image {
id: ImageId(id),
.fetch(exec)
.try_fold(DashMap::new(), |acc, i| {
let img = Image {
id: ImageId(i.id),
url: i.url,
size: i.size as u64,
created: i.created,
@ -229,21 +207,18 @@ impl Image {
version_id: i.version_id.map(VersionId),
thread_message_id: i.thread_message_id.map(ThreadMessageId),
report_id: i.report_id.map(ReportId),
}
}))
};
acc.insert(i.id, img);
async move { Ok(acc) }
})
.try_collect::<Vec<Image>>()
.await?;
for image in db_images {
redis
.set_serialized_to_json(IMAGES_NAMESPACE, image.id.0, &image, None)
.await?;
found_images.push(image);
}
}
Ok(images)
},
).await?;
Ok(found_images)
Ok(val)
}
pub async fn clear_cache(id: ImageId, redis: &RedisPool) -> Result<(), DatabaseError> {

View File

@ -208,6 +208,13 @@ impl<'a> MinecraftGameVersionBuilder<'a> {
.fetch_one(exec)
.await?;
let mut conn = redis.connect().await?;
conn.delete(
crate::database::models::loader_fields::LOADER_FIELD_ENUM_VALUES_NAMESPACE,
game_versions_enum.id.0,
)
.await?;
Ok(LoaderFieldEnumValueId(result.id))
}
}

View File

@ -6,6 +6,7 @@ use super::DatabaseError;
use crate::database::redis::RedisPool;
use chrono::DateTime;
use chrono::Utc;
use dashmap::DashMap;
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
@ -16,7 +17,7 @@ const LOADERS_LIST_NAMESPACE: &str = "loaders";
const LOADER_FIELDS_NAMESPACE: &str = "loader_fields";
const LOADER_FIELDS_NAMESPACE_ALL: &str = "loader_fields_all";
const LOADER_FIELD_ENUMS_ID_NAMESPACE: &str = "loader_field_enums";
const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values";
pub const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values";
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Game {
@ -380,30 +381,10 @@ impl LoaderField {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
type RedisLoaderFieldTuple = (LoaderId, Vec<LoaderField>);
let mut redis = redis.connect().await?;
let mut loader_ids = loader_ids.to_vec();
let cached_fields: Vec<RedisLoaderFieldTuple> = redis
.multi_get::<String>(LOADER_FIELDS_NAMESPACE, loader_ids.iter().map(|x| x.0))
.await?
.into_iter()
.flatten()
.filter_map(|x: String| serde_json::from_str::<RedisLoaderFieldTuple>(&x).ok())
.collect();
let mut found_loader_fields = HashMap::new();
if !cached_fields.is_empty() {
for (loader_id, fields) in cached_fields {
if loader_ids.contains(&loader_id) {
found_loader_fields.insert(loader_id, fields);
loader_ids.retain(|x| x != &loader_id);
}
}
}
if !loader_ids.is_empty() {
let val = redis.get_cached_keys_raw(
LOADER_FIELDS_NAMESPACE,
&loader_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|loader_ids| async move {
let result = sqlx::query!(
"
SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id
@ -411,44 +392,36 @@ impl LoaderField {
LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id
WHERE lfl.loader_id = ANY($1)
",
&loader_ids.iter().map(|x| x.0).collect::<Vec<_>>()
&loader_ids,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().and_then(|r| {
Some((LoaderId(r.loader_id) ,LoaderField {
.fetch(exec)
.try_fold(DashMap::new(), |acc: DashMap<i32, Vec<LoaderField>>, r| {
if let Some(field_type) = LoaderFieldType::build(&r.field_type, r.enum_type) {
let loader_field = LoaderField {
id: LoaderFieldId(r.id),
field_type: LoaderFieldType::build(&r.field_type, r.enum_type)?,
field_type,
field: r.field,
optional: r.optional,
min_val: r.min_val,
max_val: r.max_val,
}))
}))
};
acc.entry(r.loader_id)
.or_default()
.push(loader_field);
}
async move {
Ok(acc)
}
})
.try_collect::<Vec<(LoaderId, LoaderField)>>()
.await?;
let result: Vec<RedisLoaderFieldTuple> = result
.into_iter()
.fold(
HashMap::new(),
|mut acc: HashMap<LoaderId, Vec<LoaderField>>, x| {
acc.entry(x.0).or_default().push(x.1);
acc
Ok(result)
},
)
.into_iter()
.collect_vec();
).await?;
for (k, v) in result.into_iter() {
redis
.set_serialized_to_json(LOADER_FIELDS_NAMESPACE, k.0, (k, &v), None)
.await?;
found_loader_fields.insert(k, v);
}
}
Ok(found_loader_fields)
Ok(val.into_iter().map(|x| (LoaderId(x.0), x.1)).collect())
}
// Gets all fields for a given loader(s)
@ -597,71 +570,51 @@ impl LoaderFieldEnumValue {
loader_field_enum_ids: &[LoaderFieldEnumId],
exec: E,
redis: &RedisPool,
) -> Result<Vec<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)>, DatabaseError>
) -> Result<HashMap<LoaderFieldEnumId, Vec<LoaderFieldEnumValue>>, DatabaseError>
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let mut found_enums = Vec::new();
let mut remaining_enums: Vec<LoaderFieldEnumId> = loader_field_enum_ids.to_vec();
if !remaining_enums.is_empty() {
let enums = redis
.multi_get::<String>(
let val = redis.get_cached_keys_raw(
LOADER_FIELD_ENUM_VALUES_NAMESPACE,
loader_field_enum_ids.iter().map(|x| x.0),
)
.await?;
for lfe in enums {
if let Some(lfe) = lfe.and_then(|x| {
serde_json::from_str::<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)>(&x).ok()
}) {
remaining_enums.retain(|x| lfe.0 .0 != x.0);
found_enums.push(lfe.1);
continue;
}
}
}
let remaining_enums = remaining_enums.iter().map(|x| x.0).collect::<Vec<_>>();
let result = sqlx::query!(
&loader_field_enum_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|loader_field_enum_ids| async move {
let values = sqlx::query!(
"
SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values
WHERE enum_id = ANY($1)
ORDER BY enum_id, ordering, created DESC
",
&remaining_enums
&loader_field_enum_ids
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|c| LoaderFieldEnumValue {
.fetch(exec)
.try_fold(DashMap::new(), |acc: DashMap<i32, Vec<LoaderFieldEnumValue>>, c| {
let value = LoaderFieldEnumValue {
id: LoaderFieldEnumValueId(c.id),
enum_id: LoaderFieldEnumId(c.enum_id),
value: c.value,
ordering: c.ordering,
created: c.created,
metadata: c.metadata.unwrap_or_default(),
}))
})
.try_collect::<Vec<LoaderFieldEnumValue>>()
.await?;
};
// Convert from an Vec<LoaderFieldEnumValue> to a Vec<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)>
let cachable_enum_sets: Vec<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)> = result
.clone()
.into_iter()
.group_by(|x| x.enum_id) // we sort by enum_id, so this will group all values of the same enum_id together
.into_iter()
.map(|(k, v)| (k, v.collect::<Vec<_>>().to_vec()))
.collect();
for (k, v) in cachable_enum_sets.iter() {
redis
.set_serialized_to_json(LOADER_FIELD_ENUM_VALUES_NAMESPACE, k.0, v, None)
.await?;
acc.entry(c.enum_id)
.or_default()
.push(value);
async move {
Ok(acc)
}
})
.await?;
Ok(cachable_enum_sets)
Ok(values)
},
).await?;
Ok(val
.into_iter()
.map(|x| (LoaderFieldEnumId(x.0), x.1))
.collect())
}
// Matches filter against metadata of enum values

View File

@ -48,4 +48,6 @@ pub enum DatabaseError {
SerdeCacheError(#[from] serde_json::Error),
#[error("Schema error: {0}")]
SchemaError(String),
#[error("Timeout when waiting for cache subscriber")]
CacheTimeout,
}

View File

@ -1,7 +1,8 @@
use crate::{
database::redis::RedisPool,
models::ids::base62_impl::{parse_base62, to_base62},
};
use crate::{database::redis::RedisPool, models::ids::base62_impl::parse_base62};
use dashmap::DashMap;
use futures::TryStreamExt;
use std::fmt::{Debug, Display};
use std::hash::Hash;
use super::{ids::*, TeamMember};
use serde::{Deserialize, Serialize};
@ -97,7 +98,7 @@ impl Organization {
Self::get_many(&ids, exec, redis).await
}
pub async fn get_many<'a, E, T: ToString>(
pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
organization_strings: &[T],
exec: E,
redis: &RedisPool,
@ -105,120 +106,56 @@ impl Organization {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
if organization_strings.is_empty() {
return Ok(Vec::new());
}
let mut found_organizations = Vec::new();
let mut remaining_strings = organization_strings
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let mut organization_ids = organization_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
organization_ids.append(
&mut redis
.multi_get::<i64>(
ORGANIZATIONS_TITLES_NAMESPACE,
organization_strings
.iter()
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>(),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !organization_ids.is_empty() {
let organizations = redis
.multi_get::<String>(
let val = redis
.get_cached_keys_with_slug(
ORGANIZATIONS_NAMESPACE,
organization_ids.iter().map(|x| x.to_string()),
)
.await?;
for organization in organizations {
if let Some(organization) =
organization.and_then(|x| serde_json::from_str::<Organization>(&x).ok())
{
remaining_strings.retain(|x| {
&to_base62(organization.id.0 as u64) != x
&& organization.slug.to_lowercase() != x.to_lowercase()
});
found_organizations.push(organization);
continue;
}
}
}
if !remaining_strings.is_empty() {
let organization_ids_parsed: Vec<i64> = remaining_strings
ORGANIZATIONS_TITLES_NAMESPACE,
false,
organization_strings,
|ids| async move {
let org_ids: Vec<i64> = ids
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let slugs = ids
.into_iter()
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>();
let organizations: Vec<Organization> = sqlx::query!(
let organizations = sqlx::query!(
"
SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color
FROM organizations o
WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)
GROUP BY o.id;
",
&organization_ids_parsed,
&remaining_strings
.into_iter()
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>(),
&org_ids,
&slugs,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|m| Organization {
.fetch(exec)
.try_fold(DashMap::new(), |acc, m| {
let org = Organization {
id: OrganizationId(m.id),
slug: m.slug,
slug: m.slug.clone(),
name: m.name,
team_id: TeamId(m.team_id),
description: m.description,
icon_url: m.icon_url,
color: m.color.map(|x| x as u32),
}))
};
acc.insert(m.id, (Some(m.slug), org));
async move { Ok(acc) }
})
.try_collect::<Vec<Organization>>()
.await?;
for organization in organizations {
redis
.set_serialized_to_json(
ORGANIZATIONS_NAMESPACE,
organization.id.0,
&organization,
None,
)
.await?;
redis
.set(
ORGANIZATIONS_TITLES_NAMESPACE,
&organization.slug.to_lowercase(),
&organization.id.0.to_string(),
None,
Ok(organizations)
},
)
.await?;
found_organizations.push(organization);
}
}
Ok(found_organizations)
Ok(val)
}
// Gets organization associated with a project ID, if it exists and there is one

View File

@ -1,10 +1,14 @@
use super::ids::*;
use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::ids::base62_impl::parse_base62;
use crate::models::pats::Scopes;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;
const PATS_NAMESPACE: &str = "pats";
const PATS_TOKENS_NAMESPACE: &str = "pats_tokens";
@ -51,7 +55,7 @@ impl PersonalAccessToken {
Ok(())
}
pub async fn get<'a, E, T: ToString>(
pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
id: T,
exec: E,
redis: &RedisPool,
@ -79,7 +83,7 @@ impl PersonalAccessToken {
PersonalAccessToken::get_many(&ids, exec, redis).await
}
pub async fn get_many<'a, E, T: ToString>(
pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
pat_strings: &[T],
exec: E,
redis: &RedisPool,
@ -87,105 +91,53 @@ impl PersonalAccessToken {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if pat_strings.is_empty() {
return Ok(Vec::new());
}
let mut found_pats = Vec::new();
let mut remaining_strings = pat_strings
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let mut pat_ids = pat_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
pat_ids.append(
&mut redis
.multi_get::<i64>(
let val = redis
.get_cached_keys_with_slug(
PATS_NAMESPACE,
PATS_TOKENS_NAMESPACE,
pat_strings.iter().map(|x| x.to_string()),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !pat_ids.is_empty() {
let pats = redis
.multi_get::<String>(PATS_NAMESPACE, pat_ids.iter().map(|x| x.to_string()))
.await?;
for pat in pats {
if let Some(pat) =
pat.and_then(|x| serde_json::from_str::<PersonalAccessToken>(&x).ok())
{
remaining_strings
.retain(|x| &to_base62(pat.id.0 as u64) != x && &pat.access_token != x);
found_pats.push(pat);
continue;
}
}
}
if !remaining_strings.is_empty() {
let pat_ids_parsed: Vec<i64> = remaining_strings
true,
pat_strings,
|ids| async move {
let pat_ids: Vec<i64> = ids
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let db_pats: Vec<PersonalAccessToken> = sqlx::query!(
let slugs = ids.into_iter().map(|x| x.to_string()).collect::<Vec<_>>();
let pats = sqlx::query!(
"
SELECT id, name, access_token, scopes, user_id, created, expires, last_used
FROM pats
WHERE id = ANY($1) OR access_token = ANY($2)
ORDER BY created DESC
",
&pat_ids_parsed,
&remaining_strings
.into_iter()
.map(|x| x.to_string())
.collect::<Vec<_>>(),
&pat_ids,
&slugs,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|x| PersonalAccessToken {
.fetch(exec)
.try_fold(DashMap::new(), |acc, x| {
let pat = PersonalAccessToken {
id: PatId(x.id),
name: x.name,
access_token: x.access_token,
access_token: x.access_token.clone(),
scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE),
user_id: UserId(x.user_id),
created: x.created,
expires: x.expires,
last_used: x.last_used,
}))
})
.try_collect::<Vec<PersonalAccessToken>>()
.await?;
};
for pat in db_pats {
redis
.set_serialized_to_json(PATS_NAMESPACE, pat.id.0, &pat, None)
acc.insert(x.id, (Some(x.access_token), pat));
async move { Ok(acc) }
})
.await?;
redis
.set(
PATS_TOKENS_NAMESPACE,
&pat.access_token,
&pat.id.0.to_string(),
None,
Ok(pats)
},
)
.await?;
found_pats.push(pat);
}
}
Ok(found_pats)
Ok(val)
}
pub async fn get_user_pats<'a, E>(
@ -206,7 +158,6 @@ impl PersonalAccessToken {
return Ok(res.into_iter().map(PatId).collect());
}
use futures::TryStreamExt;
let db_pats: Vec<PatId> = sqlx::query!(
"
SELECT id

View File

@ -5,13 +5,15 @@ use super::{ids::*, User};
use crate::database::models;
use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::ids::base62_impl::parse_base62;
use crate::models::projects::{MonetizationStatus, ProjectStatus};
use chrono::{DateTime, Utc};
use dashmap::{DashMap, DashSet};
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;
pub const PROJECTS_NAMESPACE: &str = "projects";
pub const PROJECTS_SLUGS_NAMESPACE: &str = "projects_slugs";
@ -505,7 +507,7 @@ impl Project {
Project::get_many(&ids, exec, redis).await
}
pub async fn get_many<'a, E, T: ToString>(
pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
project_strings: &[T],
exec: E,
redis: &RedisPool,
@ -513,68 +515,21 @@ impl Project {
where
E: sqlx::Acquire<'a, Database = sqlx::Postgres>,
{
let project_strings = project_strings
.iter()
.map(|x| x.to_string())
.unique()
.collect::<Vec<String>>();
if project_strings.is_empty() {
return Ok(Vec::new());
}
let mut redis = redis.connect().await?;
let mut exec = exec.acquire().await?;
let mut found_projects = Vec::new();
let mut remaining_strings = project_strings.clone();
let mut project_ids = project_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
project_ids.append(
&mut redis
.multi_get::<i64>(
PROJECTS_SLUGS_NAMESPACE,
project_strings.iter().map(|x| x.to_string().to_lowercase()),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !project_ids.is_empty() {
let projects = redis
.multi_get::<String>(
let val = redis.get_cached_keys_with_slug(
PROJECTS_NAMESPACE,
project_ids.iter().map(|x| x.to_string()),
)
.await?;
for project in projects {
if let Some(project) =
project.and_then(|x| serde_json::from_str::<QueryProject>(&x).ok())
{
remaining_strings.retain(|x| {
&to_base62(project.inner.id.0 as u64) != x
&& project.inner.slug.as_ref().map(|x| x.to_lowercase())
!= Some(x.to_lowercase())
});
found_projects.push(project);
continue;
}
}
}
if !remaining_strings.is_empty() {
let project_ids_parsed: Vec<i64> = remaining_strings
PROJECTS_SLUGS_NAMESPACE,
false,
project_strings,
|ids| async move {
let mut exec = exec.acquire().await?;
let project_ids_parsed: Vec<i64> = ids
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let slugs = remaining_strings
let slugs = ids
.into_iter()
.map(|x| x.to_lowercase())
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>();
let all_version_ids = DashSet::new();
@ -784,7 +739,7 @@ impl Project {
.try_collect()
.await?;
let db_projects: Vec<QueryProject> = sqlx::query!(
let projects = sqlx::query!(
"
SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,
m.icon_url icon_url, m.description description, m.published published,
@ -805,9 +760,8 @@ impl Project {
&project_ids_parsed,
&slugs,
)
.fetch_many(&mut *exec)
.try_filter_map(|e| async {
Ok(e.right().map(|m| {
.fetch(&mut *exec)
.try_fold(DashMap::new(), |acc, m| {
let id = m.id;
let project_id = ProjectId(id);
let VersionLoaderData {
@ -825,7 +779,7 @@ impl Project {
.filter(|x| loader_loader_field_ids.contains(&x.id))
.collect::<Vec<_>>();
QueryProject {
let project = QueryProject {
inner: Project {
id: ProjectId(id),
team_id: TeamId(m.team_id),
@ -874,30 +828,18 @@ impl Project {
urls,
aggregate_version_fields: VersionField::from_query_json(version_fields, &loader_fields, &loader_field_enum_values, true),
thread_id: ThreadId(m.thread_id),
}}))
};
acc.insert(m.id, (m.slug, project));
async move { Ok(acc) }
})
.try_collect::<Vec<QueryProject>>()
.await?;
for project in db_projects {
redis
.set_serialized_to_json(PROJECTS_NAMESPACE, project.inner.id.0, &project, None)
.await?;
if let Some(slug) = &project.inner.slug {
redis
.set(
PROJECTS_SLUGS_NAMESPACE,
&slug.to_lowercase(),
&project.inner.id.0.to_string(),
None,
)
.await?;
}
found_projects.push(project);
}
}
Ok(projects)
},
).await?;
Ok(found_projects)
Ok(val)
}
pub async fn get_dependencies<'a, E>(

View File

@ -1,9 +1,12 @@
use super::ids::*;
use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::ids::base62_impl::parse_base62;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;
const SESSIONS_NAMESPACE: &str = "sessions";
const SESSIONS_IDS_NAMESPACE: &str = "sessions_ids";
@ -79,7 +82,7 @@ pub struct Session {
}
impl Session {
pub async fn get<'a, E, T: ToString>(
pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
id: T,
exec: E,
redis: &RedisPool,
@ -120,7 +123,7 @@ impl Session {
Session::get_many(&ids, exec, redis).await
}
pub async fn get_many<'a, E, T: ToString>(
pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
session_strings: &[T],
exec: E,
redis: &RedisPool,
@ -130,61 +133,22 @@ impl Session {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if session_strings.is_empty() {
return Ok(Vec::new());
}
let mut found_sessions = Vec::new();
let mut remaining_strings = session_strings
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let mut session_ids = session_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
session_ids.append(
&mut redis
.multi_get::<i64>(
SESSIONS_IDS_NAMESPACE,
session_strings.iter().map(|x| x.to_string()),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !session_ids.is_empty() {
let sessions = redis
.multi_get::<String>(
let val = redis.get_cached_keys_with_slug(
SESSIONS_NAMESPACE,
session_ids.iter().map(|x| x.to_string()),
)
.await?;
for session in sessions {
if let Some(session) =
session.and_then(|x| serde_json::from_str::<Session>(&x).ok())
{
remaining_strings
.retain(|x| &to_base62(session.id.0 as u64) != x && &session.session != x);
found_sessions.push(session);
continue;
}
}
}
if !remaining_strings.is_empty() {
let session_ids_parsed: Vec<i64> = remaining_strings
SESSIONS_IDS_NAMESPACE,
true,
session_strings,
|ids| async move {
let session_ids: Vec<i64> = ids
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let db_sessions: Vec<Session> = sqlx::query!(
let slugs = ids
.into_iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let db_sessions = sqlx::query!(
"
SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,
city, country, ip, user_agent
@ -192,14 +156,14 @@ impl Session {
WHERE id = ANY($1) OR session = ANY($2)
ORDER BY created DESC
",
&session_ids_parsed,
&remaining_strings.into_iter().map(|x| x.to_string()).collect::<Vec<_>>(),
&session_ids,
&slugs,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|x| Session {
.fetch(exec)
.try_fold(DashMap::new(), |acc, x| {
let session = Session {
id: SessionId(x.id),
session: x.session,
session: x.session.clone(),
user_id: UserId(x.user_id),
created: x.created,
last_login: x.last_login,
@ -211,28 +175,18 @@ impl Session {
country: x.country,
ip: x.ip,
user_agent: x.user_agent,
}))
};
acc.insert(x.id, (Some(x.session), session));
async move { Ok(acc) }
})
.try_collect::<Vec<Session>>()
.await?;
for session in db_sessions {
redis
.set_serialized_to_json(SESSIONS_NAMESPACE, session.id.0, &session, None)
.await?;
redis
.set(
SESSIONS_IDS_NAMESPACE,
&session.session,
&session.id.0.to_string(),
None,
)
.await?;
found_sessions.push(session);
}
}
Ok(db_sessions)
}).await?;
Ok(found_sessions)
Ok(val)
}
pub async fn get_user_sessions<'a, E>(

View File

@ -3,6 +3,8 @@ use crate::{
database::redis::RedisPool,
models::teams::{OrganizationPermissions, ProjectPermissions},
};
use dashmap::DashMap;
use futures::TryStreamExt;
use itertools::Itertools;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
@ -203,41 +205,15 @@ impl TeamMember {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
{
use futures::stream::TryStreamExt;
if team_ids.is_empty() {
return Ok(Vec::new());
}
let mut redis = redis.connect().await?;
let mut team_ids_parsed: Vec<i64> = team_ids.iter().map(|x| x.0).collect();
let mut found_teams = Vec::new();
let teams = redis
.multi_get::<String>(
let val = redis.get_cached_keys(
TEAMS_NAMESPACE,
team_ids_parsed.iter().map(|x| x.to_string()),
)
.await?;
for team_raw in teams {
if let Some(mut team) = team_raw
.clone()
.and_then(|x| serde_json::from_str::<Vec<TeamMember>>(&x).ok())
{
if let Some(team_id) = team.first().map(|x| x.team_id) {
team_ids_parsed.retain(|x| &team_id.0 != x);
}
found_teams.append(&mut team);
continue;
}
}
if !team_ids_parsed.is_empty() {
let teams: Vec<TeamMember> = sqlx::query!(
&team_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|team_ids| async move {
let teams = sqlx::query!(
"
SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,
accepted, payouts_split,
@ -246,11 +222,11 @@ impl TeamMember {
WHERE team_id = ANY($1)
ORDER BY team_id, ordering;
",
&team_ids_parsed
&team_ids
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|m| TeamMember {
.fetch(exec)
.try_fold(DashMap::new(), |acc: DashMap<i64, Vec<TeamMember>>, m| {
let member = TeamMember {
id: TeamMemberId(m.id),
team_id: TeamId(m.team_id),
role: m.member_role,
@ -264,26 +240,21 @@ impl TeamMember {
user_id: UserId(m.user_id),
payouts_split: m.payouts_split,
ordering: m.ordering,
}))
};
acc.entry(m.team_id)
.or_default()
.push(member);
async move { Ok(acc) }
})
.try_collect::<Vec<TeamMember>>()
.await?;
for (id, mut members) in teams
.into_iter()
.group_by(|x| x.team_id)
.into_iter()
.map(|(key, group)| (key, group.collect::<Vec<_>>()))
.collect::<Vec<_>>()
{
redis
.set_serialized_to_json(TEAMS_NAMESPACE, id.0, &members, None)
.await?;
found_teams.append(&mut members);
}
}
Ok(teams)
},
).await?;
Ok(found_teams)
Ok(val.into_iter().flatten().collect())
}
pub async fn clear_cache(id: TeamId, redis: &RedisPool) -> Result<(), super::DatabaseError> {
@ -315,8 +286,6 @@ impl TeamMember {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
use futures::stream::TryStreamExt;
let team_ids_parsed: Vec<i64> = team_ids.iter().map(|x| x.0).collect();
let team_members = sqlx::query!(

View File

@ -5,8 +5,11 @@ use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::users::Badges;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;
const USERS_NAMESPACE: &str = "users";
const USER_USERNAMES_NAMESPACE: &str = "users_usernames";
@ -132,7 +135,7 @@ impl User {
User::get_many(&ids, exec, redis).await
}
pub async fn get_many<'a, E, T: ToString>(
pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
users_strings: &[T],
exec: E,
redis: &RedisPool,
@ -142,58 +145,23 @@ impl User {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if users_strings.is_empty() {
return Ok(Vec::new());
}
let mut found_users = Vec::new();
let mut remaining_strings = users_strings
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let mut user_ids = users_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
user_ids.append(
&mut redis
.multi_get::<i64>(
let val = redis.get_cached_keys_with_slug(
USERS_NAMESPACE,
USER_USERNAMES_NAMESPACE,
users_strings.iter().map(|x| x.to_string().to_lowercase()),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !user_ids.is_empty() {
let users = redis
.multi_get::<String>(USERS_NAMESPACE, user_ids.iter().map(|x| x.to_string()))
.await?;
for user in users {
if let Some(user) = user.and_then(|x| serde_json::from_str::<User>(&x).ok()) {
remaining_strings.retain(|x| {
&to_base62(user.id.0 as u64) != x
&& user.username.to_lowercase() != x.to_lowercase()
});
found_users.push(user);
continue;
}
}
}
if !remaining_strings.is_empty() {
let user_ids_parsed: Vec<i64> = remaining_strings
false,
users_strings,
|ids| async move {
let user_ids: Vec<i64> = ids
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let db_users: Vec<User> = sqlx::query!(
let slugs = ids
.into_iter()
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>();
let users = sqlx::query!(
"
SELECT id, name, email,
avatar_url, username, bio,
@ -205,15 +173,12 @@ impl User {
FROM users
WHERE id = ANY($1) OR LOWER(username) = ANY($2)
",
&user_ids_parsed,
&remaining_strings
.into_iter()
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>(),
&user_ids,
&slugs,
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|u| User {
.fetch(exec)
.try_fold(DashMap::new(), |acc, u| {
let user = User {
id: UserId(u.id),
github_id: u.github_id,
discord_id: u.discord_id,
@ -225,7 +190,7 @@ impl User {
email: u.email,
email_verified: u.email_verified,
avatar_url: u.avatar_url,
username: u.username,
username: u.username.clone(),
bio: u.bio,
created: u.created,
role: u.role,
@ -237,28 +202,16 @@ impl User {
paypal_email: u.paypal_email,
venmo_handle: u.venmo_handle,
totp_secret: u.totp_secret,
}))
};
acc.insert(u.id, (Some(u.username), user));
async move { Ok(acc) }
})
.try_collect::<Vec<User>>()
.await?;
for user in db_users {
redis
.set_serialized_to_json(USERS_NAMESPACE, user.id.0, &user, None)
.await?;
redis
.set(
USER_USERNAMES_NAMESPACE,
&user.username.to_lowercase(),
&user.id.0.to_string(),
None,
)
.await?;
found_users.push(user);
}
}
Ok(found_users)
Ok(users)
}).await?;
Ok(val)
}
pub async fn get_email<'a, E>(email: &str, exec: E) -> Result<Option<UserId>, sqlx::Error>

View File

@ -8,6 +8,7 @@ use crate::database::redis::RedisPool;
use crate::models::projects::{FileType, VersionStatus};
use chrono::{DateTime, Utc};
use dashmap::{DashMap, DashSet};
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
@ -469,47 +470,12 @@ impl Version {
where
E: sqlx::Acquire<'a, Database = sqlx::Postgres>,
{
let version_ids = version_ids
.iter()
.unique()
.copied()
.collect::<Vec<VersionId>>();
use futures::stream::TryStreamExt;
if version_ids.is_empty() {
return Ok(Vec::new());
}
let mut exec = exec.acquire().await?;
let mut redis = redis.connect().await?;
let mut version_ids_parsed: Vec<i64> = version_ids.iter().map(|x| x.0).collect();
let mut found_versions = Vec::new();
let versions = redis
.multi_get::<String>(
let mut val = redis.get_cached_keys(
VERSIONS_NAMESPACE,
version_ids_parsed
.clone()
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>(),
)
.await?;
&version_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|version_ids| async move {
let mut exec = exec.acquire().await?;
for version in versions {
if let Some(version) =
version.and_then(|x| serde_json::from_str::<QueryVersion>(&x).ok())
{
version_ids_parsed.retain(|x| &version.inner.id.0 != x);
found_versions.push(version);
continue;
}
}
if !version_ids_parsed.is_empty() {
let loader_field_enum_value_ids = DashSet::new();
let version_fields: DashMap<VersionId, Vec<QueryVersionField>> = sqlx::query!(
"
@ -517,7 +483,7 @@ impl Version {
FROM version_fields
WHERE version_id = ANY($1)
",
&version_ids_parsed
&version_ids
)
.fetch(&mut *exec)
.try_fold(
@ -568,7 +534,7 @@ impl Version {
WHERE v.id = ANY($1)
GROUP BY version_id
",
&version_ids_parsed
&version_ids
).fetch(&mut *exec)
.map_ok(|m| {
let version_id = VersionId(m.version_id);
@ -662,7 +628,7 @@ impl Version {
FROM files f
WHERE f.version_id = ANY($1)
",
&version_ids_parsed
&version_ids
).fetch(&mut *exec)
.try_fold(DashMap::new(), |acc : DashMap<VersionId, Vec<File>>, m| {
let file = File {
@ -715,7 +681,7 @@ impl Version {
FROM dependencies d
WHERE dependent_id = ANY($1)
",
&version_ids_parsed
&version_ids
).fetch(&mut *exec)
.try_fold(DashMap::new(), |acc : DashMap<_,Vec<QueryDependency>>, m| {
let dependency = QueryDependency {
@ -732,21 +698,18 @@ impl Version {
}
).await?;
let db_versions: Vec<QueryVersion> = sqlx::query!(
let res = sqlx::query!(
"
SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,
v.changelog changelog, v.date_published date_published, v.downloads downloads,
v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering
FROM versions v
WHERE v.id = ANY($1)
ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC;
WHERE v.id = ANY($1);
",
&version_ids_parsed
&version_ids
)
.fetch_many(&mut *exec)
.try_filter_map(|e| async {
Ok(e.right().map(|v|
{
.fetch(&mut *exec)
.try_fold(DashMap::new(), |acc, v| {
let version_id = VersionId(v.id);
let VersionLoaderData {
loaders,
@ -763,7 +726,7 @@ impl Version {
.filter(|x| loader_loader_field_ids.contains(&x.id))
.collect::<Vec<_>>();
QueryVersion {
let query_version = QueryVersion {
inner: Version {
id: VersionId(v.id),
project_id: ProjectId(v.mod_id),
@ -821,22 +784,20 @@ impl Version {
project_types,
games,
dependencies,
}
}))
};
acc.insert(v.id, query_version);
async move { Ok(acc) }
})
.try_collect::<Vec<QueryVersion>>()
.await?;
for version in db_versions {
redis
.set_serialized_to_json(VERSIONS_NAMESPACE, version.inner.id.0, &version, None)
.await?;
Ok(res)
},
).await?;
found_versions.push(version);
}
}
val.sort();
Ok(found_versions)
Ok(val)
}
pub async fn get_file_from_hash<'a, 'b, E>(
@ -866,43 +827,11 @@ impl Version {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
{
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
if hashes.is_empty() {
return Ok(Vec::new());
}
let mut file_ids_parsed = hashes.to_vec();
let mut found_files = Vec::new();
let files = redis
.multi_get::<String>(
let val = redis.get_cached_keys(
VERSION_FILES_NAMESPACE,
file_ids_parsed
.iter()
.map(|hash| format!("{}_{}", algorithm, hash))
.collect::<Vec<_>>(),
)
.await?;
for file in files {
if let Some(mut file) =
file.and_then(|x| serde_json::from_str::<Vec<SingleFile>>(&x).ok())
{
file_ids_parsed.retain(|x| {
!file
.iter()
.any(|y| y.hashes.iter().any(|z| z.0 == &algorithm && z.1 == x))
});
found_files.append(&mut file);
continue;
}
}
if !file_ids_parsed.is_empty() {
let db_files: Vec<SingleFile> = sqlx::query!(
&hashes.iter().map(|x| format!("{algorithm}_{x}")).collect::<Vec<_>>(),
|file_ids| async move {
let files = sqlx::query!(
"
SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,
JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes
@ -914,62 +843,50 @@ impl Version {
ORDER BY v.date_published
",
algorithm,
&file_ids_parsed.into_iter().map(|x| x.as_bytes().to_vec()).collect::<Vec<_>>(),
&file_ids.into_iter().flat_map(|x| x.split('_').last().map(|x| x.as_bytes().to_vec())).collect::<Vec<_>>(),
)
.fetch_many(executor)
.try_filter_map(|e| async {
Ok(e.right().map(|f| {
.fetch(executor)
.try_fold(DashMap::new(), |acc, f| {
#[derive(Deserialize)]
struct Hash {
pub algorithm: String,
pub hash: String,
}
SingleFile {
let hashes = serde_json::from_value::<Vec<Hash>>(
f.hashes.unwrap_or_default(),
)
.ok()
.unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash))
.collect::<HashMap<_, _>>();
if let Some(hash) = hashes.get(&algorithm) {
let key = format!("{algorithm}_{hash}");
let file = SingleFile {
id: FileId(f.id),
version_id: VersionId(f.version_id),
project_id: ProjectId(f.mod_id),
url: f.url,
filename: f.filename,
hashes: serde_json::from_value::<Vec<Hash>>(
f.hashes.unwrap_or_default(),
)
.ok()
.unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash)).collect(),
hashes,
primary: f.is_primary,
size: f.size as u32,
file_type: f.file_type.map(|x| FileType::from_string(&x)),
};
acc.insert(key, file);
}
}
))
async move { Ok(acc) }
})
.try_collect::<Vec<SingleFile>>()
.await?;
let mut save_files: HashMap<String, Vec<SingleFile>> = HashMap::new();
for file in db_files {
for (algo, hash) in &file.hashes {
let key = format!("{}_{}", algo, hash);
if let Some(files) = save_files.get_mut(&key) {
files.push(file.clone());
} else {
save_files.insert(key, vec![file.clone()]);
}
}
Ok(files)
}
).await?;
for (key, mut files) in save_files {
redis
.set_serialized_to_json(VERSION_FILES_NAMESPACE, key, &files, None)
.await?;
found_files.append(&mut files);
}
}
Ok(found_files)
Ok(val)
}
pub async fn clear_cache(

View File

@ -1,10 +1,20 @@
use super::models::DatabaseError;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use chrono::{TimeZone, Utc};
use dashmap::DashMap;
use deadpool_redis::{Config, Runtime};
use itertools::Itertools;
use redis::{cmd, Cmd, FromRedisValue};
use std::fmt::Display;
use redis::{cmd, Cmd, ExistenceCheck, SetExpiry, SetOptions};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
use std::future::Future;
use std::hash::Hash;
use std::pin::Pin;
use std::time::Duration;
const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
const DEFAULT_EXPIRY: i64 = 60 * 60 * 12; // 12 hours
const ACTUAL_EXPIRY: i64 = 60 * 30; // 30 minutes
#[derive(Clone)]
pub struct RedisPool {
@ -47,6 +57,364 @@ impl RedisPool {
meta_namespace: self.meta_namespace.clone(),
})
}
pub async fn get_cached_keys<F, Fut, T, K>(
&self,
namespace: &str,
keys: &[K],
closure: F,
) -> Result<Vec<T>, DatabaseError>
where
F: FnOnce(Vec<K>) -> Fut,
Fut: Future<Output = Result<DashMap<K, T>, DatabaseError>>,
T: Serialize + DeserializeOwned,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug,
{
Ok(self
.get_cached_keys_raw(namespace, keys, closure)
.await?
.into_iter()
.map(|x| x.1)
.collect())
}
pub async fn get_cached_keys_raw<F, Fut, T, K>(
&self,
namespace: &str,
keys: &[K],
closure: F,
) -> Result<HashMap<K, T>, DatabaseError>
where
F: FnOnce(Vec<K>) -> Fut,
Fut: Future<Output = Result<DashMap<K, T>, DatabaseError>>,
T: Serialize + DeserializeOwned,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug,
{
self.get_cached_keys_raw_with_slug(namespace, None, false, keys, |ids| async move {
Ok(closure(ids)
.await?
.into_iter()
.map(|(key, val)| (key, (None::<String>, val)))
.collect())
})
.await
}
pub async fn get_cached_keys_with_slug<F, Fut, T, I, K, S>(
&self,
namespace: &str,
slug_namespace: &str,
case_sensitive: bool,
keys: &[I],
closure: F,
) -> Result<Vec<T>, DatabaseError>
where
F: FnOnce(Vec<I>) -> Fut,
Fut: Future<Output = Result<DashMap<K, (Option<S>, T)>, DatabaseError>>,
T: Serialize + DeserializeOwned,
I: Display + Hash + Eq + PartialEq + Clone + Debug,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize,
S: Display + Clone + DeserializeOwned + Serialize + Debug,
{
Ok(self
.get_cached_keys_raw_with_slug(
namespace,
Some(slug_namespace),
case_sensitive,
keys,
closure,
)
.await?
.into_iter()
.map(|x| x.1)
.collect())
}
pub async fn get_cached_keys_raw_with_slug<F, Fut, T, I, K, S>(
&self,
namespace: &str,
slug_namespace: Option<&str>,
case_sensitive: bool,
keys: &[I],
closure: F,
) -> Result<HashMap<K, T>, DatabaseError>
where
F: FnOnce(Vec<I>) -> Fut,
Fut: Future<Output = Result<DashMap<K, (Option<S>, T)>, DatabaseError>>,
T: Serialize + DeserializeOwned,
I: Display + Hash + Eq + PartialEq + Clone + Debug,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize,
S: Display + Clone + DeserializeOwned + Serialize + Debug,
{
let connection = self.connect().await?.connection;
let ids = keys
.iter()
.map(|x| (x.to_string(), x.clone()))
.collect::<DashMap<String, I>>();
if ids.is_empty() {
return Ok(HashMap::new());
}
let get_cached_values =
|ids: DashMap<String, I>, mut connection: deadpool_redis::Connection| async move {
let slug_ids = if let Some(slug_namespace) = slug_namespace {
cmd("MGET")
.arg(
ids.iter()
.map(|x| {
format!(
"{}_{slug_namespace}:{}",
self.meta_namespace,
if case_sensitive {
x.value().to_string()
} else {
x.value().to_string().to_lowercase()
}
)
})
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<String>>>(&mut connection)
.await?
.into_iter()
.flatten()
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cached_values = cmd("MGET")
.arg(
ids.iter()
.map(|x| x.value().to_string())
.chain(ids.iter().filter_map(|x| {
parse_base62(&x.value().to_string())
.ok()
.map(|x| x.to_string())
}))
.chain(slug_ids)
.map(|x| format!("{}_{namespace}:{x}", self.meta_namespace))
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<String>>>(&mut connection)
.await?
.into_iter()
.filter_map(|x| {
x.and_then(|val| serde_json::from_str::<RedisValue<T, K, S>>(&val).ok())
.map(|val| (val.key.clone(), val))
})
.collect::<HashMap<_, _>>();
Ok::<_, DatabaseError>((cached_values, connection, ids))
};
let current_time = Utc::now();
let mut expired_values = HashMap::new();
let (cached_values_raw, mut connection, ids) = get_cached_values(ids, connection).await?;
let mut cached_values = cached_values_raw
.into_iter()
.filter_map(|(key, val)| {
if Utc.timestamp(val.iat + ACTUAL_EXPIRY, 0) < current_time {
expired_values.insert(val.key.to_string(), val);
None
} else {
let key_str = val.key.to_string();
ids.remove(&key_str);
if let Ok(value) = key_str.parse::<u64>() {
let base62 = to_base62(value);
ids.remove(&base62);
}
if let Some(ref alias) = val.alias {
ids.remove(&alias.to_string());
}
Some((key, val))
}
})
.collect::<HashMap<_, _>>();
let subscribe_ids = DashMap::new();
if !ids.is_empty() {
let mut pipe = redis::pipe();
let fetch_ids = ids.iter().map(|x| x.key().clone()).collect::<Vec<_>>();
fetch_ids.iter().for_each(|key| {
pipe.atomic().set_options(
format!("{}_{namespace}:{}/lock", self.meta_namespace, key),
100,
SetOptions::default()
.get(true)
.conditional_set(ExistenceCheck::NX)
.with_expiration(SetExpiry::EX(60)),
);
});
let results = pipe
.query_async::<_, Vec<Option<i32>>>(&mut connection)
.await?;
for (idx, key) in fetch_ids.into_iter().enumerate() {
if let Some(locked) = results.get(idx) {
if locked.is_none() {
continue;
}
}
if let Some((key, raw_key)) = ids.remove(&key) {
if let Some(val) = expired_values.remove(&key) {
if let Some(ref alias) = val.alias {
ids.remove(&alias.to_string());
}
if let Ok(value) = val.key.to_string().parse::<u64>() {
let base62 = to_base62(value);
ids.remove(&base62);
}
cached_values.insert(val.key.clone(), val);
} else {
subscribe_ids.insert(key, raw_key);
}
}
}
}
#[allow(clippy::type_complexity)]
let mut fetch_tasks: Vec<
Pin<Box<dyn Future<Output = Result<HashMap<K, RedisValue<T, K, S>>, DatabaseError>>>>,
> = Vec::new();
if !ids.is_empty() {
fetch_tasks.push(Box::pin(async {
let fetch_ids = ids.iter().map(|x| x.value().clone()).collect::<Vec<_>>();
let vals = closure(fetch_ids).await?;
let mut return_values = HashMap::new();
let mut pipe = redis::pipe();
if !vals.is_empty() {
for (key, (slug, value)) in vals {
let value = RedisValue {
key: key.clone(),
iat: Utc::now().timestamp(),
val: value,
alias: slug.clone(),
};
pipe.atomic().set_ex(
format!("{}_{namespace}:{key}", self.meta_namespace),
serde_json::to_string(&value)?,
DEFAULT_EXPIRY as u64,
);
if let Some(slug) = slug {
ids.remove(&slug.to_string());
if let Some(slug_namespace) = slug_namespace {
let actual_slug = if case_sensitive {
slug.to_string()
} else {
slug.to_string().to_lowercase()
};
pipe.atomic().set_ex(
format!(
"{}_{slug_namespace}:{}",
self.meta_namespace, actual_slug
),
key.to_string(),
DEFAULT_EXPIRY as u64,
);
pipe.atomic().del(format!(
"{}_{namespace}:{}/lock",
self.meta_namespace, actual_slug
));
}
}
let key_str = key.to_string();
ids.remove(&key_str);
if let Ok(value) = key_str.parse::<u64>() {
let base62 = to_base62(value);
ids.remove(&base62);
pipe.atomic()
.del(format!("{}_{namespace}:{base62}/lock", self.meta_namespace));
}
pipe.atomic()
.del(format!("{}_{namespace}:{key}/lock", self.meta_namespace));
return_values.insert(key, value);
}
}
for (key, _) in ids {
pipe.atomic()
.del(format!("{}_{namespace}:{key}/lock", self.meta_namespace));
}
pipe.query_async(&mut connection).await?;
Ok(return_values)
}));
}
if !subscribe_ids.is_empty() {
fetch_tasks.push(Box::pin(async {
let mut connection = self.pool.get().await?;
let mut interval = tokio::time::interval(Duration::from_millis(100));
let start = Utc::now();
loop {
let results = cmd("MGET")
.arg(
subscribe_ids
.iter()
.map(|x| {
format!("{}_{namespace}:{}/lock", self.meta_namespace, x.key())
})
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<String>>>(&mut connection)
.await?;
if results.into_iter().all(|x| x.is_none()) {
break;
}
if (Utc::now() - start) > chrono::Duration::seconds(5) {
return Err(DatabaseError::CacheTimeout);
}
interval.tick().await;
}
let (return_values, _, _) = get_cached_values(subscribe_ids, connection).await?;
Ok(return_values)
}));
}
if !fetch_tasks.is_empty() {
for map in futures::future::try_join_all(fetch_tasks).await? {
for (key, value) in map {
cached_values.insert(key, value);
}
}
}
Ok(cached_values.into_iter().map(|x| (x.0, x.1.val)).collect())
}
}
impl RedisConnection {
@ -120,26 +488,6 @@ impl RedisConnection {
.and_then(|x| serde_json::from_str(&x).ok()))
}
pub async fn multi_get<R>(
&mut self,
namespace: &str,
ids: impl IntoIterator<Item = impl Display>,
) -> Result<Vec<Option<R>>, DatabaseError>
where
R: FromRedisValue,
{
let mut cmd = cmd("MGET");
let ids = ids.into_iter().map(|x| x.to_string()).collect_vec();
redis_args(
&mut cmd,
&ids.into_iter()
.map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x))
.collect_vec(),
);
Ok(redis_execute(&mut cmd, &mut self.connection).await?)
}
pub async fn delete<T1>(&mut self, namespace: &str, id: T1) -> Result<(), DatabaseError>
where
T1: Display,
@ -177,6 +525,15 @@ impl RedisConnection {
}
}
#[derive(Serialize, Deserialize)]
pub struct RedisValue<T, K, S> {
key: K,
#[serde(skip_serializing_if = "Option::is_none")]
alias: Option<S>,
iat: i64,
val: T,
}
pub fn redis_args(cmd: &mut Cmd, args: &[String]) {
for arg in args {
cmd.arg(arg);
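
For the model structs, each get_many now reduces to handing get_cached_keys (or the slug-aware variant) a namespace, the requested ids, and a closure that bulk-loads only the cache misses; the helper handles hit lookup, the per-key locks, and writing results back. A minimal hypothetical caller (the Widget type, widgets table, and WIDGETS_NAMESPACE are illustrative only, not part of this PR) might look like:

use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;
use dashmap::DashMap;
use futures::TryStreamExt;
use serde::{Deserialize, Serialize};
use sqlx::Row;

const WIDGETS_NAMESPACE: &str = "widgets";

#[derive(Serialize, Deserialize)]
pub struct Widget {
    pub id: i64,
    pub name: String,
}

impl Widget {
    pub async fn get_many<'a, E>(
        ids: &[i64],
        exec: E,
        redis: &RedisPool,
    ) -> Result<Vec<Widget>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
    {
        // Cache hits are served straight from Redis; only the missing ids reach
        // this closure, and concurrent callers for the same ids wait on the
        // per-key lock instead of all querying Postgres at once.
        redis
            .get_cached_keys(WIDGETS_NAMESPACE, ids, |ids| async move {
                sqlx::query("SELECT id, name FROM widgets WHERE id = ANY($1)")
                    .bind(ids)
                    .fetch(exec)
                    .try_fold(DashMap::new(), |acc, row| {
                        let widget = Widget {
                            id: row.get("id"),
                            name: row.get("name"),
                        };
                        acc.insert(widget.id, widget);
                        async move { Ok(acc) }
                    })
                    .await
                    .map_err(DatabaseError::from)
            })
            .await
    }
}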

View File

@ -5,7 +5,7 @@ use super::{
use serde::{Deserialize, Serialize};
/// The ID of a team
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct OrganizationId(pub u64);

View File

@ -5,7 +5,7 @@ use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
/// The ID of a team
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct PatId(pub u64);

View File

@ -3,7 +3,7 @@ use crate::models::users::UserId;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct SessionId(pub u64);

View File

@ -117,8 +117,6 @@ impl AnalyticsQueue {
let new_count = if let Some((views, monetized)) = raw_views.get_mut(idx) {
if let Some(count) = count {
println!("len: {} count: {}", views.len(), count);
if count > 3 {
*monetized = false;
continue;

View File

@ -115,6 +115,8 @@ pub async fn pat_full_test() {
"expires": Utc::now() + Duration::days(1), // no longer expired!
}))
.to_request();
println!("PAT ID FOR TEST: {}", id);
let resp = test_env.call(req).await;
assert_status!(&resp, StatusCode::NO_CONTENT);
assert_eq!(mock_pat_test(access_token).await, 200); // Works again

View File

@ -69,7 +69,10 @@ async fn test_get_project() {
.unwrap()
.unwrap();
let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
assert_eq!(cached_project["inner"]["slug"], json!(alpha_project_slug));
assert_eq!(
cached_project["val"]["inner"]["slug"],
json!(alpha_project_slug)
);
// Make the request again, this time it should be cached
let resp = api.get_project(alpha_project_id, USER_USER_PAT).await;
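
The ["val"] indexing here (and in the version test below) reflects the new cache envelope from the Redis pool changes: entries are no longer stored as the bare model JSON but wrapped together with their key, optional alias, and write timestamp. A simplified, non-generic sketch of that shape (the real struct is the generic RedisValue in the diff above):

#[derive(serde::Serialize, serde::Deserialize)]
struct CachedEntry<T> {
    key: i64,              // canonical id of the cached object
    alias: Option<String>, // e.g. a slug, username, or access token
    iat: i64,              // write timestamp, used for the 30-minute logical expiry
    val: T,                // the cached model itself, hence cached_project["val"]["inner"]["slug"]
}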

View File

@ -55,7 +55,7 @@ async fn test_get_version() {
.unwrap();
let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
assert_eq!(
cached_project["inner"]["project_id"],
cached_project["val"]["inner"]["project_id"],
json!(parse_base62(alpha_project_id).unwrap())
);
@ -617,6 +617,7 @@ async fn version_ordering_for_specified_orderings_orders_lower_order_first() {
USER_USER_PAT,
)
.await;
assert_common_version_ids(&versions, vec![new_version_id, alpha_version_id]);
})
.await;