Fix cache stampede issues + generalize cache (#884)

* caching changes

* fix cache stampede issues

* Use pub/sub for better DB fetches

* remove pubsub

* remove debugs

* Fix caches not working

* fix search indexing removal
Geometrically 2024-03-26 21:15:50 -07:00 committed by GitHub
parent decfcb6c27
commit a0aa350a08
48 changed files with 1540 additions and 1655 deletions
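The changes below replace each model's hand-rolled Redis lookup (multi_get the requested keys, deserialize whatever was cached, query Postgres for the leftovers, then write each row back with set_serialized_to_json) with a generalized helper on RedisPool: get_cached_keys / get_cached_keys_with_slug take the requested ids plus a closure that fetches only the missing ids in one batch. The sketch below is a minimal, self-contained illustration of that caller-facing shape only, using an in-memory map and a synchronous closure; it is not the repository's actual RedisPool implementation or its stampede-protection logic.

use std::collections::HashMap;
use std::sync::Mutex;

/// Illustrative stand-in for the generalized cache helper; the real code talks
/// to Redis through RedisPool::get_cached_keys and runs async.
struct KeyCache {
    entries: Mutex<HashMap<i64, String>>,
}

impl KeyCache {
    fn new() -> Self {
        Self {
            entries: Mutex::new(HashMap::new()),
        }
    }

    /// Return a value per requested key, hitting the backing store at most once
    /// for all misses combined (the closure receives only the missing keys).
    /// Holding the lock across the fetch also keeps concurrent callers from
    /// racing to fetch the same keys; a very crude stand-in for the stampede
    /// protection this commit is about.
    fn get_cached_keys<F>(&self, keys: &[i64], fetch_missing: F) -> Vec<String>
    where
        F: FnOnce(&[i64]) -> HashMap<i64, String>,
    {
        let mut guard = self.entries.lock().unwrap();

        // Collect cache hits and the keys that still need a database fetch.
        let mut results = HashMap::new();
        let mut missing = Vec::new();
        for key in keys {
            match guard.get(key) {
                Some(v) => {
                    results.insert(*key, v.clone());
                }
                None => missing.push(*key),
            }
        }

        // One batched fetch for every miss; write the results back to the cache.
        if !missing.is_empty() {
            for (k, v) in fetch_missing(&missing) {
                guard.insert(k, v.clone());
                results.insert(k, v);
            }
        }

        keys.iter().filter_map(|k| results.get(k).cloned()).collect()
    }
}

fn main() {
    let cache = KeyCache::new();
    let fetched = cache.get_cached_keys(&[1, 2, 3], |missing| {
        // Pretend this is the single SQL query issued for all cache misses.
        missing
            .iter()
            .map(|id| (*id, format!("row-{id}")))
            .collect::<HashMap<_, _>>()
    });
    println!("{fetched:?}");
}

Because the closure only ever sees the misses, every caller gets the same batched DB query and write-back behaviour instead of each model re-implementing it, which is what makes the per-model diffs below so much shorter.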

View File

@@ -0,0 +1,46 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "version_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d"
}

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values\n WHERE enum_id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ",
 "describe": {
 "columns": [
 {
@@ -48,5 +48,5 @@
 false
 ]
 },
-"hash": "603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77"
+"hash": "04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433"
 }

View File

@@ -0,0 +1,46 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "mod_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8"
}

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT version_id, field_id, int_value, enum_value, string_value\n FROM version_fields\n WHERE version_id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -42,5 +42,5 @@
 true
 ]
 },
-"hash": "2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75"
+"hash": "10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color\n FROM organizations o\n WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)\n GROUP BY o.id;\n ",
 "describe": {
 "columns": [
 {
@@ -55,5 +55,5 @@
 true
 ]
 },
-"hash": "4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849"
+"hash": "28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1)\n ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC;\n ",
+"query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1);\n ",
 "describe": {
 "columns": [
 {
@@ -90,5 +90,5 @@
 true
 ]
 },
-"hash": "8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301"
+"hash": "32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash\n FROM hashes\n WHERE file_id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -30,5 +30,5 @@
 null
 ]
 },
-"hash": "6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538"
+"hash": "34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions v on v.id = f.version_id\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published\n ORDER BY v.date_published\n ",
 "describe": {
 "columns": [
 {
@@ -67,5 +67,5 @@
 null
 ]
 },
-"hash": "0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816"
+"hash": "3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation\n FROM mods_links ml\n INNER JOIN mods m ON ml.joining_mod_id = m.id \n INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ",
+"query": "\n SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation\n FROM mods_links ml\n INNER JOIN mods m ON ml.joining_mod_id = m.id\n INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ",
 "describe": {
 "columns": [
 {
@@ -43,5 +43,5 @@
 false
 ]
 },
-"hash": "8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47"
+"hash": "4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1) \n ORDER BY enum_id, ordering, created DESC\n ",
+"query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ",
 "describe": {
 "columns": [
 {
@@ -48,5 +48,5 @@
 true
 ]
 },
-"hash": "2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95"
+"hash": "43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT c.id id, c.name name, c.description description,\n c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,\n c.updated updated, c.status status,\n ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods\n FROM collections c\n LEFT JOIN collections_mods cm ON cm.collection_id = c.id\n WHERE c.id = ANY($1)\n GROUP BY c.id;\n ",
 "describe": {
 "columns": [
 {
@@ -72,5 +72,5 @@
 null
 ]
 },
-"hash": "f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0"
+"hash": "4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type\n FROM dependencies d\n WHERE dependent_id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -42,5 +42,5 @@
 false
 ]
 },
-"hash": "b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64"
+"hash": "623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id\n FROM uploaded_images\n WHERE id = ANY($1)\n GROUP BY id;\n ",
 "describe": {
 "columns": [
 {
@@ -72,5 +72,5 @@
 true
 ]
 },
-"hash": "5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a"
+"hash": "64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT id\n FROM pats\n WHERE user_id = $1\n ORDER BY created DESC\n ",
 "describe": {
 "columns": [
 {
@@ -18,5 +18,5 @@
 false
 ]
 },
-"hash": "21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64"
+"hash": "6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,\n city, country, ip, user_agent\n FROM sessions\n WHERE id = ANY($1) OR session = ANY($2)\n ORDER BY created DESC\n ",
 "describe": {
 "columns": [
 {
@@ -91,5 +91,5 @@
 false
 ]
 },
-"hash": "c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514"
+"hash": "74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT mod_id, v.id as id, date_published\n FROM mods m\n INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ",
 "describe": {
 "columns": [
 {
@@ -32,5 +32,5 @@
 false
 ]
 },
-"hash": "1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec"
+"hash": "7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value\n FROM versions v\n INNER JOIN version_fields vf ON v.id = vf.version_id\n WHERE v.id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -48,5 +48,5 @@
 true
 ]
 },
-"hash": "ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c"
+"hash": "7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional\n FROM loader_fields lf\n WHERE id = ANY($1) \n ",
+"query": "\n SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional\n FROM loader_fields lf\n WHERE id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -54,5 +54,5 @@
 false
 ]
 },
-"hash": "5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478"
+"hash": "887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,\n m.icon_url icon_url, m.description description, m.published published,\n m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,\n m.license_url license_url,\n m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,\n m.webhook_sent, m.color,\n t.id thread_id, m.monetization_status monetization_status,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories\n FROM mods m \n INNER JOIN threads t ON t.mod_id = m.id\n LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id\n LEFT JOIN categories c ON mc.joining_category_id = c.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n GROUP BY t.id, m.id;\n ",
+"query": "\n SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,\n m.icon_url icon_url, m.description description, m.published published,\n m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,\n m.license_url license_url,\n m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,\n m.webhook_sent, m.color,\n t.id thread_id, m.monetization_status monetization_status,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories\n FROM mods m\n INNER JOIN threads t ON t.mod_id = m.id\n LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id\n LEFT JOIN categories c ON mc.joining_category_id = c.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n GROUP BY t.id, m.id;\n ",
 "describe": {
 "columns": [
 {
@@ -169,5 +169,5 @@
 null
 ]
 },
-"hash": "2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d"
+"hash": "92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT id, name, access_token, scopes, user_id, created, expires, last_used\n FROM pats\n WHERE id = ANY($1) OR access_token = ANY($2)\n ORDER BY created DESC\n ",
 "describe": {
 "columns": [
 {
@@ -61,5 +61,5 @@
 true
 ]
 },
-"hash": "e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d"
+"hash": "a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT id, name, email,\n avatar_url, username, bio,\n created, role, badges,\n balance,\n github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id,\n email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email,\n venmo_handle\n FROM users\n WHERE id = ANY($1) OR LOWER(username) = ANY($2)\n ",
 "describe": {
 "columns": [
 {
@@ -151,5 +151,5 @@
 true
 ]
 },
-"hash": "5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0"
+"hash": "a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,\n accepted, payouts_split, \n ordering, user_id\n FROM team_members\n WHERE team_id = ANY($1)\n ORDER BY team_id, ordering;\n ",
+"query": "\n SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,\n accepted, payouts_split,\n ordering, user_id\n FROM team_members\n WHERE team_id = ANY($1)\n ORDER BY team_id, ordering;\n ",
 "describe": {
 "columns": [
 {
@@ -72,5 +72,5 @@
 false
 ]
 },
-"hash": "c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345"
+"hash": "a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type\n FROM files f\n WHERE f.version_id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -54,5 +54,5 @@
 true
 ]
 },
-"hash": "e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823"
+"hash": "b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id\n FROM loader_fields lf\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id\n WHERE lfl.loader_id = ANY($1)\n ",
 "describe": {
 "columns": [
 {
@@ -60,5 +60,5 @@
 false
 ]
 },
-"hash": "bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50"
+"hash": "c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c"
 }

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
-"query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1) \n ORDER BY enum_id, ordering, created ASC\n ",
+"query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1)\n ORDER BY enum_id, ordering, created ASC\n ",
 "describe": {
 "columns": [
 {
@@ -48,5 +48,5 @@
 true
 ]
 },
-"hash": "99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0"
+"hash": "d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51"
 }

View File

@@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "mod_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681"
}

View File

@@ -1,46 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "version_id",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "loaders",
"type_info": "VarcharArray"
},
{
"ordinal": 2,
"name": "project_types",
"type_info": "VarcharArray"
},
{
"ordinal": 3,
"name": "games",
"type_info": "VarcharArray"
},
{
"ordinal": 4,
"name": "loader_fields",
"type_info": "Int4Array"
}
],
"parameters": {
"Left": [
"Int8Array"
]
},
"nullable": [
false,
null,
null,
null,
null
]
},
"hash": "f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2"
}

View File

@@ -1,6 +1,6 @@
 {
 "db_name": "PostgreSQL",
 "query": "\n SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering\n FROM mods_gallery mg\n INNER JOIN mods m ON mg.mod_id = m.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ",
 "describe": {
 "columns": [
 {
@@ -55,5 +55,5 @@
 false
 ]
 },
-"hash": "7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b"
+"hash": "f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84"
 }

View File

@@ -4,6 +4,8 @@ use crate::database::models::DatabaseError;
 use crate::database::redis::RedisPool;
 use crate::models::collections::CollectionStatus;
 use chrono::{DateTime, Utc};
+use dashmap::DashMap;
+use futures::TryStreamExt;
 use serde::{Deserialize, Serialize};

 const COLLECTIONS_NAMESPACE: &str = "collections";

@@ -155,93 +157,55 @@ impl Collection {
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        use futures::TryStreamExt;
-
-        let mut redis = redis.connect().await?;
-
-        if collection_ids.is_empty() {
-            return Ok(Vec::new());
-        }
-
-        let mut found_collections = Vec::new();
-        let mut remaining_collections: Vec<CollectionId> = collection_ids.to_vec();
-
-        if !collection_ids.is_empty() {
-            let collections = redis
-                .multi_get::<String>(
-                    COLLECTIONS_NAMESPACE,
-                    collection_ids.iter().map(|x| x.0.to_string()),
-                )
-                .await?;
-            for collection in collections {
-                if let Some(collection) =
-                    collection.and_then(|x| serde_json::from_str::<Collection>(&x).ok())
-                {
-                    remaining_collections.retain(|x| collection.id.0 != x.0);
-                    found_collections.push(collection);
-                    continue;
-                }
-            }
-        }
-
-        if !remaining_collections.is_empty() {
-            let collection_ids_parsed: Vec<i64> =
-                remaining_collections.iter().map(|x| x.0).collect();
-            let db_collections: Vec<Collection> = sqlx::query!(
-                "
-                SELECT c.id id, c.name name, c.description description,
-                c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,
-                c.updated updated, c.status status,
-                ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods
-                FROM collections c
-                LEFT JOIN collections_mods cm ON cm.collection_id = c.id
-                WHERE c.id = ANY($1)
-                GROUP BY c.id;
-                ",
-                &collection_ids_parsed,
-            )
-            .fetch_many(exec)
-            .try_filter_map(|e| async {
-                Ok(e.right().map(|m| {
-                    let id = m.id;
-                    Collection {
-                        id: CollectionId(id),
-                        user_id: UserId(m.user_id),
-                        name: m.name.clone(),
-                        description: m.description.clone(),
-                        icon_url: m.icon_url.clone(),
-                        color: m.color.map(|x| x as u32),
-                        created: m.created,
-                        updated: m.updated,
-                        status: CollectionStatus::from_string(&m.status),
-                        projects: m
-                            .mods
-                            .unwrap_or_default()
-                            .into_iter()
-                            .map(ProjectId)
-                            .collect(),
-                    }
-                }))
-            })
-            .try_collect::<Vec<Collection>>()
-            .await?;
-
-            for collection in db_collections {
-                redis
-                    .set_serialized_to_json(
-                        COLLECTIONS_NAMESPACE,
-                        collection.id.0,
-                        &collection,
-                        None,
-                    )
-                    .await?;
-
-                found_collections.push(collection);
-            }
-        }
-
-        Ok(found_collections)
+        let val = redis
+            .get_cached_keys(
+                COLLECTIONS_NAMESPACE,
+                &collection_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
+                |collection_ids| async move {
+                    let collections = sqlx::query!(
+                        "
+                        SELECT c.id id, c.name name, c.description description,
+                        c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,
+                        c.updated updated, c.status status,
+                        ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods
+                        FROM collections c
+                        LEFT JOIN collections_mods cm ON cm.collection_id = c.id
+                        WHERE c.id = ANY($1)
+                        GROUP BY c.id;
+                        ",
+                        &collection_ids,
+                    )
+                    .fetch(exec)
+                    .try_fold(DashMap::new(), |acc, m| {
+                        let collection = Collection {
+                            id: CollectionId(m.id),
+                            user_id: UserId(m.user_id),
+                            name: m.name.clone(),
+                            description: m.description.clone(),
+                            icon_url: m.icon_url.clone(),
+                            color: m.color.map(|x| x as u32),
+                            created: m.created,
+                            updated: m.updated,
+                            status: CollectionStatus::from_string(&m.status),
+                            projects: m
+                                .mods
+                                .unwrap_or_default()
+                                .into_iter()
+                                .map(ProjectId)
+                                .collect(),
+                        };

+                        acc.insert(m.id, collection);
+                        async move { Ok(acc) }
+                    })
+                    .await?;

+                    Ok(collections)
+                },
+            )
+            .await?;

+        Ok(val)
     }

     pub async fn clear_cache(id: CollectionId, redis: &RedisPool) -> Result<(), DatabaseError> {

View File

@@ -2,6 +2,7 @@ use super::ids::*;
 use crate::database::redis::RedisPool;
 use crate::{database::models::DatabaseError, models::images::ImageContext};
 use chrono::{DateTime, Utc};
+use dashmap::DashMap;
 use serde::{Deserialize, Serialize};

 const IMAGES_NAMESPACE: &str = "images";

@@ -180,70 +181,44 @@ impl Image {
     {
         use futures::TryStreamExt;

-        let mut redis = redis.connect().await?;
-
-        if image_ids.is_empty() {
-            return Ok(Vec::new());
-        }
-
-        let mut found_images = Vec::new();
-        let mut remaining_ids = image_ids.to_vec();
-
-        let image_ids = image_ids.iter().map(|x| x.0).collect::<Vec<_>>();
-
-        if !image_ids.is_empty() {
-            let images = redis
-                .multi_get::<String>(IMAGES_NAMESPACE, image_ids.iter().map(|x| x.to_string()))
-                .await?;
-            for image in images {
-                if let Some(image) = image.and_then(|x| serde_json::from_str::<Image>(&x).ok()) {
-                    remaining_ids.retain(|x| image.id.0 != x.0);
-                    found_images.push(image);
-                    continue;
-                }
-            }
-        }
-
-        if !remaining_ids.is_empty() {
-            let db_images: Vec<Image> = sqlx::query!(
-                "
-                SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id
-                FROM uploaded_images
-                WHERE id = ANY($1)
-                GROUP BY id;
-                ",
-                &remaining_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
-            )
-            .fetch_many(exec)
-            .try_filter_map(|e| async {
-                Ok(e.right().map(|i| {
-                    let id = i.id;
-                    Image {
-                        id: ImageId(id),
-                        url: i.url,
-                        size: i.size as u64,
-                        created: i.created,
-                        owner_id: UserId(i.owner_id),
-                        context: i.context,
-                        project_id: i.mod_id.map(ProjectId),
-                        version_id: i.version_id.map(VersionId),
-                        thread_message_id: i.thread_message_id.map(ThreadMessageId),
-                        report_id: i.report_id.map(ReportId),
-                    }
-                }))
-            })
-            .try_collect::<Vec<Image>>()
-            .await?;
-
-            for image in db_images {
-                redis
-                    .set_serialized_to_json(IMAGES_NAMESPACE, image.id.0, &image, None)
-                    .await?;
-
-                found_images.push(image);
-            }
-        }
-
-        Ok(found_images)
+        let val = redis.get_cached_keys(
+            IMAGES_NAMESPACE,
+            &image_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
+            |image_ids| async move {
+                let images = sqlx::query!(
+                    "
+                    SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id
+                    FROM uploaded_images
+                    WHERE id = ANY($1)
+                    GROUP BY id;
+                    ",
+                    &image_ids,
+                )
+                .fetch(exec)
+                .try_fold(DashMap::new(), |acc, i| {
+                    let img = Image {
+                        id: ImageId(i.id),
+                        url: i.url,
+                        size: i.size as u64,
+                        created: i.created,
+                        owner_id: UserId(i.owner_id),
+                        context: i.context,
+                        project_id: i.mod_id.map(ProjectId),
+                        version_id: i.version_id.map(VersionId),
+                        thread_message_id: i.thread_message_id.map(ThreadMessageId),
+                        report_id: i.report_id.map(ReportId),
+                    };

+                    acc.insert(i.id, img);
+                    async move { Ok(acc) }
+                })
+                .await?;

+                Ok(images)
+            },
+        ).await?;

+        Ok(val)
     }

     pub async fn clear_cache(id: ImageId, redis: &RedisPool) -> Result<(), DatabaseError> {

View File

@@ -208,6 +208,13 @@ impl<'a> MinecraftGameVersionBuilder<'a> {
         .fetch_one(exec)
         .await?;

+        let mut conn = redis.connect().await?;
+        conn.delete(
+            crate::database::models::loader_fields::LOADER_FIELD_ENUM_VALUES_NAMESPACE,
+            game_versions_enum.id.0,
+        )
+        .await?;
+
         Ok(LoaderFieldEnumValueId(result.id))
     }
 }

View File

@@ -6,6 +6,7 @@ use super::DatabaseError;
 use crate::database::redis::RedisPool;
 use chrono::DateTime;
 use chrono::Utc;
+use dashmap::DashMap;
 use futures::TryStreamExt;
 use itertools::Itertools;
 use serde::{Deserialize, Serialize};

@@ -16,7 +17,7 @@ const LOADERS_LIST_NAMESPACE: &str = "loaders";
 const LOADER_FIELDS_NAMESPACE: &str = "loader_fields";
 const LOADER_FIELDS_NAMESPACE_ALL: &str = "loader_fields_all";
 const LOADER_FIELD_ENUMS_ID_NAMESPACE: &str = "loader_field_enums";
-const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values";
+pub const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values";

 #[derive(Clone, Serialize, Deserialize, Debug)]
 pub struct Game {
@ -380,75 +381,47 @@ impl LoaderField {
where where
E: sqlx::Executor<'a, Database = sqlx::Postgres>, E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{ {
type RedisLoaderFieldTuple = (LoaderId, Vec<LoaderField>); let val = redis.get_cached_keys_raw(
LOADER_FIELDS_NAMESPACE,
let mut redis = redis.connect().await?; &loader_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|loader_ids| async move {
let mut loader_ids = loader_ids.to_vec(); let result = sqlx::query!(
let cached_fields: Vec<RedisLoaderFieldTuple> = redis "
.multi_get::<String>(LOADER_FIELDS_NAMESPACE, loader_ids.iter().map(|x| x.0)) SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id
.await? FROM loader_fields lf
.into_iter() LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id
.flatten() WHERE lfl.loader_id = ANY($1)
.filter_map(|x: String| serde_json::from_str::<RedisLoaderFieldTuple>(&x).ok()) ",
.collect(); &loader_ids,
let mut found_loader_fields = HashMap::new();
if !cached_fields.is_empty() {
for (loader_id, fields) in cached_fields {
if loader_ids.contains(&loader_id) {
found_loader_fields.insert(loader_id, fields);
loader_ids.retain(|x| x != &loader_id);
}
}
}
if !loader_ids.is_empty() {
let result = sqlx::query!(
"
SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id
FROM loader_fields lf
LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id
WHERE lfl.loader_id = ANY($1)
",
&loader_ids.iter().map(|x| x.0).collect::<Vec<_>>()
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().and_then(|r| {
Some((LoaderId(r.loader_id) ,LoaderField {
id: LoaderFieldId(r.id),
field_type: LoaderFieldType::build(&r.field_type, r.enum_type)?,
field: r.field,
optional: r.optional,
min_val: r.min_val,
max_val: r.max_val,
}))
}))
})
.try_collect::<Vec<(LoaderId, LoaderField)>>()
.await?;
let result: Vec<RedisLoaderFieldTuple> = result
.into_iter()
.fold(
HashMap::new(),
|mut acc: HashMap<LoaderId, Vec<LoaderField>>, x| {
acc.entry(x.0).or_default().push(x.1);
acc
},
) )
.into_iter() .fetch(exec)
.collect_vec(); .try_fold(DashMap::new(), |acc: DashMap<i32, Vec<LoaderField>>, r| {
if let Some(field_type) = LoaderFieldType::build(&r.field_type, r.enum_type) {
let loader_field = LoaderField {
id: LoaderFieldId(r.id),
field_type,
field: r.field,
optional: r.optional,
min_val: r.min_val,
max_val: r.max_val,
};
for (k, v) in result.into_iter() { acc.entry(r.loader_id)
redis .or_default()
.set_serialized_to_json(LOADER_FIELDS_NAMESPACE, k.0, (k, &v), None) .push(loader_field);
}
async move {
Ok(acc)
}
})
.await?; .await?;
found_loader_fields.insert(k, v);
} Ok(result)
} },
Ok(found_loader_fields) ).await?;
Ok(val.into_iter().map(|x| (LoaderId(x.0), x.1)).collect())
} }
// Gets all fields for a given loader(s) // Gets all fields for a given loader(s)
@ -597,71 +570,51 @@ impl LoaderFieldEnumValue {
loader_field_enum_ids: &[LoaderFieldEnumId], loader_field_enum_ids: &[LoaderFieldEnumId],
exec: E, exec: E,
redis: &RedisPool, redis: &RedisPool,
) -> Result<Vec<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)>, DatabaseError> ) -> Result<HashMap<LoaderFieldEnumId, Vec<LoaderFieldEnumValue>>, DatabaseError>
where where
E: sqlx::Executor<'a, Database = sqlx::Postgres>, E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{ {
let mut redis = redis.connect().await?; let val = redis.get_cached_keys_raw(
let mut found_enums = Vec::new(); LOADER_FIELD_ENUM_VALUES_NAMESPACE,
let mut remaining_enums: Vec<LoaderFieldEnumId> = loader_field_enum_ids.to_vec(); &loader_field_enum_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
|loader_field_enum_ids| async move {
if !remaining_enums.is_empty() { let values = sqlx::query!(
let enums = redis "
.multi_get::<String>( SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values
LOADER_FIELD_ENUM_VALUES_NAMESPACE, WHERE enum_id = ANY($1)
loader_field_enum_ids.iter().map(|x| x.0), ORDER BY enum_id, ordering, created DESC
",
&loader_field_enum_ids
) )
.await?; .fetch(exec)
.try_fold(DashMap::new(), |acc: DashMap<i32, Vec<LoaderFieldEnumValue>>, c| {
let value = LoaderFieldEnumValue {
id: LoaderFieldEnumValueId(c.id),
enum_id: LoaderFieldEnumId(c.enum_id),
value: c.value,
ordering: c.ordering,
created: c.created,
metadata: c.metadata.unwrap_or_default(),
};
for lfe in enums { acc.entry(c.enum_id)
if let Some(lfe) = lfe.and_then(|x| { .or_default()
serde_json::from_str::<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)>(&x).ok() .push(value);
}) {
remaining_enums.retain(|x| lfe.0 .0 != x.0);
found_enums.push(lfe.1);
continue;
}
}
}
let remaining_enums = remaining_enums.iter().map(|x| x.0).collect::<Vec<_>>(); async move {
let result = sqlx::query!( Ok(acc)
" }
SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values })
WHERE enum_id = ANY($1) .await?;
ORDER BY enum_id, ordering, created DESC
",
&remaining_enums
)
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|c| LoaderFieldEnumValue {
id: LoaderFieldEnumValueId(c.id),
enum_id: LoaderFieldEnumId(c.enum_id),
value: c.value,
ordering: c.ordering,
created: c.created,
metadata: c.metadata.unwrap_or_default(),
}))
})
.try_collect::<Vec<LoaderFieldEnumValue>>()
.await?;
// Convert from an Vec<LoaderFieldEnumValue> to a Vec<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)> Ok(values)
let cachable_enum_sets: Vec<(LoaderFieldEnumId, Vec<LoaderFieldEnumValue>)> = result },
.clone() ).await?;
Ok(val
.into_iter() .into_iter()
.group_by(|x| x.enum_id) // we sort by enum_id, so this will group all values of the same enum_id together .map(|x| (LoaderFieldEnumId(x.0), x.1))
.into_iter() .collect())
.map(|(k, v)| (k, v.collect::<Vec<_>>().to_vec()))
.collect();
for (k, v) in cachable_enum_sets.iter() {
redis
.set_serialized_to_json(LOADER_FIELD_ENUM_VALUES_NAMESPACE, k.0, v, None)
.await?;
}
Ok(cachable_enum_sets)
} }
// Matches filter against metadata of enum values // Matches filter against metadata of enum values

View File

@@ -48,4 +48,6 @@ pub enum DatabaseError {
     SerdeCacheError(#[from] serde_json::Error),
     #[error("Schema error: {0}")]
     SchemaError(String),
+    #[error("Timeout when waiting for cache subscriber")]
+    CacheTimeout,
 }
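The new CacheTimeout variant suggests that a reader which finds another worker already fetching a key now waits with a deadline rather than forever. The internals of that wait are not part of this hunk, so the following is only a hypothetical sketch of how a bounded wait could map onto this error, using tokio::sync::Notify and an assumed budget; the real redis.rs logic may differ.

use std::time::Duration;
use tokio::sync::Notify;
use tokio::time::timeout;

/// Hypothetical error mirroring DatabaseError::CacheTimeout, for illustration only.
#[derive(Debug)]
enum CacheError {
    Timeout,
}

/// Wait for another task to finish filling the cache entry, but give up after a
/// bound so a stuck fetch cannot block every reader.
async fn wait_for_fill(notify: &Notify) -> Result<(), CacheError> {
    timeout(Duration::from_millis(200), notify.notified())
        .await
        .map_err(|_| CacheError::Timeout)
}

#[tokio::main]
async fn main() {
    let notify = Notify::new();
    // Nothing ever calls notify.notify_waiters(), so this wait times out.
    match wait_for_fill(&notify).await {
        Ok(()) => println!("cache filled by another worker"),
        Err(e) => println!("gave up waiting: {e:?}"),
    }
}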

View File

@@ -1,7 +1,8 @@
-use crate::{
-    database::redis::RedisPool,
-    models::ids::base62_impl::{parse_base62, to_base62},
-};
+use crate::{database::redis::RedisPool, models::ids::base62_impl::parse_base62};
+use dashmap::DashMap;
+use futures::TryStreamExt;
+use std::fmt::{Debug, Display};
+use std::hash::Hash;

 use super::{ids::*, TeamMember};
 use serde::{Deserialize, Serialize};

@@ -97,7 +98,7 @@ impl Organization {
         Self::get_many(&ids, exec, redis).await
     }

-    pub async fn get_many<'a, E, T: ToString>(
+    pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
         organization_strings: &[T],
         exec: E,
         redis: &RedisPool,
@ -105,120 +106,56 @@ impl Organization {
where where
E: sqlx::Executor<'a, Database = sqlx::Postgres>, E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{ {
use futures::stream::TryStreamExt; let val = redis
.get_cached_keys_with_slug(
let mut redis = redis.connect().await?; ORGANIZATIONS_NAMESPACE,
ORGANIZATIONS_TITLES_NAMESPACE,
if organization_strings.is_empty() { false,
return Ok(Vec::new()); organization_strings,
} |ids| async move {
let org_ids: Vec<i64> = ids
let mut found_organizations = Vec::new();
let mut remaining_strings = organization_strings
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let mut organization_ids = organization_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
organization_ids.append(
&mut redis
.multi_get::<i64>(
ORGANIZATIONS_TITLES_NAMESPACE,
organization_strings
.iter() .iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let slugs = ids
.into_iter()
.map(|x| x.to_string().to_lowercase()) .map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>(), .collect::<Vec<_>>();
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !organization_ids.is_empty() { let organizations = sqlx::query!(
let organizations = redis "
.multi_get::<String>( SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color
ORGANIZATIONS_NAMESPACE, FROM organizations o
organization_ids.iter().map(|x| x.to_string()), WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)
) GROUP BY o.id;
.await?; ",
&org_ids,
&slugs,
)
.fetch(exec)
.try_fold(DashMap::new(), |acc, m| {
let org = Organization {
id: OrganizationId(m.id),
slug: m.slug.clone(),
name: m.name,
team_id: TeamId(m.team_id),
description: m.description,
icon_url: m.icon_url,
color: m.color.map(|x| x as u32),
};
for organization in organizations { acc.insert(m.id, (Some(m.slug), org));
if let Some(organization) = async move { Ok(acc) }
organization.and_then(|x| serde_json::from_str::<Organization>(&x).ok()) })
{ .await?;
remaining_strings.retain(|x| {
&to_base62(organization.id.0 as u64) != x
&& organization.slug.to_lowercase() != x.to_lowercase()
});
found_organizations.push(organization);
continue;
}
}
}
if !remaining_strings.is_empty() { Ok(organizations)
let organization_ids_parsed: Vec<i64> = remaining_strings },
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let organizations: Vec<Organization> = sqlx::query!(
"
SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color
FROM organizations o
WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)
GROUP BY o.id;
",
&organization_ids_parsed,
&remaining_strings
.into_iter()
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>(),
) )
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|m| Organization {
id: OrganizationId(m.id),
slug: m.slug,
name: m.name,
team_id: TeamId(m.team_id),
description: m.description,
icon_url: m.icon_url,
color: m.color.map(|x| x as u32),
}))
})
.try_collect::<Vec<Organization>>()
.await?; .await?;
for organization in organizations { Ok(val)
redis
.set_serialized_to_json(
ORGANIZATIONS_NAMESPACE,
organization.id.0,
&organization,
None,
)
.await?;
redis
.set(
ORGANIZATIONS_TITLES_NAMESPACE,
&organization.slug.to_lowercase(),
&organization.id.0.to_string(),
None,
)
.await?;
found_organizations.push(organization);
}
}
Ok(found_organizations)
} }
// Gets organization associated with a project ID, if it exists and there is one // Gets organization associated with a project ID, if it exists and there is one

View File

@@ -1,10 +1,14 @@
 use super::ids::*;
 use crate::database::models::DatabaseError;
 use crate::database::redis::RedisPool;
-use crate::models::ids::base62_impl::{parse_base62, to_base62};
+use crate::models::ids::base62_impl::parse_base62;
 use crate::models::pats::Scopes;
 use chrono::{DateTime, Utc};
+use dashmap::DashMap;
+use futures::TryStreamExt;
 use serde::{Deserialize, Serialize};
+use std::fmt::{Debug, Display};
+use std::hash::Hash;

 const PATS_NAMESPACE: &str = "pats";
 const PATS_TOKENS_NAMESPACE: &str = "pats_tokens";

@@ -51,7 +55,7 @@ impl PersonalAccessToken {
         Ok(())
     }

-    pub async fn get<'a, E, T: ToString>(
+    pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
         id: T,
         exec: E,
         redis: &RedisPool,

@@ -79,7 +83,7 @@ impl PersonalAccessToken {
         PersonalAccessToken::get_many(&ids, exec, redis).await
     }

-    pub async fn get_many<'a, E, T: ToString>(
+    pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
         pat_strings: &[T],
         exec: E,
         redis: &RedisPool,
@ -87,105 +91,53 @@ impl PersonalAccessToken {
where where
E: sqlx::Executor<'a, Database = sqlx::Postgres>, E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{ {
use futures::TryStreamExt; let val = redis
.get_cached_keys_with_slug(
PATS_NAMESPACE,
PATS_TOKENS_NAMESPACE,
true,
pat_strings,
|ids| async move {
let pat_ids: Vec<i64> = ids
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let slugs = ids.into_iter().map(|x| x.to_string()).collect::<Vec<_>>();
let mut redis = redis.connect().await?; let pats = sqlx::query!(
"
SELECT id, name, access_token, scopes, user_id, created, expires, last_used
FROM pats
WHERE id = ANY($1) OR access_token = ANY($2)
ORDER BY created DESC
",
&pat_ids,
&slugs,
)
.fetch(exec)
.try_fold(DashMap::new(), |acc, x| {
let pat = PersonalAccessToken {
id: PatId(x.id),
name: x.name,
access_token: x.access_token.clone(),
scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE),
user_id: UserId(x.user_id),
created: x.created,
expires: x.expires,
last_used: x.last_used,
};
if pat_strings.is_empty() { acc.insert(x.id, (Some(x.access_token), pat));
return Ok(Vec::new()); async move { Ok(acc) }
} })
.await?;
let mut found_pats = Vec::new(); Ok(pats)
let mut remaining_strings = pat_strings },
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>();
let mut pat_ids = pat_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
pat_ids.append(
&mut redis
.multi_get::<i64>(
PATS_TOKENS_NAMESPACE,
pat_strings.iter().map(|x| x.to_string()),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !pat_ids.is_empty() {
let pats = redis
.multi_get::<String>(PATS_NAMESPACE, pat_ids.iter().map(|x| x.to_string()))
.await?;
for pat in pats {
if let Some(pat) =
pat.and_then(|x| serde_json::from_str::<PersonalAccessToken>(&x).ok())
{
remaining_strings
.retain(|x| &to_base62(pat.id.0 as u64) != x && &pat.access_token != x);
found_pats.push(pat);
continue;
}
}
}
if !remaining_strings.is_empty() {
let pat_ids_parsed: Vec<i64> = remaining_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let db_pats: Vec<PersonalAccessToken> = sqlx::query!(
"
SELECT id, name, access_token, scopes, user_id, created, expires, last_used
FROM pats
WHERE id = ANY($1) OR access_token = ANY($2)
ORDER BY created DESC
",
&pat_ids_parsed,
&remaining_strings
.into_iter()
.map(|x| x.to_string())
.collect::<Vec<_>>(),
) )
.fetch_many(exec)
.try_filter_map(|e| async {
Ok(e.right().map(|x| PersonalAccessToken {
id: PatId(x.id),
name: x.name,
access_token: x.access_token,
scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE),
user_id: UserId(x.user_id),
created: x.created,
expires: x.expires,
last_used: x.last_used,
}))
})
.try_collect::<Vec<PersonalAccessToken>>()
.await?; .await?;
for pat in db_pats { Ok(val)
redis
.set_serialized_to_json(PATS_NAMESPACE, pat.id.0, &pat, None)
.await?;
redis
.set(
PATS_TOKENS_NAMESPACE,
&pat.access_token,
&pat.id.0.to_string(),
None,
)
.await?;
found_pats.push(pat);
}
}
Ok(found_pats)
} }
pub async fn get_user_pats<'a, E>( pub async fn get_user_pats<'a, E>(
@@ -206,14 +158,13 @@ impl PersonalAccessToken {
             return Ok(res.into_iter().map(PatId).collect());
         }

-        use futures::TryStreamExt;
-
         let db_pats: Vec<PatId> = sqlx::query!(
             "
             SELECT id
             FROM pats
             WHERE user_id = $1
             ORDER BY created DESC
             ",
             user_id.0,
         )
         .fetch_many(exec)

View File

@@ -5,13 +5,15 @@ use super::{ids::*, User};
 use crate::database::models;
 use crate::database::models::DatabaseError;
 use crate::database::redis::RedisPool;
-use crate::models::ids::base62_impl::{parse_base62, to_base62};
+use crate::models::ids::base62_impl::parse_base62;
 use crate::models::projects::{MonetizationStatus, ProjectStatus};
 use chrono::{DateTime, Utc};
 use dashmap::{DashMap, DashSet};
 use futures::TryStreamExt;
 use itertools::Itertools;
 use serde::{Deserialize, Serialize};
+use std::fmt::{Debug, Display};
+use std::hash::Hash;

 pub const PROJECTS_NAMESPACE: &str = "projects";
 pub const PROJECTS_SLUGS_NAMESPACE: &str = "projects_slugs";

@@ -505,7 +507,7 @@ impl Project {
         Project::get_many(&ids, exec, redis).await
     }

-    pub async fn get_many<'a, E, T: ToString>(
+    pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
         project_strings: &[T],
         exec: E,
         redis: &RedisPool,
@ -513,301 +515,253 @@ impl Project {
where where
E: sqlx::Acquire<'a, Database = sqlx::Postgres>, E: sqlx::Acquire<'a, Database = sqlx::Postgres>,
{ {
let project_strings = project_strings let val = redis.get_cached_keys_with_slug(
.iter() PROJECTS_NAMESPACE,
.map(|x| x.to_string()) PROJECTS_SLUGS_NAMESPACE,
.unique() false,
.collect::<Vec<String>>(); project_strings,
|ids| async move {
if project_strings.is_empty() { let mut exec = exec.acquire().await?;
return Ok(Vec::new()); let project_ids_parsed: Vec<i64> = ids
}
let mut redis = redis.connect().await?;
let mut exec = exec.acquire().await?;
let mut found_projects = Vec::new();
let mut remaining_strings = project_strings.clone();
let mut project_ids = project_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64))
.collect::<Vec<_>>();
project_ids.append(
&mut redis
.multi_get::<i64>(
PROJECTS_SLUGS_NAMESPACE,
project_strings.iter().map(|x| x.to_string().to_lowercase()),
)
.await?
.into_iter()
.flatten()
.collect(),
);
if !project_ids.is_empty() {
let projects = redis
.multi_get::<String>(
PROJECTS_NAMESPACE,
project_ids.iter().map(|x| x.to_string()),
)
.await?;
for project in projects {
if let Some(project) =
project.and_then(|x| serde_json::from_str::<QueryProject>(&x).ok())
{
remaining_strings.retain(|x| {
&to_base62(project.inner.id.0 as u64) != x
&& project.inner.slug.as_ref().map(|x| x.to_lowercase())
!= Some(x.to_lowercase())
});
found_projects.push(project);
continue;
}
}
}
if !remaining_strings.is_empty() {
let project_ids_parsed: Vec<i64> = remaining_strings
.iter()
.flat_map(|x| parse_base62(&x.to_string()).ok())
.map(|x| x as i64)
.collect();
let slugs = remaining_strings
.into_iter()
.map(|x| x.to_lowercase())
.collect::<Vec<_>>();
let all_version_ids = DashSet::new();
let versions: DashMap<ProjectId, Vec<(VersionId, DateTime<Utc>)>> = sqlx::query!(
"
SELECT DISTINCT mod_id, v.id as id, date_published
FROM mods m
INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)
WHERE m.id = ANY($1) OR m.slug = ANY($2)
",
&project_ids_parsed,
&slugs,
&*crate::models::projects::VersionStatus::iterator()
.filter(|x| x.is_listed())
.map(|x| x.to_string())
.collect::<Vec<String>>()
)
.fetch(&mut *exec)
.try_fold(
DashMap::new(),
|acc: DashMap<ProjectId, Vec<(VersionId, DateTime<Utc>)>>, m| {
let version_id = VersionId(m.id);
let date_published = m.date_published;
all_version_ids.insert(version_id);
acc.entry(ProjectId(m.mod_id))
.or_default()
.push((version_id, date_published));
async move { Ok(acc) }
},
)
.await?;
let loader_field_enum_value_ids = DashSet::new();
let version_fields: DashMap<ProjectId, Vec<QueryVersionField>> = sqlx::query!(
"
SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value
FROM versions v
INNER JOIN version_fields vf ON v.id = vf.version_id
WHERE v.id = ANY($1)
",
&all_version_ids.iter().map(|x| x.0).collect::<Vec<_>>()
)
.fetch(&mut *exec)
.try_fold(
DashMap::new(),
|acc: DashMap<ProjectId, Vec<QueryVersionField>>, m| {
let qvf = QueryVersionField {
version_id: VersionId(m.version_id),
field_id: LoaderFieldId(m.field_id),
int_value: m.int_value,
enum_value: m.enum_value.map(LoaderFieldEnumValueId),
string_value: m.string_value,
};
if let Some(enum_value) = m.enum_value {
loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value));
}
acc.entry(ProjectId(m.mod_id)).or_default().push(qvf);
async move { Ok(acc) }
},
)
.await?;
let loader_field_enum_values: Vec<QueryLoaderFieldEnumValue> = sqlx::query!(
"
SELECT DISTINCT id, enum_id, value, ordering, created, metadata
FROM loader_field_enum_values lfev
WHERE id = ANY($1)
ORDER BY enum_id, ordering, created DESC
",
&loader_field_enum_value_ids
.iter() .iter()
.map(|x| x.0) .flat_map(|x| parse_base62(&x.to_string()).ok())
.collect::<Vec<_>>() .map(|x| x as i64)
) .collect();
.fetch(&mut *exec) let slugs = ids
.map_ok(|m| QueryLoaderFieldEnumValue { .into_iter()
id: LoaderFieldEnumValueId(m.id), .map(|x| x.to_string().to_lowercase())
enum_id: LoaderFieldEnumId(m.enum_id), .collect::<Vec<_>>();
value: m.value,
ordering: m.ordering,
created: m.created,
metadata: m.metadata,
})
.try_collect()
.await?;
let mods_gallery: DashMap<ProjectId, Vec<GalleryItem>> = sqlx::query!( let all_version_ids = DashSet::new();
" let versions: DashMap<ProjectId, Vec<(VersionId, DateTime<Utc>)>> = sqlx::query!(
SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering "
FROM mods_gallery mg SELECT DISTINCT mod_id, v.id as id, date_published
INNER JOIN mods m ON mg.mod_id = m.id FROM mods m
WHERE m.id = ANY($1) OR m.slug = ANY($2) INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)
", WHERE m.id = ANY($1) OR m.slug = ANY($2)
&project_ids_parsed, ",
&slugs &project_ids_parsed,
).fetch(&mut *exec) &slugs,
.try_fold(DashMap::new(), |acc : DashMap<ProjectId, Vec<GalleryItem>>, m| { &*crate::models::projects::VersionStatus::iterator()
acc.entry(ProjectId(m.mod_id)) .filter(|x| x.is_listed())
.or_default() .map(|x| x.to_string())
.push(GalleryItem { .collect::<Vec<String>>()
image_url: m.image_url, )
featured: m.featured.unwrap_or(false), .fetch(&mut *exec)
name: m.name, .try_fold(
description: m.description, DashMap::new(),
created: m.created, |acc: DashMap<ProjectId, Vec<(VersionId, DateTime<Utc>)>>, m| {
let version_id = VersionId(m.id);
let date_published = m.date_published;
all_version_ids.insert(version_id);
acc.entry(ProjectId(m.mod_id))
.or_default()
.push((version_id, date_published));
async move { Ok(acc) }
},
)
.await?;
let loader_field_enum_value_ids = DashSet::new();
let version_fields: DashMap<ProjectId, Vec<QueryVersionField>> = sqlx::query!(
"
SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value
FROM versions v
INNER JOIN version_fields vf ON v.id = vf.version_id
WHERE v.id = ANY($1)
",
&all_version_ids.iter().map(|x| x.0).collect::<Vec<_>>()
)
.fetch(&mut *exec)
.try_fold(
DashMap::new(),
|acc: DashMap<ProjectId, Vec<QueryVersionField>>, m| {
let qvf = QueryVersionField {
version_id: VersionId(m.version_id),
field_id: LoaderFieldId(m.field_id),
int_value: m.int_value,
enum_value: m.enum_value.map(LoaderFieldEnumValueId),
string_value: m.string_value,
};
if let Some(enum_value) = m.enum_value {
loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value));
}
acc.entry(ProjectId(m.mod_id)).or_default().push(qvf);
async move { Ok(acc) }
},
)
.await?;
let loader_field_enum_values: Vec<QueryLoaderFieldEnumValue> = sqlx::query!(
"
SELECT DISTINCT id, enum_id, value, ordering, created, metadata
FROM loader_field_enum_values lfev
WHERE id = ANY($1)
ORDER BY enum_id, ordering, created DESC
",
&loader_field_enum_value_ids
.iter()
.map(|x| x.0)
.collect::<Vec<_>>()
)
.fetch(&mut *exec)
.map_ok(|m| QueryLoaderFieldEnumValue {
id: LoaderFieldEnumValueId(m.id),
enum_id: LoaderFieldEnumId(m.enum_id),
value: m.value,
ordering: m.ordering, ordering: m.ordering,
}); created: m.created,
async move { Ok(acc) } metadata: m.metadata,
} })
).await?; .try_collect()
.await?;
let links: DashMap<ProjectId, Vec<LinkUrl>> = sqlx::query!( let mods_gallery: DashMap<ProjectId, Vec<GalleryItem>> = sqlx::query!(
" "
SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering
FROM mods_links ml FROM mods_gallery mg
INNER JOIN mods m ON ml.joining_mod_id = m.id INNER JOIN mods m ON mg.mod_id = m.id
INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id WHERE m.id = ANY($1) OR m.slug = ANY($2)
WHERE m.id = ANY($1) OR m.slug = ANY($2) ",
", &project_ids_parsed,
&project_ids_parsed, &slugs
&slugs ).fetch(&mut *exec)
).fetch(&mut *exec) .try_fold(DashMap::new(), |acc : DashMap<ProjectId, Vec<GalleryItem>>, m| {
.try_fold(DashMap::new(), |acc : DashMap<ProjectId, Vec<LinkUrl>>, m| { acc.entry(ProjectId(m.mod_id))
acc.entry(ProjectId(m.mod_id)) .or_default()
.or_default() .push(GalleryItem {
.push(LinkUrl { image_url: m.image_url,
platform_id: LinkPlatformId(m.platform_id), featured: m.featured.unwrap_or(false),
platform_name: m.platform_name, name: m.name,
url: m.url, description: m.description,
donation: m.donation, created: m.created,
}); ordering: m.ordering,
async move { Ok(acc) } });
} async move { Ok(acc) }
).await?; }
).await?;
#[derive(Default)] let links: DashMap<ProjectId, Vec<LinkUrl>> = sqlx::query!(
struct VersionLoaderData { "
loaders: Vec<String>, SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation
project_types: Vec<String>, FROM mods_links ml
games: Vec<String>, INNER JOIN mods m ON ml.joining_mod_id = m.id
loader_loader_field_ids: Vec<LoaderFieldId>, INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id
} WHERE m.id = ANY($1) OR m.slug = ANY($2)
",
&project_ids_parsed,
&slugs
).fetch(&mut *exec)
.try_fold(DashMap::new(), |acc : DashMap<ProjectId, Vec<LinkUrl>>, m| {
acc.entry(ProjectId(m.mod_id))
.or_default()
.push(LinkUrl {
platform_id: LinkPlatformId(m.platform_id),
platform_name: m.platform_name,
url: m.url,
donation: m.donation,
});
async move { Ok(acc) }
}
).await?;
let loader_field_ids = DashSet::new(); #[derive(Default)]
let loaders_ptypes_games: DashMap<ProjectId, VersionLoaderData> = sqlx::query!( struct VersionLoaderData {
" loaders: Vec<String>,
SELECT DISTINCT mod_id, project_types: Vec<String>,
ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, games: Vec<String>,
ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, loader_loader_field_ids: Vec<LoaderFieldId>,
ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,
ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields
FROM versions v
INNER JOIN loaders_versions lv ON v.id = lv.version_id
INNER JOIN loaders l ON lv.loader_id = l.id
INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id
INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id
INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id
INNER JOIN games g ON lptg.game_id = g.id
LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id
WHERE v.id = ANY($1)
GROUP BY mod_id
",
&all_version_ids.iter().map(|x| x.0).collect::<Vec<_>>()
).fetch(&mut *exec)
.map_ok(|m| {
let project_id = ProjectId(m.mod_id);
// Add loader fields to the set we need to fetch
let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::<Vec<_>>();
for loader_field_id in loader_loader_field_ids.iter() {
loader_field_ids.insert(*loader_field_id);
} }
// Add loader + loader associated data to the map let loader_field_ids = DashSet::new();
let version_loader_data = VersionLoaderData { let loaders_ptypes_games: DashMap<ProjectId, VersionLoaderData> = sqlx::query!(
loaders: m.loaders.unwrap_or_default(), "
project_types: m.project_types.unwrap_or_default(), SELECT DISTINCT mod_id,
games: m.games.unwrap_or_default(), ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,
loader_loader_field_ids, ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,
}; ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,
ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields
FROM versions v
INNER JOIN loaders_versions lv ON v.id = lv.version_id
INNER JOIN loaders l ON lv.loader_id = l.id
INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id
INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id
INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id
INNER JOIN games g ON lptg.game_id = g.id
LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id
WHERE v.id = ANY($1)
GROUP BY mod_id
",
&all_version_ids.iter().map(|x| x.0).collect::<Vec<_>>()
).fetch(&mut *exec)
.map_ok(|m| {
let project_id = ProjectId(m.mod_id);
(project_id, version_loader_data) // Add loader fields to the set we need to fetch
let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::<Vec<_>>();
for loader_field_id in loader_loader_field_ids.iter() {
loader_field_ids.insert(*loader_field_id);
}
} // Add loader + loader associated data to the map
).try_collect().await?; let version_loader_data = VersionLoaderData {
loaders: m.loaders.unwrap_or_default(),
project_types: m.project_types.unwrap_or_default(),
games: m.games.unwrap_or_default(),
loader_loader_field_ids,
};
let loader_fields: Vec<QueryLoaderField> = sqlx::query!( (project_id, version_loader_data)
"
SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional
FROM loader_fields lf
WHERE id = ANY($1)
",
&loader_field_ids.iter().map(|x| x.0).collect::<Vec<_>>()
)
.fetch(&mut *exec)
.map_ok(|m| QueryLoaderField {
id: LoaderFieldId(m.id),
field: m.field,
field_type: m.field_type,
enum_type: m.enum_type.map(LoaderFieldEnumId),
min_val: m.min_val,
max_val: m.max_val,
optional: m.optional,
})
.try_collect()
.await?;
                }
                ).try_collect().await?;

                let loader_fields: Vec<QueryLoaderField> = sqlx::query!(
                    "
                    SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional
                    FROM loader_fields lf
                    WHERE id = ANY($1)
                    ",
                    &loader_field_ids.iter().map(|x| x.0).collect::<Vec<_>>()
                )
                .fetch(&mut *exec)
                .map_ok(|m| QueryLoaderField {
                    id: LoaderFieldId(m.id),
                    field: m.field,
                    field_type: m.field_type,
                    enum_type: m.enum_type.map(LoaderFieldEnumId),
                    min_val: m.min_val,
                    max_val: m.max_val,
                    optional: m.optional,
                })
                .try_collect()
                .await?;

                let projects = sqlx::query!(
                    "
                    SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,
                    m.icon_url icon_url, m.description description, m.published published,
                    m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,
                    m.license_url license_url,
                    m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,
                    m.webhook_sent, m.color,
                    t.id thread_id, m.monetization_status monetization_status,
                    ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,
                    ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories
                    FROM mods m
                    INNER JOIN threads t ON t.mod_id = m.id
                    LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id
                    LEFT JOIN categories c ON mc.joining_category_id = c.id
                    WHERE m.id = ANY($1) OR m.slug = ANY($2)
                    GROUP BY t.id, m.id;
                    ",
                    &project_ids_parsed,
                    &slugs,
                )
                .fetch(&mut *exec)
                .try_fold(DashMap::new(), |acc, m| {
                    let id = m.id;
                    let project_id = ProjectId(id);
                    let VersionLoaderData {
@ -815,54 +769,54 @@ impl Project {
                        project_types,
                        games,
                        loader_loader_field_ids,
                    } = loaders_ptypes_games.remove(&project_id).map(|x|x.1).unwrap_or_default();
                    let mut versions = versions.remove(&project_id).map(|x| x.1).unwrap_or_default();
                    let mut gallery = mods_gallery.remove(&project_id).map(|x| x.1).unwrap_or_default();
                    let urls = links.remove(&project_id).map(|x| x.1).unwrap_or_default();
                    let version_fields = version_fields.remove(&project_id).map(|x| x.1).unwrap_or_default();
                    let loader_fields = loader_fields.iter()
                        .filter(|x| loader_loader_field_ids.contains(&x.id))
                        .collect::<Vec<_>>();

                    let project = QueryProject {
                        inner: Project {
                            id: ProjectId(id),
                            team_id: TeamId(m.team_id),
                            organization_id: m.organization_id.map(OrganizationId),
                            name: m.name.clone(),
                            summary: m.summary.clone(),
                            downloads: m.downloads,
                            icon_url: m.icon_url.clone(),
                            published: m.published,
                            updated: m.updated,
                            license_url: m.license_url.clone(),
                            status: ProjectStatus::from_string(
                                &m.status,
                            ),
                            requested_status: m.requested_status.map(|x| ProjectStatus::from_string(
                                &x,
                            )),
                            license: m.license.clone(),
                            slug: m.slug.clone(),
                            description: m.description.clone(),
                            follows: m.follows,
                            moderation_message: m.moderation_message,
                            moderation_message_body: m.moderation_message_body,
                            approved: m.approved,
                            webhook_sent: m.webhook_sent,
                            color: m.color.map(|x| x as u32),
                            queued: m.queued,
                            monetization_status: MonetizationStatus::from_string(
                                &m.monetization_status,
                            ),
                            loaders,
                        },
                        categories: m.categories.unwrap_or_default(),
                        additional_categories: m.additional_categories.unwrap_or_default(),
                        project_types,
                        games,
                        versions: {
                            // Each version is a tuple of (VersionId, DateTime<Utc>)
                            versions.sort_by(|a, b| a.1.cmp(&b.1));
                            versions.into_iter().map(|x| x.0).collect()
@ -872,32 +826,20 @@ impl Project {
                            gallery
                        },
                        urls,
                        aggregate_version_fields: VersionField::from_query_json(version_fields, &loader_fields, &loader_field_enum_values, true),
                        thread_id: ThreadId(m.thread_id),
                    };

                    acc.insert(m.id, (m.slug, project));
                    async move { Ok(acc) }
                })
                .await?;

                Ok(projects)
            },
        ).await?;

        Ok(val)
    }
    pub async fn get_dependencies<'a, E>(


@ -1,9 +1,12 @@
use super::ids::*;
use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::parse_base62;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;
const SESSIONS_NAMESPACE: &str = "sessions";
const SESSIONS_IDS_NAMESPACE: &str = "sessions_ids";
@ -79,7 +82,7 @@ pub struct Session {
}

impl Session {
    pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
        id: T,
        exec: E,
        redis: &RedisPool,
@ -120,7 +123,7 @@ impl Session {
        Session::get_many(&ids, exec, redis).await
    }

    pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
        session_strings: &[T],
        exec: E,
        redis: &RedisPool,
@ -130,109 +133,60 @@ impl Session {
    {
        use futures::TryStreamExt;

        let val = redis.get_cached_keys_with_slug(
            SESSIONS_NAMESPACE,
            SESSIONS_IDS_NAMESPACE,
            true,
            session_strings,
            |ids| async move {
                let session_ids: Vec<i64> = ids
                    .iter()
                    .flat_map(|x| parse_base62(&x.to_string()).ok())
                    .map(|x| x as i64)
                    .collect();
                let slugs = ids
                    .into_iter()
                    .map(|x| x.to_string())
                    .collect::<Vec<_>>();

                let db_sessions = sqlx::query!(
                    "
                    SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,
                    city, country, ip, user_agent
                    FROM sessions
                    WHERE id = ANY($1) OR session = ANY($2)
                    ORDER BY created DESC
                    ",
                    &session_ids,
                    &slugs,
                )
                .fetch(exec)
                .try_fold(DashMap::new(), |acc, x| {
                    let session = Session {
                        id: SessionId(x.id),
                        session: x.session.clone(),
                        user_id: UserId(x.user_id),
                        created: x.created,
                        last_login: x.last_login,
                        expires: x.expires,
                        refresh_expires: x.refresh_expires,
                        os: x.os,
                        platform: x.platform,
                        city: x.city,
                        country: x.country,
                        ip: x.ip,
                        user_agent: x.user_agent,
                    };

                    acc.insert(x.id, (Some(x.session), session));

                    async move { Ok(acc) }
                })
                .await?;

                Ok(db_sessions)
            }).await?;

        Ok(val)
    }
    pub async fn get_user_sessions<'a, E>(


@ -3,6 +3,8 @@ use crate::{
    database::redis::RedisPool,
    models::teams::{OrganizationPermissions, ProjectPermissions},
};
use dashmap::DashMap;
use futures::TryStreamExt;
use itertools::Itertools;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
@ -203,87 +205,56 @@ impl TeamMember {
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
    {
        if team_ids.is_empty() {
            return Ok(Vec::new());
        }

        let val = redis.get_cached_keys(
            TEAMS_NAMESPACE,
            &team_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
            |team_ids| async move {
                let teams = sqlx::query!(
                    "
                    SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,
                    accepted, payouts_split,
                    ordering, user_id
                    FROM team_members
                    WHERE team_id = ANY($1)
                    ORDER BY team_id, ordering;
                    ",
                    &team_ids
                )
                .fetch(exec)
                .try_fold(DashMap::new(), |acc: DashMap<i64, Vec<TeamMember>>, m| {
                    let member = TeamMember {
                        id: TeamMemberId(m.id),
                        team_id: TeamId(m.team_id),
                        role: m.member_role,
                        is_owner: m.is_owner,
                        permissions: ProjectPermissions::from_bits(m.permissions as u64)
                            .unwrap_or_default(),
                        organization_permissions: m
                            .organization_permissions
                            .map(|p| OrganizationPermissions::from_bits(p as u64).unwrap_or_default()),
                        accepted: m.accepted,
                        user_id: UserId(m.user_id),
                        payouts_split: m.payouts_split,
                        ordering: m.ordering,
                    };

                    acc.entry(m.team_id)
                        .or_default()
                        .push(member);

                    async move { Ok(acc) }
                })
                .await?;

                Ok(teams)
            },
        ).await?;

        Ok(val.into_iter().flatten().collect())
    }
    pub async fn clear_cache(id: TeamId, redis: &RedisPool) -> Result<(), super::DatabaseError> {
@ -315,8 +286,6 @@ impl TeamMember {
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
    {
use futures::stream::TryStreamExt;
        let team_ids_parsed: Vec<i64> = team_ids.iter().map(|x| x.0).collect();

        let team_members = sqlx::query!(


@ -5,8 +5,11 @@ use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::users::Badges;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;

const USERS_NAMESPACE: &str = "users";
const USER_USERNAMES_NAMESPACE: &str = "users_usernames";
@ -132,7 +135,7 @@ impl User {
        User::get_many(&ids, exec, redis).await
    }

    pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>(
        users_strings: &[T],
        exec: E,
        redis: &RedisPool,
@ -142,123 +145,73 @@ impl User {
    {
        use futures::TryStreamExt;

        let val = redis.get_cached_keys_with_slug(
            USERS_NAMESPACE,
            USER_USERNAMES_NAMESPACE,
            false,
            users_strings,
            |ids| async move {
                let user_ids: Vec<i64> = ids
                    .iter()
                    .flat_map(|x| parse_base62(&x.to_string()).ok())
                    .map(|x| x as i64)
                    .collect();
                let slugs = ids
                    .into_iter()
                    .map(|x| x.to_string().to_lowercase())
                    .collect::<Vec<_>>();

                let users = sqlx::query!(
                    "
                    SELECT id, name, email,
                    avatar_url, username, bio,
                    created, role, badges,
                    balance,
                    github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id,
                    email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email,
                    venmo_handle
                    FROM users
                    WHERE id = ANY($1) OR LOWER(username) = ANY($2)
                    ",
                    &user_ids,
                    &slugs,
                )
                .fetch(exec)
                .try_fold(DashMap::new(), |acc, u| {
                    let user = User {
                        id: UserId(u.id),
                        github_id: u.github_id,
                        discord_id: u.discord_id,
                        gitlab_id: u.gitlab_id,
                        google_id: u.google_id,
                        steam_id: u.steam_id,
                        microsoft_id: u.microsoft_id,
                        name: u.name,
                        email: u.email,
                        email_verified: u.email_verified,
                        avatar_url: u.avatar_url,
                        username: u.username.clone(),
                        bio: u.bio,
                        created: u.created,
                        role: u.role,
                        badges: Badges::from_bits(u.badges as u64).unwrap_or_default(),
                        balance: u.balance,
                        password: u.password,
                        paypal_id: u.paypal_id,
                        paypal_country: u.paypal_country,
                        paypal_email: u.paypal_email,
                        venmo_handle: u.venmo_handle,
                        totp_secret: u.totp_secret,
                    };

                    acc.insert(u.id, (Some(u.username), user));

                    async move { Ok(acc) }
                })
                .await?;

                Ok(users)
            }).await?;

        Ok(val)
    }
    pub async fn get_email<'a, E>(email: &str, exec: E) -> Result<Option<UserId>, sqlx::Error>


@ -8,6 +8,7 @@ use crate::database::redis::RedisPool;
use crate::models::projects::{FileType, VersionStatus};
use chrono::{DateTime, Utc};
use dashmap::{DashMap, DashSet};
use futures::TryStreamExt;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
@ -469,301 +470,263 @@ impl Version {
    where
        E: sqlx::Acquire<'a, Database = sqlx::Postgres>,
    {
        let mut val = redis.get_cached_keys(
            VERSIONS_NAMESPACE,
            &version_ids.iter().map(|x| x.0).collect::<Vec<_>>(),
            |version_ids| async move {
                let mut exec = exec.acquire().await?;

                let loader_field_enum_value_ids = DashSet::new();
let version_fields: DashMap<VersionId, Vec<QueryVersionField>> = sqlx::query!(
"
SELECT version_id, field_id, int_value, enum_value, string_value
FROM version_fields
WHERE version_id = ANY($1)
",
&version_ids
)
.fetch(&mut *exec)
.try_fold(
DashMap::new(),
|acc: DashMap<VersionId, Vec<QueryVersionField>>, m| {
let qvf = QueryVersionField {
version_id: VersionId(m.version_id),
field_id: LoaderFieldId(m.field_id),
int_value: m.int_value,
enum_value: m.enum_value.map(LoaderFieldEnumValueId),
string_value: m.string_value,
};
                    if let Some(enum_value) = m.enum_value {
                        loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value));
                    }

                    acc.entry(VersionId(m.version_id)).or_default().push(qvf);
                    async move { Ok(acc) }
                },
            )
            .await?;
let mut version_ids_parsed: Vec<i64> = version_ids.iter().map(|x| x.0).collect(); #[derive(Default)]
struct VersionLoaderData {
loaders: Vec<String>,
project_types: Vec<String>,
games: Vec<String>,
loader_loader_field_ids: Vec<LoaderFieldId>,
}
let mut found_versions = Vec::new(); let loader_field_ids = DashSet::new();
let loaders_ptypes_games: DashMap<VersionId, VersionLoaderData> = sqlx::query!(
"
SELECT DISTINCT version_id,
ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,
ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,
ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,
ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields
FROM versions v
INNER JOIN loaders_versions lv ON v.id = lv.version_id
INNER JOIN loaders l ON lv.loader_id = l.id
INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id
INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id
INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id
INNER JOIN games g ON lptg.game_id = g.id
LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id
WHERE v.id = ANY($1)
GROUP BY version_id
",
&version_ids
).fetch(&mut *exec)
.map_ok(|m| {
let version_id = VersionId(m.version_id);
let versions = redis // Add loader fields to the set we need to fetch
.multi_get::<String>( let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::<Vec<_>>();
VERSIONS_NAMESPACE, for loader_field_id in loader_loader_field_ids.iter() {
version_ids_parsed loader_field_ids.insert(*loader_field_id);
.clone() }
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>(),
)
.await?;
for version in versions { // Add loader + loader associated data to the map
if let Some(version) = let version_loader_data = VersionLoaderData {
version.and_then(|x| serde_json::from_str::<QueryVersion>(&x).ok()) loaders: m.loaders.unwrap_or_default(),
{ project_types: m.project_types.unwrap_or_default(),
version_ids_parsed.retain(|x| &version.inner.id.0 != x); games: m.games.unwrap_or_default(),
found_versions.push(version); loader_loader_field_ids,
continue; };
} (version_id,version_loader_data)
}
if !version_ids_parsed.is_empty() {
let loader_field_enum_value_ids = DashSet::new();
let version_fields: DashMap<VersionId, Vec<QueryVersionField>> = sqlx::query!(
"
SELECT version_id, field_id, int_value, enum_value, string_value
FROM version_fields
WHERE version_id = ANY($1)
",
&version_ids_parsed
)
.fetch(&mut *exec)
.try_fold(
DashMap::new(),
|acc: DashMap<VersionId, Vec<QueryVersionField>>, m| {
let qvf = QueryVersionField {
version_id: VersionId(m.version_id),
field_id: LoaderFieldId(m.field_id),
int_value: m.int_value,
enum_value: m.enum_value.map(LoaderFieldEnumValueId),
string_value: m.string_value,
};
if let Some(enum_value) = m.enum_value {
loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value));
} }
).try_collect().await?;
acc.entry(VersionId(m.version_id)).or_default().push(qvf); // Fetch all loader fields from any version
async move { Ok(acc) } let loader_fields: Vec<QueryLoaderField> = sqlx::query!(
}, "
) SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional
.await?; FROM loader_fields lf
WHERE id = ANY($1)
",
&loader_field_ids.iter().map(|x| x.0).collect::<Vec<_>>()
)
.fetch(&mut *exec)
.map_ok(|m| QueryLoaderField {
id: LoaderFieldId(m.id),
field: m.field,
field_type: m.field_type,
enum_type: m.enum_type.map(LoaderFieldEnumId),
min_val: m.min_val,
max_val: m.max_val,
optional: m.optional,
})
.try_collect()
.await?;
#[derive(Default)] let loader_field_enum_values: Vec<QueryLoaderFieldEnumValue> = sqlx::query!(
struct VersionLoaderData { "
loaders: Vec<String>, SELECT DISTINCT id, enum_id, value, ordering, created, metadata
project_types: Vec<String>, FROM loader_field_enum_values lfev
games: Vec<String>, WHERE id = ANY($1)
loader_loader_field_ids: Vec<LoaderFieldId>, ORDER BY enum_id, ordering, created ASC
} ",
&loader_field_enum_value_ids
.iter()
.map(|x| x.0)
.collect::<Vec<_>>()
)
.fetch(&mut *exec)
.map_ok(|m| QueryLoaderFieldEnumValue {
id: LoaderFieldEnumValueId(m.id),
enum_id: LoaderFieldEnumId(m.enum_id),
value: m.value,
ordering: m.ordering,
created: m.created,
metadata: m.metadata,
})
.try_collect()
.await?;
let loader_field_ids = DashSet::new(); #[derive(Deserialize)]
let loaders_ptypes_games: DashMap<VersionId, VersionLoaderData> = sqlx::query!( struct Hash {
" pub file_id: FileId,
SELECT DISTINCT version_id, pub algorithm: String,
ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, pub hash: String,
ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,
ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,
ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields
FROM versions v
INNER JOIN loaders_versions lv ON v.id = lv.version_id
INNER JOIN loaders l ON lv.loader_id = l.id
INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id
INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id
INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id
INNER JOIN games g ON lptg.game_id = g.id
LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id
WHERE v.id = ANY($1)
GROUP BY version_id
",
&version_ids_parsed
).fetch(&mut *exec)
.map_ok(|m| {
let version_id = VersionId(m.version_id);
// Add loader fields to the set we need to fetch
let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::<Vec<_>>();
for loader_field_id in loader_loader_field_ids.iter() {
loader_field_ids.insert(*loader_field_id);
} }
// Add loader + loader associated data to the map #[derive(Deserialize)]
let version_loader_data = VersionLoaderData { struct File {
loaders: m.loaders.unwrap_or_default(), pub id: FileId,
project_types: m.project_types.unwrap_or_default(), pub url: String,
games: m.games.unwrap_or_default(), pub filename: String,
loader_loader_field_ids, pub primary: bool,
}; pub size: u32,
(version_id,version_loader_data) pub file_type: Option<FileType>,
} }
).try_collect().await?;
// Fetch all loader fields from any version let file_ids = DashSet::new();
let loader_fields: Vec<QueryLoaderField> = sqlx::query!( let reverse_file_map = DashMap::new();
" let files : DashMap<VersionId, Vec<File>> = sqlx::query!(
SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional "
FROM loader_fields lf SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type
WHERE id = ANY($1) FROM files f
", WHERE f.version_id = ANY($1)
&loader_field_ids.iter().map(|x| x.0).collect::<Vec<_>>() ",
) &version_ids
.fetch(&mut *exec) ).fetch(&mut *exec)
.map_ok(|m| QueryLoaderField { .try_fold(DashMap::new(), |acc : DashMap<VersionId, Vec<File>>, m| {
id: LoaderFieldId(m.id), let file = File {
field: m.field, id: FileId(m.id),
field_type: m.field_type, url: m.url,
enum_type: m.enum_type.map(LoaderFieldEnumId), filename: m.filename,
min_val: m.min_val, primary: m.is_primary,
max_val: m.max_val, size: m.size as u32,
optional: m.optional, file_type: m.file_type.map(|x| FileType::from_string(&x)),
}) };
.try_collect()
.await?;
let loader_field_enum_values: Vec<QueryLoaderFieldEnumValue> = sqlx::query!( file_ids.insert(FileId(m.id));
" reverse_file_map.insert(FileId(m.id), VersionId(m.version_id));
SELECT DISTINCT id, enum_id, value, ordering, created, metadata
FROM loader_field_enum_values lfev
WHERE id = ANY($1)
ORDER BY enum_id, ordering, created ASC
",
&loader_field_enum_value_ids
.iter()
.map(|x| x.0)
.collect::<Vec<_>>()
)
.fetch(&mut *exec)
.map_ok(|m| QueryLoaderFieldEnumValue {
id: LoaderFieldEnumValueId(m.id),
enum_id: LoaderFieldEnumId(m.enum_id),
value: m.value,
ordering: m.ordering,
created: m.created,
metadata: m.metadata,
})
.try_collect()
.await?;
#[derive(Deserialize)] acc.entry(VersionId(m.version_id))
struct Hash { .or_default()
pub file_id: FileId, .push(file);
pub algorithm: String, async move { Ok(acc) }
pub hash: String,
}
#[derive(Deserialize)]
struct File {
pub id: FileId,
pub url: String,
pub filename: String,
pub primary: bool,
pub size: u32,
pub file_type: Option<FileType>,
}
let file_ids = DashSet::new();
let reverse_file_map = DashMap::new();
let files : DashMap<VersionId, Vec<File>> = sqlx::query!(
"
SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type
FROM files f
WHERE f.version_id = ANY($1)
",
&version_ids_parsed
).fetch(&mut *exec)
.try_fold(DashMap::new(), |acc : DashMap<VersionId, Vec<File>>, m| {
let file = File {
id: FileId(m.id),
url: m.url,
filename: m.filename,
primary: m.is_primary,
size: m.size as u32,
file_type: m.file_type.map(|x| FileType::from_string(&x)),
};
file_ids.insert(FileId(m.id));
reverse_file_map.insert(FileId(m.id), VersionId(m.version_id));
acc.entry(VersionId(m.version_id))
.or_default()
.push(file);
async move { Ok(acc) }
}
).await?;
let hashes: DashMap<VersionId, Vec<Hash>> = sqlx::query!(
"
SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash
FROM hashes
WHERE file_id = ANY($1)
",
&file_ids.iter().map(|x| x.0).collect::<Vec<_>>()
)
.fetch(&mut *exec)
.try_fold(DashMap::new(), |acc: DashMap<VersionId, Vec<Hash>>, m| {
if let Some(found_hash) = m.hash {
let hash = Hash {
file_id: FileId(m.file_id),
algorithm: m.algorithm,
hash: found_hash,
};
if let Some(version_id) = reverse_file_map.get(&FileId(m.file_id)) {
acc.entry(*version_id).or_default().push(hash);
} }
} ).await?;
async move { Ok(acc) }
})
.await?;
let dependencies : DashMap<VersionId, Vec<QueryDependency>> = sqlx::query!( let hashes: DashMap<VersionId, Vec<Hash>> = sqlx::query!(
" "
SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash
FROM dependencies d FROM hashes
WHERE dependent_id = ANY($1) WHERE file_id = ANY($1)
", ",
&version_ids_parsed &file_ids.iter().map(|x| x.0).collect::<Vec<_>>()
).fetch(&mut *exec) )
.try_fold(DashMap::new(), |acc : DashMap<_,Vec<QueryDependency>>, m| { .fetch(&mut *exec)
let dependency = QueryDependency { .try_fold(DashMap::new(), |acc: DashMap<VersionId, Vec<Hash>>, m| {
project_id: m.dependency_project_id.map(ProjectId), if let Some(found_hash) = m.hash {
version_id: m.dependency_version_id.map(VersionId), let hash = Hash {
file_name: m.file_name, file_id: FileId(m.file_id),
dependency_type: m.dependency_type, algorithm: m.algorithm,
}; hash: found_hash,
};
acc.entry(VersionId(m.version_id)) if let Some(version_id) = reverse_file_map.get(&FileId(m.file_id)) {
.or_default() acc.entry(*version_id).or_default().push(hash);
.push(dependency); }
async move { Ok(acc) } }
} async move { Ok(acc) }
).await?; })
.await?;
let db_versions: Vec<QueryVersion> = sqlx::query!( let dependencies : DashMap<VersionId, Vec<QueryDependency>> = sqlx::query!(
" "
SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number, SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type
v.changelog changelog, v.date_published date_published, v.downloads downloads, FROM dependencies d
v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering WHERE dependent_id = ANY($1)
FROM versions v ",
WHERE v.id = ANY($1) &version_ids
ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC; ).fetch(&mut *exec)
", .try_fold(DashMap::new(), |acc : DashMap<_,Vec<QueryDependency>>, m| {
&version_ids_parsed let dependency = QueryDependency {
) project_id: m.dependency_project_id.map(ProjectId),
.fetch_many(&mut *exec) version_id: m.dependency_version_id.map(VersionId),
.try_filter_map(|e| async { file_name: m.file_name,
Ok(e.right().map(|v| dependency_type: m.dependency_type,
{ };
acc.entry(VersionId(m.version_id))
.or_default()
.push(dependency);
async move { Ok(acc) }
}
).await?;
let res = sqlx::query!(
"
SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,
v.changelog changelog, v.date_published date_published, v.downloads downloads,
v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering
FROM versions v
WHERE v.id = ANY($1);
",
&version_ids
)
.fetch(&mut *exec)
.try_fold(DashMap::new(), |acc, v| {
                let version_id = VersionId(v.id);
                let VersionLoaderData {
                    loaders,
                    project_types,
                    games,
                    loader_loader_field_ids,
                } = loaders_ptypes_games.remove(&version_id).map(|x|x.1).unwrap_or_default();
                let files = files.remove(&version_id).map(|x|x.1).unwrap_or_default();
                let hashes = hashes.remove(&version_id).map(|x|x.1).unwrap_or_default();
                let version_fields = version_fields.remove(&version_id).map(|x|x.1).unwrap_or_default();
                let dependencies = dependencies.remove(&version_id).map(|x|x.1).unwrap_or_default();
                let loader_fields = loader_fields.iter()
                    .filter(|x| loader_loader_field_ids.contains(&x.id))
                    .collect::<Vec<_>>();

                let query_version = QueryVersion {
                    inner: Version {
                        id: VersionId(v.id),
                        project_id: ProjectId(v.mod_id),
@ -821,22 +784,20 @@ impl Version {
                        project_types,
                        games,
                        dependencies,
                };

                acc.insert(v.id, query_version);
                async move { Ok(acc) }
            })
            .await?;

            Ok(res)
        },
    ).await?;

        val.sort();

        Ok(val)
    }
    pub async fn get_file_from_hash<'a, 'b, E>(
@ -866,110 +827,66 @@ impl Version {
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
    {
        let val = redis.get_cached_keys(
            VERSION_FILES_NAMESPACE,
            &hashes.iter().map(|x| format!("{algorithm}_{x}")).collect::<Vec<_>>(),
            |file_ids| async move {
                let files = sqlx::query!(
                    "
                    SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,
                    JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes
                    FROM files f
                    INNER JOIN versions v on v.id = f.version_id
                    INNER JOIN hashes h on h.file_id = f.id
                    WHERE h.algorithm = $1 AND h.hash = ANY($2)
                    GROUP BY f.id, v.mod_id, v.date_published
                    ORDER BY v.date_published
                    ",
                    algorithm,
                    &file_ids.into_iter().flat_map(|x| x.split('_').last().map(|x| x.as_bytes().to_vec())).collect::<Vec<_>>(),
                )
                .fetch(executor)
                .try_fold(DashMap::new(), |acc, f| {
                    #[derive(Deserialize)]
                    struct Hash {
                        pub algorithm: String,
                        pub hash: String,
                    }

                    let hashes = serde_json::from_value::<Vec<Hash>>(
                        f.hashes.unwrap_or_default(),
                    )
                    .ok()
                    .unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash))
                    .collect::<HashMap<_, _>>();

                    if let Some(hash) = hashes.get(&algorithm) {
                        let key = format!("{algorithm}_{hash}");

                        let file = SingleFile {
                            id: FileId(f.id),
                            version_id: VersionId(f.version_id),
                            project_id: ProjectId(f.mod_id),
                            url: f.url,
                            filename: f.filename,
                            hashes,
                            primary: f.is_primary,
                            size: f.size as u32,
                            file_type: f.file_type.map(|x| FileType::from_string(&x)),
                        };

                        acc.insert(key, file);
                    }

                    async move { Ok(acc) }
                })
                .await?;

                Ok(files)
            }
        ).await?;

        Ok(val)
    }
    pub async fn clear_cache(


@ -1,10 +1,20 @@
use super::models::DatabaseError;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use chrono::{TimeZone, Utc};
use dashmap::DashMap;
use deadpool_redis::{Config, Runtime};
use redis::{cmd, Cmd, ExistenceCheck, SetExpiry, SetOptions};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
use std::future::Future;
use std::hash::Hash;
use std::pin::Pin;
use std::time::Duration;

const DEFAULT_EXPIRY: i64 = 60 * 60 * 12; // 12 hours
const ACTUAL_EXPIRY: i64 = 60 * 30; // 30 minutes
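// The two constants intentionally differ: entries stay in Redis for DEFAULT_EXPIRY, but are
// treated as stale once they are older than ACTUAL_EXPIRY. A stale entry can still be served
// while a single worker refreshes it, which is what prevents the cache stampede this change
// targets.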
#[derive(Clone)]
pub struct RedisPool {
@ -47,6 +57,364 @@ impl RedisPool {
            meta_namespace: self.meta_namespace.clone(),
        })
    }
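    // The four helpers below form the new generalized cache layer: `get_cached_keys` and
    // `get_cached_keys_raw` cache by primary key only, while the `_with_slug` variants also
    // maintain a slug -> id lookup namespace. Each takes a closure that fetches only the keys
    // that were missing or stale in Redis.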
pub async fn get_cached_keys<F, Fut, T, K>(
&self,
namespace: &str,
keys: &[K],
closure: F,
) -> Result<Vec<T>, DatabaseError>
where
F: FnOnce(Vec<K>) -> Fut,
Fut: Future<Output = Result<DashMap<K, T>, DatabaseError>>,
T: Serialize + DeserializeOwned,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug,
{
Ok(self
.get_cached_keys_raw(namespace, keys, closure)
.await?
.into_iter()
.map(|x| x.1)
.collect())
}
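    // A minimal sketch of how a model might call this helper (the `Notification` type,
    // `NOTIFICATIONS_NAMESPACE`, and `fetch_notifications_from_db` are illustrative stand-ins,
    // not part of this change):
    //
    //     let notifications: Vec<Notification> = redis
    //         .get_cached_keys(NOTIFICATIONS_NAMESPACE, &ids, |missing| async move {
    //             // Query the database for just the missing/stale ids and return a
    //             // DashMap keyed by those same ids.
    //             fetch_notifications_from_db(&missing).await
    //         })
    //         .await?;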
pub async fn get_cached_keys_raw<F, Fut, T, K>(
&self,
namespace: &str,
keys: &[K],
closure: F,
) -> Result<HashMap<K, T>, DatabaseError>
where
F: FnOnce(Vec<K>) -> Fut,
Fut: Future<Output = Result<DashMap<K, T>, DatabaseError>>,
T: Serialize + DeserializeOwned,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug,
{
self.get_cached_keys_raw_with_slug(namespace, None, false, keys, |ids| async move {
Ok(closure(ids)
.await?
.into_iter()
.map(|(key, val)| (key, (None::<String>, val)))
.collect())
})
.await
}
pub async fn get_cached_keys_with_slug<F, Fut, T, I, K, S>(
&self,
namespace: &str,
slug_namespace: &str,
case_sensitive: bool,
keys: &[I],
closure: F,
) -> Result<Vec<T>, DatabaseError>
where
F: FnOnce(Vec<I>) -> Fut,
Fut: Future<Output = Result<DashMap<K, (Option<S>, T)>, DatabaseError>>,
T: Serialize + DeserializeOwned,
I: Display + Hash + Eq + PartialEq + Clone + Debug,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize,
S: Display + Clone + DeserializeOwned + Serialize + Debug,
{
Ok(self
.get_cached_keys_raw_with_slug(
namespace,
Some(slug_namespace),
case_sensitive,
keys,
closure,
)
.await?
.into_iter()
.map(|x| x.1)
.collect())
}
pub async fn get_cached_keys_raw_with_slug<F, Fut, T, I, K, S>(
&self,
namespace: &str,
slug_namespace: Option<&str>,
case_sensitive: bool,
keys: &[I],
closure: F,
) -> Result<HashMap<K, T>, DatabaseError>
where
F: FnOnce(Vec<I>) -> Fut,
Fut: Future<Output = Result<DashMap<K, (Option<S>, T)>, DatabaseError>>,
T: Serialize + DeserializeOwned,
I: Display + Hash + Eq + PartialEq + Clone + Debug,
K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize,
S: Display + Clone + DeserializeOwned + Serialize + Debug,
{
let connection = self.connect().await?.connection;
let ids = keys
.iter()
.map(|x| (x.to_string(), x.clone()))
.collect::<DashMap<String, I>>();
if ids.is_empty() {
return Ok(HashMap::new());
}
let get_cached_values =
|ids: DashMap<String, I>, mut connection: deadpool_redis::Connection| async move {
let slug_ids = if let Some(slug_namespace) = slug_namespace {
cmd("MGET")
.arg(
ids.iter()
.map(|x| {
format!(
"{}_{slug_namespace}:{}",
self.meta_namespace,
if case_sensitive {
x.value().to_string()
} else {
x.value().to_string().to_lowercase()
}
)
})
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<String>>>(&mut connection)
.await?
.into_iter()
.flatten()
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cached_values = cmd("MGET")
.arg(
ids.iter()
.map(|x| x.value().to_string())
.chain(ids.iter().filter_map(|x| {
parse_base62(&x.value().to_string())
.ok()
.map(|x| x.to_string())
}))
.chain(slug_ids)
.map(|x| format!("{}_{namespace}:{x}", self.meta_namespace))
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<String>>>(&mut connection)
.await?
.into_iter()
.filter_map(|x| {
x.and_then(|val| serde_json::from_str::<RedisValue<T, K, S>>(&val).ok())
.map(|val| (val.key.clone(), val))
})
.collect::<HashMap<_, _>>();
Ok::<_, DatabaseError>((cached_values, connection, ids))
};
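        // `get_cached_values` first resolves any slug aliases to ids via the slug namespace,
        // then MGETs the id keys themselves (trying each raw key, its base62-decoded form, and
        // any slug-resolved ids) and deserializes every hit into a `RedisValue`.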
let current_time = Utc::now();
let mut expired_values = HashMap::new();
let (cached_values_raw, mut connection, ids) = get_cached_values(ids, connection).await?;
let mut cached_values = cached_values_raw
.into_iter()
.filter_map(|(key, val)| {
if Utc.timestamp(val.iat + ACTUAL_EXPIRY, 0) < current_time {
expired_values.insert(val.key.to_string(), val);
None
} else {
let key_str = val.key.to_string();
ids.remove(&key_str);
if let Ok(value) = key_str.parse::<u64>() {
let base62 = to_base62(value);
ids.remove(&base62);
}
if let Some(ref alias) = val.alias {
ids.remove(&alias.to_string());
}
Some((key, val))
}
})
.collect::<HashMap<_, _>>();
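        // At this point `cached_values` holds entries that are still fresh, `expired_values`
        // holds entries older than ACTUAL_EXPIRY that may be reused as a fallback, and `ids`
        // retains only the keys that still need to be resolved.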
let subscribe_ids = DashMap::new();
if !ids.is_empty() {
let mut pipe = redis::pipe();
let fetch_ids = ids.iter().map(|x| x.key().clone()).collect::<Vec<_>>();
fetch_ids.iter().for_each(|key| {
pipe.atomic().set_options(
format!("{}_{namespace}:{}/lock", self.meta_namespace, key),
100,
SetOptions::default()
.get(true)
.conditional_set(ExistenceCheck::NX)
.with_expiration(SetExpiry::EX(60)),
);
});
let results = pipe
.query_async::<_, Vec<Option<i32>>>(&mut connection)
.await?;
for (idx, key) in fetch_ids.into_iter().enumerate() {
if let Some(locked) = results.get(idx) {
if locked.is_none() {
continue;
}
}
if let Some((key, raw_key)) = ids.remove(&key) {
if let Some(val) = expired_values.remove(&key) {
if let Some(ref alias) = val.alias {
ids.remove(&alias.to_string());
}
if let Ok(value) = val.key.to_string().parse::<u64>() {
let base62 = to_base62(value);
ids.remove(&base62);
}
cached_values.insert(val.key.clone(), val);
} else {
subscribe_ids.insert(key, raw_key);
}
}
}
}
#[allow(clippy::type_complexity)]
let mut fetch_tasks: Vec<
Pin<Box<dyn Future<Output = Result<HashMap<K, RedisValue<T, K, S>>, DatabaseError>>>>,
> = Vec::new();
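// Task 1: we hold the lock for everything left in `ids`, so fetch those rows via
// `closure`, write them back under the canonical key (plus a slug -> key alias),
// and clear the locks taken on the key, its base62 form, and its slug.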
if !ids.is_empty() {
fetch_tasks.push(Box::pin(async {
let fetch_ids = ids.iter().map(|x| x.value().clone()).collect::<Vec<_>>();
let vals = closure(fetch_ids).await?;
let mut return_values = HashMap::new();
let mut pipe = redis::pipe();
if !vals.is_empty() {
for (key, (slug, value)) in vals {
let value = RedisValue {
key: key.clone(),
iat: Utc::now().timestamp(),
val: value,
alias: slug.clone(),
};
pipe.atomic().set_ex(
format!("{}_{namespace}:{key}", self.meta_namespace),
serde_json::to_string(&value)?,
DEFAULT_EXPIRY as u64,
);
if let Some(slug) = slug {
ids.remove(&slug.to_string());
if let Some(slug_namespace) = slug_namespace {
let actual_slug = if case_sensitive {
slug.to_string()
} else {
slug.to_string().to_lowercase()
};
pipe.atomic().set_ex(
format!(
"{}_{slug_namespace}:{}",
self.meta_namespace, actual_slug
),
key.to_string(),
DEFAULT_EXPIRY as u64,
);
pipe.atomic().del(format!(
"{}_{namespace}:{}/lock",
self.meta_namespace, actual_slug
));
}
}
let key_str = key.to_string();
ids.remove(&key_str);
if let Ok(value) = key_str.parse::<u64>() {
let base62 = to_base62(value);
ids.remove(&base62);
pipe.atomic()
.del(format!("{}_{namespace}:{base62}/lock", self.meta_namespace));
}
pipe.atomic()
.del(format!("{}_{namespace}:{key}/lock", self.meta_namespace));
return_values.insert(key, value);
}
}
for (key, _) in ids {
pipe.atomic()
.del(format!("{}_{namespace}:{key}/lock", self.meta_namespace));
}
pipe.query_async(&mut connection).await?;
Ok(return_values)
}));
}
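// Task 2: another worker holds the lock for these keys. Poll the lock keys every
// 100ms until they disappear (timing out after 5 seconds), then re-read the
// values the lock holder has written to the cache.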
if !subscribe_ids.is_empty() {
fetch_tasks.push(Box::pin(async {
let mut connection = self.pool.get().await?;
let mut interval = tokio::time::interval(Duration::from_millis(100));
let start = Utc::now();
loop {
let results = cmd("MGET")
.arg(
subscribe_ids
.iter()
.map(|x| {
format!("{}_{namespace}:{}/lock", self.meta_namespace, x.key())
})
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<String>>>(&mut connection)
.await?;
if results.into_iter().all(|x| x.is_none()) {
break;
}
if (Utc::now() - start) > chrono::Duration::seconds(5) {
return Err(DatabaseError::CacheTimeout);
}
interval.tick().await;
}
let (return_values, _, _) = get_cached_values(subscribe_ids, connection).await?;
Ok(return_values)
}));
}
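// Run both tasks concurrently and fold their results into the values already served from cache.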
if !fetch_tasks.is_empty() {
for map in futures::future::try_join_all(fetch_tasks).await? {
for (key, value) in map {
cached_values.insert(key, value);
}
}
}
Ok(cached_values.into_iter().map(|x| (x.0, x.1.val)).collect())
}
}
impl RedisConnection {
@ -120,26 +488,6 @@ impl RedisConnection {
.and_then(|x| serde_json::from_str(&x).ok()))
}
-pub async fn multi_get<R>(
-&mut self,
-namespace: &str,
-ids: impl IntoIterator<Item = impl Display>,
-) -> Result<Vec<Option<R>>, DatabaseError>
-where
-R: FromRedisValue,
-{
-let mut cmd = cmd("MGET");
-let ids = ids.into_iter().map(|x| x.to_string()).collect_vec();
-redis_args(
-&mut cmd,
-&ids.into_iter()
-.map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x))
-.collect_vec(),
-);
-Ok(redis_execute(&mut cmd, &mut self.connection).await?)
-}
pub async fn delete<T1>(&mut self, namespace: &str, id: T1) -> Result<(), DatabaseError>
where
T1: Display,
@ -177,6 +525,15 @@ impl RedisConnection {
}
}
+#[derive(Serialize, Deserialize)]
+pub struct RedisValue<T, K, S> {
+key: K,
+#[serde(skip_serializing_if = "Option::is_none")]
+alias: Option<S>,
+iat: i64,
+val: T,
+}
pub fn redis_args(cmd: &mut Cmd, args: &[String]) {
for arg in args {
cmd.arg(arg);

View File

@ -5,7 +5,7 @@ use super::{
use serde::{Deserialize, Serialize};
/// The ID of a team
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct OrganizationId(pub u64);

View File

@ -5,7 +5,7 @@ use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
/// The ID of a team
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct PatId(pub u64);

View File

@ -3,7 +3,7 @@ use crate::models::users::UserId;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct SessionId(pub u64);

View File

@ -117,8 +117,6 @@ impl AnalyticsQueue {
let new_count = if let Some((views, monetized)) = raw_views.get_mut(idx) {
if let Some(count) = count {
-println!("len: {} count: {}", views.len(), count);
if count > 3 {
*monetized = false;
continue;

View File

@ -115,6 +115,8 @@ pub async fn pat_full_test() {
"expires": Utc::now() + Duration::days(1), // no longer expired! "expires": Utc::now() + Duration::days(1), // no longer expired!
})) }))
.to_request(); .to_request();
println!("PAT ID FOR TEST: {}", id);
let resp = test_env.call(req).await; let resp = test_env.call(req).await;
assert_status!(&resp, StatusCode::NO_CONTENT); assert_status!(&resp, StatusCode::NO_CONTENT);
assert_eq!(mock_pat_test(access_token).await, 200); // Works again assert_eq!(mock_pat_test(access_token).await, 200); // Works again

View File

@ -69,7 +69,10 @@ async fn test_get_project() {
.unwrap()
.unwrap();
let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
-assert_eq!(cached_project["inner"]["slug"], json!(alpha_project_slug));
+assert_eq!(
+cached_project["val"]["inner"]["slug"],
+json!(alpha_project_slug)
+);
// Make the request again, this time it should be cached
let resp = api.get_project(alpha_project_id, USER_USER_PAT).await;

View File

@ -55,7 +55,7 @@ async fn test_get_version() {
.unwrap();
let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
assert_eq!(
-cached_project["inner"]["project_id"],
+cached_project["val"]["inner"]["project_id"],
json!(parse_base62(alpha_project_id).unwrap())
);
@ -617,6 +617,7 @@ async fn version_ordering_for_specified_orderings_orders_lower_order_first() {
USER_USER_PAT,
)
.await;
assert_common_version_ids(&versions, vec![new_version_id, alpha_version_id]);
})
.await;