diff --git a/.sqlx/query-00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d.json b/.sqlx/query-00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d.json new file mode 100644 index 000000000..3e046f329 --- /dev/null +++ b/.sqlx/query-00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "version_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "loaders", + "type_info": "VarcharArray" + }, + { + "ordinal": 2, + "name": "project_types", + "type_info": "VarcharArray" + }, + { + "ordinal": 3, + "name": "games", + "type_info": "VarcharArray" + }, + { + "ordinal": 4, + "name": "loader_fields", + "type_info": "Int4Array" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + null, + null, + null, + null + ] + }, + "hash": "00a733e8ea78f15743afe6a9d637fa4fb87a205854905fb16cf1b8e715f1e01d" +} diff --git a/.sqlx/query-603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77.json b/.sqlx/query-04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433.json similarity index 71% rename from .sqlx/query-603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77.json rename to .sqlx/query-04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433.json index fbc6462ac..6c62c2b6b 100644 --- a/.sqlx/query-603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77.json +++ b/.sqlx/query-04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values\n WHERE enum_id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ", + "query": "\n SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values\n WHERE enum_id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ false ] }, - "hash": "603eaa54b3956d68f656008e9b04f1c352857cf2eb15874cee9d31f8d992ab77" + "hash": "04c04958c71c4fab903c46c9185286e7460a6ff7b03cbc90939ac6c7cb526433" } diff --git a/.sqlx/query-0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8.json b/.sqlx/query-0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8.json new file mode 100644 index 000000000..33e8733d6 --- /dev/null +++ b/.sqlx/query-0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8.json @@ -0,0 +1,46 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader 
is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "mod_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "loaders", + "type_info": "VarcharArray" + }, + { + "ordinal": 2, + "name": "project_types", + "type_info": "VarcharArray" + }, + { + "ordinal": 3, + "name": "games", + "type_info": "VarcharArray" + }, + { + "ordinal": 4, + "name": "loader_fields", + "type_info": "Int4Array" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + null, + null, + null, + null + ] + }, + "hash": "0d0f736e563abba7561c9b5de108c772541ad0049f706602d01460238f88ffd8" +} diff --git a/.sqlx/query-2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75.json b/.sqlx/query-10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51.json similarity index 71% rename from .sqlx/query-2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75.json rename to .sqlx/query-10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51.json index 12c65eea7..4caa17396 100644 --- a/.sqlx/query-2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75.json +++ b/.sqlx/query-10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT version_id, field_id, int_value, enum_value, string_value\n FROM version_fields\n WHERE version_id = ANY($1)\n ", + "query": "\n SELECT version_id, field_id, int_value, enum_value, string_value\n FROM version_fields\n WHERE version_id = ANY($1)\n ", "describe": { "columns": [ { @@ -42,5 +42,5 @@ true ] }, - "hash": "2390acbe75f9956e8e16c29faa90aa2fb6b3e11a417302b62fc4a6b4a1785f75" + "hash": "10f81e605c9ef63153f6879d507dc1d1bb38846e16d9fa6cbd6cceea2efbfd51" } diff --git a/.sqlx/query-4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849.json b/.sqlx/query-28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163.json similarity index 71% rename from .sqlx/query-4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849.json rename to .sqlx/query-28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163.json index b9780b845..a901da94b 100644 --- a/.sqlx/query-4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849.json +++ b/.sqlx/query-28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color\n FROM organizations o\n WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)\n GROUP BY o.id;\n ", + "query": "\n SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color\n FROM organizations o\n WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2)\n GROUP BY o.id;\n ", "describe": { 
"columns": [ { @@ -55,5 +55,5 @@ true ] }, - "hash": "4deaf065c12dbfd5f585286001fdf66f60524ec13eab7d922db9290237297849" + "hash": "28e5a9147061e78c0c1574ff650a30ead9fe7883d283e08a46155382e7a6c163" } diff --git a/.sqlx/query-8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301.json b/.sqlx/query-32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6.json similarity index 72% rename from .sqlx/query-8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301.json rename to .sqlx/query-32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6.json index 5489c2b59..5fc3bd90c 100644 --- a/.sqlx/query-8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301.json +++ b/.sqlx/query-32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1)\n ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC;\n ", + "query": "\n SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number,\n v.changelog changelog, v.date_published date_published, v.downloads downloads,\n v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering\n FROM versions v\n WHERE v.id = ANY($1);\n ", "describe": { "columns": [ { @@ -90,5 +90,5 @@ true ] }, - "hash": "8615354803791e238cc037b8a105008014ecd9764d198e62cc1ad18fc3185301" + "hash": "32f4aa1ab67fbdcd7187fbae475876bf3d3225ca7b4994440a67cbd6a7b610f6" } diff --git a/.sqlx/query-6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538.json b/.sqlx/query-34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff.json similarity index 63% rename from .sqlx/query-6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538.json rename to .sqlx/query-34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff.json index 1b275596c..c1c37d27e 100644 --- a/.sqlx/query-6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538.json +++ b/.sqlx/query-34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash\n FROM hashes\n WHERE file_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash\n FROM hashes\n WHERE file_id = ANY($1)\n ", "describe": { "columns": [ { @@ -30,5 +30,5 @@ null ] }, - "hash": "6d867e712d89c915fc15940eadded0a383aa479e7f25f3a408661347e35c6538" + "hash": "34fcb1b5ff6d29fbf4e617cdde9a296e9312aec9ff074dd39a83ee1ccb7678ff" } diff --git a/.sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json b/.sqlx/query-3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4.json similarity index 61% rename from .sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json rename to .sqlx/query-3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4.json index 6d206d589..b1f9dab61 100644 --- a/.sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json +++ 
b/.sqlx/query-3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions v on v.id = f.version_id\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published\n ORDER BY v.date_published\n ", + "query": "\n SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions v on v.id = f.version_id\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published\n ORDER BY v.date_published\n ", "describe": { "columns": [ { @@ -67,5 +67,5 @@ null ] }, - "hash": "0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816" + "hash": "3689ca9f16fb80c55a0d2fd3c08ae4d0b70b92c8ab9a75afb96297748ec36bd4" } diff --git a/.sqlx/query-8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47.json b/.sqlx/query-4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc.json similarity index 58% rename from .sqlx/query-8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47.json rename to .sqlx/query-4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc.json index 082ad636a..811341804 100644 --- a/.sqlx/query-8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47.json +++ b/.sqlx/query-4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation\n FROM mods_links ml\n INNER JOIN mods m ON ml.joining_mod_id = m.id \n INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", + "query": "\n SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation\n FROM mods_links ml\n INNER JOIN mods m ON ml.joining_mod_id = m.id\n INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ false ] }, - "hash": "8ff710a212087299ecc176ecc3cffbe5f411e76909ea458a359b9eea2c543e47" + "hash": "4016797b6c41821d98dd024859088459c9b7157697b2b2fa745bdd21916a4ffc" } diff --git a/.sqlx/query-2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95.json b/.sqlx/query-43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af.json similarity index 69% rename from .sqlx/query-2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95.json rename to .sqlx/query-43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af.json index 9b62665fd..c66e86018 100644 --- a/.sqlx/query-2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95.json +++ b/.sqlx/query-43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values 
lfev\n WHERE id = ANY($1) \n ORDER BY enum_id, ordering, created DESC\n ", + "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1)\n ORDER BY enum_id, ordering, created DESC\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ true ] }, - "hash": "2140809b7b65c44c7de96ce89ca52a1808e134756baf6d847600668b7e0bbc95" + "hash": "43d4eafdbcb449a56551d3d6edeba0d6e196fa6539e3f9df107c23a74ba962af" } diff --git a/.sqlx/query-f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0.json b/.sqlx/query-4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4.json similarity index 66% rename from .sqlx/query-f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0.json rename to .sqlx/query-4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4.json index c1b79a18c..5c5d3861a 100644 --- a/.sqlx/query-f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0.json +++ b/.sqlx/query-4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT c.id id, c.name name, c.description description,\n c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,\n c.updated updated, c.status status,\n ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods\n FROM collections c\n LEFT JOIN collections_mods cm ON cm.collection_id = c.id\n WHERE c.id = ANY($1)\n GROUP BY c.id;\n ", + "query": "\n SELECT c.id id, c.name name, c.description description,\n c.icon_url icon_url, c.color color, c.created created, c.user_id user_id,\n c.updated updated, c.status status,\n ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods\n FROM collections c\n LEFT JOIN collections_mods cm ON cm.collection_id = c.id\n WHERE c.id = ANY($1)\n GROUP BY c.id;\n ", "describe": { "columns": [ { @@ -72,5 +72,5 @@ null ] }, - "hash": "f2f865b1f1428ed9469e8f73796c93a23895e6b10a4eb34aa761d29acfa24fb0" + "hash": "4fc11e55884d6813992fba1d0b3111742a5f98453942fe83e09c2056bda401f4" } diff --git a/.sqlx/query-b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64.json b/.sqlx/query-623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3.json similarity index 63% rename from .sqlx/query-b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64.json rename to .sqlx/query-623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3.json index 9c8ffbaf9..6ad1c4b9b 100644 --- a/.sqlx/query-b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64.json +++ b/.sqlx/query-623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type\n FROM dependencies d\n WHERE dependent_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type\n FROM dependencies d\n WHERE dependent_id = ANY($1)\n ", "describe": { "columns": [ { @@ -42,5 +42,5 @@ false ] }, - "hash": "b94d2551866c355159d01f77fe301b191de2a83d3ba3817ea60628a1b45a7a64" + "hash": "623881c24c12e77f6fc57669929be55a34800cd2269da29d555959164919c9a3" } diff --git 
a/.sqlx/query-5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a.json b/.sqlx/query-64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643.json similarity index 78% rename from .sqlx/query-5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a.json rename to .sqlx/query-64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643.json index e1d35b117..e24329c38 100644 --- a/.sqlx/query-5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a.json +++ b/.sqlx/query-64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id\n FROM uploaded_images\n WHERE id = ANY($1)\n GROUP BY id;\n ", + "query": "\n SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id\n FROM uploaded_images\n WHERE id = ANY($1)\n GROUP BY id;\n ", "describe": { "columns": [ { @@ -72,5 +72,5 @@ true ] }, - "hash": "5c7bc2b59e5bcbe50e556cf28fb7a20de645752beef330b6779ec256f33e666a" + "hash": "64fe01f3dd84c51966150e1278189c04da9e5fcd994ef5162afb1321b9d4b643" } diff --git a/.sqlx/query-21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64.json b/.sqlx/query-6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842.json similarity index 52% rename from .sqlx/query-21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64.json rename to .sqlx/query-6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842.json index aff580482..c7ccefa7e 100644 --- a/.sqlx/query-21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64.json +++ b/.sqlx/query-6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id\n FROM pats\n WHERE user_id = $1\n ORDER BY created DESC\n ", + "query": "\n SELECT id\n FROM pats\n WHERE user_id = $1\n ORDER BY created DESC\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "21d20e5f09cb0729dc16c8609c35cec5a913f3172b53b8ae05da0096a33b4b64" + "hash": "6fac7682527a4a9dc34e121e8b7c356cb8fe1d0ff1f9a19d29937721acaa8842" } diff --git a/.sqlx/query-c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514.json b/.sqlx/query-74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84.json similarity index 79% rename from .sqlx/query-c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514.json rename to .sqlx/query-74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84.json index b02376beb..5c8681558 100644 --- a/.sqlx/query-c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514.json +++ b/.sqlx/query-74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,\n city, country, ip, user_agent\n FROM sessions\n WHERE id = ANY($1) OR session = ANY($2)\n ORDER BY created DESC\n ", + "query": "\n SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform,\n city, country, ip, user_agent\n FROM sessions\n WHERE id = ANY($1) OR session = ANY($2)\n ORDER BY created DESC\n ", "describe": { "columns": [ { @@ -91,5 +91,5 @@ false ] }, - "hash": "c94faba99d486b11509fff59465b7cc71983551b035e936ce4d9776510afb514" + "hash": "74854bb35744be413458d0609d6511aa4c9802b5fc4ac73abb520cf2577e1d84" } diff --git 
a/.sqlx/query-1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec.json b/.sqlx/query-7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab.json similarity index 59% rename from .sqlx/query-1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec.json rename to .sqlx/query-7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab.json index fdfa60d66..b02c6c747 100644 --- a/.sqlx/query-1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec.json +++ b/.sqlx/query-7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id, v.id as id, date_published\n FROM mods m\n INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", + "query": "\n SELECT DISTINCT mod_id, v.id as id, date_published\n FROM mods m\n INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3)\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ false ] }, - "hash": "1af33ce1ecbf8d0ab2dcc6de7d433ca05a82acc32dd447ff51487e0039706fec" + "hash": "7f5cccc8927d3675f91c2b2f5c260466d989b5cd4a73926abacc3989b9e887ab" } diff --git a/.sqlx/query-ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c.json b/.sqlx/query-7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a.json similarity index 68% rename from .sqlx/query-ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c.json rename to .sqlx/query-7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a.json index 6f4550b9f..da471b1da 100644 --- a/.sqlx/query-ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c.json +++ b/.sqlx/query-7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value\n FROM versions v\n INNER JOIN version_fields vf ON v.id = vf.version_id\n WHERE v.id = ANY($1)\n ", + "query": "\n SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value\n FROM versions v\n INNER JOIN version_fields vf ON v.id = vf.version_id\n WHERE v.id = ANY($1)\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ true ] }, - "hash": "ca53a711735ba065d441356ed744a95e948354bb5b9a6047749fdc2a514f456c" + "hash": "7fa5098b1083af58b86083b659cb647498fcc20e38265b9d316ca8c0a2cbc02a" } diff --git a/.sqlx/query-5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478.json b/.sqlx/query-887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c.json similarity index 75% rename from .sqlx/query-5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478.json rename to .sqlx/query-887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c.json index c869cb7b5..abe6d4217 100644 --- a/.sqlx/query-5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478.json +++ b/.sqlx/query-887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional\n FROM loader_fields lf\n WHERE id = ANY($1) \n ", + "query": "\n SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional\n FROM loader_fields lf\n WHERE id = ANY($1)\n ", "describe": { "columns": [ { @@ -54,5 +54,5 @@ false ] }, - "hash": "5329254eeb1e80d2a0f4f3bc2b613f3a7d54b0673f1a41f31fe5b5bbc4b5e478" + "hash": 
"887a217868178265ac9e1011a889173d608e064a3a1b69a135273de380efe44c" } diff --git a/.sqlx/query-2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d.json b/.sqlx/query-92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b.json similarity index 68% rename from .sqlx/query-2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d.json rename to .sqlx/query-92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b.json index 5d9e7c19c..dc6e4a414 100644 --- a/.sqlx/query-2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d.json +++ b/.sqlx/query-92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,\n m.icon_url icon_url, m.description description, m.published published,\n m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,\n m.license_url license_url,\n m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,\n m.webhook_sent, m.color,\n t.id thread_id, m.monetization_status monetization_status,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories\n FROM mods m \n INNER JOIN threads t ON t.mod_id = m.id\n LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id\n LEFT JOIN categories c ON mc.joining_category_id = c.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n GROUP BY t.id, m.id;\n ", + "query": "\n SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows,\n m.icon_url icon_url, m.description description, m.published published,\n m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status,\n m.license_url license_url,\n m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body,\n m.webhook_sent, m.color,\n t.id thread_id, m.monetization_status monetization_status,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories,\n ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories\n FROM mods m\n INNER JOIN threads t ON t.mod_id = m.id\n LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id\n LEFT JOIN categories c ON mc.joining_category_id = c.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n GROUP BY t.id, m.id;\n ", "describe": { "columns": [ { @@ -169,5 +169,5 @@ null ] }, - "hash": "2fe731da3681f72ec03b89d7139a49ccb1069079d8600daa40688d5f528de83d" + "hash": "92b9298c0b6255b4121bf3079e121da06e6e0cdaa131cc9897cb321eaeb3d10b" } diff --git a/.sqlx/query-e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d.json b/.sqlx/query-a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517.json similarity index 74% rename from .sqlx/query-e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d.json rename to .sqlx/query-a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517.json index 384c572e6..165e3c68a 100644 --- a/.sqlx/query-e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d.json 
+++ b/.sqlx/query-a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, name, access_token, scopes, user_id, created, expires, last_used\n FROM pats\n WHERE id = ANY($1) OR access_token = ANY($2)\n ORDER BY created DESC\n ", + "query": "\n SELECT id, name, access_token, scopes, user_id, created, expires, last_used\n FROM pats\n WHERE id = ANY($1) OR access_token = ANY($2)\n ORDER BY created DESC\n ", "describe": { "columns": [ { @@ -61,5 +61,5 @@ true ] }, - "hash": "e6f5a150cbd3bd6b9bde9e5cdad224a45c96d678b69ec12508e81246710e3f6d" + "hash": "a1331f7c6f33234e413978c0d9318365e7de5948b93e8c0c85a1d179f4968517" } diff --git a/.sqlx/query-5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0.json b/.sqlx/query-a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785.json similarity index 81% rename from .sqlx/query-5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0.json rename to .sqlx/query-a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785.json index 2932ef87e..fca3ad56f 100644 --- a/.sqlx/query-5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0.json +++ b/.sqlx/query-a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, name, email,\n avatar_url, username, bio,\n created, role, badges,\n balance,\n github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id,\n email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email,\n venmo_handle\n FROM users\n WHERE id = ANY($1) OR LOWER(username) = ANY($2)\n ", + "query": "\n SELECT id, name, email,\n avatar_url, username, bio,\n created, role, badges,\n balance,\n github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id,\n email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email,\n venmo_handle\n FROM users\n WHERE id = ANY($1) OR LOWER(username) = ANY($2)\n ", "describe": { "columns": [ { @@ -151,5 +151,5 @@ true ] }, - "hash": "5e7e85c8c1f4b4e600c51669b6591b5cc279bd7482893ec687e83ee22d00a3a0" + "hash": "a47456ecddbd1787301a2765168db0df31980ae48cb2ec37c323da10ba55a785" } diff --git a/.sqlx/query-c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345.json b/.sqlx/query-a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b.json similarity index 74% rename from .sqlx/query-c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345.json rename to .sqlx/query-a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b.json index dd76374d9..1b838c4ad 100644 --- a/.sqlx/query-c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345.json +++ b/.sqlx/query-a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,\n accepted, payouts_split, \n ordering, user_id\n FROM team_members\n WHERE team_id = ANY($1)\n ORDER BY team_id, ordering;\n ", + "query": "\n SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions,\n accepted, payouts_split,\n ordering, user_id\n FROM team_members\n WHERE team_id = ANY($1)\n ORDER BY team_id, ordering;\n ", "describe": { "columns": [ { @@ -72,5 +72,5 @@ false ] }, - "hash": "c387574b32f6b70adc88132df96fbbc7dd57a6f633a787dd31aafc0584547345" + "hash": "a5007d03b1b5b2a95814a3070d114c55731403dcd75d44420acce8df5bd2009b" 
} diff --git a/.sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json b/.sqlx/query-b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198.json similarity index 75% rename from .sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json rename to .sqlx/query-b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198.json index 20c4ed62e..31772e96b 100644 --- a/.sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json +++ b/.sqlx/query-b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type\n FROM files f\n WHERE f.version_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type\n FROM files f\n WHERE f.version_id = ANY($1)\n ", "describe": { "columns": [ { @@ -54,5 +54,5 @@ true ] }, - "hash": "e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823" + "hash": "b49cd556b85c3e74ebb4f1b7d48930c0456321799f20e63f1c3fd3ea0f03f198" } diff --git a/.sqlx/query-bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50.json b/.sqlx/query-c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c.json similarity index 70% rename from .sqlx/query-bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50.json rename to .sqlx/query-c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c.json index 01b0c6981..f7b9866aa 100644 --- a/.sqlx/query-bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50.json +++ b/.sqlx/query-c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id\n FROM loader_fields lf\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id\n WHERE lfl.loader_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id\n FROM loader_fields lf\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id\n WHERE lfl.loader_id = ANY($1)\n ", "describe": { "columns": [ { @@ -60,5 +60,5 @@ false ] }, - "hash": "bb6afad07ebfa3b92399bb07aa9e15fa69bd328f44b4bf991e80f6b91fcd3a50" + "hash": "c07277bcf62120ac4fac8678e09512f3984031919a71af59fc10995fb21f480c" } diff --git a/.sqlx/query-99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0.json b/.sqlx/query-d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51.json similarity index 69% rename from .sqlx/query-99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0.json rename to .sqlx/query-d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51.json index 5d70c257a..7141f46a1 100644 --- a/.sqlx/query-99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0.json +++ b/.sqlx/query-d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1) \n ORDER BY enum_id, ordering, created ASC\n ", + "query": "\n SELECT DISTINCT id, enum_id, value, ordering, created, metadata\n FROM loader_field_enum_values lfev\n WHERE id = ANY($1)\n ORDER BY enum_id, ordering, created ASC\n ", "describe": { 
"columns": [ { @@ -48,5 +48,5 @@ true ] }, - "hash": "99080d0666e06794e44c80e05b17585e0f87c70d9ace28537898f27e7df0ded0" + "hash": "d9c4d536ce0bea290f445c3bccb56b4743f2f3a9ce4b170fb439e0e135ca9d51" } diff --git a/.sqlx/query-e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681.json b/.sqlx/query-e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681.json deleted file mode 100644 index 793918efb..000000000 --- a/.sqlx/query-e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY mod_id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "mod_id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "loaders", - "type_info": "VarcharArray" - }, - { - "ordinal": 2, - "name": "project_types", - "type_info": "VarcharArray" - }, - { - "ordinal": 3, - "name": "games", - "type_info": "VarcharArray" - }, - { - "ordinal": 4, - "name": "loader_fields", - "type_info": "Int4Array" - } - ], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [ - false, - null, - null, - null, - null - ] - }, - "hash": "e1df7bf2edd30d501a48686c00712784b121db47612bf809d0a0fe0b5d99b681" -} diff --git a/.sqlx/query-f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2.json b/.sqlx/query-f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2.json deleted file mode 100644 index 2863c6bf2..000000000 --- a/.sqlx/query-f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT version_id,\n ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders,\n ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types,\n ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games,\n ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields\n FROM versions v\n INNER JOIN loaders_versions lv ON v.id = lv.version_id\n INNER JOIN loaders l ON lv.loader_id = l.id\n INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id\n INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id\n INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id\n INNER JOIN games g ON lptg.game_id = g.id\n LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id\n WHERE v.id = ANY($1)\n GROUP BY version_id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "version_id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "loaders", - "type_info": "VarcharArray" - }, - { - "ordinal": 2, - 
"name": "project_types", - "type_info": "VarcharArray" - }, - { - "ordinal": 3, - "name": "games", - "type_info": "VarcharArray" - }, - { - "ordinal": 4, - "name": "loader_fields", - "type_info": "Int4Array" - } - ], - "parameters": { - "Left": [ - "Int8Array" - ] - }, - "nullable": [ - false, - null, - null, - null, - null - ] - }, - "hash": "f3729149bd174541ec4f7ec2145fef0f4ac78e4efb046cc77dcdf43522ef72e2" -} diff --git a/.sqlx/query-7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b.json b/.sqlx/query-f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84.json similarity index 70% rename from .sqlx/query-7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b.json rename to .sqlx/query-f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84.json index fdb571de4..d47e6fda1 100644 --- a/.sqlx/query-7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b.json +++ b/.sqlx/query-f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering\n FROM mods_gallery mg\n INNER JOIN mods m ON mg.mod_id = m.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", + "query": "\n SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering\n FROM mods_gallery mg\n INNER JOIN mods m ON mg.mod_id = m.id\n WHERE m.id = ANY($1) OR m.slug = ANY($2)\n ", "describe": { "columns": [ { @@ -55,5 +55,5 @@ false ] }, - "hash": "7bb8a2e1e01817ea3778fcd2af039e38d085484dd20abf57d0eff8d7801b728b" + "hash": "f62ec19e7e23ec98ad38f79ba28066f1b13a607923003699378bda895aab3a84" } diff --git a/src/database/models/collection_item.rs b/src/database/models/collection_item.rs index a2c29283c..1f703950f 100644 --- a/src/database/models/collection_item.rs +++ b/src/database/models/collection_item.rs @@ -4,6 +4,8 @@ use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; use crate::models::collections::CollectionStatus; use chrono::{DateTime, Utc}; +use dashmap::DashMap; +use futures::TryStreamExt; use serde::{Deserialize, Serialize}; const COLLECTIONS_NAMESPACE: &str = "collections"; @@ -155,93 +157,55 @@ impl Collection { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::TryStreamExt; + let val = redis + .get_cached_keys( + COLLECTIONS_NAMESPACE, + &collection_ids.iter().map(|x| x.0).collect::>(), + |collection_ids| async move { + let collections = sqlx::query!( + " + SELECT c.id id, c.name name, c.description description, + c.icon_url icon_url, c.color color, c.created created, c.user_id user_id, + c.updated updated, c.status status, + ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods + FROM collections c + LEFT JOIN collections_mods cm ON cm.collection_id = c.id + WHERE c.id = ANY($1) + GROUP BY c.id; + ", + &collection_ids, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, m| { + let collection = Collection { + id: CollectionId(m.id), + user_id: UserId(m.user_id), + name: m.name.clone(), + description: m.description.clone(), + icon_url: m.icon_url.clone(), + color: m.color.map(|x| x as u32), + created: m.created, + updated: m.updated, + status: CollectionStatus::from_string(&m.status), + projects: m + .mods + .unwrap_or_default() + .into_iter() + .map(ProjectId) + .collect(), + }; - let mut redis = redis.connect().await?; + acc.insert(m.id, collection); + async move { Ok(acc) } + }) + .await?; - if 
collection_ids.is_empty() { - return Ok(Vec::new()); - } - - let mut found_collections = Vec::new(); - let mut remaining_collections: Vec = collection_ids.to_vec(); - - if !collection_ids.is_empty() { - let collections = redis - .multi_get::( - COLLECTIONS_NAMESPACE, - collection_ids.iter().map(|x| x.0.to_string()), - ) - .await?; - - for collection in collections { - if let Some(collection) = - collection.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_collections.retain(|x| collection.id.0 != x.0); - found_collections.push(collection); - continue; - } - } - } - - if !remaining_collections.is_empty() { - let collection_ids_parsed: Vec = - remaining_collections.iter().map(|x| x.0).collect(); - let db_collections: Vec = sqlx::query!( - " - SELECT c.id id, c.name name, c.description description, - c.icon_url icon_url, c.color color, c.created created, c.user_id user_id, - c.updated updated, c.status status, - ARRAY_AGG(DISTINCT cm.mod_id) filter (where cm.mod_id is not null) mods - FROM collections c - LEFT JOIN collections_mods cm ON cm.collection_id = c.id - WHERE c.id = ANY($1) - GROUP BY c.id; - ", - &collection_ids_parsed, + Ok(collections) + }, ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| { - let id = m.id; - - Collection { - id: CollectionId(id), - user_id: UserId(m.user_id), - name: m.name.clone(), - description: m.description.clone(), - icon_url: m.icon_url.clone(), - color: m.color.map(|x| x as u32), - created: m.created, - updated: m.updated, - status: CollectionStatus::from_string(&m.status), - projects: m - .mods - .unwrap_or_default() - .into_iter() - .map(ProjectId) - .collect(), - } - })) - }) - .try_collect::>() .await?; - for collection in db_collections { - redis - .set_serialized_to_json( - COLLECTIONS_NAMESPACE, - collection.id.0, - &collection, - None, - ) - .await?; - found_collections.push(collection); - } - } - - Ok(found_collections) + Ok(val) } pub async fn clear_cache(id: CollectionId, redis: &RedisPool) -> Result<(), DatabaseError> { diff --git a/src/database/models/image_item.rs b/src/database/models/image_item.rs index 68477304e..28297c157 100644 --- a/src/database/models/image_item.rs +++ b/src/database/models/image_item.rs @@ -2,6 +2,7 @@ use super::ids::*; use crate::database::redis::RedisPool; use crate::{database::models::DatabaseError, models::images::ImageContext}; use chrono::{DateTime, Utc}; +use dashmap::DashMap; use serde::{Deserialize, Serialize}; const IMAGES_NAMESPACE: &str = "images"; @@ -180,70 +181,44 @@ impl Image { { use futures::TryStreamExt; - let mut redis = redis.connect().await?; - if image_ids.is_empty() { - return Ok(Vec::new()); - } + let val = redis.get_cached_keys( + IMAGES_NAMESPACE, + &image_ids.iter().map(|x| x.0).collect::>(), + |image_ids| async move { + let images = sqlx::query!( + " + SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id + FROM uploaded_images + WHERE id = ANY($1) + GROUP BY id; + ", + &image_ids, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, i| { + let img = Image { + id: ImageId(i.id), + url: i.url, + size: i.size as u64, + created: i.created, + owner_id: UserId(i.owner_id), + context: i.context, + project_id: i.mod_id.map(ProjectId), + version_id: i.version_id.map(VersionId), + thread_message_id: i.thread_message_id.map(ThreadMessageId), + report_id: i.report_id.map(ReportId), + }; - let mut found_images = Vec::new(); - let mut remaining_ids = image_ids.to_vec(); - - let image_ids = image_ids.iter().map(|x| 
x.0).collect::>(); - - if !image_ids.is_empty() { - let images = redis - .multi_get::(IMAGES_NAMESPACE, image_ids.iter().map(|x| x.to_string())) - .await?; - for image in images { - if let Some(image) = image.and_then(|x| serde_json::from_str::(&x).ok()) { - remaining_ids.retain(|x| image.id.0 != x.0); - found_images.push(image); - continue; - } - } - } - - if !remaining_ids.is_empty() { - let db_images: Vec = sqlx::query!( - " - SELECT id, url, size, created, owner_id, context, mod_id, version_id, thread_message_id, report_id - FROM uploaded_images - WHERE id = ANY($1) - GROUP BY id; - ", - &remaining_ids.iter().map(|x| x.0).collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|i| { - let id = i.id; - - Image { - id: ImageId(id), - url: i.url, - size: i.size as u64, - created: i.created, - owner_id: UserId(i.owner_id), - context: i.context, - project_id: i.mod_id.map(ProjectId), - version_id: i.version_id.map(VersionId), - thread_message_id: i.thread_message_id.map(ThreadMessageId), - report_id: i.report_id.map(ReportId), - } - })) - }) - .try_collect::>() - .await?; - - for image in db_images { - redis - .set_serialized_to_json(IMAGES_NAMESPACE, image.id.0, &image, None) + acc.insert(i.id, img); + async move { Ok(acc) } + }) .await?; - found_images.push(image); - } - } - Ok(found_images) + Ok(images) + }, + ).await?; + + Ok(val) } pub async fn clear_cache(id: ImageId, redis: &RedisPool) -> Result<(), DatabaseError> { diff --git a/src/database/models/legacy_loader_fields.rs b/src/database/models/legacy_loader_fields.rs index 8fbb425d6..adb4e463d 100644 --- a/src/database/models/legacy_loader_fields.rs +++ b/src/database/models/legacy_loader_fields.rs @@ -208,6 +208,13 @@ impl<'a> MinecraftGameVersionBuilder<'a> { .fetch_one(exec) .await?; + let mut conn = redis.connect().await?; + conn.delete( + crate::database::models::loader_fields::LOADER_FIELD_ENUM_VALUES_NAMESPACE, + game_versions_enum.id.0, + ) + .await?; + Ok(LoaderFieldEnumValueId(result.id)) } } diff --git a/src/database/models/loader_fields.rs b/src/database/models/loader_fields.rs index 5f3f72d4a..e31b07eec 100644 --- a/src/database/models/loader_fields.rs +++ b/src/database/models/loader_fields.rs @@ -6,6 +6,7 @@ use super::DatabaseError; use crate::database::redis::RedisPool; use chrono::DateTime; use chrono::Utc; +use dashmap::DashMap; use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; @@ -16,7 +17,7 @@ const LOADERS_LIST_NAMESPACE: &str = "loaders"; const LOADER_FIELDS_NAMESPACE: &str = "loader_fields"; const LOADER_FIELDS_NAMESPACE_ALL: &str = "loader_fields_all"; const LOADER_FIELD_ENUMS_ID_NAMESPACE: &str = "loader_field_enums"; -const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values"; +pub const LOADER_FIELD_ENUM_VALUES_NAMESPACE: &str = "loader_field_enum_values"; #[derive(Clone, Serialize, Deserialize, Debug)] pub struct Game { @@ -380,75 +381,47 @@ impl LoaderField { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - type RedisLoaderFieldTuple = (LoaderId, Vec); - - let mut redis = redis.connect().await?; - - let mut loader_ids = loader_ids.to_vec(); - let cached_fields: Vec = redis - .multi_get::(LOADER_FIELDS_NAMESPACE, loader_ids.iter().map(|x| x.0)) - .await? 
- .into_iter() - .flatten() - .filter_map(|x: String| serde_json::from_str::(&x).ok()) - .collect(); - - let mut found_loader_fields = HashMap::new(); - if !cached_fields.is_empty() { - for (loader_id, fields) in cached_fields { - if loader_ids.contains(&loader_id) { - found_loader_fields.insert(loader_id, fields); - loader_ids.retain(|x| x != &loader_id); - } - } - } - - if !loader_ids.is_empty() { - let result = sqlx::query!( - " - SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id - FROM loader_fields lf - LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id - WHERE lfl.loader_id = ANY($1) - ", - &loader_ids.iter().map(|x| x.0).collect::>() - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().and_then(|r| { - Some((LoaderId(r.loader_id) ,LoaderField { - id: LoaderFieldId(r.id), - field_type: LoaderFieldType::build(&r.field_type, r.enum_type)?, - field: r.field, - optional: r.optional, - min_val: r.min_val, - max_val: r.max_val, - })) - })) - }) - .try_collect::>() - .await?; - - let result: Vec = result - .into_iter() - .fold( - HashMap::new(), - |mut acc: HashMap>, x| { - acc.entry(x.0).or_default().push(x.1); - acc - }, + let val = redis.get_cached_keys_raw( + LOADER_FIELDS_NAMESPACE, + &loader_ids.iter().map(|x| x.0).collect::>(), + |loader_ids| async move { + let result = sqlx::query!( + " + SELECT DISTINCT lf.id, lf.field, lf.field_type, lf.optional, lf.min_val, lf.max_val, lf.enum_type, lfl.loader_id + FROM loader_fields lf + LEFT JOIN loader_fields_loaders lfl ON lfl.loader_field_id = lf.id + WHERE lfl.loader_id = ANY($1) + ", + &loader_ids, ) - .into_iter() - .collect_vec(); + .fetch(exec) + .try_fold(DashMap::new(), |acc: DashMap>, r| { + if let Some(field_type) = LoaderFieldType::build(&r.field_type, r.enum_type) { + let loader_field = LoaderField { + id: LoaderFieldId(r.id), + field_type, + field: r.field, + optional: r.optional, + min_val: r.min_val, + max_val: r.max_val, + }; - for (k, v) in result.into_iter() { - redis - .set_serialized_to_json(LOADER_FIELDS_NAMESPACE, k.0, (k, &v), None) + acc.entry(r.loader_id) + .or_default() + .push(loader_field); + } + + async move { + Ok(acc) + } + }) .await?; - found_loader_fields.insert(k, v); - } - } - Ok(found_loader_fields) + + Ok(result) + }, + ).await?; + + Ok(val.into_iter().map(|x| (LoaderId(x.0), x.1)).collect()) } // Gets all fields for a given loader(s) @@ -597,71 +570,51 @@ impl LoaderFieldEnumValue { loader_field_enum_ids: &[LoaderFieldEnumId], exec: E, redis: &RedisPool, - ) -> Result)>, DatabaseError> + ) -> Result>, DatabaseError> where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - let mut redis = redis.connect().await?; - let mut found_enums = Vec::new(); - let mut remaining_enums: Vec = loader_field_enum_ids.to_vec(); - - if !remaining_enums.is_empty() { - let enums = redis - .multi_get::( - LOADER_FIELD_ENUM_VALUES_NAMESPACE, - loader_field_enum_ids.iter().map(|x| x.0), + let val = redis.get_cached_keys_raw( + LOADER_FIELD_ENUM_VALUES_NAMESPACE, + &loader_field_enum_ids.iter().map(|x| x.0).collect::>(), + |loader_field_enum_ids| async move { + let values = sqlx::query!( + " + SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values + WHERE enum_id = ANY($1) + ORDER BY enum_id, ordering, created DESC + ", + &loader_field_enum_ids ) - .await?; + .fetch(exec) + .try_fold(DashMap::new(), |acc: DashMap>, c| { + let value = LoaderFieldEnumValue { + id: LoaderFieldEnumValueId(c.id), + enum_id: 
LoaderFieldEnumId(c.enum_id), + value: c.value, + ordering: c.ordering, + created: c.created, + metadata: c.metadata.unwrap_or_default(), + }; - for lfe in enums { - if let Some(lfe) = lfe.and_then(|x| { - serde_json::from_str::<(LoaderFieldEnumId, Vec)>(&x).ok() - }) { - remaining_enums.retain(|x| lfe.0 .0 != x.0); - found_enums.push(lfe.1); - continue; - } - } - } + acc.entry(c.enum_id) + .or_default() + .push(value); - let remaining_enums = remaining_enums.iter().map(|x| x.0).collect::>(); - let result = sqlx::query!( - " - SELECT id, enum_id, value, ordering, metadata, created FROM loader_field_enum_values - WHERE enum_id = ANY($1) - ORDER BY enum_id, ordering, created DESC - ", - &remaining_enums - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|c| LoaderFieldEnumValue { - id: LoaderFieldEnumValueId(c.id), - enum_id: LoaderFieldEnumId(c.enum_id), - value: c.value, - ordering: c.ordering, - created: c.created, - metadata: c.metadata.unwrap_or_default(), - })) - }) - .try_collect::>() - .await?; + async move { + Ok(acc) + } + }) + .await?; - // Convert from an Vec to a Vec<(LoaderFieldEnumId, Vec)> - let cachable_enum_sets: Vec<(LoaderFieldEnumId, Vec)> = result - .clone() + Ok(values) + }, + ).await?; + + Ok(val .into_iter() - .group_by(|x| x.enum_id) // we sort by enum_id, so this will group all values of the same enum_id together - .into_iter() - .map(|(k, v)| (k, v.collect::>().to_vec())) - .collect(); - for (k, v) in cachable_enum_sets.iter() { - redis - .set_serialized_to_json(LOADER_FIELD_ENUM_VALUES_NAMESPACE, k.0, v, None) - .await?; - } - - Ok(cachable_enum_sets) + .map(|x| (LoaderFieldEnumId(x.0), x.1)) + .collect()) } // Matches filter against metadata of enum values diff --git a/src/database/models/mod.rs b/src/database/models/mod.rs index eb931f7d1..eafde1b4b 100644 --- a/src/database/models/mod.rs +++ b/src/database/models/mod.rs @@ -48,4 +48,6 @@ pub enum DatabaseError { SerdeCacheError(#[from] serde_json::Error), #[error("Schema error: {0}")] SchemaError(String), + #[error("Timeout when waiting for cache subscriber")] + CacheTimeout, } diff --git a/src/database/models/organization_item.rs b/src/database/models/organization_item.rs index c0c089499..7f9a90732 100644 --- a/src/database/models/organization_item.rs +++ b/src/database/models/organization_item.rs @@ -1,7 +1,8 @@ -use crate::{ - database::redis::RedisPool, - models::ids::base62_impl::{parse_base62, to_base62}, -}; +use crate::{database::redis::RedisPool, models::ids::base62_impl::parse_base62}; +use dashmap::DashMap; +use futures::TryStreamExt; +use std::fmt::{Debug, Display}; +use std::hash::Hash; use super::{ids::*, TeamMember}; use serde::{Deserialize, Serialize}; @@ -97,7 +98,7 @@ impl Organization { Self::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( organization_strings: &[T], exec: E, redis: &RedisPool, @@ -105,120 +106,56 @@ impl Organization { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::stream::TryStreamExt; - - let mut redis = redis.connect().await?; - - if organization_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_organizations = Vec::new(); - let mut remaining_strings = organization_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut organization_ids = organization_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - 
organization_ids.append( - &mut redis - .multi_get::( - ORGANIZATIONS_TITLES_NAMESPACE, - organization_strings + let val = redis + .get_cached_keys_with_slug( + ORGANIZATIONS_NAMESPACE, + ORGANIZATIONS_TITLES_NAMESPACE, + false, + organization_strings, + |ids| async move { + let org_ids: Vec = ids .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids + .into_iter() .map(|x| x.to_string().to_lowercase()) - .collect::>(), - ) - .await? - .into_iter() - .flatten() - .collect(), - ); + .collect::>(); - if !organization_ids.is_empty() { - let organizations = redis - .multi_get::( - ORGANIZATIONS_NAMESPACE, - organization_ids.iter().map(|x| x.to_string()), - ) - .await?; + let organizations = sqlx::query!( + " + SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color + FROM organizations o + WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2) + GROUP BY o.id; + ", + &org_ids, + &slugs, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, m| { + let org = Organization { + id: OrganizationId(m.id), + slug: m.slug.clone(), + name: m.name, + team_id: TeamId(m.team_id), + description: m.description, + icon_url: m.icon_url, + color: m.color.map(|x| x as u32), + }; - for organization in organizations { - if let Some(organization) = - organization.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings.retain(|x| { - &to_base62(organization.id.0 as u64) != x - && organization.slug.to_lowercase() != x.to_lowercase() - }); - found_organizations.push(organization); - continue; - } - } - } + acc.insert(m.id, (Some(m.slug), org)); + async move { Ok(acc) } + }) + .await?; - if !remaining_strings.is_empty() { - let organization_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - - let organizations: Vec = sqlx::query!( - " - SELECT o.id, o.slug, o.name, o.team_id, o.description, o.icon_url, o.color - FROM organizations o - WHERE o.id = ANY($1) OR LOWER(o.slug) = ANY($2) - GROUP BY o.id; - ", - &organization_ids_parsed, - &remaining_strings - .into_iter() - .map(|x| x.to_string().to_lowercase()) - .collect::>(), + Ok(organizations) + }, ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| Organization { - id: OrganizationId(m.id), - slug: m.slug, - name: m.name, - team_id: TeamId(m.team_id), - description: m.description, - icon_url: m.icon_url, - color: m.color.map(|x| x as u32), - })) - }) - .try_collect::>() .await?; - for organization in organizations { - redis - .set_serialized_to_json( - ORGANIZATIONS_NAMESPACE, - organization.id.0, - &organization, - None, - ) - .await?; - redis - .set( - ORGANIZATIONS_TITLES_NAMESPACE, - &organization.slug.to_lowercase(), - &organization.id.0.to_string(), - None, - ) - .await?; - - found_organizations.push(organization); - } - } - - Ok(found_organizations) + Ok(val) } // Gets organization associated with a project ID, if it exists and there is one diff --git a/src/database/models/pat_item.rs b/src/database/models/pat_item.rs index 9352d6377..4e83e12b5 100644 --- a/src/database/models/pat_item.rs +++ b/src/database/models/pat_item.rs @@ -1,10 +1,14 @@ use super::ids::*; use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; -use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use crate::models::ids::base62_impl::parse_base62; use crate::models::pats::Scopes; use chrono::{DateTime, Utc}; +use dashmap::DashMap; +use futures::TryStreamExt; use 
serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; const PATS_NAMESPACE: &str = "pats"; const PATS_TOKENS_NAMESPACE: &str = "pats_tokens"; @@ -51,7 +55,7 @@ impl PersonalAccessToken { Ok(()) } - pub async fn get<'a, E, T: ToString>( + pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( id: T, exec: E, redis: &RedisPool, @@ -79,7 +83,7 @@ impl PersonalAccessToken { PersonalAccessToken::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( pat_strings: &[T], exec: E, redis: &RedisPool, @@ -87,105 +91,53 @@ impl PersonalAccessToken { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::TryStreamExt; + let val = redis + .get_cached_keys_with_slug( + PATS_NAMESPACE, + PATS_TOKENS_NAMESPACE, + true, + pat_strings, + |ids| async move { + let pat_ids: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids.into_iter().map(|x| x.to_string()).collect::>(); - let mut redis = redis.connect().await?; + let pats = sqlx::query!( + " + SELECT id, name, access_token, scopes, user_id, created, expires, last_used + FROM pats + WHERE id = ANY($1) OR access_token = ANY($2) + ORDER BY created DESC + ", + &pat_ids, + &slugs, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, x| { + let pat = PersonalAccessToken { + id: PatId(x.id), + name: x.name, + access_token: x.access_token.clone(), + scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE), + user_id: UserId(x.user_id), + created: x.created, + expires: x.expires, + last_used: x.last_used, + }; - if pat_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_pats = Vec::new(); - let mut remaining_strings = pat_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut pat_ids = pat_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - pat_ids.append( - &mut redis - .multi_get::( - PATS_TOKENS_NAMESPACE, - pat_strings.iter().map(|x| x.to_string()), - ) - .await? 
- .into_iter() - .flatten() - .collect(), - ); - - if !pat_ids.is_empty() { - let pats = redis - .multi_get::(PATS_NAMESPACE, pat_ids.iter().map(|x| x.to_string())) - .await?; - for pat in pats { - if let Some(pat) = - pat.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings - .retain(|x| &to_base62(pat.id.0 as u64) != x && &pat.access_token != x); - found_pats.push(pat); - continue; - } - } - } - - if !remaining_strings.is_empty() { - let pat_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let db_pats: Vec = sqlx::query!( - " - SELECT id, name, access_token, scopes, user_id, created, expires, last_used - FROM pats - WHERE id = ANY($1) OR access_token = ANY($2) - ORDER BY created DESC - ", - &pat_ids_parsed, - &remaining_strings - .into_iter() - .map(|x| x.to_string()) - .collect::>(), + acc.insert(x.id, (Some(x.access_token), pat)); + async move { Ok(acc) } + }) + .await?; + Ok(pats) + }, ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|x| PersonalAccessToken { - id: PatId(x.id), - name: x.name, - access_token: x.access_token, - scopes: Scopes::from_bits(x.scopes as u64).unwrap_or(Scopes::NONE), - user_id: UserId(x.user_id), - created: x.created, - expires: x.expires, - last_used: x.last_used, - })) - }) - .try_collect::>() .await?; - for pat in db_pats { - redis - .set_serialized_to_json(PATS_NAMESPACE, pat.id.0, &pat, None) - .await?; - redis - .set( - PATS_TOKENS_NAMESPACE, - &pat.access_token, - &pat.id.0.to_string(), - None, - ) - .await?; - found_pats.push(pat); - } - } - - Ok(found_pats) + Ok(val) } pub async fn get_user_pats<'a, E>( @@ -206,14 +158,13 @@ impl PersonalAccessToken { return Ok(res.into_iter().map(PatId).collect()); } - use futures::TryStreamExt; let db_pats: Vec = sqlx::query!( " - SELECT id - FROM pats - WHERE user_id = $1 - ORDER BY created DESC - ", + SELECT id + FROM pats + WHERE user_id = $1 + ORDER BY created DESC + ", user_id.0, ) .fetch_many(exec) diff --git a/src/database/models/project_item.rs b/src/database/models/project_item.rs index 8a8251bc7..609d90692 100644 --- a/src/database/models/project_item.rs +++ b/src/database/models/project_item.rs @@ -5,13 +5,15 @@ use super::{ids::*, User}; use crate::database::models; use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; -use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use crate::models::ids::base62_impl::parse_base62; use crate::models::projects::{MonetizationStatus, ProjectStatus}; use chrono::{DateTime, Utc}; use dashmap::{DashMap, DashSet}; use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; pub const PROJECTS_NAMESPACE: &str = "projects"; pub const PROJECTS_SLUGS_NAMESPACE: &str = "projects_slugs"; @@ -505,7 +507,7 @@ impl Project { Project::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( project_strings: &[T], exec: E, redis: &RedisPool, @@ -513,301 +515,253 @@ impl Project { where E: sqlx::Acquire<'a, Database = sqlx::Postgres>, { - let project_strings = project_strings - .iter() - .map(|x| x.to_string()) - .unique() - .collect::>(); - - if project_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut redis = redis.connect().await?; - let mut exec = exec.acquire().await?; - - let mut found_projects = Vec::new(); - 
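// The closures passed to get_cached_keys_with_slug (organizations and PATs above; projects,
// users and sessions below) each begin by splitting the requested keys into numeric ids and
// secondary strings so a single query can match either form. Where the secondary key is a slug
// or username it is lowercased; token-keyed lookups (PATs, sessions) pass true for case
// sensitivity and keep the raw string. A short sketch of that split, reusing this crate's
// parse_base62; `keys` stands in for the closure's `ids` argument:
//
//     let numeric_ids: Vec<i64> = keys
//         .iter()
//         .flat_map(|k| parse_base62(&k.to_string()).ok())
//         .map(|id| id as i64)
//         .collect();
//     let slugs: Vec<String> = keys
//         .iter()
//         .map(|k| k.to_string().to_lowercase())
//         .collect();
//     // ...then query with: WHERE m.id = ANY($1) OR LOWER(m.slug) = ANY($2)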
let mut remaining_strings = project_strings.clone(); - - let mut project_ids = project_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - project_ids.append( - &mut redis - .multi_get::( - PROJECTS_SLUGS_NAMESPACE, - project_strings.iter().map(|x| x.to_string().to_lowercase()), - ) - .await? - .into_iter() - .flatten() - .collect(), - ); - if !project_ids.is_empty() { - let projects = redis - .multi_get::( - PROJECTS_NAMESPACE, - project_ids.iter().map(|x| x.to_string()), - ) - .await?; - for project in projects { - if let Some(project) = - project.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings.retain(|x| { - &to_base62(project.inner.id.0 as u64) != x - && project.inner.slug.as_ref().map(|x| x.to_lowercase()) - != Some(x.to_lowercase()) - }); - found_projects.push(project); - continue; - } - } - } - if !remaining_strings.is_empty() { - let project_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let slugs = remaining_strings - .into_iter() - .map(|x| x.to_lowercase()) - .collect::>(); - - let all_version_ids = DashSet::new(); - let versions: DashMap)>> = sqlx::query!( - " - SELECT DISTINCT mod_id, v.id as id, date_published - FROM mods m - INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3) - WHERE m.id = ANY($1) OR m.slug = ANY($2) - ", - &project_ids_parsed, - &slugs, - &*crate::models::projects::VersionStatus::iterator() - .filter(|x| x.is_listed()) - .map(|x| x.to_string()) - .collect::>() - ) - .fetch(&mut *exec) - .try_fold( - DashMap::new(), - |acc: DashMap)>>, m| { - let version_id = VersionId(m.id); - let date_published = m.date_published; - all_version_ids.insert(version_id); - acc.entry(ProjectId(m.mod_id)) - .or_default() - .push((version_id, date_published)); - async move { Ok(acc) } - }, - ) - .await?; - - let loader_field_enum_value_ids = DashSet::new(); - let version_fields: DashMap> = sqlx::query!( - " - SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value - FROM versions v - INNER JOIN version_fields vf ON v.id = vf.version_id - WHERE v.id = ANY($1) - ", - &all_version_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .try_fold( - DashMap::new(), - |acc: DashMap>, m| { - let qvf = QueryVersionField { - version_id: VersionId(m.version_id), - field_id: LoaderFieldId(m.field_id), - int_value: m.int_value, - enum_value: m.enum_value.map(LoaderFieldEnumValueId), - string_value: m.string_value, - }; - - if let Some(enum_value) = m.enum_value { - loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); - } - - acc.entry(ProjectId(m.mod_id)).or_default().push(qvf); - async move { Ok(acc) } - }, - ) - .await?; - - let loader_field_enum_values: Vec = sqlx::query!( - " - SELECT DISTINCT id, enum_id, value, ordering, created, metadata - FROM loader_field_enum_values lfev - WHERE id = ANY($1) - ORDER BY enum_id, ordering, created DESC - ", - &loader_field_enum_value_ids + let val = redis.get_cached_keys_with_slug( + PROJECTS_NAMESPACE, + PROJECTS_SLUGS_NAMESPACE, + false, + project_strings, + |ids| async move { + let mut exec = exec.acquire().await?; + let project_ids_parsed: Vec = ids .iter() - .map(|x| x.0) - .collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderFieldEnumValue { - id: LoaderFieldEnumValueId(m.id), - enum_id: LoaderFieldEnumId(m.enum_id), - value: m.value, - ordering: m.ordering, - created: m.created, - metadata: m.metadata, - }) - 
.try_collect() - .await?; + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids + .into_iter() + .map(|x| x.to_string().to_lowercase()) + .collect::>(); - let mods_gallery: DashMap> = sqlx::query!( - " - SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering - FROM mods_gallery mg - INNER JOIN mods m ON mg.mod_id = m.id - WHERE m.id = ANY($1) OR m.slug = ANY($2) - ", - &project_ids_parsed, - &slugs - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap>, m| { - acc.entry(ProjectId(m.mod_id)) - .or_default() - .push(GalleryItem { - image_url: m.image_url, - featured: m.featured.unwrap_or(false), - name: m.name, - description: m.description, - created: m.created, + let all_version_ids = DashSet::new(); + let versions: DashMap)>> = sqlx::query!( + " + SELECT DISTINCT mod_id, v.id as id, date_published + FROM mods m + INNER JOIN versions v ON m.id = v.mod_id AND v.status = ANY($3) + WHERE m.id = ANY($1) OR m.slug = ANY($2) + ", + &project_ids_parsed, + &slugs, + &*crate::models::projects::VersionStatus::iterator() + .filter(|x| x.is_listed()) + .map(|x| x.to_string()) + .collect::>() + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap)>>, m| { + let version_id = VersionId(m.id); + let date_published = m.date_published; + all_version_ids.insert(version_id); + acc.entry(ProjectId(m.mod_id)) + .or_default() + .push((version_id, date_published)); + async move { Ok(acc) } + }, + ) + .await?; + + let loader_field_enum_value_ids = DashSet::new(); + let version_fields: DashMap> = sqlx::query!( + " + SELECT DISTINCT mod_id, version_id, field_id, int_value, enum_value, string_value + FROM versions v + INNER JOIN version_fields vf ON v.id = vf.version_id + WHERE v.id = ANY($1) + ", + &all_version_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap>, m| { + let qvf = QueryVersionField { + version_id: VersionId(m.version_id), + field_id: LoaderFieldId(m.field_id), + int_value: m.int_value, + enum_value: m.enum_value.map(LoaderFieldEnumValueId), + string_value: m.string_value, + }; + + if let Some(enum_value) = m.enum_value { + loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); + } + + acc.entry(ProjectId(m.mod_id)).or_default().push(qvf); + async move { Ok(acc) } + }, + ) + .await?; + + let loader_field_enum_values: Vec = sqlx::query!( + " + SELECT DISTINCT id, enum_id, value, ordering, created, metadata + FROM loader_field_enum_values lfev + WHERE id = ANY($1) + ORDER BY enum_id, ordering, created DESC + ", + &loader_field_enum_value_ids + .iter() + .map(|x| x.0) + .collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderFieldEnumValue { + id: LoaderFieldEnumValueId(m.id), + enum_id: LoaderFieldEnumId(m.enum_id), + value: m.value, ordering: m.ordering, - }); - async move { Ok(acc) } - } - ).await?; + created: m.created, + metadata: m.metadata, + }) + .try_collect() + .await?; - let links: DashMap> = sqlx::query!( - " - SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation - FROM mods_links ml - INNER JOIN mods m ON ml.joining_mod_id = m.id - INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id - WHERE m.id = ANY($1) OR m.slug = ANY($2) - ", - &project_ids_parsed, - &slugs - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap>, m| { - acc.entry(ProjectId(m.mod_id)) - .or_default() - .push(LinkUrl { - 
platform_id: LinkPlatformId(m.platform_id), - platform_name: m.platform_name, - url: m.url, - donation: m.donation, - }); - async move { Ok(acc) } - } - ).await?; + let mods_gallery: DashMap> = sqlx::query!( + " + SELECT DISTINCT mod_id, mg.image_url, mg.featured, mg.name, mg.description, mg.created, mg.ordering + FROM mods_gallery mg + INNER JOIN mods m ON mg.mod_id = m.id + WHERE m.id = ANY($1) OR m.slug = ANY($2) + ", + &project_ids_parsed, + &slugs + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + acc.entry(ProjectId(m.mod_id)) + .or_default() + .push(GalleryItem { + image_url: m.image_url, + featured: m.featured.unwrap_or(false), + name: m.name, + description: m.description, + created: m.created, + ordering: m.ordering, + }); + async move { Ok(acc) } + } + ).await?; - #[derive(Default)] - struct VersionLoaderData { - loaders: Vec, - project_types: Vec, - games: Vec, - loader_loader_field_ids: Vec, - } + let links: DashMap> = sqlx::query!( + " + SELECT DISTINCT joining_mod_id as mod_id, joining_platform_id as platform_id, lp.name as platform_name, url, lp.donation as donation + FROM mods_links ml + INNER JOIN mods m ON ml.joining_mod_id = m.id + INNER JOIN link_platforms lp ON ml.joining_platform_id = lp.id + WHERE m.id = ANY($1) OR m.slug = ANY($2) + ", + &project_ids_parsed, + &slugs + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + acc.entry(ProjectId(m.mod_id)) + .or_default() + .push(LinkUrl { + platform_id: LinkPlatformId(m.platform_id), + platform_name: m.platform_name, + url: m.url, + donation: m.donation, + }); + async move { Ok(acc) } + } + ).await?; - let loader_field_ids = DashSet::new(); - let loaders_ptypes_games: DashMap = sqlx::query!( - " - SELECT DISTINCT mod_id, - ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, - ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, - ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, - ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields - FROM versions v - INNER JOIN loaders_versions lv ON v.id = lv.version_id - INNER JOIN loaders l ON lv.loader_id = l.id - INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id - INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id - INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id - INNER JOIN games g ON lptg.game_id = g.id - LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id - WHERE v.id = ANY($1) - GROUP BY mod_id - ", - &all_version_ids.iter().map(|x| x.0).collect::>() - ).fetch(&mut *exec) - .map_ok(|m| { - let project_id = ProjectId(m.mod_id); - - // Add loader fields to the set we need to fetch - let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); - for loader_field_id in loader_loader_field_ids.iter() { - loader_field_ids.insert(*loader_field_id); + #[derive(Default)] + struct VersionLoaderData { + loaders: Vec, + project_types: Vec, + games: Vec, + loader_loader_field_ids: Vec, } - // Add loader + loader associated data to the map - let version_loader_data = VersionLoaderData { - loaders: m.loaders.unwrap_or_default(), - project_types: m.project_types.unwrap_or_default(), - games: m.games.unwrap_or_default(), - loader_loader_field_ids, - }; + let loader_field_ids = DashSet::new(); + let loaders_ptypes_games: DashMap = sqlx::query!( + " + SELECT DISTINCT mod_id, + ARRAY_AGG(DISTINCT 
l.loader) filter (where l.loader is not null) loaders, + ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, + ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, + ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields + FROM versions v + INNER JOIN loaders_versions lv ON v.id = lv.version_id + INNER JOIN loaders l ON lv.loader_id = l.id + INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id + INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id + INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id + INNER JOIN games g ON lptg.game_id = g.id + LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id + WHERE v.id = ANY($1) + GROUP BY mod_id + ", + &all_version_ids.iter().map(|x| x.0).collect::>() + ).fetch(&mut *exec) + .map_ok(|m| { + let project_id = ProjectId(m.mod_id); - (project_id, version_loader_data) + // Add loader fields to the set we need to fetch + let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); + for loader_field_id in loader_loader_field_ids.iter() { + loader_field_ids.insert(*loader_field_id); + } - } - ).try_collect().await?; + // Add loader + loader associated data to the map + let version_loader_data = VersionLoaderData { + loaders: m.loaders.unwrap_or_default(), + project_types: m.project_types.unwrap_or_default(), + games: m.games.unwrap_or_default(), + loader_loader_field_ids, + }; - let loader_fields: Vec = sqlx::query!( - " - SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional - FROM loader_fields lf - WHERE id = ANY($1) - ", - &loader_field_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderField { - id: LoaderFieldId(m.id), - field: m.field, - field_type: m.field_type, - enum_type: m.enum_type.map(LoaderFieldEnumId), - min_val: m.min_val, - max_val: m.max_val, - optional: m.optional, - }) - .try_collect() - .await?; + (project_id, version_loader_data) - let db_projects: Vec = sqlx::query!( - " - SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows, - m.icon_url icon_url, m.description description, m.published published, - m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status, - m.license_url license_url, - m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body, - m.webhook_sent, m.color, - t.id thread_id, m.monetization_status monetization_status, - ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories, - ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories - FROM mods m - INNER JOIN threads t ON t.mod_id = m.id - LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id - LEFT JOIN categories c ON mc.joining_category_id = c.id - WHERE m.id = ANY($1) OR m.slug = ANY($2) - GROUP BY t.id, m.id; - ", - &project_ids_parsed, - &slugs, - ) - .fetch_many(&mut *exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| { + } + ).try_collect().await?; + + let loader_fields: Vec = sqlx::query!( + " + SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional + FROM loader_fields lf + WHERE id = ANY($1) + ", + &loader_field_ids.iter().map(|x| 
x.0).collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderField { + id: LoaderFieldId(m.id), + field: m.field, + field_type: m.field_type, + enum_type: m.enum_type.map(LoaderFieldEnumId), + min_val: m.min_val, + max_val: m.max_val, + optional: m.optional, + }) + .try_collect() + .await?; + + let projects = sqlx::query!( + " + SELECT m.id id, m.name name, m.summary summary, m.downloads downloads, m.follows follows, + m.icon_url icon_url, m.description description, m.published published, + m.updated updated, m.approved approved, m.queued, m.status status, m.requested_status requested_status, + m.license_url license_url, + m.team_id team_id, m.organization_id organization_id, m.license license, m.slug slug, m.moderation_message moderation_message, m.moderation_message_body moderation_message_body, + m.webhook_sent, m.color, + t.id thread_id, m.monetization_status monetization_status, + ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is false) categories, + ARRAY_AGG(DISTINCT c.category) filter (where c.category is not null and mc.is_additional is true) additional_categories + FROM mods m + INNER JOIN threads t ON t.mod_id = m.id + LEFT JOIN mods_categories mc ON mc.joining_mod_id = m.id + LEFT JOIN categories c ON mc.joining_category_id = c.id + WHERE m.id = ANY($1) OR m.slug = ANY($2) + GROUP BY t.id, m.id; + ", + &project_ids_parsed, + &slugs, + ) + .fetch(&mut *exec) + .try_fold(DashMap::new(), |acc, m| { let id = m.id; let project_id = ProjectId(id); let VersionLoaderData { @@ -815,54 +769,54 @@ impl Project { project_types, games, loader_loader_field_ids, - } = loaders_ptypes_games.remove(&project_id).map(|x|x.1).unwrap_or_default(); + } = loaders_ptypes_games.remove(&project_id).map(|x|x.1).unwrap_or_default(); let mut versions = versions.remove(&project_id).map(|x| x.1).unwrap_or_default(); let mut gallery = mods_gallery.remove(&project_id).map(|x| x.1).unwrap_or_default(); let urls = links.remove(&project_id).map(|x| x.1).unwrap_or_default(); let version_fields = version_fields.remove(&project_id).map(|x| x.1).unwrap_or_default(); let loader_fields = loader_fields.iter() - .filter(|x| loader_loader_field_ids.contains(&x.id)) - .collect::>(); + .filter(|x| loader_loader_field_ids.contains(&x.id)) + .collect::>(); - QueryProject { - inner: Project { - id: ProjectId(id), - team_id: TeamId(m.team_id), - organization_id: m.organization_id.map(OrganizationId), - name: m.name.clone(), - summary: m.summary.clone(), - downloads: m.downloads, - icon_url: m.icon_url.clone(), - published: m.published, - updated: m.updated, - license_url: m.license_url.clone(), - status: ProjectStatus::from_string( - &m.status, - ), - requested_status: m.requested_status.map(|x| ProjectStatus::from_string( - &x, - )), - license: m.license.clone(), - slug: m.slug.clone(), - description: m.description.clone(), - follows: m.follows, - moderation_message: m.moderation_message, - moderation_message_body: m.moderation_message_body, - approved: m.approved, - webhook_sent: m.webhook_sent, - color: m.color.map(|x| x as u32), - queued: m.queued, - monetization_status: MonetizationStatus::from_string( - &m.monetization_status, - ), - loaders, - }, - categories: m.categories.unwrap_or_default(), - additional_categories: m.additional_categories.unwrap_or_default(), - project_types, - games, - versions: { + let project = QueryProject { + inner: Project { + id: ProjectId(id), + team_id: TeamId(m.team_id), + organization_id: m.organization_id.map(OrganizationId), + name: 
m.name.clone(), + summary: m.summary.clone(), + downloads: m.downloads, + icon_url: m.icon_url.clone(), + published: m.published, + updated: m.updated, + license_url: m.license_url.clone(), + status: ProjectStatus::from_string( + &m.status, + ), + requested_status: m.requested_status.map(|x| ProjectStatus::from_string( + &x, + )), + license: m.license.clone(), + slug: m.slug.clone(), + description: m.description.clone(), + follows: m.follows, + moderation_message: m.moderation_message, + moderation_message_body: m.moderation_message_body, + approved: m.approved, + webhook_sent: m.webhook_sent, + color: m.color.map(|x| x as u32), + queued: m.queued, + monetization_status: MonetizationStatus::from_string( + &m.monetization_status, + ), + loaders, + }, + categories: m.categories.unwrap_or_default(), + additional_categories: m.additional_categories.unwrap_or_default(), + project_types, + games, + versions: { // Each version is a tuple of (VersionId, DateTime) versions.sort_by(|a, b| a.1.cmp(&b.1)); versions.into_iter().map(|x| x.0).collect() @@ -872,32 +826,20 @@ impl Project { gallery }, urls, - aggregate_version_fields: VersionField::from_query_json(version_fields, &loader_fields, &loader_field_enum_values, true), - thread_id: ThreadId(m.thread_id), - }})) - }) - .try_collect::>() - .await?; + aggregate_version_fields: VersionField::from_query_json(version_fields, &loader_fields, &loader_field_enum_values, true), + thread_id: ThreadId(m.thread_id), + }; - for project in db_projects { - redis - .set_serialized_to_json(PROJECTS_NAMESPACE, project.inner.id.0, &project, None) + acc.insert(m.id, (m.slug, project)); + async move { Ok(acc) } + }) .await?; - if let Some(slug) = &project.inner.slug { - redis - .set( - PROJECTS_SLUGS_NAMESPACE, - &slug.to_lowercase(), - &project.inner.id.0.to_string(), - None, - ) - .await?; - } - found_projects.push(project); - } - } - Ok(found_projects) + Ok(projects) + }, + ).await?; + + Ok(val) } pub async fn get_dependencies<'a, E>( diff --git a/src/database/models/session_item.rs b/src/database/models/session_item.rs index f27af5bb6..dac42b1e3 100644 --- a/src/database/models/session_item.rs +++ b/src/database/models/session_item.rs @@ -1,9 +1,12 @@ use super::ids::*; use crate::database::models::DatabaseError; use crate::database::redis::RedisPool; -use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use crate::models::ids::base62_impl::parse_base62; use chrono::{DateTime, Utc}; +use dashmap::DashMap; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; const SESSIONS_NAMESPACE: &str = "sessions"; const SESSIONS_IDS_NAMESPACE: &str = "sessions_ids"; @@ -79,7 +82,7 @@ pub struct Session { } impl Session { - pub async fn get<'a, E, T: ToString>( + pub async fn get<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( id: T, exec: E, redis: &RedisPool, @@ -120,7 +123,7 @@ impl Session { Session::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( session_strings: &[T], exec: E, redis: &RedisPool, @@ -130,109 +133,60 @@ impl Session { { use futures::TryStreamExt; - let mut redis = redis.connect().await?; - - if session_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_sessions = Vec::new(); - let mut remaining_strings = session_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut session_ids = session_strings - .iter() - .flat_map(|x| 
parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - session_ids.append( - &mut redis - .multi_get::( - SESSIONS_IDS_NAMESPACE, - session_strings.iter().map(|x| x.to_string()), + let val = redis.get_cached_keys_with_slug( + SESSIONS_NAMESPACE, + SESSIONS_IDS_NAMESPACE, + true, + session_strings, + |ids| async move { + let session_ids: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids + .into_iter() + .map(|x| x.to_string()) + .collect::>(); + let db_sessions = sqlx::query!( + " + SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform, + city, country, ip, user_agent + FROM sessions + WHERE id = ANY($1) OR session = ANY($2) + ORDER BY created DESC + ", + &session_ids, + &slugs, ) - .await? - .into_iter() - .flatten() - .collect(), - ); + .fetch(exec) + .try_fold(DashMap::new(), |acc, x| { + let session = Session { + id: SessionId(x.id), + session: x.session.clone(), + user_id: UserId(x.user_id), + created: x.created, + last_login: x.last_login, + expires: x.expires, + refresh_expires: x.refresh_expires, + os: x.os, + platform: x.platform, + city: x.city, + country: x.country, + ip: x.ip, + user_agent: x.user_agent, + }; - if !session_ids.is_empty() { - let sessions = redis - .multi_get::( - SESSIONS_NAMESPACE, - session_ids.iter().map(|x| x.to_string()), - ) - .await?; - for session in sessions { - if let Some(session) = - session.and_then(|x| serde_json::from_str::(&x).ok()) - { - remaining_strings - .retain(|x| &to_base62(session.id.0 as u64) != x && &session.session != x); - found_sessions.push(session); - continue; - } - } - } + acc.insert(x.id, (Some(x.session), session)); - if !remaining_strings.is_empty() { - let session_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let db_sessions: Vec = sqlx::query!( - " - SELECT id, user_id, session, created, last_login, expires, refresh_expires, os, platform, - city, country, ip, user_agent - FROM sessions - WHERE id = ANY($1) OR session = ANY($2) - ORDER BY created DESC - ", - &session_ids_parsed, - &remaining_strings.into_iter().map(|x| x.to_string()).collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|x| Session { - id: SessionId(x.id), - session: x.session, - user_id: UserId(x.user_id), - created: x.created, - last_login: x.last_login, - expires: x.expires, - refresh_expires: x.refresh_expires, - os: x.os, - platform: x.platform, - city: x.city, - country: x.country, - ip: x.ip, - user_agent: x.user_agent, - })) - }) - .try_collect::>() - .await?; - - for session in db_sessions { - redis - .set_serialized_to_json(SESSIONS_NAMESPACE, session.id.0, &session, None) + async move { Ok(acc) } + }) .await?; - redis - .set( - SESSIONS_IDS_NAMESPACE, - &session.session, - &session.id.0.to_string(), - None, - ) - .await?; - found_sessions.push(session); - } - } - Ok(found_sessions) + Ok(db_sessions) + }).await?; + + Ok(val) } pub async fn get_user_sessions<'a, E>( diff --git a/src/database/models/team_item.rs b/src/database/models/team_item.rs index b6353d45e..b43fdd7b6 100644 --- a/src/database/models/team_item.rs +++ b/src/database/models/team_item.rs @@ -3,6 +3,8 @@ use crate::{ database::redis::RedisPool, models::teams::{OrganizationPermissions, ProjectPermissions}, }; +use dashmap::DashMap; +use futures::TryStreamExt; use itertools::Itertools; use rust_decimal::Decimal; use serde::{Deserialize, 
Serialize}; @@ -203,87 +205,56 @@ impl TeamMember { where E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy, { - use futures::stream::TryStreamExt; - if team_ids.is_empty() { return Ok(Vec::new()); } - let mut redis = redis.connect().await?; + let val = redis.get_cached_keys( + TEAMS_NAMESPACE, + &team_ids.iter().map(|x| x.0).collect::>(), + |team_ids| async move { + let teams = sqlx::query!( + " + SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions, + accepted, payouts_split, + ordering, user_id + FROM team_members + WHERE team_id = ANY($1) + ORDER BY team_id, ordering; + ", + &team_ids + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc: DashMap>, m| { + let member = TeamMember { + id: TeamMemberId(m.id), + team_id: TeamId(m.team_id), + role: m.member_role, + is_owner: m.is_owner, + permissions: ProjectPermissions::from_bits(m.permissions as u64) + .unwrap_or_default(), + organization_permissions: m + .organization_permissions + .map(|p| OrganizationPermissions::from_bits(p as u64).unwrap_or_default()), + accepted: m.accepted, + user_id: UserId(m.user_id), + payouts_split: m.payouts_split, + ordering: m.ordering, + }; - let mut team_ids_parsed: Vec = team_ids.iter().map(|x| x.0).collect(); + acc.entry(m.team_id) + .or_default() + .push(member); - let mut found_teams = Vec::new(); - - let teams = redis - .multi_get::( - TEAMS_NAMESPACE, - team_ids_parsed.iter().map(|x| x.to_string()), - ) - .await?; - - for team_raw in teams { - if let Some(mut team) = team_raw - .clone() - .and_then(|x| serde_json::from_str::>(&x).ok()) - { - if let Some(team_id) = team.first().map(|x| x.team_id) { - team_ids_parsed.retain(|x| &team_id.0 != x); - } - - found_teams.append(&mut team); - continue; - } - } - - if !team_ids_parsed.is_empty() { - let teams: Vec = sqlx::query!( - " - SELECT id, team_id, role AS member_role, is_owner, permissions, organization_permissions, - accepted, payouts_split, - ordering, user_id - FROM team_members - WHERE team_id = ANY($1) - ORDER BY team_id, ordering; - ", - &team_ids_parsed - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|m| TeamMember { - id: TeamMemberId(m.id), - team_id: TeamId(m.team_id), - role: m.member_role, - is_owner: m.is_owner, - permissions: ProjectPermissions::from_bits(m.permissions as u64) - .unwrap_or_default(), - organization_permissions: m - .organization_permissions - .map(|p| OrganizationPermissions::from_bits(p as u64).unwrap_or_default()), - accepted: m.accepted, - user_id: UserId(m.user_id), - payouts_split: m.payouts_split, - ordering: m.ordering, - })) - }) - .try_collect::>() - .await?; - - for (id, mut members) in teams - .into_iter() - .group_by(|x| x.team_id) - .into_iter() - .map(|(key, group)| (key, group.collect::>())) - .collect::>() - { - redis - .set_serialized_to_json(TEAMS_NAMESPACE, id.0, &members, None) + async move { Ok(acc) } + }) .await?; - found_teams.append(&mut members); - } - } - Ok(found_teams) + Ok(teams) + }, + ).await?; + + Ok(val.into_iter().flatten().collect()) } pub async fn clear_cache(id: TeamId, redis: &RedisPool) -> Result<(), super::DatabaseError> { @@ -315,8 +286,6 @@ impl TeamMember { where E: sqlx::Executor<'a, Database = sqlx::Postgres>, { - use futures::stream::TryStreamExt; - let team_ids_parsed: Vec = team_ids.iter().map(|x| x.0).collect(); let team_members = sqlx::query!( diff --git a/src/database/models/user_item.rs b/src/database/models/user_item.rs index 6f821db84..06b73d526 100644 --- a/src/database/models/user_item.rs +++ 
b/src/database/models/user_item.rs @@ -5,8 +5,11 @@ use crate::database::redis::RedisPool; use crate::models::ids::base62_impl::{parse_base62, to_base62}; use crate::models::users::Badges; use chrono::{DateTime, Utc}; +use dashmap::DashMap; use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; +use std::hash::Hash; const USERS_NAMESPACE: &str = "users"; const USER_USERNAMES_NAMESPACE: &str = "users_usernames"; @@ -132,7 +135,7 @@ impl User { User::get_many(&ids, exec, redis).await } - pub async fn get_many<'a, E, T: ToString>( + pub async fn get_many<'a, E, T: Display + Hash + Eq + PartialEq + Clone + Debug>( users_strings: &[T], exec: E, redis: &RedisPool, @@ -142,123 +145,73 @@ impl User { { use futures::TryStreamExt; - let mut redis = redis.connect().await?; - - if users_strings.is_empty() { - return Ok(Vec::new()); - } - - let mut found_users = Vec::new(); - let mut remaining_strings = users_strings - .iter() - .map(|x| x.to_string()) - .collect::>(); - - let mut user_ids = users_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).map(|x| x as i64)) - .collect::>(); - - user_ids.append( - &mut redis - .multi_get::( - USER_USERNAMES_NAMESPACE, - users_strings.iter().map(|x| x.to_string().to_lowercase()), - ) - .await? - .into_iter() - .flatten() - .collect(), - ); - - if !user_ids.is_empty() { - let users = redis - .multi_get::(USERS_NAMESPACE, user_ids.iter().map(|x| x.to_string())) - .await?; - for user in users { - if let Some(user) = user.and_then(|x| serde_json::from_str::(&x).ok()) { - remaining_strings.retain(|x| { - &to_base62(user.id.0 as u64) != x - && user.username.to_lowercase() != x.to_lowercase() - }); - found_users.push(user); - continue; - } - } - } - - if !remaining_strings.is_empty() { - let user_ids_parsed: Vec = remaining_strings - .iter() - .flat_map(|x| parse_base62(&x.to_string()).ok()) - .map(|x| x as i64) - .collect(); - let db_users: Vec = sqlx::query!( - " - SELECT id, name, email, - avatar_url, username, bio, - created, role, badges, - balance, - github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id, - email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email, - venmo_handle - FROM users - WHERE id = ANY($1) OR LOWER(username) = ANY($2) - ", - &user_ids_parsed, - &remaining_strings + let val = redis.get_cached_keys_with_slug( + USERS_NAMESPACE, + USER_USERNAMES_NAMESPACE, + false, + users_strings, + |ids| async move { + let user_ids: Vec = ids + .iter() + .flat_map(|x| parse_base62(&x.to_string()).ok()) + .map(|x| x as i64) + .collect(); + let slugs = ids .into_iter() .map(|x| x.to_string().to_lowercase()) - .collect::>(), - ) - .fetch_many(exec) - .try_filter_map(|e| async { - Ok(e.right().map(|u| User { - id: UserId(u.id), - github_id: u.github_id, - discord_id: u.discord_id, - gitlab_id: u.gitlab_id, - google_id: u.google_id, - steam_id: u.steam_id, - microsoft_id: u.microsoft_id, - name: u.name, - email: u.email, - email_verified: u.email_verified, - avatar_url: u.avatar_url, - username: u.username, - bio: u.bio, - created: u.created, - role: u.role, - badges: Badges::from_bits(u.badges as u64).unwrap_or_default(), - balance: u.balance, - password: u.password, - paypal_id: u.paypal_id, - paypal_country: u.paypal_country, - paypal_email: u.paypal_email, - venmo_handle: u.venmo_handle, - totp_secret: u.totp_secret, - })) - }) - .try_collect::>() - .await?; + .collect::>(); - for user in db_users { - redis - .set_serialized_to_json(USERS_NAMESPACE, user.id.0, &user, 
None) - .await?; - redis - .set( - USER_USERNAMES_NAMESPACE, - &user.username.to_lowercase(), - &user.id.0.to_string(), - None, - ) - .await?; - found_users.push(user); - } - } + let users = sqlx::query!( + " + SELECT id, name, email, + avatar_url, username, bio, + created, role, badges, + balance, + github_id, discord_id, gitlab_id, google_id, steam_id, microsoft_id, + email_verified, password, totp_secret, paypal_id, paypal_country, paypal_email, + venmo_handle + FROM users + WHERE id = ANY($1) OR LOWER(username) = ANY($2) + ", + &user_ids, + &slugs, + ) + .fetch(exec) + .try_fold(DashMap::new(), |acc, u| { + let user = User { + id: UserId(u.id), + github_id: u.github_id, + discord_id: u.discord_id, + gitlab_id: u.gitlab_id, + google_id: u.google_id, + steam_id: u.steam_id, + microsoft_id: u.microsoft_id, + name: u.name, + email: u.email, + email_verified: u.email_verified, + avatar_url: u.avatar_url, + username: u.username.clone(), + bio: u.bio, + created: u.created, + role: u.role, + badges: Badges::from_bits(u.badges as u64).unwrap_or_default(), + balance: u.balance, + password: u.password, + paypal_id: u.paypal_id, + paypal_country: u.paypal_country, + paypal_email: u.paypal_email, + venmo_handle: u.venmo_handle, + totp_secret: u.totp_secret, + }; - Ok(found_users) + acc.insert(u.id, (Some(u.username), user)); + async move { Ok(acc) } + }) + .await?; + + Ok(users) + }).await?; + Ok(val) } pub async fn get_email<'a, E>(email: &str, exec: E) -> Result, sqlx::Error> diff --git a/src/database/models/version_item.rs b/src/database/models/version_item.rs index eeb6a965d..5d654ab21 100644 --- a/src/database/models/version_item.rs +++ b/src/database/models/version_item.rs @@ -8,6 +8,7 @@ use crate::database::redis::RedisPool; use crate::models::projects::{FileType, VersionStatus}; use chrono::{DateTime, Utc}; use dashmap::{DashMap, DashSet}; +use futures::TryStreamExt; use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; @@ -469,301 +470,263 @@ impl Version { where E: sqlx::Acquire<'a, Database = sqlx::Postgres>, { - let version_ids = version_ids - .iter() - .unique() - .copied() - .collect::>(); + let mut val = redis.get_cached_keys( + VERSIONS_NAMESPACE, + &version_ids.iter().map(|x| x.0).collect::>(), + |version_ids| async move { + let mut exec = exec.acquire().await?; - use futures::stream::TryStreamExt; + let loader_field_enum_value_ids = DashSet::new(); + let version_fields: DashMap> = sqlx::query!( + " + SELECT version_id, field_id, int_value, enum_value, string_value + FROM version_fields + WHERE version_id = ANY($1) + ", + &version_ids + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap>, m| { + let qvf = QueryVersionField { + version_id: VersionId(m.version_id), + field_id: LoaderFieldId(m.field_id), + int_value: m.int_value, + enum_value: m.enum_value.map(LoaderFieldEnumValueId), + string_value: m.string_value, + }; - if version_ids.is_empty() { - return Ok(Vec::new()); - } + if let Some(enum_value) = m.enum_value { + loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); + } - let mut exec = exec.acquire().await?; - let mut redis = redis.connect().await?; + acc.entry(VersionId(m.version_id)).or_default().push(qvf); + async move { Ok(acc) } + }, + ) + .await?; - let mut version_ids_parsed: Vec = version_ids.iter().map(|x| x.0).collect(); + #[derive(Default)] + struct VersionLoaderData { + loaders: Vec, + project_types: Vec, + games: Vec, + loader_loader_field_ids: Vec, + } - let mut found_versions = Vec::new(); 
+ let loader_field_ids = DashSet::new(); + let loaders_ptypes_games: DashMap = sqlx::query!( + " + SELECT DISTINCT version_id, + ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, + ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, + ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, + ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields + FROM versions v + INNER JOIN loaders_versions lv ON v.id = lv.version_id + INNER JOIN loaders l ON lv.loader_id = l.id + INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id + INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id + INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id + INNER JOIN games g ON lptg.game_id = g.id + LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id + WHERE v.id = ANY($1) + GROUP BY version_id + ", + &version_ids + ).fetch(&mut *exec) + .map_ok(|m| { + let version_id = VersionId(m.version_id); - let versions = redis - .multi_get::( - VERSIONS_NAMESPACE, - version_ids_parsed - .clone() - .iter() - .map(|x| x.to_string()) - .collect::>(), - ) - .await?; + // Add loader fields to the set we need to fetch + let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); + for loader_field_id in loader_loader_field_ids.iter() { + loader_field_ids.insert(*loader_field_id); + } - for version in versions { - if let Some(version) = - version.and_then(|x| serde_json::from_str::(&x).ok()) - { - version_ids_parsed.retain(|x| &version.inner.id.0 != x); - found_versions.push(version); - continue; - } - } + // Add loader + loader associated data to the map + let version_loader_data = VersionLoaderData { + loaders: m.loaders.unwrap_or_default(), + project_types: m.project_types.unwrap_or_default(), + games: m.games.unwrap_or_default(), + loader_loader_field_ids, + }; + (version_id,version_loader_data) - if !version_ids_parsed.is_empty() { - let loader_field_enum_value_ids = DashSet::new(); - let version_fields: DashMap> = sqlx::query!( - " - SELECT version_id, field_id, int_value, enum_value, string_value - FROM version_fields - WHERE version_id = ANY($1) - ", - &version_ids_parsed - ) - .fetch(&mut *exec) - .try_fold( - DashMap::new(), - |acc: DashMap>, m| { - let qvf = QueryVersionField { - version_id: VersionId(m.version_id), - field_id: LoaderFieldId(m.field_id), - int_value: m.int_value, - enum_value: m.enum_value.map(LoaderFieldEnumValueId), - string_value: m.string_value, - }; - - if let Some(enum_value) = m.enum_value { - loader_field_enum_value_ids.insert(LoaderFieldEnumValueId(enum_value)); } + ).try_collect().await?; - acc.entry(VersionId(m.version_id)).or_default().push(qvf); - async move { Ok(acc) } - }, - ) - .await?; + // Fetch all loader fields from any version + let loader_fields: Vec = sqlx::query!( + " + SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional + FROM loader_fields lf + WHERE id = ANY($1) + ", + &loader_field_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderField { + id: LoaderFieldId(m.id), + field: m.field, + field_type: m.field_type, + enum_type: m.enum_type.map(LoaderFieldEnumId), + min_val: m.min_val, + max_val: m.max_val, + optional: m.optional, + }) + .try_collect() + .await?; - #[derive(Default)] - struct VersionLoaderData { - loaders: Vec, - project_types: Vec, - games: Vec, - 
loader_loader_field_ids: Vec, - } + let loader_field_enum_values: Vec = sqlx::query!( + " + SELECT DISTINCT id, enum_id, value, ordering, created, metadata + FROM loader_field_enum_values lfev + WHERE id = ANY($1) + ORDER BY enum_id, ordering, created ASC + ", + &loader_field_enum_value_ids + .iter() + .map(|x| x.0) + .collect::>() + ) + .fetch(&mut *exec) + .map_ok(|m| QueryLoaderFieldEnumValue { + id: LoaderFieldEnumValueId(m.id), + enum_id: LoaderFieldEnumId(m.enum_id), + value: m.value, + ordering: m.ordering, + created: m.created, + metadata: m.metadata, + }) + .try_collect() + .await?; - let loader_field_ids = DashSet::new(); - let loaders_ptypes_games: DashMap = sqlx::query!( - " - SELECT DISTINCT version_id, - ARRAY_AGG(DISTINCT l.loader) filter (where l.loader is not null) loaders, - ARRAY_AGG(DISTINCT pt.name) filter (where pt.name is not null) project_types, - ARRAY_AGG(DISTINCT g.slug) filter (where g.slug is not null) games, - ARRAY_AGG(DISTINCT lfl.loader_field_id) filter (where lfl.loader_field_id is not null) loader_fields - FROM versions v - INNER JOIN loaders_versions lv ON v.id = lv.version_id - INNER JOIN loaders l ON lv.loader_id = l.id - INNER JOIN loaders_project_types lpt ON lpt.joining_loader_id = l.id - INNER JOIN project_types pt ON pt.id = lpt.joining_project_type_id - INNER JOIN loaders_project_types_games lptg ON lptg.loader_id = l.id AND lptg.project_type_id = pt.id - INNER JOIN games g ON lptg.game_id = g.id - LEFT JOIN loader_fields_loaders lfl ON lfl.loader_id = l.id - WHERE v.id = ANY($1) - GROUP BY version_id - ", - &version_ids_parsed - ).fetch(&mut *exec) - .map_ok(|m| { - let version_id = VersionId(m.version_id); - - // Add loader fields to the set we need to fetch - let loader_loader_field_ids = m.loader_fields.unwrap_or_default().into_iter().map(LoaderFieldId).collect::>(); - for loader_field_id in loader_loader_field_ids.iter() { - loader_field_ids.insert(*loader_field_id); + #[derive(Deserialize)] + struct Hash { + pub file_id: FileId, + pub algorithm: String, + pub hash: String, } - // Add loader + loader associated data to the map - let version_loader_data = VersionLoaderData { - loaders: m.loaders.unwrap_or_default(), - project_types: m.project_types.unwrap_or_default(), - games: m.games.unwrap_or_default(), - loader_loader_field_ids, - }; - (version_id,version_loader_data) - + #[derive(Deserialize)] + struct File { + pub id: FileId, + pub url: String, + pub filename: String, + pub primary: bool, + pub size: u32, + pub file_type: Option, } - ).try_collect().await?; - // Fetch all loader fields from any version - let loader_fields: Vec = sqlx::query!( - " - SELECT DISTINCT id, field, field_type, enum_type, min_val, max_val, optional - FROM loader_fields lf - WHERE id = ANY($1) - ", - &loader_field_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderField { - id: LoaderFieldId(m.id), - field: m.field, - field_type: m.field_type, - enum_type: m.enum_type.map(LoaderFieldEnumId), - min_val: m.min_val, - max_val: m.max_val, - optional: m.optional, - }) - .try_collect() - .await?; + let file_ids = DashSet::new(); + let reverse_file_map = DashMap::new(); + let files : DashMap> = sqlx::query!( + " + SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type + FROM files f + WHERE f.version_id = ANY($1) + ", + &version_ids + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + let file = File { + id: FileId(m.id), + url: m.url, + filename: m.filename, + primary: 
m.is_primary, + size: m.size as u32, + file_type: m.file_type.map(|x| FileType::from_string(&x)), + }; - let loader_field_enum_values: Vec = sqlx::query!( - " - SELECT DISTINCT id, enum_id, value, ordering, created, metadata - FROM loader_field_enum_values lfev - WHERE id = ANY($1) - ORDER BY enum_id, ordering, created ASC - ", - &loader_field_enum_value_ids - .iter() - .map(|x| x.0) - .collect::>() - ) - .fetch(&mut *exec) - .map_ok(|m| QueryLoaderFieldEnumValue { - id: LoaderFieldEnumValueId(m.id), - enum_id: LoaderFieldEnumId(m.enum_id), - value: m.value, - ordering: m.ordering, - created: m.created, - metadata: m.metadata, - }) - .try_collect() - .await?; + file_ids.insert(FileId(m.id)); + reverse_file_map.insert(FileId(m.id), VersionId(m.version_id)); - #[derive(Deserialize)] - struct Hash { - pub file_id: FileId, - pub algorithm: String, - pub hash: String, - } - - #[derive(Deserialize)] - struct File { - pub id: FileId, - pub url: String, - pub filename: String, - pub primary: bool, - pub size: u32, - pub file_type: Option, - } - - let file_ids = DashSet::new(); - let reverse_file_map = DashMap::new(); - let files : DashMap> = sqlx::query!( - " - SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type - FROM files f - WHERE f.version_id = ANY($1) - ", - &version_ids_parsed - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap>, m| { - let file = File { - id: FileId(m.id), - url: m.url, - filename: m.filename, - primary: m.is_primary, - size: m.size as u32, - file_type: m.file_type.map(|x| FileType::from_string(&x)), - }; - - file_ids.insert(FileId(m.id)); - reverse_file_map.insert(FileId(m.id), VersionId(m.version_id)); - - acc.entry(VersionId(m.version_id)) - .or_default() - .push(file); - async move { Ok(acc) } - } - ).await?; - - let hashes: DashMap> = sqlx::query!( - " - SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash - FROM hashes - WHERE file_id = ANY($1) - ", - &file_ids.iter().map(|x| x.0).collect::>() - ) - .fetch(&mut *exec) - .try_fold(DashMap::new(), |acc: DashMap>, m| { - if let Some(found_hash) = m.hash { - let hash = Hash { - file_id: FileId(m.file_id), - algorithm: m.algorithm, - hash: found_hash, - }; - - if let Some(version_id) = reverse_file_map.get(&FileId(m.file_id)) { - acc.entry(*version_id).or_default().push(hash); + acc.entry(VersionId(m.version_id)) + .or_default() + .push(file); + async move { Ok(acc) } } - } - async move { Ok(acc) } - }) - .await?; + ).await?; - let dependencies : DashMap> = sqlx::query!( - " - SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type - FROM dependencies d - WHERE dependent_id = ANY($1) - ", - &version_ids_parsed - ).fetch(&mut *exec) - .try_fold(DashMap::new(), |acc : DashMap<_,Vec>, m| { - let dependency = QueryDependency { - project_id: m.dependency_project_id.map(ProjectId), - version_id: m.dependency_version_id.map(VersionId), - file_name: m.file_name, - dependency_type: m.dependency_type, - }; + let hashes: DashMap> = sqlx::query!( + " + SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash + FROM hashes + WHERE file_id = ANY($1) + ", + &file_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold(DashMap::new(), |acc: DashMap>, m| { + if let Some(found_hash) = m.hash { + let hash = Hash { + file_id: FileId(m.file_id), + algorithm: m.algorithm, + hash: found_hash, + }; - 
acc.entry(VersionId(m.version_id)) - .or_default() - .push(dependency); - async move { Ok(acc) } - } - ).await?; + if let Some(version_id) = reverse_file_map.get(&FileId(m.file_id)) { + acc.entry(*version_id).or_default().push(hash); + } + } + async move { Ok(acc) } + }) + .await?; - let db_versions: Vec = sqlx::query!( - " - SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number, - v.changelog changelog, v.date_published date_published, v.downloads downloads, - v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering - FROM versions v - WHERE v.id = ANY($1) - ORDER BY v.ordering ASC NULLS LAST, v.date_published ASC; - ", - &version_ids_parsed - ) - .fetch_many(&mut *exec) - .try_filter_map(|e| async { - Ok(e.right().map(|v| - { + let dependencies : DashMap> = sqlx::query!( + " + SELECT DISTINCT dependent_id as version_id, d.mod_dependency_id as dependency_project_id, d.dependency_id as dependency_version_id, d.dependency_file_name as file_name, d.dependency_type as dependency_type + FROM dependencies d + WHERE dependent_id = ANY($1) + ", + &version_ids + ).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap<_,Vec>, m| { + let dependency = QueryDependency { + project_id: m.dependency_project_id.map(ProjectId), + version_id: m.dependency_version_id.map(VersionId), + file_name: m.file_name, + dependency_type: m.dependency_type, + }; + + acc.entry(VersionId(m.version_id)) + .or_default() + .push(dependency); + async move { Ok(acc) } + } + ).await?; + + let res = sqlx::query!( + " + SELECT v.id id, v.mod_id mod_id, v.author_id author_id, v.name version_name, v.version_number version_number, + v.changelog changelog, v.date_published date_published, v.downloads downloads, + v.version_type version_type, v.featured featured, v.status status, v.requested_status requested_status, v.ordering ordering + FROM versions v + WHERE v.id = ANY($1); + ", + &version_ids + ) + .fetch(&mut *exec) + .try_fold(DashMap::new(), |acc, v| { let version_id = VersionId(v.id); let VersionLoaderData { loaders, project_types, games, loader_loader_field_ids, - } = loaders_ptypes_games.remove(&version_id).map(|x|x.1).unwrap_or_default(); + } = loaders_ptypes_games.remove(&version_id).map(|x|x.1).unwrap_or_default(); let files = files.remove(&version_id).map(|x|x.1).unwrap_or_default(); let hashes = hashes.remove(&version_id).map(|x|x.1).unwrap_or_default(); let version_fields = version_fields.remove(&version_id).map(|x|x.1).unwrap_or_default(); let dependencies = dependencies.remove(&version_id).map(|x|x.1).unwrap_or_default(); let loader_fields = loader_fields.iter() - .filter(|x| loader_loader_field_ids.contains(&x.id)) - .collect::>(); + .filter(|x| loader_loader_field_ids.contains(&x.id)) + .collect::>(); - QueryVersion { + let query_version = QueryVersion { inner: Version { id: VersionId(v.id), project_id: ProjectId(v.mod_id), @@ -821,22 +784,20 @@ impl Version { project_types, games, dependencies, - } - })) - }) - .try_collect::>() - .await?; + }; - for version in db_versions { - redis - .set_serialized_to_json(VERSIONS_NAMESPACE, version.inner.id.0, &version, None) + acc.insert(v.id, query_version); + async move { Ok(acc) } + }) .await?; - found_versions.push(version); - } - } + Ok(res) + }, + ).await?; - Ok(found_versions) + val.sort(); + + Ok(val) } pub async fn get_file_from_hash<'a, 'b, E>( @@ -866,110 +827,66 @@ impl Version { where E: sqlx::Executor<'a, Database = sqlx::Postgres> + 
Copy, { - use futures::stream::TryStreamExt; - - let mut redis = redis.connect().await?; - - if hashes.is_empty() { - return Ok(Vec::new()); - } - - let mut file_ids_parsed = hashes.to_vec(); - - let mut found_files = Vec::new(); - - let files = redis - .multi_get::( - VERSION_FILES_NAMESPACE, - file_ids_parsed - .iter() - .map(|hash| format!("{}_{}", algorithm, hash)) - .collect::>(), - ) - .await?; - for file in files { - if let Some(mut file) = - file.and_then(|x| serde_json::from_str::>(&x).ok()) - { - file_ids_parsed.retain(|x| { - !file - .iter() - .any(|y| y.hashes.iter().any(|z| z.0 == &algorithm && z.1 == x)) - }); - found_files.append(&mut file); - continue; - } - } - - if !file_ids_parsed.is_empty() { - let db_files: Vec = sqlx::query!( - " - SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type, - JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes - FROM files f - INNER JOIN versions v on v.id = f.version_id - INNER JOIN hashes h on h.file_id = f.id - WHERE h.algorithm = $1 AND h.hash = ANY($2) - GROUP BY f.id, v.mod_id, v.date_published - ORDER BY v.date_published - ", - algorithm, - &file_ids_parsed.into_iter().map(|x| x.as_bytes().to_vec()).collect::>(), - ) - .fetch_many(executor) - .try_filter_map(|e| async { - Ok(e.right().map(|f| { + let val = redis.get_cached_keys( + VERSION_FILES_NAMESPACE, + &hashes.iter().map(|x| format!("{algorithm}_{x}")).collect::>(), + |file_ids| async move { + let files = sqlx::query!( + " + SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type, + JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes + FROM files f + INNER JOIN versions v on v.id = f.version_id + INNER JOIN hashes h on h.file_id = f.id + WHERE h.algorithm = $1 AND h.hash = ANY($2) + GROUP BY f.id, v.mod_id, v.date_published + ORDER BY v.date_published + ", + algorithm, + &file_ids.into_iter().flat_map(|x| x.split('_').last().map(|x| x.as_bytes().to_vec())).collect::>(), + ) + .fetch(executor) + .try_fold(DashMap::new(), |acc, f| { #[derive(Deserialize)] struct Hash { pub algorithm: String, pub hash: String, } - SingleFile { - id: FileId(f.id), - version_id: VersionId(f.version_id), - project_id: ProjectId(f.mod_id), - url: f.url, - filename: f.filename, - hashes: serde_json::from_value::>( - f.hashes.unwrap_or_default(), - ) - .ok() - .unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash)).collect(), - primary: f.is_primary, - size: f.size as u32, - file_type: f.file_type.map(|x| FileType::from_string(&x)), + let hashes = serde_json::from_value::>( + f.hashes.unwrap_or_default(), + ) + .ok() + .unwrap_or_default().into_iter().map(|x| (x.algorithm, x.hash)) + .collect::>(); + + if let Some(hash) = hashes.get(&algorithm) { + let key = format!("{algorithm}_{hash}"); + + let file = SingleFile { + id: FileId(f.id), + version_id: VersionId(f.version_id), + project_id: ProjectId(f.mod_id), + url: f.url, + filename: f.filename, + hashes, + primary: f.is_primary, + size: f.size as u32, + file_type: f.file_type.map(|x| FileType::from_string(&x)), + }; + + acc.insert(key, file); } - } - )) - }) - .try_collect::>() - .await?; - let mut save_files: HashMap> = HashMap::new(); - - for file in db_files { - for (algo, hash) in &file.hashes { - let key = format!("{}_{}", algo, hash); - - if let Some(files) = save_files.get_mut(&key) { - 
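// A hedged sketch of how a caller drives the integer-keyed helper, modeled on the
// TeamMember::get_many change earlier in this patch; load_members_from_postgres is a
// hypothetical stand-in for the caller's own query, and team_ids is the plain Vec<i64> of ids:
//
//     let grouped: Vec<Vec<TeamMember>> = redis
//         .get_cached_keys(TEAMS_NAMESPACE, &team_ids, |missing_ids| async move {
//             // only ids that were not found fresh in Redis reach this closure
//             let acc: DashMap<i64, Vec<TeamMember>> = DashMap::new();
//             for member in load_members_from_postgres(&missing_ids).await? {
//                 acc.entry(member.team_id.0).or_default().push(member);
//             }
//             Ok(acc)
//         })
//         .await?;
//     let members: Vec<TeamMember> = grouped.into_iter().flatten().collect();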
files.push(file.clone()); - } else { - save_files.insert(key, vec![file.clone()]); - } - } - } - - for (key, mut files) in save_files { - redis - .set_serialized_to_json(VERSION_FILES_NAMESPACE, key, &files, None) + async move { Ok(acc) } + }) .await?; - found_files.append(&mut files); + Ok(files) } - } + ).await?; - Ok(found_files) + Ok(val) } pub async fn clear_cache( diff --git a/src/database/redis.rs b/src/database/redis.rs index c80450cde..e63a37bc2 100644 --- a/src/database/redis.rs +++ b/src/database/redis.rs @@ -1,10 +1,20 @@ use super::models::DatabaseError; +use crate::models::ids::base62_impl::{parse_base62, to_base62}; +use chrono::{TimeZone, Utc}; +use dashmap::DashMap; use deadpool_redis::{Config, Runtime}; -use itertools::Itertools; -use redis::{cmd, Cmd, FromRedisValue}; -use std::fmt::Display; +use redis::{cmd, Cmd, ExistenceCheck, SetExpiry, SetOptions}; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fmt::{Debug, Display}; +use std::future::Future; +use std::hash::Hash; +use std::pin::Pin; +use std::time::Duration; -const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes +const DEFAULT_EXPIRY: i64 = 60 * 60 * 12; // 12 hours +const ACTUAL_EXPIRY: i64 = 60 * 30; // 30 minutes #[derive(Clone)] pub struct RedisPool { @@ -47,6 +57,364 @@ impl RedisPool { meta_namespace: self.meta_namespace.clone(), }) } + + pub async fn get_cached_keys( + &self, + namespace: &str, + keys: &[K], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, DatabaseError>>, + T: Serialize + DeserializeOwned, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug, + { + Ok(self + .get_cached_keys_raw(namespace, keys, closure) + .await? + .into_iter() + .map(|x| x.1) + .collect()) + } + + pub async fn get_cached_keys_raw( + &self, + namespace: &str, + keys: &[K], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, DatabaseError>>, + T: Serialize + DeserializeOwned, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize + Debug, + { + self.get_cached_keys_raw_with_slug(namespace, None, false, keys, |ids| async move { + Ok(closure(ids) + .await? + .into_iter() + .map(|(key, val)| (key, (None::, val))) + .collect()) + }) + .await + } + + pub async fn get_cached_keys_with_slug( + &self, + namespace: &str, + slug_namespace: &str, + case_sensitive: bool, + keys: &[I], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, T)>, DatabaseError>>, + T: Serialize + DeserializeOwned, + I: Display + Hash + Eq + PartialEq + Clone + Debug, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize, + S: Display + Clone + DeserializeOwned + Serialize + Debug, + { + Ok(self + .get_cached_keys_raw_with_slug( + namespace, + Some(slug_namespace), + case_sensitive, + keys, + closure, + ) + .await? 
+ .into_iter() + .map(|x| x.1) + .collect()) + } + + pub async fn get_cached_keys_raw_with_slug( + &self, + namespace: &str, + slug_namespace: Option<&str>, + case_sensitive: bool, + keys: &[I], + closure: F, + ) -> Result, DatabaseError> + where + F: FnOnce(Vec) -> Fut, + Fut: Future, T)>, DatabaseError>>, + T: Serialize + DeserializeOwned, + I: Display + Hash + Eq + PartialEq + Clone + Debug, + K: Display + Hash + Eq + PartialEq + Clone + DeserializeOwned + Serialize, + S: Display + Clone + DeserializeOwned + Serialize + Debug, + { + let connection = self.connect().await?.connection; + + let ids = keys + .iter() + .map(|x| (x.to_string(), x.clone())) + .collect::>(); + + if ids.is_empty() { + return Ok(HashMap::new()); + } + + let get_cached_values = + |ids: DashMap, mut connection: deadpool_redis::Connection| async move { + let slug_ids = if let Some(slug_namespace) = slug_namespace { + cmd("MGET") + .arg( + ids.iter() + .map(|x| { + format!( + "{}_{slug_namespace}:{}", + self.meta_namespace, + if case_sensitive { + x.value().to_string() + } else { + x.value().to_string().to_lowercase() + } + ) + }) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut connection) + .await? + .into_iter() + .flatten() + .collect::>() + } else { + Vec::new() + }; + + let cached_values = cmd("MGET") + .arg( + ids.iter() + .map(|x| x.value().to_string()) + .chain(ids.iter().filter_map(|x| { + parse_base62(&x.value().to_string()) + .ok() + .map(|x| x.to_string()) + })) + .chain(slug_ids) + .map(|x| format!("{}_{namespace}:{x}", self.meta_namespace)) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut connection) + .await? + .into_iter() + .filter_map(|x| { + x.and_then(|val| serde_json::from_str::>(&val).ok()) + .map(|val| (val.key.clone(), val)) + }) + .collect::>(); + + Ok::<_, DatabaseError>((cached_values, connection, ids)) + }; + + let current_time = Utc::now(); + let mut expired_values = HashMap::new(); + + let (cached_values_raw, mut connection, ids) = get_cached_values(ids, connection).await?; + let mut cached_values = cached_values_raw + .into_iter() + .filter_map(|(key, val)| { + if Utc.timestamp(val.iat + ACTUAL_EXPIRY, 0) < current_time { + expired_values.insert(val.key.to_string(), val); + + None + } else { + let key_str = val.key.to_string(); + ids.remove(&key_str); + + if let Ok(value) = key_str.parse::() { + let base62 = to_base62(value); + ids.remove(&base62); + } + + if let Some(ref alias) = val.alias { + ids.remove(&alias.to_string()); + } + + Some((key, val)) + } + }) + .collect::>(); + + let subscribe_ids = DashMap::new(); + + if !ids.is_empty() { + let mut pipe = redis::pipe(); + + let fetch_ids = ids.iter().map(|x| x.key().clone()).collect::>(); + + fetch_ids.iter().for_each(|key| { + pipe.atomic().set_options( + format!("{}_{namespace}:{}/lock", self.meta_namespace, key), + 100, + SetOptions::default() + .get(true) + .conditional_set(ExistenceCheck::NX) + .with_expiration(SetExpiry::EX(60)), + ); + }); + let results = pipe + .query_async::<_, Vec>>(&mut connection) + .await?; + + for (idx, key) in fetch_ids.into_iter().enumerate() { + if let Some(locked) = results.get(idx) { + if locked.is_none() { + continue; + } + } + + if let Some((key, raw_key)) = ids.remove(&key) { + if let Some(val) = expired_values.remove(&key) { + if let Some(ref alias) = val.alias { + ids.remove(&alias.to_string()); + } + + if let Ok(value) = val.key.to_string().parse::() { + let base62 = to_base62(value); + ids.remove(&base62); + } + + cached_values.insert(val.key.clone(), val); + } else { + 
subscribe_ids.insert(key, raw_key); + } + } + } + } + + #[allow(clippy::type_complexity)] + let mut fetch_tasks: Vec< + Pin>, DatabaseError>>>>, + > = Vec::new(); + + if !ids.is_empty() { + fetch_tasks.push(Box::pin(async { + let fetch_ids = ids.iter().map(|x| x.value().clone()).collect::>(); + + let vals = closure(fetch_ids).await?; + let mut return_values = HashMap::new(); + + let mut pipe = redis::pipe(); + if !vals.is_empty() { + for (key, (slug, value)) in vals { + let value = RedisValue { + key: key.clone(), + iat: Utc::now().timestamp(), + val: value, + alias: slug.clone(), + }; + + pipe.atomic().set_ex( + format!("{}_{namespace}:{key}", self.meta_namespace), + serde_json::to_string(&value)?, + DEFAULT_EXPIRY as u64, + ); + + if let Some(slug) = slug { + ids.remove(&slug.to_string()); + + if let Some(slug_namespace) = slug_namespace { + let actual_slug = if case_sensitive { + slug.to_string() + } else { + slug.to_string().to_lowercase() + }; + + pipe.atomic().set_ex( + format!( + "{}_{slug_namespace}:{}", + self.meta_namespace, actual_slug + ), + key.to_string(), + DEFAULT_EXPIRY as u64, + ); + + pipe.atomic().del(format!( + "{}_{namespace}:{}/lock", + self.meta_namespace, actual_slug + )); + } + } + + let key_str = key.to_string(); + ids.remove(&key_str); + + if let Ok(value) = key_str.parse::() { + let base62 = to_base62(value); + ids.remove(&base62); + + pipe.atomic() + .del(format!("{}_{namespace}:{base62}/lock", self.meta_namespace)); + } + + pipe.atomic() + .del(format!("{}_{namespace}:{key}/lock", self.meta_namespace)); + + return_values.insert(key, value); + } + } + + for (key, _) in ids { + pipe.atomic() + .del(format!("{}_{namespace}:{key}/lock", self.meta_namespace)); + } + + pipe.query_async(&mut connection).await?; + + Ok(return_values) + })); + } + + if !subscribe_ids.is_empty() { + fetch_tasks.push(Box::pin(async { + let mut connection = self.pool.get().await?; + + let mut interval = tokio::time::interval(Duration::from_millis(100)); + let start = Utc::now(); + loop { + let results = cmd("MGET") + .arg( + subscribe_ids + .iter() + .map(|x| { + format!("{}_{namespace}:{}/lock", self.meta_namespace, x.key()) + }) + .collect::>(), + ) + .query_async::<_, Vec>>(&mut connection) + .await?; + + if results.into_iter().all(|x| x.is_none()) { + break; + } + + if (Utc::now() - start) > chrono::Duration::seconds(5) { + return Err(DatabaseError::CacheTimeout); + } + + interval.tick().await; + } + + let (return_values, _, _) = get_cached_values(subscribe_ids, connection).await?; + + Ok(return_values) + })); + } + + if !fetch_tasks.is_empty() { + for map in futures::future::try_join_all(fetch_tasks).await? { + for (key, value) in map { + cached_values.insert(key, value); + } + } + } + + Ok(cached_values.into_iter().map(|x| (x.0, x.1.val)).collect()) + } } impl RedisConnection { @@ -120,26 +488,6 @@ impl RedisConnection { .and_then(|x| serde_json::from_str(&x).ok())) } - pub async fn multi_get( - &mut self, - namespace: &str, - ids: impl IntoIterator, - ) -> Result>, DatabaseError> - where - R: FromRedisValue, - { - let mut cmd = cmd("MGET"); - - let ids = ids.into_iter().map(|x| x.to_string()).collect_vec(); - redis_args( - &mut cmd, - &ids.into_iter() - .map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x)) - .collect_vec(), - ); - Ok(redis_execute(&mut cmd, &mut self.connection).await?) 
-    }
-
     pub async fn delete<T1>(&mut self, namespace: &str, id: T1) -> Result<(), DatabaseError>
     where
         T1: Display,
@@ -177,6 +525,15 @@ impl RedisConnection {
     }
 }
 
+#[derive(Serialize, Deserialize)]
+pub struct RedisValue<T, K, S> {
+    key: K,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    alias: Option<S>,
+    iat: i64,
+    val: T,
+}
+
 pub fn redis_args(cmd: &mut Cmd, args: &[String]) {
     for arg in args {
         cmd.arg(arg);
diff --git a/src/models/v3/organizations.rs b/src/models/v3/organizations.rs
index 11a0f72d8..f2817e36d 100644
--- a/src/models/v3/organizations.rs
+++ b/src/models/v3/organizations.rs
@@ -5,7 +5,7 @@ use super::{
 use serde::{Deserialize, Serialize};
 
 /// The ID of a team
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
 #[serde(from = "Base62Id")]
 #[serde(into = "Base62Id")]
 pub struct OrganizationId(pub u64);
diff --git a/src/models/v3/pats.rs b/src/models/v3/pats.rs
index d4ef6e285..4de7e7c87 100644
--- a/src/models/v3/pats.rs
+++ b/src/models/v3/pats.rs
@@ -5,7 +5,7 @@ use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 
 /// The ID of a team
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
 #[serde(from = "Base62Id")]
 #[serde(into = "Base62Id")]
 pub struct PatId(pub u64);
diff --git a/src/models/v3/sessions.rs b/src/models/v3/sessions.rs
index 9cfb6d506..46a8a69ac 100644
--- a/src/models/v3/sessions.rs
+++ b/src/models/v3/sessions.rs
@@ -3,7 +3,7 @@ use crate::models::users::UserId;
 use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 
-#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Debug)]
 #[serde(from = "Base62Id")]
 #[serde(into = "Base62Id")]
 pub struct SessionId(pub u64);
diff --git a/src/queue/analytics.rs b/src/queue/analytics.rs
index ecb300a25..f1fd91215 100644
--- a/src/queue/analytics.rs
+++ b/src/queue/analytics.rs
@@ -117,8 +117,6 @@ impl AnalyticsQueue {
 
             let new_count = if let Some((views, monetized)) = raw_views.get_mut(idx) {
                 if let Some(count) = count {
-                    println!("len: {} count: {}", views.len(), count);
-
                     if count > 3 {
                         *monetized = false;
                         continue;
diff --git a/tests/pats.rs b/tests/pats.rs
index d9ab3226b..07b130f9e 100644
--- a/tests/pats.rs
+++ b/tests/pats.rs
@@ -115,6 +115,8 @@ pub async fn pat_full_test() {
             "expires": Utc::now() + Duration::days(1), // no longer expired!
         }))
         .to_request();
+
+    println!("PAT ID FOR TEST: {}", id);
     let resp = test_env.call(req).await;
     assert_status!(&resp, StatusCode::NO_CONTENT);
     assert_eq!(mock_pat_test(access_token).await, 200); // Works again
diff --git a/tests/project.rs b/tests/project.rs
index 741705650..99c68c3b5 100644
--- a/tests/project.rs
+++ b/tests/project.rs
@@ -69,7 +69,10 @@ async fn test_get_project() {
             .unwrap()
             .unwrap();
         let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
-        assert_eq!(cached_project["inner"]["slug"], json!(alpha_project_slug));
+        assert_eq!(
+            cached_project["val"]["inner"]["slug"],
+            json!(alpha_project_slug)
+        );
 
         // Make the request again, this time it should be cached
         let resp = api.get_project(alpha_project_id, USER_USER_PAT).await;
diff --git a/tests/version.rs b/tests/version.rs
index de5878316..f482bc359 100644
--- a/tests/version.rs
+++ b/tests/version.rs
@@ -55,7 +55,7 @@ async fn test_get_version() {
             .unwrap();
         let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
         assert_eq!(
-            cached_project["inner"]["project_id"],
+            cached_project["val"]["inner"]["project_id"],
             json!(parse_base62(alpha_project_id).unwrap())
         );
 
@@ -617,6 +617,7 @@ async fn version_ordering_for_specified_orderings_orders_lower_order_first() {
             USER_USER_PAT,
         )
         .await;
+
        assert_common_version_ids(&versions, vec![new_version_id, alpha_version_id]);
    })
    .await;
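
Editor's note — a minimal usage sketch (not part of the diff) of the new RedisPool::get_cached_keys helper introduced above. The `CachedTeam` type and the "teams" namespace are placeholders, not names from this codebase; the real callers in this PR (versions, version files) follow the same shape.

// Sketch only: assumes the crate-internal RedisPool and DatabaseError shown in the diff.
use dashmap::DashMap;
use serde::{Deserialize, Serialize};

use crate::database::models::DatabaseError;
use crate::database::redis::RedisPool;

#[derive(Serialize, Deserialize)]
pub struct CachedTeam {
    pub id: i64,
    pub name: String,
}

pub async fn get_teams_cached(
    ids: &[i64],
    redis: &RedisPool,
) -> Result<Vec<CachedTeam>, DatabaseError> {
    redis
        .get_cached_keys("teams", ids, |missing| async move {
            // Only ids that were absent from (or expired in) Redis reach this closure.
            // Everything returned here is serialized into the RedisValue wrapper,
            // written back to the cache, and the per-key locks are released.
            let fetched = DashMap::new();
            for id in missing {
                // Placeholder for the real batched database query.
                fetched.insert(
                    id,
                    CachedTeam {
                        id,
                        name: format!("team-{id}"),
                    },
                );
            }
            Ok(fetched)
        })
        .await
}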