Initial shared instances backend (#3800)
* Create base shared instance migration and initial routes
* Fix build
* Add version uploads
* Add permissions field for shared instance users
* Actually use permissions field
* Add "public" flag to shared instances that allows GETing them without authorization
* Add the ability to get and list shared instance versions
* Add the ability to delete shared instance versions
* Fix build after merge
* Secured file hosting (#3784)
* Remove Backblaze-specific file-hosting backend
* Added S3_USES_PATH_STYLE_BUCKETS
* Remove unused file_id parameter from delete_file_version
* Add support for separate public and private buckets in labrinth::file_hosting
* Rename delete_file_version to delete_file
* Add (untested) get_url_for_private_file
* Remove url field from shared instance routes
* Use private bucket for shared instance versions
* Make S3 environment variables fully separate between public and private buckets
* Change file host expiry for shared instances to 180 seconds
* Fix lint
* Merge shared instance migrations into a single migration
* Replace shared instance owners with Ghost instead of deleting the instance
This commit is contained in:
parent d4864deac5
commit cc34e69524
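The heart of the file-hosting rework below is the split between a public and a private S3 bucket: ordinary CDN files stay publicly fetchable, while shared instance version files are uploaded with `FileHostPublicity::Private` and served through short-lived URLs (the commit sets the expiry for shared instances to 180 seconds). Here is a minimal sketch of how the reworked `FileHost` trait (defined in the `file_hosting` diff further down) is meant to be called; the surrounding function and the object key are illustrative, not part of this commit:

```rust
use bytes::Bytes;
use crate::file_hosting::{FileHost, FileHostPublicity, FileHostingError};

// Hypothetical caller: store one shared instance version privately and
// hand back a short-lived URL for an authorized client to download it.
async fn store_instance_version(
    host: &dyn FileHost,
    instance_path: &str, // assumed key shape, e.g. "shared_instances/<id>/<version>.mrpack"
    data: Bytes,
) -> Result<String, FileHostingError> {
    // Versions go to the private bucket, so plain unauthenticated GETs
    // against the CDN cannot reach them.
    host.upload_file(
        "application/octet-stream",
        instance_path,
        FileHostPublicity::Private,
        data,
    )
    .await?;

    // Authorized clients instead receive a presigned URL that expires;
    // 180 seconds matches the shared-instance expiry named in this commit.
    host.get_url_for_private_file(instance_path, 180).await
}
```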
@@ -85,11 +85,10 @@ During development, you might notice that changes made directly to entities in t

 #### CDN options

-`STORAGE_BACKEND`: Controls what storage backend is used. This can be either `local`, `backblaze`, or `s3`, but defaults to `local`
+`STORAGE_BACKEND`: Controls what storage backend is used. This can be either `local` or `s3`, but defaults to `local`

-The Backblaze and S3 configuration options are fairly self-explanatory in name, so here's simply their names:
+The S3 configuration options are fairly self-explanatory in name, so here's simply their names:

-`BACKBLAZE_KEY_ID`, `BACKBLAZE_KEY`, `BACKBLAZE_BUCKET_ID`
-`S3_ACCESS_TOKEN`, `S3_SECRET`, `S3_URL`, `S3_REGION`, `S3_BUCKET_NAME`
+`S3_ACCESS_TOKEN`, `S3_SECRET`, `S3_URL`, `S3_REGION`, `S3_PUBLIC_BUCKET_NAME`, `S3_PRIVATE_BUCKET_NAME`, `S3_USES_PATH_STYLE_BUCKETS`

 #### Search, OAuth, and miscellaneous options
@@ -28,15 +28,19 @@ CLOUDFLARE_INTEGRATION=false
 STORAGE_BACKEND=local
 MOCK_FILE_PATH=/tmp/modrinth

-BACKBLAZE_KEY_ID=none
-BACKBLAZE_KEY=none
-BACKBLAZE_BUCKET_ID=none
+S3_PUBLIC_BUCKET_NAME=none
+S3_PUBLIC_USES_PATH_STYLE_BUCKET=false
+S3_PUBLIC_REGION=none
+S3_PUBLIC_URL=none
+S3_PUBLIC_ACCESS_TOKEN=none
+S3_PUBLIC_SECRET=none

-S3_ACCESS_TOKEN=none
-S3_SECRET=none
-S3_URL=none
-S3_REGION=none
-S3_BUCKET_NAME=none
+S3_PRIVATE_BUCKET_NAME=none
+S3_PRIVATE_USES_PATH_STYLE_BUCKET=false
+S3_PRIVATE_REGION=none
+S3_PRIVATE_URL=none
+S3_PRIVATE_ACCESS_TOKEN=none
+S3_PRIVATE_SECRET=none

 # 1 hour
 LOCAL_INDEX_INTERVAL=3600
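For reference, here is one plausible way the split `S3_PUBLIC_*`/`S3_PRIVATE_*` variables above could be gathered into the two bucket configurations. `S3BucketConfig` is exported by the `file_hosting` module later in this diff, but its fields are not shown in this excerpt, so the field names and constructor shape here are assumptions:

```rust
use crate::file_hosting::S3BucketConfig;

// Hedged sketch: build one bucket config from a "S3_PUBLIC" or "S3_PRIVATE"
// env-var prefix. All field names below are assumed, not taken from the diff.
fn bucket_config(prefix: &str) -> S3BucketConfig {
    let var = |name: &str| dotenvy::var(format!("{prefix}_{name}")).unwrap();
    S3BucketConfig {
        name: var("BUCKET_NAME"),
        uses_path_style: var("USES_PATH_STYLE_BUCKET") == "true",
        region: var("REGION"),
        url: var("URL"),
        access_token: var("ACCESS_TOKEN"),
        secret: var("SECRET"),
    }
}

// let public = bucket_config("S3_PUBLIC");
// let private = bucket_config("S3_PRIVATE");
```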
apps/labrinth/.sqlx/query-09ebec1a568edf1959f20b33d8ba2b8edb55d93ada8f2243448865163f555d8d.json (generated, new file, 16 lines)
@@ -0,0 +1,16 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO shared_instance_users (user_id, shared_instance_id, permissions)\n VALUES ($1, $2, $3)\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8", "Int8", "Int8"] },
    "nullable": []
  },
  "hash": "09ebec1a568edf1959f20b33d8ba2b8edb55d93ada8f2243448865163f555d8d"
}
apps/labrinth/.sqlx/query-1ebe19b7b4f10039065967a0b1ca4bb38acc54e4ea5de020fffef7457000fa6e.json (generated, new file, 46 lines)
@@ -0,0 +1,46 @@
{
  "db_name": "PostgreSQL",
  "query": "\n SELECT id, title, owner_id, public, current_version_id\n FROM shared_instances\n WHERE id = $1\n ",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "id", "type_info": "Int8" },
      { "ordinal": 1, "name": "title", "type_info": "Varchar" },
      { "ordinal": 2, "name": "owner_id", "type_info": "Int8" },
      { "ordinal": 3, "name": "public", "type_info": "Bool" },
      { "ordinal": 4, "name": "current_version_id", "type_info": "Int8" }
    ],
    "parameters": { "Left": ["Int8"] },
    "nullable": [false, false, false, false, true]
  },
  "hash": "1ebe19b7b4f10039065967a0b1ca4bb38acc54e4ea5de020fffef7457000fa6e"
}
apps/labrinth/.sqlx/query-265c4d6f33714c8a5cf3137c429e2b57e917e9507942d65f40c1b733209cabf0.json (generated, new file, 46 lines)
@@ -0,0 +1,46 @@
{
  "db_name": "PostgreSQL",
  "query": "\n SELECT id, shared_instance_id, size, sha512, created\n FROM shared_instance_versions\n WHERE shared_instance_id = $1\n ORDER BY created DESC\n ",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "id", "type_info": "Int8" },
      { "ordinal": 1, "name": "shared_instance_id", "type_info": "Int8" },
      { "ordinal": 2, "name": "size", "type_info": "Int8" },
      { "ordinal": 3, "name": "sha512", "type_info": "Bytea" },
      { "ordinal": 4, "name": "created", "type_info": "Timestamptz" }
    ],
    "parameters": { "Left": ["Int8"] },
    "nullable": [false, false, false, false, false]
  },
  "hash": "265c4d6f33714c8a5cf3137c429e2b57e917e9507942d65f40c1b733209cabf0"
}
apps/labrinth/.sqlx/query-47130ef29ce5914528e5424fe516a9158a3ea08f8720f6df5b4902cd8094d3bb.json (generated, new file, 14 lines)
@@ -0,0 +1,14 @@
{
  "db_name": "PostgreSQL",
  "query": "\n DELETE FROM shared_instance_versions\n WHERE id = $1\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8"] },
    "nullable": []
  },
  "hash": "47130ef29ce5914528e5424fe516a9158a3ea08f8720f6df5b4902cd8094d3bb"
}
apps/labrinth/.sqlx/query-47ec9f179f1c52213bd32b37621ab13ae43d180b8c86cb2a6fab0253dd4eba55.json (generated, new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "db_name": "PostgreSQL",
  "query": "UPDATE shared_instances SET current_version_id = $1 WHERE id = $2",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8", "Int8"] },
    "nullable": []
  },
  "hash": "47ec9f179f1c52213bd32b37621ab13ae43d180b8c86cb2a6fab0253dd4eba55"
}
apps/labrinth/.sqlx/query-6b166d129b0ee028898620054a58fa4c3641eb2221e522bf50abad4f5e977599.json (generated, new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "db_name": "PostgreSQL",
  "query": "\n UPDATE shared_instances\n SET public = $1\n WHERE id = $2\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Bool", "Int8"] },
    "nullable": []
  },
  "hash": "6b166d129b0ee028898620054a58fa4c3641eb2221e522bf50abad4f5e977599"
}
apps/labrinth/.sqlx/query-6f72c853e139f23322fe6f1f02e4e07e5ae80b5dfca6dc041a03c0c7a30a5cf1.json (generated, new file, 17 lines)
@@ -0,0 +1,17 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO shared_instances (id, title, owner_id, current_version_id)\n VALUES ($1, $2, $3, $4)\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8", "Varchar", "Int8", "Int8"] },
    "nullable": []
  },
  "hash": "6f72c853e139f23322fe6f1f02e4e07e5ae80b5dfca6dc041a03c0c7a30a5cf1"
}
apps/labrinth/.sqlx/query-72ae0e8debd06067894a2f7bea279446dd964da4efa49c5464cebde57860f741.json (generated, new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "db_name": "PostgreSQL",
  "query": "\n UPDATE shared_instances\n SET owner_id = $1\n WHERE owner_id = $2\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8", "Int8"] },
    "nullable": []
  },
  "hash": "72ae0e8debd06067894a2f7bea279446dd964da4efa49c5464cebde57860f741"
}
apps/labrinth/.sqlx/query-7c445073f61e30723416a9690aa9d227d95f2a8f2eb9852833e14c723903988b.json (generated, new file, 22 lines)
@@ -0,0 +1,22 @@
{
  "db_name": "PostgreSQL",
  "query": "SELECT EXISTS(SELECT 1 FROM shared_instance_versions WHERE id=$1)",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "exists", "type_info": "Bool" }
    ],
    "parameters": { "Left": ["Int8"] },
    "nullable": [null]
  },
  "hash": "7c445073f61e30723416a9690aa9d227d95f2a8f2eb9852833e14c723903988b"
}
apps/labrinth/.sqlx/query-9c6e18cb19251e54b3b96446ab88d84842152b82c9a0032d1db587d7099b8550.json (generated, new file, 46 lines)
@@ -0,0 +1,46 @@
{
  "db_name": "PostgreSQL",
  "query": "\n -- See https://github.com/launchbadge/sqlx/issues/1266 for why we need all the \"as\"\n SELECT\n id as \"id!\",\n title as \"title!\",\n public as \"public!\",\n owner_id as \"owner_id!\",\n current_version_id\n FROM shared_instances\n WHERE owner_id = $1\n UNION\n SELECT\n id as \"id!\",\n title as \"title!\",\n public as \"public!\",\n owner_id as \"owner_id!\",\n current_version_id\n FROM shared_instances\n JOIN shared_instance_users ON id = shared_instance_id\n WHERE user_id = $1\n ",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "id!", "type_info": "Int8" },
      { "ordinal": 1, "name": "title!", "type_info": "Varchar" },
      { "ordinal": 2, "name": "public!", "type_info": "Bool" },
      { "ordinal": 3, "name": "owner_id!", "type_info": "Int8" },
      { "ordinal": 4, "name": "current_version_id", "type_info": "Int8" }
    ],
    "parameters": { "Left": ["Int8"] },
    "nullable": [null, null, null, null, null]
  },
  "hash": "9c6e18cb19251e54b3b96446ab88d84842152b82c9a0032d1db587d7099b8550"
}
apps/labrinth/.sqlx/query-9ccaf8ea52b1b6f0880d34cdb4a9405e28c265bef6121b457c4f39cacf00683f.json (generated, new file, 15 lines)
@@ -0,0 +1,15 @@
{
  "db_name": "PostgreSQL",
  "query": "\n UPDATE shared_instances\n SET title = $1\n WHERE id = $2\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Varchar", "Int8"] },
    "nullable": []
  },
  "hash": "9ccaf8ea52b1b6f0880d34cdb4a9405e28c265bef6121b457c4f39cacf00683f"
}
apps/labrinth/.sqlx/query-aec58041cf5e5e68501652336581b8c709645ef29f3b5fb6e8e07fc212b36798.json (generated, new file, 34 lines)
@@ -0,0 +1,34 @@
{
  "db_name": "PostgreSQL",
  "query": "\n SELECT shared_instance_id, user_id, permissions\n FROM shared_instance_users\n WHERE shared_instance_id = ANY($1)\n ",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "shared_instance_id", "type_info": "Int8" },
      { "ordinal": 1, "name": "user_id", "type_info": "Int8" },
      { "ordinal": 2, "name": "permissions", "type_info": "Int8" }
    ],
    "parameters": { "Left": ["Int8Array"] },
    "nullable": [false, false, false]
  },
  "hash": "aec58041cf5e5e68501652336581b8c709645ef29f3b5fb6e8e07fc212b36798"
}
apps/labrinth/.sqlx/query-b93253bbc35b24974d13bc8ee0447be2a18275f33f8991d910f693fbcc1ff731.json (generated, new file, 46 lines)
@@ -0,0 +1,46 @@
{
  "db_name": "PostgreSQL",
  "query": "\n SELECT id, shared_instance_id, size, sha512, created\n FROM shared_instance_versions\n WHERE id = $1\n ",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "id", "type_info": "Int8" },
      { "ordinal": 1, "name": "shared_instance_id", "type_info": "Int8" },
      { "ordinal": 2, "name": "size", "type_info": "Int8" },
      { "ordinal": 3, "name": "sha512", "type_info": "Bytea" },
      { "ordinal": 4, "name": "created", "type_info": "Timestamptz" }
    ],
    "parameters": { "Left": ["Int8"] },
    "nullable": [false, false, false, false, false]
  },
  "hash": "b93253bbc35b24974d13bc8ee0447be2a18275f33f8991d910f693fbcc1ff731"
}
apps/labrinth/.sqlx/query-c3869a595693757ccf81085d0c8eb2231578aff18c93d02ead97c3c07f0b27ea.json (generated, new file, 23 lines)
@@ -0,0 +1,23 @@
{
  "db_name": "PostgreSQL",
  "query": "\n SELECT permissions\n FROM shared_instance_users\n WHERE shared_instance_id = $1 AND user_id = $2\n ",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "permissions", "type_info": "Int8" }
    ],
    "parameters": { "Left": ["Int8", "Int8"] },
    "nullable": [false]
  },
  "hash": "c3869a595693757ccf81085d0c8eb2231578aff18c93d02ead97c3c07f0b27ea"
}
apps/labrinth/.sqlx/query-cef730c02bb67b0536d35e5aaca0bd34c3893e8b55bbd126a988137ec7bf1ff9.json (generated, new file, 14 lines)
@@ -0,0 +1,14 @@
{
  "db_name": "PostgreSQL",
  "query": "\n DELETE FROM shared_instances\n WHERE id = $1\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8"] },
    "nullable": []
  },
  "hash": "cef730c02bb67b0536d35e5aaca0bd34c3893e8b55bbd126a988137ec7bf1ff9"
}
apps/labrinth/.sqlx/query-d8558a8039ade3b383db4f0e095e6826f46c27ab3a21520e9e169fd1491521c4.json (generated, new file, 22 lines)
@@ -0,0 +1,22 @@
{
  "db_name": "PostgreSQL",
  "query": "SELECT EXISTS(SELECT 1 FROM shared_instances WHERE id=$1)",
  "describe": {
    "columns": [
      { "ordinal": 0, "name": "exists", "type_info": "Bool" }
    ],
    "parameters": { "Left": ["Int8"] },
    "nullable": [null]
  },
  "hash": "d8558a8039ade3b383db4f0e095e6826f46c27ab3a21520e9e169fd1491521c4"
}
apps/labrinth/.sqlx/query-d8a1d710f86b3df4d99c2d2ec26ec405531e4270be85087122245991ec88473e.json (generated, new file, 18 lines)
@@ -0,0 +1,18 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO shared_instance_versions (id, shared_instance_id, size, sha512, created)\n VALUES ($1, $2, $3, $4, $5)\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8", "Int8", "Int8", "Bytea", "Timestamptz"] },
    "nullable": []
  },
  "hash": "d8a1d710f86b3df4d99c2d2ec26ec405531e4270be85087122245991ec88473e"
}
apps/labrinth/.sqlx/query-f6388b5026e25191840d1a157a9ed48aaedab5db381f4efc389b852d9020a0e6.json (generated, new file, 14 lines)
@@ -0,0 +1,14 @@
{
  "db_name": "PostgreSQL",
  "query": "\n UPDATE shared_instances\n SET current_version_id = (\n SELECT id FROM shared_instance_versions\n WHERE shared_instance_id = $1\n ORDER BY created DESC\n LIMIT 1\n )\n WHERE id = $1\n ",
  "describe": {
    "columns": [],
    "parameters": { "Left": ["Int8"] },
    "nullable": []
  },
  "hash": "f6388b5026e25191840d1a157a9ed48aaedab5db381f4efc389b852d9020a0e6"
}
apps/labrinth/migrations/20250519184051_shared-instances.sql (new file, 43 lines)
@@ -0,0 +1,43 @@
CREATE TABLE shared_instances (
    id BIGINT PRIMARY KEY,
    title VARCHAR(255) NOT NULL,
    owner_id BIGINT NOT NULL REFERENCES users,
    current_version_id BIGINT NULL,
    public BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE INDEX shared_instances_owner_id ON shared_instances(owner_id);

CREATE TABLE shared_instance_users (
    user_id BIGINT NOT NULL REFERENCES users ON DELETE CASCADE,
    shared_instance_id BIGINT NOT NULL REFERENCES shared_instances ON DELETE CASCADE,
    permissions BIGINT NOT NULL DEFAULT 0,

    PRIMARY KEY (user_id, shared_instance_id)
);

CREATE TABLE shared_instance_invited_users (
    id BIGINT PRIMARY KEY,
    shared_instance_id BIGINT NOT NULL REFERENCES shared_instances ON DELETE CASCADE,
    invited_user_id BIGINT NULL REFERENCES users ON DELETE CASCADE
);
CREATE INDEX shared_instance_invited_users_shared_instance_id ON shared_instance_invited_users(shared_instance_id);
CREATE INDEX shared_instance_invited_users_invited_user_id ON shared_instance_invited_users(invited_user_id);

CREATE TABLE shared_instance_invite_links (
    id BIGINT PRIMARY KEY,
    shared_instance_id BIGINT NOT NULL REFERENCES shared_instances ON DELETE CASCADE,
    expiration timestamptz NULL,
    remaining_uses BIGINT CHECK ( remaining_uses >= 0 ) NULL
);
CREATE INDEX shared_instance_invite_links_shared_instance_id ON shared_instance_invite_links(shared_instance_id);

CREATE TABLE shared_instance_versions (
    id BIGINT PRIMARY KEY,
    shared_instance_id BIGINT NOT NULL REFERENCES shared_instances ON DELETE CASCADE,
    size BIGINT NOT NULL,
    sha512 bytea NOT NULL,
    created timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP
);

ALTER TABLE shared_instances
    ADD FOREIGN KEY (current_version_id) REFERENCES shared_instance_versions(id) ON DELETE SET NULL;
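The schema is deliberately circular: `shared_instance_versions.shared_instance_id` references `shared_instances`, while `shared_instances.current_version_id` points back into `shared_instance_versions` (added via `ALTER TABLE` so both tables exist first). That forces a two-step insert when creating an instance with its first version. A hedged sketch mirroring the prepared statements in the `.sqlx` files above; the title and the placeholder size/hash values are illustrative:

```rust
// Illustrative only: runtime-checked sqlx calls standing in for the
// compile-time-checked query! macros used by the actual code.
async fn create_instance_with_version(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    instance_id: i64,
    version_id: i64,
    owner_id: i64,
) -> Result<(), sqlx::Error> {
    // 1. The instance must exist first because the version row's FK points
    //    at it; current_version_id starts NULL since no version exists yet.
    sqlx::query(
        "INSERT INTO shared_instances (id, title, owner_id, current_version_id) \
         VALUES ($1, $2, $3, NULL)",
    )
    .bind(instance_id)
    .bind("My instance") // placeholder title
    .bind(owner_id)
    .execute(&mut **tx)
    .await?;

    // 2. Insert the version (size and sha512 are placeholders here).
    sqlx::query(
        "INSERT INTO shared_instance_versions (id, shared_instance_id, size, sha512, created) \
         VALUES ($1, $2, 0, ''::bytea, CURRENT_TIMESTAMP)",
    )
    .bind(version_id)
    .bind(instance_id)
    .execute(&mut **tx)
    .await?;

    // 3. Close the loop, as in query-47ec9f... above.
    sqlx::query("UPDATE shared_instances SET current_version_id = $1 WHERE id = $2")
        .bind(version_id)
        .bind(instance_id)
        .execute(&mut **tx)
        .await?;

    Ok(())
}
```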
@@ -9,7 +9,7 @@ use ariadne::ids::DecodingError;
 #[error("{}", .error_type)]
 pub struct OAuthError {
     #[source]
-    pub error_type: OAuthErrorType,
+    pub error_type: Box<OAuthErrorType>,

     pub state: Option<String>,
     pub valid_redirect_uri: Option<ValidatedRedirectUri>,
@@ -32,7 +32,7 @@ impl OAuthError {
     /// See: IETF RFC 6749 4.1.2.1 (https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.2.1)
     pub fn error(error_type: impl Into<OAuthErrorType>) -> Self {
         Self {
-            error_type: error_type.into(),
+            error_type: Box::new(error_type.into()),
             valid_redirect_uri: None,
             state: None,
         }
@@ -48,7 +48,7 @@ impl OAuthError {
         valid_redirect_uri: &ValidatedRedirectUri,
     ) -> Self {
         Self {
-            error_type: err.into(),
+            error_type: Box::new(err.into()),
             state: state.clone(),
             valid_redirect_uri: Some(valid_redirect_uri.clone()),
         }
@@ -57,7 +57,7 @@ impl OAuthError {

 impl actix_web::ResponseError for OAuthError {
     fn status_code(&self) -> StatusCode {
-        match self.error_type {
+        match *self.error_type {
             OAuthErrorType::AuthenticationError(_)
             | OAuthErrorType::FailedScopeParse(_)
             | OAuthErrorType::ScopesTooBroad
@@ -101,7 +101,7 @@ mod tests {
         );

         assert!(validated.is_err_and(|e| matches!(
-            e.error_type,
+            *e.error_type,
             OAuthErrorType::RedirectUriNotConfigured(_)
         )));
     }
@@ -10,6 +10,40 @@ use actix_web::HttpRequest;
 use actix_web::http::header::{AUTHORIZATION, HeaderValue};
 use chrono::Utc;

+pub async fn get_maybe_user_from_headers<'a, E>(
+    req: &HttpRequest,
+    executor: E,
+    redis: &RedisPool,
+    session_queue: &AuthQueue,
+    required_scopes: Scopes,
+) -> Result<Option<(Scopes, User)>, AuthenticationError>
+where
+    E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
+{
+    if !req.headers().contains_key(AUTHORIZATION) {
+        return Ok(None);
+    }
+
+    // Fetch DB user record and minos user from headers
+    let Some((scopes, db_user)) = get_user_record_from_bearer_token(
+        req,
+        None,
+        executor,
+        redis,
+        session_queue,
+    )
+    .await?
+    else {
+        return Ok(None);
+    };
+
+    if !scopes.contains(required_scopes) {
+        return Ok(None);
+    }
+
+    Ok(Some((scopes, User::from_full(db_user))))
+}
+
 pub async fn get_user_from_headers<'a, E>(
     req: &HttpRequest,
     executor: E,
|||||||
@ -3,8 +3,9 @@ use crate::models::ids::{
|
|||||||
ChargeId, CollectionId, FileId, ImageId, NotificationId,
|
ChargeId, CollectionId, FileId, ImageId, NotificationId,
|
||||||
OAuthAccessTokenId, OAuthClientAuthorizationId, OAuthClientId,
|
OAuthAccessTokenId, OAuthClientAuthorizationId, OAuthClientId,
|
||||||
OAuthRedirectUriId, OrganizationId, PatId, PayoutId, ProductId,
|
OAuthRedirectUriId, OrganizationId, PatId, PayoutId, ProductId,
|
||||||
ProductPriceId, ProjectId, ReportId, SessionId, TeamId, TeamMemberId,
|
ProductPriceId, ProjectId, ReportId, SessionId, SharedInstanceId,
|
||||||
ThreadId, ThreadMessageId, UserSubscriptionId, VersionId,
|
SharedInstanceVersionId, TeamId, TeamMemberId, ThreadId, ThreadMessageId,
|
||||||
|
UserSubscriptionId, VersionId,
|
||||||
};
|
};
|
||||||
use ariadne::ids::base62_impl::to_base62;
|
use ariadne::ids::base62_impl::to_base62;
|
||||||
use ariadne::ids::{UserId, random_base62_rng, random_base62_rng_range};
|
use ariadne::ids::{UserId, random_base62_rng, random_base62_rng_range};
|
||||||
@ -88,39 +89,50 @@ macro_rules! generate_bulk_ids {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
macro_rules! impl_db_id_interface {
|
||||||
|
($id_struct:ident, $db_id_struct:ident, $(, generator: $generator_function:ident @ $db_table:expr, $(bulk_generator: $bulk_generator_function:ident,)?)?) => {
|
||||||
|
#[derive(Copy, Clone, Debug, Type, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||||
|
#[sqlx(transparent)]
|
||||||
|
pub struct $db_id_struct(pub i64);
|
||||||
|
|
||||||
|
impl From<$id_struct> for $db_id_struct {
|
||||||
|
fn from(id: $id_struct) -> Self {
|
||||||
|
Self(id.0 as i64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<$db_id_struct> for $id_struct {
|
||||||
|
fn from(id: $db_id_struct) -> Self {
|
||||||
|
Self(id.0 as u64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
generate_ids!(
|
||||||
|
$generator_function,
|
||||||
|
$db_id_struct,
|
||||||
|
"SELECT EXISTS(SELECT 1 FROM " + $db_table + " WHERE id=$1)"
|
||||||
|
);
|
||||||
|
|
||||||
|
$(
|
||||||
|
generate_bulk_ids!(
|
||||||
|
$bulk_generator_function,
|
||||||
|
$db_id_struct,
|
||||||
|
"SELECT EXISTS(SELECT 1 FROM " + $db_table + " WHERE id = ANY($1))"
|
||||||
|
);
|
||||||
|
)?
|
||||||
|
)?
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
macro_rules! db_id_interface {
|
macro_rules! db_id_interface {
|
||||||
($id_struct:ident $(, generator: $generator_function:ident @ $db_table:expr, $(bulk_generator: $bulk_generator_function:ident,)?)?) => {
|
($id_struct:ident $(, generator: $generator_function:ident @ $db_table:expr, $(bulk_generator: $bulk_generator_function:ident,)?)?) => {
|
||||||
paste! {
|
paste! {
|
||||||
#[derive(Copy, Clone, Debug, Type, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
impl_db_id_interface!(
|
||||||
#[sqlx(transparent)]
|
$id_struct,
|
||||||
pub struct [< DB $id_struct >](pub i64);
|
[< DB $id_struct >],
|
||||||
|
$(, generator: $generator_function @ $db_table, $(bulk_generator: $bulk_generator_function,)?)?
|
||||||
impl From<$id_struct> for [< DB $id_struct >] {
|
);
|
||||||
fn from(id: $id_struct) -> Self {
|
|
||||||
Self(id.0 as i64)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<[< DB $id_struct >]> for $id_struct {
|
|
||||||
fn from(id: [< DB $id_struct >]) -> Self {
|
|
||||||
Self(id.0 as u64)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
$(
|
|
||||||
generate_ids!(
|
|
||||||
$generator_function,
|
|
||||||
[< DB $id_struct >],
|
|
||||||
"SELECT EXISTS(SELECT 1 FROM " + $db_table + " WHERE id=$1)"
|
|
||||||
);
|
|
||||||
|
|
||||||
$(
|
|
||||||
generate_bulk_ids!(
|
|
||||||
$bulk_generator_function,
|
|
||||||
[< DB $id_struct >],
|
|
||||||
"SELECT EXISTS(SELECT 1 FROM " + $db_table + " WHERE id = ANY($1))"
|
|
||||||
);
|
|
||||||
)?
|
|
||||||
)?
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@ -212,6 +224,14 @@ db_id_interface!(
|
|||||||
SessionId,
|
SessionId,
|
||||||
generator: generate_session_id @ "sessions",
|
generator: generate_session_id @ "sessions",
|
||||||
);
|
);
|
||||||
|
db_id_interface!(
|
||||||
|
SharedInstanceId,
|
||||||
|
generator: generate_shared_instance_id @ "shared_instances",
|
||||||
|
);
|
||||||
|
db_id_interface!(
|
||||||
|
SharedInstanceVersionId,
|
||||||
|
generator: generate_shared_instance_version_id @ "shared_instance_versions",
|
||||||
|
);
|
||||||
db_id_interface!(
|
db_id_interface!(
|
||||||
TeamId,
|
TeamId,
|
||||||
generator: generate_team_id @ "teams",
|
generator: generate_team_id @ "teams",
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ pub mod product_item;
 pub mod project_item;
 pub mod report_item;
 pub mod session_item;
+pub mod shared_instance_item;
 pub mod team_item;
 pub mod thread_item;
 pub mod user_item;
apps/labrinth/src/database/models/shared_instance_item.rs (new file, 335 lines)
@@ -0,0 +1,335 @@
use crate::database::models::{
    DBSharedInstanceId, DBSharedInstanceVersionId, DBUserId,
};
use crate::database::redis::RedisPool;
use crate::models::shared_instances::SharedInstanceUserPermissions;
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use futures_util::TryStreamExt;
use serde::{Deserialize, Serialize};

//region shared_instances
pub struct DBSharedInstance {
    pub id: DBSharedInstanceId,
    pub title: String,
    pub owner_id: DBUserId,
    pub public: bool,
    pub current_version_id: Option<DBSharedInstanceVersionId>,
}

struct SharedInstanceQueryResult {
    id: i64,
    title: String,
    owner_id: i64,
    public: bool,
    current_version_id: Option<i64>,
}

impl From<SharedInstanceQueryResult> for DBSharedInstance {
    fn from(val: SharedInstanceQueryResult) -> Self {
        DBSharedInstance {
            id: DBSharedInstanceId(val.id),
            title: val.title,
            owner_id: DBUserId(val.owner_id),
            public: val.public,
            current_version_id: val
                .current_version_id
                .map(DBSharedInstanceVersionId),
        }
    }
}

impl DBSharedInstance {
    pub async fn insert(
        &self,
        transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<(), sqlx::Error> {
        sqlx::query!(
            "
            INSERT INTO shared_instances (id, title, owner_id, current_version_id)
            VALUES ($1, $2, $3, $4)
            ",
            self.id as DBSharedInstanceId,
            self.title,
            self.owner_id as DBUserId,
            self.current_version_id.map(|x| x.0),
        )
        .execute(&mut **transaction)
        .await?;

        Ok(())
    }

    pub async fn get(
        id: DBSharedInstanceId,
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
    ) -> Result<Option<Self>, sqlx::Error> {
        let result = sqlx::query_as!(
            SharedInstanceQueryResult,
            "
            SELECT id, title, owner_id, public, current_version_id
            FROM shared_instances
            WHERE id = $1
            ",
            id.0,
        )
        .fetch_optional(exec)
        .await?;

        Ok(result.map(Into::into))
    }

    pub async fn list_for_user(
        user: DBUserId,
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
    ) -> Result<Vec<Self>, sqlx::Error> {
        let results = sqlx::query_as!(
            SharedInstanceQueryResult,
            r#"
            -- See https://github.com/launchbadge/sqlx/issues/1266 for why we need all the "as"
            SELECT
                id as "id!",
                title as "title!",
                public as "public!",
                owner_id as "owner_id!",
                current_version_id
            FROM shared_instances
            WHERE owner_id = $1
            UNION
            SELECT
                id as "id!",
                title as "title!",
                public as "public!",
                owner_id as "owner_id!",
                current_version_id
            FROM shared_instances
            JOIN shared_instance_users ON id = shared_instance_id
            WHERE user_id = $1
            "#,
            user.0,
        )
        .fetch_all(exec)
        .await?;

        Ok(results.into_iter().map(Into::into).collect())
    }
}
//endregion

//region shared_instance_users
const USERS_NAMESPACE: &str = "shared_instance_users";

#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct DBSharedInstanceUser {
    pub user_id: DBUserId,
    pub shared_instance_id: DBSharedInstanceId,
    pub permissions: SharedInstanceUserPermissions,
}

impl DBSharedInstanceUser {
    pub async fn insert(
        &self,
        transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<(), sqlx::Error> {
        sqlx::query!(
            "
            INSERT INTO shared_instance_users (user_id, shared_instance_id, permissions)
            VALUES ($1, $2, $3)
            ",
            self.user_id as DBUserId,
            self.shared_instance_id as DBSharedInstanceId,
            self.permissions.bits() as i64,
        )
        .execute(&mut **transaction)
        .await?;

        Ok(())
    }

    pub async fn get_user_permissions(
        instance_id: DBSharedInstanceId,
        user_id: DBUserId,
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
    ) -> Result<Option<SharedInstanceUserPermissions>, super::DatabaseError>
    {
        let permissions = sqlx::query!(
            "
            SELECT permissions
            FROM shared_instance_users
            WHERE shared_instance_id = $1 AND user_id = $2
            ",
            instance_id as DBSharedInstanceId,
            user_id as DBUserId,
        )
        .fetch_optional(exec)
        .await?
        .map(|x| {
            SharedInstanceUserPermissions::from_bits(x.permissions as u64)
                .unwrap_or(SharedInstanceUserPermissions::empty())
        });

        Ok(permissions)
    }

    pub async fn get_from_instance(
        instance_id: DBSharedInstanceId,
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
        redis: &RedisPool,
    ) -> Result<Vec<DBSharedInstanceUser>, super::DatabaseError> {
        Self::get_from_instance_many(&[instance_id], exec, redis).await
    }

    pub async fn get_from_instance_many(
        instance_ids: &[DBSharedInstanceId],
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
        redis: &RedisPool,
    ) -> Result<Vec<DBSharedInstanceUser>, super::DatabaseError> {
        if instance_ids.is_empty() {
            return Ok(vec![]);
        }

        let users = redis
            .get_cached_keys(
                USERS_NAMESPACE,
                &instance_ids.iter().map(|id| id.0).collect::<Vec<_>>(),
                async |user_ids| {
                    let users = sqlx::query!(
                        "
                        SELECT shared_instance_id, user_id, permissions
                        FROM shared_instance_users
                        WHERE shared_instance_id = ANY($1)
                        ",
                        &user_ids
                    )
                    .fetch(exec)
                    .try_fold(DashMap::new(), |acc: DashMap<_, Vec<_>>, m| {
                        acc.entry(m.shared_instance_id).or_default().push(
                            DBSharedInstanceUser {
                                user_id: DBUserId(m.user_id),
                                shared_instance_id: DBSharedInstanceId(
                                    m.shared_instance_id,
                                ),
                                permissions:
                                    SharedInstanceUserPermissions::from_bits(
                                        m.permissions as u64,
                                    )
                                    .unwrap_or(
                                        SharedInstanceUserPermissions::empty(),
                                    ),
                            },
                        );

                        async move { Ok(acc) }
                    })
                    .await?;

                    Ok(users)
                },
            )
            .await?;

        Ok(users.into_iter().flatten().collect())
    }

    pub async fn clear_cache(
        instance_id: DBSharedInstanceId,
        redis: &RedisPool,
    ) -> Result<(), super::DatabaseError> {
        let mut redis = redis.connect().await?;
        redis.delete(USERS_NAMESPACE, instance_id.0).await?;
        Ok(())
    }
}
//endregion

//region shared_instance_versions
pub struct DBSharedInstanceVersion {
    pub id: DBSharedInstanceVersionId,
    pub shared_instance_id: DBSharedInstanceId,
    pub size: u64,
    pub sha512: Vec<u8>,
    pub created: DateTime<Utc>,
}

struct SharedInstanceVersionQueryResult {
    id: i64,
    shared_instance_id: i64,
    size: i64,
    sha512: Vec<u8>,
    created: DateTime<Utc>,
}

impl From<SharedInstanceVersionQueryResult> for DBSharedInstanceVersion {
    fn from(val: SharedInstanceVersionQueryResult) -> Self {
        DBSharedInstanceVersion {
            id: DBSharedInstanceVersionId(val.id),
            shared_instance_id: DBSharedInstanceId(val.shared_instance_id),
            size: val.size as u64,
            sha512: val.sha512,
            created: val.created,
        }
    }
}

impl DBSharedInstanceVersion {
    pub async fn insert(
        &self,
        transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<(), sqlx::Error> {
        sqlx::query!(
            "
            INSERT INTO shared_instance_versions (id, shared_instance_id, size, sha512, created)
            VALUES ($1, $2, $3, $4, $5)
            ",
            self.id as DBSharedInstanceVersionId,
            self.shared_instance_id as DBSharedInstanceId,
            self.size as i64,
            self.sha512,
            self.created,
        )
        .execute(&mut **transaction)
        .await?;

        Ok(())
    }

    pub async fn get(
        id: DBSharedInstanceVersionId,
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
    ) -> Result<Option<Self>, sqlx::Error> {
        let result = sqlx::query_as!(
            SharedInstanceVersionQueryResult,
            "
            SELECT id, shared_instance_id, size, sha512, created
            FROM shared_instance_versions
            WHERE id = $1
            ",
            id as DBSharedInstanceVersionId,
        )
        .fetch_optional(exec)
        .await?;

        Ok(result.map(Into::into))
    }

    pub async fn get_for_instance(
        instance_id: DBSharedInstanceId,
        exec: impl sqlx::Executor<'_, Database = sqlx::Postgres>,
    ) -> Result<Vec<Self>, sqlx::Error> {
        let results = sqlx::query_as!(
            SharedInstanceVersionQueryResult,
            "
            SELECT id, shared_instance_id, size, sha512, created
            FROM shared_instance_versions
            WHERE shared_instance_id = $1
            ORDER BY created DESC
            ",
            instance_id as DBSharedInstanceId,
        )
        .fetch_all(exec)
        .await?;

        Ok(results.into_iter().map(Into::into).collect())
    }
}
//endregion
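Permissions travel through the database as a `BIGINT` bitset and are decoded with `SharedInstanceUserPermissions::from_bits`, with unrecognized bits collapsing to `empty()`. A sketch of how a route might gate an upload on them; the `UPLOAD_VERSION` flag name and the pool wiring are assumptions, since the enum's variants are defined outside this diff:

```rust
// Hedged sketch: owners always pass; other users need an explicit bit set.
let perms = DBSharedInstanceUser::get_user_permissions(
    instance_id,
    user_id,
    &**pool, // assumed executor wiring
)
.await?;

let can_upload = instance.owner_id == user_id
    || perms.is_some_and(|p| {
        // Assumed flag name, for illustration only.
        p.contains(SharedInstanceUserPermissions::UPLOAD_VERSION)
    });
```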
@@ -511,6 +511,18 @@ impl DBUser {
         .execute(&mut **transaction)
         .await?;

+        sqlx::query!(
+            "
+            UPDATE shared_instances
+            SET owner_id = $1
+            WHERE owner_id = $2
+            ",
+            deleted_user as DBUserId,
+            id as DBUserId,
+        )
+        .execute(&mut **transaction)
+        .await?;
+
         use futures::TryStreamExt;
         let notifications: Vec<i64> = sqlx::query!(
             "
(deleted file, 108 lines)
@@ -1,108 +0,0 @@
use super::{DeleteFileData, FileHost, FileHostingError, UploadFileData};
use async_trait::async_trait;
use bytes::Bytes;
use reqwest::Response;
use serde::Deserialize;
use sha2::Digest;

mod authorization;
mod delete;
mod upload;

pub struct BackblazeHost {
    upload_url_data: authorization::UploadUrlData,
    authorization_data: authorization::AuthorizationData,
}

impl BackblazeHost {
    pub async fn new(key_id: &str, key: &str, bucket_id: &str) -> Self {
        let authorization_data =
            authorization::authorize_account(key_id, key).await.unwrap();
        let upload_url_data =
            authorization::get_upload_url(&authorization_data, bucket_id)
                .await
                .unwrap();

        BackblazeHost {
            upload_url_data,
            authorization_data,
        }
    }
}

#[async_trait]
impl FileHost for BackblazeHost {
    async fn upload_file(
        &self,
        content_type: &str,
        file_name: &str,
        file_bytes: Bytes,
    ) -> Result<UploadFileData, FileHostingError> {
        let content_sha512 = format!("{:x}", sha2::Sha512::digest(&file_bytes));

        let upload_data = upload::upload_file(
            &self.upload_url_data,
            content_type,
            file_name,
            file_bytes,
        )
        .await?;
        Ok(UploadFileData {
            file_id: upload_data.file_id,
            file_name: upload_data.file_name,
            content_length: upload_data.content_length,
            content_sha512,
            content_sha1: upload_data.content_sha1,
            content_md5: upload_data.content_md5,
            content_type: upload_data.content_type,
            upload_timestamp: upload_data.upload_timestamp,
        })
    }

    /*
    async fn upload_file_streaming(
        &self,
        content_type: &str,
        file_name: &str,
        stream: reqwest::Body
    ) -> Result<UploadFileData, FileHostingError> {
        use futures::stream::StreamExt;

        let mut data = Vec::new();
        while let Some(chunk) = stream.next().await {
            data.extend_from_slice(&chunk.map_err(|e| FileHostingError::Other(e))?);
        }
        self.upload_file(content_type, file_name, data).await
    }
    */

    async fn delete_file_version(
        &self,
        file_id: &str,
        file_name: &str,
    ) -> Result<DeleteFileData, FileHostingError> {
        let delete_data = delete::delete_file_version(
            &self.authorization_data,
            file_id,
            file_name,
        )
        .await?;
        Ok(DeleteFileData {
            file_id: delete_data.file_id,
            file_name: delete_data.file_name,
        })
    }
}

pub async fn process_response<T>(
    response: Response,
) -> Result<T, FileHostingError>
where
    T: for<'de> Deserialize<'de>,
{
    if response.status().is_success() {
        Ok(response.json().await?)
    } else {
        Err(FileHostingError::BackblazeError(response.json().await?))
    }
}
(deleted file, 81 lines)
@@ -1,81 +0,0 @@
use crate::file_hosting::FileHostingError;
use base64::Engine;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AuthorizationPermissions {
    bucket_id: Option<String>,
    bucket_name: Option<String>,
    capabilities: Vec<String>,
    name_prefix: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AuthorizationData {
    pub absolute_minimum_part_size: i32,
    pub account_id: String,
    pub allowed: AuthorizationPermissions,
    pub api_url: String,
    pub authorization_token: String,
    pub download_url: String,
    pub recommended_part_size: i32,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct UploadUrlData {
    pub bucket_id: String,
    pub upload_url: String,
    pub authorization_token: String,
}

pub async fn authorize_account(
    key_id: &str,
    application_key: &str,
) -> Result<AuthorizationData, FileHostingError> {
    let combined_key = format!("{key_id}:{application_key}");
    let formatted_key = format!(
        "Basic {}",
        base64::engine::general_purpose::STANDARD.encode(combined_key)
    );

    let response = reqwest::Client::new()
        .get("https://api.backblazeb2.com/b2api/v2/b2_authorize_account")
        .header(reqwest::header::CONTENT_TYPE, "application/json")
        .header(reqwest::header::AUTHORIZATION, formatted_key)
        .send()
        .await?;

    super::process_response(response).await
}

pub async fn get_upload_url(
    authorization_data: &AuthorizationData,
    bucket_id: &str,
) -> Result<UploadUrlData, FileHostingError> {
    let response = reqwest::Client::new()
        .post(
            format!(
                "{}/b2api/v2/b2_get_upload_url",
                authorization_data.api_url
            )
            .to_string(),
        )
        .header(reqwest::header::CONTENT_TYPE, "application/json")
        .header(
            reqwest::header::AUTHORIZATION,
            &authorization_data.authorization_token,
        )
        .body(
            serde_json::json!({
                "bucketId": bucket_id,
            })
            .to_string(),
        )
        .send()
        .await?;

    super::process_response(response).await
}
(deleted file, 38 lines)
@@ -1,38 +0,0 @@
use super::authorization::AuthorizationData;
use crate::file_hosting::FileHostingError;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DeleteFileData {
    pub file_id: String,
    pub file_name: String,
}

pub async fn delete_file_version(
    authorization_data: &AuthorizationData,
    file_id: &str,
    file_name: &str,
) -> Result<DeleteFileData, FileHostingError> {
    let response = reqwest::Client::new()
        .post(format!(
            "{}/b2api/v2/b2_delete_file_version",
            authorization_data.api_url
        ))
        .header(reqwest::header::CONTENT_TYPE, "application/json")
        .header(
            reqwest::header::AUTHORIZATION,
            &authorization_data.authorization_token,
        )
        .body(
            serde_json::json!({
                "fileName": file_name,
                "fileId": file_id
            })
            .to_string(),
        )
        .send()
        .await?;

    super::process_response(response).await
}
(deleted file, 47 lines)
@@ -1,47 +0,0 @@
use super::authorization::UploadUrlData;
use crate::file_hosting::FileHostingError;
use bytes::Bytes;
use hex::ToHex;
use serde::{Deserialize, Serialize};
use sha1::Digest;

#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct UploadFileData {
    pub file_id: String,
    pub file_name: String,
    pub account_id: String,
    pub bucket_id: String,
    pub content_length: u32,
    pub content_sha1: String,
    pub content_md5: Option<String>,
    pub content_type: String,
    pub upload_timestamp: u64,
}

//Content Types found here: https://www.backblaze.com/b2/docs/content-types.html
pub async fn upload_file(
    url_data: &UploadUrlData,
    content_type: &str,
    file_name: &str,
    file_bytes: Bytes,
) -> Result<UploadFileData, FileHostingError> {
    let response = reqwest::Client::new()
        .post(&url_data.upload_url)
        .header(
            reqwest::header::AUTHORIZATION,
            &url_data.authorization_token,
        )
        .header("X-Bz-File-Name", file_name)
        .header(reqwest::header::CONTENT_TYPE, content_type)
        .header(reqwest::header::CONTENT_LENGTH, file_bytes.len())
        .header(
            "X-Bz-Content-Sha1",
            sha1::Sha1::digest(&file_bytes).encode_hex::<String>(),
        )
        .body(file_bytes)
        .send()
        .await?;

    super::process_response(response).await
}
@@ -1,9 +1,13 @@
-use super::{DeleteFileData, FileHost, FileHostingError, UploadFileData};
+use super::{
+    DeleteFileData, FileHost, FileHostPublicity, FileHostingError,
+    UploadFileData,
+};
 use async_trait::async_trait;
 use bytes::Bytes;
 use chrono::Utc;
 use hex::ToHex;
 use sha2::Digest;
+use std::path::PathBuf;

 #[derive(Default)]
 pub struct MockHost(());
@@ -20,11 +24,10 @@ impl FileHost for MockHost {
         &self,
         content_type: &str,
         file_name: &str,
+        file_publicity: FileHostPublicity,
         file_bytes: Bytes,
     ) -> Result<UploadFileData, FileHostingError> {
-        let path =
-            std::path::Path::new(&dotenvy::var("MOCK_FILE_PATH").unwrap())
-                .join(file_name.replace("../", ""));
+        let path = get_file_path(file_name, file_publicity);
         std::fs::create_dir_all(
             path.parent().ok_or(FileHostingError::InvalidFilename)?,
         )?;
@@ -33,8 +36,8 @@ impl FileHost for MockHost {

         std::fs::write(path, &*file_bytes)?;
         Ok(UploadFileData {
-            file_id: String::from("MOCK_FILE_ID"),
             file_name: file_name.to_string(),
+            file_publicity,
             content_length: file_bytes.len() as u32,
             content_sha512,
             content_sha1,
@@ -44,20 +47,40 @@ impl FileHost for MockHost {
         })
     }

-    async fn delete_file_version(
+    async fn get_url_for_private_file(
         &self,
-        file_id: &str,
         file_name: &str,
+        _expiry_secs: u32,
+    ) -> Result<String, FileHostingError> {
+        let cdn_url = dotenvy::var("CDN_URL").unwrap();
+        Ok(format!("{cdn_url}/private/{file_name}"))
+    }
+
+    async fn delete_file(
+        &self,
+        file_name: &str,
+        file_publicity: FileHostPublicity,
     ) -> Result<DeleteFileData, FileHostingError> {
-        let path =
-            std::path::Path::new(&dotenvy::var("MOCK_FILE_PATH").unwrap())
-                .join(file_name.replace("../", ""));
+        let path = get_file_path(file_name, file_publicity);
         if path.exists() {
             std::fs::remove_file(path)?;
         }
         Ok(DeleteFileData {
-            file_id: file_id.to_string(),
             file_name: file_name.to_string(),
         })
     }
 }
+
+fn get_file_path(
+    file_name: &str,
+    file_publicity: FileHostPublicity,
+) -> PathBuf {
+    let mut path = PathBuf::from(dotenvy::var("MOCK_FILE_PATH").unwrap());
+
+    if matches!(file_publicity, FileHostPublicity::Private) {
+        path.push("private");
+    }
+    path.push(file_name.replace("../", ""));
+
+    path
+}
@@ -1,23 +1,17 @@
 use async_trait::async_trait;
 use thiserror::Error;
 
-mod backblaze;
 mod mock;
 mod s3_host;
 
-pub use backblaze::BackblazeHost;
 use bytes::Bytes;
 pub use mock::MockHost;
-pub use s3_host::S3Host;
+pub use s3_host::{S3BucketConfig, S3Host};
 
 #[derive(Error, Debug)]
 pub enum FileHostingError {
-    #[error("Error while accessing the data from backblaze")]
-    HttpError(#[from] reqwest::Error),
-    #[error("Backblaze error: {0}")]
-    BackblazeError(serde_json::Value),
-    #[error("S3 error: {0}")]
-    S3Error(String),
+    #[error("S3 error when {0}: {1}")]
+    S3Error(&'static str, s3::error::S3Error),
     #[error("File system error in file hosting: {0}")]
     FileSystemError(#[from] std::io::Error),
     #[error("Invalid Filename")]
@@ -26,8 +20,8 @@ pub enum FileHostingError {
 
 #[derive(Debug, Clone)]
 pub struct UploadFileData {
-    pub file_id: String,
     pub file_name: String,
+    pub file_publicity: FileHostPublicity,
     pub content_length: u32,
     pub content_sha512: String,
     pub content_sha1: String,
@@ -38,22 +32,34 @@ pub struct UploadFileData {
 
 #[derive(Debug, Clone)]
 pub struct DeleteFileData {
-    pub file_id: String,
     pub file_name: String,
 }
 
+#[derive(Debug, Copy, Clone)]
+pub enum FileHostPublicity {
+    Public,
+    Private,
+}
+
 #[async_trait]
 pub trait FileHost {
     async fn upload_file(
         &self,
         content_type: &str,
         file_name: &str,
+        file_publicity: FileHostPublicity,
         file_bytes: Bytes,
     ) -> Result<UploadFileData, FileHostingError>;
 
-    async fn delete_file_version(
+    async fn get_url_for_private_file(
         &self,
-        file_id: &str,
         file_name: &str,
+        expiry_secs: u32,
+    ) -> Result<String, FileHostingError>;
+
+    async fn delete_file(
+        &self,
+        file_name: &str,
+        file_publicity: FileHostPublicity,
     ) -> Result<DeleteFileData, FileHostingError>;
 }
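The trait now models the whole file lifecycle around publicity instead of Backblaze-style file IDs. A minimal sketch of a caller driving the reworked `FileHost` trait, assuming any implementation from this module; the file name and expiry value are placeholders:

use bytes::Bytes;

use crate::file_hosting::{FileHost, FileHostPublicity, FileHostingError};

async fn roundtrip(host: &dyn FileHost) -> Result<(), FileHostingError> {
    let uploaded = host
        .upload_file(
            "application/octet-stream",
            "example/file.bin", // hypothetical name
            FileHostPublicity::Private,
            Bytes::from_static(b"hello"),
        )
        .await?;

    // Private files are never served directly; callers request a
    // short-lived presigned URL instead.
    let url = host
        .get_url_for_private_file(&uploaded.file_name, 60)
        .await?;
    println!("temporary download URL: {url}");

    host.delete_file(&uploaded.file_name, FileHostPublicity::Private)
        .await?;
    Ok(())
}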
@@ -1,5 +1,6 @@
 use crate::file_hosting::{
-    DeleteFileData, FileHost, FileHostingError, UploadFileData,
+    DeleteFileData, FileHost, FileHostPublicity, FileHostingError,
+    UploadFileData,
 };
 use async_trait::async_trait;
 use bytes::Bytes;
@@ -10,50 +11,70 @@ use s3::creds::Credentials;
 use s3::region::Region;
 use sha2::Digest;
 
+pub struct S3BucketConfig {
+    pub name: String,
+    pub uses_path_style: bool,
+    pub region: String,
+    pub url: String,
+    pub access_token: String,
+    pub secret: String,
+}
+
 pub struct S3Host {
-    bucket: Bucket,
+    public_bucket: Bucket,
+    private_bucket: Bucket,
 }
 
 impl S3Host {
     pub fn new(
-        bucket_name: &str,
-        bucket_region: &str,
-        url: &str,
-        access_token: &str,
-        secret: &str,
+        public_bucket: S3BucketConfig,
+        private_bucket: S3BucketConfig,
     ) -> Result<S3Host, FileHostingError> {
-        let bucket = Bucket::new(
-            bucket_name,
-            if bucket_region == "r2" {
-                Region::R2 {
-                    account_id: url.to_string(),
-                }
-            } else {
-                Region::Custom {
-                    region: bucket_region.to_string(),
-                    endpoint: url.to_string(),
-                }
-            },
-            Credentials::new(
-                Some(access_token),
-                Some(secret),
-                None,
-                None,
-                None,
-            )
-            .map_err(|_| {
-                FileHostingError::S3Error(
-                    "Error while creating credentials".to_string(),
-                )
-            })?,
-        )
-        .map_err(|_| {
-            FileHostingError::S3Error(
-                "Error while creating Bucket instance".to_string(),
-            )
-        })?;
-
-        Ok(S3Host { bucket: *bucket })
+        let create_bucket =
+            |config: S3BucketConfig| -> Result<_, FileHostingError> {
+                let mut bucket = Bucket::new(
+                    "",
+                    if config.region == "r2" {
+                        Region::R2 {
+                            account_id: config.url,
+                        }
+                    } else {
+                        Region::Custom {
+                            region: config.region,
+                            endpoint: config.url,
+                        }
+                    },
+                    Credentials {
+                        access_key: Some(config.access_token),
+                        secret_key: Some(config.secret),
+                        ..Credentials::anonymous().unwrap()
+                    },
+                )
+                .map_err(|e| {
+                    FileHostingError::S3Error("creating Bucket instance", e)
+                })?;
+
+                bucket.name = config.name;
+                if config.uses_path_style {
+                    bucket.set_path_style();
+                } else {
+                    bucket.set_subdomain_style();
+                }
+
+                Ok(bucket)
+            };
+
+        Ok(S3Host {
+            public_bucket: *create_bucket(public_bucket)?,
+            private_bucket: *create_bucket(private_bucket)?,
+        })
+    }
+
+    fn get_bucket(&self, publicity: FileHostPublicity) -> &Bucket {
+        match publicity {
+            FileHostPublicity::Public => &self.public_bucket,
+            FileHostPublicity::Private => &self.private_bucket,
+        }
     }
 }
 
@@ -63,27 +84,24 @@ impl FileHost for S3Host {
         &self,
         content_type: &str,
         file_name: &str,
+        file_publicity: FileHostPublicity,
         file_bytes: Bytes,
     ) -> Result<UploadFileData, FileHostingError> {
         let content_sha1 = sha1::Sha1::digest(&file_bytes).encode_hex();
         let content_sha512 = format!("{:x}", sha2::Sha512::digest(&file_bytes));
 
-        self.bucket
+        self.get_bucket(file_publicity)
             .put_object_with_content_type(
                 format!("/{file_name}"),
                 &file_bytes,
                 content_type,
             )
             .await
-            .map_err(|err| {
-                FileHostingError::S3Error(format!(
-                    "Error while uploading file {file_name} to S3: {err}"
-                ))
-            })?;
+            .map_err(|e| FileHostingError::S3Error("uploading file", e))?;
 
         Ok(UploadFileData {
-            file_id: file_name.to_string(),
             file_name: file_name.to_string(),
+            file_publicity,
             content_length: file_bytes.len() as u32,
             content_sha512,
             content_sha1,
@@ -93,22 +111,32 @@ impl FileHost for S3Host {
         })
     }
 
-    async fn delete_file_version(
+    async fn get_url_for_private_file(
         &self,
-        file_id: &str,
         file_name: &str,
+        expiry_secs: u32,
+    ) -> Result<String, FileHostingError> {
+        let url = self
+            .private_bucket
+            .presign_get(format!("/{file_name}"), expiry_secs, None)
+            .await
+            .map_err(|e| {
+                FileHostingError::S3Error("generating presigned URL", e)
+            })?;
+        Ok(url)
+    }
+
+    async fn delete_file(
+        &self,
+        file_name: &str,
+        file_publicity: FileHostPublicity,
     ) -> Result<DeleteFileData, FileHostingError> {
-        self.bucket
+        self.get_bucket(file_publicity)
             .delete_object(format!("/{file_name}"))
             .await
-            .map_err(|err| {
-                FileHostingError::S3Error(format!(
-                    "Error while deleting file {file_name} to S3: {err}"
-                ))
-            })?;
+            .map_err(|e| FileHostingError::S3Error("deleting file", e))?;
 
         Ok(DeleteFileData {
-            file_id: file_id.to_string(),
             file_name: file_name.to_string(),
         })
     }
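With `S3Host` split across two buckets, each bucket is configured independently, including its addressing style. A sketch of constructing one, where every concrete value below is a placeholder rather than a real deployment setting:

use crate::file_hosting::{S3BucketConfig, S3Host};

// Placeholder config; uses_path_style = true suits MinIO-style endpoints,
// while subdomain style is the usual choice for AWS or R2.
fn bucket(name: &str) -> S3BucketConfig {
    S3BucketConfig {
        name: name.to_string(),
        uses_path_style: true,
        region: "us-east-1".to_string(),
        url: "http://localhost:9000".to_string(),
        access_token: "ACCESS".to_string(),
        secret: "SECRET".to_string(),
    }
}

fn build() -> S3Host {
    // Separate buckets let the public one sit behind the CDN while the
    // private one is only reachable through presigned URLs.
    S3Host::new(bucket("labrinth-public"), bucket("labrinth-private"))
        .expect("S3 configuration should be valid")
}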
@@ -334,7 +334,7 @@ pub fn app_config(
 pub fn check_env_vars() -> bool {
     let mut failed = false;
 
-    fn check_var<T: std::str::FromStr>(var: &'static str) -> bool {
+    fn check_var<T: std::str::FromStr>(var: &str) -> bool {
         let check = parse_var::<T>(var).is_none();
         if check {
             warn!(
@@ -361,25 +361,33 @@ pub fn check_env_vars() -> bool {
 
     let storage_backend = dotenvy::var("STORAGE_BACKEND").ok();
     match storage_backend.as_deref() {
-        Some("backblaze") => {
-            failed |= check_var::<String>("BACKBLAZE_KEY_ID");
-            failed |= check_var::<String>("BACKBLAZE_KEY");
-            failed |= check_var::<String>("BACKBLAZE_BUCKET_ID");
-        }
         Some("s3") => {
-            failed |= check_var::<String>("S3_ACCESS_TOKEN");
-            failed |= check_var::<String>("S3_SECRET");
-            failed |= check_var::<String>("S3_URL");
-            failed |= check_var::<String>("S3_REGION");
-            failed |= check_var::<String>("S3_BUCKET_NAME");
+            let mut check_var_set = |var_prefix| {
+                failed |= check_var::<String>(&format!(
+                    "S3_{var_prefix}_BUCKET_NAME"
+                ));
+                failed |= check_var::<bool>(&format!(
+                    "S3_{var_prefix}_USES_PATH_STYLE_BUCKET"
+                ));
+                failed |=
+                    check_var::<String>(&format!("S3_{var_prefix}_REGION"));
+                failed |= check_var::<String>(&format!("S3_{var_prefix}_URL"));
+                failed |= check_var::<String>(&format!(
+                    "S3_{var_prefix}_ACCESS_TOKEN"
+                ));
+                failed |=
+                    check_var::<String>(&format!("S3_{var_prefix}_SECRET"));
+            };
+            check_var_set("PUBLIC");
+            check_var_set("PRIVATE");
         }
         Some("local") => {
             failed |= check_var::<String>("MOCK_FILE_PATH");
         }
         Some(backend) => {
             warn!(
-                "Variable `STORAGE_BACKEND` contains an invalid value: {}. Expected \"backblaze\", \"s3\", or \"local\".",
-                backend
+                "Variable `STORAGE_BACKEND` contains an invalid value: {backend}. Expected \"s3\" or \"local\"."
             );
             failed |= true;
         }
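Since the variable names are built from a prefix, the "s3" backend now requires twelve variables in total, six per bucket set. A small sketch that enumerates them, assuming nothing beyond the format string used above:

// The same six suffixes are checked once per prefix, so an "s3" backend
// needs everything from S3_PUBLIC_BUCKET_NAME through S3_PRIVATE_SECRET.
const S3_VAR_SUFFIXES: [&str; 6] = [
    "BUCKET_NAME",
    "USES_PATH_STYLE_BUCKET",
    "REGION",
    "URL",
    "ACCESS_TOKEN",
    "SECRET",
];

fn expected_vars() -> Vec<String> {
    ["PUBLIC", "PRIVATE"]
        .iter()
        .flat_map(|prefix| {
            S3_VAR_SUFFIXES
                .iter()
                .map(move |suffix| format!("S3_{prefix}_{suffix}"))
        })
        .collect()
}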
@@ -4,8 +4,9 @@ use actix_web_prom::PrometheusMetricsBuilder;
 use clap::Parser;
 use labrinth::background_task::BackgroundTask;
 use labrinth::database::redis::RedisPool;
-use labrinth::file_hosting::S3Host;
+use labrinth::file_hosting::{S3BucketConfig, S3Host};
 use labrinth::search;
+use labrinth::util::env::parse_var;
 use labrinth::util::ratelimit::rate_limit_middleware;
 use labrinth::{check_env_vars, clickhouse, database, file_hosting, queue};
 use std::ffi::CStr;
@@ -51,6 +52,7 @@ async fn main() -> std::io::Result<()> {
 
     if check_env_vars() {
         error!("Some environment variables are missing!");
+        std::process::exit(1);
     }
 
     // DSN is from SENTRY_DSN env variable.
@@ -93,24 +95,33 @@ async fn main() -> std::io::Result<()> {
 
     let file_host: Arc<dyn file_hosting::FileHost + Send + Sync> =
         match storage_backend.as_str() {
-            "backblaze" => Arc::new(
-                file_hosting::BackblazeHost::new(
-                    &dotenvy::var("BACKBLAZE_KEY_ID").unwrap(),
-                    &dotenvy::var("BACKBLAZE_KEY").unwrap(),
-                    &dotenvy::var("BACKBLAZE_BUCKET_ID").unwrap(),
-                )
-                .await,
-            ),
-            "s3" => Arc::new(
-                S3Host::new(
-                    &dotenvy::var("S3_BUCKET_NAME").unwrap(),
-                    &dotenvy::var("S3_REGION").unwrap(),
-                    &dotenvy::var("S3_URL").unwrap(),
-                    &dotenvy::var("S3_ACCESS_TOKEN").unwrap(),
-                    &dotenvy::var("S3_SECRET").unwrap(),
-                )
-                .unwrap(),
-            ),
+            "s3" => {
+                let config_from_env = |bucket_type| S3BucketConfig {
+                    name: parse_var(&format!("S3_{bucket_type}_BUCKET_NAME"))
+                        .unwrap(),
+                    uses_path_style: parse_var(&format!(
+                        "S3_{bucket_type}_USES_PATH_STYLE_BUCKET"
+                    ))
+                    .unwrap(),
+                    region: parse_var(&format!("S3_{bucket_type}_REGION"))
+                        .unwrap(),
+                    url: parse_var(&format!("S3_{bucket_type}_URL")).unwrap(),
+                    access_token: parse_var(&format!(
+                        "S3_{bucket_type}_ACCESS_TOKEN"
+                    ))
+                    .unwrap(),
+                    secret: parse_var(&format!("S3_{bucket_type}_SECRET"))
+                        .unwrap(),
+                };
+
+                Arc::new(
+                    S3Host::new(
+                        config_from_env("PUBLIC"),
+                        config_from_env("PRIVATE"),
+                    )
+                    .unwrap(),
+                )
+            }
             "local" => Arc::new(file_hosting::MockHost::new()),
             _ => panic!("Invalid storage backend specified. Aborting startup!"),
         };
@@ -16,6 +16,7 @@ pub use v3::payouts;
 pub use v3::projects;
 pub use v3::reports;
 pub use v3::sessions;
+pub use v3::shared_instances;
 pub use v3::teams;
 pub use v3::threads;
 pub use v3::users;
@@ -17,6 +17,8 @@ base62_id!(ProductPriceId);
 base62_id!(ProjectId);
 base62_id!(ReportId);
 base62_id!(SessionId);
+base62_id!(SharedInstanceId);
+base62_id!(SharedInstanceVersionId);
 base62_id!(TeamId);
 base62_id!(TeamMemberId);
 base62_id!(ThreadId);
@@ -12,6 +12,7 @@ pub mod payouts;
 pub mod projects;
 pub mod reports;
 pub mod sessions;
+pub mod shared_instances;
 pub mod teams;
 pub mod threads;
 pub mod users;
@@ -100,6 +100,24 @@ bitflags::bitflags! {
         // only accessible by modrinth-issued sessions
         const SESSION_ACCESS = 1 << 39;
 
+        // create a shared instance
+        const SHARED_INSTANCE_CREATE = 1 << 40;
+        // read a shared instance
+        const SHARED_INSTANCE_READ = 1 << 41;
+        // write to a shared instance
+        const SHARED_INSTANCE_WRITE = 1 << 42;
+        // delete a shared instance
+        const SHARED_INSTANCE_DELETE = 1 << 43;
+
+        // create a shared instance version
+        const SHARED_INSTANCE_VERSION_CREATE = 1 << 44;
+        // read a shared instance version
+        const SHARED_INSTANCE_VERSION_READ = 1 << 45;
+        // write to a shared instance version
+        const SHARED_INSTANCE_VERSION_WRITE = 1 << 46;
+        // delete a shared instance version
+        const SHARED_INSTANCE_VERSION_DELETE = 1 << 47;
+
         const NONE = 0b0;
     }
 }
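These scopes are ordinary bitflags, so route guards test them the same way existing scopes are tested. A tiny illustrative sketch (the function is hypothetical, not part of this diff):

use crate::models::pats::Scopes;

fn scope_demo() {
    // A PAT holding only read scopes fails the create check.
    let granted =
        Scopes::SHARED_INSTANCE_READ | Scopes::SHARED_INSTANCE_VERSION_READ;
    assert!(granted.contains(Scopes::SHARED_INSTANCE_READ));
    assert!(!granted.contains(Scopes::SHARED_INSTANCE_VERSION_CREATE));
}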
89 apps/labrinth/src/models/v3/shared_instances.rs Normal file
@@ -0,0 +1,89 @@
+use crate::bitflags_serde_impl;
+use crate::database::models::shared_instance_item::{
+    DBSharedInstance, DBSharedInstanceUser, DBSharedInstanceVersion,
+};
+use crate::models::ids::{SharedInstanceId, SharedInstanceVersionId};
+use ariadne::ids::UserId;
+use bitflags::bitflags;
+use chrono::{DateTime, Utc};
+use hex::ToHex;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SharedInstance {
+    pub id: SharedInstanceId,
+    pub title: String,
+    pub owner: UserId,
+    pub public: bool,
+    pub current_version: Option<SharedInstanceVersion>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub additional_users: Option<Vec<SharedInstanceUser>>,
+}
+
+impl SharedInstance {
+    pub fn from_db(
+        instance: DBSharedInstance,
+        users: Option<Vec<DBSharedInstanceUser>>,
+        current_version: Option<DBSharedInstanceVersion>,
+    ) -> Self {
+        SharedInstance {
+            id: instance.id.into(),
+            title: instance.title,
+            owner: instance.owner_id.into(),
+            public: instance.public,
+            current_version: current_version.map(Into::into),
+            additional_users: users
+                .map(|x| x.into_iter().map(Into::into).collect()),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SharedInstanceVersion {
+    pub id: SharedInstanceVersionId,
+    pub shared_instance: SharedInstanceId,
+    pub size: u64,
+    pub sha512: String,
+    pub created: DateTime<Utc>,
+}
+
+impl From<DBSharedInstanceVersion> for SharedInstanceVersion {
+    fn from(value: DBSharedInstanceVersion) -> Self {
+        let version_id = value.id.into();
+        let shared_instance_id = value.shared_instance_id.into();
+        SharedInstanceVersion {
+            id: version_id,
+            shared_instance: shared_instance_id,
+            size: value.size,
+            sha512: value.sha512.encode_hex(),
+            created: value.created,
+        }
+    }
+}
+
+bitflags! {
+    #[derive(Copy, Clone, Debug)]
+    pub struct SharedInstanceUserPermissions: u64 {
+        const EDIT = 1 << 0;
+        const DELETE = 1 << 1;
+        const UPLOAD_VERSION = 1 << 2;
+        const DELETE_VERSION = 1 << 3;
+    }
+}
+
+bitflags_serde_impl!(SharedInstanceUserPermissions, u64);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SharedInstanceUser {
+    pub user: UserId,
+    pub permissions: SharedInstanceUserPermissions,
+}
+
+impl From<DBSharedInstanceUser> for SharedInstanceUser {
+    fn from(user: DBSharedInstanceUser) -> Self {
+        SharedInstanceUser {
+            user: user.user_id.into(),
+            permissions: user.permissions,
+        }
+    }
+}
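Note that these instance-level permissions are separate from PAT scopes: a token may hold `SHARED_INSTANCE_VERSION_CREATE` and still be rejected unless the user's per-instance flags allow the action. An illustrative sketch of composing the new bitflags (the function is hypothetical):

use crate::models::shared_instances::SharedInstanceUserPermissions;

fn permission_demo() {
    // A collaborator who may manage versions but not the instance itself.
    let perms = SharedInstanceUserPermissions::UPLOAD_VERSION
        | SharedInstanceUserPermissions::DELETE_VERSION;
    assert!(perms.contains(SharedInstanceUserPermissions::UPLOAD_VERSION));
    assert!(!perms.contains(SharedInstanceUserPermissions::EDIT));
}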
@@ -4,7 +4,7 @@ use crate::auth::{AuthProvider, AuthenticationError, get_user_from_headers};
 use crate::database::models::DBUser;
 use crate::database::models::flow_item::DBFlow;
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::pats::Scopes;
 use crate::models::users::{Badges, Role};
 use crate::queue::session::AuthQueue;
@@ -136,6 +136,7 @@ impl TempUser {
 
             let upload_result = upload_image_optimized(
                 &format!("user/{}", ariadne::ids::UserId::from(user_id)),
+                FileHostPublicity::Public,
                 bytes,
                 ext,
                 Some(96),
@@ -4,7 +4,7 @@ use crate::database::models::{
     collection_item, generate_collection_id, project_item,
 };
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::collections::{Collection, CollectionStatus};
 use crate::models::ids::{CollectionId, ProjectId};
 use crate::models::pats::Scopes;
@@ -12,7 +12,7 @@ use crate::queue::session::AuthQueue;
 use crate::routes::ApiError;
 use crate::routes::v3::project_creation::CreateError;
 use crate::util::img::delete_old_images;
-use crate::util::routes::read_from_payload;
+use crate::util::routes::read_limited_from_payload;
 use crate::util::validate::validation_errors_to_string;
 use crate::{database, models};
 use actix_web::web::Data;
@@ -413,11 +413,12 @@ pub async fn collection_icon_edit(
     delete_old_images(
         collection_item.icon_url,
         collection_item.raw_icon_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
 
-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         262144,
         "Icons must be smaller than 256KiB",
@@ -427,6 +428,7 @@ pub async fn collection_icon_edit(
     let collection_id: CollectionId = collection_item.id.into();
     let upload_result = crate::util::img::upload_image_optimized(
         &format!("data/{collection_id}"),
+        FileHostPublicity::Public,
         bytes.freeze(),
         &ext.ext,
         Some(96),
@@ -493,6 +495,7 @@ pub async fn delete_collection_icon(
     delete_old_images(
         collection_item.icon_url,
         collection_item.raw_icon_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
@@ -8,13 +8,13 @@ use crate::database::models::{
     project_item, report_item, thread_item, version_item,
 };
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::ids::{ReportId, ThreadMessageId, VersionId};
 use crate::models::images::{Image, ImageContext};
 use crate::queue::session::AuthQueue;
 use crate::routes::ApiError;
 use crate::util::img::upload_image_optimized;
-use crate::util::routes::read_from_payload;
+use crate::util::routes::read_limited_from_payload;
 use actix_web::{HttpRequest, HttpResponse, web};
 use serde::{Deserialize, Serialize};
 use sqlx::PgPool;
@@ -176,7 +176,7 @@ pub async fn images_add(
     }
 
     // Upload the image to the file host
-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         1_048_576,
         "Icons must be smaller than 1MiB",
@@ -186,6 +186,7 @@ pub async fn images_add(
     let content_length = bytes.len();
     let upload_result = upload_image_optimized(
         "data/cached_images",
+        FileHostPublicity::Public, // FIXME: Maybe use private images for threads
         bytes.freeze(),
         &data.ext,
         None,
@@ -13,6 +13,8 @@ pub mod payouts;
 pub mod project_creation;
 pub mod projects;
 pub mod reports;
+pub mod shared_instance_version_creation;
+pub mod shared_instances;
 pub mod statistics;
 pub mod tags;
 pub mod teams;
@@ -36,6 +38,8 @@ pub fn config(cfg: &mut web::ServiceConfig) {
         .configure(project_creation::config)
         .configure(projects::config)
         .configure(reports::config)
+        .configure(shared_instance_version_creation::config)
+        .configure(shared_instances::config)
         .configure(statistics::config)
         .configure(tags::config)
         .configure(teams::config)
@@ -1,6 +1,9 @@
 use std::{collections::HashSet, fmt::Display, sync::Arc};
 
 use super::ApiError;
+use crate::file_hosting::FileHostPublicity;
+use crate::models::ids::OAuthClientId;
+use crate::util::img::{delete_old_images, upload_image_optimized};
 use crate::{
     auth::{checks::ValidateAuthorized, get_user_from_headers},
     database::{
@@ -23,7 +26,7 @@ use crate::{
 };
 use crate::{
     file_hosting::FileHost, models::oauth_clients::DeleteOAuthClientQueryParam,
-    util::routes::read_from_payload,
+    util::routes::read_limited_from_payload,
 };
 use actix_web::{
     HttpRequest, HttpResponse, delete, get, patch, post,
@@ -38,9 +41,6 @@ use serde::{Deserialize, Serialize};
 use sqlx::PgPool;
 use validator::Validate;
 
-use crate::models::ids::OAuthClientId;
-use crate::util::img::{delete_old_images, upload_image_optimized};
-
 pub fn config(cfg: &mut web::ServiceConfig) {
     cfg.service(
         scope("oauth")
@@ -381,11 +381,12 @@ pub async fn oauth_client_icon_edit(
     delete_old_images(
         client.icon_url.clone(),
         client.raw_icon_url.clone(),
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
 
-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         262144,
         "Icons must be smaller than 256KiB",
@@ -393,6 +394,7 @@ pub async fn oauth_client_icon_edit(
     .await?;
     let upload_result = upload_image_optimized(
         &format!("data/{client_id}"),
+        FileHostPublicity::Public,
         bytes.freeze(),
         &ext.ext,
         Some(96),
@@ -447,6 +449,7 @@ pub async fn oauth_client_icon_delete(
     delete_old_images(
         client.icon_url.clone(),
         client.raw_icon_url.clone(),
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
@@ -8,14 +8,14 @@ use crate::database::models::{
     DBOrganization, generate_organization_id, team_item,
 };
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::ids::OrganizationId;
 use crate::models::pats::Scopes;
 use crate::models::teams::{OrganizationPermissions, ProjectPermissions};
 use crate::queue::session::AuthQueue;
 use crate::routes::v3::project_creation::CreateError;
 use crate::util::img::delete_old_images;
-use crate::util::routes::read_from_payload;
+use crate::util::routes::read_limited_from_payload;
 use crate::util::validate::validation_errors_to_string;
 use crate::{database, models};
 use actix_web::{HttpRequest, HttpResponse, web};
@@ -1088,11 +1088,12 @@ pub async fn organization_icon_edit(
     delete_old_images(
         organization_item.icon_url,
         organization_item.raw_icon_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
 
-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         262144,
         "Icons must be smaller than 256KiB",
@@ -1102,6 +1103,7 @@ pub async fn organization_icon_edit(
     let organization_id: OrganizationId = organization_item.id.into();
     let upload_result = crate::util::img::upload_image_optimized(
         &format!("data/{organization_id}"),
+        FileHostPublicity::Public,
         bytes.freeze(),
         &ext.ext,
         Some(96),
@@ -1191,6 +1193,7 @@ pub async fn delete_organization_icon(
     delete_old_images(
         organization_item.icon_url,
         organization_item.raw_icon_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
@@ -6,7 +6,7 @@ use crate::database::models::loader_fields::{
 use crate::database::models::thread_item::ThreadBuilder;
 use crate::database::models::{self, DBUser, image_item};
 use crate::database::redis::RedisPool;
-use crate::file_hosting::{FileHost, FileHostingError};
+use crate::file_hosting::{FileHost, FileHostPublicity, FileHostingError};
 use crate::models::error::ApiError;
 use crate::models::ids::{ImageId, OrganizationId, ProjectId, VersionId};
 use crate::models::images::{Image, ImageContext};
@@ -240,18 +240,16 @@ pub struct NewGalleryItem {
 }
 
 pub struct UploadedFile {
-    pub file_id: String,
-    pub file_name: String,
+    pub name: String,
+    pub publicity: FileHostPublicity,
 }
 
 pub async fn undo_uploads(
     file_host: &dyn FileHost,
     uploaded_files: &[UploadedFile],
-) -> Result<(), CreateError> {
+) -> Result<(), FileHostingError> {
     for file in uploaded_files {
-        file_host
-            .delete_file_version(&file.file_id, &file.file_name)
-            .await?;
+        file_host.delete_file(&file.name, file.publicity).await?;
     }
     Ok(())
 }
@@ -309,13 +307,13 @@ Get logged in user
 
 2. Upload
     - Icon: check file format & size
-        - Upload to backblaze & record URL
+        - Upload to S3 & record URL
     - Project files
         - Check for matching version
         - File size limits?
        - Check file type
             - Eventually, malware scan
-            - Upload to backblaze & create VersionFileBuilder
+            - Upload to S3 & create VersionFileBuilder
     -
 
 3. Creation
@@ -334,7 +332,7 @@ async fn project_create_inner(
     redis: &RedisPool,
     session_queue: &AuthQueue,
 ) -> Result<HttpResponse, CreateError> {
-    // The base URL for files uploaded to backblaze
+    // The base URL for files uploaded to S3
     let cdn_url = dotenvy::var("CDN_URL")?;
 
     // The currently logged in user
@@ -516,6 +514,7 @@ async fn project_create_inner(
             let url = format!("data/{project_id}/images");
             let upload_result = upload_image_optimized(
                 &url,
+                FileHostPublicity::Public,
                 data.freeze(),
                 file_extension,
                 Some(350),
@@ -526,8 +525,8 @@ async fn project_create_inner(
             .map_err(|e| CreateError::InvalidIconFormat(e.to_string()))?;
 
             uploaded_files.push(UploadedFile {
-                file_id: upload_result.raw_url_path.clone(),
-                file_name: upload_result.raw_url_path,
+                name: upload_result.raw_url_path,
+                publicity: FileHostPublicity::Public,
             });
             gallery_urls.push(crate::models::projects::GalleryItem {
                 url: upload_result.url,
@@ -1010,6 +1009,7 @@ async fn process_icon_upload(
     .await?;
     let upload_result = crate::util::img::upload_image_optimized(
         &format!("data/{}", to_base62(id)),
+        FileHostPublicity::Public,
         data.freeze(),
         file_extension,
         Some(96),
@@ -1020,13 +1020,13 @@ async fn process_icon_upload(
     .map_err(|e| CreateError::InvalidIconFormat(e.to_string()))?;
 
     uploaded_files.push(UploadedFile {
-        file_id: upload_result.raw_url_path.clone(),
-        file_name: upload_result.raw_url_path,
+        name: upload_result.raw_url_path,
+        publicity: FileHostPublicity::Public,
     });
 
     uploaded_files.push(UploadedFile {
-        file_id: upload_result.url_path.clone(),
-        file_name: upload_result.url_path,
+        name: upload_result.url_path,
+        publicity: FileHostPublicity::Public,
     });
 
     Ok((
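The reworked `undo_uploads` keeps the existing cleanup contract: every successful upload is recorded as an `UploadedFile`, and any later failure replays the list through `delete_file`. A sketch of that pattern, where `do_creation` is a hypothetical stand-in for the real creation work:

use crate::file_hosting::{FileHost, FileHostingError};
use crate::routes::v3::project_creation::{UploadedFile, undo_uploads};

// Hypothetical creation step; the real handlers push an UploadedFile
// entry for each object they write to the file host.
async fn do_creation(
    _file_host: &dyn FileHost,
    _uploaded_files: &mut Vec<UploadedFile>,
) -> Result<(), FileHostingError> {
    Ok(())
}

async fn create_with_cleanup(
    file_host: &dyn FileHost,
) -> Result<(), FileHostingError> {
    let mut uploaded_files = Vec::new();
    let result = do_creation(file_host, &mut uploaded_files).await;
    if result.is_err() {
        // Delete everything that was uploaded before the failure so a
        // failed request leaves no orphaned objects behind.
        undo_uploads(file_host, &uploaded_files).await?;
    }
    result
}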
@@ -9,7 +9,7 @@ use crate::database::models::thread_item::ThreadMessageBuilder;
 use crate::database::models::{DBTeamMember, ids as db_ids, image_item};
 use crate::database::redis::RedisPool;
 use crate::database::{self, models as db_models};
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models;
 use crate::models::ids::ProjectId;
 use crate::models::images::ImageContext;
@@ -28,7 +28,7 @@ use crate::search::indexing::remove_documents;
 use crate::search::{SearchConfig, SearchError, search_for_project};
 use crate::util::img;
 use crate::util::img::{delete_old_images, upload_image_optimized};
-use crate::util::routes::read_from_payload;
+use crate::util::routes::read_limited_from_payload;
 use crate::util::validate::validation_errors_to_string;
 use actix_web::{HttpRequest, HttpResponse, web};
 use ariadne::ids::base62_impl::parse_base62;
@@ -1487,11 +1487,12 @@ pub async fn project_icon_edit(
     delete_old_images(
         project_item.inner.icon_url,
         project_item.inner.raw_icon_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
 
-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         262144,
         "Icons must be smaller than 256KiB",
@@ -1501,6 +1502,7 @@ pub async fn project_icon_edit(
     let project_id: ProjectId = project_item.inner.id.into();
     let upload_result = upload_image_optimized(
         &format!("data/{project_id}"),
+        FileHostPublicity::Public,
         bytes.freeze(),
         &ext.ext,
         Some(96),
@@ -1597,6 +1599,7 @@ pub async fn delete_project_icon(
     delete_old_images(
         project_item.inner.icon_url,
         project_item.inner.raw_icon_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;
@@ -1709,7 +1712,7 @@ pub async fn add_gallery_item(
         }
     }
 
-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         2 * (1 << 20),
         "Gallery image exceeds the maximum of 2MiB.",
@@ -1719,6 +1722,7 @@ pub async fn add_gallery_item(
     let id: ProjectId = project_item.inner.id.into();
     let upload_result = upload_image_optimized(
         &format!("data/{id}/images"),
+        FileHostPublicity::Public,
         bytes.freeze(),
         &ext.ext,
         Some(350),
@@ -2049,6 +2053,7 @@ pub async fn delete_gallery_item(
     delete_old_images(
         Some(item.image_url),
         Some(item.raw_image_url),
+        FileHostPublicity::Public,
         &***file_host,
    )
     .await?;
@@ -14,11 +14,11 @@ use crate::models::threads::{MessageBody, ThreadType};
 use crate::queue::session::AuthQueue;
 use crate::routes::ApiError;
 use crate::util::img;
+use crate::util::routes::read_typed_from_payload;
 use actix_web::{HttpRequest, HttpResponse, web};
 use ariadne::ids::UserId;
 use ariadne::ids::base62_impl::parse_base62;
 use chrono::Utc;
-use futures::StreamExt;
 use serde::Deserialize;
 use sqlx::PgPool;
 use validator::Validate;
@@ -63,15 +63,7 @@ pub async fn report_create(
     .await?
     .1;
 
-    let mut bytes = web::BytesMut::new();
-    while let Some(item) = body.next().await {
-        bytes.extend_from_slice(&item.map_err(|_| {
-            ApiError::InvalidInput(
-                "Error while parsing request payload!".to_string(),
-            )
-        })?);
-    }
-    let new_report: CreateReport = serde_json::from_slice(bytes.as_ref())?;
+    let new_report: CreateReport = read_typed_from_payload(&mut body).await?;
 
     let id =
         crate::database::models::generate_report_id(&mut transaction).await?;
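The inline byte-collection loop above is replaced by a shared helper. Its definition is not part of this diff, so the following is only a plausible reconstruction of what `read_typed_from_payload` does, based directly on the code it replaces (the real helper in crate::util::routes may also enforce a size limit):

use actix_web::web;
use futures_util::StreamExt;
use serde::de::DeserializeOwned;

use crate::routes::ApiError;

// Buffer the whole request body, then deserialize it as JSON into the
// requested type, mapping stream errors to the same InvalidInput error
// the old inline code produced.
pub async fn read_typed_from_payload<T: DeserializeOwned>(
    body: &mut web::Payload,
) -> Result<T, ApiError> {
    let mut bytes = web::BytesMut::new();
    while let Some(item) = body.next().await {
        bytes.extend_from_slice(&item.map_err(|_| {
            ApiError::InvalidInput(
                "Error while parsing request payload!".to_string(),
            )
        })?);
    }
    Ok(serde_json::from_slice(bytes.as_ref())?)
}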
200 apps/labrinth/src/routes/v3/shared_instance_version_creation.rs Normal file
@@ -0,0 +1,200 @@
+use crate::auth::get_user_from_headers;
+use crate::database::models::shared_instance_item::{
+    DBSharedInstance, DBSharedInstanceUser, DBSharedInstanceVersion,
+};
+use crate::database::models::{
+    DBSharedInstanceId, DBSharedInstanceVersionId,
+    generate_shared_instance_version_id,
+};
+use crate::database::redis::RedisPool;
+use crate::file_hosting::{FileHost, FileHostPublicity};
+use crate::models::ids::{SharedInstanceId, SharedInstanceVersionId};
+use crate::models::pats::Scopes;
+use crate::models::shared_instances::{
+    SharedInstanceUserPermissions, SharedInstanceVersion,
+};
+use crate::queue::session::AuthQueue;
+use crate::routes::ApiError;
+use crate::routes::v3::project_creation::UploadedFile;
+use crate::util::ext::MRPACK_MIME_TYPE;
+use actix_web::http::header::ContentLength;
+use actix_web::web::Data;
+use actix_web::{HttpRequest, HttpResponse, web};
+use bytes::BytesMut;
+use chrono::Utc;
+use futures_util::StreamExt;
+use hex::FromHex;
+use sqlx::{PgPool, Postgres, Transaction};
+use std::sync::Arc;
+
+const MAX_FILE_SIZE: usize = 500 * 1024 * 1024;
+const MAX_FILE_SIZE_TEXT: &str = "500 MB";
+
+pub fn config(cfg: &mut web::ServiceConfig) {
+    cfg.route(
+        "shared-instance/{id}/version",
+        web::post().to(shared_instance_version_create),
+    );
+}
+
+#[allow(clippy::too_many_arguments)]
+pub async fn shared_instance_version_create(
+    req: HttpRequest,
+    pool: Data<PgPool>,
+    payload: web::Payload,
+    web::Header(ContentLength(content_length)): web::Header<ContentLength>,
+    redis: Data<RedisPool>,
+    file_host: Data<Arc<dyn FileHost + Send + Sync>>,
+    info: web::Path<(SharedInstanceId,)>,
+    session_queue: Data<AuthQueue>,
+) -> Result<HttpResponse, ApiError> {
+    if content_length > MAX_FILE_SIZE {
+        return Err(ApiError::InvalidInput(format!(
+            "File size exceeds the maximum limit of {MAX_FILE_SIZE_TEXT}"
+        )));
+    }
+
+    let mut transaction = pool.begin().await?;
+    let mut uploaded_files = vec![];
+
+    let result = shared_instance_version_create_inner(
+        req,
+        &pool,
+        payload,
+        content_length,
+        &redis,
+        &***file_host,
+        info.into_inner().0.into(),
+        &session_queue,
+        &mut transaction,
+        &mut uploaded_files,
+    )
+    .await;
+
+    if result.is_err() {
+        let undo_result = super::project_creation::undo_uploads(
+            &***file_host,
+            &uploaded_files,
+        )
+        .await;
+        let rollback_result = transaction.rollback().await;
+
+        undo_result?;
+        if let Err(e) = rollback_result {
+            return Err(e.into());
+        }
+    } else {
+        transaction.commit().await?;
+    }
+
+    result
+}
+
+#[allow(clippy::too_many_arguments)]
+async fn shared_instance_version_create_inner(
+    req: HttpRequest,
+    pool: &PgPool,
+    mut payload: web::Payload,
+    content_length: usize,
+    redis: &RedisPool,
+    file_host: &dyn FileHost,
+    instance_id: DBSharedInstanceId,
+    session_queue: &AuthQueue,
+    transaction: &mut Transaction<'_, Postgres>,
+    uploaded_files: &mut Vec<UploadedFile>,
+) -> Result<HttpResponse, ApiError> {
+    let user = get_user_from_headers(
+        &req,
+        pool,
+        redis,
+        session_queue,
+        Scopes::SHARED_INSTANCE_VERSION_CREATE,
+    )
+    .await?
+    .1;
+
+    let Some(instance) = DBSharedInstance::get(instance_id, pool).await? else {
+        return Err(ApiError::NotFound);
+    };
+    if !user.role.is_mod() && instance.owner_id != user.id.into() {
+        let permissions = DBSharedInstanceUser::get_user_permissions(
+            instance_id,
+            user.id.into(),
+            pool,
+        )
+        .await?;
+        if let Some(permissions) = permissions {
+            if !permissions
+                .contains(SharedInstanceUserPermissions::UPLOAD_VERSION)
+            {
+                return Err(ApiError::CustomAuthentication(
+                    "You do not have permission to upload a version for this shared instance.".to_string()
+                ));
+            }
+        } else {
+            return Err(ApiError::NotFound);
+        }
+    }
+
+    let version_id =
+        generate_shared_instance_version_id(&mut *transaction).await?;
+
+    let mut file_data = BytesMut::new();
+    while let Some(chunk) = payload.next().await {
+        let chunk = chunk.map_err(|_| {
+            ApiError::InvalidInput(
+                "Unable to parse bytes in payload sent!".to_string(),
+            )
+        })?;
+
+        if file_data.len() + chunk.len() <= MAX_FILE_SIZE {
+            file_data.extend_from_slice(&chunk);
+        } else {
+            file_data
+                .extend_from_slice(&chunk[..MAX_FILE_SIZE - file_data.len()]);
+            break;
+        }
+    }
+
+    let file_data = file_data.freeze();
+    let file_path = format!(
+        "shared_instance/{}.mrpack",
+        SharedInstanceVersionId::from(version_id),
+    );
+
+    let upload_data = file_host
+        .upload_file(
+            MRPACK_MIME_TYPE,
+            &file_path,
+            FileHostPublicity::Private,
+            file_data,
+        )
+        .await?;
+
+    uploaded_files.push(UploadedFile {
+        name: file_path,
+        publicity: upload_data.file_publicity,
+    });
+
+    let sha512 = Vec::<u8>::from_hex(upload_data.content_sha512).unwrap();
+
+    let new_version = DBSharedInstanceVersion {
+        id: version_id,
+        shared_instance_id: instance_id,
+        size: content_length as u64,
+        sha512,
+        created: Utc::now(),
+    };
+    new_version.insert(transaction).await?;
+
+    sqlx::query!(
+        "UPDATE shared_instances SET current_version_id = $1 WHERE id = $2",
+        new_version.id as DBSharedInstanceVersionId,
+        instance_id as DBSharedInstanceId,
+    )
+    .execute(&mut **transaction)
+    .await?;
+
+    let version: SharedInstanceVersion = new_version.into();
+    Ok(HttpResponse::Created().json(version))
+}
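For context, a hypothetical client-side call against this route; the base URL, token placeholder, instance ID, and MIME string below are illustrative assumptions, not values confirmed by this diff:

// Sketch of uploading an .mrpack to POST /v3/shared-instance/{id}/version.
async fn upload_instance_version(pack: Vec<u8>) -> Result<(), reqwest::Error> {
    let response = reqwest::Client::new()
        .post("https://api.example.com/v3/shared-instance/abc123/version")
        // A PAT carrying SHARED_INSTANCE_VERSION_CREATE (placeholder value).
        .header("Authorization", "<token>")
        // Assumed expansion of MRPACK_MIME_TYPE.
        .header("Content-Type", "application/x-modrinth-modpack+zip")
        // reqwest sets the Content-Length header for Vec<u8> bodies,
        // which the route requires for its up-front size check.
        .body(pack)
        .send()
        .await?
        .error_for_status()?;
    println!("created version: {}", response.text().await?);
    Ok(())
}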
612
apps/labrinth/src/routes/v3/shared_instances.rs
Normal file
612
apps/labrinth/src/routes/v3/shared_instances.rs
Normal file
@ -0,0 +1,612 @@
|
|||||||
|
use crate::auth::get_user_from_headers;
|
||||||
|
use crate::auth::validate::get_maybe_user_from_headers;
|
||||||
|
use crate::database::models::shared_instance_item::{
|
||||||
|
DBSharedInstance, DBSharedInstanceUser, DBSharedInstanceVersion,
|
||||||
|
};
|
||||||
|
use crate::database::models::{
|
||||||
|
DBSharedInstanceId, DBSharedInstanceVersionId, generate_shared_instance_id,
|
||||||
|
};
|
||||||
|
use crate::database::redis::RedisPool;
|
||||||
|
use crate::file_hosting::FileHost;
|
||||||
|
use crate::models::ids::{SharedInstanceId, SharedInstanceVersionId};
|
||||||
|
use crate::models::pats::Scopes;
|
||||||
|
use crate::models::shared_instances::{
|
||||||
|
SharedInstance, SharedInstanceUserPermissions, SharedInstanceVersion,
|
||||||
|
};
|
||||||
|
use crate::models::users::User;
|
||||||
|
use crate::queue::session::AuthQueue;
|
||||||
|
use crate::routes::ApiError;
|
||||||
|
use crate::util::routes::read_typed_from_payload;
|
||||||
|
use actix_web::web::{Data, Redirect};
|
||||||
|
use actix_web::{HttpRequest, HttpResponse, web};
|
||||||
|
use futures_util::future::try_join_all;
|
||||||
|
use serde::Deserialize;
|
||||||
|
use sqlx::PgPool;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use validator::Validate;
|
||||||
|
|
||||||
|
pub fn config(cfg: &mut web::ServiceConfig) {
|
||||||
|
cfg.route("shared-instance", web::post().to(shared_instance_create));
|
||||||
|
cfg.route("shared-instance", web::get().to(shared_instance_list));
|
||||||
|
cfg.service(
|
||||||
|
web::scope("shared-instance")
|
||||||
|
.route("{id}", web::get().to(shared_instance_get))
|
||||||
|
.route("{id}", web::patch().to(shared_instance_edit))
|
||||||
|
.route("{id}", web::delete().to(shared_instance_delete))
|
||||||
|
.route("{id}/version", web::get().to(shared_instance_version_list)),
|
||||||
|
);
|
||||||
|
cfg.service(
|
||||||
|
web::scope("shared-instance-version")
|
||||||
|
.route("{id}", web::get().to(shared_instance_version_get))
|
||||||
|
.route("{id}", web::delete().to(shared_instance_version_delete))
|
||||||
|
.route(
|
||||||
|
"{id}/download",
|
||||||
|
web::get().to(shared_instance_version_download),
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Validate)]
|
||||||
|
pub struct CreateSharedInstance {
|
||||||
|
#[validate(
|
||||||
|
length(min = 3, max = 64),
|
||||||
|
custom(function = "crate::util::validate::validate_name")
|
||||||
|
)]
|
||||||
|
pub title: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub public: bool,
|
||||||
|
}

pub async fn shared_instance_create(
    req: HttpRequest,
    pool: Data<PgPool>,
    mut body: web::Payload,
    redis: Data<RedisPool>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let new_instance: CreateSharedInstance =
        read_typed_from_payload(&mut body).await?;

    let mut transaction = pool.begin().await?;

    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_CREATE,
    )
    .await?
    .1;

    let id = generate_shared_instance_id(&mut transaction).await?;

    let instance = DBSharedInstance {
        id,
        title: new_instance.title,
        owner_id: user.id.into(),
        public: new_instance.public,
        current_version_id: None,
    };
    instance.insert(&mut transaction).await?;

    transaction.commit().await?;

    Ok(HttpResponse::Created().json(SharedInstance {
        id: id.into(),
        title: instance.title,
        owner: user.id,
        public: instance.public,
        current_version: None,
        additional_users: Some(vec![]),
    }))
}

pub async fn shared_instance_list(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_READ,
    )
    .await?
    .1;

    // TODO: Something for moderators to be able to see all instances?
    let instances =
        DBSharedInstance::list_for_user(user.id.into(), &**pool).await?;
    let instances = try_join_all(instances.into_iter().map(
        async |instance| -> Result<SharedInstance, ApiError> {
            let version = if let Some(version_id) = instance.current_version_id
            {
                DBSharedInstanceVersion::get(version_id, &**pool).await?
            } else {
                None
            };
            let instance_id = instance.id;
            Ok(SharedInstance::from_db(
                instance,
                Some(
                    DBSharedInstanceUser::get_from_instance(
                        instance_id,
                        &**pool,
                        &redis,
                    )
                    .await?,
                ),
                version,
            ))
        },
    ))
    .await?;

    Ok(HttpResponse::Ok().json(instances))
}

pub async fn shared_instance_get(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    info: web::Path<(SharedInstanceId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0.into();

    let user = get_maybe_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_READ,
    )
    .await?
    .map(|(_, user)| user);

    let shared_instance = DBSharedInstance::get(id, &**pool).await?;

    if let Some(shared_instance) = shared_instance {
        let users =
            DBSharedInstanceUser::get_from_instance(id, &**pool, &redis)
                .await?;

        let privately_accessible = user.is_some_and(|user| {
            can_access_instance_privately(&shared_instance, &users, &user)
        });
        if !shared_instance.public && !privately_accessible {
            return Err(ApiError::NotFound);
        }

        let current_version =
            if let Some(version_id) = shared_instance.current_version_id {
                DBSharedInstanceVersion::get(version_id, &**pool).await?
            } else {
                None
            };
        let shared_instance = SharedInstance::from_db(
            shared_instance,
            privately_accessible.then_some(users),
            current_version,
        );

        Ok(HttpResponse::Ok().json(shared_instance))
    } else {
        Err(ApiError::NotFound)
    }
}

fn can_access_instance_privately(
    instance: &DBSharedInstance,
    users: &[DBSharedInstanceUser],
    user: &User,
) -> bool {
    user.role.is_mod()
        || instance.owner_id == user.id.into()
        || users.iter().any(|x| x.user_id == user.id.into())
}
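
The rule reads: moderators, the instance owner, and any listed instance user may view a non-public instance; everyone else cannot. A self-contained mirror of that check with simplified stand-in types (the real code uses the DB models above):

struct Instance { owner_id: u64 }
struct Member { user_id: u64 }
struct Viewer { id: u64, is_mod: bool }

fn can_access(instance: &Instance, members: &[Member], viewer: &Viewer) -> bool {
    viewer.is_mod
        || instance.owner_id == viewer.id
        || members.iter().any(|m| m.user_id == viewer.id)
}

fn main() {
    let inst = Instance { owner_id: 1 };
    let members = [Member { user_id: 2 }];
    assert!(can_access(&inst, &members, &Viewer { id: 1, is_mod: false })); // owner
    assert!(can_access(&inst, &members, &Viewer { id: 2, is_mod: false })); // member
    assert!(can_access(&inst, &members, &Viewer { id: 3, is_mod: true })); // moderator
    assert!(!can_access(&inst, &members, &Viewer { id: 3, is_mod: false })); // stranger
}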

#[derive(Deserialize, Validate)]
pub struct EditSharedInstance {
    #[validate(
        length(min = 3, max = 64),
        custom(function = "crate::util::validate::validate_name")
    )]
    pub title: Option<String>,
    pub public: Option<bool>,
}
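
Both fields are optional, so a PATCH body can touch either property independently; two hypothetical bodies (values invented for illustration):

fn main() {
    let make_public = serde_json::json!({ "public": true });
    let rename = serde_json::json!({ "title": "Renamed instance" });
    println!("{make_public}\n{rename}");
}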

pub async fn shared_instance_edit(
    req: HttpRequest,
    pool: Data<PgPool>,
    mut body: web::Payload,
    redis: Data<RedisPool>,
    info: web::Path<(SharedInstanceId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0.into();
    let edit_instance: EditSharedInstance =
        read_typed_from_payload(&mut body).await?;

    let mut transaction = pool.begin().await?;

    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_WRITE,
    )
    .await?
    .1;

    let Some(instance) = DBSharedInstance::get(id, &**pool).await? else {
        return Err(ApiError::NotFound);
    };

    if !user.role.is_mod() && instance.owner_id != user.id.into() {
        let permissions = DBSharedInstanceUser::get_user_permissions(
            id,
            user.id.into(),
            &**pool,
        )
        .await?;
        if let Some(permissions) = permissions {
            if !permissions.contains(SharedInstanceUserPermissions::EDIT) {
                return Err(ApiError::CustomAuthentication(
                    "You do not have permission to edit this shared instance."
                        .to_string(),
                ));
            }
        } else {
            return Err(ApiError::NotFound);
        }
    }

    if let Some(title) = edit_instance.title {
        sqlx::query!(
            "
            UPDATE shared_instances
            SET title = $1
            WHERE id = $2
            ",
            title,
            id as DBSharedInstanceId,
        )
        .execute(&mut *transaction)
        .await?;
    }

    if let Some(public) = edit_instance.public {
        sqlx::query!(
            "
            UPDATE shared_instances
            SET public = $1
            WHERE id = $2
            ",
            public,
            id as DBSharedInstanceId,
        )
        .execute(&mut *transaction)
        .await?;
    }

    transaction.commit().await?;

    Ok(HttpResponse::NoContent().body(""))
}

pub async fn shared_instance_delete(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    info: web::Path<(SharedInstanceId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id: DBSharedInstanceId = info.into_inner().0.into();

    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_DELETE,
    )
    .await?
    .1;

    let Some(instance) = DBSharedInstance::get(id, &**pool).await? else {
        return Err(ApiError::NotFound);
    };

    if !user.role.is_mod() && instance.owner_id != user.id.into() {
        let permissions = DBSharedInstanceUser::get_user_permissions(
            id,
            user.id.into(),
            &**pool,
        )
        .await?;
        if let Some(permissions) = permissions {
            if !permissions.contains(SharedInstanceUserPermissions::DELETE) {
                return Err(ApiError::CustomAuthentication(
                    "You do not have permission to delete this shared instance."
                        .to_string(),
                ));
            }
        } else {
            return Err(ApiError::NotFound);
        }
    }

    sqlx::query!(
        "
        DELETE FROM shared_instances
        WHERE id = $1
        ",
        id as DBSharedInstanceId,
    )
    .execute(&**pool)
    .await?;

    DBSharedInstanceUser::clear_cache(id, &redis).await?;

    Ok(HttpResponse::NoContent().body(""))
}

pub async fn shared_instance_version_list(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    info: web::Path<(SharedInstanceId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0.into();

    let user = get_maybe_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_READ,
    )
    .await?
    .map(|(_, user)| user);

    let shared_instance = DBSharedInstance::get(id, &**pool).await?;

    if let Some(shared_instance) = shared_instance {
        if !can_access_instance_as_maybe_user(
            &pool,
            &redis,
            &shared_instance,
            user,
        )
        .await?
        {
            return Err(ApiError::NotFound);
        }

        let versions =
            DBSharedInstanceVersion::get_for_instance(id, &**pool).await?;
        let versions = versions
            .into_iter()
            .map(Into::into)
            .collect::<Vec<SharedInstanceVersion>>();

        Ok(HttpResponse::Ok().json(versions))
    } else {
        Err(ApiError::NotFound)
    }
}

pub async fn shared_instance_version_get(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    info: web::Path<(SharedInstanceVersionId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let version_id = info.into_inner().0.into();

    let user = get_maybe_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_READ,
    )
    .await?
    .map(|(_, user)| user);

    let version = DBSharedInstanceVersion::get(version_id, &**pool).await?;

    if let Some(version) = version {
        let instance =
            DBSharedInstance::get(version.shared_instance_id, &**pool).await?;
        if let Some(instance) = instance {
            if !can_access_instance_as_maybe_user(
                &pool, &redis, &instance, user,
            )
            .await?
            {
                return Err(ApiError::NotFound);
            }

            let version: SharedInstanceVersion = version.into();
            Ok(HttpResponse::Ok().json(version))
        } else {
            Err(ApiError::NotFound)
        }
    } else {
        Err(ApiError::NotFound)
    }
}

async fn can_access_instance_as_maybe_user(
    pool: &PgPool,
    redis: &RedisPool,
    instance: &DBSharedInstance,
    user: Option<User>,
) -> Result<bool, ApiError> {
    if instance.public {
        return Ok(true);
    }
    let users =
        DBSharedInstanceUser::get_from_instance(instance.id, pool, redis)
            .await?;
    Ok(user.is_some_and(|user| {
        can_access_instance_privately(instance, &users, &user)
    }))
}

pub async fn shared_instance_version_delete(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    info: web::Path<(SharedInstanceVersionId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let version_id = info.into_inner().0.into();

    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_VERSION_DELETE,
    )
    .await?
    .1;

    let shared_instance_version =
        DBSharedInstanceVersion::get(version_id, &**pool).await?;

    if let Some(shared_instance_version) = shared_instance_version {
        let shared_instance = DBSharedInstance::get(
            shared_instance_version.shared_instance_id,
            &**pool,
        )
        .await?;
        if let Some(shared_instance) = shared_instance {
            if !user.role.is_mod() && shared_instance.owner_id != user.id.into()
            {
                let permissions = DBSharedInstanceUser::get_user_permissions(
                    shared_instance.id,
                    user.id.into(),
                    &**pool,
                )
                .await?;
                if let Some(permissions) = permissions {
                    if !permissions
                        .contains(SharedInstanceUserPermissions::DELETE)
                    {
                        return Err(ApiError::CustomAuthentication(
                            "You do not have permission to delete this shared instance version.".to_string()
                        ));
                    }
                } else {
                    return Err(ApiError::NotFound);
                }
            }

            delete_instance_version(shared_instance.id, version_id, &pool)
                .await?;

            Ok(HttpResponse::NoContent().body(""))
        } else {
            Err(ApiError::NotFound)
        }
    } else {
        Err(ApiError::NotFound)
    }
}

async fn delete_instance_version(
    instance_id: DBSharedInstanceId,
    version_id: DBSharedInstanceVersionId,
    pool: &PgPool,
) -> Result<(), ApiError> {
    let mut transaction = pool.begin().await?;

    sqlx::query!(
        "
        DELETE FROM shared_instance_versions
        WHERE id = $1
        ",
        version_id as DBSharedInstanceVersionId,
    )
    .execute(&mut *transaction)
    .await?;

    sqlx::query!(
        "
        UPDATE shared_instances
        SET current_version_id = (
            SELECT id FROM shared_instance_versions
            WHERE shared_instance_id = $1
            ORDER BY created DESC
            LIMIT 1
        )
        WHERE id = $1
        ",
        instance_id as DBSharedInstanceId,
    )
    .execute(&mut *transaction)
    .await?;

    transaction.commit().await?;
    Ok(())
}
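
The second statement encodes a fallback rule: after the delete, `current_version_id` is repointed at the most recently created surviving version, or NULL when none remain. A standalone illustration of that rule (stand-in tuples of id and creation timestamp, not the real DB types):

fn next_current(mut remaining: Vec<(u64, i64)>) -> Option<u64> {
    // Mirrors ORDER BY created DESC LIMIT 1 over the surviving versions.
    remaining.sort_by_key(|&(_, created)| std::cmp::Reverse(created));
    remaining.first().map(|&(id, _)| id)
}

fn main() {
    assert_eq!(next_current(vec![(10, 100), (11, 200)]), Some(11));
    assert_eq!(next_current(vec![]), None); // becomes NULL in the UPDATE
}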

pub async fn shared_instance_version_download(
    req: HttpRequest,
    pool: Data<PgPool>,
    redis: Data<RedisPool>,
    file_host: Data<Arc<dyn FileHost + Send + Sync>>,
    info: web::Path<(SharedInstanceVersionId,)>,
    session_queue: Data<AuthQueue>,
) -> Result<Redirect, ApiError> {
    let version_id = info.into_inner().0.into();

    let user = get_maybe_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Scopes::SHARED_INSTANCE_VERSION_READ,
    )
    .await?
    .map(|(_, user)| user);

    let version = DBSharedInstanceVersion::get(version_id, &**pool).await?;

    if let Some(version) = version {
        let instance =
            DBSharedInstance::get(version.shared_instance_id, &**pool).await?;
        if let Some(instance) = instance {
            if !can_access_instance_as_maybe_user(
                &pool, &redis, &instance, user,
            )
            .await?
            {
                return Err(ApiError::NotFound);
            }

            let file_name = format!(
                "shared_instance/{}.mrpack",
                SharedInstanceVersionId::from(version_id)
            );
            let url =
                file_host.get_url_for_private_file(&file_name, 180).await?;

            Ok(Redirect::to(url).see_other())
        } else {
            Err(ApiError::NotFound)
        }
    } else {
        Err(ApiError::NotFound)
    }
}
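
Note that the handler never streams the .mrpack itself: it asks the private file host for a presigned URL valid for 180 seconds and answers with a 303, so clients fetch directly from the private bucket. A minimal sketch of that response shape; the presigned URL below is hypothetical, real ones come from the S3 backend:

use actix_web::web::Redirect;

fn redirect_to_presigned(url: String) -> Redirect {
    // 303 See Other sends the client straight to the private bucket.
    Redirect::to(url).see_other()
}

fn main() {
    let _ = redirect_to_presigned(
        "https://private.example/shared_instance/abc123.mrpack?X-Amz-Expires=180".to_string(),
    );
}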

@@ -6,7 +6,7 @@ use crate::database::models::image_item;
 use crate::database::models::notification_item::NotificationBuilder;
 use crate::database::models::thread_item::ThreadMessageBuilder;
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::ids::{ThreadId, ThreadMessageId};
 use crate::models::images::{Image, ImageContext};
 use crate::models::notifications::NotificationBody;
@@ -606,7 +606,12 @@ pub async fn message_delete(
     for image in images {
         let name = image.url.split(&format!("{cdn_url}/")).nth(1);
         if let Some(icon_path) = name {
-            file_host.delete_file_version("", icon_path).await?;
+            file_host
+                .delete_file(
+                    icon_path,
+                    FileHostPublicity::Public, // FIXME: Consider using private file storage?
+                )
+                .await?;
         }
         database::DBImage::remove(image.id, &mut transaction, &redis)
             .await?;

@@ -1,6 +1,7 @@
 use std::{collections::HashMap, sync::Arc};

 use super::{ApiError, oauth_clients::get_user_clients};
+use crate::file_hosting::FileHostPublicity;
 use crate::util::img::delete_old_images;
 use crate::{
     auth::{filter_visible_projects, get_user_from_headers},
@@ -14,7 +15,10 @@ use crate::{
         users::{Badges, Role},
     },
     queue::session::AuthQueue,
-    util::{routes::read_from_payload, validate::validation_errors_to_string},
+    util::{
+        routes::read_limited_from_payload,
+        validate::validation_errors_to_string,
+    },
 };
 use actix_web::{HttpRequest, HttpResponse, web};
 use ariadne::ids::UserId;
@@ -576,11 +580,12 @@ pub async fn user_icon_edit(
     delete_old_images(
         actual_user.avatar_url,
         actual_user.raw_avatar_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;

-    let bytes = read_from_payload(
+    let bytes = read_limited_from_payload(
         &mut payload,
         262144,
         "Icons must be smaller than 256KiB",
@@ -590,6 +595,7 @@ pub async fn user_icon_edit(
     let user_id: UserId = actual_user.id.into();
     let upload_result = crate::util::img::upload_image_optimized(
         &format!("data/{user_id}"),
+        FileHostPublicity::Public,
         bytes.freeze(),
         &ext.ext,
         Some(96),
@@ -648,6 +654,7 @@ pub async fn user_icon_delete(
     delete_old_images(
         actual_user.avatar_url,
         actual_user.raw_avatar_url,
+        FileHostPublicity::Public,
         &***file_host,
     )
     .await?;

@@ -9,7 +9,7 @@ use crate::database::models::version_item::{
 };
 use crate::database::models::{self, DBOrganization, image_item};
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::ids::{ImageId, ProjectId, VersionId};
 use crate::models::images::{Image, ImageContext};
 use crate::models::notifications::NotificationBody;
@@ -952,12 +952,12 @@ pub async fn upload_file(
         format!("data/{}/versions/{}/{}", project_id, version_id, &file_name);

     let upload_data = file_host
-        .upload_file(content_type, &file_path, data)
+        .upload_file(content_type, &file_path, FileHostPublicity::Public, data)
         .await?;

     uploaded_files.push(UploadedFile {
-        file_id: upload_data.file_id,
-        file_name: file_path,
+        name: file_path,
+        publicity: FileHostPublicity::Public,
     });

     let sha1_bytes = upload_data.content_sha1.into_bytes();

@@ -1,6 +1,6 @@
 use std::str::FromStr;

-pub fn parse_var<T: FromStr>(var: &'static str) -> Option<T> {
+pub fn parse_var<T: FromStr>(var: &str) -> Option<T> {
     dotenvy::var(var).ok().and_then(|i| i.parse().ok())
 }
 pub fn parse_strings_from_var(var: &'static str) -> Option<Vec<String>> {
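
Relaxing `&'static str` to `&str` matters for the split public/private S3 settings: variable names can now be built at runtime. A sketch of that pattern (this helper mirrors `parse_var` but reads from `std::env` to stay dependency-free; the key names match the new `.env` entries):

use std::str::FromStr;

fn parse_var<T: FromStr>(var: &str) -> Option<T> {
    std::env::var(var).ok().and_then(|i| i.parse().ok())
}

fn bucket_setting(publicity: &str, key: &str) -> Option<String> {
    // e.g. S3_PUBLIC_BUCKET_NAME or S3_PRIVATE_BUCKET_NAME; a &'static str
    // parameter could never accept this runtime-built name.
    parse_var(&format!("S3_{publicity}_{key}"))
}

fn main() {
    println!("{:?}", bucket_setting("PRIVATE", "BUCKET_NAME"));
}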

@@ -1,3 +1,5 @@
+pub const MRPACK_MIME_TYPE: &str = "application/x-modrinth-modpack+zip";
+
 pub fn get_image_content_type(extension: &str) -> Option<&'static str> {
     match extension {
         "bmp" => Some("image/bmp"),
@@ -24,7 +26,7 @@ pub fn project_file_type(ext: &str) -> Option<&str> {
     match ext {
         "jar" => Some("application/java-archive"),
         "zip" | "litemod" => Some("application/zip"),
-        "mrpack" => Some("application/x-modrinth-modpack+zip"),
+        "mrpack" => Some(MRPACK_MIME_TYPE),
         _ => None,
     }
 }

@@ -1,7 +1,7 @@
 use crate::database;
 use crate::database::models::image_item;
 use crate::database::redis::RedisPool;
-use crate::file_hosting::FileHost;
+use crate::file_hosting::{FileHost, FileHostPublicity};
 use crate::models::images::ImageContext;
 use crate::routes::ApiError;
 use color_thief::ColorFormat;
@@ -38,11 +38,14 @@ pub struct UploadImageResult {
     pub raw_url: String,
     pub raw_url_path: String,

+    pub publicity: FileHostPublicity,
+
     pub color: Option<u32>,
 }

 pub async fn upload_image_optimized(
     upload_folder: &str,
+    publicity: FileHostPublicity,
     bytes: bytes::Bytes,
     file_extension: &str,
     target_width: Option<u32>,
@@ -80,6 +83,7 @@ pub async fn upload_image_optimized(
                     target_width.unwrap_or(0),
                     processed_image_ext
                 ),
+                publicity,
                 processed_image,
             )
             .await?,
@@ -92,6 +96,7 @@ pub async fn upload_image_optimized(
         .upload_file(
             content_type,
             &format!("{upload_folder}/{hash}.{file_extension}"),
+            publicity,
             bytes,
         )
         .await?;
@@ -107,6 +112,9 @@ pub async fn upload_image_optimized(

         raw_url: url,
         raw_url_path: upload_data.file_name,
+
+        publicity,
+
         color,
     })
 }
@@ -165,6 +173,7 @@ fn convert_to_webp(img: &DynamicImage) -> Result<Vec<u8>, ImageError> {
 pub async fn delete_old_images(
     image_url: Option<String>,
     raw_image_url: Option<String>,
+    publicity: FileHostPublicity,
     file_host: &dyn FileHost,
 ) -> Result<(), ApiError> {
     let cdn_url = dotenvy::var("CDN_URL")?;
@@ -173,7 +182,7 @@ pub async fn delete_old_images(
         let name = image_url.split(&cdn_url_start).nth(1);

         if let Some(icon_path) = name {
-            file_host.delete_file_version("", icon_path).await?;
+            file_host.delete_file(icon_path, publicity).await?;
         }
     }

@@ -181,7 +190,7 @@ pub async fn delete_old_images(
         let name = raw_image_url.split(&cdn_url_start).nth(1);

         if let Some(icon_path) = name {
-            file_host.delete_file_version("", icon_path).await?;
+            file_host.delete_file(icon_path, publicity).await?;
         }
     }
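
The common thread in these hunks is one two-variant switch: every upload and delete now names the bucket it targets instead of relying on a single implicit one. A stripped-down sketch of that contract with stand-in types (the real `FileHostPublicity` enum and `FileHost` trait live in labrinth's file_hosting module):

#[derive(Clone, Copy, Debug)]
enum FileHostPublicity {
    Public,
    Private,
}

// Stand-in for the delete_file method: the caller, not the backend,
// decides which bucket a path lives in.
fn delete_file(path: &str, publicity: FileHostPublicity) {
    let bucket = match publicity {
        FileHostPublicity::Public => "public bucket",
        FileHostPublicity::Private => "private bucket",
    };
    println!("deleting {path} from the {bucket}");
}

fn main() {
    delete_file("data/abc/icon.webp", FileHostPublicity::Public);
    delete_file("shared_instance/xyz.mrpack", FileHostPublicity::Private);
}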

@@ -1,11 +1,14 @@
 use crate::routes::ApiError;
 use crate::routes::v3::project_creation::CreateError;
+use crate::util::validate::validation_errors_to_string;
 use actix_multipart::Field;
 use actix_web::web::Payload;
 use bytes::BytesMut;
 use futures::StreamExt;
+use serde::de::DeserializeOwned;
+use validator::Validate;

-pub async fn read_from_payload(
+pub async fn read_limited_from_payload(
     payload: &mut Payload,
     cap: usize,
     err_msg: &'static str,
@@ -25,6 +28,28 @@ pub async fn read_from_payload(
     Ok(bytes)
 }
+
+pub async fn read_typed_from_payload<T>(
+    payload: &mut Payload,
+) -> Result<T, ApiError>
+where
+    T: DeserializeOwned + Validate,
+{
+    let mut bytes = BytesMut::new();
+    while let Some(item) = payload.next().await {
+        bytes.extend_from_slice(&item.map_err(|_| {
+            ApiError::InvalidInput(
+                "Unable to parse bytes in payload sent!".to_string(),
+            )
+        })?);
+    }
+
+    let parsed: T = serde_json::from_slice(&bytes)?;
+    parsed.validate().map_err(|err| {
+        ApiError::InvalidInput(validation_errors_to_string(err, None))
+    })?;
+    Ok(parsed)
+}

 pub async fn read_from_field(
     field: &mut Field,
     cap: usize,
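
`read_typed_from_payload` folds the deserialize-then-validate dance used throughout the shared instance routes into one call. The core pattern, reduced to a self-contained sketch (the struct and limits are invented for illustration; labrinth's version reads from an actix Payload and returns ApiError):

use serde::Deserialize;
use validator::Validate;

#[derive(Deserialize, Validate)]
struct CreateBody {
    #[validate(length(min = 3, max = 64))]
    title: String,
}

fn parse_and_validate(bytes: &[u8]) -> Result<CreateBody, String> {
    // JSON first, then field-level validation, exactly in that order.
    let parsed: CreateBody =
        serde_json::from_slice(bytes).map_err(|e| e.to_string())?;
    parsed.validate().map_err(|e| e.to_string())?;
    Ok(parsed)
}

fn main() {
    assert!(parse_and_validate(br#"{"title":"a fine title"}"#).is_ok());
    assert!(parse_and_validate(br#"{"title":"no"}"#).is_err()); // too short
}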

@@ -71,7 +71,6 @@ pub enum DecodingError {
 }

 #[macro_export]
-#[doc(hidden)]
 macro_rules! impl_base62_display {
     ($struct:ty) => {
         impl std::fmt::Display for $struct {