Compiler improvements (#753)

* basic redis add

* toml; reverted unnecessary changes

* merge issues

* increased test connections

---------

Co-authored-by: Geometrically <18202329+Geometrically@users.noreply.github.com>
This commit is contained in:
Wyatt Verchere 2023-11-19 19:10:13 -08:00 committed by GitHub
parent e06a77af28
commit dfba6c7c91
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 307 additions and 140 deletions

View File

@ -109,3 +109,9 @@ derive-new = "0.5.9"
[dev-dependencies]
actix-http = "3.4.0"
[profile.dev]
opt-level = 0 # Minimal optimization, speeds up compilation
lto = false # Disables Link Time Optimization
incremental = true # Enables incremental compilation
codegen-units = 16 # Higher number can improve compile times but reduce runtime performance

View File

@ -90,6 +90,8 @@ impl Category {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let res: Option<Vec<Category>> = redis
.get_deserialized_from_json(TAGS_NAMESPACE, "category")
.await?;
@ -155,6 +157,8 @@ impl DonationPlatform {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let res: Option<Vec<DonationPlatform>> = redis
.get_deserialized_from_json(TAGS_NAMESPACE, "donation_platform")
.await?;
@ -209,6 +213,8 @@ impl ReportType {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let res: Option<Vec<String>> = redis
.get_deserialized_from_json(TAGS_NAMESPACE, "report_type")
.await?;
@ -257,6 +263,8 @@ impl ProjectType {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let res: Option<Vec<String>> = redis
.get_deserialized_from_json(TAGS_NAMESPACE, "project_type")
.await?;

View File

@ -157,6 +157,8 @@ impl Collection {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if collection_ids.is_empty() {
return Ok(Vec::new());
}
@ -166,7 +168,10 @@ impl Collection {
if !collection_ids.is_empty() {
let collections = redis
.multi_get::<String, _>(COLLECTIONS_NAMESPACE, collection_ids.iter().map(|x| x.0))
.multi_get::<String>(
COLLECTIONS_NAMESPACE,
collection_ids.iter().map(|x| x.0.to_string()),
)
.await?;
for collection in collections {
@ -240,6 +245,8 @@ impl Collection {
}
pub async fn clear_cache(id: CollectionId, redis: &RedisPool) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis.delete(COLLECTIONS_NAMESPACE, id.0).await?;
Ok(())
}

View File

@ -58,6 +58,8 @@ impl Flow {
expires: Duration,
redis: &RedisPool,
) -> Result<String, DatabaseError> {
let mut redis = redis.connect().await?;
let flow = ChaCha20Rng::from_entropy()
.sample_iter(&Alphanumeric)
.take(32)
@ -71,6 +73,8 @@ impl Flow {
}
pub async fn get(id: &str, redis: &RedisPool) -> Result<Option<Flow>, DatabaseError> {
let mut redis = redis.connect().await?;
redis.get_deserialized_from_json(FLOWS_NAMESPACE, id).await
}
@ -91,6 +95,8 @@ impl Flow {
}
pub async fn remove(id: &str, redis: &RedisPool) -> Result<Option<()>, DatabaseError> {
let mut redis = redis.connect().await?;
redis.delete(FLOWS_NAMESPACE, id).await?;
Ok(Some(()))
}

View File

@ -180,6 +180,7 @@ impl Image {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if image_ids.is_empty() {
return Ok(Vec::new());
}
@ -191,7 +192,7 @@ impl Image {
if !image_ids.is_empty() {
let images = redis
.multi_get::<String, _>(IMAGES_NAMESPACE, image_ids)
.multi_get::<String>(IMAGES_NAMESPACE, image_ids.iter().map(|x| x.to_string()))
.await?;
for image in images {
if let Some(image) = image.and_then(|x| serde_json::from_str::<Image>(&x).ok()) {
@ -246,6 +247,8 @@ impl Image {
}
pub async fn clear_cache(id: ImageId, redis: &RedisPool) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis.delete(IMAGES_NAMESPACE, id.0).await?;
Ok(())
}

View File

@ -44,6 +44,7 @@ impl Game {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let cached_games: Option<Vec<Game>> = redis
.get_deserialized_from_json(GAMES_LIST_NAMESPACE, "games")
.await?;
@ -95,6 +96,7 @@ impl Loader {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let cached_id: Option<i32> = redis.get_deserialized_from_json(LOADER_ID, name).await?;
if let Some(cached_id) = cached_id {
return Ok(Some(LoaderId(cached_id)));
@ -124,6 +126,7 @@ impl Loader {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let cached_loaders: Option<Vec<Loader>> = redis
.get_deserialized_from_json(LOADERS_LIST_NAMESPACE, "all")
.await?;
@ -318,9 +321,11 @@ impl LoaderField {
{
type RedisLoaderFieldTuple = (LoaderId, Vec<LoaderField>);
let mut redis = redis.connect().await?;
let mut loader_ids = loader_ids.to_vec();
let cached_fields: Vec<RedisLoaderFieldTuple> = redis
.multi_get::<String, _>(LOADER_FIELDS_NAMESPACE, loader_ids.iter().map(|x| x.0))
.multi_get::<String>(LOADER_FIELDS_NAMESPACE, loader_ids.iter().map(|x| x.0))
.await?
.into_iter()
.flatten()
@ -399,6 +404,8 @@ impl LoaderFieldEnum {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let cached_enum = redis
.get_deserialized_from_json(LOADER_FIELD_ENUMS_ID_NAMESPACE, enum_name)
.await?;
@ -488,12 +495,13 @@ impl LoaderFieldEnumValue {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let mut found_enums = Vec::new();
let mut remaining_enums: Vec<LoaderFieldEnumId> = loader_field_enum_ids.to_vec();
if !remaining_enums.is_empty() {
let enums = redis
.multi_get::<String, _>(
.multi_get::<String>(
LOADER_FIELD_ENUM_VALUES_NAMESPACE,
loader_field_enum_ids.iter().map(|x| x.0),
)

View File

@ -174,8 +174,10 @@ impl Notification {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
{
let mut redis = redis.connect().await?;
let cached_notifications: Option<Vec<Notification>> = redis
.get_deserialized_from_json(USER_NOTIFICATIONS_NAMESPACE, user_id.0)
.get_deserialized_from_json(USER_NOTIFICATIONS_NAMESPACE, &user_id.0.to_string())
.await?;
if let Some(notifications) = cached_notifications {
@ -319,6 +321,8 @@ impl Notification {
user_ids: impl IntoIterator<Item = &UserId>,
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis
.delete_many(
user_ids

View File

@ -103,6 +103,8 @@ impl Organization {
{
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
if organization_strings.is_empty() {
return Ok(Vec::new());
}
@ -120,11 +122,12 @@ impl Organization {
organization_ids.append(
&mut redis
.multi_get::<i64, _>(
.multi_get::<i64>(
ORGANIZATIONS_TITLES_NAMESPACE,
organization_strings
.iter()
.map(|x| x.to_string().to_lowercase()),
.map(|x| x.to_string().to_lowercase())
.collect::<Vec<_>>(),
)
.await?
.into_iter()
@ -134,7 +137,10 @@ impl Organization {
if !organization_ids.is_empty() {
let organizations = redis
.multi_get::<String, _>(ORGANIZATIONS_NAMESPACE, organization_ids)
.multi_get::<String>(
ORGANIZATIONS_NAMESPACE,
organization_ids.iter().map(|x| x.to_string()),
)
.await?;
for organization in organizations {
@ -197,8 +203,8 @@ impl Organization {
redis
.set(
ORGANIZATIONS_TITLES_NAMESPACE,
organization.title.to_lowercase(),
organization.id.0,
&organization.title.to_lowercase(),
&organization.id.0.to_string(),
None,
)
.await?;
@ -318,6 +324,8 @@ impl Organization {
title: Option<String>,
redis: &RedisPool,
) -> Result<(), super::DatabaseError> {
let mut redis = redis.connect().await?;
redis
.delete_many([
(ORGANIZATIONS_NAMESPACE, Some(id.0.to_string())),

View File

@ -89,6 +89,8 @@ impl PersonalAccessToken {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if pat_strings.is_empty() {
return Ok(Vec::new());
}
@ -106,7 +108,7 @@ impl PersonalAccessToken {
pat_ids.append(
&mut redis
.multi_get::<i64, _>(
.multi_get::<i64>(
PATS_TOKENS_NAMESPACE,
pat_strings.iter().map(|x| x.to_string()),
)
@ -118,7 +120,7 @@ impl PersonalAccessToken {
if !pat_ids.is_empty() {
let pats = redis
.multi_get::<String, _>(PATS_NAMESPACE, pat_ids)
.multi_get::<String>(PATS_NAMESPACE, pat_ids.iter().map(|x| x.to_string()))
.await?;
for pat in pats {
if let Some(pat) =
@ -174,8 +176,8 @@ impl PersonalAccessToken {
redis
.set(
PATS_TOKENS_NAMESPACE,
pat.access_token.clone(),
pat.id.0,
&pat.access_token,
&pat.id.0.to_string(),
None,
)
.await?;
@ -194,8 +196,10 @@ impl PersonalAccessToken {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let res = redis
.get_deserialized_from_json::<Vec<i64>, _>(PATS_USERS_NAMESPACE, user_id.0)
.get_deserialized_from_json::<Vec<i64>>(PATS_USERS_NAMESPACE, &user_id.0.to_string())
.await?;
if let Some(res) = res {
@ -220,8 +224,8 @@ impl PersonalAccessToken {
redis
.set(
PATS_USERS_NAMESPACE,
user_id.0,
serde_json::to_string(&db_pats)?,
&user_id.0.to_string(),
&serde_json::to_string(&db_pats)?,
None,
)
.await?;
@ -232,6 +236,8 @@ impl PersonalAccessToken {
clear_pats: Vec<(Option<PatId>, Option<String>, Option<UserId>)>,
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
if clear_pats.is_empty() {
return Ok(());
}

View File

@ -513,6 +513,8 @@ impl Project {
return Ok(Vec::new());
}
let mut redis = redis.connect().await?;
let mut found_projects = Vec::new();
let mut remaining_strings = project_strings
.iter()
@ -526,7 +528,7 @@ impl Project {
project_ids.append(
&mut redis
.multi_get::<i64, _>(
.multi_get::<i64>(
PROJECTS_SLUGS_NAMESPACE,
project_strings.iter().map(|x| x.to_string().to_lowercase()),
)
@ -537,7 +539,10 @@ impl Project {
);
if !project_ids.is_empty() {
let projects = redis
.multi_get::<String, _>(PROJECTS_NAMESPACE, project_ids)
.multi_get::<String>(
PROJECTS_NAMESPACE,
project_ids.iter().map(|x| x.to_string()),
)
.await?;
for project in projects {
if let Some(project) =
@ -686,8 +691,8 @@ impl Project {
redis
.set(
PROJECTS_SLUGS_NAMESPACE,
slug.to_lowercase(),
project.inner.id.0,
&slug.to_lowercase(),
&project.inner.id.0.to_string(),
None,
)
.await?;
@ -709,8 +714,13 @@ impl Project {
{
type Dependencies = Vec<(Option<VersionId>, Option<ProjectId>, Option<ProjectId>)>;
let mut redis = redis.connect().await?;
let dependencies = redis
.get_deserialized_from_json::<Dependencies, _>(PROJECTS_DEPENDENCIES_NAMESPACE, id.0)
.get_deserialized_from_json::<Dependencies>(
PROJECTS_DEPENDENCIES_NAMESPACE,
&id.0.to_string(),
)
.await?;
if let Some(dependencies) = dependencies {
return Ok(dependencies);
@ -755,6 +765,8 @@ impl Project {
clear_dependencies: Option<bool>,
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis
.delete_many([
(PROJECTS_NAMESPACE, Some(id.0.to_string())),

View File

@ -130,6 +130,8 @@ impl Session {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if session_strings.is_empty() {
return Ok(Vec::new());
}
@ -147,7 +149,7 @@ impl Session {
session_ids.append(
&mut redis
.multi_get::<i64, _>(
.multi_get::<i64>(
SESSIONS_IDS_NAMESPACE,
session_strings.iter().map(|x| x.to_string()),
)
@ -159,7 +161,10 @@ impl Session {
if !session_ids.is_empty() {
let sessions = redis
.multi_get::<String, _>(SESSIONS_NAMESPACE, session_ids)
.multi_get::<String>(
SESSIONS_NAMESPACE,
session_ids.iter().map(|x| x.to_string()),
)
.await?;
for session in sessions {
if let Some(session) =
@ -218,8 +223,8 @@ impl Session {
redis
.set(
SESSIONS_IDS_NAMESPACE,
session.session.clone(),
session.id.0,
&session.session,
&session.id.0.to_string(),
None,
)
.await?;
@ -238,8 +243,13 @@ impl Session {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
let mut redis = redis.connect().await?;
let res = redis
.get_deserialized_from_json::<Vec<i64>, _>(SESSIONS_USERS_NAMESPACE, user_id.0)
.get_deserialized_from_json::<Vec<i64>>(
SESSIONS_USERS_NAMESPACE,
&user_id.0.to_string(),
)
.await?;
if let Some(res) = res {
@ -272,6 +282,8 @@ impl Session {
clear_sessions: Vec<(Option<SessionId>, Option<String>, Option<UserId>)>,
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
if clear_sessions.is_empty() {
return Ok(());
}

View File

@ -197,18 +197,23 @@ impl TeamMember {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
{
use futures::stream::TryStreamExt;
if team_ids.is_empty() {
return Ok(Vec::new());
}
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
let mut team_ids_parsed: Vec<i64> = team_ids.iter().map(|x| x.0).collect();
let mut found_teams = Vec::new();
let teams = redis
.multi_get::<String, _>(TEAMS_NAMESPACE, team_ids_parsed.clone())
.multi_get::<String>(
TEAMS_NAMESPACE,
team_ids_parsed.iter().map(|x| x.to_string()),
)
.await?;
for team_raw in teams {
@ -271,6 +276,7 @@ impl TeamMember {
}
pub async fn clear_cache(id: TeamId, redis: &RedisPool) -> Result<(), super::DatabaseError> {
let mut redis = redis.connect().await?;
redis.delete(TEAMS_NAMESPACE, id.0).await?;
Ok(())
}

View File

@ -134,6 +134,8 @@ impl User {
{
use futures::TryStreamExt;
let mut redis = redis.connect().await?;
if users_strings.is_empty() {
return Ok(Vec::new());
}
@ -151,7 +153,7 @@ impl User {
user_ids.append(
&mut redis
.multi_get::<i64, _>(
.multi_get::<i64>(
USER_USERNAMES_NAMESPACE,
users_strings.iter().map(|x| x.to_string().to_lowercase()),
)
@ -163,7 +165,7 @@ impl User {
if !user_ids.is_empty() {
let users = redis
.multi_get::<String, _>(USERS_NAMESPACE, user_ids)
.multi_get::<String>(USERS_NAMESPACE, user_ids.iter().map(|x| x.to_string()))
.await?;
for user in users {
if let Some(user) = user.and_then(|x| serde_json::from_str::<User>(&x).ok()) {
@ -239,8 +241,8 @@ impl User {
redis
.set(
USER_USERNAMES_NAMESPACE,
user.username.to_lowercase(),
user.id.0,
&user.username.to_lowercase(),
&user.id.0.to_string(),
None,
)
.await?;
@ -278,8 +280,13 @@ impl User {
{
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
let cached_projects = redis
.get_deserialized_from_json::<Vec<ProjectId>, _>(USERS_PROJECTS_NAMESPACE, user_id.0)
.get_deserialized_from_json::<Vec<ProjectId>>(
USERS_PROJECTS_NAMESPACE,
&user_id.0.to_string(),
)
.await?;
if let Some(projects) = cached_projects {
@ -384,6 +391,8 @@ impl User {
user_ids: &[(UserId, Option<String>)],
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis
.delete_many(user_ids.iter().flat_map(|(id, username)| {
[
@ -402,6 +411,8 @@ impl User {
user_ids: &[UserId],
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis
.delete_many(
user_ids

View File

@ -492,18 +492,27 @@ impl Version {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres>,
{
use futures::stream::TryStreamExt;
if version_ids.is_empty() {
return Ok(Vec::new());
}
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
let mut version_ids_parsed: Vec<i64> = version_ids.iter().map(|x| x.0).collect();
let mut found_versions = Vec::new();
let versions = redis
.multi_get::<String, _>(VERSIONS_NAMESPACE, version_ids_parsed.clone())
.multi_get::<String>(
VERSIONS_NAMESPACE,
version_ids_parsed
.clone()
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>(),
)
.await?;
for version in versions {
@ -721,18 +730,20 @@ impl Version {
where
E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
{
use futures::stream::TryStreamExt;
let mut redis = redis.connect().await?;
if hashes.is_empty() {
return Ok(Vec::new());
}
use futures::stream::TryStreamExt;
let mut file_ids_parsed = hashes.to_vec();
let mut found_files = Vec::new();
let files = redis
.multi_get::<String, _>(
.multi_get::<String>(
VERSION_FILES_NAMESPACE,
file_ids_parsed
.iter()
@ -829,6 +840,8 @@ impl Version {
version: &QueryVersion,
redis: &RedisPool,
) -> Result<(), DatabaseError> {
let mut redis = redis.connect().await?;
redis
.delete_many(
iter::once((VERSIONS_NAMESPACE, Some(version.inner.id.0.to_string()))).chain(

View File

@ -1,6 +1,7 @@
use super::models::DatabaseError;
use deadpool_redis::{Config, Runtime};
use redis::{cmd, FromRedisValue, ToRedisArgs};
use itertools::Itertools;
use redis::{cmd, Cmd};
use std::fmt::Display;
const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
@ -11,6 +12,11 @@ pub struct RedisPool {
meta_namespace: String,
}
/// A single checked-out connection from the [`RedisPool`], carrying the
/// pool's key-prefix namespace so every command can build fully-qualified
/// keys (`{meta_namespace}_{namespace}:{id}`) without going back to the pool.
pub struct RedisConnection {
// The underlying deadpool-managed redis connection; public so callers can
// issue raw commands when the helper methods below don't cover a use case.
pub connection: deadpool_redis::Connection,
// Cloned from RedisPool::meta_namespace at connect() time; used as the key
// prefix for set/get/delete so test pools don't collide with real data.
meta_namespace: String,
}
impl RedisPool {
// initiate a new redis pool
// testing pool uses a hashmap to mimic redis behaviour for very small data sizes (ie: tests)
@ -35,32 +41,39 @@ impl RedisPool {
}
}
pub async fn set<T1, T2>(
&self,
pub async fn connect(&self) -> Result<RedisConnection, DatabaseError> {
Ok(RedisConnection {
connection: self.pool.get().await?,
meta_namespace: self.meta_namespace.clone(),
})
}
}
impl RedisConnection {
pub async fn set(
&mut self,
namespace: &str,
id: T1,
data: T2,
id: &str,
data: &str,
expiry: Option<i64>,
) -> Result<(), DatabaseError>
where
T1: Display,
T2: ToRedisArgs,
{
let mut redis_connection = self.pool.get().await?;
cmd("SET")
.arg(format!("{}_{}:{}", self.meta_namespace, namespace, id))
.arg(data)
.arg("EX")
.arg(expiry.unwrap_or(DEFAULT_EXPIRY))
.query_async::<_, ()>(&mut redis_connection)
.await?;
) -> Result<(), DatabaseError> {
let mut cmd = cmd("SET");
redis_args(
&mut cmd,
vec![
format!("{}_{}:{}", self.meta_namespace, namespace, id),
data.to_string(),
"EX".to_string(),
expiry.unwrap_or(DEFAULT_EXPIRY).to_string(),
]
.as_slice(),
);
redis_execute(&mut cmd, &mut self.connection).await?;
Ok(())
}
pub async fn set_serialized_to_json<Id, D>(
&self,
&mut self,
namespace: &str,
id: Id,
data: D,
@ -70,92 +83,116 @@ impl RedisPool {
Id: Display,
D: serde::Serialize,
{
self.set(namespace, id, serde_json::to_string(&data)?, expiry)
.await
self.set(
namespace,
&id.to_string(),
&serde_json::to_string(&data)?,
expiry,
)
.await
}
pub async fn get<R, Id>(&self, namespace: &str, id: Id) -> Result<Option<R>, DatabaseError>
where
Id: Display,
R: FromRedisValue,
{
let mut redis_connection = self.pool.get().await?;
let res = cmd("GET")
.arg(format!("{}_{}:{}", self.meta_namespace, namespace, id))
.query_async::<_, Option<R>>(&mut redis_connection)
.await?;
pub async fn get(
&mut self,
namespace: &str,
id: &str,
) -> Result<Option<String>, DatabaseError> {
let mut cmd = cmd("GET");
redis_args(
&mut cmd,
vec![format!("{}_{}:{}", self.meta_namespace, namespace, id)].as_slice(),
);
let res = redis_execute(&mut cmd, &mut self.connection).await?;
Ok(res)
}
pub async fn get_deserialized_from_json<R, Id>(
&self,
pub async fn get_deserialized_from_json<R>(
&mut self,
namespace: &str,
id: Id,
id: &str,
) -> Result<Option<R>, DatabaseError>
where
Id: Display,
R: for<'a> serde::Deserialize<'a>,
{
Ok(self
.get::<String, Id>(namespace, id)
.get(namespace, id)
.await?
.and_then(|x| serde_json::from_str(&x).ok()))
}
pub async fn multi_get<R, T1>(
&self,
pub async fn multi_get<R>(
&mut self,
namespace: &str,
ids: impl IntoIterator<Item = T1>,
ids: impl IntoIterator<Item = impl Display>,
) -> Result<Vec<Option<R>>, DatabaseError>
where
T1: Display,
R: FromRedisValue,
R: for<'a> serde::Deserialize<'a>,
{
let mut redis_connection = self.pool.get().await?;
let res = cmd("MGET")
.arg(
ids.into_iter()
.map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x))
.collect::<Vec<_>>(),
)
.query_async::<_, Vec<Option<R>>>(&mut redis_connection)
.await?;
Ok(res)
let mut cmd = cmd("MGET");
redis_args(
&mut cmd,
&ids.into_iter()
.map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x))
.collect_vec(),
);
let res: Vec<Option<String>> = redis_execute(&mut cmd, &mut self.connection).await?;
Ok(res
.into_iter()
.map(|x| x.and_then(|x| serde_json::from_str(&x).ok()))
.collect())
}
pub async fn delete<T1>(&self, namespace: &str, id: T1) -> Result<(), DatabaseError>
pub async fn delete<T1>(&mut self, namespace: &str, id: T1) -> Result<(), DatabaseError>
where
T1: Display,
{
let mut redis_connection = self.pool.get().await?;
cmd("DEL")
.arg(format!("{}_{}:{}", self.meta_namespace, namespace, id))
.query_async::<_, ()>(&mut redis_connection)
.await?;
let mut cmd = cmd("DEL");
redis_args(
&mut cmd,
vec![format!("{}_{}:{}", self.meta_namespace, namespace, id)].as_slice(),
);
redis_execute(&mut cmd, &mut self.connection).await?;
Ok(())
}
pub async fn delete_many(
&self,
&mut self,
iter: impl IntoIterator<Item = (&str, Option<String>)>,
) -> Result<(), DatabaseError> {
let mut cmd = cmd("DEL");
let mut any = false;
for (namespace, id) in iter {
if let Some(id) = id {
cmd.arg(format!("{}_{}:{}", self.meta_namespace, namespace, id));
redis_args(
&mut cmd,
[format!("{}_{}:{}", self.meta_namespace, namespace, id)].as_slice(),
);
any = true;
}
}
if any {
let mut redis_connection = self.pool.get().await?;
cmd.query_async::<_, ()>(&mut redis_connection).await?;
redis_execute(&mut cmd, &mut self.connection).await?;
}
Ok(())
}
}
/// Append every string in `args` to `cmd` as a command argument, in order.
///
/// NOTE(review): this helper is duplicated verbatim in `src/util/redis.rs`;
/// consider re-exporting from one location instead of keeping two copies.
pub fn redis_args(cmd: &mut Cmd, args: &[String]) {
    args.iter().for_each(|arg| {
        cmd.arg(arg);
    });
}
/// Execute `cmd` against `redis` and decode the reply into `T`.
///
/// Any redis error is converted into `deadpool_redis::PoolError` via `?`.
///
/// NOTE(review): this helper is duplicated verbatim in `src/util/redis.rs`;
/// consider re-exporting from one location instead of keeping two copies.
pub async fn redis_execute<T>(
    cmd: &mut Cmd,
    redis: &mut deadpool_redis::Connection,
) -> Result<T, deadpool_redis::PoolError>
where
    T: redis::FromRedisValue,
{
    Ok(cmd.query_async::<_, T>(redis).await?)
}

View File

@ -7,6 +7,7 @@ pub mod env;
pub mod ext;
pub mod guards;
pub mod img;
pub mod redis;
pub mod routes;
pub mod validate;
pub mod webhook;

18
src/util/redis.rs Normal file
View File

@ -0,0 +1,18 @@
use redis::Cmd;
/// Append every string in `args` to `cmd` as a command argument, in order.
pub fn redis_args(cmd: &mut Cmd, args: &[String]) {
    args.iter().for_each(|arg| {
        cmd.arg(arg);
    });
}
/// Execute `cmd` against `redis` and decode the reply into `T`.
///
/// Any redis error is converted into `deadpool_redis::PoolError` via `?`.
pub async fn redis_execute<T>(
    cmd: &mut Cmd,
    redis: &mut deadpool_redis::Connection,
) -> Result<T, deadpool_redis::PoolError>
where
    T: redis::FromRedisValue,
{
    Ok(cmd.query_async::<_, T>(redis).await?)
}

View File

@ -40,20 +40,21 @@ async fn test_get_project() {
assert_eq!(versions[0], json!(alpha_version_id));
// Confirm that the request was cached
let mut redis_pool = test_env.db.redis_pool.connect().await.unwrap();
assert_eq!(
test_env
.db
.redis_pool
.get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, alpha_project_slug)
redis_pool
.get(PROJECTS_SLUGS_NAMESPACE, alpha_project_slug)
.await
.unwrap(),
.unwrap()
.and_then(|x| x.parse::<i64>().ok()),
Some(parse_base62(alpha_project_id).unwrap() as i64)
);
let cached_project = test_env
.db
.redis_pool
.get::<String, _>(PROJECTS_NAMESPACE, parse_base62(alpha_project_id).unwrap())
let cached_project = redis_pool
.get(
PROJECTS_NAMESPACE,
&parse_base62(alpha_project_id).unwrap().to_string(),
)
.await
.unwrap()
.unwrap();
@ -249,22 +250,21 @@ async fn test_add_remove_project() {
assert_eq!(resp.status(), 204);
// Confirm that the project is gone from the cache
let mut redis_pool = test_env.db.redis_pool.connect().await.unwrap();
assert_eq!(
test_env
.db
.redis_pool
.get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, "demo")
redis_pool
.get(PROJECTS_SLUGS_NAMESPACE, "demo")
.await
.unwrap(),
.unwrap()
.and_then(|x| x.parse::<i64>().ok()),
None
);
assert_eq!(
test_env
.db
.redis_pool
.get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, id)
redis_pool
.get(PROJECTS_SLUGS_NAMESPACE, &id)
.await
.unwrap(),
.unwrap()
.and_then(|x| x.parse::<i64>().ok()),
None
);

View File

@ -20,7 +20,7 @@ mod common;
#[actix_rt::test]
async fn search_projects() {
// Test setup and dummy data
let test_env = TestEnvironment::build(Some(8)).await;
let test_env = TestEnvironment::build(Some(10)).await;
let api = &test_env.v3;
let test_name = test_env.db.database_name.clone();

View File

@ -221,22 +221,21 @@ async fn test_add_remove_project() {
assert_eq!(resp.status(), 204);
// Confirm that the project is gone from the cache
let mut redis_conn = test_env.db.redis_pool.connect().await.unwrap();
assert_eq!(
test_env
.db
.redis_pool
.get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, "demo")
redis_conn
.get(PROJECTS_SLUGS_NAMESPACE, "demo")
.await
.unwrap(),
.unwrap()
.map(|x| x.parse::<i64>().unwrap()),
None
);
assert_eq!(
test_env
.db
.redis_pool
.get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, id)
redis_conn
.get(PROJECTS_SLUGS_NAMESPACE, &id)
.await
.unwrap(),
.unwrap()
.map(|x| x.parse::<i64>().unwrap()),
None
);

View File

@ -17,7 +17,7 @@ async fn search_projects() {
// It should drastically simplify this function
// Test setup and dummy data
let test_env = TestEnvironment::build(Some(8)).await;
let test_env = TestEnvironment::build(Some(10)).await;
let api = &test_env.v2;
let test_name = test_env.db.database_name.clone();

View File

@ -33,10 +33,12 @@ async fn test_get_version() {
assert_eq!(&version.project_id.to_string(), alpha_project_id);
assert_eq!(&version.id.to_string(), alpha_version_id);
let cached_project = test_env
.db
.redis_pool
.get::<String, _>(VERSIONS_NAMESPACE, parse_base62(alpha_version_id).unwrap())
let mut redis_conn = test_env.db.redis_pool.connect().await.unwrap();
let cached_project = redis_conn
.get(
VERSIONS_NAMESPACE,
&parse_base62(alpha_version_id).unwrap().to_string(),
)
.await
.unwrap()
.unwrap();