separation of database (#1)

Reviewed-on: #1
2025-11-03 02:26:19 +00:00
parent 8b98df745c
commit 39ee8d52a4
21 changed files with 407 additions and 140 deletions

Cargo.lock (generated)
View File

@@ -1221,7 +1221,6 @@ dependencies = [
  "chrono",
  "futures",
  "serde",
- "sqlx",
  "thiserror 2.0.17",
  "uuid",
 ]
@@ -1231,7 +1230,9 @@ name = "libertas_infra"
 version = "0.1.0"
 dependencies = [
  "async-trait",
+ "chrono",
  "libertas_core",
+ "serde",
  "sqlx",
  "uuid",
 ]

View File

@@ -7,3 +7,17 @@ services:
       - "6222:6222"
       - "8222:8222"
     restart: unless-stopped
+  db:
+    image: postgres:17
+    container_name: libertas_db
+    environment:
+      POSTGRES_USER: libertas
+      POSTGRES_PASSWORD: libertas_password
+      POSTGRES_DB: libertas_db
+    ports:
+      - "5436:5432"
+    volumes:
+      - libertas_db_data:/var/lib/postgresql/data
+    restart: unless-stopped
+volumes:
+  libertas_db_data:
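
Note: the new db service publishes container port 5432 on host port 5436, which is what the config change in the next file points at. A minimal connectivity sketch, not part of this commit, assuming the sqlx 0.8 / tokio stack the workspace already uses:

    use sqlx::postgres::PgPoolOptions;

    #[tokio::main]
    async fn main() -> Result<(), sqlx::Error> {
        // Host port 5436 maps to the container's 5432 (see the compose file above).
        let pool = PgPoolOptions::new()
            .max_connections(5)
            .connect("postgres://libertas:libertas_password@localhost:5436/libertas_db")
            .await?;

        // Round-trip a trivial query to confirm the container is reachable.
        let row: (i32,) = sqlx::query_as("SELECT 1").fetch_one(&pool).await?;
        assert_eq!(row.0, 1);
        Ok(())
    }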

View File

@@ -7,12 +7,13 @@ pub fn load_config() -> CoreResult<Config> {
     Ok(Config {
         database: DatabaseConfig {
             db_type: DatabaseType::Postgres,
-            url: "postgres://postgres:postgres@localhost:5432/libertas_db".to_string(),
+            url: "postgres://libertas:libertas_password@localhost:5436/libertas_db".to_string(),
         },
         server_address: "127.0.0.1:8080".to_string(),
         jwt_secret: "super_secret_jwt_key".to_string(),
         media_library_path: "media_library".to_string(),
         broker_url: "nats://localhost:4222".to_string(),
         max_upload_size_mb: Some(100),
+        default_storage_quota_gb: Some(10),
     })
 }

View File

@@ -38,6 +38,7 @@ pub async fn build_app_state(config: Config) -> CoreResult<AppState> {
         user_repo.clone(),
         hasher,
         tokenizer.clone(),
+        Arc::new(config.clone()),
     ));
     let media_service = Arc::new(MediaServiceImpl::new(
         media_repo.clone(),

View File

@@ -37,14 +37,12 @@ impl From<Media> for MediaResponse {
     }
 }

-pub fn media_routes() -> Router<AppState> {
-    let max_size_mb = 100; // todo: get from config
+pub fn media_routes(max_upload_size: usize) -> Router<AppState> {
     Router::new()
         .route("/", post(upload_media))
         .route("/{id}", get(get_media_details).delete(delete_media))
         .route("/{id}/file", get(get_media_file))
-        .layer(DefaultBodyLimit::max(max_size_mb * 1024 * 1024))
+        .layer(DefaultBodyLimit::max(max_upload_size))
 }

 async fn upload_media(

View File

@@ -28,8 +28,10 @@ async fn main() -> anyhow::Result<()> {
     let addr: SocketAddr = config.server_address.parse()?;
     let app_state = factory::build_app_state(config).await?;

-    let app = routes::api_routes().with_state(app_state);
+    let max_upload_size =
+        (app_state.config.max_upload_size_mb.unwrap_or(100) * 1024 * 1024) as usize;
+    let app = routes::api_routes(max_upload_size).with_state(app_state);

     println!("Starting server at http://{}", addr);

View File

@@ -5,10 +5,10 @@ use crate::{
     state::AppState,
 };

-pub fn api_routes() -> Router<AppState> {
+pub fn api_routes(max_upload_size: usize) -> Router<AppState> {
     let auth_routes = auth_handlers::auth_routes();
     let user_routes = user_handlers::user_routes();
-    let media_routes = media_handlers::media_routes();
+    let media_routes = media_handlers::media_routes(max_upload_size);
     let album_routes = album_handlers::album_routes();

     Router::new()

View File

@@ -46,89 +46,24 @@ impl MediaServiceImpl {
 #[async_trait]
 impl MediaService for MediaServiceImpl {
     async fn upload_media(&self, mut data: UploadMediaData<'_>) -> CoreResult<Media> {
-        let user = self
-            .user_repo
-            .find_by_id(data.owner_id)
-            .await?
-            .ok_or(CoreError::NotFound("User".to_string(), data.owner_id))?;
-
-        let mut hasher = Sha256::new();
-        let mut file_bytes = Vec::new();
-
-        while let Some(chunk_result) = data.stream.next().await {
-            let chunk = chunk_result.map_err(|e| CoreError::Io(e))?;
-            hasher.update(&chunk);
-            file_bytes.extend_from_slice(&chunk);
-        }
-
-        let file_size = file_bytes.len() as i64;
-
-        if user.storage_used + file_size > user.storage_quota {
-            return Err(CoreError::Auth(format!(
-                "Storage quota exceeded. Used: {}, Quota: {}",
-                user.storage_used, user.storage_quota
-            )));
-        }
-
-        let hash = format!("{:x}", hasher.finalize());
-
-        if self.repo.find_by_hash(&hash).await?.is_some() {
-            return Err(CoreError::Duplicate(
-                "A file with this content already exists".to_string(),
-            ));
-        }
-
-        let now = chrono::Utc::now();
-        let year = now.year().to_string();
-        let month = format!("{:02}", now.month());
-
-        let mut dest_path = PathBuf::from(&self.config.media_library_path);
-        dest_path.push(year.clone());
-        dest_path.push(month.clone());
-
-        fs::create_dir_all(&dest_path)
-            .await
-            .map_err(|e| CoreError::Io(e))?;
-
-        dest_path.push(&data.filename);
-
-        let storage_path_str = PathBuf::from(&year)
-            .join(&month)
-            .join(&data.filename)
-            .to_string_lossy()
-            .to_string();
-
-        let mut file = fs::File::create(&dest_path)
-            .await
-            .map_err(|e| CoreError::Io(e))?;
-        file.write_all(&file_bytes)
-            .await
-            .map_err(|e| CoreError::Io(e))?;
-
-        let media_model = Media {
-            id: Uuid::new_v4(),
-            owner_id: data.owner_id,
-            storage_path: storage_path_str,
-            original_filename: data.filename,
-            mime_type: data.mime_type,
-            hash,
-            created_at: now,
-            extracted_location: None,
-            width: None,
-            height: None,
-        };
-
-        self.repo.create(&media_model).await?;
-
-        self.user_repo
-            .update_storage_used(user.id, file_size)
-            .await?;
-
-        let job_payload = json!({ "media_id": media_model.id });
-        self.nats_client
-            .publish("media.new".to_string(), job_payload.to_string().into())
-            .await
-            .map_err(|e| CoreError::Unknown(format!("Failed to publish NATS job: {}", e)))?;
-
-        Ok(media_model)
+        let (file_bytes, hash, file_size) = self.hash_and_buffer_stream(&mut data).await?;
+
+        let owner_id = data.owner_id;
+        let filename = data.filename;
+        let mime_type = data.mime_type;
+
+        self.check_upload_prerequisites(owner_id, file_size, &hash)
+            .await?;
+
+        let storage_path = self.persist_media_file(&file_bytes, &filename).await?;
+
+        let media = self
+            .persist_media_metadata(owner_id, filename, mime_type, storage_path, hash, file_size)
+            .await?;
+
+        self.publish_new_media_job(media.id).await?;
+
+        Ok(media)
     }

     async fn get_media_details(&self, id: Uuid, user_id: Uuid) -> CoreResult<Media> {
@@ -235,3 +170,110 @@ impl MediaService for MediaServiceImpl {
         Ok(())
     }
 }
+
+impl MediaServiceImpl {
+    async fn hash_and_buffer_stream(
+        &self,
+        stream: &mut UploadMediaData<'_>,
+    ) -> CoreResult<(Vec<u8>, String, i64)> {
+        let mut hasher = Sha256::new();
+        let mut file_bytes = Vec::new();
+
+        while let Some(chunk_result) = stream.stream.next().await {
+            let chunk = chunk_result.map_err(|e| CoreError::Io(e))?;
+            hasher.update(&chunk);
+            file_bytes.extend_from_slice(&chunk);
+        }
+
+        let file_size = file_bytes.len() as i64;
+        let hash = format!("{:x}", hasher.finalize());
+
+        Ok((file_bytes, hash, file_size))
+    }
+
+    async fn check_upload_prerequisites(
+        &self,
+        user_id: Uuid,
+        file_size: i64,
+        hash: &str,
+    ) -> CoreResult<()> {
+        let user = self
+            .user_repo
+            .find_by_id(user_id)
+            .await?
+            .ok_or(CoreError::NotFound("User".to_string(), user_id))?;
+
+        if user.storage_used + file_size > user.storage_quota {
+            return Err(CoreError::Auth(format!(
+                "Storage quota exceeded. Used: {}, Quota: {}",
+                user.storage_used, user.storage_quota
+            )));
+        }
+
+        if self.repo.find_by_hash(hash).await?.is_some() {
+            return Err(CoreError::Duplicate(
+                "A file with this content already exists".to_string(),
+            ));
+        }
+
+        Ok(())
+    }
+
+    async fn persist_media_file(&self, file_bytes: &[u8], filename: &str) -> CoreResult<String> {
+        let now = chrono::Utc::now();
+        let year = now.year().to_string();
+        let month = format!("{:02}", now.month());
+
+        let mut dest_path = PathBuf::from(&self.config.media_library_path);
+        dest_path.push(year.clone());
+        dest_path.push(month.clone());
+
+        fs::create_dir_all(&dest_path).await?;
+
+        dest_path.push(filename);
+
+        let storage_path_str = PathBuf::from(&year)
+            .join(&month)
+            .join(filename)
+            .to_string_lossy()
+            .to_string();
+
+        let mut file = fs::File::create(&dest_path).await?;
+        file.write_all(&file_bytes).await?;
+
+        Ok(storage_path_str)
+    }
+
+    async fn persist_media_metadata(
+        &self,
+        owner_id: Uuid,
+        filename: String,
+        mime_type: String,
+        storage_path: String,
+        hash: String,
+        file_size: i64,
+    ) -> CoreResult<Media> {
+        let media_model = Media {
+            id: Uuid::new_v4(),
+            owner_id,
+            storage_path,
+            original_filename: filename,
+            mime_type,
+            hash,
+            created_at: chrono::Utc::now(),
+            extracted_location: None,
+            width: None,
+            height: None,
+        };
+
+        self.repo.create(&media_model).await?;
+
+        self.user_repo
+            .update_storage_used(owner_id, file_size)
+            .await?;
+
+        Ok(media_model)
+    }
+
+    async fn publish_new_media_job(&self, media_id: Uuid) -> CoreResult<()> {
+        let job_payload = json!({ "media_id": media_id });
+        self.nats_client
+            .publish("media.new".to_string(), job_payload.to_string().into())
+            .await
+            .map_err(|e| CoreError::Unknown(format!("Failed to publish NATS job: {}", e)))
+    }
+}
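
The refactor splits upload_media into single-purpose helpers, and hash_and_buffer_stream is the only one that touches the byte stream. A standalone sketch of the same buffer-and-hash pattern, with illustrative names, using the futures and sha2 crates already in the dependency tree:

    use futures::{stream, StreamExt};
    use sha2::{Digest, Sha256};

    #[tokio::main]
    async fn main() {
        // Stand-in for the upload stream: two chunks of bytes.
        let mut chunks = stream::iter(vec![
            Ok::<Vec<u8>, std::io::Error>(vec![1, 2, 3]),
            Ok(vec![4, 5]),
        ]);

        let mut hasher = Sha256::new();
        let mut file_bytes = Vec::new();
        while let Some(chunk) = chunks.next().await {
            let chunk = chunk.expect("stream error");
            hasher.update(&chunk);
            file_bytes.extend_from_slice(&chunk);
        }

        // Same outputs the helper returns: buffered bytes, hex digest, size.
        let hash = format!("{:x}", hasher.finalize());
        assert_eq!(file_bytes.len(), 5);
        println!("sha256 = {hash}");
    }

One behavioral note: the whole file is still buffered in memory before the quota and duplicate checks run, so the DefaultBodyLimit wired up in main.rs above remains the effective cap on upload size.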

View File

@@ -2,6 +2,7 @@ use std::sync::Arc;

 use async_trait::async_trait;
 use libertas_core::{
+    config::Config,
     error::{CoreError, CoreResult},
     models::{Role, User},
     repositories::UserRepository,
@@ -16,6 +17,7 @@ pub struct UserServiceImpl {
     repo: Arc<dyn UserRepository>,
     hasher: Arc<dyn PasswordHasher>,
     tokenizer: Arc<dyn TokenGenerator>,
+    config: Arc<Config>,
 }

 impl UserServiceImpl {
@@ -23,11 +25,13 @@ impl UserServiceImpl {
         repo: Arc<dyn UserRepository>,
         hasher: Arc<dyn PasswordHasher>,
         tokenizer: Arc<dyn TokenGenerator>,
+        config: Arc<Config>,
     ) -> Self {
         Self {
             repo,
             hasher,
             tokenizer,
+            config,
         }
     }
 }
@@ -50,6 +54,9 @@ impl UserService for UserServiceImpl {
         let hashed_password = self.hasher.hash_password(data.password).await?;

+        let quota_gb = self.config.default_storage_quota_gb.unwrap_or(10);
+        let storage_quota = (quota_gb * 1024 * 1024 * 1024) as i64;
+
         let user = User {
             id: Uuid::new_v4(),
             username: data.username.to_string(),
@@ -58,7 +65,7 @@ impl UserService for UserServiceImpl {
             created_at: chrono::Utc::now(),
             updated_at: chrono::Utc::now(),
             role: Role::User,
-            storage_quota: 10 * 1024 * 1024 * 1024, // 10 GB
+            storage_quota,
             storage_used: 0,
         };
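
The quota arithmetic: with the default of Some(10), storage_quota works out to 10 × 1024³ = 10,737,418,240 bytes, identical to the hard-coded value it replaces. A quick illustrative check:

    fn main() {
        let quota_gb: u64 = 10;
        let storage_quota = (quota_gb * 1024 * 1024 * 1024) as i64;
        // Same value as the old literal `10 * 1024 * 1024 * 1024`.
        assert_eq!(storage_quota, 10_737_418_240);
    }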

View File

@@ -10,12 +10,5 @@ bytes = "1.10.1"
 chrono = "0.4.42"
 futures = "0.3.31"
 thiserror = "2.0.17"
-uuid = "1.18.1"
-sqlx = { version = "0.8.6", features = [
-    "runtime-tokio",
-    "postgres",
-    "uuid",
-    "chrono",
-    "sqlite",
-] }
+uuid = { version = "1.18.1", features = ["v4", "serde"] }
 serde = { version = "1.0.228", features = ["derive"] }

View File

@@ -20,4 +20,5 @@ pub struct Config {
     pub media_library_path: String,
     pub broker_url: String,
     pub max_upload_size_mb: Option<u32>,
+    pub default_storage_quota_gb: Option<u64>,
 }

View File

@@ -1,8 +1,7 @@
 use serde::Deserialize;

-#[derive(Debug, Clone, PartialEq, Eq, sqlx::Type)]
-#[sqlx(rename_all = "lowercase")]
-#[sqlx(type_name = "TEXT")]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Role {
     User,
     Admin,
@@ -17,6 +16,15 @@ impl Role {
     }
 }

+impl From<&str> for Role {
+    fn from(s: &str) -> Self {
+        match s {
+            "admin" => Role::Admin,
+            _ => Role::User,
+        }
+    }
+}
+
 pub struct Media {
     pub id: uuid::Uuid,
     pub owner_id: uuid::Uuid,
@@ -30,7 +38,7 @@ pub struct Media {
     pub height: Option<i32>,
 }

-#[derive(Clone, sqlx::FromRow)]
+#[derive(Clone)]
 pub struct User {
     pub id: uuid::Uuid,
     pub username: String,
@@ -44,7 +52,7 @@ pub struct User {
     pub storage_used: i64, // in bytes
 }

-#[derive(Clone, sqlx::FromRow)]
+#[derive(Clone)]
 pub struct Album {
     pub id: uuid::Uuid,
     pub owner_id: uuid::Uuid,
@@ -78,14 +86,30 @@ pub struct AlbumMedia {
     pub media_id: uuid::Uuid,
 }

-#[derive(Debug, Clone, Copy, sqlx::Type, PartialEq, Eq, Deserialize)]
-#[sqlx(rename_all = "lowercase")]
-#[sqlx(type_name = "album_permission")]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
 pub enum AlbumPermission {
     View,
     Contribute,
 }

+impl AlbumPermission {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            AlbumPermission::View => "view",
+            AlbumPermission::Contribute => "contribute",
+        }
+    }
+}
+
+impl From<&str> for AlbumPermission {
+    fn from(s: &str) -> Self {
+        match s {
+            "contribute" => AlbumPermission::Contribute,
+            _ => AlbumPermission::View,
+        }
+    }
+}
+
 pub struct AlbumShare {
     pub album_id: uuid::Uuid,
     pub user_id: uuid::Uuid,
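
These conversions give the persistence layer a string round-trip for both enums in place of the removed sqlx derives. An illustrative usage snippet, assuming libertas_core::models is in scope:

    use libertas_core::models::{AlbumPermission, Role};

    fn main() {
        assert_eq!(Role::from("admin"), Role::Admin);
        // Unknown strings fall back to the least-privileged variant.
        assert_eq!(Role::from("anything-else"), Role::User);

        let perm = AlbumPermission::Contribute;
        assert_eq!(perm.as_str(), "contribute");
        assert_eq!(AlbumPermission::from(perm.as_str()), perm);
    }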

View File

@@ -14,3 +14,5 @@ sqlx = { version = "0.8.6", features = [
 ] }
 async-trait = "0.1.89"
 uuid = { version = "1.18.1", features = ["v4"] }
+chrono = "0.4.42"
+serde = { version = "1.0.228", features = ["derive"] }

View File

@@ -0,0 +1,64 @@
+use chrono::Utc;
+use serde::Deserialize;
+use uuid::Uuid;
+
+#[derive(Debug, Clone, PartialEq, Eq, sqlx::Type)]
+#[sqlx(rename_all = "lowercase")]
+#[sqlx(type_name = "TEXT")]
+pub enum PostgresRole {
+    User,
+    Admin,
+}
+
+#[derive(sqlx::FromRow)]
+pub struct PostgresUser {
+    pub id: Uuid,
+    pub username: String,
+    pub email: String,
+    pub hashed_password: String,
+    pub created_at: chrono::DateTime<Utc>,
+    pub updated_at: chrono::DateTime<Utc>,
+    pub role: String,
+    pub storage_quota: i64,
+    pub storage_used: i64,
+}
+
+#[derive(sqlx::FromRow)]
+pub struct PostgresAlbum {
+    pub id: uuid::Uuid,
+    pub owner_id: uuid::Uuid,
+    pub name: String,
+    pub description: Option<String>,
+    pub is_public: bool,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub updated_at: chrono::DateTime<chrono::Utc>,
+}
+
+#[derive(sqlx::FromRow)]
+pub struct PostgresMedia {
+    pub id: uuid::Uuid,
+    pub owner_id: uuid::Uuid,
+    pub storage_path: String,
+    pub original_filename: String,
+    pub mime_type: String,
+    pub hash: String,
+    pub created_at: chrono::DateTime<chrono::Utc>,
+    pub extracted_location: Option<String>,
+    pub width: Option<i32>,
+    pub height: Option<i32>,
+}
+
+#[derive(Debug, Clone, Copy, sqlx::Type, PartialEq, Eq, Deserialize)]
+#[sqlx(rename_all = "lowercase")]
+#[sqlx(type_name = "album_permission")]
+pub enum PostgresAlbumPermission {
+    View,
+    Contribute,
+}
+
+pub struct PostgresAlbumShare {
+    pub album_id: uuid::Uuid,
+    pub user_id: uuid::Uuid,
+    pub permission: PostgresAlbumPermission,
+}
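
This new file is the crux of the separation: the sqlx derives (FromRow, sqlx::Type) now live only on these Postgres-specific structs, which is what lets libertas_core drop its sqlx dependency in the Cargo.toml change above. Note that PostgresUser.role is a plain String; the TEXT column is converted back to the domain Role enum in mappers.rs below.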

View File

@@ -1,2 +1,4 @@
 pub mod factory;
 pub mod repositories;
+pub mod db_models;
+pub mod mappers;

View File

@@ -0,0 +1,96 @@
+use libertas_core::models::{Album, AlbumPermission, AlbumShare, Media, Role, User};
+
+use crate::db_models::{
+    PostgresAlbum, PostgresAlbumPermission, PostgresAlbumShare, PostgresMedia, PostgresRole,
+    PostgresUser,
+};
+
+impl From<PostgresRole> for Role {
+    fn from(pg_role: PostgresRole) -> Self {
+        match pg_role {
+            PostgresRole::User => Role::User,
+            PostgresRole::Admin => Role::Admin,
+        }
+    }
+}
+
+impl From<Role> for PostgresRole {
+    fn from(role: Role) -> Self {
+        match role {
+            Role::User => PostgresRole::User,
+            Role::Admin => PostgresRole::Admin,
+        }
+    }
+}
+
+impl From<PostgresUser> for User {
+    fn from(pg_user: PostgresUser) -> Self {
+        User {
+            id: pg_user.id,
+            username: pg_user.username,
+            email: pg_user.email,
+            hashed_password: pg_user.hashed_password,
+            created_at: pg_user.created_at,
+            updated_at: pg_user.updated_at,
+            role: Role::from(pg_user.role.as_str()),
+            storage_quota: pg_user.storage_quota,
+            storage_used: pg_user.storage_used,
+        }
+    }
+}
+
+impl From<PostgresAlbum> for Album {
+    fn from(pg_album: PostgresAlbum) -> Self {
+        Album {
+            id: pg_album.id,
+            owner_id: pg_album.owner_id,
+            name: pg_album.name,
+            description: pg_album.description,
+            is_public: pg_album.is_public,
+            created_at: pg_album.created_at,
+            updated_at: pg_album.updated_at,
+        }
+    }
+}
+
+impl From<PostgresMedia> for Media {
+    fn from(pg_media: PostgresMedia) -> Self {
+        Media {
+            id: pg_media.id,
+            owner_id: pg_media.owner_id,
+            storage_path: pg_media.storage_path,
+            original_filename: pg_media.original_filename,
+            mime_type: pg_media.mime_type,
+            hash: pg_media.hash,
+            created_at: pg_media.created_at,
+            extracted_location: pg_media.extracted_location,
+            width: pg_media.width,
+            height: pg_media.height,
+        }
+    }
+}
+
+impl From<PostgresAlbumPermission> for AlbumPermission {
+    fn from(pg_permission: PostgresAlbumPermission) -> Self {
+        match pg_permission {
+            PostgresAlbumPermission::View => AlbumPermission::View,
+            PostgresAlbumPermission::Contribute => AlbumPermission::Contribute,
+        }
+    }
+}
+
+impl From<AlbumPermission> for PostgresAlbumPermission {
+    fn from(permission: AlbumPermission) -> Self {
+        match permission {
+            AlbumPermission::View => PostgresAlbumPermission::View,
+            AlbumPermission::Contribute => PostgresAlbumPermission::Contribute,
+        }
+    }
+}
+
+impl From<PostgresAlbumShare> for AlbumShare {
+    fn from(pg_share: PostgresAlbumShare) -> Self {
+        AlbumShare {
+            album_id: pg_share.album_id,
+            user_id: pg_share.user_id,
+            permission: AlbumPermission::from(pg_share.permission),
+        }
+    }
+}
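
With these From impls in place, a repository's job reduces to fetching the db_models struct and calling .into(). A sketch of the intended flow, with the sqlx fetch elided (types from this diff):

    use libertas_core::models::User;
    use libertas_infra::db_models::PostgresUser;

    fn to_domain(pg_user: PostgresUser) -> User {
        // From<PostgresUser> for User converts the stored role string
        // back into the domain enum via Role::from(&str).
        pg_user.into()
    }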

View File

@@ -7,6 +7,8 @@ use libertas_core::{
 use sqlx::PgPool;
 use uuid::Uuid;

+use crate::db_models::PostgresAlbum;
+
 #[derive(Clone)]
 pub struct PostgresAlbumRepository {
     pool: PgPool,
@@ -42,8 +44,8 @@ impl AlbumRepository for PostgresAlbumRepository {
     }

     async fn find_by_id(&self, id: Uuid) -> CoreResult<Option<Album>> {
-        sqlx::query_as!(
-            Album,
+        let pg_album = sqlx::query_as!(
+            PostgresAlbum,
             r#"
             SELECT id, owner_id, name, description, is_public, created_at, updated_at
             FROM albums
@@ -53,12 +55,13 @@ impl AlbumRepository for PostgresAlbumRepository {
         )
         .fetch_optional(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_album.map(|a| a.into()))
     }

     async fn list_by_user(&self, user_id: Uuid) -> CoreResult<Vec<Album>> {
-        sqlx::query_as!(
-            Album,
+        let pg_albums = sqlx::query_as!(
+            PostgresAlbum,
             r#"
             SELECT id, owner_id, name, description, is_public, created_at, updated_at
             FROM albums
@@ -68,7 +71,9 @@ impl AlbumRepository for PostgresAlbumRepository {
         )
         .fetch_all(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_albums.into_iter().map(|a| a.into()).collect())
     }

     async fn add_media_to_album(&self, album_id: Uuid, media_ids: &[Uuid]) -> CoreResult<()> {

View File

@@ -1,11 +1,10 @@
 use async_trait::async_trait;
 use libertas_core::{
-    error::{CoreError, CoreResult},
-    models::AlbumPermission,
-    repositories::AlbumShareRepository,
+    error::{CoreError, CoreResult}, models::AlbumPermission, repositories::AlbumShareRepository,
 };
 use sqlx::PgPool;
 use uuid::Uuid;

+use crate::db_models::PostgresAlbumPermission;
+
 #[derive(Clone)]
 pub struct PostgresAlbumShareRepository {
@@ -35,7 +34,7 @@ impl AlbumShareRepository for PostgresAlbumShareRepository {
             "#,
             album_id,
             user_id,
-            permission as AlbumPermission,
+            PostgresAlbumPermission::from(permission) as PostgresAlbumPermission,
         )
         .execute(&self.pool)
         .await
@@ -51,7 +50,7 @@ impl AlbumShareRepository for PostgresAlbumShareRepository {
     ) -> CoreResult<Option<AlbumPermission>> {
         let result = sqlx::query!(
             r#"
-            SELECT permission as "permission: AlbumPermission"
+            SELECT permission as "permission: PostgresAlbumPermission"
             FROM album_shares
             WHERE album_id = $1 AND user_id = $2
             "#,
@@ -62,7 +61,7 @@ impl AlbumShareRepository for PostgresAlbumShareRepository {
         .await
         .map_err(|e| CoreError::Database(e.to_string()))?;

-        Ok(result.map(|row| row.permission))
+        Ok(result.map(|row| row.permission.into()))
     }

     async fn is_media_in_shared_album(&self, media_id: Uuid, user_id: Uuid) -> CoreResult<bool> {
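
Note on the sqlx syntax here: inside the query! macro, the column alias form permission as "permission: PostgresAlbumPermission" overrides the Rust type the macro decodes that column into, while PostgresAlbumPermission::from(permission) as PostgresAlbumPermission in the bind list overrides the encoded parameter type. Both overrides must now point at the infra-layer enum, since the domain AlbumPermission no longer implements sqlx::Type.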

View File

@@ -7,6 +7,8 @@ use libertas_core::{
 use sqlx::PgPool;
 use uuid::Uuid;

+use crate::db_models::PostgresMedia;
+
 #[derive(Clone)]
 pub struct PostgresMediaRepository {
     pool: PgPool,
@@ -44,8 +46,8 @@ impl MediaRepository for PostgresMediaRepository {
     }

     async fn find_by_hash(&self, hash: &str) -> CoreResult<Option<Media>> {
-        sqlx::query_as!(
-            Media,
+        let pg_media = sqlx::query_as!(
+            PostgresMedia,
             r#"
             SELECT id, owner_id, storage_path, original_filename, mime_type, hash, created_at,
                    extracted_location, width, height
@@ -56,12 +58,14 @@ impl MediaRepository for PostgresMediaRepository {
         )
         .fetch_optional(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_media.map(|m| m.into()))
     }

     async fn find_by_id(&self, id: Uuid) -> CoreResult<Option<Media>> {
-        sqlx::query_as!(
-            Media,
+        let pg_media = sqlx::query_as!(
+            PostgresMedia,
             r#"
             SELECT id, owner_id, storage_path, original_filename, mime_type, hash, created_at,
                    extracted_location, width, height
@@ -72,12 +76,14 @@ impl MediaRepository for PostgresMediaRepository {
         )
         .fetch_optional(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_media.map(|m| m.into()))
     }

     async fn list_by_user(&self, user_id: Uuid) -> CoreResult<Vec<Media>> {
-        sqlx::query_as!(
-            Media,
+        let pg_media = sqlx::query_as!(
+            PostgresMedia,
             r#"
             SELECT id, owner_id, storage_path, original_filename, mime_type, hash, created_at,
                    extracted_location, width, height
@@ -88,7 +94,9 @@ impl MediaRepository for PostgresMediaRepository {
         )
         .fetch_all(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_media.into_iter().map(|m| m.into()).collect())
     }

     async fn update_metadata(
async fn update_metadata( async fn update_metadata(

View File

@@ -1,11 +1,13 @@
 use async_trait::async_trait;
 use libertas_core::{
     error::{CoreError, CoreResult},
-    models::{Role, User},
+    models::User,
     repositories::UserRepository,
 };
 use sqlx::{PgPool, SqlitePool, types::Uuid};

+use crate::db_models::PostgresUser;
+
 #[derive(Clone)]
 pub struct PostgresUserRepository {
     pool: PgPool,
@@ -54,12 +56,12 @@ impl UserRepository for PostgresUserRepository {
     }

     async fn find_by_email(&self, email: &str) -> CoreResult<Option<User>> {
-        sqlx::query_as!(
-            User,
+        let pg_user = sqlx::query_as!(
+            PostgresUser,
             r#"
             SELECT
                 id, username, email, hashed_password, created_at, updated_at,
-                role as "role: Role",
+                role,
                 storage_quota, storage_used
             FROM users
             WHERE email = $1
@@ -68,17 +70,18 @@ impl UserRepository for PostgresUserRepository {
         )
         .fetch_optional(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_user.map(|u| u.into()))
     }

     async fn find_by_username(&self, username: &str) -> CoreResult<Option<User>> {
-        sqlx::query_as!(
-            User,
+        let pg_user = sqlx::query_as!(
+            PostgresUser,
             r#"
             SELECT
                 id, username, email, hashed_password, created_at, updated_at,
-                role as "role: Role",
-                storage_quota, storage_used
+                role, storage_quota, storage_used
             FROM users
             WHERE username = $1
             "#,
@@ -86,16 +89,18 @@ impl UserRepository for PostgresUserRepository {
         )
         .fetch_optional(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_user.map(|u| u.into()))
     }

     async fn find_by_id(&self, id: Uuid) -> CoreResult<Option<User>> {
-        sqlx::query_as!(
-            User,
+        let pg_user = sqlx::query_as!(
+            PostgresUser,
             r#"
             SELECT
                 id, username, email, hashed_password, created_at, updated_at,
-                role as "role: Role",
+                role,
                 storage_quota, storage_used
             FROM users
             WHERE id = $1
@@ -104,7 +109,8 @@ impl UserRepository for PostgresUserRepository {
         )
         .fetch_optional(&self.pool)
         .await
-        .map_err(|e| CoreError::Database(e.to_string()))
+        .map_err(|e| CoreError::Database(e.to_string()))?;
+
+        Ok(pg_user.map(|u| u.into()))
     }

     async fn update_storage_used(&self, user_id: Uuid, bytes: i64) -> CoreResult<()> {

View File

@@ -7,12 +7,13 @@ pub fn load_config() -> CoreResult<Config> {
     Ok(Config {
         database: DatabaseConfig {
             db_type: DatabaseType::Postgres,
-            url: "postgres://postgres:postgres@localhost:5432/libertas_db".to_string(),
+            url: "postgres://libertas:libertas_password@localhost:5436/libertas_db".to_string(),
         },
         server_address: "127.0.0.1:8080".to_string(),
         jwt_secret: "super_secret_jwt_key".to_string(),
         media_library_path: "media_library".to_string(),
         broker_url: "nats://localhost:4222".to_string(),
         max_upload_size_mb: Some(100),
+        default_storage_quota_gb: Some(10),
     })
 }