Separation of database #1

Merged
GKaszewski merged 3 commits from wip-buggy into master 2025-11-03 02:26:20 +00:00
11 changed files with 216 additions and 136 deletions
Showing only changes of commit 964bcf2655 - Show all commits

View File

@@ -14,5 +14,6 @@ pub fn load_config() -> CoreResult<Config> {
media_library_path: "media_library".to_string(), media_library_path: "media_library".to_string(),
broker_url: "nats://localhost:4222".to_string(), broker_url: "nats://localhost:4222".to_string(),
max_upload_size_mb: Some(100), max_upload_size_mb: Some(100),
default_storage_quota_gb: Some(10),
}) })
} }

View File

@@ -38,6 +38,7 @@ pub async fn build_app_state(config: Config) -> CoreResult<AppState> {
user_repo.clone(), user_repo.clone(),
hasher, hasher,
tokenizer.clone(), tokenizer.clone(),
Arc::new(config.clone()),
)); ));
let media_service = Arc::new(MediaServiceImpl::new( let media_service = Arc::new(MediaServiceImpl::new(
media_repo.clone(), media_repo.clone(),

View File

@@ -37,14 +37,12 @@ impl From<Media> for MediaResponse {
} }
} }
pub fn media_routes() -> Router<AppState> { pub fn media_routes(max_upload_size: usize) -> Router<AppState> {
let max_size_mb = 100; // todo: get from config
Router::new() Router::new()
.route("/", post(upload_media)) .route("/", post(upload_media))
.route("/{id}", get(get_media_details).delete(delete_media)) .route("/{id}", get(get_media_details).delete(delete_media))
.route("/{id}/file", get(get_media_file)) .route("/{id}/file", get(get_media_file))
.layer(DefaultBodyLimit::max(max_size_mb * 1024 * 1024)) .layer(DefaultBodyLimit::max(max_upload_size))
} }
async fn upload_media( async fn upload_media(

View File

@@ -28,8 +28,10 @@ async fn main() -> anyhow::Result<()> {
let addr: SocketAddr = config.server_address.parse()?; let addr: SocketAddr = config.server_address.parse()?;
let app_state = factory::build_app_state(config).await?; let app_state = factory::build_app_state(config).await?;
let max_upload_size =
(app_state.config.max_upload_size_mb.unwrap_or(100) * 1024 * 1024) as usize;
let app = routes::api_routes().with_state(app_state); let app = routes::api_routes(max_upload_size).with_state(app_state);
println!("Starting server at http://{}", addr); println!("Starting server at http://{}", addr);

View File

@@ -5,10 +5,10 @@ use crate::{
state::AppState, state::AppState,
}; };
pub fn api_routes() -> Router<AppState> { pub fn api_routes(max_upload_size: usize) -> Router<AppState> {
let auth_routes = auth_handlers::auth_routes(); let auth_routes = auth_handlers::auth_routes();
let user_routes = user_handlers::user_routes(); let user_routes = user_handlers::user_routes();
let media_routes = media_handlers::media_routes(); let media_routes = media_handlers::media_routes(max_upload_size);
let album_routes = album_handlers::album_routes(); let album_routes = album_handlers::album_routes();
Router::new() Router::new()

View File

@@ -46,89 +46,20 @@ impl MediaServiceImpl {
#[async_trait] #[async_trait]
impl MediaService for MediaServiceImpl { impl MediaService for MediaServiceImpl {
async fn upload_media(&self, mut data: UploadMediaData<'_>) -> CoreResult<Media> { async fn upload_media(&self, mut data: UploadMediaData<'_>) -> CoreResult<Media> {
let user = self let (file_bytes, hash, file_size) = self.hash_and_buffer_stream(&mut data).await?;
.user_repo
.find_by_id(data.owner_id)
.await?
.ok_or(CoreError::NotFound("User".to_string(), data.owner_id))?;
let mut hasher = Sha256::new(); self.check_upload_prerequisites(data.owner_id, file_size, &hash)
let mut file_bytes = Vec::new();
while let Some(chunk_result) = data.stream.next().await {
let chunk = chunk_result.map_err(|e| CoreError::Io(e))?;
hasher.update(&chunk);
file_bytes.extend_from_slice(&chunk);
}
let file_size = file_bytes.len() as i64;
if user.storage_used + file_size > user.storage_quota {
return Err(CoreError::Auth(format!(
"Storage quota exceeded. Used: {}, Quota: {}",
user.storage_used, user.storage_quota
)));
}
let hash = format!("{:x}", hasher.finalize());
if self.repo.find_by_hash(&hash).await?.is_some() {
return Err(CoreError::Duplicate(
"A file with this content already exists".to_string(),
));
}
let now = chrono::Utc::now();
let year = now.year().to_string();
let month = format!("{:02}", now.month());
let mut dest_path = PathBuf::from(&self.config.media_library_path);
dest_path.push(year.clone());
dest_path.push(month.clone());
fs::create_dir_all(&dest_path)
.await
.map_err(|e| CoreError::Io(e))?;
dest_path.push(&data.filename);
let storage_path_str = PathBuf::from(&year)
.join(&month)
.join(&data.filename)
.to_string_lossy()
.to_string();
let mut file = fs::File::create(&dest_path)
.await
.map_err(|e| CoreError::Io(e))?;
file.write_all(&file_bytes)
.await
.map_err(|e| CoreError::Io(e))?;
let media_model = Media {
id: Uuid::new_v4(),
owner_id: data.owner_id,
storage_path: storage_path_str,
original_filename: data.filename,
mime_type: data.mime_type,
hash,
created_at: now,
extracted_location: None,
width: None,
height: None,
};
self.repo.create(&media_model).await?;
self.user_repo
.update_storage_used(user.id, file_size)
.await?; .await?;
let job_payload = json!({ "media_id": media_model.id }); let storage_path = self.persist_media_file(&file_bytes, &data.filename).await?;
self.nats_client
.publish("media.new".to_string(), job_payload.to_string().into())
.await
.map_err(|e| CoreError::Unknown(format!("Failed to publish NATS job: {}", e)))?;
Ok(media_model) let media = self
.persist_media_metadata(&data, storage_path, hash, file_size)
.await?;
self.publish_new_media_job(media.id).await?;
Ok(media)
} }
async fn get_media_details(&self, id: Uuid, user_id: Uuid) -> CoreResult<Media> { async fn get_media_details(&self, id: Uuid, user_id: Uuid) -> CoreResult<Media> {
@@ -235,3 +166,108 @@ impl MediaService for MediaServiceImpl {
Ok(()) Ok(())
} }
} }
impl MediaServiceImpl {
    /// Drains the upload stream fully into memory while hashing it with
    /// SHA-256. Returns the buffered bytes, the lowercase hex digest, and
    /// the size in bytes.
    ///
    /// NOTE(review): the whole file is buffered in RAM before any quota or
    /// duplicate checks run; worth spooling to a temp file if the
    /// configured upload limit grows.
    async fn hash_and_buffer_stream(
        &self,
        data: &mut UploadMediaData<'_>,
    ) -> CoreResult<(Vec<u8>, String, i64)> {
        let mut hasher = Sha256::new();
        let mut file_bytes = Vec::new();
        while let Some(chunk_result) = data.stream.next().await {
            let chunk = chunk_result.map_err(CoreError::Io)?;
            hasher.update(&chunk);
            file_bytes.extend_from_slice(&chunk);
        }
        let file_size = file_bytes.len() as i64;
        let hash = format!("{:x}", hasher.finalize());
        Ok((file_bytes, hash, file_size))
    }

    /// Verifies the owner exists, has enough free quota for `file_size`
    /// bytes, and that no media with the same content hash already exists.
    ///
    /// # Errors
    /// `NotFound` (unknown user), `Auth` (quota exceeded),
    /// `Duplicate` (hash collision with stored media).
    ///
    /// NOTE(review): this check and the later `update_storage_used` are
    /// not atomic, so concurrent uploads can race past the quota — a
    /// DB-side constraint would close the gap.
    async fn check_upload_prerequisites(
        &self,
        user_id: Uuid,
        file_size: i64,
        hash: &str,
    ) -> CoreResult<()> {
        let user = self
            .user_repo
            .find_by_id(user_id)
            .await?
            .ok_or(CoreError::NotFound("User".to_string(), user_id))?;
        if user.storage_used + file_size > user.storage_quota {
            return Err(CoreError::Auth(format!(
                "Storage quota exceeded. Used: {}, Quota: {}",
                user.storage_used, user.storage_quota
            )));
        }
        if self.repo.find_by_hash(hash).await?.is_some() {
            return Err(CoreError::Duplicate(
                "A file with this content already exists".to_string(),
            ));
        }
        Ok(())
    }

    /// Writes the buffered upload under `<media_library>/<year>/<month>/`
    /// and returns the library-relative storage path.
    ///
    /// NOTE(review): `filename` is client-supplied and pushed into the
    /// path unsanitized — a name containing `..` or path separators could
    /// escape the library root, and identical names in the same month
    /// silently overwrite each other (`File::create` truncates). Consider
    /// sanitizing or keying the on-disk name by the media UUID.
    async fn persist_media_file(&self, file_bytes: &[u8], filename: &str) -> CoreResult<String> {
        let now = chrono::Utc::now();
        let year = now.year().to_string();
        let month = format!("{:02}", now.month());
        let mut dest_path = PathBuf::from(&self.config.media_library_path);
        // `PathBuf::push` only borrows, so no clones are needed here.
        dest_path.push(&year);
        dest_path.push(&month);
        fs::create_dir_all(&dest_path).await?;
        dest_path.push(filename);
        let storage_path_str = PathBuf::from(&year)
            .join(&month)
            .join(filename)
            .to_string_lossy()
            .to_string();
        let mut file = fs::File::create(&dest_path).await?;
        // `file_bytes` is already a slice; the extra borrow was redundant.
        file.write_all(file_bytes).await?;
        Ok(storage_path_str)
    }

    /// Inserts the media row and charges `file_size` bytes against the
    /// owner's storage usage. Returns the persisted model.
    ///
    /// NOTE(review): `created_at` is sampled here, after the file was
    /// written, so around a month boundary it can disagree with the
    /// year/month directory chosen in `persist_media_file`.
    async fn persist_media_metadata(
        &self,
        data: &UploadMediaData<'_>,
        storage_path: String,
        hash: String,
        file_size: i64,
    ) -> CoreResult<Media> {
        let media_model = Media {
            id: Uuid::new_v4(),
            owner_id: data.owner_id,
            storage_path,
            original_filename: data.filename.clone(),
            mime_type: data.mime_type.clone(),
            hash,
            created_at: chrono::Utc::now(),
            extracted_location: None,
            width: None,
            height: None,
        };
        self.repo.create(&media_model).await?;
        self.user_repo
            .update_storage_used(data.owner_id, file_size)
            .await?;
        Ok(media_model)
    }

    /// Publishes a `media.new` job on NATS so downstream workers pick up
    /// the freshly stored media. Publish failures surface as
    /// `CoreError::Unknown`.
    async fn publish_new_media_job(&self, media_id: Uuid) -> CoreResult<()> {
        let job_payload = json!({ "media_id": media_id });
        self.nats_client
            .publish("media.new".to_string(), job_payload.to_string().into())
            .await
            .map_err(|e| CoreError::Unknown(format!("Failed to publish NATS job: {}", e)))
    }
}

View File

@@ -2,6 +2,7 @@ use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
use libertas_core::{ use libertas_core::{
config::Config,
error::{CoreError, CoreResult}, error::{CoreError, CoreResult},
models::{Role, User}, models::{Role, User},
repositories::UserRepository, repositories::UserRepository,
@@ -16,6 +17,7 @@ pub struct UserServiceImpl {
repo: Arc<dyn UserRepository>, repo: Arc<dyn UserRepository>,
hasher: Arc<dyn PasswordHasher>, hasher: Arc<dyn PasswordHasher>,
tokenizer: Arc<dyn TokenGenerator>, tokenizer: Arc<dyn TokenGenerator>,
config: Arc<Config>,
} }
impl UserServiceImpl { impl UserServiceImpl {
@@ -23,11 +25,13 @@ impl UserServiceImpl {
repo: Arc<dyn UserRepository>, repo: Arc<dyn UserRepository>,
hasher: Arc<dyn PasswordHasher>, hasher: Arc<dyn PasswordHasher>,
tokenizer: Arc<dyn TokenGenerator>, tokenizer: Arc<dyn TokenGenerator>,
config: Arc<Config>,
) -> Self { ) -> Self {
Self { Self {
repo, repo,
hasher, hasher,
tokenizer, tokenizer,
config,
} }
} }
} }
@@ -50,6 +54,9 @@ impl UserService for UserServiceImpl {
let hashed_password = self.hasher.hash_password(data.password).await?; let hashed_password = self.hasher.hash_password(data.password).await?;
let quota_gb = self.config.default_storage_quota_gb.unwrap_or(10);
let storage_quota = (quota_gb * 1024 * 1024 * 1024) as i64;
let user = User { let user = User {
id: Uuid::new_v4(), id: Uuid::new_v4(),
username: data.username.to_string(), username: data.username.to_string(),
@@ -58,7 +65,7 @@ impl UserService for UserServiceImpl {
created_at: chrono::Utc::now(), created_at: chrono::Utc::now(),
updated_at: chrono::Utc::now(), updated_at: chrono::Utc::now(),
role: Role::User, role: Role::User,
storage_quota: 10 * 1024 * 1024 * 1024, // 10 GB storage_quota,
storage_used: 0, storage_used: 0,
}; };

View File

@@ -20,4 +20,5 @@ pub struct Config {
pub media_library_path: String, pub media_library_path: String,
pub broker_url: String, pub broker_url: String,
pub max_upload_size_mb: Option<u32>, pub max_upload_size_mb: Option<u32>,
pub default_storage_quota_gb: Option<u64>,
} }

View File

@@ -1,8 +1,7 @@
use serde::Deserialize; use serde::Deserialize;
#[derive(Debug, Clone, PartialEq, Eq, sqlx::Type)] #[derive(Debug, Clone, PartialEq, Eq)]
#[sqlx(rename_all = "lowercase")]
#[sqlx(type_name = "TEXT")]
pub enum Role { pub enum Role {
User, User,
Admin, Admin,
@@ -17,6 +16,15 @@ impl Role {
} }
} }
impl From<&str> for Role {
    /// Maps the lowercase database string to a `Role`; anything other
    /// than "admin" falls back to the least-privileged `User`.
    fn from(s: &str) -> Self {
        if s == "admin" {
            Role::Admin
        } else {
            Role::User
        }
    }
}
pub struct Media { pub struct Media {
pub id: uuid::Uuid, pub id: uuid::Uuid,
pub owner_id: uuid::Uuid, pub owner_id: uuid::Uuid,
@@ -30,7 +38,7 @@ pub struct Media {
pub height: Option<i32>, pub height: Option<i32>,
} }
#[derive(Clone, sqlx::FromRow)] #[derive(Clone)]
pub struct User { pub struct User {
pub id: uuid::Uuid, pub id: uuid::Uuid,
pub username: String, pub username: String,
@@ -44,7 +52,7 @@ pub struct User {
pub storage_used: i64, // in bytes pub storage_used: i64, // in bytes
} }
#[derive(Clone, sqlx::FromRow)] #[derive(Clone)]
pub struct Album { pub struct Album {
pub id: uuid::Uuid, pub id: uuid::Uuid,
pub owner_id: uuid::Uuid, pub owner_id: uuid::Uuid,
@@ -78,14 +86,30 @@ pub struct AlbumMedia {
pub media_id: uuid::Uuid, pub media_id: uuid::Uuid,
} }
#[derive(Debug, Clone, Copy, sqlx::Type, PartialEq, Eq, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[sqlx(rename_all = "lowercase")]
#[sqlx(type_name = "album_permission")]
pub enum AlbumPermission { pub enum AlbumPermission {
View, View,
Contribute, Contribute,
} }
impl AlbumPermission {
pub fn as_str(&self) -> &'static str {
match self {
AlbumPermission::View => "view",
AlbumPermission::Contribute => "contribute",
}
}
}
impl From<&str> for AlbumPermission {
    /// Inverse of `as_str`; unrecognized values degrade to the
    /// read-only `View` permission.
    fn from(s: &str) -> Self {
        if s == "contribute" {
            AlbumPermission::Contribute
        } else {
            AlbumPermission::View
        }
    }
}
pub struct AlbumShare { pub struct AlbumShare {
pub album_id: uuid::Uuid, pub album_id: uuid::Uuid,
pub user_id: uuid::Uuid, pub user_id: uuid::Uuid,

View File

@@ -35,7 +35,7 @@ impl AlbumShareRepository for PostgresAlbumShareRepository {
"#, "#,
album_id, album_id,
user_id, user_id,
permission as AlbumPermission, permission.as_str() as "album_permission"
) )
.execute(&self.pool) .execute(&self.pool)
.await .await
@@ -49,12 +49,9 @@ impl AlbumShareRepository for PostgresAlbumShareRepository {
album_id: Uuid, album_id: Uuid,
user_id: Uuid, user_id: Uuid,
) -> CoreResult<Option<AlbumPermission>> { ) -> CoreResult<Option<AlbumPermission>> {
let result = sqlx::query!( let row = sqlx::query!(
r#" // --- FIX 2: CAST the enum to TEXT in the SQL ---
SELECT permission as "permission: AlbumPermission" "SELECT permission::TEXT as permission FROM album_shares WHERE album_id = $1 AND user_id = $2",
FROM album_shares
WHERE album_id = $1 AND user_id = $2
"#,
album_id, album_id,
user_id user_id
) )
@@ -62,7 +59,8 @@ impl AlbumShareRepository for PostgresAlbumShareRepository {
.await .await
.map_err(|e| CoreError::Database(e.to_string()))?; .map_err(|e| CoreError::Database(e.to_string()))?;
Ok(result.map(|row| row.permission)) // This now works because r.permission is a String
Ok(row.map(|r| AlbumPermission::from(r.permission.as_str())))
} }
async fn is_media_in_shared_album(&self, media_id: Uuid, user_id: Uuid) -> CoreResult<bool> { async fn is_media_in_shared_album(&self, media_id: Uuid, user_id: Uuid) -> CoreResult<bool> {

View File

@@ -54,57 +54,69 @@ impl UserRepository for PostgresUserRepository {
} }
async fn find_by_email(&self, email: &str) -> CoreResult<Option<User>> { async fn find_by_email(&self, email: &str) -> CoreResult<Option<User>> {
sqlx::query_as!( let row = sqlx::query!(
User, r#"SELECT id, username, email, hashed_password, created_at, updated_at, role, storage_quota, storage_used FROM users WHERE email = $1"#,
r#"
SELECT
id, username, email, hashed_password, created_at, updated_at,
role as "role: Role",
storage_quota, storage_used
FROM users
WHERE email = $1
"#,
email email
) )
.fetch_optional(&self.pool) .fetch_optional(&self.pool)
.await .await
.map_err(|e| CoreError::Database(e.to_string())) .map_err(|e| CoreError::Database(e.to_string()))?;
Ok(row.map(|r| User {
id: r.id,
username: r.username,
email: r.email,
hashed_password: r.hashed_password,
created_at: r.created_at,
updated_at: r.updated_at,
role: Role::from(r.role.as_str()),
storage_quota: r.storage_quota,
storage_used: r.storage_used,
}))
} }
async fn find_by_username(&self, username: &str) -> CoreResult<Option<User>> { async fn find_by_username(&self, username: &str) -> CoreResult<Option<User>> {
sqlx::query_as!( let row = sqlx::query!(
User, r#"SELECT id, username, email, hashed_password, created_at, updated_at, role, storage_quota, storage_used FROM users WHERE username = $1"#,
r#"
SELECT
id, username, email, hashed_password, created_at, updated_at,
role as "role: Role",
storage_quota, storage_used
FROM users
WHERE username = $1
"#,
username username
) )
.fetch_optional(&self.pool) .fetch_optional(&self.pool)
.await .await
.map_err(|e| CoreError::Database(e.to_string())) .map_err(|e| CoreError::Database(e.to_string()))?;
Ok(row.map(|r| User {
id: r.id,
username: r.username,
email: r.email,
hashed_password: r.hashed_password,
created_at: r.created_at,
updated_at: r.updated_at,
role: Role::from(r.role.as_str()),
storage_quota: r.storage_quota,
storage_used: r.storage_used,
}))
} }
async fn find_by_id(&self, id: Uuid) -> CoreResult<Option<User>> { async fn find_by_id(&self, id: Uuid) -> CoreResult<Option<User>> {
sqlx::query_as!( let row = sqlx::query!(
User, r#"SELECT id, username, email, hashed_password, created_at, updated_at, role, storage_quota, storage_used FROM users WHERE id = $1"#,
r#"
SELECT
id, username, email, hashed_password, created_at, updated_at,
role as "role: Role",
storage_quota, storage_used
FROM users
WHERE id = $1
"#,
id id
) )
.fetch_optional(&self.pool) .fetch_optional(&self.pool)
.await .await
.map_err(|e| CoreError::Database(e.to_string())) .map_err(|e| CoreError::Database(e.to_string()))?;
Ok(row.map(|r| User {
id: r.id,
username: r.username,
email: r.email,
hashed_password: r.hashed_password,
created_at: r.created_at,
updated_at: r.updated_at,
role: Role::from(r.role.as_str()),
storage_quota: r.storage_quota,
storage_used: r.storage_used,
}))
} }
async fn update_storage_used(&self, user_id: Uuid, bytes: i64) -> CoreResult<()> { async fn update_storage_used(&self, user_id: Uuid, bytes: i64) -> CoreResult<()> {