Refactor schedule and user repositories into modular structure

- Moved schedule repository logic into separate modules for SQLite and PostgreSQL implementations.
- Created a mapping module for shared data structures and mapping functions in the schedule repository.
- Added new mapping module for user repository to handle user data transformations.
- Implemented PostgreSQL and SQLite user repository adapters with necessary CRUD operations.
- Added tests for user repository functionality, including saving, finding, and deleting users.
This commit is contained in:
2026-03-13 01:35:14 +01:00
parent 79ced7b77b
commit eeb4e2cb41
39 changed files with 2288 additions and 2194 deletions

View File

@@ -1,275 +0,0 @@
//! SQLite and PostgreSQL adapters for ChannelRepository
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{
Channel, ChannelId, ChannelRepository, DomainError, DomainResult, RecyclePolicy,
ScheduleConfig, UserId,
};
// ============================================================================
// Row type + mapping (shared between SQLite and Postgres)
// ============================================================================
/// Flat image of one `channels` table row. Timestamps and the two config
/// columns are kept as TEXT here and decoded in `TryFrom<ChannelRow>`.
#[derive(Debug, FromRow)]
struct ChannelRow {
    // UUID stored in string form
    id: String,
    // owning user's UUID, string form
    owner_id: String,
    name: String,
    description: Option<String>,
    // timezone name string — presumably an IANA id; TODO confirm against writers
    timezone: String,
    // JSON-encoded domain ScheduleConfig
    schedule_config: String,
    // JSON-encoded domain RecyclePolicy
    recycle_policy: String,
    // RFC 3339 or "YYYY-MM-DD HH:MM:SS" text; parsed by `parse_dt`
    created_at: String,
    updated_at: String,
}
/// Parse a TEXT timestamp column into a UTC datetime.
///
/// Accepts RFC 3339 first; falls back to the bare `YYYY-MM-DD HH:MM:SS`
/// form (interpreted as UTC). Anything else becomes a `RepositoryError`
/// carrying the offending string.
fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(parsed) = DateTime::parse_from_rfc3339(s) {
        return Ok(parsed.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
impl TryFrom<ChannelRow> for Channel {
    type Error = DomainError;

    /// Decode a raw row into the domain `Channel`: UUID strings become
    /// typed ids, JSON columns become their config structs, timestamps go
    /// through `parse_dt`. Any malformed column yields `RepositoryError`.
    fn try_from(row: ChannelRow) -> Result<Self, Self::Error> {
        // Variant constructor used as a plain function to shorten the error paths.
        let repo = DomainError::RepositoryError;
        let id: ChannelId = Uuid::parse_str(&row.id)
            .map_err(|e| repo(format!("Invalid channel UUID: {}", e)))?;
        let owner_id: UserId = Uuid::parse_str(&row.owner_id)
            .map_err(|e| repo(format!("Invalid owner UUID: {}", e)))?;
        let schedule_config: ScheduleConfig = serde_json::from_str(&row.schedule_config)
            .map_err(|e| repo(format!("Invalid schedule_config JSON: {}", e)))?;
        let recycle_policy: RecyclePolicy = serde_json::from_str(&row.recycle_policy)
            .map_err(|e| repo(format!("Invalid recycle_policy JSON: {}", e)))?;
        let created_at = parse_dt(&row.created_at)?;
        let updated_at = parse_dt(&row.updated_at)?;
        Ok(Channel {
            id,
            owner_id,
            name: row.name,
            description: row.description,
            timezone: row.timezone,
            schedule_config,
            recycle_policy,
            created_at,
            updated_at,
        })
    }
}
/// Shared column list so every SELECT returns exactly the columns
/// `ChannelRow` declares.
const SELECT_COLS: &str =
    "id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at";
// ============================================================================
// SQLite adapter
// ============================================================================
/// SQLite-backed `ChannelRepository`.
///
/// UUIDs and timestamps are persisted as TEXT, config columns as JSON
/// strings; queries use `?` placeholders.
#[cfg(feature = "sqlite")]
pub struct SqliteChannelRepository {
    pool: sqlx::SqlitePool,
}
#[cfg(feature = "sqlite")]
impl SqliteChannelRepository {
    /// Wrap an existing SQLite connection pool.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
#[cfg(feature = "sqlite")]
#[async_trait]
impl ChannelRepository for SqliteChannelRepository {
    /// Fetch one channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = ?");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Option<Result<_>> -> Result<Option<_>>
        row.map(Channel::try_from).transpose()
    }
    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = ? ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Every channel, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Upsert by primary key. On conflict, `owner_id` and `created_at`
    /// are not in the update set, so they stay as first written.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
                name = excluded.name,
                description = excluded.description,
                timezone = excluded.timezone,
                schedule_config = excluded.schedule_config,
                recycle_policy = excluded.recycle_policy,
                updated_at = excluded.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Delete by id; deleting a missing id still returns `Ok(())`.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = ?")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}
// ============================================================================
// PostgreSQL adapter
// ============================================================================
/// PostgreSQL-backed `ChannelRepository`.
///
/// Same TEXT/JSON storage scheme as the SQLite adapter, but with `$n`
/// placeholders and uppercase `EXCLUDED` in the upsert.
#[cfg(feature = "postgres")]
pub struct PostgresChannelRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
#[cfg(feature = "postgres")]
impl PostgresChannelRepository {
    /// Wrap an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
}
#[cfg(feature = "postgres")]
#[async_trait]
impl ChannelRepository for PostgresChannelRepository {
    /// Fetch one channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = $1");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Option<Result<_>> -> Result<Option<_>>
        row.map(Channel::try_from).transpose()
    }
    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = $1 ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Every channel, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Upsert by primary key. On conflict, `owner_id` and `created_at`
    /// are not in the update set, so they stay as first written.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
            ON CONFLICT(id) DO UPDATE SET
                name = EXCLUDED.name,
                description = EXCLUDED.description,
                timezone = EXCLUDED.timezone,
                schedule_config = EXCLUDED.schedule_config,
                recycle_policy = EXCLUDED.recycle_policy,
                updated_at = EXCLUDED.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Delete by id; deleting a missing id still returns `Ok(())`.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = $1")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,61 @@
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{Channel, ChannelId, DomainError, RecyclePolicy, ScheduleConfig, UserId};
/// Flat image of one `channels` table row, shared by the SQLite and
/// Postgres adapters. Timestamps and config columns are kept as TEXT and
/// decoded in `TryFrom<ChannelRow>`.
#[derive(Debug, FromRow)]
pub(super) struct ChannelRow {
    // UUID stored in string form
    pub id: String,
    // owning user's UUID, string form
    pub owner_id: String,
    pub name: String,
    pub description: Option<String>,
    // timezone name string — presumably an IANA id; TODO confirm against writers
    pub timezone: String,
    // JSON-encoded domain ScheduleConfig
    pub schedule_config: String,
    // JSON-encoded domain RecyclePolicy
    pub recycle_policy: String,
    // RFC 3339 or "YYYY-MM-DD HH:MM:SS" text; parsed by `parse_dt`
    pub created_at: String,
    pub updated_at: String,
}
/// Parse a TEXT timestamp column into a UTC datetime.
///
/// Accepts RFC 3339 first; falls back to the bare `YYYY-MM-DD HH:MM:SS`
/// form (interpreted as UTC). Anything else becomes a `RepositoryError`
/// carrying the offending string.
pub(super) fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(parsed) = DateTime::parse_from_rfc3339(s) {
        return Ok(parsed.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
impl TryFrom<ChannelRow> for Channel {
    type Error = DomainError;

    /// Decode a raw row into the domain `Channel`: UUID strings become
    /// typed ids, JSON columns become their config structs, timestamps go
    /// through `parse_dt`. Any malformed column yields `RepositoryError`.
    fn try_from(row: ChannelRow) -> Result<Self, Self::Error> {
        // Variant constructor used as a plain function to shorten the error paths.
        let repo = DomainError::RepositoryError;
        let id: ChannelId = Uuid::parse_str(&row.id)
            .map_err(|e| repo(format!("Invalid channel UUID: {}", e)))?;
        let owner_id: UserId = Uuid::parse_str(&row.owner_id)
            .map_err(|e| repo(format!("Invalid owner UUID: {}", e)))?;
        let schedule_config: ScheduleConfig = serde_json::from_str(&row.schedule_config)
            .map_err(|e| repo(format!("Invalid schedule_config JSON: {}", e)))?;
        let recycle_policy: RecyclePolicy = serde_json::from_str(&row.recycle_policy)
            .map_err(|e| repo(format!("Invalid recycle_policy JSON: {}", e)))?;
        let created_at = parse_dt(&row.created_at)?;
        let updated_at = parse_dt(&row.updated_at)?;
        Ok(Channel {
            id,
            owner_id,
            name: row.name,
            description: row.description,
            timezone: row.timezone,
            schedule_config,
            recycle_policy,
            created_at,
            updated_at,
        })
    }
}
/// Shared column list so every SELECT returns exactly the columns
/// `ChannelRow` declares.
pub(super) const SELECT_COLS: &str =
    "id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at";

View File

@@ -0,0 +1,13 @@
//! SQLite and PostgreSQL adapters for ChannelRepository
mod mapping;
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "postgres")]
mod postgres;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteChannelRepository;
#[cfg(feature = "postgres")]
pub use postgres::PostgresChannelRepository;

View File

@@ -0,0 +1,100 @@
use async_trait::async_trait;
use domain::{Channel, ChannelId, ChannelRepository, DomainError, DomainResult, UserId};
use super::mapping::{ChannelRow, SELECT_COLS};
/// PostgreSQL-backed `ChannelRepository`.
///
/// Row decoding lives in `super::mapping`; this adapter only issues SQL
/// (with `$n` placeholders) and maps driver errors to `RepositoryError`.
pub struct PostgresChannelRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
impl PostgresChannelRepository {
    /// Wrap an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl ChannelRepository for PostgresChannelRepository {
    /// Fetch one channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = $1");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Option<Result<_>> -> Result<Option<_>>
        row.map(Channel::try_from).transpose()
    }
    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = $1 ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Every channel, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Upsert by primary key. On conflict, `owner_id` and `created_at`
    /// are not in the update set, so they stay as first written.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
            ON CONFLICT(id) DO UPDATE SET
                name = EXCLUDED.name,
                description = EXCLUDED.description,
                timezone = EXCLUDED.timezone,
                schedule_config = EXCLUDED.schedule_config,
                recycle_policy = EXCLUDED.recycle_policy,
                updated_at = EXCLUDED.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Delete by id; deleting a missing id still returns `Ok(())`.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = $1")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,100 @@
use async_trait::async_trait;
use domain::{Channel, ChannelId, ChannelRepository, DomainError, DomainResult, UserId};
use super::mapping::{ChannelRow, SELECT_COLS};
/// SQLite-backed `ChannelRepository`.
///
/// Row decoding lives in `super::mapping`; this adapter only issues SQL
/// (with `?` placeholders) and maps driver errors to `RepositoryError`.
pub struct SqliteChannelRepository {
    pool: sqlx::SqlitePool,
}
impl SqliteChannelRepository {
    /// Wrap an existing SQLite connection pool.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl ChannelRepository for SqliteChannelRepository {
    /// Fetch one channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = ?");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Option<Result<_>> -> Result<Option<_>>
        row.map(Channel::try_from).transpose()
    }
    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = ? ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Every channel, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }
    /// Upsert by primary key. On conflict, `owner_id` and `created_at`
    /// are not in the update set, so they stay as first written.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
                name = excluded.name,
                description = excluded.description,
                timezone = excluded.timezone,
                schedule_config = excluded.schedule_config,
                recycle_policy = excluded.recycle_policy,
                updated_at = excluded.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Delete by id; deleting a missing id still returns `Ok(())`.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = ?")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,10 @@
/// Connection details for a single Jellyfin instance.
///
/// Plain data holder — validation of the values happens wherever the
/// HTTP requests are built, not here.
#[derive(Debug, Clone)]
pub struct JellyfinConfig {
    /// e.g. `"http://192.168.1.10:8096"` — no trailing slash
    pub base_url: String,
    /// Jellyfin API key (Settings → API Keys)
    pub api_key: String,
    /// The Jellyfin user ID used for library browsing
    pub user_id: String,
}

View File

@@ -0,0 +1,35 @@
use domain::{ContentType, MediaItem, MediaItemId};
use super::models::JellyfinItem;
/// Ticks are Jellyfin's time unit: 1 tick = 100 nanoseconds → 10,000,000 ticks/sec.
pub(super) const TICKS_PER_SEC: i64 = 10_000_000;

/// Map a raw Jellyfin item to a domain `MediaItem`. Returns `None` for unknown
/// item types (e.g. Season, Series, Folder) so they are silently skipped.
pub(super) fn map_jellyfin_item(item: JellyfinItem) -> Option<MediaItem> {
    // Only playable leaf types survive the mapping; `?` drops the rest.
    let content_type = match item.item_type.as_str() {
        "Movie" => Some(ContentType::Movie),
        "Episode" => Some(ContentType::Episode),
        _ => None,
    }?;
    // A missing runtime becomes zero seconds rather than skipping the item.
    let duration_secs = match item.run_time_ticks {
        Some(ticks) => (ticks / TICKS_PER_SEC) as u32,
        None => 0,
    };
    Some(MediaItem {
        id: MediaItemId::new(item.id),
        title: item.name,
        content_type,
        duration_secs,
        description: item.overview,
        genres: item.genres.unwrap_or_default(),
        year: item.production_year,
        tags: item.tags.unwrap_or_default(),
        series_name: item.series_name,
        season_number: item.parent_index_number,
        episode_number: item.index_number,
    })
}

View File

@@ -0,0 +1,15 @@
//! Jellyfin media provider adapter
//!
//! Implements [`IMediaProvider`] by talking to the Jellyfin HTTP API.
//! The domain never sees Jellyfin-specific types — this module translates
//! between Jellyfin's API model and the domain's abstract `MediaItem`/`MediaFilter`.
#![cfg(feature = "jellyfin")]
mod config;
mod mapping;
mod models;
mod provider;
pub use config::JellyfinConfig;
pub use provider::JellyfinMediaProvider;

View File

@@ -0,0 +1,57 @@
use serde::Deserialize;
use domain::ContentType;
// ============================================================================
// Jellyfin API response types
// ============================================================================
/// Top-level envelope of a Jellyfin items query: the results arrive
/// under a PascalCase `Items` array.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinItemsResponse {
    #[serde(rename = "Items")]
    pub items: Vec<JellyfinItem>,
}
/// One item as deserialised from Jellyfin's PascalCase JSON. Optional
/// fields are simply absent for item kinds they don't apply to.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinItem {
    /// Jellyfin's opaque item identifier
    #[serde(rename = "Id")]
    pub id: String,
    /// Display title
    #[serde(rename = "Name")]
    pub name: String,
    /// Item kind string, e.g. "Movie" or "Episode"
    #[serde(rename = "Type")]
    pub item_type: String,
    /// Runtime in ticks (100 ns units); see `TICKS_PER_SEC`
    #[serde(rename = "RunTimeTicks")]
    pub run_time_ticks: Option<i64>,
    /// Synopsis / description text
    #[serde(rename = "Overview")]
    pub overview: Option<String>,
    #[serde(rename = "Genres")]
    pub genres: Option<Vec<String>>,
    #[serde(rename = "ProductionYear")]
    pub production_year: Option<u16>,
    #[serde(rename = "Tags")]
    pub tags: Option<Vec<String>>,
    /// TV show name (episodes only)
    #[serde(rename = "SeriesName")]
    pub series_name: Option<String>,
    /// Season number (episodes only)
    #[serde(rename = "ParentIndexNumber")]
    pub parent_index_number: Option<u32>,
    /// Episode number within the season (episodes only)
    #[serde(rename = "IndexNumber")]
    pub index_number: Option<u32>,
    /// Collection type for virtual library folders (e.g. "movies", "tvshows")
    #[serde(rename = "CollectionType")]
    pub collection_type: Option<String>,
    /// Total number of child items (used for Series to count episodes)
    #[serde(rename = "RecursiveItemCount")]
    pub recursive_item_count: Option<u32>,
}
/// Translate a domain `ContentType` into the string Jellyfin expects in
/// its `Type`/`IncludeItemTypes` filters.
pub(super) fn jellyfin_item_type(ct: &ContentType) -> &'static str {
    match ct {
        // Jellyfin has no native "Short" type; short films are filed as Movies
        ContentType::Movie | ContentType::Short => "Movie",
        ContentType::Episode => "Episode",
    }
}

View File

@@ -1,41 +1,17 @@
//! Jellyfin media provider adapter
//!
//! Implements [`IMediaProvider`] by talking to the Jellyfin HTTP API.
//! The domain never sees Jellyfin-specific types — this module translates
//! between Jellyfin's API model and the domain's abstract `MediaItem`/`MediaFilter`.
#![cfg(feature = "jellyfin")]
use async_trait::async_trait;
use serde::Deserialize;
use domain::{Collection, ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem, MediaItemId, SeriesSummary};
use domain::{
Collection, ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem,
MediaItemId, SeriesSummary,
};
/// Ticks are Jellyfin's time unit: 1 tick = 100 nanoseconds → 10,000,000 ticks/sec.
const TICKS_PER_SEC: i64 = 10_000_000;
// ============================================================================
// Configuration
// ============================================================================
/// Connection details for a single Jellyfin instance.
#[derive(Debug, Clone)]
pub struct JellyfinConfig {
/// e.g. `"http://192.168.1.10:8096"` — no trailing slash
pub base_url: String,
/// Jellyfin API key (Settings → API Keys)
pub api_key: String,
/// The Jellyfin user ID used for library browsing
pub user_id: String,
}
// ============================================================================
// Adapter
// ============================================================================
use super::config::JellyfinConfig;
use super::mapping::{map_jellyfin_item, TICKS_PER_SEC};
use super::models::{jellyfin_item_type, JellyfinItemsResponse};
pub struct JellyfinMediaProvider {
client: reqwest::Client,
config: JellyfinConfig,
pub(super) client: reqwest::Client,
pub(super) config: JellyfinConfig,
}
impl JellyfinMediaProvider {
@@ -48,9 +24,7 @@ impl JellyfinMediaProvider {
},
}
}
}
impl JellyfinMediaProvider {
/// Inner fetch: applies all filter fields plus an optional series name override.
async fn fetch_items_for_series(
&self,
@@ -151,7 +125,6 @@ impl JellyfinMediaProvider {
Ok(items)
}
}
#[async_trait]
@@ -396,90 +369,3 @@ impl IMediaProvider for JellyfinMediaProvider {
))
}
}
// ============================================================================
// Jellyfin API response types
// ============================================================================
#[derive(Debug, Deserialize)]
struct JellyfinItemsResponse {
#[serde(rename = "Items")]
items: Vec<JellyfinItem>,
}
#[derive(Debug, Deserialize)]
struct JellyfinItem {
#[serde(rename = "Id")]
id: String,
#[serde(rename = "Name")]
name: String,
#[serde(rename = "Type")]
item_type: String,
#[serde(rename = "RunTimeTicks")]
run_time_ticks: Option<i64>,
#[serde(rename = "Overview")]
overview: Option<String>,
#[serde(rename = "Genres")]
genres: Option<Vec<String>>,
#[serde(rename = "ProductionYear")]
production_year: Option<u16>,
#[serde(rename = "Tags")]
tags: Option<Vec<String>>,
/// TV show name (episodes only)
#[serde(rename = "SeriesName")]
series_name: Option<String>,
/// Season number (episodes only)
#[serde(rename = "ParentIndexNumber")]
parent_index_number: Option<u32>,
/// Episode number within the season (episodes only)
#[serde(rename = "IndexNumber")]
index_number: Option<u32>,
/// Collection type for virtual library folders (e.g. "movies", "tvshows")
#[serde(rename = "CollectionType")]
collection_type: Option<String>,
/// Total number of child items (used for Series to count episodes)
#[serde(rename = "RecursiveItemCount")]
recursive_item_count: Option<u32>,
}
// ============================================================================
// Mapping helpers
// ============================================================================
fn jellyfin_item_type(ct: &ContentType) -> &'static str {
match ct {
ContentType::Movie => "Movie",
ContentType::Episode => "Episode",
// Jellyfin has no native "Short" type; short films are filed as Movies
ContentType::Short => "Movie",
}
}
/// Map a raw Jellyfin item to a domain `MediaItem`. Returns `None` for unknown
/// item types (e.g. Season, Series, Folder) so they are silently skipped.
fn map_jellyfin_item(item: JellyfinItem) -> Option<MediaItem> {
let content_type = match item.item_type.as_str() {
"Movie" => ContentType::Movie,
"Episode" => ContentType::Episode,
_ => return None,
};
let duration_secs = item
.run_time_ticks
.map(|t| (t / TICKS_PER_SEC) as u32)
.unwrap_or(0);
Some(MediaItem {
id: MediaItemId::new(item.id),
title: item.name,
content_type,
duration_secs,
description: item.overview,
genres: item.genres.unwrap_or_default(),
year: item.production_year,
tags: item.tags.unwrap_or_default(),
series_name: item.series_name,
season_number: item.parent_index_number,
episode_number: item.index_number,
})
}

View File

@@ -1,447 +0,0 @@
//! SQLite and PostgreSQL adapters for ScheduleRepository
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{
ChannelId, DomainError, DomainResult, GeneratedSchedule, MediaItem, MediaItemId,
PlaybackRecord, ScheduleRepository, ScheduledSlot,
};
// ============================================================================
// Row types
// ============================================================================
/// Header row of `generated_schedules`; the slots live in their own
/// table and are joined back in via `fetch_slots`.
#[derive(Debug, FromRow)]
struct ScheduleRow {
    // schedule UUID, string form
    id: String,
    // owning channel UUID, string form
    channel_id: String,
    // validity window as TEXT timestamps; parsed by `parse_dt`
    valid_from: String,
    valid_until: String,
    // stored as i64; narrowed to the domain's u32 in `map_schedule`
    generation: i64,
}
/// One row of `scheduled_slots`, decoded into a `ScheduledSlot` by
/// `map_slot_row`.
#[derive(Debug, FromRow)]
struct SlotRow {
    id: String,
    // schedule_id selected but only used to drive the JOIN; not needed for domain type
    #[allow(dead_code)]
    schedule_id: String,
    // TEXT timestamps; parsed by `parse_dt`
    start_at: String,
    end_at: String,
    // JSON-encoded domain MediaItem
    item: String,
    source_block_id: String,
}
/// One row of `playback_records`; decoded via `TryFrom<PlaybackRecordRow>`.
#[derive(Debug, FromRow)]
struct PlaybackRecordRow {
    id: String,
    channel_id: String,
    // provider media-item id, kept as its raw string
    item_id: String,
    // TEXT timestamp; parsed by `parse_dt`
    played_at: String,
    // stored as i64; narrowed to u32 in the TryFrom impl
    generation: i64,
}
// ============================================================================
// Mapping
// ============================================================================
/// Parse a TEXT timestamp column into a UTC datetime.
///
/// Accepts RFC 3339 first; falls back to the bare `YYYY-MM-DD HH:MM:SS`
/// form (interpreted as UTC). Anything else becomes a `RepositoryError`
/// carrying the offending string.
fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(parsed) = DateTime::parse_from_rfc3339(s) {
        return Ok(parsed.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
/// Decode one `scheduled_slots` row into a domain `ScheduledSlot`,
/// validating both UUID columns and the JSON-encoded item payload.
fn map_slot_row(row: SlotRow) -> Result<ScheduledSlot, DomainError> {
    // Variant constructor used as a plain function to shorten error paths.
    let repo = DomainError::RepositoryError;
    let id = Uuid::parse_str(&row.id).map_err(|e| repo(format!("Invalid slot UUID: {}", e)))?;
    let source_block_id = Uuid::parse_str(&row.source_block_id)
        .map_err(|e| repo(format!("Invalid block UUID: {}", e)))?;
    let item: MediaItem = serde_json::from_str(&row.item)
        .map_err(|e| repo(format!("Invalid slot item JSON: {}", e)))?;
    let start_at = parse_dt(&row.start_at)?;
    let end_at = parse_dt(&row.end_at)?;
    Ok(ScheduledSlot {
        id,
        start_at,
        end_at,
        item,
        source_block_id,
    })
}
/// Assemble a schedule header row plus its slot rows into a domain
/// `GeneratedSchedule`. Fails on the first malformed column or slot.
fn map_schedule(row: ScheduleRow, slot_rows: Vec<SlotRow>) -> Result<GeneratedSchedule, DomainError> {
    let id = Uuid::parse_str(&row.id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid schedule UUID: {}", e)))?;
    let channel_id = Uuid::parse_str(&row.channel_id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
    let valid_from = parse_dt(&row.valid_from)?;
    let valid_until = parse_dt(&row.valid_until)?;
    // Slot errors are surfaced after the header fields, matching field order.
    let slots = slot_rows
        .into_iter()
        .map(map_slot_row)
        .collect::<Result<Vec<ScheduledSlot>, DomainError>>()?;
    Ok(GeneratedSchedule {
        id,
        channel_id,
        valid_from,
        valid_until,
        // DB stores i64; the domain counter is u32 — cast truncates out-of-range values
        generation: row.generation as u32,
        slots,
    })
}
impl TryFrom<PlaybackRecordRow> for PlaybackRecord {
    type Error = DomainError;

    /// Decode one `playback_records` row, validating both UUID columns
    /// and the stored timestamp.
    fn try_from(row: PlaybackRecordRow) -> Result<Self, Self::Error> {
        let id = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
        let channel_id = Uuid::parse_str(&row.channel_id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
        let played_at = parse_dt(&row.played_at)?;
        Ok(PlaybackRecord {
            id,
            channel_id,
            item_id: MediaItemId::new(row.item_id),
            played_at,
            // DB stores i64; domain counter is u32 — cast truncates out-of-range values
            generation: row.generation as u32,
        })
    }
}
// ============================================================================
// SQLite adapter
// ============================================================================
/// SQLite-backed `ScheduleRepository`. Schedules are stored as a header
/// row plus one `scheduled_slots` row per slot; playback history is
/// append-only in `playback_records`.
#[cfg(feature = "sqlite")]
pub struct SqliteScheduleRepository {
    pool: sqlx::SqlitePool,
}
#[cfg(feature = "sqlite")]
impl SqliteScheduleRepository {
    /// Wrap an existing SQLite connection pool.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
    /// Load every slot row belonging to one schedule, ordered by start time.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        sqlx::query_as(
            "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = ? ORDER BY start_at",
        )
        .bind(schedule_id)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))
    }
}
#[cfg(feature = "sqlite")]
#[async_trait]
impl ScheduleRepository for SqliteScheduleRepository {
    /// Schedule whose half-open validity window covers `at`
    /// (valid_from <= at < valid_until), if any.
    async fn find_active(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        // RFC 3339 text compares chronologically, so string comparison works here
        let at_str = at.to_rfc3339();
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = ? AND valid_from <= ? AND valid_until > ? \
             LIMIT 1",
        )
        .bind(channel_id.to_string())
        .bind(&at_str)
        .bind(&at_str)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Most recent schedule for the channel, by `valid_from`.
    async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = ? ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Upsert the schedule header, then replace the full slot set.
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
        // Upsert the schedule header
        sqlx::query(
            r#"
            INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
                valid_from = excluded.valid_from,
                valid_until = excluded.valid_until,
                generation = excluded.generation
            "#,
        )
        .bind(schedule.id.to_string())
        .bind(schedule.channel_id.to_string())
        .bind(schedule.valid_from.to_rfc3339())
        .bind(schedule.valid_until.to_rfc3339())
        .bind(schedule.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Replace all slots (delete-then-insert is safe here; schedule saves are
        // infrequent and atomic within a single-writer SQLite connection)
        sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = ?")
            .bind(schedule.id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        for slot in &schedule.slots {
            let item_json = serde_json::to_string(&slot.item).map_err(|e| {
                DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
            })?;
            sqlx::query(
                r#"
                INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
                VALUES (?, ?, ?, ?, ?, ?)
                "#,
            )
            .bind(slot.id.to_string())
            .bind(schedule.id.to_string())
            .bind(slot.start_at.to_rfc3339())
            .bind(slot.end_at.to_rfc3339())
            .bind(&item_json)
            .bind(slot.source_block_id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        }
        Ok(())
    }
    /// Full playback history for a channel, newest first.
    async fn find_playback_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>> {
        let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
            "SELECT id, channel_id, item_id, played_at, generation \
             FROM playback_records WHERE channel_id = ? ORDER BY played_at DESC",
        )
        .bind(channel_id.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(PlaybackRecord::try_from).collect()
    }
    /// Insert one playback record; a duplicate id is silently ignored.
    async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
        sqlx::query(
            r#"
            INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO NOTHING
            "#,
        )
        .bind(record.id.to_string())
        .bind(record.channel_id.to_string())
        .bind(record.item_id.as_ref())
        .bind(record.played_at.to_rfc3339())
        .bind(record.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}
// ============================================================================
// PostgreSQL adapter
// ============================================================================
/// PostgreSQL adapter for `ScheduleRepository`, backed by a connection pool.
#[cfg(feature = "postgres")]
pub struct PostgresScheduleRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
#[cfg(feature = "postgres")]
impl PostgresScheduleRepository {
    /// Create a repository over an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
    /// Load all slot rows belonging to one schedule, ordered by start time.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        sqlx::query_as(
            "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = $1 ORDER BY start_at",
        )
        .bind(schedule_id)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))
    }
}
#[cfg(feature = "postgres")]
#[async_trait]
impl ScheduleRepository for PostgresScheduleRepository {
    /// Find the schedule whose validity window covers `at`
    /// (`valid_from <= at < valid_until`) for the given channel.
    ///
    /// String comparison is valid because all timestamps are written via
    /// `to_rfc3339()` on UTC datetimes, so they share a single format.
    async fn find_active(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        let at_str = at.to_rfc3339();
        // ORDER BY makes the pick deterministic when validity windows overlap
        // (a bare LIMIT 1 returned an arbitrary matching row). The single $2
        // parameter is reused for both bounds.
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = $1 AND valid_from <= $2 AND valid_until > $2 \
             ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .bind(&at_str)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Latest schedule for the channel by `valid_from`, whether or not it is
    /// currently active.
    async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = $1 ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Upsert the schedule header, then replace all of its slots.
    ///
    /// Runs inside a transaction so a failure mid-way can never leave a
    /// header whose slots were deleted but not re-inserted.
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
        let mut tx = self
            .pool
            .begin()
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        sqlx::query(
            r#"
            INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
            VALUES ($1, $2, $3, $4, $5)
            ON CONFLICT(id) DO UPDATE SET
                valid_from = EXCLUDED.valid_from,
                valid_until = EXCLUDED.valid_until,
                generation = EXCLUDED.generation
            "#,
        )
        .bind(schedule.id.to_string())
        .bind(schedule.channel_id.to_string())
        .bind(schedule.valid_from.to_rfc3339())
        .bind(schedule.valid_until.to_rfc3339())
        .bind(schedule.generation as i64)
        .execute(&mut *tx)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = $1")
            .bind(schedule.id.to_string())
            .execute(&mut *tx)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        for slot in &schedule.slots {
            let item_json = serde_json::to_string(&slot.item).map_err(|e| {
                DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
            })?;
            sqlx::query(
                r#"
                INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
                VALUES ($1, $2, $3, $4, $5, $6)
                "#,
            )
            .bind(slot.id.to_string())
            .bind(schedule.id.to_string())
            .bind(slot.start_at.to_rfc3339())
            .bind(slot.end_at.to_rfc3339())
            .bind(&item_json)
            .bind(slot.source_block_id.to_string())
            .execute(&mut *tx)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        }
        tx.commit()
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Return every playback record for the channel, most recent first.
    async fn find_playback_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>> {
        let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
            "SELECT id, channel_id, item_id, played_at, generation \
             FROM playback_records WHERE channel_id = $1 ORDER BY played_at DESC",
        )
        .bind(channel_id.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(PlaybackRecord::try_from).collect()
    }
    /// Persist one playback record; re-inserting an existing id is a no-op.
    async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
        sqlx::query(
            r#"
            INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
            VALUES ($1, $2, $3, $4, $5)
            ON CONFLICT(id) DO NOTHING
            "#,
        )
        .bind(record.id.to_string())
        .bind(record.channel_id.to_string())
        .bind(record.item_id.as_ref())
        .bind(record.played_at.to_rfc3339())
        .bind(record.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,109 @@
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{DomainError, GeneratedSchedule, MediaItem, MediaItemId, PlaybackRecord, ScheduledSlot};
// ============================================================================
// Row types
// ============================================================================
/// Raw `generated_schedules` row; UUIDs and timestamps are stored as TEXT
/// and parsed into domain types during mapping.
#[derive(Debug, FromRow)]
pub(super) struct ScheduleRow {
    pub id: String,          // schedule UUID as text
    pub channel_id: String,  // owning channel UUID as text
    pub valid_from: String,  // RFC 3339 (or SQLite "YYYY-MM-DD HH:MM:SS")
    pub valid_until: String, // RFC 3339 (or SQLite "YYYY-MM-DD HH:MM:SS")
    pub generation: i64,
}
/// Raw `scheduled_slots` row; `item` holds the serialized media item JSON.
#[derive(Debug, FromRow)]
pub(super) struct SlotRow {
    pub id: String,
    // schedule_id selected but only used to drive the JOIN; not needed for domain type
    #[allow(dead_code)]
    pub schedule_id: String,
    pub start_at: String, // RFC 3339 (or SQLite "YYYY-MM-DD HH:MM:SS")
    pub end_at: String,   // RFC 3339 (or SQLite "YYYY-MM-DD HH:MM:SS")
    pub item: String,     // JSON-encoded MediaItem
    pub source_block_id: String, // UUID of the block this slot was generated from
}
/// Raw `playback_records` row; converted via `TryFrom` into the domain
/// `PlaybackRecord`.
#[derive(Debug, FromRow)]
pub(super) struct PlaybackRecordRow {
    pub id: String,         // record UUID as text
    pub channel_id: String, // channel UUID as text
    pub item_id: String,    // opaque media item identifier
    pub played_at: String,  // RFC 3339 (or SQLite "YYYY-MM-DD HH:MM:SS")
    pub generation: i64,
}
// ============================================================================
// Mapping
// ============================================================================
/// Parse a stored datetime string into a UTC `DateTime`.
///
/// RFC 3339 is tried first (the format all writes use); the bare SQLite
/// "YYYY-MM-DD HH:MM:SS" form is accepted as a fallback.
pub(super) fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
        return Ok(dt.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
/// Convert a raw slot row into the domain `ScheduledSlot`.
pub(super) fn map_slot_row(row: SlotRow) -> Result<ScheduledSlot, DomainError> {
    let slot_id = Uuid::parse_str(&row.id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid slot UUID: {}", e)))?;
    let block_id = Uuid::parse_str(&row.source_block_id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
    // The item column stores the full media item as JSON.
    let media_item: MediaItem = serde_json::from_str(&row.item)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e)))?;
    let start_at = parse_dt(&row.start_at)?;
    let end_at = parse_dt(&row.end_at)?;
    Ok(ScheduledSlot {
        id: slot_id,
        start_at,
        end_at,
        item: media_item,
        source_block_id: block_id,
    })
}
/// Assemble a domain `GeneratedSchedule` from its header row and slot rows.
pub(super) fn map_schedule(
    row: ScheduleRow,
    slot_rows: Vec<SlotRow>,
) -> Result<GeneratedSchedule, DomainError> {
    let schedule_id = Uuid::parse_str(&row.id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid schedule UUID: {}", e)))?;
    let channel = Uuid::parse_str(&row.channel_id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
    let valid_from = parse_dt(&row.valid_from)?;
    let valid_until = parse_dt(&row.valid_until)?;
    // Fail on the first slot row that does not map cleanly.
    let slots = slot_rows
        .into_iter()
        .map(map_slot_row)
        .collect::<Result<Vec<_>, _>>()?;
    Ok(GeneratedSchedule {
        id: schedule_id,
        channel_id: channel,
        valid_from,
        valid_until,
        generation: row.generation as u32,
        slots,
    })
}
impl TryFrom<PlaybackRecordRow> for PlaybackRecord {
    type Error = DomainError;

    /// Build a domain playback record from its raw DB row.
    fn try_from(row: PlaybackRecordRow) -> Result<Self, Self::Error> {
        let record_id = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
        let channel = Uuid::parse_str(&row.channel_id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
        let played_at = parse_dt(&row.played_at)?;
        Ok(Self {
            id: record_id,
            channel_id: channel,
            item_id: MediaItemId::new(row.item_id),
            played_at,
            generation: row.generation as u32,
        })
    }
}

View File

@@ -0,0 +1,13 @@
//! SQLite and PostgreSQL adapters for ScheduleRepository

// Shared row types and mapping helpers. Gated so that builds with neither
// backend feature enabled don't compile dead code (all of mapping's items
// are only consumed by the sqlite/postgres submodules).
#[cfg(any(feature = "sqlite", feature = "postgres"))]
mod mapping;

#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "postgres")]
mod postgres;

#[cfg(feature = "sqlite")]
pub use sqlite::SqliteScheduleRepository;
#[cfg(feature = "postgres")]
pub use postgres::PostgresScheduleRepository;

View File

@@ -0,0 +1,165 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use domain::{ChannelId, DomainError, DomainResult, GeneratedSchedule, PlaybackRecord, ScheduleRepository};
use super::mapping::{map_schedule, PlaybackRecordRow, ScheduleRow, SlotRow};
/// PostgreSQL adapter for `ScheduleRepository`, backed by a connection pool.
pub struct PostgresScheduleRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
impl PostgresScheduleRepository {
    /// Create a repository over an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
    /// Load all slot rows belonging to one schedule, ordered by start time.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        sqlx::query_as(
            "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = $1 ORDER BY start_at",
        )
        .bind(schedule_id)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))
    }
}
#[async_trait]
impl ScheduleRepository for PostgresScheduleRepository {
async fn find_active(
&self,
channel_id: ChannelId,
at: DateTime<Utc>,
) -> DomainResult<Option<GeneratedSchedule>> {
let at_str = at.to_rfc3339();
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules \
WHERE channel_id = $1 AND valid_from <= $2 AND valid_until > $3 \
LIMIT 1",
)
.bind(channel_id.to_string())
.bind(&at_str)
.bind(&at_str)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules \
WHERE channel_id = $1 ORDER BY valid_from DESC LIMIT 1",
)
.bind(channel_id.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
sqlx::query(
r#"
INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT(id) DO UPDATE SET
valid_from = EXCLUDED.valid_from,
valid_until = EXCLUDED.valid_until,
generation = EXCLUDED.generation
"#,
)
.bind(schedule.id.to_string())
.bind(schedule.channel_id.to_string())
.bind(schedule.valid_from.to_rfc3339())
.bind(schedule.valid_until.to_rfc3339())
.bind(schedule.generation as i64)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = $1")
.bind(schedule.id.to_string())
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
for slot in &schedule.slots {
let item_json = serde_json::to_string(&slot.item).map_err(|e| {
DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
})?;
sqlx::query(
r#"
INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
VALUES ($1, $2, $3, $4, $5, $6)
"#,
)
.bind(slot.id.to_string())
.bind(schedule.id.to_string())
.bind(slot.start_at.to_rfc3339())
.bind(slot.end_at.to_rfc3339())
.bind(&item_json)
.bind(slot.source_block_id.to_string())
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
}
Ok(())
}
async fn find_playback_history(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<PlaybackRecord>> {
let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
"SELECT id, channel_id, item_id, played_at, generation \
FROM playback_records WHERE channel_id = $1 ORDER BY played_at DESC",
)
.bind(channel_id.to_string())
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
rows.into_iter().map(PlaybackRecord::try_from).collect()
}
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
sqlx::query(
r#"
INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT(id) DO NOTHING
"#,
)
.bind(record.id.to_string())
.bind(record.channel_id.to_string())
.bind(record.item_id.as_ref())
.bind(record.played_at.to_rfc3339())
.bind(record.generation as i64)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
Ok(())
}
}

View File

@@ -0,0 +1,168 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use domain::{ChannelId, DomainError, DomainResult, GeneratedSchedule, PlaybackRecord, ScheduleRepository};
use super::mapping::{map_schedule, PlaybackRecordRow, ScheduleRow, SlotRow};
/// SQLite adapter for `ScheduleRepository`, backed by a connection pool.
pub struct SqliteScheduleRepository {
    pool: sqlx::SqlitePool,
}
impl SqliteScheduleRepository {
    /// Create a repository over an existing SQLite connection pool.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
    /// Load all slot rows belonging to one schedule, ordered by start time.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        sqlx::query_as(
            "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = ? ORDER BY start_at",
        )
        .bind(schedule_id)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))
    }
}
#[async_trait]
impl ScheduleRepository for SqliteScheduleRepository {
    /// Find the schedule whose validity window covers `at`
    /// (`valid_from <= at < valid_until`) for the given channel.
    ///
    /// String comparison is valid because all timestamps are written via
    /// `to_rfc3339()` on UTC datetimes, so they share a single format.
    async fn find_active(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        let at_str = at.to_rfc3339();
        // ORDER BY makes the pick deterministic when validity windows overlap
        // (a bare LIMIT 1 returned an arbitrary matching row).
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = ? AND valid_from <= ? AND valid_until > ? \
             ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .bind(&at_str)
        .bind(&at_str)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Latest schedule for the channel by `valid_from`, whether or not it is
    /// currently active.
    async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = ? ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Upsert the schedule header, then replace all of its slots.
    ///
    /// The header upsert, slot delete and slot re-inserts run inside one
    /// transaction, so a failure part-way can never leave a schedule header
    /// whose slots were deleted but not yet re-inserted. (Previously this
    /// relied on an assumed single-writer connection, which a pool does not
    /// guarantee.)
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
        let mut tx = self
            .pool
            .begin()
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Upsert the schedule header
        sqlx::query(
            r#"
            INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
                valid_from = excluded.valid_from,
                valid_until = excluded.valid_until,
                generation = excluded.generation
            "#,
        )
        .bind(schedule.id.to_string())
        .bind(schedule.channel_id.to_string())
        .bind(schedule.valid_from.to_rfc3339())
        .bind(schedule.valid_until.to_rfc3339())
        .bind(schedule.generation as i64)
        .execute(&mut *tx)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Replace all slots via delete-then-insert; atomicity is provided by
        // the surrounding transaction.
        sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = ?")
            .bind(schedule.id.to_string())
            .execute(&mut *tx)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        for slot in &schedule.slots {
            let item_json = serde_json::to_string(&slot.item).map_err(|e| {
                DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
            })?;
            sqlx::query(
                r#"
                INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
                VALUES (?, ?, ?, ?, ?, ?)
                "#,
            )
            .bind(slot.id.to_string())
            .bind(schedule.id.to_string())
            .bind(slot.start_at.to_rfc3339())
            .bind(slot.end_at.to_rfc3339())
            .bind(&item_json)
            .bind(slot.source_block_id.to_string())
            .execute(&mut *tx)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        }
        tx.commit()
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Return every playback record for the channel, most recent first.
    async fn find_playback_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>> {
        let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
            "SELECT id, channel_id, item_id, played_at, generation \
             FROM playback_records WHERE channel_id = ? ORDER BY played_at DESC",
        )
        .bind(channel_id.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(PlaybackRecord::try_from).collect()
    }
    /// Persist one playback record; re-inserting an existing id is a no-op.
    async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
        sqlx::query(
            r#"
            INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO NOTHING
            "#,
        )
        .bind(record.id.to_string())
        .bind(record.channel_id.to_string())
        .bind(record.item_id.as_ref())
        .bind(record.played_at.to_rfc3339())
        .bind(record.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,42 @@
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{DomainError, Email, User};
/// Raw `users` row shared by the SQLite and Postgres adapters; `id` and
/// `created_at` are stored as TEXT and parsed during mapping.
#[derive(Debug, FromRow)]
pub(super) struct UserRow {
    pub id: String,      // user UUID as text
    pub subject: String,
    pub email: String,
    pub password_hash: Option<String>, // absent for users with no local password hash
    pub created_at: String, // RFC 3339 (or SQLite "YYYY-MM-DD HH:MM:SS")
}
impl TryFrom<UserRow> for User {
    type Error = DomainError;

    /// Convert a raw `users` row into the domain `User`.
    fn try_from(row: UserRow) -> Result<Self, Self::Error> {
        let user_id = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
        // RFC 3339 is tried first; SQLite's bare "YYYY-MM-DD HH:MM:SS"
        // format is accepted as a fallback.
        let created_at = match DateTime::parse_from_rfc3339(&row.created_at) {
            Ok(dt) => dt.with_timezone(&Utc),
            Err(_) => chrono::NaiveDateTime::parse_from_str(&row.created_at, "%Y-%m-%d %H:%M:%S")
                .map(|dt| dt.and_utc())
                .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime: {}", e)))?,
        };
        let email = Email::try_from(row.email)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid email in DB: {}", e)))?;
        Ok(User::with_id(
            user_id,
            row.subject,
            email,
            row.password_hash,
            created_at,
        ))
    }
}

View File

@@ -0,0 +1,13 @@
//! SQLite and PostgreSQL implementations of UserRepository

// Row type and User mapping shared by both backends. Gated so that builds
// with neither backend feature enabled don't compile dead code.
#[cfg(any(feature = "sqlite", feature = "postgres"))]
mod mapping;

#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "postgres")]
mod postgres;

#[cfg(feature = "sqlite")]
pub use sqlite::SqliteUserRepository;
#[cfg(feature = "postgres")]
pub use postgres::PostgresUserRepository;

View File

@@ -0,0 +1,102 @@
use async_trait::async_trait;
use uuid::Uuid;
use domain::{DomainError, DomainResult, User, UserRepository};
use super::mapping::UserRow;
/// PostgreSQL adapter for UserRepository, backed by a connection pool.
///
/// `Clone` is cheap: it only clones the pool handle.
#[derive(Clone)]
pub struct PostgresUserRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
impl PostgresUserRepository {
    /// Create a repository over an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl UserRepository for PostgresUserRepository {
async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
let id_str = id.to_string();
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE id = $1",
)
.bind(&id_str)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
row.map(User::try_from).transpose()
}
async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>> {
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE subject = $1",
)
.bind(subject)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
row.map(User::try_from).transpose()
}
async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE email = $1",
)
.bind(email)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
row.map(User::try_from).transpose()
}
async fn save(&self, user: &User) -> DomainResult<()> {
let id = user.id.to_string();
let created_at = user.created_at.to_rfc3339();
sqlx::query(
r#"
INSERT INTO users (id, subject, email, password_hash, created_at)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT(id) DO UPDATE SET
subject = excluded.subject,
email = excluded.email,
password_hash = excluded.password_hash
"#,
)
.bind(&id)
.bind(&user.subject)
.bind(user.email.as_ref())
.bind(&user.password_hash)
.bind(&created_at)
.execute(&self.pool)
.await
.map_err(|e| {
let msg = e.to_string();
if msg.contains("unique constraint") || msg.contains("duplicate key") {
DomainError::UserAlreadyExists(user.email.as_ref().to_string())
} else {
DomainError::RepositoryError(msg)
}
})?;
Ok(())
}
async fn delete(&self, id: Uuid) -> DomainResult<()> {
let id_str = id.to_string();
sqlx::query("DELETE FROM users WHERE id = $1")
.bind(&id_str)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
Ok(())
}
}

View File

@@ -1,65 +1,22 @@
//! SQLite and PostgreSQL implementations of UserRepository
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{DomainError, DomainResult, Email, User, UserRepository};
use domain::{DomainError, DomainResult, User, UserRepository};
/// Row type for database query results (shared between SQLite and PostgreSQL)
#[derive(Debug, FromRow)]
struct UserRow {
id: String,
subject: String,
email: String,
password_hash: Option<String>,
created_at: String,
}
impl TryFrom<UserRow> for User {
type Error = DomainError;
fn try_from(row: UserRow) -> Result<Self, Self::Error> {
let id = Uuid::parse_str(&row.id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
let created_at = DateTime::parse_from_rfc3339(&row.created_at)
.map(|dt| dt.with_timezone(&Utc))
.or_else(|_| {
// Fallback for SQLite datetime format
chrono::NaiveDateTime::parse_from_str(&row.created_at, "%Y-%m-%d %H:%M:%S")
.map(|dt| dt.and_utc())
})
.map_err(|e| DomainError::RepositoryError(format!("Invalid datetime: {}", e)))?;
let email = Email::try_from(row.email)
.map_err(|e| DomainError::RepositoryError(format!("Invalid email in DB: {}", e)))?;
Ok(User::with_id(
id,
row.subject,
email,
row.password_hash,
created_at,
))
}
}
use super::mapping::UserRow;
/// SQLite adapter for UserRepository
#[cfg(feature = "sqlite")]
#[derive(Clone)]
pub struct SqliteUserRepository {
pool: sqlx::SqlitePool,
}
#[cfg(feature = "sqlite")]
impl SqliteUserRepository {
pub fn new(pool: sqlx::SqlitePool) -> Self {
Self { pool }
}
}
#[cfg(feature = "sqlite")]
#[async_trait]
impl UserRepository for SqliteUserRepository {
async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
@@ -145,9 +102,10 @@ impl UserRepository for SqliteUserRepository {
}
}
#[cfg(all(test, feature = "sqlite"))]
#[cfg(test)]
mod tests {
use super::*;
use domain::Email;
use crate::db::run_migrations;
use k_core::db::{DatabaseConfig, DatabasePool, connect};
@@ -223,102 +181,3 @@ mod tests {
assert!(found.is_none());
}
}
/// PostgreSQL adapter for UserRepository, backed by a connection pool.
///
/// `Clone` is cheap: it only clones the pool handle.
#[cfg(feature = "postgres")]
#[derive(Clone)]
pub struct PostgresUserRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
#[cfg(feature = "postgres")]
impl PostgresUserRepository {
    /// Create a repository over an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
}
#[cfg(feature = "postgres")]
#[async_trait]
impl UserRepository for PostgresUserRepository {
    /// Look up a user by primary key.
    async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
        let id_str = id.to_string();
        let row: Option<UserRow> = sqlx::query_as(
            "SELECT id, subject, email, password_hash, created_at FROM users WHERE id = $1",
        )
        .bind(&id_str)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(User::try_from).transpose()
    }
    /// Look up a user by `subject`.
    async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>> {
        let row: Option<UserRow> = sqlx::query_as(
            "SELECT id, subject, email, password_hash, created_at FROM users WHERE subject = $1",
        )
        .bind(subject)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(User::try_from).transpose()
    }
    /// Look up a user by email address.
    async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
        let row: Option<UserRow> = sqlx::query_as(
            "SELECT id, subject, email, password_hash, created_at FROM users WHERE email = $1",
        )
        .bind(email)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(User::try_from).transpose()
    }
    /// Insert or update a user (upsert keyed on `id`); `created_at` is never
    /// overwritten. A uniqueness violation on another column is reported as
    /// `UserAlreadyExists`.
    async fn save(&self, user: &User) -> DomainResult<()> {
        let id = user.id.to_string();
        let created_at = user.created_at.to_rfc3339();
        sqlx::query(
            r#"
            INSERT INTO users (id, subject, email, password_hash, created_at)
            VALUES ($1, $2, $3, $4, $5)
            ON CONFLICT(id) DO UPDATE SET
                subject = excluded.subject,
                email = excluded.email,
                password_hash = excluded.password_hash
            "#,
        )
        .bind(&id)
        .bind(&user.subject)
        .bind(user.email.as_ref())
        .bind(&user.password_hash)
        .bind(&created_at)
        .execute(&self.pool)
        .await
        .map_err(|e| {
            // Prefer the SQLSTATE code (23505 = unique_violation) over
            // message sniffing; the substring checks remain as a fallback
            // for drivers/wrappers that don't surface the code.
            let is_unique_violation = matches!(
                &e,
                sqlx::Error::Database(db) if db.code().as_deref() == Some("23505")
            );
            let msg = e.to_string();
            if is_unique_violation
                || msg.contains("unique constraint")
                || msg.contains("duplicate key")
            {
                DomainError::UserAlreadyExists(user.email.as_ref().to_string())
            } else {
                DomainError::RepositoryError(msg)
            }
        })?;
        Ok(())
    }
    /// Delete a user by id; deleting a non-existent id is not an error.
    async fn delete(&self, id: Uuid) -> DomainResult<()> {
        let id_str = id.to_string();
        sqlx::query("DELETE FROM users WHERE id = $1")
            .bind(&id_str)
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}