Files
k-tv/k-tv-backend/infra/src/schedule_repository/sqlite.rs
Gabriel Kaszewski eeb4e2cb41 Refactor schedule and user repositories into modular structure
- Moved schedule repository logic into separate modules for SQLite and PostgreSQL implementations.
- Created a mapping module for shared data structures and mapping functions in the schedule repository.
- Added new mapping module for user repository to handle user data transformations.
- Implemented PostgreSQL and SQLite user repository adapters with necessary CRUD operations.
- Added tests for user repository functionality, including saving, finding, and deleting users.
2026-03-13 01:35:14 +01:00

169 lines
5.7 KiB
Rust

use async_trait::async_trait;
use chrono::{DateTime, Utc};
use domain::{ChannelId, DomainError, DomainResult, GeneratedSchedule, PlaybackRecord, ScheduleRepository};
use super::mapping::{map_schedule, PlaybackRecordRow, ScheduleRow, SlotRow};
/// SQLite-backed implementation of the domain's schedule repository,
/// built on an `sqlx` connection pool.
pub struct SqliteScheduleRepository {
    // Shared async connection pool; cloned handles refer to the same pool.
    pool: sqlx::SqlitePool,
}
impl SqliteScheduleRepository {
    /// Builds a repository that executes all queries against `pool`.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }

    /// Loads every slot row belonging to `schedule_id`, ordered by start time,
    /// mapping any database failure into a `DomainError::RepositoryError`.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        let sql = "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = ? ORDER BY start_at";
        let rows = sqlx::query_as(sql)
            .bind(schedule_id)
            .fetch_all(&self.pool)
            .await;
        rows.map_err(|err| DomainError::RepositoryError(err.to_string()))
    }
}
#[async_trait]
impl ScheduleRepository for SqliteScheduleRepository {
async fn find_active(
&self,
channel_id: ChannelId,
at: DateTime<Utc>,
) -> DomainResult<Option<GeneratedSchedule>> {
let at_str = at.to_rfc3339();
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules \
WHERE channel_id = ? AND valid_from <= ? AND valid_until > ? \
LIMIT 1",
)
.bind(channel_id.to_string())
.bind(&at_str)
.bind(&at_str)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules \
WHERE channel_id = ? ORDER BY valid_from DESC LIMIT 1",
)
.bind(channel_id.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
// Upsert the schedule header
sqlx::query(
r#"
INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
valid_from = excluded.valid_from,
valid_until = excluded.valid_until,
generation = excluded.generation
"#,
)
.bind(schedule.id.to_string())
.bind(schedule.channel_id.to_string())
.bind(schedule.valid_from.to_rfc3339())
.bind(schedule.valid_until.to_rfc3339())
.bind(schedule.generation as i64)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// Replace all slots (delete-then-insert is safe here; schedule saves are
// infrequent and atomic within a single-writer SQLite connection)
sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = ?")
.bind(schedule.id.to_string())
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
for slot in &schedule.slots {
let item_json = serde_json::to_string(&slot.item).map_err(|e| {
DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
})?;
sqlx::query(
r#"
INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
VALUES (?, ?, ?, ?, ?, ?)
"#,
)
.bind(slot.id.to_string())
.bind(schedule.id.to_string())
.bind(slot.start_at.to_rfc3339())
.bind(slot.end_at.to_rfc3339())
.bind(&item_json)
.bind(slot.source_block_id.to_string())
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
}
Ok(())
}
async fn find_playback_history(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<PlaybackRecord>> {
let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
"SELECT id, channel_id, item_id, played_at, generation \
FROM playback_records WHERE channel_id = ? ORDER BY played_at DESC",
)
.bind(channel_id.to_string())
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
rows.into_iter().map(PlaybackRecord::try_from).collect()
}
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
sqlx::query(
r#"
INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(id) DO NOTHING
"#,
)
.bind(record.id.to_string())
.bind(record.channel_id.to_string())
.bind(record.item_id.as_ref())
.bind(record.played_at.to_rfc3339())
.bind(record.generation as i64)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
Ok(())
}
}