Compare commits

..

97 Commits

Author SHA1 Message Date
e3a65d8052 fix: use StdRng for shuffling indices in fill_block function 2026-03-20 23:12:44 +01:00
f45ca77b79 fix: format code for improved readability and consistency 2026-03-20 01:57:22 +01:00
a5c31ef8a9 fix(frontend): restore plain type labels in grouped sidebar 2026-03-20 01:56:11 +01:00
3662a5ab9e fix(frontend): suppress shows when type filter active; clarify grouped type labels 2026-03-20 01:53:13 +01:00
137251fe37 fix(frontend): restore ALL sentinel in sidebar to fix hydration mismatch 2026-03-20 01:33:27 +01:00
8101734c63 feat(frontend): add useLibraryShows and useLibrarySeasons hooks 2026-03-20 01:29:55 +01:00
6cf8a6d5e3 feat(frontend): implement grouped/drilldown view in library grid 2026-03-20 01:23:33 +01:00
c5317cb639 feat(frontend): add viewMode/drilldown state to library page 2026-03-20 01:21:54 +01:00
5f66493558 feat(api): add /library/shows and /library/shows/:name/seasons routes + season filter 2026-03-20 01:19:31 +01:00
5cc4cde223 feat(frontend): add ShowTile, SeasonTile, BreadcrumbNav components 2026-03-20 01:19:08 +01:00
5b89481104 feat(frontend): extend schedule dialog to support show/series selection 2026-03-20 01:19:00 +01:00
33338ac100 feat(frontend): make library sidebar drilldown-aware 2026-03-20 01:18:52 +01:00
66eef2c82e feat(frontend): add useLibraryShows and useLibrarySeasons hooks 2026-03-20 01:18:34 +01:00
6f1a4e19d3 feat(infra): implement list_shows, list_seasons + season_number filter 2026-03-20 01:16:02 +01:00
dd69470ee4 feat(frontend): add ShowSummary, SeasonSummary types + library shows/seasons API methods 2026-03-20 01:14:43 +01:00
23722a771b feat(domain): add ShowSummary, SeasonSummary types + ILibraryRepository methods 2026-03-20 01:13:00 +01:00
4cf7fdc1c2 feat(frontend): add library sync interval + sync now to admin settings panel 2026-03-20 00:38:04 +01:00
91271bd83c feat(frontend): library page, components, and schedule/add-to-block dialogs (tasks 11-14) 2026-03-20 00:35:40 +01:00
49c7f7abd7 feat(frontend): add useLibrarySearch, useLibrarySyncStatus, useTriggerSync, useAdminSettings hooks 2026-03-20 00:30:44 +01:00
978ad1cdb0 feat(frontend): add library paged types, syncStatus/triggerSync/admin API methods 2026-03-20 00:30:03 +01:00
e1a885dcc9 fix(api): mount admin settings routes at /admin/settings (not /admin/library/settings) 2026-03-20 00:28:24 +01:00
e849548e9e feat(api): replace live-provider library routes with DB-backed routes; add sync + admin settings endpoints 2026-03-20 00:27:06 +01:00
d92d629fbc feat(api): wire library_repo, app_settings_repo, library_sync_adapter into AppState; start scheduler 2026-03-20 00:23:25 +01:00
aa5e3c28aa feat(api): add library sync background task 2026-03-20 00:23:22 +01:00
64138b07e4 feat(infra): add FullSyncAdapter for library sync 2026-03-20 00:19:45 +01:00
6732576d06 feat(infra): add SqliteAppSettingsRepository 2026-03-20 00:17:05 +01:00
a3a421c0ac feat(infra): add SqliteLibraryRepository 2026-03-20 00:15:01 +01:00
c6c93766c7 refactor(domain): remove redundant IAppSettingsRepository re-export; add TODO for Jellyfin enrichment 2026-03-20 00:11:30 +01:00
e101b44fa5 feat(domain): add library types, LibrarySyncAdapter, ILibraryRepository, IAppSettingsRepository; extend MediaItem with thumbnail_url and collection_id 2026-03-20 00:08:10 +01:00
666b1f2753 feat(db): add missing indexes to library migrations 2026-03-20 00:03:27 +01:00
a7c3f1f92e feat(db): add library_items, library_sync_log, app_settings migrations 2026-03-20 00:01:34 +01:00
187cd064fb docs: add library management implementation plan 2026-03-19 23:57:05 +01:00
4cc0e155bd docs: add library management design spec 2026-03-19 23:43:37 +01:00
175d0bb0bb fix(tests): add missing refresh_expiry_days param to JwtConfig::new in tests 2026-03-19 23:03:36 +01:00
311fdd4006 feat: multi-instance provider support
- provider_configs: add id TEXT PK; migrate existing rows (provider_type becomes id)
- local_files_index: add provider_id column + index; scope all queries per instance
- ProviderConfigRow: add id field; add get_by_id to trait
- LocalIndex::new: add provider_id param; all SQL scoped by provider_id
- factory: thread provider_id through build_local_files_bundle
- AppState.local_index: Option<Arc<LocalIndex>> → HashMap<String, Arc<LocalIndex>>
- admin_providers: restructured routes (POST /admin/providers create, PUT/DELETE /{id}, POST /test)
- admin_providers: use row.id as registry key for jellyfin and local_files
- files.rescan: optional ?provider=<id> query param
- frontend: add id to ProviderConfig, update api/hooks, new multi-instance panel UX
2026-03-19 22:54:41 +01:00
373e1c7c0a fix: remove default-run entry from Cargo.toml 2026-03-19 22:34:09 +01:00
d2412da057 feat(auth): refresh tokens + remember me
Backend: add refresh JWT (30d, token_type claim), POST /auth/refresh
endpoint (rotates token pair), remember_me on login, JWT_REFRESH_EXPIRY_DAYS
env var. Extractors now reject refresh tokens on protected routes.

Frontend: sessionStorage for non-remembered sessions, localStorage +
refresh token for remembered sessions. Transparent 401 recovery in
api.ts (retry once after refresh). Remember me checkbox on login page
with security note when checked.
2026-03-19 22:24:26 +01:00
8bdd5e2277 fix(infra): deserialize channel schedule_config via ScheduleConfigCompat for V1 compat 2026-03-17 14:56:09 +01:00
26343b08f8 fix: test mocks for new trait methods, V1 schedule_config re-import, stale comments 2026-03-17 14:53:23 +01:00
6d350940b9 feat(frontend): schedule history dialog with rollback, wire ConfigHistorySheet 2026-03-17 14:48:39 +01:00
ba6abad602 feat(frontend): weekly grid editor with day tabs and copy shortcut 2026-03-17 14:46:34 +01:00
c0da075f03 feat(frontend): config history sheet with pin and restore 2026-03-17 14:45:00 +01:00
6bfb148e39 feat(frontend): config history and schedule rollback hooks 2026-03-17 14:43:12 +01:00
45c05b5720 fix: snapshot existing config before update; rollback returns 200 2026-03-17 14:41:57 +01:00
bd498b9bcb feat(frontend): ScheduleConfig V2 types, weekday schema, export update 2026-03-17 14:39:19 +01:00
20e80ac28e feat: config history — auto-snapshot on update, list/pin/restore endpoints 2026-03-17 14:39:09 +01:00
ad3a73f061 feat: schedule history — list, detail, rollback endpoints 2026-03-17 14:38:51 +01:00
c0fb8f69de feat(infra): implement config snapshot repository methods 2026-03-17 14:32:04 +01:00
8b8e8a8d8c fix(mcp): update block mutations for ScheduleConfig V2 day_blocks 2026-03-17 14:32:02 +01:00
05d2d77515 feat(infra): schedule history list, get-by-id, delete-after methods 2026-03-17 14:32:02 +01:00
8b701745bf fix(api): update block lookups to use all_blocks() after ScheduleConfig V2 2026-03-17 14:31:24 +01:00
a79ee1b228 feat(domain): 7-day generation window, day_blocks lookup by weekday 2026-03-17 14:29:10 +01:00
d8e39c66be feat(infra): add channel_config_snapshots migration 2026-03-17 14:28:35 +01:00
055937fc3d fix(domain): use ChannelId type in patch_config_snapshot_label 2026-03-17 14:27:41 +01:00
1338f6bace feat(domain): extend ChannelRepository and ScheduleRepository ports for history 2026-03-17 14:25:51 +01:00
995f5b1339 feat(domain): add ChannelConfigSnapshot entity 2026-03-17 14:25:49 +01:00
22bee4f32c feat(domain): ScheduleConfig V2 day-keyed weekly grid with V1 compat 2026-03-17 14:21:00 +01:00
5f1421f4bd fix(domain): improve Weekday tests and document all() ordering 2026-03-17 14:18:13 +01:00
f8e8e85cb0 feat(domain): add Weekday enum with From<chrono::Weekday> 2026-03-17 14:16:16 +01:00
c550790287 feat: add find_last_slot_per_block method to schedule repositories and update related logic 2026-03-17 13:02:20 +01:00
d8dd047020 feat: implement local-files feature with various enhancements and cleanup 2026-03-17 03:00:39 +01:00
c4d2e48f73 fix(frontend): resolve all eslint warnings and errors
- block-timeline: ref updates moved to useLayoutEffect
- channel-card, guide/page: Date.now() wrapped in useMemo + suppress purity rule
- auth-context: lazy localStorage init (removes setState-in-effect)
- use-channel-order: lazy localStorage init (removes setState-in-effect)
- use-idle: start timer on mount without calling resetIdle (removes setState-in-effect)
- use-subtitles, transcode-settings-dialog: inline eslint-disable on exact violating line
- providers: block-level eslint-disable for tokenRef closure in useState initializer
- edit-channel-sheet: remove unused minsToTime and BlockContent imports
- docs/page: escape unescaped quote and apostrophe entities
2026-03-17 02:40:32 +01:00
8ed8da2d90 refactor(frontend): extract logic to hooks, split inline components
Area 1 (tv/page.tsx 964→423 lines):
- hooks: use-fullscreen, use-idle, use-volume, use-quality, use-subtitles,
  use-channel-input, use-channel-passwords, use-tv-keyboard
- components: SubtitlePicker, VolumeControl, QualityPicker, TopControlBar,
  LogoWatermark, AutoplayPrompt, ChannelNumberOverlay, TvBaseLayer

Area 2 (edit-channel-sheet.tsx 1244→678 lines):
- hooks: use-channel-form (all form state + reset logic)
- lib/schemas.ts: extracted Zod schemas + extractErrors
- components: AlgorithmicFilterEditor, RecyclePolicyEditor, WebhookEditor,
  AccessSettingsEditor, LogoEditor

Area 3 (dashboard/page.tsx 406→261 lines):
- hooks: use-channel-order, use-import-channel, use-regenerate-all
- lib/channel-export.ts: pure export utility
- components: DashboardHeader
2026-03-17 02:25:02 +01:00
ce92b43205 fix: show toast and redirect on expired session (401)
Fix stale closure bug in QueryProvider (token ref) and add warning toast so users know why they were redirected to login.
2026-03-17 01:37:11 +01:00
7244349e97 refactor: allow unused variable warning for db_pool in build_provider_registry 2026-03-16 04:41:08 +01:00
6aa86b6666 refactor: extract router/serve to server.rs, main is now thin orchestrator 2026-03-16 04:39:36 +01:00
e7bd66ffdf refactor: extract background task spawning to startup.rs 2026-03-16 04:37:49 +01:00
b25ae95626 refactor: extract provider registry to provider_registry.rs 2026-03-16 04:36:41 +01:00
5949ffc63b refactor: extract DB init to database.rs 2026-03-16 04:34:08 +01:00
29e654cabc refactor: extract telemetry init to telemetry.rs 2026-03-16 04:33:01 +01:00
9d792249c9 feat: implement transcode settings repository and integrate with local-files provider 2026-03-16 04:24:39 +01:00
50df852416 fix: remove sqlx from API layer, read TTL from TranscodeManager, init local_files from DB on startup 2026-03-16 04:08:52 +01:00
d88afbfe2e fix: sync cleanup_ttl_hours to transcode_settings table on provider save 2026-03-16 04:02:58 +01:00
0637504974 fix: local_files hot-reload via RwLock state fields and rebuild_registry 2026-03-16 03:58:59 +01:00
712cf1deb9 fix: local_files hot-reload via RwLock state fields + rebuild_registry local_files case 2026-03-16 03:58:36 +01:00
89036ba62d feat: admin provider UI (types, hooks, guard, settings panel, conditional admin nav) 2026-03-16 03:38:37 +01:00
87f94fcc51 feat: admin provider routes (list/update/delete/test) with admin middleware 2026-03-16 03:34:54 +01:00
46333853d2 feat: ConfigSource enum, RwLock provider_registry, is_admin in UserResponse, available_provider_types 2026-03-16 03:30:44 +01:00
0e51b7c0f1 feat: implement SqliteProviderConfigRepository, build_provider_config_repository factory 2026-03-16 03:26:02 +01:00
4ca8690a89 feat: add admin + provider_configs migration 2026-03-16 03:24:15 +01:00
d80d4e9741 feat: add is_admin to User, count_users, ProviderConfigRepository trait, admin migration 2026-03-16 03:22:00 +01:00
b35054f23e feat(tv-page): add subtitle track toggle functionality 2026-03-16 02:42:24 +01:00
abcf872d2d docs: update README files to include new environment variables and local files feature 2026-03-16 02:29:42 +01:00
e805028d46 feat: add server-sent events for logging and activity tracking
- Implemented a custom tracing layer (`AppLogLayer`) to capture log events and broadcast them to SSE clients.
- Created admin routes for streaming server logs and listing recent activity logs.
- Added an activity log repository interface and SQLite implementation for persisting activity events.
- Integrated activity logging into user authentication and channel CRUD operations.
- Developed frontend components for displaying server logs and activity logs in the admin panel.
- Enhanced the video player with a stats overlay for monitoring streaming metrics.
2026-03-16 02:21:40 +01:00
4df6522952 feat(channel-card): add confirmation dialog for schedule regeneration 2026-03-16 01:50:05 +01:00
40f698acb7 refactor: clean up styles and improve layout in dashboard and edit channel components
- Removed unnecessary class names for buttons in ChannelCard and DashboardPage components.
- Updated layout styles in RootLayout to apply dark mode by default.
- Refactored edit-channel-sheet to streamline block editor and filter editor components.
- Adjusted duration input fields to reflect minutes instead of seconds in AlgorithmicFilterEditor.
- Enhanced the structure of the EditChannelSheet for better readability and maintainability.
2026-03-16 01:40:28 +01:00
e76167134b feat: add webhook body template and headers support for channels 2026-03-16 01:10:26 +01:00
db461db270 webhooks (#1)
Reviewed-on: #1
2026-03-15 23:51:41 +00:00
2ba9bfbf2f feat(channel-card): update TV link to include channel ID in query parameters 2026-03-15 23:59:07 +01:00
f1e2c727aa feat(tests): add unit tests for auto-scheduler functionality 2026-03-15 23:56:52 +01:00
1102e385f3 feat(transcoding): add FFmpeg HLS transcoding support
- Introduced `TranscodeManager` for managing on-demand transcoding of local video files.
- Added configuration options for transcoding in `Config` and `LocalFilesConfig`.
- Implemented new API routes for managing transcoding settings, stats, and cache.
- Updated `LocalFilesProvider` to support transcoding capabilities.
- Created frontend components for managing transcode settings and displaying stats.
- Added database migration for transcode settings.
- Enhanced existing routes and DTOs to accommodate new transcoding features.
2026-03-15 00:34:23 +01:00
ead65e6be2 feat: implement multi-provider support in media library
- Introduced IProviderRegistry to manage multiple media providers.
- Updated AppState to use provider_registry instead of a single media_provider.
- Refactored library routes to support provider-specific queries for collections, series, genres, and items.
- Enhanced ProgrammingBlock to include provider_id for algorithmic and manual content types.
- Modified frontend components to allow selection of providers and updated API calls to include provider parameters.
- Adjusted hooks and types to accommodate provider-specific functionality.
2026-03-14 23:59:21 +01:00
c53892159a feat(mcp): implement media channel management and scheduling features 2026-03-14 23:19:24 +01:00
f7f4d92376 feat(docs): enhance documentation with Docker deployment and local files provider sections 2026-03-14 04:10:57 +01:00
cf92cc49c2 feat(stream): add stream quality selection and update stream URL handling 2026-03-14 04:03:54 +01:00
8f42164bce feat: add local files provider with indexing and rescan functionality
- Implemented LocalFilesProvider to manage local video files.
- Added LocalIndex for in-memory and SQLite-backed indexing of video files.
- Introduced scanning functionality to detect video files and extract metadata.
- Added API endpoints for listing collections, genres, and series based on provider capabilities.
- Enhanced existing routes to check for provider capabilities before processing requests.
- Updated frontend to utilize provider capabilities for conditional rendering of UI elements.
- Implemented rescan functionality to refresh the local files index.
- Added database migration for local files index schema.
2026-03-14 03:44:32 +01:00
9b6bcfc566 refactor(guide): improve code formatting and readability in page.tsx 2026-03-14 03:02:53 +01:00
186 changed files with 18686 additions and 2159 deletions

3
.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
transcode/
.worktrees/
.superpowers/

View File

@@ -70,4 +70,5 @@ docker compose -f compose.yml -f compose.traefik.yml up -d
| `NEXT_PUBLIC_API_URL` | frontend build arg | Baked in at build time — must point to the public backend URL |
| `API_URL` | frontend runtime env | Server-side only (Next.js API routes). Set in compose. |
| `DATABASE_URL` | backend | `sqlite:///app/data/k-tv.db` or postgres DSN |
| `SESSION_SECRET` | backend | Change in production |
| `JWT_SECRET` | backend | JWT signing key — change in production (min 32 chars) |
| `COOKIE_SECRET` | backend | OIDC state cookie encryption key — change in production (min 64 chars) |

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,255 @@
# Library Management — Design Spec
**Date:** 2026-03-19
**Status:** Approved
## Context
K-TV currently has ephemeral library browsing: metadata is always fetched live from providers (Jellyfin, local files) on demand, only accessible through the block editor filter UI. There is no persistent library, no cross-provider browsing, and no way to schedule directly from browsing media.
This feature introduces an in-house library that syncs and stores media metadata from all providers into k-tv's own DB, then surfaces it through a first-class `/library` page where users can browse, filter, multi-select, and schedule media directly onto channels.
---
## Data Model
### Migration `20260319000002_add_library_tables.sql`
**`library_items` table**
| Column | Type | Notes |
|---|---|---|
| `id` | TEXT PK | `"{provider_id}::{raw_item_id}"` — double-colon, matches existing registry prefix format |
| `provider_id` | TEXT | `"jellyfin"`, `"local"`, etc. |
| `external_id` | TEXT | Raw ID from provider (for re-fetching) |
| `title` | TEXT | |
| `content_type` | TEXT | `"movie"` \| `"episode"` \| `"short"` |
| `duration_secs` | INTEGER | |
| `series_name` | TEXT | NULL for movies |
| `season_number` | INTEGER | NULL for movies |
| `episode_number` | INTEGER | NULL for movies |
| `year` | INTEGER | |
| `genres` | TEXT | JSON array |
| `tags` | TEXT | JSON array |
| `collection_id` | TEXT | Provider-specific collection ID |
| `collection_name` | TEXT | Human-readable name (synced from provider) |
| `collection_type` | TEXT | e.g. `"movies"`, `"tvshows"` |
| `thumbnail_url` | TEXT | Provider-served image URL; re-fetched on every sync |
| `synced_at` | TEXT | ISO8601 timestamp |
`thumbnail_url` is refreshed on every full sync. Frontend must handle broken image URLs gracefully (show a placeholder on load error) since URLs may break if provider URL or API key changes between syncs.
**`library_sync_log` table**
| Column | Type | Notes |
|---|---|---|
| `id` | INTEGER PK AUTOINCREMENT | |
| `provider_id` | TEXT | |
| `started_at` | TEXT | ISO8601 |
| `finished_at` | TEXT | ISO8601, NULL while running |
| `items_found` | INTEGER | |
| `status` | TEXT | `"running"` \| `"done"` \| `"error"` |
| `error_msg` | TEXT | NULL on success |
### Migration `20260319000003_add_app_settings.sql`
**`app_settings` table** — general-purpose key-value store for admin-configurable settings. Co-exists with the existing `transcode_settings` singleton table (that table is not modified). Seeded with: `INSERT OR IGNORE INTO app_settings(key, value) VALUES ('library_sync_interval_hours', '6')`.
| Column | Type | Notes |
|---|---|---|
| `key` | TEXT PK | |
| `value` | TEXT | Bare JSON scalar stored as text (e.g. `6`, not `"6"`) |
`GET /admin/settings` returns parsed values: `{ "library_sync_interval_hours": 6 }` (number, not string). Backend parses with `serde_json::Value` on read; frontend receives typed JSON.
---
## Backend Architecture
### Sync Engine
**Layer placement:**
- `LibraryItem`, `LibrarySyncResult`, `LibrarySyncAdapter` trait, and `ILibraryRepository` trait live in **`domain/src/library.rs`**
- `FullSyncAdapter` (impl) and `SqliteLibraryRepository` (impl) live in **`infra/src/library/`**
The `LibrarySyncAdapter` domain trait does **not** take a DB pool — DB writes are an infra concern handled entirely inside the impl:
```rust
// domain/src/library.rs
#[async_trait]
pub trait LibrarySyncAdapter: Send + Sync {
async fn sync_provider(
&self,
provider: &dyn IMediaProvider,
provider_id: &str,
) -> LibrarySyncResult;
}
#[async_trait]
pub trait ILibraryRepository: Send + Sync {
async fn search(&self, filter: LibrarySearchFilter) -> Vec<LibraryItem>;
async fn get_by_id(&self, id: &str) -> Option<LibraryItem>;
async fn list_collections(&self, provider_id: Option<&str>) -> Vec<LibraryCollection>;
async fn list_series(&self, provider_id: Option<&str>) -> Vec<String>;
async fn list_genres(&self, content_type: Option<ContentType>, provider_id: Option<&str>) -> Vec<String>;
async fn upsert_items(&self, provider_id: &str, items: Vec<LibraryItem>) -> DomainResult<()>;
async fn clear_provider(&self, provider_id: &str) -> DomainResult<()>;
async fn log_sync_start(&self, provider_id: &str) -> i64; // returns log row id
async fn log_sync_finish(&self, log_id: i64, result: &LibrarySyncResult);
async fn latest_sync_status(&self) -> Vec<LibrarySyncLogEntry>;
async fn is_sync_running(&self, provider_id: &str) -> bool;
}
```
`FullSyncAdapter` in infra holds `Arc<dyn ILibraryRepository>` and calls repo methods internally — no DB pool leaks into domain.
```
infra/src/library/
mod.rs
full_sync.rs -- FullSyncAdapter impl: calls list_collections for names/types,
fetch_items(&MediaFilter::default()), repo.clear_provider + repo.upsert_items
repository.rs -- SqliteLibraryRepository impl of ILibraryRepository
scheduler.rs -- tokio interval task; 10s startup delay (hardcoded); reads interval from
app_settings on each tick via AppSettingsRepository
```
**AppState** gains:
```rust
library_sync_adapter: Arc<dyn LibrarySyncAdapter>,
library_repo: Arc<dyn ILibraryRepository>,
```
### Sync Concurrency Guard
Before starting a sync for a provider, the scheduler and `POST /library/sync` handler both call `repo.is_sync_running(provider_id)`. If `true`, the scheduler skips that provider for this tick; the HTTP endpoint returns **409 Conflict** with body `{ "error": "sync already running for provider" }`. This prevents the truncate+insert race.
### Admin Settings
- `GET /admin/settings` — returns `app_settings` rows as parsed JSON object. Requires `is_admin = true` (`AdminUser` extractor).
- `PUT /admin/settings` — partial update (only provided keys updated). Requires `is_admin = true`. Scheduler reads new value on next tick.
### Library API Routes (all require authenticated user)
| Endpoint | Notes |
|---|---|
| `GET /library/items?type=&series[]=&collection=&genre=&decade=&min_duration=&max_duration=&search=&provider=&offset=0&limit=50` | DB-backed; returns `{ items: LibraryItemResponse[], total: u32 }` |
| `GET /library/items/:id` | Single item |
| `GET /library/collections?provider=` | `{ id, name, collection_type }[]` from DB |
| `GET /library/series?provider=` | `String[]` from DB |
| `GET /library/genres?type=&provider=` | `String[]` from DB |
| `GET /library/sync/status` | `LibrarySyncLogEntry[]` (latest per provider) |
| `POST /library/sync` | Fires sync; 409 if already running; requires `is_admin = true` |
| `GET /admin/settings` | `{ key: value }` map (parsed); requires `is_admin = true` |
| `PUT /admin/settings` | Partial update; requires `is_admin = true` |
**Existing library route API contract is unchanged** for all params except `offset`/`limit` (new). Frontend `use-library.ts` hooks continue working without modification.
---
## Frontend Architecture
### New route: `/library`
Added to main nav alongside Dashboard and TV.
```
app/(main)/library/
page.tsx -- layout, search/filter state, pagination state, multi-select state
components/
library-sidebar.tsx -- provider picker, type, genre chips, series picker, decade, duration range
library-grid.tsx -- paginated grid of LibraryItemCard
library-item-card.tsx -- thumbnail (with broken-image fallback placeholder), title,
duration badge, content type, checkbox
schedule-from-library-dialog.tsx -- modal (see flow below)
add-to-block-dialog.tsx -- modal (see flow below)
sync-status-bar.tsx -- "Last synced 2h ago · Jellyfin" strip at top
```
### New hooks
```
hooks/use-library-search.ts -- useLibrarySearch(filter, page): wraps GET /library/items with
offset/limit pagination. Query key: ["library", "search", filter, page].
onSuccess of useTriggerSync: invalidate ["library", "search"] and ["library", "sync"].
hooks/use-library-sync.ts -- useLibrarySyncStatus() → ["library", "sync"],
useTriggerSync() → POST /library/sync; on success invalidates
["library", "search"] and ["library", "sync"]
hooks/use-admin-settings.ts -- useAdminSettings(), useUpdateAdminSettings()
```
Existing `use-library.ts` and its four hooks (`useCollections`, `useSeries`, `useGenres`, `useLibraryItems`) are **unchanged** — still used by `AlgorithmicFilterEditor` in the block editor.
### Schedule From Library Flow
1. User selects one or more items → floating action bar at bottom
2. "Schedule on channel" → `ScheduleFromLibraryDialog` modal
3. Modal fields (in order — time/days/strategy disabled until channel is selected):
- **Channel** picker (required; enables remaining fields once selected)
- **Days**: Mon–Sun checkboxes
- **Time**: `NaiveTime` input interpreted in the selected channel's timezone. Timezone label displayed inline (e.g. "20:00 Europe/Warsaw"). Disabled until channel is selected.
- **Duration**: For single item, defaults to `ceil(duration_secs / 60)` minutes shown in UI. For multi-item, user sets manually. Rounding to nearest minute shown explicitly (e.g. "1h 35m (rounded from 1h 34m 47s)").
- **Fill strategy**: Sequential (default for episodic) | Random | Best Fit
4. Preview: *"3 blocks will be created on [Channel] — Mon/Wed/Fri at 20:00 [Europe/Warsaw], Sequential"*
5. Confirm → `PUT /channels/:id` merging new `ProgrammingBlock` entries into `schedule_config.day_blocks`:
- Series / episodic: **Algorithmic** block with `series_names: [series]`
- Specific item(s): **Manual** block with those item IDs
### Add To Block Flow
1. User selects items → "Add to block" from action bar
2. `AddToBlockDialog`:
- Pick channel
- Pick existing **manual** block: populated from `useChannel(id)` by collecting all blocks across all days with `content.type === "manual"`, **deduplicated by block `id`** (same block appearing Mon + Wed shown once)
3. Confirm → appends item IDs to that block. Since the same block object (by `id`) may appear in multiple days in `schedule_config.day_blocks`, the PUT updates **all day entries that contain that block id** — the block is mutated wherever it appears, consistently.
### Admin Settings UI
Settings panel (cog icon in dashboard header, alongside existing transcode settings) gains a "Library sync" section:
- Number input: "Sync interval (hours)"
- "Sync now" button (visible to admin users only; calls `POST /library/sync`; disabled + shows spinner while running)
- Status: "Last synced: [time] · [N] items" per provider from `GET /library/sync/status`
---
## Key Files Modified
**Backend:**
- `domain/src/lib.rs` — add `library` module
- `domain/src/library.rs` — new: `LibraryItem`, `LibraryCollection`, `LibrarySyncResult`, `LibrarySyncAdapter` trait, `ILibraryRepository` trait, `LibrarySearchFilter`, `LibrarySyncLogEntry`
- `infra/src/library/full_sync.rs` — `FullSyncAdapter` impl
- `infra/src/library/repository.rs` — `SqliteLibraryRepository` impl
- `infra/src/library/scheduler.rs` — tokio interval task, 10s startup delay
- `api/src/routes/library.rs` — DB-backed handlers + sync/admin routes
- `api/src/routes/mod.rs` — wire admin settings routes
- `api/src/main.rs` — start sync scheduler task
- `api/src/state.rs` — add `library_sync_adapter: Arc<dyn LibrarySyncAdapter>`, `library_repo: Arc<dyn ILibraryRepository>`
- `migrations_sqlite/20260319000002_add_library_tables.sql`
- `migrations_sqlite/20260319000003_add_app_settings.sql`
**Frontend:**
- `lib/types.ts` — add `LibraryItem`, `LibraryCollection`, `SyncLogEntry`, `AdminSettings`
- `lib/api.ts` — add `api.library.items(filter, page)`, `api.library.syncStatus()`, `api.library.triggerSync()`, `api.admin.getSettings()`, `api.admin.updateSettings(partial)`
- `app/(main)/layout.tsx` — add Library nav link
- New files per structure above
---
## Verification
1. **Sync**: `POST /library/sync` → 200. `GET /library/sync/status` shows `done` with item count. `library_items` rows in DB have `collection_name` and `thumbnail_url` populated.
2. **Sync dedup**: Second `POST /library/sync` while first is running → 409 Conflict.
3. **Library API pagination**: `GET /library/items?offset=0&limit=10` returns 10 items + `total`. `?offset=10&limit=10` returns next page.
4. **Provider filter**: `GET /library/items?provider=jellyfin` returns only Jellyfin items.
5. **Collections**: `GET /library/collections` returns `{ id, name, collection_type }` objects.
6. **Admin guard**: `POST /library/sync` and `PUT /admin/settings` with non-admin user → 403.
7. **Admin settings**: `PUT /admin/settings { "library_sync_interval_hours": 2 }` → `GET /admin/settings` returns `{ "library_sync_interval_hours": 2 }` (number). Scheduler uses new interval.
8. **Library UI**: `/library` page loads, sidebar filters update grid, pagination controls work. `sync-status-bar` shows last sync time.
9. **Broken thumbnail**: Item with a broken `thumbnail_url` shows fallback placeholder in `library-item-card`.
10. **Multi-select action bar**: Select 3 items → action bar appears with "Schedule on channel" and "Add to block".
11. **Schedule flow — time gating**: Time input is disabled until channel is selected; timezone shown next to input after channel selected.
12. **Schedule flow — rounding**: Single-item selection shows rounded duration with note in dialog.
13. **Schedule flow — confirm**: Series scheduled → Dashboard shows Algorithmic blocks on correct days with `series_names` filter.
14. **Add to block — dedup**: Block appearing on Mon+Wed shown once in picker. Confirming updates both days.
15. **Cache invalidation**: After `useTriggerSync()` resolves, `["library", "search"]` and `["library", "sync"]` query keys are invalidated, grid refreshes.
16. **Block editor unchanged**: `AlgorithmicFilterEditor` works; `useLibraryItems` in `use-library.ts` unchanged.
17. **Regression**: `cargo test` passes.

302
k-tv-backend/Cargo.lock generated
View File

@@ -78,18 +78,23 @@ dependencies = [
"chrono",
"domain",
"dotenvy",
"handlebars",
"infra",
"k-core",
"rand 0.8.5",
"reqwest",
"serde",
"serde_json",
"serde_qs",
"thiserror 2.0.17",
"time",
"tokio",
"tokio-stream",
"tokio-util",
"tower",
"tower-http",
"tracing",
"tracing-subscriber",
"uuid",
]
@@ -536,14 +541,38 @@ dependencies = [
"syn",
]
[[package]]
name = "darling"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core 0.20.11",
"darling_macro 0.20.11",
]
[[package]]
name = "darling"
version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0"
dependencies = [
"darling_core",
"darling_macro",
"darling_core 0.21.3",
"darling_macro 0.21.3",
]
[[package]]
name = "darling_core"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
]
[[package]]
@@ -560,13 +589,24 @@ dependencies = [
"syn",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core 0.20.11",
"quote",
"syn",
]
[[package]]
name = "darling_macro"
version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
dependencies = [
"darling_core",
"darling_core 0.21.3",
"quote",
"syn",
]
@@ -598,6 +638,37 @@ dependencies = [
"serde_core",
]
[[package]]
name = "derive_builder"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
dependencies = [
"derive_builder_macro",
]
[[package]]
name = "derive_builder_core"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
dependencies = [
"darling 0.20.11",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "derive_builder_macro"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
dependencies = [
"derive_builder_core",
"syn",
]
[[package]]
name = "digest"
version = "0.10.7"
@@ -631,6 +702,7 @@ dependencies = [
"email_address",
"rand 0.8.5",
"serde",
"serde_json",
"thiserror 2.0.17",
"tokio",
"url",
@@ -849,6 +921,21 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
@@ -922,6 +1009,7 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
@@ -1011,6 +1099,22 @@ dependencies = [
"tracing",
]
[[package]]
name = "handlebars"
version = "6.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b3f9296c208515b87bd915a2f5d1163d4b3f863ba83337d7713cf478055948e"
dependencies = [
"derive_builder",
"log",
"num-order",
"pest",
"pest_derive",
"serde",
"serde_json",
"thiserror 2.0.17",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
@@ -1372,6 +1476,7 @@ dependencies = [
"async-nats",
"async-trait",
"axum-extra",
"base64 0.22.1",
"chrono",
"domain",
"futures-core",
@@ -1389,6 +1494,7 @@ dependencies = [
"tracing",
"url",
"uuid",
"walkdir",
]
[[package]]
@@ -1578,6 +1684,28 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
[[package]]
name = "mcp"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"chrono",
"domain",
"dotenvy",
"infra",
"k-core",
"rmcp",
"schemars 0.8.22",
"serde",
"serde_json",
"thiserror 2.0.17",
"tokio",
"tracing",
"tracing-subscriber",
"uuid",
]
[[package]]
name = "md-5"
version = "0.10.6"
@@ -1713,6 +1841,21 @@ dependencies = [
"num-traits",
]
[[package]]
name = "num-modular"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17bb261bf36fa7d83f4c294f834e91256769097b3cb505d44831e0a179ac647f"
[[package]]
name = "num-order"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "537b596b97c40fcf8056d153049eb22f481c17ebce72a513ec9286e4986d1bb6"
dependencies = [
"num-modular",
]
[[package]]
name = "num-traits"
version = "0.2.19"
@@ -1915,6 +2058,12 @@ dependencies = [
"subtle",
]
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pem"
version = "3.0.6"
@@ -1940,6 +2089,49 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "pest"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0848c601009d37dfa3430c4666e147e49cdcf1b92ecd3e63657d8a5f19da662"
dependencies = [
"memchr",
"ucd-trie",
]
[[package]]
name = "pest_derive"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11f486f1ea21e6c10ed15d5a7c77165d0ee443402f0780849d1768e7d9d6fe77"
dependencies = [
"pest",
"pest_generator",
]
[[package]]
name = "pest_generator"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8040c4647b13b210a963c1ed407c1ff4fdfa01c31d6d2a098218702e6664f94f"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pest_meta"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89815c69d36021a140146f26659a81d6c2afa33d216d736dd4be5381a7362220"
dependencies = [
"pest",
"sha2",
]
[[package]]
name = "phf"
version = "0.12.1"
@@ -2341,6 +2533,38 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "rmcp"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33a0110d28bd076f39e14bfd5b0340216dd18effeb5d02b43215944cc3e5c751"
dependencies = [
"base64 0.21.7",
"chrono",
"futures",
"paste",
"pin-project-lite",
"rmcp-macros",
"schemars 0.8.22",
"serde",
"serde_json",
"thiserror 2.0.17",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "rmcp-macros"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6e2b2fd7497540489fa2db285edd43b7ed14c49157157438664278da6e42a7a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "rsa"
version = "0.9.9"
@@ -2468,6 +2692,15 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schannel"
version = "0.1.28"
@@ -2477,6 +2710,18 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "schemars"
version = "0.8.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615"
dependencies = [
"dyn-clone",
"schemars_derive",
"serde",
"serde_json",
]
[[package]]
name = "schemars"
version = "0.9.0"
@@ -2501,6 +2746,18 @@ dependencies = [
"serde_json",
]
[[package]]
name = "schemars_derive"
version = "0.8.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
"syn",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -2590,6 +2847,17 @@ dependencies = [
"syn",
]
[[package]]
name = "serde_derive_internals"
version = "0.29.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.148"
@@ -2691,7 +2959,7 @@ version = "3.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c"
dependencies = [
"darling",
"darling 0.21.3",
"proc-macro2",
"quote",
"syn",
@@ -3280,6 +3548,7 @@ dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
"tokio-util",
]
[[package]]
@@ -3447,6 +3716,12 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
[[package]]
name = "ucd-trie"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
[[package]]
name = "unicode-bidi"
version = "0.3.18"
@@ -3539,6 +3814,16 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"
@@ -3675,6 +3960,15 @@ dependencies = [
"wasite",
]
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "windows-core"
version = "0.62.2"

View File

@@ -1,3 +1,3 @@
[workspace]
members = ["domain", "infra", "api"]
members = ["domain", "infra", "api", "mcp"]
resolver = "2"

View File

@@ -10,8 +10,12 @@ FROM debian:bookworm-slim
WORKDIR /app
# Install OpenSSL (required for many Rust networking crates) and CA certificates
RUN apt-get update && apt-get install -y libssl3 ca-certificates && rm -rf /var/lib/apt/lists/*
# Install OpenSSL, CA certs, and ffmpeg (provides ffprobe for local-files duration scanning)
RUN apt-get update && apt-get install -y --no-install-recommends \
libssl3 \
ca-certificates \
ffmpeg \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /app/target/release/api .

View File

@@ -11,7 +11,7 @@ The backend is a Cargo workspace with three crates following Hexagonal (Ports &
```
k-tv-backend/
├── domain/ # Pure business logic — no I/O, no frameworks
├── infra/ # Adapters: SQLite/Postgres repositories, Jellyfin HTTP client
├── infra/ # Adapters: SQLite/Postgres repositories, Jellyfin HTTP client, local files
└── api/ # Axum HTTP server — routes, DTOs, startup wiring
```
@@ -79,11 +79,20 @@ OIDC state (CSRF token, PKCE verifier, nonce) is stored in a short-lived encrypt
If Jellyfin variables are not set, the server starts normally but schedule generation endpoints return an error. Channel CRUD and auth still work.
### Local Files (optional — requires `local-files` feature)
| Variable | Default | Description |
|----------|---------|-------------|
| `LOCAL_FILES_DIR` | — | Absolute path to local video library root. Enables the local-files provider when set. |
| `TRANSCODE_DIR` | — | Directory for FFmpeg HLS transcode cache. Enables transcoding when set. |
| `TRANSCODE_CLEANUP_TTL_HOURS` | `24` | Hours after last access before a transcode cache entry is deleted. |
### CORS & Production
| Variable | Default | Description |
|----------|---------|-------------|
| `CORS_ALLOWED_ORIGINS` | `http://localhost:5173` | Comma-separated allowed origins |
| `BASE_URL` | `http://localhost:3000` | Public base URL used to build stream URLs for local files |
| `PRODUCTION` | `false` | Enforces minimum secret lengths when `true` |
## Feature Flags
@@ -100,6 +109,7 @@ default = ["sqlite", "auth-jwt", "jellyfin"]
| `auth-jwt` | JWT Bearer token authentication |
| `auth-oidc` | OpenID Connect integration |
| `jellyfin` | Jellyfin media provider adapter |
| `local-files` | Local filesystem media provider with optional FFmpeg transcoding |
## API Reference
@@ -137,11 +147,49 @@ All endpoints are under `/api/v1/`. Endpoints marked **Bearer** require an `Auth
| `GET` | `/channels/:id/epg?from=&until=` | Bearer | EPG slots overlapping a time window (RFC3339 datetimes) |
| `GET` | `/channels/:id/stream` | Bearer | `307` redirect to the current item's stream URL — `204` if no-signal |
### Other
### Library
All endpoints require Bearer auth and return `501 Not Implemented` if the active provider lacks the relevant capability.
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/config` | — | Server configuration flags |
| `GET` | `/library/collections` | Bearer | List media collections/libraries |
| `GET` | `/library/series` | Bearer | List TV series (supports `?collection=`, `?provider=`) |
| `GET` | `/library/genres` | Bearer | List genres (supports `?type=`, `?provider=`) |
| `GET` | `/library/items` | Bearer | Search/filter media items (supports `?q=`, `?type=`, `?series[]=`, `?collection=`, `?limit=`, `?strategy=`, `?provider=`) |
### Files (local-files feature only)
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/files/stream/:id` | — | Range-header video streaming for local files |
| `POST` | `/files/rescan` | Bearer | Trigger library rescan, returns `{ items_found }` |
| `GET` | `/files/transcode/:id/playlist.m3u8` | — | HLS transcode playlist |
| `GET` | `/files/transcode/:id/:segment` | — | HLS transcode segment |
| `GET` | `/files/transcode-settings` | Bearer | Get transcode settings (`cleanup_ttl_hours`) |
| `PUT` | `/files/transcode-settings` | Bearer | Update transcode settings |
| `GET` | `/files/transcode-stats` | Bearer | Cache stats `{ cache_size_bytes, item_count }` |
| `DELETE` | `/files/transcode-cache` | Bearer | Clear the transcode cache |
### IPTV
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/iptv/playlist.m3u` | `?token=` | M3U playlist of all channels |
| `GET` | `/iptv/epg.xml` | `?token=` | XMLTV EPG for all channels |
### Admin
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/admin/logs` | `?token=` | SSE stream of live server log lines (`{ level, target, message, timestamp }`) |
| `GET` | `/admin/activity` | Bearer | Recent 50 in-app activity events |
### Config
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/config` | — | Server configuration flags and provider capabilities |
## Examples
@@ -267,6 +315,21 @@ curl -s -I http://localhost:3000/api/v1/channels/<id>/stream \
### Channel
A named broadcast channel owned by a user. Holds a `schedule_config` (the programming template) and a `recycle_policy`.
Channel fields:
| Field | Description |
|-------|-------------|
| `access_mode` | `public` / `password_protected` / `account_required` / `owner_only` |
| `access_password` | Hashed password when `access_mode` is `password_protected` |
| `logo` | URL or inline SVG for the watermark overlay |
| `logo_position` | `top_right` (default) / `top_left` / `bottom_left` / `bottom_right` |
| `logo_opacity` | 0.0–1.0, default 1.0 |
| `auto_schedule` | When `true`, the server auto-regenerates the schedule when it expires |
| `webhook_url` | HTTP endpoint called on domain events |
| `webhook_poll_interval_secs` | Polling interval for webhook delivery |
| `webhook_body_template` | Handlebars template for the webhook POST body |
| `webhook_headers` | JSON object of extra HTTP headers sent with webhooks |
### ScheduleConfig
The shareable programming template: an ordered list of `ProgrammingBlock`s. Channels do not need to cover all 24 hours — gaps are valid and produce a no-signal state.
@@ -286,6 +349,8 @@ Provider-agnostic filter used by algorithmic blocks:
| `tags` | Provider tag strings |
| `min_duration_secs` / `max_duration_secs` | Duration bounds for item selection |
| `collections` | Abstract groupings (Jellyfin library IDs, Plex sections, folder paths, etc.) |
| `series_names` | List of TV series names (OR-combined) |
| `search_term` | Free-text search term for library browsing |
### FillStrategy
How an algorithmic block fills its time budget:
@@ -305,6 +370,22 @@ Controls when previously aired items become eligible again:
| `cooldown_generations` | Don't replay within this many schedule generations |
| `min_available_ratio` | Always keep at least this fraction (0.0–1.0) of the matching pool selectable, even if their cooldown hasn't expired. Prevents small libraries from running dry. |
### ProviderCapabilities
`GET /config` returns `providers[]` with per-provider capabilities. Library endpoints return `501` if the active provider lacks the relevant capability.
| Capability | Description |
|------------|-------------|
| `collections` | Provider can list/filter by collections |
| `series` | Provider exposes TV series groupings |
| `genres` | Provider exposes genre metadata |
| `tags` | Provider supports tag filtering |
| `decade` | Provider supports decade filtering |
| `search` | Provider supports free-text search |
| `streaming_protocol` | `hls` or `direct_file` |
| `rescan` | Provider supports triggering a library rescan |
| `transcode` | FFmpeg transcoding is available (local-files only) |
### No-signal state
`GET /channels/:id/now` and `GET /channels/:id/stream` return `204 No Content` when the current time falls in a gap between blocks. The frontend should display static / noise in this case — matching the broadcast TV experience.
@@ -338,6 +419,9 @@ cargo build -F sqlite,auth-jwt,auth-oidc,jellyfin
# PostgreSQL variant
cargo build --no-default-features -F postgres,auth-jwt,jellyfin
# With local files + transcoding
cargo build -F sqlite,auth-jwt,jellyfin,local-files
```
### Docker
@@ -357,7 +441,8 @@ k-tv-backend/
│ │ # ScheduledSlot, MediaItem, PlaybackRecord, User, ...
│ ├── value_objects.rs # MediaFilter, FillStrategy, RecyclePolicy,
│ │ # MediaItemId, ContentType, Email, ...
│ ├── ports.rs # IMediaProvider trait
│ ├── ports.rs # IMediaProvider trait, ProviderCapabilities
│ ├── events.rs # Domain event types
│ ├── repositories.rs # ChannelRepository, ScheduleRepository, UserRepository
│ ├── services.rs # ChannelService, ScheduleEngineService, UserService
│ └── errors.rs # DomainError
@@ -366,7 +451,9 @@ k-tv-backend/
│ ├── channel_repository.rs # SQLite + Postgres ChannelRepository adapters
│ ├── schedule_repository.rs # SQLite + Postgres ScheduleRepository adapters
│ ├── user_repository.rs # SQLite + Postgres UserRepository adapters
│ ├── activity_log_repository/ # Activity log persistence
│ ├── jellyfin.rs # Jellyfin IMediaProvider adapter
│ ├── local_files/ # Local filesystem provider + FFmpeg transcoder
│ ├── auth/
│ │ ├── jwt.rs # JWT create + validate
│ │ └── oidc.rs # OIDC flow (stateless cookie state)
@@ -376,13 +463,22 @@ k-tv-backend/
├── api/src/
│ ├── routes/
│ │ ├── auth.rs # /auth/* endpoints
│ │ ├── channels.rs # /channels/* endpoints (CRUD, EPG, broadcast)
│ │ ├── config.rs # /config endpoint
│ │ ├── channels/ # /channels/* endpoints (CRUD, EPG, broadcast)
│ │ ├── admin.rs # /admin/logs (SSE), /admin/activity
│ │ ├── config.rs # /config endpoint
│ │ ├── files.rs # /files/* endpoints (local-files feature)
│ │ ├── iptv.rs # /iptv/playlist.m3u, /iptv/epg.xml
│ │ └── library.rs # /library/* endpoints
│ ├── config.rs # Config::from_env()
│ ├── state.rs # AppState
│ ├── extractors.rs # CurrentUser (JWT Bearer extractor)
│ ├── error.rs # ApiError → HTTP status mapping
│ ├── dto.rs # All request + response types
│ ├── events.rs # SSE event broadcasting
│ ├── log_layer.rs # Tracing layer → SSE log stream
│ ├── poller.rs # Webhook polling task
│ ├── scheduler.rs # Auto-schedule renewal task
│ ├── webhook.rs # Webhook delivery
│ └── main.rs # Startup wiring
├── migrations_sqlite/

View File

@@ -11,6 +11,12 @@ postgres = ["infra/postgres"]
auth-oidc = ["infra/auth-oidc"]
auth-jwt = ["infra/auth-jwt"]
jellyfin = ["infra/jellyfin"]
local-files = ["infra/local-files", "dep:tokio-util"]
[profile.release]
strip = true
lto = true
codegen-units = 1
[dependencies]
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
@@ -24,7 +30,10 @@ infra = { path = "../infra", default-features = false, features = ["sqlite"] }
# Web framework
axum = { version = "0.8.8", features = ["macros"] }
axum-extra = { version = "0.10", features = ["cookie-private", "cookie-key-expansion"] }
axum-extra = { version = "0.10", features = [
"cookie-private",
"cookie-key-expansion",
] }
tower = "0.5.2"
tower-http = { version = "0.6.2", features = ["cors", "trace"] }
@@ -47,7 +56,12 @@ uuid = { version = "1.19.0", features = ["v4", "serde"] }
# Logging
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] }
tokio-stream = { version = "0.1", features = ["sync"] }
reqwest = { version = "0.12", features = ["json"] }
handlebars = "6"
async-trait = "0.1"
dotenvy = "0.15.7"
time = "0.3"
tokio-util = { version = "0.7", features = ["io"], optional = true }

View File

@@ -3,10 +3,18 @@
//! Loads configuration from environment variables.
use std::env;
use std::path::PathBuf;
#[derive(Debug, Clone, PartialEq)]
pub enum ConfigSource {
Env,
Db,
}
/// Application configuration loaded from environment variables
#[derive(Debug, Clone)]
pub struct Config {
pub config_source: ConfigSource,
pub database_url: String,
pub cookie_secret: String,
pub cors_allowed_origins: Vec<String>,
@@ -28,6 +36,7 @@ pub struct Config {
pub jwt_issuer: Option<String>,
pub jwt_audience: Option<String>,
pub jwt_expiry_hours: u64,
pub jwt_refresh_expiry_days: u64,
/// Whether the application is running in production mode
pub is_production: bool,
@@ -40,6 +49,14 @@ pub struct Config {
pub jellyfin_api_key: Option<String>,
pub jellyfin_user_id: Option<String>,
/// Root directory for the local-files provider. Set `LOCAL_FILES_DIR` to enable.
pub local_files_dir: Option<PathBuf>,
/// Directory for FFmpeg HLS transcode cache. Set `TRANSCODE_DIR` to enable transcoding.
pub transcode_dir: Option<PathBuf>,
/// How long (hours) to keep transcode cache entries before cleanup. Default 24.
pub transcode_cleanup_ttl_hours: u32,
/// Public base URL of this API server (used to build IPTV stream URLs).
pub base_url: String,
}
@@ -101,6 +118,11 @@ impl Config {
.and_then(|s| s.parse().ok())
.unwrap_or(24);
let jwt_refresh_expiry_days = env::var("JWT_REFRESH_EXPIRY_DAYS")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(30);
let is_production = env::var("PRODUCTION")
.or_else(|_| env::var("RUST_ENV"))
.map(|v| v.to_lowercase() == "production" || v == "1" || v == "true")
@@ -114,10 +136,24 @@ impl Config {
let jellyfin_api_key = env::var("JELLYFIN_API_KEY").ok();
let jellyfin_user_id = env::var("JELLYFIN_USER_ID").ok();
let local_files_dir = env::var("LOCAL_FILES_DIR").ok().map(PathBuf::from);
let transcode_dir = env::var("TRANSCODE_DIR").ok().map(PathBuf::from);
let transcode_cleanup_ttl_hours = env::var("TRANSCODE_CLEANUP_TTL_HOURS")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(24);
let base_url = env::var("BASE_URL")
.unwrap_or_else(|_| format!("http://localhost:{}", port));
let config_source = match env::var("CONFIG_SOURCE").as_deref() {
Ok("db") | Ok("DB") => ConfigSource::Db,
_ => ConfigSource::Env,
};
Self {
config_source,
host,
port,
database_url,
@@ -135,11 +171,15 @@ impl Config {
jwt_issuer,
jwt_audience,
jwt_expiry_hours,
jwt_refresh_expiry_days,
is_production,
allow_registration,
jellyfin_base_url,
jellyfin_api_key,
jellyfin_user_id,
local_files_dir,
transcode_dir,
transcode_cleanup_ttl_hours,
base_url,
}
}

View File

@@ -0,0 +1,36 @@
use std::sync::Arc;
use std::time::Duration as StdDuration;
use crate::config::Config;
use infra::run_migrations;
use k_core::db::DatabasePool;
pub async fn init_database(config: &Config) -> anyhow::Result<Arc<DatabasePool>> {
tracing::info!("Connecting to database: {}", config.database_url);
#[cfg(all(feature = "sqlite", not(feature = "postgres")))]
let db_type = k_core::db::DbType::Sqlite;
#[cfg(all(feature = "postgres", not(feature = "sqlite")))]
let db_type = k_core::db::DbType::Postgres;
// Both features enabled: fall back to URL inspection at runtime
#[cfg(all(feature = "sqlite", feature = "postgres"))]
let db_type = if config.database_url.starts_with("postgres") {
k_core::db::DbType::Postgres
} else {
k_core::db::DbType::Sqlite
};
let db_config = k_core::db::DatabaseConfig {
db_type,
url: config.database_url.clone(),
max_connections: config.db_max_connections,
min_connections: config.db_min_connections,
acquire_timeout: StdDuration::from_secs(30),
};
let pool = k_core::db::connect(&db_config).await?;
run_migrations(&pool).await?;
Ok(Arc::new(pool))
}

View File

@@ -15,6 +15,15 @@ pub struct LoginRequest {
pub email: Email,
/// Password is validated on deserialization (min 8 chars)
pub password: Password,
/// When true, a refresh token is also issued for persistent sessions
#[serde(default)]
pub remember_me: bool,
}
/// Refresh token request
#[derive(Debug, Deserialize)]
pub struct RefreshRequest {
pub refresh_token: String,
}
/// Register request with validated email and password newtypes
@@ -32,6 +41,7 @@ pub struct UserResponse {
pub id: Uuid,
pub email: String,
pub created_at: DateTime<Utc>,
pub is_admin: bool,
}
/// JWT token response
@@ -40,12 +50,54 @@ pub struct TokenResponse {
pub access_token: String,
pub token_type: String,
pub expires_in: u64,
/// Only present when remember_me was true at login, or on token refresh
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_token: Option<String>,
}
/// Per-provider info returned by `GET /config`.
#[derive(Debug, Serialize)]
pub struct ProviderInfo {
pub id: String,
pub capabilities: domain::ProviderCapabilities,
}
/// System configuration response
#[derive(Debug, Serialize)]
pub struct ConfigResponse {
pub allow_registration: bool,
/// All registered providers with their capabilities.
pub providers: Vec<ProviderInfo>,
/// Capabilities of the primary provider — kept for backward compatibility.
pub provider_capabilities: domain::ProviderCapabilities,
/// Provider type strings supported by this build (feature-gated).
pub available_provider_types: Vec<String>,
}
// ============================================================================
// Admin DTOs
// ============================================================================
/// An activity log entry returned by GET /admin/activity.
#[derive(Debug, Serialize)]
pub struct ActivityEventResponse {
pub id: Uuid,
pub timestamp: DateTime<Utc>,
pub event_type: String,
pub detail: String,
pub channel_id: Option<Uuid>,
}
impl From<domain::ActivityEvent> for ActivityEventResponse {
fn from(e: domain::ActivityEvent) -> Self {
Self {
id: e.id,
timestamp: e.timestamp,
event_type: e.event_type,
detail: e.detail,
channel_id: e.channel_id,
}
}
}
// ============================================================================
@@ -61,6 +113,10 @@ pub struct CreateChannelRequest {
pub access_mode: Option<domain::AccessMode>,
/// Plain-text password; hashed before storage.
pub access_password: Option<String>,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: Option<u32>,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
}
/// All fields are optional — only provided fields are updated.
@@ -70,7 +126,7 @@ pub struct UpdateChannelRequest {
pub description: Option<String>,
pub timezone: Option<String>,
/// Replace the entire schedule config (template import/edit)
pub schedule_config: Option<domain::ScheduleConfig>,
pub schedule_config: Option<domain::ScheduleConfigCompat>,
pub recycle_policy: Option<domain::RecyclePolicy>,
pub auto_schedule: Option<bool>,
pub access_mode: Option<domain::AccessMode>,
@@ -80,6 +136,13 @@ pub struct UpdateChannelRequest {
pub logo: Option<Option<String>>,
pub logo_position: Option<domain::LogoPosition>,
pub logo_opacity: Option<f32>,
/// `Some(None)` = clear, `Some(Some(url))` = set, `None` = unchanged.
pub webhook_url: Option<Option<String>>,
pub webhook_poll_interval_secs: Option<u32>,
/// `Some(None)` = clear, `Some(Some(tmpl))` = set, `None` = unchanged.
pub webhook_body_template: Option<Option<String>>,
/// `Some(None)` = clear, `Some(Some(json))` = set, `None` = unchanged.
pub webhook_headers: Option<Option<String>>,
}
#[derive(Debug, Serialize)]
@@ -96,6 +159,10 @@ pub struct ChannelResponse {
pub logo: Option<String>,
pub logo_position: domain::LogoPosition,
pub logo_opacity: f32,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: u32,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
@@ -115,12 +182,44 @@ impl From<domain::Channel> for ChannelResponse {
logo: c.logo,
logo_position: c.logo_position,
logo_opacity: c.logo_opacity,
webhook_url: c.webhook_url,
webhook_poll_interval_secs: c.webhook_poll_interval_secs,
webhook_body_template: c.webhook_body_template,
webhook_headers: c.webhook_headers,
created_at: c.created_at,
updated_at: c.updated_at,
}
}
}
// ============================================================================
// Config history DTOs
// ============================================================================
#[derive(Debug, Serialize)]
pub struct ConfigSnapshotResponse {
pub id: Uuid,
pub version_num: i64,
pub label: Option<String>,
pub created_at: DateTime<Utc>,
}
impl From<domain::ChannelConfigSnapshot> for ConfigSnapshotResponse {
fn from(s: domain::ChannelConfigSnapshot) -> Self {
Self {
id: s.id,
version_num: s.version_num,
label: s.label,
created_at: s.created_at,
}
}
}
#[derive(Debug, Deserialize)]
pub struct PatchSnapshotRequest {
pub label: Option<String>,
}
// ============================================================================
// EPG / playback DTOs
// ============================================================================
@@ -186,8 +285,7 @@ impl ScheduledSlotResponse {
pub fn with_block_access(slot: domain::ScheduledSlot, channel: &domain::Channel) -> Self {
let block_access_mode = channel
.schedule_config
.blocks
.iter()
.all_blocks()
.find(|b| b.id == slot.source_block_id)
.map(|b| b.access_mode.clone())
.unwrap_or_default();
@@ -224,6 +322,50 @@ pub struct ScheduleResponse {
pub slots: Vec<ScheduledSlotResponse>,
}
// ============================================================================
// Transcode DTOs
// ============================================================================
#[cfg(feature = "local-files")]
#[derive(Debug, Serialize)]
pub struct TranscodeSettingsResponse {
pub cleanup_ttl_hours: u32,
}
#[cfg(feature = "local-files")]
#[derive(Debug, Deserialize)]
pub struct UpdateTranscodeSettingsRequest {
pub cleanup_ttl_hours: u32,
}
#[cfg(feature = "local-files")]
#[derive(Debug, Serialize)]
pub struct TranscodeStatsResponse {
pub cache_size_bytes: u64,
pub item_count: usize,
}
#[derive(Debug, Serialize)]
pub struct ScheduleHistoryEntry {
pub id: Uuid,
pub generation: u32,
pub valid_from: DateTime<Utc>,
pub valid_until: DateTime<Utc>,
pub slot_count: usize,
}
impl From<domain::GeneratedSchedule> for ScheduleHistoryEntry {
fn from(s: domain::GeneratedSchedule) -> Self {
Self {
id: s.id,
generation: s.generation,
valid_from: s.valid_from,
valid_until: s.valid_until,
slot_count: s.slots.len(),
}
}
}
impl From<domain::GeneratedSchedule> for ScheduleResponse {
fn from(s: domain::GeneratedSchedule) -> Self {
Self {

View File

@@ -35,6 +35,16 @@ pub enum ApiError {
#[error("auth_required")]
AuthRequired,
#[allow(dead_code)]
#[error("Not found: {0}")]
NotFound(String),
#[error("Not implemented: {0}")]
NotImplemented(String),
#[error("Conflict: {0}")]
Conflict(String),
}
/// Error response body
@@ -132,6 +142,30 @@ impl IntoResponse for ApiError {
details: None,
},
),
ApiError::NotFound(msg) => (
StatusCode::NOT_FOUND,
ErrorResponse {
error: "Not found".to_string(),
details: Some(msg.clone()),
},
),
ApiError::NotImplemented(msg) => (
StatusCode::NOT_IMPLEMENTED,
ErrorResponse {
error: "Not implemented".to_string(),
details: Some(msg.clone()),
},
),
ApiError::Conflict(msg) => (
StatusCode::CONFLICT,
ErrorResponse {
error: "Conflict".to_string(),
details: Some(msg.clone()),
},
),
};
(status, Json(error_response)).into_response()
@@ -146,7 +180,17 @@ impl ApiError {
pub fn internal(msg: impl Into<String>) -> Self {
Self::Internal(msg.into())
}
pub fn not_found(msg: impl Into<String>) -> Self {
Self::NotFound(msg.into())
}
pub fn conflict(msg: impl Into<String>) -> Self {
Self::Conflict(msg.into())
}
pub fn not_implemented(msg: impl Into<String>) -> Self {
Self::NotImplemented(msg.into())
}
}
/// Result type alias for API handlers
pub type ApiResult<T> = Result<T, ApiError>;

View File

@@ -0,0 +1,12 @@
//! Event bus type alias.
//!
//! The broadcast sender is kept in `AppState` and cloned into each route handler.
//! Receivers are created with `event_tx.subscribe()`.

use tokio::sync::broadcast;

use domain::DomainEvent;

/// A sender half of the domain-event broadcast channel.
///
/// Clone to share across tasks. Use `event_tx.subscribe()` to create receivers.
/// NOTE(review): `tokio::sync::broadcast` drops the oldest messages for
/// lagging receivers once the channel capacity is exceeded — slow SSE
/// consumers may miss events.
pub type EventBus = broadcast::Sender<DomainEvent>;

View File

@@ -67,7 +67,7 @@ impl FromRequestParts<AppState> for OptionalCurrentUser {
let user = validate_jwt_token(&token, state).await.ok();
return Ok(OptionalCurrentUser(user));
}
return Ok(OptionalCurrentUser(None));
Ok(OptionalCurrentUser(None))
}
#[cfg(not(feature = "auth-jwt"))]
@@ -78,6 +78,21 @@ impl FromRequestParts<AppState> for OptionalCurrentUser {
}
}
/// Extracted admin user — returns 403 if user is not an admin.
pub struct AdminUser(pub User);

impl FromRequestParts<AppState> for AdminUser {
    type Rejection = ApiError;

    /// Delegates authentication to the `CurrentUser` extractor, then
    /// enforces the `is_admin` flag on the resulting user.
    async fn from_request_parts(parts: &mut Parts, state: &AppState) -> Result<Self, Self::Rejection> {
        // Any auth failure from CurrentUser (e.g. missing/invalid token)
        // propagates unchanged; only authenticated non-admins get Forbidden.
        let CurrentUser(user) = CurrentUser::from_request_parts(parts, state).await?;
        if !user.is_admin {
            return Err(ApiError::Forbidden("Admin access required".to_string()));
        }
        Ok(AdminUser(user))
    }
}
/// Authenticate using JWT Bearer token from the `Authorization` header.
#[cfg(feature = "auth-jwt")]
async fn try_jwt_auth(parts: &mut Parts, state: &AppState) -> Result<User, ApiError> {
@@ -107,7 +122,7 @@ pub(crate) async fn validate_jwt_token(token: &str, state: &AppState) -> Result<
.as_ref()
.ok_or_else(|| ApiError::Internal("JWT validator not configured".to_string()))?;
let claims = validator.validate_token(token).map_err(|e| {
let claims = validator.validate_access_token(token).map_err(|e| {
tracing::debug!("JWT validation failed: {:?}", e);
match e {
infra::auth::jwt::JwtError::Expired => {

View File

@@ -0,0 +1,64 @@
//! Background library sync task.
//! Fires 10 seconds after startup, then every N hours (read from app_settings).
use std::sync::Arc;
use std::time::Duration;
use domain::IProviderRegistry;
/// Delay before the first sync after process startup.
const STARTUP_DELAY_SECS: u64 = 10;
/// Fallback sync interval when the app setting is missing or unparsable.
const DEFAULT_INTERVAL_HOURS: u64 = 6;

/// Background loop: sync all providers shortly after startup, then repeat.
///
/// The interval is re-read from `app_settings` on every cycle so an admin
/// change takes effect without restarting the process.
pub async fn run_library_sync(
    sync_adapter: Arc<dyn domain::LibrarySyncAdapter>,
    registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
    app_settings_repo: Arc<dyn domain::IAppSettingsRepository>,
) {
    tokio::time::sleep(Duration::from_secs(STARTUP_DELAY_SECS)).await;
    loop {
        tick(&sync_adapter, &registry).await;
        // Clamp to at least 1 hour: a stored value of 0 would make
        // `sleep(0)` spin this loop and hammer the providers continuously.
        let interval_hours = load_interval_hours(&app_settings_repo).await.max(1);
        tokio::time::sleep(Duration::from_secs(interval_hours * 3600)).await;
    }
}
/// Read `library_sync_interval_hours` from app settings.
///
/// Falls back to `DEFAULT_INTERVAL_HOURS` when the key is missing, the
/// repository read fails, or the stored value is not a valid `u64`.
async fn load_interval_hours(repo: &Arc<dyn domain::IAppSettingsRepository>) -> u64 {
    let stored = repo.get("library_sync_interval_hours").await.ok().flatten();
    match stored.and_then(|raw| raw.parse::<u64>().ok()) {
        Some(hours) => hours,
        None => DEFAULT_INTERVAL_HOURS,
    }
}
/// Run one sync pass over every currently registered provider.
///
/// The registry read-lock is held only while resolving ids/providers —
/// never across the `sync_provider` await — so the registry can be
/// swapped out concurrently.
async fn tick(
    sync_adapter: &Arc<dyn domain::LibrarySyncAdapter>,
    registry: &Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
) {
    // Snapshot the provider ids first; the set may change while we sync.
    let ids: Vec<String> = registry.read().await.provider_ids();
    for id in ids {
        // Re-resolve each provider right before syncing; it may have been
        // removed since the snapshot was taken.
        let provider = {
            let reg = registry.read().await;
            match reg.get_provider(&id) {
                Some(p) => p,
                None => continue,
            }
        };
        tracing::info!("library-sync: syncing provider '{}'", id);
        let outcome = sync_adapter.sync_provider(provider.as_ref(), &id).await;
        match outcome.error {
            Some(ref err) => {
                tracing::warn!("library-sync: provider '{}' failed: {}", id, err);
            }
            None => {
                tracing::info!(
                    "library-sync: provider '{}' done — {} items in {}ms",
                    id, outcome.items_found, outcome.duration_ms
                );
            }
        }
    }
}

View File

@@ -0,0 +1,72 @@
//! Custom tracing layer that captures log events and broadcasts them to SSE clients.
use chrono::Utc;
use serde::Serialize;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
use tracing::Event;
use tracing_subscriber::Layer;
/// A single structured log line sent to SSE clients.
#[derive(Debug, Clone, Serialize)]
pub struct LogLine {
    /// Log level rendered as text (from `Metadata::level()`).
    pub level: String,
    /// Target (module path) of the originating tracing event.
    pub target: String,
    /// The event's `message` field, captured by `MsgVisitor`.
    pub message: String,
    /// RFC 3339 UTC timestamp taken when the event was observed.
    pub timestamp: String,
}
/// Tracing layer that fans log events out to a broadcast channel + ring buffer.
pub struct AppLogLayer {
    // Live fan-out channel for connected listeners.
    tx: broadcast::Sender<LogLine>,
    // Shared ring buffer of recent lines, capped in `on_event`.
    history: Arc<Mutex<VecDeque<LogLine>>>,
}

impl AppLogLayer {
    /// Build a layer from an existing sender and history buffer, so the
    /// same handles can also be held elsewhere (e.g. application state).
    pub fn new(
        tx: broadcast::Sender<LogLine>,
        history: Arc<Mutex<VecDeque<LogLine>>>,
    ) -> Self {
        Self { tx, history }
    }
}
impl<S: tracing::Subscriber> Layer<S> for AppLogLayer {
    /// Capture every tracing event: extract its `message` field, stamp it,
    /// append it to the bounded history buffer, and broadcast it to any
    /// live listeners.
    fn on_event(&self, event: &Event<'_>, _ctx: tracing_subscriber::layer::Context<'_, S>) {
        // Maximum lines retained in the history ring buffer
        // (was a bare magic `200` inline).
        const MAX_HISTORY_LINES: usize = 200;

        let mut visitor = MsgVisitor(String::new());
        event.record(&mut visitor);

        let line = LogLine {
            level: event.metadata().level().to_string(),
            target: event.metadata().target().to_string(),
            message: visitor.0,
            timestamp: Utc::now().to_rfc3339(),
        };

        // A poisoned mutex (panic while held) skips history rather than
        // propagating a panic into the logging path.
        if let Ok(mut history) = self.history.lock() {
            // `while` (not `if`) so the bound holds even if the buffer
            // somehow grew past the cap.
            while history.len() >= MAX_HISTORY_LINES {
                history.pop_front();
            }
            history.push_back(line.clone());
        }

        // Send errors only mean no receiver is currently subscribed.
        let _ = self.tx.send(line);
    }
}
// Field visitor that extracts only the `message` field from an event,
// accumulating it into the wrapped String.
struct MsgVisitor(String);

impl tracing::field::Visit for MsgVisitor {
    fn record_str(&mut self, field: &tracing::field::Field, value: &str) {
        if field.name() != "message" {
            return;
        }
        self.0 = value.to_owned();
    }

    fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
        if field.name() != "message" {
            return;
        }
        self.0 = format!("{value:?}");
    }
}

View File

@@ -2,203 +2,131 @@
//!
//! Configures and starts the HTTP server with JWT-based authentication.
use std::net::SocketAddr;
use std::time::Duration as StdDuration;
use axum::Router;
use axum::http::{HeaderName, HeaderValue};
use std::sync::Arc;
use tower_http::cors::{AllowHeaders, AllowMethods, AllowOrigin, CorsLayer};
use domain::{ChannelService, IMediaProvider, ScheduleEngineService, UserService};
use infra::factory::{build_channel_repository, build_schedule_repository, build_user_repository};
use infra::run_migrations;
use k_core::http::server::{ServerConfig, apply_standard_middleware};
use k_core::logging;
use tokio::net::TcpListener;
use tracing::info;
use domain::{ChannelService, IProviderRegistry, ScheduleEngineService, UserService};
use infra::factory::{build_activity_log_repository, build_app_settings_repository, build_channel_repository, build_library_repository, build_provider_config_repository, build_schedule_repository, build_user_repository};
#[cfg(feature = "local-files")]
use infra::factory::build_transcode_settings_repository;
mod config;
mod database;
mod library_scheduler;
mod provider_registry;
mod dto;
mod error;
mod events;
mod extractors;
mod log_layer;
mod poller;
mod routes;
mod scheduler;
mod server;
mod startup;
mod state;
mod telemetry;
mod webhook;
use crate::config::Config;
use crate::state::AppState;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
logging::init("api");
let handles = telemetry::init_tracing();
let config = Config::from_env();
info!("Starting server on {}:{}", config.host, config.port);
// Setup database
tracing::info!("Connecting to database: {}", config.database_url);
#[cfg(all(feature = "sqlite", not(feature = "postgres")))]
let db_type = k_core::db::DbType::Sqlite;
#[cfg(all(feature = "postgres", not(feature = "sqlite")))]
let db_type = k_core::db::DbType::Postgres;
// Both features enabled: fall back to URL inspection at runtime
#[cfg(all(feature = "sqlite", feature = "postgres"))]
let db_type = if config.database_url.starts_with("postgres") {
k_core::db::DbType::Postgres
} else {
k_core::db::DbType::Sqlite
};
let db_config = k_core::db::DatabaseConfig {
db_type,
url: config.database_url.clone(),
max_connections: config.db_max_connections,
min_connections: config.db_min_connections,
acquire_timeout: StdDuration::from_secs(30),
};
let db_pool = k_core::db::connect(&db_config).await?;
run_migrations(&db_pool).await?;
let db_pool = database::init_database(&config).await?;
let user_repo = build_user_repository(&db_pool).await?;
let channel_repo = build_channel_repository(&db_pool).await?;
let schedule_repo = build_schedule_repository(&db_pool).await?;
let activity_log_repo = build_activity_log_repository(&db_pool).await?;
let user_service = UserService::new(user_repo);
let channel_service = ChannelService::new(channel_repo.clone());
// Build media provider — Jellyfin if configured, no-op fallback otherwise.
let media_provider: Arc<dyn IMediaProvider> = build_media_provider(&config);
// Build provider registry — all configured providers are registered simultaneously.
let provider_config_repo = build_provider_config_repository(&db_pool).await?;
let bundle = provider_registry::build_provider_registry(
&config, &db_pool, &provider_config_repo,
).await?;
let registry_arc = bundle.registry;
let provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>> =
Arc::new(tokio::sync::RwLock::new(Arc::clone(&registry_arc)));
let (event_tx, event_rx) = tokio::sync::broadcast::channel::<domain::DomainEvent>(64);
let bg_channel_repo = channel_repo.clone();
let webhook_channel_repo = channel_repo.clone();
tokio::spawn(webhook::run_webhook_consumer(
event_rx,
webhook_channel_repo,
reqwest::Client::new(),
));
let schedule_engine = ScheduleEngineService::new(
Arc::clone(&media_provider),
Arc::clone(&registry_arc) as Arc<dyn IProviderRegistry>,
channel_repo,
schedule_repo,
);
let state = AppState::new(
#[cfg(feature = "local-files")]
let transcode_settings_repo = build_transcode_settings_repository(&db_pool).await.ok();
let library_repo = build_library_repository(&db_pool).await?;
let app_settings_repo = build_app_settings_repository(&db_pool).await?;
let library_sync_adapter: Arc<dyn domain::LibrarySyncAdapter> =
Arc::new(infra::FullSyncAdapter::new(Arc::clone(&library_repo)));
#[allow(unused_mut)]
let mut state = AppState::new(
user_service,
channel_service,
schedule_engine,
media_provider,
provider_registry,
provider_config_repo,
config.clone(),
event_tx.clone(),
handles.log_tx,
handles.log_history,
activity_log_repo,
db_pool,
library_repo,
library_sync_adapter,
app_settings_repo,
#[cfg(feature = "local-files")]
transcode_settings_repo,
)
.await?;
let server_config = ServerConfig {
cors_origins: config.cors_allowed_origins.clone(),
};
let bg_schedule_engine = Arc::clone(&state.schedule_engine);
tokio::spawn(scheduler::run_auto_scheduler(bg_schedule_engine, bg_channel_repo));
let app = Router::new()
.nest("/api/v1", routes::api_v1_router())
.with_state(state);
let app = apply_standard_middleware(app, &server_config);
// Wrap with an outer CorsLayer that includes the custom password headers.
// Being outermost it handles OPTIONS preflights before k_core's inner layer.
let origins: Vec<HeaderValue> = config
.cors_allowed_origins
.iter()
.filter_map(|o| o.parse().ok())
.collect();
let cors = CorsLayer::new()
.allow_origin(AllowOrigin::list(origins))
.allow_methods(AllowMethods::any())
.allow_headers(AllowHeaders::list([
axum::http::header::AUTHORIZATION,
axum::http::header::CONTENT_TYPE,
HeaderName::from_static("x-channel-password"),
HeaderName::from_static("x-block-password"),
]));
let app = app.layer(cors);
let addr: SocketAddr = format!("{}:{}", config.host, config.port).parse()?;
let listener = TcpListener::bind(addr).await?;
tracing::info!("🚀 API server running at http://{}", addr);
tracing::info!("🔒 Authentication mode: JWT (Bearer token)");
#[cfg(feature = "auth-jwt")]
tracing::info!(" ✓ JWT auth enabled");
#[cfg(feature = "auth-oidc")]
tracing::info!(" ✓ OIDC integration enabled (stateless cookie state)");
tracing::info!("📝 API endpoints available at /api/v1/...");
axum::serve(listener, app).await?;
Ok(())
}
/// Build the media provider from config.
/// Falls back to a no-op provider that returns an informative error when
/// Jellyfin env vars are not set, so other API features still work in dev.
fn build_media_provider(config: &Config) -> Arc<dyn IMediaProvider> {
#[cfg(feature = "jellyfin")]
if let (Some(base_url), Some(api_key), Some(user_id)) = (
&config.jellyfin_base_url,
&config.jellyfin_api_key,
&config.jellyfin_user_id,
) {
tracing::info!("Media provider: Jellyfin at {}", base_url);
return Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
base_url: base_url.clone(),
api_key: api_key.clone(),
user_id: user_id.clone(),
}));
#[cfg(feature = "local-files")]
if !bundle.local_index.is_empty() {
*state.local_index.write().await = bundle.local_index;
}
#[cfg(feature = "local-files")]
if let Some(tm) = bundle.transcode_manager {
*state.transcode_manager.write().await = Some(tm);
}
tracing::warn!(
"No media provider configured. Set JELLYFIN_BASE_URL, JELLYFIN_API_KEY, \
and JELLYFIN_USER_ID to enable schedule generation."
startup::spawn_background_tasks(
Arc::clone(&state.schedule_engine),
bg_channel_repo,
event_tx,
);
Arc::new(NoopMediaProvider)
}
/// Stand-in provider used when no real media source is configured.
/// Returns a descriptive error for every call so schedule endpoints fail
/// gracefully rather than panicking at startup.
struct NoopMediaProvider;
#[async_trait::async_trait]
impl IMediaProvider for NoopMediaProvider {
async fn fetch_items(
&self,
_: &domain::MediaFilter,
) -> domain::DomainResult<Vec<domain::MediaItem>> {
Err(domain::DomainError::InfrastructureError(
"No media provider configured. Set JELLYFIN_BASE_URL, JELLYFIN_API_KEY, \
and JELLYFIN_USER_ID."
.into(),
))
}
async fn fetch_by_id(
&self,
_: &domain::MediaItemId,
) -> domain::DomainResult<Option<domain::MediaItem>> {
Err(domain::DomainError::InfrastructureError(
"No media provider configured.".into(),
))
}
async fn get_stream_url(
&self,
_: &domain::MediaItemId,
) -> domain::DomainResult<String> {
Err(domain::DomainError::InfrastructureError(
"No media provider configured.".into(),
))
}
tokio::spawn(library_scheduler::run_library_sync(
Arc::clone(&state.library_sync_adapter),
Arc::clone(&state.provider_registry),
Arc::clone(&state.app_settings_repo),
));
server::build_and_serve(state, &config).await
}

View File

@@ -0,0 +1,488 @@
//! BroadcastPoller background task.
//!
//! Polls each channel that has a webhook_url configured. On each tick (every 1s)
//! it checks which channels are due for a poll (elapsed >= webhook_poll_interval_secs)
//! and emits BroadcastTransition or NoSignal events when the current slot changes.
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use chrono::Utc;
use tokio::sync::broadcast;
use tracing::error;
use uuid::Uuid;
use domain::{ChannelRepository, DomainError, DomainEvent, ScheduleEngineService};
/// Per-channel poller state.
#[derive(Debug)]
pub struct ChannelPollState {
    /// ID of the last slot we saw as current (None = no signal).
    last_slot_id: Option<Uuid>,
    /// Wall-clock instant of the last poll for this channel. Initialized
    /// one interval in the past so a newly-seen channel polls immediately.
    last_checked: Instant,
}
/// Polls channels with webhook URLs and emits broadcast transition events.
pub async fn run_broadcast_poller(
    schedule_engine: Arc<ScheduleEngineService>,
    channel_repo: Arc<dyn ChannelRepository>,
    event_tx: broadcast::Sender<DomainEvent>,
) {
    // Per-channel bookkeeping lives for the lifetime of the task.
    let mut poll_state: HashMap<Uuid, ChannelPollState> = HashMap::new();
    let tick_period = Duration::from_secs(1);
    loop {
        tokio::time::sleep(tick_period).await;
        poll_tick(&schedule_engine, &channel_repo, &event_tx, &mut poll_state).await;
    }
}
/// One poller pass: for every webhook-enabled channel that is due, determine
/// the currently airing slot and emit `BroadcastTransition` / `NoSignal`
/// events when it differs from the last observed state.
pub(crate) async fn poll_tick(
    schedule_engine: &Arc<ScheduleEngineService>,
    channel_repo: &Arc<dyn ChannelRepository>,
    event_tx: &broadcast::Sender<DomainEvent>,
    state: &mut HashMap<Uuid, ChannelPollState>,
) {
    let channels = match channel_repo.find_all().await {
        Ok(c) => c,
        Err(e) => {
            error!("broadcast poller: failed to load channels: {}", e);
            return;
        }
    };

    // Remove deleted channels from state
    let live_ids: std::collections::HashSet<Uuid> = channels.iter().map(|c| c.id).collect();
    state.retain(|id, _| live_ids.contains(id));

    let now = Utc::now();
    for channel in channels {
        // Only poll channels with a configured webhook URL
        if channel.webhook_url.is_none() {
            state.remove(&channel.id);
            continue;
        }
        let poll_interval = Duration::from_secs(channel.webhook_poll_interval_secs as u64);
        let entry = state.entry(channel.id).or_insert_with(|| ChannelPollState {
            last_slot_id: None,
            last_checked: Instant::now() - poll_interval, // trigger immediately on first encounter
        });
        if entry.last_checked.elapsed() < poll_interval {
            continue; // Not yet due for a poll
        }
        entry.last_checked = Instant::now();

        // Fetch the active schedule ONCE and keep the matching slot.
        // The previous version fetched twice (slot id first, then the slot
        // again to build the event), which doubled repository load and could
        // race a concurrent schedule regeneration between the two calls,
        // silently dropping the transition event.
        let current_slot = match schedule_engine.get_active_schedule(channel.id, now).await {
            Ok(Some(schedule)) => schedule
                .slots
                .iter()
                .find(|s| s.start_at <= now && now < s.end_at)
                .cloned(),
            Ok(None) => None,
            Err(DomainError::NoActiveSchedule(_)) => None,
            Err(DomainError::ChannelNotFound(_)) => {
                // Channel vanished between find_all and here — drop its state.
                state.remove(&channel.id);
                continue;
            }
            Err(e) => {
                error!(
                    "broadcast poller: error checking schedule for channel {}: {}",
                    channel.id, e
                );
                continue;
            }
        };

        let current_slot_id = current_slot.as_ref().map(|s| s.id);
        if current_slot_id == entry.last_slot_id {
            continue;
        }

        // State changed — emit appropriate event
        match current_slot {
            Some(slot) => {
                let _ = event_tx.send(DomainEvent::BroadcastTransition {
                    channel_id: channel.id,
                    slot,
                });
            }
            None => {
                let _ = event_tx.send(DomainEvent::NoSignal {
                    channel_id: channel.id,
                });
            }
        }
        entry.last_slot_id = current_slot_id;
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for `poll_tick`, driven by in-memory mock repositories.
    //! Only the repository methods actually exercised by the poller are
    //! implemented; everything else is `unimplemented!()` on purpose.
    use super::*;
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};
    use async_trait::async_trait;
    use chrono::{DateTime, Duration, Utc};
    use domain::value_objects::{ChannelId, ContentType, UserId};
    use domain::{
        BlockId, Channel, ChannelRepository, Collection, DomainResult, GeneratedSchedule,
        IProviderRegistry, MediaFilter, MediaItem, MediaItemId, PlaybackRecord, ProviderCapabilities,
        ScheduleEngineService, ScheduleRepository, SeriesSummary, StreamQuality,
    };
    use tokio::sync::broadcast;
    use uuid::Uuid;

    // ── Mocks ─────────────────────────────────────────────────────────────────

    // Channel repository backed by a fixed in-memory list.
    struct MockChannelRepo {
        channels: Vec<Channel>,
    }

    #[async_trait]
    impl ChannelRepository for MockChannelRepo {
        async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
            Ok(self.channels.iter().find(|c| c.id == id).cloned())
        }
        async fn find_by_owner(&self, _owner_id: UserId) -> DomainResult<Vec<Channel>> {
            unimplemented!()
        }
        async fn find_all(&self) -> DomainResult<Vec<Channel>> {
            Ok(self.channels.clone())
        }
        async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>> {
            unimplemented!()
        }
        async fn save(&self, _channel: &Channel) -> DomainResult<()> {
            unimplemented!()
        }
        async fn delete(&self, _id: ChannelId) -> DomainResult<()> {
            unimplemented!()
        }
        async fn save_config_snapshot(&self, _channel_id: ChannelId, _config: &domain::ScheduleConfig, _label: Option<String>) -> DomainResult<domain::ChannelConfigSnapshot> { unimplemented!() }
        async fn list_config_snapshots(&self, _channel_id: ChannelId) -> DomainResult<Vec<domain::ChannelConfigSnapshot>> { unimplemented!() }
        async fn get_config_snapshot(&self, _channel_id: ChannelId, _snapshot_id: Uuid) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
        async fn patch_config_snapshot_label(&self, _channel_id: ChannelId, _snapshot_id: Uuid, _label: Option<String>) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
    }

    // Schedule repository that always serves one fixed "active" schedule and
    // records every save into `saved` for later inspection.
    struct MockScheduleRepo {
        active: Option<GeneratedSchedule>,
        saved: Arc<Mutex<Vec<GeneratedSchedule>>>,
    }

    #[async_trait]
    impl ScheduleRepository for MockScheduleRepo {
        async fn find_active(
            &self,
            _channel_id: ChannelId,
            _at: DateTime<Utc>,
        ) -> DomainResult<Option<GeneratedSchedule>> {
            Ok(self.active.clone())
        }
        async fn find_latest(
            &self,
            _channel_id: ChannelId,
        ) -> DomainResult<Option<GeneratedSchedule>> {
            Ok(self.active.clone())
        }
        async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
            self.saved.lock().unwrap().push(schedule.clone());
            Ok(())
        }
        async fn find_playback_history(
            &self,
            _channel_id: ChannelId,
        ) -> DomainResult<Vec<PlaybackRecord>> {
            Ok(vec![])
        }
        async fn save_playback_record(&self, _record: &PlaybackRecord) -> DomainResult<()> {
            Ok(())
        }
        async fn find_last_slot_per_block(
            &self,
            _channel_id: ChannelId,
        ) -> DomainResult<HashMap<BlockId, MediaItemId>> {
            Ok(HashMap::new())
        }
        async fn list_schedule_history(&self, _channel_id: ChannelId) -> DomainResult<Vec<GeneratedSchedule>> { unimplemented!() }
        async fn get_schedule_by_id(&self, _channel_id: ChannelId, _schedule_id: Uuid) -> DomainResult<Option<GeneratedSchedule>> { unimplemented!() }
        async fn delete_schedules_after(&self, _channel_id: ChannelId, _target_generation: u32) -> DomainResult<()> { unimplemented!() }
    }

    // Provider registry with no providers; the poller never fetches media
    // in these tests, so empty results are sufficient.
    struct MockRegistry;

    #[async_trait]
    impl IProviderRegistry for MockRegistry {
        async fn fetch_items(
            &self,
            _provider_id: &str,
            _filter: &MediaFilter,
        ) -> DomainResult<Vec<MediaItem>> {
            Ok(vec![])
        }
        async fn fetch_by_id(&self, _item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
            Ok(None)
        }
        async fn get_stream_url(
            &self,
            _item_id: &MediaItemId,
            _quality: &StreamQuality,
        ) -> DomainResult<String> {
            unimplemented!()
        }
        fn provider_ids(&self) -> Vec<String> {
            vec![]
        }
        fn primary_id(&self) -> &str {
            ""
        }
        fn capabilities(&self, _provider_id: &str) -> Option<ProviderCapabilities> {
            None
        }
        async fn list_collections(&self, _provider_id: &str) -> DomainResult<Vec<Collection>> {
            unimplemented!()
        }
        async fn list_series(
            &self,
            _provider_id: &str,
            _collection_id: Option<&str>,
        ) -> DomainResult<Vec<SeriesSummary>> {
            unimplemented!()
        }
        async fn list_genres(
            &self,
            _provider_id: &str,
            _content_type: Option<&ContentType>,
        ) -> DomainResult<Vec<String>> {
            unimplemented!()
        }
    }

    // ── Helpers ───────────────────────────────────────────────────────────────

    // Channel with a webhook URL and a zero poll interval, so the poller
    // considers it due on every tick.
    fn make_channel_with_webhook(channel_id: Uuid) -> Channel {
        let mut ch = Channel::new(Uuid::new_v4(), "Test", "UTC");
        ch.id = channel_id;
        ch.webhook_url = Some("http://example.com/hook".to_string());
        ch.webhook_poll_interval_secs = 0; // always due
        ch
    }

    // Slot that started one minute ago and runs for another 29 minutes,
    // i.e. it is "currently airing" at test time.
    fn make_slot(_channel_id: Uuid, slot_id: Uuid) -> domain::ScheduledSlot {
        use domain::entities::MediaItem;
        let now = Utc::now();
        domain::ScheduledSlot {
            id: slot_id,
            start_at: now - Duration::minutes(1),
            end_at: now + Duration::minutes(29),
            item: MediaItem {
                id: MediaItemId::new("test-item"),
                title: "Test Movie".to_string(),
                content_type: ContentType::Movie,
                duration_secs: 1800,
                description: None,
                genres: vec![],
                year: None,
                tags: vec![],
                series_name: None,
                season_number: None,
                episode_number: None,
                thumbnail_url: None,
                collection_id: None,
            },
            source_block_id: Uuid::new_v4(),
        }
    }

    // Schedule valid from one hour ago to 47 hours from now, wrapping the
    // given slots.
    fn make_schedule(channel_id: Uuid, slots: Vec<domain::ScheduledSlot>) -> GeneratedSchedule {
        let now = Utc::now();
        GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from: now - Duration::hours(1),
            valid_until: now + Duration::hours(47),
            generation: 1,
            slots,
        }
    }

    // Schedule engine wired to the mock registry and the given repos.
    fn make_engine(
        channel_repo: Arc<dyn ChannelRepository>,
        schedule_repo: Arc<dyn ScheduleRepository>,
    ) -> Arc<ScheduleEngineService> {
        Arc::new(ScheduleEngineService::new(
            Arc::new(MockRegistry),
            channel_repo,
            schedule_repo,
        ))
    }

    // ── Tests ─────────────────────────────────────────────────────────────────

    // A channel whose active schedule has a currently-airing slot must emit
    // one BroadcastTransition carrying that slot on the first tick.
    #[tokio::test]
    async fn test_broadcast_transition_emitted_on_slot_change() {
        let channel_id = Uuid::new_v4();
        let slot_id = Uuid::new_v4();
        let ch = make_channel_with_webhook(channel_id);
        let slot = make_slot(channel_id, slot_id);
        let schedule = make_schedule(channel_id, vec![slot]);
        let channel_repo: Arc<dyn ChannelRepository> =
            Arc::new(MockChannelRepo { channels: vec![ch] });
        let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
            active: Some(schedule),
            saved: Arc::new(Mutex::new(vec![])),
        });
        let engine = make_engine(channel_repo.clone(), schedule_repo);
        let (event_tx, mut event_rx) = broadcast::channel(8);
        let mut state: HashMap<Uuid, ChannelPollState> = HashMap::new();
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let event = event_rx.try_recv().expect("expected an event");
        match event {
            DomainEvent::BroadcastTransition {
                channel_id: cid,
                slot: s,
            } => {
                assert_eq!(cid, channel_id);
                assert_eq!(s.id, slot_id);
            }
            _other => panic!("expected BroadcastTransition, got something else"),
        }
    }

    // Two consecutive ticks over an unchanged schedule: only the first tick
    // may emit; the second must be silent.
    #[tokio::test]
    async fn test_no_event_when_slot_unchanged() {
        let channel_id = Uuid::new_v4();
        let slot_id = Uuid::new_v4();
        let ch = make_channel_with_webhook(channel_id);
        let slot = make_slot(channel_id, slot_id);
        let schedule = make_schedule(channel_id, vec![slot]);
        let channel_repo: Arc<dyn ChannelRepository> =
            Arc::new(MockChannelRepo { channels: vec![ch] });
        let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
            active: Some(schedule),
            saved: Arc::new(Mutex::new(vec![])),
        });
        let engine = make_engine(channel_repo.clone(), schedule_repo);
        let (event_tx, mut event_rx) = broadcast::channel(8);
        let mut state: HashMap<Uuid, ChannelPollState> = HashMap::new();
        // First tick — emits BroadcastTransition
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let _ = event_rx.try_recv();
        // Second tick — same slot, no event
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        assert!(
            event_rx.try_recv().is_err(),
            "no event expected when slot unchanged"
        );
    }

    // When the schedule transitions from "has a current slot" to "empty",
    // the poller must emit NoSignal for that channel.
    #[tokio::test]
    async fn test_no_signal_emitted_when_slot_goes_to_none() {
        let channel_id = Uuid::new_v4();
        let slot_id = Uuid::new_v4();
        let ch = make_channel_with_webhook(channel_id);
        let slot = make_slot(channel_id, slot_id);
        let schedule_with_slot = make_schedule(channel_id, vec![slot]);
        // Repo that starts with a slot then returns empty schedule
        use std::sync::atomic::{AtomicBool, Ordering};
        struct SwitchingScheduleRepo {
            first: GeneratedSchedule,
            second: GeneratedSchedule,
            called: AtomicBool,
        }
        #[async_trait]
        impl ScheduleRepository for SwitchingScheduleRepo {
            async fn find_active(
                &self,
                _channel_id: ChannelId,
                _at: DateTime<Utc>,
            ) -> DomainResult<Option<GeneratedSchedule>> {
                // First call returns `first`; every later call returns `second`.
                if self.called.swap(true, Ordering::SeqCst) {
                    Ok(Some(self.second.clone()))
                } else {
                    Ok(Some(self.first.clone()))
                }
            }
            async fn find_latest(&self, _: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
                Ok(None)
            }
            async fn save(&self, _: &GeneratedSchedule) -> DomainResult<()> {
                Ok(())
            }
            async fn find_playback_history(
                &self,
                _: ChannelId,
            ) -> DomainResult<Vec<PlaybackRecord>> {
                Ok(vec![])
            }
            async fn save_playback_record(&self, _: &PlaybackRecord) -> DomainResult<()> {
                Ok(())
            }
            async fn find_last_slot_per_block(
                &self,
                _: ChannelId,
            ) -> DomainResult<HashMap<BlockId, MediaItemId>> {
                Ok(HashMap::new())
            }
            async fn list_schedule_history(&self, _: ChannelId) -> DomainResult<Vec<GeneratedSchedule>> { unimplemented!() }
            async fn get_schedule_by_id(&self, _: ChannelId, _: Uuid) -> DomainResult<Option<GeneratedSchedule>> { unimplemented!() }
            async fn delete_schedules_after(&self, _: ChannelId, _: u32) -> DomainResult<()> { unimplemented!() }
        }
        let now = Utc::now();
        let empty_schedule = GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from: now - Duration::hours(1),
            valid_until: now + Duration::hours(47),
            generation: 2,
            slots: vec![], // no current slot
        };
        let channel_repo: Arc<dyn ChannelRepository> =
            Arc::new(MockChannelRepo { channels: vec![ch] });
        let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(SwitchingScheduleRepo {
            first: schedule_with_slot,
            second: empty_schedule,
            called: AtomicBool::new(false),
        });
        let engine = make_engine(channel_repo.clone(), schedule_repo);
        let (event_tx, mut event_rx) = broadcast::channel(8);
        let mut state: HashMap<Uuid, ChannelPollState> = HashMap::new();
        // First tick — emits BroadcastTransition (slot present)
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let _ = event_rx.try_recv();
        // Second tick — schedule has no current slot, emits NoSignal
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let event = event_rx.try_recv().expect("expected NoSignal event");
        match event {
            DomainEvent::NoSignal { channel_id: cid } => assert_eq!(cid, channel_id),
            _ => panic!("expected NoSignal"),
        }
    }
}

View File

@@ -0,0 +1,209 @@
use std::sync::Arc;
use domain::{
DomainError, IMediaProvider, ProviderCapabilities, ProviderConfigRepository,
StreamingProtocol, StreamQuality,
};
use k_core::db::DatabasePool;
use crate::config::{Config, ConfigSource};
#[cfg(feature = "local-files")]
use infra::factory::build_transcode_settings_repository;
/// Everything produced while wiring up media providers at startup.
pub struct ProviderBundle {
    /// Registry holding every successfully constructed provider.
    pub registry: Arc<infra::ProviderRegistry>,
    /// Per-provider-id local file indexes (local-files providers only).
    #[cfg(feature = "local-files")]
    pub local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>>,
    /// First transcode manager built, if any local-files provider enabled one.
    #[cfg(feature = "local-files")]
    pub transcode_manager: Option<Arc<infra::TranscodeManager>>,
}
/// Build the provider registry from either DB-stored provider configs
/// (`CONFIG_SOURCE=db`) or environment-derived `Config` values.
///
/// Always returns a usable bundle: when nothing is configured, a `noop`
/// provider is registered so schedule endpoints fail gracefully.
///
/// Errors only on repository failures; individual provider construction
/// failures are logged and skipped.
pub async fn build_provider_registry(
    config: &Config,
    #[cfg_attr(not(feature = "local-files"), allow(unused_variables))]
    db_pool: &Arc<DatabasePool>,
    provider_config_repo: &Arc<dyn ProviderConfigRepository>,
) -> anyhow::Result<ProviderBundle> {
    #[cfg(feature = "local-files")]
    let mut local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>> = std::collections::HashMap::new();
    #[cfg(feature = "local-files")]
    let mut transcode_manager: Option<Arc<infra::TranscodeManager>> = None;
    let mut registry = infra::ProviderRegistry::new();
    if config.config_source == ConfigSource::Db {
        tracing::info!("CONFIG_SOURCE=db: loading provider configs from database");
        let rows = provider_config_repo.get_all().await?;
        for row in &rows {
            // Disabled rows are kept in the DB but never instantiated.
            if !row.enabled { continue; }
            match row.provider_type.as_str() {
                #[cfg(feature = "jellyfin")]
                "jellyfin" => {
                    // NOTE(review): a malformed config_json is silently skipped
                    // here — consider logging the parse error.
                    if let Ok(cfg) = serde_json::from_str::<infra::JellyfinConfig>(&row.config_json) {
                        tracing::info!("Loading Jellyfin provider [{}] from DB config", row.id);
                        registry.register(&row.id, Arc::new(infra::JellyfinMediaProvider::new(cfg)));
                    }
                }
                #[cfg(feature = "local-files")]
                "local_files" => {
                    // Config is a flat string map: files_dir (required),
                    // transcode_dir (optional, empty = disabled),
                    // cleanup_ttl_hours (optional, defaults to 24).
                    if let Ok(cfg_map) = serde_json::from_str::<std::collections::HashMap<String, String>>(&row.config_json)
                        && let Some(files_dir) = cfg_map.get("files_dir")
                    {
                        let transcode_dir = cfg_map.get("transcode_dir")
                            .filter(|s| !s.is_empty())
                            .map(std::path::PathBuf::from);
                        let cleanup_ttl_hours: u32 = cfg_map.get("cleanup_ttl_hours")
                            .and_then(|s| s.parse().ok())
                            .unwrap_or(24);
                        tracing::info!("Loading local-files provider [{}] from DB config at {:?}", row.id, files_dir);
                        match infra::factory::build_local_files_bundle(
                            db_pool,
                            std::path::PathBuf::from(files_dir),
                            transcode_dir,
                            cleanup_ttl_hours,
                            config.base_url.clone(),
                            &row.id,
                        ).await {
                            Ok(bundle) => {
                                // Kick off the initial filesystem scan in the background.
                                let scan_idx = Arc::clone(&bundle.local_index);
                                tokio::spawn(async move { scan_idx.rescan().await; });
                                if let Some(ref tm) = bundle.transcode_manager {
                                    tracing::info!("Transcoding enabled for [{}]", row.id);
                                    // Load persisted TTL override from transcode_settings table.
                                    let tm_clone = Arc::clone(tm);
                                    let repo = build_transcode_settings_repository(db_pool).await.ok();
                                    tokio::spawn(async move {
                                        if let Some(r) = repo
                                            && let Ok(Some(ttl)) = r.load_cleanup_ttl().await
                                        {
                                            tm_clone.set_cleanup_ttl(ttl);
                                        }
                                    });
                                }
                                registry.register(&row.id, bundle.provider);
                                // Keep the FIRST transcode manager only; later
                                // local-files rows reuse it.
                                if transcode_manager.is_none() {
                                    transcode_manager = bundle.transcode_manager;
                                }
                                local_index.insert(row.id.clone(), bundle.local_index);
                            }
                            Err(e) => tracing::warn!("Failed to build local-files provider [{}]: {}", row.id, e),
                        }
                    }
                }
                // Unknown provider types are ignored (forward compatibility).
                _ => {}
            }
        }
    } else {
        // Environment-based configuration: at most one Jellyfin provider
        // ("jellyfin") and one local-files provider ("local").
        #[cfg(feature = "jellyfin")]
        if let (Some(base_url), Some(api_key), Some(user_id)) = (
            &config.jellyfin_base_url,
            &config.jellyfin_api_key,
            &config.jellyfin_user_id,
        ) {
            tracing::info!("Media provider: Jellyfin at {}", base_url);
            registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
                base_url: base_url.clone(),
                api_key: api_key.clone(),
                user_id: user_id.clone(),
            })));
        }
        #[cfg(feature = "local-files")]
        if let Some(dir) = &config.local_files_dir {
            tracing::info!("Media provider: local files at {:?}", dir);
            match infra::factory::build_local_files_bundle(
                db_pool,
                dir.clone(),
                config.transcode_dir.clone(),
                config.transcode_cleanup_ttl_hours,
                config.base_url.clone(),
                "local",
            ).await {
                Ok(bundle) => {
                    // Kick off the initial filesystem scan in the background.
                    let scan_idx = Arc::clone(&bundle.local_index);
                    tokio::spawn(async move { scan_idx.rescan().await; });
                    if let Some(ref tm) = bundle.transcode_manager {
                        tracing::info!("Transcoding enabled; cache dir: {:?}", config.transcode_dir);
                        // Load persisted TTL override from transcode_settings table.
                        let tm_clone = Arc::clone(tm);
                        let repo = build_transcode_settings_repository(db_pool).await.ok();
                        tokio::spawn(async move {
                            if let Some(r) = repo
                                && let Ok(Some(ttl)) = r.load_cleanup_ttl().await
                            {
                                tm_clone.set_cleanup_ttl(ttl);
                            }
                        });
                    }
                    registry.register("local", bundle.provider);
                    transcode_manager = bundle.transcode_manager;
                    local_index.insert("local".to_string(), bundle.local_index);
                }
                Err(e) => tracing::warn!("local-files requires SQLite; ignoring LOCAL_FILES_DIR: {}", e),
            }
        }
    }
    // Fallback: register a no-op provider so the registry is never empty.
    if registry.is_empty() {
        tracing::warn!("No media provider configured. Set JELLYFIN_BASE_URL / LOCAL_FILES_DIR.");
        registry.register("noop", Arc::new(NoopMediaProvider));
    }
    Ok(ProviderBundle {
        registry: Arc::new(registry),
        #[cfg(feature = "local-files")]
        local_index,
        #[cfg(feature = "local-files")]
        transcode_manager,
    })
}
/// Stand-in provider used when no real media source is configured.
/// Returns a descriptive error for every call so schedule endpoints fail
/// gracefully rather than panicking at startup.
struct NoopMediaProvider;

#[async_trait::async_trait]
impl IMediaProvider for NoopMediaProvider {
    /// Report every optional capability as unsupported so clients hide
    /// provider-dependent features instead of calling endpoints that fail.
    fn capabilities(&self) -> ProviderCapabilities {
        ProviderCapabilities {
            collections: false,
            series: false,
            genres: false,
            tags: false,
            decade: false,
            search: false,
            // Never exercised in practice: every fetch/stream call below errors.
            streaming_protocol: StreamingProtocol::DirectFile,
            rescan: false,
            transcode: false,
        }
    }

    /// Always fails: there is no backing library to enumerate.
    async fn fetch_items(
        &self,
        _: &domain::MediaFilter,
    ) -> domain::DomainResult<Vec<domain::MediaItem>> {
        Err(DomainError::InfrastructureError(
            "No media provider configured. Set JELLYFIN_BASE_URL or LOCAL_FILES_DIR.".into(),
        ))
    }

    /// Always fails: no item can exist without a provider.
    async fn fetch_by_id(
        &self,
        _: &domain::MediaItemId,
    ) -> domain::DomainResult<Option<domain::MediaItem>> {
        Err(DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }

    /// Always fails: nothing can be streamed.
    async fn get_stream_url(
        &self,
        _: &domain::MediaItemId,
        _: &StreamQuality,
    ) -> domain::DomainResult<String> {
        Err(DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }
}

View File

@@ -0,0 +1,95 @@
//! Admin routes: SSE log stream + activity log.
use axum::{
Json,
extract::State,
response::{
IntoResponse,
sse::{Event, KeepAlive, Sse},
},
};
use tokio_stream::{StreamExt, wrappers::BroadcastStream};
use crate::{
dto::ActivityEventResponse,
error::ApiError,
extractors::OptionalCurrentUser,
state::AppState,
};
use axum::Router;
use axum::routing::get;
/// Build the admin sub-router: SSE log stream + recent activity feed.
/// Auth is enforced inside each handler (SSE clients pass the JWT via query string).
pub fn router() -> Router<AppState> {
    let routes = Router::new();
    routes
        .route("/activity", get(list_activity))
        .route("/logs", get(stream_logs))
}
/// Stream server log lines as Server-Sent Events.
///
/// Auth: requires a valid JWT passed as `?token=<jwt>` (EventSource cannot set headers).
/// On connect: flushes the recent history ring buffer, then streams live events.
pub async fn stream_logs(
    State(state): State<AppState>,
    OptionalCurrentUser(user): OptionalCurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    // Reject anonymous clients before any stream machinery is set up.
    if user.is_none() {
        return Err(ApiError::Unauthorized(
            "Authentication required for log stream".to_string(),
        ));
    }
    // Subscribe to the live channel *before* snapshotting history so no line
    // emitted in between is lost. A line landing in that window may be
    // delivered twice (once from history, once live), which is acceptable.
    let rx = state.log_tx.subscribe();
    let history: Vec<_> = state
        .log_history
        .lock()
        .map(|h| h.iter().cloned().collect())
        // Poisoned mutex → serve no history rather than failing the connection.
        .unwrap_or_default();
    // Replay the buffered history first; serialization failures degrade to
    // an empty data field instead of killing the stream.
    let history_stream = tokio_stream::iter(history).map(|line| {
        let data = serde_json::to_string(&line).unwrap_or_default();
        Ok::<Event, String>(Event::default().data(data))
    });
    let live_stream = BroadcastStream::new(rx).filter_map(|result| match result {
        Ok(line) => {
            let data = serde_json::to_string(&line).unwrap_or_default();
            Some(Ok::<Event, String>(Event::default().data(data)))
        }
        // Slow consumer fell behind the broadcast buffer: emit a synthetic
        // WARN line telling the client how many lines were dropped.
        Err(tokio_stream::wrappers::errors::BroadcastStreamRecvError::Lagged(n)) => {
            let data = format!(
                r#"{{"level":"WARN","target":"sse","message":"[{n} log lines dropped — buffer overrun]","timestamp":""}}"#
            );
            Some(Ok(Event::default().data(data)))
        }
    });
    // History first, then live tail; keep-alive pings stop proxies from
    // closing the idle connection.
    let combined = history_stream.chain(live_stream);
    Ok(Sse::new(combined).keep_alive(KeepAlive::default()))
}
/// Return recent activity log entries.
///
/// Auth: requires a valid JWT (Authorization: Bearer or ?token=).
pub async fn list_activity(
    State(state): State<AppState>,
    OptionalCurrentUser(user): OptionalCurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    // Only authenticated users may read the activity log.
    if user.is_none() {
        return Err(ApiError::Unauthorized(
            "Authentication required".to_string(),
        ));
    }
    // Fixed window: the 50 most recent events.
    let events = state.activity_log_repo.recent(50).await.map_err(ApiError::from)?;
    let mut response: Vec<ActivityEventResponse> = Vec::with_capacity(events.len());
    for event in events {
        response.push(event.into());
    }
    Ok(Json(response))
}

View File

@@ -0,0 +1,513 @@
//! Admin provider management routes.
//!
//! All routes require an admin user. Allows listing, creating, updating, deleting, and
//! testing media provider configs stored in the DB. Only available when
//! CONFIG_SOURCE=db.
use std::collections::HashMap;
use std::sync::Arc;
use axum::Router;
use axum::extract::{Path, State};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::routing::{get, post, put};
use axum::Json;
use domain::errors::DomainResult;
use domain::ProviderConfigRow;
use serde::{Deserialize, Serialize};
use crate::config::ConfigSource;
use crate::error::ApiError;
use crate::extractors::AdminUser;
use crate::state::AppState;
// ---------------------------------------------------------------------------
// DTOs
// ---------------------------------------------------------------------------
/// Validate that an instance id is a safe slug (alphanumeric + hyphens, 1-40 chars).
///
/// Length is measured in bytes; any non-ASCII character fails the
/// per-character check, so multi-byte input is rejected either way.
fn is_valid_instance_id(id: &str) -> bool {
    (1..=40).contains(&id.len())
        && id.bytes().all(|b| b.is_ascii_alphanumeric() || b == b'-')
}
/// Body for POST / — create a new provider instance.
#[derive(Debug, Deserialize)]
pub struct CreateProviderRequest {
    /// Instance slug; validated by `is_valid_instance_id` (1-40 chars, alnum + hyphen).
    pub id: String,
    /// One of "jellyfin" | "local_files".
    pub provider_type: String,
    /// Flat string→string provider settings; persisted as a JSON string.
    pub config_json: HashMap<String, String>,
    pub enabled: bool,
}

/// Body for PUT /{id} — replace config and enabled flag; id/type are immutable.
#[derive(Debug, Deserialize)]
pub struct UpdateProviderRequest {
    pub config_json: HashMap<String, String>,
    pub enabled: bool,
}

/// Body for POST /test — dry-run connectivity check; nothing is persisted.
#[derive(Debug, Deserialize)]
pub struct TestProviderRequest {
    pub provider_type: String,
    pub config_json: HashMap<String, String>,
}

/// Provider row as returned to the UI; secret-looking values are masked
/// by `mask_config`, hence `serde_json::Value` rather than `String`.
#[derive(Debug, Serialize)]
pub struct ProviderConfigResponse {
    pub id: String,
    pub provider_type: String,
    pub config_json: HashMap<String, serde_json::Value>,
    pub enabled: bool,
}

/// Outcome of a connectivity test: success flag + human-readable detail.
#[derive(Debug, Serialize)]
pub struct TestResult {
    pub ok: bool,
    pub message: String,
}
// ---------------------------------------------------------------------------
// Router
// ---------------------------------------------------------------------------
/// Provider-management sub-router. All handlers require an admin user;
/// mutating routes additionally require CONFIG_SOURCE=db.
pub fn router() -> Router<AppState> {
    Router::new()
        // Collection: list all instances / create a new one.
        .route("/", get(list_providers).post(create_provider))
        // Instance: update or remove by slug.
        .route("/{id}", put(update_provider).delete(delete_provider))
        // Dry-run connectivity test; never persists anything.
        .route("/test", post(test_provider))
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/// Parse a stored config JSON string and mask any value whose key looks
/// secret-bearing (contains "key", "password", "secret", or "token",
/// case-insensitively). Unparseable input yields an empty map.
fn mask_config(raw: &str) -> HashMap<String, serde_json::Value> {
    const SECRET_HINTS: [&str; 4] = ["key", "password", "secret", "token"];
    let parsed: HashMap<String, serde_json::Value> =
        serde_json::from_str(raw).unwrap_or_default();
    let mut masked = HashMap::with_capacity(parsed.len());
    for (key, value) in parsed {
        let lowered = key.to_lowercase();
        let is_secret = SECRET_HINTS.iter().any(|hint| lowered.contains(hint));
        let shown = match value {
            // Only non-empty string secrets are replaced; empty strings and
            // non-string values pass through untouched.
            serde_json::Value::String(ref s) if is_secret && !s.is_empty() => {
                serde_json::Value::String("***".to_string())
            }
            other => other,
        };
        masked.insert(key, shown);
    }
    masked
}
/// 409 response used by every mutating handler when provider configs are
/// not DB-backed (CONFIG_SOURCE != db), i.e. UI editing is disabled.
fn conflict_response() -> impl IntoResponse {
    let body = serde_json::json!({
        "error": "UI config disabled — set CONFIG_SOURCE=db on the server"
    });
    (StatusCode::CONFLICT, Json(body))
}
/// Rebuild the live provider registry from the current DB rows.
///
/// Called after every create/update/delete so changes take effect without a
/// restart. Disabled rows are skipped; rows with malformed config or a type
/// that was compiled out fall through silently.
async fn rebuild_registry(state: &AppState) -> DomainResult<()> {
    let rows = state.provider_config_repo.get_all().await?;
    let mut new_registry = infra::ProviderRegistry::new();
    #[cfg(feature = "local-files")]
    let mut new_local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>> =
        std::collections::HashMap::new();
    // Only the first enabled local-files instance contributes a transcode
    // manager; later instances are registered but don't replace it.
    #[cfg(feature = "local-files")]
    let mut first_transcode_manager: Option<Arc<infra::TranscodeManager>> = None;
    for row in &rows {
        if !row.enabled {
            continue;
        }
        match row.provider_type.as_str() {
            #[cfg(feature = "jellyfin")]
            "jellyfin" => {
                // Malformed config JSON silently skips this instance.
                if let Ok(cfg) =
                    serde_json::from_str::<infra::JellyfinConfig>(&row.config_json)
                {
                    new_registry.register(
                        &row.id,
                        Arc::new(infra::JellyfinMediaProvider::new(cfg)),
                    );
                }
            }
            #[cfg(feature = "local-files")]
            "local_files" => {
                let config: std::collections::HashMap<String, String> =
                    match serde_json::from_str(&row.config_json) {
                        Ok(c) => c,
                        // Malformed config → skip this instance.
                        Err(_) => continue,
                    };
                // files_dir is the only mandatory key.
                let files_dir = match config.get("files_dir") {
                    Some(d) => std::path::PathBuf::from(d),
                    None => continue,
                };
                // Empty string means "no transcode dir", same as absent.
                let transcode_dir = config
                    .get("transcode_dir")
                    .filter(|s| !s.is_empty())
                    .map(std::path::PathBuf::from);
                // Default cleanup TTL: 24 hours.
                let cleanup_ttl_hours: u32 = config
                    .get("cleanup_ttl_hours")
                    .and_then(|s| s.parse().ok())
                    .unwrap_or(24);
                let base_url = state.config.base_url.clone();
                match infra::factory::build_local_files_bundle(
                    &state.db_pool,
                    files_dir,
                    transcode_dir,
                    cleanup_ttl_hours,
                    base_url,
                    &row.id,
                ).await {
                    Ok(bundle) => {
                        // Kick off a background rescan; don't block the request.
                        let scan_idx = Arc::clone(&bundle.local_index);
                        tokio::spawn(async move { scan_idx.rescan().await; });
                        new_registry.register(&row.id, bundle.provider);
                        new_local_index.insert(row.id.clone(), bundle.local_index);
                        // NOTE(review): unlike the startup path, this rebuild does
                        // not re-apply the persisted cleanup-TTL override from the
                        // transcode_settings table — confirm that is intended.
                        if first_transcode_manager.is_none() {
                            first_transcode_manager = bundle.transcode_manager;
                        }
                    }
                    Err(e) => {
                        tracing::warn!("local_files provider [{}] requires SQLite; skipping: {}", row.id, e);
                        continue;
                    }
                }
            }
            _ => {}
        }
    }
    // Never leave the registry empty: the noop provider yields clear errors
    // instead of panics when no real provider survives the rebuild.
    if new_registry.is_empty() {
        new_registry.register("noop", Arc::new(NoopMediaProvider));
    }
    // Swap the new registry in atomically behind the RwLock.
    *state.provider_registry.write().await = Arc::new(new_registry);
    #[cfg(feature = "local-files")]
    {
        *state.local_index.write().await = new_local_index;
        *state.transcode_manager.write().await = first_transcode_manager;
    }
    Ok(())
}
// ---------------------------------------------------------------------------
// Handlers
// ---------------------------------------------------------------------------
/// GET / — list every configured provider instance (enabled or not),
/// with secret-looking config values masked.
pub async fn list_providers(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
) -> Result<impl IntoResponse, ApiError> {
    let rows = state
        .provider_config_repo
        .get_all()
        .await
        .map_err(ApiError::from)?;
    let mut response = Vec::with_capacity(rows.len());
    for row in &rows {
        response.push(ProviderConfigResponse {
            id: row.id.clone(),
            provider_type: row.provider_type.clone(),
            // Never return raw secrets to the UI.
            config_json: mask_config(&row.config_json),
            enabled: row.enabled,
        });
    }
    Ok(Json(response))
}
/// POST / — create a new provider instance and hot-reload the registry.
///
/// Admin-only. Returns 409 when UI config editing is disabled
/// (CONFIG_SOURCE != db) or when the id already exists; 400 on invalid
/// id/type; 201 with the masked config on success.
pub async fn create_provider(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Json(payload): Json<CreateProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
    if state.config.config_source != ConfigSource::Db {
        return Ok(conflict_response().into_response());
    }
    if !is_valid_instance_id(&payload.id) {
        return Err(ApiError::Validation(
            "Instance id must be 1-40 alphanumeric+hyphen characters".to_string(),
        ));
    }
    // Reject provider types this build doesn't know about up front.
    let known = matches!(payload.provider_type.as_str(), "jellyfin" | "local_files");
    if !known {
        return Err(ApiError::Validation(format!(
            "Unknown provider type: {}",
            payload.provider_type
        )));
    }
    // Check for uniqueness
    // NOTE(review): check-then-upsert is racy — two concurrent creates with
    // the same id can both pass this check and the later upsert silently
    // overwrites. A unique-constrained insert in the repo would close the
    // window; confirm whether that matters here.
    if state
        .provider_config_repo
        .get_by_id(&payload.id)
        .await
        .map_err(ApiError::from)?
        .is_some()
    {
        return Ok((
            StatusCode::CONFLICT,
            Json(serde_json::json!({ "error": format!("Provider instance '{}' already exists", payload.id) })),
        ).into_response());
    }
    let config_json = serde_json::to_string(&payload.config_json)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize config: {}", e)))?;
    let row = ProviderConfigRow {
        id: payload.id.clone(),
        provider_type: payload.provider_type.clone(),
        config_json: config_json.clone(),
        enabled: payload.enabled,
        updated_at: chrono::Utc::now().to_rfc3339(),
    };
    state
        .provider_config_repo
        .upsert(&row)
        .await
        .map_err(ApiError::from)?;
    // Rebuild the live registry so the new instance is usable immediately.
    rebuild_registry(&state)
        .await
        .map_err(ApiError::from)?;
    // Echo the stored row back with secrets masked.
    let response = ProviderConfigResponse {
        id: payload.id,
        provider_type: payload.provider_type,
        config_json: mask_config(&config_json),
        enabled: payload.enabled,
    };
    Ok((StatusCode::CREATED, Json(response)).into_response())
}
/// PUT /{id} — replace an instance's config and enabled flag, then
/// hot-reload the registry.
///
/// The id and provider_type are immutable: they are copied from the
/// existing row, not taken from the payload. 409 when UI editing is
/// disabled; 404 when the instance doesn't exist.
pub async fn update_provider(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Path(instance_id): Path<String>,
    Json(payload): Json<UpdateProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
    if state.config.config_source != ConfigSource::Db {
        return Ok(conflict_response().into_response());
    }
    let existing = state
        .provider_config_repo
        .get_by_id(&instance_id)
        .await
        .map_err(ApiError::from)?
        .ok_or_else(|| ApiError::NotFound(format!("Provider instance '{}' not found", instance_id)))?;
    let config_json = serde_json::to_string(&payload.config_json)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize config: {}", e)))?;
    let row = ProviderConfigRow {
        id: existing.id.clone(),
        provider_type: existing.provider_type.clone(),
        config_json: config_json.clone(),
        enabled: payload.enabled,
        updated_at: chrono::Utc::now().to_rfc3339(),
    };
    state
        .provider_config_repo
        .upsert(&row)
        .await
        .map_err(ApiError::from)?;
    // Apply the change to the live registry immediately.
    rebuild_registry(&state)
        .await
        .map_err(ApiError::from)?;
    // Return the stored row with secrets masked.
    let response = ProviderConfigResponse {
        id: existing.id,
        provider_type: existing.provider_type,
        config_json: mask_config(&config_json),
        enabled: payload.enabled,
    };
    Ok(Json(response).into_response())
}
/// DELETE /{id} — remove an instance and hot-reload the registry.
/// Idempotent: deleting a nonexistent id still returns 204.
pub async fn delete_provider(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Path(instance_id): Path<String>,
) -> Result<impl IntoResponse, ApiError> {
    // Mutations are only allowed when configs are DB-backed.
    if state.config.config_source != ConfigSource::Db {
        return Ok(conflict_response().into_response());
    }
    state.provider_config_repo.delete(&instance_id).await.map_err(ApiError::from)?;
    // Drop the instance from the live registry right away.
    rebuild_registry(&state).await.map_err(ApiError::from)?;
    Ok(StatusCode::NO_CONTENT.into_response())
}
/// POST /test — dry-run connectivity check for a not-yet-saved config.
/// Unknown types are reported in the result body, not as an HTTP error.
pub async fn test_provider(
    State(_state): State<AppState>,
    AdminUser(_user): AdminUser,
    Json(payload): Json<TestProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let provider_type = payload.provider_type.as_str();
    let result = if provider_type == "jellyfin" {
        test_jellyfin(&payload.config_json).await
    } else if provider_type == "local_files" {
        test_local_files(&payload.config_json)
    } else {
        TestResult {
            ok: false,
            message: "Unknown provider type".to_string(),
        }
    };
    Ok(Json(result))
}
/// Probe a Jellyfin server by calling /System/Info with the supplied API key.
/// Requires `base_url` and `api_key` in the config map; never errors —
/// all failure modes are folded into the returned `TestResult`.
async fn test_jellyfin(config: &HashMap<String, String>) -> TestResult {
    let Some(raw_url) = config.get("base_url") else {
        return TestResult {
            ok: false,
            message: "Missing field: base_url".to_string(),
        };
    };
    let Some(api_key) = config.get("api_key") else {
        return TestResult {
            ok: false,
            message: "Missing field: api_key".to_string(),
        };
    };
    // Normalize trailing slashes so the path joins cleanly.
    let base_url = raw_url.trim_end_matches('/').to_string();
    let endpoint = format!("{}/System/Info", base_url);
    let outcome = reqwest::Client::new()
        .get(&endpoint)
        .header("X-Emby-Token", api_key.as_str())
        .send()
        .await;
    match outcome {
        Ok(resp) if resp.status().is_success() => TestResult {
            ok: true,
            message: format!("Connected successfully (HTTP {})", resp.status().as_u16()),
        },
        Ok(resp) => TestResult {
            ok: false,
            message: format!("Jellyfin returned HTTP {}", resp.status().as_u16()),
        },
        Err(e) => TestResult {
            ok: false,
            message: format!("Connection failed: {}", e),
        },
    }
}
/// Check that the configured `files_dir` exists and is a directory.
/// Purely local — no I/O beyond a filesystem stat.
fn test_local_files(config: &HashMap<String, String>) -> TestResult {
    let Some(path) = config.get("files_dir") else {
        return TestResult {
            ok: false,
            message: "Missing field: files_dir".to_string(),
        };
    };
    let dir = std::path::Path::new(path);
    let ok = dir.exists() && dir.is_dir();
    let message = if ok {
        format!("Directory exists: {}", path)
    } else {
        format!("Path does not exist or is not a directory: {}", path)
    };
    TestResult { ok, message }
}
// ---------------------------------------------------------------------------
// NoopMediaProvider (local copy — avoids pub-ing it from main.rs)
// ---------------------------------------------------------------------------
/// Stand-in provider registered when a registry rebuild leaves no real
/// providers; every call errors with a descriptive message.
struct NoopMediaProvider;

#[async_trait::async_trait]
impl domain::IMediaProvider for NoopMediaProvider {
    /// Advertise no capabilities so clients hide provider-dependent UI.
    fn capabilities(&self) -> domain::ProviderCapabilities {
        domain::ProviderCapabilities {
            collections: false,
            series: false,
            genres: false,
            tags: false,
            decade: false,
            search: false,
            // Never exercised: all calls below return an error.
            streaming_protocol: domain::StreamingProtocol::DirectFile,
            rescan: false,
            transcode: false,
        }
    }

    /// Always fails: there is no backing library to enumerate.
    async fn fetch_items(
        &self,
        _: &domain::MediaFilter,
    ) -> domain::DomainResult<Vec<domain::MediaItem>> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }

    /// Always fails: no item can exist without a provider.
    async fn fetch_by_id(
        &self,
        _: &domain::MediaItemId,
    ) -> domain::DomainResult<Option<domain::MediaItem>> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }

    /// Always fails: nothing can be streamed.
    async fn get_stream_url(
        &self,
        _: &domain::MediaItemId,
        _: &domain::StreamQuality,
    ) -> domain::DomainResult<String> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }
}

View File

@@ -6,13 +6,13 @@ use axum::{
};
use crate::{
dto::{LoginRequest, RegisterRequest, TokenResponse, UserResponse},
dto::{LoginRequest, RefreshRequest, RegisterRequest, TokenResponse, UserResponse},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
use super::create_jwt;
use super::{create_jwt, create_refresh_jwt};
/// Login with email + password → JWT token
pub(super) async fn login(
@@ -35,6 +35,12 @@ pub(super) async fn login(
}
let token = create_jwt(&user, &state)?;
let refresh_token = if payload.remember_me {
Some(create_refresh_jwt(&user, &state)?)
} else {
None
};
let _ = state.activity_log_repo.log("user_login", user.email.as_ref(), None).await;
Ok((
StatusCode::OK,
@@ -42,6 +48,7 @@ pub(super) async fn login(
access_token: token,
token_type: "Bearer".to_string(),
expires_in: state.config.jwt_expiry_hours * 3600,
refresh_token,
}),
))
}
@@ -70,6 +77,7 @@ pub(super) async fn register(
access_token: token,
token_type: "Bearer".to_string(),
expires_in: state.config.jwt_expiry_hours * 3600,
refresh_token: None,
}),
))
}
@@ -85,6 +93,47 @@ pub(super) async fn me(CurrentUser(user): CurrentUser) -> Result<impl IntoRespon
id: user.id,
email: user.email.into_inner(),
created_at: user.created_at,
is_admin: user.is_admin,
}))
}
/// Exchange a valid refresh token for a new access + refresh token pair
#[cfg(feature = "auth-jwt")]
pub(super) async fn refresh_token(
State(state): State<AppState>,
Json(payload): Json<RefreshRequest>,
) -> Result<impl IntoResponse, ApiError> {
let validator = state
.jwt_validator
.as_ref()
.ok_or_else(|| ApiError::Internal("JWT not configured".to_string()))?;
let claims = validator
.validate_refresh_token(&payload.refresh_token)
.map_err(|e| {
tracing::debug!("Refresh token validation failed: {:?}", e);
ApiError::Unauthorized("Invalid or expired refresh token".to_string())
})?;
let user_id: uuid::Uuid = claims
.sub
.parse()
.map_err(|_| ApiError::Unauthorized("Invalid user ID in token".to_string()))?;
let user = state
.user_service
.find_by_id(user_id)
.await
.map_err(|e| ApiError::Internal(format!("Failed to fetch user: {}", e)))?;
let access_token = create_jwt(&user, &state)?;
let new_refresh_token = create_refresh_jwt(&user, &state)?;
Ok(Json(TokenResponse {
access_token,
token_type: "Bearer".to_string(),
expires_in: state.config.jwt_expiry_hours * 3600,
refresh_token: Some(new_refresh_token),
}))
}
@@ -100,5 +149,6 @@ pub(super) async fn get_token(
access_token: token,
token_type: "Bearer".to_string(),
expires_in: state.config.jwt_expiry_hours * 3600,
refresh_token: None,
}))
}

View File

@@ -18,7 +18,9 @@ pub fn router() -> Router<AppState> {
.route("/me", get(local::me));
#[cfg(feature = "auth-jwt")]
let r = r.route("/token", post(local::get_token));
let r = r
.route("/token", post(local::get_token))
.route("/refresh", post(local::refresh_token));
#[cfg(feature = "auth-oidc")]
let r = r
@@ -28,7 +30,7 @@ pub fn router() -> Router<AppState> {
r
}
/// Helper: create JWT for a user
/// Helper: create access JWT for a user
#[cfg(feature = "auth-jwt")]
pub(super) fn create_jwt(user: &domain::User, state: &AppState) -> Result<String, ApiError> {
let validator = state
@@ -45,3 +47,21 @@ pub(super) fn create_jwt(user: &domain::User, state: &AppState) -> Result<String
pub(super) fn create_jwt(_user: &domain::User, _state: &AppState) -> Result<String, ApiError> {
Err(ApiError::Internal("JWT feature not enabled".to_string()))
}
/// Helper: create refresh JWT for a user
#[cfg(feature = "auth-jwt")]
pub(super) fn create_refresh_jwt(user: &domain::User, state: &AppState) -> Result<String, ApiError> {
let validator = state
.jwt_validator
.as_ref()
.ok_or_else(|| ApiError::Internal("JWT not configured".to_string()))?;
validator
.create_refresh_token(user)
.map_err(|e| ApiError::Internal(format!("Failed to create refresh token: {}", e)))
}
#[cfg(not(feature = "auth-jwt"))]
pub(super) fn create_refresh_jwt(_user: &domain::User, _state: &AppState) -> Result<String, ApiError> {
Err(ApiError::Internal("JWT feature not enabled".to_string()))
}

View File

@@ -8,7 +8,7 @@ use chrono::Utc;
use serde::Deserialize;
use uuid::Uuid;
use domain::{DomainError, ScheduleEngineService};
use domain::{DomainError, ScheduleEngineService, StreamQuality};
use crate::{
dto::{CurrentBroadcastResponse, ScheduledSlotResponse},
@@ -61,8 +61,7 @@ pub(super) async fn get_current_broadcast(
Some(broadcast) => {
let block_access_mode = channel
.schedule_config
.blocks
.iter()
.all_blocks()
.find(|b| b.id == broadcast.slot.source_block_id)
.map(|b| b.access_mode.clone())
.unwrap_or_default();
@@ -130,11 +129,18 @@ pub(super) async fn get_epg(
/// Redirect to the stream URL for whatever is currently playing.
/// Returns 307 Temporary Redirect so the client fetches from the media provider directly.
/// Returns 204 No Content when the channel is in a gap (no-signal).
#[derive(Debug, Deserialize)]
pub(super) struct StreamQuery {
/// "direct" | bitrate in bps as string (e.g. "8000000"). Defaults to "direct".
quality: Option<String>,
}
pub(super) async fn get_stream(
State(state): State<AppState>,
Path(channel_id): Path<Uuid>,
OptionalCurrentUser(user): OptionalCurrentUser,
headers: HeaderMap,
Query(query): Query<StreamQuery>,
) -> Result<Response, ApiError> {
let channel = state.channel_service.find_by_id(channel_id).await?;
@@ -161,8 +167,7 @@ pub(super) async fn get_stream(
// Block-level access check
if let Some(block) = channel
.schedule_config
.blocks
.iter()
.all_blocks()
.find(|b| b.id == broadcast.slot.source_block_id)
{
check_access(
@@ -174,9 +179,13 @@ pub(super) async fn get_stream(
)?;
}
let stream_quality = match query.quality.as_deref() {
Some("direct") | None => StreamQuality::Direct,
Some(bps_str) => StreamQuality::Transcode(bps_str.parse::<u32>().unwrap_or(40_000_000)),
};
let url = state
.schedule_engine
.get_stream_url(&broadcast.slot.item.id)
.get_stream_url(&broadcast.slot.item.id, &stream_quality)
.await?;
Ok(Redirect::temporary(&url).into_response())

View File

@@ -0,0 +1,72 @@
use axum::{
Json,
extract::{Path, State},
http::StatusCode,
response::IntoResponse,
};
use uuid::Uuid;
use crate::{
dto::{ChannelResponse, ConfigSnapshotResponse, PatchSnapshotRequest},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
use super::require_owner;
/// List all config snapshots for a channel (owner-only).
pub(super) async fn list_config_history(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    // Ownership gate: 404s on unknown channel, then owner check.
    let channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;
    let snapshots = state
        .channel_service
        .list_config_snapshots(channel_id)
        .await?;
    let mut response: Vec<ConfigSnapshotResponse> = Vec::with_capacity(snapshots.len());
    for snapshot in snapshots {
        response.push(snapshot.into());
    }
    Ok(Json(response))
}
/// Rename a single config snapshot's label (owner-only).
/// 404 when the snapshot doesn't exist for this channel.
pub(super) async fn patch_config_snapshot(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, snap_id)): Path<(Uuid, Uuid)>,
    Json(payload): Json<PatchSnapshotRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;
    let patched = state
        .channel_service
        .patch_config_snapshot_label(channel_id, snap_id, payload.label)
        .await?;
    match patched {
        Some(snapshot) => Ok(Json(ConfigSnapshotResponse::from(snapshot))),
        None => Err(ApiError::NotFound("Snapshot not found".into())),
    }
}
/// Restore a channel's configuration from a stored snapshot (owner-only).
/// Returns the updated channel; logs a best-effort audit entry.
pub(super) async fn restore_config_snapshot(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, snap_id)): Path<(Uuid, Uuid)>,
) -> Result<impl IntoResponse, ApiError> {
    let channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;
    let updated = state
        .channel_service
        .restore_config_snapshot(channel_id, snap_id)
        .await
        // The service signals a missing snapshot via ChannelNotFound; map it
        // to a 404 about the snapshot rather than the channel.
        .map_err(|e| match e {
            domain::DomainError::ChannelNotFound(_) => ApiError::NotFound("Snapshot not found".into()),
            other => ApiError::from(other),
        })?;
    // Best-effort audit log; failures are deliberately ignored.
    let _ = state
        .activity_log_repo
        .log("config_restored", &snap_id.to_string(), Some(channel_id))
        .await;
    Ok((StatusCode::OK, Json(ChannelResponse::from(updated))))
}

View File

@@ -5,6 +5,7 @@ use axum::{
response::IntoResponse,
};
use chrono::Utc;
use domain;
use uuid::Uuid;
use crate::{
@@ -47,10 +48,28 @@ pub(super) async fn create_channel(
channel.access_password_hash = Some(infra::auth::hash_password(pw));
changed = true;
}
if let Some(url) = payload.webhook_url {
channel.webhook_url = Some(url);
changed = true;
}
if let Some(interval) = payload.webhook_poll_interval_secs {
channel.webhook_poll_interval_secs = interval;
changed = true;
}
if let Some(tmpl) = payload.webhook_body_template {
channel.webhook_body_template = Some(tmpl);
changed = true;
}
if let Some(headers) = payload.webhook_headers {
channel.webhook_headers = Some(headers);
changed = true;
}
if changed {
channel = state.channel_service.update(channel).await?;
}
let _ = state.event_tx.send(domain::DomainEvent::ChannelCreated { channel: channel.clone() });
let _ = state.activity_log_repo.log("channel_created", &channel.name, Some(channel.id)).await;
Ok((StatusCode::CREATED, Json(ChannelResponse::from(channel))))
}
@@ -83,7 +102,7 @@ pub(super) async fn update_channel(
channel.timezone = tz;
}
if let Some(sc) = payload.schedule_config {
channel.schedule_config = sc;
channel.schedule_config = domain::ScheduleConfig::from(sc);
}
if let Some(rp) = payload.recycle_policy {
channel.recycle_policy = rp;
@@ -110,9 +129,23 @@ pub(super) async fn update_channel(
if let Some(opacity) = payload.logo_opacity {
channel.logo_opacity = opacity.clamp(0.0, 1.0);
}
if let Some(url) = payload.webhook_url {
channel.webhook_url = url;
}
if let Some(interval) = payload.webhook_poll_interval_secs {
channel.webhook_poll_interval_secs = interval;
}
if let Some(tmpl) = payload.webhook_body_template {
channel.webhook_body_template = tmpl;
}
if let Some(headers) = payload.webhook_headers {
channel.webhook_headers = headers;
}
channel.updated_at = Utc::now();
let channel = state.channel_service.update(channel).await?;
let _ = state.event_tx.send(domain::DomainEvent::ChannelUpdated { channel: channel.clone() });
let _ = state.activity_log_repo.log("channel_updated", &channel.name, Some(channel.id)).await;
Ok(Json(ChannelResponse::from(channel)))
}
@@ -123,5 +156,7 @@ pub(super) async fn delete_channel(
) -> Result<impl IntoResponse, ApiError> {
// ChannelService::delete enforces ownership internally
state.channel_service.delete(channel_id, user.id).await?;
let _ = state.event_tx.send(domain::DomainEvent::ChannelDeleted { channel_id });
let _ = state.activity_log_repo.log("channel_deleted", &channel_id.to_string(), Some(channel_id)).await;
Ok(StatusCode::NO_CONTENT)
}

View File

@@ -13,6 +13,7 @@ use domain::{AccessMode, User};
use crate::{error::ApiError, state::AppState};
mod broadcast;
mod config_history;
mod crud;
mod schedule;
@@ -27,9 +28,30 @@ pub fn router() -> Router<AppState> {
"/{id}/schedule",
post(schedule::generate_schedule).get(schedule::get_active_schedule),
)
.route("/{id}/schedule/history", get(schedule::list_schedule_history))
.route(
"/{id}/schedule/history/{gen_id}",
get(schedule::get_schedule_history_entry),
)
.route(
"/{id}/schedule/history/{gen_id}/rollback",
post(schedule::rollback_schedule),
)
.route("/{id}/now", get(broadcast::get_current_broadcast))
.route("/{id}/epg", get(broadcast::get_epg))
.route("/{id}/stream", get(broadcast::get_stream))
.route(
"/{id}/config/history",
get(config_history::list_config_history),
)
.route(
"/{id}/config/history/{snap_id}",
axum::routing::patch(config_history::patch_config_snapshot),
)
.route(
"/{id}/config/history/{snap_id}/restore",
post(config_history::restore_config_snapshot),
)
}
// ============================================================================

View File

@@ -7,10 +7,10 @@ use axum::{
use chrono::Utc;
use uuid::Uuid;
use domain::DomainError;
use domain::{self, DomainError};
use crate::{
dto::ScheduleResponse,
dto::{ScheduleHistoryEntry, ScheduleResponse},
error::ApiError,
extractors::CurrentUser,
state::AppState,
@@ -18,7 +18,7 @@ use crate::{
use super::require_owner;
/// Trigger 48-hour schedule generation for a channel, starting from now.
/// Trigger 7-day schedule generation for a channel, starting from now.
/// Replaces any existing schedule for the same window.
pub(super) async fn generate_schedule(
State(state): State<AppState>,
@@ -33,10 +33,16 @@ pub(super) async fn generate_schedule(
.generate_schedule(channel_id, Utc::now())
.await?;
let _ = state.event_tx.send(domain::DomainEvent::ScheduleGenerated {
channel_id,
schedule: schedule.clone(),
});
let detail = format!("{} slots", schedule.slots.len());
let _ = state.activity_log_repo.log("schedule_generated", &detail, Some(channel_id)).await;
Ok((StatusCode::CREATED, Json(ScheduleResponse::from(schedule))))
}
/// Return the currently active 48-hour schedule for a channel.
/// Return the currently active 7-day schedule for a channel.
/// 404 if no schedule has been generated yet — call POST /:id/schedule first.
pub(super) async fn get_active_schedule(
State(state): State<AppState>,
@@ -54,3 +60,75 @@ pub(super) async fn get_active_schedule(
Ok(Json(ScheduleResponse::from(schedule)))
}
/// List all schedule generations for a channel, newest first.
/// Returns lightweight entries (no slots).
pub(super) async fn list_schedule_history(
State(state): State<AppState>,
CurrentUser(user): CurrentUser,
Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
let channel = state.channel_service.find_by_id(channel_id).await?;
require_owner(&channel, user.id)?;
let history = state.schedule_engine.list_schedule_history(channel_id).await?;
let entries: Vec<ScheduleHistoryEntry> = history.into_iter().map(Into::into).collect();
Ok(Json(entries))
}
/// Fetch a single historical schedule with all its slots.
pub(super) async fn get_schedule_history_entry(
State(state): State<AppState>,
CurrentUser(user): CurrentUser,
Path((channel_id, gen_id)): Path<(Uuid, Uuid)>,
) -> Result<impl IntoResponse, ApiError> {
let channel = state.channel_service.find_by_id(channel_id).await?;
require_owner(&channel, user.id)?;
let schedule = state
.schedule_engine
.get_schedule_by_id(channel_id, gen_id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Schedule {} not found", gen_id)))?;
Ok(Json(ScheduleResponse::from(schedule)))
}
/// Roll back to a previous schedule generation.
///
/// Deletes all generations after `gen_id`'s generation, then generates a fresh
/// schedule from now (inheriting the rolled-back generation as the base for
/// recycle-policy history).
pub(super) async fn rollback_schedule(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, gen_id)): Path<(Uuid, Uuid)>,
) -> Result<impl IntoResponse, ApiError> {
    // Only the channel owner may rewrite its schedule history.
    let channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;

    // Resolve the rollback target; 404 when the id is unknown for this channel.
    let target = state
        .schedule_engine
        .get_schedule_by_id(channel_id, gen_id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Schedule {} not found", gen_id)))?;

    // Drop everything newer than the target generation, then regenerate
    // starting from the current instant.
    state
        .schedule_engine
        .delete_schedules_after(channel_id, target.generation)
        .await?;
    let fresh = state
        .schedule_engine
        .generate_schedule(channel_id, Utc::now())
        .await?;

    // Best-effort side channels: the broadcast may have no subscribers and
    // the activity log is advisory, so both sends deliberately ignore errors.
    let _ = state.event_tx.send(domain::DomainEvent::ScheduleGenerated {
        channel_id,
        schedule: fresh.clone(),
    });
    let detail = format!("rollback to gen {}; {} slots", target.generation, fresh.slots.len());
    let _ = state
        .activity_log_repo
        .log("schedule_rollback", &detail, Some(channel_id))
        .await;

    Ok(Json(ScheduleResponse::from(fresh)))
}

View File

@@ -1,15 +1,52 @@
use axum::{Json, Router, extract::State, routing::get};
use std::sync::Arc;
use crate::config::Config;
use crate::dto::ConfigResponse;
use domain::{IProviderRegistry as _, ProviderCapabilities, StreamingProtocol};
use crate::dto::{ConfigResponse, ProviderInfo};
use crate::state::AppState;
pub fn router() -> Router<AppState> {
Router::new().route("/", get(get_config))
}
async fn get_config(State(config): State<Arc<Config>>) -> Json<ConfigResponse> {
#[allow(clippy::vec_init_then_push)]
async fn get_config(State(state): State<AppState>) -> Json<ConfigResponse> {
let registry = state.provider_registry.read().await;
let providers: Vec<ProviderInfo> = registry
.provider_ids()
.into_iter()
.filter_map(|id| {
registry.capabilities(&id).map(|caps| ProviderInfo {
id: id.clone(),
capabilities: caps,
})
})
.collect();
let primary_capabilities = registry
.capabilities(registry.primary_id())
.unwrap_or(ProviderCapabilities {
collections: false,
series: false,
genres: false,
tags: false,
decade: false,
search: false,
streaming_protocol: StreamingProtocol::DirectFile,
rescan: false,
transcode: false,
});
let mut available_provider_types = Vec::new();
#[cfg(feature = "jellyfin")]
available_provider_types.push("jellyfin".to_string());
#[cfg(feature = "local-files")]
available_provider_types.push("local_files".to_string());
Json(ConfigResponse {
allow_registration: config.allow_registration,
allow_registration: state.config.allow_registration,
providers,
provider_capabilities: primary_capabilities,
available_provider_types,
})
}

View File

@@ -0,0 +1,359 @@
//! Local-file streaming, rescan, and transcode routes.
//!
//! GET /files/stream/:id — Range streaming (no auth)
//! POST /files/rescan — index rebuild (auth required)
//! GET /files/transcode/:id/playlist.m3u8 — trigger transcode + serve playlist
//! GET /files/transcode/:id/:segment — serve .ts / .m3u8 segment
//! GET /files/transcode-settings — read TTL (auth)
//! PUT /files/transcode-settings — update TTL (auth)
//! GET /files/transcode-stats — cache size (auth)
//! DELETE /files/transcode-cache — clear cache (auth)
use axum::{
Router,
extract::{Path, State},
http::HeaderMap,
response::Response,
routing::get,
};
use crate::{error::ApiError, state::AppState};
#[cfg(feature = "local-files")]
use axum::{
Json,
extract::Query,
http::StatusCode,
routing::{delete, post},
};
#[cfg(feature = "local-files")]
use serde::Deserialize;
#[cfg(feature = "local-files")]
use crate::{
dto::{TranscodeSettingsResponse, TranscodeStatsResponse, UpdateTranscodeSettingsRequest},
extractors::CurrentUser,
};
/// Build the `/files` sub-router.
///
/// Direct streaming is always mounted; the rescan and transcode endpoints
/// only exist when the `local-files` feature is compiled in.
pub fn router() -> Router<AppState> {
    // `mut` is only touched under the feature gate, hence the allow.
    #[allow(unused_mut)]
    let mut routes = Router::new().route("/stream/{id}", get(stream_file));
    #[cfg(feature = "local-files")]
    {
        routes = routes
            .route("/rescan", post(trigger_rescan))
            .route("/transcode/{id}/playlist.m3u8", get(transcode_playlist))
            .route("/transcode/{id}/{segment}", get(transcode_segment))
            .route(
                "/transcode-settings",
                get(get_transcode_settings).put(update_transcode_settings),
            )
            .route("/transcode-stats", get(get_transcode_stats))
            .route("/transcode-cache", delete(clear_transcode_cache));
    }
    routes
}
// ============================================================================
// Direct streaming
// ============================================================================
/// Stream a local media file with HTTP Range support (no auth).
///
/// Decodes the opaque `encoded_id` to a path relative to
/// `LOCAL_FILES_DIR`, verifies it stays inside that root, and serves the
/// bytes either whole (200) or as a single range (206). Without the
/// `local-files` feature this always answers "not implemented".
#[cfg_attr(not(feature = "local-files"), allow(unused_variables))]
async fn stream_file(
    State(state): State<AppState>,
    Path(encoded_id): Path<String>,
    headers: HeaderMap,
) -> Result<Response, ApiError> {
    #[cfg(feature = "local-files")]
    {
        use axum::body::Body;
        use std::io::SeekFrom;
        use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _};
        use tokio_util::io::ReaderStream;
        // Streaming is unavailable unless a media root is configured.
        let root_dir = state.config.local_files_dir.as_ref().ok_or_else(|| {
            ApiError::not_implemented("LOCAL_FILES_DIR not configured")
        })?;
        let rel = infra::local_files::decode_stream_id(&encoded_id)
            .ok_or_else(|| ApiError::validation("invalid stream id"))?;
        let full_path = root_dir.join(&rel);
        // Canonicalize both sides and re-check containment: symlinks or a
        // crafted id could otherwise resolve outside the media root.
        let canonical_root = root_dir
            .canonicalize()
            .map_err(|e| ApiError::internal(e.to_string()))?;
        let canonical = full_path
            .canonicalize()
            .map_err(|_| ApiError::not_found("file not found"))?;
        if !canonical.starts_with(&canonical_root) {
            return Err(ApiError::Forbidden("path traversal detected".into()));
        }
        let mut file = tokio::fs::File::open(&canonical)
            .await
            .map_err(|_| ApiError::not_found("file not found"))?;
        let file_size = file
            .metadata()
            .await
            .map_err(|e| ApiError::internal(e.to_string()))?
            .len();
        // Content type comes from the extension only; no sniffing.
        let ext = canonical
            .extension()
            .and_then(|e| e.to_str())
            .unwrap_or("")
            .to_lowercase();
        let content_type = content_type_for_ext(&ext);
        // An unparsable or unsatisfiable Range header falls back to a full
        // 200 response (suffix ranges like `bytes=-500` are not supported).
        let range = headers
            .get(axum::http::header::RANGE)
            .and_then(|v| v.to_str().ok())
            .and_then(|r| parse_range(r, file_size));
        let (start, end, status) = if let Some((s, e)) = range {
            // Clamp the requested end to the last byte of the file.
            (s, e.min(file_size.saturating_sub(1)), StatusCode::PARTIAL_CONTENT)
        } else {
            (0, file_size.saturating_sub(1), StatusCode::OK)
        };
        // NOTE(review): for a zero-length file this computes length = 1 while
        // the body is empty, so Content-Length would be wrong — confirm
        // whether empty media files can occur here.
        let length = end - start + 1;
        file.seek(SeekFrom::Start(start))
            .await
            .map_err(|e| ApiError::internal(e.to_string()))?;
        // `take(length)` bounds the stream so we never read past `end`.
        let stream = ReaderStream::new(file.take(length));
        let body = Body::from_stream(stream);
        let mut builder = Response::builder()
            .status(status)
            .header("Content-Type", content_type)
            .header("Content-Length", length.to_string())
            .header("Accept-Ranges", "bytes");
        if status == StatusCode::PARTIAL_CONTENT {
            builder = builder.header(
                "Content-Range",
                format!("bytes {}-{}/{}", start, end, file_size),
            );
        }
        builder.body(body).map_err(|e| ApiError::internal(e.to_string()))
    }
    #[cfg(not(feature = "local-files"))]
    Err(ApiError::not_implemented("local-files feature not enabled"))
}
// ============================================================================
// Rescan
// ============================================================================
/// Query parameters for `POST /files/rescan`.
#[cfg(feature = "local-files")]
#[derive(Deserialize)]
struct RescanQuery {
    // Optional provider id; when absent the handler picks whichever local
    // index the map yields first (iteration order — not guaranteed stable).
    provider: Option<String>,
}
/// Rebuild the index of a local-files provider (auth required).
///
/// With `?provider=<id>` the named provider is rescanned; otherwise an
/// arbitrary registered local index is chosen (first in map iteration
/// order). Responds with the number of items found.
#[cfg(feature = "local-files")]
async fn trigger_rescan(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Query(query): Query<RescanQuery>,
) -> Result<Json<serde_json::Value>, ApiError> {
    // Clone the map out of the lock so the (slow) rescan runs lock-free.
    let indexes = state.local_index.read().await.clone();
    let chosen = match query.provider.as_ref() {
        Some(id) => indexes.get(id).cloned(),
        None => indexes.values().next().cloned(),
    };
    let index = match chosen {
        Some(idx) => idx,
        None => return Err(ApiError::not_implemented("no local files provider active")),
    };
    let count = index.rescan().await;
    Ok(Json(serde_json::json!({ "items_found": count })))
}
// ============================================================================
// Transcode endpoints
// ============================================================================
/// Trigger (or reuse) an HLS transcode for `id` and serve its playlist.
///
/// The source file is resolved from the opaque stream id relative to the
/// media root; the playlist is read from the transcode cache afterwards.
#[cfg(feature = "local-files")]
async fn transcode_playlist(
    State(state): State<AppState>,
    Path(id): Path<String>,
) -> Result<Response, ApiError> {
    // `id` is used verbatim as a directory name under `transcode_dir` below;
    // reject separators and parent references so a crafted id cannot escape
    // the cache (same textual guard `transcode_segment` applies to `segment`).
    if id.contains('/') || id.contains("..") {
        return Err(ApiError::Forbidden("invalid item id".into()));
    }
    let tm = state.transcode_manager.read().await.clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let root = state.config.local_files_dir.as_ref().ok_or_else(|| {
        ApiError::not_implemented("LOCAL_FILES_DIR not configured")
    })?;
    // The opaque id decodes to a path relative to the media root.
    let rel = infra::local_files::decode_stream_id(&id)
        .ok_or_else(|| ApiError::validation("invalid item id"))?;
    let src = root.join(&rel);
    // Presumably a no-op when the transcode already exists — confirm against
    // the manager's implementation.
    tm.ensure_transcoded(&id, &src)
        .await
        .map_err(|e| ApiError::internal(e.to_string()))?;
    let playlist_path = tm.transcode_dir.join(&id).join("playlist.m3u8");
    let content = tokio::fs::read_to_string(&playlist_path)
        .await
        .map_err(|e| ApiError::internal(e.to_string()))?;
    // no-cache: the playlist may still be growing while ffmpeg runs.
    Response::builder()
        .status(200)
        .header("Content-Type", "application/vnd.apple.mpegurl")
        .header("Cache-Control", "no-cache")
        .body(axum::body::Body::from(content))
        .map_err(|e| ApiError::internal(e.to_string()))
}
/// Path parameters for `GET /files/transcode/{id}/{segment}`.
//
// cfg before derive for consistency with `RescanQuery` above.
#[cfg(feature = "local-files")]
#[derive(Deserialize)]
struct TranscodeSegmentPath {
    // Opaque item id — the directory name under the transcode cache.
    id: String,
    // Segment file name, e.g. a `.ts` media chunk or a nested `.m3u8`.
    segment: String,
}
/// Serve one HLS artifact (`.ts` media segment or `.m3u8` playlist) from
/// the transcode cache.
#[cfg(feature = "local-files")]
async fn transcode_segment(
    State(state): State<AppState>,
    Path(params): Path<TranscodeSegmentPath>,
) -> Result<Response, ApiError> {
    let TranscodeSegmentPath { id, segment } = params;
    // Only the two HLS file types are ever served from the cache.
    let ext = std::path::Path::new(&segment)
        .extension()
        .and_then(|e| e.to_str())
        .unwrap_or("");
    if ext != "ts" && ext != "m3u8" {
        return Err(ApiError::not_found("invalid segment extension"));
    }
    // Cheap textual traversal guard before touching the filesystem.
    if segment.contains('/') || segment.contains("..") {
        return Err(ApiError::Forbidden("invalid segment path".into()));
    }
    let tm = state.transcode_manager.read().await.clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let file_path = tm.transcode_dir.join(&id).join(&segment);
    // Canonicalize both ends and re-check containment: symlinks or a crafted
    // `id` could otherwise resolve outside the cache directory.
    let canonical_base = tm
        .transcode_dir
        .canonicalize()
        .map_err(|e| ApiError::internal(e.to_string()))?;
    let canonical_file = file_path
        .canonicalize()
        .map_err(|_| ApiError::not_found("segment not found"))?;
    if !canonical_file.starts_with(&canonical_base) {
        return Err(ApiError::Forbidden("path traversal detected".into()));
    }
    // Whole-file read; HLS segments are typically only a few seconds of video.
    let content = tokio::fs::read(&canonical_file)
        .await
        .map_err(|_| ApiError::not_found("segment not found"))?;
    let content_type = if ext == "ts" {
        "video/mp2t"
    } else {
        "application/vnd.apple.mpegurl"
    };
    Response::builder()
        .status(200)
        .header("Content-Type", content_type)
        .body(axum::body::Body::from(content))
        .map_err(|e| ApiError::internal(e.to_string()))
}
// ============================================================================
// Transcode settings / stats / cache management
// ============================================================================
/// Read the transcode cache cleanup TTL (auth required).
#[cfg(feature = "local-files")]
async fn get_transcode_settings(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<Json<TranscodeSettingsResponse>, ApiError> {
    // Transcoding is optional; without a manager there is nothing to report.
    let manager = state
        .transcode_manager
        .read()
        .await
        .clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let cleanup_ttl_hours = manager.get_cleanup_ttl();
    Ok(Json(TranscodeSettingsResponse { cleanup_ttl_hours }))
}
/// Update the transcode cache cleanup TTL (auth required).
///
/// Persists the value when a settings repository is configured and pushes
/// it into the live manager so the change takes effect immediately; either
/// half is skipped when its component is absent.
#[cfg(feature = "local-files")]
async fn update_transcode_settings(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Json(req): Json<UpdateTranscodeSettingsRequest>,
) -> Result<Json<TranscodeSettingsResponse>, ApiError> {
    // Durable write first, so a crash after this point loses nothing.
    match &state.transcode_settings_repo {
        Some(repo) => {
            repo.save_cleanup_ttl(req.cleanup_ttl_hours)
                .await
                .map_err(|e| ApiError::internal(e.to_string()))?;
        }
        None => {}
    }
    // Clone out of the lock before calling into the manager.
    let maybe_manager = state.transcode_manager.read().await.clone();
    if let Some(manager) = maybe_manager {
        manager.set_cleanup_ttl(req.cleanup_ttl_hours);
    }
    Ok(Json(TranscodeSettingsResponse {
        cleanup_ttl_hours: req.cleanup_ttl_hours,
    }))
}
/// Report transcode cache size and item count (auth required).
#[cfg(feature = "local-files")]
async fn get_transcode_stats(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<Json<TranscodeStatsResponse>, ApiError> {
    let manager = state
        .transcode_manager
        .read()
        .await
        .clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let (cache_size_bytes, item_count) = manager.cache_stats().await;
    Ok(Json(TranscodeStatsResponse {
        cache_size_bytes,
        item_count,
    }))
}
/// Delete everything in the transcode cache (auth required). 204 on success.
#[cfg(feature = "local-files")]
async fn clear_transcode_cache(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<StatusCode, ApiError> {
    let manager = state
        .transcode_manager
        .read()
        .await
        .clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    if let Err(e) = manager.clear_cache().await {
        return Err(ApiError::internal(e.to_string()));
    }
    Ok(StatusCode::NO_CONTENT)
}
// ============================================================================
// Helpers
// ============================================================================
/// Map a lowercased file extension to its video MIME type.
///
/// Anything unrecognized falls back to `application/octet-stream`.
#[cfg(feature = "local-files")]
fn content_type_for_ext(ext: &str) -> &'static str {
    // Arms sorted alphabetically by extension for easy scanning.
    match ext {
        "avi" => "video/x-msvideo",
        "mkv" => "video/x-matroska",
        "mov" => "video/quicktime",
        "mp4" | "m4v" => "video/mp4",
        "webm" => "video/webm",
        _ => "application/octet-stream",
    }
}
/// Parse a single-range `Range: bytes=start-end` header value.
///
/// Returns `None` for anything unsupported or unsatisfiable: a missing
/// `bytes=` prefix, suffix ranges (`bytes=-500`), multi-range lists,
/// `start > end`, or `start` at/past EOF. An omitted end means "to EOF".
/// Note: the returned `end` is NOT clamped to `file_size - 1`; the caller
/// (`stream_file`) performs that clamp.
#[cfg(feature = "local-files")]
fn parse_range(range: &str, file_size: u64) -> Option<(u64, u64)> {
    let spec = range.strip_prefix("bytes=")?;
    let (first, last) = spec.split_once('-')?;
    let start = first.parse::<u64>().ok()?;
    let end = match last {
        "" => file_size.saturating_sub(1),
        explicit => explicit.parse::<u64>().ok()?,
    };
    // Reject inverted or fully out-of-range requests.
    (start <= end && start < file_size).then_some((start, end))
}

View File

@@ -1,24 +1,39 @@
//! Library browsing routes
//! Library routes — DB-backed.
//!
//! These endpoints expose the media provider's library to the dashboard so
//! users can discover what's available without knowing provider-internal IDs.
//! All routes require authentication.
//! GET /library/collections — collections derived from synced items
//! GET /library/series — series names
//! GET /library/genres — genres
//! GET /library/items — search / browse
//! GET /library/items/:id — single item
//! GET /library/sync/status — latest sync log per provider
//! POST /library/sync — trigger an ad-hoc sync (auth)
//!
//! GET /library/collections — top-level libraries (Jellyfin views, Plex sections)
//! GET /library/series — TV series, optionally scoped to a collection
//! GET /library/genres — available genres, optionally filtered by content type
//! GET /library/items — search / browse items (used for block filter preview)
//! Admin (nested under /admin/library):
//! GET /admin/library/settings — app_settings key/value
//! PUT /admin/library/settings — update app_settings
use std::collections::HashMap;
use std::sync::Arc;
use axum::{
Json, Router,
extract::{Query, RawQuery, State},
routing::get,
extract::{Path, Query, RawQuery, State},
http::StatusCode,
response::IntoResponse,
routing::{get, post, put},
};
use domain::{ContentType, ILibraryRepository, LibrarySearchFilter, LibrarySyncAdapter};
use serde::{Deserialize, Serialize};
use domain::{Collection, ContentType, MediaFilter, SeriesSummary};
use crate::{
error::ApiError,
extractors::{AdminUser, CurrentUser},
state::AppState,
};
use crate::{error::ApiError, extractors::CurrentUser, state::AppState};
// ============================================================================
// Routers
// ============================================================================
pub fn router() -> Router<AppState> {
Router::new()
@@ -26,6 +41,15 @@ pub fn router() -> Router<AppState> {
.route("/series", get(list_series))
.route("/genres", get(list_genres))
.route("/items", get(search_items))
.route("/items/{id}", get(get_item))
.route("/shows", get(list_shows))
.route("/shows/{name}/seasons", get(list_seasons))
.route("/sync/status", get(sync_status))
.route("/sync", post(trigger_sync))
}
pub fn admin_router() -> Router<AppState> {
Router::new().route("/settings", get(get_settings).put(update_settings))
}
// ============================================================================
@@ -40,38 +64,6 @@ struct CollectionResponse {
collection_type: Option<String>,
}
impl From<Collection> for CollectionResponse {
fn from(c: Collection) -> Self {
Self {
id: c.id,
name: c.name,
collection_type: c.collection_type,
}
}
}
#[derive(Debug, Serialize)]
struct SeriesResponse {
id: String,
name: String,
episode_count: u32,
genres: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
year: Option<u16>,
}
impl From<SeriesSummary> for SeriesResponse {
fn from(s: SeriesSummary) -> Self {
Self {
id: s.id,
name: s.name,
episode_count: s.episode_count,
genres: s.genres,
year: s.year,
}
}
}
#[derive(Debug, Serialize)]
struct LibraryItemResponse {
id: String,
@@ -87,147 +79,370 @@ struct LibraryItemResponse {
#[serde(skip_serializing_if = "Option::is_none")]
year: Option<u16>,
genres: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
thumbnail_url: Option<String>,
}
#[derive(Debug, Serialize)]
struct PagedResponse<T: Serialize> {
items: Vec<T>,
total: u32,
}
#[derive(Debug, Serialize)]
struct ShowSummaryResponse {
series_name: String,
episode_count: u32,
season_count: u32,
#[serde(skip_serializing_if = "Option::is_none")]
thumbnail_url: Option<String>,
genres: Vec<String>,
}
#[derive(Debug, Serialize)]
struct SeasonSummaryResponse {
season_number: u32,
episode_count: u32,
#[serde(skip_serializing_if = "Option::is_none")]
thumbnail_url: Option<String>,
}
#[derive(Debug, Serialize)]
struct SyncLogResponse {
id: i64,
provider_id: String,
started_at: String,
#[serde(skip_serializing_if = "Option::is_none")]
finished_at: Option<String>,
items_found: u32,
status: String,
#[serde(skip_serializing_if = "Option::is_none")]
error_msg: Option<String>,
}
// ============================================================================
// Query params
// ============================================================================
#[derive(Debug, Deserialize)]
struct CollectionsQuery {
provider: Option<String>,
}
#[derive(Debug, Deserialize)]
struct SeriesQuery {
/// Scope results to a specific collection (provider library ID).
collection: Option<String>,
provider: Option<String>,
}
#[derive(Debug, Deserialize)]
struct GenresQuery {
/// Limit genres to a content type: "movie", "episode", or "short".
#[serde(rename = "type")]
content_type: Option<String>,
provider: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct ItemsQuery {
/// Free-text search.
q: Option<String>,
/// Content type filter: "movie", "episode", or "short".
#[serde(rename = "type")]
content_type: Option<String>,
/// Filter episodes by series name. Repeat the param for multiple series:
/// `?series[]=iCarly&series[]=Victorious`
#[serde(default)]
series: Vec<String>,
/// Scope to a provider collection ID.
#[serde(default)]
genres: Vec<String>,
collection: Option<String>,
/// Maximum number of results (default: 50, max: 200).
limit: Option<usize>,
/// Fill strategy to simulate: "random" | "sequential" | "best_fit".
/// Applies the same ordering the schedule engine would use so the preview
/// reflects what will actually be scheduled.
strategy: Option<String>,
limit: Option<u32>,
offset: Option<u32>,
provider: Option<String>,
season: Option<u32>,
}
#[derive(Debug, Default, Deserialize)]
struct ShowsQuery {
q: Option<String>,
provider: Option<String>,
#[serde(default)]
genres: Vec<String>,
}
#[derive(Debug, Deserialize)]
struct SeasonsQuery {
provider: Option<String>,
}
// ============================================================================
// Handlers
// ============================================================================
/// List top-level collections (Jellyfin virtual libraries, Plex sections, etc.)
async fn list_collections(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Query(params): Query<CollectionsQuery>,
) -> Result<Json<Vec<CollectionResponse>>, ApiError> {
let collections = state.media_provider.list_collections().await?;
Ok(Json(collections.into_iter().map(Into::into).collect()))
let cols = state
.library_repo
.list_collections(params.provider.as_deref())
.await?;
let resp = cols
.into_iter()
.map(|c| CollectionResponse {
id: c.id,
name: c.name,
collection_type: c.collection_type,
})
.collect();
Ok(Json(resp))
}
/// List TV series, optionally scoped to a collection.
async fn list_series(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Query(params): Query<SeriesQuery>,
) -> Result<Json<Vec<SeriesResponse>>, ApiError> {
) -> Result<Json<Vec<String>>, ApiError> {
let series = state
.media_provider
.list_series(params.collection.as_deref())
.library_repo
.list_series(params.provider.as_deref())
.await?;
Ok(Json(series.into_iter().map(Into::into).collect()))
Ok(Json(series))
}
/// List available genres, optionally filtered to a content type.
async fn list_genres(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Query(params): Query<GenresQuery>,
) -> Result<Json<Vec<String>>, ApiError> {
let ct = parse_content_type(params.content_type.as_deref())?;
let genres = state.media_provider.list_genres(ct.as_ref()).await?;
let genres = state
.library_repo
.list_genres(ct.as_ref(), params.provider.as_deref())
.await?;
Ok(Json(genres))
}
/// Search / browse library items. Used by the block editor to preview what a
/// filter matches before saving a channel config.
async fn search_items(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
RawQuery(raw_query): RawQuery,
) -> Result<Json<Vec<LibraryItemResponse>>, ApiError> {
let qs_config = serde_qs::Config::new(2, false); // non-strict: accept encoded brackets
) -> Result<Json<PagedResponse<LibraryItemResponse>>, ApiError> {
let qs_config = serde_qs::Config::new(2, false);
let params: ItemsQuery = raw_query
.as_deref()
.map(|q| qs_config.deserialize_str::<ItemsQuery>(q))
.transpose()
.map_err(|e| ApiError::validation(e.to_string()))?
.unwrap_or_default();
let limit = params.limit.unwrap_or(50).min(200);
let filter = MediaFilter {
let limit = params.limit.unwrap_or(50).min(200);
let offset = params.offset.unwrap_or(0);
let filter = LibrarySearchFilter {
provider_id: params.provider,
content_type: parse_content_type(params.content_type.as_deref())?,
search_term: params.q,
series_names: params.series,
collections: params
.collection
.map(|c| vec![c])
.unwrap_or_default(),
collection_id: params.collection,
genres: params.genres,
search_term: params.q,
season_number: params.season,
offset,
limit,
..Default::default()
};
let mut items = state.media_provider.fetch_items(&filter).await?;
let (items, total) = state.library_repo.search(&filter).await?;
let resp = items.into_iter().map(library_item_to_response).collect();
Ok(Json(PagedResponse { items: resp, total }))
}
// Apply the same ordering the schedule engine uses so the preview reflects
// what will actually be scheduled rather than raw provider order.
match params.strategy.as_deref() {
Some("random") => {
use rand::seq::SliceRandom;
items.shuffle(&mut rand::thread_rng());
}
Some("best_fit") => {
// Mirror the greedy bin-packing: longest items first.
items.sort_by(|a, b| b.duration_secs.cmp(&a.duration_secs));
}
_ => {} // "sequential" / unset: keep provider order (episode order per series)
}
async fn get_item(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Path(id): Path<String>,
) -> Result<Json<LibraryItemResponse>, ApiError> {
let item = state
.library_repo
.get_by_id(&id)
.await?
.ok_or_else(|| ApiError::NotFound(format!("Library item '{}' not found", id)))?;
Ok(Json(library_item_to_response(item)))
}
let response: Vec<LibraryItemResponse> = items
async fn sync_status(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
) -> Result<Json<Vec<SyncLogResponse>>, ApiError> {
let entries = state.library_repo.latest_sync_status().await?;
let resp = entries
.into_iter()
.take(limit)
.map(|item| LibraryItemResponse {
id: item.id.into_inner(),
title: item.title,
content_type: match item.content_type {
domain::ContentType::Movie => "movie".into(),
domain::ContentType::Episode => "episode".into(),
domain::ContentType::Short => "short".into(),
},
duration_secs: item.duration_secs,
series_name: item.series_name,
season_number: item.season_number,
episode_number: item.episode_number,
year: item.year,
genres: item.genres,
.map(|e| SyncLogResponse {
id: e.id,
provider_id: e.provider_id,
started_at: e.started_at,
finished_at: e.finished_at,
items_found: e.items_found,
status: e.status,
error_msg: e.error_msg,
})
.collect();
Ok(Json(resp))
}
Ok(Json(response))
async fn trigger_sync(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
) -> Result<impl IntoResponse, ApiError> {
use domain::IProviderRegistry as _;
let provider_ids: Vec<String> = {
let reg = state.provider_registry.read().await;
reg.provider_ids()
};
// 409 if any provider is already syncing
for pid in &provider_ids {
let running = state.library_repo.is_sync_running(pid).await?;
if running {
return Ok((
StatusCode::CONFLICT,
Json(serde_json::json!({
"error": format!("Sync already running for provider '{}'", pid)
})),
)
.into_response());
}
}
// Spawn background sync
let sync_adapter: Arc<dyn LibrarySyncAdapter> = Arc::clone(&state.library_sync_adapter);
let registry = Arc::clone(&state.provider_registry);
tokio::spawn(async move {
let providers: Vec<(String, Arc<dyn domain::IMediaProvider>)> = {
let reg = registry.read().await;
provider_ids
.iter()
.filter_map(|id| reg.get_provider(id).map(|p| (id.clone(), p)))
.collect()
};
for (pid, provider) in providers {
let result = sync_adapter.sync_provider(provider.as_ref(), &pid).await;
if let Some(ref err) = result.error {
tracing::warn!("manual sync: provider '{}' failed: {}", pid, err);
} else {
tracing::info!(
"manual sync: provider '{}' done — {} items in {}ms",
pid,
result.items_found,
result.duration_ms
);
}
}
});
Ok((
StatusCode::ACCEPTED,
Json(serde_json::json!({ "message": "Sync started" })),
)
.into_response())
}
async fn list_shows(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Query(params): Query<ShowsQuery>,
) -> Result<Json<Vec<ShowSummaryResponse>>, ApiError> {
let shows = state
.library_repo
.list_shows(
params.provider.as_deref(),
params.q.as_deref(),
&params.genres,
)
.await?;
let resp = shows
.into_iter()
.map(|s| ShowSummaryResponse {
series_name: s.series_name,
episode_count: s.episode_count,
season_count: s.season_count,
thumbnail_url: s.thumbnail_url,
genres: s.genres,
})
.collect();
Ok(Json(resp))
}
async fn list_seasons(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Path(name): Path<String>,
Query(params): Query<SeasonsQuery>,
) -> Result<Json<Vec<SeasonSummaryResponse>>, ApiError> {
let seasons = state
.library_repo
.list_seasons(&name, params.provider.as_deref())
.await?;
let resp = seasons
.into_iter()
.map(|s| SeasonSummaryResponse {
season_number: s.season_number,
episode_count: s.episode_count,
thumbnail_url: s.thumbnail_url,
})
.collect();
Ok(Json(resp))
}
async fn get_settings(
State(state): State<AppState>,
AdminUser(_user): AdminUser,
) -> Result<Json<HashMap<String, serde_json::Value>>, ApiError> {
let pairs = state.app_settings_repo.get_all().await?;
let map: HashMap<String, serde_json::Value> = pairs
.into_iter()
.map(|(k, v)| {
// Try to parse as number first, then bool, then keep as string
let val = if let Ok(n) = v.parse::<i64>() {
serde_json::Value::Number(n.into())
} else if let Ok(b) = v.parse::<bool>() {
serde_json::Value::Bool(b)
} else {
serde_json::Value::String(v)
};
(k, val)
})
.collect();
Ok(Json(map))
}
async fn update_settings(
State(state): State<AppState>,
AdminUser(_user): AdminUser,
Json(body): Json<HashMap<String, serde_json::Value>>,
) -> Result<Json<HashMap<String, serde_json::Value>>, ApiError> {
for (key, val) in &body {
let val_str = match val {
serde_json::Value::String(s) => s.clone(),
serde_json::Value::Number(n) => n.to_string(),
serde_json::Value::Bool(b) => b.to_string(),
other => other.to_string(),
};
state.app_settings_repo.set(key, &val_str).await?;
}
// Return the updated state
let pairs = state.app_settings_repo.get_all().await?;
let map: HashMap<String, serde_json::Value> = pairs
.into_iter()
.map(|(k, v)| {
let val = if let Ok(n) = v.parse::<i64>() {
serde_json::Value::Number(n.into())
} else if let Ok(b) = v.parse::<bool>() {
serde_json::Value::Bool(b)
} else {
serde_json::Value::String(v)
};
(k, val)
})
.collect();
Ok(Json(map))
}
// ============================================================================
@@ -246,3 +461,22 @@ fn parse_content_type(s: Option<&str>) -> Result<Option<ContentType>, ApiError>
))),
}
}
fn library_item_to_response(item: domain::LibraryItem) -> LibraryItemResponse {
LibraryItemResponse {
id: item.id,
title: item.title,
content_type: match item.content_type {
ContentType::Movie => "movie".into(),
ContentType::Episode => "episode".into(),
ContentType::Short => "short".into(),
},
duration_secs: item.duration_secs,
series_name: item.series_name,
season_number: item.season_number,
episode_number: item.episode_number,
year: item.year,
genres: item.genres,
thumbnail_url: item.thumbnail_url,
}
}

View File

@@ -5,18 +5,25 @@
use crate::state::AppState;
use axum::Router;
pub mod admin;
pub mod admin_providers;
pub mod auth;
pub mod channels;
pub mod config;
pub mod files;
pub mod iptv;
pub mod library;
/// Construct the API v1 router
pub fn api_v1_router() -> Router<AppState> {
Router::new()
.nest("/admin", admin::router())
.nest("/admin/providers", admin_providers::router())
.nest("/auth", auth::router())
.nest("/channels", channels::router())
.nest("/config", config::router())
.nest("/files", files::router())
.nest("/iptv", iptv::router())
.nest("/library", library::router())
.nest("/admin", library::admin_router())
}

View File

@@ -7,21 +7,24 @@ use std::sync::Arc;
use std::time::Duration;
use chrono::Utc;
use domain::{ChannelRepository, ScheduleEngineService};
use domain::{ChannelRepository, DomainEvent, ScheduleEngineService};
use tokio::sync::broadcast;
pub async fn run_auto_scheduler(
schedule_engine: Arc<ScheduleEngineService>,
channel_repo: Arc<dyn ChannelRepository>,
event_tx: broadcast::Sender<DomainEvent>,
) {
loop {
tokio::time::sleep(Duration::from_secs(3600)).await;
tick(&schedule_engine, &channel_repo).await;
tick(&schedule_engine, &channel_repo, &event_tx).await;
}
}
async fn tick(
schedule_engine: &Arc<ScheduleEngineService>,
channel_repo: &Arc<dyn ChannelRepository>,
event_tx: &broadcast::Sender<DomainEvent>,
) {
let channels = match channel_repo.find_auto_schedule_enabled().await {
Ok(c) => c,
@@ -59,18 +62,285 @@ async fn tick(
}
};
if let Err(e) = schedule_engine.generate_schedule(channel.id, from).await {
tracing::warn!(
"auto-scheduler: failed to generate schedule for channel {}: {}",
channel.id,
e
);
} else {
match schedule_engine.generate_schedule(channel.id, from).await {
Ok(schedule) => {
tracing::info!(
"auto-scheduler: generated schedule for channel {} starting at {}",
channel.id,
from
);
let _ = event_tx.send(DomainEvent::ScheduleGenerated {
channel_id: channel.id,
schedule,
});
}
Err(e) => {
tracing::warn!(
"auto-scheduler: failed to generate schedule for channel {}: {}",
channel.id,
e
);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use domain::value_objects::{ChannelId, ContentType, UserId};
use std::collections::HashMap;
use domain::{
BlockId, Channel, ChannelRepository, Collection, DomainResult, GeneratedSchedule,
IProviderRegistry, MediaFilter, MediaItem, MediaItemId, PlaybackRecord, ProviderCapabilities,
ScheduleEngineService, ScheduleRepository, SeriesSummary, StreamQuality,
};
use uuid::Uuid;
// ── Mocks ─────────────────────────────────────────────────────────────────
/// In-memory `ChannelRepository` for the auto-scheduler tests.
///
/// Only the two lookups the scheduler tick uses are implemented; every
/// other method panics via `unimplemented!()` so an unexpected call fails
/// the test loudly instead of silently succeeding.
struct MockChannelRepo {
    // Fixture channels; also doubles as the "auto-schedule enabled" set.
    channels: Vec<Channel>,
}
#[async_trait]
impl ChannelRepository for MockChannelRepo {
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        Ok(self.channels.iter().find(|c| c.id == id).cloned())
    }
    async fn find_by_owner(&self, _owner_id: UserId) -> DomainResult<Vec<Channel>> {
        unimplemented!()
    }
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        unimplemented!()
    }
    async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>> {
        // Every fixture channel is treated as auto-schedule enabled.
        Ok(self.channels.clone())
    }
    async fn save(&self, _channel: &Channel) -> DomainResult<()> {
        unimplemented!()
    }
    async fn delete(&self, _id: ChannelId) -> DomainResult<()> {
        unimplemented!()
    }
    // Config-snapshot API is unused by the scheduler; left unimplemented.
    async fn save_config_snapshot(&self, _channel_id: ChannelId, _config: &domain::ScheduleConfig, _label: Option<String>) -> DomainResult<domain::ChannelConfigSnapshot> { unimplemented!() }
    async fn list_config_snapshots(&self, _channel_id: ChannelId) -> DomainResult<Vec<domain::ChannelConfigSnapshot>> { unimplemented!() }
    async fn get_config_snapshot(&self, _channel_id: ChannelId, _snapshot_id: Uuid) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
    async fn patch_config_snapshot_label(&self, _channel_id: ChannelId, _snapshot_id: Uuid, _label: Option<String>) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
}
/// In-memory `ScheduleRepository` for the auto-scheduler tests.
///
/// Reads are canned (`latest` is returned as the most recent generation;
/// history/playback queries come back empty) and writes are captured in
/// `saved` so tests can assert on what the engine persisted. The
/// history-management methods are unused by the tick and panic if called.
struct MockScheduleRepo {
    // Schedule returned by `find_latest`, if any.
    latest: Option<GeneratedSchedule>,
    // Captures every schedule passed to `save`, in call order.
    saved: Arc<Mutex<Vec<GeneratedSchedule>>>,
}
#[async_trait]
impl ScheduleRepository for MockScheduleRepo {
    async fn find_active(
        &self,
        _channel_id: ChannelId,
        _at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        // No schedule is ever "active" — forces the tick to regenerate.
        Ok(None)
    }
    async fn find_latest(
        &self,
        _channel_id: ChannelId,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        Ok(self.latest.clone())
    }
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
        self.saved.lock().unwrap().push(schedule.clone());
        Ok(())
    }
    async fn find_playback_history(
        &self,
        _channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>> {
        Ok(vec![])
    }
    async fn save_playback_record(&self, _record: &PlaybackRecord) -> DomainResult<()> {
        Ok(())
    }
    async fn find_last_slot_per_block(
        &self,
        _channel_id: ChannelId,
    ) -> DomainResult<HashMap<BlockId, MediaItemId>> {
        Ok(HashMap::new())
    }
    // History management is not exercised by the auto-scheduler tick.
    async fn list_schedule_history(&self, _channel_id: ChannelId) -> DomainResult<Vec<GeneratedSchedule>> { unimplemented!() }
    async fn get_schedule_by_id(&self, _channel_id: ChannelId, _schedule_id: Uuid) -> DomainResult<Option<GeneratedSchedule>> { unimplemented!() }
    async fn delete_schedules_after(&self, _channel_id: ChannelId, _target_generation: u32) -> DomainResult<()> { unimplemented!() }
}
/// Provider registry stub: returns empty catalogues so generated schedules have no slots.
struct MockRegistry;
#[async_trait]
impl IProviderRegistry for MockRegistry {
    // No media available — the engine still produces a (slotless) schedule.
    async fn fetch_items(
        &self,
        _provider_id: &str,
        _filter: &MediaFilter,
    ) -> DomainResult<Vec<MediaItem>> {
        Ok(vec![])
    }
    async fn fetch_by_id(&self, _item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        Ok(None)
    }
    // Streaming is never exercised by the scheduler tests.
    async fn get_stream_url(
        &self,
        _item_id: &MediaItemId,
        _quality: &StreamQuality,
    ) -> DomainResult<String> {
        unimplemented!()
    }
    fn provider_ids(&self) -> Vec<String> {
        vec![]
    }
    fn primary_id(&self) -> &str {
        ""
    }
    fn capabilities(&self, _provider_id: &str) -> Option<ProviderCapabilities> {
        None
    }
    async fn list_collections(&self, _provider_id: &str) -> DomainResult<Vec<Collection>> {
        unimplemented!()
    }
    async fn list_series(
        &self,
        _provider_id: &str,
        _collection_id: Option<&str>,
    ) -> DomainResult<Vec<SeriesSummary>> {
        unimplemented!()
    }
    async fn list_genres(
        &self,
        _provider_id: &str,
        _content_type: Option<&ContentType>,
    ) -> DomainResult<Vec<String>> {
        unimplemented!()
    }
}
// ── Helpers ───────────────────────────────────────────────────────────────
/// Build a minimal channel with auto-scheduling switched on.
fn make_channel() -> Channel {
    let mut channel = Channel::new(Uuid::new_v4(), "Test", "UTC");
    channel.auto_schedule = true;
    channel
}
/// Build a generation-1 schedule ending at `valid_until` and covering the
/// preceding 48 hours, with no slots.
fn make_schedule(channel_id: ChannelId, valid_until: DateTime<Utc>) -> GeneratedSchedule {
    let valid_from = valid_until - Duration::hours(48);
    GeneratedSchedule {
        id: Uuid::new_v4(),
        channel_id,
        valid_from,
        valid_until,
        generation: 1,
        slots: vec![],
    }
}
/// Wire a `ScheduleEngineService` against the given repos and the no-op registry.
fn make_engine(
    channel_repo: Arc<dyn ChannelRepository>,
    schedule_repo: Arc<dyn ScheduleRepository>,
) -> Arc<ScheduleEngineService> {
    let registry = Arc::new(MockRegistry);
    let engine = ScheduleEngineService::new(registry, channel_repo, schedule_repo);
    Arc::new(engine)
}
// ── Tests ─────────────────────────────────────────────────────────────────
// Scenario: channel has no prior schedule at all → the scheduler must
// generate one starting approximately at the current instant.
#[tokio::test]
async fn test_no_schedule_generates_from_now() {
    let ch = make_channel();
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: None,
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    let saved = saved.lock().unwrap();
    assert_eq!(saved.len(), 1);
    // 5-second tolerance absorbs test-runner latency between tick() and now.
    let diff = (saved[0].valid_from - Utc::now()).num_seconds().abs();
    assert!(diff < 5, "valid_from should be ~now, diff={diff}");
}
// Scenario: the latest schedule is still valid for 25 h (beyond the engine's
// regeneration horizon) → the tick must not generate anything.
#[tokio::test]
async fn test_fresh_schedule_skips() {
    let ch = make_channel();
    let valid_until = Utc::now() + Duration::hours(25);
    let schedule = make_schedule(ch.id, valid_until);
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: Some(schedule),
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    // Nothing saved — regeneration was skipped.
    assert_eq!(saved.lock().unwrap().len(), 0);
}
// Scenario: the latest schedule expires in 20 h (inside the regeneration
// window) → a new schedule is generated whose valid_from lines up exactly
// with the old valid_until, i.e. a seamless handoff with no gap or overlap.
#[tokio::test]
async fn test_expiring_schedule_seamless_handoff() {
    let ch = make_channel();
    let valid_until = Utc::now() + Duration::hours(20);
    let schedule = make_schedule(ch.id, valid_until);
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: Some(schedule),
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    let saved = saved.lock().unwrap();
    assert_eq!(saved.len(), 1);
    // New schedule starts exactly where the old one ends.
    assert_eq!(saved[0].valid_from, valid_until);
}
// Scenario: the latest schedule already expired an hour ago → no handoff is
// possible, so the new schedule starts at the current instant instead.
#[tokio::test]
async fn test_expired_schedule_generates_from_now() {
    let ch = make_channel();
    let valid_until = Utc::now() - Duration::hours(1);
    let schedule = make_schedule(ch.id, valid_until);
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: Some(schedule),
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    let saved = saved.lock().unwrap();
    assert_eq!(saved.len(), 1);
    // 5-second tolerance absorbs runner latency, as in the no-schedule test.
    let diff = (saved[0].valid_from - Utc::now()).num_seconds().abs();
    assert!(diff < 5, "valid_from should be ~now, diff={diff}");
}
}

View File

@@ -0,0 +1,59 @@
use std::net::SocketAddr;
use axum::Router;
use axum::http::{HeaderName, HeaderValue};
use k_core::http::server::{ServerConfig, apply_standard_middleware};
use tokio::net::TcpListener;
use tower_http::cors::{AllowHeaders, AllowMethods, AllowOrigin, CorsLayer};
use crate::config::Config;
use crate::routes;
use crate::state::AppState;
/// Build the axum router, attach middleware and CORS, bind the listener, and
/// serve until the process exits.
///
/// NOTE(review): two CORS layers end up on the stack — one inside
/// `apply_standard_middleware` (k_core) and this outer one. The outer layer is
/// intentional so OPTIONS preflights that include the custom password headers
/// are answered before reaching the inner layer; confirm the inner layer does
/// not conflict for non-preflight responses.
pub async fn build_and_serve(state: AppState, config: &Config) -> anyhow::Result<()> {
    let server_config = ServerConfig {
        cors_origins: config.cors_allowed_origins.clone(),
    };
    let app = Router::new()
        .nest("/api/v1", routes::api_v1_router())
        .with_state(state);
    let app = apply_standard_middleware(app, &server_config);
    // Wrap with an outer CorsLayer that includes the custom password headers.
    // Being outermost it handles OPTIONS preflights before k_core's inner layer.
    // Origins that fail to parse as header values are silently dropped.
    let origins: Vec<HeaderValue> = config
        .cors_allowed_origins
        .iter()
        .filter_map(|o| o.parse().ok())
        .collect();
    let cors = CorsLayer::new()
        .allow_origin(AllowOrigin::list(origins))
        .allow_methods(AllowMethods::any())
        .allow_headers(AllowHeaders::list([
            axum::http::header::AUTHORIZATION,
            axum::http::header::CONTENT_TYPE,
            HeaderName::from_static("x-channel-password"),
            HeaderName::from_static("x-block-password"),
        ]));
    let app = app.layer(cors);
    let addr: SocketAddr = format!("{}:{}", config.host, config.port).parse()?;
    let listener = TcpListener::bind(addr).await?;
    tracing::info!("🚀 API server running at http://{}", addr);
    tracing::info!("🔒 Authentication mode: JWT (Bearer token)");
    #[cfg(feature = "auth-jwt")]
    tracing::info!("   ✓ JWT auth enabled");
    #[cfg(feature = "auth-oidc")]
    tracing::info!("   ✓ OIDC integration enabled (stateless cookie state)");
    tracing::info!("📝 API endpoints available at /api/v1/...");
    // Blocks for the lifetime of the server.
    axum::serve(listener, app).await?;
    Ok(())
}

View File

@@ -0,0 +1,24 @@
use std::sync::Arc;
use domain::{ChannelRepository, DomainEvent, ScheduleEngineService};
use tokio::sync::broadcast;
use crate::{poller, scheduler};
/// Spawn the long-running background tasks: the auto-scheduler and the
/// broadcast poller. Both share the schedule engine, channel repository and
/// domain-event bus; the tasks run for the lifetime of the runtime.
pub fn spawn_background_tasks(
    schedule_engine: Arc<ScheduleEngineService>,
    channel_repo: Arc<dyn ChannelRepository>,
    event_tx: broadcast::Sender<DomainEvent>,
) {
    // The scheduler gets clones; the poller consumes the originals.
    tokio::spawn(scheduler::run_auto_scheduler(
        Arc::clone(&schedule_engine),
        Arc::clone(&channel_repo),
        event_tx.clone(),
    ));
    tokio::spawn(poller::run_broadcast_poller(
        schedule_engine,
        channel_repo,
        event_tx,
    ));
}

View File

@@ -8,32 +8,75 @@ use axum_extra::extract::cookie::Key;
use infra::auth::jwt::{JwtConfig, JwtValidator};
#[cfg(feature = "auth-oidc")]
use infra::auth::oidc::OidcService;
use std::sync::Arc;
use std::collections::VecDeque;
#[cfg(feature = "local-files")]
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
use crate::config::Config;
use domain::{ChannelService, IMediaProvider, ScheduleEngineService, UserService};
use crate::events::EventBus;
use crate::log_layer::LogLine;
use domain::{ActivityLogRepository, ChannelService, IAppSettingsRepository, ILibraryRepository, LibrarySyncAdapter, ProviderConfigRepository, ScheduleEngineService, UserService};
#[cfg(feature = "local-files")]
use domain::TranscodeSettingsRepository;
use k_core::db::DatabasePool;
#[derive(Clone)]
pub struct AppState {
pub user_service: Arc<UserService>,
pub channel_service: Arc<ChannelService>,
pub schedule_engine: Arc<ScheduleEngineService>,
pub media_provider: Arc<dyn IMediaProvider>,
pub provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
pub provider_config_repo: Arc<dyn ProviderConfigRepository>,
pub cookie_key: Key,
#[cfg(feature = "auth-oidc")]
pub oidc_service: Option<Arc<OidcService>>,
#[cfg(feature = "auth-jwt")]
pub jwt_validator: Option<Arc<JwtValidator>>,
pub config: Arc<Config>,
pub event_tx: EventBus,
/// Broadcast channel for streaming log lines to SSE clients.
pub log_tx: broadcast::Sender<LogLine>,
/// Ring buffer of recent log lines sent to new SSE clients on connect.
pub log_history: Arc<Mutex<VecDeque<LogLine>>>,
/// Repository for persisted in-app activity events.
pub activity_log_repo: Arc<dyn ActivityLogRepository>,
/// Indexes for local-files provider instances, keyed by provider instance id.
#[cfg(feature = "local-files")]
pub local_index: Arc<tokio::sync::RwLock<HashMap<String, Arc<infra::LocalIndex>>>>,
/// TranscodeManager for FFmpeg HLS transcoding (requires TRANSCODE_DIR).
#[cfg(feature = "local-files")]
pub transcode_manager: Arc<tokio::sync::RwLock<Option<Arc<infra::TranscodeManager>>>>,
/// Repository for transcode settings persistence.
#[cfg(feature = "local-files")]
pub transcode_settings_repo: Option<Arc<dyn TranscodeSettingsRepository>>,
/// Database pool — used by infra factory functions for hot-reload.
pub db_pool: Arc<DatabasePool>,
pub library_repo: Arc<dyn ILibraryRepository>,
pub library_sync_adapter: Arc<dyn LibrarySyncAdapter>,
pub app_settings_repo: Arc<dyn IAppSettingsRepository>,
}
impl AppState {
#[allow(clippy::too_many_arguments)]
pub async fn new(
user_service: UserService,
channel_service: ChannelService,
schedule_engine: ScheduleEngineService,
media_provider: Arc<dyn IMediaProvider>,
provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
provider_config_repo: Arc<dyn ProviderConfigRepository>,
config: Config,
event_tx: EventBus,
log_tx: broadcast::Sender<LogLine>,
log_history: Arc<Mutex<VecDeque<LogLine>>>,
activity_log_repo: Arc<dyn ActivityLogRepository>,
db_pool: Arc<DatabasePool>,
library_repo: Arc<dyn ILibraryRepository>,
library_sync_adapter: Arc<dyn LibrarySyncAdapter>,
app_settings_repo: Arc<dyn IAppSettingsRepository>,
#[cfg(feature = "local-files")]
transcode_settings_repo: Option<Arc<dyn TranscodeSettingsRepository>>,
) -> anyhow::Result<Self> {
let cookie_key = Key::derive_from(config.cookie_secret.as_bytes());
@@ -89,6 +132,7 @@ impl AppState {
config.jwt_issuer.clone(),
config.jwt_audience.clone(),
Some(config.jwt_expiry_hours),
Some(config.jwt_refresh_expiry_days),
config.is_production,
)?;
Some(Arc::new(JwtValidator::new(jwt_config)))
@@ -98,13 +142,28 @@ impl AppState {
user_service: Arc::new(user_service),
channel_service: Arc::new(channel_service),
schedule_engine: Arc::new(schedule_engine),
media_provider,
provider_registry,
provider_config_repo,
cookie_key,
#[cfg(feature = "auth-oidc")]
oidc_service,
#[cfg(feature = "auth-jwt")]
jwt_validator,
config: Arc::new(config),
event_tx,
log_tx,
log_history,
activity_log_repo,
#[cfg(feature = "local-files")]
local_index: Arc::new(tokio::sync::RwLock::new(HashMap::new())),
#[cfg(feature = "local-files")]
transcode_manager: Arc::new(tokio::sync::RwLock::new(None)),
#[cfg(feature = "local-files")]
transcode_settings_repo,
db_pool,
library_repo,
library_sync_adapter,
app_settings_repo,
})
}
}

View File

@@ -0,0 +1,25 @@
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt};
use crate::log_layer::{AppLogLayer, LogLine};
/// Handles returned by `init_tracing` for wiring log streaming into app state.
pub struct LoggingHandles {
    // Broadcast sender for live log lines (SSE fan-out).
    pub log_tx: broadcast::Sender<LogLine>,
    // Shared ring buffer of recent log lines for replay to new subscribers.
    pub log_history: Arc<Mutex<VecDeque<LogLine>>>,
}
/// Install the global tracing subscriber: env-filtered (`RUST_LOG`, default
/// `info`), stdout fmt layer, plus the in-app `AppLogLayer` that feeds the
/// broadcast channel and history buffer. Returns the handles for SSE wiring.
///
/// Must be called once at startup — `.init()` panics on a second call.
pub fn init_tracing() -> LoggingHandles {
    // Capacity 512: slow SSE consumers lag rather than block logging.
    let (log_tx, _) = broadcast::channel::<LogLine>(512);
    let log_history = Arc::new(Mutex::new(VecDeque::<LogLine>::new()));
    tracing_subscriber::registry()
        .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
        .with(fmt::layer())
        .with(AppLogLayer::new(log_tx.clone(), Arc::clone(&log_history)))
        .init();
    LoggingHandles { log_tx, log_history }
}

View File

@@ -0,0 +1,212 @@
//! WebhookConsumer background task.
//!
//! Subscribes to the domain-event broadcast channel, looks up each channel's
//! webhook_url, and fires HTTP POST requests (fire-and-forget).
use chrono::Utc;
use handlebars::Handlebars;
use serde_json::{Value, json};
use std::sync::Arc;
use tokio::sync::broadcast;
use tracing::{info, warn};
use uuid::Uuid;
use domain::{ChannelRepository, DomainEvent};
/// Consumes domain events and delivers them to per-channel webhook URLs.
///
/// Uses fire-and-forget HTTP POST — failures are logged as warnings, never retried.
/// Consumes domain events and delivers them to per-channel webhook URLs.
///
/// Uses fire-and-forget HTTP POST — failures are logged as warnings, never retried.
/// Runs until the event bus sender side is dropped.
pub async fn run_webhook_consumer(
    mut rx: broadcast::Receiver<DomainEvent>,
    channel_repo: Arc<dyn ChannelRepository>,
    client: reqwest::Client,
) {
    loop {
        match rx.recv().await {
            Ok(event) => {
                let channel_id = event_channel_id(&event);
                let payload = build_payload(&event);
                // Channel config is re-read per event so webhook settings
                // take effect without a restart.
                match channel_repo.find_by_id(channel_id).await {
                    Ok(Some(channel)) => {
                        if let Some(url) = channel.webhook_url {
                            let client = client.clone();
                            let template = channel.webhook_body_template.clone();
                            let headers = channel.webhook_headers.clone();
                            // Detached task: delivery never blocks event consumption.
                            tokio::spawn(async move {
                                post_webhook(&client, &url, payload, template.as_deref(), headers.as_deref()).await;
                            });
                        }
                        // No webhook_url configured — skip silently
                    }
                    Ok(None) => {
                        // Channel deleted — nothing to do
                    }
                    Err(e) => {
                        warn!("webhook consumer: failed to look up channel {}: {}", channel_id, e);
                    }
                }
            }
            Err(broadcast::error::RecvError::Lagged(n)) => {
                warn!("webhook consumer lagged, {} events dropped", n);
                // Continue — don't break; catch up from current position
            }
            Err(broadcast::error::RecvError::Closed) => {
                info!("webhook consumer: event bus closed, shutting down");
                break;
            }
        }
    }
}
/// Extract the channel_id from any event variant.
/// Extract the channel_id from any event variant.
fn event_channel_id(event: &DomainEvent) -> Uuid {
    match event {
        // Variants that carry the whole channel expose the id through it.
        DomainEvent::ChannelCreated { channel } | DomainEvent::ChannelUpdated { channel } => {
            channel.id
        }
        // All remaining variants carry the id directly.
        DomainEvent::BroadcastTransition { channel_id, .. }
        | DomainEvent::NoSignal { channel_id }
        | DomainEvent::ScheduleGenerated { channel_id, .. }
        | DomainEvent::ChannelDeleted { channel_id } => *channel_id,
    }
}
/// Build the JSON payload for an event.
/// Build the JSON payload for an event.
///
/// Every payload shares the envelope `{event, timestamp, channel_id, data}`;
/// only `data` varies per variant. `timestamp` is emission time (RFC 3339),
/// not the time the underlying state change occurred.
fn build_payload(event: &DomainEvent) -> Value {
    let now = Utc::now().to_rfc3339();
    match event {
        DomainEvent::BroadcastTransition { channel_id, slot } => {
            // How far into the slot playback is right now; clamped at 0 in
            // case the slot starts slightly in the future.
            let offset_secs = (Utc::now() - slot.start_at).num_seconds().max(0) as u64;
            json!({
                "event": "broadcast_transition",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {
                    "slot_id": slot.id,
                    "item": {
                        "id": slot.item.id.as_ref(),
                        "title": slot.item.title,
                        "duration_secs": slot.item.duration_secs,
                    },
                    "start_at": slot.start_at.to_rfc3339(),
                    "end_at": slot.end_at.to_rfc3339(),
                    "offset_secs": offset_secs,
                }
            })
        }
        DomainEvent::NoSignal { channel_id } => {
            json!({
                "event": "no_signal",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {}
            })
        }
        DomainEvent::ScheduleGenerated { channel_id, schedule } => {
            // Only metadata is sent — slots can be large, receivers fetch via API.
            json!({
                "event": "schedule_generated",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {
                    "generation": schedule.generation,
                    "valid_from": schedule.valid_from.to_rfc3339(),
                    "valid_until": schedule.valid_until.to_rfc3339(),
                    "slot_count": schedule.slots.len(),
                }
            })
        }
        DomainEvent::ChannelCreated { channel } => {
            json!({
                "event": "channel_created",
                "timestamp": now,
                "channel_id": channel.id,
                "data": {
                    "name": channel.name,
                    "description": channel.description,
                }
            })
        }
        DomainEvent::ChannelUpdated { channel } => {
            json!({
                "event": "channel_updated",
                "timestamp": now,
                "channel_id": channel.id,
                "data": {
                    "name": channel.name,
                    "description": channel.description,
                }
            })
        }
        DomainEvent::ChannelDeleted { channel_id } => {
            json!({
                "event": "channel_deleted",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {}
            })
        }
    }
}
/// Fire-and-forget HTTP POST to a webhook URL.
///
/// If `template` is provided it is rendered with `payload` as context via Handlebars.
/// `headers_json` is a JSON object string of extra HTTP headers (e.g. `{"Authorization":"Bearer x"}`).
/// Content-Type defaults to `application/json` unless overridden in `headers_json`.
async fn post_webhook(
client: &reqwest::Client,
url: &str,
payload: Value,
template: Option<&str>,
headers_json: Option<&str>,
) {
let body = if let Some(tmpl) = template {
let hbs = Handlebars::new();
match hbs.render_template(tmpl, &payload) {
Ok(rendered) => rendered,
Err(e) => {
warn!("webhook template render failed for {}: {}", url, e);
return;
}
}
} else {
match serde_json::to_string(&payload) {
Ok(s) => s,
Err(e) => {
warn!("webhook payload serialize failed: {}", e);
return;
}
}
};
let mut req = client.post(url).body(body);
let mut has_content_type = false;
if let Some(h) = headers_json
&& let Ok(map) = serde_json::from_str::<serde_json::Map<String, Value>>(h)
{
for (k, v) in &map {
if k.to_lowercase() == "content-type" {
has_content_type = true;
}
if let Some(v_str) = v.as_str() {
req = req.header(k.as_str(), v_str);
}
}
}
if !has_content_type {
req = req.header("Content-Type", "application/json");
}
match req.send().await {
Ok(resp) => {
if !resp.status().is_success() {
warn!("webhook POST to {} returned status {}", url, resp.status());
}
}
Err(e) => {
warn!("webhook POST to {} failed: {}", url, e);
}
}
}

View File

@@ -16,3 +16,4 @@ uuid = { version = "1.19.0", features = ["v4", "serde"] }
[dev-dependencies]
tokio = { version = "1", features = ["rt", "macros"] }
serde_json = "1"

View File

@@ -6,11 +6,12 @@
pub use crate::value_objects::{Email, UserId};
use chrono::{DateTime, NaiveTime, Timelike, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use uuid::Uuid;
use crate::value_objects::{
AccessMode, BlockId, ChannelId, ContentType, FillStrategy, LogoPosition, MediaFilter,
MediaItemId, RecyclePolicy, SlotId,
MediaItemId, RecyclePolicy, SlotId, Weekday,
};
/// A user in the system.
@@ -22,6 +23,7 @@ pub struct User {
pub subject: String,
pub email: Email,
pub password_hash: Option<String>,
pub is_admin: bool,
pub created_at: DateTime<Utc>,
}
@@ -32,6 +34,7 @@ impl User {
subject: subject.into(),
email,
password_hash: None,
is_admin: false,
created_at: Utc::now(),
}
}
@@ -41,6 +44,7 @@ impl User {
subject: impl Into<String>,
email: Email,
password_hash: Option<String>,
is_admin: bool,
created_at: DateTime<Utc>,
) -> Self {
Self {
@@ -48,6 +52,7 @@ impl User {
subject: subject.into(),
email,
password_hash,
is_admin,
created_at,
}
}
@@ -58,6 +63,7 @@ impl User {
subject: format!("local|{}", Uuid::new_v4()),
email,
password_hash: Some(password_hash.into()),
is_admin: false,
created_at: Utc::now(),
}
}
@@ -88,6 +94,10 @@ pub struct Channel {
pub logo: Option<String>,
pub logo_position: LogoPosition,
pub logo_opacity: f32,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: u32,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
@@ -113,46 +123,87 @@ impl Channel {
logo: None,
logo_position: LogoPosition::default(),
logo_opacity: 1.0,
webhook_url: None,
webhook_poll_interval_secs: 5,
webhook_body_template: None,
webhook_headers: None,
created_at: now,
updated_at: now,
}
}
}
/// The user-designed programming template.
/// The user-designed programming template (V2: day-keyed weekly grid).
///
/// This is the shareable/exportable part of a channel. It contains an ordered
/// list of `ProgrammingBlock`s but makes no assumptions about the media source.
/// A channel does not need to cover all 24 hours — gaps are valid and render
/// as a no-signal state on the client.
/// Each day of the week has its own independent list of `ProgrammingBlock`s.
/// A day with an empty vec (or absent key) produces no slots — valid, not an error.
/// A channel does not need to cover all 24 hours — gaps render as no-signal.
///
/// `deny_unknown_fields` is required so the `#[serde(untagged)]` compat enum
/// correctly rejects V1 `{"blocks":[...]}` payloads and falls through to `OldScheduleConfig`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ScheduleConfig {
pub day_blocks: HashMap<Weekday, Vec<ProgrammingBlock>>,
}
/// V1 on-disk shape — kept for transparent migration only.
/// Never construct directly; use `ScheduleConfigCompat` for deserialization.
/// `deny_unknown_fields` ensures V2 payloads don't accidentally match here.
#[derive(Debug, Clone, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct OldScheduleConfig {
pub blocks: Vec<ProgrammingBlock>,
}
/// Deserializes either V2 (`day_blocks`) or V1 (`blocks`) from the DB.
/// V1 is automatically promoted: all blocks are copied to all 7 days.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum ScheduleConfigCompat {
V2(ScheduleConfig),
V1(OldScheduleConfig),
}
impl From<ScheduleConfigCompat> for ScheduleConfig {
fn from(c: ScheduleConfigCompat) -> Self {
match c {
ScheduleConfigCompat::V2(cfg) => cfg,
ScheduleConfigCompat::V1(old) => {
let day_blocks = Weekday::all()
.into_iter()
.map(|d| (d, old.blocks.clone()))
.collect();
ScheduleConfig { day_blocks }
}
}
}
}
impl ScheduleConfig {
/// Return the block whose time window contains `time`, if any.
///
/// Handles blocks that span midnight (e.g. start 23:00, duration 180 min).
pub fn find_block_at(&self, time: NaiveTime) -> Option<&ProgrammingBlock> {
/// Blocks for a given day. Returns empty slice if the day has no blocks.
pub fn blocks_for(&self, day: Weekday) -> &[ProgrammingBlock] {
self.day_blocks.get(&day).map(|v| v.as_slice()).unwrap_or(&[])
}
/// The block whose window contains `time` on `day`, if any.
pub fn find_block_at(&self, day: Weekday, time: NaiveTime) -> Option<&ProgrammingBlock> {
let secs = time.num_seconds_from_midnight();
self.blocks.iter().find(|block| {
self.blocks_for(day).iter().find(|block| {
let start = block.start_time.num_seconds_from_midnight();
let end = start + block.duration_mins * 60;
if end <= 86_400 {
secs >= start && secs < end
} else {
// Block crosses midnight: active from `start` to `end % 86400` next day
secs >= start || secs < (end % 86_400)
}
})
}
/// Return the start time of the next block that begins strictly after `time`,
/// within the same calendar day.
pub fn next_block_start_after(&self, time: NaiveTime) -> Option<NaiveTime> {
/// The start time of the next block beginning strictly after `time` on `day`.
pub fn next_block_start_after(&self, day: Weekday, time: NaiveTime) -> Option<NaiveTime> {
let secs = time.num_seconds_from_midnight();
self.blocks
self.blocks_for(day)
.iter()
.map(|b| b.start_time.num_seconds_from_midnight())
.filter(|&s| s > secs)
@@ -160,9 +211,15 @@ impl ScheduleConfig {
.and_then(|s| NaiveTime::from_num_seconds_from_midnight_opt(s, 0))
}
/// The earliest block start time across all blocks (used for next-day rollover).
/// Earliest block start time across ALL days (used by background scheduler).
/// Returns `None` if every day is empty.
pub fn earliest_block_start(&self) -> Option<NaiveTime> {
self.blocks.iter().map(|b| b.start_time).min()
self.day_blocks.values().flatten().map(|b| b.start_time).min()
}
/// Iterator over all blocks across all days (for block-ID lookups that are day-agnostic).
pub fn all_blocks(&self) -> impl Iterator<Item = &ProgrammingBlock> {
self.day_blocks.values().flatten()
}
}
@@ -214,7 +271,7 @@ impl ProgrammingBlock {
name: name.into(),
start_time,
duration_mins,
content: BlockContent::Algorithmic { filter, strategy },
content: BlockContent::Algorithmic { filter, strategy, provider_id: String::new() },
loop_on_finish: true,
ignore_recycle_policy: false,
access_mode: AccessMode::default(),
@@ -233,7 +290,7 @@ impl ProgrammingBlock {
name: name.into(),
start_time,
duration_mins,
content: BlockContent::Manual { items },
content: BlockContent::Manual { items, provider_id: String::new() },
loop_on_finish: true,
ignore_recycle_policy: false,
access_mode: AccessMode::default(),
@@ -247,11 +304,21 @@ impl ProgrammingBlock {
#[serde(tag = "type", rename_all = "snake_case")]
pub enum BlockContent {
/// The user hand-picked specific items in a specific order.
Manual { items: Vec<MediaItemId> },
/// Item IDs are prefixed with the provider key (e.g. `"jellyfin::abc123"`)
/// so the registry can route each fetch to the correct provider.
Manual {
items: Vec<MediaItemId>,
/// Registry key of the provider these items come from. Empty string = primary.
#[serde(default)]
provider_id: String,
},
/// The engine selects items from the provider using the given filter and strategy.
Algorithmic {
filter: MediaFilter,
strategy: FillStrategy,
/// Registry key of the provider to query. Empty string = primary.
#[serde(default)]
provider_id: String,
},
}
@@ -279,9 +346,13 @@ pub struct MediaItem {
pub season_number: Option<u32>,
/// For episodes: episode number within the season (1-based).
pub episode_number: Option<u32>,
/// Provider-served thumbnail image URL, populated if available.
pub thumbnail_url: Option<String>,
/// Provider-specific collection this item belongs to.
pub collection_id: Option<String>,
}
/// A fully resolved 48-hour broadcast program for one channel.
/// A fully resolved 7-day broadcast program for one channel.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneratedSchedule {
pub id: Uuid,
@@ -333,6 +404,18 @@ pub struct PlaybackRecord {
pub generation: u32,
}
/// A point-in-time snapshot of a channel's `ScheduleConfig`.
/// Auto-created on every config save; users can pin with a label.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChannelConfigSnapshot {
pub id: Uuid,
pub channel_id: ChannelId,
pub config: ScheduleConfig,
pub version_num: i64,
pub label: Option<String>,
pub created_at: DateTime<Utc>,
}
impl PlaybackRecord {
pub fn new(channel_id: ChannelId, item_id: MediaItemId, generation: u32) -> Self {
Self {
@@ -344,3 +427,74 @@ impl PlaybackRecord {
}
}
}
#[cfg(test)]
mod schedule_config_tests {
    use super::*;
    use chrono::NaiveTime;
    // Shorthand for building a NaiveTime in tests.
    fn t(h: u32, m: u32) -> NaiveTime {
        NaiveTime::from_hms_opt(h, m, 0).unwrap()
    }
    // Minimal algorithmic block: default filter, random fill.
    fn make_block(start: NaiveTime, duration_mins: u32) -> ProgrammingBlock {
        ProgrammingBlock::new_algorithmic(
            "test", start, duration_mins,
            Default::default(), FillStrategy::Random,
        )
    }
    // Config with a single block scheduled on Monday only.
    fn cfg_with_monday_block(start: NaiveTime, dur: u32) -> ScheduleConfig {
        let mut cfg = ScheduleConfig::default();
        cfg.day_blocks.insert(Weekday::Monday, vec![make_block(start, dur)]);
        cfg
    }
    // Block window is half-open: [start, start + duration).
    #[test]
    fn find_block_at_finds_active_block() {
        let cfg = cfg_with_monday_block(t(8, 0), 60);
        assert!(cfg.find_block_at(Weekday::Monday, t(8, 30)).is_some());
        assert!(cfg.find_block_at(Weekday::Monday, t(9, 0)).is_none());
    }
    // Day-keyed lookup: a Monday block must not match on Tuesday.
    #[test]
    fn find_block_at_wrong_day_returns_none() {
        let cfg = cfg_with_monday_block(t(8, 0), 60);
        assert!(cfg.find_block_at(Weekday::Tuesday, t(8, 30)).is_none());
    }
    // V1 `{"blocks": [...]}` payloads are promoted to all seven days.
    #[test]
    fn v1_compat_copies_blocks_to_all_days() {
        let json = r#"{"blocks": []}"#;
        let compat: ScheduleConfigCompat = serde_json::from_str(json).unwrap();
        let cfg: ScheduleConfig = compat.into();
        assert_eq!(cfg.day_blocks.len(), 7);
    }
    // Ambiguous payloads mixing V1 and V2 keys: any outcome except a panic is
    // acceptable, since deny_unknown_fields decides which arm (if any) matches.
    #[test]
    fn v2_payload_with_unknown_blocks_key_fails() {
        let json = r#"{"blocks": [], "day_blocks": {}}"#;
        let result: Result<ScheduleConfigCompat, _> = serde_json::from_str(json);
        match result {
            Ok(ScheduleConfigCompat::V2(cfg)) => {
                let _ = cfg;
            }
            Ok(ScheduleConfigCompat::V1(_)) => { /* acceptable */ }
            Err(_) => { /* acceptable — ambiguous payload rejected */ }
        }
    }
    // Earliest start is computed across every day, not per-day.
    #[test]
    fn earliest_block_start_across_days() {
        let mut cfg = ScheduleConfig::default();
        cfg.day_blocks.insert(Weekday::Monday, vec![make_block(t(10, 0), 60)]);
        cfg.day_blocks.insert(Weekday::Friday, vec![make_block(t(7, 0), 60)]);
        assert_eq!(cfg.earliest_block_start(), Some(t(7, 0)));
    }
    // An all-empty config has no earliest start.
    #[test]
    fn empty_config_earliest_block_start_is_none() {
        let cfg = ScheduleConfig::default();
        assert!(cfg.earliest_block_start().is_none());
    }
}

View File

@@ -0,0 +1,114 @@
//! Domain events emitted when important state transitions occur.
//!
//! These are pure data — no I/O, no tokio deps. The transport
//! (tokio::sync::broadcast) lives in `api`; domain only owns the schema.
use uuid::Uuid;
use crate::entities::{Channel, GeneratedSchedule, ScheduledSlot};
/// Events emitted by the application when important state changes occur.
///
/// Must be `Clone + Send + 'static` for use as a `broadcast::channel` item.
#[derive(Clone)]
pub enum DomainEvent {
    /// A new slot started playing on a channel; carries the full slot.
    BroadcastTransition {
        channel_id: Uuid,
        slot: ScheduledSlot,
    },
    /// The channel currently has nothing scheduled (gap in programming).
    NoSignal {
        channel_id: Uuid,
    },
    /// A new schedule was generated; carries the full schedule.
    ScheduleGenerated {
        channel_id: Uuid,
        schedule: GeneratedSchedule,
    },
    /// Channel lifecycle events; create/update carry the full channel.
    ChannelCreated {
        channel: Channel,
    },
    ChannelUpdated {
        channel: Channel,
    },
    ChannelDeleted {
        channel_id: Uuid,
    },
}
#[cfg(test)]
mod tests {
    use super::*;
    use uuid::Uuid;
    // Fixture: a 30-minute slot wrapping a minimal movie item.
    fn make_slot() -> crate::entities::ScheduledSlot {
        use crate::entities::{MediaItem, ScheduledSlot};
        use crate::value_objects::{ContentType, MediaItemId};
        use chrono::Utc;
        ScheduledSlot {
            id: Uuid::new_v4(),
            start_at: Utc::now(),
            end_at: Utc::now() + chrono::Duration::minutes(30),
            item: MediaItem {
                id: MediaItemId::new("test-item".to_string()),
                title: "Test Movie".to_string(),
                content_type: ContentType::Movie,
                duration_secs: 1800,
                description: None,
                genres: vec![],
                year: None,
                tags: vec![],
                series_name: None,
                season_number: None,
                episode_number: None,
                thumbnail_url: None,
                collection_id: None,
            },
            source_block_id: Uuid::new_v4(),
        }
    }
    // Round-trip the variant's fields through construct-then-match.
    #[test]
    fn broadcast_transition_carries_slot() {
        let channel_id = Uuid::new_v4();
        let slot = make_slot();
        let event = DomainEvent::BroadcastTransition { channel_id, slot: slot.clone() };
        match event {
            DomainEvent::BroadcastTransition { channel_id: cid, slot: s } => {
                assert_eq!(cid, channel_id);
                assert_eq!(s.item.title, "Test Movie");
            }
            _ => panic!("wrong variant"),
        }
    }
    #[test]
    fn no_signal_carries_channel_id() {
        let channel_id = Uuid::new_v4();
        let event = DomainEvent::NoSignal { channel_id };
        match event {
            DomainEvent::NoSignal { channel_id: cid } => assert_eq!(cid, channel_id),
            _ => panic!("wrong variant"),
        }
    }
    // Schedule metadata (generation, slots) survives the event wrapper.
    #[test]
    fn schedule_generated_carries_metadata() {
        use crate::entities::GeneratedSchedule;
        use chrono::Utc;
        let channel_id = Uuid::new_v4();
        let schedule = GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from: Utc::now(),
            valid_until: Utc::now() + chrono::Duration::hours(48),
            generation: 3,
            slots: vec![],
        };
        let event = DomainEvent::ScheduleGenerated { channel_id, schedule: schedule.clone() };
        match event {
            DomainEvent::ScheduleGenerated { schedule: s, .. } => {
                assert_eq!(s.generation, 3);
                assert_eq!(s.slots.len(), 0);
            }
            _ => panic!("wrong variant"),
        }
    }
}

View File

@@ -6,16 +6,24 @@
pub mod entities;
pub mod errors;
pub mod iptv;
pub mod library;
pub mod ports;
pub mod repositories;
pub mod services;
pub mod events;
pub mod value_objects;
// Re-export commonly used types
pub use entities::*;
pub use errors::{DomainError, DomainResult};
pub use ports::{Collection, IMediaProvider, SeriesSummary};
pub use events::DomainEvent;
pub use ports::{Collection, IMediaProvider, IProviderRegistry, ProviderCapabilities, SeriesSummary, StreamingProtocol, StreamQuality};
pub use repositories::*;
pub use iptv::{generate_m3u, generate_xmltv};
pub use library::{
ILibraryRepository, LibraryCollection, LibraryItem, LibrarySearchFilter,
LibrarySyncAdapter, LibrarySyncLogEntry, LibrarySyncResult,
SeasonSummary, ShowSummary,
};
pub use services::{ChannelService, ScheduleEngineService, UserService};
pub use value_objects::*;

View File

@@ -0,0 +1,187 @@
//! Library domain types and ports.
use async_trait::async_trait;
use crate::{ContentType, DomainResult, IMediaProvider};
/// A media item stored in the local library cache.
#[derive(Debug, Clone)]
pub struct LibraryItem {
    /// Library-wide unique ID in the form `"{provider_id}::{external_id}"`
    /// (the `::` separator is pinned by the test below).
    pub id: String,
    /// Key of the provider this item was synced from (e.g. `"jellyfin"`).
    pub provider_id: String,
    /// The provider's own ID for this item.
    pub external_id: String,
    pub title: String,
    pub content_type: ContentType,
    /// Runtime in seconds.
    pub duration_secs: u32,
    // Episode-grouping metadata; the movie fixture in tests leaves these unset.
    pub series_name: Option<String>,
    pub season_number: Option<u32>,
    pub episode_number: Option<u32>,
    pub year: Option<u16>,
    pub genres: Vec<String>,
    pub tags: Vec<String>,
    // Optional provider collection this item belongs to.
    pub collection_id: Option<String>,
    pub collection_name: Option<String>,
    pub collection_type: Option<String>,
    pub thumbnail_url: Option<String>,
    /// When this row was last synced. Stored as a string; the test fixture
    /// uses RFC 3339 (`"2026-03-19T00:00:00Z"`) — confirm format in impls.
    pub synced_at: String,
}
/// A collection summary derived from synced library items.
#[derive(Debug, Clone)]
pub struct LibraryCollection {
    pub id: String,
    pub name: String,
    /// Free-form kind tag; mirrors `LibraryItem::collection_type`.
    pub collection_type: Option<String>,
}
/// Result of a single provider sync run.
#[derive(Debug, Clone)]
pub struct LibrarySyncResult {
    pub provider_id: String,
    /// Number of items discovered during the run.
    pub items_found: u32,
    /// Wall-clock duration of the run in milliseconds.
    pub duration_ms: u64,
    /// `Some` when the run failed; `None` on success.
    pub error: Option<String>,
}
/// Log entry from library_sync_log table.
#[derive(Debug, Clone)]
pub struct LibrarySyncLogEntry {
    /// Row ID returned by `ILibraryRepository::log_sync_start`.
    pub id: i64,
    pub provider_id: String,
    // Timestamps stored as strings, matching the table's column type.
    pub started_at: String,
    /// `None` while the run is still in progress.
    pub finished_at: Option<String>,
    pub items_found: u32,
    /// Status label — values are defined by the repository impl (TODO: document the set).
    pub status: String,
    pub error_msg: Option<String>,
}
/// Filter for searching the local library.
///
/// All constraint fields are optional / empty by default (see `Default`);
/// an empty filter matches everything, paged by `offset`/`limit`.
#[derive(Debug, Clone)]
pub struct LibrarySearchFilter {
    pub provider_id: Option<String>,
    pub content_type: Option<ContentType>,
    /// Empty = no series constraint; otherwise match any of these names.
    pub series_names: Vec<String>,
    pub collection_id: Option<String>,
    /// Empty = no genre constraint.
    pub genres: Vec<String>,
    /// Decade start year (presumably e.g. 1990 matches 1990–1999 — confirm in impls).
    pub decade: Option<u16>,
    pub min_duration_secs: Option<u32>,
    pub max_duration_secs: Option<u32>,
    pub search_term: Option<String>,
    pub season_number: Option<u32>,
    // Paging controls.
    pub offset: u32,
    pub limit: u32,
}
impl Default for LibrarySearchFilter {
fn default() -> Self {
Self {
provider_id: None,
content_type: None,
series_names: vec![],
collection_id: None,
genres: vec![],
decade: None,
min_duration_secs: None,
max_duration_secs: None,
search_term: None,
season_number: None,
offset: 0,
limit: 50,
}
}
}
/// Aggregated summary of a TV show derived from synced episodes.
#[derive(Debug, Clone)]
pub struct ShowSummary {
    /// Grouping key — episodes share this via `LibraryItem::series_name`.
    pub series_name: String,
    pub episode_count: u32,
    pub season_count: u32,
    /// Representative artwork for the show tile, if any episode has one.
    pub thumbnail_url: Option<String>,
    pub genres: Vec<String>,
}
/// Aggregated summary of one season of a TV show.
#[derive(Debug, Clone)]
pub struct SeasonSummary {
    pub season_number: u32,
    pub episode_count: u32,
    /// Representative artwork for the season tile, if available.
    pub thumbnail_url: Option<String>,
}
/// Port: sync one provider's items into the library repo.
/// DB writes are handled entirely inside implementations — no pool in the trait.
#[async_trait]
pub trait LibrarySyncAdapter: Send + Sync {
    /// Pull all items from `provider` and persist them under `provider_id`.
    ///
    /// Infallible at the signature level: failures are reported through
    /// `LibrarySyncResult::error` rather than a `Result` error.
    async fn sync_provider(
        &self,
        provider: &dyn IMediaProvider,
        provider_id: &str,
    ) -> LibrarySyncResult;
}
/// Port: read/write access to the persisted library.
#[async_trait]
pub trait ILibraryRepository: Send + Sync {
    /// Search items matching `filter`. Returns the page of items plus a
    /// count (presumably total matches before paging — confirm in impls).
    async fn search(&self, filter: &LibrarySearchFilter) -> DomainResult<(Vec<LibraryItem>, u32)>;
    /// Fetch one item by its library-wide ID.
    async fn get_by_id(&self, id: &str) -> DomainResult<Option<LibraryItem>>;
    /// Collections in the library, optionally scoped to one provider.
    async fn list_collections(&self, provider_id: Option<&str>) -> DomainResult<Vec<LibraryCollection>>;
    /// Distinct series names, optionally scoped to one provider.
    async fn list_series(&self, provider_id: Option<&str>) -> DomainResult<Vec<String>>;
    /// Distinct genres, optionally narrowed by content type and/or provider.
    async fn list_genres(&self, content_type: Option<&ContentType>, provider_id: Option<&str>) -> DomainResult<Vec<String>>;
    /// Insert or update `items` belonging to `provider_id`.
    async fn upsert_items(&self, provider_id: &str, items: Vec<LibraryItem>) -> DomainResult<()>;
    /// Remove all items belonging to `provider_id`.
    async fn clear_provider(&self, provider_id: &str) -> DomainResult<()>;
    /// Record the start of a sync run; returns the new log row's ID.
    async fn log_sync_start(&self, provider_id: &str) -> DomainResult<i64>;
    /// Record the outcome of the sync run previously started as `log_id`.
    async fn log_sync_finish(&self, log_id: i64, result: &LibrarySyncResult) -> DomainResult<()>;
    /// Recent sync log entries (presumably latest per provider — confirm in impls).
    async fn latest_sync_status(&self) -> DomainResult<Vec<LibrarySyncLogEntry>>;
    /// Whether a sync is currently in flight for `provider_id`.
    async fn is_sync_running(&self, provider_id: &str) -> DomainResult<bool>;
    /// Aggregated show summaries, filterable by provider, search term, and genres.
    async fn list_shows(
        &self,
        provider_id: Option<&str>,
        search_term: Option<&str>,
        genres: &[String],
    ) -> DomainResult<Vec<ShowSummary>>;
    /// Season summaries for one series, optionally scoped to a provider.
    async fn list_seasons(
        &self,
        series_name: &str,
        provider_id: Option<&str>,
    ) -> DomainResult<Vec<SeasonSummary>>;
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The library-wide ID format uses `::` between provider key and external ID.
    #[test]
    fn library_item_id_uses_double_colon_separator() {
        let item = LibraryItem {
            id: "jellyfin::abc123".to_string(),
            provider_id: "jellyfin".to_string(),
            external_id: "abc123".to_string(),
            title: "Test Movie".to_string(),
            content_type: crate::ContentType::Movie,
            duration_secs: 7200,
            series_name: None,
            season_number: None,
            episode_number: None,
            year: Some(2020),
            genres: vec!["Action".to_string()],
            tags: vec![],
            collection_id: None,
            collection_name: None,
            collection_type: None,
            thumbnail_url: None,
            synced_at: "2026-03-19T00:00:00Z".to_string(),
        };
        assert!(item.id.contains("::"));
        assert_eq!(item.provider_id, "jellyfin");
    }

    /// Fix: the original only checked 4 of the 12 default fields, so a
    /// regression in any other default (e.g. a non-None `search_term`)
    /// would slip through. Pin every field of `Default`.
    #[test]
    fn library_search_filter_defaults_are_empty() {
        let f = LibrarySearchFilter::default();
        // Every constraint is off by default…
        assert!(f.provider_id.is_none());
        assert!(f.content_type.is_none());
        assert!(f.collection_id.is_none());
        assert!(f.decade.is_none());
        assert!(f.min_duration_secs.is_none());
        assert!(f.max_duration_secs.is_none());
        assert!(f.search_term.is_none());
        assert!(f.season_number.is_none());
        assert!(f.genres.is_empty());
        assert!(f.series_names.is_empty());
        // …and paging starts at the first page of 50.
        assert_eq!(f.offset, 0);
        assert_eq!(f.limit, 50);
    }
}

View File

@@ -12,6 +12,52 @@ use crate::entities::MediaItem;
use crate::errors::{DomainError, DomainResult};
use crate::value_objects::{ContentType, MediaFilter, MediaItemId};
// ============================================================================
// Stream quality
// ============================================================================

/// Requested stream quality for `get_stream_url`.
///
/// Not serialized (no serde derives) — this is an internal request
/// parameter, unlike `StreamingProtocol` which travels in API payloads.
#[derive(Debug, Clone)]
pub enum StreamQuality {
    /// Try direct stream via PlaybackInfo; fall back to HLS at 8 Mbps.
    Direct,
    /// Force HLS transcode at this bitrate (bits per second).
    Transcode(u32),
}
// ============================================================================
// Provider capabilities
// ============================================================================

/// How a provider delivers video to the client.
///
/// Serialized with snake_case variant names (`"hls"`, `"direct_file"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StreamingProtocol {
    /// HLS playlist (`.m3u8`). Requires hls.js on non-Safari browsers.
    Hls,
    /// Direct file URL with Range-header support. Native `<video>` element.
    DirectFile,
}
/// Feature matrix for a media provider.
///
/// The API and frontend use this to gate calls and hide UI controls that
/// the active provider does not support.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderCapabilities {
    // Browse/filter features the provider supports, by name.
    pub collections: bool,
    pub series: bool,
    pub genres: bool,
    pub tags: bool,
    pub decade: bool,
    pub search: bool,
    /// How video is delivered to clients (HLS vs direct file).
    pub streaming_protocol: StreamingProtocol,
    /// Whether `POST /files/rescan` is available.
    pub rescan: bool,
    /// Whether on-demand FFmpeg transcoding to HLS is available.
    pub transcode: bool,
}
// ============================================================================
// Library browsing types
// ============================================================================
@@ -58,6 +104,12 @@ pub struct SeriesSummary {
/// `NoopMediaProvider`) inherit the default and return a clear error.
#[async_trait]
pub trait IMediaProvider: Send + Sync {
/// Declare what features this provider supports.
///
/// Called at request time (not cached) so the response always reflects the
/// active provider. Implementations return a plain struct — no I/O needed.
fn capabilities(&self) -> ProviderCapabilities;
/// Fetch metadata for all items matching `filter` from this provider.
///
/// The provider interprets each field of `MediaFilter` in terms of its own
@@ -76,7 +128,7 @@ pub trait IMediaProvider: Send + Sync {
///
/// URLs are intentionally *not* stored in the schedule because they may be
/// short-lived (signed URLs, session tokens) or depend on client context.
async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String>;
async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String>;
/// List top-level collections (libraries/sections) available in this provider.
///
@@ -112,3 +164,47 @@ pub trait IMediaProvider: Send + Sync {
))
}
}
// ============================================================================
// Registry port
// ============================================================================

/// Port for routing media operations across multiple named providers.
///
/// The registry holds all configured providers (Jellyfin, local files, …) and
/// dispatches each call to the right one. Item IDs are prefixed with the
/// provider key (e.g. `"jellyfin::abc123"`, `"local::base64path"`) so every
/// fetch and stream call is self-routing. An empty prefix falls back to the
/// primary (first-registered) provider for backward compatibility.
#[async_trait]
pub trait IProviderRegistry: Send + Sync {
    /// Fetch items from a named provider (used by Algorithmic blocks).
    /// Empty `provider_id` uses the primary provider.
    /// Returned item IDs are stamped with the provider prefix.
    async fn fetch_items(&self, provider_id: &str, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>>;
    /// Fetch a single item by its (possibly prefixed) ID.
    /// Routes to the correct provider by parsing the prefix.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>>;
    /// Get a playback URL. Routes via prefix in `item_id`.
    async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String>;
    /// List all registered provider keys in registration order.
    fn provider_ids(&self) -> Vec<String>;
    /// Key of the primary (first-registered) provider.
    fn primary_id(&self) -> &str;
    /// Capability matrix for a specific provider. Returns `None` if the key is unknown.
    /// Note: unlike the list/fetch methods, an empty key is NOT documented to
    /// fall back to the primary here — confirm in implementations.
    fn capabilities(&self, provider_id: &str) -> Option<ProviderCapabilities>;
    /// List collections for a provider. Empty `provider_id` = primary.
    async fn list_collections(&self, provider_id: &str) -> DomainResult<Vec<Collection>>;
    /// List series for a provider. Empty `provider_id` = primary.
    async fn list_series(&self, provider_id: &str, collection_id: Option<&str>) -> DomainResult<Vec<SeriesSummary>>;
    /// List genres for a provider. Empty `provider_id` = primary.
    async fn list_genres(&self, provider_id: &str, content_type: Option<&ContentType>) -> DomainResult<Vec<String>>;
}

View File

@@ -3,14 +3,26 @@
//! These traits define the interface for data persistence.
//! Implementations live in the infra layer.
use std::collections::HashMap;
use async_trait::async_trait;
use chrono::DateTime;
use chrono::Utc;
use uuid::Uuid;
use crate::entities::{Channel, GeneratedSchedule, PlaybackRecord, User};
use crate::entities::{Channel, ChannelConfigSnapshot, GeneratedSchedule, PlaybackRecord, ScheduleConfig, User};
use crate::errors::DomainResult;
use crate::value_objects::{ChannelId, UserId};
use crate::value_objects::{BlockId, ChannelId, MediaItemId, UserId};
/// An in-app activity event stored in the database for the admin log view.
#[derive(Debug, Clone)]
pub struct ActivityEvent {
    pub id: Uuid,
    pub timestamp: DateTime<Utc>,
    /// Short machine-readable category string (values chosen by callers of `log`).
    pub event_type: String,
    /// Human-readable description shown in the admin log.
    pub detail: String,
    /// Channel the event relates to, when applicable.
    pub channel_id: Option<Uuid>,
}
/// Repository port for User persistence
#[async_trait]
@@ -29,6 +41,26 @@ pub trait UserRepository: Send + Sync {
/// Delete a user by their ID
async fn delete(&self, id: Uuid) -> DomainResult<()>;
/// Count total number of users (used for first-user admin promotion)
async fn count_users(&self) -> DomainResult<u64>;
}
/// Persisted configuration for one media provider instance.
#[derive(Debug, Clone)]
pub struct ProviderConfigRow {
    /// Provider key, used as the item-ID prefix (e.g. `"jellyfin"`).
    pub id: String,
    pub provider_type: String,
    /// Provider-specific settings, stored as a JSON string.
    pub config_json: String,
    pub enabled: bool,
    // Stored as a string, matching the column type.
    pub updated_at: String,
}
/// Repository port for provider configuration persistence.
#[async_trait]
pub trait ProviderConfigRepository: Send + Sync {
    /// All configured providers, enabled or not.
    async fn get_all(&self) -> DomainResult<Vec<ProviderConfigRow>>;
    async fn get_by_id(&self, id: &str) -> DomainResult<Option<ProviderConfigRow>>;
    /// Insert or update a provider config keyed by `row.id`.
    async fn upsert(&self, row: &ProviderConfigRow) -> DomainResult<()>;
    async fn delete(&self, id: &str) -> DomainResult<()>;
}
/// Repository port for `Channel` persistence.
@@ -41,6 +73,33 @@ pub trait ChannelRepository: Send + Sync {
/// Insert or update a channel.
async fn save(&self, channel: &Channel) -> DomainResult<()>;
async fn delete(&self, id: ChannelId) -> DomainResult<()>;
/// Snapshot the current config before saving a new one.
/// version_num is computed by the infra layer as MAX(version_num)+1 inside a transaction.
async fn save_config_snapshot(
&self,
channel_id: ChannelId,
config: &ScheduleConfig,
label: Option<String>,
) -> DomainResult<ChannelConfigSnapshot>;
async fn list_config_snapshots(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<ChannelConfigSnapshot>>;
async fn get_config_snapshot(
&self,
channel_id: ChannelId,
snapshot_id: Uuid,
) -> DomainResult<Option<ChannelConfigSnapshot>>;
async fn patch_config_snapshot_label(
&self,
channel_id: ChannelId,
snapshot_id: Uuid,
label: Option<String>,
) -> DomainResult<Option<ChannelConfigSnapshot>>;
}
/// Repository port for `GeneratedSchedule` and `PlaybackRecord` persistence.
@@ -70,4 +129,65 @@ pub trait ScheduleRepository: Send + Sync {
) -> DomainResult<Vec<PlaybackRecord>>;
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()>;
/// Return the most recent slot per block_id across ALL schedules for a channel.
/// Resilient to any single generation having empty slots for a block.
async fn find_last_slot_per_block(
&self,
channel_id: ChannelId,
) -> DomainResult<HashMap<BlockId, MediaItemId>>;
/// List all generated schedule headers for a channel, newest first.
async fn list_schedule_history(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<GeneratedSchedule>>;
/// Fetch a specific schedule with its slots, verifying channel ownership.
async fn get_schedule_by_id(
&self,
channel_id: ChannelId,
schedule_id: Uuid,
) -> DomainResult<Option<GeneratedSchedule>>;
/// Delete all schedules with generation > target_generation for this channel.
/// Also deletes matching playback_records (no DB cascade between those tables).
/// scheduled_slots cascade via FK from generated_schedules.
async fn delete_schedules_after(
&self,
channel_id: ChannelId,
target_generation: u32,
) -> DomainResult<()>;
}
/// Repository port for activity log persistence.
#[async_trait]
pub trait ActivityLogRepository: Send + Sync {
    /// Append one event; the implementation assigns ID and timestamp.
    async fn log(
        &self,
        event_type: &str,
        detail: &str,
        channel_id: Option<Uuid>,
    ) -> DomainResult<()>;
    /// The most recent `limit` events, newest first.
    async fn recent(&self, limit: u32) -> DomainResult<Vec<ActivityEvent>>;
}
/// Repository port for transcode settings persistence.
#[async_trait]
pub trait TranscodeSettingsRepository: Send + Sync {
    /// Load the persisted cleanup TTL. Returns None if no row exists yet.
    async fn load_cleanup_ttl(&self) -> DomainResult<Option<u32>>;
    /// Persist the cleanup TTL (upsert — always row id=1).
    /// `hours` is the TTL in hours.
    async fn save_cleanup_ttl(&self, hours: u32) -> DomainResult<()>;
}
/// Repository port for general admin settings (app_settings table).
///
/// A flat string key/value store; callers own serialization of richer values.
#[async_trait]
pub trait IAppSettingsRepository: Send + Sync {
    /// Get a setting value by key. Returns None if not set.
    async fn get(&self, key: &str) -> DomainResult<Option<String>>;
    /// Set a setting value (upsert).
    async fn set(&self, key: &str, value: &str) -> DomainResult<()>;
    /// Get all settings as (key, value) pairs.
    async fn get_all(&self) -> DomainResult<Vec<(String, String)>>;
}

View File

@@ -1,6 +1,8 @@
use std::sync::Arc;
use crate::entities::Channel;
use uuid::Uuid;
use crate::entities::{Channel, ChannelConfigSnapshot, ScheduleConfig};
use crate::errors::{DomainError, DomainResult};
use crate::repositories::ChannelRepository;
use crate::value_objects::{ChannelId, UserId};
@@ -42,10 +44,75 @@ impl ChannelService {
}
/// Persist `channel`, preserving the previous schedule config as an
/// unlabeled snapshot so the change can be rolled back later.
pub async fn update(&self, channel: Channel) -> DomainResult<Channel> {
    // Auto-snapshot the existing config before it is overwritten.
    let existing = self.channel_repo.find_by_id(channel.id).await?;
    if let Some(previous) = existing {
        self.channel_repo
            .save_config_snapshot(channel.id, &previous.schedule_config, None)
            .await?;
    }
    self.channel_repo.save(&channel).await?;
    Ok(channel)
}
/// List all config snapshots for a channel (delegates to the repository).
pub async fn list_config_snapshots(
    &self,
    channel_id: ChannelId,
) -> DomainResult<Vec<ChannelConfigSnapshot>> {
    self.channel_repo.list_config_snapshots(channel_id).await
}
/// Fetch one snapshot by ID, scoped to `channel_id`. `None` if not found.
pub async fn get_config_snapshot(
    &self,
    channel_id: ChannelId,
    snapshot_id: Uuid,
) -> DomainResult<Option<ChannelConfigSnapshot>> {
    self.channel_repo.get_config_snapshot(channel_id, snapshot_id).await
}
/// Update (or clear, with `None`) a snapshot's label.
/// Returns the updated snapshot, or `None` if it does not exist.
pub async fn patch_config_snapshot_label(
    &self,
    channel_id: ChannelId,
    snapshot_id: Uuid,
    label: Option<String>,
) -> DomainResult<Option<ChannelConfigSnapshot>> {
    self.channel_repo.patch_config_snapshot_label(channel_id, snapshot_id, label).await
}
/// Restore a snapshot: auto-snapshot current config, then apply the snapshot's config.
///
/// Order matters: the snapshot is fetched first, then the *current* config is
/// saved as a new snapshot, and only then is the channel overwritten — so a
/// failed restore never loses the pre-restore config.
pub async fn restore_config_snapshot(
    &self,
    channel_id: ChannelId,
    snapshot_id: Uuid,
) -> DomainResult<Channel> {
    // NOTE(review): a missing snapshot also surfaces as ChannelNotFound,
    // which is misleading to API callers — consider a dedicated error variant.
    let snapshot = self
        .channel_repo
        .get_config_snapshot(channel_id, snapshot_id)
        .await?
        .ok_or(DomainError::ChannelNotFound(channel_id))?;
    let mut channel = self
        .channel_repo
        .find_by_id(channel_id)
        .await?
        .ok_or(DomainError::ChannelNotFound(channel_id))?;
    // Snapshot current config before overwriting
    self.channel_repo
        .save_config_snapshot(channel_id, &channel.schedule_config, None)
        .await?;
    channel.schedule_config = snapshot.config;
    channel.updated_at = chrono::Utc::now();
    self.channel_repo.save(&channel).await?;
    Ok(channel)
}
/// Manually snapshot `config` for `channel_id`, optionally labeled.
/// (Auto-snapshots elsewhere in this service pass `label: None`.)
pub async fn save_config_snapshot(
    &self,
    channel_id: ChannelId,
    config: &ScheduleConfig,
    label: Option<String>,
) -> DomainResult<ChannelConfigSnapshot> {
    self.channel_repo.save_config_snapshot(channel_id, config, label).await
}
/// Delete a channel, enforcing that `requester_id` is the owner.
pub async fn delete(&self, id: ChannelId, requester_id: UserId) -> DomainResult<()> {
let channel = self.find_by_id(id).await?;

View File

@@ -1,6 +1,8 @@
use std::collections::HashSet;
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::SeedableRng;
use crate::entities::MediaItem;
use crate::value_objects::{FillStrategy, MediaItemId};
@@ -20,7 +22,7 @@ pub(super) fn fill_block<'a>(
}
FillStrategy::Random => {
let mut indices: Vec<usize> = (0..pool.len()).collect();
indices.shuffle(&mut rand::thread_rng());
indices.shuffle(&mut StdRng::from_entropy());
let mut remaining = target_secs;
let mut result = Vec::new();
for i in indices {
@@ -127,12 +129,22 @@ pub(super) fn fill_sequential<'a>(
};
// Greedily fill the block's time budget in episode order.
// Stop at the first episode that doesn't fit — skipping would break ordering.
let mut remaining = target_secs;
let mut result = Vec::new();
for item in ordered {
for item in &ordered {
if item.duration_secs <= remaining {
remaining -= item.duration_secs;
result.push(item);
result.push(*item);
} else {
break;
}
}
// Edge case: if the very first episode is longer than the entire block,
// still include it — the slot builder clips it to block end via .min(end).
if result.is_empty() {
if let Some(&first) = ordered.first() {
result.push(first);
}
}
result

View File

@@ -1,7 +1,6 @@
use std::collections::HashMap;
use std::sync::Arc;
use chrono::{DateTime, Duration, TimeZone, Utc};
use chrono::{DateTime, Datelike, Duration, TimeZone, Utc};
use chrono_tz::Tz;
use uuid::Uuid;
@@ -10,7 +9,7 @@ use crate::entities::{
ScheduledSlot,
};
use crate::errors::{DomainError, DomainResult};
use crate::ports::IMediaProvider;
use crate::ports::{IProviderRegistry, StreamQuality};
use crate::repositories::{ChannelRepository, ScheduleRepository};
use crate::value_objects::{
BlockId, ChannelId, FillStrategy, MediaFilter, MediaItemId, RecyclePolicy,
@@ -21,24 +20,24 @@ mod recycle;
/// Core scheduling engine.
///
/// Generates 48-hour broadcast schedules by walking through a channel's
/// Generates 7-day broadcast schedules by walking through a channel's
/// `ScheduleConfig` day by day, resolving each `ProgrammingBlock` into concrete
/// `ScheduledSlot`s via the `IMediaProvider`, and applying the `RecyclePolicy`
/// to avoid replaying recently aired items.
pub struct ScheduleEngineService {
media_provider: Arc<dyn IMediaProvider>,
provider_registry: Arc<dyn IProviderRegistry>,
channel_repo: Arc<dyn ChannelRepository>,
schedule_repo: Arc<dyn ScheduleRepository>,
}
impl ScheduleEngineService {
pub fn new(
media_provider: Arc<dyn IMediaProvider>,
provider_registry: Arc<dyn IProviderRegistry>,
channel_repo: Arc<dyn ChannelRepository>,
schedule_repo: Arc<dyn ScheduleRepository>,
) -> Self {
Self {
media_provider,
provider_registry,
channel_repo,
schedule_repo,
}
@@ -48,12 +47,12 @@ impl ScheduleEngineService {
// Public API
// -------------------------------------------------------------------------
/// Generate and persist a 48-hour schedule for `channel_id` starting at `from`.
/// Generate and persist a 7-day schedule for `channel_id` starting at `from`.
///
/// The algorithm:
/// 1. Walk each calendar day in the 48-hour window.
/// 1. Walk each calendar day in the 7-day window.
/// 2. For each `ProgrammingBlock`, compute its UTC wall-clock interval for that day.
/// 3. Clip the interval to `[from, from + 48h)`.
/// 3. Clip the interval to `[from, from + 7d)`.
/// 4. Resolve the block content via the media provider, applying the recycle policy.
/// 5. For `Sequential` blocks, resume from where the previous generation left off
/// (series continuity — see `fill::fill_sequential`).
@@ -91,21 +90,18 @@ impl ScheduleEngineService {
.map(|s| s.generation + 1)
.unwrap_or(1);
// Build the initial per-block continuity map from the previous generation's
// last slot per block. The map is updated as each block occurrence is resolved
// within this generation so that the second day of a 48h schedule continues
// from where the first day ended.
let mut block_continuity: HashMap<BlockId, MediaItemId> = latest_schedule
.iter()
.flat_map(|s| &s.slots)
.fold(HashMap::new(), |mut map, slot| {
// keep only the *last* slot per block (slots are sorted ascending)
map.insert(slot.source_block_id, slot.item.id.clone());
map
});
// Build the initial per-block continuity map from the most recent slot per
// block across ALL schedules. This is resilient to any single generation
// having empty slots for a block (e.g. provider returned nothing transiently).
// The map is updated as each block occurrence is resolved within this
// generation so the second day of a 48h schedule continues from here.
let mut block_continuity = self
.schedule_repo
.find_last_slot_per_block(channel_id)
.await?;
let valid_from = from;
let valid_until = from + Duration::hours(48);
let valid_until = from + Duration::days(7);
let start_date = from.with_timezone(&tz).date_naive();
let end_date = valid_until.with_timezone(&tz).date_naive();
@@ -114,7 +110,8 @@ impl ScheduleEngineService {
let mut current_date = start_date;
while current_date <= end_date {
for block in &channel.schedule_config.blocks {
let weekday = crate::value_objects::Weekday::from(current_date.weekday());
for block in channel.schedule_config.blocks_for(weekday) {
let naive_start = current_date.and_time(block.start_time);
// `earliest()` handles DST gaps — if the local time doesn't exist
@@ -127,7 +124,7 @@ impl ScheduleEngineService {
let block_end_utc =
block_start_utc + Duration::minutes(block.duration_mins as i64);
// Clip to the 48-hour window.
// Clip to the 7-day window.
let slot_start = block_start_utc.max(valid_from);
let slot_end = block_end_utc.min(valid_until);
@@ -223,17 +220,43 @@ impl ScheduleEngineService {
self.schedule_repo.find_active(channel_id, at).await
}
/// Delegate stream URL resolution to the configured media provider.
pub async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String> {
self.media_provider.get_stream_url(item_id).await
/// Delegate stream URL resolution to the provider registry (routes via ID prefix).
pub async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
self.provider_registry.get_stream_url(item_id, quality).await
}
/// List all generated schedule headers for a channel, newest first.
/// (Delegates to the schedule repository.)
pub async fn list_schedule_history(
    &self,
    channel_id: ChannelId,
) -> DomainResult<Vec<GeneratedSchedule>> {
    self.schedule_repo.list_schedule_history(channel_id).await
}
/// Fetch a specific schedule with its slots.
/// The repository scopes the lookup to `channel_id`, so a schedule belonging
/// to another channel comes back as `None`.
pub async fn get_schedule_by_id(
    &self,
    channel_id: ChannelId,
    schedule_id: uuid::Uuid,
) -> DomainResult<Option<GeneratedSchedule>> {
    self.schedule_repo.get_schedule_by_id(channel_id, schedule_id).await
}
/// Delete all schedules with generation > target_generation for this channel
/// (i.e. roll history back to `target_generation`).
pub async fn delete_schedules_after(
    &self,
    channel_id: ChannelId,
    target_generation: u32,
) -> DomainResult<()> {
    self.schedule_repo.delete_schedules_after(channel_id, target_generation).await
}
/// Return all slots that overlap the given time window — the EPG data.
pub fn get_epg<'a>(
schedule: &'a GeneratedSchedule,
pub fn get_epg(
schedule: &GeneratedSchedule,
from: DateTime<Utc>,
until: DateTime<Utc>,
) -> Vec<&'a ScheduledSlot> {
) -> Vec<&ScheduledSlot> {
schedule
.slots
.iter()
@@ -245,6 +268,7 @@ impl ScheduleEngineService {
// Block resolution
// -------------------------------------------------------------------------
#[allow(clippy::too_many_arguments)]
async fn resolve_block(
&self,
block: &ProgrammingBlock,
@@ -256,12 +280,12 @@ impl ScheduleEngineService {
last_item_id: Option<&MediaItemId>,
) -> DomainResult<Vec<ScheduledSlot>> {
match &block.content {
BlockContent::Manual { items } => {
BlockContent::Manual { items, .. } => {
self.resolve_manual(items, start, end, block.id).await
}
BlockContent::Algorithmic { filter, strategy } => {
BlockContent::Algorithmic { filter, strategy, provider_id } => {
self.resolve_algorithmic(
filter, strategy, start, end, history, policy, generation,
provider_id, filter, strategy, start, end, history, policy, generation,
block.id, last_item_id,
block.loop_on_finish,
block.ignore_recycle_policy,
@@ -287,7 +311,7 @@ impl ScheduleEngineService {
if cursor >= end {
break;
}
if let Some(item) = self.media_provider.fetch_by_id(item_id).await? {
if let Some(item) = self.provider_registry.fetch_by_id(item_id).await? {
let item_end =
(cursor + Duration::seconds(item.duration_secs as i64)).min(end);
slots.push(ScheduledSlot {
@@ -310,8 +334,10 @@ impl ScheduleEngineService {
///
/// `last_item_id` is the ID of the last item scheduled in this block in the
/// previous generation. Used only by `Sequential` for series continuity.
#[allow(clippy::too_many_arguments)]
async fn resolve_algorithmic(
&self,
provider_id: &str,
filter: &MediaFilter,
strategy: &FillStrategy,
start: DateTime<Utc>,
@@ -327,7 +353,7 @@ impl ScheduleEngineService {
// `candidates` — all items matching the filter, in provider order.
// Kept separate from `pool` so Sequential can rotate through the full
// ordered list while still honouring cooldowns.
let candidates = self.media_provider.fetch_items(filter).await?;
let candidates = self.provider_registry.fetch_items(provider_id, filter).await?;
if candidates.is_empty() {
return Ok(vec![]);

View File

@@ -31,7 +31,10 @@ impl UserService {
}
let email = Email::try_from(email)?;
let user = User::new(subject, email);
let mut user = User::new(subject, email);
if self.user_repository.count_users().await? == 0 {
user.is_admin = true;
}
self.user_repository.save(&user).await?;
Ok(user)
}
@@ -53,7 +56,10 @@ impl UserService {
password_hash: &str,
) -> DomainResult<User> {
let email = Email::try_from(email)?;
let user = User::new_local(email, password_hash);
let mut user = User::new_local(email, password_hash);
if self.user_repository.count_users().await? == 0 {
user.is_admin = true;
}
self.user_repository.save(&user).await?;
Ok(user)
}

View File

@@ -138,3 +138,64 @@ impl Default for RecyclePolicy {
}
}
}
/// Day of week, used as key in weekly schedule configs.
///
/// Serialized as lowercase names (`"monday"`, …) via the serde attribute.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Weekday {
    Monday,
    Tuesday,
    Wednesday,
    Thursday,
    Friday,
    Saturday,
    Sunday,
}
/// 1:1 mapping from chrono's weekday to the domain enum.
/// The match is exhaustive, so a new chrono variant would fail to compile
/// rather than silently mis-map.
impl From<chrono::Weekday> for Weekday {
    fn from(w: chrono::Weekday) -> Self {
        match w {
            chrono::Weekday::Mon => Weekday::Monday,
            chrono::Weekday::Tue => Weekday::Tuesday,
            chrono::Weekday::Wed => Weekday::Wednesday,
            chrono::Weekday::Thu => Weekday::Thursday,
            chrono::Weekday::Fri => Weekday::Friday,
            chrono::Weekday::Sat => Weekday::Saturday,
            chrono::Weekday::Sun => Weekday::Sunday,
        }
    }
}
impl Weekday {
    /// All seven days in ISO week order.
    pub fn all() -> [Weekday; 7] {
        // ISO week order: Monday = index 0, Sunday = index 6.
        // The schedule engine depends on this order when iterating days.
        [
            Weekday::Monday, Weekday::Tuesday, Weekday::Wednesday,
            Weekday::Thursday, Weekday::Friday, Weekday::Saturday, Weekday::Sunday,
        ]
    }
}
#[cfg(test)]
mod weekday_tests {
    use super::*;

    /// Every chrono variant maps to the expected domain variant.
    #[test]
    fn from_chrono_weekday_all_variants() {
        assert_eq!(Weekday::from(chrono::Weekday::Mon), Weekday::Monday);
        assert_eq!(Weekday::from(chrono::Weekday::Tue), Weekday::Tuesday);
        assert_eq!(Weekday::from(chrono::Weekday::Wed), Weekday::Wednesday);
        assert_eq!(Weekday::from(chrono::Weekday::Thu), Weekday::Thursday);
        assert_eq!(Weekday::from(chrono::Weekday::Fri), Weekday::Friday);
        assert_eq!(Weekday::from(chrono::Weekday::Sat), Weekday::Saturday);
        assert_eq!(Weekday::from(chrono::Weekday::Sun), Weekday::Sunday);
    }

    /// Fix: the original only checked the endpoints, even though `all()`'s
    /// own comment says the schedule engine depends on the full ISO order.
    /// Pin the complete sequence so a mid-week swap cannot slip through.
    #[test]
    fn all_returns_monday_first_sunday_last() {
        let days = Weekday::all();
        assert_eq!(
            days,
            [
                Weekday::Monday,
                Weekday::Tuesday,
                Weekday::Wednesday,
                Weekday::Thursday,
                Weekday::Friday,
                Weekday::Saturday,
                Weekday::Sunday,
            ]
        );
        assert_eq!(days[0], Weekday::Monday);
        assert_eq!(days[6], Weekday::Sunday);
    }
}

View File

@@ -11,6 +11,7 @@ broker-nats = ["dep:futures-util", "k-core/broker-nats"]
auth-oidc = ["dep:openidconnect", "dep:url", "dep:axum-extra"]
auth-jwt = ["dep:jsonwebtoken"]
jellyfin = ["dep:reqwest"]
local-files = ["dep:walkdir", "dep:base64", "sqlite"]
[dependencies]
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
@@ -46,3 +47,5 @@ jsonwebtoken = { version = "10.2.0", features = [
"rsa",
"rust_crypto",
], optional = true }
walkdir = { version = "2", optional = true }
base64 = { version = "0.22", optional = true }

View File

@@ -0,0 +1,5 @@
//! Activity-log repository implementations, gated by storage-backend feature.
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteActivityLogRepository;

View File

@@ -0,0 +1,71 @@
use async_trait::async_trait;
use chrono::Utc;
use uuid::Uuid;
use domain::{ActivityEvent, ActivityLogRepository, DomainError, DomainResult};
/// SQLite-backed implementation of `ActivityLogRepository`.
pub struct SqliteActivityLogRepository {
    pool: sqlx::SqlitePool,
}

impl SqliteActivityLogRepository {
    /// Wrap an existing connection pool; assumes the `activity_log` table exists.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl ActivityLogRepository for SqliteActivityLogRepository {
    /// Append one event row. ID and timestamp are generated here:
    /// a fresh UUIDv4 and the current UTC instant as RFC 3339 text.
    async fn log(
        &self,
        event_type: &str,
        detail: &str,
        channel_id: Option<Uuid>,
    ) -> DomainResult<()> {
        let id = Uuid::new_v4().to_string();
        // Always UTC with a fixed offset, so lexicographic ordering of the
        // stored text matches chronological ordering (relied on by `recent`).
        let timestamp = Utc::now().to_rfc3339();
        let channel_id_str = channel_id.map(|id| id.to_string());
        // Bind order must match the five placeholders exactly.
        sqlx::query(
            "INSERT INTO activity_log (id, timestamp, event_type, detail, channel_id) VALUES (?, ?, ?, ?, ?)",
        )
        .bind(&id)
        .bind(&timestamp)
        .bind(event_type)
        .bind(detail)
        .bind(&channel_id_str)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// The most recent `limit` events, newest first (textual timestamp sort —
    /// correct because `log` always writes UTC RFC 3339).
    async fn recent(&self, limit: u32) -> DomainResult<Vec<ActivityEvent>> {
        let rows: Vec<(String, String, String, String, Option<String>)> = sqlx::query_as(
            "SELECT id, timestamp, event_type, detail, channel_id FROM activity_log ORDER BY timestamp DESC LIMIT ?",
        )
        .bind(limit)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        let events = rows
            .into_iter()
            // Rows with an unparseable id or timestamp are silently dropped
            // (best-effort view); a malformed channel_id degrades to None.
            .filter_map(|(id, timestamp, event_type, detail, channel_id)| {
                let id = Uuid::parse_str(&id).ok()?;
                let timestamp = timestamp.parse().ok()?;
                let channel_id = channel_id.and_then(|s| Uuid::parse_str(&s).ok());
                Some(ActivityEvent {
                    id,
                    timestamp,
                    event_type,
                    detail,
                    channel_id,
                })
            })
            .collect();
        Ok(events)
    }
}

View File

@@ -0,0 +1,83 @@
//! SQLite implementation of IAppSettingsRepository.
use async_trait::async_trait;
use sqlx::SqlitePool;
use domain::{DomainError, DomainResult, IAppSettingsRepository};
/// SQLite-backed implementation of `IAppSettingsRepository`, storing
/// settings as key/value strings in the `app_settings` table.
pub struct SqliteAppSettingsRepository {
pool: SqlitePool,
}
impl SqliteAppSettingsRepository {
/// Wrap an existing connection pool; no I/O happens here.
pub fn new(pool: SqlitePool) -> Self {
Self { pool }
}
}
#[async_trait]
impl IAppSettingsRepository for SqliteAppSettingsRepository {
    /// Look up one setting; `None` when the key is absent.
    async fn get(&self, key: &str) -> DomainResult<Option<String>> {
        let fetched = sqlx::query_scalar::<_, String>("SELECT value FROM app_settings WHERE key = ?")
            .bind(key)
            .fetch_optional(&self.pool)
            .await;
        fetched.map_err(|e| DomainError::InfrastructureError(e.to_string()))
    }

    /// Insert a setting or overwrite an existing one.
    async fn set(&self, key: &str, value: &str) -> DomainResult<()> {
        let result = sqlx::query("INSERT OR REPLACE INTO app_settings (key, value) VALUES (?, ?)")
            .bind(key)
            .bind(value)
            .execute(&self.pool)
            .await;
        match result {
            Ok(_) => Ok(()),
            Err(e) => Err(DomainError::InfrastructureError(e.to_string())),
        }
    }

    /// Every stored setting as `(key, value)` pairs, ordered by key.
    async fn get_all(&self) -> DomainResult<Vec<(String, String)>> {
        sqlx::query_as::<_, (String, String)>("SELECT key, value FROM app_settings ORDER BY key")
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::InfrastructureError(e.to_string()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::sqlite::SqlitePoolOptions;
    use sqlx::SqlitePool;
    use domain::IAppSettingsRepository;

    /// Build a fresh in-memory database with the `app_settings` table and
    /// one seeded row.
    ///
    /// The pool is capped at a single connection: each pooled connection
    /// to `:memory:` opens its OWN private database, so with the default
    /// pool size the table created here could be invisible to whichever
    /// connection a later query checks out. One connection == one database.
    async fn setup() -> SqlitePool {
        let pool = SqlitePoolOptions::new()
            .max_connections(1)
            .connect(":memory:")
            .await
            .unwrap();
        sqlx::query(
            "CREATE TABLE app_settings (key TEXT PRIMARY KEY, value TEXT NOT NULL)"
        ).execute(&pool).await.unwrap();
        sqlx::query("INSERT INTO app_settings VALUES ('library_sync_interval_hours', '6')")
            .execute(&pool).await.unwrap();
        pool
    }

    /// `get` returns the value seeded in `setup`.
    #[tokio::test]
    async fn get_returns_seeded_value() {
        let repo = SqliteAppSettingsRepository::new(setup().await);
        let val = repo.get("library_sync_interval_hours").await.unwrap();
        assert_eq!(val, Some("6".to_string()));
    }

    /// `set` overwrites an existing key and `get` observes the new value.
    #[tokio::test]
    async fn set_then_get() {
        let repo = SqliteAppSettingsRepository::new(setup().await);
        repo.set("library_sync_interval_hours", "12").await.unwrap();
        let val = repo.get("library_sync_interval_hours").await.unwrap();
        assert_eq!(val, Some("12".to_string()));
    }

    /// `get_all` includes the seeded key.
    #[tokio::test]
    async fn get_all_returns_all_keys() {
        let repo = SqliteAppSettingsRepository::new(setup().await);
        let all = repo.get_all().await.unwrap();
        assert!(!all.is_empty());
        assert!(all.iter().any(|(k, _)| k == "library_sync_interval_hours"));
    }
}

View File

@@ -20,8 +20,10 @@ pub struct JwtConfig {
pub issuer: Option<String>,
/// Expected audience (for validation)
pub audience: Option<String>,
/// Token expiry in hours (default: 24)
/// Access token expiry in hours (default: 24)
pub expiry_hours: u64,
/// Refresh token expiry in days (default: 30)
pub refresh_expiry_days: u64,
}
impl JwtConfig {
@@ -33,6 +35,7 @@ impl JwtConfig {
issuer: Option<String>,
audience: Option<String>,
expiry_hours: Option<u64>,
refresh_expiry_days: Option<u64>,
is_production: bool,
) -> Result<Self, JwtError> {
// Validate secret strength in production
@@ -48,6 +51,7 @@ impl JwtConfig {
issuer,
audience,
expiry_hours: expiry_hours.unwrap_or(24),
refresh_expiry_days: refresh_expiry_days.unwrap_or(30),
})
}
@@ -58,10 +62,15 @@ impl JwtConfig {
issuer: None,
audience: None,
expiry_hours: 24,
refresh_expiry_days: 30,
}
}
}
/// Serde default for `JwtClaims::token_type`: tokens minted before the
/// field existed deserialize as access tokens (backward compatibility).
fn default_token_type() -> String {
    String::from("access")
}
/// JWT claims structure
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct JwtClaims {
@@ -79,6 +88,9 @@ pub struct JwtClaims {
/// Audience
#[serde(skip_serializing_if = "Option::is_none")]
pub aud: Option<String>,
/// Token type: "access" or "refresh". Defaults to "access" for backward compat.
#[serde(default = "default_token_type")]
pub token_type: String,
}
/// JWT-related errors
@@ -141,7 +153,7 @@ impl JwtValidator {
}
}
/// Create a JWT token for the given user
/// Create an access JWT token for the given user
pub fn create_token(&self, user: &User) -> Result<String, JwtError> {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
@@ -157,6 +169,30 @@ impl JwtValidator {
iat: now,
iss: self.config.issuer.clone(),
aud: self.config.audience.clone(),
token_type: "access".to_string(),
};
let header = Header::new(Algorithm::HS256);
encode(&header, &claims, &self.encoding_key).map_err(JwtError::CreationFailed)
}
/// Create a refresh JWT token for the given user (longer-lived)
pub fn create_refresh_token(&self, user: &User) -> Result<String, JwtError> {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs() as usize;
let expiry = now + (self.config.refresh_expiry_days as usize * 86400);
let claims = JwtClaims {
sub: user.id.to_string(),
email: user.email.as_ref().to_string(),
exp: expiry,
iat: now,
iss: self.config.issuer.clone(),
aud: self.config.audience.clone(),
token_type: "refresh".to_string(),
};
let header = Header::new(Algorithm::HS256);
@@ -176,14 +212,28 @@ impl JwtValidator {
Ok(token_data.claims)
}
/// Validate an access token — rejects refresh tokens.
///
/// Performs full signature/expiry validation via `validate_token`, then
/// additionally requires the `token_type` claim to be `"access"`.
pub fn validate_access_token(&self, token: &str) -> Result<JwtClaims, JwtError> {
    let claims = self.validate_token(token)?;
    match claims.token_type.as_str() {
        "access" => Ok(claims),
        _ => Err(JwtError::ValidationFailed("Not an access token".to_string())),
    }
}
/// Validate a refresh token — rejects access tokens.
///
/// Performs full signature/expiry validation via `validate_token`, then
/// additionally requires the `token_type` claim to be `"refresh"`.
pub fn validate_refresh_token(&self, token: &str) -> Result<JwtClaims, JwtError> {
    let claims = self.validate_token(token)?;
    match claims.token_type.as_str() {
        "refresh" => Ok(claims),
        _ => Err(JwtError::ValidationFailed("Not a refresh token".to_string())),
    }
}
/// Get the user ID (subject) from a token without full validation
/// Useful for logging/debugging, but should not be trusted for auth
pub fn decode_unverified(&self, token: &str) -> Result<JwtClaims, JwtError> {
let mut validation = Validation::new(Algorithm::HS256);
validation.insecure_disable_signature_validation();
validation.validate_exp = false;
let token_data = decode::<JwtClaims>(token, &self.decoding_key, &validation)
let token_data = jsonwebtoken::dangerous::insecure_decode::<JwtClaims>(token)
.map_err(|_| JwtError::InvalidFormat)?;
Ok(token_data.claims)
@@ -232,6 +282,7 @@ mod tests {
None,
None,
None,
None,
true, // Production mode
);
@@ -245,6 +296,7 @@ mod tests {
None,
None,
None,
None,
false, // Development mode
);

View File

@@ -2,7 +2,7 @@ use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{AccessMode, Channel, ChannelId, DomainError, LogoPosition, RecyclePolicy, ScheduleConfig, UserId};
use domain::{AccessMode, Channel, ChannelId, DomainError, LogoPosition, RecyclePolicy, ScheduleConfig, ScheduleConfigCompat, UserId};
#[derive(Debug, FromRow)]
pub(super) struct ChannelRow {
@@ -19,6 +19,10 @@ pub(super) struct ChannelRow {
pub logo: Option<String>,
pub logo_position: String,
pub logo_opacity: f32,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: i64,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
pub created_at: String,
pub updated_at: String,
}
@@ -40,10 +44,11 @@ impl TryFrom<ChannelRow> for Channel {
.map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
let owner_id: UserId = Uuid::parse_str(&row.owner_id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid owner UUID: {}", e)))?;
let schedule_config: ScheduleConfig = serde_json::from_str(&row.schedule_config)
let schedule_config: ScheduleConfig = serde_json::from_str::<ScheduleConfigCompat>(&row.schedule_config)
.map_err(|e| {
DomainError::RepositoryError(format!("Invalid schedule_config JSON: {}", e))
})?;
})
.map(ScheduleConfig::from)?;
let recycle_policy: RecyclePolicy = serde_json::from_str(&row.recycle_policy)
.map_err(|e| {
DomainError::RepositoryError(format!("Invalid recycle_policy JSON: {}", e))
@@ -73,6 +78,10 @@ impl TryFrom<ChannelRow> for Channel {
logo: row.logo,
logo_position,
logo_opacity: row.logo_opacity,
webhook_url: row.webhook_url,
webhook_poll_interval_secs: row.webhook_poll_interval_secs as u32,
webhook_body_template: row.webhook_body_template,
webhook_headers: row.webhook_headers,
created_at: parse_dt(&row.created_at)?,
updated_at: parse_dt(&row.updated_at)?,
})
@@ -80,4 +89,4 @@ impl TryFrom<ChannelRow> for Channel {
}
pub(super) const SELECT_COLS: &str =
"id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, created_at, updated_at";
"id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, webhook_url, webhook_poll_interval_secs, webhook_body_template, webhook_headers, created_at, updated_at";

View File

@@ -66,8 +66,8 @@ impl ChannelRepository for PostgresChannelRepository {
sqlx::query(
r#"
INSERT INTO channels
(id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
(id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, webhook_url, webhook_poll_interval_secs, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
ON CONFLICT(id) DO UPDATE SET
name = EXCLUDED.name,
description = EXCLUDED.description,
@@ -77,6 +77,8 @@ impl ChannelRepository for PostgresChannelRepository {
auto_schedule = EXCLUDED.auto_schedule,
access_mode = EXCLUDED.access_mode,
access_password_hash = EXCLUDED.access_password_hash,
webhook_url = EXCLUDED.webhook_url,
webhook_poll_interval_secs = EXCLUDED.webhook_poll_interval_secs,
updated_at = EXCLUDED.updated_at
"#,
)
@@ -90,6 +92,8 @@ impl ChannelRepository for PostgresChannelRepository {
.bind(channel.auto_schedule as i64)
.bind(&access_mode)
.bind(&channel.access_password_hash)
.bind(&channel.webhook_url)
.bind(channel.webhook_poll_interval_secs as i64)
.bind(channel.created_at.to_rfc3339())
.bind(channel.updated_at.to_rfc3339())
.execute(&self.pool)

View File

@@ -1,6 +1,9 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::Row;
use uuid::Uuid;
use domain::{Channel, ChannelId, ChannelRepository, DomainError, DomainResult, UserId};
use domain::{Channel, ChannelConfigSnapshot, ChannelId, ChannelRepository, DomainError, DomainResult, ScheduleConfig, ScheduleConfigCompat, UserId};
use super::mapping::{ChannelRow, SELECT_COLS};
@@ -71,8 +74,8 @@ impl ChannelRepository for SqliteChannelRepository {
sqlx::query(
r#"
INSERT INTO channels
(id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
(id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, webhook_url, webhook_poll_interval_secs, webhook_body_template, webhook_headers, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
name = excluded.name,
description = excluded.description,
@@ -85,6 +88,10 @@ impl ChannelRepository for SqliteChannelRepository {
logo = excluded.logo,
logo_position = excluded.logo_position,
logo_opacity = excluded.logo_opacity,
webhook_url = excluded.webhook_url,
webhook_poll_interval_secs = excluded.webhook_poll_interval_secs,
webhook_body_template = excluded.webhook_body_template,
webhook_headers = excluded.webhook_headers,
updated_at = excluded.updated_at
"#,
)
@@ -101,6 +108,10 @@ impl ChannelRepository for SqliteChannelRepository {
.bind(&channel.logo)
.bind(&logo_position)
.bind(channel.logo_opacity)
.bind(&channel.webhook_url)
.bind(channel.webhook_poll_interval_secs as i64)
.bind(&channel.webhook_body_template)
.bind(&channel.webhook_headers)
.bind(channel.created_at.to_rfc3339())
.bind(channel.updated_at.to_rfc3339())
.execute(&self.pool)
@@ -131,4 +142,129 @@ impl ChannelRepository for SqliteChannelRepository {
Ok(())
}
/// Persist a new schedule-config snapshot for `channel_id`, assigning the
/// next sequential `version_num` for that channel (starting at 1).
///
/// The version read and the insert run in one transaction so the counter
/// and the row commit together.
async fn save_config_snapshot(
&self,
channel_id: ChannelId,
config: &ScheduleConfig,
label: Option<String>,
) -> DomainResult<ChannelConfigSnapshot> {
let id = Uuid::new_v4();
let now = Utc::now();
let config_json = serde_json::to_string(config)
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
let mut tx = self.pool.begin().await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// Next version number: highest existing for this channel, plus one.
let version_num: i64 = sqlx::query_scalar(
"SELECT COALESCE(MAX(version_num), 0) + 1 FROM channel_config_snapshots WHERE channel_id = ?"
)
.bind(channel_id.to_string())
.fetch_one(&mut *tx)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
sqlx::query(
"INSERT INTO channel_config_snapshots (id, channel_id, config_json, version_num, label, created_at)
VALUES (?, ?, ?, ?, ?, ?)"
)
.bind(id.to_string())
.bind(channel_id.to_string())
.bind(&config_json)
.bind(version_num)
.bind(&label)
.bind(now.to_rfc3339())
.execute(&mut *tx)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
tx.commit().await.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// Return the snapshot we just wrote without re-reading it.
Ok(ChannelConfigSnapshot { id, channel_id, config: config.clone(), version_num, label, created_at: now })
}
/// List all config snapshots for a channel, newest version first.
///
/// `config_json` is decoded through `ScheduleConfigCompat` then converted
/// — presumably a backward-compat shim for older stored shapes; confirm
/// against the domain crate.
async fn list_config_snapshots(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<ChannelConfigSnapshot>> {
let rows = sqlx::query(
"SELECT id, config_json, version_num, label, created_at
FROM channel_config_snapshots WHERE channel_id = ?
ORDER BY version_num DESC"
)
.bind(channel_id.to_string())
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// Any malformed row aborts the whole listing (collect over Results).
rows.iter().map(|row| {
let id: Uuid = row.get::<String, _>("id").parse()
.map_err(|_| DomainError::RepositoryError("bad uuid".into()))?;
let config_json: String = row.get("config_json");
let config_compat: ScheduleConfigCompat = serde_json::from_str(&config_json)
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
let config: ScheduleConfig = config_compat.into();
let version_num: i64 = row.get("version_num");
let label: Option<String> = row.get("label");
let created_at_str: String = row.get("created_at");
let created_at = created_at_str.parse::<DateTime<Utc>>()
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
Ok(ChannelConfigSnapshot { id, channel_id, config, version_num, label, created_at })
}).collect()
}
/// Fetch a single snapshot by id, scoped to `channel_id` so one channel
/// cannot read another channel's snapshots. `None` when no row matches.
async fn get_config_snapshot(
&self,
channel_id: ChannelId,
snapshot_id: Uuid,
) -> DomainResult<Option<ChannelConfigSnapshot>> {
let row = sqlx::query(
"SELECT id, config_json, version_num, label, created_at
FROM channel_config_snapshots WHERE id = ? AND channel_id = ?"
)
.bind(snapshot_id.to_string())
.bind(channel_id.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(row) => {
// Decode via the compat wrapper, same as list_config_snapshots.
let config_json: String = row.get("config_json");
let config_compat: ScheduleConfigCompat = serde_json::from_str(&config_json)
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
let config: ScheduleConfig = config_compat.into();
let version_num: i64 = row.get("version_num");
let label: Option<String> = row.get("label");
let created_at_str: String = row.get("created_at");
let created_at = created_at_str.parse::<DateTime<Utc>>()
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
Ok(Some(ChannelConfigSnapshot { id: snapshot_id, channel_id, config, version_num, label, created_at }))
}
}
}
/// Set or clear (`None`) a snapshot's label.
///
/// Returns `None` when no row matches the (snapshot, channel) pair;
/// otherwise re-reads and returns the updated snapshot.
/// NOTE(review): uses RETURNING, which requires SQLite 3.35+ — confirm the
/// bundled SQLite version.
async fn patch_config_snapshot_label(
&self,
channel_id: ChannelId,
snapshot_id: Uuid,
label: Option<String>,
) -> DomainResult<Option<ChannelConfigSnapshot>> {
let updated = sqlx::query(
"UPDATE channel_config_snapshots SET label = ? WHERE id = ? AND channel_id = ? RETURNING id"
)
.bind(&label)
.bind(snapshot_id.to_string())
.bind(channel_id.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
if updated.is_none() {
return Ok(None);
}
self.get_config_snapshot(channel_id, snapshot_id).await
}
}

View File

@@ -1,7 +1,7 @@
use std::sync::Arc;
use crate::db::DatabasePool;
use domain::{ChannelRepository, ScheduleRepository, UserRepository};
use domain::{ActivityLogRepository, ChannelRepository, IAppSettingsRepository, ILibraryRepository, ProviderConfigRepository, ScheduleRepository, TranscodeSettingsRepository, UserRepository};
#[derive(Debug, thiserror::Error)]
pub enum FactoryError {
@@ -51,6 +51,40 @@ pub async fn build_channel_repository(
}
}
/// Construct the activity-log repository for the active database backend.
/// Only SQLite is implemented; Postgres and feature-less builds return
/// `FactoryError::NotImplemented`.
pub async fn build_activity_log_repository(
pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ActivityLogRepository>> {
match pool {
#[cfg(feature = "sqlite")]
DatabasePool::Sqlite(pool) => Ok(Arc::new(
crate::activity_log_repository::SqliteActivityLogRepository::new(pool.clone()),
)),
#[cfg(feature = "postgres")]
DatabasePool::Postgres(_pool) => Err(FactoryError::NotImplemented(
"ActivityLogRepository not yet implemented for Postgres".to_string(),
)),
// Catch-all keeps the match exhaustive when no DB feature is enabled.
#[allow(unreachable_patterns)]
_ => Err(FactoryError::NotImplemented(
"No database feature enabled".to_string(),
)),
}
}
/// Construct the provider-config repository; SQLite only.
pub async fn build_provider_config_repository(
pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ProviderConfigRepository>> {
match pool {
#[cfg(feature = "sqlite")]
DatabasePool::Sqlite(pool) => Ok(Arc::new(
crate::provider_config_repository::SqliteProviderConfigRepository::new(pool.clone()),
)),
#[allow(unreachable_patterns)]
_ => Err(FactoryError::NotImplemented(
"ProviderConfigRepository not implemented for this database".to_string(),
)),
}
}
pub async fn build_schedule_repository(
pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ScheduleRepository>> {
@@ -69,3 +103,88 @@ pub async fn build_schedule_repository(
)),
}
}
/// Construct the transcode-settings repository; SQLite only.
pub async fn build_transcode_settings_repository(
pool: &DatabasePool,
) -> FactoryResult<Arc<dyn TranscodeSettingsRepository>> {
match pool {
#[cfg(feature = "sqlite")]
DatabasePool::Sqlite(p) => Ok(Arc::new(
crate::transcode_settings_repository::SqliteTranscodeSettingsRepository::new(p.clone()),
)),
#[allow(unreachable_patterns)]
_ => Err(FactoryError::NotImplemented(
"TranscodeSettingsRepository not implemented for this database".to_string(),
)),
}
}
/// Construct the media-library repository; SQLite only.
pub async fn build_library_repository(
pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ILibraryRepository>> {
match pool {
#[cfg(feature = "sqlite")]
DatabasePool::Sqlite(pool) => Ok(Arc::new(
crate::library_repository::SqliteLibraryRepository::new(pool.clone()),
)),
#[allow(unreachable_patterns)]
_ => Err(FactoryError::NotImplemented(
"LibraryRepository not implemented for this database".to_string(),
)),
}
}
/// Construct the app-settings (key/value) repository; SQLite only.
pub async fn build_app_settings_repository(
pool: &DatabasePool,
) -> FactoryResult<Arc<dyn IAppSettingsRepository>> {
match pool {
#[cfg(feature = "sqlite")]
DatabasePool::Sqlite(pool) => Ok(Arc::new(
crate::app_settings_repository::SqliteAppSettingsRepository::new(pool.clone()),
)),
#[allow(unreachable_patterns)]
_ => Err(FactoryError::NotImplemented(
"AppSettingsRepository not implemented for this database".to_string(),
)),
}
}
#[cfg(feature = "local-files")]
/// The pieces of the local-files provider, built together from one config
/// so provider, index, and (optional) transcode manager stay consistent.
pub struct LocalFilesBundle {
/// The media provider itself.
pub provider: Arc<crate::LocalFilesProvider>,
/// File index shared with the provider.
pub local_index: Arc<crate::LocalIndex>,
/// Present only when a transcode directory was configured.
pub transcode_manager: Option<Arc<crate::TranscodeManager>>,
}
#[cfg(feature = "local-files")]
/// Build the local-files provider bundle. Requires a SQLite pool (the
/// local index stores its data there); any other backend returns
/// `FactoryError::NotImplemented`.
pub async fn build_local_files_bundle(
pool: &DatabasePool,
root_dir: std::path::PathBuf,
transcode_dir: Option<std::path::PathBuf>,
cleanup_ttl_hours: u32,
base_url: String,
provider_id: &str,
) -> FactoryResult<LocalFilesBundle> {
match pool {
#[cfg(feature = "sqlite")]
DatabasePool::Sqlite(sqlite_pool) => {
let cfg = crate::LocalFilesConfig {
root_dir,
base_url,
transcode_dir: transcode_dir.clone(),
cleanup_ttl_hours,
};
let idx = Arc::new(crate::LocalIndex::new(&cfg, sqlite_pool.clone(), provider_id.to_string()).await);
// Transcode manager only when a directory is configured.
// NOTE(review): create_dir_all failure is swallowed by `.ok()`, so the
// manager may point at a missing directory — consider surfacing the error.
let tm = transcode_dir.as_ref().map(|td| {
std::fs::create_dir_all(td).ok();
crate::TranscodeManager::new(td.clone(), cleanup_ttl_hours)
});
let provider = Arc::new(crate::LocalFilesProvider::new(Arc::clone(&idx), cfg, tm.clone()));
Ok(LocalFilesBundle { provider, local_index: idx, transcode_manager: tm })
}
#[allow(unreachable_patterns)]
_ => Err(FactoryError::NotImplemented(
"local-files requires SQLite".to_string(),
)),
}
}

View File

@@ -1,5 +1,5 @@
/// Connection details for a single Jellyfin instance.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct JellyfinConfig {
/// e.g. `"http://192.168.1.10:8096"` — no trailing slash
pub base_url: String,

View File

@@ -31,5 +31,8 @@ pub(super) fn map_jellyfin_item(item: JellyfinItem) -> Option<MediaItem> {
series_name: item.series_name,
season_number: item.parent_index_number,
episode_number: item.index_number,
// TODO(library-sync): populate thumbnail_url from Jellyfin image API and collection_id from parent_id when FullSyncAdapter is implemented (Task 5)
thumbnail_url: None,
collection_id: None,
})
}

View File

@@ -47,6 +47,20 @@ pub(super) struct JellyfinItem {
pub recursive_item_count: Option<u32>,
}
/// Subset of Jellyfin's `PlaybackInfo` response used to decide whether an
/// item can be direct-streamed.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinPlaybackInfoResponse {
#[serde(rename = "MediaSources")]
pub media_sources: Vec<JellyfinMediaSource>,
}
/// One media source from `PlaybackInfo`.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinMediaSource {
/// Whether the server will serve this source without transcoding.
#[serde(rename = "SupportsDirectStream")]
pub supports_direct_stream: bool,
/// Server-relative direct-stream URL, when direct streaming is possible.
#[serde(rename = "DirectStreamUrl")]
pub direct_stream_url: Option<String>,
}
pub(super) fn jellyfin_item_type(ct: &ContentType) -> &'static str {
match ct {
ContentType::Movie => "Movie",

View File

@@ -2,12 +2,12 @@ use async_trait::async_trait;
use domain::{
Collection, ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem,
MediaItemId, SeriesSummary,
MediaItemId, ProviderCapabilities, SeriesSummary, StreamQuality, StreamingProtocol,
};
use super::config::JellyfinConfig;
use super::mapping::{map_jellyfin_item, TICKS_PER_SEC};
use super::models::{jellyfin_item_type, JellyfinItemsResponse};
use super::models::{jellyfin_item_type, JellyfinItemsResponse, JellyfinPlaybackInfoResponse};
pub struct JellyfinMediaProvider {
pub(super) client: reqwest::Client,
@@ -73,6 +73,10 @@ impl JellyfinMediaProvider {
// requested — season first, then episode within the season.
params.push(("SortBy", "ParentIndexNumber,IndexNumber".into()));
params.push(("SortOrder", "Ascending".into()));
// Prevent Jellyfin from returning Season/Series container items.
if filter.content_type.is_none() {
params.push(("IncludeItemTypes", "Episode".into()));
}
} else {
// No series filter — scope to the collection (library) if one is set.
if let Some(parent_id) = filter.collections.first() {
@@ -129,6 +133,20 @@ impl JellyfinMediaProvider {
#[async_trait]
impl IMediaProvider for JellyfinMediaProvider {
/// Advertise what this provider supports: all library filters and search,
/// HLS streaming; rescan and transcode control are not offered.
fn capabilities(&self) -> ProviderCapabilities {
ProviderCapabilities {
collections: true,
series: true,
genres: true,
tags: true,
decade: true,
search: true,
streaming_protocol: StreamingProtocol::Hls,
rescan: false,
transcode: false,
}
}
/// Fetch items matching `filter` from the Jellyfin library.
///
/// When `series_names` has more than one entry the results from each series
@@ -348,24 +366,44 @@ impl IMediaProvider for JellyfinMediaProvider {
Ok(body.items.into_iter().map(|item| item.name).collect())
}
/// Build an HLS stream URL for a Jellyfin item.
///
/// Returns a `master.m3u8` playlist URL. Jellyfin transcodes to H.264/AAC
/// segments on the fly. HLS is preferred over a single MP4 stream because
/// `StartTimeTicks` works reliably with HLS — each segment is independent,
/// so Jellyfin can begin the playlist at the correct broadcast offset
/// without needing to byte-range seek into an in-progress transcode.
///
/// The API key is embedded so the player needs no separate auth header.
/// The caller (stream proxy route) appends `StartTimeTicks` when there is
/// a non-zero broadcast offset.
async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String> {
Ok(format!(
"{}/Videos/{}/master.m3u8?videoCodec=h264&audioCodec=aac&VideoBitRate=40000000&mediaSourceId={}&api_key={}",
self.config.base_url,
item_id.as_ref(),
item_id.as_ref(),
self.config.api_key,
))
/// Resolve a playable URL for `item_id` at the requested quality.
///
/// `Direct`: asks Jellyfin's `PlaybackInfo` endpoint whether the first
/// media source supports direct streaming; if so, returns that URL with
/// the API key appended, otherwise falls back to an 8 Mbps HLS transcode.
/// A non-success PlaybackInfo status also falls back (only transport
/// errors are surfaced as `InfrastructureError`).
/// `Transcode(bps)`: always returns an HLS URL at the given bitrate.
async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
match quality {
StreamQuality::Direct => {
let url = format!("{}/Items/{}/PlaybackInfo", self.config.base_url, item_id.as_ref());
let resp = self.client.post(&url)
.header("X-Emby-Token", &self.config.api_key)
.query(&[("userId", &self.config.user_id), ("mediaSourceId", &item_id.as_ref().to_string())])
.json(&serde_json::json!({}))
.send().await
.map_err(|e| DomainError::InfrastructureError(format!("PlaybackInfo failed: {e}")))?;
if resp.status().is_success() {
let info: JellyfinPlaybackInfoResponse = resp.json().await
.map_err(|e| DomainError::InfrastructureError(format!("PlaybackInfo parse failed: {e}")))?;
if let Some(src) = info.media_sources.first()
&& src.supports_direct_stream
&& let Some(rel_url) = &src.direct_stream_url
{
// NOTE(review): joining with '&' assumes DirectStreamUrl already
// carries a query string — confirm against the Jellyfin API.
return Ok(format!("{}{}&api_key={}", self.config.base_url, rel_url, self.config.api_key));
}
}
// Fallback: HLS transcode at 8 Mbps.
Ok(self.hls_url(item_id, 8_000_000))
}
StreamQuality::Transcode(bps) => Ok(self.hls_url(item_id, *bps)),
}
}
}
impl JellyfinMediaProvider {
/// Build a Jellyfin HLS master-playlist URL for `item_id`, transcoding to
/// H.264/AAC at `bitrate` bits/sec with subtitles delivered as WebVTT HLS
/// segments. The API key is embedded so no separate auth header is needed.
fn hls_url(&self, item_id: &MediaItemId, bitrate: u32) -> String {
format!(
"{}/Videos/{}/master.m3u8?videoCodec=h264&audioCodec=aac&VideoBitRate={}&mediaSourceId={}&SubtitleMethod=Hls&subtitleCodec=vtt&api_key={}",
self.config.base_url,
item_id.as_ref(),
bitrate,
item_id.as_ref(),
self.config.api_key,
)
}
}

View File

@@ -17,19 +17,47 @@ pub mod auth;
pub mod db;
pub mod factory;
pub mod jellyfin;
pub mod provider_registry;
mod library_sync;
pub use library_sync::FullSyncAdapter;
mod app_settings_repository;
mod activity_log_repository;
mod channel_repository;
mod library_repository;
mod provider_config_repository;
mod schedule_repository;
mod transcode_settings_repository;
mod user_repository;
#[cfg(feature = "local-files")]
pub mod local_files;
// Re-export for convenience
pub use db::run_migrations;
pub use provider_registry::ProviderRegistry;
#[cfg(feature = "sqlite")]
pub use app_settings_repository::SqliteAppSettingsRepository;
#[cfg(feature = "sqlite")]
pub use activity_log_repository::SqliteActivityLogRepository;
#[cfg(feature = "sqlite")]
pub use user_repository::SqliteUserRepository;
#[cfg(feature = "sqlite")]
pub use channel_repository::SqliteChannelRepository;
#[cfg(feature = "sqlite")]
pub use provider_config_repository::SqliteProviderConfigRepository;
#[cfg(feature = "sqlite")]
pub use schedule_repository::SqliteScheduleRepository;
#[cfg(feature = "sqlite")]
pub use transcode_settings_repository::SqliteTranscodeSettingsRepository;
#[cfg(feature = "sqlite")]
pub use library_repository::SqliteLibraryRepository;
pub use domain::TranscodeSettingsRepository;
#[cfg(feature = "jellyfin")]
pub use jellyfin::{JellyfinConfig, JellyfinMediaProvider};
#[cfg(feature = "local-files")]
pub use local_files::{LocalFilesConfig, LocalFilesProvider, LocalIndex, TranscodeManager, decode_stream_id};

View File

@@ -0,0 +1,508 @@
//! SQLite implementation of ILibraryRepository.
use async_trait::async_trait;
use sqlx::SqlitePool;
use domain::{
ContentType, DomainError, DomainResult, ILibraryRepository,
LibraryCollection, LibraryItem, LibrarySearchFilter, LibrarySyncLogEntry, LibrarySyncResult,
SeasonSummary, ShowSummary,
};
/// SQLite-backed implementation of `ILibraryRepository`, reading and
/// writing the `library_items` table.
pub struct SqliteLibraryRepository {
pool: SqlitePool,
}
impl SqliteLibraryRepository {
/// Wrap an existing connection pool; no I/O happens here.
pub fn new(pool: SqlitePool) -> Self {
Self { pool }
}
}
/// Tag stored in the `content_type` column for each `ContentType` variant.
fn content_type_str(ct: &ContentType) -> &'static str {
    if matches!(ct, ContentType::Episode) {
        "episode"
    } else if matches!(ct, ContentType::Short) {
        "short"
    } else {
        // Only `Movie` remains.
        "movie"
    }
}
/// Inverse of `content_type_str`; any unrecognized tag falls back to Movie.
fn parse_content_type(s: &str) -> ContentType {
    if s == "episode" {
        ContentType::Episode
    } else if s == "short" {
        ContentType::Short
    } else {
        ContentType::Movie
    }
}
#[async_trait]
impl ILibraryRepository for SqliteLibraryRepository {
/// Filtered, paginated search over `library_items`.
///
/// Returns the requested page plus the total match count. All
/// user-supplied values are attached as bound `?` parameters rather than
/// spliced into the SQL string, so quoting/injection is handled by the
/// driver instead of hand-rolled escaping.
async fn search(&self, filter: &LibrarySearchFilter) -> DomainResult<(Vec<LibraryItem>, u32)> {
    // A bound value is either text or an integer; values are pushed in
    // exactly the order their `?` placeholders appear in the WHERE clause.
    enum Arg {
        Text(String),
        Int(i64),
    }
    let mut conditions: Vec<String> = vec![];
    let mut args: Vec<Arg> = vec![];
    if let Some(ref p) = filter.provider_id {
        conditions.push("provider_id = ?".to_string());
        args.push(Arg::Text(p.clone()));
    }
    if let Some(ref ct) = filter.content_type {
        conditions.push("content_type = ?".to_string());
        args.push(Arg::Text(content_type_str(ct).to_string()));
    }
    if let Some(ref st) = filter.search_term {
        // NOTE(review): `%`/`_` inside the term still act as LIKE
        // wildcards; add an ESCAPE clause if literal matching is wanted.
        conditions.push("title LIKE ?".to_string());
        args.push(Arg::Text(format!("%{}%", st)));
    }
    if let Some(ref cid) = filter.collection_id {
        conditions.push("collection_id = ?".to_string());
        args.push(Arg::Text(cid.clone()));
    }
    if let Some(decade) = filter.decade {
        // Half-open decade range, e.g. 1990 => [1990, 2000).
        conditions.push("year >= ? AND year < ?".to_string());
        args.push(Arg::Int(decade as i64));
        args.push(Arg::Int((decade + 10) as i64));
    }
    if let Some(min) = filter.min_duration_secs {
        conditions.push("duration_secs >= ?".to_string());
        args.push(Arg::Int(min as i64));
    }
    if let Some(max) = filter.max_duration_secs {
        conditions.push("duration_secs <= ?".to_string());
        args.push(Arg::Int(max as i64));
    }
    if !filter.series_names.is_empty() {
        let placeholders = vec!["?"; filter.series_names.len()].join(",");
        conditions.push(format!("series_name IN ({})", placeholders));
        for s in &filter.series_names {
            args.push(Arg::Text(s.clone()));
        }
    }
    if !filter.genres.is_empty() {
        // Item matches when ANY requested genre appears in its JSON genres array.
        let per_genre: Vec<&str> = filter.genres.iter()
            .map(|_| "EXISTS (SELECT 1 FROM json_each(library_items.genres) WHERE value = ?)")
            .collect();
        conditions.push(format!("({})", per_genre.join(" OR ")));
        for g in &filter.genres {
            args.push(Arg::Text(g.clone()));
        }
    }
    if let Some(sn) = filter.season_number {
        conditions.push("season_number = ?".to_string());
        args.push(Arg::Int(sn as i64));
    }
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    // Total match count ignores pagination.
    let count_sql = format!("SELECT COUNT(*) FROM library_items {}", where_clause);
    let mut count_query = sqlx::query_scalar::<_, i64>(&count_sql);
    for arg in &args {
        count_query = match arg {
            Arg::Text(s) => count_query.bind(s),
            Arg::Int(i) => count_query.bind(*i),
        };
    }
    let total: i64 = count_query
        .fetch_one(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    // Page of results; LIMIT/OFFSET are bound as the final parameters.
    let items_sql = format!(
        "SELECT * FROM library_items {} ORDER BY title ASC LIMIT ? OFFSET ?",
        where_clause
    );
    let mut items_query = sqlx::query_as::<_, LibraryItemRow>(&items_sql);
    for arg in &args {
        items_query = match arg {
            Arg::Text(s) => items_query.bind(s),
            Arg::Int(i) => items_query.bind(*i),
        };
    }
    let rows = items_query
        .bind(filter.limit as i64)
        .bind(filter.offset as i64)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok((rows.into_iter().map(Into::into).collect(), total as u32))
}
/// Single-row lookup by primary key; `None` when the id is unknown.
async fn get_by_id(&self, id: &str) -> DomainResult<Option<LibraryItem>> {
    sqlx::query_as::<_, LibraryItemRow>("SELECT * FROM library_items WHERE id = ?")
        .bind(id)
        .fetch_optional(&self.pool)
        .await
        .map(|maybe_row| maybe_row.map(Into::into))
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))
}
/// Distinct collections present in the library, optionally scoped to one
/// provider, ordered by collection name.
async fn list_collections(&self, provider_id: Option<&str>) -> DomainResult<Vec<LibraryCollection>> {
    const BASE: &str = "SELECT DISTINCT collection_id, collection_name, collection_type
             FROM library_items WHERE collection_id IS NOT NULL";
    let rows: Vec<(String, Option<String>, Option<String>)> = match provider_id {
        Some(p) => {
            let sql = format!("{BASE} AND provider_id = ? ORDER BY collection_name ASC");
            sqlx::query_as(&sql).bind(p).fetch_all(&self.pool).await
        }
        None => {
            let sql = format!("{BASE} ORDER BY collection_name ASC");
            sqlx::query_as(&sql).fetch_all(&self.pool).await
        }
    }
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    let collections = rows
        .into_iter()
        .map(|(id, name, collection_type)| LibraryCollection {
            id,
            // A NULL collection_name becomes the empty string.
            name: name.unwrap_or_default(),
            collection_type,
        })
        .collect();
    Ok(collections)
}
async fn list_series(&self, provider_id: Option<&str>) -> DomainResult<Vec<String>> {
let rows: Vec<(String,)> = if let Some(p) = provider_id {
sqlx::query_as(
"SELECT DISTINCT series_name FROM library_items
WHERE series_name IS NOT NULL AND provider_id = ? ORDER BY series_name ASC"
).bind(p).fetch_all(&self.pool).await
} else {
sqlx::query_as(
"SELECT DISTINCT series_name FROM library_items
WHERE series_name IS NOT NULL ORDER BY series_name ASC"
).fetch_all(&self.pool).await
}.map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
Ok(rows.into_iter().map(|(s,)| s).collect())
}
/// Distinct genre values across all items, expanding each row's JSON `genres`
/// array with SQLite's `json_each`. Optional filters on content type and provider.
///
/// Conditions are attached with `?` placeholders and bound values instead of
/// string interpolation with hand-rolled quote escaping — safer and simpler
/// than the previous four-way `match` over formatted SQL.
async fn list_genres(&self, content_type: Option<&ContentType>, provider_id: Option<&str>) -> DomainResult<Vec<String>> {
    let mut conditions: Vec<&str> = Vec::new();
    if content_type.is_some() {
        conditions.push("li.content_type = ?");
    }
    if provider_id.is_some() {
        conditions.push("li.provider_id = ?");
    }
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    let sql = format!(
        "SELECT DISTINCT je.value FROM library_items li, json_each(li.genres) je {} ORDER BY je.value ASC",
        where_clause
    );
    // Bind in the same order the placeholders were pushed above.
    let mut query = sqlx::query_as::<_, (String,)>(&sql);
    if let Some(ct) = content_type {
        query = query.bind(content_type_str(ct));
    }
    if let Some(p) = provider_id {
        query = query.bind(p);
    }
    let rows: Vec<(String,)> = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows.into_iter().map(|(s,)| s).collect())
}
/// Insert-or-replace a batch of items inside one transaction, so a partial
/// failure rolls everything back. `INSERT OR REPLACE` keys on the `id`
/// primary key, which already embeds the provider.
///
/// NOTE(review): `_provider_id` is unused here — items carry their own
/// `provider_id` field; the parameter presumably exists for trait symmetry.
async fn upsert_items(&self, _provider_id: &str, items: Vec<LibraryItem>) -> DomainResult<()> {
    let mut tx = self.pool.begin().await.map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    for item in items {
        sqlx::query(
            "INSERT OR REPLACE INTO library_items
             (id, provider_id, external_id, title, content_type, duration_secs,
              series_name, season_number, episode_number, year, genres, tags,
              collection_id, collection_name, collection_type, thumbnail_url, synced_at)
             VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
        )
        // Bind order must match the column list above exactly (17 columns).
        .bind(&item.id).bind(&item.provider_id).bind(&item.external_id)
        .bind(&item.title).bind(content_type_str(&item.content_type))
        .bind(item.duration_secs as i64)
        .bind(&item.series_name).bind(item.season_number.map(|n| n as i64))
        .bind(item.episode_number.map(|n| n as i64))
        .bind(item.year.map(|n| n as i64))
        // Vec<String> fields are stored as JSON text columns.
        .bind(serde_json::to_string(&item.genres).unwrap_or_default())
        .bind(serde_json::to_string(&item.tags).unwrap_or_default())
        .bind(&item.collection_id).bind(&item.collection_name)
        .bind(&item.collection_type).bind(&item.thumbnail_url)
        .bind(&item.synced_at)
        .execute(&mut *tx)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    }
    tx.commit().await.map_err(|e| DomainError::InfrastructureError(e.to_string()))
}
/// Delete every cached item belonging to the given provider.
async fn clear_provider(&self, provider_id: &str) -> DomainResult<()> {
    let outcome = sqlx::query("DELETE FROM library_items WHERE provider_id = ?")
        .bind(provider_id)
        .execute(&self.pool)
        .await;
    match outcome {
        Ok(_) => Ok(()),
        Err(e) => Err(DomainError::InfrastructureError(e.to_string())),
    }
}
/// Record the start of a sync run as a 'running' row and return its id
/// (via SQLite's RETURNING clause).
async fn log_sync_start(&self, provider_id: &str) -> DomainResult<i64> {
    let started_at = chrono::Utc::now().to_rfc3339();
    sqlx::query_scalar::<_, i64>(
        "INSERT INTO library_sync_log (provider_id, started_at, status)
         VALUES (?, ?, 'running') RETURNING id",
    )
    .bind(provider_id)
    .bind(&started_at)
    .fetch_one(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))
}
/// Close out a sync-log row: stamp the finish time and record outcome.
/// Status is 'error' when the result carries an error message, else 'done'.
async fn log_sync_finish(&self, log_id: i64, result: &LibrarySyncResult) -> DomainResult<()> {
    let finished_at = chrono::Utc::now().to_rfc3339();
    let status = match result.error {
        None => "done",
        Some(_) => "error",
    };
    sqlx::query(
        "UPDATE library_sync_log
         SET finished_at = ?, items_found = ?, status = ?, error_msg = ?
         WHERE id = ?",
    )
    .bind(&finished_at)
    .bind(result.items_found as i64)
    .bind(status)
    .bind(&result.error)
    .bind(log_id)
    .execute(&self.pool)
    .await
    .map(|_| ())
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))
}
/// Return the most recent sync-log entry per provider, newest first.
/// `MAX(id) ... GROUP BY provider_id` picks the latest row for each provider
/// (ids are AUTOINCREMENT, so the max id is the most recent insert).
async fn latest_sync_status(&self) -> DomainResult<Vec<LibrarySyncLogEntry>> {
    let rows = sqlx::query_as::<_, SyncLogRow>(
        "SELECT * FROM library_sync_log
         WHERE id IN (
             SELECT MAX(id) FROM library_sync_log GROUP BY provider_id
         )
         ORDER BY started_at DESC"
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows.into_iter().map(|r| LibrarySyncLogEntry {
        id: r.id, provider_id: r.provider_id, started_at: r.started_at,
        finished_at: r.finished_at, items_found: r.items_found as u32,
        status: r.status, error_msg: r.error_msg,
    }).collect())
}
/// True when the given provider has a sync-log row still in 'running' state.
///
/// Uses `EXISTS` instead of `COUNT(*)`: EXISTS stops at the first matching
/// row rather than counting them all, and decodes directly to a bool.
async fn is_sync_running(&self, provider_id: &str) -> DomainResult<bool> {
    let running: bool = sqlx::query_scalar(
        "SELECT EXISTS(SELECT 1 FROM library_sync_log WHERE provider_id = ? AND status = 'running')"
    )
    .bind(provider_id)
    .fetch_one(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(running)
}
/// Aggregate all episodes into one summary row per series.
///
/// The WHERE clause is assembled with `?` placeholders and bound values
/// instead of string interpolation with manual quote-escaping; this also
/// fixes search terms containing quotes, which previously had to be escaped
/// into the LIKE pattern by hand.
async fn list_shows(
    &self,
    provider_id: Option<&str>,
    search_term: Option<&str>,
    genres: &[String],
) -> DomainResult<Vec<ShowSummary>> {
    let mut conditions = vec![
        "content_type = 'episode'".to_string(),
        "series_name IS NOT NULL".to_string(),
    ];
    // Values bound in the exact order their placeholders are pushed.
    let mut binds: Vec<String> = Vec::new();
    if let Some(p) = provider_id {
        conditions.push("provider_id = ?".to_string());
        binds.push(p.to_string());
    }
    if let Some(st) = search_term {
        conditions.push("(title LIKE ? OR series_name LIKE ?)".to_string());
        let pattern = format!("%{}%", st);
        binds.push(pattern.clone());
        binds.push(pattern);
    }
    if !genres.is_empty() {
        // OR semantics: a show matches if any requested genre appears in its
        // JSON `genres` array (expanded with json_each).
        let genre_conditions: Vec<&str> = genres
            .iter()
            .map(|_| "EXISTS (SELECT 1 FROM json_each(library_items.genres) WHERE value = ?)")
            .collect();
        conditions.push(format!("({})", genre_conditions.join(" OR ")));
        binds.extend(genres.iter().cloned());
    }
    let where_clause = format!("WHERE {}", conditions.join(" AND "));
    let sql = format!(
        "SELECT series_name, COUNT(*) AS episode_count, COUNT(DISTINCT season_number) AS season_count, MAX(thumbnail_url) AS thumbnail_url, GROUP_CONCAT(genres, ',') AS genres_blob FROM library_items {} GROUP BY series_name ORDER BY series_name ASC",
        where_clause
    );
    let mut query = sqlx::query_as::<_, ShowSummaryRow>(&sql);
    for value in &binds {
        query = query.bind(value);
    }
    let rows = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows
        .into_iter()
        .map(|r| {
            // genres_blob is a comma-joined run of JSON arrays, e.g.
            // ["A","B"],["B"]. Split on "],[" then strip brackets/quotes,
            // de-duplicating through a HashSet (order is unspecified).
            // NOTE(review): genre values containing commas or quotes would be
            // mangled by this text-level parse — confirm upstream data never has them.
            let genres: Vec<String> = r
                .genres_blob
                .split("],[")
                .flat_map(|chunk| {
                    let cleaned = chunk.trim_start_matches('[').trim_end_matches(']');
                    cleaned
                        .split(',')
                        .filter_map(|s| {
                            let s = s.trim().trim_matches('"');
                            if s.is_empty() { None } else { Some(s.to_string()) }
                        })
                        .collect::<Vec<_>>()
                })
                .collect::<std::collections::HashSet<_>>()
                .into_iter()
                .collect();
            ShowSummary {
                series_name: r.series_name,
                episode_count: r.episode_count as u32,
                season_count: r.season_count as u32,
                thumbnail_url: r.thumbnail_url,
                genres,
            }
        })
        .collect())
}
/// Group one show's episodes by season, returning per-season counts.
///
/// Two fixes over the interpolated-SQL version:
/// - `series_name` / `provider_id` are bound parameters, not escaped strings;
/// - episodes with a NULL `season_number` are excluded, since SQLite would
///   otherwise emit a NULL group that fails to decode into the non-nullable
///   `SeasonSummaryRow.season_number`.
async fn list_seasons(
    &self,
    series_name: &str,
    provider_id: Option<&str>,
) -> DomainResult<Vec<SeasonSummary>> {
    let mut sql = String::from(
        "SELECT season_number, COUNT(*) AS episode_count, MAX(thumbnail_url) AS thumbnail_url \
         FROM library_items \
         WHERE series_name = ? AND content_type = 'episode' AND season_number IS NOT NULL",
    );
    if provider_id.is_some() {
        sql.push_str(" AND provider_id = ?");
    }
    sql.push_str(" GROUP BY season_number ORDER BY season_number ASC");
    let mut query = sqlx::query_as::<_, SeasonSummaryRow>(&sql).bind(series_name);
    if let Some(p) = provider_id {
        query = query.bind(p);
    }
    let rows = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows
        .into_iter()
        .map(|r| SeasonSummary {
            season_number: r.season_number as u32,
            episode_count: r.episode_count as u32,
            thumbnail_url: r.thumbnail_url,
        })
        .collect())
}
}
// ── SQLx row types ─────────────────────────────────────────────────────────
/// Raw SQLite row for `library_items`; converted to the domain `LibraryItem`
/// via `From`. JSON-array columns (`genres`, `tags`) stay as raw text here
/// and are parsed during conversion.
#[derive(sqlx::FromRow)]
struct LibraryItemRow {
    id: String, provider_id: String, external_id: String, title: String,
    content_type: String, duration_secs: i64,
    series_name: Option<String>, season_number: Option<i64>, episode_number: Option<i64>,
    year: Option<i64>, genres: String, tags: String,
    collection_id: Option<String>, collection_name: Option<String>, collection_type: Option<String>,
    thumbnail_url: Option<String>, synced_at: String,
}
impl From<LibraryItemRow> for LibraryItem {
    /// Map the flat SQLite row into the domain type: widen/narrow integer
    /// columns and parse the JSON-encoded list columns.
    fn from(r: LibraryItemRow) -> Self {
        Self {
            id: r.id, provider_id: r.provider_id, external_id: r.external_id,
            title: r.title, content_type: parse_content_type(&r.content_type),
            duration_secs: r.duration_secs as u32,
            series_name: r.series_name,
            season_number: r.season_number.map(|n| n as u32),
            episode_number: r.episode_number.map(|n| n as u32),
            year: r.year.map(|n| n as u16),
            // Malformed JSON degrades to an empty list rather than erroring.
            genres: serde_json::from_str(&r.genres).unwrap_or_default(),
            tags: serde_json::from_str(&r.tags).unwrap_or_default(),
            collection_id: r.collection_id, collection_name: r.collection_name,
            collection_type: r.collection_type, thumbnail_url: r.thumbnail_url,
            synced_at: r.synced_at,
        }
    }
}
/// Raw row for `library_sync_log` (see `latest_sync_status`).
#[derive(sqlx::FromRow)]
struct SyncLogRow {
    id: i64, provider_id: String, started_at: String, finished_at: Option<String>,
    items_found: i64, status: String, error_msg: Option<String>,
}
/// Aggregated per-series row produced by `list_shows`.
#[derive(sqlx::FromRow)]
struct ShowSummaryRow {
    series_name: String,
    episode_count: i64,
    season_count: i64,
    thumbnail_url: Option<String>,
    // GROUP_CONCAT of the per-episode JSON `genres` arrays; parsed in list_shows.
    genres_blob: String,
}
/// Aggregated per-season row produced by `list_seasons`.
/// NOTE(review): `season_number` is non-nullable — rows grouping NULL
/// season_number values would fail to decode.
#[derive(sqlx::FromRow)]
struct SeasonSummaryRow {
    season_number: i64,
    episode_count: i64,
    thumbnail_url: Option<String>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::SqlitePool;
    use domain::{LibraryItem, LibrarySearchFilter, ContentType};
    /// Build an in-memory SQLite DB with the two tables the repository uses.
    async fn setup() -> SqlitePool {
        let pool = SqlitePool::connect(":memory:").await.unwrap();
        sqlx::query(
            "CREATE TABLE library_items (
                id TEXT PRIMARY KEY, provider_id TEXT NOT NULL, external_id TEXT NOT NULL,
                title TEXT NOT NULL, content_type TEXT NOT NULL, duration_secs INTEGER NOT NULL DEFAULT 0,
                series_name TEXT, season_number INTEGER, episode_number INTEGER, year INTEGER,
                genres TEXT NOT NULL DEFAULT '[]', tags TEXT NOT NULL DEFAULT '[]',
                collection_id TEXT, collection_name TEXT, collection_type TEXT,
                thumbnail_url TEXT, synced_at TEXT NOT NULL
            )"
        ).execute(&pool).await.unwrap();
        sqlx::query(
            "CREATE TABLE library_sync_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT, provider_id TEXT NOT NULL,
                started_at TEXT NOT NULL, finished_at TEXT, items_found INTEGER NOT NULL DEFAULT 0,
                status TEXT NOT NULL DEFAULT 'running', error_msg TEXT
            )"
        ).execute(&pool).await.unwrap();
        pool
    }
    /// Minimal movie fixture; only id/provider/title vary across tests.
    fn make_item(id: &str, provider: &str, title: &str) -> LibraryItem {
        LibraryItem {
            id: id.to_string(), provider_id: provider.to_string(), external_id: id.to_string(),
            title: title.to_string(), content_type: ContentType::Movie,
            duration_secs: 3600, series_name: None, season_number: None, episode_number: None,
            year: Some(2020), genres: vec!["Action".to_string()], tags: vec![],
            collection_id: None, collection_name: None, collection_type: None,
            thumbnail_url: None, synced_at: "2026-03-19T00:00:00Z".to_string(),
        }
    }
    // Round-trip: upserted items come back through search with a correct total.
    #[tokio::test]
    async fn upsert_then_search_returns_items() {
        let pool = setup().await;
        let repo = SqliteLibraryRepository::new(pool);
        let items = vec![make_item("jellyfin::1", "jellyfin", "Movie A")];
        repo.upsert_items("jellyfin", items).await.unwrap();
        let (results, total) = repo.search(&LibrarySearchFilter { limit: 50, ..Default::default() }).await.unwrap();
        assert_eq!(total, 1);
        assert_eq!(results[0].title, "Movie A");
    }
    // clear_provider must be scoped: other providers' rows survive.
    #[tokio::test]
    async fn clear_provider_removes_only_that_provider() {
        let pool = setup().await;
        let repo = SqliteLibraryRepository::new(pool);
        repo.upsert_items("jellyfin", vec![make_item("jellyfin::1", "jellyfin", "Jelly Movie")]).await.unwrap();
        repo.upsert_items("local", vec![make_item("local::1", "local", "Local Movie")]).await.unwrap();
        repo.clear_provider("jellyfin").await.unwrap();
        let (results, _) = repo.search(&LibrarySearchFilter { limit: 50, ..Default::default() }).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].provider_id, "local");
    }
    // Running flag follows the log lifecycle: false -> start -> true -> finish -> false.
    #[tokio::test]
    async fn is_sync_running_reflects_status() {
        let pool = setup().await;
        let repo = SqliteLibraryRepository::new(pool);
        assert!(!repo.is_sync_running("jellyfin").await.unwrap());
        let log_id = repo.log_sync_start("jellyfin").await.unwrap();
        assert!(repo.is_sync_running("jellyfin").await.unwrap());
        let result = domain::LibrarySyncResult {
            provider_id: "jellyfin".to_string(), items_found: 5, duration_ms: 100, error: None,
        };
        repo.log_sync_finish(log_id, &result).await.unwrap();
        assert!(!repo.is_sync_running("jellyfin").await.unwrap());
    }
}

View File

@@ -0,0 +1,249 @@
//! Full-sync library sync adapter: truncate + re-insert all provider items.
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;
use async_trait::async_trait;
use domain::{
Collection, ILibraryRepository, IMediaProvider, LibraryItem,
LibrarySyncAdapter, LibrarySyncResult, MediaFilter,
};
/// Sync adapter that replaces a provider's cached library wholesale on each
/// run (truncate + re-insert; see `sync_provider`).
pub struct FullSyncAdapter {
    // Persistence target shared with the rest of the application.
    repo: Arc<dyn ILibraryRepository>,
}
impl FullSyncAdapter {
    /// Create an adapter that writes synced items into `repo`.
    pub fn new(repo: Arc<dyn ILibraryRepository>) -> Self {
        Self { repo }
    }
}
#[async_trait]
impl LibrarySyncAdapter for FullSyncAdapter {
async fn sync_provider(
&self,
provider: &dyn IMediaProvider,
provider_id: &str,
) -> LibrarySyncResult {
let start = Instant::now();
// Check for running sync first
match self.repo.is_sync_running(provider_id).await {
Ok(true) => {
return LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found: 0,
duration_ms: 0,
error: Some("sync already running".to_string()),
};
}
Err(e) => {
return LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found: 0,
duration_ms: 0,
error: Some(e.to_string()),
};
}
Ok(false) => {}
}
let log_id = match self.repo.log_sync_start(provider_id).await {
Ok(id) => id,
Err(e) => {
return LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found: 0,
duration_ms: start.elapsed().as_millis() as u64,
error: Some(e.to_string()),
};
}
};
// Fetch collections for name/type enrichment — build a lookup map
let collections: Vec<Collection> = provider.list_collections().await.unwrap_or_default();
let collection_map: HashMap<String, &Collection> =
collections.iter().map(|c| (c.id.clone(), c)).collect();
// Fetch all items
let media_items = match provider.fetch_items(&MediaFilter::default()).await {
Ok(items) => items,
Err(e) => {
let result = LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found: 0,
duration_ms: start.elapsed().as_millis() as u64,
error: Some(e.to_string()),
};
let _ = self.repo.log_sync_finish(log_id, &result).await;
return result;
}
};
let items_found = media_items.len() as u32;
let now = chrono::Utc::now().to_rfc3339();
let library_items: Vec<LibraryItem> = media_items
.into_iter()
.map(|item| {
let raw_id = item.id.into_inner();
let id = format!("{}::{}", provider_id, raw_id);
// Enrich with collection name/type using the lookup map.
let (col_name, col_type) = item.collection_id.as_deref()
.and_then(|cid| collection_map.get(cid))
.map(|c| (Some(c.name.clone()), c.collection_type.clone()))
.unwrap_or((None, None));
LibraryItem {
id,
provider_id: provider_id.to_string(),
external_id: raw_id,
title: item.title,
content_type: item.content_type,
duration_secs: item.duration_secs,
series_name: item.series_name,
season_number: item.season_number,
episode_number: item.episode_number,
year: item.year,
genres: item.genres,
tags: item.tags,
collection_id: item.collection_id,
collection_name: col_name,
collection_type: col_type,
thumbnail_url: item.thumbnail_url,
synced_at: now.clone(),
}
})
.collect();
// Truncate + insert
if let Err(e) = self.repo.clear_provider(provider_id).await {
let result = LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found: 0,
duration_ms: start.elapsed().as_millis() as u64,
error: Some(e.to_string()),
};
let _ = self.repo.log_sync_finish(log_id, &result).await;
return result;
}
let result = match self.repo.upsert_items(provider_id, library_items).await {
Ok(()) => LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found,
duration_ms: start.elapsed().as_millis() as u64,
error: None,
},
Err(e) => LibrarySyncResult {
provider_id: provider_id.to_string(),
items_found: 0,
duration_ms: start.elapsed().as_millis() as u64,
error: Some(e.to_string()),
},
};
let _ = self.repo.log_sync_finish(log_id, &result).await;
result
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Arc, Mutex};
    use async_trait::async_trait;
    use domain::*;
    /// Provider stub that returns a fixed item list and no-ops everything else.
    struct MockProvider {
        items: Vec<MediaItem>,
    }
    #[async_trait]
    impl IMediaProvider for MockProvider {
        fn capabilities(&self) -> ProviderCapabilities {
            ProviderCapabilities {
                collections: true,
                series: false,
                genres: false,
                tags: false,
                decade: false,
                search: false,
                streaming_protocol: StreamingProtocol::Hls,
                rescan: false,
                transcode: false,
            }
        }
        async fn fetch_items(&self, _filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
            Ok(self.items.clone())
        }
        async fn fetch_by_id(&self, _id: &MediaItemId) -> DomainResult<Option<MediaItem>> { Ok(None) }
        async fn get_stream_url(&self, _id: &MediaItemId, _q: &StreamQuality) -> DomainResult<String> { Ok(String::new()) }
        async fn list_collections(&self) -> DomainResult<Vec<Collection>> { Ok(vec![]) }
        async fn list_series(&self, _col: Option<&str>) -> DomainResult<Vec<SeriesSummary>> { Ok(vec![]) }
        async fn list_genres(&self, _ct: Option<&ContentType>) -> DomainResult<Vec<String>> { Ok(vec![]) }
    }
    /// Repository spy recording which providers were cleared and what was upserted.
    struct SpyRepo {
        upserted: Arc<Mutex<Vec<LibraryItem>>>,
        cleared: Arc<Mutex<Vec<String>>>,
    }
    #[async_trait]
    impl ILibraryRepository for SpyRepo {
        async fn search(&self, _f: &LibrarySearchFilter) -> DomainResult<(Vec<LibraryItem>, u32)> { Ok((vec![], 0)) }
        async fn get_by_id(&self, _id: &str) -> DomainResult<Option<LibraryItem>> { Ok(None) }
        async fn list_collections(&self, _p: Option<&str>) -> DomainResult<Vec<LibraryCollection>> { Ok(vec![]) }
        async fn list_series(&self, _p: Option<&str>) -> DomainResult<Vec<String>> { Ok(vec![]) }
        async fn list_genres(&self, _ct: Option<&ContentType>, _p: Option<&str>) -> DomainResult<Vec<String>> { Ok(vec![]) }
        async fn upsert_items(&self, _pid: &str, items: Vec<LibraryItem>) -> DomainResult<()> {
            self.upserted.lock().unwrap().extend(items);
            Ok(())
        }
        async fn clear_provider(&self, pid: &str) -> DomainResult<()> {
            self.cleared.lock().unwrap().push(pid.to_string());
            Ok(())
        }
        async fn log_sync_start(&self, _pid: &str) -> DomainResult<i64> { Ok(1) }
        async fn log_sync_finish(&self, _id: i64, _r: &LibrarySyncResult) -> DomainResult<()> { Ok(()) }
        async fn latest_sync_status(&self) -> DomainResult<Vec<LibrarySyncLogEntry>> { Ok(vec![]) }
        async fn is_sync_running(&self, _pid: &str) -> DomainResult<bool> { Ok(false) }
        async fn list_shows(&self, _p: Option<&str>, _st: Option<&str>, _g: &[String]) -> DomainResult<Vec<domain::ShowSummary>> { Ok(vec![]) }
        async fn list_seasons(&self, _sn: &str, _p: Option<&str>) -> DomainResult<Vec<domain::SeasonSummary>> { Ok(vec![]) }
    }
    // Happy path: the adapter clears the provider's rows, then upserts the
    // fetched items, and reports the fetched count with no error.
    #[tokio::test]
    async fn sync_clears_then_upserts_items() {
        let upserted = Arc::new(Mutex::new(vec![]));
        let cleared = Arc::new(Mutex::new(vec![]));
        let repo: Arc<dyn ILibraryRepository> = Arc::new(SpyRepo {
            upserted: Arc::clone(&upserted),
            cleared: Arc::clone(&cleared),
        });
        let adapter = FullSyncAdapter::new(Arc::clone(&repo));
        let provider = MockProvider {
            items: vec![MediaItem {
                id: MediaItemId::new("abc".to_string()),
                title: "Test Movie".to_string(),
                content_type: ContentType::Movie,
                duration_secs: 3600,
                description: None,
                series_name: None,
                season_number: None,
                episode_number: None,
                year: None,
                genres: vec![],
                tags: vec![],
                thumbnail_url: None,
                collection_id: None,
            }],
        };
        let result = adapter.sync_provider(&provider, "jellyfin").await;
        assert!(result.error.is_none());
        assert_eq!(result.items_found, 1);
        assert_eq!(cleared.lock().unwrap().as_slice(), &["jellyfin"]);
        assert_eq!(upserted.lock().unwrap().len(), 1);
    }
}

View File

@@ -0,0 +1,13 @@
use std::path::PathBuf;
/// Configuration for the local files media provider.
pub struct LocalFilesConfig {
    /// Root directory containing video files. All files are served relative to this.
    pub root_dir: PathBuf,
    /// Public base URL of this API server, used to build stream URLs.
    /// A trailing slash, if present, is stripped by the provider.
    pub base_url: String,
    /// Directory for FFmpeg HLS transcode cache. `None` disables transcoding.
    pub transcode_dir: Option<PathBuf>,
    /// How long (hours) to keep transcode cache entries. Passed to TranscodeManager.
    pub cleanup_ttl_hours: u32,
}

View File

@@ -0,0 +1,188 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use chrono::Utc;
use tokio::sync::RwLock;
use tracing::{error, info};
use domain::MediaItemId;
use super::config::LocalFilesConfig;
use super::scanner::{scan_dir, LocalFileItem};
/// Encode a rel-path string into a URL-safe, padding-free base64 MediaItemId.
/// The encoding is stable, so the same path always yields the same id.
pub fn encode_id(rel_path: &str) -> MediaItemId {
    use base64::Engine as _;
    let encoded = base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(rel_path.as_bytes());
    MediaItemId::new(encoded)
}
/// Decode a MediaItemId back to a relative path string.
/// Returns None for ids that are not valid base64 or not valid UTF-8.
pub fn decode_id(id: &MediaItemId) -> Option<String> {
    use base64::Engine as _;
    base64::engine::general_purpose::URL_SAFE_NO_PAD
        .decode(id.as_ref())
        .ok()
        .and_then(|bytes| String::from_utf8(bytes).ok())
}
/// In-memory (+ SQLite-backed) index of local video files.
///
/// On startup the index is populated from the SQLite cache so the provider can
/// serve requests immediately. A background task calls `rescan()` to pick up
/// any changes on disk and write them back to the cache.
pub struct LocalIndex {
    // id -> scanned file metadata; RwLock lets readers proceed between rescans.
    items: Arc<RwLock<HashMap<MediaItemId, LocalFileItem>>>,
    // Absolute root all rel_paths are resolved against.
    pub root_dir: PathBuf,
    // Scopes rows in local_files_index when multiple local providers share a DB.
    provider_id: String,
    pool: sqlx::SqlitePool,
}
impl LocalIndex {
    /// Create the index, immediately loading persisted entries from SQLite.
    pub async fn new(config: &LocalFilesConfig, pool: sqlx::SqlitePool, provider_id: String) -> Self {
        let idx = Self {
            items: Arc::new(RwLock::new(HashMap::new())),
            root_dir: config.root_dir.clone(),
            provider_id,
            pool,
        };
        idx.load_from_db().await;
        idx
    }
    /// Load previously scanned items from SQLite (instant on startup).
    ///
    /// Failures are logged at debug level and otherwise ignored: on a fresh
    /// install the table may not exist yet, and an empty index is acceptable
    /// until the first rescan runs.
    async fn load_from_db(&self) {
        // Local row type covering just the columns needed to rebuild LocalFileItem.
        #[derive(sqlx::FromRow)]
        struct Row {
            id: String,
            rel_path: String,
            title: String,
            duration_secs: i64,
            year: Option<i64>,
            // JSON array of strings; parse failures degrade to an empty list.
            tags: String,
            top_dir: String,
        }
        let rows = sqlx::query_as::<_, Row>(
            "SELECT id, rel_path, title, duration_secs, year, tags, top_dir \
             FROM local_files_index WHERE provider_id = ?",
        )
        .bind(&self.provider_id)
        .fetch_all(&self.pool)
        .await;
        match rows {
            Ok(rows) => {
                let mut map = self.items.write().await;
                for row in rows {
                    let tags: Vec<String> =
                        serde_json::from_str(&row.tags).unwrap_or_default();
                    let item = LocalFileItem {
                        rel_path: row.rel_path,
                        title: row.title,
                        duration_secs: row.duration_secs as u32,
                        year: row.year.map(|y| y as u16),
                        tags,
                        top_dir: row.top_dir,
                    };
                    map.insert(MediaItemId::new(row.id), item);
                }
                info!("Local files index [{}]: loaded {} items from DB", self.provider_id, map.len());
            }
            Err(e) => {
                // Table might not exist yet on first run — that's fine.
                tracing::debug!("Could not load local files index from DB: {}", e);
            }
        }
    }
    /// Scan the filesystem for video files and rebuild the index.
    ///
    /// Returns the number of items found. Called on startup (background task)
    /// and via `POST /files/rescan`.
    pub async fn rescan(&self) -> u32 {
        info!("Local files [{}]: scanning {:?}", self.provider_id, self.root_dir);
        let new_items = scan_dir(&self.root_dir).await;
        let count = new_items.len() as u32;
        // Swap in-memory map. Scoped block so the write lock is released
        // before the (potentially slow) DB persistence below.
        {
            let mut map = self.items.write().await;
            map.clear();
            for item in &new_items {
                let id = encode_id(&item.rel_path);
                map.insert(id, item.clone());
            }
        }
        // Persist to SQLite. A DB failure is logged but does not undo the
        // in-memory swap — the index still serves the fresh scan.
        if let Err(e) = self.save_to_db(&new_items).await {
            error!("Failed to persist local files index: {}", e);
        }
        info!("Local files [{}]: indexed {} items", self.provider_id, count);
        count
    }
    /// Persist one scan's worth of items, replacing this provider's rows.
    async fn save_to_db(&self, items: &[LocalFileItem]) -> Result<(), sqlx::Error> {
        // Rebuild the table in one transaction, scoped to this provider.
        let mut tx = self.pool.begin().await?;
        sqlx::query("DELETE FROM local_files_index WHERE provider_id = ?")
            .bind(&self.provider_id)
            .execute(&mut *tx)
            .await?;
        let now = Utc::now().to_rfc3339();
        for item in items {
            let id = encode_id(&item.rel_path).into_inner();
            let tags_json = serde_json::to_string(&item.tags).unwrap_or_else(|_| "[]".into());
            sqlx::query(
                "INSERT INTO local_files_index \
                 (id, rel_path, title, duration_secs, year, tags, top_dir, scanned_at, provider_id) \
                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
            )
            .bind(&id)
            .bind(&item.rel_path)
            .bind(&item.title)
            .bind(item.duration_secs as i64)
            .bind(item.year.map(|y| y as i64))
            .bind(&tags_json)
            .bind(&item.top_dir)
            .bind(&now)
            .bind(&self.provider_id)
            .execute(&mut *tx)
            .await?;
        }
        tx.commit().await
    }
    /// Look up a single item by id (clone of the cached entry).
    pub async fn get(&self, id: &MediaItemId) -> Option<LocalFileItem> {
        self.items.read().await.get(id).cloned()
    }
    /// Snapshot of the whole index (cloned so the lock is released immediately).
    pub async fn get_all(&self) -> Vec<(MediaItemId, LocalFileItem)> {
        self.items
            .read()
            .await
            .iter()
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect()
    }
    /// Return unique top-level directories as collection names, sorted.
    pub async fn collections(&self) -> Vec<String> {
        let map = self.items.read().await;
        let mut seen = std::collections::HashSet::new();
        for item in map.values() {
            seen.insert(item.top_dir.clone());
        }
        let mut dirs: Vec<String> = seen.into_iter().collect();
        dirs.sort();
        dirs
    }
}

View File

@@ -0,0 +1,10 @@
pub mod config;
pub mod index;
pub mod provider;
pub mod scanner;
pub mod transcoder;
pub use config::LocalFilesConfig;
pub use index::LocalIndex;
pub use provider::{LocalFilesProvider, decode_stream_id};
pub use transcoder::TranscodeManager;

View File

@@ -0,0 +1,186 @@
use std::sync::Arc;
use async_trait::async_trait;
use domain::{
Collection, ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem,
MediaItemId, ProviderCapabilities, StreamQuality, StreamingProtocol,
};
use super::config::LocalFilesConfig;
use super::index::{LocalIndex, decode_id};
use super::scanner::LocalFileItem;
use super::transcoder::TranscodeManager;
/// Media provider backed by video files on the local filesystem.
pub struct LocalFilesProvider {
    // Shared scan index (ids -> file metadata).
    pub index: Arc<LocalIndex>,
    // Base URL with any trailing '/' stripped in `new`.
    base_url: String,
    // Present only when HLS transcoding was configured.
    transcode_manager: Option<Arc<TranscodeManager>>,
}
/// Items shorter than this are classified as `ContentType::Short`.
const SHORT_DURATION_SECS: u32 = 1200; // 20 minutes
impl LocalFilesProvider {
    /// Build a provider over a shared index. `transcode_manager` being `Some`
    /// switches the provider into HLS mode (see `capabilities`).
    pub fn new(
        index: Arc<LocalIndex>,
        config: LocalFilesConfig,
        transcode_manager: Option<Arc<TranscodeManager>>,
    ) -> Self {
        // Normalise once so stream URLs never contain a double slash.
        let base_url = config.base_url.trim_end_matches('/').to_string();
        Self {
            index,
            base_url,
            transcode_manager,
        }
    }
}
fn to_media_item(id: MediaItemId, item: &LocalFileItem) -> MediaItem {
let content_type = if item.duration_secs < 1200 {
ContentType::Short
} else {
ContentType::Movie
};
MediaItem {
id,
title: item.title.clone(),
content_type,
duration_secs: item.duration_secs,
description: None,
genres: vec![],
year: item.year,
tags: item.tags.clone(),
series_name: None,
season_number: None,
episode_number: None,
thumbnail_url: None,
collection_id: None,
}
}
#[async_trait]
impl IMediaProvider for LocalFilesProvider {
    /// Capabilities depend on whether transcoding is configured: with a
    /// transcode manager the provider streams HLS, otherwise direct files.
    fn capabilities(&self) -> ProviderCapabilities {
        ProviderCapabilities {
            collections: true,
            series: false,
            genres: false,
            tags: true,
            decade: true,
            search: true,
            streaming_protocol: if self.transcode_manager.is_some() {
                StreamingProtocol::Hls
            } else {
                StreamingProtocol::DirectFile
            },
            rescan: true,
            transcode: self.transcode_manager.is_some(),
        }
    }
    /// Apply `filter` in memory over a snapshot of the whole index.
    async fn fetch_items(&self, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
        let all = self.index.get_all().await;
        // Loop-invariant hoist: lowercase the search needle once instead of
        // once per indexed file inside the closure.
        let needle = filter.search_term.as_ref().map(|q| q.to_lowercase());
        let results = all
            .into_iter()
            .filter_map(|(id, item)| {
                // content_type: derive heuristically (short vs movie), then filter
                let content_type = if item.duration_secs < SHORT_DURATION_SECS {
                    ContentType::Short
                } else {
                    ContentType::Movie
                };
                if let Some(ref ct) = filter.content_type && &content_type != ct {
                    return None;
                }
                // collections: match against top_dir
                if !filter.collections.is_empty() && !filter.collections.contains(&item.top_dir) {
                    return None;
                }
                // tags: OR — item must have at least one matching tag (case-insensitive)
                if !filter.tags.is_empty() {
                    let has = filter
                        .tags
                        .iter()
                        .any(|tag| item.tags.iter().any(|t| t.eq_ignore_ascii_case(tag)));
                    if !has {
                        return None;
                    }
                }
                // decade: year in [decade, decade+9]; items without a year never match
                if let Some(decade) = filter.decade {
                    match item.year {
                        Some(y) if y >= decade && y <= decade + 9 => {}
                        _ => return None,
                    }
                }
                // duration bounds
                if let Some(min) = filter.min_duration_secs && item.duration_secs < min {
                    return None;
                }
                if let Some(max) = filter.max_duration_secs && item.duration_secs > max {
                    return None;
                }
                // search_term: case-insensitive substring in title
                if let Some(ref q) = needle && !item.title.to_lowercase().contains(q.as_str()) {
                    return None;
                }
                Some(to_media_item(id, &item))
            })
            .collect();
        Ok(results)
    }
    /// Single-item lookup; `Ok(None)` for unknown ids.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        Ok(self
            .index
            .get(item_id)
            .await
            .map(|item| to_media_item(item_id.clone(), &item)))
    }
    /// Build a playback URL: an HLS playlist when a transcode was requested and
    /// is available (kicking off/ensuring the transcode first), otherwise a
    /// direct file-stream URL.
    async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
        match quality {
            StreamQuality::Transcode(_) if self.transcode_manager.is_some() => {
                let tm = self.transcode_manager.as_ref().unwrap();
                let rel = decode_id(item_id).ok_or_else(|| {
                    DomainError::InfrastructureError("invalid item id encoding".into())
                })?;
                let src = self.index.root_dir.join(&rel);
                tm.ensure_transcoded(item_id.as_ref(), &src).await?;
                Ok(format!(
                    "{}/api/v1/files/transcode/{}/playlist.m3u8",
                    self.base_url,
                    item_id.as_ref()
                ))
            }
            _ => Ok(format!(
                "{}/api/v1/files/stream/{}",
                self.base_url,
                item_id.as_ref()
            )),
        }
    }
    /// Top-level directories double as collections (id == name).
    async fn list_collections(&self) -> DomainResult<Vec<Collection>> {
        let dirs = self.index.collections().await;
        Ok(dirs
            .into_iter()
            .map(|d| Collection {
                id: d.clone(),
                name: d,
                collection_type: None,
            })
            .collect())
    }
}
/// Decode an encoded ID from a URL path segment to its relative path string.
/// Thin wrapper so HTTP handlers need not construct a `MediaItemId` themselves;
/// returns `None` for invalid base64/UTF-8 (same contract as `decode_id`).
pub fn decode_stream_id(encoded: &str) -> Option<String> {
    decode_id(&MediaItemId::new(encoded))
}

View File

@@ -0,0 +1,161 @@
use std::path::Path;
use tokio::process::Command;
/// File extensions (lowercase) that the scanner recognises as video.
const VIDEO_EXTENSIONS: &[&str] = &["mp4", "mkv", "avi", "mov", "webm", "m4v"];
/// In-memory representation of a scanned local video file.
#[derive(Debug, Clone)]
pub struct LocalFileItem {
    /// Relative path from root, with forward slashes (used as the stable ID source).
    pub rel_path: String,
    /// Display title derived from the file stem (separators replaced by spaces).
    pub title: String,
    /// Duration in whole seconds; 0 when ffprobe could not determine it.
    pub duration_secs: u32,
    /// Release year guessed from the file/dir name, when one was found.
    pub year: Option<u16>,
    /// Ancestor directory names between root and file (excluding root itself).
    pub tags: Vec<String>,
    /// First path component under root (used as collection id/name).
    pub top_dir: String,
}
/// Walk `root` and return all recognised video files with metadata.
///
/// ffprobe is called for each file to determine duration. Files that cannot be
/// probed are included with `duration_secs = 0` so they still appear in the index.
pub async fn scan_dir(root: &Path) -> Vec<LocalFileItem> {
let mut items = Vec::new();
let walker = walkdir::WalkDir::new(root).follow_links(true);
for entry in walker.into_iter().filter_map(|e| e.ok()) {
if !entry.file_type().is_file() {
continue;
}
let path = entry.path();
let ext = path
.extension()
.and_then(|e| e.to_str())
.map(|e| e.to_lowercase());
let ext = match ext {
Some(ref e) if VIDEO_EXTENSIONS.contains(&e.as_str()) => e.clone(),
_ => continue,
};
let _ = ext; // extension validated, not needed further
let rel = match path.strip_prefix(root) {
Ok(r) => r,
Err(_) => continue,
};
// Normalise to forward-slash string for cross-platform stability.
let rel_path: String = rel
.components()
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.collect::<Vec<_>>()
.join("/");
// Top-level directory under root.
let top_dir = rel
.components()
.next()
.filter(|_| rel.components().count() > 1) // skip if file is at root level
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.unwrap_or_else(|| "__root__".to_string());
// Title: stem with separator chars replaced by spaces.
let stem = path
.file_stem()
.and_then(|s| s.to_str())
.unwrap_or("")
.to_string();
let title = stem.replace(['_', '-', '.'], " ");
let title = title.trim().to_string();
// Year: first 4-digit number starting with 19xx or 20xx in filename or parent dirs.
let search_str = format!(
"{} {}",
stem,
rel.parent()
.and_then(|p| p.to_str())
.unwrap_or("")
);
let year = extract_year(&search_str);
// Tags: ancestor directory components between root and the file.
let tags: Vec<String> = rel
.parent()
.into_iter()
.flat_map(|p| p.components())
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.filter(|s| !s.is_empty())
.collect();
let duration_secs = get_duration(path).await.unwrap_or(0);
items.push(LocalFileItem {
rel_path,
title,
duration_secs,
year,
tags,
top_dir,
});
}
items
}
/// Extract the first plausible 4-digit year (1900–2099) from `s`.
///
/// A candidate must be exactly four ASCII digits not embedded in a longer
/// digit run (so "12021" yields nothing). Returns the first match, if any.
fn extract_year(s: &str) -> Option<u16> {
    // ASCII digits are single bytes, so scanning bytes is equivalent to
    // scanning chars here; multi-byte UTF-8 bytes never look like digits.
    let bytes = s.as_bytes();
    let len = bytes.len();
    if len < 4 {
        return None;
    }
    for start in 0..=(len - 4) {
        let window = &bytes[start..start + 4];
        if !window.iter().all(|b| b.is_ascii_digit()) {
            continue;
        }
        // Word boundary: the bytes on either side must not be digits.
        if start > 0 && bytes[start - 1].is_ascii_digit() {
            continue;
        }
        if start + 4 < len && bytes[start + 4].is_ascii_digit() {
            continue;
        }
        // The window is pure ASCII, so from_utf8/parse cannot fail in practice.
        let value: u16 = std::str::from_utf8(window).ok()?.parse().ok()?;
        if (1900..=2099).contains(&value) {
            return Some(value);
        }
    }
    None
}
/// Run ffprobe to get the duration of `path` in whole seconds.
///
/// Returns `None` if ffprobe cannot be spawned, emits unparsable JSON, or
/// reports no `format.duration`. Fractional seconds are truncated.
async fn get_duration(path: &Path) -> Option<u32> {
    #[derive(serde::Deserialize)]
    struct Fmt {
        duration: Option<String>,
    }
    #[derive(serde::Deserialize)]
    struct Out {
        format: Fmt,
    }
    // Pass the path as an OsStr argument (`.arg(path)`) rather than via
    // `to_str()?`, so files with non-UTF-8 names are still probed instead of
    // being silently skipped.
    let output = Command::new("ffprobe")
        .args(["-v", "quiet", "-print_format", "json", "-show_format"])
        .arg(path)
        .output()
        .await
        .ok()?;
    let parsed: Out = serde_json::from_slice(&output.stdout).ok()?;
    let dur: f64 = parsed.format.duration?.parse().ok()?;
    Some(dur as u32)
}

View File

@@ -0,0 +1,252 @@
//! FFmpeg HLS transcoder for local video files.
//!
//! `TranscodeManager` orchestrates on-demand transcoding: the first request for
//! an item spawns an ffmpeg process and returns once the initial HLS playlist
//! appears. Concurrent requests for the same item subscribe to a watch channel
//! and wait without spawning duplicate processes. Transcoded segments are cached
//! in `transcode_dir/{item_id}/` and cleaned up by a background task.
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{
Arc,
atomic::{AtomicU32, Ordering},
};
use std::time::{Duration, Instant};
use tokio::sync::{Mutex, watch};
use tracing::{info, warn, error};
use domain::{DomainError, DomainResult};
// ============================================================================
// Types
// ============================================================================
/// Terminal outcome of a transcode job, broadcast to every waiter via the
/// per-item watch channel.
#[derive(Clone, Debug)]
pub enum TranscodeStatus {
// The initial HLS playlist exists; playback can begin.
Ready,
// Transcoding failed; payload is a human-readable reason.
Failed(String),
}
// ============================================================================
// Manager
// ============================================================================
/// Orchestrates on-demand HLS transcoding of local files (see module docs).
pub struct TranscodeManager {
/// Root for per-item output: segments live in `transcode_dir/{item_id}/`.
pub transcode_dir: PathBuf,
// Max age in hours before a cached transcode is removed by the cleanup task.
cleanup_ttl_hours: Arc<AtomicU32>,
// In-flight transcodes keyed by item ID; late callers subscribe to the
// existing sender instead of spawning a duplicate ffmpeg process.
active: Arc<Mutex<HashMap<String, watch::Sender<Option<TranscodeStatus>>>>>,
}
impl TranscodeManager {
    /// Create a manager rooted at `transcode_dir` and spawn the hourly cache
    /// cleanup task. The task holds only a `Weak` reference, so dropping the
    /// last `Arc` lets it exit on its next tick.
    pub fn new(transcode_dir: PathBuf, cleanup_ttl_hours: u32) -> Arc<Self> {
        let mgr = Arc::new(Self {
            transcode_dir,
            cleanup_ttl_hours: Arc::new(AtomicU32::new(cleanup_ttl_hours)),
            active: Arc::new(Mutex::new(HashMap::new())),
        });
        // Background cleanup task — uses Weak to avoid keeping manager alive.
        let weak = Arc::downgrade(&mgr);
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(3600));
            loop {
                interval.tick().await;
                match weak.upgrade() {
                    Some(m) => m.run_cleanup().await,
                    None => break,
                }
            }
        });
        mgr
    }
    /// Update the cleanup TTL (also persisted to DB by the route handler).
    pub fn set_cleanup_ttl(&self, hours: u32) {
        self.cleanup_ttl_hours.store(hours, Ordering::Relaxed);
    }
    /// Current cleanup TTL in hours.
    pub fn get_cleanup_ttl(&self) -> u32 {
        self.cleanup_ttl_hours.load(Ordering::Relaxed)
    }
    /// Ensure `item_id` has been transcoded to HLS. Blocks until the initial
    /// playlist appears or an error occurs. Concurrent callers share the result.
    pub async fn ensure_transcoded(&self, item_id: &str, src_path: &Path) -> DomainResult<()> {
        let out_dir = self.transcode_dir.join(item_id);
        let playlist = out_dir.join("playlist.m3u8");
        if playlist.exists() {
            return Ok(());
        }
        let mut rx = {
            let mut map = self.active.lock().await;
            if let Some(tx) = map.get(item_id) {
                // A transcode for this item is already running: just wait on it.
                tx.subscribe()
            } else {
                let (tx, rx) = watch::channel::<Option<TranscodeStatus>>(None);
                map.insert(item_id.to_string(), tx.clone());
                let item_id_owned = item_id.to_string();
                let src_owned = src_path.to_path_buf();
                let out_dir_owned = out_dir.clone();
                let playlist_owned = playlist.clone();
                let active_ref = Arc::clone(&self.active);
                tokio::spawn(async move {
                    let _ = tokio::fs::create_dir_all(&out_dir_owned).await;
                    let status = do_transcode(&src_owned, &out_dir_owned, &playlist_owned).await;
                    if matches!(status, TranscodeStatus::Ready) {
                        info!("transcode ready: {}", item_id_owned);
                    } else if let TranscodeStatus::Failed(ref e) = status {
                        error!("transcode failed for {}: {}", item_id_owned, e);
                    }
                    let _ = tx.send(Some(status));
                    active_ref.lock().await.remove(&item_id_owned);
                });
                rx
            }
        };
        // Wait for Ready or Failed. Inspect the current value *before* awaiting
        // `changed()`: the transcode task publishes its status outside the
        // `active` lock, so the status may already have been sent by the time
        // a subscriber gets here. Awaiting `changed()` first (as the previous
        // version did) could miss that send and surface a spurious
        // "task dropped" error once the sender went away.
        loop {
            let current = rx.borrow_and_update().clone();
            if let Some(status) = current {
                return match status {
                    TranscodeStatus::Ready => Ok(()),
                    TranscodeStatus::Failed(e) => Err(DomainError::InfrastructureError(
                        format!("transcode failed: {}", e),
                    )),
                };
            }
            if rx.changed().await.is_err() {
                // All senders dropped without publishing a status; trust the
                // filesystem as a last resort.
                return if playlist.exists() {
                    Ok(())
                } else {
                    Err(DomainError::InfrastructureError(
                        "transcode task dropped unexpectedly".into(),
                    ))
                };
            }
        }
    }
    /// Remove all cached transcode directories, then recreate the empty root.
    pub async fn clear_cache(&self) -> std::io::Result<()> {
        if self.transcode_dir.exists() {
            tokio::fs::remove_dir_all(&self.transcode_dir).await?;
        }
        tokio::fs::create_dir_all(&self.transcode_dir).await
    }
    /// Return `(total_bytes, item_count)` for the cache directory.
    /// Only one directory level is summed (segments live directly per item).
    pub async fn cache_stats(&self) -> (u64, usize) {
        let mut total_bytes = 0u64;
        let mut item_count = 0usize;
        let Ok(mut entries) = tokio::fs::read_dir(&self.transcode_dir).await else {
            return (0, 0);
        };
        while let Ok(Some(entry)) = entries.next_entry().await {
            if !entry.path().is_dir() {
                continue;
            }
            item_count += 1;
            if let Ok(mut sub) = tokio::fs::read_dir(entry.path()).await {
                while let Ok(Some(f)) = sub.next_entry().await {
                    if let Ok(meta) = f.metadata().await {
                        total_bytes += meta.len();
                    }
                }
            }
        }
        (total_bytes, item_count)
    }
    /// Delete item directories whose playlist is older than the TTL.
    /// Directories with no readable playlist are left untouched.
    async fn run_cleanup(&self) {
        let ttl_hours = self.cleanup_ttl_hours.load(Ordering::Relaxed) as u64;
        let ttl = Duration::from_secs(ttl_hours * 3600);
        let now = std::time::SystemTime::now();
        let Ok(mut entries) = tokio::fs::read_dir(&self.transcode_dir).await else {
            return;
        };
        while let Ok(Some(entry)) = entries.next_entry().await {
            let path = entry.path();
            if !path.is_dir() {
                continue;
            }
            // The playlist mtime marks when the transcode finished/was refreshed.
            let playlist = path.join("playlist.m3u8");
            if let Ok(meta) = tokio::fs::metadata(&playlist).await
                && let Ok(modified) = meta.modified()
                && let Ok(age) = now.duration_since(modified)
                && age > ttl
            {
                warn!("cleanup: removing stale transcode {:?}", path);
                let _ = tokio::fs::remove_dir_all(&path).await;
            }
        }
    }
}
// ============================================================================
// FFmpeg helper
// ============================================================================
/// Spawn ffmpeg to transcode `src` into HLS under `out_dir`, returning once
/// `playlist` exists (playback can begin while ffmpeg keeps writing segments)
/// or a failure/timeout is detected.
async fn do_transcode(src: &Path, out_dir: &Path, playlist: &Path) -> TranscodeStatus {
    let segment_pattern = out_dir.join("seg%05d.ts");
    // Paths are passed as OsStr args (`.arg(path)`) rather than via
    // `to_str().unwrap_or("")`, so non-UTF-8 paths reach ffmpeg intact instead
    // of degenerating into empty-string arguments.
    let mut child = match tokio::process::Command::new("ffmpeg")
        .arg("-i")
        .arg(src)
        .args([
            "-c:v",
            "libx264",
            "-preset",
            "fast",
            "-crf",
            "23",
            "-c:a",
            "aac",
            "-b:a",
            "128k",
            "-hls_time",
            "6",
            "-hls_list_size",
            "0",
            "-hls_flags",
            "independent_segments",
            "-hls_segment_filename",
        ])
        .arg(&segment_pattern)
        .arg(playlist)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .spawn()
    {
        Ok(c) => c,
        Err(e) => return TranscodeStatus::Failed(format!("ffmpeg spawn error: {}", e)),
    };
    // Poll for playlist.m3u8 — it appears after the first segment is written,
    // allowing the client to start playback before transcoding is complete.
    let start = Instant::now();
    let timeout = Duration::from_secs(60);
    loop {
        if playlist.exists() {
            return TranscodeStatus::Ready;
        }
        if start.elapsed() > timeout {
            let _ = child.kill().await;
            return TranscodeStatus::Failed("timeout waiting for transcode to start".into());
        }
        match child.try_wait() {
            Ok(Some(status)) => {
                // ffmpeg exited; a last-moment playlist still counts as success.
                return if playlist.exists() {
                    TranscodeStatus::Ready
                } else if status.success() {
                    TranscodeStatus::Failed("ffmpeg exited but produced no playlist".into())
                } else {
                    TranscodeStatus::Failed("ffmpeg exited with non-zero status".into())
                };
            }
            Err(e) => {
                // Can't observe the child any more — don't leave it orphaned.
                let _ = child.kill().await;
                return TranscodeStatus::Failed(e.to_string());
            }
            Ok(None) => {}
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}

View File

@@ -0,0 +1,4 @@
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteProviderConfigRepository;

View File

@@ -0,0 +1,84 @@
use async_trait::async_trait;
use domain::{DomainError, DomainResult, ProviderConfigRepository, ProviderConfigRow};
/// SQLite implementation of `ProviderConfigRepository`.
///
/// `enabled` is stored as an INTEGER (0/1); `updated_at` as TEXT.
#[derive(Clone)]
pub struct SqliteProviderConfigRepository {
    pool: sqlx::SqlitePool,
}
impl SqliteProviderConfigRepository {
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
/// Map a raw `(id, provider_type, config_json, enabled, updated_at)` tuple to
/// the domain row. Shared by `get_all` and `get_by_id` so the column order and
/// the `enabled != 0` conversion are defined in exactly one place.
fn map_config_row(
    (id, provider_type, config_json, enabled, updated_at): (String, String, String, i64, String),
) -> ProviderConfigRow {
    ProviderConfigRow {
        id,
        provider_type,
        config_json,
        enabled: enabled != 0,
        updated_at,
    }
}
#[async_trait]
impl ProviderConfigRepository for SqliteProviderConfigRepository {
    /// Load every provider configuration row.
    async fn get_all(&self) -> DomainResult<Vec<ProviderConfigRow>> {
        let rows: Vec<(String, String, String, i64, String)> = sqlx::query_as(
            "SELECT id, provider_type, config_json, enabled, updated_at FROM provider_configs",
        )
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(rows.into_iter().map(map_config_row).collect())
    }
    /// Load one configuration by its ID, or `None` when absent.
    async fn get_by_id(&self, id: &str) -> DomainResult<Option<ProviderConfigRow>> {
        let row: Option<(String, String, String, i64, String)> = sqlx::query_as(
            "SELECT id, provider_type, config_json, enabled, updated_at FROM provider_configs WHERE id = ?",
        )
        .bind(id)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(row.map(map_config_row))
    }
    /// Insert or update a configuration row keyed by `id`.
    async fn upsert(&self, row: &ProviderConfigRow) -> DomainResult<()> {
        sqlx::query(
            r#"INSERT INTO provider_configs (id, provider_type, config_json, enabled, updated_at)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
provider_type = excluded.provider_type,
config_json = excluded.config_json,
enabled = excluded.enabled,
updated_at = excluded.updated_at"#,
        )
        .bind(&row.id)
        .bind(&row.provider_type)
        .bind(&row.config_json)
        .bind(row.enabled as i64)
        .bind(&row.updated_at)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Delete a configuration row; deleting a missing ID is not an error.
    async fn delete(&self, id: &str) -> DomainResult<()> {
        sqlx::query("DELETE FROM provider_configs WHERE id = ?")
            .bind(id)
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,175 @@
//! Provider registry — routes media operations to the correct named provider.
//!
//! Item IDs are prefixed with the provider key separated by `::`, e.g.
//! `"jellyfin::abc123"` or `"local::base64path"`. The registry strips the
//! prefix before calling the underlying provider and re-stamps returned IDs
//! so every item is self-routing throughout its lifetime.
//!
//! An empty prefix (un-prefixed IDs from old data, or new blocks with no
//! `provider_id` set) falls back to the primary (first-registered) provider.
use std::sync::Arc;
use async_trait::async_trait;
use domain::errors::{DomainError, DomainResult};
use domain::ports::{
Collection, IMediaProvider, IProviderRegistry, ProviderCapabilities, SeriesSummary,
StreamQuality,
};
use domain::{ContentType, MediaFilter, MediaItem, MediaItemId};
/// Registry of named media providers.
///
/// Providers are registered with a short key (e.g. `"jellyfin"`, `"local"`).
/// The first registered provider is the *primary* — it handles un-prefixed IDs
/// and empty `provider_id` strings for backward compatibility.
pub struct ProviderRegistry {
/// Ordered list of `(key, provider)` pairs. Order determines the primary.
// A Vec (not a map) keeps registration order, which defines the primary.
providers: Vec<(String, Arc<dyn IMediaProvider>)>,
}
impl ProviderRegistry {
    /// Create an empty registry with no providers.
    pub fn new() -> Self {
        Self {
            providers: Vec::new(),
        }
    }
    /// Register a provider under `id`. The first registered becomes the primary.
    pub fn register(&mut self, id: impl Into<String>, provider: Arc<dyn IMediaProvider>) {
        self.providers.push((id.into(), provider));
    }
    /// True while no provider has been registered.
    pub fn is_empty(&self) -> bool {
        self.providers.is_empty()
    }
    /// Return the provider registered under `id`, if any.
    pub fn get_provider(&self, id: &str) -> Option<Arc<dyn IMediaProvider>> {
        self.providers.iter().find_map(|(key, provider)| {
            if key == id {
                Some(Arc::clone(provider))
            } else {
                None
            }
        })
    }
    // -------------------------------------------------------------------------
    // Internal helpers
    // -------------------------------------------------------------------------

    /// Stamp `raw_id` with its owning provider's key: `"key::raw"`.
    fn prefix_id(provider_id: &str, raw_id: &str) -> MediaItemId {
        MediaItemId::new(format!("{}::{}", provider_id, raw_id))
    }
    /// Split `"provider_key::raw_id"` into `(key, raw_id)`.
    /// Un-prefixed IDs return `("", full_id)` → primary provider fallback.
    fn parse_prefix(id: &MediaItemId) -> (&str, &str) {
        let full: &str = id.as_ref();
        if let Some(pos) = full.find("::") {
            (&full[..pos], &full[pos + 2..])
        } else {
            ("", full)
        }
    }
    /// Resolve a provider key to the provider, defaulting to primary on empty key.
    /// Returns `(resolved_key, provider)` so the caller can re-stamp IDs.
    fn resolve_provider<'a>(
        &'a self,
        provider_id: &str,
    ) -> DomainResult<(&'a str, &'a Arc<dyn IMediaProvider>)> {
        let found = if provider_id.is_empty() {
            self.providers.first()
        } else {
            self.providers.iter().find(|(key, _)| key == provider_id)
        };
        match found {
            Some((key, provider)) => Ok((key.as_str(), provider)),
            None if provider_id.is_empty() => Err(DomainError::InfrastructureError(
                "No providers registered".into(),
            )),
            None => Err(DomainError::InfrastructureError(format!(
                "Provider '{}' not found",
                provider_id
            ))),
        }
    }
    /// Re-stamp every item's ID with `provider_id` so it stays self-routing.
    fn wrap_items(provider_id: &str, items: Vec<MediaItem>) -> Vec<MediaItem> {
        let mut wrapped = Vec::with_capacity(items.len());
        for mut item in items {
            item.id = Self::prefix_id(provider_id, item.id.as_ref());
            wrapped.push(item);
        }
        wrapped
    }
}
impl Default for ProviderRegistry {
/// Equivalent to [`ProviderRegistry::new`]: an empty registry.
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl IProviderRegistry for ProviderRegistry {
    /// Fetch items from the named provider and stamp their IDs with its key.
    async fn fetch_items(&self, provider_id: &str, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
        let (key, provider) = self.resolve_provider(provider_id)?;
        let fetched = provider.fetch_items(filter).await?;
        Ok(Self::wrap_items(key, fetched))
    }
    /// Route a prefixed ID to its provider and re-stamp the returned item.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        let (prefix, raw) = Self::parse_prefix(item_id);
        let (key, provider) = self.resolve_provider(prefix)?;
        let unprefixed = MediaItemId::new(raw);
        match provider.fetch_by_id(&unprefixed).await? {
            None => Ok(None),
            Some(mut found) => {
                found.id = Self::prefix_id(key, found.id.as_ref());
                Ok(Some(found))
            }
        }
    }
    /// Resolve the stream URL via the provider that owns `item_id`.
    async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
        let (prefix, raw) = Self::parse_prefix(item_id);
        let (_, provider) = self.resolve_provider(prefix)?;
        provider.get_stream_url(&MediaItemId::new(raw), quality).await
    }
    /// All registered provider keys, in registration order.
    fn provider_ids(&self) -> Vec<String> {
        self.providers.iter().map(|(key, _)| key.clone()).collect()
    }
    /// Key of the primary (first-registered) provider, or "" when empty.
    fn primary_id(&self) -> &str {
        match self.providers.first() {
            Some((key, _)) => key.as_str(),
            None => "",
        }
    }
    /// Capabilities of the named provider; empty key means the primary.
    fn capabilities(&self, provider_id: &str) -> Option<ProviderCapabilities> {
        let target = if provider_id.is_empty() {
            self.providers.first().map(|(key, _)| key.as_str())?
        } else {
            provider_id
        };
        self.providers
            .iter()
            .find_map(|(key, provider)| (key == target).then(|| provider.capabilities()))
    }
    async fn list_collections(&self, provider_id: &str) -> DomainResult<Vec<Collection>> {
        let (_, provider) = self.resolve_provider(provider_id)?;
        provider.list_collections().await
    }
    async fn list_series(&self, provider_id: &str, collection_id: Option<&str>) -> DomainResult<Vec<SeriesSummary>> {
        let (_, provider) = self.resolve_provider(provider_id)?;
        provider.list_series(collection_id).await
    }
    async fn list_genres(&self, provider_id: &str, content_type: Option<&ContentType>) -> DomainResult<Vec<String>> {
        let (_, provider) = self.resolve_provider(provider_id)?;
        provider.list_genres(content_type).await
    }
}

View File

@@ -29,6 +29,12 @@ pub(super) struct SlotRow {
pub source_block_id: String,
}
/// Row for the "latest slot per block" query: the owning block's ID plus the
/// slot's `MediaItem` serialized as JSON text.
#[derive(Debug, FromRow)]
pub(super) struct LastSlotRow {
pub source_block_id: String,
pub item: String,
}
#[derive(Debug, FromRow)]
pub(super) struct PlaybackRecordRow {
pub id: String,

View File

@@ -1,9 +1,11 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use domain::{ChannelId, DomainError, DomainResult, GeneratedSchedule, PlaybackRecord, ScheduleRepository};
use std::collections::HashMap;
use super::mapping::{map_schedule, PlaybackRecordRow, ScheduleRow, SlotRow};
use domain::{BlockId, ChannelId, DomainError, DomainResult, GeneratedSchedule, MediaItemId, PlaybackRecord, ScheduleRepository};
use super::mapping::{map_schedule, LastSlotRow, PlaybackRecordRow, ScheduleRow, SlotRow};
pub struct PostgresScheduleRepository {
pool: sqlx::Pool<sqlx::Postgres>,
@@ -143,6 +145,41 @@ impl ScheduleRepository for PostgresScheduleRepository {
rows.into_iter().map(PlaybackRecord::try_from).collect()
}
/// For each block on `channel_id`, return the media-item ID of that block's
/// most recently scheduled slot (MAX(start_at), across all generations).
///
/// NOTE(review): if two slots of one block share the exact MAX(start_at),
/// both rows match and the HashMap keeps whichever arrives last — presumably
/// start times are unique per block; confirm.
async fn find_last_slot_per_block(
&self,
channel_id: ChannelId,
) -> DomainResult<HashMap<BlockId, MediaItemId>> {
let channel_id_str = channel_id.to_string();
// Correlated subquery picks the latest start_at per source_block_id,
// restricted to this channel ($1 and $2 bind the same channel id).
let rows: Vec<LastSlotRow> = sqlx::query_as(
"SELECT ss.source_block_id, ss.item \
FROM scheduled_slots ss \
INNER JOIN generated_schedules gs ON gs.id = ss.schedule_id \
WHERE gs.channel_id = $1 \
AND ss.start_at = ( \
SELECT MAX(ss2.start_at) \
FROM scheduled_slots ss2 \
INNER JOIN generated_schedules gs2 ON gs2.id = ss2.schedule_id \
WHERE ss2.source_block_id = ss.source_block_id \
AND gs2.channel_id = $2 \
)",
)
.bind(&channel_id_str)
.bind(&channel_id_str)
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
let mut map = HashMap::new();
for row in rows {
// source_block_id is stored as a UUID string; item is JSON (see the
// serde_json parse below) — both are validated here, not in SQL.
let block_id = uuid::Uuid::parse_str(&row.source_block_id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
let item: domain::MediaItem = serde_json::from_str(&row.item)
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e)))?;
map.insert(block_id, item.id);
}
Ok(map)
}
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
sqlx::query(
r#"

View File

@@ -1,9 +1,12 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use domain::{ChannelId, DomainError, DomainResult, GeneratedSchedule, PlaybackRecord, ScheduleRepository};
use std::collections::HashMap;
use super::mapping::{map_schedule, PlaybackRecordRow, ScheduleRow, SlotRow};
use domain::{BlockId, ChannelId, DomainError, DomainResult, GeneratedSchedule, MediaItemId, PlaybackRecord, ScheduleRepository};
use uuid::Uuid;
use super::mapping::{map_schedule, LastSlotRow, PlaybackRecordRow, ScheduleRow, SlotRow};
pub struct SqliteScheduleRepository {
pool: sqlx::SqlitePool,
@@ -146,6 +149,112 @@ impl ScheduleRepository for SqliteScheduleRepository {
rows.into_iter().map(PlaybackRecord::try_from).collect()
}
/// For each block on `channel_id`, return the media-item ID of that block's
/// most recently scheduled slot (MAX(start_at), across all generations).
/// SQLite twin of the Postgres implementation (positional `?` binds).
///
/// NOTE(review): if two slots of one block share the exact MAX(start_at),
/// both rows match and the HashMap keeps whichever arrives last — presumably
/// start times are unique per block; confirm.
async fn find_last_slot_per_block(
&self,
channel_id: ChannelId,
) -> DomainResult<HashMap<BlockId, MediaItemId>> {
let channel_id_str = channel_id.to_string();
// Correlated subquery picks the latest start_at per source_block_id,
// restricted to this channel (both `?` placeholders bind the same id).
let rows: Vec<LastSlotRow> = sqlx::query_as(
"SELECT ss.source_block_id, ss.item \
FROM scheduled_slots ss \
INNER JOIN generated_schedules gs ON gs.id = ss.schedule_id \
WHERE gs.channel_id = ? \
AND ss.start_at = ( \
SELECT MAX(ss2.start_at) \
FROM scheduled_slots ss2 \
INNER JOIN generated_schedules gs2 ON gs2.id = ss2.schedule_id \
WHERE ss2.source_block_id = ss.source_block_id \
AND gs2.channel_id = ? \
)",
)
.bind(&channel_id_str)
.bind(&channel_id_str)
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
let mut map = HashMap::new();
for row in rows {
// source_block_id is a UUID string; item is the slot's MediaItem as JSON.
let block_id = uuid::Uuid::parse_str(&row.source_block_id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
let item: domain::MediaItem = serde_json::from_str(&row.item)
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e)))?;
map.insert(block_id, item.id);
}
Ok(map)
}
/// List all schedules for `channel_id`, newest generation first.
///
/// Slots are deliberately not loaded here — each schedule is mapped with an
/// empty slot list; use `get_schedule_by_id` for full slot detail.
async fn list_schedule_history(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<GeneratedSchedule>> {
let rows: Vec<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules WHERE channel_id = ? ORDER BY generation DESC",
)
.bind(channel_id.to_string())
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
rows.into_iter()
.map(|r| map_schedule(r, vec![]))
.collect()
}
/// Fetch one schedule (including its slots) by ID, scoped to `channel_id`.
/// Returns `Ok(None)` when no such schedule exists for that channel.
async fn get_schedule_by_id(
&self,
channel_id: ChannelId,
schedule_id: Uuid,
) -> DomainResult<Option<GeneratedSchedule>> {
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules WHERE id = ? AND channel_id = ?",
)
.bind(schedule_id.to_string())
.bind(channel_id.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
// Slots are loaded separately; `transpose` folds the mapping
// Result back into the Option.
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
/// Delete all playback records and schedules of `channel_id` whose generation
/// is strictly greater than `target_generation`.
///
/// Both deletes run inside one transaction so a failure between them cannot
/// leave playback records referencing schedules that were already removed
/// (the previous version issued two independent statements).
async fn delete_schedules_after(
    &self,
    channel_id: ChannelId,
    target_generation: u32,
) -> DomainResult<()> {
    let target_gen = target_generation as i64;
    let ch = channel_id.to_string();
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
    sqlx::query(
        "DELETE FROM playback_records WHERE channel_id = ? AND generation > ?",
    )
    .bind(&ch)
    .bind(target_gen)
    .execute(&mut *tx)
    .await
    .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
    sqlx::query(
        "DELETE FROM generated_schedules WHERE channel_id = ? AND generation > ?",
    )
    .bind(&ch)
    .bind(target_gen)
    .execute(&mut *tx)
    .await
    .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
    tx.commit()
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
    Ok(())
}
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
sqlx::query(
r#"

View File

@@ -0,0 +1,4 @@
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteTranscodeSettingsRepository;

View File

@@ -0,0 +1,34 @@
use async_trait::async_trait;
use domain::{DomainError, DomainResult, TranscodeSettingsRepository};
use sqlx::SqlitePool;
/// SQLite-backed store for the singleton `transcode_settings` row (id = 1).
pub struct SqliteTranscodeSettingsRepository {
pool: SqlitePool,
}
impl SqliteTranscodeSettingsRepository {
pub fn new(pool: SqlitePool) -> Self {
Self { pool }
}
}
#[async_trait]
impl TranscodeSettingsRepository for SqliteTranscodeSettingsRepository {
/// Read the cleanup TTL (hours) from the singleton settings row, if present.
async fn load_cleanup_ttl(&self) -> DomainResult<Option<u32>> {
let row: Option<(i64,)> =
sqlx::query_as("SELECT cleanup_ttl_hours FROM transcode_settings WHERE id = 1")
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
// SQLite stores integers as i64; narrowed to u32 here (hour counts are small).
Ok(row.map(|(h,)| h as u32))
}
/// Persist the cleanup TTL on the singleton settings row.
///
/// NOTE(review): this UPDATE silently does nothing if the id = 1 row was
/// never seeded — confirm migrations insert it, or switch to an upsert.
async fn save_cleanup_ttl(&self, hours: u32) -> DomainResult<()> {
sqlx::query("UPDATE transcode_settings SET cleanup_ttl_hours = ? WHERE id = 1")
.bind(hours as i64)
.execute(&self.pool)
.await
.map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
Ok(())
}
}

View File

@@ -10,6 +10,7 @@ pub(super) struct UserRow {
pub subject: String,
pub email: String,
pub password_hash: Option<String>,
pub is_admin: i64,
pub created_at: String,
}
@@ -36,6 +37,7 @@ impl TryFrom<UserRow> for User {
row.subject,
email,
row.password_hash,
row.is_admin != 0,
created_at,
))
}

View File

@@ -22,7 +22,7 @@ impl UserRepository for PostgresUserRepository {
async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
let id_str = id.to_string();
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE id = $1",
"SELECT id, subject, email, password_hash, is_admin, created_at FROM users WHERE id = $1",
)
.bind(&id_str)
.fetch_optional(&self.pool)
@@ -34,7 +34,7 @@ impl UserRepository for PostgresUserRepository {
async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>> {
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE subject = $1",
"SELECT id, subject, email, password_hash, is_admin, created_at FROM users WHERE subject = $1",
)
.bind(subject)
.fetch_optional(&self.pool)
@@ -46,7 +46,7 @@ impl UserRepository for PostgresUserRepository {
async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE email = $1",
"SELECT id, subject, email, password_hash, is_admin, created_at FROM users WHERE email = $1",
)
.bind(email)
.fetch_optional(&self.pool)
@@ -62,18 +62,20 @@ impl UserRepository for PostgresUserRepository {
sqlx::query(
r#"
INSERT INTO users (id, subject, email, password_hash, created_at)
VALUES ($1, $2, $3, $4, $5)
INSERT INTO users (id, subject, email, password_hash, is_admin, created_at)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT(id) DO UPDATE SET
subject = excluded.subject,
email = excluded.email,
password_hash = excluded.password_hash
password_hash = excluded.password_hash,
is_admin = excluded.is_admin
"#,
)
.bind(&id)
.bind(&user.subject)
.bind(user.email.as_ref())
.bind(&user.password_hash)
.bind(user.is_admin)
.bind(&created_at)
.execute(&self.pool)
.await
@@ -99,4 +101,12 @@ impl UserRepository for PostgresUserRepository {
Ok(())
}
/// Total number of user rows (e.g. to detect a fresh install).
async fn count_users(&self) -> DomainResult<u64> {
let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users")
.fetch_one(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// COUNT(*) is never negative, so the i64 → u64 cast is safe.
Ok(count as u64)
}
}

View File

@@ -22,7 +22,7 @@ impl UserRepository for SqliteUserRepository {
async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
let id_str = id.to_string();
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE id = ?",
"SELECT id, subject, email, password_hash, is_admin, created_at FROM users WHERE id = ?",
)
.bind(&id_str)
.fetch_optional(&self.pool)
@@ -34,7 +34,7 @@ impl UserRepository for SqliteUserRepository {
async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>> {
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE subject = ?",
"SELECT id, subject, email, password_hash, is_admin, created_at FROM users WHERE subject = ?",
)
.bind(subject)
.fetch_optional(&self.pool)
@@ -46,7 +46,7 @@ impl UserRepository for SqliteUserRepository {
async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
let row: Option<UserRow> = sqlx::query_as(
"SELECT id, subject, email, password_hash, created_at FROM users WHERE email = ?",
"SELECT id, subject, email, password_hash, is_admin, created_at FROM users WHERE email = ?",
)
.bind(email)
.fetch_optional(&self.pool)
@@ -62,18 +62,20 @@ impl UserRepository for SqliteUserRepository {
sqlx::query(
r#"
INSERT INTO users (id, subject, email, password_hash, created_at)
VALUES (?, ?, ?, ?, ?)
INSERT INTO users (id, subject, email, password_hash, is_admin, created_at)
VALUES (?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
subject = excluded.subject,
email = excluded.email,
password_hash = excluded.password_hash
password_hash = excluded.password_hash,
is_admin = excluded.is_admin
"#,
)
.bind(&id)
.bind(&user.subject)
.bind(user.email.as_ref())
.bind(&user.password_hash)
.bind(user.is_admin as i64)
.bind(&created_at)
.execute(&self.pool)
.await
@@ -100,6 +102,14 @@ impl UserRepository for SqliteUserRepository {
Ok(())
}
/// Total number of user rows (e.g. to detect a fresh install).
async fn count_users(&self) -> DomainResult<u64> {
let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users")
.fetch_one(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// COUNT(*) is never negative, so the i64 → u64 cast is safe.
Ok(count as u64)
}
}
#[cfg(test)]

View File

@@ -0,0 +1,35 @@
[package]
name = "mcp"
version = "0.1.0"
edition = "2024"

[features]
default = ["sqlite", "jellyfin"]
sqlite = ["infra/sqlite"]
postgres = ["infra/postgres"]
jellyfin = ["infra/jellyfin"]
local-files = ["infra/local-files"]

[dependencies]
domain = { path = "../domain" }
# No hard-pinned backend feature here: the [features] table above maps this
# crate's sqlite/postgres flags onto infra's, so a
# `--no-default-features --features postgres` build no longer drags sqlite in.
infra = { path = "../infra", default-features = false }
# NOTE(review): k-core unconditionally enables its "sqlite" feature; a
# postgres-only build would need the same feature-mapping treatment — confirm
# k-core's feature names before changing.
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
    "logging",
    "db-sqlx",
    "sqlite",
] }
rmcp = { version = "0.1", features = ["server", "transport-io"] }
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
anyhow = "1"
thiserror = "2"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
chrono = { version = "0.4", features = ["serde"] }
uuid = { version = "1", features = ["v4", "serde"] }
schemars = "0.8"
dotenvy = "0.15"
async-trait = "0.1"

View File

@@ -0,0 +1,13 @@
use domain::DomainError;
/// Serialize a `DomainError` into a JSON error envelope: `{"error": "..."}`.
pub fn domain_err(e: DomainError) -> String {
serde_json::json!({"error": e.to_string()}).to_string()
}
/// Serialize a serde_json failure into the same JSON error envelope.
pub fn json_err(e: serde_json::Error) -> String {
serde_json::json!({"error": format!("serialization failed: {e}")}).to_string()
}
/// Serialize `value` to JSON, falling back to an error envelope on failure.
pub fn ok_json<T: serde::Serialize>(value: &T) -> String {
serde_json::to_string(value).unwrap_or_else(json_err)
}

View File

@@ -0,0 +1,177 @@
use std::sync::Arc;
use std::time::Duration as StdDuration;
use domain::{
ChannelService, DomainError, DomainResult, IMediaProvider, IProviderRegistry, MediaFilter,
MediaItemId, ProviderCapabilities, ScheduleEngineService, StreamQuality, StreamingProtocol,
UserService,
};
use infra::factory::{build_channel_repository, build_schedule_repository, build_user_repository};
use infra::run_migrations;
use tracing::info;
use uuid::Uuid;
mod error;
mod server;
mod tools;
use server::KTvMcpServer;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Load .env if present; a missing file is not an error.
let _ = dotenvy::dotenv();
// Log to stderr — stdout is reserved for the MCP stdio transport.
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(
tracing_subscriber::EnvFilter::from_default_env()
.add_directive("mcp=info".parse().unwrap()),
)
.init();
let database_url = std::env::var("DATABASE_URL")
.unwrap_or_else(|_| "sqlite:data.db?mode=rwc".to_string());
// All MCP operations act on behalf of this fixed user.
let owner_id: Uuid = std::env::var("MCP_USER_ID")
.map_err(|_| anyhow::anyhow!("MCP_USER_ID env var is required (UUID of the user)"))?
.parse()
.map_err(|_| anyhow::anyhow!("MCP_USER_ID must be a valid UUID"))?;
info!("Connecting to database: {}", database_url);
// Choose the backend from compiled features; when both are compiled in,
// sniff the URL scheme at runtime.
#[cfg(all(feature = "sqlite", not(feature = "postgres")))]
let db_type = k_core::db::DbType::Sqlite;
#[cfg(all(feature = "postgres", not(feature = "sqlite")))]
let db_type = k_core::db::DbType::Postgres;
#[cfg(all(feature = "sqlite", feature = "postgres"))]
let db_type = if database_url.starts_with("postgres") {
k_core::db::DbType::Postgres
} else {
k_core::db::DbType::Sqlite
};
let db_config = k_core::db::DatabaseConfig {
db_type,
url: database_url.clone(),
max_connections: 5,
min_connections: 1,
acquire_timeout: StdDuration::from_secs(30),
};
let db_pool = k_core::db::connect(&db_config).await?;
run_migrations(&db_pool).await?;
let user_repo = build_user_repository(&db_pool).await?;
let channel_repo = build_channel_repository(&db_pool).await?;
let schedule_repo = build_schedule_repository(&db_pool).await?;
let _user_service = UserService::new(user_repo);
let channel_service = ChannelService::new(channel_repo.clone());
// Register media providers from env config; the first registered is primary.
let mut registry = infra::ProviderRegistry::new();
#[cfg(feature = "jellyfin")]
{
let base_url = std::env::var("JELLYFIN_BASE_URL").ok();
let api_key = std::env::var("JELLYFIN_API_KEY").ok();
let user_id = std::env::var("JELLYFIN_USER_ID").ok();
if let (Some(base_url), Some(api_key), Some(user_id)) = (base_url, api_key, user_id) {
info!("Media provider: Jellyfin at {}", base_url);
registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(
infra::JellyfinConfig { base_url, api_key, user_id },
)));
}
}
#[cfg(feature = "local-files")]
if let Some(dir) = std::env::var("LOCAL_FILES_DIR").ok().map(std::path::PathBuf::from) {
// NOTE(review): this irrefutable let assumes the pool is always the
// Sqlite variant here — a postgres + local-files build looks
// unsupported; confirm that is intended.
let k_core::db::DatabasePool::Sqlite(ref sqlite_pool) = db_pool;
let base_url = std::env::var("BASE_URL")
.unwrap_or_else(|_| "http://localhost:3000".to_string());
let lf_cfg = infra::LocalFilesConfig {
root_dir: dir,
base_url,
transcode_dir: None,
cleanup_ttl_hours: 24,
};
let idx = Arc::new(infra::LocalIndex::new(&lf_cfg, sqlite_pool.clone()).await);
// Kick off the initial scan in the background; serving starts immediately.
let scan_idx = Arc::clone(&idx);
tokio::spawn(async move { scan_idx.rescan().await; });
registry.register("local", Arc::new(infra::LocalFilesProvider::new(idx, lf_cfg, None)));
}
if registry.is_empty() {
tracing::warn!("No media provider configured. Set JELLYFIN_BASE_URL or LOCAL_FILES_DIR.");
registry.register("noop", Arc::new(NoopMediaProvider));
}
let registry = Arc::new(registry);
let schedule_engine = ScheduleEngineService::new(
Arc::clone(&registry) as Arc<dyn IProviderRegistry>,
channel_repo,
schedule_repo,
);
let server = KTvMcpServer {
channel_service: Arc::new(channel_service),
schedule_engine: Arc::new(schedule_engine),
provider_registry: registry,
owner_id,
};
info!("K-TV MCP server starting (stdio transport), owner_id={}", owner_id);
use rmcp::ServiceExt;
// Serve over stdio and block until the client disconnects.
let service = server
.serve(rmcp::transport::stdio())
.await
.inspect_err(|e| tracing::error!("MCP server error: {e}"))?;
service.waiting().await?;
Ok(())
}
/// Placeholder provider registered when no real media backend is configured.
/// It advertises no capabilities and fails every data call with an
/// explanatory infrastructure error.
struct NoopMediaProvider;

/// Build the uniform "not configured" error used by every NoopMediaProvider call.
fn no_provider_err<T>() -> DomainResult<T> {
    Err(DomainError::InfrastructureError(
        "No media provider configured.".into(),
    ))
}

#[async_trait::async_trait]
impl IMediaProvider for NoopMediaProvider {
    fn capabilities(&self) -> ProviderCapabilities {
        // Everything optional is switched off; the protocol field is a
        // required enum, so pick the simplest value.
        ProviderCapabilities {
            collections: false,
            series: false,
            genres: false,
            tags: false,
            decade: false,
            search: false,
            streaming_protocol: StreamingProtocol::DirectFile,
            rescan: false,
            transcode: false,
        }
    }

    async fn fetch_items(&self, _: &MediaFilter) -> DomainResult<Vec<domain::MediaItem>> {
        no_provider_err()
    }

    async fn fetch_by_id(&self, _: &MediaItemId) -> DomainResult<Option<domain::MediaItem>> {
        no_provider_err()
    }

    async fn get_stream_url(
        &self,
        _: &MediaItemId,
        _: &StreamQuality,
    ) -> DomainResult<String> {
        no_provider_err()
    }
}

View File

@@ -0,0 +1,307 @@
use std::sync::Arc;
use domain::{
ChannelService, ContentType, ProgrammingBlock, ScheduleConfig, ScheduleEngineService,
};
use rmcp::{
ServerHandler,
model::{Implementation, ProtocolVersion, ServerCapabilities, ServerInfo},
tool,
};
use schemars::JsonSchema;
use serde::Deserialize;
use uuid::Uuid;
use crate::tools::{channels, library, schedule};
// Shared state for all MCP tool handlers. Cloned per request by rmcp, so
// every field is an Arc (or Copy, for owner_id).
#[derive(Clone)]
pub struct KTvMcpServer {
    pub channel_service: Arc<ChannelService>,
    pub schedule_engine: Arc<ScheduleEngineService>,
    pub provider_registry: Arc<infra::ProviderRegistry>,
    // All tool calls are scoped to this user (from MCP_USER_ID).
    pub owner_id: Uuid,
}
// ============================================================================
// Parameter types — Uuid fields stored as String to satisfy JsonSchema bound.
// NOTE: the `///` doc comments below become JSON-schema descriptions shown to
// MCP clients (via schemars), so they are part of the tool contract — plain
// `//` comments are used for reviewer notes instead.
// ============================================================================
#[derive(Debug, Deserialize, JsonSchema)]
pub struct GetChannelParams {
    /// Channel UUID (e.g. "550e8400-e29b-41d4-a716-446655440000")
    pub id: String,
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct CreateChannelParams {
    pub name: String,
    /// IANA timezone, e.g. "America/New_York"
    pub timezone: String,
    pub description: Option<String>,
}
// All Option fields mean "leave unchanged" when omitted.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct UpdateChannelParams {
    /// Channel UUID
    pub id: String,
    pub name: Option<String>,
    pub timezone: Option<String>,
    pub description: Option<String>,
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct DeleteChannelParams {
    /// Channel UUID
    pub id: String,
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct SetScheduleConfigParams {
    /// Channel UUID
    pub channel_id: String,
    /// JSON object of the full ScheduleConfig shape: {"monday": [...], "tuesday": [...], ...}
    pub day_blocks_json: String,
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct AddBlockParams {
    /// Channel UUID
    pub channel_id: String,
    /// Day of week: "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"
    pub day: String,
    /// ProgrammingBlock serialized as JSON
    pub block_json: String,
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct RemoveBlockParams {
    /// Channel UUID
    pub channel_id: String,
    /// Block UUID
    pub block_id: String,
}
// Shared single-field params for tools that only need a channel.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct ChannelIdParam {
    /// Channel UUID
    pub channel_id: String,
}
// Empty Vec/None fields mean "no filter" for that dimension.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct SearchMediaParams {
    /// "movie", "episode", or "short"
    pub content_type: Option<String>,
    pub genres: Option<Vec<String>>,
    pub search_term: Option<String>,
    pub series_names: Option<Vec<String>>,
    pub collections: Option<Vec<String>>,
}
#[derive(Debug, Deserialize, JsonSchema)]
pub struct ListGenresParams {
    /// Optional content type: "movie", "episode", or "short"
    pub content_type: Option<String>,
}
// ============================================================================
// Tool implementations
// ============================================================================
fn parse_uuid(s: &str) -> Result<Uuid, String> {
s.parse::<Uuid>()
.map_err(|_| serde_json::json!({"error": format!("invalid UUID: {s}")}).to_string())
}
// Tool surface of the MCP server. Each method becomes an MCP tool via the
// rmcp `#[tool]` macro; the `description` strings are what clients see.
// All tools return a JSON string (either the payload or {"error": ...}).
#[tool(tool_box)]
impl KTvMcpServer {
    #[tool(description = "List all channels owned by the configured user")]
    async fn list_channels(&self) -> String {
        channels::list_channels(&self.channel_service, self.owner_id).await
    }
    #[tool(description = "Get a channel by UUID")]
    async fn get_channel(&self, #[tool(aggr)] p: GetChannelParams) -> String {
        match parse_uuid(&p.id) {
            Ok(id) => channels::get_channel(&self.channel_service, id).await,
            // parse_uuid already produced a JSON error string.
            Err(e) => e,
        }
    }
    #[tool(description = "Create a new channel with a name and IANA timezone")]
    async fn create_channel(&self, #[tool(aggr)] p: CreateChannelParams) -> String {
        channels::create_channel(
            &self.channel_service,
            self.owner_id,
            &p.name,
            &p.timezone,
            p.description,
        )
        .await
    }
    #[tool(description = "Update channel name, timezone, and/or description")]
    async fn update_channel(&self, #[tool(aggr)] p: UpdateChannelParams) -> String {
        match parse_uuid(&p.id) {
            Ok(id) => {
                channels::update_channel(
                    &self.channel_service,
                    id,
                    p.name,
                    p.timezone,
                    p.description,
                )
                .await
            }
            Err(e) => e,
        }
    }
    #[tool(description = "Delete a channel (must be owned by the configured user)")]
    async fn delete_channel(&self, #[tool(aggr)] p: DeleteChannelParams) -> String {
        match parse_uuid(&p.id) {
            Ok(id) => channels::delete_channel(&self.channel_service, id, self.owner_id).await,
            Err(e) => e,
        }
    }
    #[tool(
        description = "Replace a channel's entire schedule config. day_blocks_json is a JSON object of the ScheduleConfig shape: {\"monday\": [...], ...}"
    )]
    async fn set_schedule_config(&self, #[tool(aggr)] p: SetScheduleConfigParams) -> String {
        let channel_id = match parse_uuid(&p.channel_id) {
            Ok(id) => id,
            Err(e) => return e,
        };
        // The config arrives as a JSON string (not a structured param) so
        // clients can pass arbitrary ScheduleConfig shapes.
        let config: ScheduleConfig = match serde_json::from_str(&p.day_blocks_json) {
            Ok(c) => c,
            Err(e) => {
                return serde_json::json!({"error": format!("invalid day_blocks_json: {e}")})
                    .to_string()
            }
        };
        channels::set_schedule_config(&self.channel_service, channel_id, config).await
    }
    #[tool(
        description = "Append a ProgrammingBlock to a channel's schedule for a specific day. day: monday|tuesday|wednesday|thursday|friday|saturday|sunday. block_json is a serialized ProgrammingBlock."
    )]
    async fn add_programming_block(&self, #[tool(aggr)] p: AddBlockParams) -> String {
        let channel_id = match parse_uuid(&p.channel_id) {
            Ok(id) => id,
            Err(e) => return e,
        };
        // Reuse Weekday's serde impl by wrapping the bare day name in quotes
        // to form a JSON string literal.
        let day: domain::Weekday = match serde_json::from_str(&format!("\"{}\"", p.day)) {
            Ok(d) => d,
            Err(e) => {
                return serde_json::json!({"error": format!("invalid day: {e}")}).to_string()
            }
        };
        let block: ProgrammingBlock = match serde_json::from_str(&p.block_json) {
            Ok(b) => b,
            Err(e) => {
                return serde_json::json!({"error": format!("invalid block_json: {e}")}).to_string()
            }
        };
        channels::add_programming_block(&self.channel_service, channel_id, day, block).await
    }
    #[tool(description = "Remove a programming block from a channel's schedule by block UUID")]
    async fn remove_programming_block(&self, #[tool(aggr)] p: RemoveBlockParams) -> String {
        let channel_id = match parse_uuid(&p.channel_id) {
            Ok(id) => id,
            Err(e) => return e,
        };
        let block_id = match parse_uuid(&p.block_id) {
            Ok(id) => id,
            Err(e) => return e,
        };
        channels::remove_programming_block(&self.channel_service, channel_id, block_id).await
    }
    #[tool(description = "Generate a fresh 48-hour schedule for the given channel")]
    async fn generate_schedule(&self, #[tool(aggr)] p: ChannelIdParam) -> String {
        match parse_uuid(&p.channel_id) {
            Ok(id) => schedule::generate_schedule(&self.schedule_engine, id).await,
            Err(e) => e,
        }
    }
    #[tool(description = "Get the currently active schedule for a channel (returns null if none)")]
    async fn get_active_schedule(&self, #[tool(aggr)] p: ChannelIdParam) -> String {
        match parse_uuid(&p.channel_id) {
            Ok(id) => schedule::get_active_schedule(&self.schedule_engine, id).await,
            Err(e) => e,
        }
    }
    #[tool(
        description = "Get what is currently broadcasting on a channel (returns null if in a gap or no schedule)"
    )]
    async fn get_current_broadcast(&self, #[tool(aggr)] p: ChannelIdParam) -> String {
        match parse_uuid(&p.channel_id) {
            Ok(id) => schedule::get_current_broadcast(&self.schedule_engine, id).await,
            Err(e) => e,
        }
    }
    #[tool(description = "List media collections/libraries available in the configured provider")]
    async fn list_collections(&self) -> String {
        library::list_collections(&self.provider_registry).await
    }
    #[tool(
        description = "List genres available in the provider, optionally filtered by content type (movie/episode/short)"
    )]
    async fn list_genres(&self, #[tool(aggr)] p: ListGenresParams) -> String {
        // Unknown content-type strings silently fall back to "no filter".
        let ct = p.content_type.as_deref().and_then(parse_content_type);
        library::list_genres(&self.provider_registry, ct).await
    }
    #[tool(
        description = "Search media items. content_type: movie|episode|short. Returns JSON array of MediaItem."
    )]
    async fn search_media(&self, #[tool(aggr)] p: SearchMediaParams) -> String {
        let ct = p.content_type.as_deref().and_then(parse_content_type);
        library::search_media(
            &self.provider_registry,
            ct,
            p.genres.unwrap_or_default(),
            p.search_term,
            p.series_names.unwrap_or_default(),
            p.collections.unwrap_or_default(),
        )
        .await
    }
}
/// Map a content-type string to the domain enum.
///
/// Matching is ASCII case-insensitive so clients may send "Movie" or
/// "EPISODE" as well as the documented lowercase forms; any unrecognized
/// value yields `None`, which callers treat as "no content-type filter".
fn parse_content_type(s: &str) -> Option<ContentType> {
    match s.to_ascii_lowercase().as_str() {
        "movie" => Some(ContentType::Movie),
        "episode" => Some(ContentType::Episode),
        "short" => Some(ContentType::Short),
        _ => None,
    }
}
// ============================================================================
// ServerHandler
// ============================================================================
// MCP handshake metadata. The `#[tool(tool_box)]` attribute wires the tool
// methods defined above into this handler.
#[tool(tool_box)]
impl ServerHandler for KTvMcpServer {
    fn get_info(&self) -> ServerInfo {
        ServerInfo {
            protocol_version: ProtocolVersion::V_2024_11_05,
            // Only the `tools` capability is exposed (no resources/prompts).
            capabilities: ServerCapabilities::builder().enable_tools().build(),
            server_info: Implementation {
                name: "k-tv-mcp".into(),
                version: "0.1.0".into(),
            },
            instructions: Some(
                "K-TV MCP server. Create channels, define programming blocks, generate schedules. \
                 All operations run as the user configured via MCP_USER_ID."
                    .into(),
            ),
        }
    }
}

View File

@@ -0,0 +1,133 @@
use domain::{Channel, ChannelService, ScheduleConfig, UserId};
use std::sync::Arc;
use uuid::Uuid;
use crate::error::{domain_err, ok_json};
pub async fn list_channels(svc: &Arc<ChannelService>, owner_id: UserId) -> String {
match svc.find_by_owner(owner_id).await {
Ok(channels) => ok_json(&channels),
Err(e) => domain_err(e),
}
}
pub async fn get_channel(svc: &Arc<ChannelService>, id: Uuid) -> String {
match svc.find_by_id(id).await {
Ok(channel) => ok_json(&channel),
Err(e) => domain_err(e),
}
}
/// Create a channel, then persist an optional description with a follow-up
/// update (the create API itself takes no description).
pub async fn create_channel(
    svc: &Arc<ChannelService>,
    owner_id: UserId,
    name: &str,
    timezone: &str,
    description: Option<String>,
) -> String {
    let created: Channel = match svc.create(owner_id, name, timezone).await {
        Ok(c) => c,
        Err(e) => return domain_err(e),
    };
    match description {
        // Nothing extra to store — the freshly created channel is final.
        None => ok_json(&created),
        Some(desc) => {
            let mut channel = created;
            channel.description = Some(desc);
            channel.updated_at = chrono::Utc::now();
            match svc.update(channel).await {
                Ok(c) => ok_json(&c),
                Err(e) => domain_err(e),
            }
        }
    }
}
pub async fn update_channel(
svc: &Arc<ChannelService>,
id: Uuid,
name: Option<String>,
timezone: Option<String>,
description: Option<String>,
) -> String {
let mut channel: Channel = match svc.find_by_id(id).await {
Ok(c) => c,
Err(e) => return domain_err(e),
};
if let Some(n) = name {
channel.name = n;
}
if let Some(tz) = timezone {
channel.timezone = tz;
}
if description.is_some() {
channel.description = description;
}
channel.updated_at = chrono::Utc::now();
match svc.update(channel).await {
Ok(c) => ok_json(&c),
Err(e) => domain_err(e),
}
}
/// Delete a channel owned by `owner_id`; returns {"deleted": <id>} on success.
pub async fn delete_channel(svc: &Arc<ChannelService>, id: Uuid, owner_id: UserId) -> String {
    if let Err(e) = svc.delete(id, owner_id).await {
        return domain_err(e);
    }
    serde_json::json!({"deleted": id}).to_string()
}
pub async fn set_schedule_config(
svc: &Arc<ChannelService>,
channel_id: Uuid,
config: ScheduleConfig,
) -> String {
let mut channel: Channel = match svc.find_by_id(channel_id).await {
Ok(c) => c,
Err(e) => return domain_err(e),
};
channel.schedule_config = config;
channel.updated_at = chrono::Utc::now();
match svc.update(channel).await {
Ok(c) => ok_json(&c),
Err(e) => domain_err(e),
}
}
pub async fn add_programming_block(
svc: &Arc<ChannelService>,
channel_id: Uuid,
day: domain::Weekday,
block: domain::ProgrammingBlock,
) -> String {
let mut channel: Channel = match svc.find_by_id(channel_id).await {
Ok(c) => c,
Err(e) => return domain_err(e),
};
channel.schedule_config.day_blocks
.entry(day)
.or_default()
.push(block);
channel.updated_at = chrono::Utc::now();
match svc.update(channel).await {
Ok(c) => ok_json(&c),
Err(e) => domain_err(e),
}
}
pub async fn remove_programming_block(
svc: &Arc<ChannelService>,
channel_id: Uuid,
block_id: Uuid,
) -> String {
let mut channel: Channel = match svc.find_by_id(channel_id).await {
Ok(c) => c,
Err(e) => return domain_err(e),
};
for blocks in channel.schedule_config.day_blocks.values_mut() {
blocks.retain(|b| b.id != block_id);
}
channel.updated_at = chrono::Utc::now();
match svc.update(channel).await {
Ok(c) => ok_json(&c),
Err(e) => domain_err(e),
}
}

View File

@@ -0,0 +1,43 @@
use domain::{ContentType, IProviderRegistry, MediaFilter};
use std::sync::Arc;
use crate::error::{domain_err, ok_json};
pub async fn list_collections(registry: &Arc<infra::ProviderRegistry>) -> String {
match registry.list_collections("").await {
Ok(cols) => ok_json(&cols),
Err(e) => domain_err(e),
}
}
pub async fn list_genres(
registry: &Arc<infra::ProviderRegistry>,
content_type: Option<ContentType>,
) -> String {
match registry.list_genres("", content_type.as_ref()).await {
Ok(genres) => ok_json(&genres),
Err(e) => domain_err(e),
}
}
pub async fn search_media(
registry: &Arc<infra::ProviderRegistry>,
content_type: Option<ContentType>,
genres: Vec<String>,
search_term: Option<String>,
series_names: Vec<String>,
collections: Vec<String>,
) -> String {
let filter = MediaFilter {
content_type,
genres,
search_term,
series_names,
collections,
..Default::default()
};
match registry.fetch_items("", &filter).await {
Ok(items) => ok_json(&items),
Err(e) => domain_err(e),
}
}

View File

@@ -0,0 +1,3 @@
// MCP tool implementations, grouped by domain area.
pub mod channels; // channel CRUD and schedule-config mutations
pub mod library; // media-provider browsing and search
pub mod schedule; // schedule generation and current-broadcast lookup

View File

@@ -0,0 +1,47 @@
use chrono::Utc;
use domain::{ScheduleEngineService, ScheduledSlot};
use serde::Serialize;
use std::sync::Arc;
use uuid::Uuid;
use crate::error::{domain_err, ok_json};
/// Serializable DTO for CurrentBroadcast (domain type does not derive Serialize).
#[derive(Serialize)]
struct CurrentBroadcastDto {
    // The slot currently on air.
    slot: ScheduledSlot,
    // Seconds elapsed since the slot started.
    offset_secs: u32,
}
pub async fn generate_schedule(engine: &Arc<ScheduleEngineService>, channel_id: Uuid) -> String {
match engine.generate_schedule(channel_id, Utc::now()).await {
Ok(schedule) => ok_json(&schedule),
Err(e) => domain_err(e),
}
}
/// Fetch the schedule active right now; JSON "null" when the channel has none.
pub async fn get_active_schedule(engine: &Arc<ScheduleEngineService>, channel_id: Uuid) -> String {
    match engine.get_active_schedule(channel_id, Utc::now()).await {
        Err(e) => domain_err(e),
        Ok(None) => String::from("null"),
        Ok(Some(schedule)) => ok_json(&schedule),
    }
}
/// Resolve what is on air right now: JSON "null" when there is no active
/// schedule or "now" falls in a gap between slots.
pub async fn get_current_broadcast(
    engine: &Arc<ScheduleEngineService>,
    channel_id: Uuid,
) -> String {
    // A missing schedule is not an error — it simply means nothing is airing.
    let schedule = match engine.get_active_schedule(channel_id, Utc::now()).await {
        Ok(Some(s)) => s,
        Ok(None) => return String::from("null"),
        Err(e) => return domain_err(e),
    };
    // Project the current instant onto the schedule's slots.
    ScheduleEngineService::get_current_broadcast(&schedule, Utc::now())
        .map(|b| {
            ok_json(&CurrentBroadcastDto {
                slot: b.slot,
                offset_secs: b.offset_secs,
            })
        })
        .unwrap_or_else(|| String::from("null"))
}

View File

@@ -0,0 +1,10 @@
-- Index of media files discovered by the local-files provider's scanner.
CREATE TABLE IF NOT EXISTS local_files_index (
    id TEXT PRIMARY KEY,
    -- Path relative to the configured library root; doubles as a natural key.
    rel_path TEXT NOT NULL UNIQUE,
    title TEXT NOT NULL,
    duration_secs INTEGER NOT NULL DEFAULT 0,
    year INTEGER,
    -- JSON-encoded array of tag strings.
    tags TEXT NOT NULL DEFAULT '[]',
    -- First path segment under the root; used for collection-style grouping.
    top_dir TEXT NOT NULL DEFAULT '',
    -- Timestamp (text) of the scan that last saw this file.
    scanned_at TEXT NOT NULL
);

View File

@@ -0,0 +1,5 @@
-- Singleton settings row for the transcode cache; CHECK pins id to 1 so the
-- table can never hold more than one row.
CREATE TABLE IF NOT EXISTS transcode_settings (
    id INTEGER PRIMARY KEY CHECK (id = 1),
    -- How long transcoded artifacts are kept before cleanup, in hours.
    cleanup_ttl_hours INTEGER NOT NULL DEFAULT 24
);
-- Seed the singleton; OR IGNORE keeps the migration idempotent on re-run.
INSERT OR IGNORE INTO transcode_settings (id, cleanup_ttl_hours) VALUES (1, 24);

View File

@@ -0,0 +1,2 @@
-- Per-channel webhook notification target (NULL = webhooks disabled).
ALTER TABLE channels ADD COLUMN webhook_url TEXT;
-- Polling cadence for webhook delivery, in seconds.
ALTER TABLE channels ADD COLUMN webhook_poll_interval_secs INTEGER NOT NULL DEFAULT 5;

View File

@@ -0,0 +1,2 @@
-- Optional request-body template for outgoing webhook calls.
ALTER TABLE channels ADD COLUMN webhook_body_template TEXT;
-- Optional extra HTTP headers for webhook calls (presumably serialized JSON —
-- confirm against the application's serializer).
ALTER TABLE channels ADD COLUMN webhook_headers TEXT;

View File

@@ -0,0 +1,9 @@
-- Append-only application activity/audit log.
CREATE TABLE IF NOT EXISTS activity_log (
    id TEXT PRIMARY KEY NOT NULL,
    timestamp TEXT NOT NULL,
    event_type TEXT NOT NULL,
    detail TEXT NOT NULL,
    -- Optional: NULL for events not tied to a specific channel.
    channel_id TEXT
);
-- Newest-first listing is the dominant query pattern.
CREATE INDEX IF NOT EXISTS idx_activity_log_timestamp ON activity_log(timestamp DESC);

View File

@@ -0,0 +1,8 @@
-- Boolean-as-integer admin flag (SQLite has no native BOOLEAN); 0 = regular user.
ALTER TABLE users ADD COLUMN is_admin INTEGER NOT NULL DEFAULT 0;
-- One config row per provider type (later migration re-keys this by instance id).
CREATE TABLE provider_configs (
    provider_type TEXT PRIMARY KEY,
    -- Provider-specific settings, serialized as JSON.
    config_json TEXT NOT NULL,
    enabled INTEGER NOT NULL DEFAULT 1,
    updated_at TEXT NOT NULL
);

View File

@@ -0,0 +1,12 @@
-- Versioned history of channel configurations, enabling rollback.
CREATE TABLE channel_config_snapshots (
    id TEXT PRIMARY KEY NOT NULL,
    -- Snapshots are deleted with their channel.
    channel_id TEXT NOT NULL REFERENCES channels(id) ON DELETE CASCADE,
    config_json TEXT NOT NULL,
    -- Monotonic per-channel version; the pair below keeps it unique.
    version_num INTEGER NOT NULL,
    -- Optional human-readable label for a snapshot.
    label TEXT,
    created_at TEXT NOT NULL,
    UNIQUE (channel_id, version_num)
);
-- NOTE(review): the UNIQUE constraint already creates an implicit index on
-- (channel_id, version_num); this explicit DESC index serves newest-first
-- listings — confirm it is worth the extra write cost.
CREATE INDEX idx_config_snapshots_channel
    ON channel_config_snapshots(channel_id, version_num DESC);

View File

@@ -0,0 +1,17 @@
-- Recreate provider_configs with per-instance id as PK
-- (SQLite cannot alter a PRIMARY KEY in place, hence the copy-and-swap).
-- NOTE(review): assumes the migration runner wraps each file in a
-- transaction so a failure mid-swap cannot drop data — confirm.
CREATE TABLE provider_configs_new (
    id TEXT PRIMARY KEY,
    provider_type TEXT NOT NULL,
    config_json TEXT NOT NULL,
    enabled INTEGER NOT NULL DEFAULT 1,
    updated_at TEXT NOT NULL
);
-- Existing rows were keyed by provider_type, so reuse it as the instance id.
INSERT INTO provider_configs_new (id, provider_type, config_json, enabled, updated_at)
SELECT provider_type, provider_type, config_json, enabled, updated_at
FROM provider_configs;
DROP TABLE provider_configs;
ALTER TABLE provider_configs_new RENAME TO provider_configs;
-- Scope local_files_index entries by provider instance
-- ('local' matches the single pre-existing provider's id).
ALTER TABLE local_files_index ADD COLUMN provider_id TEXT NOT NULL DEFAULT 'local';
CREATE INDEX IF NOT EXISTS idx_local_files_provider ON local_files_index(provider_id);

Some files were not shown because too many files have changed in this diff Show More