Compare commits

...

132 Commits

Author SHA1 Message Date
e3a65d8052 fix: use StdRng for shuffling indices in fill_block function 2026-03-20 23:12:44 +01:00
f45ca77b79 fix: format code for improved readability and consistency 2026-03-20 01:57:22 +01:00
a5c31ef8a9 fix(frontend): restore plain type labels in grouped sidebar 2026-03-20 01:56:11 +01:00
3662a5ab9e fix(frontend): suppress shows when type filter active; clarify grouped type labels 2026-03-20 01:53:13 +01:00
137251fe37 fix(frontend): restore ALL sentinel in sidebar to fix hydration mismatch 2026-03-20 01:33:27 +01:00
8101734c63 feat(frontend): add useLibraryShows and useLibrarySeasons hooks 2026-03-20 01:29:55 +01:00
6cf8a6d5e3 feat(frontend): implement grouped/drilldown view in library grid 2026-03-20 01:23:33 +01:00
c5317cb639 feat(frontend): add viewMode/drilldown state to library page 2026-03-20 01:21:54 +01:00
5f66493558 feat(api): add /library/shows and /library/shows/:name/seasons routes + season filter 2026-03-20 01:19:31 +01:00
5cc4cde223 feat(frontend): add ShowTile, SeasonTile, BreadcrumbNav components 2026-03-20 01:19:08 +01:00
5b89481104 feat(frontend): extend schedule dialog to support show/series selection 2026-03-20 01:19:00 +01:00
33338ac100 feat(frontend): make library sidebar drilldown-aware 2026-03-20 01:18:52 +01:00
66eef2c82e feat(frontend): add useLibraryShows and useLibrarySeasons hooks 2026-03-20 01:18:34 +01:00
6f1a4e19d3 feat(infra): implement list_shows, list_seasons + season_number filter 2026-03-20 01:16:02 +01:00
dd69470ee4 feat(frontend): add ShowSummary, SeasonSummary types + library shows/seasons API methods 2026-03-20 01:14:43 +01:00
23722a771b feat(domain): add ShowSummary, SeasonSummary types + ILibraryRepository methods 2026-03-20 01:13:00 +01:00
4cf7fdc1c2 feat(frontend): add library sync interval + sync now to admin settings panel 2026-03-20 00:38:04 +01:00
91271bd83c feat(frontend): library page, components, and schedule/add-to-block dialogs (tasks 11-14) 2026-03-20 00:35:40 +01:00
49c7f7abd7 feat(frontend): add useLibrarySearch, useLibrarySyncStatus, useTriggerSync, useAdminSettings hooks 2026-03-20 00:30:44 +01:00
978ad1cdb0 feat(frontend): add library paged types, syncStatus/triggerSync/admin API methods 2026-03-20 00:30:03 +01:00
e1a885dcc9 fix(api): mount admin settings routes at /admin/settings (not /admin/library/settings) 2026-03-20 00:28:24 +01:00
e849548e9e feat(api): replace live-provider library routes with DB-backed routes; add sync + admin settings endpoints 2026-03-20 00:27:06 +01:00
d92d629fbc feat(api): wire library_repo, app_settings_repo, library_sync_adapter into AppState; start scheduler 2026-03-20 00:23:25 +01:00
aa5e3c28aa feat(api): add library sync background task 2026-03-20 00:23:22 +01:00
64138b07e4 feat(infra): add FullSyncAdapter for library sync 2026-03-20 00:19:45 +01:00
6732576d06 feat(infra): add SqliteAppSettingsRepository 2026-03-20 00:17:05 +01:00
a3a421c0ac feat(infra): add SqliteLibraryRepository 2026-03-20 00:15:01 +01:00
c6c93766c7 refactor(domain): remove redundant IAppSettingsRepository re-export; add TODO for Jellyfin enrichment 2026-03-20 00:11:30 +01:00
e101b44fa5 feat(domain): add library types, LibrarySyncAdapter, ILibraryRepository, IAppSettingsRepository; extend MediaItem with thumbnail_url and collection_id 2026-03-20 00:08:10 +01:00
666b1f2753 feat(db): add missing indexes to library migrations 2026-03-20 00:03:27 +01:00
a7c3f1f92e feat(db): add library_items, library_sync_log, app_settings migrations 2026-03-20 00:01:34 +01:00
187cd064fb docs: add library management implementation plan 2026-03-19 23:57:05 +01:00
4cc0e155bd docs: add library management design spec 2026-03-19 23:43:37 +01:00
175d0bb0bb fix(tests): add missing refresh_expiry_days param to JwtConfig::new in tests 2026-03-19 23:03:36 +01:00
311fdd4006 feat: multi-instance provider support
- provider_configs: add id TEXT PK; migrate existing rows (provider_type becomes id)
- local_files_index: add provider_id column + index; scope all queries per instance
- ProviderConfigRow: add id field; add get_by_id to trait
- LocalIndex: add provider_id param; all SQL scoped by provider_id
- factory: thread provider_id through build_local_files_bundle
- AppState.local_index: Option<Arc<LocalIndex>> → HashMap<String, Arc<LocalIndex>>
- admin_providers: restructured routes (POST /admin/providers create, PUT/DELETE /{id}, POST /test)
- admin_providers: use row.id as registry key for jellyfin and local_files
- files.rescan: optional ?provider=<id> query param
- frontend: add id to ProviderConfig, update api/hooks, new multi-instance panel UX
2026-03-19 22:54:41 +01:00
373e1c7c0a fix: remove default-run entry from Cargo.toml 2026-03-19 22:34:09 +01:00
d2412da057 feat(auth): refresh tokens + remember me
Backend: add refresh JWT (30d, token_type claim), POST /auth/refresh
endpoint (rotates token pair), remember_me on login, JWT_REFRESH_EXPIRY_DAYS
env var. Extractors now reject refresh tokens on protected routes.

Frontend: sessionStorage for non-remembered sessions, localStorage +
refresh token for remembered sessions. Transparent 401 recovery in
api.ts (retry once after refresh). Remember me checkbox on login page
with security note when checked.
2026-03-19 22:24:26 +01:00
8bdd5e2277 fix(infra): deserialize channel schedule_config via ScheduleConfigCompat for V1 compat 2026-03-17 14:56:09 +01:00
26343b08f8 fix: test mocks for new trait methods, V1 schedule_config re-import, stale comments 2026-03-17 14:53:23 +01:00
6d350940b9 feat(frontend): schedule history dialog with rollback, wire ConfigHistorySheet 2026-03-17 14:48:39 +01:00
ba6abad602 feat(frontend): weekly grid editor with day tabs and copy shortcut 2026-03-17 14:46:34 +01:00
c0da075f03 feat(frontend): config history sheet with pin and restore 2026-03-17 14:45:00 +01:00
6bfb148e39 feat(frontend): config history and schedule rollback hooks 2026-03-17 14:43:12 +01:00
45c05b5720 fix: snapshot existing config before update; rollback returns 200 2026-03-17 14:41:57 +01:00
bd498b9bcb feat(frontend): ScheduleConfig V2 types, weekday schema, export update 2026-03-17 14:39:19 +01:00
20e80ac28e feat: config history — auto-snapshot on update, list/pin/restore endpoints 2026-03-17 14:39:09 +01:00
ad3a73f061 feat: schedule history — list, detail, rollback endpoints 2026-03-17 14:38:51 +01:00
c0fb8f69de feat(infra): implement config snapshot repository methods 2026-03-17 14:32:04 +01:00
8b8e8a8d8c fix(mcp): update block mutations for ScheduleConfig V2 day_blocks 2026-03-17 14:32:02 +01:00
05d2d77515 feat(infra): schedule history list, get-by-id, delete-after methods 2026-03-17 14:32:02 +01:00
8b701745bf fix(api): update block lookups to use all_blocks() after ScheduleConfig V2 2026-03-17 14:31:24 +01:00
a79ee1b228 feat(domain): 7-day generation window, day_blocks lookup by weekday 2026-03-17 14:29:10 +01:00
d8e39c66be feat(infra): add channel_config_snapshots migration 2026-03-17 14:28:35 +01:00
055937fc3d fix(domain): use ChannelId type in patch_config_snapshot_label 2026-03-17 14:27:41 +01:00
1338f6bace feat(domain): extend ChannelRepository and ScheduleRepository ports for history 2026-03-17 14:25:51 +01:00
995f5b1339 feat(domain): add ChannelConfigSnapshot entity 2026-03-17 14:25:49 +01:00
22bee4f32c feat(domain): ScheduleConfig V2 day-keyed weekly grid with V1 compat 2026-03-17 14:21:00 +01:00
5f1421f4bd fix(domain): improve Weekday tests and document all() ordering 2026-03-17 14:18:13 +01:00
f8e8e85cb0 feat(domain): add Weekday enum with From<chrono::Weekday> 2026-03-17 14:16:16 +01:00
c550790287 feat: add find_last_slot_per_block method to schedule repositories and update related logic 2026-03-17 13:02:20 +01:00
d8dd047020 feat: implement local-files feature with various enhancements and cleanup 2026-03-17 03:00:39 +01:00
c4d2e48f73 fix(frontend): resolve all eslint warnings and errors
- block-timeline: ref updates moved to useLayoutEffect
- channel-card, guide/page: Date.now() wrapped in useMemo + suppress purity rule
- auth-context: lazy localStorage init (removes setState-in-effect)
- use-channel-order: lazy localStorage init (removes setState-in-effect)
- use-idle: start timer on mount without calling resetIdle (removes setState-in-effect)
- use-subtitles, transcode-settings-dialog: inline eslint-disable on exact violating line
- providers: block-level eslint-disable for tokenRef closure in useState initializer
- edit-channel-sheet: remove unused minsToTime and BlockContent imports
- docs/page: escape unescaped quote and apostrophe entities
2026-03-17 02:40:32 +01:00
8ed8da2d90 refactor(frontend): extract logic to hooks, split inline components
Area 1 (tv/page.tsx 964→423 lines):
- hooks: use-fullscreen, use-idle, use-volume, use-quality, use-subtitles,
  use-channel-input, use-channel-passwords, use-tv-keyboard
- components: SubtitlePicker, VolumeControl, QualityPicker, TopControlBar,
  LogoWatermark, AutoplayPrompt, ChannelNumberOverlay, TvBaseLayer

Area 2 (edit-channel-sheet.tsx 1244→678 lines):
- hooks: use-channel-form (all form state + reset logic)
- lib/schemas.ts: extracted Zod schemas + extractErrors
- components: AlgorithmicFilterEditor, RecyclePolicyEditor, WebhookEditor,
  AccessSettingsEditor, LogoEditor

Area 3 (dashboard/page.tsx 406→261 lines):
- hooks: use-channel-order, use-import-channel, use-regenerate-all
- lib/channel-export.ts: pure export utility
- components: DashboardHeader
2026-03-17 02:25:02 +01:00
ce92b43205 fix: show toast and redirect on expired session (401)
Fix stale closure bug in QueryProvider (token ref) and add warning toast so users know why they were redirected to login.
2026-03-17 01:37:11 +01:00
7244349e97 refactor: allow unused variable warning for db_pool in build_provider_registry 2026-03-16 04:41:08 +01:00
6aa86b6666 refactor: extract router/serve to server.rs, main is now thin orchestrator 2026-03-16 04:39:36 +01:00
e7bd66ffdf refactor: extract background task spawning to startup.rs 2026-03-16 04:37:49 +01:00
b25ae95626 refactor: extract provider registry to provider_registry.rs 2026-03-16 04:36:41 +01:00
5949ffc63b refactor: extract DB init to database.rs 2026-03-16 04:34:08 +01:00
29e654cabc refactor: extract telemetry init to telemetry.rs 2026-03-16 04:33:01 +01:00
9d792249c9 feat: implement transcode settings repository and integrate with local-files provider 2026-03-16 04:24:39 +01:00
50df852416 fix: remove sqlx from API layer, read TTL from TranscodeManager, init local_files from DB on startup 2026-03-16 04:08:52 +01:00
d88afbfe2e fix: sync cleanup_ttl_hours to transcode_settings table on provider save 2026-03-16 04:02:58 +01:00
0637504974 fix: local_files hot-reload via RwLock state fields and rebuild_registry 2026-03-16 03:58:59 +01:00
712cf1deb9 fix: local_files hot-reload via RwLock state fields + rebuild_registry local_files case 2026-03-16 03:58:36 +01:00
89036ba62d feat: admin provider UI (types, hooks, guard, settings panel, conditional admin nav) 2026-03-16 03:38:37 +01:00
87f94fcc51 feat: admin provider routes (list/update/delete/test) with admin middleware 2026-03-16 03:34:54 +01:00
46333853d2 feat: ConfigSource enum, RwLock provider_registry, is_admin in UserResponse, available_provider_types 2026-03-16 03:30:44 +01:00
0e51b7c0f1 feat: implement SqliteProviderConfigRepository, build_provider_config_repository factory 2026-03-16 03:26:02 +01:00
4ca8690a89 feat: add admin + provider_configs migration 2026-03-16 03:24:15 +01:00
d80d4e9741 feat: add is_admin to User, count_users, ProviderConfigRepository trait, admin migration 2026-03-16 03:22:00 +01:00
b35054f23e feat(tv-page): add subtitle track toggle functionality 2026-03-16 02:42:24 +01:00
abcf872d2d docs: update README files to include new environment variables and local files feature 2026-03-16 02:29:42 +01:00
e805028d46 feat: add server-sent events for logging and activity tracking
- Implemented a custom tracing layer (`AppLogLayer`) to capture log events and broadcast them to SSE clients.
- Created admin routes for streaming server logs and listing recent activity logs.
- Added an activity log repository interface and SQLite implementation for persisting activity events.
- Integrated activity logging into user authentication and channel CRUD operations.
- Developed frontend components for displaying server logs and activity logs in the admin panel.
- Enhanced the video player with a stats overlay for monitoring streaming metrics.
2026-03-16 02:21:40 +01:00
4df6522952 feat(channel-card): add confirmation dialog for schedule regeneration 2026-03-16 01:50:05 +01:00
40f698acb7 refactor: clean up styles and improve layout in dashboard and edit channel components
- Removed unnecessary class names for buttons in ChannelCard and DashboardPage components.
- Updated layout styles in RootLayout to apply dark mode by default.
- Refactored edit-channel-sheet to streamline block editor and filter editor components.
- Adjusted duration input fields to reflect minutes instead of seconds in AlgorithmicFilterEditor.
- Enhanced the structure of the EditChannelSheet for better readability and maintainability.
2026-03-16 01:40:28 +01:00
e76167134b feat: add webhook body template and headers support for channels 2026-03-16 01:10:26 +01:00
db461db270 webhooks (#1)
Reviewed-on: #1
2026-03-15 23:51:41 +00:00
2ba9bfbf2f feat(channel-card): update TV link to include channel ID in query parameters 2026-03-15 23:59:07 +01:00
f1e2c727aa feat(tests): add unit tests for auto-scheduler functionality 2026-03-15 23:56:52 +01:00
1102e385f3 feat(transcoding): add FFmpeg HLS transcoding support
- Introduced `TranscodeManager` for managing on-demand transcoding of local video files.
- Added configuration options for transcoding in `Config` and `LocalFilesConfig`.
- Implemented new API routes for managing transcoding settings, stats, and cache.
- Updated `LocalFilesProvider` to support transcoding capabilities.
- Created frontend components for managing transcode settings and displaying stats.
- Added database migration for transcode settings.
- Enhanced existing routes and DTOs to accommodate new transcoding features.
2026-03-15 00:34:23 +01:00
ead65e6be2 feat: implement multi-provider support in media library
- Introduced IProviderRegistry to manage multiple media providers.
- Updated AppState to use provider_registry instead of a single media_provider.
- Refactored library routes to support provider-specific queries for collections, series, genres, and items.
- Enhanced ProgrammingBlock to include provider_id for algorithmic and manual content types.
- Modified frontend components to allow selection of providers and updated API calls to include provider parameters.
- Adjusted hooks and types to accommodate provider-specific functionality.
2026-03-14 23:59:21 +01:00
c53892159a feat(mcp): implement media channel management and scheduling features 2026-03-14 23:19:24 +01:00
f7f4d92376 feat(docs): enhance documentation with Docker deployment and local files provider sections 2026-03-14 04:10:57 +01:00
cf92cc49c2 feat(stream): add stream quality selection and update stream URL handling 2026-03-14 04:03:54 +01:00
8f42164bce feat: add local files provider with indexing and rescan functionality
- Implemented LocalFilesProvider to manage local video files.
- Added LocalIndex for in-memory and SQLite-backed indexing of video files.
- Introduced scanning functionality to detect video files and extract metadata.
- Added API endpoints for listing collections, genres, and series based on provider capabilities.
- Enhanced existing routes to check for provider capabilities before processing requests.
- Updated frontend to utilize provider capabilities for conditional rendering of UI elements.
- Implemented rescan functionality to refresh the local files index.
- Added database migration for local files index schema.
2026-03-14 03:44:32 +01:00
9b6bcfc566 refactor(guide): improve code formatting and readability in page.tsx 2026-03-14 03:02:53 +01:00
c189056003 feat(auth): enhance error handling for token expiration and unauthorized access 2026-03-14 03:00:30 +01:00
791741fde0 feat(docs): add README with project overview, Docker instructions, and environment variables 2026-03-14 02:43:10 +01:00
0bdf7104a9 feat(casting): implement casting functionality with auto-mute and UI controls 2026-03-14 02:38:54 +01:00
953366ed63 feat(video-player): add muted prop to VideoPlayer and handle mute state in playback 2026-03-14 02:31:40 +01:00
6c14c8f491 feat(layout): add Script component for Google Cast framework integration 2026-03-14 02:29:11 +01:00
da714840ee feat(channel): add logo support with position and opacity settings 2026-03-14 02:27:16 +01:00
e610c23fea feat: add IPTV export functionality with M3U and XMLTV generation, including UI components for export dialog 2026-03-14 02:11:20 +01:00
66ec0c51c0 feat: refactor QueryProvider to include error handling and improve query client setup 2026-03-14 01:49:10 +01:00
81df6eb8ff feat: add access control to channels with various modes
- Introduced AccessMode enum to define channel access levels: Public, PasswordProtected, AccountRequired, and OwnerOnly.
- Updated Channel and ProgrammingBlock entities to include access_mode and access_password_hash fields.
- Enhanced create and update channel functionality to handle access mode and password.
- Implemented access checks in channel routes based on the defined access modes.
- Modified frontend components to support channel creation and editing with access control options.
- Added ChannelPasswordModal for handling password input when accessing restricted channels.
- Updated API calls to include channel and block passwords as needed.
- Created database migrations to add access_mode and access_password_hash columns to channels table.
2026-03-14 01:45:10 +01:00
924e162563 feat(video-player): enable AirPlay support for video player component 2026-03-14 01:16:26 +01:00
1fc473342d feat(channel): add auto-schedule feature to channels with background scheduler 2026-03-13 02:27:27 +01:00
dfd8f52a53 feat(tv): relocate fullscreen event tracking for iOS Safari to ensure proper video element handling 2026-03-13 02:02:51 +01:00
c152894291 feat(tv): enhance fullscreen functionality for iOS Safari with video element events 2026-03-13 01:56:44 +01:00
6a4eb099cb feat(schedule): add loop and recycle policy options to programming blocks 2026-03-13 01:53:02 +01:00
eeb4e2cb41 Refactor schedule and user repositories into modular structure
- Moved schedule repository logic into separate modules for SQLite and PostgreSQL implementations.
- Created a mapping module for shared data structures and mapping functions in the schedule repository.
- Added new mapping module for user repository to handle user data transformations.
- Implemented PostgreSQL and SQLite user repository adapters with necessary CRUD operations.
- Added tests for user repository functionality, including saving, finding, and deleting users.
2026-03-13 01:35:14 +01:00
79ced7b77b feat(tv): update video player to handle ended event and improve channel navigation 2026-03-12 04:19:56 +01:00
8754758254 feat(tv): implement fullscreen navigation behavior for TV page 2026-03-12 03:32:31 +01:00
9559858075 feat(guide): implement channel guide page with EPG and upcoming slots
feat(layout): add guide link to navigation
feat(tv): enable channel navigation via query parameter
2026-03-12 03:29:52 +01:00
e5a9b99b14 feat(library): add strategy parameter for item fetching and update filter preview 2026-03-12 03:24:32 +01:00
6d1bed2ecb feat(jellyfin): enhance series fetching logic to support independent series retrieval and improve item interleaving 2026-03-12 03:21:16 +01:00
f028b1be98 feat: update media filter to support multiple series names and enhance library item fetching 2026-03-12 03:12:59 +01:00
bf07a65dcd feat(library): add media library browsing functionality
- Introduced new `library` module in the API routes to handle media library requests.
- Enhanced `AppState` to include a media provider for library interactions.
- Defined new `IMediaProvider` trait methods for listing collections, series, and genres.
- Implemented Jellyfin media provider methods for fetching collections and series.
- Added frontend components for selecting series and displaying filter previews.
- Created hooks for fetching collections, series, and genres from the library.
- Updated media filter to support series name and search term.
- Enhanced API client to handle new library-related endpoints.
2026-03-12 02:54:30 +01:00
f069376136 feat: add interaction handling for autoplay block in video player 2026-03-12 02:35:02 +01:00
4d2eeaa8c6 Revert "feat: enhance HLS stream URL generation with remuxing options for Jellyfin"
This reverts commit fa695da81b.
2026-03-12 02:30:02 +01:00
fa695da81b feat: enhance HLS stream URL generation with remuxing options for Jellyfin 2026-03-12 02:09:59 +01:00
4e1de172f7 feat: add support for user registration toggle and Traefik integration in Docker setup 2026-03-11 22:56:23 +01:00
dc29976c1f feat: add Docker configuration and environment setup for backend and frontend 2026-03-11 22:49:59 +01:00
cb49c3e50a feat: update favicon and replace logo; remove unused SVG files 2026-03-11 22:46:35 +01:00
20aed753d8 feat: enhance schedule generation with series continuity for sequential blocks 2026-03-11 22:42:44 +01:00
ee64fc0b8a feat: enhance schedule slot handling with episode details and duration calculation 2026-03-11 22:30:05 +01:00
0f1b9c11fe feat: implement configuration management and enhance user registration flow 2026-03-11 22:26:16 +01:00
62549faffa feat: add buffering spinner to VideoPlayer component 2026-03-11 22:06:10 +01:00
2caad1670d feat: add Docs link to navigation in MainLayout 2026-03-11 22:04:15 +01:00
b2f40054fc feat: add subtitle track support to VideoPlayer and integrate with TvPage 2026-03-11 21:55:20 +01:00
f6ff65094b feat: enhance MediaItem with additional episode details and update ChannelInfo component 2026-03-11 21:45:11 +01:00
235 changed files with 25170 additions and 3754 deletions

61
.env.example Normal file
View File

@@ -0,0 +1,61 @@
# Copy this file to .env and fill in the values before running `docker compose up`.
# ── Ports (optional, defaults shown) ─────────────────────────────────────────
BACKEND_PORT=3000
FRONTEND_PORT=3001
# ── Auth ──────────────────────────────────────────────────────────────────────
# Generate: openssl rand -hex 32
JWT_SECRET=change-me-generate-with-openssl-rand-hex-32
# Generate: openssl rand -base64 64
COOKIE_SECRET=change-me-must-be-at-least-64-characters-long-for-production!!
JWT_EXPIRY_HOURS=24
# Set to false to disable new user registration (existing users can still log in)
ALLOW_REGISTRATION=true
# Set to true when serving over HTTPS
SECURE_COOKIE=false
PRODUCTION=false
# ── CORS ──────────────────────────────────────────────────────────────────────
# Origin(s) from which the browser will hit the backend, comma-separated.
# Must match what users type in their browser for the frontend.
# Example (local): http://localhost:3001
# Example (remote): https://tv.example.com
CORS_ALLOWED_ORIGINS=http://localhost:3001
# ── Frontend / API URL ────────────────────────────────────────────────────────
# Public URL of the BACKEND, as seen from the user's browser.
# This is baked into the Next.js client bundle at build time.
# Example (local): http://localhost:3000/api/v1
# Example (remote): https://api.example.com/api/v1
NEXT_PUBLIC_API_URL=http://localhost:3000/api/v1
# ── Jellyfin ──────────────────────────────────────────────────────────────────
JELLYFIN_BASE_URL=http://jellyfin:8096
JELLYFIN_API_KEY=your-jellyfin-api-key-here
JELLYFIN_USER_ID=your-jellyfin-user-id-here
# ── Database pool (optional) ──────────────────────────────────────────────────
DB_MAX_CONNECTIONS=5
DB_MIN_CONNECTIONS=1
# ── PostgreSQL (optional, uncomment db service in compose.yml first) ──────────
# POSTGRES_PASSWORD=change-me
# ── Traefik (only needed with compose.traefik.yml) ────────────────────────────
# External Docker network Traefik is attached to
TRAEFIK_NETWORK=traefik_proxy
# Traefik entrypoint (usually websecure for HTTPS, web for HTTP)
TRAEFIK_ENTRYPOINT=websecure
# Cert resolver defined in your Traefik static config
TRAEFIK_CERT_RESOLVER=letsencrypt
# Public hostnames routed by Traefik
FRONTEND_HOST=tv.example.com
BACKEND_HOST=tv-api.example.com
# When using Traefik, update these to the public URLs:
# NEXT_PUBLIC_API_URL=https://tv-api.example.com/api/v1
# CORS_ALLOWED_ORIGINS=https://tv.example.com

3
.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
transcode/
.worktrees/
.superpowers/

74
README.md Normal file
View File

@@ -0,0 +1,74 @@
# k-tv
Self-hosted linear TV channel orchestration. Turns a personal media library into broadcast-style channels.
## Stack
- **Backend** — Rust (Axum), SQLite/PostgreSQL, Jellyfin
- **Frontend** — Next.js 16, React 19, TanStack Query, Tailwind v4, shadcn/ui
---
## Docker
Registry: `registry.gabrielkaszewski.dev`
### Build
`NEXT_PUBLIC_API_URL` is baked into the frontend bundle at build time — always pass it explicitly.
```bash
# Backend
docker build -t registry.gabrielkaszewski.dev/k-tv-backend:latest ./k-tv-backend
# Frontend — NEXT_PUBLIC_API_URL required
docker build \
--build-arg NEXT_PUBLIC_API_URL=https://tv-api.gabrielkaszewski.dev/api/v1 \
-t registry.gabrielkaszewski.dev/k-tv-frontend:latest \
./k-tv-frontend
```
### Push
```bash
docker push registry.gabrielkaszewski.dev/k-tv-backend:latest
docker push registry.gabrielkaszewski.dev/k-tv-frontend:latest
```
### Build + push (one-liner)
```bash
docker build -t registry.gabrielkaszewski.dev/k-tv-backend:latest ./k-tv-backend && \
docker push registry.gabrielkaszewski.dev/k-tv-backend:latest && \
docker build \
--build-arg NEXT_PUBLIC_API_URL=https://tv-api.gabrielkaszewski.dev/api/v1 \
-t registry.gabrielkaszewski.dev/k-tv-frontend:latest \
./k-tv-frontend && \
docker push registry.gabrielkaszewski.dev/k-tv-frontend:latest
```
### Deploy (on server)
```bash
docker compose -f compose.yml -f compose.traefik.yml pull
docker compose -f compose.yml -f compose.traefik.yml up -d
```
---
## Ports
| Service | Port |
|----------|------|
| Backend | 3000 |
| Frontend | 3001 |
## Env vars
| Var | Where | Note |
|-----|-------|------|
| `NEXT_PUBLIC_API_URL` | frontend build arg | Baked in at build time — must point to the public backend URL |
| `API_URL` | frontend runtime env | Server-side only (Next.js API routes). Set in compose. |
| `DATABASE_URL` | backend | `sqlite:///app/data/k-tv.db` or postgres DSN |
| `JWT_SECRET` | backend | JWT signing key — change in production (min 32 chars) |
| `COOKIE_SECRET` | backend | OIDC state cookie encryption key — change in production (min 64 chars) |

49
compose.traefik.yml Normal file
View File

@@ -0,0 +1,49 @@
# Traefik integration overlay.
#
# Usage:
# docker compose -f compose.yml -f compose.traefik.yml up -d --build
#
# Assumes Traefik is already running on your host with an external Docker
# network. Add these variables to your .env (see .env.example):
#
# TRAEFIK_NETWORK name of the external Traefik network (default: traefik_proxy)
# TRAEFIK_ENTRYPOINT Traefik entrypoint name (default: websecure)
# TRAEFIK_CERT_RESOLVER cert resolver name for TLS (default: letsencrypt)
# FRONTEND_HOST public hostname for the frontend e.g. tv.example.com
# BACKEND_HOST public hostname for the backend API e.g. tv-api.example.com
#
# Remember: NEXT_PUBLIC_API_URL in .env must be the *public* backend URL,
# e.g. https://tv-api.example.com/api/v1, and you must rebuild after changing it.
services:
backend:
ports: [] # Traefik handles ingress; no direct port exposure needed
networks:
- default
- traefik
labels:
- "traefik.enable=true"
- "traefik.docker.network=${TRAEFIK_NETWORK:-traefik_proxy}"
- "traefik.http.routers.ktv-backend.rule=Host(`${BACKEND_HOST}`)"
- "traefik.http.routers.ktv-backend.entrypoints=${TRAEFIK_ENTRYPOINT:-websecure}"
- "traefik.http.routers.ktv-backend.tls.certresolver=${TRAEFIK_CERT_RESOLVER:-letsencrypt}"
- "traefik.http.services.ktv-backend.loadbalancer.server.port=3000"
frontend:
ports: []
networks:
- default
- traefik
labels:
- "traefik.enable=true"
- "traefik.docker.network=${TRAEFIK_NETWORK:-traefik_proxy}"
- "traefik.http.routers.ktv-frontend.rule=Host(`${FRONTEND_HOST}`)"
- "traefik.http.routers.ktv-frontend.entrypoints=${TRAEFIK_ENTRYPOINT:-websecure}"
- "traefik.http.routers.ktv-frontend.tls.certresolver=${TRAEFIK_CERT_RESOLVER:-letsencrypt}"
- "traefik.http.services.ktv-frontend.loadbalancer.server.port=3001"
networks:
traefik:
external: true
name: ${TRAEFIK_NETWORK:-traefik_proxy}

73
compose.yml Normal file
View File

@@ -0,0 +1,73 @@
services:
# ── Backend (Rust / Axum) ──────────────────────────────────────────────────
backend:
build: ./k-tv-backend
ports:
- "${BACKEND_PORT:-3000}:3000"
environment:
- HOST=0.0.0.0
- PORT=3000
- DATABASE_URL=sqlite:///app/data/k-tv.db?mode=rwc
# Allow requests from the browser (the user-facing frontend URL)
- CORS_ALLOWED_ORIGINS=${CORS_ALLOWED_ORIGINS}
# Auth — generate with: openssl rand -hex 32
- JWT_SECRET=${JWT_SECRET}
# Cookie secret — generate with: openssl rand -base64 64
- COOKIE_SECRET=${COOKIE_SECRET}
- JWT_EXPIRY_HOURS=${JWT_EXPIRY_HOURS:-24}
- SECURE_COOKIE=${SECURE_COOKIE:-false}
- PRODUCTION=${PRODUCTION:-false}
- ALLOW_REGISTRATION=${ALLOW_REGISTRATION:-true}
- DB_MAX_CONNECTIONS=${DB_MAX_CONNECTIONS:-5}
- DB_MIN_CONNECTIONS=${DB_MIN_CONNECTIONS:-1}
# Jellyfin — all three required for schedule generation
- JELLYFIN_BASE_URL=${JELLYFIN_BASE_URL}
- JELLYFIN_API_KEY=${JELLYFIN_API_KEY}
- JELLYFIN_USER_ID=${JELLYFIN_USER_ID}
volumes:
- backend_data:/app/data
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:3000/api/v1/config || exit 1"]
interval: 30s
timeout: 5s
retries: 3
# ── Frontend (Next.js) ────────────────────────────────────────────────────
frontend:
build:
context: ./k-tv-frontend
args:
# Browser-visible backend URL — baked into the client bundle at build time.
# Rebuild the image after changing this.
NEXT_PUBLIC_API_URL: ${NEXT_PUBLIC_API_URL:-http://localhost:3000/api/v1}
ports:
- "${FRONTEND_PORT:-3001}:3001"
environment:
# Server-side API URL — uses Docker's internal network, never exposed.
# Next.js API routes (e.g. /api/stream/[channelId]) use this.
API_URL: http://backend:3000/api/v1
depends_on:
backend:
condition: service_healthy
restart: unless-stopped
volumes:
backend_data:
# ── Optional: PostgreSQL ───────────────────────────────────────────────────
# Uncomment the db service and set DATABASE_URL in backend's environment:
# DATABASE_URL: postgres://ktv:${POSTGRES_PASSWORD}@db:5432/ktv
#
# db:
# image: postgres:16-alpine
# environment:
# POSTGRES_USER: ktv
# POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
# POSTGRES_DB: ktv
# volumes:
# - db_data:/var/lib/postgresql/data
# restart: unless-stopped
#
# db_data:

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,255 @@
# Library Management — Design Spec
**Date:** 2026-03-19
**Status:** Approved
## Context
K-TV currently has ephemeral library browsing: metadata is always fetched live from providers (Jellyfin, local files) on demand, only accessible through the block editor filter UI. There is no persistent library, no cross-provider browsing, and no way to schedule directly from browsing media.
This feature introduces an in-house library that syncs and stores media metadata from all providers into k-tv's own DB, then surfaces it through a first-class `/library` page where users can browse, filter, multi-select, and schedule media directly onto channels.
---
## Data Model
### Migration `20260319000002_add_library_tables.sql`
**`library_items` table**
| Column | Type | Notes |
|---|---|---|
| `id` | TEXT PK | `"{provider_id}::{raw_item_id}"` — double-colon, matches existing registry prefix format |
| `provider_id` | TEXT | `"jellyfin"`, `"local"`, etc. |
| `external_id` | TEXT | Raw ID from provider (for re-fetching) |
| `title` | TEXT | |
| `content_type` | TEXT | `"movie"` \| `"episode"` \| `"short"` |
| `duration_secs` | INTEGER | |
| `series_name` | TEXT | NULL for movies |
| `season_number` | INTEGER | NULL for movies |
| `episode_number` | INTEGER | NULL for movies |
| `year` | INTEGER | |
| `genres` | TEXT | JSON array |
| `tags` | TEXT | JSON array |
| `collection_id` | TEXT | Provider-specific collection ID |
| `collection_name` | TEXT | Human-readable name (synced from provider) |
| `collection_type` | TEXT | e.g. `"movies"`, `"tvshows"` |
| `thumbnail_url` | TEXT | Provider-served image URL; re-fetched on every sync |
| `synced_at` | TEXT | ISO8601 timestamp |
`thumbnail_url` is refreshed on every full sync. Frontend must handle broken image URLs gracefully (show a placeholder on load error) since URLs may break if provider URL or API key changes between syncs.
**`library_sync_log` table**
| Column | Type | Notes |
|---|---|---|
| `id` | INTEGER PK AUTOINCREMENT | |
| `provider_id` | TEXT | |
| `started_at` | TEXT | ISO8601 |
| `finished_at` | TEXT | ISO8601, NULL while running |
| `items_found` | INTEGER | |
| `status` | TEXT | `"running"` \| `"done"` \| `"error"` |
| `error_msg` | TEXT | NULL on success |
### Migration `20260319000003_add_app_settings.sql`
**`app_settings` table** — general-purpose key-value store for admin-configurable settings. Co-exists with the existing `transcode_settings` singleton table (that table is not modified). Seeded with: `INSERT OR IGNORE INTO app_settings(key, value) VALUES ('library_sync_interval_hours', '6')`.
| Column | Type | Notes |
|---|---|---|
| `key` | TEXT PK | |
| `value` | TEXT | Bare JSON scalar stored as text (e.g. `6`, not `"6"`) |
`GET /admin/settings` returns parsed values: `{ "library_sync_interval_hours": 6 }` (number, not string). Backend parses with `serde_json::Value` on read; frontend receives typed JSON.
---
## Backend Architecture
### Sync Engine
**Layer placement:**
- `LibraryItem`, `LibrarySyncResult`, `LibrarySyncAdapter` trait, and `ILibraryRepository` trait live in **`domain/src/library.rs`**
- `FullSyncAdapter` (impl) and `SqliteLibraryRepository` (impl) live in **`infra/src/library/`**
The `LibrarySyncAdapter` domain trait does **not** take a DB pool — DB writes are an infra concern handled entirely inside the impl:
```rust
// domain/src/library.rs
#[async_trait]
pub trait LibrarySyncAdapter: Send + Sync {
async fn sync_provider(
&self,
provider: &dyn IMediaProvider,
provider_id: &str,
) -> LibrarySyncResult;
}
#[async_trait]
pub trait ILibraryRepository: Send + Sync {
async fn search(&self, filter: LibrarySearchFilter) -> Vec<LibraryItem>;
async fn get_by_id(&self, id: &str) -> Option<LibraryItem>;
async fn list_collections(&self, provider_id: Option<&str>) -> Vec<LibraryCollection>;
async fn list_series(&self, provider_id: Option<&str>) -> Vec<String>;
async fn list_genres(&self, content_type: Option<ContentType>, provider_id: Option<&str>) -> Vec<String>;
async fn upsert_items(&self, provider_id: &str, items: Vec<LibraryItem>) -> DomainResult<()>;
async fn clear_provider(&self, provider_id: &str) -> DomainResult<()>;
async fn log_sync_start(&self, provider_id: &str) -> i64; // returns log row id
async fn log_sync_finish(&self, log_id: i64, result: &LibrarySyncResult);
async fn latest_sync_status(&self) -> Vec<LibrarySyncLogEntry>;
async fn is_sync_running(&self, provider_id: &str) -> bool;
}
```
`FullSyncAdapter` in infra holds `Arc<dyn ILibraryRepository>` and calls repo methods internally — no DB pool leaks into domain.
```
infra/src/library/
mod.rs
full_sync.rs -- FullSyncAdapter impl: calls list_collections for names/types,
fetch_items(&MediaFilter::default()), repo.clear_provider + repo.upsert_items
repository.rs -- SqliteLibraryRepository impl of ILibraryRepository
scheduler.rs -- tokio interval task; 10s startup delay (hardcoded); reads interval from
app_settings on each tick via AppSettingsRepository
```
**AppState** gains:
```rust
library_sync_adapter: Arc<dyn LibrarySyncAdapter>,
library_repo: Arc<dyn ILibraryRepository>,
```
### Sync Concurrency Guard
Before starting a sync for a provider, the scheduler and `POST /library/sync` handler both call `repo.is_sync_running(provider_id)`. If `true`, the scheduler skips that provider for this tick; the HTTP endpoint returns **409 Conflict** with body `{ "error": "sync already running for provider" }`. This prevents the truncate+insert race.
### Admin Settings
- `GET /admin/settings` — returns `app_settings` rows as parsed JSON object. Requires `is_admin = true` (`AdminUser` extractor).
- `PUT /admin/settings` — partial update (only provided keys updated). Requires `is_admin = true`. Scheduler reads new value on next tick.
### Library API Routes (all require authenticated user)
| Endpoint | Notes |
|---|---|
| `GET /library/items?type=&series[]=&collection=&genre=&decade=&min_duration=&max_duration=&search=&provider=&offset=0&limit=50` | DB-backed; returns `{ items: LibraryItemResponse[], total: u32 }` |
| `GET /library/items/:id` | Single item |
| `GET /library/collections?provider=` | `{ id, name, collection_type }[]` from DB |
| `GET /library/series?provider=` | `String[]` from DB |
| `GET /library/genres?type=&provider=` | `String[]` from DB |
| `GET /library/sync/status` | `LibrarySyncLogEntry[]` (latest per provider) |
| `POST /library/sync` | Fires sync; 409 if already running; requires `is_admin = true` |
| `GET /admin/settings` | `{ key: value }` map (parsed); requires `is_admin = true` |
| `PUT /admin/settings` | Partial update; requires `is_admin = true` |
**Existing library route API contract is unchanged** for all params except `offset`/`limit` (new). Frontend `use-library.ts` hooks continue working without modification.
---
## Frontend Architecture
### New route: `/library`
Added to main nav alongside Dashboard and TV.
```
app/(main)/library/
page.tsx -- layout, search/filter state, pagination state, multi-select state
components/
library-sidebar.tsx -- provider picker, type, genre chips, series picker, decade, duration range
library-grid.tsx -- paginated grid of LibraryItemCard
library-item-card.tsx -- thumbnail (with broken-image fallback placeholder), title,
duration badge, content type, checkbox
schedule-from-library-dialog.tsx -- modal (see flow below)
add-to-block-dialog.tsx -- modal (see flow below)
sync-status-bar.tsx -- "Last synced 2h ago · Jellyfin" strip at top
```
### New hooks
```
hooks/use-library-search.ts -- useLibrarySearch(filter, page): wraps GET /library/items with
offset/limit pagination. Query key: ["library", "search", filter, page].
onSuccess of useTriggerSync: invalidate ["library", "search"] and ["library", "sync"].
hooks/use-library-sync.ts -- useLibrarySyncStatus() → ["library", "sync"],
useTriggerSync() → POST /library/sync; on success invalidates
["library", "search"] and ["library", "sync"]
hooks/use-admin-settings.ts -- useAdminSettings(), useUpdateAdminSettings()
```
Existing `use-library.ts` and its four hooks (`useCollections`, `useSeries`, `useGenres`, `useLibraryItems`) are **unchanged** — still used by `AlgorithmicFilterEditor` in the block editor.
### Schedule From Library Flow
1. User selects one or more items → floating action bar at bottom
2. "Schedule on channel" → `ScheduleFromLibraryDialog` modal
3. Modal fields (in order — time/days/strategy disabled until channel is selected):
- **Channel** picker (required; enables remaining fields once selected)
- **Days**: Mon–Sun checkboxes
- **Time**: `NaiveTime` input interpreted in the selected channel's timezone. Timezone label displayed inline (e.g. "20:00 Europe/Warsaw"). Disabled until channel is selected.
- **Duration**: For single item, defaults to `ceil(duration_secs / 60)` minutes shown in UI. For multi-item, user sets manually. Rounding to nearest minute shown explicitly (e.g. "1h 35m (rounded from 1h 34m 47s)").
- **Fill strategy**: Sequential (default for episodic) | Random | Best Fit
4. Preview: *"3 blocks will be created on [Channel] — Mon/Wed/Fri at 20:00 [Europe/Warsaw], Sequential"*
5. Confirm → `PUT /channels/:id` merging new `ProgrammingBlock` entries into `schedule_config.day_blocks`:
- Series / episodic: **Algorithmic** block with `series_names: [series]`
- Specific item(s): **Manual** block with those item IDs
### Add To Block Flow
1. User selects items → "Add to block" from action bar
2. `AddToBlockDialog`:
- Pick channel
- Pick existing **manual** block: populated from `useChannel(id)` by collecting all blocks across all days with `content.type === "manual"`, **deduplicated by block `id`** (same block appearing Mon + Wed shown once)
3. Confirm → appends item IDs to that block. Since the same block object (by `id`) may appear in multiple days in `schedule_config.day_blocks`, the PUT updates **all day entries that contain that block id** — the block is mutated wherever it appears, consistently.
### Admin Settings UI
Settings panel (cog icon in dashboard header, alongside existing transcode settings) gains a "Library sync" section:
- Number input: "Sync interval (hours)"
- "Sync now" button (visible to admin users only; calls `POST /library/sync`; disabled + shows spinner while running)
- Status: "Last synced: [time] · [N] items" per provider from `GET /library/sync/status`
---
## Key Files Modified
**Backend:**
- `domain/src/lib.rs` — add `library` module
- `domain/src/library.rs` — new: `LibraryItem`, `LibraryCollection`, `LibrarySyncResult`, `LibrarySyncAdapter` trait, `ILibraryRepository` trait, `LibrarySearchFilter`, `LibrarySyncLogEntry`
- `infra/src/library/full_sync.rs` — `FullSyncAdapter` impl
- `infra/src/library/repository.rs` — `SqliteLibraryRepository` impl
- `infra/src/library/scheduler.rs` — tokio interval task, 10s startup delay
- `api/src/routes/library.rs` — DB-backed handlers + sync/admin routes
- `api/src/routes/mod.rs` — wire admin settings routes
- `api/src/main.rs` — start sync scheduler task
- `api/src/state.rs` — add `library_sync_adapter: Arc<dyn LibrarySyncAdapter>`, `library_repo: Arc<dyn ILibraryRepository>`
- `migrations_sqlite/20260319000002_add_library_tables.sql`
- `migrations_sqlite/20260319000003_add_app_settings.sql`
**Frontend:**
- `lib/types.ts` — add `LibraryItem`, `LibraryCollection`, `SyncLogEntry`, `AdminSettings`
- `lib/api.ts` — add `api.library.items(filter, page)`, `api.library.syncStatus()`, `api.library.triggerSync()`, `api.admin.getSettings()`, `api.admin.updateSettings(partial)`
- `app/(main)/layout.tsx` — add Library nav link
- New files per structure above
---
## Verification
1. **Sync**: `POST /library/sync` → 200. `GET /library/sync/status` shows `done` with item count. `library_items` rows in DB have `collection_name` and `thumbnail_url` populated.
2. **Sync dedup**: Second `POST /library/sync` while first is running → 409 Conflict.
3. **Library API pagination**: `GET /library/items?offset=0&limit=10` returns 10 items + `total`. `?offset=10&limit=10` returns next page.
4. **Provider filter**: `GET /library/items?provider=jellyfin` returns only Jellyfin items.
5. **Collections**: `GET /library/collections` returns `{ id, name, collection_type }` objects.
6. **Admin guard**: `POST /library/sync` and `PUT /admin/settings` with non-admin user → 403.
7. **Admin settings**: `PUT /admin/settings { "library_sync_interval_hours": 2 }` → `GET /admin/settings` returns `{ "library_sync_interval_hours": 2 }` (number). Scheduler uses new interval.
8. **Library UI**: `/library` page loads, sidebar filters update grid, pagination controls work. `sync-status-bar` shows last sync time.
9. **Broken thumbnail**: Item with a broken `thumbnail_url` shows fallback placeholder in `library-item-card`.
10. **Multi-select action bar**: Select 3 items → action bar appears with "Schedule on channel" and "Add to block".
11. **Schedule flow — time gating**: Time input is disabled until channel is selected; timezone shown next to input after channel selected.
12. **Schedule flow — rounding**: Single-item selection shows rounded duration with note in dialog.
13. **Schedule flow — confirm**: Series scheduled → Dashboard shows Algorithmic blocks on correct days with `series_names` filter.
14. **Add to block — dedup**: Block appearing on Mon+Wed shown once in picker. Confirming updates both days.
15. **Cache invalidation**: After `useTriggerSync()` resolves, `["library", "search"]` and `["library", "sync"]` query keys are invalidated, grid refreshes.
16. **Block editor unchanged**: `AlgorithmicFilterEditor` works; `useLibraryItems` in `use-library.ts` unchanged.
17. **Regression**: `cargo test` passes.

317
k-tv-backend/Cargo.lock generated
View File

@@ -78,16 +78,23 @@ dependencies = [
"chrono", "chrono",
"domain", "domain",
"dotenvy", "dotenvy",
"handlebars",
"infra", "infra",
"k-core", "k-core",
"rand 0.8.5",
"reqwest",
"serde", "serde",
"serde_json", "serde_json",
"serde_qs",
"thiserror 2.0.17", "thiserror 2.0.17",
"time", "time",
"tokio", "tokio",
"tokio-stream",
"tokio-util",
"tower", "tower",
"tower-http", "tower-http",
"tracing", "tracing",
"tracing-subscriber",
"uuid", "uuid",
] ]
@@ -534,14 +541,38 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "darling"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core 0.20.11",
"darling_macro 0.20.11",
]
[[package]] [[package]]
name = "darling" name = "darling"
version = "0.21.3" version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0"
dependencies = [ dependencies = [
"darling_core", "darling_core 0.21.3",
"darling_macro", "darling_macro 0.21.3",
]
[[package]]
name = "darling_core"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn",
] ]
[[package]] [[package]]
@@ -558,13 +589,24 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core 0.20.11",
"quote",
"syn",
]
[[package]] [[package]]
name = "darling_macro" name = "darling_macro"
version = "0.21.3" version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
dependencies = [ dependencies = [
"darling_core", "darling_core 0.21.3",
"quote", "quote",
"syn", "syn",
] ]
@@ -596,6 +638,37 @@ dependencies = [
"serde_core", "serde_core",
] ]
[[package]]
name = "derive_builder"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
dependencies = [
"derive_builder_macro",
]
[[package]]
name = "derive_builder_core"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
dependencies = [
"darling 0.20.11",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "derive_builder_macro"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
dependencies = [
"derive_builder_core",
"syn",
]
[[package]] [[package]]
name = "digest" name = "digest"
version = "0.10.7" version = "0.10.7"
@@ -629,6 +702,7 @@ dependencies = [
"email_address", "email_address",
"rand 0.8.5", "rand 0.8.5",
"serde", "serde",
"serde_json",
"thiserror 2.0.17", "thiserror 2.0.17",
"tokio", "tokio",
"url", "url",
@@ -847,6 +921,21 @@ dependencies = [
"percent-encoding", "percent-encoding",
] ]
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]] [[package]]
name = "futures-channel" name = "futures-channel"
version = "0.3.31" version = "0.3.31"
@@ -920,6 +1009,7 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [ dependencies = [
"futures-channel",
"futures-core", "futures-core",
"futures-io", "futures-io",
"futures-macro", "futures-macro",
@@ -1009,6 +1099,22 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "handlebars"
version = "6.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b3f9296c208515b87bd915a2f5d1163d4b3f863ba83337d7713cf478055948e"
dependencies = [
"derive_builder",
"log",
"num-order",
"pest",
"pest_derive",
"serde",
"serde_json",
"thiserror 2.0.17",
]
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.12.3" version = "0.12.3"
@@ -1370,6 +1476,7 @@ dependencies = [
"async-nats", "async-nats",
"async-trait", "async-trait",
"axum-extra", "axum-extra",
"base64 0.22.1",
"chrono", "chrono",
"domain", "domain",
"futures-core", "futures-core",
@@ -1387,6 +1494,7 @@ dependencies = [
"tracing", "tracing",
"url", "url",
"uuid", "uuid",
"walkdir",
] ]
[[package]] [[package]]
@@ -1576,6 +1684,28 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
[[package]]
name = "mcp"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"chrono",
"domain",
"dotenvy",
"infra",
"k-core",
"rmcp",
"schemars 0.8.22",
"serde",
"serde_json",
"thiserror 2.0.17",
"tokio",
"tracing",
"tracing-subscriber",
"uuid",
]
[[package]] [[package]]
name = "md-5" name = "md-5"
version = "0.10.6" version = "0.10.6"
@@ -1711,6 +1841,21 @@ dependencies = [
"num-traits", "num-traits",
] ]
[[package]]
name = "num-modular"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17bb261bf36fa7d83f4c294f834e91256769097b3cb505d44831e0a179ac647f"
[[package]]
name = "num-order"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "537b596b97c40fcf8056d153049eb22f481c17ebce72a513ec9286e4986d1bb6"
dependencies = [
"num-modular",
]
[[package]] [[package]]
name = "num-traits" name = "num-traits"
version = "0.2.19" version = "0.2.19"
@@ -1727,7 +1872,7 @@ version = "5.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51e219e79014df21a225b1860a479e2dcd7cbd9130f4defd4bd0e191ea31d67d" checksum = "51e219e79014df21a225b1860a479e2dcd7cbd9130f4defd4bd0e191ea31d67d"
dependencies = [ dependencies = [
"base64 0.21.7", "base64 0.22.1",
"chrono", "chrono",
"getrandom 0.2.16", "getrandom 0.2.16",
"http", "http",
@@ -1913,6 +2058,12 @@ dependencies = [
"subtle", "subtle",
] ]
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]] [[package]]
name = "pem" name = "pem"
version = "3.0.6" version = "3.0.6"
@@ -1938,6 +2089,49 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "pest"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0848c601009d37dfa3430c4666e147e49cdcf1b92ecd3e63657d8a5f19da662"
dependencies = [
"memchr",
"ucd-trie",
]
[[package]]
name = "pest_derive"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11f486f1ea21e6c10ed15d5a7c77165d0ee443402f0780849d1768e7d9d6fe77"
dependencies = [
"pest",
"pest_generator",
]
[[package]]
name = "pest_generator"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8040c4647b13b210a963c1ed407c1ff4fdfa01c31d6d2a098218702e6664f94f"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pest_meta"
version = "2.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89815c69d36021a140146f26659a81d6c2afa33d216d736dd4be5381a7362220"
dependencies = [
"pest",
"sha2",
]
[[package]] [[package]]
name = "phf" name = "phf"
version = "0.12.1" version = "0.12.1"
@@ -2339,6 +2533,38 @@ dependencies = [
"windows-sys 0.52.0", "windows-sys 0.52.0",
] ]
[[package]]
name = "rmcp"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33a0110d28bd076f39e14bfd5b0340216dd18effeb5d02b43215944cc3e5c751"
dependencies = [
"base64 0.21.7",
"chrono",
"futures",
"paste",
"pin-project-lite",
"rmcp-macros",
"schemars 0.8.22",
"serde",
"serde_json",
"thiserror 2.0.17",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "rmcp-macros"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6e2b2fd7497540489fa2db285edd43b7ed14c49157157438664278da6e42a7a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]] [[package]]
name = "rsa" name = "rsa"
version = "0.9.9" version = "0.9.9"
@@ -2466,6 +2692,15 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]] [[package]]
name = "schannel" name = "schannel"
version = "0.1.28" version = "0.1.28"
@@ -2475,6 +2710,18 @@ dependencies = [
"windows-sys 0.61.2", "windows-sys 0.61.2",
] ]
[[package]]
name = "schemars"
version = "0.8.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615"
dependencies = [
"dyn-clone",
"schemars_derive",
"serde",
"serde_json",
]
[[package]] [[package]]
name = "schemars" name = "schemars"
version = "0.9.0" version = "0.9.0"
@@ -2499,6 +2746,18 @@ dependencies = [
"serde_json", "serde_json",
] ]
[[package]]
name = "schemars_derive"
version = "0.8.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
"syn",
]
[[package]] [[package]]
name = "scopeguard" name = "scopeguard"
version = "1.2.0" version = "1.2.0"
@@ -2588,6 +2847,17 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "serde_derive_internals"
version = "0.29.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.148" version = "1.0.148"
@@ -2630,6 +2900,17 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "serde_qs"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd34f36fe4c5ba9654417139a9b3a20d2e1de6012ee678ad14d240c22c78d8d6"
dependencies = [
"percent-encoding",
"serde",
"thiserror 1.0.69",
]
[[package]] [[package]]
name = "serde_repr" name = "serde_repr"
version = "0.1.20" version = "0.1.20"
@@ -2678,7 +2959,7 @@ version = "3.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c"
dependencies = [ dependencies = [
"darling", "darling 0.21.3",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn",
@@ -3267,6 +3548,7 @@ dependencies = [
"futures-core", "futures-core",
"pin-project-lite", "pin-project-lite",
"tokio", "tokio",
"tokio-util",
] ]
[[package]] [[package]]
@@ -3434,6 +3716,12 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
[[package]]
name = "ucd-trie"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
[[package]] [[package]]
name = "unicode-bidi" name = "unicode-bidi"
version = "0.3.18" version = "0.3.18"
@@ -3526,6 +3814,16 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]] [[package]]
name = "want" name = "want"
version = "0.3.1" version = "0.3.1"
@@ -3662,6 +3960,15 @@ dependencies = [
"wasite", "wasite",
] ]
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.61.2",
]
[[package]] [[package]]
name = "windows-core" name = "windows-core"
version = "0.62.2" version = "0.62.2"

View File

@@ -1,3 +1,3 @@
[workspace] [workspace]
members = ["domain", "infra", "api"] members = ["domain", "infra", "api", "mcp"]
resolver = "2" resolver = "2"

View File

@@ -10,8 +10,12 @@ FROM debian:bookworm-slim
WORKDIR /app WORKDIR /app
# Install OpenSSL (required for many Rust networking crates) and CA certificates # Install OpenSSL, CA certs, and ffmpeg (provides ffprobe for local-files duration scanning)
RUN apt-get update && apt-get install -y libssl3 ca-certificates && rm -rf /var/lib/apt/lists/* RUN apt-get update && apt-get install -y --no-install-recommends \
libssl3 \
ca-certificates \
ffmpeg \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /app/target/release/api . COPY --from=builder /app/target/release/api .

View File

@@ -11,7 +11,7 @@ The backend is a Cargo workspace with three crates following Hexagonal (Ports &
``` ```
k-tv-backend/ k-tv-backend/
├── domain/ # Pure business logic — no I/O, no frameworks ├── domain/ # Pure business logic — no I/O, no frameworks
├── infra/ # Adapters: SQLite/Postgres repositories, Jellyfin HTTP client ├── infra/ # Adapters: SQLite/Postgres repositories, Jellyfin HTTP client, local files
└── api/ # Axum HTTP server — routes, DTOs, startup wiring └── api/ # Axum HTTP server — routes, DTOs, startup wiring
``` ```
@@ -79,11 +79,20 @@ OIDC state (CSRF token, PKCE verifier, nonce) is stored in a short-lived encrypt
If Jellyfin variables are not set, the server starts normally but schedule generation endpoints return an error. Channel CRUD and auth still work. If Jellyfin variables are not set, the server starts normally but schedule generation endpoints return an error. Channel CRUD and auth still work.
### Local Files (optional — requires `local-files` feature)
| Variable | Default | Description |
|----------|---------|-------------|
| `LOCAL_FILES_DIR` | — | Absolute path to local video library root. Enables the local-files provider when set. |
| `TRANSCODE_DIR` | — | Directory for FFmpeg HLS transcode cache. Enables transcoding when set. |
| `TRANSCODE_CLEANUP_TTL_HOURS` | `24` | Hours after last access before a transcode cache entry is deleted. |
### CORS & Production ### CORS & Production
| Variable | Default | Description | | Variable | Default | Description |
|----------|---------|-------------| |----------|---------|-------------|
| `CORS_ALLOWED_ORIGINS` | `http://localhost:5173` | Comma-separated allowed origins | | `CORS_ALLOWED_ORIGINS` | `http://localhost:5173` | Comma-separated allowed origins |
| `BASE_URL` | `http://localhost:3000` | Public base URL used to build stream URLs for local files |
| `PRODUCTION` | `false` | Enforces minimum secret lengths when `true` | | `PRODUCTION` | `false` | Enforces minimum secret lengths when `true` |
## Feature Flags ## Feature Flags
@@ -100,6 +109,7 @@ default = ["sqlite", "auth-jwt", "jellyfin"]
| `auth-jwt` | JWT Bearer token authentication | | `auth-jwt` | JWT Bearer token authentication |
| `auth-oidc` | OpenID Connect integration | | `auth-oidc` | OpenID Connect integration |
| `jellyfin` | Jellyfin media provider adapter | | `jellyfin` | Jellyfin media provider adapter |
| `local-files` | Local filesystem media provider with optional FFmpeg transcoding |
## API Reference ## API Reference
@@ -137,11 +147,49 @@ All endpoints are under `/api/v1/`. Endpoints marked **Bearer** require an `Auth
| `GET` | `/channels/:id/epg?from=&until=` | Bearer | EPG slots overlapping a time window (RFC3339 datetimes) | | `GET` | `/channels/:id/epg?from=&until=` | Bearer | EPG slots overlapping a time window (RFC3339 datetimes) |
| `GET` | `/channels/:id/stream` | Bearer | `307` redirect to the current item's stream URL — `204` if no-signal | | `GET` | `/channels/:id/stream` | Bearer | `307` redirect to the current item's stream URL — `204` if no-signal |
### Other ### Library
All endpoints require Bearer auth and return `501 Not Implemented` if the active provider lacks the relevant capability.
| Method | Path | Auth | Description | | Method | Path | Auth | Description |
|--------|------|------|-------------| |--------|------|------|-------------|
| `GET` | `/config` | — | Server configuration flags | | `GET` | `/library/collections` | Bearer | List media collections/libraries |
| `GET` | `/library/series` | Bearer | List TV series (supports `?collection=`, `?provider=`) |
| `GET` | `/library/genres` | Bearer | List genres (supports `?type=`, `?provider=`) |
| `GET` | `/library/items` | Bearer | Search/filter media items (supports `?q=`, `?type=`, `?series[]=`, `?collection=`, `?limit=`, `?strategy=`, `?provider=`) |
### Files (local-files feature only)
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/files/stream/:id` | — | Range-header video streaming for local files |
| `POST` | `/files/rescan` | Bearer | Trigger library rescan, returns `{ items_found }` |
| `GET` | `/files/transcode/:id/playlist.m3u8` | — | HLS transcode playlist |
| `GET` | `/files/transcode/:id/:segment` | — | HLS transcode segment |
| `GET` | `/files/transcode-settings` | Bearer | Get transcode settings (`cleanup_ttl_hours`) |
| `PUT` | `/files/transcode-settings` | Bearer | Update transcode settings |
| `GET` | `/files/transcode-stats` | Bearer | Cache stats `{ cache_size_bytes, item_count }` |
| `DELETE` | `/files/transcode-cache` | Bearer | Clear the transcode cache |
### IPTV
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/iptv/playlist.m3u` | `?token=` | M3U playlist of all channels |
| `GET` | `/iptv/epg.xml` | `?token=` | XMLTV EPG for all channels |
### Admin
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/admin/logs` | `?token=` | SSE stream of live server log lines (`{ level, target, message, timestamp }`) |
| `GET` | `/admin/activity` | Bearer | Recent 50 in-app activity events |
### Config
| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/config` | — | Server configuration flags and provider capabilities |
## Examples ## Examples
@@ -267,6 +315,21 @@ curl -s -I http://localhost:3000/api/v1/channels/<id>/stream \
### Channel ### Channel
A named broadcast channel owned by a user. Holds a `schedule_config` (the programming template) and a `recycle_policy`. A named broadcast channel owned by a user. Holds a `schedule_config` (the programming template) and a `recycle_policy`.
Channel fields:
| Field | Description |
|-------|-------------|
| `access_mode` | `public` / `password_protected` / `account_required` / `owner_only` |
| `access_password` | Hashed password when `access_mode` is `password_protected` |
| `logo` | URL or inline SVG for the watermark overlay |
| `logo_position` | `top_right` (default) / `top_left` / `bottom_left` / `bottom_right` |
| `logo_opacity` | 0.0–1.0, default 1.0 |
| `auto_schedule` | When `true`, the server auto-regenerates the schedule when it expires |
| `webhook_url` | HTTP endpoint called on domain events |
| `webhook_poll_interval_secs` | Polling interval for webhook delivery |
| `webhook_body_template` | Handlebars template for the webhook POST body |
| `webhook_headers` | JSON object of extra HTTP headers sent with webhooks |
### ScheduleConfig ### ScheduleConfig
The shareable programming template: an ordered list of `ProgrammingBlock`s. Channels do not need to cover all 24 hours — gaps are valid and produce a no-signal state. The shareable programming template: an ordered list of `ProgrammingBlock`s. Channels do not need to cover all 24 hours — gaps are valid and produce a no-signal state.
@@ -286,6 +349,8 @@ Provider-agnostic filter used by algorithmic blocks:
| `tags` | Provider tag strings | | `tags` | Provider tag strings |
| `min_duration_secs` / `max_duration_secs` | Duration bounds for item selection | | `min_duration_secs` / `max_duration_secs` | Duration bounds for item selection |
| `collections` | Abstract groupings (Jellyfin library IDs, Plex sections, folder paths, etc.) | | `collections` | Abstract groupings (Jellyfin library IDs, Plex sections, folder paths, etc.) |
| `series_names` | List of TV series names (OR-combined) |
| `search_term` | Free-text search term for library browsing |
### FillStrategy ### FillStrategy
How an algorithmic block fills its time budget: How an algorithmic block fills its time budget:
@@ -305,6 +370,22 @@ Controls when previously aired items become eligible again:
| `cooldown_generations` | Don't replay within this many schedule generations | | `cooldown_generations` | Don't replay within this many schedule generations |
| `min_available_ratio` | Always keep at least this fraction (0.0–1.0) of the matching pool selectable, even if their cooldown hasn't expired. Prevents small libraries from running dry. | | `min_available_ratio` | Always keep at least this fraction (0.0–1.0) of the matching pool selectable, even if their cooldown hasn't expired. Prevents small libraries from running dry. |
### ProviderCapabilities
`GET /config` returns `providers[]` with per-provider capabilities. Library endpoints return `501` if the active provider lacks the relevant capability.
| Capability | Description |
|------------|-------------|
| `collections` | Provider can list/filter by collections |
| `series` | Provider exposes TV series groupings |
| `genres` | Provider exposes genre metadata |
| `tags` | Provider supports tag filtering |
| `decade` | Provider supports decade filtering |
| `search` | Provider supports free-text search |
| `streaming_protocol` | `hls` or `direct_file` |
| `rescan` | Provider supports triggering a library rescan |
| `transcode` | FFmpeg transcoding is available (local-files only) |
### No-signal state ### No-signal state
`GET /channels/:id/now` and `GET /channels/:id/stream` return `204 No Content` when the current time falls in a gap between blocks. The frontend should display static / noise in this case — matching the broadcast TV experience. `GET /channels/:id/now` and `GET /channels/:id/stream` return `204 No Content` when the current time falls in a gap between blocks. The frontend should display static / noise in this case — matching the broadcast TV experience.
@@ -338,6 +419,9 @@ cargo build -F sqlite,auth-jwt,auth-oidc,jellyfin
# PostgreSQL variant # PostgreSQL variant
cargo build --no-default-features -F postgres,auth-jwt,jellyfin cargo build --no-default-features -F postgres,auth-jwt,jellyfin
# With local files + transcoding
cargo build -F sqlite,auth-jwt,jellyfin,local-files
``` ```
### Docker ### Docker
@@ -357,7 +441,8 @@ k-tv-backend/
│ │ # ScheduledSlot, MediaItem, PlaybackRecord, User, ... │ │ # ScheduledSlot, MediaItem, PlaybackRecord, User, ...
│ ├── value_objects.rs # MediaFilter, FillStrategy, RecyclePolicy, │ ├── value_objects.rs # MediaFilter, FillStrategy, RecyclePolicy,
│ │ # MediaItemId, ContentType, Email, ... │ │ # MediaItemId, ContentType, Email, ...
│ ├── ports.rs # IMediaProvider trait │ ├── ports.rs # IMediaProvider trait, ProviderCapabilities
│ ├── events.rs # Domain event types
│ ├── repositories.rs # ChannelRepository, ScheduleRepository, UserRepository │ ├── repositories.rs # ChannelRepository, ScheduleRepository, UserRepository
│ ├── services.rs # ChannelService, ScheduleEngineService, UserService │ ├── services.rs # ChannelService, ScheduleEngineService, UserService
│ └── errors.rs # DomainError │ └── errors.rs # DomainError
@@ -366,7 +451,9 @@ k-tv-backend/
│ ├── channel_repository.rs # SQLite + Postgres ChannelRepository adapters │ ├── channel_repository.rs # SQLite + Postgres ChannelRepository adapters
│ ├── schedule_repository.rs # SQLite + Postgres ScheduleRepository adapters │ ├── schedule_repository.rs # SQLite + Postgres ScheduleRepository adapters
│ ├── user_repository.rs # SQLite + Postgres UserRepository adapters │ ├── user_repository.rs # SQLite + Postgres UserRepository adapters
│ ├── activity_log_repository/ # Activity log persistence
│ ├── jellyfin.rs # Jellyfin IMediaProvider adapter │ ├── jellyfin.rs # Jellyfin IMediaProvider adapter
│ ├── local_files/ # Local filesystem provider + FFmpeg transcoder
│ ├── auth/ │ ├── auth/
│ │ ├── jwt.rs # JWT create + validate │ │ ├── jwt.rs # JWT create + validate
│ │ └── oidc.rs # OIDC flow (stateless cookie state) │ │ └── oidc.rs # OIDC flow (stateless cookie state)
@@ -376,13 +463,22 @@ k-tv-backend/
├── api/src/ ├── api/src/
│ ├── routes/ │ ├── routes/
│ │ ├── auth.rs # /auth/* endpoints │ │ ├── auth.rs # /auth/* endpoints
│ │ ├── channels.rs # /channels/* endpoints (CRUD, EPG, broadcast) │ │ ├── channels/ # /channels/* endpoints (CRUD, EPG, broadcast)
│ │ ── config.rs # /config endpoint │ │ ── admin.rs # /admin/logs (SSE), /admin/activity
│ │ ├── config.rs # /config endpoint
│ │ ├── files.rs # /files/* endpoints (local-files feature)
│ │ ├── iptv.rs # /iptv/playlist.m3u, /iptv/epg.xml
│ │ └── library.rs # /library/* endpoints
│ ├── config.rs # Config::from_env() │ ├── config.rs # Config::from_env()
│ ├── state.rs # AppState │ ├── state.rs # AppState
│ ├── extractors.rs # CurrentUser (JWT Bearer extractor) │ ├── extractors.rs # CurrentUser (JWT Bearer extractor)
│ ├── error.rs # ApiError → HTTP status mapping │ ├── error.rs # ApiError → HTTP status mapping
│ ├── dto.rs # All request + response types │ ├── dto.rs # All request + response types
│ ├── events.rs # SSE event broadcasting
│ ├── log_layer.rs # Tracing layer → SSE log stream
│ ├── poller.rs # Webhook polling task
│ ├── scheduler.rs # Auto-schedule renewal task
│ ├── webhook.rs # Webhook delivery
│ └── main.rs # Startup wiring │ └── main.rs # Startup wiring
├── migrations_sqlite/ ├── migrations_sqlite/

View File

@@ -11,6 +11,12 @@ postgres = ["infra/postgres"]
auth-oidc = ["infra/auth-oidc"] auth-oidc = ["infra/auth-oidc"]
auth-jwt = ["infra/auth-jwt"] auth-jwt = ["infra/auth-jwt"]
jellyfin = ["infra/jellyfin"] jellyfin = ["infra/jellyfin"]
local-files = ["infra/local-files", "dep:tokio-util"]
[profile.release]
strip = true
lto = true
codegen-units = 1
[dependencies] [dependencies]
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [ k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
@@ -24,7 +30,10 @@ infra = { path = "../infra", default-features = false, features = ["sqlite"] }
# Web framework # Web framework
axum = { version = "0.8.8", features = ["macros"] } axum = { version = "0.8.8", features = ["macros"] }
axum-extra = { version = "0.10", features = ["cookie-private", "cookie-key-expansion"] } axum-extra = { version = "0.10", features = [
"cookie-private",
"cookie-key-expansion",
] }
tower = "0.5.2" tower = "0.5.2"
tower-http = { version = "0.6.2", features = ["cors", "trace"] } tower-http = { version = "0.6.2", features = ["cors", "trace"] }
@@ -34,6 +43,8 @@ tokio = { version = "1.48.0", features = ["full"] }
# Serialization # Serialization
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
serde_qs = "0.13"
rand = "0.8"
# Error handling # Error handling
thiserror = "2.0.17" thiserror = "2.0.17"
@@ -45,7 +56,12 @@ uuid = { version = "1.19.0", features = ["v4", "serde"] }
# Logging # Logging
tracing = "0.1" tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] }
tokio-stream = { version = "0.1", features = ["sync"] }
reqwest = { version = "0.12", features = ["json"] }
handlebars = "6"
async-trait = "0.1" async-trait = "0.1"
dotenvy = "0.15.7" dotenvy = "0.15.7"
time = "0.3" time = "0.3"
tokio-util = { version = "0.7", features = ["io"], optional = true }

View File

@@ -3,10 +3,18 @@
//! Loads configuration from environment variables. //! Loads configuration from environment variables.
use std::env; use std::env;
use std::path::PathBuf;
/// Where runtime configuration is read from.
///
/// Selected via the `CONFIG_SOURCE` environment variable:
/// `"db"`/`"DB"` selects [`ConfigSource::Db`]; any other value (or unset)
/// falls back to [`ConfigSource::Env`].
#[derive(Debug, Clone, PartialEq)]
pub enum ConfigSource {
    /// Configuration is read from environment variables (the default).
    Env,
    /// Configuration is read from the database.
    Db,
}
/// Application configuration loaded from environment variables /// Application configuration loaded from environment variables
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Config { pub struct Config {
pub config_source: ConfigSource,
pub database_url: String, pub database_url: String,
pub cookie_secret: String, pub cookie_secret: String,
pub cors_allowed_origins: Vec<String>, pub cors_allowed_origins: Vec<String>,
@@ -28,14 +36,29 @@ pub struct Config {
pub jwt_issuer: Option<String>, pub jwt_issuer: Option<String>,
pub jwt_audience: Option<String>, pub jwt_audience: Option<String>,
pub jwt_expiry_hours: u64, pub jwt_expiry_hours: u64,
pub jwt_refresh_expiry_days: u64,
/// Whether the application is running in production mode /// Whether the application is running in production mode
pub is_production: bool, pub is_production: bool,
/// Whether new user registration is open. Set ALLOW_REGISTRATION=false to lock down.
pub allow_registration: bool,
// Jellyfin media provider // Jellyfin media provider
pub jellyfin_base_url: Option<String>, pub jellyfin_base_url: Option<String>,
pub jellyfin_api_key: Option<String>, pub jellyfin_api_key: Option<String>,
pub jellyfin_user_id: Option<String>, pub jellyfin_user_id: Option<String>,
/// Root directory for the local-files provider. Set `LOCAL_FILES_DIR` to enable.
pub local_files_dir: Option<PathBuf>,
/// Directory for FFmpeg HLS transcode cache. Set `TRANSCODE_DIR` to enable transcoding.
pub transcode_dir: Option<PathBuf>,
/// How long (hours) to keep transcode cache entries before cleanup. Default 24.
pub transcode_cleanup_ttl_hours: u32,
/// Public base URL of this API server (used to build IPTV stream URLs).
pub base_url: String,
} }
impl Config { impl Config {
@@ -95,16 +118,42 @@ impl Config {
.and_then(|s| s.parse().ok()) .and_then(|s| s.parse().ok())
.unwrap_or(24); .unwrap_or(24);
let jwt_refresh_expiry_days = env::var("JWT_REFRESH_EXPIRY_DAYS")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(30);
let is_production = env::var("PRODUCTION") let is_production = env::var("PRODUCTION")
.or_else(|_| env::var("RUST_ENV")) .or_else(|_| env::var("RUST_ENV"))
.map(|v| v.to_lowercase() == "production" || v == "1" || v == "true") .map(|v| v.to_lowercase() == "production" || v == "1" || v == "true")
.unwrap_or(false); .unwrap_or(false);
let allow_registration = env::var("ALLOW_REGISTRATION")
.map(|v| !(v == "false" || v == "0"))
.unwrap_or(true);
let jellyfin_base_url = env::var("JELLYFIN_BASE_URL").ok(); let jellyfin_base_url = env::var("JELLYFIN_BASE_URL").ok();
let jellyfin_api_key = env::var("JELLYFIN_API_KEY").ok(); let jellyfin_api_key = env::var("JELLYFIN_API_KEY").ok();
let jellyfin_user_id = env::var("JELLYFIN_USER_ID").ok(); let jellyfin_user_id = env::var("JELLYFIN_USER_ID").ok();
let local_files_dir = env::var("LOCAL_FILES_DIR").ok().map(PathBuf::from);
let transcode_dir = env::var("TRANSCODE_DIR").ok().map(PathBuf::from);
let transcode_cleanup_ttl_hours = env::var("TRANSCODE_CLEANUP_TTL_HOURS")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(24);
let base_url = env::var("BASE_URL")
.unwrap_or_else(|_| format!("http://localhost:{}", port));
let config_source = match env::var("CONFIG_SOURCE").as_deref() {
Ok("db") | Ok("DB") => ConfigSource::Db,
_ => ConfigSource::Env,
};
Self { Self {
config_source,
host, host,
port, port,
database_url, database_url,
@@ -122,10 +171,16 @@ impl Config {
jwt_issuer, jwt_issuer,
jwt_audience, jwt_audience,
jwt_expiry_hours, jwt_expiry_hours,
jwt_refresh_expiry_days,
is_production, is_production,
allow_registration,
jellyfin_base_url, jellyfin_base_url,
jellyfin_api_key, jellyfin_api_key,
jellyfin_user_id, jellyfin_user_id,
local_files_dir,
transcode_dir,
transcode_cleanup_ttl_hours,
base_url,
} }
} }
} }

View File

@@ -0,0 +1,36 @@
use std::sync::Arc;
use std::time::Duration as StdDuration;
use crate::config::Config;
use infra::run_migrations;
use k_core::db::DatabasePool;
/// Connect to the database selected by the enabled backend feature(s),
/// run pending migrations, and return a shared connection pool.
///
/// When exactly one of the `sqlite`/`postgres` features is compiled in, the
/// backend is fixed at compile time. With both features enabled, the backend
/// is chosen at runtime by inspecting the URL scheme of `config.database_url`.
///
/// # Errors
/// Propagates connection and migration failures via `anyhow`.
pub async fn init_database(config: &Config) -> anyhow::Result<Arc<DatabasePool>> {
    tracing::info!("Connecting to database: {}", config.database_url);

    #[cfg(all(feature = "sqlite", not(feature = "postgres")))]
    let db_type = k_core::db::DbType::Sqlite;

    #[cfg(all(feature = "postgres", not(feature = "sqlite")))]
    let db_type = k_core::db::DbType::Postgres;

    // Both features enabled: fall back to URL inspection at runtime.
    #[cfg(all(feature = "sqlite", feature = "postgres"))]
    let db_type = if config.database_url.starts_with("postgres") {
        k_core::db::DbType::Postgres
    } else {
        k_core::db::DbType::Sqlite
    };

    let pool_settings = k_core::db::DatabaseConfig {
        db_type,
        url: config.database_url.clone(),
        max_connections: config.db_max_connections,
        min_connections: config.db_min_connections,
        // Fixed 30s acquire timeout; not currently configurable.
        acquire_timeout: StdDuration::from_secs(30),
    };

    let pool = k_core::db::connect(&pool_settings).await?;
    run_migrations(&pool).await?;
    Ok(Arc::new(pool))
}

View File

@@ -15,6 +15,15 @@ pub struct LoginRequest {
pub email: Email, pub email: Email,
/// Password is validated on deserialization (min 8 chars) /// Password is validated on deserialization (min 8 chars)
pub password: Password, pub password: Password,
/// When true, a refresh token is also issued for persistent sessions
#[serde(default)]
pub remember_me: bool,
}
/// Refresh token request.
///
/// Request body for the token-refresh endpoint; a refresh token is only
/// issued when `remember_me` was true at login, or by a prior refresh.
#[derive(Debug, Deserialize)]
pub struct RefreshRequest {
    /// The refresh token previously issued by the server.
    pub refresh_token: String,
} }
/// Register request with validated email and password newtypes /// Register request with validated email and password newtypes
@@ -32,6 +41,7 @@ pub struct UserResponse {
pub id: Uuid, pub id: Uuid,
pub email: String, pub email: String,
pub created_at: DateTime<Utc>, pub created_at: DateTime<Utc>,
pub is_admin: bool,
} }
/// JWT token response /// JWT token response
@@ -40,12 +50,54 @@ pub struct TokenResponse {
pub access_token: String, pub access_token: String,
pub token_type: String, pub token_type: String,
pub expires_in: u64, pub expires_in: u64,
/// Only present when remember_me was true at login, or on token refresh
#[serde(skip_serializing_if = "Option::is_none")]
pub refresh_token: Option<String>,
}
/// Per-provider info returned by `GET /config`.
#[derive(Debug, Serialize)]
pub struct ProviderInfo {
    /// Provider identifier string (as used by `?provider=` filters — TODO confirm).
    pub id: String,
    /// Capability flags advertised by this provider.
    pub capabilities: domain::ProviderCapabilities,
} }
/// System configuration response /// System configuration response
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
pub struct ConfigResponse { pub struct ConfigResponse {
pub allow_registration: bool, pub allow_registration: bool,
/// All registered providers with their capabilities.
pub providers: Vec<ProviderInfo>,
/// Capabilities of the primary provider — kept for backward compatibility.
pub provider_capabilities: domain::ProviderCapabilities,
/// Provider type strings supported by this build (feature-gated).
pub available_provider_types: Vec<String>,
}
// ============================================================================
// Admin DTOs
// ============================================================================
/// An activity log entry returned by GET /admin/activity.
///
/// Mirrors the fields of `domain::ActivityEvent` one-to-one.
#[derive(Debug, Serialize)]
pub struct ActivityEventResponse {
    pub id: Uuid,
    /// When the event was recorded (UTC).
    pub timestamp: DateTime<Utc>,
    /// Event kind discriminator (free-form string from the domain layer).
    pub event_type: String,
    /// Human-readable detail text.
    pub detail: String,
    /// Related channel, if the event concerns one.
    pub channel_id: Option<Uuid>,
}
impl From<domain::ActivityEvent> for ActivityEventResponse {
fn from(e: domain::ActivityEvent) -> Self {
Self {
id: e.id,
timestamp: e.timestamp,
event_type: e.event_type,
detail: e.detail,
channel_id: e.channel_id,
}
}
} }
// ============================================================================ // ============================================================================
@@ -58,6 +110,13 @@ pub struct CreateChannelRequest {
pub description: Option<String>, pub description: Option<String>,
/// IANA timezone, e.g. "UTC" or "America/New_York" /// IANA timezone, e.g. "UTC" or "America/New_York"
pub timezone: String, pub timezone: String,
pub access_mode: Option<domain::AccessMode>,
/// Plain-text password; hashed before storage.
pub access_password: Option<String>,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: Option<u32>,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
} }
/// All fields are optional — only provided fields are updated. /// All fields are optional — only provided fields are updated.
@@ -67,8 +126,23 @@ pub struct UpdateChannelRequest {
pub description: Option<String>, pub description: Option<String>,
pub timezone: Option<String>, pub timezone: Option<String>,
/// Replace the entire schedule config (template import/edit) /// Replace the entire schedule config (template import/edit)
pub schedule_config: Option<domain::ScheduleConfig>, pub schedule_config: Option<domain::ScheduleConfigCompat>,
pub recycle_policy: Option<domain::RecyclePolicy>, pub recycle_policy: Option<domain::RecyclePolicy>,
pub auto_schedule: Option<bool>,
pub access_mode: Option<domain::AccessMode>,
/// Empty string clears the password; non-empty re-hashes.
pub access_password: Option<String>,
/// `Some(None)` = clear logo, `Some(Some(url))` = set logo, `None` = unchanged.
pub logo: Option<Option<String>>,
pub logo_position: Option<domain::LogoPosition>,
pub logo_opacity: Option<f32>,
/// `Some(None)` = clear, `Some(Some(url))` = set, `None` = unchanged.
pub webhook_url: Option<Option<String>>,
pub webhook_poll_interval_secs: Option<u32>,
/// `Some(None)` = clear, `Some(Some(tmpl))` = set, `None` = unchanged.
pub webhook_body_template: Option<Option<String>>,
/// `Some(None)` = clear, `Some(Some(json))` = set, `None` = unchanged.
pub webhook_headers: Option<Option<String>>,
} }
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
@@ -80,6 +154,15 @@ pub struct ChannelResponse {
pub timezone: String, pub timezone: String,
pub schedule_config: domain::ScheduleConfig, pub schedule_config: domain::ScheduleConfig,
pub recycle_policy: domain::RecyclePolicy, pub recycle_policy: domain::RecyclePolicy,
pub auto_schedule: bool,
pub access_mode: domain::AccessMode,
pub logo: Option<String>,
pub logo_position: domain::LogoPosition,
pub logo_opacity: f32,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: u32,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
pub created_at: DateTime<Utc>, pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>, pub updated_at: DateTime<Utc>,
} }
@@ -94,12 +177,49 @@ impl From<domain::Channel> for ChannelResponse {
timezone: c.timezone, timezone: c.timezone,
schedule_config: c.schedule_config, schedule_config: c.schedule_config,
recycle_policy: c.recycle_policy, recycle_policy: c.recycle_policy,
auto_schedule: c.auto_schedule,
access_mode: c.access_mode,
logo: c.logo,
logo_position: c.logo_position,
logo_opacity: c.logo_opacity,
webhook_url: c.webhook_url,
webhook_poll_interval_secs: c.webhook_poll_interval_secs,
webhook_body_template: c.webhook_body_template,
webhook_headers: c.webhook_headers,
created_at: c.created_at, created_at: c.created_at,
updated_at: c.updated_at, updated_at: c.updated_at,
} }
} }
} }
// ============================================================================
// Config history DTOs
// ============================================================================
/// One entry in a channel's config-snapshot history listing.
///
/// Carries only snapshot metadata — the config body itself is not serialized.
#[derive(Debug, Serialize)]
pub struct ConfigSnapshotResponse {
    pub id: Uuid,
    /// Snapshot version number.
    pub version_num: i64,
    /// Optional label (editable via `PatchSnapshotRequest`).
    pub label: Option<String>,
    /// When the snapshot was created (UTC).
    pub created_at: DateTime<Utc>,
}
impl From<domain::ChannelConfigSnapshot> for ConfigSnapshotResponse {
fn from(s: domain::ChannelConfigSnapshot) -> Self {
Self {
id: s.id,
version_num: s.version_num,
label: s.label,
created_at: s.created_at,
}
}
}
/// Body for updating a config snapshot — currently only the label.
#[derive(Debug, Deserialize)]
pub struct PatchSnapshotRequest {
    /// New label for the snapshot.
    // NOTE(review): whether `None` clears the label or leaves it unchanged
    // depends on the handler — confirm against the route implementation.
    pub label: Option<String>,
}
// ============================================================================ // ============================================================================
// EPG / playback DTOs // EPG / playback DTOs
// ============================================================================ // ============================================================================
@@ -110,9 +230,13 @@ pub struct MediaItemResponse {
pub title: String, pub title: String,
pub content_type: domain::ContentType, pub content_type: domain::ContentType,
pub duration_secs: u32, pub duration_secs: u32,
pub description: Option<String>,
pub genres: Vec<String>, pub genres: Vec<String>,
pub year: Option<u16>, pub year: Option<u16>,
pub tags: Vec<String>, pub tags: Vec<String>,
pub series_name: Option<String>,
pub season_number: Option<u32>,
pub episode_number: Option<u32>,
} }
impl From<domain::MediaItem> for MediaItemResponse { impl From<domain::MediaItem> for MediaItemResponse {
@@ -122,9 +246,13 @@ impl From<domain::MediaItem> for MediaItemResponse {
title: i.title, title: i.title,
content_type: i.content_type, content_type: i.content_type,
duration_secs: i.duration_secs, duration_secs: i.duration_secs,
description: i.description,
genres: i.genres, genres: i.genres,
year: i.year, year: i.year,
tags: i.tags, tags: i.tags,
series_name: i.series_name,
season_number: i.season_number,
episode_number: i.episode_number,
} }
} }
} }
@@ -136,6 +264,8 @@ pub struct ScheduledSlotResponse {
pub end_at: DateTime<Utc>, pub end_at: DateTime<Utc>,
pub item: MediaItemResponse, pub item: MediaItemResponse,
pub source_block_id: Uuid, pub source_block_id: Uuid,
#[serde(default)]
pub block_access_mode: domain::AccessMode,
} }
impl From<domain::ScheduledSlot> for ScheduledSlotResponse { impl From<domain::ScheduledSlot> for ScheduledSlotResponse {
@@ -146,6 +276,26 @@ impl From<domain::ScheduledSlot> for ScheduledSlotResponse {
end_at: s.end_at, end_at: s.end_at,
item: s.item.into(), item: s.item.into(),
source_block_id: s.source_block_id, source_block_id: s.source_block_id,
block_access_mode: domain::AccessMode::default(),
}
}
}
impl ScheduledSlotResponse {
pub fn with_block_access(slot: domain::ScheduledSlot, channel: &domain::Channel) -> Self {
let block_access_mode = channel
.schedule_config
.all_blocks()
.find(|b| b.id == slot.source_block_id)
.map(|b| b.access_mode.clone())
.unwrap_or_default();
Self {
id: slot.id,
start_at: slot.start_at,
end_at: slot.end_at,
item: slot.item.into(),
source_block_id: slot.source_block_id,
block_access_mode,
} }
} }
} }
@@ -158,6 +308,8 @@ pub struct CurrentBroadcastResponse {
/// Seconds elapsed since the start of the current item — use this as the /// Seconds elapsed since the start of the current item — use this as the
/// initial seek position for the player. /// initial seek position for the player.
pub offset_secs: u32, pub offset_secs: u32,
/// Access mode of the block currently playing. The stream is gated by this.
pub block_access_mode: domain::AccessMode,
} }
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
@@ -170,6 +322,50 @@ pub struct ScheduleResponse {
pub slots: Vec<ScheduledSlotResponse>, pub slots: Vec<ScheduledSlotResponse>,
} }
// ============================================================================
// Transcode DTOs
// ============================================================================
/// Response for `GET /files/transcode-settings`.
#[cfg(feature = "local-files")]
#[derive(Debug, Serialize)]
pub struct TranscodeSettingsResponse {
    /// Hours after last access before a transcode cache entry is deleted.
    pub cleanup_ttl_hours: u32,
}

/// Body for `PUT /files/transcode-settings`.
#[cfg(feature = "local-files")]
#[derive(Debug, Deserialize)]
pub struct UpdateTranscodeSettingsRequest {
    /// New cache-entry TTL in hours.
    pub cleanup_ttl_hours: u32,
}

/// Response for `GET /files/transcode-stats`.
#[cfg(feature = "local-files")]
#[derive(Debug, Serialize)]
pub struct TranscodeStatsResponse {
    /// Total size of the transcode cache in bytes.
    pub cache_size_bytes: u64,
    /// Number of entries currently in the cache.
    pub item_count: usize,
}
/// Summary of a generated schedule for history listings.
#[derive(Debug, Serialize)]
pub struct ScheduleHistoryEntry {
    pub id: Uuid,
    /// Generation counter of this schedule.
    pub generation: u32,
    /// Start of the schedule's validity window (UTC).
    pub valid_from: DateTime<Utc>,
    /// End of the schedule's validity window (UTC).
    pub valid_until: DateTime<Utc>,
    /// Number of slots; the slots themselves are not serialized here.
    pub slot_count: usize,
}
impl From<domain::GeneratedSchedule> for ScheduleHistoryEntry {
fn from(s: domain::GeneratedSchedule) -> Self {
Self {
id: s.id,
generation: s.generation,
valid_from: s.valid_from,
valid_until: s.valid_until,
slot_count: s.slots.len(),
}
}
}
impl From<domain::GeneratedSchedule> for ScheduleResponse { impl From<domain::GeneratedSchedule> for ScheduleResponse {
fn from(s: domain::GeneratedSchedule) -> Self { fn from(s: domain::GeneratedSchedule) -> Self {
Self { Self {

View File

@@ -29,6 +29,22 @@ pub enum ApiError {
#[error("Unauthorized: {0}")] #[error("Unauthorized: {0}")]
Unauthorized(String), Unauthorized(String),
#[error("password_required")]
PasswordRequired,
#[error("auth_required")]
AuthRequired,
#[allow(dead_code)]
#[error("Not found: {0}")]
NotFound(String),
#[error("Not implemented: {0}")]
NotImplemented(String),
#[error("Conflict: {0}")]
Conflict(String),
} }
/// Error response body /// Error response body
@@ -110,6 +126,46 @@ impl IntoResponse for ApiError {
details: Some(msg.clone()), details: Some(msg.clone()),
}, },
), ),
ApiError::PasswordRequired => (
StatusCode::UNAUTHORIZED,
ErrorResponse {
error: "password_required".to_string(),
details: None,
},
),
ApiError::AuthRequired => (
StatusCode::UNAUTHORIZED,
ErrorResponse {
error: "auth_required".to_string(),
details: None,
},
),
ApiError::NotFound(msg) => (
StatusCode::NOT_FOUND,
ErrorResponse {
error: "Not found".to_string(),
details: Some(msg.clone()),
},
),
ApiError::NotImplemented(msg) => (
StatusCode::NOT_IMPLEMENTED,
ErrorResponse {
error: "Not implemented".to_string(),
details: Some(msg.clone()),
},
),
ApiError::Conflict(msg) => (
StatusCode::CONFLICT,
ErrorResponse {
error: "Conflict".to_string(),
details: Some(msg.clone()),
},
),
}; };
(status, Json(error_response)).into_response() (status, Json(error_response)).into_response()
@@ -124,7 +180,17 @@ impl ApiError {
pub fn internal(msg: impl Into<String>) -> Self { pub fn internal(msg: impl Into<String>) -> Self {
Self::Internal(msg.into()) Self::Internal(msg.into())
} }
/// Shorthand constructor for a `NotFound` error (maps to HTTP 404).
pub fn not_found(msg: impl Into<String>) -> Self {
    Self::NotFound(msg.into())
}

/// Shorthand constructor for a `Conflict` error (maps to HTTP 409).
pub fn conflict(msg: impl Into<String>) -> Self {
    Self::Conflict(msg.into())
}

/// Shorthand constructor for a `NotImplemented` error (maps to HTTP 501),
/// used when the active provider lacks a capability.
pub fn not_implemented(msg: impl Into<String>) -> Self {
    Self::NotImplemented(msg.into())
}
} }
/// Result type alias for API handlers — the `Err` arm converts to an HTTP
/// response through `ApiError`'s `IntoResponse` implementation.
pub type ApiResult<T> = Result<T, ApiError>;

View File

@@ -0,0 +1,12 @@
//! Event bus type alias.
//!
//! The broadcast sender is kept in `AppState` and cloned into each route handler.
//! Receivers are created with `event_tx.subscribe()`.

use tokio::sync::broadcast;

use domain::DomainEvent;

/// A sender half of the domain-event broadcast channel.
///
/// Clone to share across tasks. Use `event_tx.subscribe()` to create receivers.
// NOTE(review): tokio broadcast channels drop the oldest messages for
// receivers that lag past the channel capacity — confirm subscribers
// (e.g. SSE streams) tolerate missed events.
pub type EventBus = broadcast::Sender<DomainEvent>;

View File

@@ -38,7 +38,62 @@ impl FromRequestParts<AppState> for CurrentUser {
} }
} }
/// Authenticate using JWT Bearer token /// Optional current user — returns None instead of error when auth is missing/invalid.
/// Optional current user — yields `None` instead of an error when auth is
/// missing or invalid.
///
/// Checks `Authorization: Bearer <token>` first; falls back to a `?token=<jwt>`
/// query parameter so IPTV clients and direct stream links work without
/// custom headers.
pub struct OptionalCurrentUser(pub Option<User>);

impl FromRequestParts<AppState> for OptionalCurrentUser {
    type Rejection = ApiError;

    async fn from_request_parts(
        parts: &mut Parts,
        state: &AppState,
    ) -> Result<Self, Self::Rejection> {
        #[cfg(feature = "auth-jwt")]
        {
            // Header auth wins whenever it succeeds.
            if let Ok(user) = try_jwt_auth(parts, state).await {
                return Ok(OptionalCurrentUser(Some(user)));
            }

            // Otherwise look for a `token=` segment in the raw query string.
            let query_token = parts.uri.query().and_then(|query| {
                query
                    .split('&')
                    .find_map(|seg| seg.strip_prefix("token="))
                    .map(str::to_owned)
            });

            // An invalid query token degrades to anonymous rather than erroring.
            match query_token {
                Some(token) => Ok(OptionalCurrentUser(
                    validate_jwt_token(&token, state).await.ok(),
                )),
                None => Ok(OptionalCurrentUser(None)),
            }
        }
        #[cfg(not(feature = "auth-jwt"))]
        {
            let _ = (parts, state);
            Ok(OptionalCurrentUser(None))
        }
    }
}
/// Extracted admin user — returns 403 if user is not an admin.
///
/// Delegates authentication to [`CurrentUser`], then layers an authorization
/// check on top: only users with the `is_admin` flag pass.
pub struct AdminUser(pub User);

impl FromRequestParts<AppState> for AdminUser {
    type Rejection = ApiError;

    async fn from_request_parts(parts: &mut Parts, state: &AppState) -> Result<Self, Self::Rejection> {
        let CurrentUser(user) = CurrentUser::from_request_parts(parts, state).await?;
        if user.is_admin {
            Ok(AdminUser(user))
        } else {
            Err(ApiError::Forbidden("Admin access required".to_string()))
        }
    }
}
/// Authenticate using JWT Bearer token from the `Authorization` header.
#[cfg(feature = "auth-jwt")] #[cfg(feature = "auth-jwt")]
async fn try_jwt_auth(parts: &mut Parts, state: &AppState) -> Result<User, ApiError> { async fn try_jwt_auth(parts: &mut Parts, state: &AppState) -> Result<User, ApiError> {
use axum::http::header::AUTHORIZATION; use axum::http::header::AUTHORIZATION;
@@ -56,12 +111,18 @@ async fn try_jwt_auth(parts: &mut Parts, state: &AppState) -> Result<User, ApiEr
ApiError::Unauthorized("Authorization header must use Bearer scheme".to_string()) ApiError::Unauthorized("Authorization header must use Bearer scheme".to_string())
})?; })?;
validate_jwt_token(token, state).await
}
/// Validate a raw JWT string and return the corresponding `User`.
#[cfg(feature = "auth-jwt")]
pub(crate) async fn validate_jwt_token(token: &str, state: &AppState) -> Result<User, ApiError> {
let validator = state let validator = state
.jwt_validator .jwt_validator
.as_ref() .as_ref()
.ok_or_else(|| ApiError::Internal("JWT validator not configured".to_string()))?; .ok_or_else(|| ApiError::Internal("JWT validator not configured".to_string()))?;
let claims = validator.validate_token(token).map_err(|e| { let claims = validator.validate_access_token(token).map_err(|e| {
tracing::debug!("JWT validation failed: {:?}", e); tracing::debug!("JWT validation failed: {:?}", e);
match e { match e {
infra::auth::jwt::JwtError::Expired => { infra::auth::jwt::JwtError::Expired => {

View File

@@ -0,0 +1,64 @@
//! Background library sync task.
//! Fires 10 seconds after startup, then every N hours (read from app_settings).
use std::sync::Arc;
use std::time::Duration;
use domain::IProviderRegistry;
/// Grace period before the first sync pass after process startup.
const STARTUP_DELAY_SECS: u64 = 10;
/// Fallback sync interval when the `library_sync_interval_hours` setting is
/// missing or unparsable.
const DEFAULT_INTERVAL_HOURS: u64 = 6;
/// Background library sync loop.
///
/// Sleeps for a short startup grace period, then alternates forever between
/// one sync pass over every registered provider and a sleep of the configured
/// number of hours. The interval is re-read from app settings after every
/// pass, so changes take effect without a restart.
pub async fn run_library_sync(
    sync_adapter: Arc<dyn domain::LibrarySyncAdapter>,
    registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
    app_settings_repo: Arc<dyn domain::IAppSettingsRepository>,
) {
    // Give providers time to register before the first pass.
    tokio::time::sleep(Duration::from_secs(STARTUP_DELAY_SECS)).await;
    loop {
        tick(&sync_adapter, &registry).await;
        let hours = load_interval_hours(&app_settings_repo).await;
        let pause = Duration::from_secs(hours * 3600);
        tokio::time::sleep(pause).await;
    }
}
/// Read the sync interval (in hours) from app settings.
///
/// Falls back to [`DEFAULT_INTERVAL_HOURS`] when the setting is missing,
/// unparsable, or zero — a stored value of "0" would otherwise make the
/// caller sleep for 0 seconds between passes, turning the sync loop into a
/// busy loop. Surrounding whitespace is tolerated before parsing.
async fn load_interval_hours(repo: &Arc<dyn domain::IAppSettingsRepository>) -> u64 {
    repo.get("library_sync_interval_hours")
        .await
        .ok()
        .flatten()
        .and_then(|v| v.trim().parse::<u64>().ok())
        .filter(|&hours| hours > 0)
        .unwrap_or(DEFAULT_INTERVAL_HOURS)
}
/// Run one sync pass over every provider currently in the registry.
///
/// The registry read-lock is held only while snapshotting ids and looking up
/// each provider, never across the (potentially long) sync itself.
async fn tick(
    sync_adapter: &Arc<dyn domain::LibrarySyncAdapter>,
    registry: &Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
) {
    // Snapshot the provider ids up front; providers added mid-pass are
    // picked up on the next tick.
    let ids: Vec<String> = { registry.read().await.provider_ids() };
    for id in ids {
        // Re-acquire briefly per provider; it may have been removed since
        // the snapshot was taken.
        let provider = { registry.read().await.get_provider(&id) };
        let Some(provider) = provider else { continue };
        tracing::info!("library-sync: syncing provider '{}'", id);
        let result = sync_adapter.sync_provider(provider.as_ref(), &id).await;
        match result.error {
            Some(ref err) => {
                tracing::warn!("library-sync: provider '{}' failed: {}", id, err);
            }
            None => {
                tracing::info!(
                    "library-sync: provider '{}' done — {} items in {}ms",
                    id, result.items_found, result.duration_ms
                );
            }
        }
    }
}

View File

@@ -0,0 +1,72 @@
//! Custom tracing layer that captures log events and broadcasts them to SSE clients.
use chrono::Utc;
use serde::Serialize;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
use tracing::Event;
use tracing_subscriber::Layer;
/// A single structured log line sent to SSE clients.
#[derive(Debug, Clone, Serialize)]
pub struct LogLine {
    /// Log level rendered as text via the tracing `Level` Display impl.
    pub level: String,
    /// Module path / target that emitted the event.
    pub target: String,
    /// The event's `message` field; empty string if the event had none.
    pub message: String,
    /// RFC 3339 UTC timestamp captured when the event was observed.
    pub timestamp: String,
}
/// Tracing layer that fans log events out to a broadcast channel + ring buffer.
pub struct AppLogLayer {
    // Live fan-out to subscribers (SSE clients).
    tx: broadcast::Sender<LogLine>,
    // Bounded in-memory history of recent lines — presumably replayed to
    // late-joining clients; confirm against the SSE route.
    history: Arc<Mutex<VecDeque<LogLine>>>,
}

impl AppLogLayer {
    /// Create a layer that writes into the given channel and shared history
    /// buffer. Both are shared with whatever serves the log stream.
    pub fn new(
        tx: broadcast::Sender<LogLine>,
        history: Arc<Mutex<VecDeque<LogLine>>>,
    ) -> Self {
        Self { tx, history }
    }
}
impl<S: tracing::Subscriber> Layer<S> for AppLogLayer {
    /// Capture each tracing event, append it to the bounded history buffer,
    /// and fan it out to any live subscribers.
    fn on_event(&self, event: &Event<'_>, _ctx: tracing_subscriber::layer::Context<'_, S>) {
        // Maximum number of lines retained for replay; was a bare `200`
        // literal — named so the cap is self-describing.
        const HISTORY_CAP: usize = 200;

        // Pull only the `message` field out of the event payload.
        let mut visitor = MsgVisitor(String::new());
        event.record(&mut visitor);

        let line = LogLine {
            level: event.metadata().level().to_string(),
            target: event.metadata().target().to_string(),
            message: visitor.0,
            timestamp: Utc::now().to_rfc3339(),
        };

        // Append to the ring buffer, evicting the oldest line at capacity.
        // A poisoned lock is silently skipped so logging can never panic.
        if let Ok(mut history) = self.history.lock() {
            if history.len() >= HISTORY_CAP {
                history.pop_front();
            }
            history.push_back(line.clone());
        }

        // Broadcast errors (no active receivers) are deliberately ignored.
        let _ = self.tx.send(line);
    }
}
/// Field visitor that plucks the `message` field out of a tracing event and
/// discards every other field.
struct MsgVisitor(String);

impl tracing::field::Visit for MsgVisitor {
    fn record_str(&mut self, field: &tracing::field::Field, value: &str) {
        // Only the conventional `message` field is of interest.
        if field.name() != "message" {
            return;
        }
        self.0 = value.to_owned();
    }

    fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) {
        if field.name() != "message" {
            return;
        }
        self.0 = format!("{value:?}");
    }
}

View File

@@ -2,167 +2,131 @@
//! //!
//! Configures and starts the HTTP server with JWT-based authentication. //! Configures and starts the HTTP server with JWT-based authentication.
use std::net::SocketAddr;
use std::time::Duration as StdDuration;
use axum::Router;
use std::sync::Arc; use std::sync::Arc;
use domain::{ChannelService, IMediaProvider, ScheduleEngineService, UserService};
use infra::factory::{build_channel_repository, build_schedule_repository, build_user_repository};
use infra::run_migrations;
use k_core::http::server::{ServerConfig, apply_standard_middleware};
use k_core::logging;
use tokio::net::TcpListener;
use tracing::info; use tracing::info;
use domain::{ChannelService, IProviderRegistry, ScheduleEngineService, UserService};
use infra::factory::{build_activity_log_repository, build_app_settings_repository, build_channel_repository, build_library_repository, build_provider_config_repository, build_schedule_repository, build_user_repository};
#[cfg(feature = "local-files")]
use infra::factory::build_transcode_settings_repository;
mod config; mod config;
mod database;
mod library_scheduler;
mod provider_registry;
mod dto; mod dto;
mod error; mod error;
mod events;
mod extractors; mod extractors;
mod log_layer;
mod poller;
mod routes; mod routes;
mod scheduler;
mod server;
mod startup;
mod state; mod state;
mod telemetry;
mod webhook;
use crate::config::Config; use crate::config::Config;
use crate::state::AppState; use crate::state::AppState;
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
logging::init("api"); let handles = telemetry::init_tracing();
let config = Config::from_env(); let config = Config::from_env();
info!("Starting server on {}:{}", config.host, config.port); info!("Starting server on {}:{}", config.host, config.port);
// Setup database // Setup database
tracing::info!("Connecting to database: {}", config.database_url); let db_pool = database::init_database(&config).await?;
#[cfg(all(feature = "sqlite", not(feature = "postgres")))]
let db_type = k_core::db::DbType::Sqlite;
#[cfg(all(feature = "postgres", not(feature = "sqlite")))]
let db_type = k_core::db::DbType::Postgres;
// Both features enabled: fall back to URL inspection at runtime
#[cfg(all(feature = "sqlite", feature = "postgres"))]
let db_type = if config.database_url.starts_with("postgres") {
k_core::db::DbType::Postgres
} else {
k_core::db::DbType::Sqlite
};
let db_config = k_core::db::DatabaseConfig {
db_type,
url: config.database_url.clone(),
max_connections: config.db_max_connections,
min_connections: config.db_min_connections,
acquire_timeout: StdDuration::from_secs(30),
};
let db_pool = k_core::db::connect(&db_config).await?;
run_migrations(&db_pool).await?;
let user_repo = build_user_repository(&db_pool).await?; let user_repo = build_user_repository(&db_pool).await?;
let channel_repo = build_channel_repository(&db_pool).await?; let channel_repo = build_channel_repository(&db_pool).await?;
let schedule_repo = build_schedule_repository(&db_pool).await?; let schedule_repo = build_schedule_repository(&db_pool).await?;
let activity_log_repo = build_activity_log_repository(&db_pool).await?;
let user_service = UserService::new(user_repo); let user_service = UserService::new(user_repo);
let channel_service = ChannelService::new(channel_repo.clone()); let channel_service = ChannelService::new(channel_repo.clone());
// Build media provider — Jellyfin if configured, no-op fallback otherwise. // Build provider registry — all configured providers are registered simultaneously.
let media_provider: Arc<dyn IMediaProvider> = build_media_provider(&config); let provider_config_repo = build_provider_config_repository(&db_pool).await?;
let schedule_engine = ScheduleEngineService::new(media_provider, channel_repo, schedule_repo); let bundle = provider_registry::build_provider_registry(
&config, &db_pool, &provider_config_repo,
).await?;
let state = AppState::new(user_service, channel_service, schedule_engine, config.clone()).await?; let registry_arc = bundle.registry;
let provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>> =
Arc::new(tokio::sync::RwLock::new(Arc::clone(&registry_arc)));
let server_config = ServerConfig { let (event_tx, event_rx) = tokio::sync::broadcast::channel::<domain::DomainEvent>(64);
cors_origins: config.cors_allowed_origins.clone(),
};
let app = Router::new() let bg_channel_repo = channel_repo.clone();
.nest("/api/v1", routes::api_v1_router()) let webhook_channel_repo = channel_repo.clone();
.with_state(state); tokio::spawn(webhook::run_webhook_consumer(
event_rx,
webhook_channel_repo,
reqwest::Client::new(),
));
let app = apply_standard_middleware(app, &server_config); let schedule_engine = ScheduleEngineService::new(
Arc::clone(&registry_arc) as Arc<dyn IProviderRegistry>,
let addr: SocketAddr = format!("{}:{}", config.host, config.port).parse()?; channel_repo,
let listener = TcpListener::bind(addr).await?; schedule_repo,
tracing::info!("🚀 API server running at http://{}", addr);
tracing::info!("🔒 Authentication mode: JWT (Bearer token)");
#[cfg(feature = "auth-jwt")]
tracing::info!(" ✓ JWT auth enabled");
#[cfg(feature = "auth-oidc")]
tracing::info!(" ✓ OIDC integration enabled (stateless cookie state)");
tracing::info!("📝 API endpoints available at /api/v1/...");
axum::serve(listener, app).await?;
Ok(())
}
/// Build the media provider from config.
/// Falls back to a no-op provider that returns an informative error when
/// Jellyfin env vars are not set, so other API features still work in dev.
fn build_media_provider(config: &Config) -> Arc<dyn IMediaProvider> {
#[cfg(feature = "jellyfin")]
if let (Some(base_url), Some(api_key), Some(user_id)) = (
&config.jellyfin_base_url,
&config.jellyfin_api_key,
&config.jellyfin_user_id,
) {
tracing::info!("Media provider: Jellyfin at {}", base_url);
return Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
base_url: base_url.clone(),
api_key: api_key.clone(),
user_id: user_id.clone(),
}));
}
tracing::warn!(
"No media provider configured. Set JELLYFIN_BASE_URL, JELLYFIN_API_KEY, \
and JELLYFIN_USER_ID to enable schedule generation."
); );
Arc::new(NoopMediaProvider)
#[cfg(feature = "local-files")]
let transcode_settings_repo = build_transcode_settings_repository(&db_pool).await.ok();
let library_repo = build_library_repository(&db_pool).await?;
let app_settings_repo = build_app_settings_repository(&db_pool).await?;
let library_sync_adapter: Arc<dyn domain::LibrarySyncAdapter> =
Arc::new(infra::FullSyncAdapter::new(Arc::clone(&library_repo)));
#[allow(unused_mut)]
let mut state = AppState::new(
user_service,
channel_service,
schedule_engine,
provider_registry,
provider_config_repo,
config.clone(),
event_tx.clone(),
handles.log_tx,
handles.log_history,
activity_log_repo,
db_pool,
library_repo,
library_sync_adapter,
app_settings_repo,
#[cfg(feature = "local-files")]
transcode_settings_repo,
)
.await?;
#[cfg(feature = "local-files")]
if !bundle.local_index.is_empty() {
*state.local_index.write().await = bundle.local_index;
}
#[cfg(feature = "local-files")]
if let Some(tm) = bundle.transcode_manager {
*state.transcode_manager.write().await = Some(tm);
} }
/// Stand-in provider used when no real media source is configured. startup::spawn_background_tasks(
/// Returns a descriptive error for every call so schedule endpoints fail Arc::clone(&state.schedule_engine),
/// gracefully rather than panicking at startup. bg_channel_repo,
struct NoopMediaProvider; event_tx,
);
#[async_trait::async_trait] tokio::spawn(library_scheduler::run_library_sync(
impl IMediaProvider for NoopMediaProvider { Arc::clone(&state.library_sync_adapter),
async fn fetch_items( Arc::clone(&state.provider_registry),
&self, Arc::clone(&state.app_settings_repo),
_: &domain::MediaFilter, ));
) -> domain::DomainResult<Vec<domain::MediaItem>> {
Err(domain::DomainError::InfrastructureError(
"No media provider configured. Set JELLYFIN_BASE_URL, JELLYFIN_API_KEY, \
and JELLYFIN_USER_ID."
.into(),
))
}
async fn fetch_by_id( server::build_and_serve(state, &config).await
&self,
_: &domain::MediaItemId,
) -> domain::DomainResult<Option<domain::MediaItem>> {
Err(domain::DomainError::InfrastructureError(
"No media provider configured.".into(),
))
}
async fn get_stream_url(
&self,
_: &domain::MediaItemId,
) -> domain::DomainResult<String> {
Err(domain::DomainError::InfrastructureError(
"No media provider configured.".into(),
))
}
} }

View File

@@ -0,0 +1,488 @@
//! BroadcastPoller background task.
//!
//! Polls each channel that has a webhook_url configured. On each tick (every 1s)
//! it checks which channels are due for a poll (elapsed >= webhook_poll_interval_secs)
//! and emits BroadcastTransition or NoSignal events when the current slot changes.
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use chrono::Utc;
use tokio::sync::broadcast;
use tracing::error;
use uuid::Uuid;
use domain::{ChannelRepository, DomainError, DomainEvent, ScheduleEngineService};
/// Per-channel poller state.
///
/// Tracks what was last observed so `poll_tick` only emits events when the
/// current slot actually changes.
#[derive(Debug)]
pub struct ChannelPollState {
    /// ID of the last slot we saw as current (None = no signal).
    last_slot_id: Option<Uuid>,
    /// Wall-clock instant of the last poll for this channel.
    last_checked: Instant,
}
/// Polls channels with webhook URLs and emits broadcast transition events.
///
/// Runs forever: once per second it hands control to `poll_tick`, which
/// decides per channel whether that channel is actually due for a poll.
pub async fn run_broadcast_poller(
    schedule_engine: Arc<ScheduleEngineService>,
    channel_repo: Arc<dyn ChannelRepository>,
    event_tx: broadcast::Sender<DomainEvent>,
) {
    let mut poll_state: HashMap<Uuid, ChannelPollState> = HashMap::new();
    loop {
        tokio::time::sleep(Duration::from_secs(1)).await;
        poll_tick(&schedule_engine, &channel_repo, &event_tx, &mut poll_state).await;
    }
}
/// One poller pass over all channels.
///
/// For each channel with a webhook URL that is due for a poll, determines the
/// currently-airing slot and emits `BroadcastTransition` / `NoSignal` when it
/// differs from the last observed slot.
///
/// Fix: the schedule is now fetched ONCE per channel. Previously
/// `get_active_schedule` was called a second time when emitting the
/// transition event; the two fetches could disagree (TOCTOU), in which case
/// the transition was silently dropped while `last_slot_id` was still
/// updated — losing the event entirely.
pub(crate) async fn poll_tick(
    schedule_engine: &Arc<ScheduleEngineService>,
    channel_repo: &Arc<dyn ChannelRepository>,
    event_tx: &broadcast::Sender<DomainEvent>,
    state: &mut HashMap<Uuid, ChannelPollState>,
) {
    let channels = match channel_repo.find_all().await {
        Ok(c) => c,
        Err(e) => {
            error!("broadcast poller: failed to load channels: {}", e);
            return;
        }
    };

    // Drop state for channels that no longer exist.
    let live_ids: std::collections::HashSet<Uuid> = channels.iter().map(|c| c.id).collect();
    state.retain(|id, _| live_ids.contains(id));

    let now = Utc::now();

    for channel in channels {
        // Only poll channels with a configured webhook URL.
        if channel.webhook_url.is_none() {
            state.remove(&channel.id);
            continue;
        }

        let poll_interval = Duration::from_secs(channel.webhook_poll_interval_secs as u64);
        let entry = state.entry(channel.id).or_insert_with(|| ChannelPollState {
            last_slot_id: None,
            // Backdated so the first encounter polls immediately.
            last_checked: Instant::now() - poll_interval,
        });

        if entry.last_checked.elapsed() < poll_interval {
            continue; // Not yet due for a poll
        }
        entry.last_checked = Instant::now();

        // Fetch the active schedule once and derive everything from it.
        let schedule = match schedule_engine.get_active_schedule(channel.id, now).await {
            Ok(s) => s,
            Err(DomainError::NoActiveSchedule(_)) => None,
            Err(DomainError::ChannelNotFound(_)) => {
                state.remove(&channel.id);
                continue;
            }
            Err(e) => {
                error!(
                    "broadcast poller: error checking schedule for channel {}: {}",
                    channel.id, e
                );
                continue;
            }
        };

        // The slot airing right now, if any (half-open interval [start, end)).
        let current_slot = schedule.as_ref().and_then(|sch| {
            sch.slots
                .iter()
                .find(|s| s.start_at <= now && now < s.end_at)
                .cloned()
        });
        let current_slot_id = current_slot.as_ref().map(|s| s.id);

        if current_slot_id == entry.last_slot_id {
            continue; // No change since the last poll
        }

        // State changed — emit the appropriate event. Broadcast send errors
        // (no receivers) are deliberately ignored.
        match current_slot {
            Some(slot) => {
                let _ = event_tx.send(DomainEvent::BroadcastTransition {
                    channel_id: channel.id,
                    slot,
                });
            }
            None => {
                let _ = event_tx.send(DomainEvent::NoSignal {
                    channel_id: channel.id,
                });
            }
        }
        entry.last_slot_id = current_slot_id;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};
    use async_trait::async_trait;
    use chrono::{DateTime, Duration, Utc};
    use domain::value_objects::{ChannelId, ContentType, UserId};
    use domain::{
        BlockId, Channel, ChannelRepository, Collection, DomainResult, GeneratedSchedule,
        IProviderRegistry, MediaFilter, MediaItem, MediaItemId, PlaybackRecord, ProviderCapabilities,
        ScheduleEngineService, ScheduleRepository, SeriesSummary, StreamQuality,
    };
    use tokio::sync::broadcast;
    use uuid::Uuid;

    // ── Mocks ─────────────────────────────────────────────────────────────────

    // Channel repository stub backed by a fixed in-memory list. Only the
    // methods `poll_tick` exercises are implemented; the rest panic via
    // `unimplemented!()` so accidental use is loud.
    struct MockChannelRepo {
        channels: Vec<Channel>,
    }

    #[async_trait]
    impl ChannelRepository for MockChannelRepo {
        async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
            Ok(self.channels.iter().find(|c| c.id == id).cloned())
        }
        async fn find_by_owner(&self, _owner_id: UserId) -> DomainResult<Vec<Channel>> {
            unimplemented!()
        }
        async fn find_all(&self) -> DomainResult<Vec<Channel>> {
            Ok(self.channels.clone())
        }
        async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>> {
            unimplemented!()
        }
        async fn save(&self, _channel: &Channel) -> DomainResult<()> {
            unimplemented!()
        }
        async fn delete(&self, _id: ChannelId) -> DomainResult<()> {
            unimplemented!()
        }
        async fn save_config_snapshot(&self, _channel_id: ChannelId, _config: &domain::ScheduleConfig, _label: Option<String>) -> DomainResult<domain::ChannelConfigSnapshot> { unimplemented!() }
        async fn list_config_snapshots(&self, _channel_id: ChannelId) -> DomainResult<Vec<domain::ChannelConfigSnapshot>> { unimplemented!() }
        async fn get_config_snapshot(&self, _channel_id: ChannelId, _snapshot_id: Uuid) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
        async fn patch_config_snapshot_label(&self, _channel_id: ChannelId, _snapshot_id: Uuid, _label: Option<String>) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
    }

    // Schedule repository stub: always reports `active` as the active
    // schedule and records saves into `saved`.
    struct MockScheduleRepo {
        active: Option<GeneratedSchedule>,
        saved: Arc<Mutex<Vec<GeneratedSchedule>>>,
    }

    #[async_trait]
    impl ScheduleRepository for MockScheduleRepo {
        async fn find_active(
            &self,
            _channel_id: ChannelId,
            _at: DateTime<Utc>,
        ) -> DomainResult<Option<GeneratedSchedule>> {
            Ok(self.active.clone())
        }
        async fn find_latest(
            &self,
            _channel_id: ChannelId,
        ) -> DomainResult<Option<GeneratedSchedule>> {
            Ok(self.active.clone())
        }
        async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
            self.saved.lock().unwrap().push(schedule.clone());
            Ok(())
        }
        async fn find_playback_history(
            &self,
            _channel_id: ChannelId,
        ) -> DomainResult<Vec<PlaybackRecord>> {
            Ok(vec![])
        }
        async fn save_playback_record(&self, _record: &PlaybackRecord) -> DomainResult<()> {
            Ok(())
        }
        async fn find_last_slot_per_block(
            &self,
            _channel_id: ChannelId,
        ) -> DomainResult<HashMap<BlockId, MediaItemId>> {
            Ok(HashMap::new())
        }
        async fn list_schedule_history(&self, _channel_id: ChannelId) -> DomainResult<Vec<GeneratedSchedule>> { unimplemented!() }
        async fn get_schedule_by_id(&self, _channel_id: ChannelId, _schedule_id: Uuid) -> DomainResult<Option<GeneratedSchedule>> { unimplemented!() }
        async fn delete_schedules_after(&self, _channel_id: ChannelId, _target_generation: u32) -> DomainResult<()> { unimplemented!() }
    }

    // Provider registry stub: the poller never consults providers directly,
    // so everything is either empty or `unimplemented!()`.
    struct MockRegistry;

    #[async_trait]
    impl IProviderRegistry for MockRegistry {
        async fn fetch_items(
            &self,
            _provider_id: &str,
            _filter: &MediaFilter,
        ) -> DomainResult<Vec<MediaItem>> {
            Ok(vec![])
        }
        async fn fetch_by_id(&self, _item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
            Ok(None)
        }
        async fn get_stream_url(
            &self,
            _item_id: &MediaItemId,
            _quality: &StreamQuality,
        ) -> DomainResult<String> {
            unimplemented!()
        }
        fn provider_ids(&self) -> Vec<String> {
            vec![]
        }
        fn primary_id(&self) -> &str {
            ""
        }
        fn capabilities(&self, _provider_id: &str) -> Option<ProviderCapabilities> {
            None
        }
        async fn list_collections(&self, _provider_id: &str) -> DomainResult<Vec<Collection>> {
            unimplemented!()
        }
        async fn list_series(
            &self,
            _provider_id: &str,
            _collection_id: Option<&str>,
        ) -> DomainResult<Vec<SeriesSummary>> {
            unimplemented!()
        }
        async fn list_genres(
            &self,
            _provider_id: &str,
            _content_type: Option<&ContentType>,
        ) -> DomainResult<Vec<String>> {
            unimplemented!()
        }
    }

    // ── Helpers ───────────────────────────────────────────────────────────────

    // Channel fixture with a webhook URL and a zero poll interval so every
    // tick is "due".
    fn make_channel_with_webhook(channel_id: Uuid) -> Channel {
        let mut ch = Channel::new(Uuid::new_v4(), "Test", "UTC");
        ch.id = channel_id;
        ch.webhook_url = Some("http://example.com/hook".to_string());
        ch.webhook_poll_interval_secs = 0; // always due
        ch
    }

    // Slot fixture that is currently airing (started 1 minute ago).
    fn make_slot(_channel_id: Uuid, slot_id: Uuid) -> domain::ScheduledSlot {
        use domain::entities::MediaItem;
        let now = Utc::now();
        domain::ScheduledSlot {
            id: slot_id,
            start_at: now - Duration::minutes(1),
            end_at: now + Duration::minutes(29),
            item: MediaItem {
                id: MediaItemId::new("test-item"),
                title: "Test Movie".to_string(),
                content_type: ContentType::Movie,
                duration_secs: 1800,
                description: None,
                genres: vec![],
                year: None,
                tags: vec![],
                series_name: None,
                season_number: None,
                episode_number: None,
                thumbnail_url: None,
                collection_id: None,
            },
            source_block_id: Uuid::new_v4(),
        }
    }

    // Schedule fixture valid around "now" containing the given slots.
    fn make_schedule(channel_id: Uuid, slots: Vec<domain::ScheduledSlot>) -> GeneratedSchedule {
        let now = Utc::now();
        GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from: now - Duration::hours(1),
            valid_until: now + Duration::hours(47),
            generation: 1,
            slots,
        }
    }

    fn make_engine(
        channel_repo: Arc<dyn ChannelRepository>,
        schedule_repo: Arc<dyn ScheduleRepository>,
    ) -> Arc<ScheduleEngineService> {
        Arc::new(ScheduleEngineService::new(
            Arc::new(MockRegistry),
            channel_repo,
            schedule_repo,
        ))
    }

    // ── Tests ─────────────────────────────────────────────────────────────────

    #[tokio::test]
    async fn test_broadcast_transition_emitted_on_slot_change() {
        let channel_id = Uuid::new_v4();
        let slot_id = Uuid::new_v4();
        let ch = make_channel_with_webhook(channel_id);
        let slot = make_slot(channel_id, slot_id);
        let schedule = make_schedule(channel_id, vec![slot]);
        let channel_repo: Arc<dyn ChannelRepository> =
            Arc::new(MockChannelRepo { channels: vec![ch] });
        let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
            active: Some(schedule),
            saved: Arc::new(Mutex::new(vec![])),
        });
        let engine = make_engine(channel_repo.clone(), schedule_repo);
        let (event_tx, mut event_rx) = broadcast::channel(8);
        let mut state: HashMap<Uuid, ChannelPollState> = HashMap::new();
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let event = event_rx.try_recv().expect("expected an event");
        match event {
            DomainEvent::BroadcastTransition {
                channel_id: cid,
                slot: s,
            } => {
                assert_eq!(cid, channel_id);
                assert_eq!(s.id, slot_id);
            }
            _other => panic!("expected BroadcastTransition, got something else"),
        }
    }

    #[tokio::test]
    async fn test_no_event_when_slot_unchanged() {
        let channel_id = Uuid::new_v4();
        let slot_id = Uuid::new_v4();
        let ch = make_channel_with_webhook(channel_id);
        let slot = make_slot(channel_id, slot_id);
        let schedule = make_schedule(channel_id, vec![slot]);
        let channel_repo: Arc<dyn ChannelRepository> =
            Arc::new(MockChannelRepo { channels: vec![ch] });
        let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
            active: Some(schedule),
            saved: Arc::new(Mutex::new(vec![])),
        });
        let engine = make_engine(channel_repo.clone(), schedule_repo);
        let (event_tx, mut event_rx) = broadcast::channel(8);
        let mut state: HashMap<Uuid, ChannelPollState> = HashMap::new();
        // First tick — emits BroadcastTransition
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let _ = event_rx.try_recv();
        // Second tick — same slot, no event
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        assert!(
            event_rx.try_recv().is_err(),
            "no event expected when slot unchanged"
        );
    }

    #[tokio::test]
    async fn test_no_signal_emitted_when_slot_goes_to_none() {
        let channel_id = Uuid::new_v4();
        let slot_id = Uuid::new_v4();
        let ch = make_channel_with_webhook(channel_id);
        let slot = make_slot(channel_id, slot_id);
        let schedule_with_slot = make_schedule(channel_id, vec![slot]);
        // Repo that starts with a slot then returns empty schedule
        use std::sync::atomic::{AtomicBool, Ordering};
        struct SwitchingScheduleRepo {
            first: GeneratedSchedule,
            second: GeneratedSchedule,
            called: AtomicBool,
        }
        #[async_trait]
        impl ScheduleRepository for SwitchingScheduleRepo {
            async fn find_active(
                &self,
                _channel_id: ChannelId,
                _at: DateTime<Utc>,
            ) -> DomainResult<Option<GeneratedSchedule>> {
                // First call returns `first`, every later call returns `second`.
                if self.called.swap(true, Ordering::SeqCst) {
                    Ok(Some(self.second.clone()))
                } else {
                    Ok(Some(self.first.clone()))
                }
            }
            async fn find_latest(&self, _: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
                Ok(None)
            }
            async fn save(&self, _: &GeneratedSchedule) -> DomainResult<()> {
                Ok(())
            }
            async fn find_playback_history(
                &self,
                _: ChannelId,
            ) -> DomainResult<Vec<PlaybackRecord>> {
                Ok(vec![])
            }
            async fn save_playback_record(&self, _: &PlaybackRecord) -> DomainResult<()> {
                Ok(())
            }
            async fn find_last_slot_per_block(
                &self,
                _: ChannelId,
            ) -> DomainResult<HashMap<BlockId, MediaItemId>> {
                Ok(HashMap::new())
            }
            async fn list_schedule_history(&self, _: ChannelId) -> DomainResult<Vec<GeneratedSchedule>> { unimplemented!() }
            async fn get_schedule_by_id(&self, _: ChannelId, _: Uuid) -> DomainResult<Option<GeneratedSchedule>> { unimplemented!() }
            async fn delete_schedules_after(&self, _: ChannelId, _: u32) -> DomainResult<()> { unimplemented!() }
        }
        let now = Utc::now();
        let empty_schedule = GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from: now - Duration::hours(1),
            valid_until: now + Duration::hours(47),
            generation: 2,
            slots: vec![], // no current slot
        };
        let channel_repo: Arc<dyn ChannelRepository> =
            Arc::new(MockChannelRepo { channels: vec![ch] });
        let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(SwitchingScheduleRepo {
            first: schedule_with_slot,
            second: empty_schedule,
            called: AtomicBool::new(false),
        });
        let engine = make_engine(channel_repo.clone(), schedule_repo);
        let (event_tx, mut event_rx) = broadcast::channel(8);
        let mut state: HashMap<Uuid, ChannelPollState> = HashMap::new();
        // First tick — emits BroadcastTransition (slot present)
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let _ = event_rx.try_recv();
        // Second tick — schedule has no current slot, emits NoSignal
        poll_tick(&engine, &channel_repo, &event_tx, &mut state).await;
        let event = event_rx.try_recv().expect("expected NoSignal event");
        match event {
            DomainEvent::NoSignal { channel_id: cid } => assert_eq!(cid, channel_id),
            _ => panic!("expected NoSignal"),
        }
    }
}

View File

@@ -0,0 +1,209 @@
use std::sync::Arc;
use domain::{
DomainError, IMediaProvider, ProviderCapabilities, ProviderConfigRepository,
StreamingProtocol, StreamQuality,
};
use k_core::db::DatabasePool;
use crate::config::{Config, ConfigSource};
#[cfg(feature = "local-files")]
use infra::factory::build_transcode_settings_repository;
/// Everything produced while wiring up media providers at startup.
pub struct ProviderBundle {
    /// Registry holding every successfully constructed provider.
    pub registry: Arc<infra::ProviderRegistry>,
    /// Per-provider local file indexes, keyed by provider id.
    #[cfg(feature = "local-files")]
    pub local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>>,
    /// Transcode manager of the first local-files provider that enabled
    /// transcoding, if any.
    #[cfg(feature = "local-files")]
    pub transcode_manager: Option<Arc<infra::TranscodeManager>>,
}
/// Build the provider registry from either DB-stored configs
/// (`CONFIG_SOURCE=db`) or environment-derived config, falling back to a
/// no-op provider when nothing is configured so the rest of the API still
/// works.
///
/// Per-provider build failures are logged and skipped rather than aborting
/// startup; only repository/DB errors propagate as `Err`.
pub async fn build_provider_registry(
    config: &Config,
    // Only needed by the local-files path; silenced otherwise.
    #[cfg_attr(not(feature = "local-files"), allow(unused_variables))]
    db_pool: &Arc<DatabasePool>,
    provider_config_repo: &Arc<dyn ProviderConfigRepository>,
) -> anyhow::Result<ProviderBundle> {
    #[cfg(feature = "local-files")]
    let mut local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>> = std::collections::HashMap::new();
    #[cfg(feature = "local-files")]
    let mut transcode_manager: Option<Arc<infra::TranscodeManager>> = None;

    let mut registry = infra::ProviderRegistry::new();

    if config.config_source == ConfigSource::Db {
        // DB-driven configuration: one row per provider, JSON config blob.
        tracing::info!("CONFIG_SOURCE=db: loading provider configs from database");
        let rows = provider_config_repo.get_all().await?;
        for row in &rows {
            // Disabled rows are kept in the DB but not registered.
            if !row.enabled { continue; }
            match row.provider_type.as_str() {
                #[cfg(feature = "jellyfin")]
                "jellyfin" => {
                    // Malformed config JSON is silently skipped (no Err arm).
                    if let Ok(cfg) = serde_json::from_str::<infra::JellyfinConfig>(&row.config_json) {
                        tracing::info!("Loading Jellyfin provider [{}] from DB config", row.id);
                        registry.register(&row.id, Arc::new(infra::JellyfinMediaProvider::new(cfg)));
                    }
                }
                #[cfg(feature = "local-files")]
                "local_files" => {
                    // Config is a flat string map; `files_dir` is required.
                    if let Ok(cfg_map) = serde_json::from_str::<std::collections::HashMap<String, String>>(&row.config_json)
                        && let Some(files_dir) = cfg_map.get("files_dir")
                    {
                        // Empty transcode_dir means "transcoding disabled".
                        let transcode_dir = cfg_map.get("transcode_dir")
                            .filter(|s| !s.is_empty())
                            .map(std::path::PathBuf::from);
                        let cleanup_ttl_hours: u32 = cfg_map.get("cleanup_ttl_hours")
                            .and_then(|s| s.parse().ok())
                            .unwrap_or(24);
                        tracing::info!("Loading local-files provider [{}] from DB config at {:?}", row.id, files_dir);
                        match infra::factory::build_local_files_bundle(
                            db_pool,
                            std::path::PathBuf::from(files_dir),
                            transcode_dir,
                            cleanup_ttl_hours,
                            config.base_url.clone(),
                            &row.id,
                        ).await {
                            Ok(bundle) => {
                                // Kick off the initial scan without blocking startup.
                                let scan_idx = Arc::clone(&bundle.local_index);
                                tokio::spawn(async move { scan_idx.rescan().await; });
                                if let Some(ref tm) = bundle.transcode_manager {
                                    tracing::info!("Transcoding enabled for [{}]", row.id);
                                    // Load persisted TTL override from transcode_settings table.
                                    let tm_clone = Arc::clone(tm);
                                    let repo = build_transcode_settings_repository(db_pool).await.ok();
                                    tokio::spawn(async move {
                                        if let Some(r) = repo
                                            && let Ok(Some(ttl)) = r.load_cleanup_ttl().await
                                        {
                                            tm_clone.set_cleanup_ttl(ttl);
                                        }
                                    });
                                }
                                registry.register(&row.id, bundle.provider);
                                // Only the first provider's transcode manager is kept.
                                if transcode_manager.is_none() {
                                    transcode_manager = bundle.transcode_manager;
                                }
                                local_index.insert(row.id.clone(), bundle.local_index);
                            }
                            Err(e) => tracing::warn!("Failed to build local-files provider [{}]: {}", row.id, e),
                        }
                    }
                }
                // Unknown provider types are ignored.
                _ => {}
            }
        }
    } else {
        // Environment-driven configuration (legacy path).
        #[cfg(feature = "jellyfin")]
        if let (Some(base_url), Some(api_key), Some(user_id)) = (
            &config.jellyfin_base_url,
            &config.jellyfin_api_key,
            &config.jellyfin_user_id,
        ) {
            tracing::info!("Media provider: Jellyfin at {}", base_url);
            registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
                base_url: base_url.clone(),
                api_key: api_key.clone(),
                user_id: user_id.clone(),
            })));
        }
        #[cfg(feature = "local-files")]
        if let Some(dir) = &config.local_files_dir {
            tracing::info!("Media provider: local files at {:?}", dir);
            match infra::factory::build_local_files_bundle(
                db_pool,
                dir.clone(),
                config.transcode_dir.clone(),
                config.transcode_cleanup_ttl_hours,
                config.base_url.clone(),
                "local",
            ).await {
                Ok(bundle) => {
                    // Kick off the initial scan without blocking startup.
                    let scan_idx = Arc::clone(&bundle.local_index);
                    tokio::spawn(async move { scan_idx.rescan().await; });
                    if let Some(ref tm) = bundle.transcode_manager {
                        tracing::info!("Transcoding enabled; cache dir: {:?}", config.transcode_dir);
                        let tm_clone = Arc::clone(tm);
                        let repo = build_transcode_settings_repository(db_pool).await.ok();
                        tokio::spawn(async move {
                            if let Some(r) = repo
                                && let Ok(Some(ttl)) = r.load_cleanup_ttl().await
                            {
                                tm_clone.set_cleanup_ttl(ttl);
                            }
                        });
                    }
                    registry.register("local", bundle.provider);
                    transcode_manager = bundle.transcode_manager;
                    local_index.insert("local".to_string(), bundle.local_index);
                }
                Err(e) => tracing::warn!("local-files requires SQLite; ignoring LOCAL_FILES_DIR: {}", e),
            }
        }
    }

    // Guarantee the registry is never empty so downstream code always has a
    // primary provider; the no-op provider errors descriptively on use.
    if registry.is_empty() {
        tracing::warn!("No media provider configured. Set JELLYFIN_BASE_URL / LOCAL_FILES_DIR.");
        registry.register("noop", Arc::new(NoopMediaProvider));
    }

    Ok(ProviderBundle {
        registry: Arc::new(registry),
        #[cfg(feature = "local-files")]
        local_index,
        #[cfg(feature = "local-files")]
        transcode_manager,
    })
}
/// Stand-in provider used when no real media source is configured.
/// Returns a descriptive error for every call so schedule endpoints fail
/// gracefully rather than panicking at startup.
struct NoopMediaProvider;
#[async_trait::async_trait]
impl IMediaProvider for NoopMediaProvider {
    fn capabilities(&self) -> ProviderCapabilities {
        // Advertise nothing: every optional capability is off and streaming
        // falls back to the simplest protocol.
        ProviderCapabilities {
            collections: false,
            series: false,
            genres: false,
            tags: false,
            decade: false,
            search: false,
            streaming_protocol: StreamingProtocol::DirectFile,
            rescan: false,
            transcode: false,
        }
    }
    async fn fetch_items(
        &self,
        _: &domain::MediaFilter,
    ) -> domain::DomainResult<Vec<domain::MediaItem>> {
        // Item listing carries the full hint so operators know which env vars to set.
        Err(DomainError::InfrastructureError(String::from(
            "No media provider configured. Set JELLYFIN_BASE_URL or LOCAL_FILES_DIR.",
        )))
    }
    async fn fetch_by_id(
        &self,
        _: &domain::MediaItemId,
    ) -> domain::DomainResult<Option<domain::MediaItem>> {
        Err(DomainError::InfrastructureError(String::from(
            "No media provider configured.",
        )))
    }
    async fn get_stream_url(
        &self,
        _: &domain::MediaItemId,
        _: &StreamQuality,
    ) -> domain::DomainResult<String> {
        Err(DomainError::InfrastructureError(String::from(
            "No media provider configured.",
        )))
    }
}

View File

@@ -0,0 +1,95 @@
//! Admin routes: SSE log stream + activity log.
use axum::{
Json,
extract::State,
response::{
IntoResponse,
sse::{Event, KeepAlive, Sse},
},
};
use tokio_stream::{StreamExt, wrappers::BroadcastStream};
use crate::{
dto::ActivityEventResponse,
error::ApiError,
extractors::OptionalCurrentUser,
state::AppState,
};
use axum::Router;
use axum::routing::get;
/// Build the admin sub-router: live log SSE plus the recent-activity feed.
pub fn router() -> Router<AppState> {
    let routes = Router::new().route("/logs", get(stream_logs));
    routes.route("/activity", get(list_activity))
}
/// Stream server log lines as Server-Sent Events.
///
/// Auth: requires a valid JWT passed as `?token=<jwt>` (EventSource cannot set headers).
/// On connect: flushes the recent history ring buffer, then streams live events.
pub async fn stream_logs(
    State(state): State<AppState>,
    OptionalCurrentUser(user): OptionalCurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    // Manual auth check: OptionalCurrentUser is used because the token may
    // arrive via query parameter rather than an Authorization header.
    if user.is_none() {
        return Err(ApiError::Unauthorized(
            "Authentication required for log stream".to_string(),
        ));
    }
    // Subscribe to the broadcast channel BEFORE snapshotting history so no
    // line can fall between the two steps. A line logged in that window may
    // be delivered twice (once from history, once live) but never lost.
    let rx = state.log_tx.subscribe();
    let history: Vec<_> = state
        .log_history
        .lock()
        .map(|h| h.iter().cloned().collect())
        .unwrap_or_default(); // poisoned lock → empty history rather than a panic
    // Replay buffered lines first; a serialization failure degrades to an
    // empty data payload instead of killing the stream.
    let history_stream = tokio_stream::iter(history).map(|line| {
        let data = serde_json::to_string(&line).unwrap_or_default();
        Ok::<Event, String>(Event::default().data(data))
    });
    // Then switch to live lines. A Lagged error means this client fell behind
    // the broadcast buffer; surface it as a synthetic WARN event rather than
    // silently dropping lines or terminating the stream.
    let live_stream = BroadcastStream::new(rx).filter_map(|result| match result {
        Ok(line) => {
            let data = serde_json::to_string(&line).unwrap_or_default();
            Some(Ok::<Event, String>(Event::default().data(data)))
        }
        Err(tokio_stream::wrappers::errors::BroadcastStreamRecvError::Lagged(n)) => {
            let data = format!(
                r#"{{"level":"WARN","target":"sse","message":"[{n} log lines dropped — buffer overrun]","timestamp":""}}"#
            );
            Some(Ok(Event::default().data(data)))
        }
    });
    let combined = history_stream.chain(live_stream);
    Ok(Sse::new(combined).keep_alive(KeepAlive::default()))
}
/// Return the 50 most recent activity log entries.
///
/// Auth: requires a valid JWT (Authorization: Bearer or ?token=).
pub async fn list_activity(
    State(state): State<AppState>,
    OptionalCurrentUser(user): OptionalCurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    if user.is_none() {
        return Err(ApiError::Unauthorized(
            "Authentication required".to_string(),
        ));
    }
    let recent = state
        .activity_log_repo
        .recent(50)
        .await
        .map_err(ApiError::from)?;
    let body: Vec<ActivityEventResponse> = recent.into_iter().map(Into::into).collect();
    Ok(Json(body))
}

View File

@@ -0,0 +1,513 @@
//! Admin provider management routes.
//!
//! All routes require an admin user. Allows listing, creating, updating, deleting, and
//! testing media provider configs stored in the DB. Only available when
//! CONFIG_SOURCE=db.
use std::collections::HashMap;
use std::sync::Arc;
use axum::Router;
use axum::extract::{Path, State};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::routing::{get, post, put};
use axum::Json;
use domain::errors::DomainResult;
use domain::ProviderConfigRow;
use serde::{Deserialize, Serialize};
use crate::config::ConfigSource;
use crate::error::ApiError;
use crate::extractors::AdminUser;
use crate::state::AppState;
// ---------------------------------------------------------------------------
// DTOs
// ---------------------------------------------------------------------------
/// A provider instance id is a safe slug: 1-40 bytes, every character an
/// ASCII letter, digit, or hyphen.
fn is_valid_instance_id(id: &str) -> bool {
    let slug_char = |c: char| c.is_ascii_alphanumeric() || c == '-';
    (1..=40).contains(&id.len()) && id.chars().all(slug_char)
}
/// Body for POST /: create a new provider instance.
#[derive(Debug, Deserialize)]
pub struct CreateProviderRequest {
    /// Instance slug; must pass `is_valid_instance_id` (1-40 alphanumeric+hyphen chars).
    pub id: String,
    /// Provider kind; only "jellyfin" and "local_files" are accepted.
    pub provider_type: String,
    /// Flat string key/value config, persisted serialized as JSON.
    pub config_json: HashMap<String, String>,
    /// Whether the instance is active immediately after creation.
    pub enabled: bool,
}
/// Body for PUT /{id}: replace config and enabled flag (id and type are immutable).
#[derive(Debug, Deserialize)]
pub struct UpdateProviderRequest {
    /// New flat string key/value config (full replacement, not a patch).
    pub config_json: HashMap<String, String>,
    /// New enabled state.
    pub enabled: bool,
}
/// Body for POST /test: dry-run connectivity check; nothing is persisted.
#[derive(Debug, Deserialize)]
pub struct TestProviderRequest {
    /// Provider kind to test ("jellyfin" or "local_files").
    pub provider_type: String,
    /// Candidate config to validate.
    pub config_json: HashMap<String, String>,
}
/// Provider row as returned to the admin UI.
#[derive(Debug, Serialize)]
pub struct ProviderConfigResponse {
    pub id: String,
    pub provider_type: String,
    /// Parsed config with secret-looking values replaced by "***" (see `mask_config`).
    pub config_json: HashMap<String, serde_json::Value>,
    pub enabled: bool,
}
/// Outcome of a provider connectivity test.
#[derive(Debug, Serialize)]
pub struct TestResult {
    /// True when the probe succeeded.
    pub ok: bool,
    /// Human-readable detail for the admin UI.
    pub message: String,
}
// ---------------------------------------------------------------------------
// Router
// ---------------------------------------------------------------------------
/// Admin provider-management routes: list/create, update/delete by id, and a
/// connectivity test endpoint.
pub fn router() -> Router<AppState> {
    let r = Router::new().route("/", get(list_providers).post(create_provider));
    let r = r.route("/{id}", put(update_provider).delete(delete_provider));
    r.route("/test", post(test_provider))
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
fn mask_config(raw: &str) -> HashMap<String, serde_json::Value> {
let parsed: HashMap<String, serde_json::Value> =
serde_json::from_str(raw).unwrap_or_default();
parsed
.into_iter()
.map(|(k, v)| {
let secret_key = ["key", "password", "secret", "token"]
.iter()
.any(|kw| k.to_lowercase().contains(kw));
let masked = if secret_key {
match &v {
serde_json::Value::String(s) if !s.is_empty() => {
serde_json::Value::String("***".to_string())
}
_ => v,
}
} else {
v
};
(k, masked)
})
.collect()
}
/// 409 response used whenever CONFIG_SOURCE is not `db`, i.e. UI-driven
/// provider configuration is disabled on this deployment.
fn conflict_response() -> impl IntoResponse {
    let body = serde_json::json!({
        "error": "UI config disabled — set CONFIG_SOURCE=db on the server"
    });
    (StatusCode::CONFLICT, Json(body))
}
/// Rebuild the live provider registry from all enabled rows in the provider
/// config table, then swap it into AppState.
///
/// Disabled rows, unknown provider types, and rows with malformed config are
/// skipped. If nothing registers, a Noop provider is installed so downstream
/// code always has something to call.
async fn rebuild_registry(state: &AppState) -> DomainResult<()> {
    let rows = state.provider_config_repo.get_all().await?;
    let mut new_registry = infra::ProviderRegistry::new();
    #[cfg(feature = "local-files")]
    let mut new_local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>> =
        std::collections::HashMap::new();
    #[cfg(feature = "local-files")]
    let mut first_transcode_manager: Option<Arc<infra::TranscodeManager>> = None;
    for row in &rows {
        if !row.enabled {
            continue;
        }
        match row.provider_type.as_str() {
            #[cfg(feature = "jellyfin")]
            "jellyfin" => {
                // Malformed Jellyfin config is skipped without a log entry.
                if let Ok(cfg) =
                    serde_json::from_str::<infra::JellyfinConfig>(&row.config_json)
                {
                    new_registry.register(
                        &row.id,
                        Arc::new(infra::JellyfinMediaProvider::new(cfg)),
                    );
                }
            }
            #[cfg(feature = "local-files")]
            "local_files" => {
                let config: std::collections::HashMap<String, String> =
                    match serde_json::from_str(&row.config_json) {
                        Ok(c) => c,
                        Err(_) => continue,
                    };
                // files_dir is mandatory; transcode_dir and cleanup_ttl_hours
                // are optional (TTL defaults to 24).
                let files_dir = match config.get("files_dir") {
                    Some(d) => std::path::PathBuf::from(d),
                    None => continue,
                };
                let transcode_dir = config
                    .get("transcode_dir")
                    .filter(|s| !s.is_empty())
                    .map(std::path::PathBuf::from);
                let cleanup_ttl_hours: u32 = config
                    .get("cleanup_ttl_hours")
                    .and_then(|s| s.parse().ok())
                    .unwrap_or(24);
                let base_url = state.config.base_url.clone();
                match infra::factory::build_local_files_bundle(
                    &state.db_pool,
                    files_dir,
                    transcode_dir,
                    cleanup_ttl_hours,
                    base_url,
                    &row.id,
                ).await {
                    Ok(bundle) => {
                        // Kick off a background rescan; do not block the request.
                        let scan_idx = Arc::clone(&bundle.local_index);
                        tokio::spawn(async move { scan_idx.rescan().await; });
                        new_registry.register(&row.id, bundle.provider);
                        new_local_index.insert(row.id.clone(), bundle.local_index);
                        // Only the first enabled instance's transcode manager is
                        // retained, matching the startup code's behavior.
                        if first_transcode_manager.is_none() {
                            first_transcode_manager = bundle.transcode_manager;
                        }
                    }
                    Err(e) => {
                        tracing::warn!("local_files provider [{}] requires SQLite; skipping: {}", row.id, e);
                        continue;
                    }
                }
            }
            _ => {}
        }
    }
    if new_registry.is_empty() {
        new_registry.register("noop", Arc::new(NoopMediaProvider));
    }
    // Swap the new registry in; readers holding the old Arc finish unaffected.
    *state.provider_registry.write().await = Arc::new(new_registry);
    #[cfg(feature = "local-files")]
    {
        *state.local_index.write().await = new_local_index;
        *state.transcode_manager.write().await = first_transcode_manager;
    }
    Ok(())
}
// ---------------------------------------------------------------------------
// Handlers
// ---------------------------------------------------------------------------
/// List all provider rows (enabled or not) with secret values masked.
pub async fn list_providers(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
) -> Result<impl IntoResponse, ApiError> {
    let rows = state
        .provider_config_repo
        .get_all()
        .await
        .map_err(ApiError::from)?;
    let mut response = Vec::with_capacity(rows.len());
    for row in &rows {
        response.push(ProviderConfigResponse {
            id: row.id.clone(),
            provider_type: row.provider_type.clone(),
            config_json: mask_config(&row.config_json),
            enabled: row.enabled,
        });
    }
    Ok(Json(response))
}
/// Create a new provider instance row, then rebuild the live registry.
///
/// Returns 409 when UI config is disabled (CONFIG_SOURCE != db) or when the
/// id already exists; 400 on an invalid id or unknown provider type; 201 with
/// the masked row on success.
pub async fn create_provider(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Json(payload): Json<CreateProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
    if state.config.config_source != ConfigSource::Db {
        return Ok(conflict_response().into_response());
    }
    if !is_valid_instance_id(&payload.id) {
        return Err(ApiError::Validation(
            "Instance id must be 1-40 alphanumeric+hyphen characters".to_string(),
        ));
    }
    let known = matches!(payload.provider_type.as_str(), "jellyfin" | "local_files");
    if !known {
        return Err(ApiError::Validation(format!(
            "Unknown provider type: {}",
            payload.provider_type
        )));
    }
    // Check for uniqueness
    // NOTE(review): check-then-upsert is racy — two concurrent creates with
    // the same id can both pass this check and the later upsert silently
    // overwrites. Probably acceptable for an admin-only endpoint, but a
    // unique insert in the repo would close the window; confirm upsert semantics.
    if state
        .provider_config_repo
        .get_by_id(&payload.id)
        .await
        .map_err(ApiError::from)?
        .is_some()
    {
        return Ok((
            StatusCode::CONFLICT,
            Json(serde_json::json!({ "error": format!("Provider instance '{}' already exists", payload.id) })),
        ).into_response());
    }
    let config_json = serde_json::to_string(&payload.config_json)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize config: {}", e)))?;
    let row = ProviderConfigRow {
        id: payload.id.clone(),
        provider_type: payload.provider_type.clone(),
        config_json: config_json.clone(),
        enabled: payload.enabled,
        updated_at: chrono::Utc::now().to_rfc3339(),
    };
    state
        .provider_config_repo
        .upsert(&row)
        .await
        .map_err(ApiError::from)?;
    // Apply the change to the running process immediately.
    rebuild_registry(&state)
        .await
        .map_err(ApiError::from)?;
    // Echo the stored row back with secret values masked.
    let response = ProviderConfigResponse {
        id: payload.id,
        provider_type: payload.provider_type,
        config_json: mask_config(&config_json),
        enabled: payload.enabled,
    };
    Ok((StatusCode::CREATED, Json(response)).into_response())
}
/// Replace config/enabled for an existing provider instance, then rebuild the
/// live registry. The id and provider type are carried over unchanged from
/// the stored row.
pub async fn update_provider(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Path(instance_id): Path<String>,
    Json(payload): Json<UpdateProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
    // UI-driven config is only permitted when the server reads config from the DB.
    if state.config.config_source != ConfigSource::Db {
        return Ok(conflict_response().into_response());
    }
    let existing = state
        .provider_config_repo
        .get_by_id(&instance_id)
        .await
        .map_err(ApiError::from)?
        .ok_or_else(|| ApiError::NotFound(format!("Provider instance '{}' not found", instance_id)))?;
    let serialized = serde_json::to_string(&payload.config_json)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize config: {}", e)))?;
    let updated_row = ProviderConfigRow {
        id: existing.id.clone(),
        provider_type: existing.provider_type.clone(),
        config_json: serialized.clone(),
        enabled: payload.enabled,
        updated_at: chrono::Utc::now().to_rfc3339(),
    };
    state
        .provider_config_repo
        .upsert(&updated_row)
        .await
        .map_err(ApiError::from)?;
    rebuild_registry(&state)
        .await
        .map_err(ApiError::from)?;
    // Return the updated row with secrets masked.
    let body = ProviderConfigResponse {
        id: existing.id,
        provider_type: existing.provider_type,
        config_json: mask_config(&serialized),
        enabled: payload.enabled,
    };
    Ok(Json(body).into_response())
}
/// Delete a provider instance and rebuild the registry. No existence check is
/// performed here; the delete is forwarded straight to the repository and a
/// 204 is returned on success.
pub async fn delete_provider(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Path(instance_id): Path<String>,
) -> Result<impl IntoResponse, ApiError> {
    if state.config.config_source != ConfigSource::Db {
        return Ok(conflict_response().into_response());
    }
    state
        .provider_config_repo
        .delete(&instance_id)
        .await
        .map_err(ApiError::from)?;
    rebuild_registry(&state)
        .await
        .map_err(ApiError::from)?;
    Ok(StatusCode::NO_CONTENT.into_response())
}
/// Dry-run connectivity test for a candidate provider config; nothing is
/// persisted and the live registry is untouched.
pub async fn test_provider(
    State(_state): State<AppState>,
    AdminUser(_user): AdminUser,
    Json(payload): Json<TestProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let cfg = &payload.config_json;
    let outcome = match payload.provider_type.as_str() {
        "jellyfin" => test_jellyfin(cfg).await,
        "local_files" => test_local_files(cfg),
        _ => TestResult {
            ok: false,
            message: "Unknown provider type".to_string(),
        },
    };
    Ok(Json(outcome))
}
/// Probe a Jellyfin server by calling `/System/Info` with the supplied API key.
///
/// Requires `base_url` and `api_key` in the config map; a trailing slash on
/// `base_url` is tolerated. The request is capped with a 10-second timeout so
/// an unreachable or black-holed host cannot hang the admin UI indefinitely.
async fn test_jellyfin(config: &HashMap<String, String>) -> TestResult {
    let Some(base_url) = config
        .get("base_url")
        .map(|u| u.trim_end_matches('/').to_string())
    else {
        return TestResult {
            ok: false,
            message: "Missing field: base_url".to_string(),
        };
    };
    let Some(api_key) = config.get("api_key").cloned() else {
        return TestResult {
            ok: false,
            message: "Missing field: api_key".to_string(),
        };
    };
    let url = format!("{}/System/Info", base_url);
    // Fix: the previous client had no timeout, so a non-responsive host would
    // stall this handler forever.
    let client = match reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(10))
        .build()
    {
        Ok(c) => c,
        Err(e) => {
            return TestResult {
                ok: false,
                message: format!("Connection failed: {}", e),
            }
        }
    };
    match client
        .get(&url)
        .header("X-Emby-Token", &api_key)
        .send()
        .await
    {
        Ok(resp) => {
            let status = resp.status();
            if status.is_success() {
                TestResult {
                    ok: true,
                    message: format!("Connected successfully (HTTP {})", status.as_u16()),
                }
            } else {
                TestResult {
                    ok: false,
                    message: format!("Jellyfin returned HTTP {}", status.as_u16()),
                }
            }
        }
        Err(e) => TestResult {
            ok: false,
            message: format!("Connection failed: {}", e),
        },
    }
}
/// Check that `files_dir` in the config names an existing directory.
fn test_local_files(config: &HashMap<String, String>) -> TestResult {
    let Some(path) = config.get("files_dir").cloned() else {
        return TestResult {
            ok: false,
            message: "Missing field: files_dir".to_string(),
        };
    };
    // Path::is_dir() is false both for missing paths and for non-directories,
    // so a single call covers both failure cases.
    let is_dir = std::path::Path::new(&path).is_dir();
    let (ok, message) = if is_dir {
        (true, format!("Directory exists: {}", path))
    } else {
        (false, format!("Path does not exist or is not a directory: {}", path))
    };
    TestResult { ok, message }
}
// ---------------------------------------------------------------------------
// NoopMediaProvider (local copy — avoids pub-ing it from main.rs)
// ---------------------------------------------------------------------------
/// Placeholder provider registered by `rebuild_registry` when no real
/// provider row is enabled; every call returns an explanatory error.
struct NoopMediaProvider;
#[async_trait::async_trait]
impl domain::IMediaProvider for NoopMediaProvider {
    fn capabilities(&self) -> domain::ProviderCapabilities {
        // Advertise no optional capabilities at all.
        domain::ProviderCapabilities {
            collections: false,
            series: false,
            genres: false,
            tags: false,
            decade: false,
            search: false,
            streaming_protocol: domain::StreamingProtocol::DirectFile,
            rescan: false,
            transcode: false,
        }
    }
    async fn fetch_items(
        &self,
        _: &domain::MediaFilter,
    ) -> domain::DomainResult<Vec<domain::MediaItem>> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }
    async fn fetch_by_id(
        &self,
        _: &domain::MediaItemId,
    ) -> domain::DomainResult<Option<domain::MediaItem>> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }
    async fn get_stream_url(
        &self,
        _: &domain::MediaItemId,
        _: &domain::StreamQuality,
    ) -> domain::DomainResult<String> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }
}

View File

@@ -1,253 +0,0 @@
//! Authentication routes
//!
//! Provides login, register, logout, token, and OIDC endpoints.
//! All authentication is JWT-based. OIDC state is stored in an encrypted cookie.
use axum::{
Router,
extract::{Json, State},
http::StatusCode,
response::IntoResponse,
routing::{get, post},
};
use crate::{
dto::{LoginRequest, RegisterRequest, TokenResponse, UserResponse},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
/// Assemble the auth router; JWT and OIDC endpoints are feature-gated.
pub fn router() -> Router<AppState> {
    let base = Router::new()
        .route("/login", post(login))
        .route("/register", post(register))
        .route("/logout", post(logout))
        .route("/me", get(me));
    #[cfg(feature = "auth-jwt")]
    let base = base.route("/token", post(get_token));
    #[cfg(feature = "auth-oidc")]
    let base = base
        .route("/login/oidc", get(oidc_login))
        .route("/callback", get(oidc_callback));
    base
}
/// Login with email + password → JWT token.
///
/// An unknown email, a password-less account, and a wrong password all map to
/// the same 401 so the endpoint does not reveal which accounts exist.
async fn login(
    State(state): State<AppState>,
    Json(payload): Json<LoginRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let invalid = || ApiError::Unauthorized("Invalid credentials".to_string());
    let user = state
        .user_service
        .find_by_email(payload.email.as_ref())
        .await?
        .ok_or_else(invalid)?;
    let hash = user.password_hash.as_deref().ok_or_else(invalid)?;
    if !infra::auth::verify_password(payload.password.as_ref(), hash) {
        return Err(invalid());
    }
    let body = TokenResponse {
        access_token: create_jwt(&user, &state)?,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
    };
    Ok((StatusCode::OK, Json(body)))
}
/// Register a new local user → JWT token (201 Created).
async fn register(
    State(state): State<AppState>,
    Json(payload): Json<RegisterRequest>,
) -> Result<impl IntoResponse, ApiError> {
    // Hash before touching the DB; the plaintext password never leaves this scope.
    let password_hash = infra::auth::hash_password(payload.password.as_ref());
    let user = state
        .user_service
        .create_local(payload.email.as_ref(), &password_hash)
        .await?;
    let body = TokenResponse {
        access_token: create_jwt(&user, &state)?,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
    };
    Ok((StatusCode::CREATED, Json(body)))
}
/// Logout — JWT is stateless, so there is nothing to revoke server-side; the
/// plain 200 simply instructs the client to drop its token.
async fn logout() -> impl IntoResponse {
    StatusCode::OK
}
/// Get the current user's profile, derived from the validated JWT.
async fn me(CurrentUser(user): CurrentUser) -> Result<impl IntoResponse, ApiError> {
    let profile = UserResponse {
        id: user.id,
        email: user.email.into_inner(),
        created_at: user.created_at,
    };
    Ok(Json(profile))
}
/// Issue a fresh JWT for the already-authenticated user (OIDC→JWT exchange
/// or token refresh).
#[cfg(feature = "auth-jwt")]
async fn get_token(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    let body = TokenResponse {
        access_token: create_jwt(&user, &state)?,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
    };
    Ok(Json(body))
}
/// Helper: sign an access JWT for `user` via the configured validator.
#[cfg(feature = "auth-jwt")]
fn create_jwt(user: &domain::User, state: &AppState) -> Result<String, ApiError> {
    let Some(validator) = state.jwt_validator.as_ref() else {
        return Err(ApiError::Internal("JWT not configured".to_string()));
    };
    validator
        .create_token(user)
        .map_err(|e| ApiError::Internal(format!("Failed to create token: {}", e)))
}
/// Fallback when the auth-jwt feature is disabled: token creation always fails.
#[cfg(not(feature = "auth-jwt"))]
fn create_jwt(_user: &domain::User, _state: &AppState) -> Result<String, ApiError> {
    Err(ApiError::Internal("JWT feature not enabled".to_string()))
}
// ============================================================================
// OIDC Routes
// ============================================================================
/// Query parameters the identity provider appends to the callback redirect.
#[cfg(feature = "auth-oidc")]
#[derive(serde::Deserialize)]
struct CallbackParams {
    // Authorization code exchanged for tokens in oidc_callback.
    code: String,
    // CSRF state echoed back; must match the value stored in the state cookie.
    state: String,
}
/// Start OIDC login: generate authorization URL and store state in encrypted cookie
#[cfg(feature = "auth-oidc")]
async fn oidc_login(
    State(state): State<AppState>,
    jar: axum_extra::extract::PrivateCookieJar,
) -> Result<impl IntoResponse, ApiError> {
    use axum::http::header;
    use axum::response::Response;
    use axum_extra::extract::cookie::{Cookie, SameSite};
    let service = state
        .oidc_service
        .as_ref()
        .ok_or(ApiError::Internal("OIDC not configured".into()))?;
    let (auth_data, oidc_state) = service.get_authorization_url();
    // Persist the per-login OIDC state client-side in an encrypted
    // (PrivateCookieJar) cookie so the callback can verify it.
    let state_json = serde_json::to_string(&oidc_state)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize OIDC state: {}", e)))?;
    // Short-lived, HttpOnly, Lax: the cookie only needs to survive the IdP round-trip.
    let cookie = Cookie::build(("oidc_state", state_json))
        .max_age(time::Duration::minutes(5))
        .http_only(true)
        .same_site(SameSite::Lax)
        .secure(state.config.secure_cookie)
        .path("/")
        .build();
    let updated_jar = jar.add(cookie);
    // Redirect to the IdP with explicit no-cache headers so browsers/proxies
    // never replay a stale authorization URL (each one carries fresh state).
    let redirect = axum::response::Redirect::to(auth_data.url.as_str()).into_response();
    let (mut parts, body) = redirect.into_parts();
    parts.headers.insert(
        header::CACHE_CONTROL,
        "no-cache, no-store, must-revalidate".parse().unwrap(),
    );
    parts
        .headers
        .insert(header::PRAGMA, "no-cache".parse().unwrap());
    parts.headers.insert(header::EXPIRES, "0".parse().unwrap());
    Ok((updated_jar, Response::from_parts(parts, body)))
}
/// Handle OIDC callback: verify state cookie, complete exchange, issue JWT, clear cookie
#[cfg(feature = "auth-oidc")]
async fn oidc_callback(
    State(state): State<AppState>,
    jar: axum_extra::extract::PrivateCookieJar,
    axum::extract::Query(params): axum::extract::Query<CallbackParams>,
) -> Result<impl IntoResponse, ApiError> {
    use infra::auth::oidc::OidcState;
    let service = state
        .oidc_service
        .as_ref()
        .ok_or(ApiError::Internal("OIDC not configured".into()))?;
    // Read and decrypt OIDC state from cookie (set by oidc_login, 5-minute TTL)
    let cookie = jar
        .get("oidc_state")
        .ok_or(ApiError::Validation("Missing OIDC state cookie".into()))?;
    let oidc_state: OidcState = serde_json::from_str(cookie.value())
        .map_err(|_| ApiError::Validation("Invalid OIDC state cookie".into()))?;
    // Verify CSRF token: the `state` query param must match the stored value
    if params.state != oidc_state.csrf_token.as_ref() {
        return Err(ApiError::Validation("Invalid CSRF token".into()));
    }
    // Complete OIDC exchange: code → tokens, with nonce + PKCE verification
    let oidc_user = service
        .resolve_callback(
            domain::AuthorizationCode::new(params.code),
            oidc_state.nonce,
            oidc_state.pkce_verifier,
        )
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;
    // First OIDC login auto-provisions a local user record
    let user = state
        .user_service
        .find_or_create(&oidc_user.subject, &oidc_user.email)
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;
    // Clear the one-shot OIDC state cookie
    let cleared_jar = jar.remove(axum_extra::extract::cookie::Cookie::from("oidc_state"));
    let token = create_jwt(&user, &state)?;
    Ok((
        cleared_jar,
        Json(TokenResponse {
            access_token: token,
            token_type: "Bearer".to_string(),
            expires_in: state.config.jwt_expiry_hours * 3600,
        }),
    ))
}

View File

@@ -0,0 +1,154 @@
use axum::{
Json,
extract::State,
http::StatusCode,
response::IntoResponse,
};
use crate::{
dto::{LoginRequest, RefreshRequest, RegisterRequest, TokenResponse, UserResponse},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
use super::{create_jwt, create_refresh_jwt};
/// Login with email + password → JWT token.
///
/// Unknown email, a password-less account, and a wrong password all yield the
/// same 401 so account existence is not leaked. With `remember_me` set, a
/// refresh token is issued alongside the access token.
pub(super) async fn login(
    State(state): State<AppState>,
    Json(payload): Json<LoginRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let invalid = || ApiError::Unauthorized("Invalid credentials".to_string());
    let user = state
        .user_service
        .find_by_email(payload.email.as_ref())
        .await?
        .ok_or_else(invalid)?;
    let hash = user.password_hash.as_deref().ok_or_else(invalid)?;
    if !infra::auth::verify_password(payload.password.as_ref(), hash) {
        return Err(invalid());
    }
    let token = create_jwt(&user, &state)?;
    let refresh_token = match payload.remember_me {
        true => Some(create_refresh_jwt(&user, &state)?),
        false => None,
    };
    // Best-effort audit entry; login must not fail if the activity log does.
    let _ = state
        .activity_log_repo
        .log("user_login", user.email.as_ref(), None)
        .await;
    let body = TokenResponse {
        access_token: token,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
        refresh_token,
    };
    Ok((StatusCode::OK, Json(body)))
}
/// Register a new local user → JWT token (201 Created).
///
/// Refuses with 403 when self-registration is disabled in config.
pub(super) async fn register(
    State(state): State<AppState>,
    Json(payload): Json<RegisterRequest>,
) -> Result<impl IntoResponse, ApiError> {
    if !state.config.allow_registration {
        return Err(ApiError::Forbidden("Registration is disabled".to_string()));
    }
    // Hash before touching the DB; the plaintext password never leaves this scope.
    let password_hash = infra::auth::hash_password(payload.password.as_ref());
    let user = state
        .user_service
        .create_local(payload.email.as_ref(), &password_hash)
        .await?;
    let body = TokenResponse {
        access_token: create_jwt(&user, &state)?,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
        refresh_token: None, // registration never issues a refresh token
    };
    Ok((StatusCode::CREATED, Json(body)))
}
/// Logout — JWT is stateless, so there is nothing to revoke server-side; the
/// plain 200 simply instructs the client to drop its token.
pub(super) async fn logout() -> impl IntoResponse {
    StatusCode::OK
}
/// Get the current user's profile, derived from the validated JWT.
pub(super) async fn me(CurrentUser(user): CurrentUser) -> Result<impl IntoResponse, ApiError> {
    let profile = UserResponse {
        id: user.id,
        email: user.email.into_inner(),
        created_at: user.created_at,
        is_admin: user.is_admin,
    };
    Ok(Json(profile))
}
/// Exchange a valid refresh token for a new access + refresh token pair
/// (refresh-token rotation: every successful call issues a fresh pair).
#[cfg(feature = "auth-jwt")]
pub(super) async fn refresh_token(
    State(state): State<AppState>,
    Json(payload): Json<RefreshRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let validator = state
        .jwt_validator
        .as_ref()
        .ok_or_else(|| ApiError::Internal("JWT not configured".to_string()))?;
    // Validation failure details go to debug logs only; the client just sees 401.
    let claims = validator
        .validate_refresh_token(&payload.refresh_token)
        .map_err(|e| {
            tracing::debug!("Refresh token validation failed: {:?}", e);
            ApiError::Unauthorized("Invalid or expired refresh token".to_string())
        })?;
    let user_id: uuid::Uuid = claims
        .sub
        .parse()
        .map_err(|_| ApiError::Unauthorized("Invalid user ID in token".to_string()))?;
    // NOTE(review): if find_by_id reports a missing user as Err, a deleted
    // account surfaces as 500 here rather than 401 — confirm the service's
    // contract and consider mapping not-found to Unauthorized.
    let user = state
        .user_service
        .find_by_id(user_id)
        .await
        .map_err(|e| ApiError::Internal(format!("Failed to fetch user: {}", e)))?;
    let access_token = create_jwt(&user, &state)?;
    let new_refresh_token = create_refresh_jwt(&user, &state)?;
    Ok(Json(TokenResponse {
        access_token,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
        refresh_token: Some(new_refresh_token),
    }))
}
/// Issue a new access JWT for the currently authenticated user (OIDC→JWT
/// exchange or token refresh); no refresh token is returned here.
#[cfg(feature = "auth-jwt")]
pub(super) async fn get_token(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    let access_token = create_jwt(&user, &state)?;
    let body = TokenResponse {
        access_token,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
        refresh_token: None,
    };
    Ok(Json(body))
}

View File

@@ -0,0 +1,67 @@
//! Authentication routes
//!
//! Provides login, register, logout, token, and OIDC endpoints.
//! All authentication is JWT-based. OIDC state is stored in an encrypted cookie.
use axum::{Router, routing::{get, post}};
use crate::{error::ApiError, state::AppState};
mod local;
mod oidc;
/// Compose the auth router from the local and OIDC handler modules; the
/// JWT and OIDC endpoints are compiled in only when their features are enabled.
pub fn router() -> Router<AppState> {
    let routes = Router::new()
        .route("/login", post(local::login))
        .route("/register", post(local::register))
        .route("/logout", post(local::logout))
        .route("/me", get(local::me));
    #[cfg(feature = "auth-jwt")]
    let routes = routes
        .route("/token", post(local::get_token))
        .route("/refresh", post(local::refresh_token));
    #[cfg(feature = "auth-oidc")]
    let routes = routes
        .route("/login/oidc", get(oidc::oidc_login))
        .route("/callback", get(oidc::oidc_callback));
    routes
}
/// Helper: sign an access JWT for `user` via the configured validator.
#[cfg(feature = "auth-jwt")]
pub(super) fn create_jwt(user: &domain::User, state: &AppState) -> Result<String, ApiError> {
    let Some(validator) = state.jwt_validator.as_ref() else {
        return Err(ApiError::Internal("JWT not configured".to_string()));
    };
    validator
        .create_token(user)
        .map_err(|e| ApiError::Internal(format!("Failed to create token: {}", e)))
}
/// Fallback when the auth-jwt feature is disabled: token creation always fails.
#[cfg(not(feature = "auth-jwt"))]
pub(super) fn create_jwt(_user: &domain::User, _state: &AppState) -> Result<String, ApiError> {
    Err(ApiError::Internal("JWT feature not enabled".to_string()))
}
/// Helper: sign a refresh JWT for `user` via the configured validator.
#[cfg(feature = "auth-jwt")]
pub(super) fn create_refresh_jwt(user: &domain::User, state: &AppState) -> Result<String, ApiError> {
    let Some(validator) = state.jwt_validator.as_ref() else {
        return Err(ApiError::Internal("JWT not configured".to_string()));
    };
    validator
        .create_refresh_token(user)
        .map_err(|e| ApiError::Internal(format!("Failed to create refresh token: {}", e)))
}
/// Fallback when the auth-jwt feature is disabled: refresh tokens cannot be minted.
#[cfg(not(feature = "auth-jwt"))]
pub(super) fn create_refresh_jwt(_user: &domain::User, _state: &AppState) -> Result<String, ApiError> {
    Err(ApiError::Internal("JWT feature not enabled".to_string()))
}

View File

@@ -0,0 +1,124 @@
#[cfg(feature = "auth-oidc")]
use axum::{
Json,
extract::State,
http::header,
response::{IntoResponse, Response},
};
#[cfg(feature = "auth-oidc")]
use crate::{
dto::TokenResponse,
error::ApiError,
state::AppState,
};
#[cfg(feature = "auth-oidc")]
use super::create_jwt;
/// Query parameters the identity provider appends to the callback redirect.
#[cfg(feature = "auth-oidc")]
#[derive(serde::Deserialize)]
pub(super) struct CallbackParams {
    /// Authorization code exchanged for tokens in `oidc_callback`.
    pub code: String,
    /// CSRF state echoed back; must match the value stored in the state cookie.
    pub state: String,
}
/// Start OIDC login: generate authorization URL and store state in encrypted cookie
#[cfg(feature = "auth-oidc")]
pub(super) async fn oidc_login(
    State(state): State<AppState>,
    jar: axum_extra::extract::PrivateCookieJar,
) -> Result<impl IntoResponse, ApiError> {
    use axum_extra::extract::cookie::{Cookie, SameSite};
    let service = state
        .oidc_service
        .as_ref()
        .ok_or(ApiError::Internal("OIDC not configured".into()))?;
    let (auth_data, oidc_state) = service.get_authorization_url();
    // Persist the per-login OIDC state client-side in an encrypted
    // (PrivateCookieJar) cookie so the callback can verify it.
    let state_json = serde_json::to_string(&oidc_state)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize OIDC state: {}", e)))?;
    // Short-lived, HttpOnly, Lax: the cookie only needs to survive the IdP round-trip.
    let cookie = Cookie::build(("oidc_state", state_json))
        .max_age(time::Duration::minutes(5))
        .http_only(true)
        .same_site(SameSite::Lax)
        .secure(state.config.secure_cookie)
        .path("/")
        .build();
    let updated_jar = jar.add(cookie);
    // Redirect to the IdP with explicit no-cache headers so browsers/proxies
    // never replay a stale authorization URL (each one carries fresh state).
    let redirect = axum::response::Redirect::to(auth_data.url.as_str()).into_response();
    let (mut parts, body) = redirect.into_parts();
    parts.headers.insert(
        header::CACHE_CONTROL,
        "no-cache, no-store, must-revalidate".parse().unwrap(),
    );
    parts
        .headers
        .insert(header::PRAGMA, "no-cache".parse().unwrap());
    parts.headers.insert(header::EXPIRES, "0".parse().unwrap());
    Ok((updated_jar, Response::from_parts(parts, body)))
}
/// Handle OIDC callback: verify state cookie, complete exchange, issue JWT, clear cookie.
#[cfg(feature = "auth-oidc")]
pub(super) async fn oidc_callback(
    State(state): State<AppState>,
    jar: axum_extra::extract::PrivateCookieJar,
    axum::extract::Query(params): axum::extract::Query<CallbackParams>,
) -> Result<impl IntoResponse, ApiError> {
    use infra::auth::oidc::OidcState;
    let service = state
        .oidc_service
        .as_ref()
        .ok_or(ApiError::Internal("OIDC not configured".into()))?;
    // Read and decrypt OIDC state from cookie (set by oidc_login, 5-minute TTL)
    let cookie = jar
        .get("oidc_state")
        .ok_or(ApiError::Validation("Missing OIDC state cookie".into()))?;
    let oidc_state: OidcState = serde_json::from_str(cookie.value())
        .map_err(|_| ApiError::Validation("Invalid OIDC state cookie".into()))?;
    // Verify CSRF token: the `state` query param must match the stored value
    if params.state != oidc_state.csrf_token.as_ref() {
        return Err(ApiError::Validation("Invalid CSRF token".into()));
    }
    // Complete OIDC exchange: code → tokens, with nonce + PKCE verification
    let oidc_user = service
        .resolve_callback(
            domain::AuthorizationCode::new(params.code),
            oidc_state.nonce,
            oidc_state.pkce_verifier,
        )
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;
    // First OIDC login auto-provisions a local user record
    let user = state
        .user_service
        .find_or_create(&oidc_user.subject, &oidc_user.email)
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;
    // Clear the one-shot OIDC state cookie
    let cleared_jar = jar.remove(axum_extra::extract::cookie::Cookie::from("oidc_state"));
    let token = create_jwt(&user, &state)?;
    Ok((
        cleared_jar,
        Json(TokenResponse {
            access_token: token,
            token_type: "Bearer".to_string(),
            expires_in: state.config.jwt_expiry_hours * 3600,
            // Fix: TokenResponse gained a `refresh_token` field (every other
            // constructor in auth/local.rs sets it); it was missing here,
            // which fails to compile. OIDC logins issue no refresh token.
            refresh_token: None,
        }),
    ))
}

View File

@@ -1,285 +0,0 @@
//! Channel routes
//!
//! CRUD + schedule generation require authentication (Bearer JWT).
//! Viewing endpoints (list, now, epg, stream) are intentionally public so the
//! TV page works without login.
use axum::{
Json, Router,
extract::{Path, Query, State},
http::StatusCode,
response::{IntoResponse, Redirect, Response},
routing::{get, post},
};
use chrono::{DateTime, Utc};
use serde::Deserialize;
use uuid::Uuid;
use domain::{DomainError, ScheduleEngineService};
use crate::{
dto::{
ChannelResponse, CreateChannelRequest, CurrentBroadcastResponse, ScheduleResponse,
ScheduledSlotResponse, UpdateChannelRequest,
},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
/// Channel router: viewing endpoints are public; CRUD and schedule handlers
/// enforce authentication themselves via the `CurrentUser` extractor.
pub fn router() -> Router<AppState> {
    Router::new()
        // Public viewing endpoints
        .route("/{id}/now", get(get_current_broadcast))
        .route("/{id}/epg", get(get_epg))
        .route("/{id}/stream", get(get_stream))
        // CRUD
        .route("/", get(list_channels).post(create_channel))
        .route(
            "/{id}",
            get(get_channel).put(update_channel).delete(delete_channel),
        )
        // Schedule generation + retrieval
        .route(
            "/{id}/schedule",
            post(generate_schedule).get(get_active_schedule),
        )
}
// ============================================================================
// Channel CRUD
// ============================================================================
async fn list_channels(
State(state): State<AppState>,
) -> Result<impl IntoResponse, ApiError> {
let channels = state.channel_service.find_all().await?;
let response: Vec<ChannelResponse> = channels.into_iter().map(Into::into).collect();
Ok(Json(response))
}
/// POST / — create a channel owned by the authenticated user.
async fn create_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Json(payload): Json<CreateChannelRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let created = state
        .channel_service
        .create(user.id, &payload.name, &payload.timezone)
        .await?;
    // `create` only accepts name + timezone, so an optional description is
    // persisted with a follow-up update.
    let channel = match payload.description {
        None => created,
        Some(desc) => {
            let mut with_desc = created;
            with_desc.description = Some(desc);
            state.channel_service.update(with_desc).await?
        }
    };
    Ok((StatusCode::CREATED, Json(ChannelResponse::from(channel))))
}
/// GET /{id} — fetch one channel; only the owner may read it here.
async fn get_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    let found = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&found, user.id)?;
    Ok(Json(ChannelResponse::from(found)))
}
/// PUT /{id} — partial update: only fields present in the payload are overwritten.
async fn update_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
    Json(payload): Json<UpdateChannelRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let mut ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    // Absent fields keep their current values.
    if let Some(n) = payload.name {
        ch.name = n;
    }
    if let Some(d) = payload.description {
        ch.description = Some(d);
    }
    if let Some(t) = payload.timezone {
        ch.timezone = t;
    }
    if let Some(cfg) = payload.schedule_config {
        ch.schedule_config = cfg;
    }
    if let Some(policy) = payload.recycle_policy {
        ch.recycle_policy = policy;
    }
    ch.updated_at = Utc::now();
    let saved = state.channel_service.update(ch).await?;
    Ok(Json(ChannelResponse::from(saved)))
}
/// DELETE /{id} — remove a channel.
async fn delete_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    // ChannelService::delete enforces ownership internally
    state.channel_service.delete(channel_id, user.id).await?;
    Ok(StatusCode::NO_CONTENT)
}
// ============================================================================
// Schedule generation + retrieval
// ============================================================================
/// Trigger 48-hour schedule generation for a channel, starting from now.
/// Replaces any existing schedule for the same window.
async fn generate_schedule(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    // Only the channel owner may (re)generate its schedule.
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    let generated = state
        .schedule_engine
        .generate_schedule(channel_id, Utc::now())
        .await?;
    Ok((StatusCode::CREATED, Json(ScheduleResponse::from(generated))))
}
/// Return the currently active 48-hour schedule for a channel.
/// 404 if no schedule has been generated yet — call POST /:id/schedule first.
async fn get_active_schedule(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    let Some(active) = state
        .schedule_engine
        .get_active_schedule(channel_id, Utc::now())
        .await?
    else {
        return Err(DomainError::NoActiveSchedule(channel_id).into());
    };
    Ok(Json(ScheduleResponse::from(active)))
}
// ============================================================================
// Live broadcast endpoints
// ============================================================================
/// What is currently playing right now on this channel.
/// Returns 204 No Content when the channel is in a gap between blocks (no-signal).
async fn get_current_broadcast(
    State(state): State<AppState>,
    Path(channel_id): Path<Uuid>,
) -> Result<Response, ApiError> {
    // Existence check only — this endpoint is public, so no ownership gate.
    let _channel = state.channel_service.find_by_id(channel_id).await?;
    let now = Utc::now();
    let schedule = state
        .schedule_engine
        .get_active_schedule(channel_id, now)
        .await?
        .ok_or(DomainError::NoActiveSchedule(channel_id))?;
    // A schedule can have gaps between blocks: map "nothing playing" to 204.
    match ScheduleEngineService::get_current_broadcast(&schedule, now) {
        None => Ok(StatusCode::NO_CONTENT.into_response()),
        Some(broadcast) => Ok(Json(CurrentBroadcastResponse {
            slot: broadcast.slot.into(),
            // How far into the slot playback currently is — presumably seconds
            // since slot start; confirm against the domain type.
            offset_secs: broadcast.offset_secs,
        })
        .into_response()),
    }
}
/// EPG: return scheduled slots that overlap a time window.
///
/// Query params (both RFC3339, both optional):
/// - `from` — start of window (default: now)
/// - `until` — end of window (default: now + 4 hours)
#[derive(Debug, Deserialize)]
struct EpgQuery {
    // RFC3339 start of window; `get_epg` defaults this to "now" when absent.
    from: Option<String>,
    // RFC3339 end of window; `get_epg` defaults this to now + 4 hours when absent.
    until: Option<String>,
}
async fn get_epg(
State(state): State<AppState>,
Path(channel_id): Path<Uuid>,
Query(params): Query<EpgQuery>,
) -> Result<impl IntoResponse, ApiError> {
let _channel = state.channel_service.find_by_id(channel_id).await?;
let now = Utc::now();
let from = parse_optional_dt(params.from, now)?;
let until = parse_optional_dt(params.until, now + chrono::Duration::hours(4))?;
if until <= from {
return Err(ApiError::validation("'until' must be after 'from'"));
}
let schedule = state
.schedule_engine
.get_active_schedule(channel_id, from)
.await?
.ok_or(DomainError::NoActiveSchedule(channel_id))?;
let slots: Vec<ScheduledSlotResponse> = ScheduleEngineService::get_epg(&schedule, from, until)
.into_iter()
.cloned()
.map(Into::into)
.collect();
Ok(Json(slots))
}
/// Redirect to the stream URL for whatever is currently playing.
/// Returns 307 Temporary Redirect so the client fetches from the media provider directly.
/// Returns 204 No Content when the channel is in a gap (no-signal).
async fn get_stream(
    State(state): State<AppState>,
    Path(channel_id): Path<Uuid>,
) -> Result<Response, ApiError> {
    // Existence check only — streaming is public.
    let _channel = state.channel_service.find_by_id(channel_id).await?;
    let now = Utc::now();
    let schedule = state
        .schedule_engine
        .get_active_schedule(channel_id, now)
        .await?
        .ok_or(DomainError::NoActiveSchedule(channel_id))?;
    // In a gap between blocks there is nothing to stream: signal with 204.
    let Some(broadcast) = ScheduleEngineService::get_current_broadcast(&schedule, now) else {
        return Ok(StatusCode::NO_CONTENT.into_response());
    };
    let url = state
        .schedule_engine
        .get_stream_url(&broadcast.slot.item.id)
        .await?;
    Ok(Redirect::temporary(&url).into_response())
}
// ============================================================================
// Helpers
// ============================================================================
/// Forbid access unless `user_id` owns `channel`.
fn require_owner(channel: &domain::Channel, user_id: Uuid) -> Result<(), ApiError> {
    if channel.owner_id == user_id {
        return Ok(());
    }
    Err(ApiError::Forbidden("You don't own this channel".into()))
}
/// Parse an optional RFC3339 timestamp, falling back to `default` when absent.
/// Rejects malformed input with a 400 validation error.
fn parse_optional_dt(s: Option<String>, default: DateTime<Utc>) -> Result<DateTime<Utc>, ApiError> {
    let Some(raw) = s else { return Ok(default) };
    DateTime::parse_from_rfc3339(&raw)
        .map(|dt| dt.with_timezone(&Utc))
        .map_err(|_| ApiError::validation(format!("Invalid datetime '{}' — use RFC3339", raw)))
}

View File

@@ -0,0 +1,192 @@
use axum::{
Json,
extract::{Path, Query, State},
http::{HeaderMap, StatusCode},
response::{IntoResponse, Redirect, Response},
};
use chrono::Utc;
use serde::Deserialize;
use uuid::Uuid;
use domain::{DomainError, ScheduleEngineService, StreamQuality};
use crate::{
dto::{CurrentBroadcastResponse, ScheduledSlotResponse},
error::ApiError,
extractors::OptionalCurrentUser,
state::AppState,
};
use super::{check_access, parse_optional_dt};
/// Extract the channel password supplied via the `X-Channel-Password` header, if any.
fn channel_password(headers: &HeaderMap) -> Option<&str> {
    let raw = headers.get("X-Channel-Password")?;
    raw.to_str().ok()
}
/// Extract the block password supplied via the `X-Block-Password` header, if any.
fn block_password(headers: &HeaderMap) -> Option<&str> {
    let raw = headers.get("X-Block-Password")?;
    raw.to_str().ok()
}
/// What is currently playing right now on this channel.
/// Returns 204 No Content when the channel is in a gap between blocks (no-signal).
///
/// Access is gated by the channel's `AccessMode` (password may be supplied via
/// the `X-Channel-Password` header). The response also carries the access mode
/// of the block the current slot came from, so the client knows whether it must
/// prompt for a block password before calling the stream endpoint.
pub(super) async fn get_current_broadcast(
    State(state): State<AppState>,
    Path(channel_id): Path<Uuid>,
    OptionalCurrentUser(user): OptionalCurrentUser,
    headers: HeaderMap,
) -> Result<Response, ApiError> {
    let channel = state.channel_service.find_by_id(channel_id).await?;
    check_access(
        &channel.access_mode,
        channel.access_password_hash.as_deref(),
        user.as_ref(),
        channel.owner_id,
        channel_password(&headers),
    )?;
    let now = Utc::now();
    let schedule = state
        .schedule_engine
        .get_active_schedule(channel_id, now)
        .await?
        .ok_or(DomainError::NoActiveSchedule(channel_id))?;
    match ScheduleEngineService::get_current_broadcast(&schedule, now) {
        None => Ok(StatusCode::NO_CONTENT.into_response()),
        Some(broadcast) => {
            // Access mode of the slot's source block; defaults when the block
            // no longer exists in the schedule config.
            let block_access_mode = channel
                .schedule_config
                .all_blocks()
                .find(|b| b.id == broadcast.slot.source_block_id)
                .map(|b| b.access_mode.clone())
                .unwrap_or_default();
            // Fix: the value was previously cloned here even though it is moved
            // exactly once and never used again — the clone was dead work.
            Ok(Json(CurrentBroadcastResponse {
                block_access_mode,
                slot: ScheduledSlotResponse::with_block_access(broadcast.slot, &channel),
                offset_secs: broadcast.offset_secs,
            })
            .into_response())
        }
    }
}
/// EPG: return scheduled slots that overlap a time window.
///
/// Query params (both RFC3339, both optional):
/// - `from` — start of window (default: now)
/// - `until` — end of window (default: now + 4 hours)
#[derive(Debug, Deserialize)]
pub(super) struct EpgQuery {
    // RFC3339 start of window; `get_epg` defaults this to "now" when absent.
    from: Option<String>,
    // RFC3339 end of window; `get_epg` defaults this to now + 4 hours when absent.
    until: Option<String>,
}
/// GET /{id}/epg — scheduled slots overlapping the requested window,
/// gated by the channel's access mode.
pub(super) async fn get_epg(
    State(state): State<AppState>,
    Path(channel_id): Path<Uuid>,
    OptionalCurrentUser(user): OptionalCurrentUser,
    headers: HeaderMap,
    Query(params): Query<EpgQuery>,
) -> Result<impl IntoResponse, ApiError> {
    let channel = state.channel_service.find_by_id(channel_id).await?;
    // Channel-level access gate before exposing any schedule data.
    check_access(
        &channel.access_mode,
        channel.access_password_hash.as_deref(),
        user.as_ref(),
        channel.owner_id,
        channel_password(&headers),
    )?;
    let now = Utc::now();
    let window_start = parse_optional_dt(params.from, now)?;
    let window_end = parse_optional_dt(params.until, now + chrono::Duration::hours(4))?;
    if window_end <= window_start {
        return Err(ApiError::validation("'until' must be after 'from'"));
    }
    let schedule = state
        .schedule_engine
        .get_active_schedule(channel_id, window_start)
        .await?
        .ok_or(DomainError::NoActiveSchedule(channel_id))?;
    // Annotate each slot with its source block's access mode for the client.
    let mut slots: Vec<ScheduledSlotResponse> = Vec::new();
    for slot in ScheduleEngineService::get_epg(&schedule, window_start, window_end) {
        slots.push(ScheduledSlotResponse::with_block_access(slot.clone(), &channel));
    }
    Ok(Json(slots))
}
/// Redirect to the stream URL for whatever is currently playing.
/// Returns 307 Temporary Redirect so the client fetches from the media provider directly.
/// Returns 204 No Content when the channel is in a gap (no-signal).
// NOTE(review): the doc comment above describes the `get_stream` handler below;
// it sits here because this struct is that handler's query type.
#[derive(Debug, Deserialize)]
pub(super) struct StreamQuery {
    /// "direct" | bitrate in bps as string (e.g. "8000000"). Defaults to "direct".
    quality: Option<String>,
}
pub(super) async fn get_stream(
State(state): State<AppState>,
Path(channel_id): Path<Uuid>,
OptionalCurrentUser(user): OptionalCurrentUser,
headers: HeaderMap,
Query(query): Query<StreamQuery>,
) -> Result<Response, ApiError> {
let channel = state.channel_service.find_by_id(channel_id).await?;
check_access(
&channel.access_mode,
channel.access_password_hash.as_deref(),
user.as_ref(),
channel.owner_id,
channel_password(&headers),
)?;
let now = Utc::now();
let schedule = state
.schedule_engine
.get_active_schedule(channel_id, now)
.await?
.ok_or(DomainError::NoActiveSchedule(channel_id))?;
let broadcast = match ScheduleEngineService::get_current_broadcast(&schedule, now) {
None => return Ok(StatusCode::NO_CONTENT.into_response()),
Some(b) => b,
};
// Block-level access check
if let Some(block) = channel
.schedule_config
.all_blocks()
.find(|b| b.id == broadcast.slot.source_block_id)
{
check_access(
&block.access_mode,
block.access_password_hash.as_deref(),
user.as_ref(),
channel.owner_id,
block_password(&headers),
)?;
}
let stream_quality = match query.quality.as_deref() {
Some("direct") | None => StreamQuality::Direct,
Some(bps_str) => StreamQuality::Transcode(bps_str.parse::<u32>().unwrap_or(40_000_000)),
};
let url = state
.schedule_engine
.get_stream_url(&broadcast.slot.item.id, &stream_quality)
.await?;
Ok(Redirect::temporary(&url).into_response())
}

View File

@@ -0,0 +1,72 @@
use axum::{
Json,
extract::{Path, State},
http::StatusCode,
response::IntoResponse,
};
use uuid::Uuid;
use crate::{
dto::{ChannelResponse, ConfigSnapshotResponse, PatchSnapshotRequest},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
use super::require_owner;
pub(super) async fn list_config_history(
State(state): State<AppState>,
CurrentUser(user): CurrentUser,
Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
let channel = state.channel_service.find_by_id(channel_id).await?;
require_owner(&channel, user.id)?;
let snapshots = state.channel_service.list_config_snapshots(channel_id).await?;
let response: Vec<ConfigSnapshotResponse> = snapshots.into_iter().map(Into::into).collect();
Ok(Json(response))
}
/// PATCH /{id}/config/history/{snap_id} — relabel a snapshot (owner only).
/// 404 when the snapshot does not exist for this channel.
pub(super) async fn patch_config_snapshot(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, snap_id)): Path<(Uuid, Uuid)>,
    Json(payload): Json<PatchSnapshotRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    match state
        .channel_service
        .patch_config_snapshot_label(channel_id, snap_id, payload.label)
        .await?
    {
        Some(updated) => Ok(Json(ConfigSnapshotResponse::from(updated))),
        None => Err(ApiError::NotFound("Snapshot not found".into())),
    }
}
/// POST /{id}/config/history/{snap_id}/restore — restore a config snapshot (owner only).
pub(super) async fn restore_config_snapshot(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, snap_id)): Path<(Uuid, Uuid)>,
) -> Result<impl IntoResponse, ApiError> {
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    // The service reports a missing snapshot as ChannelNotFound; translate that
    // into a snapshot-specific 404 and pass every other error through unchanged.
    let restored = match state
        .channel_service
        .restore_config_snapshot(channel_id, snap_id)
        .await
    {
        Ok(c) => c,
        Err(domain::DomainError::ChannelNotFound(_)) => {
            return Err(ApiError::NotFound("Snapshot not found".into()));
        }
        Err(other) => return Err(ApiError::from(other)),
    };
    // Activity logging is best-effort; a logging failure must not fail the restore.
    let _ = state
        .activity_log_repo
        .log("config_restored", &snap_id.to_string(), Some(channel_id))
        .await;
    Ok((StatusCode::OK, Json(ChannelResponse::from(restored))))
}

View File

@@ -0,0 +1,162 @@
use axum::{
Json,
extract::{Path, State},
http::StatusCode,
response::IntoResponse,
};
use chrono::Utc;
use domain;
use uuid::Uuid;
use crate::{
dto::{ChannelResponse, CreateChannelRequest, UpdateChannelRequest},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
use super::require_owner;
pub(super) async fn list_channels(
State(state): State<AppState>,
) -> Result<impl IntoResponse, ApiError> {
let channels = state.channel_service.find_all().await?;
let response: Vec<ChannelResponse> = channels.into_iter().map(Into::into).collect();
Ok(Json(response))
}
/// POST / — create a channel owned by the authenticated user.
///
/// `ChannelService::create` only accepts name + timezone; every other optional
/// request field is applied afterwards and persisted with a single follow-up
/// `update` call (tracked via `changed` to avoid a pointless write).
pub(super) async fn create_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Json(payload): Json<CreateChannelRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let mut channel = state
        .channel_service
        .create(user.id, &payload.name, &payload.timezone)
        .await?;
    let mut changed = false;
    if let Some(desc) = payload.description {
        channel.description = Some(desc);
        changed = true;
    }
    if let Some(mode) = payload.access_mode {
        channel.access_mode = mode;
        changed = true;
    }
    // An empty password string is ignored rather than hashed.
    if let Some(pw) = payload.access_password.as_deref().filter(|p| !p.is_empty()) {
        channel.access_password_hash = Some(infra::auth::hash_password(pw));
        changed = true;
    }
    if let Some(url) = payload.webhook_url {
        channel.webhook_url = Some(url);
        changed = true;
    }
    if let Some(interval) = payload.webhook_poll_interval_secs {
        channel.webhook_poll_interval_secs = interval;
        changed = true;
    }
    if let Some(tmpl) = payload.webhook_body_template {
        channel.webhook_body_template = Some(tmpl);
        changed = true;
    }
    if let Some(headers) = payload.webhook_headers {
        channel.webhook_headers = Some(headers);
        changed = true;
    }
    if changed {
        channel = state.channel_service.update(channel).await?;
    }
    // Event broadcast + activity log are best-effort: send/log errors are ignored.
    let _ = state.event_tx.send(domain::DomainEvent::ChannelCreated { channel: channel.clone() });
    let _ = state.activity_log_repo.log("channel_created", &channel.name, Some(channel.id)).await;
    Ok((StatusCode::CREATED, Json(ChannelResponse::from(channel))))
}
/// GET /{id} — fetch one channel; only the owner may read it here.
pub(super) async fn get_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    let found = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&found, user.id)?;
    Ok(Json(ChannelResponse::from(found)))
}
/// PUT /{id} — partial update of a channel (owner only).
///
/// Fields absent from the payload keep their current values. Note the two
/// different clearing semantics in use:
/// - `access_password`: an explicit empty string clears the stored hash.
/// - webhook fields: the inner value is assigned directly (`channel.webhook_url
///   = url`), so the payload fields are presumably nested `Option`s that allow
///   explicit clearing — confirm against the DTO definitions.
pub(super) async fn update_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
    Json(payload): Json<UpdateChannelRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let mut channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;
    if let Some(name) = payload.name {
        channel.name = name;
    }
    if let Some(desc) = payload.description {
        channel.description = Some(desc);
    }
    if let Some(tz) = payload.timezone {
        channel.timezone = tz;
    }
    // Schedule config arrives as a DTO and is converted to the domain type.
    if let Some(sc) = payload.schedule_config {
        channel.schedule_config = domain::ScheduleConfig::from(sc);
    }
    if let Some(rp) = payload.recycle_policy {
        channel.recycle_policy = rp;
    }
    if let Some(auto) = payload.auto_schedule {
        channel.auto_schedule = auto;
    }
    if let Some(mode) = payload.access_mode {
        channel.access_mode = mode;
    }
    // Empty string clears the password; anything else is re-hashed.
    if let Some(pw) = payload.access_password {
        if pw.is_empty() {
            channel.access_password_hash = None;
        } else {
            channel.access_password_hash = Some(infra::auth::hash_password(&pw));
        }
    }
    if let Some(logo) = payload.logo {
        channel.logo = logo;
    }
    if let Some(pos) = payload.logo_position {
        channel.logo_position = pos;
    }
    // Opacity is clamped into the valid [0, 1] range rather than rejected.
    if let Some(opacity) = payload.logo_opacity {
        channel.logo_opacity = opacity.clamp(0.0, 1.0);
    }
    if let Some(url) = payload.webhook_url {
        channel.webhook_url = url;
    }
    if let Some(interval) = payload.webhook_poll_interval_secs {
        channel.webhook_poll_interval_secs = interval;
    }
    if let Some(tmpl) = payload.webhook_body_template {
        channel.webhook_body_template = tmpl;
    }
    if let Some(headers) = payload.webhook_headers {
        channel.webhook_headers = headers;
    }
    channel.updated_at = Utc::now();
    let channel = state.channel_service.update(channel).await?;
    // Event broadcast + activity log are best-effort: send/log errors are ignored.
    let _ = state.event_tx.send(domain::DomainEvent::ChannelUpdated { channel: channel.clone() });
    let _ = state.activity_log_repo.log("channel_updated", &channel.name, Some(channel.id)).await;
    Ok(Json(ChannelResponse::from(channel)))
}
/// DELETE /{id} — remove a channel, then notify listeners.
pub(super) async fn delete_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    // ChannelService::delete enforces ownership internally
    state.channel_service.delete(channel_id, user.id).await?;
    // Event broadcast + activity log are best-effort: errors are ignored.
    let _ = state.event_tx.send(domain::DomainEvent::ChannelDeleted { channel_id });
    let id_str = channel_id.to_string();
    let _ = state
        .activity_log_repo
        .log("channel_deleted", &id_str, Some(channel_id))
        .await;
    Ok(StatusCode::NO_CONTENT)
}

View File

@@ -0,0 +1,117 @@
//! Channel routes
//!
//! CRUD + schedule generation require authentication (Bearer JWT).
//! Viewing endpoints (list, now, epg, stream) are intentionally public so the
//! TV page works without login.
use axum::{Router, routing::{get, post}};
use chrono::{DateTime, Utc};
use uuid::Uuid;
use domain::{AccessMode, User};
use crate::{error::ApiError, state::AppState};
mod broadcast;
mod config_history;
mod crud;
mod schedule;
/// Assemble the channel router from its submodules.
///
/// - `crud` / `schedule` / `config_history` handlers authenticate via
///   `CurrentUser` and check ownership.
/// - `broadcast` handlers use `OptionalCurrentUser` and gate per-channel /
///   per-block access via `check_access`.
pub fn router() -> Router<AppState> {
    Router::new()
        // CRUD
        .route("/", get(crud::list_channels).post(crud::create_channel))
        .route(
            "/{id}",
            get(crud::get_channel).put(crud::update_channel).delete(crud::delete_channel),
        )
        // Schedule generation + history (owner only)
        .route(
            "/{id}/schedule",
            post(schedule::generate_schedule).get(schedule::get_active_schedule),
        )
        .route("/{id}/schedule/history", get(schedule::list_schedule_history))
        .route(
            "/{id}/schedule/history/{gen_id}",
            get(schedule::get_schedule_history_entry),
        )
        .route(
            "/{id}/schedule/history/{gen_id}/rollback",
            post(schedule::rollback_schedule),
        )
        // Live viewing endpoints (access-mode gated, no hard auth requirement)
        .route("/{id}/now", get(broadcast::get_current_broadcast))
        .route("/{id}/epg", get(broadcast::get_epg))
        .route("/{id}/stream", get(broadcast::get_stream))
        // Config snapshot history (owner only)
        .route(
            "/{id}/config/history",
            get(config_history::list_config_history),
        )
        .route(
            "/{id}/config/history/{snap_id}",
            axum::routing::patch(config_history::patch_config_snapshot),
        )
        .route(
            "/{id}/config/history/{snap_id}/restore",
            post(config_history::restore_config_snapshot),
        )
}
// ============================================================================
// Shared helpers
// ============================================================================
/// Forbid access unless `user_id` owns `channel`.
pub(super) fn require_owner(channel: &domain::Channel, user_id: Uuid) -> Result<(), ApiError> {
    if user_id == channel.owner_id {
        Ok(())
    } else {
        Err(ApiError::Forbidden("You don't own this channel".into()))
    }
}
/// Gate access to a channel or block based on its `AccessMode`.
///
/// The owner bypasses password protection on their own channel; everyone else
/// must supply a password (verified against the stored hash) for
/// `PasswordProtected`, a session for `AccountRequired`, and is rejected
/// outright for `OwnerOnly`.
pub(super) fn check_access(
    mode: &AccessMode,
    password_hash: Option<&str>,
    user: Option<&User>,
    owner_id: Uuid,
    supplied_password: Option<&str>,
) -> Result<(), ApiError> {
    let is_owner = user.map(|u| u.id) == Some(owner_id);
    match mode {
        AccessMode::Public => Ok(()),
        AccessMode::PasswordProtected => {
            // Owner always has access to their own channel without needing the password
            if is_owner {
                return Ok(());
            }
            let hash = password_hash.ok_or(ApiError::PasswordRequired)?;
            let supplied = supplied_password.unwrap_or("").trim();
            // Short-circuit: verification only runs on a non-empty password.
            if supplied.is_empty() || !infra::auth::verify_password(supplied, hash) {
                return Err(ApiError::PasswordRequired);
            }
            Ok(())
        }
        AccessMode::AccountRequired => {
            if user.is_none() {
                return Err(ApiError::AuthRequired);
            }
            Ok(())
        }
        AccessMode::OwnerOnly => {
            if is_owner {
                Ok(())
            } else {
                Err(ApiError::Forbidden("owner only".into()))
            }
        }
    }
}
/// Parse an optional RFC3339 timestamp, falling back to `default` when absent.
/// Rejects malformed input with a 400 validation error.
pub(super) fn parse_optional_dt(
    s: Option<String>,
    default: DateTime<Utc>,
) -> Result<DateTime<Utc>, ApiError> {
    s.map_or(Ok(default), |raw| {
        DateTime::parse_from_rfc3339(&raw)
            .map(|dt| dt.with_timezone(&Utc))
            .map_err(|_| ApiError::validation(format!("Invalid datetime '{}' — use RFC3339", raw)))
    })
}

View File

@@ -0,0 +1,134 @@
use axum::{
Json,
extract::{Path, State},
http::StatusCode,
response::IntoResponse,
};
use chrono::Utc;
use uuid::Uuid;
use domain::{self, DomainError};
use crate::{
dto::{ScheduleHistoryEntry, ScheduleResponse},
error::ApiError,
extractors::CurrentUser,
state::AppState,
};
use super::require_owner;
/// Trigger 7-day schedule generation for a channel, starting from now.
/// Replaces any existing schedule for the same window.
pub(super) async fn generate_schedule(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    // Only the channel owner may (re)generate its schedule.
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    let generated = state
        .schedule_engine
        .generate_schedule(channel_id, Utc::now())
        .await?;
    // Notify listeners and record activity; both are best-effort.
    let _ = state.event_tx.send(domain::DomainEvent::ScheduleGenerated {
        channel_id,
        schedule: generated.clone(),
    });
    let detail = format!("{} slots", generated.slots.len());
    let _ = state
        .activity_log_repo
        .log("schedule_generated", &detail, Some(channel_id))
        .await;
    Ok((StatusCode::CREATED, Json(ScheduleResponse::from(generated))))
}
/// Return the currently active 7-day schedule for a channel.
/// 404 if no schedule has been generated yet — call POST /:id/schedule first.
pub(super) async fn get_active_schedule(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    let Some(active) = state
        .schedule_engine
        .get_active_schedule(channel_id, Utc::now())
        .await?
    else {
        return Err(DomainError::NoActiveSchedule(channel_id).into());
    };
    Ok(Json(ScheduleResponse::from(active)))
}
/// List all schedule generations for a channel, newest first.
/// Returns lightweight entries (no slots).
pub(super) async fn list_schedule_history(
State(state): State<AppState>,
CurrentUser(user): CurrentUser,
Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
let channel = state.channel_service.find_by_id(channel_id).await?;
require_owner(&channel, user.id)?;
let history = state.schedule_engine.list_schedule_history(channel_id).await?;
let entries: Vec<ScheduleHistoryEntry> = history.into_iter().map(Into::into).collect();
Ok(Json(entries))
}
/// Fetch a single historical schedule with all its slots.
pub(super) async fn get_schedule_history_entry(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, gen_id)): Path<(Uuid, Uuid)>,
) -> Result<impl IntoResponse, ApiError> {
    let ch = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&ch, user.id)?;
    match state
        .schedule_engine
        .get_schedule_by_id(channel_id, gen_id)
        .await?
    {
        Some(found) => Ok(Json(ScheduleResponse::from(found))),
        None => Err(ApiError::NotFound(format!("Schedule {} not found", gen_id))),
    }
}
/// Roll back to a previous schedule generation.
///
/// Deletes all generations after `gen_id`'s generation, then generates a fresh
/// schedule from now (inheriting the rolled-back generation as the base for
/// recycle-policy history).
///
/// Ordering matters here: the target generation must be resolved and the later
/// generations deleted BEFORE regenerating, so the new schedule is built on top
/// of the rolled-back state.
pub(super) async fn rollback_schedule(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path((channel_id, gen_id)): Path<(Uuid, Uuid)>,
) -> Result<impl IntoResponse, ApiError> {
    let channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;
    // Resolve the rollback target; 404 if it doesn't belong to this channel.
    let target = state
        .schedule_engine
        .get_schedule_by_id(channel_id, gen_id)
        .await?
        .ok_or_else(|| ApiError::NotFound(format!("Schedule {} not found", gen_id)))?;
    // Drop every generation newer than the target.
    state
        .schedule_engine
        .delete_schedules_after(channel_id, target.generation)
        .await?;
    // Regenerate from "now" on top of the rolled-back history.
    let schedule = state
        .schedule_engine
        .generate_schedule(channel_id, Utc::now())
        .await?;
    // Event broadcast + activity log are best-effort: errors are ignored.
    let _ = state.event_tx.send(domain::DomainEvent::ScheduleGenerated {
        channel_id,
        schedule: schedule.clone(),
    });
    let detail = format!("rollback to gen {}; {} slots", target.generation, schedule.slots.len());
    let _ = state.activity_log_repo.log("schedule_rollback", &detail, Some(channel_id)).await;
    Ok(Json(ScheduleResponse::from(schedule)))
}

View File

@@ -1,13 +1,52 @@
use axum::{Json, Router, routing::get}; use axum::{Json, Router, extract::State, routing::get};
use crate::dto::ConfigResponse; use domain::{IProviderRegistry as _, ProviderCapabilities, StreamingProtocol};
use crate::dto::{ConfigResponse, ProviderInfo};
use crate::state::AppState; use crate::state::AppState;
pub fn router() -> Router<AppState> { pub fn router() -> Router<AppState> {
Router::new().route("/", get(get_config)) Router::new().route("/", get(get_config))
} }
async fn get_config() -> Json<ConfigResponse> { #[allow(clippy::vec_init_then_push)]
async fn get_config(State(state): State<AppState>) -> Json<ConfigResponse> {
let registry = state.provider_registry.read().await;
let providers: Vec<ProviderInfo> = registry
.provider_ids()
.into_iter()
.filter_map(|id| {
registry.capabilities(&id).map(|caps| ProviderInfo {
id: id.clone(),
capabilities: caps,
})
})
.collect();
let primary_capabilities = registry
.capabilities(registry.primary_id())
.unwrap_or(ProviderCapabilities {
collections: false,
series: false,
genres: false,
tags: false,
decade: false,
search: false,
streaming_protocol: StreamingProtocol::DirectFile,
rescan: false,
transcode: false,
});
let mut available_provider_types = Vec::new();
#[cfg(feature = "jellyfin")]
available_provider_types.push("jellyfin".to_string());
#[cfg(feature = "local-files")]
available_provider_types.push("local_files".to_string());
Json(ConfigResponse { Json(ConfigResponse {
allow_registration: true, // Default to true for template allow_registration: state.config.allow_registration,
providers,
provider_capabilities: primary_capabilities,
available_provider_types,
}) })
} }

View File

@@ -0,0 +1,359 @@
//! Local-file streaming, rescan, and transcode routes.
//!
//! GET /files/stream/:id — Range streaming (no auth)
//! POST /files/rescan — index rebuild (auth required)
//! GET /files/transcode/:id/playlist.m3u8 — trigger transcode + serve playlist
//! GET /files/transcode/:id/:segment — serve .ts / .m3u8 segment
//! GET /files/transcode-settings — read TTL (auth)
//! PUT /files/transcode-settings — update TTL (auth)
//! GET /files/transcode-stats — cache size (auth)
//! DELETE /files/transcode-cache — clear cache (auth)
use axum::{
Router,
extract::{Path, State},
http::HeaderMap,
response::Response,
routing::get,
};
use crate::{error::ApiError, state::AppState};
#[cfg(feature = "local-files")]
use axum::{
Json,
extract::Query,
http::StatusCode,
routing::{delete, post},
};
#[cfg(feature = "local-files")]
use serde::Deserialize;
#[cfg(feature = "local-files")]
use crate::{
dto::{TranscodeSettingsResponse, TranscodeStatsResponse, UpdateTranscodeSettingsRequest},
extractors::CurrentUser,
};
/// File router: direct streaming is always available; rescan, transcoding, and
/// transcode-cache management exist only with the `local-files` feature.
pub fn router() -> Router<AppState> {
    let routes = Router::new().route("/stream/{id}", get(stream_file));
    #[cfg(feature = "local-files")]
    let routes = routes
        .route("/rescan", post(trigger_rescan))
        // Transcode pipeline: playlist first, then individual segments.
        .route("/transcode/{id}/playlist.m3u8", get(transcode_playlist))
        .route("/transcode/{id}/{segment}", get(transcode_segment))
        // Transcode administration (settings / stats / cache).
        .route(
            "/transcode-settings",
            get(get_transcode_settings).put(update_transcode_settings),
        )
        .route("/transcode-stats", get(get_transcode_stats))
        .route("/transcode-cache", delete(clear_transcode_cache));
    routes
}
// ============================================================================
// Direct streaming
// ============================================================================
/// `GET /files/stream/:id` — stream a local media file with HTTP Range support.
///
/// Unauthenticated: the opaque encoded id is the only capability. The decoded
/// relative path is joined to the configured root and both sides are
/// canonicalized so symlink/`..` traversal cannot escape `LOCAL_FILES_DIR`.
#[cfg_attr(not(feature = "local-files"), allow(unused_variables))]
async fn stream_file(
    State(state): State<AppState>,
    Path(encoded_id): Path<String>,
    headers: HeaderMap,
) -> Result<Response, ApiError> {
    #[cfg(feature = "local-files")]
    {
        use axum::body::Body;
        use std::io::SeekFrom;
        use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _};
        use tokio_util::io::ReaderStream;
        let root_dir = state.config.local_files_dir.as_ref().ok_or_else(|| {
            ApiError::not_implemented("LOCAL_FILES_DIR not configured")
        })?;
        let rel = infra::local_files::decode_stream_id(&encoded_id)
            .ok_or_else(|| ApiError::validation("invalid stream id"))?;
        let full_path = root_dir.join(&rel);
        // Canonicalize both sides before the containment check so symlinks
        // cannot smuggle the path outside the root.
        let canonical_root = root_dir
            .canonicalize()
            .map_err(|e| ApiError::internal(e.to_string()))?;
        let canonical = full_path
            .canonicalize()
            .map_err(|_| ApiError::not_found("file not found"))?;
        if !canonical.starts_with(&canonical_root) {
            return Err(ApiError::Forbidden("path traversal detected".into()));
        }
        let mut file = tokio::fs::File::open(&canonical)
            .await
            .map_err(|_| ApiError::not_found("file not found"))?;
        let file_size = file
            .metadata()
            .await
            .map_err(|e| ApiError::internal(e.to_string()))?
            .len();
        let ext = canonical
            .extension()
            .and_then(|e| e.to_str())
            .unwrap_or("")
            .to_lowercase();
        let content_type = content_type_for_ext(&ext);
        // A satisfiable Range header yields a 206 partial response; anything
        // else (absent, malformed, unsatisfiable) serves the whole file.
        let range = headers
            .get(axum::http::header::RANGE)
            .and_then(|v| v.to_str().ok())
            .and_then(|r| parse_range(r, file_size));
        let (start, end, status) = if let Some((s, e)) = range {
            (s, e.min(file_size.saturating_sub(1)), StatusCode::PARTIAL_CONTENT)
        } else {
            (0, file_size.saturating_sub(1), StatusCode::OK)
        };
        // Fix: for an empty file `end` clamps to 0 and `end - start + 1`
        // would advertise a bogus Content-Length of 1; report 0 instead.
        let length = if file_size == 0 { 0 } else { end - start + 1 };
        file.seek(SeekFrom::Start(start))
            .await
            .map_err(|e| ApiError::internal(e.to_string()))?;
        // `take(length)` bounds the stream so we never send bytes past `end`.
        let stream = ReaderStream::new(file.take(length));
        let body = Body::from_stream(stream);
        let mut builder = Response::builder()
            .status(status)
            .header("Content-Type", content_type)
            .header("Content-Length", length.to_string())
            .header("Accept-Ranges", "bytes");
        if status == StatusCode::PARTIAL_CONTENT {
            builder = builder.header(
                "Content-Range",
                format!("bytes {}-{}/{}", start, end, file_size),
            );
        }
        builder.body(body).map_err(|e| ApiError::internal(e.to_string()))
    }
    #[cfg(not(feature = "local-files"))]
    Err(ApiError::not_implemented("local-files feature not enabled"))
}
// ============================================================================
// Rescan
// ============================================================================
/// Query params for `POST /files/rescan`.
#[cfg(feature = "local-files")]
#[derive(Deserialize)]
struct RescanQuery {
    /// Provider id to rescan; when absent the first active provider is used.
    provider: Option<String>,
}
/// `POST /files/rescan` — rebuild the local-file index (auth required).
///
/// Picks the provider named in `?provider=`, or falls back to the first
/// active one; 501 when no local-files provider is running at all.
#[cfg(feature = "local-files")]
async fn trigger_rescan(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Query(query): Query<RescanQuery>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let indices = state.local_index.read().await.clone();
    let selected = match &query.provider {
        Some(id) => indices.get(id).cloned(),
        None => indices.values().next().cloned(),
    };
    let Some(index) = selected else {
        return Err(ApiError::not_implemented("no local files provider active"));
    };
    let items_found = index.rescan().await;
    Ok(Json(serde_json::json!({ "items_found": items_found })))
}
// ============================================================================
// Transcode endpoints
// ============================================================================
/// `GET /files/transcode/:id/playlist.m3u8` — ensure the item is transcoded,
/// then serve the resulting HLS playlist (no caching so clients re-poll).
#[cfg(feature = "local-files")]
async fn transcode_playlist(
    State(state): State<AppState>,
    Path(id): Path<String>,
) -> Result<Response, ApiError> {
    let manager = state
        .transcode_manager
        .read()
        .await
        .clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let root = state
        .config
        .local_files_dir
        .as_ref()
        .ok_or_else(|| ApiError::not_implemented("LOCAL_FILES_DIR not configured"))?;
    let rel = infra::local_files::decode_stream_id(&id)
        .ok_or_else(|| ApiError::validation("invalid item id"))?;
    let source = root.join(&rel);
    // Presumably a no-op when output already exists (named `ensure_*`) —
    // behavior lives in the transcode manager.
    manager
        .ensure_transcoded(&id, &source)
        .await
        .map_err(|e| ApiError::internal(e.to_string()))?;
    let playlist_path = manager.transcode_dir.join(&id).join("playlist.m3u8");
    let playlist = tokio::fs::read_to_string(&playlist_path)
        .await
        .map_err(|e| ApiError::internal(e.to_string()))?;
    Response::builder()
        .status(200)
        .header("Content-Type", "application/vnd.apple.mpegurl")
        .header("Cache-Control", "no-cache")
        .body(axum::body::Body::from(playlist))
        .map_err(|e| ApiError::internal(e.to_string()))
}
/// Path params for `GET /files/transcode/:id/:segment`.
///
/// `#[cfg]` placed before `#[derive]` for consistency with every other
/// feature-gated item in this file.
#[cfg(feature = "local-files")]
#[derive(Deserialize)]
struct TranscodeSegmentPath {
    /// Encoded stream id (the transcode cache sub-directory).
    id: String,
    /// File name of the requested `.ts` / `.m3u8` artifact.
    segment: String,
}
/// `GET /files/transcode/:id/:segment` — serve one HLS artifact (a `.ts`
/// media segment or a nested `.m3u8`) from the transcode cache.
///
/// Defense in depth: extension whitelist, lexical separator/`..` rejection,
/// then canonical-path containment against the cache root.
#[cfg(feature = "local-files")]
async fn transcode_segment(
    State(state): State<AppState>,
    Path(params): Path<TranscodeSegmentPath>,
) -> Result<Response, ApiError> {
    let TranscodeSegmentPath { id, segment } = params;
    // Only the two HLS artifact types are ever served.
    let ext = std::path::Path::new(&segment)
        .extension()
        .and_then(|e| e.to_str())
        .unwrap_or("");
    if ext != "ts" && ext != "m3u8" {
        return Err(ApiError::not_found("invalid segment extension"));
    }
    // Cheap lexical check; NOTE(review): backslashes are not rejected here,
    // so on Windows this leans entirely on the canonicalize check below.
    if segment.contains('/') || segment.contains("..") {
        return Err(ApiError::Forbidden("invalid segment path".into()));
    }
    let tm = state.transcode_manager.read().await.clone()
        .ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let file_path = tm.transcode_dir.join(&id).join(&segment);
    // Canonicalize both the base and the target so symlinks cannot escape
    // the cache directory.
    let canonical_base = tm
        .transcode_dir
        .canonicalize()
        .map_err(|e| ApiError::internal(e.to_string()))?;
    let canonical_file = file_path
        .canonicalize()
        .map_err(|_| ApiError::not_found("segment not found"))?;
    if !canonical_file.starts_with(&canonical_base) {
        return Err(ApiError::Forbidden("path traversal detected".into()));
    }
    let content = tokio::fs::read(&canonical_file)
        .await
        .map_err(|_| ApiError::not_found("segment not found"))?;
    let content_type = if ext == "ts" {
        "video/mp2t"
    } else {
        "application/vnd.apple.mpegurl"
    };
    Response::builder()
        .status(200)
        .header("Content-Type", content_type)
        .body(axum::body::Body::from(content))
        .map_err(|e| ApiError::internal(e.to_string()))
}
// ============================================================================
// Transcode settings / stats / cache management
// ============================================================================
/// `GET /files/transcode-settings` — current cleanup TTL (auth required).
#[cfg(feature = "local-files")]
async fn get_transcode_settings(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<Json<TranscodeSettingsResponse>, ApiError> {
    let manager = state.transcode_manager.read().await.clone();
    let manager =
        manager.ok_or_else(|| ApiError::not_implemented("TRANSCODE_DIR not configured"))?;
    let cleanup_ttl_hours = manager.get_cleanup_ttl();
    Ok(Json(TranscodeSettingsResponse { cleanup_ttl_hours }))
}
/// `PUT /files/transcode-settings` — persist and apply a new cleanup TTL.
///
/// Persistence is best-effort-optional (only when a settings repo is wired
/// up); the live manager is updated when transcoding is configured. The
/// requested value is echoed back either way.
#[cfg(feature = "local-files")]
async fn update_transcode_settings(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Json(req): Json<UpdateTranscodeSettingsRequest>,
) -> Result<Json<TranscodeSettingsResponse>, ApiError> {
    let ttl = req.cleanup_ttl_hours;
    if let Some(repo) = &state.transcode_settings_repo {
        repo.save_cleanup_ttl(ttl)
            .await
            .map_err(|e| ApiError::internal(e.to_string()))?;
    }
    if let Some(manager) = state.transcode_manager.read().await.clone() {
        manager.set_cleanup_ttl(ttl);
    }
    Ok(Json(TranscodeSettingsResponse {
        cleanup_ttl_hours: ttl,
    }))
}
/// `GET /files/transcode-stats` — cache size and item count (auth required).
#[cfg(feature = "local-files")]
async fn get_transcode_stats(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<Json<TranscodeStatsResponse>, ApiError> {
    let manager = match state.transcode_manager.read().await.clone() {
        Some(m) => m,
        None => return Err(ApiError::not_implemented("TRANSCODE_DIR not configured")),
    };
    let (cache_size_bytes, item_count) = manager.cache_stats().await;
    Ok(Json(TranscodeStatsResponse {
        cache_size_bytes,
        item_count,
    }))
}
/// `DELETE /files/transcode-cache` — wipe the transcode cache (auth required).
#[cfg(feature = "local-files")]
async fn clear_transcode_cache(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<StatusCode, ApiError> {
    let Some(manager) = state.transcode_manager.read().await.clone() else {
        return Err(ApiError::not_implemented("TRANSCODE_DIR not configured"));
    };
    manager
        .clear_cache()
        .await
        .map_err(|e| ApiError::internal(e.to_string()))?;
    Ok(StatusCode::NO_CONTENT)
}
// ============================================================================
// Helpers
// ============================================================================
/// Map a lowercase file extension onto its streaming Content-Type,
/// defaulting to `application/octet-stream` for anything unknown.
#[cfg(feature = "local-files")]
fn content_type_for_ext(ext: &str) -> &'static str {
    match ext {
        "avi" => "video/x-msvideo",
        "mkv" => "video/x-matroska",
        "mov" => "video/quicktime",
        "mp4" | "m4v" => "video/mp4",
        "webm" => "video/webm",
        _ => "application/octet-stream",
    }
}
/// Parse a single `bytes=` Range header value against `file_size`.
///
/// Supports `bytes=start-end`, `bytes=start-` (to EOF), and — new — the
/// RFC 7233 suffix form `bytes=-N` (last `N` bytes), which previously fell
/// through to a full-file 200 response. Returns `None` when the header is
/// malformed or unsatisfiable; the caller then serves the whole file.
#[cfg(feature = "local-files")]
fn parse_range(range: &str, file_size: u64) -> Option<(u64, u64)> {
    let spec = range.strip_prefix("bytes=")?;
    let (start_str, end_str) = spec.split_once('-')?;
    if start_str.is_empty() {
        // Suffix range: the last N bytes of the file.
        let suffix: u64 = end_str.parse().ok()?;
        if suffix == 0 || file_size == 0 {
            return None;
        }
        return Some((file_size.saturating_sub(suffix), file_size - 1));
    }
    let start: u64 = start_str.parse().ok()?;
    let end: u64 = if end_str.is_empty() {
        file_size.saturating_sub(1)
    } else {
        end_str.parse().ok()?
    };
    if start > end || start >= file_size {
        return None;
    }
    Some((start, end))
}

View File

@@ -0,0 +1,118 @@
//! IPTV export routes
//!
//! Generates M3U playlists and XMLTV guides for use with standard IPTV clients.
//! Auth is provided via `?token=<jwt>` query param so URLs can be pasted
//! directly into TiviMate, VLC, etc.
use std::collections::HashMap;
use axum::{
Router,
extract::{Query, State},
http::{HeaderValue, StatusCode, header},
response::{IntoResponse, Response},
routing::get,
};
use chrono::Utc;
use serde::Deserialize;
use crate::{error::ApiError, state::AppState};
#[cfg(feature = "auth-jwt")]
use crate::extractors::validate_jwt_token;
/// Build the `/iptv` sub-router: M3U playlist plus XMLTV guide.
pub fn router() -> Router<AppState> {
    Router::new()
        .route("/epg.xml", get(get_epg))
        .route("/playlist.m3u", get(get_playlist))
}
/// Query params shared by both IPTV endpoints.
#[derive(Debug, Deserialize)]
struct TokenQuery {
    /// JWT passed as `?token=`; absence is rejected in
    /// `authenticate_query_token`, not here.
    token: Option<String>,
}
/// `GET /api/v1/iptv/playlist.m3u?token={jwt}`
///
/// Returns an M3U playlist with one entry per channel the authenticated user owns.
async fn get_playlist(
    State(state): State<AppState>,
    Query(params): Query<TokenQuery>,
) -> Result<Response, ApiError> {
    // `None` becomes "" here so the auth helper produces the 401.
    let token = params.token.unwrap_or_default();
    let user = authenticate_query_token(&token, &state).await?;
    let channels = state.channel_service.find_by_owner(user.id).await?;
    let playlist = domain::generate_m3u(&channels, &state.config.base_url, &token);
    let headers = [(
        header::CONTENT_TYPE,
        HeaderValue::from_static("audio/x-mpegurl"),
    )];
    Ok((StatusCode::OK, headers, playlist).into_response())
}
/// `GET /api/v1/iptv/epg.xml?token={jwt}`
///
/// Returns an XMLTV document covering the active schedule for all channels
/// owned by the authenticated user.
async fn get_epg(
    State(state): State<AppState>,
    Query(params): Query<TokenQuery>,
) -> Result<Response, ApiError> {
    let token = params.token.unwrap_or_default();
    let user = authenticate_query_token(&token, &state).await?;
    let channels = state.channel_service.find_by_owner(user.id).await?;
    let now = Utc::now();
    // Channels without an active schedule (or whose lookup fails) simply
    // contribute no programme entries.
    let mut slots_by_channel = HashMap::new();
    for channel in &channels {
        if let Ok(Some(schedule)) =
            state.schedule_engine.get_active_schedule(channel.id, now).await
        {
            slots_by_channel.insert(channel.id, schedule.slots);
        }
    }
    let xml = domain::generate_xmltv(&channels, &slots_by_channel);
    let headers = [(
        header::CONTENT_TYPE,
        HeaderValue::from_static("application/xml; charset=utf-8"),
    )];
    Ok((StatusCode::OK, headers, xml).into_response())
}
/// Validate a JWT from the `?token=` query param and return the user.
///
/// 401 when the token is empty, and — without the `auth-jwt` feature —
/// unconditionally, since no backend exists to validate anything.
async fn authenticate_query_token(
    token: &str,
    state: &AppState,
) -> Result<domain::User, ApiError> {
    if token.is_empty() {
        return Err(ApiError::Unauthorized(
            "Missing ?token= query parameter".to_string(),
        ));
    }
    #[cfg(feature = "auth-jwt")]
    {
        return validate_jwt_token(token, state).await;
    }
    #[cfg(not(feature = "auth-jwt"))]
    {
        // Silence unused-variable warnings in this configuration.
        let _ = (token, state);
        Err(ApiError::Unauthorized(
            "No authentication backend configured".to_string(),
        ))
    }
}

View File

@@ -0,0 +1,482 @@
//! Library routes — DB-backed.
//!
//! GET /library/collections — collections derived from synced items
//! GET /library/series — series names
//! GET /library/genres — genres
//! GET /library/items — search / browse
//! GET /library/items/:id — single item
//! GET /library/sync/status — latest sync log per provider
//! POST /library/sync — trigger an ad-hoc sync (auth)
//!
//! Admin (nested under /admin/library):
//! GET /admin/library/settings — app_settings key/value
//! PUT /admin/library/settings — update app_settings
use std::collections::HashMap;
use std::sync::Arc;
use axum::{
Json, Router,
extract::{Path, Query, RawQuery, State},
http::StatusCode,
response::IntoResponse,
routing::{get, post, put},
};
use domain::{ContentType, ILibraryRepository, LibrarySearchFilter, LibrarySyncAdapter};
use serde::{Deserialize, Serialize};
use crate::{
error::ApiError,
extractors::{AdminUser, CurrentUser},
state::AppState,
};
// ============================================================================
// Routers
// ============================================================================
/// Public library routes. Authentication is enforced per-handler via the
/// `CurrentUser` extractor, not at the router level.
pub fn router() -> Router<AppState> {
    Router::new()
        .route("/collections", get(list_collections))
        .route("/series", get(list_series))
        .route("/genres", get(list_genres))
        .route("/items", get(search_items))
        .route("/items/{id}", get(get_item))
        .route("/shows", get(list_shows))
        .route("/shows/{name}/seasons", get(list_seasons))
        .route("/sync/status", get(sync_status))
        .route("/sync", post(trigger_sync))
}
/// Admin-only settings routes, mounted by the caller (per the module docs,
/// under `/admin/library`). Handlers use the `AdminUser` extractor.
pub fn admin_router() -> Router<AppState> {
    Router::new().route("/settings", get(get_settings).put(update_settings))
}
// ============================================================================
// Response DTOs
// ============================================================================
/// JSON shape for `GET /library/collections`.
#[derive(Debug, Serialize)]
struct CollectionResponse {
    id: String,
    name: String,
    /// Provider-supplied kind label; omitted from the JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    collection_type: Option<String>,
}
/// JSON shape for a single library item (`/library/items`,
/// `/library/items/:id`); built by `library_item_to_response`.
#[derive(Debug, Serialize)]
struct LibraryItemResponse {
    id: String,
    title: String,
    /// One of "movie" | "episode" | "short".
    content_type: String,
    duration_secs: u32,
    // Episode/metadata fields below are omitted from the JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    series_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    season_number: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    episode_number: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    year: Option<u16>,
    genres: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    thumbnail_url: Option<String>,
}
/// Paging envelope: one page of `items` plus the total match count
/// (so clients can render pagination beyond the current page).
#[derive(Debug, Serialize)]
struct PagedResponse<T: Serialize> {
    items: Vec<T>,
    total: u32,
}
/// JSON shape for one show in `GET /library/shows`.
#[derive(Debug, Serialize)]
struct ShowSummaryResponse {
    series_name: String,
    episode_count: u32,
    season_count: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    thumbnail_url: Option<String>,
    genres: Vec<String>,
}
/// JSON shape for one season in `GET /library/shows/:name/seasons`.
#[derive(Debug, Serialize)]
struct SeasonSummaryResponse {
    season_number: u32,
    episode_count: u32,
    #[serde(skip_serializing_if = "Option::is_none")]
    thumbnail_url: Option<String>,
}
/// JSON shape for one sync-log entry in `GET /library/sync/status`
/// (the latest entry per provider).
#[derive(Debug, Serialize)]
struct SyncLogResponse {
    id: i64,
    provider_id: String,
    started_at: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    finished_at: Option<String>,
    items_found: u32,
    status: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    error_msg: Option<String>,
}
// ============================================================================
// Query params
// ============================================================================
/// Query params for `GET /library/collections`.
#[derive(Debug, Deserialize)]
struct CollectionsQuery {
    /// Optional provider id to scope the listing.
    provider: Option<String>,
}
/// Query params for `GET /library/series`.
#[derive(Debug, Deserialize)]
struct SeriesQuery {
    /// Optional provider id to scope the listing.
    provider: Option<String>,
}
/// Query params for `GET /library/genres`.
#[derive(Debug, Deserialize)]
struct GenresQuery {
    /// Sent on the wire as `?type=`; `type` is a Rust keyword, hence the rename.
    #[serde(rename = "type")]
    content_type: Option<String>,
    provider: Option<String>,
}
/// Query params for `GET /library/items`.
///
/// Deserialized via `serde_qs` in `search_items` (not axum's `Query`) so the
/// repeatable `series` / `genres` keys collect into `Vec`s.
#[derive(Debug, Default, Deserialize)]
struct ItemsQuery {
    /// Free-text search term.
    q: Option<String>,
    #[serde(rename = "type")]
    content_type: Option<String>,
    #[serde(default)]
    series: Vec<String>,
    #[serde(default)]
    genres: Vec<String>,
    collection: Option<String>,
    /// Page size; defaulted to 50 and capped at 200 by the handler.
    limit: Option<u32>,
    offset: Option<u32>,
    provider: Option<String>,
    season: Option<u32>,
}
/// Query params for `GET /library/shows`.
#[derive(Debug, Default, Deserialize)]
struct ShowsQuery {
    /// Free-text search term.
    q: Option<String>,
    provider: Option<String>,
    #[serde(default)]
    genres: Vec<String>,
}
/// Query params for `GET /library/shows/:name/seasons`.
#[derive(Debug, Deserialize)]
struct SeasonsQuery {
    provider: Option<String>,
}
// ============================================================================
// Handlers
// ============================================================================
/// `GET /library/collections` — collections derived from synced items.
async fn list_collections(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Query(params): Query<CollectionsQuery>,
) -> Result<Json<Vec<CollectionResponse>>, ApiError> {
    let collections = state
        .library_repo
        .list_collections(params.provider.as_deref())
        .await?;
    let mut resp = Vec::with_capacity(collections.len());
    for c in collections {
        resp.push(CollectionResponse {
            id: c.id,
            name: c.name,
            collection_type: c.collection_type,
        });
    }
    Ok(Json(resp))
}
/// `GET /library/series` — distinct series names, optionally per provider.
async fn list_series(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Query(params): Query<SeriesQuery>,
) -> Result<Json<Vec<String>>, ApiError> {
    Ok(Json(
        state
            .library_repo
            .list_series(params.provider.as_deref())
            .await?,
    ))
}
/// `GET /library/genres` — distinct genres, optionally filtered by content
/// type (`?type=`) and provider.
async fn list_genres(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Query(params): Query<GenresQuery>,
) -> Result<Json<Vec<String>>, ApiError> {
    let content_type = parse_content_type(params.content_type.as_deref())?;
    Ok(Json(
        state
            .library_repo
            .list_genres(content_type.as_ref(), params.provider.as_deref())
            .await?,
    ))
}
/// `GET /library/items` — search/browse the synced library.
///
/// Takes the raw query string and parses it with `serde_qs` instead of
/// axum's `Query`, so repeated keys (`series=a&series=b`) deserialize into
/// the `Vec` fields of `ItemsQuery`.
async fn search_items(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    RawQuery(raw_query): RawQuery,
) -> Result<Json<PagedResponse<LibraryItemResponse>>, ApiError> {
    // NOTE(review): args are (max nesting depth = 2, strict = false) —
    // confirm against the pinned serde_qs version's docs.
    let qs_config = serde_qs::Config::new(2, false);
    // No query string at all deserializes to an all-default filter.
    let params: ItemsQuery = raw_query
        .as_deref()
        .map(|q| qs_config.deserialize_str::<ItemsQuery>(q))
        .transpose()
        .map_err(|e| ApiError::validation(e.to_string()))?
        .unwrap_or_default();
    // Default page size 50, hard cap 200.
    let limit = params.limit.unwrap_or(50).min(200);
    let offset = params.offset.unwrap_or(0);
    let filter = LibrarySearchFilter {
        provider_id: params.provider,
        content_type: parse_content_type(params.content_type.as_deref())?,
        series_names: params.series,
        collection_id: params.collection,
        genres: params.genres,
        search_term: params.q,
        season_number: params.season,
        offset,
        limit,
        ..Default::default()
    };
    let (items, total) = state.library_repo.search(&filter).await?;
    let resp = items.into_iter().map(library_item_to_response).collect();
    Ok(Json(PagedResponse { items: resp, total }))
}
/// `GET /library/items/:id` — a single library item, 404 when unknown.
async fn get_item(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Path(id): Path<String>,
) -> Result<Json<LibraryItemResponse>, ApiError> {
    let Some(item) = state.library_repo.get_by_id(&id).await? else {
        return Err(ApiError::NotFound(format!("Library item '{}' not found", id)));
    };
    Ok(Json(library_item_to_response(item)))
}
/// `GET /library/sync/status` — latest sync log entry per provider.
async fn sync_status(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<Json<Vec<SyncLogResponse>>, ApiError> {
    let entries = state.library_repo.latest_sync_status().await?;
    let mut resp = Vec::with_capacity(entries.len());
    for entry in entries {
        resp.push(SyncLogResponse {
            id: entry.id,
            provider_id: entry.provider_id,
            started_at: entry.started_at,
            finished_at: entry.finished_at,
            items_found: entry.items_found,
            status: entry.status,
            error_msg: entry.error_msg,
        });
    }
    Ok(Json(resp))
}
/// `POST /library/sync` — kick off an ad-hoc sync of every registered
/// provider in a background task; responds 202 immediately, or 409 when a
/// sync is already in flight for any provider.
///
/// NOTE(review): the running-check and the spawn are not atomic — two
/// near-simultaneous POSTs can both pass the check and start overlapping
/// syncs. Confirm `sync_provider` tolerates that, or add a guard.
async fn trigger_sync(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    use domain::IProviderRegistry as _;
    // Snapshot provider ids so the registry lock is not held across awaits.
    let provider_ids: Vec<String> = {
        let reg = state.provider_registry.read().await;
        reg.provider_ids()
    };
    // 409 if any provider is already syncing
    for pid in &provider_ids {
        let running = state.library_repo.is_sync_running(pid).await?;
        if running {
            return Ok((
                StatusCode::CONFLICT,
                Json(serde_json::json!({
                    "error": format!("Sync already running for provider '{}'", pid)
                })),
            )
            .into_response());
        }
    }
    // Spawn background sync
    let sync_adapter: Arc<dyn LibrarySyncAdapter> = Arc::clone(&state.library_sync_adapter);
    let registry = Arc::clone(&state.provider_registry);
    tokio::spawn(async move {
        // Re-resolve the snapshotted ids inside the task; providers removed
        // in the meantime are silently skipped by filter_map.
        let providers: Vec<(String, Arc<dyn domain::IMediaProvider>)> = {
            let reg = registry.read().await;
            provider_ids
                .iter()
                .filter_map(|id| reg.get_provider(id).map(|p| (id.clone(), p)))
                .collect()
        };
        // Providers sync sequentially; one failure does not stop the rest.
        for (pid, provider) in providers {
            let result = sync_adapter.sync_provider(provider.as_ref(), &pid).await;
            if let Some(ref err) = result.error {
                tracing::warn!("manual sync: provider '{}' failed: {}", pid, err);
            } else {
                tracing::info!(
                    "manual sync: provider '{}' done — {} items in {}ms",
                    pid,
                    result.items_found,
                    result.duration_ms
                );
            }
        }
    });
    Ok((
        StatusCode::ACCEPTED,
        Json(serde_json::json!({ "message": "Sync started" })),
    )
    .into_response())
}
/// `GET /library/shows` — show summaries, filterable by provider, search
/// term, and genres.
async fn list_shows(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Query(params): Query<ShowsQuery>,
) -> Result<Json<Vec<ShowSummaryResponse>>, ApiError> {
    let shows = state
        .library_repo
        .list_shows(
            params.provider.as_deref(),
            params.q.as_deref(),
            &params.genres,
        )
        .await?;
    let mut resp = Vec::with_capacity(shows.len());
    for show in shows {
        resp.push(ShowSummaryResponse {
            series_name: show.series_name,
            episode_count: show.episode_count,
            season_count: show.season_count,
            thumbnail_url: show.thumbnail_url,
            genres: show.genres,
        });
    }
    Ok(Json(resp))
}
/// `GET /library/shows/:name/seasons` — season summaries for one show.
async fn list_seasons(
    State(state): State<AppState>,
    CurrentUser(_user): CurrentUser,
    Path(name): Path<String>,
    Query(params): Query<SeasonsQuery>,
) -> Result<Json<Vec<SeasonSummaryResponse>>, ApiError> {
    let seasons = state
        .library_repo
        .list_seasons(&name, params.provider.as_deref())
        .await?;
    let mut resp = Vec::with_capacity(seasons.len());
    for season in seasons {
        resp.push(SeasonSummaryResponse {
            season_number: season.season_number,
            episode_count: season.episode_count,
            thumbnail_url: season.thumbnail_url,
        });
    }
    Ok(Json(resp))
}
/// `GET /admin/library/settings` — all app settings as typed JSON values.
async fn get_settings(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
) -> Result<Json<HashMap<String, serde_json::Value>>, ApiError> {
    /// Coerce a stored string into the richest JSON scalar it parses as:
    /// integer first, then boolean, falling back to a plain string.
    fn coerce(v: String) -> serde_json::Value {
        if let Ok(n) = v.parse::<i64>() {
            serde_json::Value::Number(n.into())
        } else if let Ok(b) = v.parse::<bool>() {
            serde_json::Value::Bool(b)
        } else {
            serde_json::Value::String(v)
        }
    }
    let pairs = state.app_settings_repo.get_all().await?;
    let map: HashMap<String, serde_json::Value> =
        pairs.into_iter().map(|(k, v)| (k, coerce(v))).collect();
    Ok(Json(map))
}
/// `PUT /admin/library/settings` — persist key/value settings, then echo
/// back the full persisted state (not just the request body).
async fn update_settings(
    State(state): State<AppState>,
    AdminUser(_user): AdminUser,
    Json(body): Json<HashMap<String, serde_json::Value>>,
) -> Result<Json<HashMap<String, serde_json::Value>>, ApiError> {
    /// Render a JSON value into the plain string form the settings store keeps.
    fn stringify(val: &serde_json::Value) -> String {
        match val {
            serde_json::Value::String(s) => s.clone(),
            serde_json::Value::Number(n) => n.to_string(),
            serde_json::Value::Bool(b) => b.to_string(),
            other => other.to_string(),
        }
    }
    /// Parse a stored string back into the richest JSON scalar it matches:
    /// integer first, then boolean, else string.
    fn coerce(v: String) -> serde_json::Value {
        if let Ok(n) = v.parse::<i64>() {
            serde_json::Value::Number(n.into())
        } else if let Ok(b) = v.parse::<bool>() {
            serde_json::Value::Bool(b)
        } else {
            serde_json::Value::String(v)
        }
    }
    for (key, val) in &body {
        state.app_settings_repo.set(key, &stringify(val)).await?;
    }
    let pairs = state.app_settings_repo.get_all().await?;
    let map: HashMap<String, serde_json::Value> =
        pairs.into_iter().map(|(k, v)| (k, coerce(v))).collect();
    Ok(Json(map))
}
// ============================================================================
// Helpers
// ============================================================================
/// Parse the `?type=` query value into a `ContentType`.
///
/// `None` or an empty string mean "no filter"; anything else must be one of
/// the three known names or a 400 validation error is returned.
fn parse_content_type(s: Option<&str>) -> Result<Option<ContentType>, ApiError> {
    let Some(raw) = s else { return Ok(None) };
    match raw {
        "" => Ok(None),
        "movie" => Ok(Some(ContentType::Movie)),
        "episode" => Ok(Some(ContentType::Episode)),
        "short" => Ok(Some(ContentType::Short)),
        other => Err(ApiError::validation(format!(
            "Unknown content type '{}'. Use movie, episode, or short.",
            other
        ))),
    }
}
/// Convert a domain `LibraryItem` into its wire representation.
fn library_item_to_response(item: domain::LibraryItem) -> LibraryItemResponse {
    let content_type = match item.content_type {
        ContentType::Movie => "movie",
        ContentType::Episode => "episode",
        ContentType::Short => "short",
    }
    .to_string();
    LibraryItemResponse {
        id: item.id,
        title: item.title,
        content_type,
        duration_secs: item.duration_secs,
        series_name: item.series_name,
        season_number: item.season_number,
        episode_number: item.episode_number,
        year: item.year,
        genres: item.genres,
        thumbnail_url: item.thumbnail_url,
    }
}

View File

@@ -5,14 +5,25 @@
use crate::state::AppState; use crate::state::AppState;
use axum::Router; use axum::Router;
pub mod admin;
pub mod admin_providers;
pub mod auth; pub mod auth;
pub mod channels; pub mod channels;
pub mod config; pub mod config;
pub mod files;
pub mod iptv;
pub mod library;
/// Construct the API v1 router /// Construct the API v1 router
pub fn api_v1_router() -> Router<AppState> { pub fn api_v1_router() -> Router<AppState> {
Router::new() Router::new()
.nest("/admin", admin::router())
.nest("/admin/providers", admin_providers::router())
.nest("/auth", auth::router()) .nest("/auth", auth::router())
.nest("/channels", channels::router()) .nest("/channels", channels::router())
.nest("/config", config::router()) .nest("/config", config::router())
.nest("/files", files::router())
.nest("/iptv", iptv::router())
.nest("/library", library::router())
.nest("/admin", library::admin_router())
} }

View File

@@ -0,0 +1,346 @@
//! Background auto-scheduler task.
//!
//! Runs every hour, finds channels with `auto_schedule = true`, and regenerates
//! their schedule if it is within 24 hours of expiry (or already expired).
use std::sync::Arc;
use std::time::Duration;
use chrono::Utc;
use domain::{ChannelRepository, DomainEvent, ScheduleEngineService};
use tokio::sync::broadcast;
/// Background auto-scheduler driver: wakes hourly and runs one `tick`.
///
/// Never returns; intended to be `tokio::spawn`ed at startup.
/// NOTE(review): the sleep comes first, so the initial pass happens one hour
/// after boot — confirm an immediate first pass isn't wanted.
pub async fn run_auto_scheduler(
    schedule_engine: Arc<ScheduleEngineService>,
    channel_repo: Arc<dyn ChannelRepository>,
    event_tx: broadcast::Sender<DomainEvent>,
) {
    loop {
        tokio::time::sleep(Duration::from_secs(3600)).await;
        tick(&schedule_engine, &channel_repo, &event_tx).await;
    }
}
/// One scheduler pass over all channels with auto-scheduling enabled.
///
/// Per channel, the regeneration start is chosen as:
/// * latest schedule valid for more than 24h → skip this round;
/// * valid but expiring within 24h → start at its `valid_until` (seamless
///   handoff);
/// * expired or absent → start at `now`.
///
/// Repository/engine failures are logged via `tracing::warn!` and never abort
/// the loop, so one bad channel cannot starve the rest.
async fn tick(
    schedule_engine: &Arc<ScheduleEngineService>,
    channel_repo: &Arc<dyn ChannelRepository>,
    event_tx: &broadcast::Sender<DomainEvent>,
) {
    let channels = match channel_repo.find_auto_schedule_enabled().await {
        Ok(c) => c,
        Err(e) => {
            tracing::warn!("auto-scheduler: failed to fetch channels: {}", e);
            return;
        }
    };
    let now = Utc::now();
    for channel in channels {
        let from = match schedule_engine.get_latest_schedule(channel.id).await {
            Ok(Some(s)) => {
                let remaining = s.valid_until - now;
                if remaining > chrono::Duration::hours(24) {
                    // Still fresh — skip until it gets close to expiry
                    continue;
                } else if s.valid_until > now {
                    // Seamless handoff: new schedule starts where the old one ends
                    s.valid_until
                } else {
                    // Expired: start from now to avoid scheduling in the past
                    now
                }
            }
            Ok(None) => now,
            Err(e) => {
                tracing::warn!(
                    "auto-scheduler: failed to fetch latest schedule for channel {}: {}",
                    channel.id,
                    e
                );
                continue;
            }
        };
        match schedule_engine.generate_schedule(channel.id, from).await {
            Ok(schedule) => {
                tracing::info!(
                    "auto-scheduler: generated schedule for channel {} starting at {}",
                    channel.id,
                    from
                );
                // A send error only means no receivers are subscribed
                // (e.g. in tests); safe to ignore.
                let _ = event_tx.send(DomainEvent::ScheduleGenerated {
                    channel_id: channel.id,
                    schedule,
                });
            }
            Err(e) => {
                tracing::warn!(
                    "auto-scheduler: failed to generate schedule for channel {}: {}",
                    channel.id,
                    e
                );
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::{Arc, Mutex};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use domain::value_objects::{ChannelId, ContentType, UserId};
use std::collections::HashMap;
use domain::{
BlockId, Channel, ChannelRepository, Collection, DomainResult, GeneratedSchedule,
IProviderRegistry, MediaFilter, MediaItem, MediaItemId, PlaybackRecord, ProviderCapabilities,
ScheduleEngineService, ScheduleRepository, SeriesSummary, StreamQuality,
};
use uuid::Uuid;
// ── Mocks ─────────────────────────────────────────────────────────────────
struct MockChannelRepo {
channels: Vec<Channel>,
}
#[async_trait]
impl ChannelRepository for MockChannelRepo {
async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
Ok(self.channels.iter().find(|c| c.id == id).cloned())
}
async fn find_by_owner(&self, _owner_id: UserId) -> DomainResult<Vec<Channel>> {
unimplemented!()
}
async fn find_all(&self) -> DomainResult<Vec<Channel>> {
unimplemented!()
}
async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>> {
Ok(self.channels.clone())
}
async fn save(&self, _channel: &Channel) -> DomainResult<()> {
unimplemented!()
}
async fn delete(&self, _id: ChannelId) -> DomainResult<()> {
unimplemented!()
}
async fn save_config_snapshot(&self, _channel_id: ChannelId, _config: &domain::ScheduleConfig, _label: Option<String>) -> DomainResult<domain::ChannelConfigSnapshot> { unimplemented!() }
async fn list_config_snapshots(&self, _channel_id: ChannelId) -> DomainResult<Vec<domain::ChannelConfigSnapshot>> { unimplemented!() }
async fn get_config_snapshot(&self, _channel_id: ChannelId, _snapshot_id: Uuid) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
async fn patch_config_snapshot_label(&self, _channel_id: ChannelId, _snapshot_id: Uuid, _label: Option<String>) -> DomainResult<Option<domain::ChannelConfigSnapshot>> { unimplemented!() }
}
struct MockScheduleRepo {
latest: Option<GeneratedSchedule>,
saved: Arc<Mutex<Vec<GeneratedSchedule>>>,
}
#[async_trait]
impl ScheduleRepository for MockScheduleRepo {
async fn find_active(
&self,
_channel_id: ChannelId,
_at: DateTime<Utc>,
) -> DomainResult<Option<GeneratedSchedule>> {
Ok(None)
}
async fn find_latest(
&self,
_channel_id: ChannelId,
) -> DomainResult<Option<GeneratedSchedule>> {
Ok(self.latest.clone())
}
async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
self.saved.lock().unwrap().push(schedule.clone());
Ok(())
}
async fn find_playback_history(
&self,
_channel_id: ChannelId,
) -> DomainResult<Vec<PlaybackRecord>> {
Ok(vec![])
}
async fn save_playback_record(&self, _record: &PlaybackRecord) -> DomainResult<()> {
Ok(())
}
async fn find_last_slot_per_block(
&self,
_channel_id: ChannelId,
) -> DomainResult<HashMap<BlockId, MediaItemId>> {
Ok(HashMap::new())
}
async fn list_schedule_history(&self, _channel_id: ChannelId) -> DomainResult<Vec<GeneratedSchedule>> { unimplemented!() }
async fn get_schedule_by_id(&self, _channel_id: ChannelId, _schedule_id: Uuid) -> DomainResult<Option<GeneratedSchedule>> { unimplemented!() }
async fn delete_schedules_after(&self, _channel_id: ChannelId, _target_generation: u32) -> DomainResult<()> { unimplemented!() }
}
struct MockRegistry;
#[async_trait]
impl IProviderRegistry for MockRegistry {
async fn fetch_items(
&self,
_provider_id: &str,
_filter: &MediaFilter,
) -> DomainResult<Vec<MediaItem>> {
Ok(vec![])
}
async fn fetch_by_id(&self, _item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
Ok(None)
}
async fn get_stream_url(
&self,
_item_id: &MediaItemId,
_quality: &StreamQuality,
) -> DomainResult<String> {
unimplemented!()
}
fn provider_ids(&self) -> Vec<String> {
vec![]
}
fn primary_id(&self) -> &str {
""
}
fn capabilities(&self, _provider_id: &str) -> Option<ProviderCapabilities> {
None
}
async fn list_collections(&self, _provider_id: &str) -> DomainResult<Vec<Collection>> {
unimplemented!()
}
async fn list_series(
&self,
_provider_id: &str,
_collection_id: Option<&str>,
) -> DomainResult<Vec<SeriesSummary>> {
unimplemented!()
}
async fn list_genres(
&self,
_provider_id: &str,
_content_type: Option<&ContentType>,
) -> DomainResult<Vec<String>> {
unimplemented!()
}
}
// ── Helpers ───────────────────────────────────────────────────────────────
fn make_channel() -> Channel {
let mut ch = Channel::new(Uuid::new_v4(), "Test", "UTC");
ch.auto_schedule = true;
ch
}
fn make_schedule(channel_id: ChannelId, valid_until: DateTime<Utc>) -> GeneratedSchedule {
GeneratedSchedule {
id: Uuid::new_v4(),
channel_id,
valid_from: valid_until - Duration::hours(48),
valid_until,
generation: 1,
slots: vec![],
}
}
fn make_engine(
channel_repo: Arc<dyn ChannelRepository>,
schedule_repo: Arc<dyn ScheduleRepository>,
) -> Arc<ScheduleEngineService> {
Arc::new(ScheduleEngineService::new(
Arc::new(MockRegistry),
channel_repo,
schedule_repo,
))
}
// ── Tests ─────────────────────────────────────────────────────────────────
/// With no schedule on record, a tick must generate one starting ~now.
#[tokio::test]
async fn test_no_schedule_generates_from_now() {
    let ch = make_channel();
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    // No latest schedule: the engine has nothing to extend.
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: None,
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    let saved = saved.lock().unwrap();
    assert_eq!(saved.len(), 1);
    // A few seconds of slack covers test execution time.
    let diff = (saved[0].valid_from - Utc::now()).num_seconds().abs();
    assert!(diff < 5, "valid_from should be ~now, diff={diff}");
}
/// A schedule still valid for another 25 hours must be left untouched:
/// tick saves nothing.
#[tokio::test]
async fn test_fresh_schedule_skips() {
    let channel = make_channel();
    let schedule = make_schedule(channel.id, Utc::now() + Duration::hours(25));
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> = Arc::new(MockChannelRepo {
        channels: vec![channel],
    });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: Some(schedule),
        saved: Arc::clone(&saved),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    assert!(saved.lock().unwrap().is_empty());
}
/// A schedule nearing expiry (20h of validity left in this fixture) is
/// regenerated, and the replacement starts exactly where the old one ends —
/// a seamless handoff with no gap or overlap.
#[tokio::test]
async fn test_expiring_schedule_seamless_handoff() {
    let ch = make_channel();
    let valid_until = Utc::now() + Duration::hours(20);
    let schedule = make_schedule(ch.id, valid_until);
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: Some(schedule),
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    let saved = saved.lock().unwrap();
    assert_eq!(saved.len(), 1);
    // The new schedule must begin at the old schedule's valid_until.
    assert_eq!(saved[0].valid_from, valid_until);
}
/// An already-expired schedule cannot be extended seamlessly: the
/// replacement starts ~now rather than at the stale valid_until.
#[tokio::test]
async fn test_expired_schedule_generates_from_now() {
    let ch = make_channel();
    // valid_until in the past — the schedule has already lapsed.
    let valid_until = Utc::now() - Duration::hours(1);
    let schedule = make_schedule(ch.id, valid_until);
    let saved = Arc::new(Mutex::new(vec![]));
    let channel_repo: Arc<dyn ChannelRepository> =
        Arc::new(MockChannelRepo { channels: vec![ch] });
    let schedule_repo: Arc<dyn ScheduleRepository> = Arc::new(MockScheduleRepo {
        latest: Some(schedule),
        saved: saved.clone(),
    });
    let engine = make_engine(channel_repo.clone(), schedule_repo);
    let (event_tx, _) = tokio::sync::broadcast::channel(8);
    tick(&engine, &channel_repo, &event_tx).await;
    let saved = saved.lock().unwrap();
    assert_eq!(saved.len(), 1);
    // A few seconds of slack covers test execution time.
    let diff = (saved[0].valid_from - Utc::now()).num_seconds().abs();
    assert!(diff < 5, "valid_from should be ~now, diff={diff}");
}
}

View File

@@ -0,0 +1,59 @@
use std::net::SocketAddr;
use axum::Router;
use axum::http::{HeaderName, HeaderValue};
use k_core::http::server::{ServerConfig, apply_standard_middleware};
use tokio::net::TcpListener;
use tower_http::cors::{AllowHeaders, AllowMethods, AllowOrigin, CorsLayer};
use crate::config::Config;
use crate::routes;
use crate::state::AppState;
/// Build the Axum application, apply shared middleware, and serve it on the
/// configured host:port until the server exits.
///
/// The CORS layer added here wraps the whole app (outermost layer) so it
/// answers OPTIONS preflights — including the custom password headers —
/// before k_core's inner standard middleware sees the request.
pub async fn build_and_serve(state: AppState, config: &Config) -> anyhow::Result<()> {
    let server_config = ServerConfig {
        cors_origins: config.cors_allowed_origins.clone(),
    };
    let app = Router::new()
        .nest("/api/v1", routes::api_v1_router())
        .with_state(state);
    let app = apply_standard_middleware(app, &server_config);
    // Parse configured origins. An origin that fails to parse is skipped but
    // logged — previously filter_map dropped it silently, which made a typo'd
    // CORS origin very hard to diagnose.
    let mut origins: Vec<HeaderValue> = Vec::with_capacity(config.cors_allowed_origins.len());
    for origin in &config.cors_allowed_origins {
        match origin.parse::<HeaderValue>() {
            Ok(value) => origins.push(value),
            Err(e) => tracing::warn!("ignoring invalid CORS origin {:?}: {}", origin, e),
        }
    }
    let cors = CorsLayer::new()
        .allow_origin(AllowOrigin::list(origins))
        .allow_methods(AllowMethods::any())
        .allow_headers(AllowHeaders::list([
            axum::http::header::AUTHORIZATION,
            axum::http::header::CONTENT_TYPE,
            HeaderName::from_static("x-channel-password"),
            HeaderName::from_static("x-block-password"),
        ]));
    let app = app.layer(cors);
    let addr: SocketAddr = format!("{}:{}", config.host, config.port).parse()?;
    let listener = TcpListener::bind(addr).await?;
    tracing::info!("🚀 API server running at http://{}", addr);
    tracing::info!("🔒 Authentication mode: JWT (Bearer token)");
    #[cfg(feature = "auth-jwt")]
    tracing::info!(" ✓ JWT auth enabled");
    #[cfg(feature = "auth-oidc")]
    tracing::info!(" ✓ OIDC integration enabled (stateless cookie state)");
    tracing::info!("📝 API endpoints available at /api/v1/...");
    axum::serve(listener, app).await?;
    Ok(())
}

View File

@@ -0,0 +1,24 @@
use std::sync::Arc;
use domain::{ChannelRepository, DomainEvent, ScheduleEngineService};
use tokio::sync::broadcast;
use crate::{poller, scheduler};
/// Spawn the long-running background tasks — the auto-scheduler and the
/// broadcast poller — onto the Tokio runtime. Both tasks share the schedule
/// engine, the channel repository, and the domain-event sender.
pub fn spawn_background_tasks(
    schedule_engine: Arc<ScheduleEngineService>,
    channel_repo: Arc<dyn ChannelRepository>,
    event_tx: broadcast::Sender<DomainEvent>,
) {
    // The scheduler receives cloned handles; the poller consumes the originals.
    tokio::spawn(scheduler::run_auto_scheduler(
        Arc::clone(&schedule_engine),
        Arc::clone(&channel_repo),
        event_tx.clone(),
    ));
    tokio::spawn(poller::run_broadcast_poller(
        schedule_engine,
        channel_repo,
        event_tx,
    ));
}

View File

@@ -8,30 +8,75 @@ use axum_extra::extract::cookie::Key;
use infra::auth::jwt::{JwtConfig, JwtValidator}; use infra::auth::jwt::{JwtConfig, JwtValidator};
#[cfg(feature = "auth-oidc")] #[cfg(feature = "auth-oidc")]
use infra::auth::oidc::OidcService; use infra::auth::oidc::OidcService;
use std::sync::Arc; use std::collections::VecDeque;
#[cfg(feature = "local-files")]
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
use crate::config::Config; use crate::config::Config;
use domain::{ChannelService, ScheduleEngineService, UserService}; use crate::events::EventBus;
use crate::log_layer::LogLine;
use domain::{ActivityLogRepository, ChannelService, IAppSettingsRepository, ILibraryRepository, LibrarySyncAdapter, ProviderConfigRepository, ScheduleEngineService, UserService};
#[cfg(feature = "local-files")]
use domain::TranscodeSettingsRepository;
use k_core::db::DatabasePool;
#[derive(Clone)] #[derive(Clone)]
pub struct AppState { pub struct AppState {
pub user_service: Arc<UserService>, pub user_service: Arc<UserService>,
pub channel_service: Arc<ChannelService>, pub channel_service: Arc<ChannelService>,
pub schedule_engine: Arc<ScheduleEngineService>, pub schedule_engine: Arc<ScheduleEngineService>,
pub provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
pub provider_config_repo: Arc<dyn ProviderConfigRepository>,
pub cookie_key: Key, pub cookie_key: Key,
#[cfg(feature = "auth-oidc")] #[cfg(feature = "auth-oidc")]
pub oidc_service: Option<Arc<OidcService>>, pub oidc_service: Option<Arc<OidcService>>,
#[cfg(feature = "auth-jwt")] #[cfg(feature = "auth-jwt")]
pub jwt_validator: Option<Arc<JwtValidator>>, pub jwt_validator: Option<Arc<JwtValidator>>,
pub config: Arc<Config>, pub config: Arc<Config>,
pub event_tx: EventBus,
/// Broadcast channel for streaming log lines to SSE clients.
pub log_tx: broadcast::Sender<LogLine>,
/// Ring buffer of recent log lines sent to new SSE clients on connect.
pub log_history: Arc<Mutex<VecDeque<LogLine>>>,
/// Repository for persisted in-app activity events.
pub activity_log_repo: Arc<dyn ActivityLogRepository>,
/// Indexes for local-files provider instances, keyed by provider instance id.
#[cfg(feature = "local-files")]
pub local_index: Arc<tokio::sync::RwLock<HashMap<String, Arc<infra::LocalIndex>>>>,
/// TranscodeManager for FFmpeg HLS transcoding (requires TRANSCODE_DIR).
#[cfg(feature = "local-files")]
pub transcode_manager: Arc<tokio::sync::RwLock<Option<Arc<infra::TranscodeManager>>>>,
/// Repository for transcode settings persistence.
#[cfg(feature = "local-files")]
pub transcode_settings_repo: Option<Arc<dyn TranscodeSettingsRepository>>,
/// Database pool — used by infra factory functions for hot-reload.
pub db_pool: Arc<DatabasePool>,
pub library_repo: Arc<dyn ILibraryRepository>,
pub library_sync_adapter: Arc<dyn LibrarySyncAdapter>,
pub app_settings_repo: Arc<dyn IAppSettingsRepository>,
} }
impl AppState { impl AppState {
#[allow(clippy::too_many_arguments)]
pub async fn new( pub async fn new(
user_service: UserService, user_service: UserService,
channel_service: ChannelService, channel_service: ChannelService,
schedule_engine: ScheduleEngineService, schedule_engine: ScheduleEngineService,
provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
provider_config_repo: Arc<dyn ProviderConfigRepository>,
config: Config, config: Config,
event_tx: EventBus,
log_tx: broadcast::Sender<LogLine>,
log_history: Arc<Mutex<VecDeque<LogLine>>>,
activity_log_repo: Arc<dyn ActivityLogRepository>,
db_pool: Arc<DatabasePool>,
library_repo: Arc<dyn ILibraryRepository>,
library_sync_adapter: Arc<dyn LibrarySyncAdapter>,
app_settings_repo: Arc<dyn IAppSettingsRepository>,
#[cfg(feature = "local-files")]
transcode_settings_repo: Option<Arc<dyn TranscodeSettingsRepository>>,
) -> anyhow::Result<Self> { ) -> anyhow::Result<Self> {
let cookie_key = Key::derive_from(config.cookie_secret.as_bytes()); let cookie_key = Key::derive_from(config.cookie_secret.as_bytes());
@@ -87,6 +132,7 @@ impl AppState {
config.jwt_issuer.clone(), config.jwt_issuer.clone(),
config.jwt_audience.clone(), config.jwt_audience.clone(),
Some(config.jwt_expiry_hours), Some(config.jwt_expiry_hours),
Some(config.jwt_refresh_expiry_days),
config.is_production, config.is_production,
)?; )?;
Some(Arc::new(JwtValidator::new(jwt_config))) Some(Arc::new(JwtValidator::new(jwt_config)))
@@ -96,12 +142,28 @@ impl AppState {
user_service: Arc::new(user_service), user_service: Arc::new(user_service),
channel_service: Arc::new(channel_service), channel_service: Arc::new(channel_service),
schedule_engine: Arc::new(schedule_engine), schedule_engine: Arc::new(schedule_engine),
provider_registry,
provider_config_repo,
cookie_key, cookie_key,
#[cfg(feature = "auth-oidc")] #[cfg(feature = "auth-oidc")]
oidc_service, oidc_service,
#[cfg(feature = "auth-jwt")] #[cfg(feature = "auth-jwt")]
jwt_validator, jwt_validator,
config: Arc::new(config), config: Arc::new(config),
event_tx,
log_tx,
log_history,
activity_log_repo,
#[cfg(feature = "local-files")]
local_index: Arc::new(tokio::sync::RwLock::new(HashMap::new())),
#[cfg(feature = "local-files")]
transcode_manager: Arc::new(tokio::sync::RwLock::new(None)),
#[cfg(feature = "local-files")]
transcode_settings_repo,
db_pool,
library_repo,
library_sync_adapter,
app_settings_repo,
}) })
} }
} }

View File

@@ -0,0 +1,25 @@
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt};
use crate::log_layer::{AppLogLayer, LogLine};
/// Handles produced by tracing initialization: the broadcast sender that
/// streams live log lines, and the shared ring buffer of recent lines.
pub struct LoggingHandles {
    // Sender half of the log-line broadcast channel.
    pub log_tx: broadcast::Sender<LogLine>,
    // Shared buffer of recent log lines (mutex-guarded VecDeque).
    pub log_history: Arc<Mutex<VecDeque<LogLine>>>,
}
/// Install the global tracing subscriber and return the log-streaming handles.
///
/// Layers: an env-filter (RUST_LOG, defaulting to "info"), a standard fmt
/// layer, and an `AppLogLayer` that mirrors each log line into a broadcast
/// channel (capacity 512) and the shared history buffer.
pub fn init_tracing() -> LoggingHandles {
    let (log_tx, _) = broadcast::channel::<LogLine>(512);
    let log_history = Arc::new(Mutex::new(VecDeque::<LogLine>::new()));
    tracing_subscriber::registry()
        .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")))
        .with(fmt::layer())
        .with(AppLogLayer::new(log_tx.clone(), Arc::clone(&log_history)))
        .init();
    LoggingHandles { log_tx, log_history }
}

View File

@@ -0,0 +1,212 @@
//! WebhookConsumer background task.
//!
//! Subscribes to the domain-event broadcast channel, looks up each channel's
//! webhook_url, and fires HTTP POST requests (fire-and-forget).
use chrono::Utc;
use handlebars::Handlebars;
use serde_json::{Value, json};
use std::sync::Arc;
use tokio::sync::broadcast;
use tracing::{info, warn};
use uuid::Uuid;
use domain::{ChannelRepository, DomainEvent};
/// Consumes domain events and delivers them to per-channel webhook URLs.
///
/// Uses fire-and-forget HTTP POST — failures are logged as warnings, never retried.
/// Runs until the event bus is closed; a lagged receiver logs and continues.
pub async fn run_webhook_consumer(
    mut rx: broadcast::Receiver<DomainEvent>,
    channel_repo: Arc<dyn ChannelRepository>,
    client: reqwest::Client,
) {
    loop {
        match rx.recv().await {
            Ok(event) => {
                let channel_id = event_channel_id(&event);
                let payload = build_payload(&event);
                match channel_repo.find_by_id(channel_id).await {
                    Ok(Some(channel)) => {
                        if let Some(url) = channel.webhook_url {
                            let client = client.clone();
                            let template = channel.webhook_body_template.clone();
                            let headers = channel.webhook_headers.clone();
                            // Deliver on a spawned task so a slow endpoint
                            // cannot stall the consumer loop.
                            tokio::spawn(async move {
                                post_webhook(&client, &url, payload, template.as_deref(), headers.as_deref()).await;
                            });
                        }
                        // No webhook_url configured — skip silently
                    }
                    Ok(None) => {
                        // Channel deleted — nothing to do
                    }
                    Err(e) => {
                        warn!("webhook consumer: failed to look up channel {}: {}", channel_id, e);
                    }
                }
            }
            Err(broadcast::error::RecvError::Lagged(n)) => {
                warn!("webhook consumer lagged, {} events dropped", n);
                // Continue — don't break; catch up from current position
            }
            Err(broadcast::error::RecvError::Closed) => {
                info!("webhook consumer: event bus closed, shutting down");
                break;
            }
        }
    }
}
/// The channel a domain event pertains to, regardless of variant.
fn event_channel_id(event: &DomainEvent) -> Uuid {
    match event {
        DomainEvent::ChannelCreated { channel } | DomainEvent::ChannelUpdated { channel } => {
            channel.id
        }
        DomainEvent::BroadcastTransition { channel_id, .. }
        | DomainEvent::ScheduleGenerated { channel_id, .. }
        | DomainEvent::NoSignal { channel_id }
        | DomainEvent::ChannelDeleted { channel_id } => *channel_id,
    }
}
/// Build the JSON payload for an event.
///
/// Every payload shares the envelope `{event, timestamp, channel_id, data}`;
/// only `data` varies per variant. This value is also the Handlebars context
/// when a channel defines a custom webhook body template.
fn build_payload(event: &DomainEvent) -> Value {
    let now = Utc::now().to_rfc3339();
    match event {
        DomainEvent::BroadcastTransition { channel_id, slot } => {
            // Seconds already elapsed within the slot, clamped to 0 for
            // slots that start in the future.
            let offset_secs = (Utc::now() - slot.start_at).num_seconds().max(0) as u64;
            json!({
                "event": "broadcast_transition",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {
                    "slot_id": slot.id,
                    "item": {
                        "id": slot.item.id.as_ref(),
                        "title": slot.item.title,
                        "duration_secs": slot.item.duration_secs,
                    },
                    "start_at": slot.start_at.to_rfc3339(),
                    "end_at": slot.end_at.to_rfc3339(),
                    "offset_secs": offset_secs,
                }
            })
        }
        DomainEvent::NoSignal { channel_id } => {
            json!({
                "event": "no_signal",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {}
            })
        }
        DomainEvent::ScheduleGenerated { channel_id, schedule } => {
            json!({
                "event": "schedule_generated",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {
                    "generation": schedule.generation,
                    "valid_from": schedule.valid_from.to_rfc3339(),
                    "valid_until": schedule.valid_until.to_rfc3339(),
                    "slot_count": schedule.slots.len(),
                }
            })
        }
        DomainEvent::ChannelCreated { channel } => {
            json!({
                "event": "channel_created",
                "timestamp": now,
                "channel_id": channel.id,
                "data": {
                    "name": channel.name,
                    "description": channel.description,
                }
            })
        }
        DomainEvent::ChannelUpdated { channel } => {
            json!({
                "event": "channel_updated",
                "timestamp": now,
                "channel_id": channel.id,
                "data": {
                    "name": channel.name,
                    "description": channel.description,
                }
            })
        }
        DomainEvent::ChannelDeleted { channel_id } => {
            json!({
                "event": "channel_deleted",
                "timestamp": now,
                "channel_id": channel_id,
                "data": {}
            })
        }
    }
}
/// Fire-and-forget HTTP POST to a webhook URL.
///
/// If `template` is provided it is rendered with `payload` as context via Handlebars.
/// `headers_json` is a JSON object string of extra HTTP headers (e.g. `{"Authorization":"Bearer x"}`).
/// Content-Type defaults to `application/json` unless overridden in `headers_json`.
/// All failure modes (template errors, bad header JSON, HTTP errors) are
/// logged as warnings and never retried.
async fn post_webhook(
    client: &reqwest::Client,
    url: &str,
    payload: Value,
    template: Option<&str>,
    headers_json: Option<&str>,
) {
    // Body is either the rendered user template or the raw JSON payload.
    let body = if let Some(tmpl) = template {
        let hbs = Handlebars::new();
        match hbs.render_template(tmpl, &payload) {
            Ok(rendered) => rendered,
            Err(e) => {
                warn!("webhook template render failed for {}: {}", url, e);
                return;
            }
        }
    } else {
        match serde_json::to_string(&payload) {
            Ok(s) => s,
            Err(e) => {
                warn!("webhook payload serialize failed: {}", e);
                return;
            }
        }
    };
    let mut req = client.post(url).body(body);
    let mut has_content_type = false;
    if let Some(h) = headers_json {
        match serde_json::from_str::<serde_json::Map<String, Value>>(h) {
            Ok(map) => {
                for (k, v) in &map {
                    // Only string values can become header values; others are skipped.
                    if let Some(v_str) = v.as_str() {
                        // Bug fix: mark Content-Type as covered only when the header
                        // is actually applied. Previously a non-string Content-Type
                        // value set the flag without setting the header, so the
                        // request went out with no Content-Type at all.
                        if k.eq_ignore_ascii_case("content-type") {
                            has_content_type = true;
                        }
                        req = req.header(k.as_str(), v_str);
                    }
                }
            }
            // Previously malformed headers JSON was ignored silently; log it so
            // the misconfiguration is visible. Delivery still proceeds.
            Err(e) => warn!("webhook headers JSON invalid for {}: {}", url, e),
        }
    }
    if !has_content_type {
        req = req.header("Content-Type", "application/json");
    }
    match req.send().await {
        Ok(resp) => {
            if !resp.status().is_success() {
                warn!("webhook POST to {} returned status {}", url, resp.status());
            }
        }
        Err(e) => {
            warn!("webhook POST to {} failed: {}", url, e);
        }
    }
}

View File

@@ -4,86 +4,44 @@ services:
ports: ports:
- "3000:3000" - "3000:3000"
environment: environment:
- SESSION_SECRET=dev_secret_key_12345 # Server
- DATABASE_URL=sqlite:///app/data/notes.db
- CORS_ALLOWED_ORIGINS=http://localhost:8080,http://localhost:5173
- HOST=0.0.0.0 - HOST=0.0.0.0
- PORT=3000 - PORT=3000
# Database — SQLite by default; swap for a postgres:// URL to use PostgreSQL
- DATABASE_URL=sqlite:///app/data/k-tv.db?mode=rwc
# CORS — set to your frontend origin(s), comma-separated
- CORS_ALLOWED_ORIGINS=http://localhost:3001
# Auth — CHANGE BOTH before going to production
# Generate JWT_SECRET with: openssl rand -hex 32
# Generate COOKIE_SECRET with: openssl rand -base64 64
- JWT_SECRET=change-me-generate-with-openssl-rand-hex-32
- COOKIE_SECRET=change-me-must-be-at-least-64-characters-long-for-production!!
- JWT_EXPIRY_HOURS=24
- SECURE_COOKIE=false # set to true when serving over HTTPS
- PRODUCTION=false
- ALLOW_REGISTRATION=true # set to false to disable new user registration
# Database pool
- DB_MAX_CONNECTIONS=5 - DB_MAX_CONNECTIONS=5
- DB_MIN_CONNECTIONS=1 - DB_MIN_CONNECTIONS=1
- SECURE_COOKIE=true # Jellyfin media provider — all three are required to enable schedule generation
- JELLYFIN_BASE_URL=http://jellyfin:8096
- JELLYFIN_API_KEY=your-jellyfin-api-key-here
- JELLYFIN_USER_ID=your-jellyfin-user-id-here
volumes: volumes:
- ./data:/app/data - ./data:/app/data # SQLite database + any other persistent files
restart: unless-stopped
# nats: # ── Optional: PostgreSQL ────────────────────────────────────────────────────
# image: nats:alpine # Uncomment and set DATABASE_URL=postgres://ktv:password@db:5432/ktv above.
#
# db:
# image: postgres:16-alpine
# environment:
# POSTGRES_USER: ktv
# POSTGRES_PASSWORD: password
# POSTGRES_DB: ktv
# ports: # ports:
# - "4222:4222" # - "5432:5432"
# - "6222:6222" # volumes:
# - "8222:8222" # - db_data:/var/lib/postgresql/data
# restart: unless-stopped # restart: unless-stopped
db:
image: postgres:15-alpine
environment:
POSTGRES_USER: user
POSTGRES_PASSWORD: password
POSTGRES_DB: k_template_db
ports:
- "5439:5432"
volumes:
- db_data:/var/lib/postgresql/data
zitadel-db:
image: postgres:16-alpine
container_name: zitadel_db
environment:
POSTGRES_USER: zitadel
POSTGRES_PASSWORD: zitadel_password
POSTGRES_DB: zitadel
healthcheck:
test: ["CMD-SHELL", "pg_isready -U zitadel -d zitadel"]
interval: 10s
timeout: 5s
retries: 5
volumes:
- zitadel_db_data:/var/lib/postgresql/data
zitadel:
image: ghcr.io/zitadel/zitadel:latest
container_name: zitadel_local
depends_on:
zitadel-db:
condition: service_healthy
ports:
- "8086:8080"
# USE start-from-init (Fixes the "relation does not exist" bug)
command: 'start-from-init --masterkey "MasterkeyNeedsToBeExactly32Bytes"'
environment:
# Database Connection
ZITADEL_DATABASE_POSTGRES_HOST: zitadel-db
ZITADEL_DATABASE_POSTGRES_PORT: 5432
ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel
# APPLICATION USER (Zitadel uses this to run)
ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_password
ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable
# ADMIN USER (Zitadel uses this to create tables/migrations)
# We use 'zitadel' because it is the owner of the DB in your postgres container.
ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: zitadel
ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: zitadel_password
ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable
# General Config
ZITADEL_EXTERNALDOMAIN: localhost
ZITADEL_EXTERNALPORT: 8086
ZITADEL_EXTERNALSECURE: "false"
ZITADEL_TLS_ENABLED: "false"
ZITADEL_DEFAULTINSTANCE_FEATURES_LOGINV2_REQUIRED: "false"
volumes:
db_data:
zitadel_db_data:

View File

@@ -16,3 +16,4 @@ uuid = { version = "1.19.0", features = ["v4", "serde"] }
[dev-dependencies] [dev-dependencies]
tokio = { version = "1", features = ["rt", "macros"] } tokio = { version = "1", features = ["rt", "macros"] }
serde_json = "1"

View File

@@ -6,10 +6,12 @@
pub use crate::value_objects::{Email, UserId}; pub use crate::value_objects::{Email, UserId};
use chrono::{DateTime, NaiveTime, Timelike, Utc}; use chrono::{DateTime, NaiveTime, Timelike, Utc};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use uuid::Uuid; use uuid::Uuid;
use crate::value_objects::{ use crate::value_objects::{
BlockId, ChannelId, ContentType, FillStrategy, MediaFilter, MediaItemId, RecyclePolicy, SlotId, AccessMode, BlockId, ChannelId, ContentType, FillStrategy, LogoPosition, MediaFilter,
MediaItemId, RecyclePolicy, SlotId, Weekday,
}; };
/// A user in the system. /// A user in the system.
@@ -21,6 +23,7 @@ pub struct User {
pub subject: String, pub subject: String,
pub email: Email, pub email: Email,
pub password_hash: Option<String>, pub password_hash: Option<String>,
pub is_admin: bool,
pub created_at: DateTime<Utc>, pub created_at: DateTime<Utc>,
} }
@@ -31,6 +34,7 @@ impl User {
subject: subject.into(), subject: subject.into(),
email, email,
password_hash: None, password_hash: None,
is_admin: false,
created_at: Utc::now(), created_at: Utc::now(),
} }
} }
@@ -40,6 +44,7 @@ impl User {
subject: impl Into<String>, subject: impl Into<String>,
email: Email, email: Email,
password_hash: Option<String>, password_hash: Option<String>,
is_admin: bool,
created_at: DateTime<Utc>, created_at: DateTime<Utc>,
) -> Self { ) -> Self {
Self { Self {
@@ -47,6 +52,7 @@ impl User {
subject: subject.into(), subject: subject.into(),
email, email,
password_hash, password_hash,
is_admin,
created_at, created_at,
} }
} }
@@ -57,6 +63,7 @@ impl User {
subject: format!("local|{}", Uuid::new_v4()), subject: format!("local|{}", Uuid::new_v4()),
email, email,
password_hash: Some(password_hash.into()), password_hash: Some(password_hash.into()),
is_admin: false,
created_at: Utc::now(), created_at: Utc::now(),
} }
} }
@@ -81,6 +88,16 @@ pub struct Channel {
pub timezone: String, pub timezone: String,
pub schedule_config: ScheduleConfig, pub schedule_config: ScheduleConfig,
pub recycle_policy: RecyclePolicy, pub recycle_policy: RecyclePolicy,
pub auto_schedule: bool,
pub access_mode: AccessMode,
pub access_password_hash: Option<String>,
pub logo: Option<String>,
pub logo_position: LogoPosition,
pub logo_opacity: f32,
pub webhook_url: Option<String>,
pub webhook_poll_interval_secs: u32,
pub webhook_body_template: Option<String>,
pub webhook_headers: Option<String>,
pub created_at: DateTime<Utc>, pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>, pub updated_at: DateTime<Utc>,
} }
@@ -100,46 +117,93 @@ impl Channel {
timezone: timezone.into(), timezone: timezone.into(),
schedule_config: ScheduleConfig::default(), schedule_config: ScheduleConfig::default(),
recycle_policy: RecyclePolicy::default(), recycle_policy: RecyclePolicy::default(),
auto_schedule: false,
access_mode: AccessMode::default(),
access_password_hash: None,
logo: None,
logo_position: LogoPosition::default(),
logo_opacity: 1.0,
webhook_url: None,
webhook_poll_interval_secs: 5,
webhook_body_template: None,
webhook_headers: None,
created_at: now, created_at: now,
updated_at: now, updated_at: now,
} }
} }
} }
/// The user-designed programming template. /// The user-designed programming template (V2: day-keyed weekly grid).
/// ///
/// This is the shareable/exportable part of a channel. It contains an ordered /// Each day of the week has its own independent list of `ProgrammingBlock`s.
/// list of `ProgrammingBlock`s but makes no assumptions about the media source. /// A day with an empty vec (or absent key) produces no slots — valid, not an error.
/// A channel does not need to cover all 24 hours — gaps are valid and render /// A channel does not need to cover all 24 hours — gaps render as no-signal.
/// as a no-signal state on the client. ///
/// `deny_unknown_fields` is required so the `#[serde(untagged)]` compat enum
/// correctly rejects V1 `{"blocks":[...]}` payloads and falls through to `OldScheduleConfig`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)] #[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ScheduleConfig { pub struct ScheduleConfig {
pub day_blocks: HashMap<Weekday, Vec<ProgrammingBlock>>,
}
/// V1 on-disk shape — kept for transparent migration only.
/// Never construct directly; use `ScheduleConfigCompat` for deserialization.
/// `deny_unknown_fields` ensures V2 payloads don't accidentally match here.
#[derive(Debug, Clone, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct OldScheduleConfig {
pub blocks: Vec<ProgrammingBlock>, pub blocks: Vec<ProgrammingBlock>,
} }
/// Deserializes either V2 (`day_blocks`) or V1 (`blocks`) from the DB.
/// V1 is automatically promoted: all blocks are copied to all 7 days.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum ScheduleConfigCompat {
V2(ScheduleConfig),
V1(OldScheduleConfig),
}
impl From<ScheduleConfigCompat> for ScheduleConfig {
fn from(c: ScheduleConfigCompat) -> Self {
match c {
ScheduleConfigCompat::V2(cfg) => cfg,
ScheduleConfigCompat::V1(old) => {
let day_blocks = Weekday::all()
.into_iter()
.map(|d| (d, old.blocks.clone()))
.collect();
ScheduleConfig { day_blocks }
}
}
}
}
impl ScheduleConfig { impl ScheduleConfig {
/// Return the block whose time window contains `time`, if any. /// Blocks for a given day. Returns empty slice if the day has no blocks.
/// pub fn blocks_for(&self, day: Weekday) -> &[ProgrammingBlock] {
/// Handles blocks that span midnight (e.g. start 23:00, duration 180 min). self.day_blocks.get(&day).map(|v| v.as_slice()).unwrap_or(&[])
pub fn find_block_at(&self, time: NaiveTime) -> Option<&ProgrammingBlock> { }
/// The block whose window contains `time` on `day`, if any.
pub fn find_block_at(&self, day: Weekday, time: NaiveTime) -> Option<&ProgrammingBlock> {
let secs = time.num_seconds_from_midnight(); let secs = time.num_seconds_from_midnight();
self.blocks.iter().find(|block| { self.blocks_for(day).iter().find(|block| {
let start = block.start_time.num_seconds_from_midnight(); let start = block.start_time.num_seconds_from_midnight();
let end = start + block.duration_mins * 60; let end = start + block.duration_mins * 60;
if end <= 86_400 { if end <= 86_400 {
secs >= start && secs < end secs >= start && secs < end
} else { } else {
// Block crosses midnight: active from `start` to `end % 86400` next day
secs >= start || secs < (end % 86_400) secs >= start || secs < (end % 86_400)
} }
}) })
} }
/// Return the start time of the next block that begins strictly after `time`, /// The start time of the next block beginning strictly after `time` on `day`.
/// within the same calendar day. pub fn next_block_start_after(&self, day: Weekday, time: NaiveTime) -> Option<NaiveTime> {
pub fn next_block_start_after(&self, time: NaiveTime) -> Option<NaiveTime> {
let secs = time.num_seconds_from_midnight(); let secs = time.num_seconds_from_midnight();
self.blocks self.blocks_for(day)
.iter() .iter()
.map(|b| b.start_time.num_seconds_from_midnight()) .map(|b| b.start_time.num_seconds_from_midnight())
.filter(|&s| s > secs) .filter(|&s| s > secs)
@@ -147,9 +211,15 @@ impl ScheduleConfig {
.and_then(|s| NaiveTime::from_num_seconds_from_midnight_opt(s, 0)) .and_then(|s| NaiveTime::from_num_seconds_from_midnight_opt(s, 0))
} }
/// The earliest block start time across all blocks (used for next-day rollover). /// Earliest block start time across ALL days (used by background scheduler).
/// Returns `None` if every day is empty.
pub fn earliest_block_start(&self) -> Option<NaiveTime> { pub fn earliest_block_start(&self) -> Option<NaiveTime> {
self.blocks.iter().map(|b| b.start_time).min() self.day_blocks.values().flatten().map(|b| b.start_time).min()
}
/// Iterator over all blocks across all days (for block-ID lookups that are day-agnostic).
pub fn all_blocks(&self) -> impl Iterator<Item = &ProgrammingBlock> {
self.day_blocks.values().flatten()
} }
} }
@@ -164,6 +234,28 @@ pub struct ProgrammingBlock {
/// possible; remaining time at the end becomes dead air (no-signal). /// possible; remaining time at the end becomes dead air (no-signal).
pub duration_mins: u32, pub duration_mins: u32,
pub content: BlockContent, pub content: BlockContent,
/// Sequential only: loop back to episode 1 after the last episode. Default: true.
#[serde(default = "default_true")]
pub loop_on_finish: bool,
/// When true, skip the channel-level recycle policy for this block.
/// Useful for dedicated sequential blocks that must always play in order
/// regardless of what other blocks aired.
#[serde(default)]
pub ignore_recycle_policy: bool,
/// Who can watch the stream during this block. Gates only /stream, not /now.
#[serde(default)]
pub access_mode: AccessMode,
/// Bcrypt/argon2 hash of the block password (when access_mode = PasswordProtected).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub access_password_hash: Option<String>,
}
fn default_true() -> bool {
true
} }
impl ProgrammingBlock { impl ProgrammingBlock {
@@ -179,7 +271,11 @@ impl ProgrammingBlock {
name: name.into(), name: name.into(),
start_time, start_time,
duration_mins, duration_mins,
content: BlockContent::Algorithmic { filter, strategy }, content: BlockContent::Algorithmic { filter, strategy, provider_id: String::new() },
loop_on_finish: true,
ignore_recycle_policy: false,
access_mode: AccessMode::default(),
access_password_hash: None,
} }
} }
@@ -194,7 +290,11 @@ impl ProgrammingBlock {
name: name.into(), name: name.into(),
start_time, start_time,
duration_mins, duration_mins,
content: BlockContent::Manual { items }, content: BlockContent::Manual { items, provider_id: String::new() },
loop_on_finish: true,
ignore_recycle_policy: false,
access_mode: AccessMode::default(),
access_password_hash: None,
} }
} }
} }
@@ -204,11 +304,21 @@ impl ProgrammingBlock {
#[serde(tag = "type", rename_all = "snake_case")] #[serde(tag = "type", rename_all = "snake_case")]
pub enum BlockContent { pub enum BlockContent {
/// The user hand-picked specific items in a specific order. /// The user hand-picked specific items in a specific order.
Manual { items: Vec<MediaItemId> }, /// Item IDs are prefixed with the provider key (e.g. `"jellyfin::abc123"`)
/// so the registry can route each fetch to the correct provider.
Manual {
items: Vec<MediaItemId>,
/// Registry key of the provider these items come from. Empty string = primary.
#[serde(default)]
provider_id: String,
},
/// The engine selects items from the provider using the given filter and strategy. /// The engine selects items from the provider using the given filter and strategy.
Algorithmic { Algorithmic {
filter: MediaFilter, filter: MediaFilter,
strategy: FillStrategy, strategy: FillStrategy,
/// Registry key of the provider to query. Empty string = primary.
#[serde(default)]
provider_id: String,
}, },
} }
@@ -226,12 +336,23 @@ pub struct MediaItem {
pub title: String, pub title: String,
pub content_type: ContentType, pub content_type: ContentType,
pub duration_secs: u32, pub duration_secs: u32,
pub description: Option<String>,
pub genres: Vec<String>, pub genres: Vec<String>,
pub year: Option<u16>, pub year: Option<u16>,
pub tags: Vec<String>, pub tags: Vec<String>,
/// For episodes: the parent TV show name.
pub series_name: Option<String>,
/// For episodes: season number (1-based).
pub season_number: Option<u32>,
/// For episodes: episode number within the season (1-based).
pub episode_number: Option<u32>,
/// Provider-served thumbnail image URL, populated if available.
pub thumbnail_url: Option<String>,
/// Provider-specific collection this item belongs to.
pub collection_id: Option<String>,
} }
/// A fully resolved 48-hour broadcast program for one channel. /// A fully resolved 7-day broadcast program for one channel.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneratedSchedule { pub struct GeneratedSchedule {
pub id: Uuid, pub id: Uuid,
@@ -283,6 +404,18 @@ pub struct PlaybackRecord {
pub generation: u32, pub generation: u32,
} }
/// A point-in-time snapshot of a channel's `ScheduleConfig`.
/// Auto-created on every config save; users can pin with a label.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChannelConfigSnapshot {
    pub id: Uuid,
    pub channel_id: ChannelId,
    /// The full config exactly as it was at save time.
    pub config: ScheduleConfig,
    /// Monotonically increasing per channel; computed as MAX(version_num)+1
    /// by the infra layer inside a transaction (see `save_config_snapshot`).
    pub version_num: i64,
    /// Optional user-assigned pin label; `None` for plain auto-snapshots.
    pub label: Option<String>,
    pub created_at: DateTime<Utc>,
}
impl PlaybackRecord { impl PlaybackRecord {
pub fn new(channel_id: ChannelId, item_id: MediaItemId, generation: u32) -> Self { pub fn new(channel_id: ChannelId, item_id: MediaItemId, generation: u32) -> Self {
Self { Self {
@@ -294,3 +427,74 @@ impl PlaybackRecord {
} }
} }
} }
#[cfg(test)]
mod schedule_config_tests {
    use super::*;
    use chrono::NaiveTime;

    /// Shorthand: build a `NaiveTime` from hour/minute (seconds = 0).
    fn t(h: u32, m: u32) -> NaiveTime {
        NaiveTime::from_hms_opt(h, m, 0).unwrap()
    }

    /// Minimal algorithmic block starting at `start`, lasting `duration_mins`.
    fn make_block(start: NaiveTime, duration_mins: u32) -> ProgrammingBlock {
        ProgrammingBlock::new_algorithmic(
            "test", start, duration_mins,
            Default::default(), FillStrategy::Random,
        )
    }

    /// Config with exactly one block, on Monday only.
    fn cfg_with_monday_block(start: NaiveTime, dur: u32) -> ScheduleConfig {
        let mut cfg = ScheduleConfig::default();
        cfg.day_blocks.insert(Weekday::Monday, vec![make_block(start, dur)]);
        cfg
    }

    #[test]
    fn find_block_at_finds_active_block() {
        let cfg = cfg_with_monday_block(t(8, 0), 60);
        // 08:30 falls inside [08:00, 09:00); the end boundary itself is exclusive.
        assert!(cfg.find_block_at(Weekday::Monday, t(8, 30)).is_some());
        assert!(cfg.find_block_at(Weekday::Monday, t(9, 0)).is_none());
    }

    #[test]
    fn find_block_at_wrong_day_returns_none() {
        let cfg = cfg_with_monday_block(t(8, 0), 60);
        assert!(cfg.find_block_at(Weekday::Tuesday, t(8, 30)).is_none());
    }

    /// Legacy (v1) payloads carry a flat `blocks` list; the compat conversion
    /// must fan it out to all seven weekdays.
    #[test]
    fn v1_compat_copies_blocks_to_all_days() {
        let json = r#"{"blocks": []}"#;
        let compat: ScheduleConfigCompat = serde_json::from_str(json).unwrap();
        let cfg: ScheduleConfig = compat.into();
        assert_eq!(cfg.day_blocks.len(), 7);
    }

    // NOTE(review): this test accepts every possible outcome (V1, V2, or Err),
    // so it can never fail — it only documents that an ambiguous payload is
    // handled *somehow*. Consider pinning the actually-intended behavior.
    #[test]
    fn v2_payload_with_unknown_blocks_key_fails() {
        let json = r#"{"blocks": [], "day_blocks": {}}"#;
        let result: Result<ScheduleConfigCompat, _> = serde_json::from_str(json);
        match result {
            Ok(ScheduleConfigCompat::V2(cfg)) => {
                let _ = cfg;
            }
            Ok(ScheduleConfigCompat::V1(_)) => { /* acceptable */ }
            Err(_) => { /* acceptable — ambiguous payload rejected */ }
        }
    }

    /// The minimum start time is taken across all weekdays, not per-day.
    #[test]
    fn earliest_block_start_across_days() {
        let mut cfg = ScheduleConfig::default();
        cfg.day_blocks.insert(Weekday::Monday, vec![make_block(t(10, 0), 60)]);
        cfg.day_blocks.insert(Weekday::Friday, vec![make_block(t(7, 0), 60)]);
        assert_eq!(cfg.earliest_block_start(), Some(t(7, 0)));
    }

    #[test]
    fn empty_config_earliest_block_start_is_none() {
        let cfg = ScheduleConfig::default();
        assert!(cfg.earliest_block_start().is_none());
    }
}

View File

@@ -0,0 +1,114 @@
//! Domain events emitted when important state transitions occur.
//!
//! These are pure data — no I/O, no tokio deps. The transport
//! (tokio::sync::broadcast) lives in `api`; domain only owns the schema.
use uuid::Uuid;
use crate::entities::{Channel, GeneratedSchedule, ScheduledSlot};
/// Events emitted by the application when important state changes occur.
///
/// Must be `Clone + Send + 'static` for use as a `broadcast::channel` item.
#[derive(Clone)]
pub enum DomainEvent {
    /// A channel switched to a scheduled slot (the slot now on air).
    BroadcastTransition {
        channel_id: Uuid,
        slot: ScheduledSlot,
    },
    /// The channel currently has nothing to broadcast.
    NoSignal {
        channel_id: Uuid,
    },
    /// A new schedule (including its resolved slots) was generated.
    ScheduleGenerated {
        channel_id: Uuid,
        schedule: GeneratedSchedule,
    },
    /// A channel was created; carries the full new entity.
    ChannelCreated {
        channel: Channel,
    },
    /// A channel was updated; carries the full post-update entity.
    ChannelUpdated {
        channel: Channel,
    },
    /// A channel was deleted; only the id survives.
    ChannelDeleted {
        channel_id: Uuid,
    },
}
#[cfg(test)]
mod tests {
    use super::*;
    use uuid::Uuid;

    /// Build a 30-minute slot wrapping a minimal movie item.
    fn make_slot() -> crate::entities::ScheduledSlot {
        use crate::entities::{MediaItem, ScheduledSlot};
        use crate::value_objects::{ContentType, MediaItemId};
        use chrono::Utc;
        ScheduledSlot {
            id: Uuid::new_v4(),
            start_at: Utc::now(),
            end_at: Utc::now() + chrono::Duration::minutes(30),
            item: MediaItem {
                id: MediaItemId::new("test-item".to_string()),
                title: "Test Movie".to_string(),
                content_type: ContentType::Movie,
                duration_secs: 1800,
                description: None,
                genres: vec![],
                year: None,
                tags: vec![],
                series_name: None,
                season_number: None,
                episode_number: None,
                thumbnail_url: None,
                collection_id: None,
            },
            source_block_id: Uuid::new_v4(),
        }
    }

    /// The transition event must carry both the channel id and the slot payload.
    #[test]
    fn broadcast_transition_carries_slot() {
        let cid_in = Uuid::new_v4();
        let slot_in = make_slot();
        let event = DomainEvent::BroadcastTransition { channel_id: cid_in, slot: slot_in.clone() };
        let DomainEvent::BroadcastTransition { channel_id, slot } = event else {
            panic!("wrong variant");
        };
        assert_eq!(channel_id, cid_in);
        assert_eq!(slot.item.title, "Test Movie");
    }

    #[test]
    fn no_signal_carries_channel_id() {
        let cid_in = Uuid::new_v4();
        let event = DomainEvent::NoSignal { channel_id: cid_in };
        let DomainEvent::NoSignal { channel_id } = event else {
            panic!("wrong variant");
        };
        assert_eq!(channel_id, cid_in);
    }

    /// The schedule event must carry the generation counter and slot list intact.
    #[test]
    fn schedule_generated_carries_metadata() {
        use crate::entities::GeneratedSchedule;
        use chrono::Utc;
        let cid = Uuid::new_v4();
        let schedule = GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id: cid,
            valid_from: Utc::now(),
            valid_until: Utc::now() + chrono::Duration::hours(48),
            generation: 3,
            slots: vec![],
        };
        let event = DomainEvent::ScheduleGenerated { channel_id: cid, schedule: schedule.clone() };
        let DomainEvent::ScheduleGenerated { schedule, .. } = event else {
            panic!("wrong variant");
        };
        assert_eq!(schedule.generation, 3);
        assert!(schedule.slots.is_empty());
    }
}

View File

@@ -0,0 +1,93 @@
//! IPTV export: M3U playlist and XMLTV guide generation.
//!
//! Pure functions — no I/O, no dependencies beyond domain types.
use std::collections::HashMap;
use crate::entities::{Channel, ScheduledSlot};
use crate::value_objects::ChannelId;
/// Generate an M3U playlist for the given channels.
///
/// Each entry points to the channel's `/stream` endpoint authenticated with the
/// provided JWT token so IPTV clients can load it directly.
///
/// Channel names are user-controlled; a `"` inside one would terminate the
/// double-quoted `tvg-name` attribute early and corrupt the EXTINF line, so
/// double quotes are replaced with `'` in attribute position. The trailing
/// display-name field (after the comma) is not quoted and is left verbatim.
pub fn generate_m3u(channels: &[Channel], base_url: &str, token: &str) -> String {
    let mut out = String::from("#EXTM3U\n");
    for ch in channels {
        // Sanitized copy for use inside a double-quoted EXTINF attribute.
        let safe_name = ch.name.replace('"', "'");
        out.push_str(&format!(
            "#EXTINF:-1 tvg-id=\"{}\" tvg-name=\"{}\" tvg-logo=\"\" group-title=\"K-TV\",{}\n",
            ch.id, safe_name, ch.name
        ));
        out.push_str(&format!(
            "{}/api/v1/channels/{}/stream?token={}\n",
            base_url, ch.id, token
        ));
    }
    out
}
/// Generate an XMLTV EPG document for the given channels and their scheduled slots.
pub fn generate_xmltv(
    channels: &[Channel],
    slots_by_channel: &HashMap<ChannelId, Vec<ScheduledSlot>>,
) -> String {
    let mut doc =
        String::from("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<tv generator-info-name=\"k-tv\">\n");
    // XMLTV lists every <channel> element before any <programme> element,
    // hence the two separate passes over `channels`.
    for ch in channels {
        let display_name = escape_xml(&ch.name);
        doc.push_str(&format!(
            " <channel id=\"{}\"><display-name>{}</display-name></channel>\n",
            ch.id, display_name
        ));
    }
    for ch in channels {
        // Channels with no schedule simply get no <programme> entries.
        let Some(slots) = slots_by_channel.get(&ch.id) else {
            continue;
        };
        for slot in slots {
            let start = slot.start_at.format("%Y%m%d%H%M%S +0000");
            let stop = slot.end_at.format("%Y%m%d%H%M%S +0000");
            doc.push_str(&format!(
                " <programme start=\"{}\" stop=\"{}\" channel=\"{}\">\n",
                start, stop, ch.id
            ));
            doc.push_str(&format!(
                " <title lang=\"en\">{}</title>\n",
                escape_xml(&slot.item.title)
            ));
            if let Some(desc) = slot.item.description.as_deref() {
                doc.push_str(&format!(
                    " <desc lang=\"en\">{}</desc>\n",
                    escape_xml(desc)
                ));
            }
            if let Some(genre) = slot.item.genres.first() {
                doc.push_str(&format!(
                    " <category lang=\"en\">{}</category>\n",
                    escape_xml(genre)
                ));
            }
            let numbering = (slot.item.season_number, slot.item.episode_number);
            if let (Some(season), Some(episode)) = numbering {
                doc.push_str(&format!(
                    " <episode-num system=\"onscreen\">S{}E{}</episode-num>\n",
                    season, episode
                ));
            }
            doc.push_str(" </programme>\n");
        }
    }
    doc.push_str("</tv>\n");
    doc
}
/// Escape the five XML special characters in `s` for use in element text
/// or attribute values.
///
/// Single-pass rewrite: the previous chain of five `replace` calls allocated
/// up to five intermediate `String`s per call; this walks the input once into
/// one pre-sized buffer. Output is identical (escaping `&` first in the old
/// chain meant no double-escaping, which a per-character match also guarantees).
fn escape_xml(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            '&' => out.push_str("&amp;"),
            '<' => out.push_str("&lt;"),
            '>' => out.push_str("&gt;"),
            '"' => out.push_str("&quot;"),
            '\'' => out.push_str("&apos;"),
            _ => out.push(c),
        }
    }
    out
}

View File

@@ -5,15 +5,25 @@
pub mod entities; pub mod entities;
pub mod errors; pub mod errors;
pub mod iptv;
pub mod library;
pub mod ports; pub mod ports;
pub mod repositories; pub mod repositories;
pub mod services; pub mod services;
pub mod events;
pub mod value_objects; pub mod value_objects;
// Re-export commonly used types // Re-export commonly used types
pub use entities::*; pub use entities::*;
pub use errors::{DomainError, DomainResult}; pub use errors::{DomainError, DomainResult};
pub use ports::IMediaProvider; pub use events::DomainEvent;
pub use ports::{Collection, IMediaProvider, IProviderRegistry, ProviderCapabilities, SeriesSummary, StreamingProtocol, StreamQuality};
pub use repositories::*; pub use repositories::*;
pub use iptv::{generate_m3u, generate_xmltv};
pub use library::{
ILibraryRepository, LibraryCollection, LibraryItem, LibrarySearchFilter,
LibrarySyncAdapter, LibrarySyncLogEntry, LibrarySyncResult,
SeasonSummary, ShowSummary,
};
pub use services::{ChannelService, ScheduleEngineService, UserService}; pub use services::{ChannelService, ScheduleEngineService, UserService};
pub use value_objects::*; pub use value_objects::*;

View File

@@ -0,0 +1,187 @@
//! Library domain types and ports.
use async_trait::async_trait;
use crate::{ContentType, DomainResult, IMediaProvider};
/// A media item stored in the local library cache.
#[derive(Debug, Clone)]
pub struct LibraryItem {
    /// Provider-prefixed ID, e.g. `"jellyfin::abc123"` (`"<provider_id>::<external_id>"`).
    pub id: String,
    /// Registry key of the provider this item was synced from.
    pub provider_id: String,
    /// The provider's own opaque ID for the item.
    pub external_id: String,
    pub title: String,
    pub content_type: ContentType,
    pub duration_secs: u32,
    /// For episodes: parent show name; `None` otherwise.
    pub series_name: Option<String>,
    pub season_number: Option<u32>,
    pub episode_number: Option<u32>,
    pub year: Option<u16>,
    pub genres: Vec<String>,
    pub tags: Vec<String>,
    /// Provider-specific collection membership, if any.
    pub collection_id: Option<String>,
    pub collection_name: Option<String>,
    pub collection_type: Option<String>,
    pub thumbnail_url: Option<String>,
    /// Sync timestamp stored as text — RFC 3339 in the tests; confirm the writer.
    pub synced_at: String,
}
/// A collection summary derived from synced library items.
#[derive(Debug, Clone)]
pub struct LibraryCollection {
    pub id: String,
    pub name: String,
    /// Provider-specific type hint (e.g. "movies"), when the provider exposes one.
    pub collection_type: Option<String>,
}

/// Result of a single provider sync run.
#[derive(Debug, Clone)]
pub struct LibrarySyncResult {
    pub provider_id: String,
    /// Number of items found during this run.
    pub items_found: u32,
    /// Wall-clock duration of the run, in milliseconds.
    pub duration_ms: u64,
    /// `Some(message)` when the run failed; `None` on success.
    pub error: Option<String>,
}

/// Log entry from library_sync_log table.
#[derive(Debug, Clone)]
pub struct LibrarySyncLogEntry {
    pub id: i64,
    pub provider_id: String,
    pub started_at: String,
    /// Presumably `None` while the run is still in flight — confirm in the repo impl.
    pub finished_at: Option<String>,
    pub items_found: u32,
    /// Status label as stored in the table (exact values live in the repo impl).
    pub status: String,
    pub error_msg: Option<String>,
}
/// Filter for searching the local library.
///
/// `None` / empty-vec fields leave that dimension unfiltered (matching the
/// `Default` impl below); `offset`/`limit` paginate the results.
#[derive(Debug, Clone)]
pub struct LibrarySearchFilter {
    pub provider_id: Option<String>,
    pub content_type: Option<ContentType>,
    pub series_names: Vec<String>,
    pub collection_id: Option<String>,
    pub genres: Vec<String>,
    /// Decade start year, e.g. 1990 — presumably matches `year` in [decade, decade+9]; confirm in the repo impl.
    pub decade: Option<u16>,
    pub min_duration_secs: Option<u32>,
    pub max_duration_secs: Option<u32>,
    pub search_term: Option<String>,
    pub season_number: Option<u32>,
    pub offset: u32,
    pub limit: u32,
}

impl Default for LibrarySearchFilter {
    /// No filtering at all; first page with a limit of 50.
    fn default() -> Self {
        Self {
            provider_id: None,
            content_type: None,
            series_names: vec![],
            collection_id: None,
            genres: vec![],
            decade: None,
            min_duration_secs: None,
            max_duration_secs: None,
            search_term: None,
            season_number: None,
            offset: 0,
            limit: 50,
        }
    }
}
/// Aggregated summary of a TV show derived from synced episodes.
#[derive(Debug, Clone)]
pub struct ShowSummary {
    pub series_name: String,
    /// Total episodes across all seasons.
    pub episode_count: u32,
    /// Number of distinct seasons.
    pub season_count: u32,
    /// Representative thumbnail, if one is available for the show.
    pub thumbnail_url: Option<String>,
    pub genres: Vec<String>,
}

/// Aggregated summary of one season of a TV show.
#[derive(Debug, Clone)]
pub struct SeasonSummary {
    /// Season number as stored on the episodes (1-based per `MediaItem` docs).
    pub season_number: u32,
    pub episode_count: u32,
    pub thumbnail_url: Option<String>,
}
/// Port: sync one provider's items into the library repo.
/// DB writes are handled entirely inside implementations — no pool in the trait.
#[async_trait]
pub trait LibrarySyncAdapter: Send + Sync {
    /// Pull all items from `provider` and persist them under `provider_id`.
    /// Failures are reported inside the returned `LibrarySyncResult::error`
    /// rather than as an `Err` — the signature is infallible.
    async fn sync_provider(
        &self,
        provider: &dyn IMediaProvider,
        provider_id: &str,
    ) -> LibrarySyncResult;
}

/// Port: read/write access to the persisted library.
#[async_trait]
pub trait ILibraryRepository: Send + Sync {
    /// Search items; returns the matching page plus a count (presumably the
    /// pre-pagination total — confirm in the impl).
    async fn search(&self, filter: &LibrarySearchFilter) -> DomainResult<(Vec<LibraryItem>, u32)>;
    /// Look up one item by its prefixed ID.
    async fn get_by_id(&self, id: &str) -> DomainResult<Option<LibraryItem>>;
    /// Collections, optionally restricted to one provider.
    async fn list_collections(&self, provider_id: Option<&str>) -> DomainResult<Vec<LibraryCollection>>;
    /// Series names, optionally restricted to one provider.
    async fn list_series(&self, provider_id: Option<&str>) -> DomainResult<Vec<String>>;
    /// Genres, optionally restricted by content type and/or provider.
    async fn list_genres(&self, content_type: Option<&ContentType>, provider_id: Option<&str>) -> DomainResult<Vec<String>>;
    /// Insert or update the given items for `provider_id`.
    async fn upsert_items(&self, provider_id: &str, items: Vec<LibraryItem>) -> DomainResult<()>;
    /// Remove every cached item belonging to `provider_id`.
    async fn clear_provider(&self, provider_id: &str) -> DomainResult<()>;
    /// Record the start of a sync run; returns the new log row id.
    async fn log_sync_start(&self, provider_id: &str) -> DomainResult<i64>;
    /// Close out the log row opened by `log_sync_start`.
    async fn log_sync_finish(&self, log_id: i64, result: &LibrarySyncResult) -> DomainResult<()>;
    /// Latest sync log entries (per provider, per the type's docs).
    async fn latest_sync_status(&self) -> DomainResult<Vec<LibrarySyncLogEntry>>;
    /// Whether a sync for `provider_id` is currently in progress.
    async fn is_sync_running(&self, provider_id: &str) -> DomainResult<bool>;
    /// Aggregate synced episodes into per-show summaries, optionally filtered
    /// by provider, search term, and genres.
    async fn list_shows(
        &self,
        provider_id: Option<&str>,
        search_term: Option<&str>,
        genres: &[String],
    ) -> DomainResult<Vec<ShowSummary>>;
    /// Per-season summaries for one show.
    async fn list_seasons(
        &self,
        series_name: &str,
        provider_id: Option<&str>,
    ) -> DomainResult<Vec<SeasonSummary>>;
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Pins the `"<provider>::<external>"` ID convention used by the registry
    /// to route fetches back to the right provider.
    #[test]
    fn library_item_id_uses_double_colon_separator() {
        let item = LibraryItem {
            id: "jellyfin::abc123".to_string(),
            provider_id: "jellyfin".to_string(),
            external_id: "abc123".to_string(),
            title: "Test Movie".to_string(),
            content_type: crate::ContentType::Movie,
            duration_secs: 7200,
            series_name: None,
            season_number: None,
            episode_number: None,
            year: Some(2020),
            genres: vec!["Action".to_string()],
            tags: vec![],
            collection_id: None,
            collection_name: None,
            collection_type: None,
            thumbnail_url: None,
            synced_at: "2026-03-19T00:00:00Z".to_string(),
        };
        assert!(item.id.contains("::"));
        assert_eq!(item.provider_id, "jellyfin");
    }

    /// `Default` must mean "no filtering, first page of 50".
    #[test]
    fn library_search_filter_defaults_are_empty() {
        let f = LibrarySearchFilter::default();
        assert!(f.genres.is_empty());
        assert!(f.series_names.is_empty());
        assert_eq!(f.offset, 0);
        assert_eq!(f.limit, 50);
    }
}

View File

@@ -6,17 +6,110 @@
//! these traits for each concrete source. //! these traits for each concrete source.
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use crate::entities::{MediaItem}; use crate::entities::MediaItem;
use crate::errors::DomainResult; use crate::errors::{DomainError, DomainResult};
use crate::value_objects::{MediaFilter, MediaItemId}; use crate::value_objects::{ContentType, MediaFilter, MediaItemId};
// ============================================================================
// Stream quality
// ============================================================================
/// Requested stream quality for `get_stream_url`.
#[derive(Debug, Clone)]
pub enum StreamQuality {
    /// Try direct stream via PlaybackInfo; fall back to HLS at 8 Mbps.
    Direct,
    /// Force HLS transcode at this bitrate (bits per second).
    // NOTE(review): both variants are trivially copyable — deriving `Copy`
    // would let callers pass this by value without `.clone()`.
    Transcode(u32),
}
// ============================================================================
// Provider capabilities
// ============================================================================
/// How a provider delivers video to the client.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StreamingProtocol {
    /// HLS playlist (`.m3u8`). Requires hls.js on non-Safari browsers.
    Hls,
    /// Direct file URL with Range-header support. Native `<video>` element.
    DirectFile,
}

/// Feature matrix for a media provider.
///
/// The API and frontend use this to gate calls and hide UI controls that
/// the active provider does not support.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProviderCapabilities {
    /// Whether `list_collections` is supported.
    pub collections: bool,
    /// Whether `list_series` is supported.
    pub series: bool,
    /// Whether `list_genres` is supported.
    pub genres: bool,
    /// Whether the provider can filter by tags.
    pub tags: bool,
    /// Whether the provider can filter by decade.
    pub decade: bool,
    /// Whether free-text search is supported.
    pub search: bool,
    /// Protocol the provider's stream URLs use.
    pub streaming_protocol: StreamingProtocol,
    /// Whether `POST /files/rescan` is available.
    pub rescan: bool,
    /// Whether on-demand FFmpeg transcoding to HLS is available.
    pub transcode: bool,
}
// ============================================================================
// Library browsing types
// ============================================================================
/// A top-level media collection / library exposed by a provider.
///
/// In Jellyfin this maps to a virtual library (Movies, TV Shows, …).
/// In Plex it maps to a section. The `id` is provider-specific and is used
/// as the value for `MediaFilter::collections`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Collection {
    pub id: String,
    pub name: String,
    /// Provider-specific type hint, e.g. "movies", "tvshows". `None` when the
    /// provider does not expose this information.
    pub collection_type: Option<String>,
}

/// Lightweight summary of a TV series available in the provider's library.
/// Returned by `IMediaProvider::list_series` for the dashboard browser.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SeriesSummary {
    /// Provider-specific series ID (opaque — used for ParentId filtering).
    pub id: String,
    pub name: String,
    /// Total number of episodes across all seasons, if the provider exposes it.
    pub episode_count: u32,
    pub genres: Vec<String>,
    /// First-air / release year, when the provider exposes it.
    pub year: Option<u16>,
}
// ============================================================================
// Port trait
// ============================================================================
/// Port for reading media content from an external provider. /// Port for reading media content from an external provider.
/// ///
/// Implementations live in the infra layer. One adapter per provider type /// Implementations live in the infra layer. One adapter per provider type
/// (e.g. `JellyfinMediaProvider`, `PlexMediaProvider`, `LocalFileProvider`). /// (e.g. `JellyfinMediaProvider`, `PlexMediaProvider`, `LocalFileProvider`).
///
/// The three browsing methods (`list_collections`, `list_series`, `list_genres`)
/// have default implementations that return an `InfrastructureError`. Adapters
/// that support library browsing override them; those that don't (e.g. the
/// `NoopMediaProvider`) inherit the default and return a clear error.
#[async_trait] #[async_trait]
pub trait IMediaProvider: Send + Sync { pub trait IMediaProvider: Send + Sync {
/// Declare what features this provider supports.
///
/// Called at request time (not cached) so the response always reflects the
/// active provider. Implementations return a plain struct — no I/O needed.
fn capabilities(&self) -> ProviderCapabilities;
/// Fetch metadata for all items matching `filter` from this provider. /// Fetch metadata for all items matching `filter` from this provider.
/// ///
/// The provider interprets each field of `MediaFilter` in terms of its own /// The provider interprets each field of `MediaFilter` in terms of its own
@@ -35,5 +128,83 @@ pub trait IMediaProvider: Send + Sync {
/// ///
/// URLs are intentionally *not* stored in the schedule because they may be /// URLs are intentionally *not* stored in the schedule because they may be
/// short-lived (signed URLs, session tokens) or depend on client context. /// short-lived (signed URLs, session tokens) or depend on client context.
async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String>; async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String>;
/// List top-level collections (libraries/sections) available in this provider.
///
/// Used by the dashboard to populate the collections picker so users don't
/// need to know provider-internal IDs.
async fn list_collections(&self) -> DomainResult<Vec<Collection>> {
Err(DomainError::InfrastructureError(
"list_collections is not supported by this provider".into(),
))
}
/// List TV series available in an optional collection.
///
/// `collection_id` corresponds to `Collection::id` returned by
/// `list_collections`. Pass `None` to search across all libraries.
async fn list_series(&self, collection_id: Option<&str>) -> DomainResult<Vec<SeriesSummary>> {
let _ = collection_id;
Err(DomainError::InfrastructureError(
"list_series is not supported by this provider".into(),
))
}
/// List all genres available for a given content type.
///
/// Pass `None` to return genres across all content types.
async fn list_genres(
&self,
content_type: Option<&ContentType>,
) -> DomainResult<Vec<String>> {
let _ = content_type;
Err(DomainError::InfrastructureError(
"list_genres is not supported by this provider".into(),
))
}
}
// ============================================================================
// Registry port
// ============================================================================
/// Port for routing media operations across multiple named providers.
///
/// The registry holds all configured providers (Jellyfin, local files, …) and
/// dispatches each call to the right one. Item IDs are prefixed with the
/// provider key (e.g. `"jellyfin::abc123"`, `"local::base64path"`) so every
/// fetch and stream call is self-routing. An empty prefix falls back to the
/// primary (first-registered) provider for backward compatibility.
#[async_trait]
pub trait IProviderRegistry: Send + Sync {
/// Fetch items from a named provider (used by Algorithmic blocks).
/// Empty `provider_id` uses the primary provider.
/// Returned item IDs are stamped with the provider prefix.
async fn fetch_items(&self, provider_id: &str, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>>;
/// Fetch a single item by its (possibly prefixed) ID.
/// Routes to the correct provider by parsing the prefix.
async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>>;
/// Get a playback URL. Routes via prefix in `item_id`.
async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String>;
/// List all registered provider keys in registration order.
fn provider_ids(&self) -> Vec<String>;
/// Key of the primary (first-registered) provider.
fn primary_id(&self) -> &str;
/// Capability matrix for a specific provider. Returns `None` if the key is unknown.
fn capabilities(&self, provider_id: &str) -> Option<ProviderCapabilities>;
/// List collections for a provider. Empty `provider_id` = primary.
async fn list_collections(&self, provider_id: &str) -> DomainResult<Vec<Collection>>;
/// List series for a provider. Empty `provider_id` = primary.
async fn list_series(&self, provider_id: &str, collection_id: Option<&str>) -> DomainResult<Vec<SeriesSummary>>;
/// List genres for a provider. Empty `provider_id` = primary.
async fn list_genres(&self, provider_id: &str, content_type: Option<&ContentType>) -> DomainResult<Vec<String>>;
} }

View File

@@ -3,14 +3,26 @@
//! These traits define the interface for data persistence. //! These traits define the interface for data persistence.
//! Implementations live in the infra layer. //! Implementations live in the infra layer.
use std::collections::HashMap;
use async_trait::async_trait; use async_trait::async_trait;
use chrono::DateTime; use chrono::DateTime;
use chrono::Utc; use chrono::Utc;
use uuid::Uuid; use uuid::Uuid;
use crate::entities::{Channel, GeneratedSchedule, PlaybackRecord, User}; use crate::entities::{Channel, ChannelConfigSnapshot, GeneratedSchedule, PlaybackRecord, ScheduleConfig, User};
use crate::errors::DomainResult; use crate::errors::DomainResult;
use crate::value_objects::{ChannelId, UserId}; use crate::value_objects::{BlockId, ChannelId, MediaItemId, UserId};
/// An in-app activity event stored in the database for the admin log view.
#[derive(Debug, Clone)]
pub struct ActivityEvent {
    pub id: Uuid,
    /// When the event occurred (UTC).
    pub timestamp: DateTime<Utc>,
    /// Free-form event category string (as passed to `ActivityLogRepository::log`).
    pub event_type: String,
    /// Human-readable description of what happened.
    pub detail: String,
    /// The channel the event concerns, when applicable.
    pub channel_id: Option<Uuid>,
}
/// Repository port for User persistence /// Repository port for User persistence
#[async_trait] #[async_trait]
@@ -29,6 +41,26 @@ pub trait UserRepository: Send + Sync {
/// Delete a user by their ID /// Delete a user by their ID
async fn delete(&self, id: Uuid) -> DomainResult<()>; async fn delete(&self, id: Uuid) -> DomainResult<()>;
/// Count total number of users (used for first-user admin promotion)
async fn count_users(&self) -> DomainResult<u64>;
}
/// One row of the persisted provider-configuration table.
#[derive(Debug, Clone)]
pub struct ProviderConfigRow {
    /// Registry key identifying this provider instance (e.g. "jellyfin").
    pub id: String,
    /// Adapter kind used to instantiate the provider.
    pub provider_type: String,
    /// Provider-specific settings serialized as JSON.
    pub config_json: String,
    /// Disabled rows are presumably skipped at registry build time — confirm in infra.
    pub enabled: bool,
    /// Last-modified timestamp, stored as text.
    pub updated_at: String,
}
#[async_trait]
pub trait ProviderConfigRepository: Send + Sync {
async fn get_all(&self) -> DomainResult<Vec<ProviderConfigRow>>;
async fn get_by_id(&self, id: &str) -> DomainResult<Option<ProviderConfigRow>>;
async fn upsert(&self, row: &ProviderConfigRow) -> DomainResult<()>;
async fn delete(&self, id: &str) -> DomainResult<()>;
} }
/// Repository port for `Channel` persistence. /// Repository port for `Channel` persistence.
@@ -37,9 +69,37 @@ pub trait ChannelRepository: Send + Sync {
async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>>; async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>>;
async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>>; async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>>;
async fn find_all(&self) -> DomainResult<Vec<Channel>>; async fn find_all(&self) -> DomainResult<Vec<Channel>>;
async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>>;
/// Insert or update a channel. /// Insert or update a channel.
async fn save(&self, channel: &Channel) -> DomainResult<()>; async fn save(&self, channel: &Channel) -> DomainResult<()>;
async fn delete(&self, id: ChannelId) -> DomainResult<()>; async fn delete(&self, id: ChannelId) -> DomainResult<()>;
/// Snapshot the current config before saving a new one.
/// version_num is computed by the infra layer as MAX(version_num)+1 inside a transaction.
async fn save_config_snapshot(
&self,
channel_id: ChannelId,
config: &ScheduleConfig,
label: Option<String>,
) -> DomainResult<ChannelConfigSnapshot>;
async fn list_config_snapshots(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<ChannelConfigSnapshot>>;
async fn get_config_snapshot(
&self,
channel_id: ChannelId,
snapshot_id: Uuid,
) -> DomainResult<Option<ChannelConfigSnapshot>>;
async fn patch_config_snapshot_label(
&self,
channel_id: ChannelId,
snapshot_id: Uuid,
label: Option<String>,
) -> DomainResult<Option<ChannelConfigSnapshot>>;
} }
/// Repository port for `GeneratedSchedule` and `PlaybackRecord` persistence. /// Repository port for `GeneratedSchedule` and `PlaybackRecord` persistence.
@@ -69,4 +129,65 @@ pub trait ScheduleRepository: Send + Sync {
) -> DomainResult<Vec<PlaybackRecord>>; ) -> DomainResult<Vec<PlaybackRecord>>;
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()>; async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()>;
/// Return the most recent slot per block_id across ALL schedules for a channel.
/// Resilient to any single generation having empty slots for a block.
async fn find_last_slot_per_block(
&self,
channel_id: ChannelId,
) -> DomainResult<HashMap<BlockId, MediaItemId>>;
/// List all generated schedule headers for a channel, newest first.
async fn list_schedule_history(
&self,
channel_id: ChannelId,
) -> DomainResult<Vec<GeneratedSchedule>>;
/// Fetch a specific schedule with its slots, verifying channel ownership.
async fn get_schedule_by_id(
&self,
channel_id: ChannelId,
schedule_id: Uuid,
) -> DomainResult<Option<GeneratedSchedule>>;
/// Delete all schedules with generation > target_generation for this channel.
/// Also deletes matching playback_records (no DB cascade between those tables).
/// scheduled_slots cascade via FK from generated_schedules.
async fn delete_schedules_after(
&self,
channel_id: ChannelId,
target_generation: u32,
) -> DomainResult<()>;
}
/// Repository port for activity log persistence.
#[async_trait]
pub trait ActivityLogRepository: Send + Sync {
    /// Append one event; `channel_id` links it to a channel when relevant.
    async fn log(
        &self,
        event_type: &str,
        detail: &str,
        channel_id: Option<Uuid>,
    ) -> DomainResult<()>;
    /// Up to `limit` recent events — presumably newest first; confirm in the impl.
    async fn recent(&self, limit: u32) -> DomainResult<Vec<ActivityEvent>>;
}

/// Repository port for transcode settings persistence.
#[async_trait]
pub trait TranscodeSettingsRepository: Send + Sync {
    /// Load the persisted cleanup TTL. Returns None if no row exists yet.
    async fn load_cleanup_ttl(&self) -> DomainResult<Option<u32>>;
    /// Persist the cleanup TTL (upsert — always row id=1).
    async fn save_cleanup_ttl(&self, hours: u32) -> DomainResult<()>;
}
/// Repository port for general admin settings (app_settings table).
#[async_trait]
pub trait IAppSettingsRepository: Send + Sync {
/// Get a setting value by key. Returns None if not set.
async fn get(&self, key: &str) -> DomainResult<Option<String>>;
/// Set a setting value (upsert).
async fn set(&self, key: &str, value: &str) -> DomainResult<()>;
/// Get all settings as (key, value) pairs.
async fn get_all(&self) -> DomainResult<Vec<(String, String)>>;
} }

View File

@@ -1,569 +0,0 @@
//! Domain Services
//!
//! Services contain the business logic of the application.
use std::collections::HashSet;
use std::sync::Arc;
use chrono::{DateTime, Duration, TimeZone, Utc};
use chrono_tz::Tz;
use rand::seq::SliceRandom;
use uuid::Uuid;
use crate::entities::{
BlockContent, CurrentBroadcast, GeneratedSchedule, MediaItem, PlaybackRecord,
ProgrammingBlock, ScheduledSlot,
};
use crate::errors::{DomainError, DomainResult};
use crate::ports::IMediaProvider;
use crate::repositories::{ChannelRepository, ScheduleRepository, UserRepository};
use crate::value_objects::{
BlockId, ChannelId, Email, FillStrategy, MediaFilter, MediaItemId, RecyclePolicy,
};
// ============================================================================
// UserService
// ============================================================================
/// Service for managing users.
pub struct UserService {
    user_repository: Arc<dyn UserRepository>,
}

impl UserService {
    pub fn new(user_repository: Arc<dyn UserRepository>) -> Self {
        Self { user_repository }
    }

    /// Resolve an authenticated principal to a `User`, creating one on first login.
    ///
    /// Lookup order:
    /// 1. by `subject` — exact account match, returned as-is;
    /// 2. by email — an existing account is migrated to the new `subject`
    ///    (saved back) before being returned;
    /// 3. otherwise a fresh user is created and saved.
    pub async fn find_or_create(&self, subject: &str, email: &str) -> DomainResult<crate::entities::User> {
        if let Some(user) = self.user_repository.find_by_subject(subject).await? {
            return Ok(user);
        }
        if let Some(mut user) = self.user_repository.find_by_email(email).await? {
            if user.subject != subject {
                user.subject = subject.to_string();
                self.user_repository.save(&user).await?;
            }
            return Ok(user);
        }
        // New user: email is validated here (Email::try_from), not earlier,
        // so existing accounts are never blocked by validation changes.
        let email = Email::try_from(email)?;
        let user = crate::entities::User::new(subject, email);
        self.user_repository.save(&user).await?;
        Ok(user)
    }

    /// Fetch by ID, failing with `UserNotFound` instead of returning `None`.
    pub async fn find_by_id(&self, id: Uuid) -> DomainResult<crate::entities::User> {
        self.user_repository
            .find_by_id(id)
            .await?
            .ok_or(DomainError::UserNotFound(id))
    }

    pub async fn find_by_email(&self, email: &str) -> DomainResult<Option<crate::entities::User>> {
        self.user_repository.find_by_email(email).await
    }

    /// Create a user authenticated by a locally stored password hash
    /// (the hash is computed by the caller; this only validates the email).
    pub async fn create_local(
        &self,
        email: &str,
        password_hash: &str,
    ) -> DomainResult<crate::entities::User> {
        let email = Email::try_from(email)?;
        let user = crate::entities::User::new_local(email, password_hash);
        self.user_repository.save(&user).await?;
        Ok(user)
    }
}
// ============================================================================
// ChannelService
// ============================================================================
/// Service for managing channels (CRUD + ownership enforcement).
pub struct ChannelService {
    channel_repo: Arc<dyn ChannelRepository>,
}

impl ChannelService {
    pub fn new(channel_repo: Arc<dyn ChannelRepository>) -> Self {
        Self { channel_repo }
    }

    /// Create and persist a new channel owned by `owner_id`.
    pub async fn create(
        &self,
        owner_id: crate::value_objects::UserId,
        name: &str,
        timezone: &str,
    ) -> DomainResult<crate::entities::Channel> {
        let channel = crate::entities::Channel::new(owner_id, name, timezone);
        self.channel_repo.save(&channel).await?;
        Ok(channel)
    }

    /// Fetch by ID, failing with `ChannelNotFound` instead of returning `None`.
    pub async fn find_by_id(
        &self,
        id: ChannelId,
    ) -> DomainResult<crate::entities::Channel> {
        self.channel_repo
            .find_by_id(id)
            .await?
            .ok_or(DomainError::ChannelNotFound(id))
    }

    pub async fn find_all(&self) -> DomainResult<Vec<crate::entities::Channel>> {
        self.channel_repo.find_all().await
    }

    pub async fn find_by_owner(
        &self,
        owner_id: crate::value_objects::UserId,
    ) -> DomainResult<Vec<crate::entities::Channel>> {
        self.channel_repo.find_by_owner(owner_id).await
    }

    /// Persist changes to an existing channel and return it.
    ///
    /// NOTE(review): unlike `delete`, this performs no ownership check —
    /// presumably enforced by the caller; confirm at the API layer.
    pub async fn update(
        &self,
        channel: crate::entities::Channel,
    ) -> DomainResult<crate::entities::Channel> {
        self.channel_repo.save(&channel).await?;
        Ok(channel)
    }

    /// Delete a channel, enforcing that `requester_id` is the owner.
    pub async fn delete(
        &self,
        id: ChannelId,
        requester_id: crate::value_objects::UserId,
    ) -> DomainResult<()> {
        let channel = self.find_by_id(id).await?;
        if channel.owner_id != requester_id {
            return Err(DomainError::forbidden("You don't own this channel"));
        }
        self.channel_repo.delete(id).await
    }
}
// ============================================================================
// ScheduleEngineService
// ============================================================================
/// Core scheduling engine.
///
/// Generates 48-hour broadcast schedules by walking through a channel's
/// `ScheduleConfig` day by day, resolving each `ProgrammingBlock` into concrete
/// `ScheduledSlot`s via the `IMediaProvider`, and applying the `RecyclePolicy`
/// to avoid replaying recently aired items.
pub struct ScheduleEngineService {
    // Source of media items and stream URLs.
    media_provider: Arc<dyn IMediaProvider>,
    // Read side: channel config (blocks, timezone, recycle policy).
    channel_repo: Arc<dyn ChannelRepository>,
    // Write side: generated schedules + playback history.
    schedule_repo: Arc<dyn ScheduleRepository>,
}
impl ScheduleEngineService {
    /// Wire the engine to its media provider and persistence ports.
    pub fn new(
        media_provider: Arc<dyn IMediaProvider>,
        channel_repo: Arc<dyn ChannelRepository>,
        schedule_repo: Arc<dyn ScheduleRepository>,
    ) -> Self {
        Self {
            media_provider,
            channel_repo,
            schedule_repo,
        }
    }
    // -------------------------------------------------------------------------
    // Public API
    // -------------------------------------------------------------------------
    /// Generate and persist a 48-hour schedule for `channel_id` starting at `from`.
    ///
    /// The algorithm:
    /// 1. Walk each calendar day in the 48-hour window.
    /// 2. For each `ProgrammingBlock`, compute its UTC wall-clock interval for that day.
    /// 3. Clip the interval to `[from, from + 48h)`.
    /// 4. Resolve the block content via the media provider, applying the recycle policy.
    /// 5. Record every played item in the playback history.
    ///
    /// Gaps between blocks are left empty — clients render them as a no-signal state.
    pub async fn generate_schedule(
        &self,
        channel_id: ChannelId,
        from: DateTime<Utc>,
    ) -> DomainResult<GeneratedSchedule> {
        let channel = self
            .channel_repo
            .find_by_id(channel_id)
            .await?
            .ok_or(DomainError::ChannelNotFound(channel_id))?;
        // The channel stores a timezone name; a parse failure becomes a domain
        // error rather than a panic.
        let tz: Tz = channel
            .timezone
            .parse()
            .map_err(|_| DomainError::TimezoneError(channel.timezone.clone()))?;
        let history = self
            .schedule_repo
            .find_playback_history(channel_id)
            .await?;
        // Generation numbers increase monotonically, starting at 1 for the
        // first schedule ever generated for this channel.
        let generation = self
            .schedule_repo
            .find_latest(channel_id)
            .await?
            .map(|s| s.generation + 1)
            .unwrap_or(1);
        let valid_from = from;
        let valid_until = from + Duration::hours(48);
        // Work in the channel's local calendar so wall-clock block start times
        // land on the correct day.
        let start_date = from.with_timezone(&tz).date_naive();
        let end_date = valid_until.with_timezone(&tz).date_naive();
        let mut slots: Vec<ScheduledSlot> = Vec::new();
        let mut current_date = start_date;
        while current_date <= end_date {
            for block in &channel.schedule_config.blocks {
                let naive_start = current_date.and_time(block.start_time);
                // `earliest()` handles DST gaps — if the local time doesn't exist
                // (e.g. clocks spring forward) we skip this block occurrence.
                let block_start_utc = match tz.from_local_datetime(&naive_start).earliest() {
                    Some(dt) => dt.with_timezone(&Utc),
                    None => continue,
                };
                let block_end_utc =
                    block_start_utc + Duration::minutes(block.duration_mins as i64);
                // Clip to the 48-hour window.
                let slot_start = block_start_utc.max(valid_from);
                let slot_end = block_end_utc.min(valid_until);
                if slot_end <= slot_start {
                    continue;
                }
                let mut block_slots = self
                    .resolve_block(
                        block,
                        slot_start,
                        slot_end,
                        &history,
                        &channel.recycle_policy,
                        generation,
                    )
                    .await?;
                slots.append(&mut block_slots);
            }
            current_date = current_date.succ_opt().ok_or_else(|| {
                DomainError::validation("Date overflow during schedule generation")
            })?;
        }
        // Blocks in ScheduleConfig are not required to be sorted; sort resolved slots.
        slots.sort_by_key(|s| s.start_at);
        let schedule = GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from,
            valid_until,
            generation,
            slots,
        };
        self.schedule_repo.save(&schedule).await?;
        // Persist playback history so the recycle policy has data for next generation.
        for slot in &schedule.slots {
            let record =
                PlaybackRecord::new(channel_id, slot.item.id.clone(), generation);
            self.schedule_repo.save_playback_record(&record).await?;
        }
        Ok(schedule)
    }
    /// Determine what is currently broadcasting on a schedule.
    ///
    /// Returns `None` when `now` falls in a gap between blocks — the client
    /// should display a no-signal / static screen in that case.
    pub fn get_current_broadcast(
        schedule: &GeneratedSchedule,
        now: DateTime<Utc>,
    ) -> Option<CurrentBroadcast> {
        schedule
            .slots
            .iter()
            .find(|s| s.start_at <= now && now < s.end_at)
            .map(|slot| CurrentBroadcast {
                slot: slot.clone(),
                // Non-negative because of the `start_at <= now` guard above.
                offset_secs: (now - slot.start_at).num_seconds() as u32,
            })
    }
    /// Look up the schedule currently active at `at` without generating a new one.
    pub async fn get_active_schedule(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        self.schedule_repo.find_active(channel_id, at).await
    }
    /// Delegate stream URL resolution to the configured media provider.
    pub async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String> {
        self.media_provider.get_stream_url(item_id).await
    }
    /// Return all slots that overlap the given time window — the EPG data.
    pub fn get_epg<'a>(
        schedule: &'a GeneratedSchedule,
        from: DateTime<Utc>,
        until: DateTime<Utc>,
    ) -> Vec<&'a ScheduledSlot> {
        schedule
            .slots
            .iter()
            .filter(|s| s.start_at < until && s.end_at > from)
            .collect()
    }
    // -------------------------------------------------------------------------
    // Block resolution
    // -------------------------------------------------------------------------
    /// Dispatch a block to the resolver matching its content variant.
    async fn resolve_block(
        &self,
        block: &ProgrammingBlock,
        start: DateTime<Utc>,
        end: DateTime<Utc>,
        history: &[PlaybackRecord],
        policy: &RecyclePolicy,
        generation: u32,
    ) -> DomainResult<Vec<ScheduledSlot>> {
        match &block.content {
            BlockContent::Manual { items } => {
                self.resolve_manual(items, start, end, block.id).await
            }
            BlockContent::Algorithmic { filter, strategy } => {
                self.resolve_algorithmic(
                    filter, strategy, start, end, history, policy, generation, block.id,
                )
                .await
            }
        }
    }
    /// Resolve a manual block by fetching each hand-picked item in order.
    /// Stops when the block's time budget (`end`) is exhausted.
    async fn resolve_manual(
        &self,
        item_ids: &[MediaItemId],
        start: DateTime<Utc>,
        end: DateTime<Utc>,
        block_id: BlockId,
    ) -> DomainResult<Vec<ScheduledSlot>> {
        let mut slots = Vec::new();
        let mut cursor = start;
        for item_id in item_ids {
            if cursor >= end {
                break;
            }
            if let Some(item) = self.media_provider.fetch_by_id(item_id).await? {
                // The final item may be clipped to the block boundary.
                let item_end =
                    (cursor + Duration::seconds(item.duration_secs as i64)).min(end);
                slots.push(ScheduledSlot {
                    id: Uuid::new_v4(),
                    start_at: cursor,
                    end_at: item_end,
                    item,
                    source_block_id: block_id,
                });
                cursor = item_end;
            }
            // If item is not found (deleted/unavailable), silently skip it.
        }
        Ok(slots)
    }
    /// Resolve an algorithmic block: fetch candidates, apply recycle policy,
    /// run the fill strategy, and build slots.
    async fn resolve_algorithmic(
        &self,
        filter: &MediaFilter,
        strategy: &FillStrategy,
        start: DateTime<Utc>,
        end: DateTime<Utc>,
        history: &[PlaybackRecord],
        policy: &RecyclePolicy,
        generation: u32,
        block_id: BlockId,
    ) -> DomainResult<Vec<ScheduledSlot>> {
        let candidates = self.media_provider.fetch_items(filter).await?;
        if candidates.is_empty() {
            return Ok(vec![]);
        }
        let pool = Self::apply_recycle_policy(candidates, history, policy, generation);
        // Safe cast: `end > start` is guaranteed by the caller's clipping check.
        let target_secs = (end - start).num_seconds() as u32;
        let selected = Self::fill_block(&pool, target_secs, strategy);
        let mut slots = Vec::new();
        let mut cursor = start;
        for item in selected {
            if cursor >= end {
                break;
            }
            let item_end =
                (cursor + Duration::seconds(item.duration_secs as i64)).min(end);
            slots.push(ScheduledSlot {
                id: Uuid::new_v4(),
                start_at: cursor,
                end_at: item_end,
                item: item.clone(),
                source_block_id: block_id,
            });
            cursor = item_end;
        }
        Ok(slots)
    }
    // -------------------------------------------------------------------------
    // Recycle policy
    // -------------------------------------------------------------------------
    /// Filter `candidates` according to `policy`, returning the eligible pool.
    ///
    /// An item is on cooldown if *either* the day-based or generation-based
    /// threshold is exceeded. If honouring all cooldowns would leave fewer items
    /// than `policy.min_available_ratio` of the total, all cooldowns are waived
    /// and the full pool is returned (prevents small libraries from stalling).
    fn apply_recycle_policy(
        candidates: Vec<MediaItem>,
        history: &[PlaybackRecord],
        policy: &RecyclePolicy,
        current_generation: u32,
    ) -> Vec<MediaItem> {
        let now = Utc::now();
        let excluded: HashSet<MediaItemId> = history
            .iter()
            .filter(|record| {
                let by_days = policy
                    .cooldown_days
                    .map(|days| (now - record.played_at).num_days() < days as i64)
                    .unwrap_or(false);
                let by_gen = policy
                    .cooldown_generations
                    .map(|gens| {
                        current_generation.saturating_sub(record.generation) < gens
                    })
                    .unwrap_or(false);
                by_days || by_gen
            })
            .map(|r| r.item_id.clone())
            .collect();
        let available: Vec<MediaItem> = candidates
            .iter()
            .filter(|i| !excluded.contains(&i.id))
            .cloned()
            .collect();
        let min_count =
            (candidates.len() as f32 * policy.min_available_ratio).ceil() as usize;
        if available.len() < min_count {
            // Pool too small after applying cooldowns — recycle everything.
            candidates
        } else {
            available
        }
    }
    // -------------------------------------------------------------------------
    // Fill strategies
    // -------------------------------------------------------------------------
    /// Select items from `pool` to fill `target_secs`, per the block's strategy.
    fn fill_block<'a>(
        pool: &'a [MediaItem],
        target_secs: u32,
        strategy: &FillStrategy,
    ) -> Vec<&'a MediaItem> {
        match strategy {
            FillStrategy::BestFit => Self::fill_best_fit(pool, target_secs),
            FillStrategy::Sequential => Self::fill_sequential(pool, target_secs),
            FillStrategy::Random => {
                // Shuffle indices, then greedily take every item that still fits.
                let mut indices: Vec<usize> = (0..pool.len()).collect();
                indices.shuffle(&mut rand::thread_rng());
                let mut remaining = target_secs;
                let mut result = Vec::new();
                for i in indices {
                    let item = &pool[i];
                    if item.duration_secs <= remaining {
                        remaining -= item.duration_secs;
                        result.push(item);
                    }
                }
                result
            }
        }
    }
    /// Greedy bin-packing: at each step pick the longest item that still fits
    /// in the remaining budget, without repeating items within the same block.
    fn fill_best_fit(pool: &[MediaItem], target_secs: u32) -> Vec<&MediaItem> {
        let mut remaining = target_secs;
        let mut selected: Vec<&MediaItem> = Vec::new();
        let mut used: HashSet<usize> = HashSet::new();
        loop {
            let best = pool
                .iter()
                .enumerate()
                .filter(|(idx, item)| {
                    !used.contains(idx) && item.duration_secs <= remaining
                })
                .max_by_key(|(_, item)| item.duration_secs);
            match best {
                Some((idx, item)) => {
                    remaining -= item.duration_secs;
                    used.insert(idx);
                    selected.push(item);
                }
                None => break,
            }
        }
        selected
    }
    /// Sequential: iterate the pool in order, picking items that fit within
    /// the remaining budget. Good for series where episode order matters.
    fn fill_sequential(pool: &[MediaItem], target_secs: u32) -> Vec<&MediaItem> {
        let mut remaining = target_secs;
        let mut result = Vec::new();
        for item in pool {
            if item.duration_secs <= remaining {
                remaining -= item.duration_secs;
                result.push(item);
            }
        }
        result
    }
}

View File

@@ -0,0 +1,124 @@
use std::sync::Arc;
use uuid::Uuid;
use crate::entities::{Channel, ChannelConfigSnapshot, ScheduleConfig};
use crate::errors::{DomainError, DomainResult};
use crate::repositories::ChannelRepository;
use crate::value_objects::{ChannelId, UserId};
/// Service for managing channels (CRUD + ownership enforcement).
///
/// Also manages schedule-config snapshots: updates and restores automatically
/// snapshot the outgoing config so changes can be rolled back.
pub struct ChannelService {
    // Persistence port for channels and their config snapshots.
    channel_repo: Arc<dyn ChannelRepository>,
}
impl ChannelService {
    /// Build a service backed by the given channel repository.
    pub fn new(channel_repo: Arc<dyn ChannelRepository>) -> Self {
        Self { channel_repo }
    }
    /// Create and persist a new channel owned by `owner_id`.
    pub async fn create(
        &self,
        owner_id: UserId,
        name: &str,
        timezone: &str,
    ) -> DomainResult<Channel> {
        let created = Channel::new(owner_id, name, timezone);
        self.channel_repo.save(&created).await?;
        Ok(created)
    }
    /// Fetch a channel by id, mapping a miss to `DomainError::ChannelNotFound`.
    pub async fn find_by_id(&self, id: ChannelId) -> DomainResult<Channel> {
        match self.channel_repo.find_by_id(id).await? {
            Some(channel) => Ok(channel),
            None => Err(DomainError::ChannelNotFound(id)),
        }
    }
    /// List every channel in the system.
    pub async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        self.channel_repo.find_all().await
    }
    /// List the channels owned by `owner_id`.
    pub async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        self.channel_repo.find_by_owner(owner_id).await
    }
    /// Persist an updated channel, auto-snapshotting the config it replaces.
    pub async fn update(&self, channel: Channel) -> DomainResult<Channel> {
        // Keep an unlabelled snapshot of the outgoing config so the change
        // can later be rolled back via `restore_config_snapshot`.
        if let Some(previous) = self.channel_repo.find_by_id(channel.id).await? {
            self.channel_repo
                .save_config_snapshot(channel.id, &previous.schedule_config, None)
                .await?;
        }
        self.channel_repo.save(&channel).await?;
        Ok(channel)
    }
    /// List all config snapshots recorded for a channel.
    pub async fn list_config_snapshots(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<ChannelConfigSnapshot>> {
        self.channel_repo.list_config_snapshots(channel_id).await
    }
    /// Fetch a single config snapshot; `None` when it doesn't exist.
    pub async fn get_config_snapshot(
        &self,
        channel_id: ChannelId,
        snapshot_id: Uuid,
    ) -> DomainResult<Option<ChannelConfigSnapshot>> {
        self.channel_repo
            .get_config_snapshot(channel_id, snapshot_id)
            .await
    }
    /// Set or clear a snapshot's label.
    pub async fn patch_config_snapshot_label(
        &self,
        channel_id: ChannelId,
        snapshot_id: Uuid,
        label: Option<String>,
    ) -> DomainResult<Option<ChannelConfigSnapshot>> {
        self.channel_repo
            .patch_config_snapshot_label(channel_id, snapshot_id, label)
            .await
    }
    /// Restore a snapshot: auto-snapshot current config, then apply the snapshot's config.
    pub async fn restore_config_snapshot(
        &self,
        channel_id: ChannelId,
        snapshot_id: Uuid,
    ) -> DomainResult<Channel> {
        // NOTE(review): a missing snapshot is also reported as ChannelNotFound;
        // a dedicated error variant would be clearer for API consumers.
        let snapshot = match self
            .channel_repo
            .get_config_snapshot(channel_id, snapshot_id)
            .await?
        {
            Some(found) => found,
            None => return Err(DomainError::ChannelNotFound(channel_id)),
        };
        let mut channel = match self.channel_repo.find_by_id(channel_id).await? {
            Some(found) => found,
            None => return Err(DomainError::ChannelNotFound(channel_id)),
        };
        // Snapshot the outgoing config so the restore itself is reversible.
        self.channel_repo
            .save_config_snapshot(channel_id, &channel.schedule_config, None)
            .await?;
        channel.schedule_config = snapshot.config;
        channel.updated_at = chrono::Utc::now();
        self.channel_repo.save(&channel).await?;
        Ok(channel)
    }
    /// Record an explicit, optionally labelled snapshot of `config`.
    pub async fn save_config_snapshot(
        &self,
        channel_id: ChannelId,
        config: &ScheduleConfig,
        label: Option<String>,
    ) -> DomainResult<ChannelConfigSnapshot> {
        self.channel_repo
            .save_config_snapshot(channel_id, config, label)
            .await
    }
    /// Delete a channel, enforcing that `requester_id` is the owner.
    pub async fn delete(&self, id: ChannelId, requester_id: UserId) -> DomainResult<()> {
        let target = self.find_by_id(id).await?;
        if target.owner_id != requester_id {
            return Err(DomainError::forbidden("You don't own this channel"));
        }
        self.channel_repo.delete(id).await
    }
}

View File

@@ -0,0 +1,11 @@
//! Domain Services
//!
//! Services contain the business logic of the application.
// One module per service aggregate.
pub mod channel;
pub mod schedule;
pub mod user;
// Re-export the service types so callers can `use services::ChannelService` etc.
pub use channel::ChannelService;
pub use schedule::ScheduleEngineService;
pub use user::UserService;

View File

@@ -0,0 +1,151 @@
use std::collections::HashSet;
use rand::rngs::StdRng;
use rand::seq::SliceRandom;
use rand::SeedableRng;
use crate::entities::MediaItem;
use crate::value_objects::{FillStrategy, MediaItemId};
/// Select items to fill a block of `target_secs`, per the block's strategy.
///
/// `candidates` is the full, provider-ordered item list; `pool` is the subset
/// eligible under the recycle policy. `last_item_id`/`loop_on_finish` only
/// matter for `Sequential` (series continuity — see `fill_sequential`).
pub(super) fn fill_block<'a>(
    candidates: &'a [MediaItem],
    pool: &'a [MediaItem],
    target_secs: u32,
    strategy: &FillStrategy,
    last_item_id: Option<&MediaItemId>,
    loop_on_finish: bool,
) -> Vec<&'a MediaItem> {
    match strategy {
        FillStrategy::BestFit => fill_best_fit(pool, target_secs),
        FillStrategy::Sequential => {
            fill_sequential(candidates, pool, target_secs, last_item_id, loop_on_finish)
        }
        FillStrategy::Random => {
            // Visit the pool in a random order, greedily keeping whatever
            // still fits in the remaining time budget.
            let mut order: Vec<usize> = (0..pool.len()).collect();
            order.shuffle(&mut StdRng::from_entropy());
            let mut budget = target_secs;
            let mut picked = Vec::new();
            for idx in order {
                let entry = &pool[idx];
                if entry.duration_secs <= budget {
                    budget -= entry.duration_secs;
                    picked.push(entry);
                }
            }
            picked
        }
    }
}
/// Greedy bin-packing: repeatedly pick the longest not-yet-used item that
/// still fits in the remaining time budget. Never repeats an item within
/// the same block.
pub(super) fn fill_best_fit(pool: &[MediaItem], target_secs: u32) -> Vec<&MediaItem> {
    let mut budget = target_secs;
    let mut picked: Vec<&MediaItem> = Vec::new();
    let mut taken: HashSet<usize> = HashSet::new();
    // Stops once no unused item fits in the remaining budget.
    while let Some((idx, item)) = pool
        .iter()
        .enumerate()
        .filter(|(i, entry)| !taken.contains(i) && entry.duration_secs <= budget)
        .max_by_key(|(_, entry)| entry.duration_secs)
    {
        budget -= item.duration_secs;
        taken.insert(idx);
        picked.push(item);
    }
    picked
}
/// Sequential fill with cross-generation series continuity.
///
/// `candidates` — every item matching the filter, in the provider's natural
/// order (typically season + episode for TV shows).
/// `pool` — the candidates still eligible under the recycle policy.
/// `last_item_id` — the last item this block scheduled previously; used to
/// resume the series from the following episode instead of
/// restarting at episode 1.
/// `loop_on_finish` — when true the series wraps around at the end; when
/// false a finished series yields dead air.
///
/// Episodes are taken strictly in order; the first episode that doesn't fit
/// ends the fill (skipping ahead would break ordering).
pub(super) fn fill_sequential<'a>(
    candidates: &'a [MediaItem],
    pool: &'a [MediaItem],
    target_secs: u32,
    last_item_id: Option<&MediaItemId>,
    loop_on_finish: bool,
) -> Vec<&'a MediaItem> {
    if pool.is_empty() {
        return Vec::new();
    }
    // IDs that survived the recycle policy and may air right now.
    let eligible: HashSet<&MediaItemId> = pool.iter().map(|item| &item.id).collect();
    // Index just past the previously-aired episode in the full ordered list;
    // None when there is no continuity anchor (or it left the library).
    let resume_at = last_item_id
        .and_then(|id| candidates.iter().position(|c| &c.id == id))
        .map(|pos| pos + 1);
    let ordered: Vec<&MediaItem> = if loop_on_finish {
        // Wrap-around walk of the whole list starting at the resume point
        // (index 0 when there is no anchor).
        let start = resume_at.map(|pos| pos % candidates.len()).unwrap_or(0);
        (0..candidates.len())
            .map(|offset| &candidates[(start + offset) % candidates.len()])
            .filter(|item| eligible.contains(&item.id))
            .collect()
    } else {
        // No wrap: a resume point past the final episode means the series is
        // over — return nothing (dead air).
        let next = resume_at.unwrap_or(0);
        if next >= candidates.len() {
            return Vec::new();
        }
        candidates[next..]
            .iter()
            .filter(|item| eligible.contains(&item.id))
            .collect()
    };
    // Greedy fill in episode order; stop at the first episode that misses
    // the budget — skipping it would break series ordering.
    let mut budget = target_secs;
    let mut schedule: Vec<&MediaItem> = Vec::new();
    for &episode in &ordered {
        if episode.duration_secs > budget {
            break;
        }
        budget -= episode.duration_secs;
        schedule.push(episode);
    }
    // Edge case: an opening episode longer than the whole block still airs —
    // the slot builder clips it to the block end via `.min(end)`.
    if schedule.is_empty() {
        if let Some(&first) = ordered.first() {
            schedule.push(first);
        }
    }
    schedule
}

View File

@@ -0,0 +1,392 @@
use std::sync::Arc;
use chrono::{DateTime, Datelike, Duration, TimeZone, Utc};
use chrono_tz::Tz;
use uuid::Uuid;
use crate::entities::{
BlockContent, CurrentBroadcast, GeneratedSchedule, PlaybackRecord, ProgrammingBlock,
ScheduledSlot,
};
use crate::errors::{DomainError, DomainResult};
use crate::ports::{IProviderRegistry, StreamQuality};
use crate::repositories::{ChannelRepository, ScheduleRepository};
use crate::value_objects::{
BlockId, ChannelId, FillStrategy, MediaFilter, MediaItemId, RecyclePolicy,
};
mod fill;
mod recycle;
/// Core scheduling engine.
///
/// Generates 7-day broadcast schedules by walking through a channel's
/// `ScheduleConfig` day by day, resolving each `ProgrammingBlock` into concrete
/// `ScheduledSlot`s via the `IProviderRegistry`, and applying the `RecyclePolicy`
/// to avoid replaying recently aired items.
pub struct ScheduleEngineService {
    // Routes media lookups / stream URLs to the right provider by item-id prefix.
    provider_registry: Arc<dyn IProviderRegistry>,
    // Read side: channel config (blocks, timezone, recycle policy).
    channel_repo: Arc<dyn ChannelRepository>,
    // Write side: generated schedules, playback history, block continuity.
    schedule_repo: Arc<dyn ScheduleRepository>,
}
impl ScheduleEngineService {
    /// Wire the engine to the provider registry and persistence ports.
    pub fn new(
        provider_registry: Arc<dyn IProviderRegistry>,
        channel_repo: Arc<dyn ChannelRepository>,
        schedule_repo: Arc<dyn ScheduleRepository>,
    ) -> Self {
        Self {
            provider_registry,
            channel_repo,
            schedule_repo,
        }
    }
    // -------------------------------------------------------------------------
    // Public API
    // -------------------------------------------------------------------------
    /// Generate and persist a 7-day schedule for `channel_id` starting at `from`.
    ///
    /// The algorithm:
    /// 1. Walk each calendar day in the 7-day window.
    /// 2. For each `ProgrammingBlock`, compute its UTC wall-clock interval for that day.
    /// 3. Clip the interval to `[from, from + 7d)`.
    /// 4. Resolve the block content via the media provider, applying the recycle policy.
    /// 5. For `Sequential` blocks, resume from where the previous generation left off
    ///    (series continuity — see `fill::fill_sequential`).
    /// 6. Record every played item in the playback history.
    ///
    /// Gaps between blocks are left empty — clients render them as a no-signal state.
    pub async fn generate_schedule(
        &self,
        channel_id: ChannelId,
        from: DateTime<Utc>,
    ) -> DomainResult<GeneratedSchedule> {
        let channel = self
            .channel_repo
            .find_by_id(channel_id)
            .await?
            .ok_or(DomainError::ChannelNotFound(channel_id))?;
        // The channel stores a timezone name; a parse failure becomes a domain
        // error rather than a panic.
        let tz: Tz = channel
            .timezone
            .parse()
            .map_err(|_| DomainError::TimezoneError(channel.timezone.clone()))?;
        let history = self
            .schedule_repo
            .find_playback_history(channel_id)
            .await?;
        // Load the most recent schedule for two purposes:
        // 1. Derive the next generation number.
        // 2. Know where each Sequential block left off (series continuity).
        let latest_schedule = self.schedule_repo.find_latest(channel_id).await?;
        let generation = latest_schedule
            .as_ref()
            .map(|s| s.generation + 1)
            .unwrap_or(1);
        // Build the initial per-block continuity map from the most recent slot per
        // block across ALL schedules. This is resilient to any single generation
        // having empty slots for a block (e.g. provider returned nothing transiently).
        // The map is updated as each block occurrence is resolved within this
        // generation so later days of the 7-day window continue from here.
        let mut block_continuity = self
            .schedule_repo
            .find_last_slot_per_block(channel_id)
            .await?;
        let valid_from = from;
        let valid_until = from + Duration::days(7);
        // Work in the channel's local calendar so wall-clock block start times
        // land on the correct day.
        let start_date = from.with_timezone(&tz).date_naive();
        let end_date = valid_until.with_timezone(&tz).date_naive();
        let mut slots: Vec<ScheduledSlot> = Vec::new();
        let mut current_date = start_date;
        while current_date <= end_date {
            // Only blocks scheduled for this weekday occur today.
            let weekday = crate::value_objects::Weekday::from(current_date.weekday());
            for block in channel.schedule_config.blocks_for(weekday) {
                let naive_start = current_date.and_time(block.start_time);
                // `earliest()` handles DST gaps — if the local time doesn't exist
                // (e.g. clocks spring forward) we skip this block occurrence.
                let block_start_utc = match tz.from_local_datetime(&naive_start).earliest() {
                    Some(dt) => dt.with_timezone(&Utc),
                    None => continue,
                };
                let block_end_utc =
                    block_start_utc + Duration::minutes(block.duration_mins as i64);
                // Clip to the 7-day window.
                let slot_start = block_start_utc.max(valid_from);
                let slot_end = block_end_utc.min(valid_until);
                if slot_end <= slot_start {
                    continue;
                }
                // For Sequential blocks: resume from the last item aired in this block.
                let last_item_id = block_continuity.get(&block.id);
                let mut block_slots = self
                    .resolve_block(
                        block,
                        slot_start,
                        slot_end,
                        &history,
                        &channel.recycle_policy,
                        generation,
                        last_item_id,
                    )
                    .await?;
                // Update continuity so the next occurrence of this block (same
                // generation, next calendar day) continues from here.
                if let Some(last_slot) = block_slots.last() {
                    block_continuity.insert(block.id, last_slot.item.id.clone());
                }
                slots.append(&mut block_slots);
            }
            current_date = current_date.succ_opt().ok_or_else(|| {
                DomainError::validation("Date overflow during schedule generation")
            })?;
        }
        // Blocks in ScheduleConfig are not required to be sorted; sort resolved slots.
        slots.sort_by_key(|s| s.start_at);
        let schedule = GeneratedSchedule {
            id: Uuid::new_v4(),
            channel_id,
            valid_from,
            valid_until,
            generation,
            slots,
        };
        self.schedule_repo.save(&schedule).await?;
        // Persist playback history so the recycle policy has data for next generation.
        for slot in &schedule.slots {
            let record =
                PlaybackRecord::new(channel_id, slot.item.id.clone(), generation);
            self.schedule_repo.save_playback_record(&record).await?;
        }
        Ok(schedule)
    }
    /// Determine what is currently broadcasting on a schedule.
    ///
    /// Returns `None` when `now` falls in a gap between blocks — the client
    /// should display a no-signal / static screen in that case.
    pub fn get_current_broadcast(
        schedule: &GeneratedSchedule,
        now: DateTime<Utc>,
    ) -> Option<CurrentBroadcast> {
        schedule
            .slots
            .iter()
            .find(|s| s.start_at <= now && now < s.end_at)
            .map(|slot| CurrentBroadcast {
                slot: slot.clone(),
                // Non-negative because of the `start_at <= now` guard above.
                offset_secs: (now - slot.start_at).num_seconds() as u32,
            })
    }
    /// Return the most recently generated schedule for a channel (used by the background scheduler).
    pub async fn get_latest_schedule(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        self.schedule_repo.find_latest(channel_id).await
    }
    /// Look up the schedule currently active at `at` without generating a new one.
    pub async fn get_active_schedule(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        self.schedule_repo.find_active(channel_id, at).await
    }
    /// Delegate stream URL resolution to the provider registry (routes via ID prefix).
    pub async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
        self.provider_registry.get_stream_url(item_id, quality).await
    }
    /// List all generated schedule headers for a channel, newest first.
    pub async fn list_schedule_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<GeneratedSchedule>> {
        self.schedule_repo.list_schedule_history(channel_id).await
    }
    /// Fetch a specific schedule with its slots.
    pub async fn get_schedule_by_id(
        &self,
        channel_id: ChannelId,
        schedule_id: uuid::Uuid,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        self.schedule_repo.get_schedule_by_id(channel_id, schedule_id).await
    }
    /// Delete all schedules with generation > target_generation for this channel.
    pub async fn delete_schedules_after(
        &self,
        channel_id: ChannelId,
        target_generation: u32,
    ) -> DomainResult<()> {
        self.schedule_repo.delete_schedules_after(channel_id, target_generation).await
    }
    /// Return all slots that overlap the given time window — the EPG data.
    pub fn get_epg(
        schedule: &GeneratedSchedule,
        from: DateTime<Utc>,
        until: DateTime<Utc>,
    ) -> Vec<&ScheduledSlot> {
        schedule
            .slots
            .iter()
            .filter(|s| s.start_at < until && s.end_at > from)
            .collect()
    }
    // -------------------------------------------------------------------------
    // Block resolution
    // -------------------------------------------------------------------------
    /// Dispatch a block to the resolver matching its content variant.
    #[allow(clippy::too_many_arguments)]
    async fn resolve_block(
        &self,
        block: &ProgrammingBlock,
        start: DateTime<Utc>,
        end: DateTime<Utc>,
        history: &[PlaybackRecord],
        policy: &RecyclePolicy,
        generation: u32,
        last_item_id: Option<&MediaItemId>,
    ) -> DomainResult<Vec<ScheduledSlot>> {
        match &block.content {
            BlockContent::Manual { items, .. } => {
                self.resolve_manual(items, start, end, block.id).await
            }
            BlockContent::Algorithmic { filter, strategy, provider_id } => {
                self.resolve_algorithmic(
                    provider_id, filter, strategy, start, end, history, policy, generation,
                    block.id, last_item_id,
                    block.loop_on_finish,
                    block.ignore_recycle_policy,
                )
                .await
            }
        }
    }
    /// Resolve a manual block by fetching each hand-picked item in order.
    /// Stops when the block's time budget (`end`) is exhausted.
    async fn resolve_manual(
        &self,
        item_ids: &[MediaItemId],
        start: DateTime<Utc>,
        end: DateTime<Utc>,
        block_id: BlockId,
    ) -> DomainResult<Vec<ScheduledSlot>> {
        let mut slots = Vec::new();
        let mut cursor = start;
        for item_id in item_ids {
            if cursor >= end {
                break;
            }
            if let Some(item) = self.provider_registry.fetch_by_id(item_id).await? {
                // The final item may be clipped to the block boundary.
                let item_end =
                    (cursor + Duration::seconds(item.duration_secs as i64)).min(end);
                slots.push(ScheduledSlot {
                    id: Uuid::new_v4(),
                    start_at: cursor,
                    end_at: item_end,
                    item,
                    source_block_id: block_id,
                });
                cursor = item_end;
            }
            // If item is not found (deleted/unavailable), silently skip it.
        }
        Ok(slots)
    }
    /// Resolve an algorithmic block: fetch candidates, apply recycle policy,
    /// run the fill strategy, and build slots.
    ///
    /// `last_item_id` is the ID of the last item scheduled in this block in the
    /// previous generation. Used only by `Sequential` for series continuity.
    #[allow(clippy::too_many_arguments)]
    async fn resolve_algorithmic(
        &self,
        provider_id: &str,
        filter: &MediaFilter,
        strategy: &FillStrategy,
        start: DateTime<Utc>,
        end: DateTime<Utc>,
        history: &[PlaybackRecord],
        policy: &RecyclePolicy,
        generation: u32,
        block_id: BlockId,
        last_item_id: Option<&MediaItemId>,
        loop_on_finish: bool,
        ignore_recycle_policy: bool,
    ) -> DomainResult<Vec<ScheduledSlot>> {
        // `candidates` — all items matching the filter, in provider order.
        // Kept separate from `pool` so Sequential can rotate through the full
        // ordered list while still honouring cooldowns.
        let candidates = self.provider_registry.fetch_items(provider_id, filter).await?;
        if candidates.is_empty() {
            return Ok(vec![]);
        }
        let pool = if ignore_recycle_policy {
            candidates.clone()
        } else {
            recycle::apply_recycle_policy(&candidates, history, policy, generation)
        };
        // Safe cast: `end > start` is guaranteed by the caller's clipping check.
        let target_secs = (end - start).num_seconds() as u32;
        let selected =
            fill::fill_block(&candidates, &pool, target_secs, strategy, last_item_id, loop_on_finish);
        let mut slots = Vec::new();
        let mut cursor = start;
        for item in selected {
            if cursor >= end {
                break;
            }
            let item_end =
                (cursor + Duration::seconds(item.duration_secs as i64)).min(end);
            slots.push(ScheduledSlot {
                id: Uuid::new_v4(),
                start_at: cursor,
                end_at: item_end,
                item: item.clone(),
                source_block_id: block_id,
            });
            cursor = item_end;
        }
        Ok(slots)
    }
}

View File

@@ -0,0 +1,55 @@
use std::collections::HashSet;
use chrono::Utc;
use crate::entities::{MediaItem, PlaybackRecord};
use crate::value_objects::{MediaItemId, RecyclePolicy};
/// Filter `candidates` down to the items eligible to air under `policy`.
///
/// An item is held back while *either* the day-based or generation-based
/// cooldown is still active. When enforcing every cooldown would leave less
/// than `policy.min_available_ratio` of the library available, the cooldowns
/// are waived entirely so that small libraries never starve the schedule.
pub(super) fn apply_recycle_policy(
    candidates: &[MediaItem],
    history: &[PlaybackRecord],
    policy: &RecyclePolicy,
    current_generation: u32,
) -> Vec<MediaItem> {
    let now = Utc::now();
    // Collect the IDs of everything still cooling down.
    let mut on_cooldown: HashSet<MediaItemId> = HashSet::new();
    for record in history {
        let days_active = match policy.cooldown_days {
            Some(days) => (now - record.played_at).num_days() < days as i64,
            None => false,
        };
        let gens_active = match policy.cooldown_generations {
            Some(gens) => current_generation.saturating_sub(record.generation) < gens,
            None => false,
        };
        if days_active || gens_active {
            on_cooldown.insert(record.item_id.clone());
        }
    }
    let eligible: Vec<MediaItem> = candidates
        .iter()
        .filter(|item| !on_cooldown.contains(&item.id))
        .cloned()
        .collect();
    let floor = (candidates.len() as f32 * policy.min_available_ratio).ceil() as usize;
    if eligible.len() < floor {
        // Too few items survive the cooldowns — waive them and recycle everything.
        candidates.to_vec()
    } else {
        eligible
    }
}

View File

@@ -0,0 +1,66 @@
use std::sync::Arc;
use uuid::Uuid;
use crate::entities::User;
use crate::errors::{DomainError, DomainResult};
use crate::repositories::UserRepository;
use crate::value_objects::Email;
/// Service for managing users.
///
/// Thin application-layer façade over a [`UserRepository`]; holds no state of
/// its own beyond the repository handle, so it is cheap to clone via `Arc`.
pub struct UserService {
    // Abstract persistence backend; all methods delegate to this.
    user_repository: Arc<dyn UserRepository>,
}
impl UserService {
    /// Construct the service over a user repository.
    pub fn new(user_repository: Arc<dyn UserRepository>) -> Self {
        Self { user_repository }
    }

    /// Look up a user by OIDC subject, falling back to email; create the
    /// account when neither lookup matches. The very first account created
    /// in an empty database becomes an admin.
    pub async fn find_or_create(&self, subject: &str, email: &str) -> DomainResult<User> {
        // Fast path: the subject is already known.
        if let Some(existing) = self.user_repository.find_by_subject(subject).await? {
            return Ok(existing);
        }
        // An account with this email may predate the OIDC subject — adopt it
        // by recording the subject on the existing record.
        if let Some(mut existing) = self.user_repository.find_by_email(email).await? {
            if existing.subject != subject {
                existing.subject = subject.to_string();
                self.user_repository.save(&existing).await?;
            }
            return Ok(existing);
        }
        let parsed = Email::try_from(email)?;
        let mut created = User::new(subject, parsed);
        // Bootstrap rule: first user in the system is the admin.
        if self.user_repository.count_users().await? == 0 {
            created.is_admin = true;
        }
        self.user_repository.save(&created).await?;
        Ok(created)
    }

    /// Fetch a user by id, erroring with [`DomainError::UserNotFound`] when absent.
    pub async fn find_by_id(&self, id: Uuid) -> DomainResult<User> {
        match self.user_repository.find_by_id(id).await? {
            Some(user) => Ok(user),
            None => Err(DomainError::UserNotFound(id)),
        }
    }

    /// Fetch a user by email; `None` when no such account exists.
    pub async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
        self.user_repository.find_by_email(email).await
    }

    /// Create a password-based (local) account. As in [`Self::find_or_create`],
    /// the first account in an empty database becomes an admin.
    pub async fn create_local(
        &self,
        email: &str,
        password_hash: &str,
    ) -> DomainResult<User> {
        let parsed = Email::try_from(email)?;
        let mut created = User::new_local(parsed, password_hash);
        if self.user_repository.count_users().await? == 0 {
            created.is_admin = true;
        }
        self.user_repository.save(&created).await?;
        Ok(created)
    }
}

View File

@@ -0,0 +1,227 @@
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt;
use thiserror::Error;
// ============================================================================
// Validation Error
// ============================================================================
/// Errors that occur when parsing/validating value objects
#[derive(Debug, Error, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum ValidationError {
    /// Input failed RFC-compliant email parsing; payload is the offending value.
    #[error("Invalid email format: {0}")]
    InvalidEmail(String),
    /// Password shorter than the configured minimum length.
    #[error("Password must be at least {min} characters, got {actual}")]
    PasswordTooShort { min: usize, actual: usize },
    /// String could not be parsed as a URL.
    #[error("Invalid URL: {0}")]
    InvalidUrl(String),
    /// A required value was blank; payload names the field.
    #[error("Value cannot be empty: {0}")]
    Empty(String),
    /// Cryptographic secret below the required minimum byte length.
    #[error("Secret too short: minimum {min} bytes required, got {actual}")]
    SecretTooShort { min: usize, actual: usize },
}
// ============================================================================
// Email (using email_address crate for RFC-compliant validation)
// ============================================================================
/// A validated email address using RFC-compliant validation.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Email(email_address::EmailAddress);
impl Email {
/// Create a new validated email address
pub fn new(value: impl AsRef<str>) -> Result<Self, ValidationError> {
let value = value.as_ref().trim().to_lowercase();
let addr: email_address::EmailAddress = value
.parse()
.map_err(|_| ValidationError::InvalidEmail(value.clone()))?;
Ok(Self(addr))
}
/// Get the inner value
pub fn into_inner(self) -> String {
self.0.to_string()
}
}
impl AsRef<str> for Email {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for Email {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl TryFrom<String> for Email {
type Error = ValidationError;
fn try_from(value: String) -> Result<Self, Self::Error> {
Self::new(value)
}
}
impl TryFrom<&str> for Email {
type Error = ValidationError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Self::new(value)
}
}
impl Serialize for Email {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_str(self.0.as_ref())
}
}
impl<'de> Deserialize<'de> for Email {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
Self::new(s).map_err(serde::de::Error::custom)
}
}
// ============================================================================
// Password
// ============================================================================
/// A validated password input (NOT the hash).
///
/// Enforces a minimum length of [`MIN_PASSWORD_LENGTH`] characters,
/// counted as Unicode scalar values rather than bytes.
#[derive(Clone, PartialEq, Eq)]
pub struct Password(String);

/// Minimum password length (NIST recommendation)
pub const MIN_PASSWORD_LENGTH: usize = 8;

impl Password {
    /// Validate and wrap a raw password string.
    ///
    /// # Errors
    /// Returns [`ValidationError::PasswordTooShort`] when the input contains
    /// fewer than [`MIN_PASSWORD_LENGTH`] characters.
    pub fn new(value: impl Into<String>) -> Result<Self, ValidationError> {
        let value = value.into();
        // Count characters, not bytes: the error message (and the NIST
        // guideline it cites) speak in characters, while `str::len` is a
        // UTF-8 byte count and over-counts multi-byte input.
        let char_count = value.chars().count();
        if char_count < MIN_PASSWORD_LENGTH {
            return Err(ValidationError::PasswordTooShort {
                min: MIN_PASSWORD_LENGTH,
                actual: char_count,
            });
        }
        Ok(Self(value))
    }

    /// Consume the wrapper and return the raw password string.
    pub fn into_inner(self) -> String {
        self.0
    }
}

impl AsRef<str> for Password {
    fn as_ref(&self) -> &str {
        &self.0
    }
}

// Intentionally hide password content in Debug so it never leaks into logs.
impl fmt::Debug for Password {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Password(***)")
    }
}

impl TryFrom<String> for Password {
    type Error = ValidationError;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        Self::new(value)
    }
}

impl TryFrom<&str> for Password {
    type Error = ValidationError;
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        Self::new(value)
    }
}

impl<'de> Deserialize<'de> for Password {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let s = String::deserialize(deserializer)?;
        Self::new(s).map_err(serde::de::Error::custom)
    }
}

// Note: Password should NOT implement Serialize to prevent accidental exposure
// ============================================================================
// Tests
// ============================================================================
#[cfg(test)]
mod tests {
    use super::*;

    // Email parsing: normalization (trim + lowercase) and rejection of
    // structurally malformed addresses.
    mod email_tests {
        use super::*;

        #[test]
        fn test_valid_email() {
            assert!(Email::new("user@example.com").is_ok());
            assert!(Email::new("USER@EXAMPLE.COM").is_ok()); // Should lowercase
            assert!(Email::new(" user@example.com ").is_ok()); // Should trim
        }

        #[test]
        fn test_email_normalizes() {
            // Both trimming and lowercasing are applied before parsing.
            let email = Email::new(" USER@EXAMPLE.COM ").unwrap();
            assert_eq!(email.as_ref(), "user@example.com");
        }

        #[test]
        fn test_invalid_email_no_at() {
            assert!(Email::new("userexample.com").is_err());
        }

        #[test]
        fn test_invalid_email_no_domain() {
            assert!(Email::new("user@").is_err());
        }

        #[test]
        fn test_invalid_email_no_local() {
            assert!(Email::new("@example.com").is_err());
        }
    }

    // Password validation: minimum-length boundary and the redacting Debug impl.
    mod password_tests {
        use super::*;

        #[test]
        fn test_valid_password() {
            assert!(Password::new("secret123").is_ok());
            assert!(Password::new("12345678").is_ok()); // Exactly 8 chars
        }

        #[test]
        fn test_password_too_short() {
            assert!(Password::new("1234567").is_err()); // 7 chars
            assert!(Password::new("").is_err());
        }

        #[test]
        fn test_password_debug_hides_content() {
            // Debug must never reveal the raw password.
            let password = Password::new("supersecret").unwrap();
            let debug = format!("{:?}", password);
            assert!(!debug.contains("supersecret"));
            assert!(debug.contains("***"));
        }
    }
}

View File

@@ -0,0 +1,6 @@
use uuid::Uuid;

/// Identifier of a user account.
pub type UserId = Uuid;
/// Identifier of a channel.
pub type ChannelId = Uuid;
/// Identifier of a single scheduled slot.
pub type SlotId = Uuid;
/// Identifier of a schedule block (see `ScheduledSlot::source_block_id`).
pub type BlockId = Uuid;

View File

@@ -0,0 +1,14 @@
//! Value Objects for K-Notes Domain
//!
//! Newtypes that encapsulate validation logic, following the "parse, don't validate" pattern.
//! These types can only be constructed if the input is valid, providing compile-time guarantees.
pub mod auth;       // Email, Password, ValidationError
pub mod ids;        // Uuid-based id aliases (UserId, ChannelId, SlotId, BlockId)
pub mod oidc;       // OIDC configuration newtypes
pub mod scheduling; // MediaItemId, MediaFilter, FillStrategy, RecyclePolicy, Weekday
// Flatten the submodules so callers keep using `value_objects::Email` etc.
pub use auth::*;
pub use ids::*;
pub use oidc::*;
pub use scheduling::*;

View File

@@ -1,174 +1,8 @@
//! Value Objects for K-Notes Domain use serde::{Deserialize, Deserializer, Serialize};
//!
//! Newtypes that encapsulate validation logic, following the "parse, don't validate" pattern.
//! These types can only be constructed if the input is valid, providing compile-time guarantees.
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::fmt; use std::fmt;
use thiserror::Error;
use url::Url; use url::Url;
use uuid::Uuid;
pub type UserId = Uuid; use super::auth::ValidationError;
// ============================================================================
// Validation Error
// ============================================================================
/// Errors that occur when parsing/validating value objects
#[derive(Debug, Error, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum ValidationError {
#[error("Invalid email format: {0}")]
InvalidEmail(String),
#[error("Password must be at least {min} characters, got {actual}")]
PasswordTooShort { min: usize, actual: usize },
#[error("Invalid URL: {0}")]
InvalidUrl(String),
#[error("Value cannot be empty: {0}")]
Empty(String),
#[error("Secret too short: minimum {min} bytes required, got {actual}")]
SecretTooShort { min: usize, actual: usize },
}
// ============================================================================
// Email (using email_address crate for RFC-compliant validation)
// ============================================================================
/// A validated email address using RFC-compliant validation.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Email(email_address::EmailAddress);
impl Email {
/// Create a new validated email address
pub fn new(value: impl AsRef<str>) -> Result<Self, ValidationError> {
let value = value.as_ref().trim().to_lowercase();
let addr: email_address::EmailAddress = value
.parse()
.map_err(|_| ValidationError::InvalidEmail(value.clone()))?;
Ok(Self(addr))
}
/// Get the inner value
pub fn into_inner(self) -> String {
self.0.to_string()
}
}
impl AsRef<str> for Email {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl fmt::Display for Email {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl TryFrom<String> for Email {
type Error = ValidationError;
fn try_from(value: String) -> Result<Self, Self::Error> {
Self::new(value)
}
}
impl TryFrom<&str> for Email {
type Error = ValidationError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Self::new(value)
}
}
impl Serialize for Email {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_str(self.0.as_ref())
}
}
impl<'de> Deserialize<'de> for Email {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
Self::new(s).map_err(serde::de::Error::custom)
}
}
// ============================================================================
// Password
// ============================================================================
/// A validated password input (NOT the hash).
///
/// Enforces minimum length of 6 characters.
#[derive(Clone, PartialEq, Eq)]
pub struct Password(String);
/// Minimum password length (NIST recommendation)
pub const MIN_PASSWORD_LENGTH: usize = 8;
impl Password {
pub fn new(value: impl Into<String>) -> Result<Self, ValidationError> {
let value = value.into();
if value.len() < MIN_PASSWORD_LENGTH {
return Err(ValidationError::PasswordTooShort {
min: MIN_PASSWORD_LENGTH,
actual: value.len(),
});
}
Ok(Self(value))
}
pub fn into_inner(self) -> String {
self.0
}
}
impl AsRef<str> for Password {
fn as_ref(&self) -> &str {
&self.0
}
}
// Intentionally hide password content in Debug
impl fmt::Debug for Password {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Password(***)")
}
}
impl TryFrom<String> for Password {
type Error = ValidationError;
fn try_from(value: String) -> Result<Self, Self::Error> {
Self::new(value)
}
}
impl TryFrom<&str> for Password {
type Error = ValidationError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Self::new(value)
}
}
impl<'de> Deserialize<'de> for Password {
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
Self::new(s).map_err(serde::de::Error::custom)
}
}
// Note: Password should NOT implement Serialize to prevent accidental exposure
// ============================================================================ // ============================================================================
// OIDC Configuration Newtypes // OIDC Configuration Newtypes
@@ -534,122 +368,6 @@ impl fmt::Debug for JwtSecret {
} }
} }
// ============================================================================
// Channel / Schedule types
// ============================================================================
pub type ChannelId = Uuid;
pub type SlotId = Uuid;
pub type BlockId = Uuid;
/// Opaque media item identifier — format is provider-specific internally.
/// The domain never inspects the string; it just passes it back to the provider.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct MediaItemId(String);
impl MediaItemId {
pub fn new(value: impl Into<String>) -> Self {
Self(value.into())
}
pub fn into_inner(self) -> String {
self.0
}
}
impl AsRef<str> for MediaItemId {
fn as_ref(&self) -> &str {
&self.0
}
}
impl fmt::Display for MediaItemId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl From<String> for MediaItemId {
fn from(s: String) -> Self {
Self(s)
}
}
impl From<&str> for MediaItemId {
fn from(s: &str) -> Self {
Self(s.to_string())
}
}
/// The broad category of a media item.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ContentType {
Movie,
Episode,
Short,
}
/// Provider-agnostic filter for querying media items.
///
/// Each field is optional — omitting it means "no constraint on this dimension".
/// The `IMediaProvider` adapter interprets these fields in terms of its own API.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct MediaFilter {
pub content_type: Option<ContentType>,
pub genres: Vec<String>,
/// Starting year of a decade: 1990 means 19901999.
pub decade: Option<u16>,
pub tags: Vec<String>,
pub min_duration_secs: Option<u32>,
pub max_duration_secs: Option<u32>,
/// Abstract groupings interpreted by each provider (Jellyfin library, Plex section,
/// filesystem path, etc.). An empty list means "all available content".
pub collections: Vec<String>,
}
/// How the scheduling engine fills a time block with selected media items.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FillStrategy {
/// Greedy bin-packing: at each step pick the longest item that still fits,
/// minimising dead air. Good for variety blocks.
BestFit,
/// Pick items in the order returned by the provider — ideal for series
/// where episode sequence matters.
Sequential,
/// Shuffle the pool randomly then fill sequentially. Good for "shuffle play" channels.
Random,
}
/// Controls when previously aired items become eligible to play again.
///
/// An item is *on cooldown* if *either* threshold is met.
/// `min_available_ratio` is a safety valve: if honouring the cooldown would
/// leave fewer items than this fraction of the total pool, the cooldown is
/// ignored and all items become eligible. This prevents small libraries from
/// running completely dry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecyclePolicy {
/// Do not replay an item within this many calendar days.
pub cooldown_days: Option<u32>,
/// Do not replay an item within this many schedule generations.
pub cooldown_generations: Option<u32>,
/// Always keep at least this fraction (0.01.0) of the matching pool
/// available for selection, even if their cooldown has not yet expired.
pub min_available_ratio: f32,
}
impl Default for RecyclePolicy {
fn default() -> Self {
Self {
cooldown_days: Some(30),
cooldown_generations: None,
min_available_ratio: 0.2,
}
}
}
// ============================================================================ // ============================================================================
// Tests // Tests
// ============================================================================ // ============================================================================
@@ -658,62 +376,6 @@ impl Default for RecyclePolicy {
mod tests { mod tests {
use super::*; use super::*;
mod email_tests {
use super::*;
#[test]
fn test_valid_email() {
assert!(Email::new("user@example.com").is_ok());
assert!(Email::new("USER@EXAMPLE.COM").is_ok()); // Should lowercase
assert!(Email::new(" user@example.com ").is_ok()); // Should trim
}
#[test]
fn test_email_normalizes() {
let email = Email::new(" USER@EXAMPLE.COM ").unwrap();
assert_eq!(email.as_ref(), "user@example.com");
}
#[test]
fn test_invalid_email_no_at() {
assert!(Email::new("userexample.com").is_err());
}
#[test]
fn test_invalid_email_no_domain() {
assert!(Email::new("user@").is_err());
}
#[test]
fn test_invalid_email_no_local() {
assert!(Email::new("@example.com").is_err());
}
}
mod password_tests {
use super::*;
#[test]
fn test_valid_password() {
assert!(Password::new("secret123").is_ok());
assert!(Password::new("12345678").is_ok()); // Exactly 8 chars
}
#[test]
fn test_password_too_short() {
assert!(Password::new("1234567").is_err()); // 7 chars
assert!(Password::new("").is_err());
}
#[test]
fn test_password_debug_hides_content() {
let password = Password::new("supersecret").unwrap();
let debug = format!("{:?}", password);
assert!(!debug.contains("supersecret"));
assert!(debug.contains("***"));
}
}
mod oidc_tests { mod oidc_tests {
use super::*; use super::*;

View File

@@ -0,0 +1,201 @@
use serde::{Deserialize, Serialize};
use std::fmt;
/// Position of the channel logo watermark overlay.
///
/// Serialized in snake_case (e.g. `"top_right"`).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum LogoPosition {
    TopLeft,
    /// Default corner for the watermark.
    #[default]
    TopRight,
    BottomLeft,
    BottomRight,
}
/// Controls who can view a channel's broadcast and stream.
///
/// Variants are ordered from most to least permissive.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum AccessMode {
    /// Anyone may view; the default.
    #[default]
    Public,
    /// Viewing requires a password.
    PasswordProtected,
    /// Viewing requires a signed-in account.
    AccountRequired,
    /// Only the channel owner may view.
    OwnerOnly,
}
/// Opaque media item identifier — format is provider-specific internally.
/// The domain never inspects the string; it just passes it back to the provider.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct MediaItemId(String);

impl MediaItemId {
    /// Wrap any string-like value as an id.
    pub fn new(value: impl Into<String>) -> Self {
        MediaItemId(value.into())
    }

    /// Unwrap into the raw provider string.
    pub fn into_inner(self) -> String {
        let MediaItemId(raw) = self;
        raw
    }
}

impl AsRef<str> for MediaItemId {
    fn as_ref(&self) -> &str {
        self.0.as_str()
    }
}

impl fmt::Display for MediaItemId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.0)
    }
}

impl From<String> for MediaItemId {
    fn from(s: String) -> Self {
        MediaItemId::new(s)
    }
}

impl From<&str> for MediaItemId {
    fn from(s: &str) -> Self {
        MediaItemId::new(s)
    }
}
/// The broad category of a media item.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ContentType {
    /// Feature-length film.
    Movie,
    /// Single episode of a series.
    Episode,
    /// Short-form content — presumably fillers/interstitials; confirm against providers.
    Short,
}
/// Provider-agnostic filter for querying media items.
///
/// Each field is optional — omitting it means "no constraint on this dimension".
/// The `IMediaProvider` adapter interprets these fields in terms of its own API.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct MediaFilter {
    /// Restrict to one broad category (movie / episode / short).
    pub content_type: Option<ContentType>,
    /// Genres to match; an empty list means no genre constraint.
    pub genres: Vec<String>,
    /// Starting year of a decade: 1990 means 1990–1999.
    pub decade: Option<u16>,
    /// Tags to match; an empty list means no tag constraint.
    pub tags: Vec<String>,
    /// Minimum item duration, in seconds.
    pub min_duration_secs: Option<u32>,
    /// Maximum item duration, in seconds.
    pub max_duration_secs: Option<u32>,
    /// Abstract groupings interpreted by each provider (Jellyfin library, Plex section,
    /// filesystem path, etc.). An empty list means "all available content".
    pub collections: Vec<String>,
    /// Filter to one or more TV series by name. Use with `content_type: Episode`.
    /// With `Sequential` strategy each series plays in chronological order.
    /// Multiple series are OR-combined: any episode from any listed show is eligible.
    #[serde(default)]
    pub series_names: Vec<String>,
    /// Free-text search term. Intended for library browsing; typically omitted
    /// during schedule generation.
    pub search_term: Option<String>,
}
/// How the scheduling engine fills a time block with selected media items.
///
/// Serialized in snake_case for API/config payloads.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FillStrategy {
    /// Greedy bin-packing: at each step pick the longest item that still fits,
    /// minimising dead air. Good for variety blocks.
    BestFit,
    /// Pick items in the order returned by the provider — ideal for series
    /// where episode sequence matters.
    Sequential,
    /// Shuffle the pool randomly then fill sequentially. Good for "shuffle play" channels.
    Random,
}
/// Controls when previously aired items become eligible to play again.
///
/// An item is *on cooldown* if *either* threshold is met.
/// `min_available_ratio` is a safety valve: if honouring the cooldown would
/// leave fewer items than this fraction of the total pool, the cooldown is
/// ignored and all items become eligible. This prevents small libraries from
/// running completely dry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecyclePolicy {
    /// Do not replay an item within this many calendar days.
    pub cooldown_days: Option<u32>,
    /// Do not replay an item within this many schedule generations.
    pub cooldown_generations: Option<u32>,
    /// Always keep at least this fraction (0.0–1.0) of the matching pool
    /// available for selection, even if their cooldown has not yet expired.
    pub min_available_ratio: f32,
}
impl Default for RecyclePolicy {
    /// 30-day cooldown, no generation-based cooldown, and at least 20 % of
    /// the pool always kept available.
    fn default() -> Self {
        Self {
            cooldown_days: Some(30),
            cooldown_generations: None,
            min_available_ratio: 0.2,
        }
    }
}
/// Day of week, used as key in weekly schedule configs.
///
/// Serialized lowercase (`"monday"`, `"tuesday"`, ...).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Weekday {
    Monday,
    Tuesday,
    Wednesday,
    Thursday,
    Friday,
    Saturday,
    Sunday,
}
impl From<chrono::Weekday> for Weekday {
    /// One-to-one, exhaustive mapping from chrono's weekday to the domain enum.
    fn from(w: chrono::Weekday) -> Self {
        match w {
            chrono::Weekday::Mon => Weekday::Monday,
            chrono::Weekday::Tue => Weekday::Tuesday,
            chrono::Weekday::Wed => Weekday::Wednesday,
            chrono::Weekday::Thu => Weekday::Thursday,
            chrono::Weekday::Fri => Weekday::Friday,
            chrono::Weekday::Sat => Weekday::Saturday,
            chrono::Weekday::Sun => Weekday::Sunday,
        }
    }
}
impl Weekday {
pub fn all() -> [Weekday; 7] {
// ISO week order: Monday = index 0, Sunday = index 6.
// The schedule engine depends on this order when iterating days.
[
Weekday::Monday, Weekday::Tuesday, Weekday::Wednesday,
Weekday::Thursday, Weekday::Friday, Weekday::Saturday, Weekday::Sunday,
]
}
}
#[cfg(test)]
mod weekday_tests {
    use super::*;

    // Every chrono variant must round-trip to the matching domain variant.
    #[test]
    fn from_chrono_weekday_all_variants() {
        assert_eq!(Weekday::from(chrono::Weekday::Mon), Weekday::Monday);
        assert_eq!(Weekday::from(chrono::Weekday::Tue), Weekday::Tuesday);
        assert_eq!(Weekday::from(chrono::Weekday::Wed), Weekday::Wednesday);
        assert_eq!(Weekday::from(chrono::Weekday::Thu), Weekday::Thursday);
        assert_eq!(Weekday::from(chrono::Weekday::Fri), Weekday::Friday);
        assert_eq!(Weekday::from(chrono::Weekday::Sat), Weekday::Saturday);
        assert_eq!(Weekday::from(chrono::Weekday::Sun), Weekday::Sunday);
    }

    // Pins the ISO ordering contract that the schedule engine relies on.
    #[test]
    fn all_returns_monday_first_sunday_last() {
        let days = Weekday::all();
        assert_eq!(days[0], Weekday::Monday);
        assert_eq!(days[6], Weekday::Sunday);
    }
}

View File

@@ -11,6 +11,7 @@ broker-nats = ["dep:futures-util", "k-core/broker-nats"]
auth-oidc = ["dep:openidconnect", "dep:url", "dep:axum-extra"] auth-oidc = ["dep:openidconnect", "dep:url", "dep:axum-extra"]
auth-jwt = ["dep:jsonwebtoken"] auth-jwt = ["dep:jsonwebtoken"]
jellyfin = ["dep:reqwest"] jellyfin = ["dep:reqwest"]
local-files = ["dep:walkdir", "dep:base64", "sqlite"]
[dependencies] [dependencies]
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [ k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
@@ -46,3 +47,5 @@ jsonwebtoken = { version = "10.2.0", features = [
"rsa", "rsa",
"rust_crypto", "rust_crypto",
], optional = true } ], optional = true }
walkdir = { version = "2", optional = true }
base64 = { version = "0.22", optional = true }

View File

@@ -0,0 +1,5 @@
// Backend selection: the SQLite implementation is only compiled (and
// re-exported) when the `sqlite` feature is enabled.
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteActivityLogRepository;

View File

@@ -0,0 +1,71 @@
use async_trait::async_trait;
use chrono::Utc;
use uuid::Uuid;
use domain::{ActivityEvent, ActivityLogRepository, DomainError, DomainResult};
/// SQLite-backed implementation of [`ActivityLogRepository`].
pub struct SqliteActivityLogRepository {
    // Shared connection pool; cloning the pool handle is cheap.
    pool: sqlx::SqlitePool,
}

impl SqliteActivityLogRepository {
    /// Create a repository over an existing connection pool.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl ActivityLogRepository for SqliteActivityLogRepository {
    /// Append one event row; the id and timestamp are generated here.
    async fn log(
        &self,
        event_type: &str,
        detail: &str,
        channel_id: Option<Uuid>,
    ) -> DomainResult<()> {
        let row_id = Uuid::new_v4().to_string();
        let stamped_at = Utc::now().to_rfc3339();
        let channel = channel_id.map(|c| c.to_string());
        sqlx::query(
            "INSERT INTO activity_log (id, timestamp, event_type, detail, channel_id) VALUES (?, ?, ?, ?, ?)",
        )
        .bind(&row_id)
        .bind(&stamped_at)
        .bind(event_type)
        .bind(detail)
        .bind(&channel)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// Return up to `limit` events, newest first; rows with an unparsable
    /// id or timestamp are skipped rather than failing the whole query.
    async fn recent(&self, limit: u32) -> DomainResult<Vec<ActivityEvent>> {
        type Row = (String, String, String, String, Option<String>);
        let rows: Vec<Row> = sqlx::query_as(
            "SELECT id, timestamp, event_type, detail, channel_id FROM activity_log ORDER BY timestamp DESC LIMIT ?",
        )
        .bind(limit)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        let mut events = Vec::with_capacity(rows.len());
        for (id, timestamp, event_type, detail, channel_id) in rows {
            let Ok(id) = Uuid::parse_str(&id) else { continue };
            let Ok(timestamp) = timestamp.parse() else { continue };
            // A malformed channel id degrades to None instead of dropping the row.
            let channel_id = channel_id.and_then(|s| Uuid::parse_str(&s).ok());
            events.push(ActivityEvent {
                id,
                timestamp,
                event_type,
                detail,
                channel_id,
            });
        }
        Ok(events)
    }
}

View File

@@ -0,0 +1,83 @@
//! SQLite implementation of IAppSettingsRepository.
use async_trait::async_trait;
use sqlx::SqlitePool;
use domain::{DomainError, DomainResult, IAppSettingsRepository};
/// SQLite-backed key/value store implementing [`IAppSettingsRepository`].
pub struct SqliteAppSettingsRepository {
    // Shared connection pool for the settings table.
    pool: SqlitePool,
}

impl SqliteAppSettingsRepository {
    /// Create a repository over an existing connection pool.
    pub fn new(pool: SqlitePool) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl IAppSettingsRepository for SqliteAppSettingsRepository {
    /// Read a single setting; `None` when the key is absent.
    async fn get(&self, key: &str) -> DomainResult<Option<String>> {
        let fetched = sqlx::query_scalar::<_, String>("SELECT value FROM app_settings WHERE key = ?")
            .bind(key)
            .fetch_optional(&self.pool)
            .await;
        fetched.map_err(|e| DomainError::InfrastructureError(e.to_string()))
    }

    /// Insert or overwrite a setting (upsert semantics).
    async fn set(&self, key: &str, value: &str) -> DomainResult<()> {
        match sqlx::query("INSERT OR REPLACE INTO app_settings (key, value) VALUES (?, ?)")
            .bind(key)
            .bind(value)
            .execute(&self.pool)
            .await
        {
            Ok(_) => Ok(()),
            Err(e) => Err(DomainError::InfrastructureError(e.to_string())),
        }
    }

    /// All settings as `(key, value)` pairs, ordered by key.
    async fn get_all(&self) -> DomainResult<Vec<(String, String)>> {
        let rows = sqlx::query_as::<_, (String, String)>(
            "SELECT key, value FROM app_settings ORDER BY key",
        )
        .fetch_all(&self.pool)
        .await;
        rows.map_err(|e| DomainError::InfrastructureError(e.to_string()))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::SqlitePool;
    use domain::IAppSettingsRepository;

    // Fresh in-memory database with the app_settings table and one seeded row.
    async fn setup() -> SqlitePool {
        let pool = SqlitePool::connect(":memory:").await.unwrap();
        sqlx::query(
            "CREATE TABLE app_settings (key TEXT PRIMARY KEY, value TEXT NOT NULL)"
        ).execute(&pool).await.unwrap();
        sqlx::query("INSERT INTO app_settings VALUES ('library_sync_interval_hours', '6')")
            .execute(&pool).await.unwrap();
        pool
    }

    #[tokio::test]
    async fn get_returns_seeded_value() {
        let repo = SqliteAppSettingsRepository::new(setup().await);
        let val = repo.get("library_sync_interval_hours").await.unwrap();
        assert_eq!(val, Some("6".to_string()));
    }

    // Upsert path: overwriting an existing key replaces the value.
    #[tokio::test]
    async fn set_then_get() {
        let repo = SqliteAppSettingsRepository::new(setup().await);
        repo.set("library_sync_interval_hours", "12").await.unwrap();
        let val = repo.get("library_sync_interval_hours").await.unwrap();
        assert_eq!(val, Some("12".to_string()));
    }

    #[tokio::test]
    async fn get_all_returns_all_keys() {
        let repo = SqliteAppSettingsRepository::new(setup().await);
        let all = repo.get_all().await.unwrap();
        assert!(!all.is_empty());
        assert!(all.iter().any(|(k, _)| k == "library_sync_interval_hours"));
    }
}

View File

@@ -20,8 +20,10 @@ pub struct JwtConfig {
pub issuer: Option<String>, pub issuer: Option<String>,
/// Expected audience (for validation) /// Expected audience (for validation)
pub audience: Option<String>, pub audience: Option<String>,
/// Token expiry in hours (default: 24) /// Access token expiry in hours (default: 24)
pub expiry_hours: u64, pub expiry_hours: u64,
/// Refresh token expiry in days (default: 30)
pub refresh_expiry_days: u64,
} }
impl JwtConfig { impl JwtConfig {
@@ -33,6 +35,7 @@ impl JwtConfig {
issuer: Option<String>, issuer: Option<String>,
audience: Option<String>, audience: Option<String>,
expiry_hours: Option<u64>, expiry_hours: Option<u64>,
refresh_expiry_days: Option<u64>,
is_production: bool, is_production: bool,
) -> Result<Self, JwtError> { ) -> Result<Self, JwtError> {
// Validate secret strength in production // Validate secret strength in production
@@ -48,6 +51,7 @@ impl JwtConfig {
issuer, issuer,
audience, audience,
expiry_hours: expiry_hours.unwrap_or(24), expiry_hours: expiry_hours.unwrap_or(24),
refresh_expiry_days: refresh_expiry_days.unwrap_or(30),
}) })
} }
@@ -58,10 +62,15 @@ impl JwtConfig {
issuer: None, issuer: None,
audience: None, audience: None,
expiry_hours: 24, expiry_hours: 24,
refresh_expiry_days: 30,
} }
} }
} }
fn default_token_type() -> String {
"access".to_string()
}
/// JWT claims structure /// JWT claims structure
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
pub struct JwtClaims { pub struct JwtClaims {
@@ -79,6 +88,9 @@ pub struct JwtClaims {
/// Audience /// Audience
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub aud: Option<String>, pub aud: Option<String>,
/// Token type: "access" or "refresh". Defaults to "access" for backward compat.
#[serde(default = "default_token_type")]
pub token_type: String,
} }
/// JWT-related errors /// JWT-related errors
@@ -141,7 +153,7 @@ impl JwtValidator {
} }
} }
/// Create a JWT token for the given user /// Create an access JWT token for the given user
pub fn create_token(&self, user: &User) -> Result<String, JwtError> { pub fn create_token(&self, user: &User) -> Result<String, JwtError> {
let now = SystemTime::now() let now = SystemTime::now()
.duration_since(UNIX_EPOCH) .duration_since(UNIX_EPOCH)
@@ -157,6 +169,30 @@ impl JwtValidator {
iat: now, iat: now,
iss: self.config.issuer.clone(), iss: self.config.issuer.clone(),
aud: self.config.audience.clone(), aud: self.config.audience.clone(),
token_type: "access".to_string(),
};
let header = Header::new(Algorithm::HS256);
encode(&header, &claims, &self.encoding_key).map_err(JwtError::CreationFailed)
}
/// Create a refresh JWT token for the given user (longer-lived)
pub fn create_refresh_token(&self, user: &User) -> Result<String, JwtError> {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs() as usize;
let expiry = now + (self.config.refresh_expiry_days as usize * 86400);
let claims = JwtClaims {
sub: user.id.to_string(),
email: user.email.as_ref().to_string(),
exp: expiry,
iat: now,
iss: self.config.issuer.clone(),
aud: self.config.audience.clone(),
token_type: "refresh".to_string(),
}; };
let header = Header::new(Algorithm::HS256); let header = Header::new(Algorithm::HS256);
@@ -176,14 +212,28 @@ impl JwtValidator {
Ok(token_data.claims) Ok(token_data.claims)
} }
/// Validate an access token — rejects refresh tokens
pub fn validate_access_token(&self, token: &str) -> Result<JwtClaims, JwtError> {
let claims = self.validate_token(token)?;
if claims.token_type != "access" {
return Err(JwtError::ValidationFailed("Not an access token".to_string()));
}
Ok(claims)
}
/// Validate a refresh token — rejects access tokens
pub fn validate_refresh_token(&self, token: &str) -> Result<JwtClaims, JwtError> {
let claims = self.validate_token(token)?;
if claims.token_type != "refresh" {
return Err(JwtError::ValidationFailed("Not a refresh token".to_string()));
}
Ok(claims)
}
/// Get the user ID (subject) from a token without full validation /// Get the user ID (subject) from a token without full validation
/// Useful for logging/debugging, but should not be trusted for auth /// Useful for logging/debugging, but should not be trusted for auth
pub fn decode_unverified(&self, token: &str) -> Result<JwtClaims, JwtError> { pub fn decode_unverified(&self, token: &str) -> Result<JwtClaims, JwtError> {
let mut validation = Validation::new(Algorithm::HS256); let token_data = jsonwebtoken::dangerous::insecure_decode::<JwtClaims>(token)
validation.insecure_disable_signature_validation();
validation.validate_exp = false;
let token_data = decode::<JwtClaims>(token, &self.decoding_key, &validation)
.map_err(|_| JwtError::InvalidFormat)?; .map_err(|_| JwtError::InvalidFormat)?;
Ok(token_data.claims) Ok(token_data.claims)
@@ -232,6 +282,7 @@ mod tests {
None, None,
None, None,
None, None,
None,
true, // Production mode true, // Production mode
); );
@@ -245,6 +296,7 @@ mod tests {
None, None,
None, None,
None, None,
None,
false, // Development mode false, // Development mode
); );

View File

@@ -1,275 +0,0 @@
//! SQLite and PostgreSQL adapters for ChannelRepository
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{
Channel, ChannelId, ChannelRepository, DomainError, DomainResult, RecyclePolicy,
ScheduleConfig, UserId,
};
// ============================================================================
// Row type + mapping (shared between SQLite and Postgres)
// ============================================================================
/// Raw database row for the `channels` table. Every column comes back as
/// TEXT; decoding into domain types happens in `TryFrom<ChannelRow> for Channel`.
#[derive(Debug, FromRow)]
struct ChannelRow {
    id: String,                 // channel UUID, stored as text
    owner_id: String,           // owning user's UUID, stored as text
    name: String,
    description: Option<String>,
    timezone: String,           // opaque here; presumably an IANA tz name — TODO confirm
    schedule_config: String,    // JSON-serialized ScheduleConfig
    recycle_policy: String,     // JSON-serialized RecyclePolicy
    created_at: String,         // RFC 3339 or legacy "YYYY-MM-DD HH:MM:SS" (see parse_dt)
    updated_at: String,         // same formats as created_at
}
/// Parse a stored timestamp: RFC 3339 first, then the legacy
/// `YYYY-MM-DD HH:MM:SS` form (interpreted as UTC). The error message
/// reports the offending string so bad rows are easy to find.
fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
        return Ok(dt.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
impl TryFrom<ChannelRow> for Channel {
    type Error = DomainError;

    /// Decode a raw `channels` row into the domain `Channel`.
    ///
    /// Any corruption (bad UUIDs, malformed JSON blobs, unparsable
    /// timestamps) is surfaced as `DomainError::RepositoryError` with a
    /// message naming the offending column.
    fn try_from(row: ChannelRow) -> Result<Self, Self::Error> {
        // UUIDs are stored as text; reject rows with corrupt identifiers.
        let id: ChannelId = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
        let owner_id: UserId = Uuid::parse_str(&row.owner_id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid owner UUID: {}", e)))?;
        // Config blobs are the JSON written by `save`.
        let schedule_config: ScheduleConfig = serde_json::from_str(&row.schedule_config)
            .map_err(|e| {
                DomainError::RepositoryError(format!("Invalid schedule_config JSON: {}", e))
            })?;
        let recycle_policy: RecyclePolicy = serde_json::from_str(&row.recycle_policy)
            .map_err(|e| {
                DomainError::RepositoryError(format!("Invalid recycle_policy JSON: {}", e))
            })?;
        Ok(Channel {
            id,
            owner_id,
            name: row.name,
            description: row.description,
            timezone: row.timezone,
            schedule_config,
            recycle_policy,
            created_at: parse_dt(&row.created_at)?,
            updated_at: parse_dt(&row.updated_at)?,
        })
    }
}
// Column list shared by every SELECT so the result shape always matches
// `ChannelRow`'s fields, for both backends.
const SELECT_COLS: &str =
    "id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at";
// ============================================================================
// SQLite adapter
// ============================================================================
/// SQLite-backed implementation of the domain `ChannelRepository` port.
#[cfg(feature = "sqlite")]
pub struct SqliteChannelRepository {
    pool: sqlx::SqlitePool,
}

#[cfg(feature = "sqlite")]
impl SqliteChannelRepository {
    /// Wrap an existing connection pool; the repository holds no other state.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
#[cfg(feature = "sqlite")]
#[async_trait]
impl ChannelRepository for SqliteChannelRepository {
    /// Look up a single channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = ?");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(Channel::try_from).transpose()
    }

    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = ? ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Every channel in the system, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Upsert keyed on `id`: insert a new row, or overwrite the mutable
    /// columns on conflict while preserving `id`, `owner_id`, `created_at`.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        // Config structures are persisted as JSON text columns.
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
            name = excluded.name,
            description = excluded.description,
            timezone = excluded.timezone,
            schedule_config = excluded.schedule_config,
            recycle_policy = excluded.recycle_policy,
            updated_at = excluded.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// Hard-delete a channel row; a missing id is not an error.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = ?")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}
// ============================================================================
// PostgreSQL adapter
// ============================================================================
/// PostgreSQL-backed implementation of the domain `ChannelRepository` port.
#[cfg(feature = "postgres")]
pub struct PostgresChannelRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}

#[cfg(feature = "postgres")]
impl PostgresChannelRepository {
    /// Wrap an existing connection pool; the repository holds no other state.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
}
// Mirrors the SQLite implementation above; differs only in `$n`
// placeholders and `EXCLUDED` casing required by PostgreSQL.
#[cfg(feature = "postgres")]
#[async_trait]
impl ChannelRepository for PostgresChannelRepository {
    /// Look up a single channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = $1");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(Channel::try_from).transpose()
    }

    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = $1 ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Every channel in the system, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Upsert keyed on `id`: insert a new row, or overwrite the mutable
    /// columns on conflict while preserving `id`, `owner_id`, `created_at`.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        // Config structures are persisted as JSON text columns.
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
            ON CONFLICT(id) DO UPDATE SET
            name = EXCLUDED.name,
            description = EXCLUDED.description,
            timezone = EXCLUDED.timezone,
            schedule_config = EXCLUDED.schedule_config,
            recycle_policy = EXCLUDED.recycle_policy,
            updated_at = EXCLUDED.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// Hard-delete a channel row; a missing id is not an error.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = $1")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,92 @@
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{AccessMode, Channel, ChannelId, DomainError, LogoPosition, RecyclePolicy, ScheduleConfig, ScheduleConfigCompat, UserId};
/// Raw database row for the `channels` table, shared by the SQLite and
/// Postgres adapters. Columns are decoded into domain types by the
/// `TryFrom<ChannelRow> for Channel` impl below.
#[derive(Debug, FromRow)]
pub(super) struct ChannelRow {
    pub id: String,                        // channel UUID, stored as text
    pub owner_id: String,                  // owning user's UUID, stored as text
    pub name: String,
    pub description: Option<String>,
    pub timezone: String,
    pub schedule_config: String,           // JSON, parsed via ScheduleConfigCompat
    pub recycle_policy: String,            // JSON-serialized RecyclePolicy
    pub auto_schedule: i64,                // integer boolean (0 = off, nonzero = on)
    pub access_mode: String,               // serde string form of AccessMode
    pub access_password_hash: Option<String>,
    pub logo: Option<String>,
    pub logo_position: String,             // serde string form of LogoPosition
    pub logo_opacity: f32,
    pub webhook_url: Option<String>,
    pub webhook_poll_interval_secs: i64,   // cast to u32 in the mapping
    pub webhook_body_template: Option<String>,
    pub webhook_headers: Option<String>,
    pub created_at: String,                // RFC 3339 or legacy "YYYY-MM-DD HH:MM:SS"
    pub updated_at: String,                // same formats as created_at
}
/// Parse a stored timestamp: RFC 3339 first, then the legacy
/// `YYYY-MM-DD HH:MM:SS` form (interpreted as UTC). The error message
/// reports the offending string so bad rows are easy to find.
pub(super) fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
        return Ok(dt.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
impl TryFrom<ChannelRow> for Channel {
    type Error = DomainError;

    /// Decode a raw `channels` row into the domain `Channel`.
    ///
    /// UUID / JSON / timestamp corruption becomes
    /// `DomainError::RepositoryError`. Unknown `access_mode` or
    /// `logo_position` strings do NOT fail the row — they silently fall
    /// back to the enum defaults via `unwrap_or_default`.
    fn try_from(row: ChannelRow) -> Result<Self, Self::Error> {
        let id: ChannelId = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
        let owner_id: UserId = Uuid::parse_str(&row.owner_id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid owner UUID: {}", e)))?;
        // Parsed through the compat type so older stored config shapes still load.
        let schedule_config: ScheduleConfig = serde_json::from_str::<ScheduleConfigCompat>(&row.schedule_config)
            .map_err(|e| {
                DomainError::RepositoryError(format!("Invalid schedule_config JSON: {}", e))
            })
            .map(ScheduleConfig::from)?;
        let recycle_policy: RecyclePolicy = serde_json::from_str(&row.recycle_policy)
            .map_err(|e| {
                DomainError::RepositoryError(format!("Invalid recycle_policy JSON: {}", e))
            })?;
        // Enums are stored as bare strings; round-trip through a JSON string
        // value to reuse the enums' serde names.
        let access_mode: AccessMode = serde_json::from_value(
            serde_json::Value::String(row.access_mode),
        )
        .unwrap_or_default();
        let logo_position: LogoPosition = serde_json::from_value(
            serde_json::Value::String(row.logo_position),
        )
        .unwrap_or_default();
        Ok(Channel {
            id,
            owner_id,
            name: row.name,
            description: row.description,
            timezone: row.timezone,
            schedule_config,
            recycle_policy,
            // SQLite has no boolean type; any nonzero integer means enabled.
            auto_schedule: row.auto_schedule != 0,
            access_mode,
            access_password_hash: row.access_password_hash,
            logo: row.logo,
            logo_position,
            logo_opacity: row.logo_opacity,
            webhook_url: row.webhook_url,
            // NOTE(review): `as u32` wraps for out-of-range column values —
            // assumes the column is only ever written from a u32; confirm.
            webhook_poll_interval_secs: row.webhook_poll_interval_secs as u32,
            webhook_body_template: row.webhook_body_template,
            webhook_headers: row.webhook_headers,
            created_at: parse_dt(&row.created_at)?,
            updated_at: parse_dt(&row.updated_at)?,
        })
    }
}
// Column list shared by every SELECT in both backends so the result shape
// always matches `ChannelRow`'s fields (19 columns, same order).
pub(super) const SELECT_COLS: &str =
    "id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, webhook_url, webhook_poll_interval_secs, webhook_body_template, webhook_headers, created_at, updated_at";

View File

@@ -0,0 +1,13 @@
//! SQLite and PostgreSQL adapters for ChannelRepository
// Shared row type and row→domain mapping used by both backends.
mod mapping;
// Backends are feature-gated so only the compiled database is built.
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "postgres")]
mod postgres;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteChannelRepository;
#[cfg(feature = "postgres")]
pub use postgres::PostgresChannelRepository;

View File

@@ -0,0 +1,127 @@
use async_trait::async_trait;
use domain::{Channel, ChannelId, ChannelRepository, DomainError, DomainResult, UserId};
use super::mapping::{ChannelRow, SELECT_COLS};
/// PostgreSQL-backed implementation of the domain `ChannelRepository` port.
pub struct PostgresChannelRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}

impl PostgresChannelRepository {
    /// Wrap an existing connection pool; the repository holds no other state.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl ChannelRepository for PostgresChannelRepository {
    /// Look up a single channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = $1");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(Channel::try_from).transpose()
    }

    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = $1 ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Every channel in the system, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Upsert keyed on `id`: insert a new row, or overwrite the mutable
    /// columns on conflict while preserving `id`, `owner_id`, `created_at`.
    ///
    /// FIX: this previously wrote only 14 columns, silently dropping
    /// `logo`, `logo_position`, `logo_opacity`, `webhook_body_template`
    /// and `webhook_headers` on Postgres even though `SELECT_COLS` reads
    /// them back. It now persists the same 19-column set as the SQLite
    /// adapter, so logo/webhook-template changes survive a save.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        // Enums are stored as their serde string form; fall back to the
        // schema defaults if serialization ever fails.
        let access_mode = serde_json::to_value(&channel.access_mode)
            .ok()
            .and_then(|v| v.as_str().map(str::to_owned))
            .unwrap_or_else(|| "public".to_owned());
        let logo_position = serde_json::to_value(&channel.logo_position)
            .ok()
            .and_then(|v| v.as_str().map(str::to_owned))
            .unwrap_or_else(|| "top_right".to_owned());
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, webhook_url, webhook_poll_interval_secs, webhook_body_template, webhook_headers, created_at, updated_at)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19)
            ON CONFLICT(id) DO UPDATE SET
            name = EXCLUDED.name,
            description = EXCLUDED.description,
            timezone = EXCLUDED.timezone,
            schedule_config = EXCLUDED.schedule_config,
            recycle_policy = EXCLUDED.recycle_policy,
            auto_schedule = EXCLUDED.auto_schedule,
            access_mode = EXCLUDED.access_mode,
            access_password_hash = EXCLUDED.access_password_hash,
            logo = EXCLUDED.logo,
            logo_position = EXCLUDED.logo_position,
            logo_opacity = EXCLUDED.logo_opacity,
            webhook_url = EXCLUDED.webhook_url,
            webhook_poll_interval_secs = EXCLUDED.webhook_poll_interval_secs,
            webhook_body_template = EXCLUDED.webhook_body_template,
            webhook_headers = EXCLUDED.webhook_headers,
            updated_at = EXCLUDED.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.auto_schedule as i64)
        .bind(&access_mode)
        .bind(&channel.access_password_hash)
        .bind(&channel.logo)
        .bind(&logo_position)
        .bind(channel.logo_opacity)
        .bind(&channel.webhook_url)
        .bind(channel.webhook_poll_interval_secs as i64)
        .bind(&channel.webhook_body_template)
        .bind(&channel.webhook_headers)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// Channels with auto-scheduling enabled, oldest first.
    async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE auto_schedule = 1 ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Hard-delete a channel row; a missing id is not an error.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = $1")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,270 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::Row;
use uuid::Uuid;
use domain::{Channel, ChannelConfigSnapshot, ChannelId, ChannelRepository, DomainError, DomainResult, ScheduleConfig, ScheduleConfigCompat, UserId};
use super::mapping::{ChannelRow, SELECT_COLS};
/// SQLite-backed implementation of the domain `ChannelRepository` port,
/// including channel config snapshot storage.
pub struct SqliteChannelRepository {
    pool: sqlx::SqlitePool,
}

impl SqliteChannelRepository {
    /// Wrap an existing connection pool; the repository holds no other state.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        Self { pool }
    }
}
#[async_trait]
impl ChannelRepository for SqliteChannelRepository {
    /// Look up a single channel by id; `Ok(None)` when no row matches.
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = ?");
        let row: Option<ChannelRow> = sqlx::query_as(&sql)
            .bind(id.to_string())
            .fetch_optional(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        row.map(Channel::try_from).transpose()
    }

    /// All channels owned by `owner_id`, oldest first.
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE owner_id = ? ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .bind(owner_id.to_string())
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Every channel in the system, oldest first.
    async fn find_all(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!("SELECT {SELECT_COLS} FROM channels ORDER BY created_at ASC");
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Upsert keyed on `id`: insert a new row, or overwrite the mutable
    /// columns on conflict while preserving `id`, `owner_id`, `created_at`.
    async fn save(&self, channel: &Channel) -> DomainResult<()> {
        // Config structures are persisted as JSON text columns.
        let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
        })?;
        let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
            DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
        })?;
        // Enums are stored as their serde string form; fall back to the
        // schema defaults if serialization ever fails.
        let access_mode = serde_json::to_value(&channel.access_mode)
            .ok()
            .and_then(|v| v.as_str().map(str::to_owned))
            .unwrap_or_else(|| "public".to_owned());
        let logo_position = serde_json::to_value(&channel.logo_position)
            .ok()
            .and_then(|v| v.as_str().map(str::to_owned))
            .unwrap_or_else(|| "top_right".to_owned());
        sqlx::query(
            r#"
            INSERT INTO channels
            (id, owner_id, name, description, timezone, schedule_config, recycle_policy, auto_schedule, access_mode, access_password_hash, logo, logo_position, logo_opacity, webhook_url, webhook_poll_interval_secs, webhook_body_template, webhook_headers, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
            name = excluded.name,
            description = excluded.description,
            timezone = excluded.timezone,
            schedule_config = excluded.schedule_config,
            recycle_policy = excluded.recycle_policy,
            auto_schedule = excluded.auto_schedule,
            access_mode = excluded.access_mode,
            access_password_hash = excluded.access_password_hash,
            logo = excluded.logo,
            logo_position = excluded.logo_position,
            logo_opacity = excluded.logo_opacity,
            webhook_url = excluded.webhook_url,
            webhook_poll_interval_secs = excluded.webhook_poll_interval_secs,
            webhook_body_template = excluded.webhook_body_template,
            webhook_headers = excluded.webhook_headers,
            updated_at = excluded.updated_at
            "#,
        )
        .bind(channel.id.to_string())
        .bind(channel.owner_id.to_string())
        .bind(&channel.name)
        .bind(&channel.description)
        .bind(&channel.timezone)
        .bind(&schedule_config)
        .bind(&recycle_policy)
        .bind(channel.auto_schedule as i64)
        .bind(&access_mode)
        .bind(&channel.access_password_hash)
        .bind(&channel.logo)
        .bind(&logo_position)
        .bind(channel.logo_opacity)
        .bind(&channel.webhook_url)
        .bind(channel.webhook_poll_interval_secs as i64)
        .bind(&channel.webhook_body_template)
        .bind(&channel.webhook_headers)
        .bind(channel.created_at.to_rfc3339())
        .bind(channel.updated_at.to_rfc3339())
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// Channels with auto-scheduling enabled, oldest first.
    async fn find_auto_schedule_enabled(&self) -> DomainResult<Vec<Channel>> {
        let sql = format!(
            "SELECT {SELECT_COLS} FROM channels WHERE auto_schedule = 1 ORDER BY created_at ASC"
        );
        let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(Channel::try_from).collect()
    }

    /// Hard-delete a channel row; a missing id is not an error.
    async fn delete(&self, id: ChannelId) -> DomainResult<()> {
        sqlx::query("DELETE FROM channels WHERE id = ?")
            .bind(id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }

    /// Append a new config snapshot with the next per-channel version
    /// number. The MAX(version_num)+1 read and the INSERT run inside one
    /// transaction so concurrent saves cannot claim the same version.
    async fn save_config_snapshot(
        &self,
        channel_id: ChannelId,
        config: &ScheduleConfig,
        label: Option<String>,
    ) -> DomainResult<ChannelConfigSnapshot> {
        let id = Uuid::new_v4();
        let now = Utc::now();
        let config_json = serde_json::to_string(config)
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        let mut tx = self.pool.begin().await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // COALESCE makes the very first snapshot version 1.
        let version_num: i64 = sqlx::query_scalar(
            "SELECT COALESCE(MAX(version_num), 0) + 1 FROM channel_config_snapshots WHERE channel_id = ?"
        )
        .bind(channel_id.to_string())
        .fetch_one(&mut *tx)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        sqlx::query(
            "INSERT INTO channel_config_snapshots (id, channel_id, config_json, version_num, label, created_at)
             VALUES (?, ?, ?, ?, ?, ?)"
        )
        .bind(id.to_string())
        .bind(channel_id.to_string())
        .bind(&config_json)
        .bind(version_num)
        .bind(&label)
        .bind(now.to_rfc3339())
        .execute(&mut *tx)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        tx.commit().await.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(ChannelConfigSnapshot { id, channel_id, config: config.clone(), version_num, label, created_at: now })
    }

    /// All snapshots for a channel, newest version first.
    async fn list_config_snapshots(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<ChannelConfigSnapshot>> {
        let rows = sqlx::query(
            "SELECT id, config_json, version_num, label, created_at
             FROM channel_config_snapshots WHERE channel_id = ?
             ORDER BY version_num DESC"
        )
        .bind(channel_id.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.iter().map(|row| {
            let id: Uuid = row.get::<String, _>("id").parse()
                .map_err(|_| DomainError::RepositoryError("bad uuid".into()))?;
            let config_json: String = row.get("config_json");
            // Configs load through the compat type so older shapes still parse.
            let config_compat: ScheduleConfigCompat = serde_json::from_str(&config_json)
                .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
            let config: ScheduleConfig = config_compat.into();
            let version_num: i64 = row.get("version_num");
            let label: Option<String> = row.get("label");
            let created_at_str: String = row.get("created_at");
            // NOTE(review): strict RFC 3339 parse here, unlike mapping::parse_dt
            // which also accepts the legacy format — assumed snapshot rows are
            // only ever written by save_config_snapshot; confirm.
            let created_at = created_at_str.parse::<DateTime<Utc>>()
                .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
            Ok(ChannelConfigSnapshot { id, channel_id, config, version_num, label, created_at })
        }).collect()
    }

    /// Fetch one snapshot, scoped to the channel; `Ok(None)` when the id
    /// does not exist or belongs to a different channel.
    async fn get_config_snapshot(
        &self,
        channel_id: ChannelId,
        snapshot_id: Uuid,
    ) -> DomainResult<Option<ChannelConfigSnapshot>> {
        let row = sqlx::query(
            "SELECT id, config_json, version_num, label, created_at
             FROM channel_config_snapshots WHERE id = ? AND channel_id = ?"
        )
        .bind(snapshot_id.to_string())
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(row) => {
                let config_json: String = row.get("config_json");
                let config_compat: ScheduleConfigCompat = serde_json::from_str(&config_json)
                    .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
                let config: ScheduleConfig = config_compat.into();
                let version_num: i64 = row.get("version_num");
                let label: Option<String> = row.get("label");
                let created_at_str: String = row.get("created_at");
                let created_at = created_at_str.parse::<DateTime<Utc>>()
                    .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
                Ok(Some(ChannelConfigSnapshot { id: snapshot_id, channel_id, config, version_num, label, created_at }))
            }
        }
    }

    /// Update a snapshot's label and return the refreshed snapshot, or
    /// `Ok(None)` when the (id, channel) pair does not exist.
    /// NOTE(review): relies on UPDATE ... RETURNING (SQLite 3.35+) — confirm
    /// the minimum supported SQLite version.
    async fn patch_config_snapshot_label(
        &self,
        channel_id: ChannelId,
        snapshot_id: Uuid,
        label: Option<String>,
    ) -> DomainResult<Option<ChannelConfigSnapshot>> {
        let updated = sqlx::query(
            "UPDATE channel_config_snapshots SET label = ? WHERE id = ? AND channel_id = ? RETURNING id"
        )
        .bind(&label)
        .bind(snapshot_id.to_string())
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        if updated.is_none() {
            return Ok(None);
        }
        // Re-read so the caller gets the full, freshly-labeled snapshot.
        self.get_config_snapshot(channel_id, snapshot_id).await
    }
}

View File

@@ -1,7 +1,7 @@
use std::sync::Arc; use std::sync::Arc;
use crate::db::DatabasePool; use crate::db::DatabasePool;
use domain::{ChannelRepository, ScheduleRepository, UserRepository}; use domain::{ActivityLogRepository, ChannelRepository, IAppSettingsRepository, ILibraryRepository, ProviderConfigRepository, ScheduleRepository, TranscodeSettingsRepository, UserRepository};
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
pub enum FactoryError { pub enum FactoryError {
@@ -51,6 +51,40 @@ pub async fn build_channel_repository(
} }
} }
/// Build the activity-log repository for the configured database backend.
/// Currently only SQLite has an implementation; Postgres returns
/// `FactoryError::NotImplemented`.
pub async fn build_activity_log_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ActivityLogRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::activity_log_repository::SqliteActivityLogRepository::new(pool.clone()),
        )),
        #[cfg(feature = "postgres")]
        DatabasePool::Postgres(_pool) => Err(FactoryError::NotImplemented(
            "ActivityLogRepository not yet implemented for Postgres".to_string(),
        )),
        // Reachable only when neither backend feature matched the pool variant.
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "No database feature enabled".to_string(),
        )),
    }
}
/// Build the provider-config repository; SQLite only for now — any other
/// backend yields `FactoryError::NotImplemented`.
pub async fn build_provider_config_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ProviderConfigRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::provider_config_repository::SqliteProviderConfigRepository::new(pool.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "ProviderConfigRepository not implemented for this database".to_string(),
        )),
    }
}
pub async fn build_schedule_repository( pub async fn build_schedule_repository(
pool: &DatabasePool, pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ScheduleRepository>> { ) -> FactoryResult<Arc<dyn ScheduleRepository>> {
@@ -69,3 +103,88 @@ pub async fn build_schedule_repository(
)), )),
} }
} }
/// Build the transcode-settings repository; SQLite only for now.
pub async fn build_transcode_settings_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn TranscodeSettingsRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(p) => Ok(Arc::new(
            crate::transcode_settings_repository::SqliteTranscodeSettingsRepository::new(p.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "TranscodeSettingsRepository not implemented for this database".to_string(),
        )),
    }
}

/// Build the media-library repository; SQLite only for now.
pub async fn build_library_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ILibraryRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::library_repository::SqliteLibraryRepository::new(pool.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "LibraryRepository not implemented for this database".to_string(),
        )),
    }
}

/// Build the app-settings repository; SQLite only for now.
pub async fn build_app_settings_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn IAppSettingsRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::app_settings_repository::SqliteAppSettingsRepository::new(pool.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "AppSettingsRepository not implemented for this database".to_string(),
        )),
    }
}
/// Everything the local-files feature wires together in one call: the media
/// provider, its backing file index, and the transcode manager (present only
/// when a transcode directory was configured).
#[cfg(feature = "local-files")]
pub struct LocalFilesBundle {
    pub provider: Arc<crate::LocalFilesProvider>,
    pub local_index: Arc<crate::LocalIndex>,
    pub transcode_manager: Option<Arc<crate::TranscodeManager>>,
}
/// Construct the local-files provider stack (provider + index + optional
/// transcode manager). Requires the SQLite backend: `LocalIndex` persists
/// its state into the same pool.
#[cfg(feature = "local-files")]
pub async fn build_local_files_bundle(
    pool: &DatabasePool,
    root_dir: std::path::PathBuf,
    transcode_dir: Option<std::path::PathBuf>,
    cleanup_ttl_hours: u32,
    base_url: String,
    provider_id: &str,
) -> FactoryResult<LocalFilesBundle> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(sqlite_pool) => {
            let cfg = crate::LocalFilesConfig {
                root_dir,
                base_url,
                transcode_dir: transcode_dir.clone(),
                cleanup_ttl_hours,
            };
            let idx = Arc::new(crate::LocalIndex::new(&cfg, sqlite_pool.clone(), provider_id.to_string()).await);
            // Transcoding is optional: only built when a transcode dir is set.
            let tm = transcode_dir.as_ref().map(|td| {
                // NOTE(review): create_dir_all errors are swallowed (best-effort);
                // presumably the manager surfaces failures on first use — confirm.
                std::fs::create_dir_all(td).ok();
                crate::TranscodeManager::new(td.clone(), cleanup_ttl_hours)
            });
            let provider = Arc::new(crate::LocalFilesProvider::new(Arc::clone(&idx), cfg, tm.clone()));
            Ok(LocalFilesBundle { provider, local_index: idx, transcode_manager: tm })
        }
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "local-files requires SQLite".to_string(),
        )),
    }
}

View File

@@ -1,245 +0,0 @@
//! Jellyfin media provider adapter
//!
//! Implements [`IMediaProvider`] by talking to the Jellyfin HTTP API.
//! The domain never sees Jellyfin-specific types — this module translates
//! between Jellyfin's API model and the domain's abstract `MediaItem`/`MediaFilter`.
#![cfg(feature = "jellyfin")]
use async_trait::async_trait;
use serde::Deserialize;
use domain::{ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem, MediaItemId};
/// Ticks are Jellyfin's time unit: 1 tick = 100 nanoseconds → 10,000,000 ticks/sec.
const TICKS_PER_SEC: i64 = 10_000_000;
// ============================================================================
// Configuration
// ============================================================================
/// Connection details for a single Jellyfin instance.
///
/// Constructed by the caller; `JellyfinMediaProvider::new` normalizes
/// `base_url` by stripping any trailing slash.
#[derive(Debug, Clone)]
pub struct JellyfinConfig {
    /// e.g. `"http://192.168.1.10:8096"` — no trailing slash
    pub base_url: String,
    /// Jellyfin API key (Settings → API Keys)
    pub api_key: String,
    /// The Jellyfin user ID used for library browsing
    pub user_id: String,
}
// ============================================================================
// Adapter
// ============================================================================
/// Adapter translating between the Jellyfin HTTP API and the domain's
/// `IMediaProvider` abstraction. Holds one reusable HTTP client plus the
/// normalized connection config.
pub struct JellyfinMediaProvider {
    client: reqwest::Client,
    config: JellyfinConfig,
}
impl JellyfinMediaProvider {
    /// Build a provider from `config`, stripping any trailing slash from
    /// `base_url` so later `format!("{base}/path")` joins never double-slash.
    pub fn new(config: JellyfinConfig) -> Self {
        let base_url = config.base_url.trim_end_matches('/').to_string();
        let config = JellyfinConfig { base_url, ..config };
        Self { client: reqwest::Client::new(), config }
    }
}
#[async_trait]
impl IMediaProvider for JellyfinMediaProvider {
    /// Fetch items matching `filter` from the Jellyfin library.
    ///
    /// `MediaFilter.collections` maps to Jellyfin `ParentId` (library/folder UUID).
    /// Multiple collections are not supported in a single call; the first entry wins.
    /// Decades are mapped to Jellyfin's `MinYear`/`MaxYear`.
    async fn fetch_items(&self, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
        let url = format!(
            "{}/Users/{}/Items",
            self.config.base_url, self.config.user_id
        );
        // Always recurse into folders; request the extra fields the mapper reads.
        let mut params: Vec<(&str, String)> = vec![
            ("Recursive", "true".into()),
            ("Fields", "Genres,Tags,RunTimeTicks,ProductionYear".into()),
        ];
        if let Some(ct) = &filter.content_type {
            params.push(("IncludeItemTypes", jellyfin_item_type(ct).into()));
        }
        if !filter.genres.is_empty() {
            // '|' separates alternative values in Jellyfin query parameters.
            params.push(("Genres", filter.genres.join("|")));
        }
        if let Some(decade) = filter.decade {
            // decade = 1980 → MinYear 1980, MaxYear 1989 (inclusive range).
            params.push(("MinYear", decade.to_string()));
            params.push(("MaxYear", (decade + 9).to_string()));
        }
        if !filter.tags.is_empty() {
            params.push(("Tags", filter.tags.join("|")));
        }
        if let Some(min) = filter.min_duration_secs {
            // Duration bounds are given in seconds; Jellyfin filters on ticks.
            params.push(("MinRunTimeTicks", (min as i64 * TICKS_PER_SEC).to_string()));
        }
        if let Some(max) = filter.max_duration_secs {
            params.push(("MaxRunTimeTicks", (max as i64 * TICKS_PER_SEC).to_string()));
        }
        // Treat the first collection entry as a Jellyfin ParentId (library/folder)
        if let Some(parent_id) = filter.collections.first() {
            params.push(("ParentId", parent_id.clone()));
        }
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .query(&params)
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        if !response.status().is_success() {
            return Err(DomainError::InfrastructureError(format!(
                "Jellyfin returned HTTP {}",
                response.status()
            )));
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        // Unknown item types (Series, Season, Folder, …) map to None and are dropped.
        Ok(body.items.into_iter().filter_map(map_jellyfin_item).collect())
    }
    /// Fetch a single item by its opaque ID.
    ///
    /// Returns `None` if the item is not found or cannot be mapped.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        let url = format!(
            "{}/Users/{}/Items",
            self.config.base_url, self.config.user_id
        );
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .query(&[
                ("Ids", item_id.as_ref()),
                ("Fields", "Genres,Tags,RunTimeTicks,ProductionYear"),
            ])
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        // Any non-2xx (including 404) is treated as "not found", not an error.
        if !response.status().is_success() {
            return Ok(None);
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        Ok(body.items.into_iter().next().and_then(map_jellyfin_item))
    }
    /// Build an HLS stream URL for a Jellyfin item.
    ///
    /// Returns a `master.m3u8` playlist URL. Jellyfin transcodes to H.264/AAC
    /// segments on the fly. HLS is preferred over a single MP4 stream because
    /// `StartTimeTicks` works reliably with HLS — each segment is independent,
    /// so Jellyfin can begin the playlist at the correct broadcast offset
    /// without needing to byte-range seek into an in-progress transcode.
    ///
    /// The API key is embedded so the player needs no separate auth header.
    /// The caller (stream proxy route) appends `StartTimeTicks` when there is
    /// a non-zero broadcast offset.
    async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String> {
        Ok(format!(
            "{}/Videos/{}/master.m3u8?videoCodec=h264&audioCodec=aac&VideoBitRate=40000000&mediaSourceId={}&api_key={}",
            self.config.base_url,
            item_id.as_ref(),
            item_id.as_ref(),
            self.config.api_key,
        ))
    }
}
// ============================================================================
// Jellyfin API response types
// ============================================================================
/// Envelope Jellyfin wraps around every item listing response.
#[derive(Debug, Deserialize)]
struct JellyfinItemsResponse {
    #[serde(rename = "Items")]
    items: Vec<JellyfinItem>,
}
/// Subset of Jellyfin's item DTO that this adapter consumes. Optional fields
/// may be absent depending on the request's `Fields` parameter and item type.
#[derive(Debug, Deserialize)]
struct JellyfinItem {
    #[serde(rename = "Id")]
    id: String,
    #[serde(rename = "Name")]
    name: String,
    // "Movie", "Episode", "Series", … — drives map_jellyfin_item's filtering.
    #[serde(rename = "Type")]
    item_type: String,
    // Runtime in 100 ns ticks (see TICKS_PER_SEC).
    #[serde(rename = "RunTimeTicks")]
    run_time_ticks: Option<i64>,
    #[serde(rename = "Genres")]
    genres: Option<Vec<String>>,
    #[serde(rename = "ProductionYear")]
    production_year: Option<u16>,
    #[serde(rename = "Tags")]
    tags: Option<Vec<String>>,
}
// ============================================================================
// Mapping helpers
// ============================================================================
/// Translate a domain `ContentType` into the value Jellyfin expects in the
/// `IncludeItemTypes` query parameter.
fn jellyfin_item_type(ct: &ContentType) -> &'static str {
    match ct {
        ContentType::Episode => "Episode",
        // Jellyfin has no native "Short" type; short films are filed as Movies
        ContentType::Movie | ContentType::Short => "Movie",
    }
}
/// Map a raw Jellyfin item to a domain `MediaItem`. Returns `None` for unknown
/// item types (e.g. Season, Series, Folder) so they are silently skipped.
fn map_jellyfin_item(item: JellyfinItem) -> Option<MediaItem> {
    let content_type = match item.item_type.as_str() {
        "Movie" => ContentType::Movie,
        "Episode" => ContentType::Episode,
        _ => return None,
    };
    // A missing runtime is recorded as zero seconds rather than dropping the item.
    let duration_secs = match item.run_time_ticks {
        Some(ticks) => (ticks / TICKS_PER_SEC) as u32,
        None => 0,
    };
    Some(MediaItem {
        id: MediaItemId::new(item.id),
        title: item.name,
        content_type,
        duration_secs,
        genres: item.genres.unwrap_or_default(),
        year: item.production_year,
        tags: item.tags.unwrap_or_default(),
    })
}

View File

@@ -0,0 +1,10 @@
/// Connection details for a single Jellyfin instance.
///
/// The serde derives allow this config to be serialized/deserialized (e.g. as
/// part of stored provider configuration).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct JellyfinConfig {
    /// e.g. `"http://192.168.1.10:8096"` — no trailing slash
    pub base_url: String,
    /// Jellyfin API key (Settings → API Keys)
    pub api_key: String,
    /// The Jellyfin user ID used for library browsing
    pub user_id: String,
}

View File

@@ -0,0 +1,38 @@
use domain::{ContentType, MediaItem, MediaItemId};
use super::models::JellyfinItem;
/// Ticks are Jellyfin's time unit: 1 tick = 100 nanoseconds → 10,000,000 ticks/sec.
pub(super) const TICKS_PER_SEC: i64 = 10_000_000;

/// Map a raw Jellyfin item to a domain `MediaItem`. Returns `None` for unknown
/// item types (e.g. Season, Series, Folder) so they are silently skipped.
pub(super) fn map_jellyfin_item(item: JellyfinItem) -> Option<MediaItem> {
    let content_type = match item.item_type.as_str() {
        "Movie" => ContentType::Movie,
        "Episode" => ContentType::Episode,
        _ => return None,
    };
    // A missing runtime maps to zero seconds instead of discarding the item.
    let duration_secs = match item.run_time_ticks {
        Some(ticks) => (ticks / TICKS_PER_SEC) as u32,
        None => 0,
    };
    Some(MediaItem {
        id: MediaItemId::new(item.id),
        title: item.name,
        content_type,
        duration_secs,
        description: item.overview,
        genres: item.genres.unwrap_or_default(),
        year: item.production_year,
        tags: item.tags.unwrap_or_default(),
        series_name: item.series_name,
        season_number: item.parent_index_number,
        episode_number: item.index_number,
        // TODO(library-sync): populate thumbnail_url from Jellyfin image API and collection_id from parent_id when FullSyncAdapter is implemented (Task 5)
        thumbnail_url: None,
        collection_id: None,
    })
}

View File

@@ -0,0 +1,15 @@
//! Jellyfin media provider adapter
//!
//! Implements [`IMediaProvider`] by talking to the Jellyfin HTTP API.
//! The domain never sees Jellyfin-specific types — this module translates
//! between Jellyfin's API model and the domain's abstract `MediaItem`/`MediaFilter`.
#![cfg(feature = "jellyfin")]
mod config;
mod mapping;
mod models;
mod provider;
pub use config::JellyfinConfig;
pub use provider::JellyfinMediaProvider;

View File

@@ -0,0 +1,71 @@
use serde::Deserialize;
use domain::ContentType;
// ============================================================================
// Jellyfin API response types
// ============================================================================
/// Envelope Jellyfin wraps around item listings (`/Users/{id}/Items`,
/// `/Users/{id}/Views`, `/Genres`).
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinItemsResponse {
    #[serde(rename = "Items")]
    pub items: Vec<JellyfinItem>,
}
/// Subset of Jellyfin's item DTO that this adapter consumes. Optional fields
/// may be absent depending on the request's `Fields` parameter and item type.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinItem {
    #[serde(rename = "Id")]
    pub id: String,
    #[serde(rename = "Name")]
    pub name: String,
    /// "Movie", "Episode", "Series", … — drives mapping and filtering.
    #[serde(rename = "Type")]
    pub item_type: String,
    /// Runtime in 100 ns ticks (see `TICKS_PER_SEC`).
    #[serde(rename = "RunTimeTicks")]
    pub run_time_ticks: Option<i64>,
    /// Plot summary; populated when `Overview` is requested via `Fields`.
    #[serde(rename = "Overview")]
    pub overview: Option<String>,
    #[serde(rename = "Genres")]
    pub genres: Option<Vec<String>>,
    #[serde(rename = "ProductionYear")]
    pub production_year: Option<u16>,
    #[serde(rename = "Tags")]
    pub tags: Option<Vec<String>>,
    /// TV show name (episodes only)
    #[serde(rename = "SeriesName")]
    pub series_name: Option<String>,
    /// Season number (episodes only)
    #[serde(rename = "ParentIndexNumber")]
    pub parent_index_number: Option<u32>,
    /// Episode number within the season (episodes only)
    #[serde(rename = "IndexNumber")]
    pub index_number: Option<u32>,
    /// Collection type for virtual library folders (e.g. "movies", "tvshows")
    #[serde(rename = "CollectionType")]
    pub collection_type: Option<String>,
    /// Total number of child items (used for Series to count episodes)
    #[serde(rename = "RecursiveItemCount")]
    pub recursive_item_count: Option<u32>,
}
/// Response of `POST /Items/{id}/PlaybackInfo` — used to probe direct-stream support.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinPlaybackInfoResponse {
    #[serde(rename = "MediaSources")]
    pub media_sources: Vec<JellyfinMediaSource>,
}
/// One playable source of an item, as reported by PlaybackInfo.
#[derive(Debug, Deserialize)]
pub(super) struct JellyfinMediaSource {
    #[serde(rename = "SupportsDirectStream")]
    pub supports_direct_stream: bool,
    /// Relative direct-stream URL, when the server provides one.
    #[serde(rename = "DirectStreamUrl")]
    pub direct_stream_url: Option<String>,
}
/// Translate a domain `ContentType` into Jellyfin's `IncludeItemTypes` value.
pub(super) fn jellyfin_item_type(ct: &ContentType) -> &'static str {
    match ct {
        ContentType::Episode => "Episode",
        // Jellyfin has no native "Short" type; short films are filed as Movies
        ContentType::Short | ContentType::Movie => "Movie",
    }
}

View File

@@ -0,0 +1,409 @@
use async_trait::async_trait;
use domain::{
Collection, ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem,
MediaItemId, ProviderCapabilities, SeriesSummary, StreamQuality, StreamingProtocol,
};
use super::config::JellyfinConfig;
use super::mapping::{map_jellyfin_item, TICKS_PER_SEC};
use super::models::{jellyfin_item_type, JellyfinItemsResponse, JellyfinPlaybackInfoResponse};
/// Jellyfin-backed implementation of the domain's `IMediaProvider`.
///
/// Fields are `pub(super)` so sibling modules of this adapter can share the
/// HTTP client and normalized config.
pub struct JellyfinMediaProvider {
    pub(super) client: reqwest::Client,
    pub(super) config: JellyfinConfig,
}
impl JellyfinMediaProvider {
    /// Build a provider from `config`, normalizing `base_url` by stripping any
    /// trailing slash so later URL concatenation is safe.
    pub fn new(config: JellyfinConfig) -> Self {
        Self {
            client: reqwest::Client::new(),
            config: JellyfinConfig {
                base_url: config.base_url.trim_end_matches('/').to_string(),
                ..config
            },
        }
    }
    /// Inner fetch: applies all filter fields plus an optional series name override.
    ///
    /// When `series_name` is set, results are additionally post-filtered in
    /// Rust so only episodes of that exact series (case-insensitive) survive.
    async fn fetch_items_for_series(
        &self,
        filter: &MediaFilter,
        series_name: Option<&str>,
    ) -> DomainResult<Vec<MediaItem>> {
        let url = format!(
            "{}/Users/{}/Items",
            self.config.base_url, self.config.user_id
        );
        // Always recurse into folders; request the extra fields the mapper reads.
        let mut params: Vec<(&str, String)> = vec![
            ("Recursive", "true".into()),
            ("Fields", "Genres,Tags,RunTimeTicks,ProductionYear,Overview".into()),
        ];
        if let Some(ct) = &filter.content_type {
            params.push(("IncludeItemTypes", jellyfin_item_type(ct).into()));
        }
        if !filter.genres.is_empty() {
            // '|' separates alternative values in Jellyfin query parameters.
            params.push(("Genres", filter.genres.join("|")));
        }
        if let Some(decade) = filter.decade {
            // decade = 1980 → years 1980..=1989 inclusive.
            params.push(("MinYear", decade.to_string()));
            params.push(("MaxYear", (decade + 9).to_string()));
        }
        if !filter.tags.is_empty() {
            params.push(("Tags", filter.tags.join("|")));
        }
        if let Some(min) = filter.min_duration_secs {
            // Duration bounds are given in seconds; Jellyfin filters on ticks.
            params.push(("MinRunTimeTicks", (min as i64 * TICKS_PER_SEC).to_string()));
        }
        if let Some(max) = filter.max_duration_secs {
            params.push(("MaxRunTimeTicks", (max as i64 * TICKS_PER_SEC).to_string()));
        }
        if let Some(name) = series_name {
            // Series-level targeting: skip ParentId so the show is found regardless
            // of which library it lives in. SeriesName is already precise enough.
            params.push(("SeriesName", name.to_string()));
            // Return episodes in chronological order when a specific series is
            // requested — season first, then episode within the season.
            params.push(("SortBy", "ParentIndexNumber,IndexNumber".into()));
            params.push(("SortOrder", "Ascending".into()));
            // Prevent Jellyfin from returning Season/Series container items.
            if filter.content_type.is_none() {
                params.push(("IncludeItemTypes", "Episode".into()));
            }
        } else {
            // No series filter — scope to the collection (library) if one is set.
            if let Some(parent_id) = filter.collections.first() {
                params.push(("ParentId", parent_id.clone()));
            }
        }
        if let Some(q) = &filter.search_term {
            params.push(("SearchTerm", q.clone()));
        }
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .query(&params)
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        if !response.status().is_success() {
            return Err(DomainError::InfrastructureError(format!(
                "Jellyfin returned HTTP {}",
                response.status()
            )));
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        // Jellyfin's SeriesName query param is not a strict filter — it can
        // bleed items from other shows. Post-filter in Rust to guarantee that
        // only the requested series is returned.
        let items = body.items.into_iter().filter_map(map_jellyfin_item);
        let items: Vec<MediaItem> = if let Some(name) = series_name {
            items
                .filter(|item| {
                    item.series_name
                        .as_deref()
                        .map(|s| s.eq_ignore_ascii_case(name))
                        .unwrap_or(false)
                })
                .collect()
        } else {
            items.collect()
        };
        Ok(items)
    }
}
#[async_trait]
impl IMediaProvider for JellyfinMediaProvider {
    /// Static description of what this provider supports; consumed upstream to
    /// decide which filters and UI affordances to expose.
    fn capabilities(&self) -> ProviderCapabilities {
        ProviderCapabilities {
            collections: true,
            series: true,
            genres: true,
            tags: true,
            decade: true,
            search: true,
            streaming_protocol: StreamingProtocol::Hls,
            rescan: false,
            transcode: false,
        }
    }
    /// Fetch items matching `filter` from the Jellyfin library.
    ///
    /// When `series_names` has more than one entry the results from each series
    /// are fetched sequentially and concatenated (Jellyfin only supports one
    /// `SeriesName` param per request).
    async fn fetch_items(&self, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
        match filter.series_names.len() {
            0 | 1 => {
                let series = filter.series_names.first().map(String::as_str);
                self.fetch_items_for_series(filter, series).await
            }
            _ => {
                // Fetch each series independently, then interleave round-robin.
                // Round-robin ensures every show gets fair representation when a
                // downstream limit is applied (preview, block fill) even if one
                // series has far more episodes than another.
                let mut per_series: Vec<Vec<MediaItem>> = Vec::new();
                for series_name in &filter.series_names {
                    let items = self
                        .fetch_items_for_series(filter, Some(series_name.as_str()))
                        .await?;
                    if !items.is_empty() {
                        per_series.push(items);
                    }
                }
                let max_len = per_series.iter().map(|s| s.len()).max().unwrap_or(0);
                let mut all = Vec::with_capacity(per_series.iter().map(|s| s.len()).sum());
                for i in 0..max_len {
                    for s in &per_series {
                        if let Some(item) = s.get(i) {
                            all.push(item.clone());
                        }
                    }
                }
                Ok(all)
            }
        }
    }
    /// Fetch a single item by its opaque ID.
    ///
    /// Returns `None` if the item is not found or cannot be mapped.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        let url = format!(
            "{}/Users/{}/Items",
            self.config.base_url, self.config.user_id
        );
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .query(&[
                ("Ids", item_id.as_ref()),
                // Request the same field set as fetch_items_for_series —
                // including Overview — so the mapped item's `description` is
                // populated consistently regardless of which path fetched it.
                ("Fields", "Genres,Tags,RunTimeTicks,ProductionYear,Overview"),
            ])
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        // Non-2xx (including 404) is treated as "not found", not an error.
        if !response.status().is_success() {
            return Ok(None);
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        Ok(body.items.into_iter().next().and_then(map_jellyfin_item))
    }
    /// List top-level virtual libraries available to the configured user.
    ///
    /// Uses the `/Users/{userId}/Views` endpoint which returns exactly the
    /// top-level nodes the user has access to (Movies, TV Shows, etc.).
    async fn list_collections(&self) -> DomainResult<Vec<Collection>> {
        let url = format!(
            "{}/Users/{}/Views",
            self.config.base_url, self.config.user_id
        );
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        if !response.status().is_success() {
            return Err(DomainError::InfrastructureError(format!(
                "Jellyfin returned HTTP {}",
                response.status()
            )));
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        Ok(body
            .items
            .into_iter()
            .map(|item| Collection {
                id: item.id,
                name: item.name,
                collection_type: item.collection_type,
            })
            .collect())
    }
    /// List all Series items, optionally scoped to a collection (ParentId).
    ///
    /// Results are sorted alphabetically. `RecursiveItemCount` gives the total
    /// episode count across all seasons without a second round-trip.
    async fn list_series(&self, collection_id: Option<&str>) -> DomainResult<Vec<SeriesSummary>> {
        let url = format!(
            "{}/Users/{}/Items",
            self.config.base_url, self.config.user_id
        );
        let mut params: Vec<(&str, String)> = vec![
            ("Recursive", "true".into()),
            ("IncludeItemTypes", "Series".into()),
            (
                "Fields",
                "Genres,ProductionYear,RecursiveItemCount".into(),
            ),
            ("SortBy", "SortName".into()),
            ("SortOrder", "Ascending".into()),
        ];
        if let Some(id) = collection_id {
            params.push(("ParentId", id.to_string()));
        }
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .query(&params)
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        if !response.status().is_success() {
            return Err(DomainError::InfrastructureError(format!(
                "Jellyfin returned HTTP {}",
                response.status()
            )));
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        Ok(body
            .items
            .into_iter()
            .map(|item| SeriesSummary {
                id: item.id,
                name: item.name,
                episode_count: item.recursive_item_count.unwrap_or(0),
                genres: item.genres.unwrap_or_default(),
                year: item.production_year,
            })
            .collect())
    }
    /// List available genres from the Jellyfin `/Genres` endpoint.
    ///
    /// Optionally filtered to a specific content type (Movie or Episode).
    async fn list_genres(
        &self,
        content_type: Option<&ContentType>,
    ) -> DomainResult<Vec<String>> {
        let url = format!("{}/Genres", self.config.base_url);
        let mut params: Vec<(&str, String)> = vec![
            ("UserId", self.config.user_id.clone()),
            ("SortBy", "SortName".into()),
            ("SortOrder", "Ascending".into()),
        ];
        if let Some(ct) = content_type {
            params.push(("IncludeItemTypes", jellyfin_item_type(ct).into()));
        }
        let response = self
            .client
            .get(&url)
            .header("X-Emby-Token", &self.config.api_key)
            .query(&params)
            .send()
            .await
            .map_err(|e| {
                DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
            })?;
        if !response.status().is_success() {
            return Err(DomainError::InfrastructureError(format!(
                "Jellyfin returned HTTP {}",
                response.status()
            )));
        }
        let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
            DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
        })?;
        Ok(body.items.into_iter().map(|item| item.name).collect())
    }
    /// Resolve a playable URL for `item_id`.
    ///
    /// `Direct` probes PlaybackInfo for a direct-stream URL and falls back to
    /// HLS at 8 Mbps when direct streaming is unavailable; `Transcode(bps)`
    /// always returns an HLS URL at the requested bitrate.
    async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
        match quality {
            StreamQuality::Direct => {
                let url = format!("{}/Items/{}/PlaybackInfo", self.config.base_url, item_id.as_ref());
                let resp = self.client.post(&url)
                    .header("X-Emby-Token", &self.config.api_key)
                    .query(&[("userId", &self.config.user_id), ("mediaSourceId", &item_id.as_ref().to_string())])
                    .json(&serde_json::json!({}))
                    .send().await
                    .map_err(|e| DomainError::InfrastructureError(format!("PlaybackInfo failed: {e}")))?;
                if resp.status().is_success() {
                    let info: JellyfinPlaybackInfoResponse = resp.json().await
                        .map_err(|e| DomainError::InfrastructureError(format!("PlaybackInfo parse failed: {e}")))?;
                    if let Some(src) = info.media_sources.first()
                        && src.supports_direct_stream
                        && let Some(rel_url) = &src.direct_stream_url
                    {
                        return Ok(format!("{}{}&api_key={}", self.config.base_url, rel_url, self.config.api_key));
                    }
                }
                // Fallback: HLS at 8 Mbps
                Ok(self.hls_url(item_id, 8_000_000))
            }
            StreamQuality::Transcode(bps) => Ok(self.hls_url(item_id, *bps)),
        }
    }
}
impl JellyfinMediaProvider {
    /// Build an HLS master-playlist URL for `item_id` at the given video
    /// `bitrate` (bits/sec). The API key is embedded so the player needs no
    /// separate auth header.
    fn hls_url(&self, item_id: &MediaItemId, bitrate: u32) -> String {
        let id = item_id.as_ref();
        let base = &self.config.base_url;
        let key = &self.config.api_key;
        format!(
            "{base}/Videos/{id}/master.m3u8?videoCodec=h264&audioCodec=aac&VideoBitRate={bitrate}&mediaSourceId={id}&SubtitleMethod=Hls&subtitleCodec=vtt&api_key={key}"
        )
    }
}

View File

@@ -17,19 +17,47 @@ pub mod auth;
pub mod db; pub mod db;
pub mod factory; pub mod factory;
pub mod jellyfin; pub mod jellyfin;
pub mod provider_registry;
mod library_sync;
pub use library_sync::FullSyncAdapter;
mod app_settings_repository;
mod activity_log_repository;
mod channel_repository; mod channel_repository;
mod library_repository;
mod provider_config_repository;
mod schedule_repository; mod schedule_repository;
mod transcode_settings_repository;
mod user_repository; mod user_repository;
#[cfg(feature = "local-files")]
pub mod local_files;
// Re-export for convenience // Re-export for convenience
pub use db::run_migrations; pub use db::run_migrations;
pub use provider_registry::ProviderRegistry;
#[cfg(feature = "sqlite")]
pub use app_settings_repository::SqliteAppSettingsRepository;
#[cfg(feature = "sqlite")]
pub use activity_log_repository::SqliteActivityLogRepository;
#[cfg(feature = "sqlite")] #[cfg(feature = "sqlite")]
pub use user_repository::SqliteUserRepository; pub use user_repository::SqliteUserRepository;
#[cfg(feature = "sqlite")] #[cfg(feature = "sqlite")]
pub use channel_repository::SqliteChannelRepository; pub use channel_repository::SqliteChannelRepository;
#[cfg(feature = "sqlite")] #[cfg(feature = "sqlite")]
pub use provider_config_repository::SqliteProviderConfigRepository;
#[cfg(feature = "sqlite")]
pub use schedule_repository::SqliteScheduleRepository; pub use schedule_repository::SqliteScheduleRepository;
#[cfg(feature = "sqlite")]
pub use transcode_settings_repository::SqliteTranscodeSettingsRepository;
#[cfg(feature = "sqlite")]
pub use library_repository::SqliteLibraryRepository;
pub use domain::TranscodeSettingsRepository;
#[cfg(feature = "jellyfin")] #[cfg(feature = "jellyfin")]
pub use jellyfin::{JellyfinConfig, JellyfinMediaProvider}; pub use jellyfin::{JellyfinConfig, JellyfinMediaProvider};
#[cfg(feature = "local-files")]
pub use local_files::{LocalFilesConfig, LocalFilesProvider, LocalIndex, TranscodeManager, decode_stream_id};

View File

@@ -0,0 +1,508 @@
//! SQLite implementation of ILibraryRepository.
use async_trait::async_trait;
use sqlx::SqlitePool;
use domain::{
ContentType, DomainError, DomainResult, ILibraryRepository,
LibraryCollection, LibraryItem, LibrarySearchFilter, LibrarySyncLogEntry, LibrarySyncResult,
SeasonSummary, ShowSummary,
};
/// SQLite-backed store for the synced media library cache.
pub struct SqliteLibraryRepository {
    pool: SqlitePool,
}
impl SqliteLibraryRepository {
    /// Wrap an existing connection pool; performs no I/O.
    pub fn new(pool: SqlitePool) -> Self {
        Self { pool }
    }
}
/// Canonical lowercase string stored in the `content_type` column.
fn content_type_str(ct: &ContentType) -> &'static str {
    match ct {
        ContentType::Episode => "episode",
        ContentType::Short => "short",
        ContentType::Movie => "movie",
    }
}
/// Inverse of `content_type_str`; any unknown string falls back to `Movie`.
fn parse_content_type(s: &str) -> ContentType {
    if s == "episode" {
        ContentType::Episode
    } else if s == "short" {
        ContentType::Short
    } else {
        ContentType::Movie
    }
}
#[async_trait]
impl ILibraryRepository for SqliteLibraryRepository {
/// Paged search over cached library items; returns (page of items, total match count).
async fn search(&self, filter: &LibrarySearchFilter) -> DomainResult<(Vec<LibraryItem>, u32)> {
    // The WHERE clause is assembled as literal SQL: string values have single
    // quotes doubled (SQLite's escape inside string literals); numeric values
    // come from typed integers.
    // NOTE(review): bind parameters would be sturdier than manual escaping —
    // consider migrating next time this query builder is touched.
    let mut conditions: Vec<String> = vec![];
    if let Some(ref p) = filter.provider_id {
        conditions.push(format!("provider_id = '{}'", p.replace('\'', "''")));
    }
    if let Some(ref ct) = filter.content_type {
        conditions.push(format!("content_type = '{}'", content_type_str(ct)));
    }
    if let Some(ref st) = filter.search_term {
        conditions.push(format!("title LIKE '%{}%'", st.replace('\'', "''")));
    }
    if let Some(ref cid) = filter.collection_id {
        conditions.push(format!("collection_id = '{}'", cid.replace('\'', "''")));
    }
    if let Some(decade) = filter.decade {
        // decade = 1980 → years in [1980, 1990) — same span as 1980..=1989.
        let end = decade + 10;
        conditions.push(format!("year >= {} AND year < {}", decade, end));
    }
    if let Some(min) = filter.min_duration_secs {
        conditions.push(format!("duration_secs >= {}", min));
    }
    if let Some(max) = filter.max_duration_secs {
        conditions.push(format!("duration_secs <= {}", max));
    }
    if !filter.series_names.is_empty() {
        let quoted: Vec<String> = filter.series_names.iter()
            .map(|s| format!("'{}'", s.replace('\'', "''")))
            .collect();
        conditions.push(format!("series_name IN ({})", quoted.join(",")));
    }
    if !filter.genres.is_empty() {
        // Genres are stored as a JSON array; any requested genre may match (OR).
        let genre_conditions: Vec<String> = filter.genres.iter()
            .map(|g| format!("EXISTS (SELECT 1 FROM json_each(library_items.genres) WHERE value = '{}')", g.replace('\'', "''")))
            .collect();
        conditions.push(format!("({})", genre_conditions.join(" OR ")));
    }
    if let Some(sn) = filter.season_number {
        conditions.push(format!("season_number = {}", sn));
    }
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    // The count query reuses the same WHERE so pagination totals stay consistent.
    let count_sql = format!("SELECT COUNT(*) FROM library_items {}", where_clause);
    let total: i64 = sqlx::query_scalar(&count_sql)
        .fetch_one(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    let items_sql = format!(
        "SELECT * FROM library_items {} ORDER BY title ASC LIMIT {} OFFSET {}",
        where_clause, filter.limit, filter.offset
    );
    let rows = sqlx::query_as::<_, LibraryItemRow>(&items_sql)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok((rows.into_iter().map(Into::into).collect(), total as u32))
}
/// Look up one cached library item by its id; `None` when absent.
async fn get_by_id(&self, id: &str) -> DomainResult<Option<LibraryItem>> {
    let found = sqlx::query_as::<_, LibraryItemRow>("SELECT * FROM library_items WHERE id = ?")
        .bind(id)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(found.map(Into::into))
}
/// Distinct collections (library folders) present in the cache, optionally
/// restricted to one provider. Items whose collection_id is NULL are skipped;
/// a NULL collection_name is rendered as "".
async fn list_collections(&self, provider_id: Option<&str>) -> DomainResult<Vec<LibraryCollection>> {
    let rows: Vec<(String, Option<String>, Option<String>)> = if let Some(p) = provider_id {
        sqlx::query_as::<_, (String, Option<String>, Option<String>)>(
            "SELECT DISTINCT collection_id, collection_name, collection_type
             FROM library_items WHERE collection_id IS NOT NULL AND provider_id = ?
             ORDER BY collection_name ASC"
        ).bind(p).fetch_all(&self.pool).await
    } else {
        sqlx::query_as::<_, (String, Option<String>, Option<String>)>(
            "SELECT DISTINCT collection_id, collection_name, collection_type
             FROM library_items WHERE collection_id IS NOT NULL
             ORDER BY collection_name ASC"
        ).fetch_all(&self.pool).await
    }.map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows.into_iter().map(|(id, name, ct)| LibraryCollection {
        id,
        name: name.unwrap_or_default(),
        collection_type: ct,
    }).collect())
}
/// Distinct series names (episode parent shows) in alphabetical order,
/// optionally restricted to one provider.
async fn list_series(&self, provider_id: Option<&str>) -> DomainResult<Vec<String>> {
    let rows: Vec<(String,)> = match provider_id {
        Some(p) => {
            sqlx::query_as(
                "SELECT DISTINCT series_name FROM library_items
                 WHERE series_name IS NOT NULL AND provider_id = ? ORDER BY series_name ASC"
            )
            .bind(p)
            .fetch_all(&self.pool)
            .await
        }
        None => {
            sqlx::query_as(
                "SELECT DISTINCT series_name FROM library_items
                 WHERE series_name IS NOT NULL ORDER BY series_name ASC"
            )
            .fetch_all(&self.pool)
            .await
        }
    }
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows.into_iter().map(|(name,)| name).collect())
}
/// Distinct genre values across cached items, optionally narrowed by content
/// type and/or provider. Genres are stored per item as a JSON array, hence
/// the `json_each` expansion.
async fn list_genres(&self, content_type: Option<&ContentType>, provider_id: Option<&str>) -> DomainResult<Vec<String>> {
    // One static SQL variant per filter combination. Interpolated values:
    // content_type_str returns fixed literals; provider_id has its single
    // quotes doubled (SQLite string-literal escaping).
    let sql = match (content_type, provider_id) {
        (Some(ct), Some(p)) => format!(
            "SELECT DISTINCT je.value FROM library_items li, json_each(li.genres) je
             WHERE li.content_type = '{}' AND li.provider_id = '{}' ORDER BY je.value ASC",
            content_type_str(ct), p.replace('\'', "''")
        ),
        (Some(ct), None) => format!(
            "SELECT DISTINCT je.value FROM library_items li, json_each(li.genres) je
             WHERE li.content_type = '{}' ORDER BY je.value ASC",
            content_type_str(ct)
        ),
        (None, Some(p)) => format!(
            "SELECT DISTINCT je.value FROM library_items li, json_each(li.genres) je
             WHERE li.provider_id = '{}' ORDER BY je.value ASC",
            p.replace('\'', "''")
        ),
        (None, None) => "SELECT DISTINCT je.value FROM library_items li, json_each(li.genres) je ORDER BY je.value ASC".to_string(),
    };
    let rows: Vec<(String,)> = sqlx::query_as(&sql)
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows.into_iter().map(|(s,)| s).collect())
}
/// Insert or replace a batch of items inside a single transaction so an
/// interrupted sync never leaves the cache half-written.
///
/// `_provider_id` is unused because every `LibraryItem` already carries its
/// own `provider_id` field.
async fn upsert_items(&self, _provider_id: &str, items: Vec<LibraryItem>) -> DomainResult<()> {
    let mut tx = self.pool.begin().await.map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    for item in items {
        // INSERT OR REPLACE: a re-synced item overwrites its previous row
        // (replacement keyed on the table's unique constraint — presumably
        // `id` is the primary key; confirm against the migration schema).
        sqlx::query(
            "INSERT OR REPLACE INTO library_items
             (id, provider_id, external_id, title, content_type, duration_secs,
              series_name, season_number, episode_number, year, genres, tags,
              collection_id, collection_name, collection_type, thumbnail_url, synced_at)
             VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
        )
        .bind(&item.id).bind(&item.provider_id).bind(&item.external_id)
        .bind(&item.title).bind(content_type_str(&item.content_type))
        .bind(item.duration_secs as i64)
        .bind(&item.series_name).bind(item.season_number.map(|n| n as i64))
        .bind(item.episode_number.map(|n| n as i64))
        .bind(item.year.map(|n| n as i64))
        .bind(serde_json::to_string(&item.genres).unwrap_or_default())
        .bind(serde_json::to_string(&item.tags).unwrap_or_default())
        .bind(&item.collection_id).bind(&item.collection_name)
        .bind(&item.collection_type).bind(&item.thumbnail_url)
        .bind(&item.synced_at)
        .execute(&mut *tx)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    }
    tx.commit().await.map_err(|e| DomainError::InfrastructureError(e.to_string()))
}
/// Delete every cached library item belonging to `provider_id`.
async fn clear_provider(&self, provider_id: &str) -> DomainResult<()> {
    sqlx::query("DELETE FROM library_items WHERE provider_id = ?")
        .bind(provider_id)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(())
}
/// Insert a 'running' sync-log row for `provider_id` and return its new id.
async fn log_sync_start(&self, provider_id: &str) -> DomainResult<i64> {
    let started_at = chrono::Utc::now().to_rfc3339();
    sqlx::query_scalar::<_, i64>(
        "INSERT INTO library_sync_log (provider_id, started_at, status)
         VALUES (?, ?, 'running') RETURNING id"
    )
    .bind(provider_id)
    .bind(&started_at)
    .fetch_one(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))
}
/// Close out a sync-log row with the final item count and status.
async fn log_sync_finish(&self, log_id: i64, result: &LibrarySyncResult) -> DomainResult<()> {
    let finished_at = chrono::Utc::now().to_rfc3339();
    // A sync that produced any error message is recorded as failed.
    let status = match result.error {
        None => "done",
        Some(_) => "error",
    };
    sqlx::query(
        "UPDATE library_sync_log
         SET finished_at = ?, items_found = ?, status = ?, error_msg = ?
         WHERE id = ?"
    )
    .bind(&finished_at)
    .bind(result.items_found as i64)
    .bind(status)
    .bind(&result.error)
    .bind(log_id)
    .execute(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(())
}
/// Most recent sync-log entry per provider (MAX(id) within each provider
/// group), ordered by start time, newest first.
async fn latest_sync_status(&self) -> DomainResult<Vec<LibrarySyncLogEntry>> {
    let rows = sqlx::query_as::<_, SyncLogRow>(
        "SELECT * FROM library_sync_log
         WHERE id IN (
             SELECT MAX(id) FROM library_sync_log GROUP BY provider_id
         )
         ORDER BY started_at DESC"
    )
    .fetch_all(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows.into_iter().map(|r| LibrarySyncLogEntry {
        id: r.id, provider_id: r.provider_id, started_at: r.started_at,
        finished_at: r.finished_at, items_found: r.items_found as u32,
        status: r.status, error_msg: r.error_msg,
    }).collect())
}
async fn is_sync_running(&self, provider_id: &str) -> DomainResult<bool> {
    // A provider counts as running while any of its log rows still has
    // status = 'running' (i.e. log_sync_finish has not been called yet).
    let running: i64 = sqlx::query_scalar(
        "SELECT COUNT(*) FROM library_sync_log WHERE provider_id = ? AND status = 'running'"
    )
    .bind(provider_id)
    .fetch_one(&self.pool)
    .await
    .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(running > 0)
}
/// Aggregate episodes into per-show summaries, optionally filtered by
/// provider, a title/series search term, and a genre list (OR semantics).
///
/// The WHERE clause is assembled with `?` placeholders and bound values
/// rather than string-interpolated (merely quote-escaped) literals, which
/// removes SQL-injection/quoting risk from user-supplied filters.
async fn list_shows(
    &self,
    provider_id: Option<&str>,
    search_term: Option<&str>,
    genres: &[String],
) -> DomainResult<Vec<ShowSummary>> {
    let mut conditions = vec![
        "content_type = 'episode'".to_string(),
        "series_name IS NOT NULL".to_string(),
    ];
    // Values are bound in the same order their placeholders are appended.
    let mut binds: Vec<String> = Vec::new();
    if let Some(p) = provider_id {
        conditions.push("provider_id = ?".to_string());
        binds.push(p.to_string());
    }
    if let Some(st) = search_term {
        // NOTE: `%`/`_` inside the term still act as LIKE wildcards, matching
        // the previous behavior.
        conditions.push("(title LIKE ? OR series_name LIKE ?)".to_string());
        let pattern = format!("%{}%", st);
        binds.push(pattern.clone());
        binds.push(pattern);
    }
    if !genres.is_empty() {
        // Genre filter: the item's JSON `genres` array must contain at least
        // one of the requested genres.
        let genre_conditions = vec![
            "EXISTS (SELECT 1 FROM json_each(library_items.genres) WHERE value = ?)";
            genres.len()
        ];
        conditions.push(format!("({})", genre_conditions.join(" OR ")));
        binds.extend(genres.iter().cloned());
    }
    let sql = format!(
        "SELECT series_name, COUNT(*) AS episode_count, COUNT(DISTINCT season_number) AS season_count, MAX(thumbnail_url) AS thumbnail_url, GROUP_CONCAT(genres, ',') AS genres_blob FROM library_items WHERE {} GROUP BY series_name ORDER BY series_name ASC",
        conditions.join(" AND ")
    );
    let mut query = sqlx::query_as::<_, ShowSummaryRow>(&sql);
    for value in &binds {
        query = query.bind(value);
    }
    let rows = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows
        .into_iter()
        .map(|r| {
            // genres_blob is a comma-joined list of JSON arrays such as
            // `["A","B"],["B","C"]`: split on `],[`, strip brackets/quotes,
            // dedupe via a set, then sort so the output order is
            // deterministic (HashSet iteration order is not).
            let mut genres: Vec<String> = r
                .genres_blob
                .split("],[")
                .flat_map(|chunk| {
                    let cleaned = chunk.trim_start_matches('[').trim_end_matches(']');
                    cleaned
                        .split(',')
                        .filter_map(|s| {
                            let s = s.trim().trim_matches('"');
                            if s.is_empty() { None } else { Some(s.to_string()) }
                        })
                        .collect::<Vec<_>>()
                })
                .collect::<std::collections::HashSet<_>>()
                .into_iter()
                .collect();
            genres.sort();
            ShowSummary {
                series_name: r.series_name,
                episode_count: r.episode_count as u32,
                season_count: r.season_count as u32,
                thumbnail_url: r.thumbnail_url,
                genres,
            }
        })
        .collect())
}
/// Group one show's episodes into per-season summaries.
///
/// Values are bound as parameters (not string-interpolated) to avoid
/// SQL-injection/quoting issues. Rows with a NULL `season_number` are
/// excluded: they carry no season information and would fail to decode
/// into `SeasonSummaryRow`'s non-nullable `season_number` column.
async fn list_seasons(
    &self,
    series_name: &str,
    provider_id: Option<&str>,
) -> DomainResult<Vec<SeasonSummary>> {
    let mut sql = String::from(
        "SELECT season_number, COUNT(*) AS episode_count, MAX(thumbnail_url) AS thumbnail_url \
         FROM library_items \
         WHERE series_name = ? AND content_type = 'episode' AND season_number IS NOT NULL",
    );
    if provider_id.is_some() {
        sql.push_str(" AND provider_id = ?");
    }
    sql.push_str(" GROUP BY season_number ORDER BY season_number ASC");
    let mut query = sqlx::query_as::<_, SeasonSummaryRow>(&sql).bind(series_name);
    if let Some(p) = provider_id {
        query = query.bind(p);
    }
    let rows = query
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::InfrastructureError(e.to_string()))?;
    Ok(rows
        .into_iter()
        .map(|r| SeasonSummary {
            season_number: r.season_number as u32,
            episode_count: r.episode_count as u32,
            thumbnail_url: r.thumbnail_url,
        })
        .collect())
}
}
// ── SQLx row types ─────────────────────────────────────────────────────────
/// Raw SQLite row for `library_items`; converted into the domain
/// `LibraryItem` via the `From` impl below. Field names must match the
/// table's column names for `FromRow` derivation.
#[derive(sqlx::FromRow)]
struct LibraryItemRow {
    id: String,
    provider_id: String,
    external_id: String,
    title: String,
    content_type: String,
    duration_secs: i64,
    series_name: Option<String>,
    season_number: Option<i64>,
    episode_number: Option<i64>,
    year: Option<i64>,
    // JSON-encoded string arrays.
    genres: String,
    tags: String,
    collection_id: Option<String>,
    collection_name: Option<String>,
    collection_type: Option<String>,
    thumbnail_url: Option<String>,
    synced_at: String,
}
impl From<LibraryItemRow> for LibraryItem {
fn from(r: LibraryItemRow) -> Self {
Self {
id: r.id, provider_id: r.provider_id, external_id: r.external_id,
title: r.title, content_type: parse_content_type(&r.content_type),
duration_secs: r.duration_secs as u32,
series_name: r.series_name,
season_number: r.season_number.map(|n| n as u32),
episode_number: r.episode_number.map(|n| n as u32),
year: r.year.map(|n| n as u16),
genres: serde_json::from_str(&r.genres).unwrap_or_default(),
tags: serde_json::from_str(&r.tags).unwrap_or_default(),
collection_id: r.collection_id, collection_name: r.collection_name,
collection_type: r.collection_type, thumbnail_url: r.thumbnail_url,
synced_at: r.synced_at,
}
}
}
/// Raw SQLite row for `library_sync_log`.
#[derive(sqlx::FromRow)]
struct SyncLogRow {
    id: i64,
    provider_id: String,
    started_at: String,
    finished_at: Option<String>,
    items_found: i64,
    status: String,
    error_msg: Option<String>,
}
/// Aggregation row produced by the `list_shows` GROUP BY query.
#[derive(sqlx::FromRow)]
struct ShowSummaryRow {
    series_name: String,
    episode_count: i64,
    season_count: i64,
    thumbnail_url: Option<String>,
    // GROUP_CONCAT of the per-episode JSON genre arrays; parsed and
    // deduplicated in `list_shows`.
    genres_blob: String,
}
/// Aggregation row produced by the `list_seasons` GROUP BY query.
#[derive(sqlx::FromRow)]
struct SeasonSummaryRow {
    // NOTE(review): non-nullable here even though the library_items schema
    // allows a NULL season_number — queries decoding into this row must not
    // return NULL season_number values.
    season_number: i64,
    episode_count: i64,
    thumbnail_url: Option<String>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use sqlx::SqlitePool;
    use sqlx::sqlite::SqlitePoolOptions;
    use domain::{LibraryItem, LibrarySearchFilter, ContentType};

    /// Build an in-memory SQLite pool with the library schema applied.
    ///
    /// The pool is capped at a single connection: with `:memory:`, every NEW
    /// pooled connection opens its own fresh, empty database, so a second
    /// connection would not see the tables created below and tests could
    /// fail intermittently.
    async fn setup() -> SqlitePool {
        let pool = SqlitePoolOptions::new()
            .max_connections(1)
            .connect(":memory:")
            .await
            .unwrap();
        sqlx::query(
            "CREATE TABLE library_items (
                id TEXT PRIMARY KEY, provider_id TEXT NOT NULL, external_id TEXT NOT NULL,
                title TEXT NOT NULL, content_type TEXT NOT NULL, duration_secs INTEGER NOT NULL DEFAULT 0,
                series_name TEXT, season_number INTEGER, episode_number INTEGER, year INTEGER,
                genres TEXT NOT NULL DEFAULT '[]', tags TEXT NOT NULL DEFAULT '[]',
                collection_id TEXT, collection_name TEXT, collection_type TEXT,
                thumbnail_url TEXT, synced_at TEXT NOT NULL
            )"
        ).execute(&pool).await.unwrap();
        sqlx::query(
            "CREATE TABLE library_sync_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT, provider_id TEXT NOT NULL,
                started_at TEXT NOT NULL, finished_at TEXT, items_found INTEGER NOT NULL DEFAULT 0,
                status TEXT NOT NULL DEFAULT 'running', error_msg TEXT
            )"
        ).execute(&pool).await.unwrap();
        pool
    }

    /// Minimal movie fixture owned by the given provider.
    fn make_item(id: &str, provider: &str, title: &str) -> LibraryItem {
        LibraryItem {
            id: id.to_string(), provider_id: provider.to_string(), external_id: id.to_string(),
            title: title.to_string(), content_type: ContentType::Movie,
            duration_secs: 3600, series_name: None, season_number: None, episode_number: None,
            year: Some(2020), genres: vec!["Action".to_string()], tags: vec![],
            collection_id: None, collection_name: None, collection_type: None,
            thumbnail_url: None, synced_at: "2026-03-19T00:00:00Z".to_string(),
        }
    }

    #[tokio::test]
    async fn upsert_then_search_returns_items() {
        let pool = setup().await;
        let repo = SqliteLibraryRepository::new(pool);
        let items = vec![make_item("jellyfin::1", "jellyfin", "Movie A")];
        repo.upsert_items("jellyfin", items).await.unwrap();
        let (results, total) = repo.search(&LibrarySearchFilter { limit: 50, ..Default::default() }).await.unwrap();
        assert_eq!(total, 1);
        assert_eq!(results[0].title, "Movie A");
    }

    #[tokio::test]
    async fn clear_provider_removes_only_that_provider() {
        let pool = setup().await;
        let repo = SqliteLibraryRepository::new(pool);
        repo.upsert_items("jellyfin", vec![make_item("jellyfin::1", "jellyfin", "Jelly Movie")]).await.unwrap();
        repo.upsert_items("local", vec![make_item("local::1", "local", "Local Movie")]).await.unwrap();
        repo.clear_provider("jellyfin").await.unwrap();
        let (results, _) = repo.search(&LibrarySearchFilter { limit: 50, ..Default::default() }).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].provider_id, "local");
    }

    #[tokio::test]
    async fn is_sync_running_reflects_status() {
        let pool = setup().await;
        let repo = SqliteLibraryRepository::new(pool);
        assert!(!repo.is_sync_running("jellyfin").await.unwrap());
        let log_id = repo.log_sync_start("jellyfin").await.unwrap();
        assert!(repo.is_sync_running("jellyfin").await.unwrap());
        let result = domain::LibrarySyncResult {
            provider_id: "jellyfin".to_string(), items_found: 5, duration_ms: 100, error: None,
        };
        repo.log_sync_finish(log_id, &result).await.unwrap();
        assert!(!repo.is_sync_running("jellyfin").await.unwrap());
    }
}

View File

@@ -0,0 +1,249 @@
//! Full-sync library sync adapter: truncate + re-insert all provider items.
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;
use async_trait::async_trait;
use domain::{
Collection, ILibraryRepository, IMediaProvider, LibraryItem,
LibrarySyncAdapter, LibrarySyncResult, MediaFilter,
};
/// Sync adapter that replaces a provider's cached library wholesale:
/// every sync clears the provider's rows, then inserts the fresh snapshot.
pub struct FullSyncAdapter {
    // Destination repository for synced items and sync-log bookkeeping.
    repo: Arc<dyn ILibraryRepository>,
}
impl FullSyncAdapter {
pub fn new(repo: Arc<dyn ILibraryRepository>) -> Self {
Self { repo }
}
}
#[async_trait]
impl LibrarySyncAdapter for FullSyncAdapter {
    /// Run a full sync for `provider_id`:
    /// 1. refuse if a sync for this provider is already running,
    /// 2. open a sync-log entry,
    /// 3. fetch collections (best-effort, for name/type enrichment) and all items,
    /// 4. clear the provider's cached rows and insert the fresh snapshot,
    /// 5. close the sync-log entry with the outcome.
    ///
    /// Never panics or returns early in an inconsistent state: every failure
    /// is reported through `LibrarySyncResult.error`, and once a log entry is
    /// open it is always finished.
    async fn sync_provider(
        &self,
        provider: &dyn IMediaProvider,
        provider_id: &str,
    ) -> LibrarySyncResult {
        // All failure paths share the same result shape; centralize it so
        // each error site is a one-liner instead of a repeated struct literal.
        fn failure(provider_id: &str, duration_ms: u64, msg: String) -> LibrarySyncResult {
            LibrarySyncResult {
                provider_id: provider_id.to_string(),
                items_found: 0,
                duration_ms,
                error: Some(msg),
            }
        }
        let start = Instant::now();
        // Guard against concurrent syncs for the same provider.
        match self.repo.is_sync_running(provider_id).await {
            Ok(false) => {}
            Ok(true) => return failure(provider_id, 0, "sync already running".to_string()),
            Err(e) => return failure(provider_id, 0, e.to_string()),
        }
        let log_id = match self.repo.log_sync_start(provider_id).await {
            Ok(id) => id,
            Err(e) => return failure(provider_id, start.elapsed().as_millis() as u64, e.to_string()),
        };
        // Fetch collections for name/type enrichment — build a lookup map.
        // Best-effort: a failure here just disables enrichment.
        let collections: Vec<Collection> = provider.list_collections().await.unwrap_or_default();
        let collection_map: HashMap<String, &Collection> =
            collections.iter().map(|c| (c.id.clone(), c)).collect();
        // Fetch all items from the provider.
        let media_items = match provider.fetch_items(&MediaFilter::default()).await {
            Ok(items) => items,
            Err(e) => {
                let result = failure(provider_id, start.elapsed().as_millis() as u64, e.to_string());
                let _ = self.repo.log_sync_finish(log_id, &result).await;
                return result;
            }
        };
        let items_found = media_items.len() as u32;
        let now = chrono::Utc::now().to_rfc3339();
        let library_items: Vec<LibraryItem> = media_items
            .into_iter()
            .map(|item| {
                let raw_id = item.id.into_inner();
                // Namespace the provider's native id so ids stay unique
                // across providers.
                let id = format!("{}::{}", provider_id, raw_id);
                // Enrich with collection name/type using the lookup map.
                let (col_name, col_type) = item
                    .collection_id
                    .as_deref()
                    .and_then(|cid| collection_map.get(cid))
                    .map(|c| (Some(c.name.clone()), c.collection_type.clone()))
                    .unwrap_or((None, None));
                LibraryItem {
                    id,
                    provider_id: provider_id.to_string(),
                    external_id: raw_id,
                    title: item.title,
                    content_type: item.content_type,
                    duration_secs: item.duration_secs,
                    series_name: item.series_name,
                    season_number: item.season_number,
                    episode_number: item.episode_number,
                    year: item.year,
                    genres: item.genres,
                    tags: item.tags,
                    collection_id: item.collection_id,
                    collection_name: col_name,
                    collection_type: col_type,
                    thumbnail_url: item.thumbnail_url,
                    synced_at: now.clone(),
                }
            })
            .collect();
        // Truncate, then insert the fresh snapshot.
        if let Err(e) = self.repo.clear_provider(provider_id).await {
            let result = failure(provider_id, start.elapsed().as_millis() as u64, e.to_string());
            let _ = self.repo.log_sync_finish(log_id, &result).await;
            return result;
        }
        let result = match self.repo.upsert_items(provider_id, library_items).await {
            Ok(()) => LibrarySyncResult {
                provider_id: provider_id.to_string(),
                items_found,
                duration_ms: start.elapsed().as_millis() as u64,
                error: None,
            },
            Err(e) => failure(provider_id, start.elapsed().as_millis() as u64, e.to_string()),
        };
        let _ = self.repo.log_sync_finish(log_id, &result).await;
        result
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Arc, Mutex};
    use async_trait::async_trait;
    use domain::*;

    /// Stub media provider that serves a fixed in-memory item list.
    struct MockProvider {
        items: Vec<MediaItem>,
    }
    #[async_trait]
    impl IMediaProvider for MockProvider {
        fn capabilities(&self) -> ProviderCapabilities {
            ProviderCapabilities {
                collections: true,
                series: false,
                genres: false,
                tags: false,
                decade: false,
                search: false,
                streaming_protocol: StreamingProtocol::Hls,
                rescan: false,
                transcode: false,
            }
        }
        async fn fetch_items(&self, _filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
            Ok(self.items.clone())
        }
        // The remaining trait methods are not exercised by the adapter under
        // test; they return neutral defaults.
        async fn fetch_by_id(&self, _id: &MediaItemId) -> DomainResult<Option<MediaItem>> { Ok(None) }
        async fn get_stream_url(&self, _id: &MediaItemId, _q: &StreamQuality) -> DomainResult<String> { Ok(String::new()) }
        async fn list_collections(&self) -> DomainResult<Vec<Collection>> { Ok(vec![]) }
        async fn list_series(&self, _col: Option<&str>) -> DomainResult<Vec<SeriesSummary>> { Ok(vec![]) }
        async fn list_genres(&self, _ct: Option<&ContentType>) -> DomainResult<Vec<String>> { Ok(vec![]) }
    }

    /// Spy repository recording which providers were cleared and which items
    /// were upserted, so the test can assert on the adapter's side effects.
    struct SpyRepo {
        upserted: Arc<Mutex<Vec<LibraryItem>>>,
        cleared: Arc<Mutex<Vec<String>>>,
    }
    #[async_trait]
    impl ILibraryRepository for SpyRepo {
        // Read-side methods return empty results; only the write side is spied.
        async fn search(&self, _f: &LibrarySearchFilter) -> DomainResult<(Vec<LibraryItem>, u32)> { Ok((vec![], 0)) }
        async fn get_by_id(&self, _id: &str) -> DomainResult<Option<LibraryItem>> { Ok(None) }
        async fn list_collections(&self, _p: Option<&str>) -> DomainResult<Vec<LibraryCollection>> { Ok(vec![]) }
        async fn list_series(&self, _p: Option<&str>) -> DomainResult<Vec<String>> { Ok(vec![]) }
        async fn list_genres(&self, _ct: Option<&ContentType>, _p: Option<&str>) -> DomainResult<Vec<String>> { Ok(vec![]) }
        async fn upsert_items(&self, _pid: &str, items: Vec<LibraryItem>) -> DomainResult<()> {
            self.upserted.lock().unwrap().extend(items);
            Ok(())
        }
        async fn clear_provider(&self, pid: &str) -> DomainResult<()> {
            self.cleared.lock().unwrap().push(pid.to_string());
            Ok(())
        }
        async fn log_sync_start(&self, _pid: &str) -> DomainResult<i64> { Ok(1) }
        async fn log_sync_finish(&self, _id: i64, _r: &LibrarySyncResult) -> DomainResult<()> { Ok(()) }
        async fn latest_sync_status(&self) -> DomainResult<Vec<LibrarySyncLogEntry>> { Ok(vec![]) }
        async fn is_sync_running(&self, _pid: &str) -> DomainResult<bool> { Ok(false) }
        async fn list_shows(&self, _p: Option<&str>, _st: Option<&str>, _g: &[String]) -> DomainResult<Vec<domain::ShowSummary>> { Ok(vec![]) }
        async fn list_seasons(&self, _sn: &str, _p: Option<&str>) -> DomainResult<Vec<domain::SeasonSummary>> { Ok(vec![]) }
    }

    /// Happy path: one item flows through clear -> upsert, and the result
    /// reports one found item with no error.
    #[tokio::test]
    async fn sync_clears_then_upserts_items() {
        let upserted = Arc::new(Mutex::new(vec![]));
        let cleared = Arc::new(Mutex::new(vec![]));
        let repo: Arc<dyn ILibraryRepository> = Arc::new(SpyRepo {
            upserted: Arc::clone(&upserted),
            cleared: Arc::clone(&cleared),
        });
        let adapter = FullSyncAdapter::new(Arc::clone(&repo));
        let provider = MockProvider {
            items: vec![MediaItem {
                id: MediaItemId::new("abc".to_string()),
                title: "Test Movie".to_string(),
                content_type: ContentType::Movie,
                duration_secs: 3600,
                description: None,
                series_name: None,
                season_number: None,
                episode_number: None,
                year: None,
                genres: vec![],
                tags: vec![],
                thumbnail_url: None,
                collection_id: None,
            }],
        };
        let result = adapter.sync_provider(&provider, "jellyfin").await;
        assert!(result.error.is_none());
        assert_eq!(result.items_found, 1);
        assert_eq!(cleared.lock().unwrap().as_slice(), &["jellyfin"]);
        assert_eq!(upserted.lock().unwrap().len(), 1);
    }
}

View File

@@ -0,0 +1,13 @@
use std::path::PathBuf;
/// Configuration for the local files media provider.
pub struct LocalFilesConfig {
    /// Root directory containing video files. All files are served relative to this.
    pub root_dir: PathBuf,
    /// Public base URL of this API server, used to build stream URLs.
    /// Any trailing `/` is stripped by the provider when stored.
    pub base_url: String,
    /// Directory for FFmpeg HLS transcode cache. `None` disables transcoding.
    pub transcode_dir: Option<PathBuf>,
    /// How long (hours) to keep transcode cache entries. Passed to TranscodeManager.
    pub cleanup_ttl_hours: u32,
}

View File

@@ -0,0 +1,188 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use chrono::Utc;
use tokio::sync::RwLock;
use tracing::{error, info};
use domain::MediaItemId;
use super::config::LocalFilesConfig;
use super::scanner::{scan_dir, LocalFileItem};
/// Encode a rel-path string into a URL-safe, padding-free base64 MediaItemId.
pub fn encode_id(rel_path: &str) -> MediaItemId {
    use base64::Engine as _;
    let encoded = base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(rel_path.as_bytes());
    MediaItemId::new(encoded)
}
/// Decode a MediaItemId back to a relative path string.
///
/// Returns `None` if the id is not valid base64 or not valid UTF-8.
pub fn decode_id(id: &MediaItemId) -> Option<String> {
    use base64::Engine as _;
    base64::engine::general_purpose::URL_SAFE_NO_PAD
        .decode(id.as_ref())
        .ok()
        .and_then(|bytes| String::from_utf8(bytes).ok())
}
/// In-memory (+ SQLite-backed) index of local video files.
///
/// On startup the index is populated from the SQLite cache so the provider can
/// serve requests immediately. A background task calls `rescan()` to pick up
/// any changes on disk and write them back to the cache.
pub struct LocalIndex {
    // id -> scanned file metadata; async RwLock so reads can proceed
    // concurrently while rescans take the write lock only briefly.
    items: Arc<RwLock<HashMap<MediaItemId, LocalFileItem>>>,
    /// Root directory every `rel_path` is resolved against.
    pub root_dir: PathBuf,
    // Scopes rows in the shared local_files_index table to this provider.
    provider_id: String,
    // SQLite pool backing the persistent cache.
    pool: sqlx::SqlitePool,
}
impl LocalIndex {
    /// Create the index, immediately loading persisted entries from SQLite.
    pub async fn new(config: &LocalFilesConfig, pool: sqlx::SqlitePool, provider_id: String) -> Self {
        let index = Self {
            items: Arc::new(RwLock::new(HashMap::new())),
            root_dir: config.root_dir.clone(),
            provider_id,
            pool,
        };
        index.load_from_db().await;
        index
    }

    /// Load previously scanned items from SQLite (instant on startup).
    async fn load_from_db(&self) {
        // Local row type mirroring the local_files_index columns; field names
        // must match column names for FromRow.
        #[derive(sqlx::FromRow)]
        struct CachedRow {
            id: String,
            rel_path: String,
            title: String,
            duration_secs: i64,
            year: Option<i64>,
            tags: String,
            top_dir: String,
        }
        let fetched = sqlx::query_as::<_, CachedRow>(
            "SELECT id, rel_path, title, duration_secs, year, tags, top_dir \
             FROM local_files_index WHERE provider_id = ?",
        )
        .bind(&self.provider_id)
        .fetch_all(&self.pool)
        .await;
        let rows = match fetched {
            Ok(rows) => rows,
            Err(e) => {
                // Table might not exist yet on first run — that's fine.
                tracing::debug!("Could not load local files index from DB: {}", e);
                return;
            }
        };
        let mut guard = self.items.write().await;
        for row in rows {
            let parsed_tags: Vec<String> = serde_json::from_str(&row.tags).unwrap_or_default();
            guard.insert(
                MediaItemId::new(row.id),
                LocalFileItem {
                    rel_path: row.rel_path,
                    title: row.title,
                    duration_secs: row.duration_secs as u32,
                    year: row.year.map(|y| y as u16),
                    tags: parsed_tags,
                    top_dir: row.top_dir,
                },
            );
        }
        info!("Local files index [{}]: loaded {} items from DB", self.provider_id, guard.len());
    }

    /// Scan the filesystem for video files and rebuild the index.
    ///
    /// Returns the number of items found. Called on startup (background task)
    /// and via `POST /files/rescan`.
    pub async fn rescan(&self) -> u32 {
        info!("Local files [{}]: scanning {:?}", self.provider_id, self.root_dir);
        let scanned = scan_dir(&self.root_dir).await;
        let count = scanned.len() as u32;
        // Replace the in-memory map wholesale; the write lock is held only
        // for the swap itself.
        {
            let mut guard = self.items.write().await;
            *guard = scanned
                .iter()
                .map(|item| (encode_id(&item.rel_path), item.clone()))
                .collect();
        }
        // Persist so the next startup can load instantly.
        if let Err(e) = self.save_to_db(&scanned).await {
            error!("Failed to persist local files index: {}", e);
        }
        info!("Local files [{}]: indexed {} items", self.provider_id, count);
        count
    }

    /// Rebuild this provider's rows in one transaction: delete everything
    /// scoped to the provider, then insert the fresh scan results.
    async fn save_to_db(&self, items: &[LocalFileItem]) -> Result<(), sqlx::Error> {
        let mut txn = self.pool.begin().await?;
        sqlx::query("DELETE FROM local_files_index WHERE provider_id = ?")
            .bind(&self.provider_id)
            .execute(&mut *txn)
            .await?;
        let scanned_at = Utc::now().to_rfc3339();
        for item in items {
            let encoded = encode_id(&item.rel_path).into_inner();
            let tags_json = serde_json::to_string(&item.tags).unwrap_or_else(|_| "[]".into());
            sqlx::query(
                "INSERT INTO local_files_index \
                 (id, rel_path, title, duration_secs, year, tags, top_dir, scanned_at, provider_id) \
                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
            )
            .bind(&encoded)
            .bind(&item.rel_path)
            .bind(&item.title)
            .bind(item.duration_secs as i64)
            .bind(item.year.map(|y| y as i64))
            .bind(&tags_json)
            .bind(&item.top_dir)
            .bind(&scanned_at)
            .bind(&self.provider_id)
            .execute(&mut *txn)
            .await?;
        }
        txn.commit().await
    }

    /// Look up a single item by id (cloned out of the shared map).
    pub async fn get(&self, id: &MediaItemId) -> Option<LocalFileItem> {
        self.items.read().await.get(id).cloned()
    }

    /// Snapshot the whole index as owned (id, item) pairs.
    pub async fn get_all(&self) -> Vec<(MediaItemId, LocalFileItem)> {
        let guard = self.items.read().await;
        guard
            .iter()
            .map(|(id, item)| (id.clone(), item.clone()))
            .collect()
    }

    /// Return unique top-level directories as collection names, sorted.
    pub async fn collections(&self) -> Vec<String> {
        let guard = self.items.read().await;
        // BTreeSet deduplicates and yields in sorted order in one pass.
        let unique: std::collections::BTreeSet<String> =
            guard.values().map(|item| item.top_dir.clone()).collect();
        unique.into_iter().collect()
    }
}

View File

@@ -0,0 +1,10 @@
pub mod config;
pub mod index;
pub mod provider;
pub mod scanner;
pub mod transcoder;
pub use config::LocalFilesConfig;
pub use index::LocalIndex;
pub use provider::{LocalFilesProvider, decode_stream_id};
pub use transcoder::TranscodeManager;

View File

@@ -0,0 +1,186 @@
use std::sync::Arc;
use async_trait::async_trait;
use domain::{
Collection, ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem,
MediaItemId, ProviderCapabilities, StreamQuality, StreamingProtocol,
};
use super::config::LocalFilesConfig;
use super::index::{LocalIndex, decode_id};
use super::scanner::LocalFileItem;
use super::transcoder::TranscodeManager;
/// `IMediaProvider` backed by a scanned directory of local video files.
pub struct LocalFilesProvider {
    /// Shared file index (also consulted by the HTTP stream routes).
    pub index: Arc<LocalIndex>,
    // Base URL with any trailing '/' stripped (see `new`).
    base_url: String,
    // Present only when transcoding is enabled in the config.
    transcode_manager: Option<Arc<TranscodeManager>>,
}
const SHORT_DURATION_SECS: u32 = 1200; // 20 minutes
impl LocalFilesProvider {
pub fn new(
index: Arc<LocalIndex>,
config: LocalFilesConfig,
transcode_manager: Option<Arc<TranscodeManager>>,
) -> Self {
Self {
index,
base_url: config.base_url.trim_end_matches('/').to_string(),
transcode_manager,
}
}
}
fn to_media_item(id: MediaItemId, item: &LocalFileItem) -> MediaItem {
let content_type = if item.duration_secs < 1200 {
ContentType::Short
} else {
ContentType::Movie
};
MediaItem {
id,
title: item.title.clone(),
content_type,
duration_secs: item.duration_secs,
description: None,
genres: vec![],
year: item.year,
tags: item.tags.clone(),
series_name: None,
season_number: None,
episode_number: None,
thumbnail_url: None,
collection_id: None,
}
}
#[async_trait]
impl IMediaProvider for LocalFilesProvider {
    /// Advertised capabilities depend on whether a transcode manager was
    /// configured: with one we serve HLS, otherwise direct file streaming.
    fn capabilities(&self) -> ProviderCapabilities {
        ProviderCapabilities {
            collections: true,
            series: false,
            genres: false,
            tags: true,
            decade: true,
            search: true,
            streaming_protocol: if self.transcode_manager.is_some() {
                StreamingProtocol::Hls
            } else {
                StreamingProtocol::DirectFile
            },
            rescan: true,
            transcode: self.transcode_manager.is_some(),
        }
    }
    /// Filter the in-memory index against `filter`. All criteria are applied
    /// in-process; an item must pass every active criterion to be returned.
    async fn fetch_items(&self, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
        let all = self.index.get_all().await;
        let results = all
            .into_iter()
            .filter_map(|(id, item)| {
                // content_type: derive heuristically, then filter
                let content_type = if item.duration_secs < SHORT_DURATION_SECS {
                    ContentType::Short
                } else {
                    ContentType::Movie
                };
                if let Some(ref ct) = filter.content_type && &content_type != ct {
                    return None;
                }
                // collections: match against top_dir
                if !filter.collections.is_empty() && !filter.collections.contains(&item.top_dir) {
                    return None;
                }
                // tags: OR — item must have at least one matching tag
                // (case-insensitive comparison)
                if !filter.tags.is_empty() {
                    let has = filter
                        .tags
                        .iter()
                        .any(|tag| item.tags.iter().any(|t| t.eq_ignore_ascii_case(tag)));
                    if !has {
                        return None;
                    }
                }
                // decade: year in [decade, decade+9]; items with no year are
                // excluded when a decade filter is active
                if let Some(decade) = filter.decade {
                    match item.year {
                        Some(y) if y >= decade && y <= decade + 9 => {}
                        _ => return None,
                    }
                }
                // duration bounds (inclusive)
                if let Some(min) = filter.min_duration_secs && item.duration_secs < min {
                    return None;
                }
                if let Some(max) = filter.max_duration_secs && item.duration_secs > max {
                    return None;
                }
                // search_term: case-insensitive substring in title
                if let Some(ref q) = filter.search_term && !item.title.to_lowercase().contains(&q.to_lowercase()) {
                    return None;
                }
                Some(to_media_item(id, &item))
            })
            .collect();
        Ok(results)
    }
    /// Look up a single item; `None` when the id is unknown to the index.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        Ok(self
            .index
            .get(item_id)
            .await
            .map(|item| to_media_item(item_id.clone(), &item)))
    }
    /// Build the playback URL. A transcode request (when a manager exists)
    /// first blocks until the HLS playlist is ready, then returns its URL;
    /// every other case falls through to the direct-file stream route.
    async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
        match quality {
            StreamQuality::Transcode(_) if self.transcode_manager.is_some() => {
                // Guard above proves is_some(); unwrap cannot panic.
                let tm = self.transcode_manager.as_ref().unwrap();
                let rel = decode_id(item_id).ok_or_else(|| {
                    DomainError::InfrastructureError("invalid item id encoding".into())
                })?;
                let src = self.index.root_dir.join(&rel);
                // Blocks until the initial playlist exists (or fails).
                tm.ensure_transcoded(item_id.as_ref(), &src).await?;
                Ok(format!(
                    "{}/api/v1/files/transcode/{}/playlist.m3u8",
                    self.base_url,
                    item_id.as_ref()
                ))
            }
            _ => Ok(format!(
                "{}/api/v1/files/stream/{}",
                self.base_url,
                item_id.as_ref()
            )),
        }
    }
    /// Expose top-level directories under the scan root as collections
    /// (id == name == directory name).
    async fn list_collections(&self) -> DomainResult<Vec<Collection>> {
        let dirs = self.index.collections().await;
        Ok(dirs
            .into_iter()
            .map(|d| Collection {
                id: d.clone(),
                name: d,
                collection_type: None,
            })
            .collect())
    }
}
/// Decode an encoded ID from a URL path segment to its relative path string.
pub fn decode_stream_id(encoded: &str) -> Option<String> {
    let id = MediaItemId::new(encoded);
    decode_id(&id)
}

View File

@@ -0,0 +1,161 @@
use std::path::Path;
use tokio::process::Command;
const VIDEO_EXTENSIONS: &[&str] = &["mp4", "mkv", "avi", "mov", "webm", "m4v"];
/// In-memory representation of a scanned local video file.
#[derive(Debug, Clone)]
pub struct LocalFileItem {
    /// Relative path from root, with forward slashes (used as the stable ID source).
    pub rel_path: String,
    /// File stem with `_`/`-`/`.` replaced by spaces, trimmed.
    pub title: String,
    /// Duration from ffprobe; 0 when the file could not be probed.
    pub duration_secs: u32,
    /// First plausible 4-digit year found in the filename or parent dirs.
    pub year: Option<u16>,
    /// Ancestor directory names between root and file (excluding root itself).
    pub tags: Vec<String>,
    /// First path component under root (used as collection id/name);
    /// `"__root__"` for files directly at the root.
    pub top_dir: String,
}
/// Walk `root` and return all recognised video files with metadata.
///
/// ffprobe is called for each file to determine duration. Files that cannot be
/// probed are included with `duration_secs = 0` so they still appear in the index.
pub async fn scan_dir(root: &Path) -> Vec<LocalFileItem> {
let mut items = Vec::new();
let walker = walkdir::WalkDir::new(root).follow_links(true);
for entry in walker.into_iter().filter_map(|e| e.ok()) {
if !entry.file_type().is_file() {
continue;
}
let path = entry.path();
let ext = path
.extension()
.and_then(|e| e.to_str())
.map(|e| e.to_lowercase());
let ext = match ext {
Some(ref e) if VIDEO_EXTENSIONS.contains(&e.as_str()) => e.clone(),
_ => continue,
};
let _ = ext; // extension validated, not needed further
let rel = match path.strip_prefix(root) {
Ok(r) => r,
Err(_) => continue,
};
// Normalise to forward-slash string for cross-platform stability.
let rel_path: String = rel
.components()
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.collect::<Vec<_>>()
.join("/");
// Top-level directory under root.
let top_dir = rel
.components()
.next()
.filter(|_| rel.components().count() > 1) // skip if file is at root level
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.unwrap_or_else(|| "__root__".to_string());
// Title: stem with separator chars replaced by spaces.
let stem = path
.file_stem()
.and_then(|s| s.to_str())
.unwrap_or("")
.to_string();
let title = stem.replace(['_', '-', '.'], " ");
let title = title.trim().to_string();
// Year: first 4-digit number starting with 19xx or 20xx in filename or parent dirs.
let search_str = format!(
"{} {}",
stem,
rel.parent()
.and_then(|p| p.to_str())
.unwrap_or("")
);
let year = extract_year(&search_str);
// Tags: ancestor directory components between root and the file.
let tags: Vec<String> = rel
.parent()
.into_iter()
.flat_map(|p| p.components())
.map(|c| c.as_os_str().to_string_lossy().into_owned())
.filter(|s| !s.is_empty())
.collect();
let duration_secs = get_duration(path).await.unwrap_or(0);
items.push(LocalFileItem {
rel_path,
title,
duration_secs,
year,
tags,
top_dir,
});
}
items
}
/// Extract the first plausible 4-digit year (1900–2099) from `s`.
///
/// A candidate is four consecutive ASCII digits that parse into the
/// 1900..=2099 range and are not adjacent to another digit on either side
/// (word boundary), scanning left to right.
fn extract_year(s: &str) -> Option<u16> {
    let cs: Vec<char> = s.chars().collect();
    for (i, window) in cs.windows(4).enumerate() {
        if !window.iter().all(|c| c.is_ascii_digit()) {
            continue;
        }
        let candidate: String = window.iter().collect();
        // Four ASCII digits always fit in u16, so this parse cannot fail.
        let year: u16 = candidate.parse().ok()?;
        if !(1900..=2099).contains(&year) {
            continue;
        }
        // Word boundary: neither neighbour may be a digit.
        let digit_before = i > 0 && cs[i - 1].is_ascii_digit();
        let digit_after = cs.get(i + 4).is_some_and(|c| c.is_ascii_digit());
        if !digit_before && !digit_after {
            return Some(year);
        }
    }
    None
}
/// Run ffprobe to get the duration of `path` in whole seconds.
///
/// Returns `None` when the path is not valid UTF-8, ffprobe cannot be
/// spawned, or its JSON output has no parseable duration.
async fn get_duration(path: &Path) -> Option<u32> {
    #[derive(serde::Deserialize)]
    struct ProbeFormat {
        duration: Option<String>,
    }
    #[derive(serde::Deserialize)]
    struct ProbeOutput {
        format: ProbeFormat,
    }
    let path_str = path.to_str()?;
    let output = Command::new("ffprobe")
        .args(["-v", "quiet", "-print_format", "json", "-show_format", path_str])
        .output()
        .await
        .ok()?;
    let parsed: ProbeOutput = serde_json::from_slice(&output.stdout).ok()?;
    // ffprobe reports duration as a decimal string; truncate to whole seconds.
    let seconds: f64 = parsed.format.duration?.parse().ok()?;
    Some(seconds as u32)
}

View File

@@ -0,0 +1,252 @@
//! FFmpeg HLS transcoder for local video files.
//!
//! `TranscodeManager` orchestrates on-demand transcoding: the first request for
//! an item spawns an ffmpeg process and returns once the initial HLS playlist
//! appears. Concurrent requests for the same item subscribe to a watch channel
//! and wait without spawning duplicate processes. Transcoded segments are cached
//! in `transcode_dir/{item_id}/` and cleaned up by a background task.
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{
Arc,
atomic::{AtomicU32, Ordering},
};
use std::time::{Duration, Instant};
use tokio::sync::{Mutex, watch};
use tracing::{info, warn, error};
use domain::{DomainError, DomainResult};
// ============================================================================
// Types
// ============================================================================
/// Terminal outcome of a transcode job, broadcast to all waiters.
#[derive(Clone, Debug)]
pub enum TranscodeStatus {
    // The initial HLS playlist exists; playback can start.
    Ready,
    // ffmpeg failed; the message describes the error.
    Failed(String),
}
// ============================================================================
// Manager
// ============================================================================
pub struct TranscodeManager {
    /// Root of the HLS cache; one subdirectory per item id.
    pub transcode_dir: PathBuf,
    // Cleanup TTL (hours); atomic so it can be updated at runtime without
    // locking (see set_cleanup_ttl / get_cleanup_ttl).
    cleanup_ttl_hours: Arc<AtomicU32>,
    // item id -> watch channel for that item's in-flight transcode job;
    // a key's presence means a job is currently running.
    active: Arc<Mutex<HashMap<String, watch::Sender<Option<TranscodeStatus>>>>>,
}
impl TranscodeManager {
pub fn new(transcode_dir: PathBuf, cleanup_ttl_hours: u32) -> Arc<Self> {
let mgr = Arc::new(Self {
transcode_dir,
cleanup_ttl_hours: Arc::new(AtomicU32::new(cleanup_ttl_hours)),
active: Arc::new(Mutex::new(HashMap::new())),
});
// Background cleanup task — uses Weak to avoid keeping manager alive.
let weak = Arc::downgrade(&mgr);
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(3600));
loop {
interval.tick().await;
match weak.upgrade() {
Some(m) => m.run_cleanup().await,
None => break,
}
}
});
mgr
}
/// Update the cleanup TTL (also persisted to DB by the route handler).
pub fn set_cleanup_ttl(&self, hours: u32) {
    // Relaxed suffices: the TTL is an independent tuning value read by the
    // cleanup task; no ordering with other memory is required.
    self.cleanup_ttl_hours.store(hours, Ordering::Relaxed);
}
/// Current cleanup TTL in hours.
pub fn get_cleanup_ttl(&self) -> u32 {
    self.cleanup_ttl_hours.load(Ordering::Relaxed)
}
/// Ensure `item_id` has been transcoded to HLS. Blocks until the initial
/// playlist appears or an error occurs. Concurrent callers share the result.
pub async fn ensure_transcoded(&self, item_id: &str, src_path: &Path) -> DomainResult<()> {
    let out_dir = self.transcode_dir.join(item_id);
    let playlist = out_dir.join("playlist.m3u8");
    // Fast path: a previous job already produced the playlist.
    if playlist.exists() {
        return Ok(());
    }
    // Either join an in-flight job (subscribe) or become the caller that
    // spawns it. The `active` lock is held only while deciding which.
    let mut rx = {
        let mut map = self.active.lock().await;
        if let Some(tx) = map.get(item_id) {
            // Another caller is already transcoding this item — wait on it.
            tx.subscribe()
        } else {
            let (tx, rx) = watch::channel::<Option<TranscodeStatus>>(None);
            map.insert(item_id.to_string(), tx.clone());
            let item_id_owned = item_id.to_string();
            let src_owned = src_path.to_path_buf();
            let out_dir_owned = out_dir.clone();
            let playlist_owned = playlist.clone();
            let active_ref = Arc::clone(&self.active);
            // Detached worker: runs the transcode, broadcasts the terminal
            // status, then removes itself from the active map.
            tokio::spawn(async move {
                let _ = tokio::fs::create_dir_all(&out_dir_owned).await;
                let status = do_transcode(&src_owned, &out_dir_owned, &playlist_owned).await;
                if matches!(status, TranscodeStatus::Ready) {
                    info!("transcode ready: {}", item_id_owned);
                } else if let TranscodeStatus::Failed(ref e) = status {
                    error!("transcode failed for {}: {}", item_id_owned, e);
                }
                let _ = tx.send(Some(status));
                active_ref.lock().await.remove(&item_id_owned);
            });
            rx
        }
    };
    // Wait for Ready or Failed. The channel starts at None, so keep looping
    // until a terminal status is observed.
    loop {
        rx.changed().await.map_err(|_| {
            DomainError::InfrastructureError("transcode task dropped unexpectedly".into())
        })?;
        if let Some(status) = &*rx.borrow() {
            return match status {
                TranscodeStatus::Ready => Ok(()),
                TranscodeStatus::Failed(e) => Err(DomainError::InfrastructureError(
                    format!("transcode failed: {}", e),
                )),
            };
        }
    }
}
/// Remove all cached transcode directories.
pub async fn clear_cache(&self) -> std::io::Result<()> {
if self.transcode_dir.exists() {
tokio::fs::remove_dir_all(&self.transcode_dir).await?;
}
tokio::fs::create_dir_all(&self.transcode_dir).await
}
/// Return `(total_bytes, item_count)` for the cache directory.
pub async fn cache_stats(&self) -> (u64, usize) {
let mut total_bytes = 0u64;
let mut item_count = 0usize;
let Ok(mut entries) = tokio::fs::read_dir(&self.transcode_dir).await else {
return (0, 0);
};
while let Ok(Some(entry)) = entries.next_entry().await {
if !entry.path().is_dir() {
continue;
}
item_count += 1;
if let Ok(mut sub) = tokio::fs::read_dir(entry.path()).await {
while let Ok(Some(f)) = sub.next_entry().await {
if let Ok(meta) = f.metadata().await {
total_bytes += meta.len();
}
}
}
}
(total_bytes, item_count)
}
async fn run_cleanup(&self) {
let ttl_hours = self.cleanup_ttl_hours.load(Ordering::Relaxed) as u64;
let ttl = Duration::from_secs(ttl_hours * 3600);
let now = std::time::SystemTime::now();
let Ok(mut entries) = tokio::fs::read_dir(&self.transcode_dir).await else {
return;
};
while let Ok(Some(entry)) = entries.next_entry().await {
let path = entry.path();
if !path.is_dir() {
continue;
}
let playlist = path.join("playlist.m3u8");
if let Ok(meta) = tokio::fs::metadata(&playlist).await
&& let Ok(modified) = meta.modified()
&& let Ok(age) = now.duration_since(modified)
&& age > ttl
{
warn!("cleanup: removing stale transcode {:?}", path);
let _ = tokio::fs::remove_dir_all(&path).await;
}
}
}
}
// ============================================================================
// FFmpeg helper
// ============================================================================
/// Spawn ffmpeg to transcode `src` into HLS under `out_dir`, then poll until
/// `playlist` appears (Ready) or ffmpeg fails/times out (Failed).
///
/// Returning as soon as the playlist exists lets clients start playback while
/// ffmpeg is still writing later segments.
async fn do_transcode(src: &Path, out_dir: &Path, playlist: &Path) -> TranscodeStatus {
    let segment_pattern = out_dir.join("seg%05d.ts");
    // Pass paths via `.arg(Path)` (OsStr) instead of `to_str().unwrap_or("")`:
    // the old form silently replaced non-UTF-8 paths with an empty argument,
    // producing a confusing ffmpeg failure instead of using the real path.
    let mut cmd = tokio::process::Command::new("ffmpeg");
    cmd.arg("-i")
        .arg(src)
        .args([
            "-c:v",
            "libx264",
            "-preset",
            "fast",
            "-crf",
            "23",
            "-c:a",
            "aac",
            "-b:a",
            "128k",
            "-hls_time",
            "6",
            "-hls_list_size",
            "0",
            "-hls_flags",
            "independent_segments",
            "-hls_segment_filename",
        ])
        .arg(&segment_pattern)
        .arg(playlist)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null());
    let mut child = match cmd.spawn() {
        Ok(c) => c,
        Err(e) => return TranscodeStatus::Failed(format!("ffmpeg spawn error: {}", e)),
    };
    // Poll for playlist.m3u8 — it appears after the first segment is written,
    // allowing the client to start playback before transcoding is complete.
    let start = Instant::now();
    let timeout = Duration::from_secs(60);
    loop {
        if playlist.exists() {
            return TranscodeStatus::Ready;
        }
        if start.elapsed() > timeout {
            let _ = child.kill().await;
            return TranscodeStatus::Failed("timeout waiting for transcode to start".into());
        }
        match child.try_wait() {
            Ok(Some(status)) => {
                // ffmpeg exited: re-check the playlist (it may have been
                // written between our exists() check and the exit).
                return if playlist.exists() {
                    TranscodeStatus::Ready
                } else if status.success() {
                    TranscodeStatus::Failed("ffmpeg exited but produced no playlist".into())
                } else {
                    TranscodeStatus::Failed("ffmpeg exited with non-zero status".into())
                };
            }
            Err(e) => return TranscodeStatus::Failed(e.to_string()),
            Ok(None) => {}
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}

View File

@@ -0,0 +1,4 @@
// Provider-configuration repository adapters; only SQLite is implemented today.
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteProviderConfigRepository;

View File

@@ -0,0 +1,84 @@
use async_trait::async_trait;
use domain::{DomainError, DomainResult, ProviderConfigRepository, ProviderConfigRow};
/// SQLite-backed store for provider configuration rows.
/// Cloning is cheap: `SqlitePool` is internally reference-counted.
#[derive(Clone)]
pub struct SqliteProviderConfigRepository {
    pool: sqlx::SqlitePool,
}
impl SqliteProviderConfigRepository {
pub fn new(pool: sqlx::SqlitePool) -> Self {
Self { pool }
}
}
/// Map a raw `provider_configs` tuple row into the domain type.
///
/// SQLite stores booleans as integers, so `enabled` is true for any non-zero
/// value. Shared by `get_all` and `get_by_id` to keep the mapping in one place.
fn row_to_config(
    (id, provider_type, config_json, enabled, updated_at): (String, String, String, i64, String),
) -> ProviderConfigRow {
    ProviderConfigRow {
        id,
        provider_type,
        config_json,
        enabled: enabled != 0,
        updated_at,
    }
}
#[async_trait]
impl ProviderConfigRepository for SqliteProviderConfigRepository {
    /// Fetch every provider configuration row.
    async fn get_all(&self) -> DomainResult<Vec<ProviderConfigRow>> {
        let rows: Vec<(String, String, String, i64, String)> = sqlx::query_as(
            "SELECT id, provider_type, config_json, enabled, updated_at FROM provider_configs",
        )
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(rows.into_iter().map(row_to_config).collect())
    }
    /// Fetch a single configuration by primary key, if present.
    async fn get_by_id(&self, id: &str) -> DomainResult<Option<ProviderConfigRow>> {
        let row: Option<(String, String, String, i64, String)> = sqlx::query_as(
            "SELECT id, provider_type, config_json, enabled, updated_at FROM provider_configs WHERE id = ?",
        )
        .bind(id)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(row.map(row_to_config))
    }
    /// Insert or update a configuration row, keyed on `id`.
    async fn upsert(&self, row: &ProviderConfigRow) -> DomainResult<()> {
        sqlx::query(
            r#"INSERT INTO provider_configs (id, provider_type, config_json, enabled, updated_at)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
            provider_type = excluded.provider_type,
            config_json = excluded.config_json,
            enabled = excluded.enabled,
            updated_at = excluded.updated_at"#,
        )
        .bind(&row.id)
        .bind(&row.provider_type)
        .bind(&row.config_json)
        .bind(row.enabled as i64)
        .bind(&row.updated_at)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
    /// Delete the configuration row with `id`; succeeds even when absent.
    async fn delete(&self, id: &str) -> DomainResult<()> {
        sqlx::query("DELETE FROM provider_configs WHERE id = ?")
            .bind(id)
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,175 @@
//! Provider registry — routes media operations to the correct named provider.
//!
//! Item IDs are prefixed with the provider key separated by `::`, e.g.
//! `"jellyfin::abc123"` or `"local::base64path"`. The registry strips the
//! prefix before calling the underlying provider and re-stamps returned IDs
//! so every item is self-routing throughout its lifetime.
//!
//! An empty prefix (un-prefixed IDs from old data, or new blocks with no
//! `provider_id` set) falls back to the primary (first-registered) provider.
use std::sync::Arc;
use async_trait::async_trait;
use domain::errors::{DomainError, DomainResult};
use domain::ports::{
Collection, IMediaProvider, IProviderRegistry, ProviderCapabilities, SeriesSummary,
StreamQuality,
};
use domain::{ContentType, MediaFilter, MediaItem, MediaItemId};
/// Registry of named media providers.
///
/// Providers are registered with a short key (e.g. `"jellyfin"`, `"local"`).
/// The first registered provider is the *primary* — it handles un-prefixed IDs
/// and empty `provider_id` strings for backward compatibility.
///
/// Duplicate keys are not rejected at registration; lookups return the first
/// matching entry.
pub struct ProviderRegistry {
    /// Ordered list of `(key, provider)` pairs. Order determines the primary.
    providers: Vec<(String, Arc<dyn IMediaProvider>)>,
}
impl ProviderRegistry {
pub fn new() -> Self {
Self { providers: Vec::new() }
}
/// Register a provider under `id`. The first registered becomes the primary.
pub fn register(&mut self, id: impl Into<String>, provider: Arc<dyn IMediaProvider>) {
self.providers.push((id.into(), provider));
}
pub fn is_empty(&self) -> bool {
self.providers.is_empty()
}
/// Return the provider registered under `id`, if any.
pub fn get_provider(&self, id: &str) -> Option<Arc<dyn IMediaProvider>> {
self.providers
.iter()
.find(|(pid, _)| pid == id)
.map(|(_, p)| Arc::clone(p))
}
// -------------------------------------------------------------------------
// Internal helpers
// -------------------------------------------------------------------------
fn prefix_id(provider_id: &str, raw_id: &str) -> MediaItemId {
MediaItemId::new(format!("{}::{}", provider_id, raw_id))
}
/// Split `"provider_key::raw_id"` into `(key, raw_id)`.
/// Un-prefixed IDs return `("", full_id)` → primary provider fallback.
fn parse_prefix(id: &MediaItemId) -> (&str, &str) {
let s: &str = id.as_ref();
match s.find("::") {
Some(pos) => (&s[..pos], &s[pos + 2..]),
None => ("", s),
}
}
/// Resolve a provider key to the provider, defaulting to primary on empty key.
/// Returns `(resolved_key, provider)` so the caller can re-stamp IDs.
fn resolve_provider<'a>(
&'a self,
provider_id: &str,
) -> DomainResult<(&'a str, &'a Arc<dyn IMediaProvider>)> {
if provider_id.is_empty() {
self.providers
.first()
.map(|(id, p)| (id.as_str(), p))
.ok_or_else(|| DomainError::InfrastructureError("No providers registered".into()))
} else {
self.providers
.iter()
.find(|(id, _)| id == provider_id)
.map(|(id, p)| (id.as_str(), p))
.ok_or_else(|| {
DomainError::InfrastructureError(
format!("Provider '{}' not found", provider_id),
)
})
}
}
fn wrap_items(provider_id: &str, items: Vec<MediaItem>) -> Vec<MediaItem> {
items
.into_iter()
.map(|mut item| {
item.id = Self::prefix_id(provider_id, item.id.as_ref());
item
})
.collect()
}
}
impl Default for ProviderRegistry {
    /// Equivalent to [`ProviderRegistry::new`]: an empty registry.
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl IProviderRegistry for ProviderRegistry {
    /// Fetch items from the named provider and stamp their IDs with its key.
    async fn fetch_items(&self, provider_id: &str, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
        let (key, provider) = self.resolve_provider(provider_id)?;
        let fetched = provider.fetch_items(filter).await?;
        Ok(Self::wrap_items(key, fetched))
    }
    /// Look up one item; the `::` prefix in `item_id` selects the provider.
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
        let (prefix, raw) = Self::parse_prefix(item_id);
        let (key, provider) = self.resolve_provider(prefix)?;
        let raw_id = MediaItemId::new(raw);
        match provider.fetch_by_id(&raw_id).await? {
            None => Ok(None),
            Some(mut item) => {
                item.id = Self::prefix_id(key, item.id.as_ref());
                Ok(Some(item))
            }
        }
    }
    /// Resolve a playable URL; the provider sees only the un-prefixed ID.
    async fn get_stream_url(&self, item_id: &MediaItemId, quality: &StreamQuality) -> DomainResult<String> {
        let (prefix, raw) = Self::parse_prefix(item_id);
        let (_, provider) = self.resolve_provider(prefix)?;
        provider.get_stream_url(&MediaItemId::new(raw), quality).await
    }
    /// All registered provider keys, in registration order.
    fn provider_ids(&self) -> Vec<String> {
        let mut ids = Vec::with_capacity(self.providers.len());
        for (key, _) in &self.providers {
            ids.push(key.clone());
        }
        ids
    }
    /// Key of the primary (first-registered) provider, or `""` when empty.
    fn primary_id(&self) -> &str {
        match self.providers.first() {
            Some((key, _)) => key.as_str(),
            None => "",
        }
    }
    /// Capabilities of the named provider (primary when `provider_id` is empty).
    fn capabilities(&self, provider_id: &str) -> Option<ProviderCapabilities> {
        let target = if provider_id.is_empty() {
            let (first_key, _) = self.providers.first()?;
            first_key.as_str()
        } else {
            provider_id
        };
        for (key, provider) in &self.providers {
            if key == target {
                return Some(provider.capabilities());
            }
        }
        None
    }
    /// List collections exposed by the named provider.
    async fn list_collections(&self, provider_id: &str) -> DomainResult<Vec<Collection>> {
        let (_, provider) = self.resolve_provider(provider_id)?;
        provider.list_collections().await
    }
    /// List series, optionally scoped to a single collection.
    async fn list_series(&self, provider_id: &str, collection_id: Option<&str>) -> DomainResult<Vec<SeriesSummary>> {
        let (_, provider) = self.resolve_provider(provider_id)?;
        provider.list_series(collection_id).await
    }
    /// List genres, optionally filtered by content type.
    async fn list_genres(&self, provider_id: &str, content_type: Option<&ContentType>) -> DomainResult<Vec<String>> {
        let (_, provider) = self.resolve_provider(provider_id)?;
        provider.list_genres(content_type).await
    }
}

View File

@@ -1,447 +0,0 @@
//! SQLite and PostgreSQL adapters for ScheduleRepository
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{
ChannelId, DomainError, DomainResult, GeneratedSchedule, MediaItem, MediaItemId,
PlaybackRecord, ScheduleRepository, ScheduledSlot,
};
// ============================================================================
// Row types
// ============================================================================
/// Raw `generated_schedules` row; datetimes stay as strings until mapped.
#[derive(Debug, FromRow)]
struct ScheduleRow {
    id: String,
    channel_id: String,
    valid_from: String,
    valid_until: String,
    generation: i64,
}
/// Raw `scheduled_slots` row; `item` holds the serialized `MediaItem` JSON.
#[derive(Debug, FromRow)]
struct SlotRow {
    id: String,
    // schedule_id selected but only used to drive the JOIN; not needed for domain type
    #[allow(dead_code)]
    schedule_id: String,
    start_at: String,
    end_at: String,
    item: String,
    source_block_id: String,
}
/// Raw `playback_records` row, converted via `TryFrom` into `PlaybackRecord`.
#[derive(Debug, FromRow)]
struct PlaybackRecordRow {
    id: String,
    channel_id: String,
    item_id: String,
    played_at: String,
    generation: i64,
}
// ============================================================================
// Mapping
// ============================================================================
/// Parse a stored datetime string into `DateTime<Utc>`.
///
/// Accepts RFC 3339 first; otherwise falls back to the bare
/// `YYYY-MM-DD HH:MM:SS` form (interpreted as UTC). The error message carries
/// the offending string for diagnostics.
fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
        return Ok(dt.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
fn map_slot_row(row: SlotRow) -> Result<ScheduledSlot, DomainError> {
let id = Uuid::parse_str(&row.id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot UUID: {}", e)))?;
let source_block_id = Uuid::parse_str(&row.source_block_id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
let item: MediaItem = serde_json::from_str(&row.item)
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e)))?;
Ok(ScheduledSlot {
id,
start_at: parse_dt(&row.start_at)?,
end_at: parse_dt(&row.end_at)?,
item,
source_block_id,
})
}
/// Assemble a domain `GeneratedSchedule` from its header row and slot rows.
///
/// Slot mapping is collected first but only unwrapped after the header's
/// timestamps parse, preserving which error surfaces when several are invalid.
fn map_schedule(row: ScheduleRow, slot_rows: Vec<SlotRow>) -> Result<GeneratedSchedule, DomainError> {
    let schedule_id = Uuid::parse_str(&row.id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid schedule UUID: {}", e)))?;
    let chan_id = Uuid::parse_str(&row.channel_id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
    let mapped: Result<Vec<ScheduledSlot>, DomainError> =
        slot_rows.into_iter().map(map_slot_row).collect();
    let valid_from = parse_dt(&row.valid_from)?;
    let valid_until = parse_dt(&row.valid_until)?;
    Ok(GeneratedSchedule {
        id: schedule_id,
        channel_id: chan_id,
        valid_from,
        valid_until,
        generation: row.generation as u32,
        slots: mapped?,
    })
}
impl TryFrom<PlaybackRecordRow> for PlaybackRecord {
    type Error = DomainError;
    /// Convert a raw playback row into the domain record, validating both
    /// UUIDs and the `played_at` timestamp.
    fn try_from(row: PlaybackRecordRow) -> Result<Self, Self::Error> {
        let id = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
        let channel_id = Uuid::parse_str(&row.channel_id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
        Ok(PlaybackRecord {
            id,
            channel_id,
            // item_id is an opaque provider-scoped string; no validation here.
            item_id: MediaItemId::new(row.item_id),
            played_at: parse_dt(&row.played_at)?,
            generation: row.generation as u32,
        })
    }
}
// ============================================================================
// SQLite adapter
// ============================================================================
/// SQLite-backed implementation of `ScheduleRepository`.
#[cfg(feature = "sqlite")]
pub struct SqliteScheduleRepository {
    pool: sqlx::SqlitePool,
}
#[cfg(feature = "sqlite")]
impl SqliteScheduleRepository {
    /// Wrap an existing SQLite connection pool.
    pub fn new(pool: sqlx::SqlitePool) -> Self {
        SqliteScheduleRepository { pool }
    }
    /// Load all slots belonging to `schedule_id`, ordered chronologically.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        let sql = "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = ? ORDER BY start_at";
        sqlx::query_as(sql)
            .bind(schedule_id)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))
    }
}
#[cfg(feature = "sqlite")]
#[async_trait]
impl ScheduleRepository for SqliteScheduleRepository {
    /// Return the schedule whose window contains `at`
    /// (`valid_from <= at < valid_until`), with its slots hydrated.
    ///
    /// Bounds are compared as text; this relies on all stored values being
    /// RFC 3339 strings with the same UTC offset, as written by `save`.
    async fn find_active(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        let at_str = at.to_rfc3339();
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = ? AND valid_from <= ? AND valid_until > ? \
             LIMIT 1",
        )
        .bind(channel_id.to_string())
        .bind(&at_str)
        .bind(&at_str)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                // Hydrate slots, then map header + slots into the domain type.
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Return the schedule with the latest `valid_from` for the channel.
    async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = ? ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Persist a schedule: upsert the header, then replace its slot set.
    ///
    /// NOTE(review): the header upsert, slot delete, and slot inserts are
    /// separate statements rather than one transaction — a failure midway
    /// leaves a header without (all of) its slots; confirm callers regenerate
    /// on error.
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
        // Upsert the schedule header
        sqlx::query(
            r#"
            INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO UPDATE SET
            valid_from = excluded.valid_from,
            valid_until = excluded.valid_until,
            generation = excluded.generation
            "#,
        )
        .bind(schedule.id.to_string())
        .bind(schedule.channel_id.to_string())
        .bind(schedule.valid_from.to_rfc3339())
        .bind(schedule.valid_until.to_rfc3339())
        .bind(schedule.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        // Replace all slots (delete-then-insert is safe here; schedule saves are
        // infrequent and atomic within a single-writer SQLite connection)
        sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = ?")
            .bind(schedule.id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        for slot in &schedule.slots {
            // Slot items are stored as opaque JSON so the schema need not
            // track MediaItem's shape.
            let item_json = serde_json::to_string(&slot.item).map_err(|e| {
                DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
            })?;
            sqlx::query(
                r#"
                INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
                VALUES (?, ?, ?, ?, ?, ?)
                "#,
            )
            .bind(slot.id.to_string())
            .bind(schedule.id.to_string())
            .bind(slot.start_at.to_rfc3339())
            .bind(slot.end_at.to_rfc3339())
            .bind(&item_json)
            .bind(slot.source_block_id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        }
        Ok(())
    }
    /// Full playback history for a channel, newest first.
    async fn find_playback_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>> {
        let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
            "SELECT id, channel_id, item_id, played_at, generation \
             FROM playback_records WHERE channel_id = ? ORDER BY played_at DESC",
        )
        .bind(channel_id.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(PlaybackRecord::try_from).collect()
    }
    /// Insert a playback record; duplicate IDs are silently ignored
    /// (idempotent via `ON CONFLICT(id) DO NOTHING`).
    async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
        sqlx::query(
            r#"
            INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(id) DO NOTHING
            "#,
        )
        .bind(record.id.to_string())
        .bind(record.channel_id.to_string())
        .bind(record.item_id.as_ref())
        .bind(record.played_at.to_rfc3339())
        .bind(record.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}
// ============================================================================
// PostgreSQL adapter
// ============================================================================
/// PostgreSQL-backed implementation of `ScheduleRepository`.
#[cfg(feature = "postgres")]
pub struct PostgresScheduleRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
#[cfg(feature = "postgres")]
impl PostgresScheduleRepository {
    /// Wrap an existing Postgres connection pool.
    pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
        PostgresScheduleRepository { pool }
    }
    /// Load all slots belonging to `schedule_id`, ordered chronologically.
    async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
        let sql = "SELECT id, schedule_id, start_at, end_at, item, source_block_id \
             FROM scheduled_slots WHERE schedule_id = $1 ORDER BY start_at";
        sqlx::query_as(sql)
            .bind(schedule_id)
            .fetch_all(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))
    }
}
#[cfg(feature = "postgres")]
#[async_trait]
impl ScheduleRepository for PostgresScheduleRepository {
    /// Return the schedule whose window contains `at`
    /// (`valid_from <= at < valid_until`), with its slots hydrated.
    ///
    /// Bounds are compared as text; this relies on all stored values being
    /// RFC 3339 strings with the same UTC offset, as written by `save`.
    async fn find_active(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>> {
        let at_str = at.to_rfc3339();
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = $1 AND valid_from <= $2 AND valid_until > $3 \
             LIMIT 1",
        )
        .bind(channel_id.to_string())
        .bind(&at_str)
        .bind(&at_str)
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                // Hydrate slots, then map header + slots into the domain type.
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Return the schedule with the latest `valid_from` for the channel.
    async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
        let row: Option<ScheduleRow> = sqlx::query_as(
            "SELECT id, channel_id, valid_from, valid_until, generation \
             FROM generated_schedules \
             WHERE channel_id = $1 ORDER BY valid_from DESC LIMIT 1",
        )
        .bind(channel_id.to_string())
        .fetch_optional(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        match row {
            None => Ok(None),
            Some(r) => {
                let slots = self.fetch_slots(&r.id).await?;
                Some(map_schedule(r, slots)).transpose()
            }
        }
    }
    /// Persist a schedule: upsert the header, then replace its slot set.
    ///
    /// NOTE(review): header upsert, slot delete, and slot inserts run as
    /// separate statements, not one transaction — a midway failure leaves a
    /// header without (all of) its slots; confirm callers regenerate on error.
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
        sqlx::query(
            r#"
            INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
            VALUES ($1, $2, $3, $4, $5)
            ON CONFLICT(id) DO UPDATE SET
            valid_from = EXCLUDED.valid_from,
            valid_until = EXCLUDED.valid_until,
            generation = EXCLUDED.generation
            "#,
        )
        .bind(schedule.id.to_string())
        .bind(schedule.channel_id.to_string())
        .bind(schedule.valid_from.to_rfc3339())
        .bind(schedule.valid_until.to_rfc3339())
        .bind(schedule.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = $1")
            .bind(schedule.id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        for slot in &schedule.slots {
            // Slot items are stored as opaque JSON so the schema need not
            // track MediaItem's shape.
            let item_json = serde_json::to_string(&slot.item).map_err(|e| {
                DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
            })?;
            sqlx::query(
                r#"
                INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
                VALUES ($1, $2, $3, $4, $5, $6)
                "#,
            )
            .bind(slot.id.to_string())
            .bind(schedule.id.to_string())
            .bind(slot.start_at.to_rfc3339())
            .bind(slot.end_at.to_rfc3339())
            .bind(&item_json)
            .bind(slot.source_block_id.to_string())
            .execute(&self.pool)
            .await
            .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        }
        Ok(())
    }
    /// Full playback history for a channel, newest first.
    async fn find_playback_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>> {
        let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
            "SELECT id, channel_id, item_id, played_at, generation \
             FROM playback_records WHERE channel_id = $1 ORDER BY played_at DESC",
        )
        .bind(channel_id.to_string())
        .fetch_all(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        rows.into_iter().map(PlaybackRecord::try_from).collect()
    }
    /// Insert a playback record; duplicate IDs are silently ignored
    /// (idempotent via `ON CONFLICT(id) DO NOTHING`).
    async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
        sqlx::query(
            r#"
            INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
            VALUES ($1, $2, $3, $4, $5)
            ON CONFLICT(id) DO NOTHING
            "#,
        )
        .bind(record.id.to_string())
        .bind(record.channel_id.to_string())
        .bind(record.item_id.as_ref())
        .bind(record.played_at.to_rfc3339())
        .bind(record.generation as i64)
        .execute(&self.pool)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;
        Ok(())
    }
}

View File

@@ -0,0 +1,115 @@
use chrono::{DateTime, Utc};
use sqlx::FromRow;
use uuid::Uuid;
use domain::{DomainError, GeneratedSchedule, MediaItem, MediaItemId, PlaybackRecord, ScheduledSlot};
// ============================================================================
// Row types
// ============================================================================
/// Raw `generated_schedules` row; datetimes stay as strings until mapped.
#[derive(Debug, FromRow)]
pub(super) struct ScheduleRow {
    pub id: String,
    pub channel_id: String,
    pub valid_from: String,
    pub valid_until: String,
    pub generation: i64,
}
/// Raw `scheduled_slots` row; `item` holds the serialized `MediaItem` JSON.
#[derive(Debug, FromRow)]
pub(super) struct SlotRow {
    pub id: String,
    // schedule_id selected but only used to drive the JOIN; not needed for domain type
    #[allow(dead_code)]
    pub schedule_id: String,
    pub start_at: String,
    pub end_at: String,
    pub item: String,
    pub source_block_id: String,
}
/// Projection for the "most recent slot per block" query: the owning block
/// UUID plus the serialized `MediaItem` JSON of that slot.
#[derive(Debug, FromRow)]
pub(super) struct LastSlotRow {
    pub source_block_id: String,
    pub item: String,
}
/// Raw `playback_records` row, converted via `TryFrom` into `PlaybackRecord`.
#[derive(Debug, FromRow)]
pub(super) struct PlaybackRecordRow {
    pub id: String,
    pub channel_id: String,
    pub item_id: String,
    pub played_at: String,
    pub generation: i64,
}
// ============================================================================
// Mapping
// ============================================================================
/// Parse a stored datetime string into `DateTime<Utc>`.
///
/// Accepts RFC 3339 first; otherwise falls back to the bare
/// `YYYY-MM-DD HH:MM:SS` form (interpreted as UTC). The error message carries
/// the offending string for diagnostics.
pub(super) fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
    if let Ok(dt) = DateTime::parse_from_rfc3339(s) {
        return Ok(dt.with_timezone(&Utc));
    }
    chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
        .map(|naive| naive.and_utc())
        .map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
}
pub(super) fn map_slot_row(row: SlotRow) -> Result<ScheduledSlot, DomainError> {
let id = Uuid::parse_str(&row.id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot UUID: {}", e)))?;
let source_block_id = Uuid::parse_str(&row.source_block_id)
.map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
let item: MediaItem = serde_json::from_str(&row.item)
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e)))?;
Ok(ScheduledSlot {
id,
start_at: parse_dt(&row.start_at)?,
end_at: parse_dt(&row.end_at)?,
item,
source_block_id,
})
}
/// Assemble a domain `GeneratedSchedule` from its header row and slot rows.
///
/// Slot mapping is collected first but only unwrapped after the header's
/// timestamps parse, preserving which error surfaces when several are invalid.
pub(super) fn map_schedule(
    row: ScheduleRow,
    slot_rows: Vec<SlotRow>,
) -> Result<GeneratedSchedule, DomainError> {
    let schedule_id = Uuid::parse_str(&row.id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid schedule UUID: {}", e)))?;
    let chan_id = Uuid::parse_str(&row.channel_id)
        .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
    let mapped: Result<Vec<ScheduledSlot>, DomainError> =
        slot_rows.into_iter().map(map_slot_row).collect();
    let valid_from = parse_dt(&row.valid_from)?;
    let valid_until = parse_dt(&row.valid_until)?;
    Ok(GeneratedSchedule {
        id: schedule_id,
        channel_id: chan_id,
        valid_from,
        valid_until,
        generation: row.generation as u32,
        slots: mapped?,
    })
}
impl TryFrom<PlaybackRecordRow> for PlaybackRecord {
    type Error = DomainError;
    /// Convert a raw playback row into the domain record, validating both
    /// UUIDs and the `played_at` timestamp.
    fn try_from(row: PlaybackRecordRow) -> Result<Self, Self::Error> {
        let id = Uuid::parse_str(&row.id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
        let channel_id = Uuid::parse_str(&row.channel_id)
            .map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
        Ok(PlaybackRecord {
            id,
            channel_id,
            // item_id is an opaque provider-scoped string; no validation here.
            item_id: MediaItemId::new(row.item_id),
            played_at: parse_dt(&row.played_at)?,
            generation: row.generation as u32,
        })
    }
}

View File

@@ -0,0 +1,13 @@
//! SQLite and PostgreSQL adapters for ScheduleRepository
// Shared row types and row→domain mapping helpers used by both backends.
mod mapping;
#[cfg(feature = "sqlite")]
mod sqlite;
#[cfg(feature = "postgres")]
mod postgres;
#[cfg(feature = "sqlite")]
pub use sqlite::SqliteScheduleRepository;
#[cfg(feature = "postgres")]
pub use postgres::PostgresScheduleRepository;

View File

@@ -0,0 +1,202 @@
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use std::collections::HashMap;
use domain::{BlockId, ChannelId, DomainError, DomainResult, GeneratedSchedule, MediaItemId, PlaybackRecord, ScheduleRepository};
use super::mapping::{map_schedule, LastSlotRow, PlaybackRecordRow, ScheduleRow, SlotRow};
/// PostgreSQL-backed implementation of `ScheduleRepository`.
pub struct PostgresScheduleRepository {
    pool: sqlx::Pool<sqlx::Postgres>,
}
impl PostgresScheduleRepository {
pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
Self { pool }
}
async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
sqlx::query_as(
"SELECT id, schedule_id, start_at, end_at, item, source_block_id \
FROM scheduled_slots WHERE schedule_id = $1 ORDER BY start_at",
)
.bind(schedule_id)
.fetch_all(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))
}
}
#[async_trait]
impl ScheduleRepository for PostgresScheduleRepository {
async fn find_active(
&self,
channel_id: ChannelId,
at: DateTime<Utc>,
) -> DomainResult<Option<GeneratedSchedule>> {
let at_str = at.to_rfc3339();
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules \
WHERE channel_id = $1 AND valid_from <= $2 AND valid_until > $3 \
LIMIT 1",
)
.bind(channel_id.to_string())
.bind(&at_str)
.bind(&at_str)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
let row: Option<ScheduleRow> = sqlx::query_as(
"SELECT id, channel_id, valid_from, valid_until, generation \
FROM generated_schedules \
WHERE channel_id = $1 ORDER BY valid_from DESC LIMIT 1",
)
.bind(channel_id.to_string())
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
match row {
None => Ok(None),
Some(r) => {
let slots = self.fetch_slots(&r.id).await?;
Some(map_schedule(r, slots)).transpose()
}
}
}
async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
sqlx::query(
r#"
INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT(id) DO UPDATE SET
valid_from = EXCLUDED.valid_from,
valid_until = EXCLUDED.valid_until,
generation = EXCLUDED.generation
"#,
)
.bind(schedule.id.to_string())
.bind(schedule.channel_id.to_string())
.bind(schedule.valid_from.to_rfc3339())
.bind(schedule.valid_until.to_rfc3339())
.bind(schedule.generation as i64)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = $1")
.bind(schedule.id.to_string())
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
for slot in &schedule.slots {
let item_json = serde_json::to_string(&slot.item).map_err(|e| {
DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
})?;
sqlx::query(
r#"
INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
VALUES ($1, $2, $3, $4, $5, $6)
"#,
)
.bind(slot.id.to_string())
.bind(schedule.id.to_string())
.bind(slot.start_at.to_rfc3339())
.bind(slot.end_at.to_rfc3339())
.bind(&item_json)
.bind(slot.source_block_id.to_string())
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
}
Ok(())
}
/// Loads the full playback history for a channel, newest first.
///
/// Each row is converted via `PlaybackRecord::try_from`; conversion stops at
/// the first invalid record and surfaces its error.
async fn find_playback_history(
    &self,
    channel_id: ChannelId,
) -> DomainResult<Vec<PlaybackRecord>> {
    let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
        "SELECT id, channel_id, item_id, played_at, generation \
         FROM playback_records WHERE channel_id = $1 ORDER BY played_at DESC",
    )
    .bind(channel_id.to_string())
    .fetch_all(&self.pool)
    .await
    .map_err(|e| DomainError::RepositoryError(e.to_string()))?;

    let mut records = Vec::with_capacity(rows.len());
    for row in rows {
        records.push(PlaybackRecord::try_from(row)?);
    }
    Ok(records)
}
/// For each block of a channel, returns the media item of the slot with the
/// latest `start_at` — i.e. "what did this block play last".
///
/// The correlated subquery picks, per `source_block_id`, the maximum
/// `start_at` across all schedules of the channel; the same channel id is
/// bound twice because it appears in both the outer and inner query.
async fn find_last_slot_per_block(
    &self,
    channel_id: ChannelId,
) -> DomainResult<HashMap<BlockId, MediaItemId>> {
    let channel = channel_id.to_string();
    let rows: Vec<LastSlotRow> = sqlx::query_as(
        "SELECT ss.source_block_id, ss.item \
         FROM scheduled_slots ss \
         INNER JOIN generated_schedules gs ON gs.id = ss.schedule_id \
         WHERE gs.channel_id = $1 \
         AND ss.start_at = ( \
             SELECT MAX(ss2.start_at) \
             FROM scheduled_slots ss2 \
             INNER JOIN generated_schedules gs2 ON gs2.id = ss2.schedule_id \
             WHERE ss2.source_block_id = ss.source_block_id \
             AND gs2.channel_id = $2 \
         )",
    )
    .bind(&channel)
    .bind(&channel)
    .fetch_all(&self.pool)
    .await
    .map_err(|e| DomainError::RepositoryError(e.to_string()))?;

    // Fallibly collect (block id, item id) pairs; the first bad UUID or bad
    // JSON blob short-circuits with a RepositoryError.
    rows.into_iter()
        .map(|row| {
            let block_id = uuid::Uuid::parse_str(&row.source_block_id)
                .map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
            let item: domain::MediaItem = serde_json::from_str(&row.item).map_err(|e| {
                DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e))
            })?;
            Ok((block_id, item.id))
        })
        .collect()
}
/// Persists a single playback record.
///
/// Idempotent by record id: `ON CONFLICT(id) DO NOTHING` makes re-saving the
/// same record a no-op rather than an error.
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
    let outcome = sqlx::query(
        r#"
        INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
        VALUES ($1, $2, $3, $4, $5)
        ON CONFLICT(id) DO NOTHING
        "#,
    )
    .bind(record.id.to_string())
    .bind(record.channel_id.to_string())
    .bind(record.item_id.as_ref())
    .bind(record.played_at.to_rfc3339())
    .bind(record.generation as i64)
    .execute(&self.pool)
    .await;

    outcome
        .map(|_| ())
        .map_err(|e| DomainError::RepositoryError(e.to_string()))
}
}

Some files were not shown because too many files have changed in this diff Show More