feat: initialize k-tv-frontend with Next.js and Tailwind CSS
- Added package.json with dependencies and scripts for development, build, and linting.
- Created postcss.config.mjs for Tailwind CSS integration.
- Added SVG assets for UI components including file, globe, next, vercel, and window icons.
- Configured TypeScript with tsconfig.json for strict type checking and module resolution.
2  k-tv-backend/.dockerignore  Normal file
@@ -0,0 +1,2 @@
/target
*.db

77  k-tv-backend/.env.example  Normal file
@@ -0,0 +1,77 @@
# ============================================================================
# K-TV Configuration
# ============================================================================
# Copy this file to .env and adjust values for your environment.

# ============================================================================
# Server
# ============================================================================
HOST=127.0.0.1
PORT=3000

# ============================================================================
# Database
# ============================================================================
# SQLite (default)
DATABASE_URL=sqlite:data.db?mode=rwc

# PostgreSQL (requires postgres feature flag)
# DATABASE_URL=postgres://user:password@localhost:5432/mydb

DB_MAX_CONNECTIONS=5
DB_MIN_CONNECTIONS=1

# ============================================================================
# Cookie Secret
# ============================================================================
# Used to encrypt the OIDC state cookie (CSRF token, PKCE verifier, nonce).
# Must be at least 64 characters in production.
COOKIE_SECRET=your-cookie-secret-key-must-be-at-least-64-characters-long-for-security!!

# Set to true when serving over HTTPS
SECURE_COOKIE=false

# ============================================================================
# JWT
# ============================================================================
# Must be at least 32 characters in production.
JWT_SECRET=your-jwt-secret-key-at-least-32-chars

# Optional: embed issuer/audience claims in tokens
# JWT_ISSUER=your-app-name
# JWT_AUDIENCE=your-app-audience

# Token lifetime in hours (default: 24)
JWT_EXPIRY_HOURS=24

# ============================================================================
# OIDC (optional — requires auth-oidc feature flag)
# ============================================================================
# OIDC_ISSUER=https://your-oidc-provider.com
# OIDC_CLIENT_ID=your-client-id
# OIDC_CLIENT_SECRET=your-client-secret
# OIDC_REDIRECT_URL=http://localhost:3000/api/v1/auth/callback
# OIDC_RESOURCE_ID=your-resource-id  # optional audience claim to verify

# ============================================================================
# CORS
# ============================================================================
CORS_ALLOWED_ORIGINS=http://localhost:5173,http://localhost:3000

# ============================================================================
# Jellyfin (required for schedule generation)
# ============================================================================
# Base URL of your Jellyfin instance
JELLYFIN_BASE_URL=http://192.168.1.10:8096

# API key — Jellyfin Dashboard → API Keys
JELLYFIN_API_KEY=your-jellyfin-api-key

# User ID used for library browsing — Jellyfin Dashboard → Users → click user → copy ID from URL
JELLYFIN_USER_ID=your-jellyfin-user-id

# ============================================================================
# Production Mode
# ============================================================================
# Set to true/production/1 to enforce minimum secret lengths and other checks.
PRODUCTION=false

3  k-tv-backend/.gitignore  vendored  Normal file
@@ -0,0 +1,3 @@
/target
.env
*.db

4076  k-tv-backend/Cargo.lock  generated  Normal file
File diff suppressed because it is too large

3  k-tv-backend/Cargo.toml  Normal file
@@ -0,0 +1,3 @@
[workspace]
members = ["domain", "infra", "api"]
resolver = "2"

27  k-tv-backend/Dockerfile  Normal file
@@ -0,0 +1,27 @@
FROM rust:1.92 AS builder

WORKDIR /app
COPY . .

# Build the release binary
RUN cargo build --release -p api

FROM debian:bookworm-slim

WORKDIR /app

# Install OpenSSL (required for many Rust networking crates) and CA certificates
RUN apt-get update && apt-get install -y libssl3 ca-certificates && rm -rf /var/lib/apt/lists/*

COPY --from=builder /app/target/release/api .

# Create data directory for SQLite
RUN mkdir -p /app/data

ENV DATABASE_URL=sqlite:///app/data/template.db
ENV SESSION_SECRET=supersecretchangeinproduction

EXPOSE 3000

CMD ["./api"]

396  k-tv-backend/README.md  Normal file
@@ -0,0 +1,396 @@
# K-TV Backend

The Rust API server for K-TV — a self-hosted linear TV channel orchestration layer for personal media servers.

K-TV turns your Jellyfin library (or any compatible media source) into broadcast-style TV channels with wall-clock scheduling. Instead of scrolling for what to watch, you tune into your "90s Sitcom Network" or "Friday Night Horror" channel and let the schedule run.

## Architecture

The backend is a Cargo workspace with three crates following Hexagonal (Ports & Adapters) architecture:

```
k-tv-backend/
├── domain/   # Pure business logic — no I/O, no frameworks
├── infra/    # Adapters: SQLite/Postgres repositories, Jellyfin HTTP client
└── api/      # Axum HTTP server — routes, DTOs, startup wiring
```

The domain defines ports (traits). Infra implements them. The API wires everything together. The domain never imports from infra or api.
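
To make the dependency direction concrete, here is a minimal, simplified sketch of the pattern. The names and signatures are illustrative only, not the actual items in `domain/src/ports.rs`:

```rust
// domain: defines the port. No knowledge of HTTP, SQL, or Jellyfin.
pub trait MediaProvider {
    fn fetch_titles(&self, genre: &str) -> Vec<String>;
}

// infra: implements the port against a concrete backend (here, a stub).
pub struct StaticProvider;

impl MediaProvider for StaticProvider {
    fn fetch_titles(&self, genre: &str) -> Vec<String> {
        vec![format!("Some {genre} show")]
    }
}

// api: wiring code only sees the trait, never the concrete adapter type.
fn list_titles(provider: &dyn MediaProvider) -> Vec<String> {
    provider.fetch_titles("Comedy")
}

fn main() {
    let provider = StaticProvider;
    println!("{:?}", list_titles(&provider));
}
```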
## Quick Start

```bash
cp .env.example .env
# Edit .env — minimum required: JELLYFIN_BASE_URL, JELLYFIN_API_KEY, JELLYFIN_USER_ID
cargo run
```

The API is available at `http://localhost:3000/api/v1/`.

## Configuration

All configuration is via environment variables. See [`.env.example`](.env.example).

### Server

| Variable | Default | Description |
|----------|---------|-------------|
| `HOST` | `127.0.0.1` | Bind address |
| `PORT` | `3000` | Listen port |

### Database

| Variable | Default | Description |
|----------|---------|-------------|
| `DATABASE_URL` | `sqlite:data.db?mode=rwc` | SQLite or Postgres connection string |
| `DB_MAX_CONNECTIONS` | `5` | Connection pool max size |
| `DB_MIN_CONNECTIONS` | `1` | Connection pool min size |

### Authentication

| Variable | Default | Description |
|----------|---------|-------------|
| `JWT_SECRET` | _(insecure dev default)_ | Signing key for JWT tokens (min 32 chars in production) |
| `JWT_EXPIRY_HOURS` | `24` | Token lifetime in hours |
| `JWT_ISSUER` | — | Optional issuer claim embedded in tokens |
| `JWT_AUDIENCE` | — | Optional audience claim embedded in tokens |
| `COOKIE_SECRET` | _(insecure dev default)_ | Encrypts OIDC state cookie (min 64 chars in production) |
| `SECURE_COOKIE` | `false` | Set `true` when serving over HTTPS |

### OIDC (optional — requires `auth-oidc` feature)

| Variable | Description |
|----------|-------------|
| `OIDC_ISSUER` | Identity provider URL (Keycloak, Auth0, Zitadel, etc.) |
| `OIDC_CLIENT_ID` | OIDC client ID |
| `OIDC_CLIENT_SECRET` | OIDC client secret |
| `OIDC_REDIRECT_URL` | Callback URL — must be `http(s)://<host>/api/v1/auth/callback` |
| `OIDC_RESOURCE_ID` | Optional audience claim to verify |

OIDC state (CSRF token, PKCE verifier, nonce) is stored in a short-lived encrypted cookie — no database session table required.

### Jellyfin

| Variable | Description |
|----------|-------------|
| `JELLYFIN_BASE_URL` | Base URL of your Jellyfin instance, e.g. `http://192.168.1.10:8096` |
| `JELLYFIN_API_KEY` | API key — Jellyfin Dashboard → API Keys |
| `JELLYFIN_USER_ID` | User ID used for library browsing |

If Jellyfin variables are not set, the server starts normally but schedule generation endpoints return an error. Channel CRUD and auth still work.

### CORS & Production

| Variable | Default | Description |
|----------|---------|-------------|
| `CORS_ALLOWED_ORIGINS` | `http://localhost:5173` | Comma-separated allowed origins |
| `PRODUCTION` | `false` | Enforces minimum secret lengths when `true` |
## Feature Flags

```toml
# api/Cargo.toml defaults
default = ["sqlite", "auth-jwt", "jellyfin"]
```

| Feature | Description |
|---------|-------------|
| `sqlite` | SQLite database (default) |
| `postgres` | PostgreSQL database |
| `auth-jwt` | JWT Bearer token authentication |
| `auth-oidc` | OpenID Connect integration |
| `jellyfin` | Jellyfin media provider adapter |

## API Reference

All endpoints are under `/api/v1/`. Endpoints marked **Bearer** require an `Authorization: Bearer <token>` header.

### Authentication

| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `POST` | `/auth/register` | — | Register with email + password → JWT |
| `POST` | `/auth/login` | — | Login with email + password → JWT |
| `POST` | `/auth/logout` | — | Returns 200; client discards the token |
| `GET` | `/auth/me` | Bearer | Current user info |
| `POST` | `/auth/token` | Bearer | Issue a fresh JWT (`auth-jwt`) |
| `GET` | `/auth/login/oidc` | — | Start OIDC flow (`auth-oidc`) |
| `GET` | `/auth/callback` | — | Complete OIDC flow → JWT (`auth-oidc`) |

### Channels

| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/channels` | Bearer | List all channels owned by the current user |
| `POST` | `/channels` | Bearer | Create a channel |
| `GET` | `/channels/:id` | Bearer | Get a channel |
| `PUT` | `/channels/:id` | Bearer | Update a channel — only provided fields change |
| `DELETE` | `/channels/:id` | Bearer | Delete a channel and all its schedules |

### Schedule & Broadcast

| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `POST` | `/channels/:id/schedule` | Bearer | Generate a fresh 48-hour schedule (replaces existing) |
| `GET` | `/channels/:id/schedule` | Bearer | Get the currently active 48-hour schedule |
| `GET` | `/channels/:id/now` | Bearer | What is playing right now — `204` means no-signal (gap between blocks) |
| `GET` | `/channels/:id/epg?from=&until=` | Bearer | EPG slots overlapping a time window (RFC3339 datetimes) |
| `GET` | `/channels/:id/stream` | Bearer | `307` redirect to the current item's stream URL — `204` if no-signal |

### Other

| Method | Path | Auth | Description |
|--------|------|------|-------------|
| `GET` | `/config` | — | Server configuration flags |
## Examples

### Register and get a token

```bash
curl -s -X POST http://localhost:3000/api/v1/auth/register \
  -H "Content-Type: application/json" \
  -d '{"email": "you@example.com", "password": "yourpassword"}' | jq .

# → {"access_token": "eyJ...", "token_type": "Bearer", "expires_in": 86400}
TOKEN="eyJ..."
```

### Create a channel

```bash
curl -s -X POST http://localhost:3000/api/v1/channels \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "90s Sitcom Network",
    "description": "Nothing but classic sitcoms, all day",
    "timezone": "America/New_York"
  }' | jq .id
```

### Design a schedule

The `schedule_config` is the shareable programming template. Each block describes a time window and how to fill it. The channel's `timezone` determines when each block starts.

```bash
curl -s -X PUT http://localhost:3000/api/v1/channels/<id> \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "schedule_config": {
      "blocks": [
        {
          "id": "00000000-0000-0000-0000-000000000001",
          "name": "Evening Sitcoms",
          "start_time": "20:00:00",
          "duration_mins": 120,
          "content": {
            "type": "algorithmic",
            "filter": {
              "content_type": "episode",
              "genres": ["Comedy"],
              "decade": 1990,
              "min_duration_secs": 1200,
              "max_duration_secs": 1800,
              "collections": [],
              "tags": []
            },
            "strategy": "random"
          }
        },
        {
          "id": "00000000-0000-0000-0000-000000000002",
          "name": "Late Night Movie",
          "start_time": "22:00:00",
          "duration_mins": 150,
          "content": {
            "type": "algorithmic",
            "filter": {
              "content_type": "movie",
              "genres": ["Comedy"],
              "decade": null,
              "min_duration_secs": null,
              "max_duration_secs": null,
              "collections": [],
              "tags": []
            },
            "strategy": "best_fit"
          }
        }
      ]
    }
  }' | jq .
```

Blocks that don't cover the full 24 hours leave gaps — the client renders those as a no-signal screen.

For a manual block, use `"type": "manual"` with an `"items"` array of Jellyfin item IDs:

```json
{
  "type": "manual",
  "items": ["abc123", "def456", "ghi789"]
}
```

### Generate and tune in

```bash
# Generate the 48-hour schedule from now
curl -s -X POST http://localhost:3000/api/v1/channels/<id>/schedule \
  -H "Authorization: Bearer $TOKEN" | jq '{generation: .generation, slots: (.slots | length)}'

# What is playing right now?
curl -s http://localhost:3000/api/v1/channels/<id>/now \
  -H "Authorization: Bearer $TOKEN" \
  | jq '{title: .slot.item.title, offset_secs: .offset_secs}'
# → 204 No Content if the channel is in a gap between blocks

# EPG for the next 4 hours (RFC3339 datetimes)
FROM=$(date -u +%Y-%m-%dT%H:%M:%SZ)
UNTIL=$(date -u -d '+4 hours' +%Y-%m-%dT%H:%M:%SZ)   # Linux
# UNTIL=$(date -u -v+4H +%Y-%m-%dT%H:%M:%SZ)         # macOS
curl -s "http://localhost:3000/api/v1/channels/<id>/epg?from=$FROM&until=$UNTIL" \
  -H "Authorization: Bearer $TOKEN" \
  | jq '[.[] | {title: .item.title, start: .start_at, end: .end_at}]'

# Get the stream redirect for the current item (feed into your player)
curl -s -I http://localhost:3000/api/v1/channels/<id>/stream \
  -H "Authorization: Bearer $TOKEN"
# → HTTP/1.1 307 Temporary Redirect
# → Location: http://jellyfin:8096/Videos/<id>/stream?static=true&api_key=...
```
## Key Domain Concepts

### Channel

A named broadcast channel owned by a user. Holds a `schedule_config` (the programming template) and a `recycle_policy`.

### ScheduleConfig

The shareable programming template: an ordered list of `ProgrammingBlock`s. Channels do not need to cover all 24 hours — gaps are valid and produce a no-signal state.

### ProgrammingBlock

Each block occupies a time-of-day window and is either:

- **Algorithmic** — the engine selects items from the media provider using a `MediaFilter` and a `FillStrategy`
- **Manual** — the user hand-picks specific items by ID in a specific order

### MediaFilter

Provider-agnostic filter used by algorithmic blocks:

| Field | Description |
|-------|-------------|
| `content_type` | `"movie"`, `"episode"`, or `"short"` |
| `genres` | List of genre strings, e.g. `["Comedy", "Action"]` |
| `decade` | Starting year of a decade — `1990` means 1990–1999 |
| `tags` | Provider tag strings |
| `min_duration_secs` / `max_duration_secs` | Duration bounds for item selection |
| `collections` | Abstract groupings (Jellyfin library IDs, Plex sections, folder paths, etc.) |

### FillStrategy

How an algorithmic block fills its time budget, with a rough sketch of the `best_fit` idea below:

| Value | Behaviour |
|-------|-----------|
| `best_fit` | Greedy bin-packing — picks the longest item that still fits, minimises dead air |
| `sequential` | Items in provider order — good for series where episode sequence matters |
| `random` | Shuffle pool then fill — good for variety channels |
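
The following is only an illustration of the `best_fit` intuition, not the actual `ScheduleEngineService` code; item names and durations are made up:

```rust
/// Greedy "best fit" pass over a pool of (title, duration in seconds) pairs.
fn best_fit(mut pool: Vec<(&str, u32)>, mut budget_secs: u32) -> Vec<&str> {
    // Longest items first, so each pick is the longest one that still fits.
    pool.sort_by(|a, b| b.1.cmp(&a.1));
    let mut picked = Vec::new();
    let mut i = 0;
    while i < pool.len() {
        if pool[i].1 <= budget_secs {
            budget_secs -= pool[i].1;
            picked.push(pool.remove(i).0);
            // Restart from the longest remaining item.
            i = 0;
        } else {
            i += 1;
        }
    }
    picked
}

fn main() {
    let pool = vec![("Pilot", 1500), ("Feature", 5400), ("Short", 600)];
    // Two hours of budget: the feature and the episode fit, the short no longer does.
    println!("{:?}", best_fit(pool, 7200));
}
```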
### RecyclePolicy

Controls when previously aired items become eligible again:

| Field | Description |
|-------|-------------|
| `cooldown_days` | Don't replay an item within this many calendar days |
| `cooldown_generations` | Don't replay within this many schedule generations |
| `min_available_ratio` | Always keep at least this fraction (0.0–1.0) of the matching pool selectable, even if their cooldown hasn't expired. Prevents small libraries from running dry. |
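
As a simplified sketch of how these fields interact (illustrative only, not the engine's actual selection code): the cooldowns filter the pool first, and `min_available_ratio` acts as a floor that keeps part of the pool selectable even when cooldowns would exclude it.

```rust
/// Each entry is (days since last aired, generations since last aired),
/// with u32::MAX meaning "never aired". Returns how many items stay selectable.
fn selectable(pool: &[(u32, u32)], cooldown_days: u32, cooldown_generations: u32,
              min_available_ratio: f64) -> usize {
    let fresh = pool.iter()
        .filter(|(days, gens)| *days >= cooldown_days && *gens >= cooldown_generations)
        .count();
    // Floor: never let the selectable pool drop below this fraction of all matches.
    let floor = (pool.len() as f64 * min_available_ratio).ceil() as usize;
    fresh.max(floor).min(pool.len())
}

fn main() {
    // 10 matching items, 8 of them aired yesterday (cooldown not yet expired).
    let pool: Vec<(u32, u32)> =
        (0..10).map(|i| if i < 8 { (1, 1) } else { (u32::MAX, u32::MAX) }).collect();
    // Only 2 are "fresh", but a 0.5 ratio keeps at least 5 selectable.
    println!("{}", selectable(&pool, 7, 2, 0.5)); // → 5
}
```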
### No-signal state

`GET /channels/:id/now` and `GET /channels/:id/stream` return `204 No Content` when the current time falls in a gap between blocks. The frontend should display static / noise in this case — matching the broadcast TV experience.

### GeneratedSchedule

A resolved 48-hour program: concrete UTC wall-clock timestamps for every scheduled item. Generated on demand via `POST /channels/:id/schedule`. The generation counter increments on each regeneration and drives the recycle policy.

## Development

### Run tests

```bash
cargo test              # all crates
cargo test -p domain    # domain unit tests only
```

### Run migrations manually

```bash
sqlx migrate run --source migrations_sqlite     # SQLite
sqlx migrate run --source migrations_postgres   # PostgreSQL
```

### Build variants

```bash
# Default: SQLite + JWT + Jellyfin
cargo build

# Add OIDC
cargo build -F sqlite,auth-jwt,auth-oidc,jellyfin

# PostgreSQL variant
cargo build --no-default-features -F postgres,auth-jwt,jellyfin
```

### Docker

```bash
docker compose up
```

See [`compose.yml`](compose.yml) for the configuration.

## Project Structure

```
k-tv-backend/
├── domain/src/
│   ├── entities.rs          # Channel, ProgrammingBlock, GeneratedSchedule,
│   │                        #   ScheduledSlot, MediaItem, PlaybackRecord, User, ...
│   ├── value_objects.rs     # MediaFilter, FillStrategy, RecyclePolicy,
│   │                        #   MediaItemId, ContentType, Email, ...
│   ├── ports.rs             # IMediaProvider trait
│   ├── repositories.rs      # ChannelRepository, ScheduleRepository, UserRepository
│   ├── services.rs          # ChannelService, ScheduleEngineService, UserService
│   └── errors.rs            # DomainError
│
├── infra/src/
│   ├── channel_repository.rs    # SQLite + Postgres ChannelRepository adapters
│   ├── schedule_repository.rs   # SQLite + Postgres ScheduleRepository adapters
│   ├── user_repository.rs       # SQLite + Postgres UserRepository adapters
│   ├── jellyfin.rs              # Jellyfin IMediaProvider adapter
│   ├── auth/
│   │   ├── jwt.rs               # JWT create + validate
│   │   └── oidc.rs              # OIDC flow (stateless cookie state)
│   ├── db.rs                    # Connection pool
│   └── factory.rs               # Repository builder functions
│
├── api/src/
│   ├── routes/
│   │   ├── auth.rs          # /auth/* endpoints
│   │   ├── channels.rs      # /channels/* endpoints (CRUD, EPG, broadcast)
│   │   └── config.rs        # /config endpoint
│   ├── config.rs            # Config::from_env()
│   ├── state.rs             # AppState
│   ├── extractors.rs        # CurrentUser (JWT Bearer extractor)
│   ├── error.rs             # ApiError → HTTP status mapping
│   ├── dto.rs               # All request + response types
│   └── main.rs              # Startup wiring
│
├── migrations_sqlite/
├── migrations_postgres/
├── .env.example
└── compose.yml
```

## License

MIT

51  k-tv-backend/api/Cargo.toml  Normal file
@@ -0,0 +1,51 @@
[package]
name = "api"
version = "0.1.0"
edition = "2024"
default-run = "api"

[features]
default = ["sqlite", "auth-jwt", "jellyfin"]
sqlite = ["infra/sqlite"]
postgres = ["infra/postgres"]
auth-oidc = ["infra/auth-oidc"]
auth-jwt = ["infra/auth-jwt"]
jellyfin = ["infra/jellyfin"]

[dependencies]
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
    "logging",
    "db-sqlx",
    "sqlite",
    "http",
] }
domain = { path = "../domain" }
infra = { path = "../infra", default-features = false, features = ["sqlite"] }

# Web framework
axum = { version = "0.8.8", features = ["macros"] }
axum-extra = { version = "0.10", features = ["cookie-private", "cookie-key-expansion"] }
tower = "0.5.2"
tower-http = { version = "0.6.2", features = ["cors", "trace"] }

# Async runtime
tokio = { version = "1.48.0", features = ["full"] }

# Serialization
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0"

# Error handling
thiserror = "2.0.17"
anyhow = "1.0"

# Utilities
chrono = { version = "0.4.42", features = ["serde"] }
uuid = { version = "1.19.0", features = ["v4", "serde"] }

# Logging
tracing = "0.1"

async-trait = "0.1"
dotenvy = "0.15.7"
time = "0.3"

49  k-tv-backend/api/Cargo.toml.template  Normal file
@@ -0,0 +1,49 @@
[package]
name = "k-tv"
version = "0.1.0"
edition = "2024"
default-run = "k-tv"

[features]
default = ["sqlite", "auth-jwt"]
sqlite = ["infra/sqlite"]
postgres = ["infra/postgres"]
auth-oidc = ["infra/auth-oidc"]
auth-jwt = ["infra/auth-jwt"]

[dependencies]
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
    "logging",
    "db-sqlx",
    "sqlite",
    "http",
] }
domain = { path = "../domain" }
infra = { path = "../infra", default-features = false, features = ["sqlite"] }

# Web framework
axum = { version = "0.8.8", features = ["macros"] }
axum-extra = { version = "0.10", features = ["cookie-private", "cookie-key-expansion"] }
tower = "0.5.2"
tower-http = { version = "0.6.2", features = ["cors", "trace"] }

# Async runtime
tokio = { version = "1.48.0", features = ["full"] }

# Serialization
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0"

# Error handling
thiserror = "2.0.17"
anyhow = "1.0"

# Utilities
chrono = { version = "0.4.42", features = ["serde"] }
uuid = { version = "1.19.0", features = ["v4", "serde"] }

# Logging
tracing = "0.1"

dotenvy = "0.15.7"
time = "0.3"

131  k-tv-backend/api/src/config.rs  Normal file
@@ -0,0 +1,131 @@
//! Application Configuration
//!
//! Loads configuration from environment variables.

use std::env;

/// Application configuration loaded from environment variables
#[derive(Debug, Clone)]
pub struct Config {
    pub database_url: String,
    pub cookie_secret: String,
    pub cors_allowed_origins: Vec<String>,
    pub port: u16,
    pub host: String,
    pub secure_cookie: bool,
    pub db_max_connections: u32,
    pub db_min_connections: u32,

    // OIDC configuration
    pub oidc_issuer: Option<String>,
    pub oidc_client_id: Option<String>,
    pub oidc_client_secret: Option<String>,
    pub oidc_redirect_url: Option<String>,
    pub oidc_resource_id: Option<String>,

    // JWT configuration
    pub jwt_secret: Option<String>,
    pub jwt_issuer: Option<String>,
    pub jwt_audience: Option<String>,
    pub jwt_expiry_hours: u64,

    /// Whether the application is running in production mode
    pub is_production: bool,

    // Jellyfin media provider
    pub jellyfin_base_url: Option<String>,
    pub jellyfin_api_key: Option<String>,
    pub jellyfin_user_id: Option<String>,
}

impl Config {
    pub fn from_env() -> Self {
        let _ = dotenvy::dotenv();

        let host = env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
        let port = env::var("PORT")
            .ok()
            .and_then(|p| p.parse().ok())
            .unwrap_or(3000);

        let database_url =
            env::var("DATABASE_URL").unwrap_or_else(|_| "sqlite:data.db?mode=rwc".to_string());

        // Cookie secret for PrivateCookieJar (OIDC state encryption).
        // Must be at least 64 bytes in production.
        let cookie_secret = env::var("COOKIE_SECRET").unwrap_or_else(|_| {
            "k-template-cookie-secret-key-must-be-at-least-64-bytes-long!!".to_string()
        });

        let cors_origins_str = env::var("CORS_ALLOWED_ORIGINS")
            .unwrap_or_else(|_| "http://localhost:5173".to_string());

        let cors_allowed_origins = cors_origins_str
            .split(',')
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty())
            .collect();

        let secure_cookie = env::var("SECURE_COOKIE")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(false);

        let db_max_connections = env::var("DB_MAX_CONNECTIONS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(5);

        let db_min_connections = env::var("DB_MIN_CONNECTIONS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(1);

        let oidc_issuer = env::var("OIDC_ISSUER").ok();
        let oidc_client_id = env::var("OIDC_CLIENT_ID").ok();
        let oidc_client_secret = env::var("OIDC_CLIENT_SECRET").ok();
        let oidc_redirect_url = env::var("OIDC_REDIRECT_URL").ok();
        let oidc_resource_id = env::var("OIDC_RESOURCE_ID").ok();

        let jwt_secret = env::var("JWT_SECRET").ok();
        let jwt_issuer = env::var("JWT_ISSUER").ok();
        let jwt_audience = env::var("JWT_AUDIENCE").ok();
        let jwt_expiry_hours = env::var("JWT_EXPIRY_HOURS")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(24);

        let is_production = env::var("PRODUCTION")
            .or_else(|_| env::var("RUST_ENV"))
            .map(|v| v.to_lowercase() == "production" || v == "1" || v == "true")
            .unwrap_or(false);

        let jellyfin_base_url = env::var("JELLYFIN_BASE_URL").ok();
        let jellyfin_api_key = env::var("JELLYFIN_API_KEY").ok();
        let jellyfin_user_id = env::var("JELLYFIN_USER_ID").ok();

        Self {
            host,
            port,
            database_url,
            cookie_secret,
            cors_allowed_origins,
            secure_cookie,
            db_max_connections,
            db_min_connections,
            oidc_issuer,
            oidc_client_id,
            oidc_client_secret,
            oidc_redirect_url,
            oidc_resource_id,
            jwt_secret,
            jwt_issuer,
            jwt_audience,
            jwt_expiry_hours,
            is_production,
            jellyfin_base_url,
            jellyfin_api_key,
            jellyfin_user_id,
        }
    }
}

184  k-tv-backend/api/src/dto.rs  Normal file
@@ -0,0 +1,184 @@
//! Request and Response DTOs
//!
//! Data Transfer Objects for the API.
//! Uses domain newtypes for validation instead of the validator crate.

use chrono::{DateTime, Utc};
use domain::{Email, Password};
use serde::{Deserialize, Serialize};
use uuid::Uuid;

/// Login request with validated email and password newtypes
#[derive(Debug, Deserialize)]
pub struct LoginRequest {
    /// Email is validated on deserialization
    pub email: Email,
    /// Password is validated on deserialization (min 8 chars)
    pub password: Password,
}

/// Register request with validated email and password newtypes
#[derive(Debug, Deserialize)]
pub struct RegisterRequest {
    /// Email is validated on deserialization
    pub email: Email,
    /// Password is validated on deserialization (min 8 chars)
    pub password: Password,
}

/// User response DTO
#[derive(Debug, Serialize)]
pub struct UserResponse {
    pub id: Uuid,
    pub email: String,
    pub created_at: DateTime<Utc>,
}

/// JWT token response
#[derive(Debug, Serialize)]
pub struct TokenResponse {
    pub access_token: String,
    pub token_type: String,
    pub expires_in: u64,
}

/// System configuration response
#[derive(Debug, Serialize)]
pub struct ConfigResponse {
    pub allow_registration: bool,
}

// ============================================================================
// Channel DTOs
// ============================================================================

#[derive(Debug, Deserialize)]
pub struct CreateChannelRequest {
    pub name: String,
    pub description: Option<String>,
    /// IANA timezone, e.g. "UTC" or "America/New_York"
    pub timezone: String,
}

/// All fields are optional — only provided fields are updated.
#[derive(Debug, Deserialize)]
pub struct UpdateChannelRequest {
    pub name: Option<String>,
    pub description: Option<String>,
    pub timezone: Option<String>,
    /// Replace the entire schedule config (template import/edit)
    pub schedule_config: Option<domain::ScheduleConfig>,
    pub recycle_policy: Option<domain::RecyclePolicy>,
}

#[derive(Debug, Serialize)]
pub struct ChannelResponse {
    pub id: Uuid,
    pub owner_id: Uuid,
    pub name: String,
    pub description: Option<String>,
    pub timezone: String,
    pub schedule_config: domain::ScheduleConfig,
    pub recycle_policy: domain::RecyclePolicy,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

impl From<domain::Channel> for ChannelResponse {
    fn from(c: domain::Channel) -> Self {
        Self {
            id: c.id,
            owner_id: c.owner_id,
            name: c.name,
            description: c.description,
            timezone: c.timezone,
            schedule_config: c.schedule_config,
            recycle_policy: c.recycle_policy,
            created_at: c.created_at,
            updated_at: c.updated_at,
        }
    }
}

// ============================================================================
// EPG / playback DTOs
// ============================================================================

#[derive(Debug, Serialize)]
pub struct MediaItemResponse {
    pub id: String,
    pub title: String,
    pub content_type: domain::ContentType,
    pub duration_secs: u32,
    pub genres: Vec<String>,
    pub year: Option<u16>,
    pub tags: Vec<String>,
}

impl From<domain::MediaItem> for MediaItemResponse {
    fn from(i: domain::MediaItem) -> Self {
        Self {
            id: i.id.into_inner(),
            title: i.title,
            content_type: i.content_type,
            duration_secs: i.duration_secs,
            genres: i.genres,
            year: i.year,
            tags: i.tags,
        }
    }
}

#[derive(Debug, Serialize)]
pub struct ScheduledSlotResponse {
    pub id: Uuid,
    pub start_at: DateTime<Utc>,
    pub end_at: DateTime<Utc>,
    pub item: MediaItemResponse,
    pub source_block_id: Uuid,
}

impl From<domain::ScheduledSlot> for ScheduledSlotResponse {
    fn from(s: domain::ScheduledSlot) -> Self {
        Self {
            id: s.id,
            start_at: s.start_at,
            end_at: s.end_at,
            item: s.item.into(),
            source_block_id: s.source_block_id,
        }
    }
}

/// What is currently playing on a channel.
/// A 204 No Content response is returned instead when there is no active slot (no-signal).
#[derive(Debug, Serialize)]
pub struct CurrentBroadcastResponse {
    pub slot: ScheduledSlotResponse,
    /// Seconds elapsed since the start of the current item — use this as the
    /// initial seek position for the player.
    pub offset_secs: u32,
}

#[derive(Debug, Serialize)]
pub struct ScheduleResponse {
    pub id: Uuid,
    pub channel_id: Uuid,
    pub valid_from: DateTime<Utc>,
    pub valid_until: DateTime<Utc>,
    pub generation: u32,
    pub slots: Vec<ScheduledSlotResponse>,
}

impl From<domain::GeneratedSchedule> for ScheduleResponse {
    fn from(s: domain::GeneratedSchedule) -> Self {
        Self {
            id: s.id,
            channel_id: s.channel_id,
            valid_from: s.valid_from,
            valid_until: s.valid_until,
            generation: s.generation,
            slots: s.slots.into_iter().map(Into::into).collect(),
        }
    }
}

130  k-tv-backend/api/src/error.rs  Normal file
@@ -0,0 +1,130 @@
//! API error handling
//!
//! Maps domain errors to HTTP responses

use axum::{
    Json,
    http::StatusCode,
    response::{IntoResponse, Response},
};
use serde::Serialize;
use thiserror::Error;

use domain::DomainError;

/// API-level errors
#[derive(Debug, Error)]
pub enum ApiError {
    #[error("{0}")]
    Domain(#[from] DomainError),

    #[error("Validation error: {0}")]
    Validation(String),

    #[error("Internal server error")]
    Internal(String),

    #[error("Forbidden: {0}")]
    Forbidden(String),

    #[error("Unauthorized: {0}")]
    Unauthorized(String),
}

/// Error response body
#[derive(Debug, Serialize)]
pub struct ErrorResponse {
    pub error: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option<String>,
}

impl IntoResponse for ApiError {
    fn into_response(self) -> Response {
        let (status, error_response) = match &self {
            ApiError::Domain(domain_error) => {
                let status = match domain_error {
                    DomainError::UserNotFound(_)
                    | DomainError::ChannelNotFound(_)
                    | DomainError::NoActiveSchedule(_) => StatusCode::NOT_FOUND,

                    DomainError::UserAlreadyExists(_) => StatusCode::CONFLICT,

                    DomainError::ValidationError(_) | DomainError::TimezoneError(_) => {
                        StatusCode::BAD_REQUEST
                    }

                    // Unauthenticated = not logged in → 401
                    DomainError::Unauthenticated(_) => StatusCode::UNAUTHORIZED,

                    // Forbidden = not allowed to perform action → 403
                    DomainError::Forbidden(_) => StatusCode::FORBIDDEN,

                    DomainError::RepositoryError(_) | DomainError::InfrastructureError(_) => {
                        StatusCode::INTERNAL_SERVER_ERROR
                    }

                    _ => StatusCode::INTERNAL_SERVER_ERROR,
                };

                (
                    status,
                    ErrorResponse {
                        error: domain_error.to_string(),
                        details: None,
                    },
                )
            }

            ApiError::Validation(msg) => (
                StatusCode::BAD_REQUEST,
                ErrorResponse {
                    error: "Validation error".to_string(),
                    details: Some(msg.clone()),
                },
            ),

            ApiError::Internal(msg) => {
                tracing::error!("Internal error: {}", msg);
                (
                    StatusCode::INTERNAL_SERVER_ERROR,
                    ErrorResponse {
                        error: "Internal server error".to_string(),
                        details: None,
                    },
                )
            }

            ApiError::Forbidden(msg) => (
                StatusCode::FORBIDDEN,
                ErrorResponse {
                    error: "Forbidden".to_string(),
                    details: Some(msg.clone()),
                },
            ),

            ApiError::Unauthorized(msg) => (
                StatusCode::UNAUTHORIZED,
                ErrorResponse {
                    error: "Unauthorized".to_string(),
                    details: Some(msg.clone()),
                },
            ),
        };

        (status, Json(error_response)).into_response()
    }
}

impl ApiError {
    pub fn validation(msg: impl Into<String>) -> Self {
        Self::Validation(msg.into())
    }

    pub fn internal(msg: impl Into<String>) -> Self {
        Self::Internal(msg.into())
    }
}

/// Result type alias for API handlers
pub type ApiResult<T> = Result<T, ApiError>;

89  k-tv-backend/api/src/extractors.rs  Normal file
@@ -0,0 +1,89 @@
//! Auth extractors for API handlers
//!
//! Provides the `CurrentUser` extractor that validates JWT Bearer tokens.

use axum::{extract::FromRequestParts, http::request::Parts};
use domain::User;

use crate::error::ApiError;
use crate::state::AppState;

/// Extracted current user from the request.
///
/// Validates a JWT Bearer token from the `Authorization` header.
pub struct CurrentUser(pub User);

impl FromRequestParts<AppState> for CurrentUser {
    type Rejection = ApiError;

    async fn from_request_parts(
        parts: &mut Parts,
        state: &AppState,
    ) -> Result<Self, Self::Rejection> {
        #[cfg(feature = "auth-jwt")]
        {
            return match try_jwt_auth(parts, state).await {
                Ok(user) => Ok(CurrentUser(user)),
                Err(e) => Err(e),
            };
        }

        #[cfg(not(feature = "auth-jwt"))]
        {
            let _ = (parts, state);
            Err(ApiError::Unauthorized(
                "No authentication backend configured".to_string(),
            ))
        }
    }
}

/// Authenticate using JWT Bearer token
#[cfg(feature = "auth-jwt")]
async fn try_jwt_auth(parts: &mut Parts, state: &AppState) -> Result<User, ApiError> {
    use axum::http::header::AUTHORIZATION;

    let auth_header = parts
        .headers
        .get(AUTHORIZATION)
        .ok_or_else(|| ApiError::Unauthorized("Missing Authorization header".to_string()))?;

    let auth_str = auth_header
        .to_str()
        .map_err(|_| ApiError::Unauthorized("Invalid Authorization header encoding".to_string()))?;

    let token = auth_str.strip_prefix("Bearer ").ok_or_else(|| {
        ApiError::Unauthorized("Authorization header must use Bearer scheme".to_string())
    })?;

    let validator = state
        .jwt_validator
        .as_ref()
        .ok_or_else(|| ApiError::Internal("JWT validator not configured".to_string()))?;

    let claims = validator.validate_token(token).map_err(|e| {
        tracing::debug!("JWT validation failed: {:?}", e);
        match e {
            infra::auth::jwt::JwtError::Expired => {
                ApiError::Unauthorized("Token expired".to_string())
            }
            infra::auth::jwt::JwtError::InvalidFormat => {
                ApiError::Unauthorized("Invalid token format".to_string())
            }
            _ => ApiError::Unauthorized("Token validation failed".to_string()),
        }
    })?;

    let user_id: uuid::Uuid = claims
        .sub
        .parse()
        .map_err(|_| ApiError::Unauthorized("Invalid user ID in token".to_string()))?;

    let user = state
        .user_service
        .find_by_id(user_id)
        .await
        .map_err(|e| ApiError::Internal(format!("Failed to fetch user: {}", e)))?;

    Ok(user)
}

168  k-tv-backend/api/src/main.rs  Normal file
@@ -0,0 +1,168 @@
//! API Server Entry Point
//!
//! Configures and starts the HTTP server with JWT-based authentication.

use std::net::SocketAddr;
use std::time::Duration as StdDuration;

use axum::Router;
use std::sync::Arc;

use domain::{ChannelService, IMediaProvider, ScheduleEngineService, UserService};
use infra::factory::{build_channel_repository, build_schedule_repository, build_user_repository};
use infra::run_migrations;
use k_core::http::server::{ServerConfig, apply_standard_middleware};
use k_core::logging;
use tokio::net::TcpListener;
use tracing::info;

mod config;
mod dto;
mod error;
mod extractors;
mod routes;
mod state;

use crate::config::Config;
use crate::state::AppState;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    logging::init("api");

    let config = Config::from_env();

    info!("Starting server on {}:{}", config.host, config.port);

    // Setup database
    tracing::info!("Connecting to database: {}", config.database_url);

    #[cfg(all(feature = "sqlite", not(feature = "postgres")))]
    let db_type = k_core::db::DbType::Sqlite;

    #[cfg(all(feature = "postgres", not(feature = "sqlite")))]
    let db_type = k_core::db::DbType::Postgres;

    // Both features enabled: fall back to URL inspection at runtime
    #[cfg(all(feature = "sqlite", feature = "postgres"))]
    let db_type = if config.database_url.starts_with("postgres") {
        k_core::db::DbType::Postgres
    } else {
        k_core::db::DbType::Sqlite
    };

    let db_config = k_core::db::DatabaseConfig {
        db_type,
        url: config.database_url.clone(),
        max_connections: config.db_max_connections,
        min_connections: config.db_min_connections,
        acquire_timeout: StdDuration::from_secs(30),
    };

    let db_pool = k_core::db::connect(&db_config).await?;
    run_migrations(&db_pool).await?;

    let user_repo = build_user_repository(&db_pool).await?;
    let channel_repo = build_channel_repository(&db_pool).await?;
    let schedule_repo = build_schedule_repository(&db_pool).await?;

    let user_service = UserService::new(user_repo);
    let channel_service = ChannelService::new(channel_repo.clone());

    // Build media provider — Jellyfin if configured, no-op fallback otherwise.
    let media_provider: Arc<dyn IMediaProvider> = build_media_provider(&config);

    let schedule_engine = ScheduleEngineService::new(media_provider, channel_repo, schedule_repo);

    let state = AppState::new(user_service, channel_service, schedule_engine, config.clone()).await?;

    let server_config = ServerConfig {
        cors_origins: config.cors_allowed_origins.clone(),
    };

    let app = Router::new()
        .nest("/api/v1", routes::api_v1_router())
        .with_state(state);

    let app = apply_standard_middleware(app, &server_config);

    let addr: SocketAddr = format!("{}:{}", config.host, config.port).parse()?;
    let listener = TcpListener::bind(addr).await?;

    tracing::info!("🚀 API server running at http://{}", addr);
    tracing::info!("🔒 Authentication mode: JWT (Bearer token)");

    #[cfg(feature = "auth-jwt")]
    tracing::info!("  ✓ JWT auth enabled");

    #[cfg(feature = "auth-oidc")]
    tracing::info!("  ✓ OIDC integration enabled (stateless cookie state)");

    tracing::info!("📝 API endpoints available at /api/v1/...");

    axum::serve(listener, app).await?;

    Ok(())
}

/// Build the media provider from config.
/// Falls back to a no-op provider that returns an informative error when
/// Jellyfin env vars are not set, so other API features still work in dev.
fn build_media_provider(config: &Config) -> Arc<dyn IMediaProvider> {
    #[cfg(feature = "jellyfin")]
    if let (Some(base_url), Some(api_key), Some(user_id)) = (
        &config.jellyfin_base_url,
        &config.jellyfin_api_key,
        &config.jellyfin_user_id,
    ) {
        tracing::info!("Media provider: Jellyfin at {}", base_url);
        return Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
            base_url: base_url.clone(),
            api_key: api_key.clone(),
            user_id: user_id.clone(),
        }));
    }

    tracing::warn!(
        "No media provider configured. Set JELLYFIN_BASE_URL, JELLYFIN_API_KEY, \
         and JELLYFIN_USER_ID to enable schedule generation."
    );
    Arc::new(NoopMediaProvider)
}

/// Stand-in provider used when no real media source is configured.
/// Returns a descriptive error for every call so schedule endpoints fail
/// gracefully rather than panicking at startup.
struct NoopMediaProvider;

#[async_trait::async_trait]
impl IMediaProvider for NoopMediaProvider {
    async fn fetch_items(
        &self,
        _: &domain::MediaFilter,
    ) -> domain::DomainResult<Vec<domain::MediaItem>> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured. Set JELLYFIN_BASE_URL, JELLYFIN_API_KEY, \
             and JELLYFIN_USER_ID."
                .into(),
        ))
    }

    async fn fetch_by_id(
        &self,
        _: &domain::MediaItemId,
    ) -> domain::DomainResult<Option<domain::MediaItem>> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }

    async fn get_stream_url(
        &self,
        _: &domain::MediaItemId,
    ) -> domain::DomainResult<String> {
        Err(domain::DomainError::InfrastructureError(
            "No media provider configured.".into(),
        ))
    }
}

253  k-tv-backend/api/src/routes/auth.rs  Normal file
@@ -0,0 +1,253 @@
//! Authentication routes
//!
//! Provides login, register, logout, token, and OIDC endpoints.
//! All authentication is JWT-based. OIDC state is stored in an encrypted cookie.

use axum::{
    Router,
    extract::{Json, State},
    http::StatusCode,
    response::IntoResponse,
    routing::{get, post},
};

use crate::{
    dto::{LoginRequest, RegisterRequest, TokenResponse, UserResponse},
    error::ApiError,
    extractors::CurrentUser,
    state::AppState,
};

pub fn router() -> Router<AppState> {
    let r = Router::new()
        .route("/login", post(login))
        .route("/register", post(register))
        .route("/logout", post(logout))
        .route("/me", get(me));

    #[cfg(feature = "auth-jwt")]
    let r = r.route("/token", post(get_token));

    #[cfg(feature = "auth-oidc")]
    let r = r
        .route("/login/oidc", get(oidc_login))
        .route("/callback", get(oidc_callback));

    r
}

/// Login with email + password → JWT token
async fn login(
    State(state): State<AppState>,
    Json(payload): Json<LoginRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let user = state
        .user_service
        .find_by_email(payload.email.as_ref())
        .await?
        .ok_or_else(|| ApiError::Unauthorized("Invalid credentials".to_string()))?;

    let hash = user
        .password_hash
        .as_deref()
        .ok_or_else(|| ApiError::Unauthorized("Invalid credentials".to_string()))?;

    if !infra::auth::verify_password(payload.password.as_ref(), hash) {
        return Err(ApiError::Unauthorized("Invalid credentials".to_string()));
    }

    let token = create_jwt(&user, &state)?;

    Ok((
        StatusCode::OK,
        Json(TokenResponse {
            access_token: token,
            token_type: "Bearer".to_string(),
            expires_in: state.config.jwt_expiry_hours * 3600,
        }),
    ))
}

/// Register a new local user → JWT token
async fn register(
    State(state): State<AppState>,
    Json(payload): Json<RegisterRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let password_hash = infra::auth::hash_password(payload.password.as_ref());

    let user = state
        .user_service
        .create_local(payload.email.as_ref(), &password_hash)
        .await?;

    let token = create_jwt(&user, &state)?;

    Ok((
        StatusCode::CREATED,
        Json(TokenResponse {
            access_token: token,
            token_type: "Bearer".to_string(),
            expires_in: state.config.jwt_expiry_hours * 3600,
        }),
    ))
}

/// Logout — JWT is stateless; instruct the client to drop the token
async fn logout() -> impl IntoResponse {
    StatusCode::OK
}

/// Get current user info from JWT
async fn me(CurrentUser(user): CurrentUser) -> Result<impl IntoResponse, ApiError> {
    Ok(Json(UserResponse {
        id: user.id,
        email: user.email.into_inner(),
        created_at: user.created_at,
    }))
}

/// Issue a new JWT for the currently authenticated user (OIDC→JWT exchange or token refresh)
#[cfg(feature = "auth-jwt")]
async fn get_token(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    let token = create_jwt(&user, &state)?;

    Ok(Json(TokenResponse {
        access_token: token,
        token_type: "Bearer".to_string(),
        expires_in: state.config.jwt_expiry_hours * 3600,
    }))
}

/// Helper: create JWT for a user
#[cfg(feature = "auth-jwt")]
fn create_jwt(user: &domain::User, state: &AppState) -> Result<String, ApiError> {
    let validator = state
        .jwt_validator
        .as_ref()
        .ok_or_else(|| ApiError::Internal("JWT not configured".to_string()))?;

    validator
        .create_token(user)
        .map_err(|e| ApiError::Internal(format!("Failed to create token: {}", e)))
}

#[cfg(not(feature = "auth-jwt"))]
fn create_jwt(_user: &domain::User, _state: &AppState) -> Result<String, ApiError> {
    Err(ApiError::Internal("JWT feature not enabled".to_string()))
}

// ============================================================================
// OIDC Routes
// ============================================================================

#[cfg(feature = "auth-oidc")]
#[derive(serde::Deserialize)]
struct CallbackParams {
    code: String,
    state: String,
}

/// Start OIDC login: generate authorization URL and store state in encrypted cookie
#[cfg(feature = "auth-oidc")]
async fn oidc_login(
    State(state): State<AppState>,
    jar: axum_extra::extract::PrivateCookieJar,
) -> Result<impl IntoResponse, ApiError> {
    use axum::http::header;
    use axum::response::Response;
    use axum_extra::extract::cookie::{Cookie, SameSite};

    let service = state
        .oidc_service
        .as_ref()
        .ok_or(ApiError::Internal("OIDC not configured".into()))?;

    let (auth_data, oidc_state) = service.get_authorization_url();

    let state_json = serde_json::to_string(&oidc_state)
        .map_err(|e| ApiError::Internal(format!("Failed to serialize OIDC state: {}", e)))?;

    let cookie = Cookie::build(("oidc_state", state_json))
        .max_age(time::Duration::minutes(5))
        .http_only(true)
        .same_site(SameSite::Lax)
        .secure(state.config.secure_cookie)
        .path("/")
        .build();

    let updated_jar = jar.add(cookie);

    let redirect = axum::response::Redirect::to(auth_data.url.as_str()).into_response();
    let (mut parts, body) = redirect.into_parts();
    parts.headers.insert(
        header::CACHE_CONTROL,
        "no-cache, no-store, must-revalidate".parse().unwrap(),
    );
    parts
        .headers
        .insert(header::PRAGMA, "no-cache".parse().unwrap());
    parts.headers.insert(header::EXPIRES, "0".parse().unwrap());

    Ok((updated_jar, Response::from_parts(parts, body)))
}

/// Handle OIDC callback: verify state cookie, complete exchange, issue JWT, clear cookie
#[cfg(feature = "auth-oidc")]
async fn oidc_callback(
    State(state): State<AppState>,
    jar: axum_extra::extract::PrivateCookieJar,
    axum::extract::Query(params): axum::extract::Query<CallbackParams>,
) -> Result<impl IntoResponse, ApiError> {
    use infra::auth::oidc::OidcState;

    let service = state
        .oidc_service
        .as_ref()
        .ok_or(ApiError::Internal("OIDC not configured".into()))?;

    // Read and decrypt OIDC state from cookie
    let cookie = jar
        .get("oidc_state")
        .ok_or(ApiError::Validation("Missing OIDC state cookie".into()))?;

    let oidc_state: OidcState = serde_json::from_str(cookie.value())
        .map_err(|_| ApiError::Validation("Invalid OIDC state cookie".into()))?;

    // Verify CSRF token
    if params.state != oidc_state.csrf_token.as_ref() {
        return Err(ApiError::Validation("Invalid CSRF token".into()));
    }

    // Complete OIDC exchange
    let oidc_user = service
        .resolve_callback(
            domain::AuthorizationCode::new(params.code),
            oidc_state.nonce,
            oidc_state.pkce_verifier,
        )
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;

    let user = state
        .user_service
        .find_or_create(&oidc_user.subject, &oidc_user.email)
        .await
        .map_err(|e| ApiError::Internal(e.to_string()))?;

    // Clear the OIDC state cookie
    let cleared_jar = jar.remove(axum_extra::extract::cookie::Cookie::from("oidc_state"));

    let token = create_jwt(&user, &state)?;

    Ok((
        cleared_jar,
        Json(TokenResponse {
            access_token: token,
            token_type: "Bearer".to_string(),
            expires_in: state.config.jwt_expiry_hours * 3600,
        }),
    ))
}

292  k-tv-backend/api/src/routes/channels.rs  Normal file
@@ -0,0 +1,292 @@
//! Channel routes
//!
//! CRUD for channels and broadcast/EPG endpoints.
//!
//! All routes require authentication (Bearer JWT).

use axum::{
    Json, Router,
    extract::{Path, Query, State},
    http::StatusCode,
    response::{IntoResponse, Redirect, Response},
    routing::{get, post},
};
use chrono::{DateTime, Utc};
use serde::Deserialize;
use uuid::Uuid;

use domain::{DomainError, ScheduleEngineService};

use crate::{
    dto::{
        ChannelResponse, CreateChannelRequest, CurrentBroadcastResponse, ScheduleResponse,
        ScheduledSlotResponse, UpdateChannelRequest,
    },
    error::ApiError,
    extractors::CurrentUser,
    state::AppState,
};

pub fn router() -> Router<AppState> {
    Router::new()
        .route("/", get(list_channels).post(create_channel))
        .route(
            "/{id}",
            get(get_channel).put(update_channel).delete(delete_channel),
        )
        .route(
            "/{id}/schedule",
            post(generate_schedule).get(get_active_schedule),
        )
        .route("/{id}/now", get(get_current_broadcast))
        .route("/{id}/epg", get(get_epg))
        .route("/{id}/stream", get(get_stream))
}

// ============================================================================
// Channel CRUD
// ============================================================================

async fn list_channels(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
) -> Result<impl IntoResponse, ApiError> {
    let channels = state.channel_service.find_by_owner(user.id).await?;
    let response: Vec<ChannelResponse> = channels.into_iter().map(Into::into).collect();
    Ok(Json(response))
}

async fn create_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Json(payload): Json<CreateChannelRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let mut channel = state
        .channel_service
        .create(user.id, &payload.name, &payload.timezone)
        .await?;

    if let Some(desc) = payload.description {
        channel.description = Some(desc);
        channel = state.channel_service.update(channel).await?;
    }

    Ok((StatusCode::CREATED, Json(ChannelResponse::from(channel))))
}

async fn get_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
    let channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;
    Ok(Json(ChannelResponse::from(channel)))
}

async fn update_channel(
    State(state): State<AppState>,
    CurrentUser(user): CurrentUser,
    Path(channel_id): Path<Uuid>,
    Json(payload): Json<UpdateChannelRequest>,
) -> Result<impl IntoResponse, ApiError> {
    let mut channel = state.channel_service.find_by_id(channel_id).await?;
    require_owner(&channel, user.id)?;

    if let Some(name) = payload.name {
        channel.name = name;
    }
    if let Some(desc) = payload.description {
        channel.description = Some(desc);
    }
    if let Some(tz) = payload.timezone {
        channel.timezone = tz;
    }
    if let Some(sc) = payload.schedule_config {
        channel.schedule_config = sc;
|
||||
}
|
||||
if let Some(rp) = payload.recycle_policy {
|
||||
channel.recycle_policy = rp;
|
||||
}
|
||||
channel.updated_at = Utc::now();
|
||||
|
||||
let channel = state.channel_service.update(channel).await?;
|
||||
Ok(Json(ChannelResponse::from(channel)))
|
||||
}
|
||||
|
||||
async fn delete_channel(
|
||||
State(state): State<AppState>,
|
||||
CurrentUser(user): CurrentUser,
|
||||
Path(channel_id): Path<Uuid>,
|
||||
) -> Result<impl IntoResponse, ApiError> {
|
||||
// ChannelService::delete enforces ownership internally
|
||||
state.channel_service.delete(channel_id, user.id).await?;
|
||||
Ok(StatusCode::NO_CONTENT)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Schedule generation + retrieval
|
||||
// ============================================================================
|
||||
|
||||
/// Trigger 48-hour schedule generation for a channel, starting from now.
|
||||
/// Replaces any existing schedule for the same window.
|
||||
async fn generate_schedule(
|
||||
State(state): State<AppState>,
|
||||
CurrentUser(user): CurrentUser,
|
||||
Path(channel_id): Path<Uuid>,
|
||||
) -> Result<impl IntoResponse, ApiError> {
|
||||
let channel = state.channel_service.find_by_id(channel_id).await?;
|
||||
require_owner(&channel, user.id)?;
|
||||
|
||||
let schedule = state
|
||||
.schedule_engine
|
||||
.generate_schedule(channel_id, Utc::now())
|
||||
.await?;
|
||||
|
||||
Ok((StatusCode::CREATED, Json(ScheduleResponse::from(schedule))))
|
||||
}
|
||||
|
||||
/// Return the currently active 48-hour schedule for a channel.
|
||||
/// 404 if no schedule has been generated yet — call POST /:id/schedule first.
|
||||
async fn get_active_schedule(
|
||||
State(state): State<AppState>,
|
||||
CurrentUser(user): CurrentUser,
|
||||
Path(channel_id): Path<Uuid>,
|
||||
) -> Result<impl IntoResponse, ApiError> {
|
||||
let channel = state.channel_service.find_by_id(channel_id).await?;
|
||||
require_owner(&channel, user.id)?;
|
||||
|
||||
let schedule = state
|
||||
.schedule_engine
|
||||
.get_active_schedule(channel_id, Utc::now())
|
||||
.await?
|
||||
.ok_or(DomainError::NoActiveSchedule(channel_id))?;
|
||||
|
||||
Ok(Json(ScheduleResponse::from(schedule)))
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Live broadcast endpoints
|
||||
// ============================================================================
|
||||
|
||||
/// What is currently playing right now on this channel.
|
||||
/// Returns 204 No Content when the channel is in a gap between blocks (no-signal).
|
||||
async fn get_current_broadcast(
|
||||
State(state): State<AppState>,
|
||||
CurrentUser(user): CurrentUser,
|
||||
Path(channel_id): Path<Uuid>,
|
||||
) -> Result<Response, ApiError> {
|
||||
let channel = state.channel_service.find_by_id(channel_id).await?;
|
||||
require_owner(&channel, user.id)?;
|
||||
|
||||
let now = Utc::now();
|
||||
let schedule = state
|
||||
.schedule_engine
|
||||
.get_active_schedule(channel_id, now)
|
||||
.await?
|
||||
.ok_or(DomainError::NoActiveSchedule(channel_id))?;
|
||||
|
||||
match ScheduleEngineService::get_current_broadcast(&schedule, now) {
|
||||
None => Ok(StatusCode::NO_CONTENT.into_response()),
|
||||
Some(broadcast) => Ok(Json(CurrentBroadcastResponse {
|
||||
slot: broadcast.slot.into(),
|
||||
offset_secs: broadcast.offset_secs,
|
||||
})
|
||||
.into_response()),
|
||||
}
|
||||
}
|
||||
|
||||
/// EPG: return scheduled slots that overlap a time window.
|
||||
///
|
||||
/// Query params (both RFC3339, both optional):
|
||||
/// - `from` — start of window (default: now)
|
||||
/// - `until` — end of window (default: now + 4 hours)
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct EpgQuery {
|
||||
from: Option<String>,
|
||||
until: Option<String>,
|
||||
}
|
||||
|
||||
async fn get_epg(
|
||||
State(state): State<AppState>,
|
||||
CurrentUser(user): CurrentUser,
|
||||
Path(channel_id): Path<Uuid>,
|
||||
Query(params): Query<EpgQuery>,
|
||||
) -> Result<impl IntoResponse, ApiError> {
|
||||
let channel = state.channel_service.find_by_id(channel_id).await?;
|
||||
require_owner(&channel, user.id)?;
|
||||
|
||||
let now = Utc::now();
|
||||
let from = parse_optional_dt(params.from, now)?;
|
||||
let until = parse_optional_dt(params.until, now + chrono::Duration::hours(4))?;
|
||||
|
||||
if until <= from {
|
||||
return Err(ApiError::validation("'until' must be after 'from'"));
|
||||
}
|
||||
|
||||
let schedule = state
|
||||
.schedule_engine
|
||||
.get_active_schedule(channel_id, from)
|
||||
.await?
|
||||
.ok_or(DomainError::NoActiveSchedule(channel_id))?;
|
||||
|
||||
let slots: Vec<ScheduledSlotResponse> = ScheduleEngineService::get_epg(&schedule, from, until)
|
||||
.into_iter()
|
||||
.cloned()
|
||||
.map(Into::into)
|
||||
.collect();
|
||||
|
||||
Ok(Json(slots))
|
||||
}
|
||||
|
||||
/// Redirect to the stream URL for whatever is currently playing.
|
||||
/// Returns 307 Temporary Redirect so the client fetches from the media provider directly.
|
||||
/// Returns 204 No Content when the channel is in a gap (no-signal).
|
||||
async fn get_stream(
|
||||
State(state): State<AppState>,
|
||||
CurrentUser(user): CurrentUser,
|
||||
Path(channel_id): Path<Uuid>,
|
||||
) -> Result<Response, ApiError> {
|
||||
let channel = state.channel_service.find_by_id(channel_id).await?;
|
||||
require_owner(&channel, user.id)?;
|
||||
|
||||
let now = Utc::now();
|
||||
let schedule = state
|
||||
.schedule_engine
|
||||
.get_active_schedule(channel_id, now)
|
||||
.await?
|
||||
.ok_or(DomainError::NoActiveSchedule(channel_id))?;
|
||||
|
||||
let broadcast = match ScheduleEngineService::get_current_broadcast(&schedule, now) {
|
||||
None => return Ok(StatusCode::NO_CONTENT.into_response()),
|
||||
Some(b) => b,
|
||||
};
|
||||
|
||||
let url = state
|
||||
.schedule_engine
|
||||
.get_stream_url(&broadcast.slot.item.id)
|
||||
.await?;
|
||||
|
||||
Ok(Redirect::temporary(&url).into_response())
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Helpers
|
||||
// ============================================================================
|
||||
|
||||
fn require_owner(channel: &domain::Channel, user_id: Uuid) -> Result<(), ApiError> {
|
||||
if channel.owner_id != user_id {
|
||||
Err(ApiError::Forbidden("You don't own this channel".into()))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_optional_dt(s: Option<String>, default: DateTime<Utc>) -> Result<DateTime<Utc>, ApiError> {
|
||||
match s {
|
||||
None => Ok(default),
|
||||
Some(raw) => DateTime::parse_from_rfc3339(&raw)
|
||||
.map(|dt| dt.with_timezone(&Utc))
|
||||
.map_err(|_| ApiError::validation(format!("Invalid datetime '{}' — use RFC3339", raw))),
|
||||
}
|
||||
}
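// Illustrative sketch, not part of this commit: expected behaviour of
// `parse_optional_dt` for the EPG `from`/`until` query parameters.
#[cfg(test)]
mod epg_query_sketch {
    use super::*;

    #[test]
    fn missing_value_falls_back_to_default() {
        let default = Utc::now();
        match parse_optional_dt(None, default) {
            Ok(dt) => assert_eq!(dt, default),
            Err(_) => panic!("None should fall back to the default"),
        }
    }

    #[test]
    fn rfc3339_is_accepted_and_garbage_is_rejected() {
        let default = Utc::now();
        match parse_optional_dt(Some("2024-01-01T12:00:00Z".into()), default) {
            Ok(dt) => assert_eq!(dt.to_rfc3339(), "2024-01-01T12:00:00+00:00"),
            Err(_) => panic!("valid RFC3339 should parse"),
        }
        assert!(parse_optional_dt(Some("next tuesday".into()), default).is_err());
    }
}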
|
||||
13
k-tv-backend/api/src/routes/config.rs
Normal file
@@ -0,0 +1,13 @@
use axum::{Json, Router, routing::get};
use crate::dto::ConfigResponse;
use crate::state::AppState;

pub fn router() -> Router<AppState> {
    Router::new().route("/", get(get_config))
}

async fn get_config() -> Json<ConfigResponse> {
    Json(ConfigResponse {
        allow_registration: true, // Default to true for template
    })
}
18
k-tv-backend/api/src/routes/mod.rs
Normal file
@@ -0,0 +1,18 @@
//! API Routes
//!
//! Defines the API endpoints and maps them to handler functions.

use crate::state::AppState;
use axum::Router;

pub mod auth;
pub mod channels;
pub mod config;

/// Construct the API v1 router
pub fn api_v1_router() -> Router<AppState> {
    Router::new()
        .nest("/auth", auth::router())
        .nest("/channels", channels::router())
        .nest("/config", config::router())
}
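// Illustrative sketch, not part of this commit: one way the binary crate might
// mount this router. The "/api/v1" prefix and the `app_state` value are
// assumptions, not taken from this change.
//
// let app = axum::Router::new()
//     .nest("/api/v1", api_v1_router())
//     .with_state(app_state);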
125
k-tv-backend/api/src/state.rs
Normal file
@@ -0,0 +1,125 @@
//! Application State
//!
//! Holds shared state for the application.

use axum::extract::FromRef;
use axum_extra::extract::cookie::Key;
#[cfg(feature = "auth-jwt")]
use infra::auth::jwt::{JwtConfig, JwtValidator};
#[cfg(feature = "auth-oidc")]
use infra::auth::oidc::OidcService;
use std::sync::Arc;

use crate::config::Config;
use domain::{ChannelService, ScheduleEngineService, UserService};

#[derive(Clone)]
pub struct AppState {
    pub user_service: Arc<UserService>,
    pub channel_service: Arc<ChannelService>,
    pub schedule_engine: Arc<ScheduleEngineService>,
    pub cookie_key: Key,
    #[cfg(feature = "auth-oidc")]
    pub oidc_service: Option<Arc<OidcService>>,
    #[cfg(feature = "auth-jwt")]
    pub jwt_validator: Option<Arc<JwtValidator>>,
    pub config: Arc<Config>,
}

impl AppState {
    pub async fn new(
        user_service: UserService,
        channel_service: ChannelService,
        schedule_engine: ScheduleEngineService,
        config: Config,
    ) -> anyhow::Result<Self> {
        let cookie_key = Key::derive_from(config.cookie_secret.as_bytes());

        #[cfg(feature = "auth-oidc")]
        let oidc_service = if let (Some(issuer), Some(id), secret, Some(redirect), resource_id) = (
            &config.oidc_issuer,
            &config.oidc_client_id,
            &config.oidc_client_secret,
            &config.oidc_redirect_url,
            &config.oidc_resource_id,
        ) {
            tracing::info!("Initializing OIDC service with issuer: {}", issuer);

            let issuer_url = domain::IssuerUrl::new(issuer)
                .map_err(|e| anyhow::anyhow!("Invalid OIDC issuer URL: {}", e))?;
            let client_id = domain::ClientId::new(id)
                .map_err(|e| anyhow::anyhow!("Invalid OIDC client ID: {}", e))?;
            let client_secret = secret.as_ref().map(|s| domain::ClientSecret::new(s));
            let redirect_url = domain::RedirectUrl::new(redirect)
                .map_err(|e| anyhow::anyhow!("Invalid OIDC redirect URL: {}", e))?;
            let resource = resource_id
                .as_ref()
                .map(|r| domain::ResourceId::new(r))
                .transpose()
                .map_err(|e| anyhow::anyhow!("Invalid OIDC resource ID: {}", e))?;

            Some(Arc::new(
                OidcService::new(issuer_url, client_id, client_secret, redirect_url, resource)
                    .await?,
            ))
        } else {
            None
        };

        #[cfg(feature = "auth-jwt")]
        let jwt_validator = {
            let secret = match &config.jwt_secret {
                Some(s) if !s.is_empty() => s.clone(),
                _ => {
                    if config.is_production {
                        anyhow::bail!("JWT_SECRET is required in production");
                    }
                    tracing::warn!(
                        "⚠️ JWT_SECRET not set — using insecure development secret. DO NOT USE IN PRODUCTION!"
                    );
                    "k-template-dev-secret-not-for-production-use-only".to_string()
                }
            };

            tracing::info!("Initializing JWT validator");
            let jwt_config = JwtConfig::new(
                secret,
                config.jwt_issuer.clone(),
                config.jwt_audience.clone(),
                Some(config.jwt_expiry_hours),
                config.is_production,
            )?;
            Some(Arc::new(JwtValidator::new(jwt_config)))
        };

        Ok(Self {
            user_service: Arc::new(user_service),
            channel_service: Arc::new(channel_service),
            schedule_engine: Arc::new(schedule_engine),
            cookie_key,
            #[cfg(feature = "auth-oidc")]
            oidc_service,
            #[cfg(feature = "auth-jwt")]
            jwt_validator,
            config: Arc::new(config),
        })
    }
}

impl FromRef<AppState> for Arc<UserService> {
    fn from_ref(input: &AppState) -> Self {
        input.user_service.clone()
    }
}

impl FromRef<AppState> for Arc<Config> {
    fn from_ref(input: &AppState) -> Self {
        input.config.clone()
    }
}

impl FromRef<AppState> for Key {
    fn from_ref(input: &AppState) -> Self {
        input.cookie_key.clone()
    }
}
89
k-tv-backend/compose.yml
Normal file
@@ -0,0 +1,89 @@
services:
  backend:
    build: .
    ports:
      - "3000:3000"
    environment:
      - SESSION_SECRET=dev_secret_key_12345
      - DATABASE_URL=sqlite:///app/data/notes.db
      - CORS_ALLOWED_ORIGINS=http://localhost:8080,http://localhost:5173
      - HOST=0.0.0.0
      - PORT=3000
      - DB_MAX_CONNECTIONS=5
      - DB_MIN_CONNECTIONS=1
      - SECURE_COOKIE=true
    volumes:
      - ./data:/app/data

  # nats:
  #   image: nats:alpine
  #   ports:
  #     - "4222:4222"
  #     - "6222:6222"
  #     - "8222:8222"
  #   restart: unless-stopped

  db:
    image: postgres:15-alpine
    environment:
      POSTGRES_USER: user
      POSTGRES_PASSWORD: password
      POSTGRES_DB: k_template_db
    ports:
      - "5439:5432"
    volumes:
      - db_data:/var/lib/postgresql/data

  zitadel-db:
    image: postgres:16-alpine
    container_name: zitadel_db
    environment:
      POSTGRES_USER: zitadel
      POSTGRES_PASSWORD: zitadel_password
      POSTGRES_DB: zitadel
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U zitadel -d zitadel"]
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      - zitadel_db_data:/var/lib/postgresql/data

  zitadel:
    image: ghcr.io/zitadel/zitadel:latest
    container_name: zitadel_local
    depends_on:
      zitadel-db:
        condition: service_healthy
    ports:
      - "8086:8080"
    # USE start-from-init (Fixes the "relation does not exist" bug)
    command: 'start-from-init --masterkey "MasterkeyNeedsToBeExactly32Bytes"'
    environment:
      # Database Connection
      ZITADEL_DATABASE_POSTGRES_HOST: zitadel-db
      ZITADEL_DATABASE_POSTGRES_PORT: 5432
      ZITADEL_DATABASE_POSTGRES_DATABASE: zitadel

      # APPLICATION USER (Zitadel uses this to run)
      ZITADEL_DATABASE_POSTGRES_USER_USERNAME: zitadel
      ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: zitadel_password
      ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: disable

      # ADMIN USER (Zitadel uses this to create tables/migrations)
      # We use 'zitadel' because it is the owner of the DB in your postgres container.
      ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: zitadel
      ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: zitadel_password
      ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: disable

      # General Config
      ZITADEL_EXTERNALDOMAIN: localhost
      ZITADEL_EXTERNALPORT: 8086
      ZITADEL_EXTERNALSECURE: "false"
      ZITADEL_TLS_ENABLED: "false"

      ZITADEL_DEFAULTINSTANCE_FEATURES_LOGINV2_REQUIRED: "false"

volumes:
  db_data:
  zitadel_db_data:
18
k-tv-backend/domain/Cargo.toml
Normal file
@@ -0,0 +1,18 @@
[package]
name = "domain"
version = "0.1.0"
edition = "2024"

[dependencies]
async-trait = "0.1.89"
chrono = { version = "0.4.42", features = ["serde"] }
chrono-tz = { version = "0.10", features = ["serde"] }
email_address = "0.2"
rand = "0.8"
serde = { version = "1.0.228", features = ["derive"] }
thiserror = "2.0.17"
url = { version = "2.5", features = ["serde"] }
uuid = { version = "1.19.0", features = ["v4", "serde"] }

[dev-dependencies]
tokio = { version = "1", features = ["rt", "macros"] }
296
k-tv-backend/domain/src/entities.rs
Normal file
@@ -0,0 +1,296 @@
|
||||
//! Domain entities
|
||||
//!
|
||||
//! This module contains pure domain types with no I/O dependencies.
|
||||
//! These represent the core business concepts of the application.
|
||||
|
||||
pub use crate::value_objects::{Email, UserId};
|
||||
use chrono::{DateTime, NaiveTime, Timelike, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::value_objects::{
|
||||
BlockId, ChannelId, ContentType, FillStrategy, MediaFilter, MediaItemId, RecyclePolicy, SlotId,
|
||||
};
|
||||
|
||||
/// A user in the system.
|
||||
///
|
||||
/// Designed to be OIDC-ready: the `subject` field stores the OIDC subject claim
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct User {
|
||||
pub id: UserId,
|
||||
pub subject: String,
|
||||
pub email: Email,
|
||||
pub password_hash: Option<String>,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
impl User {
|
||||
pub fn new(subject: impl Into<String>, email: Email) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
subject: subject.into(),
|
||||
email,
|
||||
password_hash: None,
|
||||
created_at: Utc::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_id(
|
||||
id: Uuid,
|
||||
subject: impl Into<String>,
|
||||
email: Email,
|
||||
password_hash: Option<String>,
|
||||
created_at: DateTime<Utc>,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
subject: subject.into(),
|
||||
email,
|
||||
password_hash,
|
||||
created_at,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_local(email: Email, password_hash: impl Into<String>) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
subject: format!("local|{}", Uuid::new_v4()),
|
||||
email,
|
||||
password_hash: Some(password_hash.into()),
|
||||
created_at: Utc::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Channel
|
||||
// ============================================================================
|
||||
|
||||
/// A broadcast channel owned by a user.
|
||||
///
|
||||
/// Holds the user-designed `ScheduleConfig` (the template) and `RecyclePolicy`.
|
||||
/// The engine consumes these to produce a concrete `GeneratedSchedule`.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Channel {
|
||||
pub id: ChannelId,
|
||||
pub owner_id: UserId,
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
/// IANA timezone string, e.g. `"America/New_York"`. All `start_time` fields
|
||||
/// inside `ScheduleConfig` are interpreted in this timezone.
|
||||
pub timezone: String,
|
||||
pub schedule_config: ScheduleConfig,
|
||||
pub recycle_policy: RecyclePolicy,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
impl Channel {
|
||||
pub fn new(
|
||||
owner_id: UserId,
|
||||
name: impl Into<String>,
|
||||
timezone: impl Into<String>,
|
||||
) -> Self {
|
||||
let now = Utc::now();
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
owner_id,
|
||||
name: name.into(),
|
||||
description: None,
|
||||
timezone: timezone.into(),
|
||||
schedule_config: ScheduleConfig::default(),
|
||||
recycle_policy: RecyclePolicy::default(),
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The user-designed programming template.
|
||||
///
|
||||
/// This is the shareable/exportable part of a channel. It contains an ordered
|
||||
/// list of `ProgrammingBlock`s but makes no assumptions about the media source.
|
||||
/// A channel does not need to cover all 24 hours — gaps are valid and render
|
||||
/// as a no-signal state on the client.
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct ScheduleConfig {
|
||||
pub blocks: Vec<ProgrammingBlock>,
|
||||
}
|
||||
|
||||
impl ScheduleConfig {
|
||||
/// Return the block whose time window contains `time`, if any.
|
||||
///
|
||||
/// Handles blocks that span midnight (e.g. start 23:00, duration 180 min).
|
||||
pub fn find_block_at(&self, time: NaiveTime) -> Option<&ProgrammingBlock> {
|
||||
let secs = time.num_seconds_from_midnight();
|
||||
self.blocks.iter().find(|block| {
|
||||
let start = block.start_time.num_seconds_from_midnight();
|
||||
let end = start + block.duration_mins * 60;
|
||||
if end <= 86_400 {
|
||||
secs >= start && secs < end
|
||||
} else {
|
||||
// Block crosses midnight: active from `start` to `end % 86400` next day
|
||||
secs >= start || secs < (end % 86_400)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the start time of the next block that begins strictly after `time`,
|
||||
/// within the same calendar day.
|
||||
pub fn next_block_start_after(&self, time: NaiveTime) -> Option<NaiveTime> {
|
||||
let secs = time.num_seconds_from_midnight();
|
||||
self.blocks
|
||||
.iter()
|
||||
.map(|b| b.start_time.num_seconds_from_midnight())
|
||||
.filter(|&s| s > secs)
|
||||
.min()
|
||||
.and_then(|s| NaiveTime::from_num_seconds_from_midnight_opt(s, 0))
|
||||
}
|
||||
|
||||
/// The earliest block start time across all blocks (used for next-day rollover).
|
||||
pub fn earliest_block_start(&self) -> Option<NaiveTime> {
|
||||
self.blocks.iter().map(|b| b.start_time).min()
|
||||
}
|
||||
}
|
||||
|
||||
/// A single programming rule within a `ScheduleConfig`.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProgrammingBlock {
|
||||
pub id: BlockId,
|
||||
pub name: String,
|
||||
/// Local time of day (in the channel's timezone) when this block starts.
|
||||
pub start_time: NaiveTime,
|
||||
/// Target duration in minutes. The engine fills this window as closely as
|
||||
/// possible; remaining time at the end becomes dead air (no-signal).
|
||||
pub duration_mins: u32,
|
||||
pub content: BlockContent,
|
||||
}
|
||||
|
||||
impl ProgrammingBlock {
|
||||
pub fn new_algorithmic(
|
||||
name: impl Into<String>,
|
||||
start_time: NaiveTime,
|
||||
duration_mins: u32,
|
||||
filter: MediaFilter,
|
||||
strategy: FillStrategy,
|
||||
) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
name: name.into(),
|
||||
start_time,
|
||||
duration_mins,
|
||||
content: BlockContent::Algorithmic { filter, strategy },
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_manual(
|
||||
name: impl Into<String>,
|
||||
start_time: NaiveTime,
|
||||
duration_mins: u32,
|
||||
items: Vec<MediaItemId>,
|
||||
) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
name: name.into(),
|
||||
start_time,
|
||||
duration_mins,
|
||||
content: BlockContent::Manual { items },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// How the content of a `ProgrammingBlock` is determined.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
pub enum BlockContent {
|
||||
/// The user hand-picked specific items in a specific order.
|
||||
Manual { items: Vec<MediaItemId> },
|
||||
/// The engine selects items from the provider using the given filter and strategy.
|
||||
Algorithmic {
|
||||
filter: MediaFilter,
|
||||
strategy: FillStrategy,
|
||||
},
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Media / Schedule resolution types
|
||||
// ============================================================================
|
||||
|
||||
/// A snapshot of a media item's metadata at schedule-generation time.
|
||||
///
|
||||
/// Stream URLs are intentionally absent — they are fetched on-demand from the
|
||||
/// provider at tune-in time so they stay fresh and provider-agnostic.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MediaItem {
|
||||
pub id: MediaItemId,
|
||||
pub title: String,
|
||||
pub content_type: ContentType,
|
||||
pub duration_secs: u32,
|
||||
pub genres: Vec<String>,
|
||||
pub year: Option<u16>,
|
||||
pub tags: Vec<String>,
|
||||
}
|
||||
|
||||
/// A fully resolved 48-hour broadcast program for one channel.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GeneratedSchedule {
|
||||
pub id: Uuid,
|
||||
pub channel_id: ChannelId,
|
||||
pub valid_from: DateTime<Utc>,
|
||||
pub valid_until: DateTime<Utc>,
|
||||
/// Monotonically increasing counter per channel, used by `RecyclePolicy`.
|
||||
pub generation: u32,
|
||||
/// Resolved slots, sorted ascending by `start_at`.
|
||||
pub slots: Vec<ScheduledSlot>,
|
||||
}
|
||||
|
||||
impl GeneratedSchedule {
|
||||
pub fn is_active_at(&self, time: DateTime<Utc>) -> bool {
|
||||
time >= self.valid_from && time < self.valid_until
|
||||
}
|
||||
}
|
||||
|
||||
/// A single resolved broadcast moment within a `GeneratedSchedule`.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ScheduledSlot {
|
||||
pub id: SlotId,
|
||||
pub start_at: DateTime<Utc>,
|
||||
pub end_at: DateTime<Utc>,
|
||||
/// Metadata snapshot captured at schedule-generation time.
|
||||
pub item: MediaItem,
|
||||
/// Which `ProgrammingBlock` rule produced this slot.
|
||||
pub source_block_id: BlockId,
|
||||
}
|
||||
|
||||
/// What is currently broadcasting on a channel — derived from `GeneratedSchedule`
|
||||
/// and the wall clock. Never stored. `None` means no block is scheduled right now
|
||||
/// (dead air / no-signal).
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CurrentBroadcast {
|
||||
pub slot: ScheduledSlot,
|
||||
/// Seconds elapsed since the start of the current item.
|
||||
pub offset_secs: u32,
|
||||
}
|
||||
|
||||
/// Records that an item was aired on a channel. Persisted to drive `RecyclePolicy`.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct PlaybackRecord {
|
||||
pub id: Uuid,
|
||||
pub channel_id: ChannelId,
|
||||
pub item_id: MediaItemId,
|
||||
pub played_at: DateTime<Utc>,
|
||||
/// The generation of the schedule that scheduled this play.
|
||||
pub generation: u32,
|
||||
}
|
||||
|
||||
impl PlaybackRecord {
|
||||
pub fn new(channel_id: ChannelId, item_id: MediaItemId, generation: u32) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4(),
|
||||
channel_id,
|
||||
item_id,
|
||||
played_at: Utc::now(),
|
||||
generation,
|
||||
}
|
||||
}
|
||||
}
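// Illustrative sketch, not part of this commit: `ScheduleConfig::find_block_at`
// handling a block that crosses midnight (23:00 start, 180 minute duration).
#[cfg(test)]
mod midnight_block_sketch {
    use super::*;
    use chrono::NaiveTime;

    #[test]
    fn block_crossing_midnight_matches_early_morning_times() {
        let block = ProgrammingBlock::new_manual(
            "Late Night",
            NaiveTime::from_hms_opt(23, 0, 0).unwrap(),
            180, // 23:00 to 02:00 the next day
            vec![],
        );
        let config = ScheduleConfig { blocks: vec![block] };

        // 00:30 falls inside the wrapped portion of the block.
        assert!(config.find_block_at(NaiveTime::from_hms_opt(0, 30, 0).unwrap()).is_some());
        // 22:00 is a gap, so the client would render no-signal here.
        assert!(config.find_block_at(NaiveTime::from_hms_opt(22, 0, 0).unwrap()).is_none());
    }
}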
|
||||
92
k-tv-backend/domain/src/errors.rs
Normal file
@@ -0,0 +1,92 @@
//! Domain errors for K-TV
//!
//! Uses `thiserror` for ergonomic error definitions.
//! These errors represent domain-level failures and will be mapped
//! to HTTP status codes in the API layer.

use thiserror::Error;
use uuid::Uuid;

/// Domain-level errors for K-TV operations
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum DomainError {
    /// The requested user was not found
    #[error("User not found: {0}")]
    UserNotFound(Uuid),

    /// User with this email/subject already exists
    #[error("User already exists: {0}")]
    UserAlreadyExists(String),

    /// The requested channel was not found
    #[error("Channel not found: {0}")]
    ChannelNotFound(Uuid),

    /// No generated schedule exists and is active for the given channel and time
    #[error("No active schedule for channel: {0}")]
    NoActiveSchedule(Uuid),

    /// A validation error occurred
    #[error("Validation error: {0}")]
    ValidationError(String),

    /// A timezone string could not be parsed
    #[error("Invalid timezone: {0}")]
    TimezoneError(String),

    /// User is not authenticated (maps to HTTP 401)
    #[error("Unauthenticated: {0}")]
    Unauthenticated(String),

    /// User is not allowed to perform this action (maps to HTTP 403)
    #[error("Forbidden: {0}")]
    Forbidden(String),

    /// A repository/infrastructure error occurred
    #[error("Repository error: {0}")]
    RepositoryError(String),

    /// An infrastructure adapter error occurred
    #[error("Infrastructure error: {0}")]
    InfrastructureError(String),
}

impl DomainError {
    /// Create a validation error
    pub fn validation(message: impl Into<String>) -> Self {
        Self::ValidationError(message.into())
    }

    /// Create an unauthenticated error (not logged in → 401)
    pub fn unauthenticated(message: impl Into<String>) -> Self {
        Self::Unauthenticated(message.into())
    }

    /// Create a forbidden error (not allowed → 403)
    pub fn forbidden(message: impl Into<String>) -> Self {
        Self::Forbidden(message.into())
    }

    /// Check if this error indicates a "not found" condition
    pub fn is_not_found(&self) -> bool {
        matches!(
            self,
            DomainError::UserNotFound(_) | DomainError::ChannelNotFound(_)
        )
    }

    /// Check if this error indicates a conflict (already exists)
    pub fn is_conflict(&self) -> bool {
        matches!(self, DomainError::UserAlreadyExists(_))
    }
}

impl From<crate::value_objects::ValidationError> for DomainError {
    fn from(error: crate::value_objects::ValidationError) -> Self {
        DomainError::ValidationError(error.to_string())
    }
}

/// Result type alias for domain operations
pub type DomainResult<T> = Result<T, DomainError>;
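// Illustrative sketch, not part of this commit: how the helper constructors and
// classification methods above are meant to be used together.
#[cfg(test)]
mod error_classification_sketch {
    use super::*;
    use uuid::Uuid;

    #[test]
    fn not_found_and_conflict_are_classified() {
        assert!(DomainError::UserNotFound(Uuid::nil()).is_not_found());
        assert!(DomainError::UserAlreadyExists("a@b.example".into()).is_conflict());
        assert!(!DomainError::validation("bad input").is_not_found());
    }
}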
19
k-tv-backend/domain/src/lib.rs
Normal file
@@ -0,0 +1,19 @@
//! Domain Logic
//!
//! This crate contains the core business logic, entities, and repository interfaces.
//! It is completely independent of the infrastructure layer (databases, HTTP, etc.).

pub mod entities;
pub mod errors;
pub mod ports;
pub mod repositories;
pub mod services;
pub mod value_objects;

// Re-export commonly used types
pub use entities::*;
pub use errors::{DomainError, DomainResult};
pub use ports::IMediaProvider;
pub use repositories::*;
pub use services::{ChannelService, ScheduleEngineService, UserService};
pub use value_objects::*;
39
k-tv-backend/domain/src/ports.rs
Normal file
@@ -0,0 +1,39 @@
//! Provider ports
//!
//! Abstract interfaces for fetching media from any source.
//! The domain never knows whether the backing provider is Jellyfin, Plex,
//! a local filesystem, or anything else — adapters in the infra crate implement
//! these traits for each concrete source.

use async_trait::async_trait;

use crate::entities::MediaItem;
use crate::errors::DomainResult;
use crate::value_objects::{MediaFilter, MediaItemId};

/// Port for reading media content from an external provider.
///
/// Implementations live in the infra layer. One adapter per provider type
/// (e.g. `JellyfinMediaProvider`, `PlexMediaProvider`, `LocalFileProvider`).
#[async_trait]
pub trait IMediaProvider: Send + Sync {
    /// Fetch metadata for all items matching `filter` from this provider.
    ///
    /// The provider interprets each field of `MediaFilter` in terms of its own
    /// API (e.g. Jellyfin libraries, Plex sections, filesystem paths).
    /// Returns an empty vec — not an error — when nothing matches.
    async fn fetch_items(&self, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>>;

    /// Fetch metadata for a single item by its opaque ID.
    ///
    /// Used by the scheduler when resolving `BlockContent::Manual` blocks, where
    /// the user has hand-picked specific items. Returns `None` if the item no
    /// longer exists in the provider (deleted, unavailable, etc.).
    async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>>;

    /// Get a playback URL for an item, called on-demand at tune-in time.
    ///
    /// URLs are intentionally *not* stored in the schedule because they may be
    /// short-lived (signed URLs, session tokens) or depend on client context.
    async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String>;
}
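// Illustrative sketch, not part of this commit: the smallest adapter that
// satisfies `IMediaProvider`, useful as a stand-in while wiring the schedule
// engine up in tests.
#[cfg(test)]
mod null_provider_sketch {
    use super::*;
    use crate::errors::DomainError;
    use async_trait::async_trait;

    #[allow(dead_code)]
    struct NullProvider;

    #[async_trait]
    impl IMediaProvider for NullProvider {
        async fn fetch_items(&self, _filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
            // "Nothing matches" is an empty vec, not an error.
            Ok(vec![])
        }

        async fn fetch_by_id(&self, _item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
            Ok(None)
        }

        async fn get_stream_url(&self, _item_id: &MediaItemId) -> DomainResult<String> {
            Err(DomainError::InfrastructureError("no media source configured".into()))
        }
    }
}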
71
k-tv-backend/domain/src/repositories.rs
Normal file
@@ -0,0 +1,71 @@
//! Repository ports (traits)
//!
//! These traits define the interface for data persistence.
//! Implementations live in the infra layer.

use async_trait::async_trait;
use chrono::DateTime;
use chrono::Utc;
use uuid::Uuid;

use crate::entities::{Channel, GeneratedSchedule, PlaybackRecord, User};
use crate::errors::DomainResult;
use crate::value_objects::{ChannelId, UserId};

/// Repository port for User persistence
#[async_trait]
pub trait UserRepository: Send + Sync {
    /// Find a user by their internal ID
    async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>>;

    /// Find a user by their OIDC subject (used for authentication)
    async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>>;

    /// Find a user by their email
    async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>>;

    /// Save a new user or update an existing one
    async fn save(&self, user: &User) -> DomainResult<()>;

    /// Delete a user by their ID
    async fn delete(&self, id: Uuid) -> DomainResult<()>;
}

/// Repository port for `Channel` persistence.
#[async_trait]
pub trait ChannelRepository: Send + Sync {
    async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>>;
    async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>>;
    /// Insert or update a channel.
    async fn save(&self, channel: &Channel) -> DomainResult<()>;
    async fn delete(&self, id: ChannelId) -> DomainResult<()>;
}

/// Repository port for `GeneratedSchedule` and `PlaybackRecord` persistence.
#[async_trait]
pub trait ScheduleRepository: Send + Sync {
    /// Find the schedule whose `[valid_from, valid_until)` window contains `at`.
    async fn find_active(
        &self,
        channel_id: ChannelId,
        at: DateTime<Utc>,
    ) -> DomainResult<Option<GeneratedSchedule>>;

    /// Find the most recently generated schedule for a channel.
    /// Used to derive the next generation number.
    async fn find_latest(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Option<GeneratedSchedule>>;

    /// Insert or replace a generated schedule.
    async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()>;

    /// All playback records for a channel, used by the recycle policy engine.
    async fn find_playback_history(
        &self,
        channel_id: ChannelId,
    ) -> DomainResult<Vec<PlaybackRecord>>;

    async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()>;
}
565
k-tv-backend/domain/src/services.rs
Normal file
@@ -0,0 +1,565 @@
|
||||
//! Domain Services
|
||||
//!
|
||||
//! Services contain the business logic of the application.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Duration, TimeZone, Utc};
|
||||
use chrono_tz::Tz;
|
||||
use rand::seq::SliceRandom;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::entities::{
|
||||
BlockContent, CurrentBroadcast, GeneratedSchedule, MediaItem, PlaybackRecord,
|
||||
ProgrammingBlock, ScheduledSlot,
|
||||
};
|
||||
use crate::errors::{DomainError, DomainResult};
|
||||
use crate::ports::IMediaProvider;
|
||||
use crate::repositories::{ChannelRepository, ScheduleRepository, UserRepository};
|
||||
use crate::value_objects::{
|
||||
BlockId, ChannelId, Email, FillStrategy, MediaFilter, MediaItemId, RecyclePolicy,
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// UserService
|
||||
// ============================================================================
|
||||
|
||||
/// Service for managing users.
|
||||
pub struct UserService {
|
||||
user_repository: Arc<dyn UserRepository>,
|
||||
}
|
||||
|
||||
impl UserService {
|
||||
pub fn new(user_repository: Arc<dyn UserRepository>) -> Self {
|
||||
Self { user_repository }
|
||||
}
|
||||
|
||||
pub async fn find_or_create(&self, subject: &str, email: &str) -> DomainResult<crate::entities::User> {
|
||||
if let Some(user) = self.user_repository.find_by_subject(subject).await? {
|
||||
return Ok(user);
|
||||
}
|
||||
|
||||
if let Some(mut user) = self.user_repository.find_by_email(email).await? {
|
||||
if user.subject != subject {
|
||||
user.subject = subject.to_string();
|
||||
self.user_repository.save(&user).await?;
|
||||
}
|
||||
return Ok(user);
|
||||
}
|
||||
|
||||
let email = Email::try_from(email)?;
|
||||
let user = crate::entities::User::new(subject, email);
|
||||
self.user_repository.save(&user).await?;
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub async fn find_by_id(&self, id: Uuid) -> DomainResult<crate::entities::User> {
|
||||
self.user_repository
|
||||
.find_by_id(id)
|
||||
.await?
|
||||
.ok_or(DomainError::UserNotFound(id))
|
||||
}
|
||||
|
||||
pub async fn find_by_email(&self, email: &str) -> DomainResult<Option<crate::entities::User>> {
|
||||
self.user_repository.find_by_email(email).await
|
||||
}
|
||||
|
||||
pub async fn create_local(
|
||||
&self,
|
||||
email: &str,
|
||||
password_hash: &str,
|
||||
) -> DomainResult<crate::entities::User> {
|
||||
let email = Email::try_from(email)?;
|
||||
let user = crate::entities::User::new_local(email, password_hash);
|
||||
self.user_repository.save(&user).await?;
|
||||
Ok(user)
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ChannelService
|
||||
// ============================================================================
|
||||
|
||||
/// Service for managing channels (CRUD + ownership enforcement).
|
||||
pub struct ChannelService {
|
||||
channel_repo: Arc<dyn ChannelRepository>,
|
||||
}
|
||||
|
||||
impl ChannelService {
|
||||
pub fn new(channel_repo: Arc<dyn ChannelRepository>) -> Self {
|
||||
Self { channel_repo }
|
||||
}
|
||||
|
||||
pub async fn create(
|
||||
&self,
|
||||
owner_id: crate::value_objects::UserId,
|
||||
name: &str,
|
||||
timezone: &str,
|
||||
) -> DomainResult<crate::entities::Channel> {
|
||||
let channel = crate::entities::Channel::new(owner_id, name, timezone);
|
||||
self.channel_repo.save(&channel).await?;
|
||||
Ok(channel)
|
||||
}
|
||||
|
||||
pub async fn find_by_id(
|
||||
&self,
|
||||
id: ChannelId,
|
||||
) -> DomainResult<crate::entities::Channel> {
|
||||
self.channel_repo
|
||||
.find_by_id(id)
|
||||
.await?
|
||||
.ok_or(DomainError::ChannelNotFound(id))
|
||||
}
|
||||
|
||||
pub async fn find_by_owner(
|
||||
&self,
|
||||
owner_id: crate::value_objects::UserId,
|
||||
) -> DomainResult<Vec<crate::entities::Channel>> {
|
||||
self.channel_repo.find_by_owner(owner_id).await
|
||||
}
|
||||
|
||||
pub async fn update(
|
||||
&self,
|
||||
channel: crate::entities::Channel,
|
||||
) -> DomainResult<crate::entities::Channel> {
|
||||
self.channel_repo.save(&channel).await?;
|
||||
Ok(channel)
|
||||
}
|
||||
|
||||
/// Delete a channel, enforcing that `requester_id` is the owner.
|
||||
pub async fn delete(
|
||||
&self,
|
||||
id: ChannelId,
|
||||
requester_id: crate::value_objects::UserId,
|
||||
) -> DomainResult<()> {
|
||||
let channel = self.find_by_id(id).await?;
|
||||
if channel.owner_id != requester_id {
|
||||
return Err(DomainError::forbidden("You don't own this channel"));
|
||||
}
|
||||
self.channel_repo.delete(id).await
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ScheduleEngineService
|
||||
// ============================================================================
|
||||
|
||||
/// Core scheduling engine.
|
||||
///
|
||||
/// Generates 48-hour broadcast schedules by walking through a channel's
|
||||
/// `ScheduleConfig` day by day, resolving each `ProgrammingBlock` into concrete
|
||||
/// `ScheduledSlot`s via the `IMediaProvider`, and applying the `RecyclePolicy`
|
||||
/// to avoid replaying recently aired items.
|
||||
pub struct ScheduleEngineService {
|
||||
media_provider: Arc<dyn IMediaProvider>,
|
||||
channel_repo: Arc<dyn ChannelRepository>,
|
||||
schedule_repo: Arc<dyn ScheduleRepository>,
|
||||
}
|
||||
|
||||
impl ScheduleEngineService {
|
||||
pub fn new(
|
||||
media_provider: Arc<dyn IMediaProvider>,
|
||||
channel_repo: Arc<dyn ChannelRepository>,
|
||||
schedule_repo: Arc<dyn ScheduleRepository>,
|
||||
) -> Self {
|
||||
Self {
|
||||
media_provider,
|
||||
channel_repo,
|
||||
schedule_repo,
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Public API
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// Generate and persist a 48-hour schedule for `channel_id` starting at `from`.
|
||||
///
|
||||
/// The algorithm:
|
||||
/// 1. Walk each calendar day in the 48-hour window.
|
||||
/// 2. For each `ProgrammingBlock`, compute its UTC wall-clock interval for that day.
|
||||
/// 3. Clip the interval to `[from, from + 48h)`.
|
||||
/// 4. Resolve the block content via the media provider, applying the recycle policy.
|
||||
/// 5. Record every played item in the playback history.
|
||||
///
|
||||
/// Gaps between blocks are left empty — clients render them as a no-signal state.
|
||||
pub async fn generate_schedule(
|
||||
&self,
|
||||
channel_id: ChannelId,
|
||||
from: DateTime<Utc>,
|
||||
) -> DomainResult<GeneratedSchedule> {
|
||||
let channel = self
|
||||
.channel_repo
|
||||
.find_by_id(channel_id)
|
||||
.await?
|
||||
.ok_or(DomainError::ChannelNotFound(channel_id))?;
|
||||
|
||||
let tz: Tz = channel
|
||||
.timezone
|
||||
.parse()
|
||||
.map_err(|_| DomainError::TimezoneError(channel.timezone.clone()))?;
|
||||
|
||||
let history = self
|
||||
.schedule_repo
|
||||
.find_playback_history(channel_id)
|
||||
.await?;
|
||||
|
||||
let generation = self
|
||||
.schedule_repo
|
||||
.find_latest(channel_id)
|
||||
.await?
|
||||
.map(|s| s.generation + 1)
|
||||
.unwrap_or(1);
|
||||
|
||||
let valid_from = from;
|
||||
let valid_until = from + Duration::hours(48);
|
||||
|
||||
let start_date = from.with_timezone(&tz).date_naive();
|
||||
let end_date = valid_until.with_timezone(&tz).date_naive();
|
||||
|
||||
let mut slots: Vec<ScheduledSlot> = Vec::new();
|
||||
let mut current_date = start_date;
|
||||
|
||||
while current_date <= end_date {
|
||||
for block in &channel.schedule_config.blocks {
|
||||
let naive_start = current_date.and_time(block.start_time);
|
||||
|
||||
// `earliest()` handles DST gaps — if the local time doesn't exist
|
||||
// (e.g. clocks spring forward) we skip this block occurrence.
|
||||
let block_start_utc = match tz.from_local_datetime(&naive_start).earliest() {
|
||||
Some(dt) => dt.with_timezone(&Utc),
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let block_end_utc =
|
||||
block_start_utc + Duration::minutes(block.duration_mins as i64);
|
||||
|
||||
// Clip to the 48-hour window.
|
||||
let slot_start = block_start_utc.max(valid_from);
|
||||
let slot_end = block_end_utc.min(valid_until);
|
||||
|
||||
if slot_end <= slot_start {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut block_slots = self
|
||||
.resolve_block(
|
||||
block,
|
||||
slot_start,
|
||||
slot_end,
|
||||
&history,
|
||||
&channel.recycle_policy,
|
||||
generation,
|
||||
)
|
||||
.await?;
|
||||
|
||||
slots.append(&mut block_slots);
|
||||
}
|
||||
|
||||
current_date = current_date.succ_opt().ok_or_else(|| {
|
||||
DomainError::validation("Date overflow during schedule generation")
|
||||
})?;
|
||||
}
|
||||
|
||||
// Blocks in ScheduleConfig are not required to be sorted; sort resolved slots.
|
||||
slots.sort_by_key(|s| s.start_at);
|
||||
|
||||
let schedule = GeneratedSchedule {
|
||||
id: Uuid::new_v4(),
|
||||
channel_id,
|
||||
valid_from,
|
||||
valid_until,
|
||||
generation,
|
||||
slots,
|
||||
};
|
||||
|
||||
self.schedule_repo.save(&schedule).await?;
|
||||
|
||||
// Persist playback history so the recycle policy has data for next generation.
|
||||
for slot in &schedule.slots {
|
||||
let record =
|
||||
PlaybackRecord::new(channel_id, slot.item.id.clone(), generation);
|
||||
self.schedule_repo.save_playback_record(&record).await?;
|
||||
}
|
||||
|
||||
Ok(schedule)
|
||||
}
|
||||
|
||||
/// Determine what is currently broadcasting on a schedule.
|
||||
///
|
||||
/// Returns `None` when `now` falls in a gap between blocks — the client
|
||||
/// should display a no-signal / static screen in that case.
|
||||
pub fn get_current_broadcast(
|
||||
schedule: &GeneratedSchedule,
|
||||
now: DateTime<Utc>,
|
||||
) -> Option<CurrentBroadcast> {
|
||||
schedule
|
||||
.slots
|
||||
.iter()
|
||||
.find(|s| s.start_at <= now && now < s.end_at)
|
||||
.map(|slot| CurrentBroadcast {
|
||||
slot: slot.clone(),
|
||||
offset_secs: (now - slot.start_at).num_seconds() as u32,
|
||||
})
|
||||
}
|
||||
|
||||
/// Look up the schedule currently active at `at` without generating a new one.
|
||||
pub async fn get_active_schedule(
|
||||
&self,
|
||||
channel_id: ChannelId,
|
||||
at: DateTime<Utc>,
|
||||
) -> DomainResult<Option<GeneratedSchedule>> {
|
||||
self.schedule_repo.find_active(channel_id, at).await
|
||||
}
|
||||
|
||||
/// Delegate stream URL resolution to the configured media provider.
|
||||
pub async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String> {
|
||||
self.media_provider.get_stream_url(item_id).await
|
||||
}
|
||||
|
||||
/// Return all slots that overlap the given time window — the EPG data.
|
||||
pub fn get_epg<'a>(
|
||||
schedule: &'a GeneratedSchedule,
|
||||
from: DateTime<Utc>,
|
||||
until: DateTime<Utc>,
|
||||
) -> Vec<&'a ScheduledSlot> {
|
||||
schedule
|
||||
.slots
|
||||
.iter()
|
||||
.filter(|s| s.start_at < until && s.end_at > from)
|
||||
.collect()
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Block resolution
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
async fn resolve_block(
|
||||
&self,
|
||||
block: &ProgrammingBlock,
|
||||
start: DateTime<Utc>,
|
||||
end: DateTime<Utc>,
|
||||
history: &[PlaybackRecord],
|
||||
policy: &RecyclePolicy,
|
||||
generation: u32,
|
||||
) -> DomainResult<Vec<ScheduledSlot>> {
|
||||
match &block.content {
|
||||
BlockContent::Manual { items } => {
|
||||
self.resolve_manual(items, start, end, block.id).await
|
||||
}
|
||||
BlockContent::Algorithmic { filter, strategy } => {
|
||||
self.resolve_algorithmic(
|
||||
filter, strategy, start, end, history, policy, generation, block.id,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve a manual block by fetching each hand-picked item in order.
|
||||
/// Stops when the block's time budget (`end`) is exhausted.
|
||||
async fn resolve_manual(
|
||||
&self,
|
||||
item_ids: &[MediaItemId],
|
||||
start: DateTime<Utc>,
|
||||
end: DateTime<Utc>,
|
||||
block_id: BlockId,
|
||||
) -> DomainResult<Vec<ScheduledSlot>> {
|
||||
let mut slots = Vec::new();
|
||||
let mut cursor = start;
|
||||
|
||||
for item_id in item_ids {
|
||||
if cursor >= end {
|
||||
break;
|
||||
}
|
||||
if let Some(item) = self.media_provider.fetch_by_id(item_id).await? {
|
||||
let item_end =
|
||||
(cursor + Duration::seconds(item.duration_secs as i64)).min(end);
|
||||
slots.push(ScheduledSlot {
|
||||
id: Uuid::new_v4(),
|
||||
start_at: cursor,
|
||||
end_at: item_end,
|
||||
item,
|
||||
source_block_id: block_id,
|
||||
});
|
||||
cursor = item_end;
|
||||
}
|
||||
// If item is not found (deleted/unavailable), silently skip it.
|
||||
}
|
||||
|
||||
Ok(slots)
|
||||
}
|
||||
|
||||
/// Resolve an algorithmic block: fetch candidates, apply recycle policy,
|
||||
/// run the fill strategy, and build slots.
|
||||
async fn resolve_algorithmic(
|
||||
&self,
|
||||
filter: &MediaFilter,
|
||||
strategy: &FillStrategy,
|
||||
start: DateTime<Utc>,
|
||||
end: DateTime<Utc>,
|
||||
history: &[PlaybackRecord],
|
||||
policy: &RecyclePolicy,
|
||||
generation: u32,
|
||||
block_id: BlockId,
|
||||
) -> DomainResult<Vec<ScheduledSlot>> {
|
||||
let candidates = self.media_provider.fetch_items(filter).await?;
|
||||
|
||||
if candidates.is_empty() {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
let pool = Self::apply_recycle_policy(candidates, history, policy, generation);
|
||||
let target_secs = (end - start).num_seconds() as u32;
|
||||
let selected = Self::fill_block(&pool, target_secs, strategy);
|
||||
|
||||
let mut slots = Vec::new();
|
||||
let mut cursor = start;
|
||||
|
||||
for item in selected {
|
||||
if cursor >= end {
|
||||
break;
|
||||
}
|
||||
let item_end =
|
||||
(cursor + Duration::seconds(item.duration_secs as i64)).min(end);
|
||||
slots.push(ScheduledSlot {
|
||||
id: Uuid::new_v4(),
|
||||
start_at: cursor,
|
||||
end_at: item_end,
|
||||
item: item.clone(),
|
||||
source_block_id: block_id,
|
||||
});
|
||||
cursor = item_end;
|
||||
}
|
||||
|
||||
Ok(slots)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Recycle policy
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// Filter `candidates` according to `policy`, returning the eligible pool.
|
||||
///
|
||||
/// An item is on cooldown if *either* the day-based or generation-based
|
||||
/// threshold is exceeded. If honouring all cooldowns would leave fewer items
|
||||
/// than `policy.min_available_ratio` of the total, all cooldowns are waived
|
||||
/// and the full pool is returned (prevents small libraries from stalling).
|
||||
fn apply_recycle_policy(
|
||||
candidates: Vec<MediaItem>,
|
||||
history: &[PlaybackRecord],
|
||||
policy: &RecyclePolicy,
|
||||
current_generation: u32,
|
||||
) -> Vec<MediaItem> {
|
||||
let now = Utc::now();
|
||||
|
||||
let excluded: HashSet<MediaItemId> = history
|
||||
.iter()
|
||||
.filter(|record| {
|
||||
let by_days = policy
|
||||
.cooldown_days
|
||||
.map(|days| (now - record.played_at).num_days() < days as i64)
|
||||
.unwrap_or(false);
|
||||
|
||||
let by_gen = policy
|
||||
.cooldown_generations
|
||||
.map(|gens| {
|
||||
current_generation.saturating_sub(record.generation) < gens
|
||||
})
|
||||
.unwrap_or(false);
|
||||
|
||||
by_days || by_gen
|
||||
})
|
||||
.map(|r| r.item_id.clone())
|
||||
.collect();
|
||||
|
||||
let available: Vec<MediaItem> = candidates
|
||||
.iter()
|
||||
.filter(|i| !excluded.contains(&i.id))
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let min_count =
|
||||
(candidates.len() as f32 * policy.min_available_ratio).ceil() as usize;
|
||||
|
||||
if available.len() < min_count {
|
||||
// Pool too small after applying cooldowns — recycle everything.
|
||||
candidates
|
||||
} else {
|
||||
available
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Fill strategies
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
fn fill_block<'a>(
|
||||
pool: &'a [MediaItem],
|
||||
target_secs: u32,
|
||||
strategy: &FillStrategy,
|
||||
) -> Vec<&'a MediaItem> {
|
||||
match strategy {
|
||||
FillStrategy::BestFit => Self::fill_best_fit(pool, target_secs),
|
||||
FillStrategy::Sequential => Self::fill_sequential(pool, target_secs),
|
||||
FillStrategy::Random => {
|
||||
let mut indices: Vec<usize> = (0..pool.len()).collect();
|
||||
indices.shuffle(&mut rand::thread_rng());
|
||||
let mut remaining = target_secs;
|
||||
let mut result = Vec::new();
|
||||
for i in indices {
|
||||
let item = &pool[i];
|
||||
if item.duration_secs <= remaining {
|
||||
remaining -= item.duration_secs;
|
||||
result.push(item);
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Greedy bin-packing: at each step pick the longest item that still fits
|
||||
/// in the remaining budget, without repeating items within the same block.
|
||||
fn fill_best_fit(pool: &[MediaItem], target_secs: u32) -> Vec<&MediaItem> {
|
||||
let mut remaining = target_secs;
|
||||
let mut selected: Vec<&MediaItem> = Vec::new();
|
||||
let mut used: HashSet<usize> = HashSet::new();
|
||||
|
||||
loop {
|
||||
let best = pool
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(idx, item)| {
|
||||
!used.contains(idx) && item.duration_secs <= remaining
|
||||
})
|
||||
.max_by_key(|(_, item)| item.duration_secs);
|
||||
|
||||
match best {
|
||||
Some((idx, item)) => {
|
||||
remaining -= item.duration_secs;
|
||||
used.insert(idx);
|
||||
selected.push(item);
|
||||
}
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
|
||||
selected
|
||||
}
|
||||
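    // Worked example (hypothetical durations): with a 3600 s budget and items
    // of 2700 s, 1500 s, 900 s and 600 s, best-fit picks 2700 s first (largest
    // that fits), then 900 s (1500 s no longer fits), leaving 0 s of dead air.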
|
||||
/// Sequential: iterate the pool in order, picking items that fit within
|
||||
/// the remaining budget. Good for series where episode order matters.
|
||||
fn fill_sequential(pool: &[MediaItem], target_secs: u32) -> Vec<&MediaItem> {
|
||||
let mut remaining = target_secs;
|
||||
let mut result = Vec::new();
|
||||
for item in pool {
|
||||
if item.duration_secs <= remaining {
|
||||
remaining -= item.duration_secs;
|
||||
result.push(item);
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
768
k-tv-backend/domain/src/value_objects.rs
Normal file
@@ -0,0 +1,768 @@
|
||||
//! Value Objects for the K-TV Domain
//!
//! Newtypes that encapsulate validation logic, following the "parse, don't validate" pattern.
//! These types can only be constructed from valid input, so holding one is a type-level guarantee of validity.
|
||||
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::fmt;
|
||||
use thiserror::Error;
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
pub type UserId = Uuid;
|
||||
|
||||
// ============================================================================
|
||||
// Validation Error
|
||||
// ============================================================================
|
||||
|
||||
/// Errors that occur when parsing/validating value objects
|
||||
#[derive(Debug, Error, Clone, PartialEq, Eq)]
|
||||
#[non_exhaustive]
|
||||
pub enum ValidationError {
|
||||
#[error("Invalid email format: {0}")]
|
||||
InvalidEmail(String),
|
||||
|
||||
#[error("Password must be at least {min} characters, got {actual}")]
|
||||
PasswordTooShort { min: usize, actual: usize },
|
||||
|
||||
#[error("Invalid URL: {0}")]
|
||||
InvalidUrl(String),
|
||||
|
||||
#[error("Value cannot be empty: {0}")]
|
||||
Empty(String),
|
||||
|
||||
#[error("Secret too short: minimum {min} bytes required, got {actual}")]
|
||||
SecretTooShort { min: usize, actual: usize },
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Email (using email_address crate for RFC-compliant validation)
|
||||
// ============================================================================
|
||||
|
||||
/// A validated email address using RFC-compliant validation.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct Email(email_address::EmailAddress);
|
||||
|
||||
impl Email {
|
||||
/// Create a new validated email address
|
||||
pub fn new(value: impl AsRef<str>) -> Result<Self, ValidationError> {
|
||||
let value = value.as_ref().trim().to_lowercase();
|
||||
let addr: email_address::EmailAddress = value
|
||||
.parse()
|
||||
.map_err(|_| ValidationError::InvalidEmail(value.clone()))?;
|
||||
Ok(Self(addr))
|
||||
}
|
||||
|
||||
/// Get the inner value
|
||||
pub fn into_inner(self) -> String {
|
||||
self.0.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for Email {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Email {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for Email {
|
||||
type Error = ValidationError;
|
||||
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for Email {
|
||||
type Error = ValidationError;
|
||||
|
||||
fn try_from(value: &str) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for Email {
|
||||
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_str(self.0.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for Email {
|
||||
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
let s = String::deserialize(deserializer)?;
|
||||
Self::new(s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Password
|
||||
// ============================================================================
|
||||
|
||||
/// A validated password input (NOT the hash).
|
||||
///
|
||||
/// Enforces a minimum length of 8 characters (see `MIN_PASSWORD_LENGTH`).
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct Password(String);
|
||||
|
||||
/// Minimum password length (NIST recommendation)
|
||||
pub const MIN_PASSWORD_LENGTH: usize = 8;
|
||||
|
||||
impl Password {
|
||||
pub fn new(value: impl Into<String>) -> Result<Self, ValidationError> {
|
||||
let value = value.into();
|
||||
|
||||
if value.len() < MIN_PASSWORD_LENGTH {
|
||||
return Err(ValidationError::PasswordTooShort {
|
||||
min: MIN_PASSWORD_LENGTH,
|
||||
actual: value.len(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Self(value))
|
||||
}
|
||||
|
||||
pub fn into_inner(self) -> String {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for Password {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
// Intentionally hide password content in Debug
|
||||
impl fmt::Debug for Password {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Password(***)")
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for Password {
|
||||
type Error = ValidationError;
|
||||
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for Password {
|
||||
type Error = ValidationError;
|
||||
|
||||
fn try_from(value: &str) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for Password {
|
||||
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
let s = String::deserialize(deserializer)?;
|
||||
Self::new(s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
// Note: Password should NOT implement Serialize to prevent accidental exposure
|
||||
|
||||
// ============================================================================
|
||||
// OIDC Configuration Newtypes
|
||||
// ============================================================================
|
||||
|
||||
/// OIDC Issuer URL - validated URL for the identity provider
|
||||
///
|
||||
/// Stores the original string to preserve exact formatting (e.g., trailing slashes)
|
||||
/// since OIDC providers expect issuer URLs to match exactly.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(try_from = "String", into = "String")]
|
||||
pub struct IssuerUrl(String);
|
||||
|
||||
impl IssuerUrl {
|
||||
pub fn new(value: impl AsRef<str>) -> Result<Self, ValidationError> {
|
||||
let value = value.as_ref().trim().to_string();
|
||||
// Validate URL format but store original string to preserve exact formatting
|
||||
Url::parse(&value).map_err(|e| ValidationError::InvalidUrl(e.to_string()))?;
|
||||
Ok(Self(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for IssuerUrl {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for IssuerUrl {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for IssuerUrl {
|
||||
type Error = ValidationError;
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<IssuerUrl> for String {
|
||||
fn from(val: IssuerUrl) -> Self {
|
||||
val.0
|
||||
}
|
||||
}
|
||||
|
||||
/// OIDC Client Identifier
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(try_from = "String", into = "String")]
|
||||
pub struct ClientId(String);
|
||||
|
||||
impl ClientId {
|
||||
pub fn new(value: impl Into<String>) -> Result<Self, ValidationError> {
|
||||
let value = value.into().trim().to_string();
|
||||
if value.is_empty() {
|
||||
return Err(ValidationError::Empty("client_id".to_string()));
|
||||
}
|
||||
Ok(Self(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for ClientId {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ClientId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for ClientId {
|
||||
type Error = ValidationError;
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ClientId> for String {
|
||||
fn from(val: ClientId) -> Self {
|
||||
val.0
|
||||
}
|
||||
}
|
||||
|
||||
/// OIDC Client Secret - hidden in Debug output
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct ClientSecret(String);
|
||||
|
||||
impl ClientSecret {
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
|
||||
/// Check if the secret is empty (for public clients)
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.trim().is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for ClientSecret {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for ClientSecret {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "ClientSecret(***)")
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ClientSecret {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "***")
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ClientSecret {
|
||||
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
let s = String::deserialize(deserializer)?;
|
||||
Ok(Self::new(s))
|
||||
}
|
||||
}
|
||||
|
||||
// Note: ClientSecret should NOT implement Serialize
|
||||
|
||||
/// OAuth Redirect URL - validated URL
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(try_from = "String", into = "String")]
|
||||
pub struct RedirectUrl(Url);
|
||||
|
||||
impl RedirectUrl {
|
||||
pub fn new(value: impl AsRef<str>) -> Result<Self, ValidationError> {
|
||||
let value = value.as_ref().trim();
|
||||
let url = Url::parse(value).map_err(|e| ValidationError::InvalidUrl(e.to_string()))?;
|
||||
Ok(Self(url))
|
||||
}
|
||||
|
||||
pub fn as_url(&self) -> &Url {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for RedirectUrl {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.0.as_str()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for RedirectUrl {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for RedirectUrl {
|
||||
type Error = ValidationError;
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<RedirectUrl> for String {
|
||||
fn from(val: RedirectUrl) -> Self {
|
||||
val.0.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
/// OIDC Resource Identifier (optional audience)
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(try_from = "String", into = "String")]
|
||||
pub struct ResourceId(String);
|
||||
|
||||
impl ResourceId {
|
||||
pub fn new(value: impl Into<String>) -> Result<Self, ValidationError> {
|
||||
let value = value.into().trim().to_string();
|
||||
if value.is_empty() {
|
||||
return Err(ValidationError::Empty("resource_id".to_string()));
|
||||
}
|
||||
Ok(Self(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for ResourceId {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ResourceId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<String> for ResourceId {
|
||||
type Error = ValidationError;
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
Self::new(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ResourceId> for String {
|
||||
fn from(val: ResourceId) -> Self {
|
||||
val.0
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// OIDC Flow Newtypes (for type-safe session storage)
|
||||
// ============================================================================
|
||||
|
||||
/// CSRF Token for OIDC state parameter
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct CsrfToken(String);
|
||||
|
||||
impl CsrfToken {
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for CsrfToken {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CsrfToken {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Nonce for OIDC ID token verification
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct OidcNonce(String);
|
||||
|
||||
impl OidcNonce {
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for OidcNonce {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for OidcNonce {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// PKCE Code Verifier
|
||||
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct PkceVerifier(String);
|
||||
|
||||
impl PkceVerifier {
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for PkceVerifier {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
// Hide PKCE verifier in Debug (security)
|
||||
impl fmt::Debug for PkceVerifier {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "PkceVerifier(***)")
|
||||
}
|
||||
}
|
||||
|
||||
/// OAuth2 Authorization Code
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct AuthorizationCode(String);
|
||||
|
||||
impl AuthorizationCode {
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for AuthorizationCode {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
// Hide authorization code in Debug (security)
|
||||
impl fmt::Debug for AuthorizationCode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "AuthorizationCode(***)")
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for AuthorizationCode {
|
||||
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
let s = String::deserialize(deserializer)?;
|
||||
Ok(Self::new(s))
|
||||
}
|
||||
}
|
||||
|
||||
/// Complete authorization URL data returned when starting OIDC flow
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AuthorizationUrlData {
|
||||
/// The URL to redirect the user to
|
||||
pub url: Url,
|
||||
/// CSRF token to store in session
|
||||
pub csrf_token: CsrfToken,
|
||||
/// Nonce to store in session
|
||||
pub nonce: OidcNonce,
|
||||
/// PKCE verifier to store in session
|
||||
pub pkce_verifier: PkceVerifier,
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Configuration Newtypes
|
||||
// ============================================================================
|
||||
|
||||
/// JWT signing secret with minimum length requirement
|
||||
pub const MIN_JWT_SECRET_LENGTH: usize = 32;
|
||||
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct JwtSecret(String);
|
||||
|
||||
impl JwtSecret {
|
||||
pub fn new(value: impl Into<String>, is_production: bool) -> Result<Self, ValidationError> {
|
||||
let value = value.into();
|
||||
if is_production && value.len() < MIN_JWT_SECRET_LENGTH {
|
||||
return Err(ValidationError::SecretTooShort {
|
||||
min: MIN_JWT_SECRET_LENGTH,
|
||||
actual: value.len(),
|
||||
});
|
||||
}
|
||||
Ok(Self(value))
|
||||
}
|
||||
|
||||
/// Create without validation (for development/testing)
|
||||
pub fn new_unchecked(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for JwtSecret {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for JwtSecret {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "JwtSecret(***)")
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Channel / Schedule types
|
||||
// ============================================================================
|
||||
|
||||
pub type ChannelId = Uuid;
|
||||
pub type SlotId = Uuid;
|
||||
pub type BlockId = Uuid;
|
||||
|
||||
/// Opaque media item identifier — format is provider-specific internally.
|
||||
/// The domain never inspects the string; it just passes it back to the provider.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
pub struct MediaItemId(String);
|
||||
|
||||
impl MediaItemId {
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self(value.into())
|
||||
}
|
||||
|
||||
pub fn into_inner(self) -> String {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for MediaItemId {
|
||||
fn as_ref(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for MediaItemId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for MediaItemId {
|
||||
fn from(s: String) -> Self {
|
||||
Self(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&str> for MediaItemId {
|
||||
fn from(s: &str) -> Self {
|
||||
Self(s.to_string())
|
||||
}
|
||||
}
|
||||
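// Example ids (illustrative only): a Jellyfin item GUID such as
// MediaItemId::from("0f7e3a2d41c94b0f9d2d7c5e8a1b6c3d"), or a filesystem-backed
// provider using a path like MediaItemId::from("/media/movies/Heat (1995).mkv").
// The domain treats both as opaque strings.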
|
||||
/// The broad category of a media item.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum ContentType {
|
||||
Movie,
|
||||
Episode,
|
||||
Short,
|
||||
}
|
||||
|
||||
/// Provider-agnostic filter for querying media items.
|
||||
///
|
||||
/// Every field is optional — leaving it unset (or empty, for the list fields) means "no constraint on this dimension".
|
||||
/// The `IMediaProvider` adapter interprets these fields in terms of its own API.
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct MediaFilter {
|
||||
pub content_type: Option<ContentType>,
|
||||
pub genres: Vec<String>,
|
||||
/// Starting year of a decade: 1990 means 1990–1999.
|
||||
pub decade: Option<u16>,
|
||||
pub tags: Vec<String>,
|
||||
pub min_duration_secs: Option<u32>,
|
||||
pub max_duration_secs: Option<u32>,
|
||||
/// Abstract groupings interpreted by each provider (Jellyfin library, Plex section,
|
||||
/// filesystem path, etc.). An empty list means "all available content".
|
||||
pub collections: Vec<String>,
|
||||
}
|
||||
|
||||
/// How the scheduling engine fills a time block with selected media items.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum FillStrategy {
|
||||
/// Greedy bin-packing: at each step pick the longest item that still fits,
|
||||
/// minimising dead air. Good for variety blocks.
|
||||
BestFit,
|
||||
/// Pick items in the order returned by the provider — ideal for series
|
||||
/// where episode sequence matters.
|
||||
Sequential,
|
||||
/// Shuffle the pool randomly then fill sequentially. Good for "shuffle play" channels.
|
||||
Random,
|
||||
}
|
||||
|
||||
/// Controls when previously aired items become eligible to play again.
|
||||
///
|
||||
/// An item is *on cooldown* while *either* cooldown window is still active.
|
||||
/// `min_available_ratio` is a safety valve: if honouring the cooldown would
|
||||
/// leave fewer items than this fraction of the total pool, the cooldown is
|
||||
/// ignored and all items become eligible. This prevents small libraries from
|
||||
/// running completely dry.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RecyclePolicy {
|
||||
/// Do not replay an item within this many calendar days.
|
||||
pub cooldown_days: Option<u32>,
|
||||
/// Do not replay an item within this many schedule generations.
|
||||
pub cooldown_generations: Option<u32>,
|
||||
/// Always keep at least this fraction (0.0–1.0) of the matching pool
|
||||
/// available for selection, even if their cooldown has not yet expired.
|
||||
pub min_available_ratio: f32,
|
||||
}
|
||||
|
||||
impl Default for RecyclePolicy {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
cooldown_days: Some(30),
|
||||
cooldown_generations: None,
|
||||
min_available_ratio: 0.2,
|
||||
}
|
||||
}
|
||||
}
|
||||
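// Illustrative configuration (example values, not defaults from this commit):
// a 90s action-movie channel that avoids repeats for two weeks and packs
// blocks greedily. Shows how MediaFilter, RecyclePolicy and FillStrategy fit together.
#[cfg(test)]
mod scheduling_config_example {
    use super::*;

    #[test]
    fn build_a_channel_scheduling_config() {
        let filter = MediaFilter {
            content_type: Some(ContentType::Movie),
            genres: vec!["Action".to_string()],
            decade: Some(1990),
            ..Default::default()
        };
        let policy = RecyclePolicy {
            cooldown_days: Some(14),
            cooldown_generations: None,
            min_available_ratio: 0.25,
        };
        let strategy = FillStrategy::BestFit;

        assert_eq!(filter.decade, Some(1990));
        assert!(policy.cooldown_days.is_some());
        assert!(matches!(strategy, FillStrategy::BestFit));
    }
}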
|
||||
// ============================================================================
|
||||
// Tests
|
||||
// ============================================================================
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
mod email_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_valid_email() {
|
||||
assert!(Email::new("user@example.com").is_ok());
|
||||
assert!(Email::new("USER@EXAMPLE.COM").is_ok()); // Should lowercase
|
||||
assert!(Email::new(" user@example.com ").is_ok()); // Should trim
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_email_normalizes() {
|
||||
let email = Email::new(" USER@EXAMPLE.COM ").unwrap();
|
||||
assert_eq!(email.as_ref(), "user@example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_email_no_at() {
|
||||
assert!(Email::new("userexample.com").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_email_no_domain() {
|
||||
assert!(Email::new("user@").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_email_no_local() {
|
||||
assert!(Email::new("@example.com").is_err());
|
||||
}
|
||||
}
|
||||
|
||||
mod password_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_valid_password() {
|
||||
assert!(Password::new("secret123").is_ok());
|
||||
assert!(Password::new("12345678").is_ok()); // Exactly 8 chars
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_password_too_short() {
|
||||
assert!(Password::new("1234567").is_err()); // 7 chars
|
||||
assert!(Password::new("").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_password_debug_hides_content() {
|
||||
let password = Password::new("supersecret").unwrap();
|
||||
let debug = format!("{:?}", password);
|
||||
assert!(!debug.contains("supersecret"));
|
||||
assert!(debug.contains("***"));
|
||||
}
|
||||
}
|
||||
|
||||
mod oidc_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_issuer_url_valid() {
|
||||
assert!(IssuerUrl::new("https://auth.example.com").is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_issuer_url_invalid() {
|
||||
assert!(IssuerUrl::new("not-a-url").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_client_id_non_empty() {
|
||||
assert!(ClientId::new("my-client").is_ok());
|
||||
assert!(ClientId::new("").is_err());
|
||||
assert!(ClientId::new(" ").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_client_secret_hides_in_debug() {
|
||||
let secret = ClientSecret::new("super-secret");
|
||||
let debug = format!("{:?}", secret);
|
||||
assert!(!debug.contains("super-secret"));
|
||||
assert!(debug.contains("***"));
|
||||
}
|
||||
}
|
||||
|
||||
mod secret_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_jwt_secret_production_check() {
|
||||
let short = "short";
|
||||
let long = "a".repeat(32);
|
||||
|
||||
// Production mode enforces length
|
||||
assert!(JwtSecret::new(short, true).is_err());
|
||||
assert!(JwtSecret::new(&long, true).is_ok());
|
||||
|
||||
// Development mode allows short secrets
|
||||
assert!(JwtSecret::new(short, false).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_secrets_hide_in_debug() {
|
||||
let jwt = JwtSecret::new_unchecked("secret");
|
||||
assert!(!format!("{:?}", jwt).contains("secret"));
|
||||
}
|
||||
}
|
||||
}
|
||||
48
k-tv-backend/infra/Cargo.toml
Normal file
@@ -0,0 +1,48 @@
|
||||
[package]
|
||||
name = "infra"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[features]
|
||||
default = ["sqlite", "jellyfin"]
|
||||
sqlite = ["sqlx/sqlite", "k-core/sqlite"]
|
||||
postgres = ["sqlx/postgres", "k-core/postgres"]
|
||||
broker-nats = ["dep:futures-util", "k-core/broker-nats"]
|
||||
auth-oidc = ["dep:openidconnect", "dep:url", "dep:axum-extra"]
|
||||
auth-jwt = ["dep:jsonwebtoken"]
|
||||
jellyfin = ["dep:reqwest"]
|
||||
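# Example (illustrative): build with Postgres and OIDC instead of the defaults,
# from this crate's directory:
#   cargo build --no-default-features --features "postgres,auth-oidc,jellyfin"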
|
||||
[dependencies]
|
||||
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
|
||||
"logging",
|
||||
"db-sqlx",
|
||||
] }
|
||||
domain = { path = "../domain" }
|
||||
|
||||
async-trait = "0.1.89"
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
sqlx = { version = "0.8.6", features = ["runtime-tokio", "chrono", "migrate"] }
|
||||
thiserror = "2.0.17"
|
||||
anyhow = "1.0"
|
||||
tokio = { version = "1.48.0", features = ["full"] }
|
||||
tracing = "0.1"
|
||||
uuid = { version = "1.19.0", features = ["v4", "serde"] }
|
||||
serde_json = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
futures-core = "0.3"
|
||||
password-auth = "1.0"
|
||||
|
||||
# Optional dependencies
|
||||
async-nats = { version = "0.45", optional = true }
|
||||
futures-util = { version = "0.3", optional = true }
|
||||
openidconnect = { version = "4.0.1", optional = true }
|
||||
url = { version = "2.5.8", optional = true }
|
||||
axum-extra = { version = "0.10", features = ["cookie-private"], optional = true }
|
||||
reqwest = { version = "0.12", features = ["json"], optional = true }
|
||||
jsonwebtoken = { version = "10.2.0", features = [
|
||||
"sha2",
|
||||
"p256",
|
||||
"hmac",
|
||||
"rsa",
|
||||
"rust_crypto",
|
||||
], optional = true }
|
||||
40
k-tv-backend/infra/Cargo.toml.template
Normal file
@@ -0,0 +1,40 @@
|
||||
[package]
|
||||
name = "infra"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[features]
|
||||
default = ["sqlite", "auth-jwt"]
|
||||
sqlite = ["sqlx/sqlite", "k-core/sqlite"]
|
||||
postgres = ["sqlx/postgres", "k-core/postgres"]
|
||||
broker-nats = ["dep:futures-util", "k-core/broker-nats"]
|
||||
auth-oidc = ["dep:openidconnect", "dep:url", "dep:axum-extra"]
|
||||
auth-jwt = ["dep:jsonwebtoken"]
|
||||
|
||||
[dependencies]
|
||||
k-core = { git = "https://git.gabrielkaszewski.dev/GKaszewski/k-core", features = [
|
||||
"logging",
|
||||
"db-sqlx",
|
||||
] }
|
||||
domain = { path = "../domain" }
|
||||
|
||||
async-trait = "0.1.89"
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
sqlx = { version = "0.8.6", features = ["runtime-tokio", "chrono", "migrate"] }
|
||||
thiserror = "2.0.17"
|
||||
anyhow = "1.0"
|
||||
tokio = { version = "1.48.0", features = ["full"] }
|
||||
tracing = "0.1"
|
||||
uuid = { version = "1.19.0", features = ["v4", "serde"] }
|
||||
serde_json = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
futures-core = "0.3"
|
||||
password-auth = "1.0"
|
||||
|
||||
# Optional dependencies
|
||||
async-nats = { version = "0.45", optional = true }
|
||||
futures-util = { version = "0.3", optional = true }
|
||||
openidconnect = { version = "4.0.1", optional = true }
|
||||
url = { version = "2.5.8", optional = true }
|
||||
axum-extra = { version = "0.10", features = ["cookie-private"], optional = true }
|
||||
jsonwebtoken = { version = "9.3", optional = true }
|
||||
278
k-tv-backend/infra/src/auth/jwt.rs
Normal file
@@ -0,0 +1,278 @@
|
||||
//! JWT Authentication Infrastructure
|
||||
//!
|
||||
//! Provides JWT token creation and validation using HS256 (secret-based).
|
||||
//! For OIDC/JWKS validation, see the `oidc` module.
|
||||
|
||||
use domain::User;
|
||||
use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, Validation, decode, encode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
/// Minimum secret length for production (256 bits = 32 bytes)
|
||||
const MIN_SECRET_LENGTH: usize = 32;
|
||||
|
||||
/// JWT configuration
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct JwtConfig {
|
||||
/// Secret key for HS256 signing/verification
|
||||
pub secret: String,
|
||||
/// Expected issuer (for validation)
|
||||
pub issuer: Option<String>,
|
||||
/// Expected audience (for validation)
|
||||
pub audience: Option<String>,
|
||||
/// Token expiry in hours (default: 24)
|
||||
pub expiry_hours: u64,
|
||||
}
|
||||
|
||||
impl JwtConfig {
|
||||
/// Create a new JWT config with validation
|
||||
///
|
||||
/// In production mode, this will reject weak secrets.
|
||||
pub fn new(
|
||||
secret: String,
|
||||
issuer: Option<String>,
|
||||
audience: Option<String>,
|
||||
expiry_hours: Option<u64>,
|
||||
is_production: bool,
|
||||
) -> Result<Self, JwtError> {
|
||||
// Validate secret strength in production
|
||||
if is_production && secret.len() < MIN_SECRET_LENGTH {
|
||||
return Err(JwtError::WeakSecret {
|
||||
min_length: MIN_SECRET_LENGTH,
|
||||
actual_length: secret.len(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
secret,
|
||||
issuer,
|
||||
audience,
|
||||
expiry_hours: expiry_hours.unwrap_or(24),
|
||||
})
|
||||
}
|
||||
|
||||
/// Create config without validation (for testing)
|
||||
pub fn new_unchecked(secret: String) -> Self {
|
||||
Self {
|
||||
secret,
|
||||
issuer: None,
|
||||
audience: None,
|
||||
expiry_hours: 24,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// JWT claims structure
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct JwtClaims {
|
||||
/// Subject - the user's unique identifier (user ID as string)
|
||||
pub sub: String,
|
||||
/// User's email address
|
||||
pub email: String,
|
||||
/// Expiry timestamp (seconds since UNIX epoch)
|
||||
pub exp: usize,
|
||||
/// Issued at timestamp (seconds since UNIX epoch)
|
||||
pub iat: usize,
|
||||
/// Issuer
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub iss: Option<String>,
|
||||
/// Audience
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub aud: Option<String>,
|
||||
}
|
||||
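// Example payload (illustrative values) that `create_token` would emit with a
// 24-hour expiry and an issuer configured but no audience (so `aud` is omitted
// via `skip_serializing_if`):
// {"sub":"3f2a1c9e-7b4d-4e0a-9c2b-1d5e6f7a8b9c","email":"user@example.com",
//  "exp":1735689600,"iat":1735603200,"iss":"k-tv"}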
|
||||
/// JWT-related errors
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum JwtError {
|
||||
#[error("JWT secret is too weak: minimum {min_length} bytes required, got {actual_length}")]
|
||||
WeakSecret {
|
||||
min_length: usize,
|
||||
actual_length: usize,
|
||||
},
|
||||
|
||||
#[error("Token creation failed: {0}")]
|
||||
CreationFailed(#[from] jsonwebtoken::errors::Error),
|
||||
|
||||
#[error("Token validation failed: {0}")]
|
||||
ValidationFailed(String),
|
||||
|
||||
#[error("Token expired")]
|
||||
Expired,
|
||||
|
||||
#[error("Invalid token format")]
|
||||
InvalidFormat,
|
||||
|
||||
#[error("Missing configuration")]
|
||||
MissingConfig,
|
||||
}
|
||||
|
||||
/// JWT token validator and generator
|
||||
#[derive(Clone)]
|
||||
pub struct JwtValidator {
|
||||
config: JwtConfig,
|
||||
encoding_key: EncodingKey,
|
||||
decoding_key: DecodingKey,
|
||||
validation: Validation,
|
||||
}
|
||||
|
||||
impl JwtValidator {
|
||||
/// Create a new JWT validator with the given configuration
|
||||
pub fn new(config: JwtConfig) -> Self {
|
||||
let encoding_key = EncodingKey::from_secret(config.secret.as_bytes());
|
||||
let decoding_key = DecodingKey::from_secret(config.secret.as_bytes());
|
||||
|
||||
let mut validation = Validation::new(Algorithm::HS256);
|
||||
|
||||
// Configure issuer validation if set
|
||||
if let Some(ref issuer) = config.issuer {
|
||||
validation.set_issuer(&[issuer]);
|
||||
}
|
||||
|
||||
// Configure audience validation if set
|
||||
if let Some(ref audience) = config.audience {
|
||||
validation.set_audience(&[audience]);
|
||||
}
|
||||
|
||||
Self {
|
||||
config,
|
||||
encoding_key,
|
||||
decoding_key,
|
||||
validation,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a JWT token for the given user
|
||||
pub fn create_token(&self, user: &User) -> Result<String, JwtError> {
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards")
|
||||
.as_secs() as usize;
|
||||
|
||||
let expiry = now + (self.config.expiry_hours as usize * 3600);
|
||||
|
||||
let claims = JwtClaims {
|
||||
sub: user.id.to_string(),
|
||||
email: user.email.as_ref().to_string(),
|
||||
exp: expiry,
|
||||
iat: now,
|
||||
iss: self.config.issuer.clone(),
|
||||
aud: self.config.audience.clone(),
|
||||
};
|
||||
|
||||
let header = Header::new(Algorithm::HS256);
|
||||
encode(&header, &claims, &self.encoding_key).map_err(JwtError::CreationFailed)
|
||||
}
|
||||
|
||||
/// Validate a JWT token and return the claims
|
||||
pub fn validate_token(&self, token: &str) -> Result<JwtClaims, JwtError> {
|
||||
let token_data = decode::<JwtClaims>(token, &self.decoding_key, &self.validation).map_err(
|
||||
|e| match e.kind() {
|
||||
jsonwebtoken::errors::ErrorKind::ExpiredSignature => JwtError::Expired,
|
||||
jsonwebtoken::errors::ErrorKind::InvalidToken => JwtError::InvalidFormat,
|
||||
_ => JwtError::ValidationFailed(e.to_string()),
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(token_data.claims)
|
||||
}
|
||||
|
||||
/// Get the user ID (subject) from a token without full validation
|
||||
/// Useful for logging/debugging, but should not be trusted for auth
|
||||
pub fn decode_unverified(&self, token: &str) -> Result<JwtClaims, JwtError> {
|
||||
let mut validation = Validation::new(Algorithm::HS256);
|
||||
validation.insecure_disable_signature_validation();
|
||||
validation.validate_exp = false;
|
||||
|
||||
let token_data = decode::<JwtClaims>(token, &self.decoding_key, &validation)
|
||||
.map_err(|_| JwtError::InvalidFormat)?;
|
||||
|
||||
Ok(token_data.claims)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for JwtValidator {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("JwtValidator")
|
||||
.field("issuer", &self.config.issuer)
|
||||
.field("audience", &self.config.audience)
|
||||
.field("expiry_hours", &self.config.expiry_hours)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use domain::Email;
|
||||
|
||||
fn create_test_user() -> User {
|
||||
let email = Email::try_from("test@example.com").unwrap();
|
||||
User::new("test-subject", email)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_and_validate_token() {
|
||||
let config = JwtConfig::new_unchecked("test-secret-key-that-is-long-enough".to_string());
|
||||
let validator = JwtValidator::new(config);
|
||||
let user = create_test_user();
|
||||
|
||||
let token = validator.create_token(&user).expect("Should create token");
|
||||
let claims = validator
|
||||
.validate_token(&token)
|
||||
.expect("Should validate token");
|
||||
|
||||
assert_eq!(claims.sub, user.id.to_string());
|
||||
assert_eq!(claims.email, "test@example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_weak_secret_rejected_in_production() {
|
||||
let result = JwtConfig::new(
|
||||
"short".to_string(), // Too short
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
true, // Production mode
|
||||
);
|
||||
|
||||
assert!(matches!(result, Err(JwtError::WeakSecret { .. })));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_weak_secret_allowed_in_development() {
|
||||
let result = JwtConfig::new(
|
||||
"short".to_string(), // Too short but OK in dev
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false, // Development mode
|
||||
);
|
||||
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_token_rejected() {
|
||||
let config = JwtConfig::new_unchecked("test-secret-key-that-is-long-enough".to_string());
|
||||
let validator = JwtValidator::new(config);
|
||||
|
||||
let result = validator.validate_token("invalid.token.here");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wrong_secret_rejected() {
|
||||
let config1 = JwtConfig::new_unchecked("secret-one-that-is-long-enough".to_string());
|
||||
let config2 = JwtConfig::new_unchecked("secret-two-that-is-long-enough".to_string());
|
||||
|
||||
let validator1 = JwtValidator::new(config1);
|
||||
let validator2 = JwtValidator::new(config2);
|
||||
|
||||
let user = create_test_user();
|
||||
let token = validator1.create_token(&user).unwrap();
|
||||
|
||||
// Token from validator1 should fail on validator2
|
||||
let result = validator2.validate_token(&token);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
19
k-tv-backend/infra/src/auth/mod.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
//! Authentication infrastructure
|
||||
//!
|
||||
//! This module contains the concrete implementation of authentication mechanisms.
|
||||
|
||||
/// Hash a password using the password-auth crate
|
||||
pub fn hash_password(password: &str) -> String {
|
||||
password_auth::generate_hash(password)
|
||||
}
|
||||
|
||||
/// Verify a password against a stored hash
|
||||
pub fn verify_password(password: &str, hash: &str) -> bool {
|
||||
password_auth::verify_password(password, hash).is_ok()
|
||||
}
|
||||
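// Usage sketch: hash at registration, verify at login. The generated hash
// string is self-contained (algorithm parameters and salt included), so only
// the hash needs to be stored.
#[cfg(test)]
mod password_hash_example {
    use super::*;

    #[test]
    fn hash_then_verify_roundtrip() {
        let hash = hash_password("correct horse battery staple");
        assert!(verify_password("correct horse battery staple", &hash));
        assert!(!verify_password("wrong password", &hash));
    }
}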
|
||||
#[cfg(feature = "auth-oidc")]
|
||||
pub mod oidc;
|
||||
|
||||
#[cfg(feature = "auth-jwt")]
|
||||
pub mod jwt;
|
||||
212
k-tv-backend/infra/src/auth/oidc.rs
Normal file
@@ -0,0 +1,212 @@
|
||||
use anyhow::anyhow;
|
||||
use domain::{
|
||||
AuthorizationCode, AuthorizationUrlData, ClientId, ClientSecret, CsrfToken, IssuerUrl,
|
||||
OidcNonce, PkceVerifier, RedirectUrl, ResourceId,
|
||||
};
|
||||
use openidconnect::{
|
||||
AccessTokenHash, Client, EmptyAdditionalClaims, EndpointMaybeSet, EndpointNotSet, EndpointSet,
|
||||
OAuth2TokenResponse, PkceCodeChallenge, Scope, StandardErrorResponse, TokenResponse,
|
||||
UserInfoClaims,
|
||||
core::{
|
||||
CoreAuthDisplay, CoreAuthPrompt, CoreAuthenticationFlow, CoreClient, CoreErrorResponseType,
|
||||
CoreGenderClaim, CoreJsonWebKey, CoreJweContentEncryptionAlgorithm, CoreProviderMetadata,
|
||||
CoreRevocableToken, CoreRevocationErrorResponse, CoreTokenIntrospectionResponse,
|
||||
CoreTokenResponse,
|
||||
},
|
||||
reqwest,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub type OidcClient = Client<
|
||||
EmptyAdditionalClaims,
|
||||
CoreAuthDisplay,
|
||||
CoreGenderClaim,
|
||||
CoreJweContentEncryptionAlgorithm,
|
||||
CoreJsonWebKey,
|
||||
CoreAuthPrompt,
|
||||
StandardErrorResponse<CoreErrorResponseType>,
|
||||
CoreTokenResponse,
|
||||
CoreTokenIntrospectionResponse,
|
||||
CoreRevocableToken,
|
||||
CoreRevocationErrorResponse,
|
||||
EndpointSet, // HasAuthUrl (Required and guaranteed by discovery)
|
||||
EndpointNotSet, // HasDeviceAuthUrl
|
||||
EndpointNotSet, // HasIntrospectionUrl
|
||||
EndpointNotSet, // HasRevocationUrl
|
||||
EndpointMaybeSet, // HasTokenUrl (Discovered, might be missing)
|
||||
EndpointMaybeSet, // HasUserInfoUrl (Discovered, might be missing)
|
||||
>;
|
||||
|
||||
/// Serializable OIDC state stored in an encrypted cookie during the auth code flow
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct OidcState {
|
||||
pub csrf_token: CsrfToken,
|
||||
pub nonce: OidcNonce,
|
||||
pub pkce_verifier: PkceVerifier,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OidcService {
|
||||
client: OidcClient,
|
||||
http_client: reqwest::Client,
|
||||
resource_id: Option<ResourceId>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct OidcUser {
|
||||
pub subject: String,
|
||||
pub email: String,
|
||||
}
|
||||
|
||||
impl OidcService {
|
||||
/// Create a new OIDC service with validated configuration newtypes
|
||||
pub async fn new(
|
||||
issuer: IssuerUrl,
|
||||
client_id: ClientId,
|
||||
client_secret: Option<ClientSecret>,
|
||||
redirect_url: RedirectUrl,
|
||||
resource_id: Option<ResourceId>,
|
||||
) -> anyhow::Result<Self> {
|
||||
tracing::debug!("🔵 OIDC Setup: Client ID = '{}'", client_id);
|
||||
tracing::debug!("🔵 OIDC Setup: Redirect = '{}'", redirect_url);
|
||||
tracing::debug!(
|
||||
"🔵 OIDC Setup: Secret = {:?}",
|
||||
if client_secret.is_some() { "SET" } else { "NONE" }
|
||||
);
|
||||
|
||||
let http_client = reqwest::ClientBuilder::new()
|
||||
.redirect(reqwest::redirect::Policy::none())
|
||||
.build()?;
|
||||
|
||||
let provider_metadata = CoreProviderMetadata::discover_async(
|
||||
openidconnect::IssuerUrl::new(issuer.as_ref().to_string())?,
|
||||
&http_client,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let oidc_client_id = openidconnect::ClientId::new(client_id.as_ref().to_string());
|
||||
let oidc_client_secret = client_secret
|
||||
.as_ref()
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(|s| openidconnect::ClientSecret::new(s.as_ref().to_string()));
|
||||
let oidc_redirect_url =
|
||||
openidconnect::RedirectUrl::new(redirect_url.as_ref().to_string())?;
|
||||
|
||||
let client = CoreClient::from_provider_metadata(
|
||||
provider_metadata,
|
||||
oidc_client_id,
|
||||
oidc_client_secret,
|
||||
)
|
||||
.set_redirect_uri(oidc_redirect_url);
|
||||
|
||||
Ok(Self {
|
||||
client,
|
||||
http_client,
|
||||
resource_id,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the authorization URL and associated state for OIDC login.
|
||||
///
|
||||
/// Returns `(AuthorizationUrlData, OidcState)` where `OidcState` should be
|
||||
/// serialized and stored in an encrypted cookie for the duration of the flow.
|
||||
pub fn get_authorization_url(&self) -> (AuthorizationUrlData, OidcState) {
|
||||
let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();
|
||||
|
||||
let (auth_url, csrf_token, nonce) = self
|
||||
.client
|
||||
.authorize_url(
|
||||
CoreAuthenticationFlow::AuthorizationCode,
|
||||
openidconnect::CsrfToken::new_random,
|
||||
openidconnect::Nonce::new_random,
|
||||
)
|
||||
.add_scope(Scope::new("profile".to_string()))
|
||||
.add_scope(Scope::new("email".to_string()))
|
||||
.set_pkce_challenge(pkce_challenge)
|
||||
.url();
|
||||
|
||||
let oidc_state = OidcState {
|
||||
csrf_token: CsrfToken::new(csrf_token.secret().to_string()),
|
||||
nonce: OidcNonce::new(nonce.secret().to_string()),
|
||||
pkce_verifier: PkceVerifier::new(pkce_verifier.secret().to_string()),
|
||||
};
|
||||
|
||||
let auth_data = AuthorizationUrlData {
|
||||
url: auth_url.into(),
|
||||
csrf_token: oidc_state.csrf_token.clone(),
|
||||
nonce: oidc_state.nonce.clone(),
|
||||
pkce_verifier: oidc_state.pkce_verifier.clone(),
|
||||
};
|
||||
|
||||
(auth_data, oidc_state)
|
||||
}
|
||||
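    // Flow sketch (assuming an axum-style pair of handlers, which are not part
    // of this module):
    // 1. login handler: let (auth, state) = svc.get_authorization_url();
    //    serialize `state` into the encrypted cookie, redirect to `auth.url`.
    // 2. callback handler: read the cookie back, compare the returned `state`
    //    query parameter against `state.csrf_token`, then call
    //    `resolve_callback(code, state.nonce, state.pkce_verifier)`.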
|
||||
/// Resolve the OIDC callback with type-safe parameters
|
||||
pub async fn resolve_callback(
|
||||
&self,
|
||||
code: AuthorizationCode,
|
||||
nonce: OidcNonce,
|
||||
pkce_verifier: PkceVerifier,
|
||||
) -> anyhow::Result<OidcUser> {
|
||||
let oidc_pkce_verifier =
|
||||
openidconnect::PkceCodeVerifier::new(pkce_verifier.as_ref().to_string());
|
||||
let oidc_nonce = openidconnect::Nonce::new(nonce.as_ref().to_string());
|
||||
|
||||
let token_response = self
|
||||
.client
|
||||
.exchange_code(openidconnect::AuthorizationCode::new(
|
||||
code.as_ref().to_string(),
|
||||
))?
|
||||
.set_pkce_verifier(oidc_pkce_verifier)
|
||||
.request_async(&self.http_client)
|
||||
.await?;
|
||||
|
||||
let id_token = token_response
|
||||
.id_token()
|
||||
.ok_or_else(|| anyhow!("Server did not return an ID token"))?;
|
||||
|
||||
let mut id_token_verifier = self.client.id_token_verifier().clone();
|
||||
|
||||
if let Some(resource_id) = &self.resource_id {
|
||||
let trusted_resource_id = resource_id.as_ref().to_string();
|
||||
id_token_verifier = id_token_verifier
|
||||
.set_other_audience_verifier_fn(move |aud| aud.as_str() == trusted_resource_id);
|
||||
}
|
||||
|
||||
let claims = id_token.claims(&id_token_verifier, &oidc_nonce)?;
|
||||
|
||||
if let Some(expected_access_token_hash) = claims.access_token_hash() {
|
||||
let actual_access_token_hash = AccessTokenHash::from_token(
|
||||
token_response.access_token(),
|
||||
id_token.signing_alg()?,
|
||||
id_token.signing_key(&id_token_verifier)?,
|
||||
)?;
|
||||
|
||||
if actual_access_token_hash != *expected_access_token_hash {
|
||||
return Err(anyhow!("Invalid access token"));
|
||||
}
|
||||
}
|
||||
|
||||
let email = if let Some(email) = claims.email() {
|
||||
Some(email.as_str().to_string())
|
||||
} else {
|
||||
tracing::debug!("🔵 Email missing in ID Token, fetching UserInfo...");
|
||||
|
||||
let user_info: UserInfoClaims<EmptyAdditionalClaims, CoreGenderClaim> = self
|
||||
.client
|
||||
.user_info(token_response.access_token().clone(), None)?
|
||||
.request_async(&self.http_client)
|
||||
.await?;
|
||||
|
||||
user_info.email().map(|e| e.as_str().to_string())
|
||||
};
|
||||
|
||||
let email =
|
||||
email.ok_or_else(|| anyhow!("User has no verified email address in ZITADEL"))?;
|
||||
|
||||
Ok(OidcUser {
|
||||
subject: claims.subject().to_string(),
|
||||
email,
|
||||
})
|
||||
}
|
||||
}
|
||||
255
k-tv-backend/infra/src/channel_repository.rs
Normal file
@@ -0,0 +1,255 @@
|
||||
//! SQLite and PostgreSQL adapters for ChannelRepository
|
||||
|
||||
use async_trait::async_trait;
|
||||
use chrono::{DateTime, Utc};
|
||||
use sqlx::FromRow;
|
||||
use uuid::Uuid;
|
||||
|
||||
use domain::{
|
||||
Channel, ChannelId, ChannelRepository, DomainError, DomainResult, RecyclePolicy,
|
||||
ScheduleConfig, UserId,
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Row type + mapping (shared between SQLite and Postgres)
|
||||
// ============================================================================
|
||||
|
||||
#[derive(Debug, FromRow)]
|
||||
struct ChannelRow {
|
||||
id: String,
|
||||
owner_id: String,
|
||||
name: String,
|
||||
description: Option<String>,
|
||||
timezone: String,
|
||||
schedule_config: String,
|
||||
recycle_policy: String,
|
||||
created_at: String,
|
||||
updated_at: String,
|
||||
}
|
||||
|
||||
fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
|
||||
DateTime::parse_from_rfc3339(s)
|
||||
.map(|dt| dt.with_timezone(&Utc))
|
||||
.or_else(|_| {
|
||||
chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S").map(|dt| dt.and_utc())
|
||||
})
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
|
||||
}
|
||||
|
||||
impl TryFrom<ChannelRow> for Channel {
|
||||
type Error = DomainError;
|
||||
|
||||
fn try_from(row: ChannelRow) -> Result<Self, Self::Error> {
|
||||
let id: ChannelId = Uuid::parse_str(&row.id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
|
||||
let owner_id: UserId = Uuid::parse_str(&row.owner_id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid owner UUID: {}", e)))?;
|
||||
let schedule_config: ScheduleConfig = serde_json::from_str(&row.schedule_config)
|
||||
.map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Invalid schedule_config JSON: {}", e))
|
||||
})?;
|
||||
let recycle_policy: RecyclePolicy = serde_json::from_str(&row.recycle_policy)
|
||||
.map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Invalid recycle_policy JSON: {}", e))
|
||||
})?;
|
||||
|
||||
Ok(Channel {
|
||||
id,
|
||||
owner_id,
|
||||
name: row.name,
|
||||
description: row.description,
|
||||
timezone: row.timezone,
|
||||
schedule_config,
|
||||
recycle_policy,
|
||||
created_at: parse_dt(&row.created_at)?,
|
||||
updated_at: parse_dt(&row.updated_at)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
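// Storage note: `schedule_config` and `recycle_policy` are persisted as JSON
// text columns. For example, a default `RecyclePolicy` serialises to
// {"cooldown_days":30,"cooldown_generations":null,"min_available_ratio":0.2}.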
|
||||
const SELECT_COLS: &str =
|
||||
"id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at";
|
||||
|
||||
// ============================================================================
|
||||
// SQLite adapter
|
||||
// ============================================================================
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
pub struct SqliteChannelRepository {
|
||||
pool: sqlx::SqlitePool,
|
||||
}
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
impl SqliteChannelRepository {
|
||||
pub fn new(pool: sqlx::SqlitePool) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
#[async_trait]
|
||||
impl ChannelRepository for SqliteChannelRepository {
|
||||
async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
|
||||
let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = ?");
|
||||
let row: Option<ChannelRow> = sqlx::query_as(&sql)
|
||||
.bind(id.to_string())
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(Channel::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
|
||||
let sql = format!(
|
||||
"SELECT {SELECT_COLS} FROM channels WHERE owner_id = ? ORDER BY created_at ASC"
|
||||
);
|
||||
let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
|
||||
.bind(owner_id.to_string())
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
rows.into_iter().map(Channel::try_from).collect()
|
||||
}
|
||||
|
||||
async fn save(&self, channel: &Channel) -> DomainResult<()> {
|
||||
let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
|
||||
})?;
|
||||
let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
|
||||
})?;
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO channels
|
||||
(id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
name = excluded.name,
|
||||
description = excluded.description,
|
||||
timezone = excluded.timezone,
|
||||
schedule_config = excluded.schedule_config,
|
||||
recycle_policy = excluded.recycle_policy,
|
||||
updated_at = excluded.updated_at
|
||||
"#,
|
||||
)
|
||||
.bind(channel.id.to_string())
|
||||
.bind(channel.owner_id.to_string())
|
||||
.bind(&channel.name)
|
||||
.bind(&channel.description)
|
||||
.bind(&channel.timezone)
|
||||
.bind(&schedule_config)
|
||||
.bind(&recycle_policy)
|
||||
.bind(channel.created_at.to_rfc3339())
|
||||
.bind(channel.updated_at.to_rfc3339())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete(&self, id: ChannelId) -> DomainResult<()> {
|
||||
sqlx::query("DELETE FROM channels WHERE id = ?")
|
||||
.bind(id.to_string())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PostgreSQL adapter
|
||||
// ============================================================================
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
pub struct PostgresChannelRepository {
|
||||
pool: sqlx::Pool<sqlx::Postgres>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
impl PostgresChannelRepository {
|
||||
pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
#[async_trait]
|
||||
impl ChannelRepository for PostgresChannelRepository {
|
||||
async fn find_by_id(&self, id: ChannelId) -> DomainResult<Option<Channel>> {
|
||||
let sql = format!("SELECT {SELECT_COLS} FROM channels WHERE id = $1");
|
||||
let row: Option<ChannelRow> = sqlx::query_as(&sql)
|
||||
.bind(id.to_string())
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(Channel::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn find_by_owner(&self, owner_id: UserId) -> DomainResult<Vec<Channel>> {
|
||||
let sql = format!(
|
||||
"SELECT {SELECT_COLS} FROM channels WHERE owner_id = $1 ORDER BY created_at ASC"
|
||||
);
|
||||
let rows: Vec<ChannelRow> = sqlx::query_as(&sql)
|
||||
.bind(owner_id.to_string())
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
rows.into_iter().map(Channel::try_from).collect()
|
||||
}
|
||||
|
||||
async fn save(&self, channel: &Channel) -> DomainResult<()> {
|
||||
let schedule_config = serde_json::to_string(&channel.schedule_config).map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Failed to serialize schedule_config: {}", e))
|
||||
})?;
|
||||
let recycle_policy = serde_json::to_string(&channel.recycle_policy).map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Failed to serialize recycle_policy: {}", e))
|
||||
})?;
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO channels
|
||||
(id, owner_id, name, description, timezone, schedule_config, recycle_policy, created_at, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
name = EXCLUDED.name,
|
||||
description = EXCLUDED.description,
|
||||
timezone = EXCLUDED.timezone,
|
||||
schedule_config = EXCLUDED.schedule_config,
|
||||
recycle_policy = EXCLUDED.recycle_policy,
|
||||
updated_at = EXCLUDED.updated_at
|
||||
"#,
|
||||
)
|
||||
.bind(channel.id.to_string())
|
||||
.bind(channel.owner_id.to_string())
|
||||
.bind(&channel.name)
|
||||
.bind(&channel.description)
|
||||
.bind(&channel.timezone)
|
||||
.bind(&schedule_config)
|
||||
.bind(&recycle_policy)
|
||||
.bind(channel.created_at.to_rfc3339())
|
||||
.bind(channel.updated_at.to_rfc3339())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete(&self, id: ChannelId) -> DomainResult<()> {
|
||||
sqlx::query("DELETE FROM channels WHERE id = $1")
|
||||
.bind(id.to_string())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
17
k-tv-backend/infra/src/db.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
pub use k_core::db::DatabasePool;
|
||||
|
||||
pub async fn run_migrations(pool: &DatabasePool) -> Result<(), sqlx::Error> {
|
||||
match pool {
|
||||
#[cfg(feature = "sqlite")]
|
||||
DatabasePool::Sqlite(pool) => {
|
||||
// Point specifically to the sqlite folder
|
||||
sqlx::migrate!("../migrations_sqlite").run(pool).await?;
|
||||
}
|
||||
#[cfg(feature = "postgres")]
|
||||
DatabasePool::Postgres(pool) => {
|
||||
// Point specifically to the postgres folder
|
||||
sqlx::migrate!("../migrations_postgres").run(pool).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
71
k-tv-backend/infra/src/factory.rs
Normal file
@@ -0,0 +1,71 @@
use std::sync::Arc;

use crate::db::DatabasePool;
use domain::{ChannelRepository, ScheduleRepository, UserRepository};

#[derive(Debug, thiserror::Error)]
pub enum FactoryError {
    #[error("Database error: {0}")]
    Database(#[from] sqlx::Error),
    #[error("Not implemented: {0}")]
    NotImplemented(String),
    #[error("Infrastructure error: {0}")]
    Infrastructure(#[from] domain::DomainError),
}

pub type FactoryResult<T> = Result<T, FactoryError>;

pub async fn build_user_repository(pool: &DatabasePool) -> FactoryResult<Arc<dyn UserRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::user_repository::SqliteUserRepository::new(pool.clone()),
        )),
        #[cfg(feature = "postgres")]
        DatabasePool::Postgres(pool) => Ok(Arc::new(
            crate::user_repository::PostgresUserRepository::new(pool.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "No database feature enabled".to_string(),
        )),
    }
}

pub async fn build_channel_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ChannelRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::channel_repository::SqliteChannelRepository::new(pool.clone()),
        )),
        #[cfg(feature = "postgres")]
        DatabasePool::Postgres(pool) => Ok(Arc::new(
            crate::channel_repository::PostgresChannelRepository::new(pool.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "No database feature enabled".to_string(),
        )),
    }
}

pub async fn build_schedule_repository(
    pool: &DatabasePool,
) -> FactoryResult<Arc<dyn ScheduleRepository>> {
    match pool {
        #[cfg(feature = "sqlite")]
        DatabasePool::Sqlite(pool) => Ok(Arc::new(
            crate::schedule_repository::SqliteScheduleRepository::new(pool.clone()),
        )),
        #[cfg(feature = "postgres")]
        DatabasePool::Postgres(pool) => Ok(Arc::new(
            crate::schedule_repository::PostgresScheduleRepository::new(pool.clone()),
        )),
        #[allow(unreachable_patterns)]
        _ => Err(FactoryError::NotImplemented(
            "No database feature enabled".to_string(),
        )),
    }
}
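For orientation, a minimal startup sketch showing how these factories are meant to be combined with the pool and migration helpers. It is illustrative only: it assumes the crate is exposed as `infra`, and it reuses the `k_core::db::{DatabaseConfig, connect}` helpers that also appear in the repository tests further down.

use std::sync::Arc;
use domain::{ChannelRepository, UserRepository};

async fn bootstrap() -> Result<(), Box<dyn std::error::Error>> {
    // Connect with the default (SQLite) settings and bring the schema up to date.
    let config = k_core::db::DatabaseConfig::default();
    let pool = k_core::db::connect(&config).await.expect("failed to connect");
    infra::run_migrations(&pool).await?;

    // The concrete repository type depends on the enabled database feature;
    // callers only ever see the domain traits.
    let users: Arc<dyn UserRepository> = infra::factory::build_user_repository(&pool).await?;
    let channels: Arc<dyn ChannelRepository> = infra::factory::build_channel_repository(&pool).await?;

    // Hand the trait objects to the application layer here.
    let _ = (users, channels);
    Ok(())
}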
234
k-tv-backend/infra/src/jellyfin.rs
Normal file
@@ -0,0 +1,234 @@
|
||||
//! Jellyfin media provider adapter
|
||||
//!
|
||||
//! Implements [`IMediaProvider`] by talking to the Jellyfin HTTP API.
|
||||
//! The domain never sees Jellyfin-specific types — this module translates
|
||||
//! between Jellyfin's API model and the domain's abstract `MediaItem`/`MediaFilter`.
|
||||
|
||||
#![cfg(feature = "jellyfin")]
|
||||
|
||||
use async_trait::async_trait;
|
||||
use serde::Deserialize;
|
||||
|
||||
use domain::{ContentType, DomainError, DomainResult, IMediaProvider, MediaFilter, MediaItem, MediaItemId};
|
||||
|
||||
/// Ticks are Jellyfin's time unit: 1 tick = 100 nanoseconds → 10,000,000 ticks/sec.
|
||||
const TICKS_PER_SEC: i64 = 10_000_000;
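// Worked example: a 90-minute feature is 5_400 s = 5_400 * 10_000_000 = 54_000_000_000 ticks.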
|
||||
|
||||
// ============================================================================
|
||||
// Configuration
|
||||
// ============================================================================
|
||||
|
||||
/// Connection details for a single Jellyfin instance.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct JellyfinConfig {
|
||||
/// e.g. `"http://192.168.1.10:8096"` — no trailing slash
|
||||
pub base_url: String,
|
||||
/// Jellyfin API key (Settings → API Keys)
|
||||
pub api_key: String,
|
||||
/// The Jellyfin user ID used for library browsing
|
||||
pub user_id: String,
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Adapter
|
||||
// ============================================================================
|
||||
|
||||
pub struct JellyfinMediaProvider {
|
||||
client: reqwest::Client,
|
||||
config: JellyfinConfig,
|
||||
}
|
||||
|
||||
impl JellyfinMediaProvider {
|
||||
pub fn new(config: JellyfinConfig) -> Self {
|
||||
Self {
|
||||
client: reqwest::Client::new(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl IMediaProvider for JellyfinMediaProvider {
|
||||
/// Fetch items matching `filter` from the Jellyfin library.
|
||||
///
|
||||
/// `MediaFilter.collections` maps to Jellyfin `ParentId` (library/folder UUID).
|
||||
/// Multiple collections are not supported in a single call; the first entry wins.
|
||||
/// Decades are mapped to Jellyfin's `MinYear`/`MaxYear`.
|
||||
async fn fetch_items(&self, filter: &MediaFilter) -> DomainResult<Vec<MediaItem>> {
|
||||
let url = format!(
|
||||
"{}/Users/{}/Items",
|
||||
self.config.base_url, self.config.user_id
|
||||
);
|
||||
|
||||
let mut params: Vec<(&str, String)> = vec![
|
||||
("Recursive", "true".into()),
|
||||
("Fields", "Genres,Tags,RunTimeTicks,ProductionYear".into()),
|
||||
];
|
||||
|
||||
if let Some(ct) = &filter.content_type {
|
||||
params.push(("IncludeItemTypes", jellyfin_item_type(ct).into()));
|
||||
}
|
||||
|
||||
if !filter.genres.is_empty() {
|
||||
params.push(("Genres", filter.genres.join("|")));
|
||||
}
|
||||
|
||||
if let Some(decade) = filter.decade {
|
||||
params.push(("MinYear", decade.to_string()));
|
||||
params.push(("MaxYear", (decade + 9).to_string()));
|
||||
}
|
||||
|
||||
if !filter.tags.is_empty() {
|
||||
params.push(("Tags", filter.tags.join("|")));
|
||||
}
|
||||
|
||||
if let Some(min) = filter.min_duration_secs {
|
||||
params.push(("MinRunTimeTicks", (min as i64 * TICKS_PER_SEC).to_string()));
|
||||
}
|
||||
if let Some(max) = filter.max_duration_secs {
|
||||
params.push(("MaxRunTimeTicks", (max as i64 * TICKS_PER_SEC).to_string()));
|
||||
}
|
||||
|
||||
// Treat the first collection entry as a Jellyfin ParentId (library/folder)
|
||||
if let Some(parent_id) = filter.collections.first() {
|
||||
params.push(("ParentId", parent_id.clone()));
|
||||
}
|
||||
|
||||
let response = self
|
||||
.client
|
||||
.get(&url)
|
||||
.header("X-Emby-Token", &self.config.api_key)
|
||||
.query(¶ms)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
|
||||
})?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(DomainError::InfrastructureError(format!(
|
||||
"Jellyfin returned HTTP {}",
|
||||
response.status()
|
||||
)));
|
||||
}
|
||||
|
||||
let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
|
||||
DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
|
||||
})?;
|
||||
|
||||
Ok(body.items.into_iter().filter_map(map_jellyfin_item).collect())
|
||||
}
|
||||
|
||||
/// Fetch a single item by its opaque ID.
|
||||
///
|
||||
/// Returns `None` if the item is not found or cannot be mapped.
|
||||
async fn fetch_by_id(&self, item_id: &MediaItemId) -> DomainResult<Option<MediaItem>> {
|
||||
let url = format!(
|
||||
"{}/Users/{}/Items",
|
||||
self.config.base_url, self.config.user_id
|
||||
);
|
||||
|
||||
let response = self
|
||||
.client
|
||||
.get(&url)
|
||||
.header("X-Emby-Token", &self.config.api_key)
|
||||
.query(&[
|
||||
("Ids", item_id.as_ref()),
|
||||
("Fields", "Genres,Tags,RunTimeTicks,ProductionYear"),
|
||||
])
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
DomainError::InfrastructureError(format!("Jellyfin request failed: {}", e))
|
||||
})?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let body: JellyfinItemsResponse = response.json().await.map_err(|e| {
|
||||
DomainError::InfrastructureError(format!("Failed to parse Jellyfin response: {}", e))
|
||||
})?;
|
||||
|
||||
Ok(body.items.into_iter().next().and_then(map_jellyfin_item))
|
||||
}
|
||||
|
||||
/// Build a direct-play stream URL for a Jellyfin item.
|
||||
///
|
||||
/// Uses `static=true` to request the original file without transcoding.
|
||||
/// The API key is embedded in the URL so the player does not need separate auth.
|
||||
async fn get_stream_url(&self, item_id: &MediaItemId) -> DomainResult<String> {
|
||||
Ok(format!(
|
||||
"{}/Videos/{}/stream?static=true&api_key={}",
|
||||
self.config.base_url,
|
||||
item_id.as_ref(),
|
||||
self.config.api_key,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Jellyfin API response types
|
||||
// ============================================================================
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct JellyfinItemsResponse {
|
||||
#[serde(rename = "Items")]
|
||||
items: Vec<JellyfinItem>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct JellyfinItem {
|
||||
#[serde(rename = "Id")]
|
||||
id: String,
|
||||
#[serde(rename = "Name")]
|
||||
name: String,
|
||||
#[serde(rename = "Type")]
|
||||
item_type: String,
|
||||
#[serde(rename = "RunTimeTicks")]
|
||||
run_time_ticks: Option<i64>,
|
||||
#[serde(rename = "Genres")]
|
||||
genres: Option<Vec<String>>,
|
||||
#[serde(rename = "ProductionYear")]
|
||||
production_year: Option<u16>,
|
||||
#[serde(rename = "Tags")]
|
||||
tags: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Mapping helpers
|
||||
// ============================================================================
|
||||
|
||||
fn jellyfin_item_type(ct: &ContentType) -> &'static str {
|
||||
match ct {
|
||||
ContentType::Movie => "Movie",
|
||||
ContentType::Episode => "Episode",
|
||||
// Jellyfin has no native "Short" type; short films are filed as Movies
|
||||
ContentType::Short => "Movie",
|
||||
}
|
||||
}
|
||||
|
||||
/// Map a raw Jellyfin item to a domain `MediaItem`. Returns `None` for unknown
|
||||
/// item types (e.g. Season, Series, Folder) so they are silently skipped.
|
||||
fn map_jellyfin_item(item: JellyfinItem) -> Option<MediaItem> {
|
||||
let content_type = match item.item_type.as_str() {
|
||||
"Movie" => ContentType::Movie,
|
||||
"Episode" => ContentType::Episode,
|
||||
_ => return None,
|
||||
};
|
||||
|
||||
let duration_secs = item
|
||||
.run_time_ticks
|
||||
.map(|t| (t / TICKS_PER_SEC) as u32)
|
||||
.unwrap_or(0);
|
||||
|
||||
Some(MediaItem {
|
||||
id: MediaItemId::new(item.id),
|
||||
title: item.name,
|
||||
content_type,
|
||||
duration_secs,
|
||||
genres: item.genres.unwrap_or_default(),
|
||||
year: item.production_year,
|
||||
tags: item.tags.unwrap_or_default(),
|
||||
})
|
||||
}
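A usage sketch of the adapter: the server address, API key, and item ID below are placeholders, and the crate is assumed to be exposed as `infra` with the re-exports declared in lib.rs; only the construction and the two ID-based calls mirror the trait impl above.

use domain::{IMediaProvider, MediaItemId};
use infra::{JellyfinConfig, JellyfinMediaProvider};

async fn print_stream_url() -> domain::DomainResult<()> {
    let provider = JellyfinMediaProvider::new(JellyfinConfig {
        base_url: "http://192.168.1.10:8096".to_string(), // no trailing slash
        api_key: "your-jellyfin-api-key".to_string(),
        user_id: "your-jellyfin-user-id".to_string(),
    });

    // Hypothetical item ID — replace with a real Jellyfin item ID.
    let id = MediaItemId::new("f137a2dd21bbc1b99aa5c0f6bf02a805".to_string());

    if let Some(item) = provider.fetch_by_id(&id).await? {
        let url = provider.get_stream_url(&item.id).await?;
        println!("{} -> {}", item.title, url);
    }
    Ok(())
}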
|
||||
35
k-tv-backend/infra/src/lib.rs
Normal file
@@ -0,0 +1,35 @@
//! K-TV Infrastructure Layer
//!
//! Concrete adapters for the repository and provider ports defined in the domain.
//!
//! ## Repository adapters
//! - `SqliteUserRepository` / `PostgresUserRepository`
//! - `SqliteChannelRepository` / `PostgresChannelRepository`
//! - `SqliteScheduleRepository` / `PostgresScheduleRepository`
//!
//! ## Media provider adapters
//! - `JellyfinMediaProvider` (feature = `"jellyfin"`)
//!
//! ## Database
//! - [`db::run_migrations`] — run all pending SQLite/Postgres migrations

pub mod auth;
pub mod db;
pub mod factory;
pub mod jellyfin;
mod channel_repository;
mod schedule_repository;
mod user_repository;

// Re-export for convenience
pub use db::run_migrations;

#[cfg(feature = "sqlite")]
pub use user_repository::SqliteUserRepository;
#[cfg(feature = "sqlite")]
pub use channel_repository::SqliteChannelRepository;
#[cfg(feature = "sqlite")]
pub use schedule_repository::SqliteScheduleRepository;

#[cfg(feature = "jellyfin")]
pub use jellyfin::{JellyfinConfig, JellyfinMediaProvider};
447
k-tv-backend/infra/src/schedule_repository.rs
Normal file
@@ -0,0 +1,447 @@
|
||||
//! SQLite and PostgreSQL adapters for ScheduleRepository
|
||||
|
||||
use async_trait::async_trait;
|
||||
use chrono::{DateTime, Utc};
|
||||
use sqlx::FromRow;
|
||||
use uuid::Uuid;
|
||||
|
||||
use domain::{
|
||||
ChannelId, DomainError, DomainResult, GeneratedSchedule, MediaItem, MediaItemId,
|
||||
PlaybackRecord, ScheduleRepository, ScheduledSlot,
|
||||
};
|
||||
|
||||
// ============================================================================
|
||||
// Row types
|
||||
// ============================================================================
|
||||
|
||||
#[derive(Debug, FromRow)]
|
||||
struct ScheduleRow {
|
||||
id: String,
|
||||
channel_id: String,
|
||||
valid_from: String,
|
||||
valid_until: String,
|
||||
generation: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, FromRow)]
|
||||
struct SlotRow {
|
||||
id: String,
|
||||
// schedule_id is selected to satisfy the row mapping (the query filters on it); not needed for the domain type
|
||||
#[allow(dead_code)]
|
||||
schedule_id: String,
|
||||
start_at: String,
|
||||
end_at: String,
|
||||
item: String,
|
||||
source_block_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, FromRow)]
|
||||
struct PlaybackRecordRow {
|
||||
id: String,
|
||||
channel_id: String,
|
||||
item_id: String,
|
||||
played_at: String,
|
||||
generation: i64,
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Mapping
|
||||
// ============================================================================
|
||||
|
||||
fn parse_dt(s: &str) -> Result<DateTime<Utc>, DomainError> {
|
||||
DateTime::parse_from_rfc3339(s)
|
||||
.map(|dt| dt.with_timezone(&Utc))
|
||||
.or_else(|_| {
|
||||
chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S").map(|dt| dt.and_utc())
|
||||
})
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid datetime '{}': {}", s, e)))
|
||||
}
|
||||
|
||||
fn map_slot_row(row: SlotRow) -> Result<ScheduledSlot, DomainError> {
|
||||
let id = Uuid::parse_str(&row.id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot UUID: {}", e)))?;
|
||||
let source_block_id = Uuid::parse_str(&row.source_block_id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid block UUID: {}", e)))?;
|
||||
let item: MediaItem = serde_json::from_str(&row.item)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid slot item JSON: {}", e)))?;
|
||||
|
||||
Ok(ScheduledSlot {
|
||||
id,
|
||||
start_at: parse_dt(&row.start_at)?,
|
||||
end_at: parse_dt(&row.end_at)?,
|
||||
item,
|
||||
source_block_id,
|
||||
})
|
||||
}
|
||||
|
||||
fn map_schedule(row: ScheduleRow, slot_rows: Vec<SlotRow>) -> Result<GeneratedSchedule, DomainError> {
|
||||
let id = Uuid::parse_str(&row.id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid schedule UUID: {}", e)))?;
|
||||
let channel_id = Uuid::parse_str(&row.channel_id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
|
||||
|
||||
let slots: Result<Vec<ScheduledSlot>, _> = slot_rows.into_iter().map(map_slot_row).collect();
|
||||
|
||||
Ok(GeneratedSchedule {
|
||||
id,
|
||||
channel_id,
|
||||
valid_from: parse_dt(&row.valid_from)?,
|
||||
valid_until: parse_dt(&row.valid_until)?,
|
||||
generation: row.generation as u32,
|
||||
slots: slots?,
|
||||
})
|
||||
}
|
||||
|
||||
impl TryFrom<PlaybackRecordRow> for PlaybackRecord {
|
||||
type Error = DomainError;
|
||||
|
||||
fn try_from(row: PlaybackRecordRow) -> Result<Self, Self::Error> {
|
||||
let id = Uuid::parse_str(&row.id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
|
||||
let channel_id = Uuid::parse_str(&row.channel_id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid channel UUID: {}", e)))?;
|
||||
|
||||
Ok(PlaybackRecord {
|
||||
id,
|
||||
channel_id,
|
||||
item_id: MediaItemId::new(row.item_id),
|
||||
played_at: parse_dt(&row.played_at)?,
|
||||
generation: row.generation as u32,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// SQLite adapter
|
||||
// ============================================================================
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
pub struct SqliteScheduleRepository {
|
||||
pool: sqlx::SqlitePool,
|
||||
}
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
impl SqliteScheduleRepository {
|
||||
pub fn new(pool: sqlx::SqlitePool) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
|
||||
async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
|
||||
sqlx::query_as(
|
||||
"SELECT id, schedule_id, start_at, end_at, item, source_block_id \
|
||||
FROM scheduled_slots WHERE schedule_id = ? ORDER BY start_at",
|
||||
)
|
||||
.bind(schedule_id)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
#[async_trait]
|
||||
impl ScheduleRepository for SqliteScheduleRepository {
|
||||
async fn find_active(
|
||||
&self,
|
||||
channel_id: ChannelId,
|
||||
at: DateTime<Utc>,
|
||||
) -> DomainResult<Option<GeneratedSchedule>> {
|
||||
let at_str = at.to_rfc3339();
|
||||
let row: Option<ScheduleRow> = sqlx::query_as(
|
||||
"SELECT id, channel_id, valid_from, valid_until, generation \
|
||||
FROM generated_schedules \
|
||||
WHERE channel_id = ? AND valid_from <= ? AND valid_until > ? \
|
||||
LIMIT 1",
|
||||
)
|
||||
.bind(channel_id.to_string())
|
||||
.bind(&at_str)
|
||||
.bind(&at_str)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
None => Ok(None),
|
||||
Some(r) => {
|
||||
let slots = self.fetch_slots(&r.id).await?;
|
||||
Some(map_schedule(r, slots)).transpose()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
|
||||
let row: Option<ScheduleRow> = sqlx::query_as(
|
||||
"SELECT id, channel_id, valid_from, valid_until, generation \
|
||||
FROM generated_schedules \
|
||||
WHERE channel_id = ? ORDER BY valid_from DESC LIMIT 1",
|
||||
)
|
||||
.bind(channel_id.to_string())
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
None => Ok(None),
|
||||
Some(r) => {
|
||||
let slots = self.fetch_slots(&r.id).await?;
|
||||
Some(map_schedule(r, slots)).transpose()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
|
||||
// Upsert the schedule header
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
valid_from = excluded.valid_from,
|
||||
valid_until = excluded.valid_until,
|
||||
generation = excluded.generation
|
||||
"#,
|
||||
)
|
||||
.bind(schedule.id.to_string())
|
||||
.bind(schedule.channel_id.to_string())
|
||||
.bind(schedule.valid_from.to_rfc3339())
|
||||
.bind(schedule.valid_until.to_rfc3339())
|
||||
.bind(schedule.generation as i64)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
// Replace all slots (delete-then-insert is safe here; schedule saves are
|
||||
// infrequent and atomic within a single-writer SQLite connection)
|
||||
sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = ?")
|
||||
.bind(schedule.id.to_string())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
for slot in &schedule.slots {
|
||||
let item_json = serde_json::to_string(&slot.item).map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
|
||||
})?;
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
"#,
|
||||
)
|
||||
.bind(slot.id.to_string())
|
||||
.bind(schedule.id.to_string())
|
||||
.bind(slot.start_at.to_rfc3339())
|
||||
.bind(slot.end_at.to_rfc3339())
|
||||
.bind(&item_json)
|
||||
.bind(slot.source_block_id.to_string())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
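If the header upsert and the slot rewrite ever need to be atomic as one unit (for example with multiple writers on Postgres), the same statements could run inside a sqlx transaction. A rough fragment of how this method's body could be wrapped, assuming sqlx 0.7's `Pool::begin` / `Transaction` API; this is a sketch, not part of the current implementation:

    // Inside save(): open a transaction and run every statement against it.
    let mut tx = self
        .pool
        .begin()
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;

    sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = ?")
        .bind(schedule.id.to_string())
        .execute(&mut *tx)
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;

    // ... the header upsert and the per-slot inserts go here, each with `.execute(&mut *tx)` ...

    tx.commit()
        .await
        .map_err(|e| DomainError::RepositoryError(e.to_string()))?;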
|
||||
|
||||
async fn find_playback_history(
|
||||
&self,
|
||||
channel_id: ChannelId,
|
||||
) -> DomainResult<Vec<PlaybackRecord>> {
|
||||
let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
|
||||
"SELECT id, channel_id, item_id, played_at, generation \
|
||||
FROM playback_records WHERE channel_id = ? ORDER BY played_at DESC",
|
||||
)
|
||||
.bind(channel_id.to_string())
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
rows.into_iter().map(PlaybackRecord::try_from).collect()
|
||||
}
|
||||
|
||||
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT(id) DO NOTHING
|
||||
"#,
|
||||
)
|
||||
.bind(record.id.to_string())
|
||||
.bind(record.channel_id.to_string())
|
||||
.bind(record.item_id.as_ref())
|
||||
.bind(record.played_at.to_rfc3339())
|
||||
.bind(record.generation as i64)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PostgreSQL adapter
|
||||
// ============================================================================
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
pub struct PostgresScheduleRepository {
|
||||
pool: sqlx::Pool<sqlx::Postgres>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
impl PostgresScheduleRepository {
|
||||
pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
|
||||
async fn fetch_slots(&self, schedule_id: &str) -> DomainResult<Vec<SlotRow>> {
|
||||
sqlx::query_as(
|
||||
"SELECT id, schedule_id, start_at, end_at, item, source_block_id \
|
||||
FROM scheduled_slots WHERE schedule_id = $1 ORDER BY start_at",
|
||||
)
|
||||
.bind(schedule_id)
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
#[async_trait]
|
||||
impl ScheduleRepository for PostgresScheduleRepository {
|
||||
async fn find_active(
|
||||
&self,
|
||||
channel_id: ChannelId,
|
||||
at: DateTime<Utc>,
|
||||
) -> DomainResult<Option<GeneratedSchedule>> {
|
||||
let at_str = at.to_rfc3339();
|
||||
let row: Option<ScheduleRow> = sqlx::query_as(
|
||||
"SELECT id, channel_id, valid_from, valid_until, generation \
|
||||
FROM generated_schedules \
|
||||
WHERE channel_id = $1 AND valid_from <= $2 AND valid_until > $3 \
|
||||
LIMIT 1",
|
||||
)
|
||||
.bind(channel_id.to_string())
|
||||
.bind(&at_str)
|
||||
.bind(&at_str)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
None => Ok(None),
|
||||
Some(r) => {
|
||||
let slots = self.fetch_slots(&r.id).await?;
|
||||
Some(map_schedule(r, slots)).transpose()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn find_latest(&self, channel_id: ChannelId) -> DomainResult<Option<GeneratedSchedule>> {
|
||||
let row: Option<ScheduleRow> = sqlx::query_as(
|
||||
"SELECT id, channel_id, valid_from, valid_until, generation \
|
||||
FROM generated_schedules \
|
||||
WHERE channel_id = $1 ORDER BY valid_from DESC LIMIT 1",
|
||||
)
|
||||
.bind(channel_id.to_string())
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
match row {
|
||||
None => Ok(None),
|
||||
Some(r) => {
|
||||
let slots = self.fetch_slots(&r.id).await?;
|
||||
Some(map_schedule(r, slots)).transpose()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn save(&self, schedule: &GeneratedSchedule) -> DomainResult<()> {
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO generated_schedules (id, channel_id, valid_from, valid_until, generation)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
valid_from = EXCLUDED.valid_from,
|
||||
valid_until = EXCLUDED.valid_until,
|
||||
generation = EXCLUDED.generation
|
||||
"#,
|
||||
)
|
||||
.bind(schedule.id.to_string())
|
||||
.bind(schedule.channel_id.to_string())
|
||||
.bind(schedule.valid_from.to_rfc3339())
|
||||
.bind(schedule.valid_until.to_rfc3339())
|
||||
.bind(schedule.generation as i64)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
sqlx::query("DELETE FROM scheduled_slots WHERE schedule_id = $1")
|
||||
.bind(schedule.id.to_string())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
for slot in &schedule.slots {
|
||||
let item_json = serde_json::to_string(&slot.item).map_err(|e| {
|
||||
DomainError::RepositoryError(format!("Failed to serialize slot item: {}", e))
|
||||
})?;
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO scheduled_slots (id, schedule_id, start_at, end_at, item, source_block_id)
|
||||
VALUES ($1, $2, $3, $4, $5, $6)
|
||||
"#,
|
||||
)
|
||||
.bind(slot.id.to_string())
|
||||
.bind(schedule.id.to_string())
|
||||
.bind(slot.start_at.to_rfc3339())
|
||||
.bind(slot.end_at.to_rfc3339())
|
||||
.bind(&item_json)
|
||||
.bind(slot.source_block_id.to_string())
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn find_playback_history(
|
||||
&self,
|
||||
channel_id: ChannelId,
|
||||
) -> DomainResult<Vec<PlaybackRecord>> {
|
||||
let rows: Vec<PlaybackRecordRow> = sqlx::query_as(
|
||||
"SELECT id, channel_id, item_id, played_at, generation \
|
||||
FROM playback_records WHERE channel_id = $1 ORDER BY played_at DESC",
|
||||
)
|
||||
.bind(channel_id.to_string())
|
||||
.fetch_all(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
rows.into_iter().map(PlaybackRecord::try_from).collect()
|
||||
}
|
||||
|
||||
async fn save_playback_record(&self, record: &PlaybackRecord) -> DomainResult<()> {
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO playback_records (id, channel_id, item_id, played_at, generation)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT(id) DO NOTHING
|
||||
"#,
|
||||
)
|
||||
.bind(record.id.to_string())
|
||||
.bind(record.channel_id.to_string())
|
||||
.bind(record.item_id.as_ref())
|
||||
.bind(record.played_at.to_rfc3339())
|
||||
.bind(record.generation as i64)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
324
k-tv-backend/infra/src/user_repository.rs
Normal file
@@ -0,0 +1,324 @@
|
||||
//! SQLite and PostgreSQL implementations of UserRepository
|
||||
|
||||
use async_trait::async_trait;
|
||||
use chrono::{DateTime, Utc};
|
||||
use sqlx::FromRow;
|
||||
use uuid::Uuid;
|
||||
|
||||
use domain::{DomainError, DomainResult, Email, User, UserRepository};
|
||||
|
||||
/// Row type for database query results (shared between SQLite and PostgreSQL)
|
||||
#[derive(Debug, FromRow)]
|
||||
struct UserRow {
|
||||
id: String,
|
||||
subject: String,
|
||||
email: String,
|
||||
password_hash: Option<String>,
|
||||
created_at: String,
|
||||
}
|
||||
|
||||
impl TryFrom<UserRow> for User {
|
||||
type Error = DomainError;
|
||||
|
||||
fn try_from(row: UserRow) -> Result<Self, Self::Error> {
|
||||
let id = Uuid::parse_str(&row.id)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid UUID: {}", e)))?;
|
||||
let created_at = DateTime::parse_from_rfc3339(&row.created_at)
|
||||
.map(|dt| dt.with_timezone(&Utc))
|
||||
.or_else(|_| {
|
||||
// Fallback for SQLite datetime format
|
||||
chrono::NaiveDateTime::parse_from_str(&row.created_at, "%Y-%m-%d %H:%M:%S")
|
||||
.map(|dt| dt.and_utc())
|
||||
})
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid datetime: {}", e)))?;
|
||||
|
||||
let email = Email::try_from(row.email)
|
||||
.map_err(|e| DomainError::RepositoryError(format!("Invalid email in DB: {}", e)))?;
|
||||
|
||||
Ok(User::with_id(
|
||||
id,
|
||||
row.subject,
|
||||
email,
|
||||
row.password_hash,
|
||||
created_at,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// SQLite adapter for UserRepository
|
||||
#[cfg(feature = "sqlite")]
|
||||
#[derive(Clone)]
|
||||
pub struct SqliteUserRepository {
|
||||
pool: sqlx::SqlitePool,
|
||||
}
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
impl SqliteUserRepository {
|
||||
pub fn new(pool: sqlx::SqlitePool) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "sqlite")]
|
||||
#[async_trait]
|
||||
impl UserRepository for SqliteUserRepository {
|
||||
async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
|
||||
let id_str = id.to_string();
|
||||
let row: Option<UserRow> = sqlx::query_as(
|
||||
"SELECT id, subject, email, password_hash, created_at FROM users WHERE id = ?",
|
||||
)
|
||||
.bind(&id_str)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(User::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>> {
|
||||
let row: Option<UserRow> = sqlx::query_as(
|
||||
"SELECT id, subject, email, password_hash, created_at FROM users WHERE subject = ?",
|
||||
)
|
||||
.bind(subject)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(User::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
|
||||
let row: Option<UserRow> = sqlx::query_as(
|
||||
"SELECT id, subject, email, password_hash, created_at FROM users WHERE email = ?",
|
||||
)
|
||||
.bind(email)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(User::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn save(&self, user: &User) -> DomainResult<()> {
|
||||
let id = user.id.to_string();
|
||||
let created_at = user.created_at.to_rfc3339();
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO users (id, subject, email, password_hash, created_at)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
subject = excluded.subject,
|
||||
email = excluded.email,
|
||||
password_hash = excluded.password_hash
|
||||
"#,
|
||||
)
|
||||
.bind(&id)
|
||||
.bind(&user.subject)
|
||||
.bind(user.email.as_ref())
|
||||
.bind(&user.password_hash)
|
||||
.bind(&created_at)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
// Surface UNIQUE constraint violations as domain-level conflicts
|
||||
let msg = e.to_string();
|
||||
if msg.contains("UNIQUE constraint failed") || msg.contains("unique constraint") {
|
||||
DomainError::UserAlreadyExists(user.email.as_ref().to_string())
|
||||
} else {
|
||||
DomainError::RepositoryError(msg)
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete(&self, id: Uuid) -> DomainResult<()> {
|
||||
let id_str = id.to_string();
|
||||
sqlx::query("DELETE FROM users WHERE id = ?")
|
||||
.bind(&id_str)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "sqlite"))]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::db::run_migrations;
|
||||
use k_core::db::{DatabaseConfig, DatabasePool, connect};
|
||||
|
||||
async fn setup_test_db() -> sqlx::SqlitePool {
|
||||
let config = DatabaseConfig::default();
|
||||
let db_pool = connect(&config).await.expect("Failed to create pool");
|
||||
|
||||
run_migrations(&db_pool).await.unwrap();
|
||||
|
||||
match db_pool {
|
||||
DatabasePool::Sqlite(pool) => pool,
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_save_and_find_user() {
|
||||
let pool = setup_test_db().await;
|
||||
let repo = SqliteUserRepository::new(pool);
|
||||
|
||||
let email = Email::try_from("test@example.com").unwrap();
|
||||
let user = User::new("oidc|123", email);
|
||||
repo.save(&user).await.unwrap();
|
||||
|
||||
let found = repo.find_by_id(user.id).await.unwrap();
|
||||
assert!(found.is_some());
|
||||
let found = found.unwrap();
|
||||
assert_eq!(found.subject, "oidc|123");
|
||||
assert_eq!(found.email.as_ref(), "test@example.com");
|
||||
assert!(found.password_hash.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_save_and_find_user_with_password() {
|
||||
let pool = setup_test_db().await;
|
||||
let repo = SqliteUserRepository::new(pool);
|
||||
|
||||
let email = Email::try_from("local@example.com").unwrap();
|
||||
let user = User::new_local(email, "hashed_pw");
|
||||
repo.save(&user).await.unwrap();
|
||||
|
||||
let found = repo.find_by_id(user.id).await.unwrap();
|
||||
assert!(found.is_some());
|
||||
let found = found.unwrap();
|
||||
assert_eq!(found.email.as_ref(), "local@example.com");
|
||||
assert_eq!(found.password_hash, Some("hashed_pw".to_string()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_find_by_subject() {
|
||||
let pool = setup_test_db().await;
|
||||
let repo = SqliteUserRepository::new(pool);
|
||||
|
||||
let email = Email::try_from("user@gmail.com").unwrap();
|
||||
let user = User::new("google|456", email);
|
||||
repo.save(&user).await.unwrap();
|
||||
|
||||
let found = repo.find_by_subject("google|456").await.unwrap();
|
||||
assert!(found.is_some());
|
||||
assert_eq!(found.unwrap().id, user.id);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_delete_user() {
|
||||
let pool = setup_test_db().await;
|
||||
let repo = SqliteUserRepository::new(pool);
|
||||
|
||||
let email = Email::try_from("delete@test.com").unwrap();
|
||||
let user = User::new("test|789", email);
|
||||
repo.save(&user).await.unwrap();
|
||||
repo.delete(user.id).await.unwrap();
|
||||
|
||||
let found = repo.find_by_id(user.id).await.unwrap();
|
||||
assert!(found.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
/// PostgreSQL adapter for UserRepository
|
||||
#[cfg(feature = "postgres")]
|
||||
#[derive(Clone)]
|
||||
pub struct PostgresUserRepository {
|
||||
pool: sqlx::Pool<sqlx::Postgres>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
impl PostgresUserRepository {
|
||||
pub fn new(pool: sqlx::Pool<sqlx::Postgres>) -> Self {
|
||||
Self { pool }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "postgres")]
|
||||
#[async_trait]
|
||||
impl UserRepository for PostgresUserRepository {
|
||||
async fn find_by_id(&self, id: Uuid) -> DomainResult<Option<User>> {
|
||||
let id_str = id.to_string();
|
||||
let row: Option<UserRow> = sqlx::query_as(
|
||||
"SELECT id, subject, email, password_hash, created_at FROM users WHERE id = $1",
|
||||
)
|
||||
.bind(&id_str)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(User::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn find_by_subject(&self, subject: &str) -> DomainResult<Option<User>> {
|
||||
let row: Option<UserRow> = sqlx::query_as(
|
||||
"SELECT id, subject, email, password_hash, created_at FROM users WHERE subject = $1",
|
||||
)
|
||||
.bind(subject)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(User::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn find_by_email(&self, email: &str) -> DomainResult<Option<User>> {
|
||||
let row: Option<UserRow> = sqlx::query_as(
|
||||
"SELECT id, subject, email, password_hash, created_at FROM users WHERE email = $1",
|
||||
)
|
||||
.bind(email)
|
||||
.fetch_optional(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
row.map(User::try_from).transpose()
|
||||
}
|
||||
|
||||
async fn save(&self, user: &User) -> DomainResult<()> {
|
||||
let id = user.id.to_string();
|
||||
let created_at = user.created_at.to_rfc3339();
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO users (id, subject, email, password_hash, created_at)
|
||||
VALUES ($1, $2, $3, $4, $5)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
subject = excluded.subject,
|
||||
email = excluded.email,
|
||||
password_hash = excluded.password_hash
|
||||
"#,
|
||||
)
|
||||
.bind(&id)
|
||||
.bind(&user.subject)
|
||||
.bind(user.email.as_ref())
|
||||
.bind(&user.password_hash)
|
||||
.bind(&created_at)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let msg = e.to_string();
|
||||
if msg.contains("unique constraint") || msg.contains("duplicate key") {
|
||||
DomainError::UserAlreadyExists(user.email.as_ref().to_string())
|
||||
} else {
|
||||
DomainError::RepositoryError(msg)
|
||||
}
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete(&self, id: Uuid) -> DomainResult<()> {
|
||||
let id_str = id.to_string();
|
||||
sqlx::query("DELETE FROM users WHERE id = $1")
|
||||
.bind(&id_str)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
11
k-tv-backend/migrations_sqlite/20240101000000_init_users.sql
Normal file
@@ -0,0 +1,11 @@
-- Create users table
CREATE TABLE IF NOT EXISTS users (
    id TEXT PRIMARY KEY NOT NULL,
    subject TEXT NOT NULL,
    email TEXT NOT NULL,
    password_hash TEXT,
    created_at TEXT NOT NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS idx_users_subject ON users(subject);
CREATE UNIQUE INDEX IF NOT EXISTS idx_users_email ON users(email);
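-- Note: these unique indexes are what surface duplicate registrations; the user
-- repositories translate UNIQUE-constraint failures in save() into DomainError::UserAlreadyExists.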
@@ -0,0 +1,59 @@
-- Channels: user-defined broadcast channels with their schedule template
CREATE TABLE IF NOT EXISTS channels (
    id TEXT PRIMARY KEY NOT NULL,
    owner_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    timezone TEXT NOT NULL DEFAULT 'UTC',
    -- JSON-encoded ScheduleConfig (the shareable/exportable template)
    schedule_config TEXT NOT NULL DEFAULT '{"blocks":[]}',
    -- JSON-encoded RecyclePolicy
    recycle_policy TEXT NOT NULL DEFAULT '{"cooldown_days":30,"cooldown_generations":null,"min_available_ratio":0.2}',
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_channels_owner ON channels(owner_id);

-- Generated 48-hour schedules (resolved from a channel's template)
CREATE TABLE IF NOT EXISTS generated_schedules (
    id TEXT PRIMARY KEY NOT NULL,
    channel_id TEXT NOT NULL,
    valid_from TEXT NOT NULL,
    valid_until TEXT NOT NULL,
    generation INTEGER NOT NULL,
    FOREIGN KEY (channel_id) REFERENCES channels(id) ON DELETE CASCADE
);

-- Composite index supports both "find active at time T" and "find latest"
CREATE INDEX IF NOT EXISTS idx_schedules_channel_valid
    ON generated_schedules(channel_id, valid_from DESC);
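-- e.g. the schedule repository's find_active:
--   ... WHERE channel_id = ? AND valid_from <= ? AND valid_until > ? LIMIT 1
-- and its find_latest:
--   ... WHERE channel_id = ? ORDER BY valid_from DESC LIMIT 1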

-- Individual scheduled slots within a generated schedule.
-- The MediaItem snapshot is stored as JSON so the EPG survives library changes.
CREATE TABLE IF NOT EXISTS scheduled_slots (
    id TEXT PRIMARY KEY NOT NULL,
    schedule_id TEXT NOT NULL,
    start_at TEXT NOT NULL,
    end_at TEXT NOT NULL,
    -- JSON-encoded MediaItem (metadata snapshot at generation time)
    item TEXT NOT NULL,
    source_block_id TEXT NOT NULL,
    FOREIGN KEY (schedule_id) REFERENCES generated_schedules(id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_slots_schedule_start
    ON scheduled_slots(schedule_id, start_at);

-- Playback history for the recycle policy engine
CREATE TABLE IF NOT EXISTS playback_records (
    id TEXT PRIMARY KEY NOT NULL,
    channel_id TEXT NOT NULL,
    item_id TEXT NOT NULL,
    played_at TEXT NOT NULL,
    generation INTEGER NOT NULL,
    FOREIGN KEY (channel_id) REFERENCES channels(id) ON DELETE CASCADE
);

CREATE INDEX IF NOT EXISTS idx_playback_channel_date
    ON playback_records(channel_id, played_at DESC);