feat: ConfigSource enum, RwLock provider_registry, is_admin in UserResponse, available_provider_types

This commit is contained in:
2026-03-16 03:30:44 +01:00
parent 0e51b7c0f1
commit 46333853d2
8 changed files with 131 additions and 78 deletions

View File

@@ -5,9 +5,16 @@
use std::env;
use std::path::PathBuf;
/// Where provider configuration is sourced from at startup.
///
/// Selected via the `CONFIG_SOURCE` environment variable: the values
/// `"db"` or `"DB"` select [`ConfigSource::Db`]; any other value (or an
/// unset variable) falls back to [`ConfigSource::Env`].
///
/// A fieldless two-variant enum is trivially copyable and fully
/// comparable, so we derive `Copy`, `Eq`, and `Hash` in addition to the
/// original `Debug`, `Clone`, `PartialEq` — callers already compare it
/// with `==` (e.g. `config.config_source == ConfigSource::Db`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ConfigSource {
    /// Configuration read from environment variables (the default).
    Env,
    /// Provider configuration loaded from the database.
    Db,
}
/// Application configuration loaded from environment variables.
/// When `CONFIG_SOURCE=db`, provider configurations are additionally
/// loaded from the database at startup (see `config_source`).
#[derive(Debug, Clone)]
pub struct Config {
pub config_source: ConfigSource,
pub database_url: String,
pub cookie_secret: String,
pub cors_allowed_origins: Vec<String>,
@@ -134,7 +141,13 @@ impl Config {
let base_url = env::var("BASE_URL")
.unwrap_or_else(|_| format!("http://localhost:{}", port));
let config_source = match env::var("CONFIG_SOURCE").as_deref() {
Ok("db") | Ok("DB") => ConfigSource::Db,
_ => ConfigSource::Env,
};
Self {
config_source,
host,
port,
database_url,

View File

@@ -32,6 +32,7 @@ pub struct UserResponse {
pub id: Uuid,
pub email: String,
pub created_at: DateTime<Utc>,
pub is_admin: bool,
}
/// JWT token response
@@ -57,6 +58,8 @@ pub struct ConfigResponse {
pub providers: Vec<ProviderInfo>,
/// Capabilities of the primary provider — kept for backward compatibility.
pub provider_capabilities: domain::ProviderCapabilities,
/// Provider type strings supported by this build (feature-gated).
pub available_provider_types: Vec<String>,
}
// ============================================================================

View File

@@ -15,7 +15,7 @@ use tracing::info;
use tracing_subscriber::{EnvFilter, fmt, layer::SubscriberExt, util::SubscriberInitExt};
use domain::{ChannelService, IMediaProvider, IProviderRegistry, ProviderCapabilities, ScheduleEngineService, StreamingProtocol, UserService};
use infra::factory::{build_activity_log_repository, build_channel_repository, build_schedule_repository, build_user_repository};
use infra::factory::{build_activity_log_repository, build_channel_repository, build_provider_config_repository, build_schedule_repository, build_user_repository};
use infra::run_migrations;
use k_core::http::server::{ServerConfig, apply_standard_middleware};
use tokio::net::TcpListener;
@@ -32,7 +32,7 @@ mod scheduler;
mod state;
mod webhook;
use crate::config::Config;
use crate::config::{Config, ConfigSource};
use crate::state::AppState;
#[tokio::main]
@@ -98,64 +98,84 @@ async fn main() -> anyhow::Result<()> {
let mut registry = infra::ProviderRegistry::new();
#[cfg(feature = "jellyfin")]
if let (Some(base_url), Some(api_key), Some(user_id)) = (
&config.jellyfin_base_url,
&config.jellyfin_api_key,
&config.jellyfin_user_id,
) {
tracing::info!("Media provider: Jellyfin at {}", base_url);
registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
base_url: base_url.clone(),
api_key: api_key.clone(),
user_id: user_id.clone(),
})));
}
let provider_config_repo = build_provider_config_repository(&db_pool).await?;
#[cfg(feature = "local-files")]
if let Some(dir) = &config.local_files_dir {
if let k_core::db::DatabasePool::Sqlite(ref sqlite_pool) = db_pool {
tracing::info!("Media provider: local files at {:?}", dir);
let lf_cfg = infra::LocalFilesConfig {
root_dir: dir.clone(),
base_url: config.base_url.clone(),
transcode_dir: config.transcode_dir.clone(),
cleanup_ttl_hours: config.transcode_cleanup_ttl_hours,
};
let idx = Arc::new(infra::LocalIndex::new(&lf_cfg, sqlite_pool.clone()).await);
local_index = Some(Arc::clone(&idx));
let scan_idx = Arc::clone(&idx);
tokio::spawn(async move { scan_idx.rescan().await; });
// Build TranscodeManager if TRANSCODE_DIR is set.
let tm = config.transcode_dir.as_ref().map(|td| {
std::fs::create_dir_all(td).ok();
tracing::info!("Transcoding enabled; cache dir: {:?}", td);
let tm = infra::TranscodeManager::new(td.clone(), config.transcode_cleanup_ttl_hours);
// Load persisted TTL from DB.
let tm_clone = Arc::clone(&tm);
let pool_clone = sqlite_pool.clone();
tokio::spawn(async move {
if let Ok(row) = sqlx::query_as::<_, (i64,)>(
"SELECT cleanup_ttl_hours FROM transcode_settings WHERE id = 1",
)
.fetch_one(&pool_clone)
.await
{
tm_clone.set_cleanup_ttl(row.0 as u32);
if config.config_source == ConfigSource::Db {
tracing::info!("CONFIG_SOURCE=db: loading provider configs from database");
let rows = provider_config_repo.get_all().await?;
for row in &rows {
if !row.enabled { continue; }
match row.provider_type.as_str() {
#[cfg(feature = "jellyfin")]
"jellyfin" => {
if let Ok(cfg) = serde_json::from_str::<infra::JellyfinConfig>(&row.config_json) {
tracing::info!("Loading Jellyfin provider from DB config");
registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(cfg)));
}
});
tm
});
}
_ => {}
}
}
} else {
#[cfg(feature = "jellyfin")]
if let (Some(base_url), Some(api_key), Some(user_id)) = (
&config.jellyfin_base_url,
&config.jellyfin_api_key,
&config.jellyfin_user_id,
) {
tracing::info!("Media provider: Jellyfin at {}", base_url);
registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(infra::JellyfinConfig {
base_url: base_url.clone(),
api_key: api_key.clone(),
user_id: user_id.clone(),
})));
}
registry.register(
"local",
Arc::new(infra::LocalFilesProvider::new(idx, lf_cfg, tm.clone())),
);
transcode_manager = tm;
sqlite_pool_for_state = Some(sqlite_pool.clone());
} else {
tracing::warn!("local-files requires SQLite; ignoring LOCAL_FILES_DIR");
#[cfg(feature = "local-files")]
if let Some(dir) = &config.local_files_dir {
if let k_core::db::DatabasePool::Sqlite(ref sqlite_pool) = db_pool {
tracing::info!("Media provider: local files at {:?}", dir);
let lf_cfg = infra::LocalFilesConfig {
root_dir: dir.clone(),
base_url: config.base_url.clone(),
transcode_dir: config.transcode_dir.clone(),
cleanup_ttl_hours: config.transcode_cleanup_ttl_hours,
};
let idx = Arc::new(infra::LocalIndex::new(&lf_cfg, sqlite_pool.clone()).await);
local_index = Some(Arc::clone(&idx));
let scan_idx = Arc::clone(&idx);
tokio::spawn(async move { scan_idx.rescan().await; });
// Build TranscodeManager if TRANSCODE_DIR is set.
let tm = config.transcode_dir.as_ref().map(|td| {
std::fs::create_dir_all(td).ok();
tracing::info!("Transcoding enabled; cache dir: {:?}", td);
let tm = infra::TranscodeManager::new(td.clone(), config.transcode_cleanup_ttl_hours);
// Load persisted TTL from DB.
let tm_clone = Arc::clone(&tm);
let pool_clone = sqlite_pool.clone();
tokio::spawn(async move {
if let Ok(row) = sqlx::query_as::<_, (i64,)>(
"SELECT cleanup_ttl_hours FROM transcode_settings WHERE id = 1",
)
.fetch_one(&pool_clone)
.await
{
tm_clone.set_cleanup_ttl(row.0 as u32);
}
});
tm
});
registry.register(
"local",
Arc::new(infra::LocalFilesProvider::new(idx, lf_cfg, tm.clone())),
);
transcode_manager = tm;
sqlite_pool_for_state = Some(sqlite_pool.clone());
} else {
tracing::warn!("local-files requires SQLite; ignoring LOCAL_FILES_DIR");
}
}
}
@@ -164,7 +184,9 @@ async fn main() -> anyhow::Result<()> {
registry.register("noop", Arc::new(NoopMediaProvider));
}
let registry = Arc::new(registry);
let registry_arc = Arc::new(registry);
let provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>> =
Arc::new(tokio::sync::RwLock::new(Arc::clone(&registry_arc)));
let (event_tx, event_rx) = tokio::sync::broadcast::channel::<domain::DomainEvent>(64);
@@ -177,7 +199,7 @@ async fn main() -> anyhow::Result<()> {
));
let schedule_engine = ScheduleEngineService::new(
Arc::clone(&registry) as Arc<dyn IProviderRegistry>,
Arc::clone(&registry_arc) as Arc<dyn IProviderRegistry>,
channel_repo,
schedule_repo,
);
@@ -187,7 +209,8 @@ async fn main() -> anyhow::Result<()> {
user_service,
channel_service,
schedule_engine,
registry,
provider_registry,
provider_config_repo,
config.clone(),
event_tx.clone(),
log_tx,

View File

@@ -86,6 +86,7 @@ pub(super) async fn me(CurrentUser(user): CurrentUser) -> Result<impl IntoRespon
id: user.id,
email: user.email.into_inner(),
created_at: user.created_at,
is_admin: user.is_admin,
}))
}

View File

@@ -9,21 +9,21 @@ pub fn router() -> Router<AppState> {
}
async fn get_config(State(state): State<AppState>) -> Json<ConfigResponse> {
let providers: Vec<ProviderInfo> = state
.provider_registry
let registry = state.provider_registry.read().await;
let providers: Vec<ProviderInfo> = registry
.provider_ids()
.into_iter()
.filter_map(|id| {
state.provider_registry.capabilities(&id).map(|caps| ProviderInfo {
registry.capabilities(&id).map(|caps| ProviderInfo {
id: id.clone(),
capabilities: caps,
})
})
.collect();
let primary_capabilities = state
.provider_registry
.capabilities(state.provider_registry.primary_id())
let primary_capabilities = registry
.capabilities(registry.primary_id())
.unwrap_or(ProviderCapabilities {
collections: false,
series: false,
@@ -36,9 +36,16 @@ async fn get_config(State(state): State<AppState>) -> Json<ConfigResponse> {
transcode: false,
});
let mut available_provider_types = Vec::new();
#[cfg(feature = "jellyfin")]
available_provider_types.push("jellyfin".to_string());
#[cfg(feature = "local-files")]
available_provider_types.push("local_files".to_string());
Json(ConfigResponse {
allow_registration: state.config.allow_registration,
providers,
provider_capabilities: primary_capabilities,
available_provider_types,
})
}

View File

@@ -151,13 +151,14 @@ async fn list_collections(
Query(params): Query<CollectionsQuery>,
) -> Result<Json<Vec<CollectionResponse>>, ApiError> {
let provider_id = params.provider.as_deref().unwrap_or("");
let caps = state.provider_registry.capabilities(provider_id).ok_or_else(|| {
let registry = state.provider_registry.read().await;
let caps = registry.capabilities(provider_id).ok_or_else(|| {
ApiError::validation(format!("Unknown provider '{}'", provider_id))
})?;
if !caps.collections {
return Err(ApiError::not_implemented("collections not supported by this provider"));
}
let collections = state.provider_registry.list_collections(provider_id).await?;
let collections = registry.list_collections(provider_id).await?;
Ok(Json(collections.into_iter().map(Into::into).collect()))
}
@@ -168,14 +169,14 @@ async fn list_series(
Query(params): Query<SeriesQuery>,
) -> Result<Json<Vec<SeriesResponse>>, ApiError> {
let provider_id = params.provider.as_deref().unwrap_or("");
let caps = state.provider_registry.capabilities(provider_id).ok_or_else(|| {
let registry = state.provider_registry.read().await;
let caps = registry.capabilities(provider_id).ok_or_else(|| {
ApiError::validation(format!("Unknown provider '{}'", provider_id))
})?;
if !caps.series {
return Err(ApiError::not_implemented("series not supported by this provider"));
}
let series = state
.provider_registry
let series = registry
.list_series(provider_id, params.collection.as_deref())
.await?;
Ok(Json(series.into_iter().map(Into::into).collect()))
@@ -188,14 +189,15 @@ async fn list_genres(
Query(params): Query<GenresQuery>,
) -> Result<Json<Vec<String>>, ApiError> {
let provider_id = params.provider.as_deref().unwrap_or("");
let caps = state.provider_registry.capabilities(provider_id).ok_or_else(|| {
let registry = state.provider_registry.read().await;
let caps = registry.capabilities(provider_id).ok_or_else(|| {
ApiError::validation(format!("Unknown provider '{}'", provider_id))
})?;
if !caps.genres {
return Err(ApiError::not_implemented("genres not supported by this provider"));
}
let ct = parse_content_type(params.content_type.as_deref())?;
let genres = state.provider_registry.list_genres(provider_id, ct.as_ref()).await?;
let genres = registry.list_genres(provider_id, ct.as_ref()).await?;
Ok(Json(genres))
}
@@ -228,7 +230,8 @@ async fn search_items(
..Default::default()
};
let mut items = state.provider_registry.fetch_items(provider_id, &filter).await?;
let registry = state.provider_registry.read().await;
let mut items = registry.fetch_items(provider_id, &filter).await?;
// Apply the same ordering the schedule engine uses so the preview reflects
// what will actually be scheduled rather than raw provider order.

View File

@@ -15,14 +15,15 @@ use tokio::sync::broadcast;
use crate::config::Config;
use crate::events::EventBus;
use crate::log_layer::LogLine;
use domain::{ActivityLogRepository, ChannelService, ScheduleEngineService, UserService};
use domain::{ActivityLogRepository, ChannelService, ProviderConfigRepository, ScheduleEngineService, UserService};
#[derive(Clone)]
pub struct AppState {
pub user_service: Arc<UserService>,
pub channel_service: Arc<ChannelService>,
pub schedule_engine: Arc<ScheduleEngineService>,
pub provider_registry: Arc<infra::ProviderRegistry>,
pub provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
pub provider_config_repo: Arc<dyn ProviderConfigRepository>,
pub cookie_key: Key,
#[cfg(feature = "auth-oidc")]
pub oidc_service: Option<Arc<OidcService>>,
@@ -52,7 +53,8 @@ impl AppState {
user_service: UserService,
channel_service: ChannelService,
schedule_engine: ScheduleEngineService,
provider_registry: Arc<infra::ProviderRegistry>,
provider_registry: Arc<tokio::sync::RwLock<Arc<infra::ProviderRegistry>>>,
provider_config_repo: Arc<dyn ProviderConfigRepository>,
config: Config,
event_tx: EventBus,
log_tx: broadcast::Sender<LogLine>,
@@ -123,6 +125,7 @@ impl AppState {
channel_service: Arc::new(channel_service),
schedule_engine: Arc::new(schedule_engine),
provider_registry,
provider_config_repo,
cookie_key,
#[cfg(feature = "auth-oidc")]
oidc_service,