feat: multi-instance provider support

- provider_configs: add id TEXT PK; migrate existing rows (provider_type becomes id)
- local_files_index: add provider_id column + index; scope all queries per instance
- ProviderConfigRow: add id field; add get_by_id to trait
- LocalIndex: add provider_id param; all SQL scoped by provider_id
- factory: thread provider_id through build_local_files_bundle
- AppState.local_index: Option<Arc<LocalIndex>> → HashMap<String, Arc<LocalIndex>>
- admin_providers: restructured routes (POST /admin/providers create, PUT/DELETE /{id}, POST /test)
- admin_providers: use row.id as registry key for jellyfin and local_files
- files.rescan: optional ?provider=<id> query param
- frontend: add id to ProviderConfig, update api/hooks, new multi-instance panel UX
This commit is contained in:
2026-03-19 22:54:41 +01:00
parent 373e1c7c0a
commit 311fdd4006
14 changed files with 563 additions and 111 deletions

View File

@@ -99,8 +99,8 @@ async fn main() -> anyhow::Result<()> {
.await?;
#[cfg(feature = "local-files")]
if let Some(idx) = bundle.local_index {
*state.local_index.write().await = Some(idx);
if !bundle.local_index.is_empty() {
*state.local_index.write().await = bundle.local_index;
}
#[cfg(feature = "local-files")]
if let Some(tm) = bundle.transcode_manager {

View File

@@ -14,7 +14,7 @@ use infra::factory::build_transcode_settings_repository;
pub struct ProviderBundle {
pub registry: Arc<infra::ProviderRegistry>,
#[cfg(feature = "local-files")]
pub local_index: Option<Arc<infra::LocalIndex>>,
pub local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>>,
#[cfg(feature = "local-files")]
pub transcode_manager: Option<Arc<infra::TranscodeManager>>,
}
@@ -26,7 +26,7 @@ pub async fn build_provider_registry(
provider_config_repo: &Arc<dyn ProviderConfigRepository>,
) -> anyhow::Result<ProviderBundle> {
#[cfg(feature = "local-files")]
let mut local_index: Option<Arc<infra::LocalIndex>> = None;
let mut local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>> = std::collections::HashMap::new();
#[cfg(feature = "local-files")]
let mut transcode_manager: Option<Arc<infra::TranscodeManager>> = None;
@@ -41,8 +41,8 @@ pub async fn build_provider_registry(
#[cfg(feature = "jellyfin")]
"jellyfin" => {
if let Ok(cfg) = serde_json::from_str::<infra::JellyfinConfig>(&row.config_json) {
tracing::info!("Loading Jellyfin provider from DB config");
registry.register("jellyfin", Arc::new(infra::JellyfinMediaProvider::new(cfg)));
tracing::info!("Loading Jellyfin provider [{}] from DB config", row.id);
registry.register(&row.id, Arc::new(infra::JellyfinMediaProvider::new(cfg)));
}
}
#[cfg(feature = "local-files")]
@@ -56,19 +56,20 @@ pub async fn build_provider_registry(
let cleanup_ttl_hours: u32 = cfg_map.get("cleanup_ttl_hours")
.and_then(|s| s.parse().ok())
.unwrap_or(24);
tracing::info!("Loading local-files provider from DB config at {:?}", files_dir);
tracing::info!("Loading local-files provider [{}] from DB config at {:?}", row.id, files_dir);
match infra::factory::build_local_files_bundle(
db_pool,
std::path::PathBuf::from(files_dir),
transcode_dir,
cleanup_ttl_hours,
config.base_url.clone(),
&row.id,
).await {
Ok(bundle) => {
let scan_idx = Arc::clone(&bundle.local_index);
tokio::spawn(async move { scan_idx.rescan().await; });
if let Some(ref tm) = bundle.transcode_manager {
tracing::info!("Transcoding enabled");
tracing::info!("Transcoding enabled for [{}]", row.id);
// Load persisted TTL override from transcode_settings table.
let tm_clone = Arc::clone(tm);
let repo = build_transcode_settings_repository(db_pool).await.ok();
@@ -80,11 +81,13 @@ pub async fn build_provider_registry(
}
});
}
registry.register("local", bundle.provider);
transcode_manager = bundle.transcode_manager;
local_index = Some(bundle.local_index);
registry.register(&row.id, bundle.provider);
if transcode_manager.is_none() {
transcode_manager = bundle.transcode_manager;
}
local_index.insert(row.id.clone(), bundle.local_index);
}
Err(e) => tracing::warn!("Failed to build local-files provider: {}", e),
Err(e) => tracing::warn!("Failed to build local-files provider [{}]: {}", row.id, e),
}
}
}
@@ -115,6 +118,7 @@ pub async fn build_provider_registry(
config.transcode_dir.clone(),
config.transcode_cleanup_ttl_hours,
config.base_url.clone(),
"local",
).await {
Ok(bundle) => {
let scan_idx = Arc::clone(&bundle.local_index);
@@ -133,7 +137,7 @@ pub async fn build_provider_registry(
}
registry.register("local", bundle.provider);
transcode_manager = bundle.transcode_manager;
local_index = Some(bundle.local_index);
local_index.insert("local".to_string(), bundle.local_index);
}
Err(e) => tracing::warn!("local-files requires SQLite; ignoring LOCAL_FILES_DIR: {}", e),
}

View File

@@ -1,6 +1,6 @@
//! Admin provider management routes.
//!
//! All routes require an admin user. Allows listing, updating, deleting, and
//! All routes require an admin user. Allows listing, creating, updating, deleting, and
//! testing media provider configs stored in the DB. Only available when
//! CONFIG_SOURCE=db.
@@ -26,14 +26,36 @@ use crate::state::AppState;
// DTOs
// ---------------------------------------------------------------------------
/// Validate that an instance id is a safe slug (alphanumeric + hyphens, 1-40 chars).
///
/// Used as a guard before persisting a provider instance id so that ids are
/// safe to embed in URLs, registry keys, and SQL-scoped rows.
fn is_valid_instance_id(id: &str) -> bool {
    // Length is measured in bytes, which equals char count for the ASCII
    // subset allowed below; empty ids and ids over 40 bytes are rejected.
    matches!(id.len(), 1..=40) && id.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
}
#[derive(Debug, Deserialize)]
pub struct ProviderConfigPayload {
pub struct CreateProviderRequest {
pub id: String,
pub provider_type: String,
pub config_json: HashMap<String, String>,
pub enabled: bool,
}
#[derive(Debug, Deserialize)]
pub struct UpdateProviderRequest {
pub config_json: HashMap<String, String>,
pub enabled: bool,
}
#[derive(Debug, Deserialize)]
pub struct TestProviderRequest {
pub provider_type: String,
pub config_json: HashMap<String, String>,
}
#[derive(Debug, Serialize)]
pub struct ProviderConfigResponse {
pub id: String,
pub provider_type: String,
pub config_json: HashMap<String, serde_json::Value>,
pub enabled: bool,
@@ -51,9 +73,9 @@ pub struct TestResult {
pub fn router() -> Router<AppState> {
Router::new()
.route("/", get(list_providers))
.route("/{type}", put(update_provider).delete(delete_provider))
.route("/{type}/test", post(test_provider))
.route("/", get(list_providers).post(create_provider))
.route("/{id}", put(update_provider).delete(delete_provider))
.route("/test", post(test_provider))
}
// ---------------------------------------------------------------------------
@@ -97,6 +119,12 @@ async fn rebuild_registry(state: &AppState) -> DomainResult<()> {
let rows = state.provider_config_repo.get_all().await?;
let mut new_registry = infra::ProviderRegistry::new();
#[cfg(feature = "local-files")]
let mut new_local_index: std::collections::HashMap<String, Arc<infra::LocalIndex>> =
std::collections::HashMap::new();
#[cfg(feature = "local-files")]
let mut first_transcode_manager: Option<Arc<infra::TranscodeManager>> = None;
for row in &rows {
if !row.enabled {
continue;
@@ -108,7 +136,7 @@ async fn rebuild_registry(state: &AppState) -> DomainResult<()> {
serde_json::from_str::<infra::JellyfinConfig>(&row.config_json)
{
new_registry.register(
"jellyfin",
&row.id,
Arc::new(infra::JellyfinMediaProvider::new(cfg)),
);
}
@@ -144,16 +172,19 @@ async fn rebuild_registry(state: &AppState) -> DomainResult<()> {
transcode_dir,
cleanup_ttl_hours,
base_url,
&row.id,
).await {
Ok(bundle) => {
let scan_idx = Arc::clone(&bundle.local_index);
tokio::spawn(async move { scan_idx.rescan().await; });
new_registry.register("local", bundle.provider);
*state.local_index.write().await = Some(bundle.local_index);
*state.transcode_manager.write().await = bundle.transcode_manager;
new_registry.register(&row.id, bundle.provider);
new_local_index.insert(row.id.clone(), bundle.local_index);
if first_transcode_manager.is_none() {
first_transcode_manager = bundle.transcode_manager;
}
}
Err(e) => {
tracing::warn!("local_files provider requires SQLite; skipping: {}", e);
tracing::warn!("local_files provider [{}] requires SQLite; skipping: {}", row.id, e);
continue;
}
}
@@ -167,6 +198,11 @@ async fn rebuild_registry(state: &AppState) -> DomainResult<()> {
}
*state.provider_registry.write().await = Arc::new(new_registry);
#[cfg(feature = "local-files")]
{
*state.local_index.write().await = new_local_index;
*state.transcode_manager.write().await = first_transcode_manager;
}
Ok(())
}
@@ -187,6 +223,7 @@ pub async fn list_providers(
let response: Vec<ProviderConfigResponse> = rows
.iter()
.map(|row| ProviderConfigResponse {
id: row.id.clone(),
provider_type: row.provider_type.clone(),
config_json: mask_config(&row.config_json),
enabled: row.enabled,
@@ -196,29 +233,49 @@ pub async fn list_providers(
Ok(Json(response))
}
pub async fn update_provider(
pub async fn create_provider(
State(state): State<AppState>,
AdminUser(_user): AdminUser,
Path(provider_type): Path<String>,
Json(payload): Json<ProviderConfigPayload>,
Json(payload): Json<CreateProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
if state.config.config_source != ConfigSource::Db {
return Ok(conflict_response().into_response());
}
let known = matches!(provider_type.as_str(), "jellyfin" | "local_files");
if !is_valid_instance_id(&payload.id) {
return Err(ApiError::Validation(
"Instance id must be 1-40 alphanumeric+hyphen characters".to_string(),
));
}
let known = matches!(payload.provider_type.as_str(), "jellyfin" | "local_files");
if !known {
return Err(ApiError::Validation(format!(
"Unknown provider type: {}",
provider_type
payload.provider_type
)));
}
// Check for uniqueness
if state
.provider_config_repo
.get_by_id(&payload.id)
.await
.map_err(ApiError::from)?
.is_some()
{
return Ok((
StatusCode::CONFLICT,
Json(serde_json::json!({ "error": format!("Provider instance '{}' already exists", payload.id) })),
).into_response());
}
let config_json = serde_json::to_string(&payload.config_json)
.map_err(|e| ApiError::Internal(format!("Failed to serialize config: {}", e)))?;
let row = ProviderConfigRow {
provider_type: provider_type.clone(),
id: payload.id.clone(),
provider_type: payload.provider_type.clone(),
config_json: config_json.clone(),
enabled: payload.enabled,
updated_at: chrono::Utc::now().to_rfc3339(),
@@ -235,7 +292,56 @@ pub async fn update_provider(
.map_err(ApiError::from)?;
let response = ProviderConfigResponse {
provider_type,
id: payload.id,
provider_type: payload.provider_type,
config_json: mask_config(&config_json),
enabled: payload.enabled,
};
Ok((StatusCode::CREATED, Json(response)).into_response())
}
pub async fn update_provider(
State(state): State<AppState>,
AdminUser(_user): AdminUser,
Path(instance_id): Path<String>,
Json(payload): Json<UpdateProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
if state.config.config_source != ConfigSource::Db {
return Ok(conflict_response().into_response());
}
let existing = state
.provider_config_repo
.get_by_id(&instance_id)
.await
.map_err(ApiError::from)?
.ok_or_else(|| ApiError::NotFound(format!("Provider instance '{}' not found", instance_id)))?;
let config_json = serde_json::to_string(&payload.config_json)
.map_err(|e| ApiError::Internal(format!("Failed to serialize config: {}", e)))?;
let row = ProviderConfigRow {
id: existing.id.clone(),
provider_type: existing.provider_type.clone(),
config_json: config_json.clone(),
enabled: payload.enabled,
updated_at: chrono::Utc::now().to_rfc3339(),
};
state
.provider_config_repo
.upsert(&row)
.await
.map_err(ApiError::from)?;
rebuild_registry(&state)
.await
.map_err(ApiError::from)?;
let response = ProviderConfigResponse {
id: existing.id,
provider_type: existing.provider_type,
config_json: mask_config(&config_json),
enabled: payload.enabled,
};
@@ -246,7 +352,7 @@ pub async fn update_provider(
pub async fn delete_provider(
State(state): State<AppState>,
AdminUser(_user): AdminUser,
Path(provider_type): Path<String>,
Path(instance_id): Path<String>,
) -> Result<impl IntoResponse, ApiError> {
if state.config.config_source != ConfigSource::Db {
return Ok(conflict_response().into_response());
@@ -254,7 +360,7 @@ pub async fn delete_provider(
state
.provider_config_repo
.delete(&provider_type)
.delete(&instance_id)
.await
.map_err(ApiError::from)?;
@@ -268,10 +374,9 @@ pub async fn delete_provider(
pub async fn test_provider(
State(_state): State<AppState>,
AdminUser(_user): AdminUser,
Path(provider_type): Path<String>,
Json(payload): Json<ProviderConfigPayload>,
Json(payload): Json<TestProviderRequest>,
) -> Result<impl IntoResponse, ApiError> {
let result = match provider_type.as_str() {
let result = match payload.provider_type.as_str() {
"jellyfin" => test_jellyfin(&payload.config_json).await,
"local_files" => test_local_files(&payload.config_json),
_ => TestResult {

View File

@@ -22,6 +22,7 @@ use crate::{error::ApiError, state::AppState};
#[cfg(feature = "local-files")]
use axum::{
Json,
extract::Query,
http::StatusCode,
routing::{delete, post},
};
@@ -143,13 +144,25 @@ async fn stream_file(
// Rescan
// ============================================================================
#[cfg(feature = "local-files")]
#[derive(Deserialize)]
struct RescanQuery {
provider: Option<String>,
}
#[cfg(feature = "local-files")]
async fn trigger_rescan(
State(state): State<AppState>,
CurrentUser(_user): CurrentUser,
Query(query): Query<RescanQuery>,
) -> Result<Json<serde_json::Value>, ApiError> {
let index = state.local_index.read().await.clone()
.ok_or_else(|| ApiError::not_implemented("no local files provider active"))?;
let map = state.local_index.read().await.clone();
let index = if let Some(id) = &query.provider {
map.get(id).cloned()
} else {
map.values().next().cloned()
};
let index = index.ok_or_else(|| ApiError::not_implemented("no local files provider active"))?;
let count = index.rescan().await;
Ok(Json(serde_json::json!({ "items_found": count })))
}

View File

@@ -9,6 +9,8 @@ use infra::auth::jwt::{JwtConfig, JwtValidator};
#[cfg(feature = "auth-oidc")]
use infra::auth::oidc::OidcService;
use std::collections::VecDeque;
#[cfg(feature = "local-files")]
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
@@ -40,9 +42,9 @@ pub struct AppState {
pub log_history: Arc<Mutex<VecDeque<LogLine>>>,
/// Repository for persisted in-app activity events.
pub activity_log_repo: Arc<dyn ActivityLogRepository>,
/// Index for the local-files provider, used by the rescan route.
/// Indexes for local-files provider instances, keyed by provider instance id.
#[cfg(feature = "local-files")]
pub local_index: Arc<tokio::sync::RwLock<Option<Arc<infra::LocalIndex>>>>,
pub local_index: Arc<tokio::sync::RwLock<HashMap<String, Arc<infra::LocalIndex>>>>,
/// TranscodeManager for FFmpeg HLS transcoding (requires TRANSCODE_DIR).
#[cfg(feature = "local-files")]
pub transcode_manager: Arc<tokio::sync::RwLock<Option<Arc<infra::TranscodeManager>>>>,
@@ -147,7 +149,7 @@ impl AppState {
log_history,
activity_log_repo,
#[cfg(feature = "local-files")]
local_index: Arc::new(tokio::sync::RwLock::new(None)),
local_index: Arc::new(tokio::sync::RwLock::new(HashMap::new())),
#[cfg(feature = "local-files")]
transcode_manager: Arc::new(tokio::sync::RwLock::new(None)),
#[cfg(feature = "local-files")]

View File

@@ -48,6 +48,7 @@ pub trait UserRepository: Send + Sync {
#[derive(Debug, Clone)]
pub struct ProviderConfigRow {
pub id: String,
pub provider_type: String,
pub config_json: String,
pub enabled: bool,
@@ -57,8 +58,9 @@ pub struct ProviderConfigRow {
#[async_trait]
pub trait ProviderConfigRepository: Send + Sync {
async fn get_all(&self) -> DomainResult<Vec<ProviderConfigRow>>;
async fn get_by_id(&self, id: &str) -> DomainResult<Option<ProviderConfigRow>>;
async fn upsert(&self, row: &ProviderConfigRow) -> DomainResult<()>;
async fn delete(&self, provider_type: &str) -> DomainResult<()>;
async fn delete(&self, id: &str) -> DomainResult<()>;
}
/// Repository port for `Channel` persistence.

View File

@@ -133,6 +133,7 @@ pub async fn build_local_files_bundle(
transcode_dir: Option<std::path::PathBuf>,
cleanup_ttl_hours: u32,
base_url: String,
provider_id: &str,
) -> FactoryResult<LocalFilesBundle> {
match pool {
#[cfg(feature = "sqlite")]
@@ -143,7 +144,7 @@ pub async fn build_local_files_bundle(
transcode_dir: transcode_dir.clone(),
cleanup_ttl_hours,
};
let idx = Arc::new(crate::LocalIndex::new(&cfg, sqlite_pool.clone()).await);
let idx = Arc::new(crate::LocalIndex::new(&cfg, sqlite_pool.clone(), provider_id.to_string()).await);
let tm = transcode_dir.as_ref().map(|td| {
std::fs::create_dir_all(td).ok();
crate::TranscodeManager::new(td.clone(), cleanup_ttl_hours)

View File

@@ -36,15 +36,17 @@ pub fn decode_id(id: &MediaItemId) -> Option<String> {
pub struct LocalIndex {
items: Arc<RwLock<HashMap<MediaItemId, LocalFileItem>>>,
pub root_dir: PathBuf,
provider_id: String,
pool: sqlx::SqlitePool,
}
impl LocalIndex {
/// Create the index, immediately loading persisted entries from SQLite.
pub async fn new(config: &LocalFilesConfig, pool: sqlx::SqlitePool) -> Self {
pub async fn new(config: &LocalFilesConfig, pool: sqlx::SqlitePool, provider_id: String) -> Self {
let idx = Self {
items: Arc::new(RwLock::new(HashMap::new())),
root_dir: config.root_dir.clone(),
provider_id,
pool,
};
idx.load_from_db().await;
@@ -65,8 +67,10 @@ impl LocalIndex {
}
let rows = sqlx::query_as::<_, Row>(
"SELECT id, rel_path, title, duration_secs, year, tags, top_dir FROM local_files_index",
"SELECT id, rel_path, title, duration_secs, year, tags, top_dir \
FROM local_files_index WHERE provider_id = ?",
)
.bind(&self.provider_id)
.fetch_all(&self.pool)
.await;
@@ -86,7 +90,7 @@ impl LocalIndex {
};
map.insert(MediaItemId::new(row.id), item);
}
info!("Local files index: loaded {} items from DB", map.len());
info!("Local files index [{}]: loaded {} items from DB", self.provider_id, map.len());
}
Err(e) => {
// Table might not exist yet on first run — that's fine.
@@ -100,7 +104,7 @@ impl LocalIndex {
/// Returns the number of items found. Called on startup (background task)
/// and via `POST /files/rescan`.
pub async fn rescan(&self) -> u32 {
info!("Local files: scanning {:?}", self.root_dir);
info!("Local files [{}]: scanning {:?}", self.provider_id, self.root_dir);
let new_items = scan_dir(&self.root_dir).await;
let count = new_items.len() as u32;
@@ -119,15 +123,16 @@ impl LocalIndex {
error!("Failed to persist local files index: {}", e);
}
info!("Local files: indexed {} items", count);
info!("Local files [{}]: indexed {} items", self.provider_id, count);
count
}
async fn save_to_db(&self, items: &[LocalFileItem]) -> Result<(), sqlx::Error> {
// Rebuild the table in one transaction.
// Rebuild the table in one transaction, scoped to this provider.
let mut tx = self.pool.begin().await?;
sqlx::query("DELETE FROM local_files_index")
sqlx::query("DELETE FROM local_files_index WHERE provider_id = ?")
.bind(&self.provider_id)
.execute(&mut *tx)
.await?;
@@ -137,8 +142,8 @@ impl LocalIndex {
let tags_json = serde_json::to_string(&item.tags).unwrap_or_else(|_| "[]".into());
sqlx::query(
"INSERT INTO local_files_index \
(id, rel_path, title, duration_secs, year, tags, top_dir, scanned_at) \
VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
(id, rel_path, title, duration_secs, year, tags, top_dir, scanned_at, provider_id) \
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
)
.bind(&id)
.bind(&item.rel_path)
@@ -148,6 +153,7 @@ impl LocalIndex {
.bind(&tags_json)
.bind(&item.top_dir)
.bind(&now)
.bind(&self.provider_id)
.execute(&mut *tx)
.await?;
}

View File

@@ -15,8 +15,8 @@ impl SqliteProviderConfigRepository {
#[async_trait]
impl ProviderConfigRepository for SqliteProviderConfigRepository {
async fn get_all(&self) -> DomainResult<Vec<ProviderConfigRow>> {
let rows: Vec<(String, String, i64, String)> = sqlx::query_as(
"SELECT provider_type, config_json, enabled, updated_at FROM provider_configs",
let rows: Vec<(String, String, String, i64, String)> = sqlx::query_as(
"SELECT id, provider_type, config_json, enabled, updated_at FROM provider_configs",
)
.fetch_all(&self.pool)
.await
@@ -24,7 +24,8 @@ impl ProviderConfigRepository for SqliteProviderConfigRepository {
Ok(rows
.into_iter()
.map(|(provider_type, config_json, enabled, updated_at)| ProviderConfigRow {
.map(|(id, provider_type, config_json, enabled, updated_at)| ProviderConfigRow {
id,
provider_type,
config_json,
enabled: enabled != 0,
@@ -33,15 +34,35 @@ impl ProviderConfigRepository for SqliteProviderConfigRepository {
.collect())
}
/// Fetch a single provider config row by its instance id.
///
/// Returns `Ok(None)` when no row with that id exists; maps any SQLx
/// failure into `DomainError::RepositoryError`. The `enabled` column is
/// stored as an INTEGER (0/1) and converted to `bool` here.
async fn get_by_id(&self, id: &str) -> DomainResult<Option<ProviderConfigRow>> {
// Tuple order must match the SELECT column order exactly.
let row: Option<(String, String, String, i64, String)> = sqlx::query_as(
"SELECT id, provider_type, config_json, enabled, updated_at FROM provider_configs WHERE id = ?",
)
.bind(id)
.fetch_optional(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;
// Map the raw tuple into the domain row; any non-zero integer counts as enabled.
Ok(row.map(|(id, provider_type, config_json, enabled, updated_at)| ProviderConfigRow {
id,
provider_type,
config_json,
enabled: enabled != 0,
updated_at,
}))
}
async fn upsert(&self, row: &ProviderConfigRow) -> DomainResult<()> {
sqlx::query(
r#"INSERT INTO provider_configs (provider_type, config_json, enabled, updated_at)
VALUES (?, ?, ?, ?)
ON CONFLICT(provider_type) DO UPDATE SET
r#"INSERT INTO provider_configs (id, provider_type, config_json, enabled, updated_at)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
provider_type = excluded.provider_type,
config_json = excluded.config_json,
enabled = excluded.enabled,
updated_at = excluded.updated_at"#,
)
.bind(&row.id)
.bind(&row.provider_type)
.bind(&row.config_json)
.bind(row.enabled as i64)
@@ -52,9 +73,9 @@ impl ProviderConfigRepository for SqliteProviderConfigRepository {
Ok(())
}
async fn delete(&self, provider_type: &str) -> DomainResult<()> {
sqlx::query("DELETE FROM provider_configs WHERE provider_type = ?")
.bind(provider_type)
async fn delete(&self, id: &str) -> DomainResult<()> {
sqlx::query("DELETE FROM provider_configs WHERE id = ?")
.bind(id)
.execute(&self.pool)
.await
.map_err(|e| DomainError::RepositoryError(e.to_string()))?;

View File

@@ -0,0 +1,17 @@
-- Recreate provider_configs with per-instance id as PK
-- (SQLite cannot change a table's PRIMARY KEY in place, so the standard
-- pattern is: create new table, copy rows, drop old, rename).
CREATE TABLE provider_configs_new (
id TEXT PRIMARY KEY,
provider_type TEXT NOT NULL,
config_json TEXT NOT NULL,
enabled INTEGER NOT NULL DEFAULT 1,
updated_at TEXT NOT NULL
);
-- Seed ids from the old single-instance rows: each row's provider_type
-- becomes its instance id (e.g. the existing 'jellyfin' row gets id 'jellyfin').
-- This relied on provider_type being the old PK, so ids are unique.
INSERT INTO provider_configs_new (id, provider_type, config_json, enabled, updated_at)
SELECT provider_type, provider_type, config_json, enabled, updated_at
FROM provider_configs;
DROP TABLE provider_configs;
ALTER TABLE provider_configs_new RENAME TO provider_configs;
-- Scope local_files_index entries by provider instance
-- Existing rows default to 'local', matching the legacy env-configured
-- local-files instance id used by the registry.
ALTER TABLE local_files_index ADD COLUMN provider_id TEXT NOT NULL DEFAULT 'local';
CREATE INDEX IF NOT EXISTS idx_local_files_provider ON local_files_index(provider_id);