diff --git a/.gitignore b/.gitignore
index 222b279..36a2a68 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,4 +19,7 @@ __pycache__/
backend/app/static/
# Config data
-config/
\ No newline at end of file
+config/
+radarr-config/
+sonarr-config/
+test-data/
\ No newline at end of file
diff --git a/backend/app/arr/__init__.py b/backend/app/arr/__init__.py
index daefa05..274154c 100644
--- a/backend/app/arr/__init__.py
+++ b/backend/app/arr/__init__.py
@@ -45,6 +45,16 @@ def add_config():
try:
config = request.json
+
+        # Validate sync_interval when sync_method is 'schedule'
+ if config.get('sync_method') == 'schedule':
+ sync_interval = config.get('sync_interval', 0)
+ if sync_interval < 60 or sync_interval > 43200:
+ return jsonify({
+ 'success': False,
+                    'error': 'Sync interval must be between 60 minutes (1 hour) and 43200 minutes (30 days)'
+ }), 400
+
result = save_arr_config(config)
# Handle the conflict case first
@@ -95,7 +105,18 @@ def handle_config(id):
}), 404
elif request.method == 'PUT':
- result = update_arr_config(id, request.json)
+ config = request.json
+
+        # Validate sync_interval when sync_method is 'schedule'
+ if config.get('sync_method') == 'schedule':
+ sync_interval = config.get('sync_interval', 0)
+ if sync_interval < 60 or sync_interval > 43200:
+ return jsonify({
+ 'success': False,
+                    'error': 'Sync interval must be between 60 minutes (1 hour) and 43200 minutes (30 days)'
+ }), 400
+
+ result = update_arr_config(id, config)
# Handle the conflict case first
if not result['success'] and result.get('status_code') == 409:
@@ -156,8 +177,8 @@ def trigger_sync(id):
}), 400
# Run the import
- from .manager import run_import_for_config
- run_import_for_config(config_data)
+ from ..importer import handle_pull_import
+ handle_pull_import(id)
logger.debug(f"Manual sync triggered for arr config: {id}")
return jsonify({'success': True}), 200
diff --git a/backend/app/arr/manager.py b/backend/app/arr/manager.py
index 5ee19d6..7334bef 100644
--- a/backend/app/arr/manager.py
+++ b/backend/app/arr/manager.py
@@ -231,8 +231,7 @@ def delete_arr_config(id):
# 3) If there's a scheduled task, remove it
if existing_task_id:
- delete_import_task_for_arr_config(existing_task_id,
- config_id=id)
+ delete_import_task_for_arr_config(existing_task_id)
scheduler = TaskScheduler.get_instance()
if scheduler:
@@ -397,208 +396,6 @@ def get_pull_configs():
return results
-def run_import_for_config(config_row):
- """
- Perform the same import logic as the /import endpoints, but automatically
- for a "pull-based" or "schedule-based" ARR config.
-
- Calculates sync percentage based on explicitly selected items only:
- - Each selected profile counts as 1
- - Each selected custom format counts as 1
- """
- from datetime import datetime
- from ..db import get_db
-
- arr_id = config_row['id']
- arr_name = config_row['name']
- arr_type = config_row['type']
- arr_server = config_row['arrServer']
- api_key = config_row['apiKey']
- import_as_unique = config_row.get('import_as_unique', False)
-
- logger.info(
- f"[Pull Import] Running import for ARR config #{arr_id} ({arr_name})")
-
- # Safely parse data_to_sync
- data_to_sync = config_row['data_to_sync'] or {}
-
- selected_profiles = data_to_sync.get('profiles', [])
- selected_formats = data_to_sync.get('customFormats', [])
-
- # Log import_as_unique setting
- if import_as_unique:
- logger.info(f"Unique imports for {arr_name} are on, adjusting names")
- else:
- logger.info(
- f"Unique imports for {arr_name} are off, using original names")
-
- # Calculate total_attempted based on explicitly selected items only
- total_attempted = len(selected_formats) + len(selected_profiles)
- total_successful = 0
-
- # 1) Import user-selected custom formats (counting these in percentage)
- if selected_formats:
- logger.info(
- f"[Pull Import] Importing {len(selected_formats)} user-selected CFs for ARR #{arr_id}"
- )
- try:
- from ..importarr.format import import_formats_to_arr
- format_names = selected_formats
- original_names = format_names.copy()
-
- # Modify format names if import_as_unique is true
- if import_as_unique:
- format_names = [
- f"{name} [Dictionarry]" for name in format_names
- ]
- logger.info(
- f"Modified format names for unique import: {format_names}")
-
- format_result = import_formats_to_arr(
- format_names=format_names,
- original_names=original_names,
- base_url=arr_server,
- api_key=api_key,
- arr_type=arr_type)
-
- if format_result.get('success'):
- total_successful += (format_result.get('added', 0) +
- format_result.get('updated', 0))
- else:
- logger.warning(
- f"[Pull Import] Importing user-selected CFs for ARR #{arr_id} had errors: {format_result}"
- )
- except Exception as e:
- logger.exception(
- f"[Pull Import] Failed importing user-selected CFs for ARR #{arr_id}: {str(e)}"
- )
-
- # 2) For user-selected profiles, gather any referenced CFs
- referenced_cf_names = set()
- if selected_profiles:
- from pathlib import Path
- from ..data.utils import get_category_directory, load_yaml_file
-
- for profile_name in selected_profiles:
- try:
- profile_file = Path(
- get_category_directory('profile')) / f"{profile_name}.yml"
- if not profile_file.exists():
- logger.error(
- f"[Pull Import] Profile file not found: {profile_file}"
- )
- continue
-
- profile_data = load_yaml_file(str(profile_file))
- for cf in profile_data.get('custom_formats', []):
- if 'name' in cf:
- referenced_cf_names.add(cf['name'])
- except Exception as e:
- logger.error(
- f"[Pull Import] Error loading profile {profile_name}: {str(e)}"
- )
-
- # Import referenced CFs
- if referenced_cf_names:
- try:
- from ..importarr.format import import_formats_to_arr
- format_names = list(referenced_cf_names)
- original_names = format_names.copy()
-
- # Modify format names if import_as_unique is true
- if import_as_unique:
- format_names = [
- f"{name} [Dictionarry]" for name in format_names
- ]
- logger.info(
- f"Modified format names for unique import: {format_names}")
-
- cf_result = import_formats_to_arr(format_names=format_names,
- original_names=original_names,
- base_url=arr_server,
- api_key=api_key,
- arr_type=arr_type)
-
- if not cf_result.get('success'):
- logger.warning(
- f"[Pull Import] Importing referenced CFs had errors: {cf_result}"
- )
- except Exception as e:
- logger.exception(
- f"[Pull Import] Failed importing referenced CFs: {str(e)}")
-
- # 3) Import the profiles themselves
- if selected_profiles:
- try:
- from ..importarr.profile import import_profiles_to_arr
- profile_names = selected_profiles
- original_names = profile_names.copy()
-
- # Modify profile names if import_as_unique is true
- if import_as_unique:
- profile_names = [
- f"{name} [Dictionarry]" for name in profile_names
- ]
- logger.info(
- f"Modified profile names for unique import: {profile_names}"
- )
-
- profile_result = import_profiles_to_arr(
- profile_names=profile_names,
- original_names=original_names,
- base_url=arr_server,
- api_key=api_key,
- arr_type=arr_type,
- arr_id=arr_id,
- import_as_unique=import_as_unique)
-
- if profile_result.get('success'):
- # Count successful profile imports in total
- total_successful += (profile_result.get('added', 0) +
- profile_result.get('updated', 0))
- else:
- logger.warning(
- f"[Pull Import] Importing profiles had errors: {profile_result}"
- )
- except Exception as e:
- logger.exception(
- f"[Pull Import] Failed importing profiles: {str(e)}")
-
- # Calculate percentage based on successful imports vs attempted
- sync_percentage = int((total_successful / total_attempted *
- 100) if total_attempted > 0 else 0)
-
- logger.info(
- f"[Pull Import] Done importing for ARR config #{arr_id} ({arr_name}). "
- f"Success rate: {total_successful}/{total_attempted} => {sync_percentage}%"
- )
-
- # Update arr_config with results
- now = datetime.now()
- with get_db() as conn:
- cursor = conn.cursor()
- cursor.execute(
- '''
- UPDATE arr_config
- SET last_sync_time = ?,
- sync_percentage = ?,
- updated_at = CURRENT_TIMESTAMP
- WHERE id = ?
- ''', (now, sync_percentage, arr_id))
- conn.commit()
-
- logger.info(
- f"[Pull Import] Updated ARR config #{arr_id} last_sync_time={now} & sync_percentage={sync_percentage}."
- )
-
- return {
- 'success': True if total_successful > 0 else False,
- 'total_attempted': total_attempted,
- 'total_successful': total_successful,
- 'sync_percentage': sync_percentage
- }
-
-
def check_active_sync_configs():
"""
Check if there are any ARR configurations with non-manual sync methods.
diff --git a/backend/app/arr/task_utils.py b/backend/app/arr/task_utils.py
index d37fdce..f27545e 100644
--- a/backend/app/arr/task_utils.py
+++ b/backend/app/arr/task_utils.py
@@ -21,16 +21,16 @@ def create_import_task_for_arr_config(config_id, config_name, sync_method,
with get_db() as conn:
cursor = conn.cursor()
- # 'pull' tasks can be represented with interval 0 or a special type
- # 'schedule' tasks can be represented with the normal interval
-
+ # pull: not scheduled; on-demand during git pull
if sync_method == 'pull':
- # You could store a special type for pull-based tasks
- task_type = 'ImportPull'
- interval_minutes = 0
- else: # 'schedule'
- task_type = 'ImportSchedule'
- interval_minutes = sync_interval or 0
+ logger.debug(
+ f"[ARR Tasks] No scheduled task created for {config_name} because sync_method=pull (runs on git pull)"
+ )
+ return None
+
+ # schedule: create an interval-based task
+ task_type = 'ImportSchedule'
+ interval_minutes = sync_interval or 0
# Insert into scheduled_tasks table
cursor.execute(
diff --git a/backend/app/compile/profile_compiler.py b/backend/app/compile/profile_compiler.py
index 92ebb08..497ca4d 100644
--- a/backend/app/compile/profile_compiler.py
+++ b/backend/app/compile/profile_compiler.py
@@ -430,6 +430,22 @@ class ProfileConverter:
format_item = {"name": cf["name"], "score": cf["score"]}
converted_profile.format_items.append(format_item)
+ # Process app-specific custom formats based on target app
+ app_specific_field = None
+ if self.target_app == TargetApp.RADARR:
+ app_specific_field = "custom_formats_radarr"
+ elif self.target_app == TargetApp.SONARR:
+ app_specific_field = "custom_formats_sonarr"
+
+ if app_specific_field and app_specific_field in profile:
+ for cf in profile[app_specific_field]:
+ format_name = cf["name"]
+ # Apply [Dictionarry] suffix if import_as_unique is enabled
+ if self.import_as_unique:
+ format_name = f"{format_name} [Dictionarry]"
+ format_item = {"name": format_name, "score": cf["score"]}
+ converted_profile.format_items.append(format_item)
+
converted_profile.items.reverse()
return converted_profile
diff --git a/backend/app/data/utils.py b/backend/app/data/utils.py
index 884a664..41d53b8 100644
--- a/backend/app/data/utils.py
+++ b/backend/app/data/utils.py
@@ -31,7 +31,9 @@ PROFILE_FIELDS = [
"minCustomFormatScore",
"upgradeUntilScore",
"minScoreIncrement",
- "custom_formats", # Array of {name, score} objects
+ "custom_formats", # Array of {name, score} objects (backwards compatible)
+ "custom_formats_radarr", # Array of {name, score} objects for radarr-specific scores
+ "custom_formats_sonarr", # Array of {name, score} objects for sonarr-specific scores
"qualities", # Array of strings
"upgrade_until",
"language"
@@ -295,21 +297,45 @@ def check_delete_constraints(category: str, name: str) -> Tuple[bool, str]:
profile_path = os.path.join(profile_dir, profile_file)
try:
profile_data = load_yaml_file(profile_path)
- # Check custom_formats array in profile
- for format_ref in profile_data.get('custom_formats', []):
- format_name = format_ref.get('name', '')
- # Convert format name to use parentheses for comparison
- format_name = format_name.replace('[', '(').replace(
- ']', ')')
- logger.debug(
- f"Comparing '{format_name}' with '{check_name}'")
+
+                    # Check custom_formats (applies to both apps; backwards-compatible field)
+ custom_formats = profile_data.get('custom_formats', [])
+ if isinstance(custom_formats, list):
+ for format_ref in custom_formats:
+ format_name = format_ref.get('name', '')
+ # Convert format name to use parentheses for comparison
+ format_name = format_name.replace('[', '(').replace(']', ')')
+ logger.debug(f"Comparing '{format_name}' with '{check_name}' in both")
- if format_name == check_name:
- references.append(
- f"quality profile: {profile_data['name']}")
+ if format_name == check_name:
+ references.append(f"quality profile: {profile_data['name']} (both)")
+
+ # Check custom_formats_radarr
+ custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
+ if isinstance(custom_formats_radarr, list):
+ for format_ref in custom_formats_radarr:
+ format_name = format_ref.get('name', '')
+ # Convert format name to use parentheses for comparison
+ format_name = format_name.replace('[', '(').replace(']', ')')
+ logger.debug(f"Comparing '{format_name}' with '{check_name}' in radarr")
+
+ if format_name == check_name:
+ references.append(f"quality profile: {profile_data['name']} (radarr)")
+
+ # Check custom_formats_sonarr
+ custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
+ if isinstance(custom_formats_sonarr, list):
+ for format_ref in custom_formats_sonarr:
+ format_name = format_ref.get('name', '')
+ # Convert format name to use parentheses for comparison
+ format_name = format_name.replace('[', '(').replace(']', ')')
+ logger.debug(f"Comparing '{format_name}' with '{check_name}' in sonarr")
+
+ if format_name == check_name:
+ references.append(f"quality profile: {profile_data['name']} (sonarr)")
+
except Exception as e:
- logger.error(
- f"Error checking profile file {profile_file}: {e}")
+ logger.error(f"Error checking profile file {profile_file}: {e}")
continue
# Update arr configs for formats and profiles
@@ -392,16 +418,41 @@ def update_references(category: str, old_name: str,
profile_data = load_yaml_file(profile_path)
updated = False
- # Update custom_formats array in profile
- for format_ref in profile_data.get('custom_formats', []):
- format_name = format_ref.get('name', '')
- # Convert format name to use parentheses for comparison
- format_name = format_name.replace('[', '(').replace(
- ']', ')')
+            # Update custom_formats (applies to both apps; backwards-compatible field)
+ custom_formats = profile_data.get('custom_formats', [])
+ if isinstance(custom_formats, list):
+ for format_ref in custom_formats:
+ format_name = format_ref.get('name', '')
+ # Convert format name to use parentheses for comparison
+ format_name = format_name.replace('[', '(').replace(']', ')')
- if format_name == old_check_name:
- format_ref['name'] = new_name
- updated = True
+ if format_name == old_check_name:
+ format_ref['name'] = new_name
+ updated = True
+
+ # Update custom_formats_radarr
+ custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
+ if isinstance(custom_formats_radarr, list):
+ for format_ref in custom_formats_radarr:
+ format_name = format_ref.get('name', '')
+ # Convert format name to use parentheses for comparison
+ format_name = format_name.replace('[', '(').replace(']', ')')
+
+ if format_name == old_check_name:
+ format_ref['name'] = new_name
+ updated = True
+
+ # Update custom_formats_sonarr
+ custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
+ if isinstance(custom_formats_sonarr, list):
+ for format_ref in custom_formats_sonarr:
+ format_name = format_ref.get('name', '')
+ # Convert format name to use parentheses for comparison
+ format_name = format_name.replace('[', '(').replace(']', ')')
+
+ if format_name == old_check_name:
+ format_ref['name'] = new_name
+ updated = True
if updated:
save_yaml_file(profile_path,
diff --git a/backend/app/git/operations/pull.py b/backend/app/git/operations/pull.py
index 6ee3e82..61202d0 100644
--- a/backend/app/git/operations/pull.py
+++ b/backend/app/git/operations/pull.py
@@ -4,7 +4,8 @@ import git
import logging
from git import GitCommandError
from ..status.status import GitStatusManager
-from ...arr.manager import get_pull_configs, run_import_for_config
+from ...arr.manager import get_pull_configs
+from ...importer import handle_pull_import
logger = logging.getLogger(__name__)
@@ -35,16 +36,16 @@ def pull_branch(repo_path, branch_name):
status_manager.update_remote_status()
# -------------------------------
- # *** "On pull" ARR import logic:
+ # *** "On pull" ARR import logic using new importer:
# 1) Query all ARR configs that have sync_method="pull"
- # 2) For each, run the import
+ # 2) For each, run the importer pull handler
# -------------------------------
pull_configs = get_pull_configs()
logger.info(
f"[Pull] Found {len(pull_configs)} ARR configs to import (sync_method='pull')"
)
for cfg in pull_configs:
- run_import_for_config(cfg)
+ handle_pull_import(cfg['id'])
return True, f"Successfully pulled changes for branch {branch_name}"
diff --git a/backend/app/git/status/comparison.py b/backend/app/git/status/comparison.py
index 2d2f5b8..07f5fd3 100644
--- a/backend/app/git/status/comparison.py
+++ b/backend/app/git/status/comparison.py
@@ -111,14 +111,14 @@ def compare_yaml(old_data: Any,
changes.append({
"key": path,
"change": "added",
- "value": sorted(list(added))
+ "value": sorted([x for x in added if x is not None])
})
if removed := old_set - new_set:
logger.debug(f"Removed values at {path}: {removed}")
changes.append({
"key": path,
"change": "removed",
- "value": sorted(list(removed))
+ "value": sorted([x for x in removed if x is not None])
})
elif isinstance(old_data, dict):
diff --git a/backend/app/importarr/__init__.py b/backend/app/importarr/__init__.py
index 04a7315..22531b6 100644
--- a/backend/app/importarr/__init__.py
+++ b/backend/app/importarr/__init__.py
@@ -200,8 +200,16 @@ def import_profiles():
try:
profile_file = f"{get_category_directory('profile')}/{profile_name}.yml"
format_data = load_yaml_file(profile_file)
+
+ # Extract from main custom_formats
for cf in format_data.get('custom_formats', []):
format_names.add(cf['name'])
+
+ # Extract from app-specific custom_formats
+ for cf in format_data.get('custom_formats_radarr', []):
+ format_names.add(cf['name'])
+ for cf in format_data.get('custom_formats_sonarr', []):
+ format_names.add(cf['name'])
except Exception as e:
logger.error(f"Error loading profile {profile_name}: {str(e)}")
continue
diff --git a/backend/app/importer/__init__.py b/backend/app/importer/__init__.py
new file mode 100644
index 0000000..2bda87a
--- /dev/null
+++ b/backend/app/importer/__init__.py
@@ -0,0 +1,325 @@
+"""Main import module entry point."""
+import sys
+import logging
+from typing import Dict, Any, List
+from .strategies import FormatStrategy, ProfileStrategy
+from .logger import reset_import_logger
+
+logger = logging.getLogger(__name__)
+
+
+def handle_import_request(request: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Handle an import request.
+
+ Args:
+ request: Request dictionary containing:
+ - arrID: ID of the arr_config to use
+ - strategy: 'format' or 'profile'
+ - filenames: List of filenames to import
+ - dryRun: Optional boolean for dry-run mode (default: false)
+
+ Returns:
+ Import results with added/updated/failed counts
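+
+    Example (illustrative call; the ID and filename are hypothetical):
+        handle_import_request({
+            'arrID': 3,
+            'strategy': 'format',
+            'filenames': ['1080p Transparent'],
+            'dryRun': True
+        })
+        # -> {'success': True, 'status': 'success', 'added': 1, ...}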
+ """
+ from ..db import get_db
+
+ try:
+ # Extract request parameters
+ arr_id = request.get('arrID')
+ strategy_type = request.get('strategy')
+ filenames = request.get('filenames', [])
+ dry_run = request.get('dryRun', False)
+
+ # Validate inputs
+ if not arr_id:
+ return {'success': False, 'error': 'arrID is required'}
+
+ if strategy_type not in ['format', 'profile']:
+ return {
+ 'success': False,
+ 'error': 'strategy must be "format" or "profile"'
+ }
+
+ if not filenames:
+ return {'success': False, 'error': 'filenames list is required'}
+
+ # Load arr_config from database
+ with get_db() as conn:
+ cursor = conn.execute("SELECT * FROM arr_config WHERE id = ?",
+ (arr_id, ))
+ arr_config = cursor.fetchone()
+
+ if not arr_config:
+ return {
+ 'success': False,
+ 'error': f'arr_config {arr_id} not found'
+ }
+
+ # Select strategy
+ strategy_map = {'format': FormatStrategy, 'profile': ProfileStrategy}
+
+ strategy_class = strategy_map[strategy_type]
+ strategy = strategy_class(arr_config)
+
+ # Execute import with new logger
+ import_logger = reset_import_logger()
+
+ # Show start message
+ dry_run_text = " [DRY RUN]" if dry_run else ""
+        print(
+            f"Starting {strategy_type} import for {arr_config['name']} "
+            f"({arr_config['type']}): {len(filenames)} items{dry_run_text}",
+            file=sys.stderr)
+
+ result = strategy.execute(filenames, dry_run=dry_run)
+
+ added = result.get('added', 0)
+ updated = result.get('updated', 0)
+ failed = result.get('failed', 0)
+
+ # Determine status
+ is_partial = failed > 0 and (added > 0 or updated > 0)
+ is_success = failed == 0
+
+ result['success'] = is_success or is_partial
+ if is_partial:
+ result['status'] = "partial"
+ elif is_success:
+ result['status'] = "success"
+ else:
+ result['status'] = "failed"
+
+ result['arr_config_id'] = arr_id
+ result['arr_config_name'] = arr_config['name']
+ result['strategy'] = strategy_type
+
+ # Complete logging
+ import_logger.complete()
+
+ return result
+
+ except Exception as e:
+ logger.exception("Import request failed")
+ return {'success': False, 'error': str(e)}
+
+
+def handle_scheduled_import(task_id: int) -> Dict[str, Any]:
+ """
+ Handle a scheduled import task.
+
+ Args:
+ task_id: ID from scheduled_tasks table
+
+ Returns:
+ Import results
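+
+    Example (task id is hypothetical):
+        handle_scheduled_import(7)
+        # -> {'success': True, 'status': 'success', 'task_id': 7,
+        #     'added': 2, 'updated': 0, 'failed': 0, ...}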
+ """
+ from ..db import get_db
+ import json
+
+ try:
+ # Find arr_config for this task
+ with get_db() as conn:
+ cursor = conn.execute(
+ "SELECT * FROM arr_config WHERE import_task_id = ?",
+ (task_id, ))
+ arr_config = cursor.fetchone()
+
+ if not arr_config:
+ return {
+ 'success': False,
+ 'error': f'No arr_config found for task {task_id}'
+ }
+
+ # Parse data_to_sync
+ data_to_sync = json.loads(arr_config['data_to_sync'] or '{}')
+
+ # Build import requests
+ results = []
+
+ # Import custom formats
+ format_names = data_to_sync.get('customFormats', [])
+ if format_names:
+ # Remove .yml extension if present
+            format_names = [f[:-4] if f.endswith('.yml') else f for f in format_names]
+
+ request = {
+ 'arrID': arr_config['id'],
+ 'strategy': 'format',
+ 'filenames': format_names
+ }
+ result = handle_import_request(request)
+ results.append(result)
+
+ # Import profiles
+ profile_names = data_to_sync.get('profiles', [])
+ if profile_names:
+ # Remove .yml extension if present
+            profile_names = [p[:-4] if p.endswith('.yml') else p for p in profile_names]
+
+ request = {
+ 'arrID': arr_config['id'],
+ 'strategy': 'profile',
+ 'filenames': profile_names
+ }
+ result = handle_import_request(request)
+ results.append(result)
+
+ # Combine results
+ total_added = sum(r.get('added', 0) for r in results)
+ total_updated = sum(r.get('updated', 0) for r in results)
+ total_failed = sum(r.get('failed', 0) for r in results)
+
+ is_partial = total_failed > 0 and (total_added > 0
+ or total_updated > 0)
+ is_success = total_failed == 0
+
+ status = "failed"
+ if is_partial:
+ status = "partial"
+ elif is_success:
+ status = "success"
+
+ combined_result = {
+ 'success': is_success or is_partial,
+ 'status': status,
+ 'task_id': task_id,
+ 'arr_config_id': arr_config['id'],
+ 'arr_config_name': arr_config['name'],
+ 'added': total_added,
+ 'updated': total_updated,
+ 'failed': total_failed,
+ 'results': results
+ }
+
+ # Update sync status
+ _update_sync_status(arr_config['id'], combined_result)
+
+ return combined_result
+
+ except Exception as e:
+ logger.exception(f"Scheduled import {task_id} failed")
+ return {'success': False, 'error': str(e)}
+
+
+def handle_pull_import(arr_config_id: int) -> Dict[str, Any]:
+ """
+ Handle an on-pull import for a specific ARR config.
+
+ This mirrors scheduled import behavior but is triggered immediately
+ during a git pull (not scheduled).
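+
+    Example data_to_sync shape read from arr_config (values hypothetical):
+        {"customFormats": ["x265.yml"], "profiles": ["1080p Transparent"]}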
+ """
+ from ..db import get_db
+ import json
+
+ try:
+ # Load arr_config by id
+ with get_db() as conn:
+ cursor = conn.execute("SELECT * FROM arr_config WHERE id = ?",
+ (arr_config_id, ))
+ arr_config = cursor.fetchone()
+ if not arr_config:
+ return {
+ 'success': False,
+ 'error': f'arr_config {arr_config_id} not found'
+ }
+
+ # Parse data_to_sync
+ data_to_sync = json.loads(arr_config['data_to_sync'] or '{}')
+
+ results: List[Dict[str, Any]] = []
+
+ # Import custom formats
+ format_names = data_to_sync.get('customFormats', [])
+ if format_names:
+            format_names = [f[:-4] if f.endswith('.yml') else f for f in format_names]
+ request = {
+ 'arrID': arr_config['id'],
+ 'strategy': 'format',
+ 'filenames': format_names,
+ }
+ result = handle_import_request(request)
+ results.append(result)
+
+ # Import profiles
+ profile_names = data_to_sync.get('profiles', [])
+ if profile_names:
+            profile_names = [p[:-4] if p.endswith('.yml') else p for p in profile_names]
+ request = {
+ 'arrID': arr_config['id'],
+ 'strategy': 'profile',
+ 'filenames': profile_names,
+ }
+ result = handle_import_request(request)
+ results.append(result)
+
+ # Combine results
+ total_added = sum(r.get('added', 0) for r in results)
+ total_updated = sum(r.get('updated', 0) for r in results)
+ total_failed = sum(r.get('failed', 0) for r in results)
+
+ is_partial = total_failed > 0 and (total_added > 0
+ or total_updated > 0)
+ is_success = total_failed == 0
+
+ status = "failed"
+ if is_partial:
+ status = "partial"
+ elif is_success:
+ status = "success"
+
+ combined_result = {
+ 'success': is_success or is_partial,
+ 'status': status,
+ 'arr_config_id': arr_config['id'],
+ 'arr_config_name': arr_config['name'],
+ 'added': total_added,
+ 'updated': total_updated,
+ 'failed': total_failed,
+ 'results': results,
+ }
+
+ # Update sync status
+ _update_sync_status(arr_config['id'], combined_result)
+
+ return combined_result
+
+ except Exception as e:
+ logger.exception(f"Pull import for arr_config {arr_config_id} failed")
+ return {
+ 'success': False,
+ 'error': str(e),
+ }
+
+
+def _update_sync_status(config_id: int, result: Dict[str, Any]) -> None:
+    """Update arr_config sync status after an import run (scheduled or pull)."""
+ from ..db import get_db
+ from datetime import datetime
+
+ try:
+ total = result.get('added', 0) + result.get('updated', 0) + result.get(
+ 'failed', 0)
+ successful = result.get('added', 0) + result.get('updated', 0)
+
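+        # Worked example (hypothetical counts): added=3, updated=1, failed=1
+        # gives successful=4, total=5 -> sync_percentage = 80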
+ sync_percentage = int((successful / total * 100) if total > 0 else 0)
+
+ with get_db() as conn:
+ conn.execute(
+ """
+ UPDATE arr_config
+ SET last_sync_time = ?,
+ sync_percentage = ?
+ WHERE id = ?
+ """, (datetime.now(), sync_percentage, config_id))
+ conn.commit()
+
+ logger.info(
+ f"Updated sync status for arr_config #{config_id}: {sync_percentage}%"
+ )
+
+ except Exception as e:
+ logger.error(f"Failed to update sync status: {e}")
+
+
+# Export main functions
+__all__ = [
+ 'handle_import_request', 'handle_scheduled_import', 'handle_pull_import'
+]
diff --git a/backend/app/importer/arr_handler.py b/backend/app/importer/arr_handler.py
new file mode 100644
index 0000000..5c09198
--- /dev/null
+++ b/backend/app/importer/arr_handler.py
@@ -0,0 +1,150 @@
+"""ArrHandler class - manages all Arr API communication."""
+import logging
+import requests
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
+from typing import Dict, List, Any, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class ArrApiError(Exception):
+ """Custom exception for Arr API errors."""
+ def __init__(self, message: str, status_code: Optional[int] = None):
+ super().__init__(message)
+ self.status_code = status_code
+
+
+class ArrHandler:
+ """Manages all communication with Radarr/Sonarr API."""
+
+ def __init__(self, base_url: str, api_key: str):
+ """
+ Initialize the Arr API handler.
+
+ Args:
+ base_url: Base URL of the Arr instance
+ api_key: API key for authentication
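+
+        Example (hypothetical local Radarr instance):
+            arr = ArrHandler('http://localhost:7878', 'your-api-key')
+            formats = arr.get_all_formats()
+            arr.close()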
+ """
+ self.base_url = base_url.rstrip('/')
+ self.headers = {
+ 'X-Api-Key': api_key,
+ 'Content-Type': 'application/json'
+ }
+ self.session = self._create_session()
+
+ def _create_session(self) -> requests.Session:
+ """Create a session with connection pooling and retry logic."""
+ session = requests.Session()
+
+ # Configure retry strategy
+ retry = Retry(
+ total=3,
+ backoff_factor=0.5,
+ status_forcelist=[500, 502, 503, 504]
+ )
+
+ # Configure connection pooling
+ adapter = HTTPAdapter(
+ pool_connections=5,
+ pool_maxsize=5,
+ max_retries=retry
+ )
+
+ session.mount('http://', adapter)
+ session.mount('https://', adapter)
+ session.headers.update(self.headers)
+
+ return session
+
+ def get(self, endpoint: str) -> Any:
+ """
+ Make a GET request to the Arr API.
+
+ Args:
+ endpoint: API endpoint path
+
+ Returns:
+ JSON response data
+
+ Raises:
+ ArrApiError: If request fails
+ """
+ url = f"{self.base_url}{endpoint}"
+ try:
+ response = self.session.get(url, timeout=30)
+ if response.status_code != 200:
+ raise ArrApiError(
+ f"GET {endpoint} failed: {response.text}",
+ response.status_code
+ )
+ return response.json()
+ except requests.RequestException as e:
+ raise ArrApiError(f"GET {endpoint} failed: {str(e)}")
+
+ def post(self, endpoint: str, data: Dict[str, Any]) -> Any:
+ """
+ Make a POST request to the Arr API.
+
+ Args:
+ endpoint: API endpoint path
+ data: JSON data to send
+
+ Returns:
+ JSON response data
+
+ Raises:
+ ArrApiError: If request fails
+ """
+ url = f"{self.base_url}{endpoint}"
+ try:
+ response = self.session.post(url, json=data, timeout=30)
+ if response.status_code not in [200, 201]:
+ raise ArrApiError(
+ f"POST {endpoint} failed: {response.text}",
+ response.status_code
+ )
+ return response.json()
+ except requests.RequestException as e:
+ raise ArrApiError(f"POST {endpoint} failed: {str(e)}")
+
+ def put(self, endpoint: str, data: Dict[str, Any]) -> Any:
+ """
+ Make a PUT request to the Arr API.
+
+ Args:
+ endpoint: API endpoint path
+ data: JSON data to send
+
+ Returns:
+ JSON response data (if any)
+
+ Raises:
+ ArrApiError: If request fails
+ """
+ url = f"{self.base_url}{endpoint}"
+ try:
+ response = self.session.put(url, json=data, timeout=30)
+ if response.status_code not in [200, 202, 204]:
+ raise ArrApiError(
+ f"PUT {endpoint} failed: {response.text}",
+ response.status_code
+ )
+ # 204 No Content won't have JSON
+ if response.status_code == 204:
+ return {}
+ return response.json()
+ except requests.RequestException as e:
+ raise ArrApiError(f"PUT {endpoint} failed: {str(e)}")
+
+ def get_all_formats(self) -> List[Dict[str, Any]]:
+ """Get all custom formats from the Arr instance."""
+ return self.get("/api/v3/customformat")
+
+ def get_all_profiles(self) -> List[Dict[str, Any]]:
+ """Get all quality profiles from the Arr instance."""
+ return self.get("/api/v3/qualityprofile")
+
+ def close(self):
+ """Close the session."""
+ self.session.close()
\ No newline at end of file
diff --git a/backend/app/importer/compiler.py b/backend/app/importer/compiler.py
new file mode 100644
index 0000000..c8be6be
--- /dev/null
+++ b/backend/app/importer/compiler.py
@@ -0,0 +1,335 @@
+"""Compilation functions to transform YAML data to Arr API format."""
+import logging
+from typing import Dict, List, Any, Optional
+from .mappings import TargetApp, ValueResolver
+from .utils import load_regex_patterns
+from ..db.queries.format_renames import is_format_in_renames
+from .logger import get_import_logger
+
+logger = logging.getLogger(__name__)
+
+# Cache patterns at module level to avoid reloading
+_CACHED_PATTERNS = None
+
+def get_cached_patterns():
+ """Get cached regex patterns, loading them once on first access."""
+ global _CACHED_PATTERNS
+ if _CACHED_PATTERNS is None:
+ _CACHED_PATTERNS = load_regex_patterns()
+ return _CACHED_PATTERNS
+
+
+def compile_format_to_api_structure(
+ format_yaml: Dict[str, Any],
+ arr_type: str
+) -> Dict[str, Any]:
+ """
+ Compile a format from YAML to Arr API structure.
+
+ Args:
+ format_yaml: Format data from YAML file
+ arr_type: 'radarr' or 'sonarr'
+
+ Returns:
+ Compiled format ready for API
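+
+    Example (illustrative YAML; the format, condition and pattern names
+    are hypothetical):
+        compile_format_to_api_structure(
+            {'name': 'x265', 'conditions': [
+                {'type': 'release_title', 'name': 'x265', 'pattern': 'x265'}]},
+            'radarr')
+        # -> {'name': 'x265', 'specifications': [
+        #      {'name': 'x265', 'negate': False, 'required': False,
+        #       'implementation': 'ReleaseTitleSpecification',
+        #       'fields': [{'name': 'value', 'value': '<resolved regex>'}]}]}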
+ """
+ target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
+ patterns = get_cached_patterns()
+
+ compiled = {
+ 'name': format_yaml.get('name', 'Unknown')
+ }
+
+ # Check if format should be included in renames
+ if is_format_in_renames(format_yaml.get('name', '')):
+ compiled['includeCustomFormatWhenRenaming'] = True
+
+ # Compile specifications from conditions
+ specifications = []
+ for condition in format_yaml.get('conditions', []):
+ spec = _compile_condition(condition, patterns, target_app)
+ if spec:
+ specifications.append(spec)
+
+ compiled['specifications'] = specifications
+ return compiled
+
+
+def _compile_condition(
+ condition: Dict[str, Any],
+ patterns: Dict[str, str],
+ target_app: TargetApp
+) -> Optional[Dict[str, Any]]:
+ """Compile a single condition to specification."""
+ condition_type = condition.get('type')
+
+ spec = {
+ 'name': condition.get('name', ''),
+ 'negate': condition.get('negate', False),
+ 'required': condition.get('required', False),
+ 'fields': []
+ }
+
+ if condition_type in ['release_title', 'release_group', 'edition']:
+ pattern_name = condition.get('pattern')
+ pattern = patterns.get(pattern_name)
+ if not pattern:
+ import_logger = get_import_logger()
+ import_logger.warning(f"Pattern not found: {pattern_name}")
+ return None
+
+ spec['implementation'] = {
+ 'release_title': 'ReleaseTitleSpecification',
+ 'release_group': 'ReleaseGroupSpecification',
+ 'edition': 'EditionSpecification'
+ }[condition_type]
+ spec['fields'] = [{'name': 'value', 'value': pattern}]
+
+ elif condition_type == 'source':
+ spec['implementation'] = 'SourceSpecification'
+ value = ValueResolver.get_source(condition.get('source'), target_app)
+ spec['fields'] = [{'name': 'value', 'value': value}]
+
+ elif condition_type == 'resolution':
+ spec['implementation'] = 'ResolutionSpecification'
+ value = ValueResolver.get_resolution(condition.get('resolution'))
+ spec['fields'] = [{'name': 'value', 'value': value}]
+
+ elif condition_type == 'indexer_flag':
+ spec['implementation'] = 'IndexerFlagSpecification'
+ value = ValueResolver.get_indexer_flag(condition.get('flag', ''), target_app)
+ spec['fields'] = [{'name': 'value', 'value': value}]
+
+ elif condition_type == 'quality_modifier':
+ if target_app == TargetApp.SONARR:
+ return None
+ spec['implementation'] = 'QualityModifierSpecification'
+ value = ValueResolver.get_quality_modifier(condition.get('qualityModifier'))
+ spec['fields'] = [{'name': 'value', 'value': value}]
+
+ elif condition_type == 'size':
+ spec['implementation'] = 'SizeSpecification'
+ spec['fields'] = [
+ {'name': 'min', 'value': condition.get('minSize', 0)},
+ {'name': 'max', 'value': condition.get('maxSize', 0)}
+ ]
+
+ elif condition_type == 'language':
+ spec['implementation'] = 'LanguageSpecification'
+ language_name = condition.get('language', '').lower()
+ try:
+ language_data = ValueResolver.get_language(language_name, target_app, for_profile=False)
+ spec['fields'] = [{'name': 'value', 'value': language_data['id']}]
+ except Exception:
+ import_logger = get_import_logger()
+ import_logger.warning(f"Language not found: {language_name}")
+ return None
+
+ elif condition_type == 'release_type':
+ # Only supported in Sonarr
+ if target_app == TargetApp.RADARR:
+ return None
+ spec['implementation'] = 'ReleaseTypeSpecification'
+ value = ValueResolver.get_release_type(condition.get('releaseType'))
+ spec['fields'] = [{'name': 'value', 'value': value}]
+
+ elif condition_type == 'year':
+ spec['implementation'] = 'YearSpecification'
+ spec['fields'] = [
+ {'name': 'min', 'value': condition.get('minYear', 0)},
+ {'name': 'max', 'value': condition.get('maxYear', 0)}
+ ]
+
+ else:
+ import_logger = get_import_logger()
+ import_logger.warning(f"Unknown condition type: {condition_type}")
+ return None
+
+ return spec
+
+
+def compile_profile_to_api_structure(
+ profile_yaml: Dict[str, Any],
+ arr_type: str
+) -> Dict[str, Any]:
+ """
+ Compile a profile from YAML to Arr API structure.
+
+ Args:
+ profile_yaml: Profile data from YAML file
+ arr_type: 'radarr' or 'sonarr'
+
+ Returns:
+ Compiled profile ready for API
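+
+    Example (illustrative; quality and format names are hypothetical):
+        compile_profile_to_api_structure(
+            {'name': '1080p', 'qualities': ['Bluray-1080p'],
+             'custom_formats': [{'name': 'x265', 'score': 100}],
+             'language': 'any'},
+            'radarr')
+        # -> dict with 'name', 'items', 'formatItems', 'language',
+        #    'upgradeAllowed', 'minFormatScore', etc.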
+ """
+ target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
+ quality_mappings = ValueResolver.get_qualities(target_app)
+
+ compiled = {
+ 'name': profile_yaml.get('name', 'Unknown')
+ }
+
+    # Build quality items, mirroring the structure used by compile/profile_compiler.py
+ items = []
+ cutoff_id = None
+ used_qualities = set()
+ quality_ids_in_groups = set()
+
+    # Convert group IDs: negative YAML group IDs map to positive API IDs with a 1000 offset (e.g. -2 -> 1002)
+ def convert_group_id(group_id: int) -> int:
+ if group_id < 0:
+ return 1000 + abs(group_id)
+ return group_id
+
+ # First pass: gather quality IDs in groups to avoid duplicates
+ for quality_entry in profile_yaml.get('qualities', []):
+ if isinstance(quality_entry, dict) and quality_entry.get('id', 0) < 0:
+ # It's a group
+ for q in quality_entry.get('qualities', []):
+ if isinstance(q, dict):
+ q_name = q.get('name', '')
+ mapped_name = ValueResolver.get_quality_name(q_name, target_app)
+ if mapped_name in quality_mappings:
+ quality_ids_in_groups.add(quality_mappings[mapped_name]['id'])
+
+ # Second pass: add groups and individual qualities
+ for quality_entry in profile_yaml.get('qualities', []):
+ if isinstance(quality_entry, dict):
+ if quality_entry.get('id', 0) < 0:
+ # It's a group
+ group_id = convert_group_id(quality_entry.get('id', 0))
+ group_item = {
+ 'id': group_id,
+ 'name': quality_entry.get('name', 'Group'),
+ 'items': [],
+ 'allowed': True
+ }
+
+ for q in quality_entry.get('qualities', []):
+ if isinstance(q, dict):
+ q_name = q.get('name', '')
+ mapped_name = ValueResolver.get_quality_name(q_name, target_app)
+ if mapped_name in quality_mappings:
+ group_item['items'].append({
+ 'quality': quality_mappings[mapped_name].copy(),
+ 'items': [],
+ 'allowed': True
+ })
+ used_qualities.add(mapped_name.upper())
+
+ if group_item['items']:
+ items.append(group_item)
+ else:
+ # Individual quality
+ q_name = quality_entry.get('name', '')
+ mapped_name = ValueResolver.get_quality_name(q_name, target_app)
+ if mapped_name in quality_mappings:
+ items.append({
+ 'quality': quality_mappings[mapped_name].copy(),
+ 'items': [],
+ 'allowed': True
+ })
+ used_qualities.add(mapped_name.upper())
+ elif isinstance(quality_entry, str):
+ # Simple quality name string
+ mapped_name = ValueResolver.get_quality_name(quality_entry, target_app)
+ if mapped_name in quality_mappings:
+ items.append({
+ 'quality': quality_mappings[mapped_name].copy(),
+ 'items': [],
+ 'allowed': True
+ })
+ used_qualities.add(mapped_name.upper())
+
+ # Add all unused qualities as disabled
+ for quality_name, quality_data in quality_mappings.items():
+ if (quality_name.upper() not in used_qualities and
+ quality_data['id'] not in quality_ids_in_groups):
+ items.append({
+ 'quality': quality_data.copy(),
+ 'items': [],
+ 'allowed': False
+ })
+
+ # Handle cutoff/upgrade_until
+ if 'upgrade_until' in profile_yaml and isinstance(profile_yaml['upgrade_until'], dict):
+ cutoff_id_raw = profile_yaml['upgrade_until'].get('id')
+ cutoff_name = profile_yaml['upgrade_until'].get('name', '')
+ mapped_cutoff_name = ValueResolver.get_quality_name(cutoff_name, target_app)
+
+ if cutoff_id_raw and cutoff_id_raw < 0:
+ cutoff_id = convert_group_id(cutoff_id_raw)
+ elif mapped_cutoff_name in quality_mappings:
+ cutoff_id = quality_mappings[mapped_cutoff_name]['id']
+
+ # Handle language
+ language = profile_yaml.get('language', 'any')
+ if language != 'any' and '_' not in language:
+ # Simple language mode
+ try:
+ language_data = ValueResolver.get_language(language, target_app, for_profile=True)
+ except Exception:
+ language_data = ValueResolver.get_language('any', target_app, for_profile=True)
+ else:
+ # Advanced mode or any
+ language_data = ValueResolver.get_language('any', target_app, for_profile=True)
+
+ # Build format items (without IDs, those get synced later)
+ format_items = []
+
+ # Add language-specific formats for advanced mode
+ if language != 'any' and '_' in language:
+ behavior, language_code = language.split('_', 1)
+
+ # Add "Not [Language]" format with appropriate score
+ # Use proper capitalization for the language name
+ lang_display = language_code.capitalize()
+ not_language_name = f"Not {lang_display}"
+ format_items.append({
+ 'name': not_language_name,
+ 'score': -9999 # Standard score for language exclusion
+ })
+
+ # For 'only' behavior, add additional formats
+ if behavior == 'only':
+ format_items.append({
+ 'name': f"Not Only {lang_display}",
+ 'score': -9999
+ })
+ format_items.append({
+ 'name': f"Not Only {lang_display} (Missing)",
+ 'score': -9999
+ })
+
+ # Main custom formats
+ for cf in profile_yaml.get('custom_formats', []):
+ format_items.append({
+ 'name': cf.get('name'),
+ 'score': cf.get('score', 0)
+ })
+
+ # App-specific custom formats
+ app_key = f'custom_formats_{arr_type.lower()}'
+ for cf in profile_yaml.get(app_key, []):
+ format_items.append({
+ 'name': cf.get('name'),
+ 'score': cf.get('score', 0)
+ })
+
+ # Reverse items to match expected order
+ items.reverse()
+
+ compiled['items'] = items
+ compiled['language'] = language_data
+ compiled['upgradeAllowed'] = profile_yaml.get('upgradesAllowed', True)
+ compiled['minFormatScore'] = profile_yaml.get('minCustomFormatScore', 0)
+ compiled['cutoffFormatScore'] = profile_yaml.get('upgradeUntilScore', 0)
+ compiled['formatItems'] = format_items
+
+ if cutoff_id is not None:
+ compiled['cutoff'] = cutoff_id
+
+ # Handle minUpgradeFormatScore with proper default
+ compiled['minUpgradeFormatScore'] = max(1, profile_yaml.get('minScoreIncrement', 1))
+
+ return compiled
\ No newline at end of file
diff --git a/backend/app/importer/logger.py b/backend/app/importer/logger.py
new file mode 100644
index 0000000..eb3315c
--- /dev/null
+++ b/backend/app/importer/logger.py
@@ -0,0 +1,138 @@
+"""Custom logger for importer with progress tracking and colored output."""
+import sys
+from typing import List, Dict, Any, Optional
+from datetime import datetime
+
+
+class ImportLogger:
+ """Custom logger with progress tracking and colored error output."""
+
+ def __init__(self):
+ """Initialize the import logger."""
+ self.compilation_errors: List[Dict[str, str]] = []
+ self.import_errors: List[Dict[str, str]] = []
+ self.warnings: List[str] = []
+
+ self.current_compilation = 0
+ self.total_compilation = 0
+ self.current_import = 0
+ self.total_import = 0
+
+ self.added = 0
+ self.updated = 0
+ self.failed = 0
+
+ self.start_time = None
+ self.compilation_items: List[str] = []
+ self.import_items: List[Dict[str, str]] = []
+
+    def _write_colored(self, text: str, color: Optional[str] = None):
+ """Write colored text to stderr."""
+ if color == 'red':
+ text = f"\033[91m{text}\033[0m"
+ elif color == 'yellow':
+ text = f"\033[93m{text}\033[0m"
+ elif color == 'green':
+ text = f"\033[92m{text}\033[0m"
+
+ print(text, file=sys.stderr)
+
+ def start(self, total_compilation: int, total_import: int):
+ """Start the import process."""
+ self.start_time = datetime.now()
+ self.total_compilation = total_compilation
+ self.total_import = total_import
+ self.current_compilation = 0
+ self.current_import = 0
+
+ def update_compilation(self, item_name: str):
+ """Track compilation progress."""
+ self.current_compilation += 1
+ self.compilation_items.append(item_name)
+
+ def compilation_complete(self):
+ """Show compilation summary."""
+ if self.total_compilation > 0:
+ print(f"Compiled: {self.current_compilation}/{self.total_compilation}", file=sys.stderr)
+
+ # Show compilation errors if any
+ if self.compilation_errors:
+ for error in self.compilation_errors:
+ self._write_colored(f"ERROR: Failed to compile {error['item']}: {error['message']}", 'red')
+
+ def update_import(self, item_name: str, action: str):
+ """Track import progress."""
+ self.import_items.append({'name': item_name, 'action': action})
+
+ # Update counts based on action
+ if action == 'added':
+ self.added += 1
+ self.current_import += 1 # Only count successful imports
+ elif action == 'updated':
+ self.updated += 1
+ self.current_import += 1 # Only count successful imports
+ elif action == 'failed':
+ self.failed += 1
+ # Don't increment current_import for failures
+
+ def import_complete(self):
+        """Show import summary."""
+        self._import_shown = True  # guard so complete() doesn't print this summary twice
+        if self.total_import > 0:
+            print(f"Imported: {self.current_import}/{self.total_import}", file=sys.stderr)
+
+ # Show import errors if any
+ if self.import_errors:
+ for error in self.import_errors:
+ self._write_colored(f"ERROR: {error['message']}", 'red')
+
+ # Show warnings if any
+ if self.warnings:
+ for warning in self.warnings:
+ self._write_colored(f"WARNING: {warning}", 'yellow')
+
+    def error(self, message: str, item_name: Optional[str] = None, phase: str = 'import'):
+ """Log an error."""
+ if phase == 'compilation':
+ self.compilation_errors.append({'item': item_name or 'unknown', 'message': message})
+ else:
+ self.import_errors.append({'item': item_name or 'unknown', 'message': message})
+
+ def warning(self, message: str):
+ """Log a warning."""
+ self.warnings.append(message)
+
+ def complete(self):
+ """Complete the import and show final summary."""
+ # Show import summary first if not already shown
+ if self.current_import > 0 and not hasattr(self, '_import_shown'):
+ self.import_complete()
+
+ # Calculate duration
+ if self.start_time:
+ duration = (datetime.now() - self.start_time).total_seconds()
+ duration_str = f"{duration:.1f}s"
+ else:
+ duration_str = "N/A"
+
+ # Simple final summary
+ print(f"\n{'='*50}", file=sys.stderr)
+ print(f"Import Complete in {duration_str}", file=sys.stderr)
+ print(f"Added: {self.added}, Updated: {self.updated}, Failed: {self.failed}", file=sys.stderr)
+ print(f"{'='*50}\n", file=sys.stderr)
+
+
+# Global instance
+_logger = None
+
+def get_import_logger() -> ImportLogger:
+ """Get the import logger instance."""
+ global _logger
+ if _logger is None:
+ _logger = ImportLogger()
+ return _logger
+
+def reset_import_logger() -> ImportLogger:
+ """Reset and return a new import logger."""
+ global _logger
+ _logger = ImportLogger()
+ return _logger
\ No newline at end of file
diff --git a/backend/app/importer/mappings.py b/backend/app/importer/mappings.py
new file mode 100644
index 0000000..8c14874
--- /dev/null
+++ b/backend/app/importer/mappings.py
@@ -0,0 +1,990 @@
+# app/importer/mappings.py
+"""Centralized constants and mappings for arr applications"""
+from enum import Enum, auto
+from typing import Dict, Any
+import logging
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+class TargetApp(Enum):
+ """Enum for target application types"""
+ RADARR = auto()
+ SONARR = auto()
+
+
+class IndexerFlags:
+ """Indexer flag mappings for both applications"""
+ RADARR = {
+ 'freeleech': 1,
+ 'halfleech': 2,
+ 'double_upload': 4,
+ 'internal': 32,
+ 'scene': 128,
+ 'freeleech_75': 256,
+ 'freeleech_25': 512,
+ 'nuked': 2048,
+ 'ptp_golden': 8,
+ 'ptp_approved': 16
+ }
+
+ SONARR = {
+ 'freeleech': 1,
+ 'halfleech': 2,
+ 'double_upload': 4,
+ 'internal': 8,
+ 'scene': 16,
+ 'freeleech_75': 32,
+ 'freeleech_25': 64,
+ 'nuked': 128
+ }
+
+
+class Sources:
+ """Source mappings for both applications"""
+ RADARR = {
+ 'cam': 1,
+ 'telesync': 2,
+ 'telecine': 3,
+ 'workprint': 4,
+ 'dvd': 5,
+ 'tv': 6,
+ 'web_dl': 7,
+ 'webrip': 8,
+ 'bluray': 9
+ }
+
+ SONARR = {
+ 'television': 1,
+ 'television_raw': 2,
+ 'web_dl': 3,
+ 'webrip': 4,
+ 'dvd': 5,
+ 'bluray': 6,
+ 'bluray_raw': 7
+ }
+
+
+class Quality_Modifiers:
+ """Quality modifier mappings for Radarr ONLY"""
+ RADARR = {
+ 'none': 0,
+ 'regional': 1,
+ 'screener': 2,
+ 'rawhd': 3,
+ 'brdisk': 4,
+ 'remux': 5,
+ }
+
+
+class Release_Types:
+ """Release type mappings for Sonarr ONLY"""
+ SONARR = {
+ 'none': 0,
+ 'single_episode': 1,
+ 'multi_episode': 2,
+ 'season_pack': 3,
+ }
+
+
+class Qualities:
+ """Quality mappings for both applications"""
+ COMMON_RESOLUTIONS = {
+ '360p': 360,
+ '480p': 480,
+ '540p': 540,
+ '576p': 576,
+ '720p': 720,
+ '1080p': 1080,
+ '2160p': 2160
+ }
+
+ RADARR = {
+ "Unknown": {
+ "id": 0,
+ "name": "Unknown",
+ "source": "unknown",
+ "resolution": 0
+ },
+ "SDTV": {
+ "id": 1,
+ "name": "SDTV",
+ "source": "tv",
+ "resolution": 480
+ },
+ "DVD": {
+ "id": 2,
+ "name": "DVD",
+ "source": "dvd",
+ "resolution": 480
+ },
+ "WEBDL-1080p": {
+ "id": 3,
+ "name": "WEBDL-1080p",
+ "source": "webdl",
+ "resolution": 1080
+ },
+ "HDTV-720p": {
+ "id": 4,
+ "name": "HDTV-720p",
+ "source": "tv",
+ "resolution": 720
+ },
+ "WEBDL-720p": {
+ "id": 5,
+ "name": "WEBDL-720p",
+ "source": "webdl",
+ "resolution": 720
+ },
+ "Bluray-720p": {
+ "id": 6,
+ "name": "Bluray-720p",
+ "source": "bluray",
+ "resolution": 720
+ },
+ "Bluray-1080p": {
+ "id": 7,
+ "name": "Bluray-1080p",
+ "source": "bluray",
+ "resolution": 1080
+ },
+ "WEBDL-480p": {
+ "id": 8,
+ "name": "WEBDL-480p",
+ "source": "webdl",
+ "resolution": 480
+ },
+ "HDTV-1080p": {
+ "id": 9,
+ "name": "HDTV-1080p",
+ "source": "tv",
+ "resolution": 1080
+ },
+ "Raw-HD": {
+ "id": 10,
+ "name": "Raw-HD",
+ "source": "tv",
+ "resolution": 1080
+ },
+ "WEBRip-480p": {
+ "id": 12,
+ "name": "WEBRip-480p",
+ "source": "webrip",
+ "resolution": 480
+ },
+ "WEBRip-720p": {
+ "id": 14,
+ "name": "WEBRip-720p",
+ "source": "webrip",
+ "resolution": 720
+ },
+ "WEBRip-1080p": {
+ "id": 15,
+ "name": "WEBRip-1080p",
+ "source": "webrip",
+ "resolution": 1080
+ },
+ "HDTV-2160p": {
+ "id": 16,
+ "name": "HDTV-2160p",
+ "source": "tv",
+ "resolution": 2160
+ },
+ "WEBRip-2160p": {
+ "id": 17,
+ "name": "WEBRip-2160p",
+ "source": "webrip",
+ "resolution": 2160
+ },
+ "WEBDL-2160p": {
+ "id": 18,
+ "name": "WEBDL-2160p",
+ "source": "webdl",
+ "resolution": 2160
+ },
+ "Bluray-2160p": {
+ "id": 19,
+ "name": "Bluray-2160p",
+ "source": "bluray",
+ "resolution": 2160
+ },
+ "Bluray-480p": {
+ "id": 20,
+ "name": "Bluray-480p",
+ "source": "bluray",
+ "resolution": 480
+ },
+ "Bluray-576p": {
+ "id": 21,
+ "name": "Bluray-576p",
+ "source": "bluray",
+ "resolution": 576
+ },
+ "BR-DISK": {
+ "id": 22,
+ "name": "BR-DISK",
+ "source": "bluray",
+ "resolution": 1080
+ },
+ "DVD-R": {
+ "id": 23,
+ "name": "DVD-R",
+ "source": "dvd",
+ "resolution": 480
+ },
+ "WORKPRINT": {
+ "id": 24,
+ "name": "WORKPRINT",
+ "source": "workprint",
+ "resolution": 0
+ },
+ "CAM": {
+ "id": 25,
+ "name": "CAM",
+ "source": "cam",
+ "resolution": 0
+ },
+ "TELESYNC": {
+ "id": 26,
+ "name": "TELESYNC",
+ "source": "telesync",
+ "resolution": 0
+ },
+ "TELECINE": {
+ "id": 27,
+ "name": "TELECINE",
+ "source": "telecine",
+ "resolution": 0
+ },
+ "DVDSCR": {
+ "id": 28,
+ "name": "DVDSCR",
+ "source": "dvd",
+ "resolution": 480
+ },
+ "REGIONAL": {
+ "id": 29,
+ "name": "REGIONAL",
+ "source": "dvd",
+ "resolution": 480
+ },
+ "Remux-1080p": {
+ "id": 30,
+ "name": "Remux-1080p",
+ "source": "bluray",
+ "resolution": 1080
+ },
+ "Remux-2160p": {
+ "id": 31,
+ "name": "Remux-2160p",
+ "source": "bluray",
+ "resolution": 2160
+ }
+ }
+
+ SONARR = {
+ "Unknown": {
+ "id": 0,
+ "name": "Unknown",
+ "source": "unknown",
+ "resolution": 0
+ },
+ "SDTV": {
+ "id": 1,
+ "name": "SDTV",
+ "source": "television",
+ "resolution": 480
+ },
+ "DVD": {
+ "id": 2,
+ "name": "DVD",
+ "source": "dvd",
+ "resolution": 480
+ },
+ "WEBDL-1080p": {
+ "id": 3,
+ "name": "WEBDL-1080p",
+ "source": "web",
+ "resolution": 1080
+ },
+ "HDTV-720p": {
+ "id": 4,
+ "name": "HDTV-720p",
+ "source": "television",
+ "resolution": 720
+ },
+ "WEBDL-720p": {
+ "id": 5,
+ "name": "WEBDL-720p",
+ "source": "web",
+ "resolution": 720
+ },
+ "Bluray-720p": {
+ "id": 6,
+ "name": "Bluray-720p",
+ "source": "bluray",
+ "resolution": 720
+ },
+ "Bluray-1080p": {
+ "id": 7,
+ "name": "Bluray-1080p",
+ "source": "bluray",
+ "resolution": 1080
+ },
+ "WEBDL-480p": {
+ "id": 8,
+ "name": "WEBDL-480p",
+ "source": "web",
+ "resolution": 480
+ },
+ "HDTV-1080p": {
+ "id": 9,
+ "name": "HDTV-1080p",
+ "source": "television",
+ "resolution": 1080
+ },
+ "Raw-HD": {
+ "id": 10,
+ "name": "Raw-HD",
+ "source": "televisionRaw",
+ "resolution": 1080
+ },
+ "WEBRip-480p": {
+ "id": 12,
+ "name": "WEBRip-480p",
+ "source": "webRip",
+ "resolution": 480
+ },
+ "Bluray-480p": {
+ "id": 13,
+ "name": "Bluray-480p",
+ "source": "bluray",
+ "resolution": 480
+ },
+ "WEBRip-720p": {
+ "id": 14,
+ "name": "WEBRip-720p",
+ "source": "webRip",
+ "resolution": 720
+ },
+ "WEBRip-1080p": {
+ "id": 15,
+ "name": "WEBRip-1080p",
+ "source": "webRip",
+ "resolution": 1080
+ },
+ "HDTV-2160p": {
+ "id": 16,
+ "name": "HDTV-2160p",
+ "source": "television",
+ "resolution": 2160
+ },
+ "WEBRip-2160p": {
+ "id": 17,
+ "name": "WEBRip-2160p",
+ "source": "webRip",
+ "resolution": 2160
+ },
+ "WEBDL-2160p": {
+ "id": 18,
+ "name": "WEBDL-2160p",
+ "source": "web",
+ "resolution": 2160
+ },
+ "Bluray-2160p": {
+ "id": 19,
+ "name": "Bluray-2160p",
+ "source": "bluray",
+ "resolution": 2160
+ },
+ "Bluray-1080p Remux": {
+ "id": 20,
+ "name": "Bluray-1080p Remux",
+ "source": "blurayRaw",
+ "resolution": 1080
+ },
+ "Bluray-2160p Remux": {
+ "id": 21,
+ "name": "Bluray-2160p Remux",
+ "source": "blurayRaw",
+ "resolution": 2160
+ },
+ "Bluray-576p": {
+ "id": 22,
+ "name": "Bluray-576p",
+ "source": "bluray",
+ "resolution": 576
+ }
+ }
+
+
+class Languages:
+ """Language mappings for both applications"""
+ RADARR = {
+ 'any': {
+ 'id': -1,
+ 'name': 'Any'
+ },
+ 'original': {
+ 'id': -2,
+ 'name': 'Original'
+ },
+ 'unknown': {
+ 'id': 0,
+ 'name': 'Unknown'
+ },
+ 'english': {
+ 'id': 1,
+ 'name': 'English'
+ },
+ 'french': {
+ 'id': 2,
+ 'name': 'French'
+ },
+ 'spanish': {
+ 'id': 3,
+ 'name': 'Spanish'
+ },
+ 'german': {
+ 'id': 4,
+ 'name': 'German'
+ },
+ 'italian': {
+ 'id': 5,
+ 'name': 'Italian'
+ },
+ 'danish': {
+ 'id': 6,
+ 'name': 'Danish'
+ },
+ 'dutch': {
+ 'id': 7,
+ 'name': 'Dutch'
+ },
+ 'japanese': {
+ 'id': 8,
+ 'name': 'Japanese'
+ },
+ 'icelandic': {
+ 'id': 9,
+ 'name': 'Icelandic'
+ },
+ 'chinese': {
+ 'id': 10,
+ 'name': 'Chinese'
+ },
+ 'russian': {
+ 'id': 11,
+ 'name': 'Russian'
+ },
+ 'polish': {
+ 'id': 12,
+ 'name': 'Polish'
+ },
+ 'vietnamese': {
+ 'id': 13,
+ 'name': 'Vietnamese'
+ },
+ 'swedish': {
+ 'id': 14,
+ 'name': 'Swedish'
+ },
+ 'norwegian': {
+ 'id': 15,
+ 'name': 'Norwegian'
+ },
+ 'finnish': {
+ 'id': 16,
+ 'name': 'Finnish'
+ },
+ 'turkish': {
+ 'id': 17,
+ 'name': 'Turkish'
+ },
+ 'portuguese': {
+ 'id': 18,
+ 'name': 'Portuguese'
+ },
+ 'flemish': {
+ 'id': 19,
+ 'name': 'Flemish'
+ },
+ 'greek': {
+ 'id': 20,
+ 'name': 'Greek'
+ },
+ 'korean': {
+ 'id': 21,
+ 'name': 'Korean'
+ },
+ 'hungarian': {
+ 'id': 22,
+ 'name': 'Hungarian'
+ },
+ 'hebrew': {
+ 'id': 23,
+ 'name': 'Hebrew'
+ },
+ 'lithuanian': {
+ 'id': 24,
+ 'name': 'Lithuanian'
+ },
+ 'czech': {
+ 'id': 25,
+ 'name': 'Czech'
+ },
+ 'hindi': {
+ 'id': 26,
+ 'name': 'Hindi'
+ },
+ 'romanian': {
+ 'id': 27,
+ 'name': 'Romanian'
+ },
+ 'thai': {
+ 'id': 28,
+ 'name': 'Thai'
+ },
+ 'bulgarian': {
+ 'id': 29,
+ 'name': 'Bulgarian'
+ },
+ 'portuguese_br': {
+ 'id': 30,
+ 'name': 'Portuguese (Brazil)'
+ },
+ 'arabic': {
+ 'id': 31,
+ 'name': 'Arabic'
+ },
+ 'ukrainian': {
+ 'id': 32,
+ 'name': 'Ukrainian'
+ },
+ 'persian': {
+ 'id': 33,
+ 'name': 'Persian'
+ },
+ 'bengali': {
+ 'id': 34,
+ 'name': 'Bengali'
+ },
+ 'slovak': {
+ 'id': 35,
+ 'name': 'Slovak'
+ },
+ 'latvian': {
+ 'id': 36,
+ 'name': 'Latvian'
+ },
+ 'spanish_latino': {
+ 'id': 37,
+ 'name': 'Spanish (Latino)'
+ },
+ 'catalan': {
+ 'id': 38,
+ 'name': 'Catalan'
+ },
+ 'croatian': {
+ 'id': 39,
+ 'name': 'Croatian'
+ },
+ 'serbian': {
+ 'id': 40,
+ 'name': 'Serbian'
+ },
+ 'bosnian': {
+ 'id': 41,
+ 'name': 'Bosnian'
+ },
+ 'estonian': {
+ 'id': 42,
+ 'name': 'Estonian'
+ },
+ 'tamil': {
+ 'id': 43,
+ 'name': 'Tamil'
+ },
+ 'indonesian': {
+ 'id': 44,
+ 'name': 'Indonesian'
+ },
+ 'telugu': {
+ 'id': 45,
+ 'name': 'Telugu'
+ },
+ 'macedonian': {
+ 'id': 46,
+ 'name': 'Macedonian'
+ },
+ 'slovenian': {
+ 'id': 47,
+ 'name': 'Slovenian'
+ },
+ 'malayalam': {
+ 'id': 48,
+ 'name': 'Malayalam'
+ },
+ 'kannada': {
+ 'id': 49,
+ 'name': 'Kannada'
+ },
+ 'albanian': {
+ 'id': 50,
+ 'name': 'Albanian'
+ },
+ 'afrikaans': {
+ 'id': 51,
+ 'name': 'Afrikaans'
+ }
+ }
+
+ SONARR = {
+ 'unknown': {
+ 'id': 0,
+ 'name': 'Unknown'
+ },
+ 'english': {
+ 'id': 1,
+ 'name': 'English'
+ },
+ 'french': {
+ 'id': 2,
+ 'name': 'French'
+ },
+ 'spanish': {
+ 'id': 3,
+ 'name': 'Spanish'
+ },
+ 'german': {
+ 'id': 4,
+ 'name': 'German'
+ },
+ 'italian': {
+ 'id': 5,
+ 'name': 'Italian'
+ },
+ 'danish': {
+ 'id': 6,
+ 'name': 'Danish'
+ },
+ 'dutch': {
+ 'id': 7,
+ 'name': 'Dutch'
+ },
+ 'japanese': {
+ 'id': 8,
+ 'name': 'Japanese'
+ },
+ 'icelandic': {
+ 'id': 9,
+ 'name': 'Icelandic'
+ },
+ 'chinese': {
+ 'id': 10,
+ 'name': 'Chinese'
+ },
+ 'russian': {
+ 'id': 11,
+ 'name': 'Russian'
+ },
+ 'polish': {
+ 'id': 12,
+ 'name': 'Polish'
+ },
+ 'vietnamese': {
+ 'id': 13,
+ 'name': 'Vietnamese'
+ },
+ 'swedish': {
+ 'id': 14,
+ 'name': 'Swedish'
+ },
+ 'norwegian': {
+ 'id': 15,
+ 'name': 'Norwegian'
+ },
+ 'finnish': {
+ 'id': 16,
+ 'name': 'Finnish'
+ },
+ 'turkish': {
+ 'id': 17,
+ 'name': 'Turkish'
+ },
+ 'portuguese': {
+ 'id': 18,
+ 'name': 'Portuguese'
+ },
+ 'flemish': {
+ 'id': 19,
+ 'name': 'Flemish'
+ },
+ 'greek': {
+ 'id': 20,
+ 'name': 'Greek'
+ },
+ 'korean': {
+ 'id': 21,
+ 'name': 'Korean'
+ },
+ 'hungarian': {
+ 'id': 22,
+ 'name': 'Hungarian'
+ },
+ 'hebrew': {
+ 'id': 23,
+ 'name': 'Hebrew'
+ },
+ 'lithuanian': {
+ 'id': 24,
+ 'name': 'Lithuanian'
+ },
+ 'czech': {
+ 'id': 25,
+ 'name': 'Czech'
+ },
+ 'arabic': {
+ 'id': 26,
+ 'name': 'Arabic'
+ },
+ 'hindi': {
+ 'id': 27,
+ 'name': 'Hindi'
+ },
+ 'bulgarian': {
+ 'id': 28,
+ 'name': 'Bulgarian'
+ },
+ 'malayalam': {
+ 'id': 29,
+ 'name': 'Malayalam'
+ },
+ 'ukrainian': {
+ 'id': 30,
+ 'name': 'Ukrainian'
+ },
+ 'slovak': {
+ 'id': 31,
+ 'name': 'Slovak'
+ },
+ 'thai': {
+ 'id': 32,
+ 'name': 'Thai'
+ },
+ 'portuguese_br': {
+ 'id': 33,
+ 'name': 'Portuguese (Brazil)'
+ },
+ 'spanish_latino': {
+ 'id': 34,
+ 'name': 'Spanish (Latino)'
+ },
+ 'romanian': {
+ 'id': 35,
+ 'name': 'Romanian'
+ },
+ 'latvian': {
+ 'id': 36,
+ 'name': 'Latvian'
+ },
+ 'persian': {
+ 'id': 37,
+ 'name': 'Persian'
+ },
+ 'catalan': {
+ 'id': 38,
+ 'name': 'Catalan'
+ },
+ 'croatian': {
+ 'id': 39,
+ 'name': 'Croatian'
+ },
+ 'serbian': {
+ 'id': 40,
+ 'name': 'Serbian'
+ },
+ 'bosnian': {
+ 'id': 41,
+ 'name': 'Bosnian'
+ },
+ 'estonian': {
+ 'id': 42,
+ 'name': 'Estonian'
+ },
+ 'tamil': {
+ 'id': 43,
+ 'name': 'Tamil'
+ },
+ 'indonesian': {
+ 'id': 44,
+ 'name': 'Indonesian'
+ },
+ 'macedonian': {
+ 'id': 45,
+ 'name': 'Macedonian'
+ },
+ 'slovenian': {
+ 'id': 46,
+ 'name': 'Slovenian'
+ },
+ 'original': {
+ 'id': -2,
+ 'name': 'Original'
+ }
+ }
+
+
+class QualityNameMapper:
+ """Maps between different quality naming conventions"""
+ REMUX_MAPPINGS = {
+ TargetApp.SONARR: {
+ "Remux-1080p": "Bluray-1080p Remux",
+ "Remux-2160p": "Bluray-2160p Remux"
+ },
+ TargetApp.RADARR: {
+ "Remux-1080p": "Remux-1080p",
+ "Remux-2160p": "Remux-2160p"
+ }
+ }
+
+ ALTERNATE_NAMES = {
+ "BR-Disk": "BR-DISK",
+ "BR-DISK": "BR-DISK",
+ "BRDISK": "BR-DISK",
+ "BR_DISK": "BR-DISK",
+ "BLURAY-DISK": "BR-DISK",
+ "BLURAY_DISK": "BR-DISK",
+ "BLURAYDISK": "BR-DISK",
+ "Telecine": "TELECINE",
+ "TELECINE": "TELECINE",
+ "TeleCine": "TELECINE",
+ "Telesync": "TELESYNC",
+ "TELESYNC": "TELESYNC",
+ "TeleSync": "TELESYNC",
+ }
+
+ @classmethod
+ def map_quality_name(cls, name: str, target_app: TargetApp) -> str:
+ """
+ Maps quality names between different formats based on target app
+ Args:
+ name: The quality name to map
+ target_app: The target application (RADARR or SONARR)
+ Returns:
+ The mapped quality name
+ """
+ # Handle empty or None cases
+ if not name:
+ return name
+
+ # First check for remux mappings
+ if name in cls.REMUX_MAPPINGS.get(target_app, {}):
+ return cls.REMUX_MAPPINGS[target_app][name]
+
+ # Then check for alternate spellings
+ normalized_name = name.upper().replace("-", "").replace("_", "")
+ for alt_name, standard_name in cls.ALTERNATE_NAMES.items():
+ if normalized_name == alt_name.upper().replace("-", "").replace(
+ "_", ""):
+ return standard_name
+
+ return name
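+
+    # Illustrative behavior (values taken from the mappings above):
+    #   map_quality_name("Remux-1080p", TargetApp.SONARR) -> "Bluray-1080p Remux"
+    #   map_quality_name("Remux-1080p", TargetApp.RADARR) -> "Remux-1080p"
+    #   map_quality_name("br_disk", TargetApp.RADARR)     -> "BR-DISK"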
+
+
+class LanguageNameMapper:
+ """Maps between different language naming conventions"""
+ ALTERNATE_NAMES = {
+ "spanish-latino": "spanish_latino",
+ "spanish_latino": "spanish_latino",
+ "spanishlatino": "spanish_latino",
+ "portuguese-br": "portuguese_br",
+ "portuguese_br": "portuguese_br",
+ "portuguesebr": "portuguese_br",
+ "portuguese-brazil": "portuguese_br",
+ "portuguese_brazil": "portuguese_br"
+ }
+
+ @classmethod
+ def normalize_language_name(cls, name: str) -> str:
+ """
+ Normalizes language names to a consistent format
+ Args:
+ name: The language name to normalize
+ Returns:
+ The normalized language name
+ """
+ if not name:
+ return name
+
+ normalized = name.lower().replace(" ", "_")
+ return cls.ALTERNATE_NAMES.get(normalized, normalized)
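+
+    # Illustrative behavior:
+    #   normalize_language_name("Spanish Latino") -> "spanish_latino"
+    #   normalize_language_name("Portuguese-BR")  -> "portuguese_br"
+    #   normalize_language_name("French")         -> "french"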
+
+
+class ValueResolver:
+ """Helper class to resolve values based on target app"""
+
+ @classmethod
+ def get_indexer_flag(cls, flag: str, target_app: TargetApp) -> int:
+ flags = IndexerFlags.RADARR if target_app == TargetApp.RADARR else IndexerFlags.SONARR
+ return flags.get(flag.lower(), 0)
+
+ @classmethod
+ def get_source(cls, source: str, target_app: TargetApp) -> int:
+ sources = Sources.RADARR if target_app == TargetApp.RADARR else Sources.SONARR
+ return sources.get(source.lower(), 0)
+
+ @classmethod
+ def get_resolution(cls, resolution: str) -> int:
+ return Qualities.COMMON_RESOLUTIONS.get(resolution.lower(), 0)
+
+ @classmethod
+ def get_qualities(cls, target_app: TargetApp) -> Dict[str, Any]:
+ qualities = Qualities.RADARR if target_app == TargetApp.RADARR else Qualities.SONARR
+ return qualities
+
+ @classmethod
+ def get_quality_name(cls, name: str, target_app: TargetApp) -> str:
+ """Maps quality names between different formats based on target app"""
+ return QualityNameMapper.map_quality_name(name, target_app)
+
+ @classmethod
+ def get_quality_modifier(cls, quality_modifier: str) -> int:
+ return Quality_Modifiers.RADARR.get(quality_modifier.lower(), 0)
+
+ @classmethod
+ def get_release_type(cls, release_type: str) -> int:
+ return Release_Types.SONARR.get(release_type.lower(), 0)
+
+ @classmethod
+ def get_language(cls,
+ language_name: str,
+ target_app: TargetApp,
+ for_profile: bool = True) -> Dict[str, Any]:
+ """
+ Get language mapping based on target app and context
+
+ Args:
+ language_name: Name of the language to look up
+ target_app: Target application (RADARR or SONARR)
+ for_profile: If True, this is for a quality profile. If False, this is for a custom format.
+ """
+ languages = Languages.RADARR if target_app == TargetApp.RADARR else Languages.SONARR
+
+ # For profiles, only Radarr uses language settings
+ if for_profile and target_app == TargetApp.SONARR:
+ return {'id': -2, 'name': 'Original'}
+
+ # Normalize the language name
+ normalized_name = LanguageNameMapper.normalize_language_name(
+ language_name)
+ language_data = languages.get(normalized_name)
+
+ if not language_data:
+ logger.warning(
+ f"Language '{language_name}' (normalized: '{normalized_name}') "
+ f"not found in {target_app} mappings, falling back to Unknown")
+ language_data = languages['unknown']
+
+ return language_data
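+
+
+# Illustrative lookups (ids come from the Languages tables above):
+#   ValueResolver.get_language('Spanish Latino', TargetApp.RADARR, for_profile=True)
+#       -> {'id': 37, 'name': 'Spanish (Latino)'}
+#   ValueResolver.get_language('Spanish Latino', TargetApp.SONARR, for_profile=True)
+#       -> {'id': -2, 'name': 'Original'}  # Sonarr profiles do not carry a language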
diff --git a/backend/app/importer/routes.py b/backend/app/importer/routes.py
new file mode 100644
index 0000000..e384df3
--- /dev/null
+++ b/backend/app/importer/routes.py
@@ -0,0 +1,59 @@
+"""Routes for the new import module."""
+from flask import Blueprint, request, jsonify
+from flask_cors import cross_origin
+import logging
+from . import handle_import_request
+
+logger = logging.getLogger(__name__)
+
+bp = Blueprint('new_import', __name__)
+
+
+@bp.route('', methods=['POST', 'OPTIONS'])
+@cross_origin()
+def import_items():
+ """
+ Import formats or profiles to an Arr instance.
+
+ Request body:
+ {
+ "arrID": int, # ID of arr_config to use
+ "strategy": str, # "format" or "profile"
+ "filenames": [str], # List of filenames to import
+ "dryRun": bool # Optional: simulate import without changes (default: false)
+ }
+ """
+ if request.method == 'OPTIONS':
+ return jsonify({}), 200
+
+ try:
+ data = request.get_json()
+
+ # Validate request
+ if not data:
+ return jsonify({
+ 'success': False,
+ 'error': 'Request body is required'
+ }), 400
+
+ # Call the import handler
+ result = handle_import_request(data)
+
+ # Return appropriate status code
+ status_code = 200
+ if result.get('status') == 'partial':
+ status_code = 207
+ elif not result.get('success'):
+ if 'not found' in result.get('error', '').lower():
+ status_code = 404
+ else:
+ status_code = 400
+
+ return jsonify(result), status_code
+
+ except Exception as e:
+ logger.error(f"Error handling import request: {str(e)}")
+ return jsonify({
+ 'success': False,
+ 'error': str(e)
+ }), 500
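+
+
+# Manual smoke test (hypothetical host, port and filename):
+#   curl -X POST http://localhost:5000/api/v2/import \
+#        -H 'Content-Type: application/json' \
+#        -d '{"arrID": 1, "strategy": "format", "filenames": ["HDR10"], "dryRun": true}'
+# Partial successes return HTTP 207; a missing resource returns 404.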
diff --git a/backend/app/importer/strategies/__init__.py b/backend/app/importer/strategies/__init__.py
new file mode 100644
index 0000000..f15496b
--- /dev/null
+++ b/backend/app/importer/strategies/__init__.py
@@ -0,0 +1,6 @@
+"""Import strategies."""
+from .base import ImportStrategy
+from .format import FormatStrategy
+from .profile import ProfileStrategy
+
+__all__ = ['ImportStrategy', 'FormatStrategy', 'ProfileStrategy']
\ No newline at end of file
diff --git a/backend/app/importer/strategies/base.py b/backend/app/importer/strategies/base.py
new file mode 100644
index 0000000..19b807b
--- /dev/null
+++ b/backend/app/importer/strategies/base.py
@@ -0,0 +1,103 @@
+"""Base strategy class for import operations."""
+import logging
+from abc import ABC, abstractmethod
+from typing import Dict, List, Any
+from ..arr_handler import ArrHandler
+from ..logger import get_import_logger
+
+logger = logging.getLogger(__name__)
+
+
+class ImportStrategy(ABC):
+ """Base class for import strategies."""
+
+ def __init__(self, arr_config):
+ """
+ Initialize the import strategy.
+
+ Args:
+ arr_config: Database row from arr_config table containing:
+ - type: 'radarr' or 'sonarr'
+ - arr_server: Base URL
+ - api_key: API key
+ - import_as_unique: Whether to add [Dictionarry] suffix
+ """
+ # Handle sqlite3.Row objects (they support dict-like access)
+ self.arr_type = arr_config['type']
+ self.base_url = arr_config['arr_server']
+ self.api_key = arr_config['api_key']
+ # sqlite3.Row doesn't have .get() method, so we need to handle None
+ import_as_unique = arr_config['import_as_unique'] if 'import_as_unique' in arr_config.keys() else False
+ self.import_as_unique = bool(import_as_unique) if import_as_unique is not None else False
+ self.arr = ArrHandler(self.base_url, self.api_key)
+
+ @abstractmethod
+ def compile(self, filenames: List[str]) -> Dict[str, Any]:
+ """
+ Compile files to API-ready format.
+
+ Args:
+ filenames: List of filenames to compile
+
+ Returns:
+ Dictionary with compiled data
+ """
+ pass
+
+ @abstractmethod
+ def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
+ """
+ Import compiled data to Arr instance.
+
+ Args:
+ compiled_data: Data from compile() method
+ dry_run: If True, simulate import without making changes
+
+ Returns:
+ Import results with added/updated/failed counts
+ """
+ pass
+
+ def execute(self, filenames: List[str], dry_run: bool = False) -> Dict[str, Any]:
+ """
+ Execute the full import process.
+
+ Args:
+ filenames: List of filenames to import
+ dry_run: If True, simulate import without making changes
+
+ Returns:
+ Import results
+ """
+ try:
+ # Compile
+ compiled = self.compile(filenames)
+
+ # Import
+ results = self.import_data(compiled, dry_run=dry_run)
+
+ # Add dry_run flag and compiled data to results
+ if dry_run:
+ results['dry_run'] = True
+ results['compiled_data'] = compiled
+
+ return results
+
+ except Exception as e:
+ import_logger = get_import_logger()
+ import_logger.error(f"Strategy execution failed: {e}", phase='import')
+ return {
+ 'added': 0,
+ 'updated': 0,
+ 'failed': len(filenames),
+ 'error': str(e)
+ }
+ finally:
+ # Clean up
+ self.arr.close()
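+
+    # Sketch of the intended call pattern (the concrete strategy classes live in
+    # this package; the config row comes from the arr_config table):
+    #   strategy = FormatStrategy(arr_config_row)
+    #   results = strategy.execute(['HDR10'], dry_run=True)
+    #   # -> e.g. {'added': 1, 'updated': 0, 'failed': 0, 'dry_run': True, ...}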
+
+ def add_unique_suffix(self, name: str) -> str:
+ """Add [Dictionarry] suffix if unique import is enabled."""
+ if self.import_as_unique and not name.endswith('[Dictionarry]'):
+ return f"{name} [Dictionarry]"
+ return name
\ No newline at end of file
diff --git a/backend/app/importer/strategies/format.py b/backend/app/importer/strategies/format.py
new file mode 100644
index 0000000..76ad96a
--- /dev/null
+++ b/backend/app/importer/strategies/format.py
@@ -0,0 +1,132 @@
+"""Format import strategy."""
+import logging
+from typing import Dict, List, Any
+from .base import ImportStrategy
+from ..utils import load_yaml
+from ..compiler import compile_format_to_api_structure
+from ..logger import get_import_logger
+
+logger = logging.getLogger(__name__)
+
+
+class FormatStrategy(ImportStrategy):
+ """Strategy for importing custom formats."""
+
+ def compile(self, filenames: List[str]) -> Dict[str, Any]:
+ """
+ Compile format files to API-ready format.
+
+ Args:
+ filenames: List of format filenames (without .yml)
+
+ Returns:
+ Dictionary with 'formats' key containing compiled formats
+ """
+ formats = []
+ failed = []
+ import_logger = get_import_logger()
+
+ # Don't try to predict - we'll count as we go
+ import_logger.start(0, 0) # Will update counts as we compile
+
+ for filename in filenames:
+ try:
+ # Load YAML
+ format_yaml = load_yaml(f"custom_format/{filename}.yml")
+
+ # Compile to API structure
+ compiled = compile_format_to_api_structure(format_yaml, self.arr_type)
+
+ # Add unique suffix if needed
+ if self.import_as_unique:
+ compiled['name'] = self.add_unique_suffix(compiled['name'])
+
+ formats.append(compiled)
+ import_logger.update_compilation(filename)
+
+ except Exception as e:
+ import_logger.error(f"{e}", filename, 'compilation')
+ failed.append(filename)
+ # Don't count failed compilations
+
+ # Set final compilation count
+ import_logger.total_compilation = len(formats)
+ import_logger.current_compilation = len(formats)
+ import_logger.compilation_complete()
+
+        if failed:
+            logger.warning(f"Failed to compile {len(failed)} format(s): {failed}")
+
+        return {'formats': formats}
+
+ def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
+ """
+ Import compiled formats to Arr instance.
+
+ Args:
+ compiled_data: Dictionary with 'formats' key
+ dry_run: If True, simulate import without making changes
+
+ Returns:
+ Import results
+ """
+ # Get existing formats
+ existing = self.arr.get_all_formats()
+ existing_map = {f['name']: f['id'] for f in existing}
+
+ results = {
+ 'added': 0,
+ 'updated': 0,
+ 'failed': 0,
+ 'details': []
+ }
+
+ import_logger = get_import_logger()
+
+ # Set import count
+ import_logger.total_import = len(compiled_data['formats'])
+ import_logger._import_shown = False # Reset import shown flag
+
+ for format_data in compiled_data['formats']:
+ format_name = format_data['name']
+
+ try:
+ if format_name in existing_map:
+ # Update existing
+ if not dry_run:
+ format_data['id'] = existing_map[format_name]
+ self.arr.put(
+ f"/api/v3/customformat/{existing_map[format_name]}",
+ format_data
+ )
+
+ import_logger.update_import(format_name, "updated")
+ results['updated'] += 1
+ results['details'].append({
+ 'name': format_name,
+ 'action': 'updated'
+ })
+ else:
+ # Add new
+ if not dry_run:
+ self.arr.post("/api/v3/customformat", format_data)
+
+ import_logger.update_import(format_name, "added")
+ results['added'] += 1
+ results['details'].append({
+ 'name': format_name,
+ 'action': 'added'
+ })
+
+ except Exception as e:
+ import_logger.update_import(format_name, "failed")
+ import_logger.error(f"Failed to import format {format_name}: {e}", format_name)
+ results['failed'] += 1
+ results['details'].append({
+ 'name': format_name,
+ 'action': 'failed',
+ 'error': str(e)
+ })
+
+ # Show import summary
+ import_logger.import_complete()
+ import_logger._import_shown = True
+
+ return results
\ No newline at end of file
diff --git a/backend/app/importer/strategies/profile.py b/backend/app/importer/strategies/profile.py
new file mode 100644
index 0000000..f6414a8
--- /dev/null
+++ b/backend/app/importer/strategies/profile.py
@@ -0,0 +1,262 @@
+"""Profile import strategy."""
+import logging
+from typing import Dict, List, Any, Set
+from .base import ImportStrategy
+from ..utils import load_yaml, extract_format_names, generate_language_formats
+from ..compiler import compile_format_to_api_structure, compile_profile_to_api_structure
+from ..logger import get_import_logger
+
+logger = logging.getLogger(__name__)
+
+
+class ProfileStrategy(ImportStrategy):
+ """Strategy for importing quality profiles."""
+
+ def compile(self, filenames: List[str]) -> Dict[str, Any]:
+ """
+ Compile profile files and their dependent formats to API-ready format.
+
+ Args:
+ filenames: List of profile filenames (without .yml)
+
+ Returns:
+ Dictionary with 'profiles' and 'formats' keys
+ """
+ profiles = []
+ all_formats = []
+ processed_formats: Set[str] = set()
+ # Cache for language formats to avoid recompiling
+ language_formats_cache: Dict[str, List[Dict]] = {}
+
+ import_logger = get_import_logger()
+
+ # Don't try to predict - we'll count as we go
+ import_logger.start(0, 0) # Will update counts as we compile
+
+ for filename in filenames:
+ try:
+ # Load profile YAML
+ profile_yaml = load_yaml(f"profile/{filename}.yml")
+
+ # Extract referenced custom formats
+ format_names = extract_format_names(profile_yaml)
+
+ for format_name in format_names:
+ # Skip if already processed
+                    # add_unique_suffix is a no-op when import_as_unique is off
+                    display_name = self.add_unique_suffix(format_name)
+ if display_name in processed_formats:
+ continue
+
+ try:
+ format_yaml = load_yaml(f"custom_format/{format_name}.yml")
+ compiled_format = compile_format_to_api_structure(format_yaml, self.arr_type)
+
+ if self.import_as_unique:
+ compiled_format['name'] = self.add_unique_suffix(compiled_format['name'])
+
+ all_formats.append(compiled_format)
+ processed_formats.add(compiled_format['name'])
+ import_logger.update_compilation(format_name)
+
+                    except Exception as e:
+                        import_logger.error(f"{e}", format_name, 'compilation')
+                        # Count the failed attempt
+                        import_logger.update_compilation(f"{format_name} (failed)")
+
+ # Generate language formats if needed
+ language = profile_yaml.get('language', 'any')
+ if language != 'any' and '_' in language:
+ # Check cache first
+ if language not in language_formats_cache:
+ language_formats = generate_language_formats(language, self.arr_type)
+ compiled_langs = []
+
+ for lang_format in language_formats:
+ lang_name = lang_format.get('name', 'Language format')
+ compiled_lang = compile_format_to_api_structure(lang_format, self.arr_type)
+
+ if self.import_as_unique:
+ compiled_lang['name'] = self.add_unique_suffix(compiled_lang['name'])
+
+ compiled_langs.append(compiled_lang)
+
+ # Add to all_formats only on first compilation
+ if compiled_lang['name'] not in processed_formats:
+ all_formats.append(compiled_lang)
+ processed_formats.add(compiled_lang['name'])
+ import_logger.update_compilation(lang_name)
+
+ # Store in cache
+ language_formats_cache[language] = compiled_langs
+
+ # Compile profile
+ compiled_profile = compile_profile_to_api_structure(profile_yaml, self.arr_type)
+
+ if self.import_as_unique:
+ compiled_profile['name'] = self.add_unique_suffix(compiled_profile['name'])
+
+ # Update format references in profile
+ for item in compiled_profile.get('formatItems', []):
+ item['name'] = self.add_unique_suffix(item['name'])
+
+ profiles.append(compiled_profile)
+ import_logger.update_compilation(f"Profile: {compiled_profile['name']}")
+
+ except Exception as e:
+ import_logger.error(f"{str(e)}", f"Profile: {filename}", 'compilation')
+ import_logger.update_compilation(f"Profile: {filename} (failed)")
+
+ # Set total to what we actually attempted
+ import_logger.total_compilation = import_logger.current_compilation
+ import_logger.compilation_complete()
+
+ return {
+ 'profiles': profiles,
+ 'formats': all_formats
+ }
+
+ def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
+ """
+ Import compiled profiles and formats to Arr instance.
+
+ Args:
+ compiled_data: Dictionary with 'profiles' and 'formats' keys
+ dry_run: If True, simulate import without making changes
+
+ Returns:
+ Import results
+ """
+ results = {
+ 'added': 0,
+ 'updated': 0,
+ 'failed': 0,
+ 'details': []
+ }
+
+ import_logger = get_import_logger()
+
+ # Set total import count
+ import_logger.total_import = len(compiled_data['formats']) + len(compiled_data['profiles'])
+ import_logger._import_shown = False # Reset import shown flag
+
+ # Import formats first
+ if compiled_data['formats']:
+ existing_formats = self.arr.get_all_formats()
+ format_map = {f['name']: f['id'] for f in existing_formats}
+
+ formats_failed = []
+
+ for format_data in compiled_data['formats']:
+ format_name = format_data['name']
+
+ try:
+ if format_name in format_map:
+ # Update existing
+ if not dry_run:
+ format_data['id'] = format_map[format_name]
+ self.arr.put(
+ f"/api/v3/customformat/{format_map[format_name]}",
+ format_data
+ )
+ import_logger.update_import(format_name, "updated")
+ else:
+ # Add new
+ if dry_run:
+ # In dry run, pretend we got an ID
+ # Use a predictable fake ID for dry run
+ fake_id = 999000 + len(format_map)
+ format_map[format_name] = fake_id
+ else:
+ response = self.arr.post("/api/v3/customformat", format_data)
+ format_map[format_name] = response['id']
+ import_logger.update_import(format_name, "added")
+
+ except Exception as e:
+ import_logger.update_import(format_name, "failed")
+ import_logger.error(f"Failed to import format {format_name}: {e}", format_name)
+ formats_failed.append(format_name)
+
+            if formats_failed:
+                import_logger.warning(
+                    f"{len(formats_failed)} format(s) failed to import: {formats_failed}")
+
+ # Refresh format map for profile syncing (MUST be done after importing formats)
+ if not dry_run:
+ # In real mode, get the actual current formats from the server
+ existing_formats = self.arr.get_all_formats()
+ format_map = {f['name']: f['id'] for f in existing_formats}
+ # In dry run mode, format_map already has fake IDs from above
+
+ # Sync format IDs in profiles
+ for profile in compiled_data['profiles']:
+ synced_items = []
+ processed_formats = set()
+
+ # First add all explicitly defined formats with their scores
+ for item in profile.get('formatItems', []):
+ if item['name'] in format_map:
+ synced_items.append({
+ 'format': format_map[item['name']],
+ 'name': item['name'],
+ 'score': item.get('score', 0)
+ })
+ processed_formats.add(item['name'])
+ else:
+ import_logger.warning(f"Format {item['name']} not found for profile {profile['name']}")
+
+ # Then add ALL other existing formats with score 0 (Arr requirement)
+ for format_name, format_id in format_map.items():
+ if format_name not in processed_formats:
+ synced_items.append({
+ 'format': format_id,
+ 'name': format_name,
+ 'score': 0
+ })
+
+ profile['formatItems'] = synced_items
+
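+        # Illustrative outcome: if the server knows formats A (id 1) and B (id 2) and
+        # the profile scores only A at 50, formatItems becomes
+        # [{'format': 1, 'name': 'A', 'score': 50}, {'format': 2, 'name': 'B', 'score': 0}].
+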
+ # Import profiles
+ existing_profiles = self.arr.get_all_profiles()
+ profile_map = {p['name']: p['id'] for p in existing_profiles}
+
+ for profile_data in compiled_data['profiles']:
+ profile_name = profile_data['name']
+
+ try:
+ if profile_name in profile_map:
+ # Update existing
+ if not dry_run:
+ profile_data['id'] = profile_map[profile_name]
+ self.arr.put(
+ f"/api/v3/qualityprofile/{profile_data['id']}",
+ profile_data
+ )
+
+ import_logger.update_import(f"Profile: {profile_name}", "updated")
+ results['updated'] += 1
+ results['details'].append({
+ 'name': profile_name,
+ 'action': 'updated'
+ })
+ else:
+ # Add new
+ if not dry_run:
+ self.arr.post("/api/v3/qualityprofile", profile_data)
+
+ import_logger.update_import(f"Profile: {profile_name}", "added")
+ results['added'] += 1
+ results['details'].append({
+ 'name': profile_name,
+ 'action': 'added'
+ })
+
+ except Exception as e:
+ import_logger.update_import(f"Profile: {profile_name}", "failed")
+ import_logger.error(f"Failed to import profile {profile_name}: {e}", profile_name)
+ results['failed'] += 1
+ results['details'].append({
+ 'name': profile_name,
+ 'action': 'failed',
+ 'error': str(e)
+ })
+
+ # Show import summary
+ import_logger.import_complete()
+ import_logger._import_shown = True
+
+ return results
\ No newline at end of file
diff --git a/backend/app/importer/utils.py b/backend/app/importer/utils.py
new file mode 100644
index 0000000..0fc6ba2
--- /dev/null
+++ b/backend/app/importer/utils.py
@@ -0,0 +1,165 @@
+"""Utility functions for import operations."""
+import logging
+import yaml
+from pathlib import Path
+from typing import Dict, List, Any, Set
+from ..data.utils import get_category_directory
+
+logger = logging.getLogger(__name__)
+
+
+def load_yaml(file_path: str) -> Dict[str, Any]:
+ """
+ Load a YAML file.
+
+ Args:
+ file_path: Path to YAML file (relative to data directory)
+
+ Returns:
+ Parsed YAML data
+
+ Raises:
+ FileNotFoundError: If file doesn't exist
+ yaml.YAMLError: If YAML is invalid
+ """
+ # Handle both absolute and relative paths
+ if file_path.startswith('/'):
+ full_path = Path(file_path)
+ else:
+ # Check if it starts with a category
+ if file_path.startswith('custom_format/'):
+ base_dir = get_category_directory('custom_format')
+ filename = file_path.replace('custom_format/', '')
+ full_path = Path(base_dir) / filename
+ elif file_path.startswith('profile/'):
+ base_dir = get_category_directory('profile')
+ filename = file_path.replace('profile/', '')
+ full_path = Path(base_dir) / filename
+        else:
+            # No category prefix: fall back to treating it as a plain relative path
+            full_path = Path(file_path)
+
+ if not full_path.exists():
+ raise FileNotFoundError(f"File not found: {full_path}")
+
+ with open(full_path, 'r', encoding='utf-8') as f:
+ return yaml.safe_load(f)
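+
+# Illustrative calls (filenames hypothetical):
+#   load_yaml('custom_format/HDR10.yml')   # resolved via get_category_directory('custom_format')
+#   load_yaml('/config/profile/1080p.yml') # absolute paths are used verbatim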
+
+
+def extract_format_names(profile_data: Dict[str, Any]) -> Set[str]:
+ """
+ Extract all custom format names referenced in a profile.
+
+ Args:
+ profile_data: Profile YAML data
+
+ Returns:
+ Set of unique format names
+ """
+ format_names = set()
+
+ # Extract from main custom_formats
+ for cf in profile_data.get('custom_formats', []):
+ if isinstance(cf, dict) and 'name' in cf:
+ format_names.add(cf['name'])
+
+ # Extract from app-specific custom_formats
+ for key in ['custom_formats_radarr', 'custom_formats_sonarr']:
+ for cf in profile_data.get(key, []):
+ if isinstance(cf, dict) and 'name' in cf:
+ format_names.add(cf['name'])
+
+ return format_names
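+
+# Illustrative: a profile whose custom_formats list holds {'name': 'HDR10', 'score': 50}
+# and whose custom_formats_sonarr holds {'name': 'x265'} yields {'HDR10', 'x265'}
+# (format names hypothetical).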
+
+
+def generate_language_formats(language: str, arr_type: str) -> List[Dict[str, Any]]:
+ """
+ Generate language-specific format configurations.
+
+ Args:
+ language: Language string (e.g., 'must_english', 'prefer_french')
+ arr_type: 'radarr' or 'sonarr'
+
+ Returns:
+ List of format configurations for language handling
+ """
+ if language == 'any' or '_' not in language:
+ return []
+
+ behavior, language_code = language.split('_', 1)
+ formats = []
+
+ # Load base "Not English" format as template
+ try:
+ base_format = load_yaml('custom_format/Not English.yml')
+
+ # Create "Not [Language]" format
+ not_format = base_format.copy()
+ lang_display = language_code.capitalize()
+ not_format['name'] = f"Not {lang_display}"
+
+ # Update conditions for the specific language
+ for condition in not_format.get('conditions', []):
+ if condition.get('type') == 'language':
+ condition['language'] = language_code
+ if 'name' in condition:
+ condition['name'] = condition['name'].replace('English', lang_display)
+
+ formats.append(not_format)
+
+ # For 'only' behavior, add additional formats
+ if behavior == 'only':
+ additional_format_names = [
+ "Not Only English",
+ "Not Only English (Missing)"
+ ]
+
+ for format_name in additional_format_names:
+ try:
+ additional = load_yaml(f'custom_format/{format_name}.yml')
+ additional['name'] = additional['name'].replace('English', lang_display)
+
+ for condition in additional.get('conditions', []):
+ if condition.get('type') == 'language':
+ condition['language'] = language_code
+ if 'name' in condition:
+ condition['name'] = condition['name'].replace('English', lang_display)
+
+ formats.append(additional)
+                except Exception:
+                    # Silent fail - format doesn't exist
+                    pass
+
+    except Exception:
+        # Silent fail - template missing or unreadable; return whatever was built
+        pass
+
+ return formats
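+
+# Illustrative: generate_language_formats('must_french', 'radarr') clones the
+# "Not English" template into a "Not French" format; 'only_french' additionally
+# clones "Not Only English" and "Not Only English (Missing)" with French conditions.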
+
+
+def load_regex_patterns() -> Dict[str, str]:
+ """
+ Load all regex patterns from the regex directory.
+
+ Returns:
+ Dictionary mapping pattern names to regex patterns
+ """
+ from ..data.utils import REGEX_DIR
+
+ patterns = {}
+ pattern_dir = Path(REGEX_DIR)
+
+ if not pattern_dir.exists():
+ return patterns
+
+ for pattern_file in pattern_dir.glob('*.yml'):
+ try:
+ with open(pattern_file, 'r', encoding='utf-8') as f:
+ data = yaml.safe_load(f)
+ if data and 'name' in data and 'pattern' in data:
+ patterns[data['name']] = data['pattern']
+        except Exception:
+            # Silent fail for individual pattern files
+            pass
+
+ return patterns
\ No newline at end of file
diff --git a/backend/app/init.py b/backend/app/init.py
index 84e0f2f..d175ee3 100644
--- a/backend/app/init.py
+++ b/backend/app/init.py
@@ -67,6 +67,12 @@ def setup_logging():
'handlers': ['console', 'file', 'importarr_file'],
'propagate': False
},
+ # The 'importer' logger (new import module) - reduce verbosity
+ 'importer': {
+ 'level': 'WARNING',
+ 'handlers': ['file'],
+ 'propagate': False
+ },
# The 'hash' logger uses all three handlers
'hash': {
@@ -90,6 +96,16 @@ def setup_logging():
'level': 'ERROR',
'handlers': ['console', 'file'],
'propagate': False
+ },
+ 'urllib3': {
+ 'level': 'WARNING',
+ 'handlers': ['console', 'file'],
+ 'propagate': False
+ },
+ 'urllib3.connectionpool': {
+ 'level': 'WARNING',
+ 'handlers': ['console', 'file'],
+ 'propagate': False
}
}
}
diff --git a/backend/app/main.py b/backend/app/main.py
index f6c743a..efcb54c 100644
--- a/backend/app/main.py
+++ b/backend/app/main.py
@@ -8,6 +8,7 @@ from .git import bp as git_bp
from .arr import bp as arr_bp
from .data import bp as data_bp
from .importarr import bp as importarr_bp
+from .importer.routes import bp as new_import_bp
from .task import bp as tasks_bp, TaskScheduler
from .backup import bp as backup_bp
from .db import run_migrations, get_settings
@@ -70,6 +71,7 @@ def create_app():
app.register_blueprint(git_bp, url_prefix='/api/git')
app.register_blueprint(data_bp, url_prefix='/api/data')
app.register_blueprint(importarr_bp, url_prefix='/api/import')
+ app.register_blueprint(new_import_bp, url_prefix='/api/v2/import')
app.register_blueprint(arr_bp, url_prefix='/api/arr')
app.register_blueprint(tasks_bp, url_prefix='/api/tasks')
app.register_blueprint(media_management_bp)
diff --git a/backend/app/task/tasks.py b/backend/app/task/tasks.py
index d09b60c..87ed68e 100644
--- a/backend/app/task/tasks.py
+++ b/backend/app/task/tasks.py
@@ -165,36 +165,17 @@ class ImportScheduleTask(Task):
"""
def run_job(self):
-        from ..arr.manager import get_arr_config, run_import_for_config
+        from ..importer import handle_scheduled_import
-
- # 1) Attempt to parse the config ID from the self.name
- match = re.search(r"#(\d+)", self.name)
- if not match:
- task_logger.error(
- f"[ImportScheduleTask] Could not parse config ID from task name '{self.name}'. Skipping."
- )
- return
-
- config_id = match.group(1)
task_logger.info(
- f"[ImportScheduleTask] Found config_id={config_id} from task '{self.name}'"
+ f"[ImportScheduleTask] Running scheduled import for task_id={self.id} ({self.name})"
)
-
- # 2) Get the corresponding arr_config
- arr_config_response = get_arr_config(config_id)
- if not arr_config_response.get('success'):
+ result = handle_scheduled_import(self.id)
+ if not result.get('success'):
task_logger.error(
- f"[ImportScheduleTask] arr_config id={config_id} not found. Skipping."
+ f"[ImportScheduleTask] Scheduled import failed for task_id={self.id}: {result}"
+ )
+ else:
+ task_logger.info(
+ f"[ImportScheduleTask] Scheduled import completed for task_id={self.id}: added={result.get('added', 0)}, updated={result.get('updated', 0)}, failed={result.get('failed', 0)}"
)
- return
-
- config_data = arr_config_response['data']
-
- # 3) Call run_import_for_config
- task_logger.info(
- f"[ImportScheduleTask] Running run_import_for_config for arr_config #{config_id}"
- )
- run_import_for_config(config_data)
- task_logger.info(
- f"[ImportScheduleTask] Done importing for arr_config #{config_id}")
diff --git a/docker-compose.yml b/docker-compose.yml
index 0be269c..4ddecb5 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,4 @@
version: '3.8'
-
services:
frontend:
build: ./frontend
@@ -18,7 +17,8 @@ services:
volumes:
- ./backend:/app
- ./config:/config
+ environment:
+ - TZ=Australia/Adelaide
env_file:
- .env
- restart: always
-
+ restart: always
\ No newline at end of file
diff --git a/frontend/src/api/arr.js b/frontend/src/api/arr.js
index f6fe5fe..5974728 100644
--- a/frontend/src/api/arr.js
+++ b/frontend/src/api/arr.js
@@ -33,7 +33,17 @@ export const pingService = async (url, apiKey, type) => {
export const saveArrConfig = async config => {
try {
- const response = await axios.post(`/api/arr/config`, config, {
+ // Validate and auto-correct sync_interval if schedule method
+ const validatedConfig = {...config};
+ if (validatedConfig.sync_method === 'schedule' && validatedConfig.sync_interval) {
+ if (validatedConfig.sync_interval < 60) {
+ validatedConfig.sync_interval = 60;
+ } else if (validatedConfig.sync_interval > 43200) {
+ validatedConfig.sync_interval = 43200;
+ }
+ }
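+  // e.g. a submitted interval of 30 is clamped to 60, 100000 to 43200 (illustrative values).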
+
+ const response = await axios.post(`/api/arr/config`, validatedConfig, {
validateStatus: status => {
return (status >= 200 && status < 300) || status === 409;
}
@@ -54,7 +64,17 @@ export const saveArrConfig = async config => {
export const updateArrConfig = async (id, config) => {
try {
- const response = await axios.put(`/api/arr/config/${id}`, config, {
+ // Validate and auto-correct sync_interval if schedule method
+ const validatedConfig = {...config};
+ if (validatedConfig.sync_method === 'schedule' && validatedConfig.sync_interval) {
+ if (validatedConfig.sync_interval < 60) {
+ validatedConfig.sync_interval = 60;
+ } else if (validatedConfig.sync_interval > 43200) {
+ validatedConfig.sync_interval = 43200;
+ }
+ }
+
+ const response = await axios.put(`/api/arr/config/${id}`, validatedConfig, {
validateStatus: status => {
return (status >= 200 && status < 300) || status === 409;
}
diff --git a/frontend/src/api/import.js b/frontend/src/api/import.js
index c9a0b34..d89d1f9 100644
--- a/frontend/src/api/import.js
+++ b/frontend/src/api/import.js
@@ -1,77 +1,40 @@
import axios from 'axios';
-const IMPORT_BASE_URL = '/api/import';
+const API_URL = '/api/v2/import';
/**
- * Import multiple formats to a specified arr instance
- * @param {string|number} arr - The arr ID to import to
- * @param {string[]} formatNames - Array of format file names to import
- * @param {boolean} [all] - Whether to import all formats
- * @returns {Promise<Object>}
-   Are you sure you want to unlink the repository?
-   This will permanently remove all cloned repository files from your system. This action cannot be undone.
+   This will disconnect your repository from Profilarr. You will need to re-link it to sync configuration files again.