diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 3edf8c5..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# Dockerfile
-FROM python:3.9-slim
-WORKDIR /app
-# Install git and gosu for user switching
-RUN apt-get update && apt-get install -y git gosu && rm -rf /var/lib/apt/lists/*
-# Copy pre-built files from dist directory
-COPY dist/backend/app ./app
-COPY dist/static ./app/static
-COPY dist/requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-# Copy and setup entrypoint script
-COPY entrypoint.sh /entrypoint.sh
-RUN chmod +x /entrypoint.sh
-LABEL org.opencontainers.image.authors="Dictionarry dictionarry@pm.me"
-LABEL org.opencontainers.image.description="Profilarr - Profile manager for *arr apps"
-LABEL org.opencontainers.image.source="https://github.com/Dictionarry-Hub/profilarr"
-LABEL org.opencontainers.image.title="Profilarr"
-LABEL org.opencontainers.image.version="beta"
-EXPOSE 6868
-ENTRYPOINT ["/entrypoint.sh"]
-CMD ["gunicorn", "--bind", "0.0.0.0:6868", "--timeout", "600", "app.main:create_app()"]
\ No newline at end of file
diff --git a/README.md b/README.md
deleted file mode 100644
index 0b57f53..0000000
--- a/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Profilarr
-
-[![GitHub release](https://img.shields.io/github/v/release/Dictionarry-Hub/profilarr?color=blue)](https://github.com/Dictionarry-Hub/profilarr/releases)
-[![Docker Pulls](https://img.shields.io/docker/pulls/santiagosayshey/profilarr?color=blue)](https://hub.docker.com/r/santiagosayshey/profilarr)
-[![License](https://img.shields.io/github/license/Dictionarry-Hub/profilarr?color=blue)](https://github.com/Dictionarry-Hub/profilarr/blob/main/LICENSE)
-[![Website](https://img.shields.io/badge/Website-dictionarry.dev-blue)](https://dictionarry.dev/)
-[![Discord](https://img.shields.io/discord/1202375791556431892?color=blue&logo=discord&logoColor=white)](https://discord.com/invite/Y9TYP6jeYZ)
-[![Buy Me A Coffee](https://img.shields.io/badge/Buy%20Me%20A%20Coffee-Support-blue?logo=buy-me-a-coffee)](https://www.buymeacoffee.com/santiagosayshey)
-[![GitHub Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Support-blue?logo=github-sponsors)](https://github.com/sponsors/Dictionarry-Hub)
-
-Configuration management tool for Radarr/Sonarr that automates importing and version control of custom formats and quality profiles.
-
-![Profilarr Preview](.github/images/preview.png)
-
-## Features
-
-- 🔄 Automatic synchronization with remote configuration databases
-- 🎯 Direct import to Radarr/Sonarr instances
-- 🔧 Git-based version control of your configurations
-- ⚡ Preserve local customizations during updates
-- 🛠️ Built-in conflict resolution
-
-## Getting Started
-
-### Compatibility
-
-| Architecture                   | Support      |
-| ------------------------------ | ------------ |
-| amd64 (x86_64)                 | ✅ Supported |
-| arm64 (Apple Silicon, RPi 4/5) | ✅ Supported |
-
-### Quick Installation (Docker Compose)
-
-```yaml
-services:
-  profilarr:
-    image: santiagosayshey/profilarr:latest # Use :beta for early access to new features
-    container_name: profilarr
-    ports:
-      - 6868:6868
-    volumes:
-      - /path/to/your/data:/config # Replace with your actual path
-    environment:
-      - TZ=UTC # Set your timezone
-    restart: unless-stopped
-```
-
-After deployment, access the web UI at `http://[address]:6868` to begin setup.
-
-> **Note for Windows users:** The database is case-sensitive. Use a docker volume or the WSL file system to avoid issues:
->
-> - Docker volume example: `profilarr_data:/config`
-> - WSL filesystem example: `/home/username/docker/profilarr:/config`
-
-### Complete Documentation
-
-Visit our comprehensive documentation at [dictionarry.dev](https://dictionarry.dev/profilarr-setup/installation) for detailed installation instructions and usage guides.
-
-## Status
-
-Currently in beta. Part of the [Dictionarry](https://github.com/Dictionarry-Hub) project to simplify media automation.
-
-### Known Issues
-
-- https://github.com/Dictionarry-Hub/profilarr/issues
-
-### Personal Note
-
-Profilarr is maintained by a single CS student with no formal development experience, in their spare time. Development happens when time allows, which may affect response times for fixes and new features. The project is continuously improving, and your patience, understanding, and contributions are greatly appreciated as Profilarr grows and matures.
diff --git a/backend/Dockerfile b/backend/Dockerfile
deleted file mode 100644
index 6e6b528..0000000
--- a/backend/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM python:3.9
-WORKDIR /app
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-COPY . .
-# Use gunicorn with 10-minute timeout
-CMD ["python", "-m", "app.main"]
\ No newline at end of file
diff --git a/backend/app/data/__init__.py b/backend/app/data/__init__.py
deleted file mode 100644
index 0267b9d..0000000
--- a/backend/app/data/__init__.py
+++ /dev/null
@@ -1,288 +0,0 @@
-from flask import Blueprint, request, jsonify
-import logging
-import os
-import yaml
-from .utils import (get_category_directory, load_yaml_file, validate,
-                    save_yaml_file, update_yaml_file, get_file_modified_date,
-                    test_regex_pattern, test_format_conditions,
-                    check_delete_constraints, filename_to_display)
-from ..db import add_format_to_renames, remove_format_from_renames, is_format_in_renames
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-bp = Blueprint('data', __name__)
-
-
-@bp.route('/<category>', methods=['GET'])
-def retrieve_all(category):
-    try:
-        directory = get_category_directory(category)
-        files = [f for f in os.listdir(directory) if f.endswith('.yml')]
-        logger.debug(f"Found {len(files)} files in {category}")
-
-        if not files:
-            return jsonify([]), 200
-
-        result = []
-        errors = 0
-        for file_name in files:
-            file_path = os.path.join(directory, file_name)
-            try:
-                content = load_yaml_file(file_path)
-                # Add metadata for custom formats
-                if category == 'custom_format':
-                    content['metadata'] = {
-                        'includeInRename': is_format_in_renames(content['name'])
-                    }
-                result.append({
-                    "file_name": file_name,
-                    "content": content,
-                    "modified_date": get_file_modified_date(file_path)
-                })
-            except yaml.YAMLError:
-                errors += 1
-                result.append({
-                    "file_name": file_name,
-                    "error": "Failed to parse YAML"
-                })
-
-        logger.info(
-            f"Processed {len(files)} {category} files ({errors} errors)")
-        return jsonify(result), 200
-
-    except ValueError as ve:
-        logger.error(ve)
-        return jsonify({"error": str(ve)}), 400
-    except FileNotFoundError as fnfe:
-        logger.error(fnfe)
-        return jsonify({"error": str(fnfe)}), 404
-    except Exception as e:
-        logger.exception("Unexpected error occurred")
-        return jsonify({"error": "An unexpected error occurred"}), 500
-
-
-@bp.route('/<category>/<name>',
-          methods=['GET', 'POST', 'PUT', 'DELETE'])
-def handle_item(category, name):
-    try:
-        directory = get_category_directory(category)
-        file_name = f"{name}.yml" if not name.endswith('.yml') else name
-        file_path = os.path.join(directory, file_name)
-
-        if request.method == 'GET':
-            try:
-                content = load_yaml_file(file_path)
-                # Add metadata for custom formats
-                if category == 'custom_format':
-                    content['metadata'] = {
-                        'includeInRename': is_format_in_renames(content['name'])
-                    }
-                return jsonify({
-                    "file_name": file_name,
-                    "content": content,
-                    "modified_date": get_file_modified_date(file_path)
-                }), 200
-            except FileNotFoundError:
-                return jsonify({"error": f"File {file_name} not found"}), 404
-            except yaml.YAMLError:
-                return jsonify(
-                    {"error": f"Failed to parse YAML file {file_name}"}), 500
-
-        elif request.method == 'DELETE':
-            if not os.path.exists(file_path):
-                return jsonify({"error": f"File {file_name} not found"}), 404
-
-            # Check for references before deleting
-            can_delete, error_message = check_delete_constraints(
-                category, filename_to_display(name))
-            if not can_delete:
-                logger.error(
-                    f"Delete constraint check failed for {name}: {error_message}")
-                return jsonify({"error": error_message}), 409
-
-            try:
-                # If it's a custom format, remove from renames table first
-                if category == 'custom_format':
-                    # Get the format name from the file before deleting it
-                    content = load_yaml_file(file_path)
-                    format_name = content.get('name')
-                    if format_name:
-                        # Check if it exists in renames before trying to remove
-                        if is_format_in_renames(format_name):
-                            remove_format_from_renames(format_name)
-                            logger.info(
-                                f"Removed {format_name} from renames table")
-                        else:
-                            logger.info(
-                                f"{format_name} was not in renames table")
-
-                # Then delete the file
-                os.remove(file_path)
-                return jsonify(
-                    {"message": f"Successfully deleted {file_name}"}), 200
-            except OSError as e:
-                logger.error(f"Error deleting file {file_path}: {e}")
-                return jsonify({"error": f"Failed to delete {file_name}"}), 500
-
-        elif request.method == 'POST':
-            # If a file already exists with that name, conflict
-            if os.path.exists(file_path):
-                return jsonify({"error": f"File {file_name} already exists"}), 409
-
-            try:
-                data = request.get_json()
-
-                if data and 'name' in data:
-                    data['name'] = data['name'].strip()
-
-                # Handle rename inclusion for custom formats
-                if category == 'custom_format':
-                    include_in_rename = data.get('metadata', {}).get(
-                        'includeInRename', False)
-                    # Remove metadata before saving YAML
-                    if 'metadata' in data:
-                        del data['metadata']
-
-                if validate(data, category):
-                    # Save YAML
-                    save_yaml_file(file_path, data, category)
-
-                    # If custom format, handle rename table
-                    if category == 'custom_format' and include_in_rename:
-                        add_format_to_renames(data['name'])
-
-                    return jsonify(
-                        {"message": f"Successfully created {file_name}"}), 201
-
-                return jsonify({"error": "Validation failed"}), 400
-
-            except Exception as e:
-                logger.error(f"Error creating file: {e}")
-                return jsonify({"error": str(e)}), 500
-
-        elif request.method == 'PUT':
-            if not os.path.exists(file_path):
-                return jsonify({"error": f"File {file_name} not found"}), 404
-
-            try:
-                data = request.get_json()
-                logger.info(f"Received PUT data for {name}: {data}")
-
-                if data and 'name' in data:
-                    data['name'] = data['name'].strip()
-                if data and 'rename' in data:
-                    data['rename'] = data['rename'].strip()
-
-                # Handle rename inclusion for custom formats
-                if category == 'custom_format':
-                    include_in_rename = data.get('metadata', {}).get(
-                        'includeInRename', False)
-
-                    # Get current content to check for rename
-                    current_content = load_yaml_file(file_path)
-                    old_name = current_content.get('name')
-                    new_name = data['name']
-
-                    # Handle renames and toggles
-                    if old_name != new_name and include_in_rename:
-                        # Handle rename while keeping in table
-                        remove_format_from_renames(old_name)
-                        add_format_to_renames(new_name)
-                    elif include_in_rename:
-                        # Just turning it on
-                        add_format_to_renames(new_name)
-                    else:
-                        # Turning it off
-                        remove_format_from_renames(data['name'])
-
-                # Remove metadata before saving YAML
-                if 'metadata' in data:
-                    del data['metadata']
-
-                # Save YAML
-                update_yaml_file(file_path, data, category)
-                return jsonify(
-                    {"message": f"Successfully updated {file_name}"}), 200
-
-            except Exception as e:
-                logger.error(f"Error updating file: {e}")
-                return jsonify({"error": str(e)}), 500
-
-    except ValueError as ve:
-        logger.error(ve)
-        return jsonify({"error": str(ve)}), 400
-    except Exception as e:
-        logger.exception("Unexpected error occurred")
-        return jsonify({"error": "An unexpected error occurred"}), 500
-
-
-@bp.route('/<category>/test', methods=['POST'])
-def run_tests(category):
-    logger.info(f"Received test request for category: {category}")
-
-    try:
-        data = request.get_json()
-        if not data:
-            logger.warning("Rejected test request - no JSON data provided")
-            return jsonify({"error": "No JSON data provided"}), 400
-
-        tests = data.get('tests', [])
-        if not tests:
-            logger.warning("Rejected test request - no test cases provided")
-            return jsonify({"error": "At least one test case is required"}), 400
-
-        if category == 'regex_pattern':
-            pattern = data.get('pattern')
-            logger.info(f"Processing regex test request - Pattern: {pattern}")
-
-            if not pattern:
-                logger.warning("Rejected test request - missing pattern")
-                return jsonify({"error": "Pattern is required"}), 400
-
-            success, message, updated_tests = test_regex_pattern(
-                pattern, tests)
-
-        elif category == 'custom_format':
-            conditions = data.get('conditions', [])
-            logger.info(
-                f"Processing format test request - Conditions: {len(conditions)}")
-
-            if not conditions:
-                logger.warning(
-                    "Rejected test request - no conditions provided")
-                return jsonify({"error": "At least one condition is required"}), 400
-
-            success, message, updated_tests = test_format_conditions(
-                conditions, tests)
-
-        else:
-            logger.warning(
-                f"Rejected test request - invalid category: {category}")
-            return jsonify(
-                {"error": "Testing not supported for this category"}), 400
-
-        logger.info(f"Test execution completed - Success: {success}")
-
-        if not success:
-            logger.warning(f"Test execution failed - {message}")
-            return jsonify({"success": False, "message": message}), 400
-
-        return jsonify({"success": True, "tests": updated_tests}), 200
-
-    except Exception as e:
-        logger.warning(f"Unexpected error in test endpoint: {str(e)}",
-                       exc_info=True)
-        return jsonify({"success": False, "message": str(e)}), 500
diff --git a/backend/app/data/utils.py b/backend/app/data/utils.py
deleted file mode 100644
index 41d53b8..0000000
--- a/backend/app/data/utils.py
+++ /dev/null
@@ -1,725 +0,0 @@
-import os
-import yaml
-import shutil
-import logging
-from datetime import datetime
-from typing import Dict, List, Any, Tuple, Union
-import git
-import regex
-from ..db.queries.arr import update_arr_config_on_rename, update_arr_config_on_delete
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-
-from ..config.config import config
-
-# Directory constants
-REPO_PATH = config.DB_DIR
-REGEX_DIR = config.REGEX_DIR
-FORMAT_DIR = config.FORMAT_DIR
-PROFILE_DIR = config.PROFILE_DIR
-
-# Expected fields for each category
-REGEX_FIELDS = ["name", "pattern", "description", "tags", "tests"]
["name", "pattern", "description", "tags", "tests"] -FORMAT_FIELDS = ["name", "description", "tags", "conditions", "tests"] -PROFILE_FIELDS = [ - "name", - "description", - "tags", - "upgradesAllowed", - "minCustomFormatScore", - "upgradeUntilScore", - "minScoreIncrement", - "custom_formats", # Array of {name, score} objects (backwards compatible) - "custom_formats_radarr", # Array of {name, score} objects for radarr-specific scores - "custom_formats_sonarr", # Array of {name, score} objects for sonarr-specific scores - "qualities", # Array of strings - "upgrade_until", - "language" -] - -# Category mappings -CATEGORY_MAP = { - "custom_format": (FORMAT_DIR, FORMAT_FIELDS), - "regex_pattern": (REGEX_DIR, REGEX_FIELDS), - "profile": (PROFILE_DIR, PROFILE_FIELDS) -} - - -def display_to_filename(name: str) -> str: - """Convert display name (with []) to filename (with ())""" - return f"{name.replace('[', '(').replace(']', ')')}.yml" - - -def filename_to_display(filename: str) -> str: - """Convert filename (with ()) back to display name (with [])""" - name = filename[:-4] if filename.endswith('.yml') else filename - return name.replace('(', '[').replace(')', ']') - - -def _setup_yaml_quotes(): - """Configure YAML to quote string values""" - - def str_presenter(dumper, data): - return dumper.represent_scalar('tag:yaml.org,2002:str', - data, - style="'") - - yaml.add_representer(str, str_presenter) - - -def get_file_modified_date(file_path: str) -> str: - """Get file last modified date in ISO format""" - try: - stats = os.stat(file_path) - return datetime.fromtimestamp(stats.st_mtime).isoformat() - except Exception as e: - logger.error(f"Error getting modified date for {file_path}: {e}") - return None - - -def get_category_directory(category: str) -> str: - try: - directory, _ = CATEGORY_MAP[category] - except KeyError: - logger.error(f"Invalid category requested: {category}") - raise ValueError(f"Invalid category: {category}") - - if not os.path.exists(directory): - logger.error(f"Directory not found: {directory}") - raise FileNotFoundError(f"Directory not found: {directory}") - - return directory - - -def load_yaml_file(file_path: str) -> Dict[str, Any]: - file_path = file_path.replace('[', '(').replace(']', ')') - - if not os.path.exists(file_path): - logger.error(f"File not found: {file_path}") - raise FileNotFoundError(f"File not found: {file_path}") - - try: - with open(file_path, 'r') as f: - content = yaml.safe_load(f) - return content - - except yaml.YAMLError as e: - logger.error(f"Error parsing YAML file {file_path}: {e}") - raise - - except Exception as e: - logger.error(f"Unexpected error reading file {file_path}: {e}") - raise - - -def validate(data: Dict[str, Any], category: str) -> bool: - if not isinstance(data, dict): - return False - - _, fields = CATEGORY_MAP[category] - return all(field in data for field in fields) - - -def save_yaml_file(file_path: str, - data: Dict[str, Any], - category: str, - use_data_name: bool = True) -> None: - """ - Save YAML data to a file - Args: - file_path: The path where the file should be saved - data: The data to save - category: The category of data - use_data_name: If True, use the name from data to create filename. If False, use the provided file_path as is. 
- """ - if not validate(data, category): - raise ValueError("Invalid data format") - - directory = os.path.dirname(file_path) - - if use_data_name: - filename = display_to_filename(data['name']) - safe_file_path = os.path.join(directory, filename) - else: - safe_file_path = file_path - - _, fields = CATEGORY_MAP[category] - ordered_data = {field: data[field] for field in fields} - - _setup_yaml_quotes() - - with open(safe_file_path, 'w') as f: - yaml.safe_dump(ordered_data, f, sort_keys=False) - - -def update_yaml_file(file_path: str, data: Dict[str, Any], - category: str) -> None: - try: - # Check if this is a rename operation - if 'rename' in data: - new_name = data['rename'] - old_name = filename_to_display(os.path.basename(file_path)[:-4]) - - directory = os.path.dirname(file_path) - new_file_path = os.path.join(directory, - display_to_filename(new_name)) - - # Update references before performing the rename - try: - # Update regular references - updated_files = update_references(category, old_name, new_name) - logger.info(f"Updated references in: {updated_files}") - - # Update arr configs if this is a format or profile - if category in ['custom_format', 'profile']: - arr_category = 'customFormats' if category == 'custom_format' else 'profiles' - updated_configs = update_arr_config_on_rename( - arr_category, old_name, new_name) - if updated_configs: - logger.info( - f"Updated arr configs for {category} rename: {updated_configs}" - ) - - except Exception as e: - logger.error(f"Failed to update references: {e}") - raise Exception(f"Failed to update references: {str(e)}") - - # Remove rename field and update the name field in the data - data_to_save = {k: v for k, v in data.items() if k != 'rename'} - data_to_save['name'] = new_name - - repo = git.Repo(REPO_PATH) - rel_old_path = os.path.relpath(file_path, REPO_PATH) - rel_new_path = os.path.relpath(new_file_path, REPO_PATH) - - try: - # First, save the content changes to the current file - save_yaml_file(file_path, - data_to_save, - category, - use_data_name=False) - - # Stage the content changes first - repo.index.add([rel_old_path]) - - # Then perform the rename - tracked_files = repo.git.ls_files().splitlines() - is_tracked = rel_old_path in tracked_files - - if is_tracked: - # Use git mv for tracked files - repo.git.mv(rel_old_path, rel_new_path) - else: - # For untracked files, manually move - os.rename(file_path, new_file_path) - # Stage the new file - repo.index.add([rel_new_path]) - - except git.GitCommandError as e: - logger.error(f"Git operation failed: {e}") - raise Exception(f"Failed to rename file: {str(e)}") - except OSError as e: - logger.error(f"File operation failed: {e}") - raise Exception(f"Failed to rename file: {str(e)}") - - else: - # Normal update without rename - backup_path = f"{file_path}.bak" - shutil.copy2(file_path, backup_path) - try: - save_yaml_file(file_path, data, category) - os.remove(backup_path) - except Exception as e: - shutil.move(backup_path, file_path) - raise - - except Exception as e: - raise - - -def check_delete_constraints(category: str, name: str) -> Tuple[bool, str]: - """ - Check if deleting an item would break any references. - Returns (can_delete, error_message) tuple. 
- """ - try: - # Protected custom formats that cannot be deleted - PROTECTED_FORMATS = [ - "Not English", "Not Only English", "Not Only English (Missing)" - ] - - # Convert the input name to use parentheses for comparison - check_name = name.replace('[', '(').replace(']', ')') - logger.debug( - f"Checking constraints for {category}: {name} (normalized as {check_name})" - ) - - # Check protected formats first - if category == 'custom_format' and check_name in [ - f.replace('[', '(').replace(']', ')') - for f in PROTECTED_FORMATS - ]: - return False, "This format cannot be deleted as it's required for language processing functionality" - - references = [] - - if category == 'regex_pattern': - # Check all custom formats for references to this pattern - format_dir = get_category_directory('custom_format') - for format_file in os.listdir(format_dir): - if not format_file.endswith('.yml'): - continue - - format_path = os.path.join(format_dir, format_file) - try: - format_data = load_yaml_file(format_path) - # Check each condition in the format - for condition in format_data.get('conditions', []): - if condition['type'] in [ - 'release_title', 'release_group', 'edition' - ] and condition.get('pattern') == check_name: - references.append( - f"custom format: {format_data['name']}") - except Exception as e: - logger.error( - f"Error checking format file {format_file}: {e}") - continue - - elif category == 'custom_format': - # Check all quality profiles for references to this format - profile_dir = get_category_directory('profile') - for profile_file in os.listdir(profile_dir): - if not profile_file.endswith('.yml'): - continue - - profile_path = os.path.join(profile_dir, profile_file) - try: - profile_data = load_yaml_file(profile_path) - - # Check custom_formats (both/backwards compatible) - custom_formats = profile_data.get('custom_formats', []) - if isinstance(custom_formats, list): - for format_ref in custom_formats: - format_name = format_ref.get('name', '') - # Convert format name to use parentheses for comparison - format_name = format_name.replace('[', '(').replace(']', ')') - logger.debug(f"Comparing '{format_name}' with '{check_name}' in both") - - if format_name == check_name: - references.append(f"quality profile: {profile_data['name']} (both)") - - # Check custom_formats_radarr - custom_formats_radarr = profile_data.get('custom_formats_radarr', []) - if isinstance(custom_formats_radarr, list): - for format_ref in custom_formats_radarr: - format_name = format_ref.get('name', '') - # Convert format name to use parentheses for comparison - format_name = format_name.replace('[', '(').replace(']', ')') - logger.debug(f"Comparing '{format_name}' with '{check_name}' in radarr") - - if format_name == check_name: - references.append(f"quality profile: {profile_data['name']} (radarr)") - - # Check custom_formats_sonarr - custom_formats_sonarr = profile_data.get('custom_formats_sonarr', []) - if isinstance(custom_formats_sonarr, list): - for format_ref in custom_formats_sonarr: - format_name = format_ref.get('name', '') - # Convert format name to use parentheses for comparison - format_name = format_name.replace('[', '(').replace(']', ')') - logger.debug(f"Comparing '{format_name}' with '{check_name}' in sonarr") - - if format_name == check_name: - references.append(f"quality profile: {profile_data['name']} (sonarr)") - - except Exception as e: - logger.error(f"Error checking profile file {profile_file}: {e}") - continue - - # Update arr configs for formats and profiles - if category in 
-            arr_category = 'customFormats' if category == 'custom_format' else 'profiles'
-            updated_configs = update_arr_config_on_delete(arr_category, name)
-            if updated_configs:
-                logger.info(
-                    f"Removed {name} from arr configs: {updated_configs}")
-
-        if references:
-            error_msg = f"Cannot delete - item is referenced in:\n" + "\n".join(
-                f"- {ref}" for ref in references)
-            logger.info(f"Found references for {name}: {error_msg}")
-            return False, error_msg
-
-        logger.info(f"No references found for {name}")
-        return True, ""
-
-    except Exception as e:
-        logger.error(f"Error checking delete constraints: {e}")
-        return False, f"Error checking references: {str(e)}"
-
-
-def update_references(category: str, old_name: str,
-                      new_name: str) -> List[str]:
-    """
-    Update references to a renamed item across all relevant files.
-    Returns a list of files that were updated.
-    """
-    updated_files = []
-
-    try:
-        # Convert names to use parentheses for comparison
-        old_check_name = old_name.replace('[', '(').replace(']', ')')
-        new_check_name = new_name.replace('[', '(').replace(']', ')')
-
-        if category == 'regex_pattern':
-            # Update references in custom formats
-            format_dir = get_category_directory('custom_format')
-            for format_file in os.listdir(format_dir):
-                if not format_file.endswith('.yml'):
-                    continue
-
-                format_path = os.path.join(format_dir, format_file)
-                try:
-                    format_data = load_yaml_file(format_path)
-                    updated = False
-
-                    # Check and update each condition in the format
-                    for condition in format_data.get('conditions', []):
-                        if (condition['type'] in [
-                                'release_title', 'release_group', 'edition'
-                        ] and condition.get('pattern') == old_check_name):
-                            condition['pattern'] = new_check_name
-                            updated = True
-
-                    if updated:
-                        save_yaml_file(format_path,
-                                       format_data,
-                                       'custom_format',
-                                       use_data_name=False)
-                        updated_files.append(
-                            f"custom format: {format_data['name']}")
-
-                except Exception as e:
-                    logger.error(
-                        f"Error updating format file {format_file}: {e}")
-                    continue
-
-        elif category == 'custom_format':
-            # Update references in quality profiles
-            profile_dir = get_category_directory('profile')
-            for profile_file in os.listdir(profile_dir):
-                if not profile_file.endswith('.yml'):
-                    continue
-
-                profile_path = os.path.join(profile_dir, profile_file)
-                try:
-                    profile_data = load_yaml_file(profile_path)
-                    updated = False
-
-                    # Update custom_formats (both/backwards compatible)
-                    custom_formats = profile_data.get('custom_formats', [])
-                    if isinstance(custom_formats, list):
-                        for format_ref in custom_formats:
-                            format_name = format_ref.get('name', '')
-                            # Convert format name to use parentheses for comparison
-                            format_name = format_name.replace('[', '(').replace(']', ')')
-
-                            if format_name == old_check_name:
-                                format_ref['name'] = new_name
-                                updated = True
-
-                    # Update custom_formats_radarr
-                    custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
-                    if isinstance(custom_formats_radarr, list):
-                        for format_ref in custom_formats_radarr:
-                            format_name = format_ref.get('name', '')
-                            # Convert format name to use parentheses for comparison
-                            format_name = format_name.replace('[', '(').replace(']', ')')
-
-                            if format_name == old_check_name:
-                                format_ref['name'] = new_name
-                                updated = True
-
-                    # Update custom_formats_sonarr
-                    custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
-                    if isinstance(custom_formats_sonarr, list):
-                        for format_ref in custom_formats_sonarr:
-                            format_name = format_ref.get('name', '')
-                            # Convert format name to use parentheses for comparison
-                            format_name = format_name.replace('[', '(').replace(']', ')')
-
-                            if format_name == old_check_name:
-                                format_ref['name'] = new_name
-                                updated = True
-
-                    if updated:
-                        save_yaml_file(profile_path,
-                                       profile_data,
-                                       'profile',
-                                       use_data_name=False)
-                        updated_files.append(
-                            f"quality profile: {profile_data['name']}")
-
-                except Exception as e:
-                    logger.error(
-                        f"Error updating profile file {profile_file}: {e}")
-                    continue
-
-        return updated_files
-
-    except Exception as e:
-        logger.error(f"Error updating references: {e}")
-        raise
-
-
-def test_regex_pattern(
-        pattern: str,
-        tests: List[Dict[str, Any]]) -> Tuple[bool, str, List[Dict[str, Any]]]:
-    """
-    Test a regex pattern against a list of test cases using PCRE2 compatible engine.
-    Returns match information along with test results.
-    """
-    logger.info(f"Starting regex pattern test - Pattern: {pattern}")
-
-    try:
-        try:
-            compiled_pattern = regex.compile(pattern,
-                                             regex.V1 | regex.IGNORECASE)
-            logger.info(
-                "Pattern compiled successfully with PCRE2 compatibility")
-        except regex.error as e:
-            logger.warning(f"Invalid regex pattern: {str(e)}")
-            return False, f"Invalid regex pattern: {str(e)}", tests
-
-        current_time = datetime.now().isoformat()
-        logger.info(f"Processing {len(tests)} test cases")
-
-        for test in tests:
-            test_id = test.get('id', 'unknown')
-            test_input = test.get('input', '')
-            expected = test.get('expected', False)
-
-            try:
-                match = compiled_pattern.search(test_input)
-                matches = bool(match)
-
-                # Update test result with basic fields
-                test['passes'] = matches == expected
-                test['lastRun'] = current_time
-
-                # Add match information
-                if match:
-                    test['matchedContent'] = match.group(0)
-                    test['matchSpan'] = {
-                        'start': match.start(),
-                        'end': match.end()
-                    }
-                    # Get all capture groups if they exist
-                    test['matchedGroups'] = [g for g in match.groups()
-                                             ] if match.groups() else []
-                else:
-                    test['matchedContent'] = None
-                    test['matchSpan'] = None
-                    test['matchedGroups'] = []
-
-                logger.info(
-                    f"Test {test_id} {'passed' if test['passes'] else 'failed'} - Match: {matches}, Expected: {expected}")
-
-            except Exception as e:
-                logger.error(f"Error running test {test_id}: {str(e)}")
-                test['passes'] = False
-                test['lastRun'] = current_time
-                test['matchedContent'] = None
-                test['matchSpan'] = None
-                test['matchedGroups'] = []
-
-        # Log overall results
-        passed_tests = sum(1 for test in tests if test.get('passes', False))
-        logger.info(
-            f"Test execution complete - {passed_tests}/{len(tests)} tests passed")
-
-        return True, "", tests
-
-    except Exception as e:
-        logger.error(f"Unexpected error in test_regex_pattern: {str(e)}",
-                     exc_info=True)
-        return False, str(e), tests
-
-
-def test_format_conditions(conditions: List[Dict],
-                           tests: List[Dict]) -> Tuple[bool, str, List[Dict]]:
-    """
-    Test a set of format conditions against a list of test cases.
-    Tests only pattern-based conditions (release_title, release_group, edition).
-    """
-    logger.info(
-        f"Starting format condition test - {len(conditions)} conditions")
-    logger.error(f"Received conditions: {conditions}")
-    logger.error(f"Received tests: {tests}")
-
-    try:
-        # First, load all regex patterns from the patterns directory
-        patterns_dir = os.path.join(REPO_PATH, 'regex_patterns')
-        pattern_map = {}
-
-        logger.error(f"Loading patterns from directory: {patterns_dir}")
-        if not os.path.exists(patterns_dir):
-            logger.error(f"Patterns directory not found: {patterns_dir}")
-            return False, "Patterns directory not found", tests
-
-        for pattern_file in os.listdir(patterns_dir):
-            if pattern_file.endswith('.yml'):
-                pattern_path = os.path.join(patterns_dir, pattern_file)
-                try:
-                    with open(pattern_path, 'r') as f:
-                        pattern_data = yaml.safe_load(f)
-                        if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
-                            pattern_map[pattern_data['name']] = pattern_data['pattern']
-                            logger.error(
-                                f"Loaded pattern: {pattern_data['name']} = {pattern_data['pattern']}")
-                except Exception as e:
-                    logger.error(
-                        f"Error loading pattern file {pattern_file}: {e}")
-                    continue
-
-        logger.error(f"Total patterns loaded: {len(pattern_map)}")
-
-        # Compile all regex patterns first
-        compiled_patterns = {}
-        for condition in conditions:
-            if condition['type'] in [
-                    'release_title', 'release_group', 'edition'
-            ]:
-                logger.error(f"Processing condition: {condition}")
-                try:
-                    pattern_name = condition.get('pattern', '')
-                    if pattern_name:
-                        # Look up the actual pattern using the pattern name
-                        actual_pattern = pattern_map.get(pattern_name)
-                        if actual_pattern:
-                            compiled_patterns[condition['name']] = regex.compile(
-                                actual_pattern, regex.V1 | regex.IGNORECASE)
-                            logger.error(
-                                f"Successfully compiled pattern for {condition['name']}: {actual_pattern}")
-                        else:
-                            logger.error(
-                                f"Pattern not found for name: {pattern_name}")
-                            return False, f"Pattern not found: {pattern_name}", tests
-                except regex.error as e:
-                    logger.error(
-                        f"Invalid regex pattern in condition {condition['name']}: {str(e)}")
-                    return False, f"Invalid regex pattern in condition {condition['name']}: {str(e)}", tests
-
-        logger.error(f"Total patterns compiled: {len(compiled_patterns)}")
-        current_time = datetime.now().isoformat()
-
-        # Process each test
-        for test in tests:
-            test_input = test.get('input', '')
-            expected = test.get('expected', False)
-            condition_results = []
-            logger.error(
-                f"Processing test input: {test_input}, expected: {expected}")
-
-            # Check each condition
-            for condition in conditions:
-                if condition['type'] not in [
-                        'release_title', 'release_group', 'edition'
-                ]:
-                    logger.error(
-                        f"Skipping non-pattern condition: {condition['type']}")
-                    continue
-
-                pattern = compiled_patterns.get(condition['name'])
-                if not pattern:
-                    logger.error(
-                        f"No compiled pattern found for condition: {condition['name']}")
-                    continue
-
-                # Test if pattern matches input
-                matches = bool(pattern.search(test_input))
-                logger.error(
-                    f"Condition {condition['name']} match result: {matches}")
-
-                # Add result
-                condition_results.append({
-                    'name': condition['name'],
-                    'type': condition['type'],
-                    'pattern': condition.get('pattern', ''),
-                    'required': condition.get('required', False),
-                    'negate': condition.get('negate', False),
-                    'matches': matches
-                })
-
-            # Determine if format applies
-            format_applies = True
-
-            # Check required conditions
-            for result in condition_results:
-                if result['required']:
-                    logger.error(
-                        f"Checking required condition: {result['name']}, negate: {result['negate']}, matches: {result['matches']}"
-                    )
-                    if result['negate']:
-                        if result['matches']:
-                            format_applies = False
-                            logger.error(
-                                f"Required negated condition {result['name']} matched - format does not apply")
-                            break
-                    else:
-                        if not result['matches']:
-                            format_applies = False
-                            logger.error(
-                                f"Required condition {result['name']} did not match - format does not apply")
-                            break
-
-            # Check non-required conditions
-            if format_applies:
-                for result in condition_results:
-                    if not result['required'] and result['negate'] and result['matches']:
-                        format_applies = False
-                        logger.error(
-                            f"Non-required negated condition {result['name']} matched - format does not apply")
-                        break
-
-            test['passes'] = format_applies == expected
-            test['lastRun'] = current_time
-            test['conditionResults'] = condition_results
-
-            logger.error(
-                f"Test result - format_applies: {format_applies}, expected: {expected}, passes: {test['passes']}")
-
-        # Log final results
-        passed_tests = sum(1 for test in tests if test.get('passes', False))
-        logger.error(
-            f"Final test results - {passed_tests}/{len(tests)} tests passed")
-        logger.error(f"Updated tests: {tests}")
-
-        return True, "", tests
-
-    except Exception as e:
-        logger.error(f"Unexpected error in test_format_conditions: {str(e)}",
-                     exc_info=True)
-        return False, str(e), tests
diff --git a/backend/app/git/branches/checkout.py b/backend/app/git/branches/checkout.py
deleted file mode 100644
index f688dd7..0000000
--- a/backend/app/git/branches/checkout.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# git/branches/checkout.py
-
-import git
-import logging
-from ...arr.manager import check_active_sync_configs
-
-logger = logging.getLogger(__name__)
-
-
-def checkout_branch(repo_path, branch_name):
-    try:
-        # Check for active sync configurations first
-        has_active_configs, configs = check_active_sync_configs()
-        if has_active_configs:
-            error_msg = (
-                "Cannot checkout branch while automatic sync configurations are active.\n"
-                "The following configurations must be set to manual sync first:\n")
-            for config in configs:
-                error_msg += f"- {config['name']} (ID: {config['id']}, {config['sync_method']} sync)\n"
-
-            logger.error(error_msg)
-            return False, {
-                "error": error_msg,
-                "code": "ACTIVE_SYNC_CONFIGS",
-                "configs": configs
-            }
-
-        logger.debug(f"Attempting to checkout branch: {branch_name}")
-        repo = git.Repo(repo_path)
-
-        # Check if the branch exists locally
-        if branch_name in repo.heads:
-            repo.git.checkout(branch_name)
-        else:
-            # Check if the branch exists in any of the remotes
-            for remote in repo.remotes:
-                remote_branch = f"{remote.name}/{branch_name}"
-                if remote_branch in repo.refs:
-                    # Create a new local branch tracking the remote branch
-                    repo.git.checkout('-b', branch_name, remote_branch)
-                    break
-            else:
-                return False, f"Branch '{branch_name}' does not exist locally or in any remote."
-
-        logger.debug(f"Successfully checked out branch: {branch_name}")
-        return True, {
-            "message": f"Checked out branch: {branch_name}",
-            "current_branch": branch_name
-        }
-    except Exception as e:
-        logger.error(f"Error checking out branch: {str(e)}", exc_info=True)
-        return False, {"error": f"Error checking out branch: {str(e)}"}
diff --git a/backend/app/git/operations/delete.py b/backend/app/git/operations/delete.py
deleted file mode 100644
index 2713a77..0000000
--- a/backend/app/git/operations/delete.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# git/operations/delete.py
-
-import os
-import logging
-
-logger = logging.getLogger(__name__)
-
-def delete_file(repo_path, file_path):
-    try:
-        full_file_path = os.path.join(repo_path, file_path)
-
-        if os.path.exists(full_file_path):
-            os.remove(full_file_path)
-            message = f"File {file_path} has been deleted."
-            return True, message
-        else:
-            return False, "File does not exist."
-    except Exception as e:
-        logger.error(f"Error deleting file: {str(e)}", exc_info=True)
-        return False, f"Error deleting file: {str(e)}"
\ No newline at end of file
diff --git a/backend/app/git/operations/merge.py b/backend/app/git/operations/merge.py
deleted file mode 100644
index be34700..0000000
--- a/backend/app/git/operations/merge.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# git/operations/merge.py
-import git
-import logging
-import os
-from typing import Dict, Any
-from ..status.status import GitStatusManager
-
-logger = logging.getLogger(__name__)
-
-
-def finalize_merge(repo) -> Dict[str, Any]:
-    """
-    Finalize a merge by committing all staged files after conflict resolution.
-    """
-    try:
-        if not os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD')):
-            return {
-                'success': False,
-                'error': 'Not currently in a merge state'
-            }
-
-        # Get unmerged files
-        unmerged_files = []
-        status = repo.git.status('--porcelain', '-z').split('\0')
-        for item in status:
-            if item and len(item) >= 4:
-                x, y, file_path = item[0], item[1], item[3:]
-                if 'U' in (x, y):
-                    unmerged_files.append(file_path)
-
-        # Force update the index for unmerged files
-        for file_path in unmerged_files:
-            # Remove from index first
-            try:
-                repo.git.execute(['git', 'reset', '--', file_path])
-            except git.GitCommandError:
-                pass
-
-            # Add back to index
-            try:
-                repo.git.execute(['git', 'add', '--', file_path])
-            except git.GitCommandError as e:
-                logger.error(f"Error adding file {file_path}: {str(e)}")
-                return {
-                    'success': False,
-                    'error': f"Failed to stage resolved file {file_path}"
-                }
-
-        # Create commit message
-        commit_message = "Merge complete: resolved conflicts"
-
-        # Commit
-        try:
-            repo.git.commit('-m', commit_message)
-            logger.info("Successfully finalized merge")
-
-            # Update remote status after merge
-            repo_path = repo.working_dir
-            status_manager = GitStatusManager.get_instance(repo_path)
-            if status_manager:
-                status_manager.update_remote_status()
-
-            return {'success': True, 'message': 'Merge completed successfully'}
-        except git.GitCommandError as e:
-            logger.error(f"Git command error during commit: {str(e)}")
-            return {
-                'success': False,
-                'error': f"Failed to commit merge: {str(e)}"
-            }
-
-    except Exception as e:
-        logger.error(f"Failed to finalize merge: {str(e)}")
-        return {
-            'success': False,
-            'error': f"Failed to finalize merge: {str(e)}"
-        }
-
-
-def abort_merge(repo_path):
-    try:
-        repo = git.Repo(repo_path)
-
-        # Try aborting the merge using git merge --abort
-        try:
-            repo.git.execute(['git', 'merge', '--abort'])
-            return True, "Merge aborted successfully"
-        except git.GitCommandError as e:
-            logger.warning(
-                "Error aborting merge with 'git merge --abort'. Trying 'git reset --hard'.")
-
-        # If git merge --abort fails, try resetting to the previous commit using git reset --hard
-        try:
-            repo.git.execute(['git', 'reset', '--hard'])
-            return True, "Merge aborted and repository reset to the previous commit"
-        except git.GitCommandError as e:
-            logger.exception(
-                "Error resetting repository with 'git reset --hard'")
-            return False, str(e)
-
-    except Exception as e:
-        logger.exception("Unexpected error aborting merge")
-        return False, str(e)
diff --git a/backend/app/git/operations/pull.py b/backend/app/git/operations/pull.py
deleted file mode 100644
index 61202d0..0000000
--- a/backend/app/git/operations/pull.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# git/operations/pull.py
-
-import git
-import logging
-from git import GitCommandError
-from ..status.status import GitStatusManager
-from ...arr.manager import get_pull_configs
-from ...importer import handle_pull_import
-
-logger = logging.getLogger(__name__)
-
-
-def pull_branch(repo_path, branch_name):
-    try:
-        repo = git.Repo(repo_path)
-
-        # Check for uncommitted changes first
-        if repo.is_dirty(untracked_files=True):
-            return False, {
-                'type': 'uncommitted_changes',
-                'message': 'Cannot pull: You have uncommitted local changes that would be lost',
-                'details': 'Please commit or stash your changes before pulling'
-            }
-
-        # Fetch first to get remote changes
-        repo.remotes.origin.fetch()
-
-        try:
-            # Pull with explicit merge strategy
-            repo.git.pull('origin', branch_name, '--no-rebase')
-
-            # Update remote status
-            status_manager = GitStatusManager.get_instance(repo_path)
-            if status_manager:
-                status_manager.update_remote_status()
-
-            # -------------------------------
-            # *** "On pull" ARR import logic using new importer:
-            # 1) Query all ARR configs that have sync_method="pull"
-            # 2) For each, run the importer pull handler
-            # -------------------------------
-            pull_configs = get_pull_configs()
-            logger.info(
-                f"[Pull] Found {len(pull_configs)} ARR configs to import (sync_method='pull')")
-            for cfg in pull_configs:
-                handle_pull_import(cfg['id'])
-
-            return True, f"Successfully pulled changes for branch {branch_name}"
-
-        except GitCommandError as e:
-            if "CONFLICT" in str(e):
-                return True, {
-                    'state': 'resolve',
-                    'type': 'merge_conflict',
-                    'message': 'Repository is now in conflict resolution state. Please resolve conflicts to continue merge.',
-                    'details': 'Please resolve conflicts to continue merge'
-                }
-            raise e
-
-    except Exception as e:
-        logger.error(f"Error pulling branch: {str(e)}", exc_info=True)
-        return False, f"Error pulling branch: {str(e)}"
diff --git a/backend/app/git/operations/resolve.py b/backend/app/git/operations/resolve.py
deleted file mode 100644
index 363d718..0000000
--- a/backend/app/git/operations/resolve.py
+++ /dev/null
@@ -1,333 +0,0 @@
-import yaml
-from git import GitCommandError
-import logging
-from typing import Dict, Any
-import os
-from copy import deepcopy
-from ...data.utils import CATEGORY_MAP
-
-logger = logging.getLogger(__name__)
-
-
-def determine_type(file_path):
-    if 'regex_patterns' in file_path:
-        return 'Regex Pattern'
-    elif 'custom_formats' in file_path:
-        return 'Custom Format'
-    elif 'profiles' in file_path:
-        return 'Quality Profile'
-    return 'Unknown'
-
-
-def get_version_data(repo, ref, file_path):
-    """Get YAML data from a specific version of a file."""
-    try:
-        content = repo.git.show(f'{ref}:{file_path}')
-        return yaml.safe_load(content) if content else None
-    except GitCommandError:
-        return None
-
-
-def resolve_conflicts(
-        repo, resolutions: Dict[str, Dict[str, str]]) -> Dict[str, Any]:
-    """
-    Resolve merge conflicts based on provided resolutions.
-    """
-    logger.debug(f"Received resolutions for files: {list(resolutions.keys())}")
-    logger.debug(f"Full resolutions data: {resolutions}")
-
-    try:
-        status = repo.git.status('--porcelain', '-z').split('\0')
-        conflicts = []
-        for item in status:
-            if not item or len(item) < 4:
-                continue
-            x, y, file_path = item[0], item[1], item[3:]
-            # Include modify/delete conflicts
-            if 'U' in (x, y) or (x == 'D' and y == 'D') or (
-                    x == 'D' and y == 'U') or (x == 'U' and y == 'D'):
-                conflicts.append((file_path, x, y))
-
-        # Track which files are modify/delete conflicts
-        modify_delete_conflicts = {
-            path: (x == 'D' and y == 'U') or (x == 'U' and y == 'D')
-            for path, x, y in conflicts
-        }
-
-        # Validate resolutions are for actual conflicting files
-        for file_path in resolutions:
-            if file_path not in {path for path, _, _ in conflicts}:
-                return {
-                    'success': False,
-                    'error': f"File not in conflict: {file_path}"
-                }
-
-        # Store initial states for rollback
-        initial_states = {}
-        for file_path in resolutions:
-            try:
-                full_path = os.path.join(repo.working_dir, file_path)
-                try:
-                    with open(full_path, 'r') as f:
-                        initial_states[file_path] = f.read()
-                except FileNotFoundError:
-                    initial_states[file_path] = None
-            except Exception as e:
-                return {
-                    'success': False,
-                    'error': f"Couldn't read file {file_path}: {str(e)}"
-                }
-
-        results = {}
-        for file_path, field_resolutions in resolutions.items():
-            # Handle modify/delete conflicts differently
-            if modify_delete_conflicts[file_path]:
-                logger.debug(
-                    f"Handling modify/delete conflict for {file_path}")
-                logger.debug(f"Field resolutions for modify/delete: {field_resolutions}")
-
-                # Get the existing version (either from HEAD or MERGE_HEAD)
-                head_data = get_version_data(repo, 'HEAD', file_path)
-                merge_head_data = get_version_data(repo, 'MERGE_HEAD', file_path)
-
-                # Determine which version exists
-                is_deleted_in_head = head_data is None
-                existing_data = merge_head_data if is_deleted_in_head else head_data
-                logger.debug(f"Existing version data: {existing_data}")
-                logger.debug(f"is_deleted_in_head: {is_deleted_in_head}")
-                logger.debug(f"head_data: {head_data}")
-                logger.debug(f"merge_head_data: {merge_head_data}")
-
-                # Try both lowercase and capitalized versions of 'file'
-                choice = field_resolutions.get('file') or field_resolutions.get('File')
-                logger.debug(f"Resolution choice for file: {choice}")
-
-                if not choice:
-                    logger.error("No 'file' or 'File' resolution found in field_resolutions!")
-                    logger.error(f"Available keys: {list(field_resolutions.keys())}")
-                    raise Exception(
-                        "No resolution provided for modify/delete conflict")
-
-                full_path = os.path.join(repo.working_dir, file_path)
-
-                if choice == 'local':
-                    if is_deleted_in_head:
-                        logger.debug(f"Keeping file deleted: {file_path}")
-                        # File should stay deleted
-                        try:
-                            os.remove(full_path)
-                        except FileNotFoundError:
-                            pass  # File is already gone
-                        repo.index.remove([file_path])
-                    else:
-                        logger.debug(f"Keeping local version: {file_path}")
-                        # Keep our version
-                        with open(full_path, 'w') as f:
-                            yaml.safe_dump(head_data,
-                                           f,
-                                           default_flow_style=False)
-                        repo.index.add([file_path])
-
-                elif choice == 'incoming':
-                    if is_deleted_in_head:
-                        logger.debug(
-                            f"Restoring from incoming version: {file_path}")
-                        # Restore the file from MERGE_HEAD
-                        with open(full_path, 'w') as f:
-                            yaml.safe_dump(merge_head_data,
-                                           f,
-                                           default_flow_style=False)
-                        repo.index.add([file_path])
-                    else:
-                        logger.debug(f"Accepting deletion: {file_path}")
-                        # Accept the deletion
-                        try:
-                            os.remove(full_path)
-                        except FileNotFoundError:
-                            pass  # File is already gone
-                        repo.index.remove([file_path])
-
-                results[file_path] = {
-                    'resolution': choice,
-                    'action': 'delete' if (choice == 'local' and is_deleted_in_head) or
-                    (choice == 'incoming' and not is_deleted_in_head) else 'keep'
-                }
-
-            else:
-                # Regular conflict resolution
-                # Get all three versions
-                base_data = get_version_data(repo, 'HEAD^', file_path)
-                ours_data = get_version_data(repo, 'HEAD', file_path)
-                theirs_data = get_version_data(repo, 'MERGE_HEAD', file_path)
-
-                # For files that were previously involved in modify/delete conflicts
-                # we may not be able to get all versions
-                if not base_data or not ours_data or not theirs_data:
-                    logger.warning(f"Couldn't get all versions of {file_path} - may have been previously resolved as a modify/delete conflict")
-                    logger.warning(f"base_data: {base_data}, ours_data: {ours_data}, theirs_data: {theirs_data}")
-
-                    # If it was previously resolved as "incoming" but ours_data is missing, use theirs_data
-                    if not ours_data and theirs_data:
-                        logger.info(f"Using incoming version for {file_path} as base for resolution")
-                        ours_data = theirs_data
-                    # If it was previously resolved as "local" but theirs_data is missing, use ours_data
-                    elif ours_data and not theirs_data:
-                        logger.info(f"Using local version for {file_path} as base for resolution")
-                        theirs_data = ours_data
-                    # If we can't recover either version, we can't proceed
-                    else:
-                        raise Exception(f"Couldn't get required versions of {file_path}")
-
-                # Start with a deep copy of ours_data to preserve all fields
-                resolved_data = deepcopy(ours_data)
-
-                # Track changes
-                kept_values = {}
-                discarded_values = {}
-
-                # Handle each resolution field
-                for field, choice in field_resolutions.items():
-                    if field.startswith('custom_format_'):
-                        format_name = field[len('custom_format_'):]
-
-                        ours_cf = next(
-                            (item for item in ours_data.get('custom_formats', [])
-                             if item['name'] == format_name), None)
-                        theirs_cf = next(
-                            (item for item in theirs_data.get('custom_formats', [])
-                             if item['name'] == format_name), None)
-
-                        if choice == 'local' and ours_cf:
-                            resolved_cf = ours_cf
-                            kept_values[field] = ours_cf
-                            discarded_values[field] = theirs_cf
-                        elif choice == 'incoming' and theirs_cf:
-                            resolved_cf = theirs_cf
-                            kept_values[field] = theirs_cf
-                            discarded_values[field] = ours_cf
-                        else:
-                            raise Exception(
-                                f"Invalid choice or missing custom format {format_name}")
-
-                        resolved_cf_list = resolved_data.get('custom_formats', [])
-                        for idx, item in enumerate(resolved_cf_list):
-                            if item['name'] == format_name:
-                                resolved_cf_list[idx] = resolved_cf
-                                break
-                        else:
-                            resolved_cf_list.append(resolved_cf)
-                        resolved_data['custom_formats'] = resolved_cf_list
-
-                    elif field.startswith('tag_'):
-                        tag_name = field[len('tag_'):]
-                        current_tags = set(resolved_data.get('tags', []))
-
-                        if choice == 'local':
-                            if tag_name in ours_data.get('tags', []):
-                                current_tags.add(tag_name)
-                                kept_values[field] = 'local'
-                                discarded_values[field] = 'incoming'
-                            else:
-                                current_tags.discard(tag_name)
-                                kept_values[field] = 'none'
-                                discarded_values[field] = 'incoming'
-                        elif choice == 'incoming':
-                            if tag_name in theirs_data.get('tags', []):
-                                current_tags.add(tag_name)
-                                kept_values[field] = 'incoming'
-                                discarded_values[field] = 'local'
-                            else:
-                                current_tags.discard(tag_name)
-                                kept_values[field] = 'none'
-                                discarded_values[field] = 'local'
-                        else:
-                            raise Exception(
-                                f"Invalid choice for tag field: {field}")
-
-                        resolved_data['tags'] = sorted(current_tags)
-
-                    else:
-                        field_key = field
-                        if choice == 'local':
-                            resolved_data[field_key] = ours_data.get(field_key)
-                            kept_values[field_key] = ours_data.get(field_key)
-                            discarded_values[field_key] = theirs_data.get(field_key)
-                        elif choice == 'incoming':
-                            resolved_data[field_key] = theirs_data.get(field_key)
-                            kept_values[field_key] = theirs_data.get(field_key)
-                            discarded_values[field_key] = ours_data.get(field_key)
-                        else:
-                            raise Exception(
-                                f"Invalid choice for field: {field}")
-
-                # Get file type and apply appropriate field ordering
-                file_type = determine_type(file_path)
-                if file_type == 'Quality Profile':
-                    _, fields = CATEGORY_MAP['profile']
-                elif file_type == 'Custom Format':
-                    _, fields = CATEGORY_MAP['custom_format']
-                elif file_type == 'Regex Pattern':
-                    _, fields = CATEGORY_MAP['regex_pattern']
-
-                # Order the fields according to the category's field order
-                ordered_data = {
-                    field: resolved_data.get(field)
-                    for field in fields if field in resolved_data
-                }
-                resolved_data = ordered_data
-
-                # Write resolved version
-                full_path = os.path.join(repo.working_dir, file_path)
-                with open(full_path, 'w') as f:
-                    yaml.safe_dump(resolved_data, f, default_flow_style=False)
-
-                # Stage the resolved file
-                repo.index.add([file_path])
-
-                results[file_path] = {
-                    'kept_values': kept_values,
-                    'discarded_values': discarded_values
-                }
-
-                logger.debug(
-                    f"Successfully resolved regular conflict for {file_path}")
-
-        logger.debug("==== Status after resolve_conflicts ====")
-        status_output = repo.git.status('--porcelain', '-z').split('\0')
-        for item in status_output:
-            if item:
-                logger.debug(f"File status: {item}")
-        logger.debug("=======================================")
-
-        return {'success': True, 'results': results}
-
-    except Exception as e:
-        # Rollback on any error
-        for file_path, initial_state in initial_states.items():
-            try:
-                full_path = os.path.join(repo.working_dir, file_path)
-                if initial_state is None:
-                    try:
-                        os.remove(full_path)
-                    except FileNotFoundError:
-                        pass
-                else:
-                    with open(full_path, 'w') as f:
-                        f.write(initial_state)
-            except Exception as rollback_error:
-                logger.error(
-                    f"Failed to rollback {file_path}: {str(rollback_error)}")
-
logger.error(f"Failed to resolve conflicts: {str(e)}") - return {'success': False, 'error': str(e)} diff --git a/backend/app/git/operations/revert.py b/backend/app/git/operations/revert.py deleted file mode 100644 index 77a5bc3..0000000 --- a/backend/app/git/operations/revert.py +++ /dev/null @@ -1,109 +0,0 @@ -# git/operations/revert.py - -import git -import os -import logging - -logger = logging.getLogger(__name__) - - -def revert_file(repo_path, file_path): - """ - Revert changes in a file, handling tracked files, staged deletions, and new files. - - Args: - repo_path: Path to the git repository - file_path: Path to the file to revert - - Returns: - tuple: (success: bool, message: str) - """ - try: - repo = git.Repo(repo_path) - file_absolute_path = os.path.join(repo_path, file_path) - - # Check if file is untracked (new) - untracked_files = repo.untracked_files - is_untracked = any(f == file_path for f in untracked_files) - - if is_untracked: - # For untracked files, we need to remove them - try: - os.remove(file_absolute_path) - message = f"New file {file_path} has been removed." - except FileNotFoundError: - message = f"File {file_path} was already removed." - return True, message - - # Check if file is staged for deletion - staged_deletions = repo.index.diff("HEAD", R=True) - is_staged_for_deletion = any(d.a_path == file_path - for d in staged_deletions) - - if is_staged_for_deletion: - # Restore file staged for deletion - repo.git.reset("--", file_path) - repo.git.checkout('HEAD', "--", file_path) - message = f"File {file_path} has been restored and unstaged from deletion." - else: - # Regular revert for tracked files with changes - repo.git.restore("--", file_path) - repo.git.restore('--staged', "--", file_path) - message = f"File {file_path} has been reverted." - - return True, message - - except git.exc.GitCommandError as e: - error_msg = str(e) - if "pathspec" in error_msg and "did not match any file(s) known to git" in error_msg: - logger.error(f"File {file_path} not found in git repository") - return False, f"File {file_path} not found in git repository" - logger.error(f"Git error reverting file: {error_msg}", exc_info=True) - return False, f"Git error reverting file: {error_msg}" - except Exception as e: - logger.error(f"Error reverting file: {str(e)}", exc_info=True) - return False, f"Error reverting file: {str(e)}" - - -def revert_all(repo_path): - """ - Revert all changes in the repository, including new files. - - Args: - repo_path: Path to the git repository - - Returns: - tuple: (success: bool, message: str) - """ - try: - repo = git.Repo(repo_path) - - # First, clean untracked files - untracked_files = repo.untracked_files - for file_path in untracked_files: - try: - os.remove(os.path.join(repo_path, file_path)) - except FileNotFoundError: - continue - except Exception as e: - logger.warning( - f"Could not remove untracked file {file_path}: {str(e)}") - - # Then restore tracked files - repo.git.restore('--staged', '.') - repo.git.restore('.') - - message = "All changes have been reverted to the last commit" - if untracked_files: - message += f" and {len(untracked_files)} new file(s) have been removed" - message += "." 
- - return True, message - - except git.exc.GitCommandError as e: - logger.error(f"Git error reverting all changes: {str(e)}", - exc_info=True) - return False, f"Git error reverting all changes: {str(e)}" - except Exception as e: - logger.error(f"Error reverting all changes: {str(e)}", exc_info=True) - return False, f"Error reverting all changes: {str(e)}" diff --git a/backend/app/git/repo/clone.py b/backend/app/git/repo/clone.py deleted file mode 100644 index 801f82a..0000000 --- a/backend/app/git/repo/clone.py +++ /dev/null @@ -1,156 +0,0 @@ -# git/clone_repo.py - -import os -import shutil -import logging -import yaml -from git.exc import GitCommandError -import git -from ..auth.authenticate import GitHubAuth - -logger = logging.getLogger(__name__) - - -def clone_repository(repo_url, repo_path): - temp_dir = f"{repo_path}_temp" - backup_dir = f"{repo_path}_backup" - logger = logging.getLogger(__name__) - - try: - # Initial clone attempt - logger.info(f"Starting clone operation for {repo_url}") - try: - # First try without authentication (for public repos) - repo = git.Repo.clone_from(repo_url, temp_dir) - logger.info("Repository clone successful") - except GitCommandError as e: - error_str = str(e) - # If authentication error, try with token - if "could not read Username" in error_str or "Authentication failed" in error_str: - logger.info("Initial clone failed due to authentication. Trying with token...") - try: - # Verify token availability - if not GitHubAuth.verify_token(): - logger.error("Private repository requires GitHub authentication. Please configure PAT.") - return False, "This appears to be a private repository. Please configure PROFILARR_PAT environment variable." - - # Get authenticated URL for private repositories - authenticated_url = GitHubAuth.get_authenticated_url(repo_url) - repo = git.Repo.clone_from(authenticated_url, temp_dir) - logger.info("Repository clone with authentication successful") - except GitCommandError as auth_e: - logger.error(f"Clone with authentication failed: {str(auth_e)}") - return False, f"Failed to clone repository: {str(auth_e)}" - # If repository not found, create new one - elif "remote: Repository not found" in error_str: - logger.info("Creating new repository - remote not found") - repo = git.Repo.init(temp_dir) - repo.create_remote('origin', repo_url) - else: - logger.error(f"Clone failed: {error_str}") - return False, f"Failed to clone repository: {error_str}" - - # Check if repo is empty - try: - repo.head.reference - except ValueError: - logger.info("Initializing empty repository with default structure") - _initialize_empty_repo(repo) - - # Backup handling - if os.path.exists(repo_path): - logger.info("Creating backup of existing repository") - shutil.move(repo_path, backup_dir) - - # Move repo to final location - logger.info("Moving repository to final location") - shutil.move(temp_dir, repo_path) - - # Process folders - for folder_name in ['regex_patterns', 'custom_formats', 'profiles']: - folder_path = os.path.join(repo_path, folder_name) - backup_folder_path = os.path.join(backup_dir, folder_name) - - if not os.path.exists(folder_path): - logger.debug(f"Creating folder: {folder_name}") - os.makedirs(folder_path) - - # File merging process - cloned_files = set( - f.replace('.yml', '') for f in os.listdir(folder_path) - if f.endswith('.yml')) - - if os.path.exists(backup_folder_path): - local_files = [ - f for f in os.listdir(backup_folder_path) - if f.endswith('.yml') - ] - - if local_files: - logger.info( - f"Merging 
{len(local_files)} files in {folder_name}") - - for file_name in local_files: - old_file_path = os.path.join(backup_folder_path, file_name) - with open(old_file_path, 'r') as file: - data = yaml.safe_load(file) - - base_name = data['name'] - new_name = base_name - counter = 1 - - while new_name in cloned_files: - new_name = f"{base_name} ({counter})" - counter += 1 - - cloned_files.add(new_name) - new_file_path = os.path.join(folder_path, - f"{new_name}.yml") - - with open(new_file_path, 'w') as file: - yaml.dump(data, file) - logger.debug(f"Merged file: {file_name} → {new_name}.yml") - - # Cleanup - if os.path.exists(backup_dir): - logger.info("Removing backup directory") - shutil.rmtree(backup_dir) - - logger.info("Clone operation completed successfully") - return True, "Repository cloned and local files merged successfully" - - except Exception as e: - logger.exception("Critical error during clone operation") - if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) - if os.path.exists(backup_dir): - shutil.move(backup_dir, repo_path) - return False, f"Critical error: {str(e)}" - - -def _initialize_empty_repo(repo): - # Create basic folder structure - os.makedirs(os.path.join(repo.working_tree_dir, 'regex_patterns'), - exist_ok=True) - os.makedirs(os.path.join(repo.working_tree_dir, 'custom_formats'), - exist_ok=True) - os.makedirs(os.path.join(repo.working_tree_dir, 'quality_profiles'), - exist_ok=True) - - # Create a README file - with open(os.path.join(repo.working_tree_dir, 'README.md'), 'w') as f: - f.write( - "# Profilarr Repository\n\nThis repository contains regex patterns, custom formats and quality profiles." - ) - - repo.git.add(A=True) - repo.index.commit("Initial commit: Basic repository structure") - repo.create_head('main') - repo.heads.main.checkout() - origin = repo.remote(name='origin') - origin.push('main') - origin.push('main:main') - - logger.info( - f"Initialized empty repository with basic structure and pushed to main" - ) diff --git a/backend/app/git/repo/unlink.py b/backend/app/git/repo/unlink.py deleted file mode 100644 index a2a3395..0000000 --- a/backend/app/git/repo/unlink.py +++ /dev/null @@ -1,74 +0,0 @@ -# git/repo/unlink.py -import os -import shutil -import logging -from ...db import save_settings -from ...arr.manager import check_active_sync_configs - -logger = logging.getLogger(__name__) - - -def unlink_repository(repo_path, remove_files=False): - try: - # Check for active sync configurations first - has_active_configs, configs = check_active_sync_configs() - if has_active_configs: - error_msg = ( - "Cannot unlink repository while automatic sync configurations are active.\n" - "The following configurations must be set to manual sync first:\n" - ) - for config in configs: - error_msg += f"- {config['name']} (ID: {config['id']}, {config['sync_method']} sync)\n" - - logger.error(error_msg) - return False, { - "error": error_msg, - "code": "ACTIVE_SYNC_CONFIGS", - "configs": configs - } - - logger.info( - f"Starting unlink_repository with repo_path: {repo_path} and remove_files: {remove_files}" - ) - - # Check if repo_path exists - if not os.path.exists(repo_path): - logger.error(f"Path {repo_path} does not exist.") - return False, f"Path {repo_path} does not exist." 
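-        # Two modes: remove_files=True wipes the working tree and recreates the
-        # empty category folders, while remove_files=False strips only the .git
-        # directory and leaves the YAML files behind as a plain folder.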
- - # Remove the .git folder and optionally the repo files - if remove_files: - logger.info(f"Removing all files in the repository at {repo_path}") - for root, dirs, files in os.walk(repo_path): - for file in files: - os.remove(os.path.join(root, file)) - for dir in dirs: - shutil.rmtree(os.path.join(root, dir)) - logger.info( - f"Successfully removed all files in the repository at {repo_path}" - ) - - # Recreate necessary folders - required_dirs = ['custom_formats', 'profiles', 'regex_patterns'] - for dir_name in required_dirs: - os.makedirs(os.path.join(repo_path, dir_name), exist_ok=True) - logger.info( - f"Recreated the directory {dir_name} at {repo_path}") - else: - git_folder = os.path.join(repo_path, '.git') - if os.path.exists(git_folder): - logger.info(f"Removing .git folder at {git_folder}") - shutil.rmtree(git_folder) - logger.info( - f"Successfully removed .git folder at {git_folder}") - else: - logger.warning(f".git folder does not exist at {git_folder}") - - # Clear git settings - save_settings({'gitRepo': None}) - logger.info("Updated settings to remove git information") - - return True, "Repository successfully unlinked" - except Exception as e: - logger.error(f"Error unlinking repository: {str(e)}", exc_info=True) - return False, f"Error unlinking repository: {str(e)}" diff --git a/backend/app/importer/compiler.py b/backend/app/importer/compiler.py deleted file mode 100644 index 3e9c7f3..0000000 --- a/backend/app/importer/compiler.py +++ /dev/null @@ -1,352 +0,0 @@ -"""Compilation functions to transform YAML data to Arr API format.""" -import logging -from typing import Dict, List, Any, Optional -from .mappings import TargetApp, ValueResolver -from .utils import load_regex_patterns -from ..db.queries.format_renames import is_format_in_renames -from ..db.queries.settings import get_language_import_score -from .logger import get_import_logger - -logger = logging.getLogger(__name__) - -# Cache patterns at module level to avoid reloading -_CACHED_PATTERNS = None - -def get_cached_patterns(): - """Get cached regex patterns, loading them once on first access.""" - global _CACHED_PATTERNS - if _CACHED_PATTERNS is None: - _CACHED_PATTERNS = load_regex_patterns() - return _CACHED_PATTERNS - - -def compile_format_to_api_structure( - format_yaml: Dict[str, Any], - arr_type: str -) -> Dict[str, Any]: - """ - Compile a format from YAML to Arr API structure. 
- - Args: - format_yaml: Format data from YAML file - arr_type: 'radarr' or 'sonarr' - - Returns: - Compiled format ready for API - """ - target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR - patterns = get_cached_patterns() - - compiled = { - 'name': format_yaml.get('name', 'Unknown') - } - - # Check if format should be included in renames - if is_format_in_renames(format_yaml.get('name', '')): - compiled['includeCustomFormatWhenRenaming'] = True - - # Compile specifications from conditions - specifications = [] - for condition in format_yaml.get('conditions', []): - spec = _compile_condition(condition, patterns, target_app) - if spec: - specifications.append(spec) - - compiled['specifications'] = specifications - return compiled - - -def _compile_condition( - condition: Dict[str, Any], - patterns: Dict[str, str], - target_app: TargetApp -) -> Optional[Dict[str, Any]]: - """Compile a single condition to specification.""" - condition_type = condition.get('type') - - spec = { - 'name': condition.get('name', ''), - 'negate': condition.get('negate', False), - 'required': condition.get('required', False), - 'fields': [] - } - - if condition_type in ['release_title', 'release_group', 'edition']: - pattern_name = condition.get('pattern') - pattern = patterns.get(pattern_name) - if not pattern: - import_logger = get_import_logger() - import_logger.warning(f"Pattern not found: {pattern_name}") - return None - - spec['implementation'] = { - 'release_title': 'ReleaseTitleSpecification', - 'release_group': 'ReleaseGroupSpecification', - 'edition': 'EditionSpecification' - }[condition_type] - spec['fields'] = [{'name': 'value', 'value': pattern}] - - elif condition_type == 'source': - spec['implementation'] = 'SourceSpecification' - value = ValueResolver.get_source(condition.get('source'), target_app) - spec['fields'] = [{'name': 'value', 'value': value}] - - elif condition_type == 'resolution': - spec['implementation'] = 'ResolutionSpecification' - value = ValueResolver.get_resolution(condition.get('resolution')) - spec['fields'] = [{'name': 'value', 'value': value}] - - elif condition_type == 'indexer_flag': - spec['implementation'] = 'IndexerFlagSpecification' - value = ValueResolver.get_indexer_flag(condition.get('flag', ''), target_app) - spec['fields'] = [{'name': 'value', 'value': value}] - - elif condition_type == 'quality_modifier': - if target_app == TargetApp.SONARR: - return None - spec['implementation'] = 'QualityModifierSpecification' - value = ValueResolver.get_quality_modifier(condition.get('qualityModifier')) - spec['fields'] = [{'name': 'value', 'value': value}] - - elif condition_type == 'size': - spec['implementation'] = 'SizeSpecification' - spec['fields'] = [ - {'name': 'min', 'value': condition.get('minSize', 0)}, - {'name': 'max', 'value': condition.get('maxSize', 0)} - ] - - elif condition_type == 'language': - spec['implementation'] = 'LanguageSpecification' - language_name = condition.get('language', '').lower() - try: - language_data = ValueResolver.get_language(language_name, target_app, for_profile=False) - fields = [{'name': 'value', 'value': language_data['id']}] - - # Handle exceptLanguage field if present - if 'exceptLanguage' in condition: - except_value = condition['exceptLanguage'] - fields.append({ - 'name': 'exceptLanguage', - 'value': except_value - }) - - spec['fields'] = fields - except Exception: - import_logger = get_import_logger() - import_logger.warning(f"Language not found: {language_name}") - return None - - elif 
condition_type == 'release_type': - # Only supported in Sonarr - if target_app == TargetApp.RADARR: - return None - spec['implementation'] = 'ReleaseTypeSpecification' - value = ValueResolver.get_release_type(condition.get('releaseType')) - spec['fields'] = [{'name': 'value', 'value': value}] - - elif condition_type == 'year': - spec['implementation'] = 'YearSpecification' - spec['fields'] = [ - {'name': 'min', 'value': condition.get('minYear', 0)}, - {'name': 'max', 'value': condition.get('maxYear', 0)} - ] - - else: - import_logger = get_import_logger() - import_logger.warning(f"Unknown condition type: {condition_type}") - return None - - return spec - - -def compile_profile_to_api_structure( - profile_yaml: Dict[str, Any], - arr_type: str -) -> Dict[str, Any]: - """ - Compile a profile from YAML to Arr API structure. - - Args: - profile_yaml: Profile data from YAML file - arr_type: 'radarr' or 'sonarr' - - Returns: - Compiled profile ready for API - """ - target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR - quality_mappings = ValueResolver.get_qualities(target_app) - - compiled = { - 'name': profile_yaml.get('name', 'Unknown') - } - - # Build quality items - following the structure from the working compile/profile_compiler.py - items = [] - cutoff_id = None - used_qualities = set() - quality_ids_in_groups = set() - - # Convert group IDs (negative to positive with offset) - def convert_group_id(group_id: int) -> int: - if group_id < 0: - return 1000 + abs(group_id) - return group_id - - # First pass: gather quality IDs in groups to avoid duplicates - for quality_entry in profile_yaml.get('qualities', []): - if isinstance(quality_entry, dict) and quality_entry.get('id', 0) < 0: - # It's a group - for q in quality_entry.get('qualities', []): - if isinstance(q, dict): - q_name = q.get('name', '') - mapped_name = ValueResolver.get_quality_name(q_name, target_app) - if mapped_name in quality_mappings: - quality_ids_in_groups.add(quality_mappings[mapped_name]['id']) - - # Second pass: add groups and individual qualities - for quality_entry in profile_yaml.get('qualities', []): - if isinstance(quality_entry, dict): - if quality_entry.get('id', 0) < 0: - # It's a group - group_id = convert_group_id(quality_entry.get('id', 0)) - group_item = { - 'id': group_id, - 'name': quality_entry.get('name', 'Group'), - 'items': [], - 'allowed': True - } - - for q in quality_entry.get('qualities', []): - if isinstance(q, dict): - q_name = q.get('name', '') - mapped_name = ValueResolver.get_quality_name(q_name, target_app) - if mapped_name in quality_mappings: - group_item['items'].append({ - 'quality': quality_mappings[mapped_name].copy(), - 'items': [], - 'allowed': True - }) - used_qualities.add(mapped_name.upper()) - - if group_item['items']: - items.append(group_item) - else: - # Individual quality - q_name = quality_entry.get('name', '') - mapped_name = ValueResolver.get_quality_name(q_name, target_app) - if mapped_name in quality_mappings: - items.append({ - 'quality': quality_mappings[mapped_name].copy(), - 'items': [], - 'allowed': True - }) - used_qualities.add(mapped_name.upper()) - elif isinstance(quality_entry, str): - # Simple quality name string - mapped_name = ValueResolver.get_quality_name(quality_entry, target_app) - if mapped_name in quality_mappings: - items.append({ - 'quality': quality_mappings[mapped_name].copy(), - 'items': [], - 'allowed': True - }) - used_qualities.add(mapped_name.upper()) - - # Add all unused qualities as disabled - for quality_name, 
quality_data in quality_mappings.items(): - if (quality_name.upper() not in used_qualities and - quality_data['id'] not in quality_ids_in_groups): - items.append({ - 'quality': quality_data.copy(), - 'items': [], - 'allowed': False - }) - - # Handle cutoff/upgrade_until - if 'upgrade_until' in profile_yaml and isinstance(profile_yaml['upgrade_until'], dict): - cutoff_id_raw = profile_yaml['upgrade_until'].get('id') - cutoff_name = profile_yaml['upgrade_until'].get('name', '') - mapped_cutoff_name = ValueResolver.get_quality_name(cutoff_name, target_app) - - if cutoff_id_raw and cutoff_id_raw < 0: - cutoff_id = convert_group_id(cutoff_id_raw) - elif mapped_cutoff_name in quality_mappings: - cutoff_id = quality_mappings[mapped_cutoff_name]['id'] - - # Handle language - language = profile_yaml.get('language', 'any') - if language != 'any' and '_' not in language: - # Simple language mode - try: - language_data = ValueResolver.get_language(language, target_app, for_profile=True) - except Exception: - language_data = ValueResolver.get_language('any', target_app, for_profile=True) - else: - # Advanced mode or any - language_data = ValueResolver.get_language('any', target_app, for_profile=True) - - # Build format items (without IDs, those get synced later) - format_items = [] - - # Add language-specific formats for advanced mode - if language != 'any' and '_' in language: - behavior, language_code = language.split('_', 1) - - # Get the score from database instead of hardcoding - language_score = get_language_import_score() - - # Use proper capitalization for the language name - lang_display = language_code.capitalize() - - # Handle behaviors: 'must' and 'only' (matching old working logic) - if behavior in ['must', 'only']: - # Add "Not [Language]" format with score from database - not_language_name = f"Not {lang_display}" - format_items.append({ - 'name': not_language_name, - 'score': language_score - }) - - # For 'only' behavior, add additional formats - if behavior == 'only': - format_items.append({ - 'name': f"Not Only {lang_display}", - 'score': language_score - }) - format_items.append({ - 'name': f"Not Only {lang_display} (Missing)", - 'score': language_score - }) - - # Main custom formats - for cf in profile_yaml.get('custom_formats', []): - format_items.append({ - 'name': cf.get('name'), - 'score': cf.get('score', 0) - }) - - # App-specific custom formats - app_key = f'custom_formats_{arr_type.lower()}' - for cf in profile_yaml.get(app_key, []): - format_items.append({ - 'name': cf.get('name'), - 'score': cf.get('score', 0) - }) - - # Reverse items to match expected order - items.reverse() - - compiled['items'] = items - compiled['language'] = language_data - compiled['upgradeAllowed'] = profile_yaml.get('upgradesAllowed', True) - compiled['minFormatScore'] = profile_yaml.get('minCustomFormatScore', 0) - compiled['cutoffFormatScore'] = profile_yaml.get('upgradeUntilScore', 0) - compiled['formatItems'] = format_items - - if cutoff_id is not None: - compiled['cutoff'] = cutoff_id - - # Handle minUpgradeFormatScore with proper default - compiled['minUpgradeFormatScore'] = max(1, profile_yaml.get('minScoreIncrement', 1)) - - return compiled \ No newline at end of file diff --git a/backend/app/importer/strategies/format.py b/backend/app/importer/strategies/format.py deleted file mode 100644 index 76ad96a..0000000 --- a/backend/app/importer/strategies/format.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Format import strategy.""" -import logging -from typing import Dict, List, Any -from .base import 
ImportStrategy -from ..utils import load_yaml -from ..compiler import compile_format_to_api_structure -from ..logger import get_import_logger - -logger = logging.getLogger(__name__) - - -class FormatStrategy(ImportStrategy): - """Strategy for importing custom formats.""" - - def compile(self, filenames: List[str]) -> Dict[str, Any]: - """ - Compile format files to API-ready format. - - Args: - filenames: List of format filenames (without .yml) - - Returns: - Dictionary with 'formats' key containing compiled formats - """ - formats = [] - failed = [] - import_logger = get_import_logger() - - # Don't try to predict - we'll count as we go - import_logger.start(0, 0) # Will update counts as we compile - - for filename in filenames: - try: - # Load YAML - format_yaml = load_yaml(f"custom_format/{filename}.yml") - - # Compile to API structure - compiled = compile_format_to_api_structure(format_yaml, self.arr_type) - - # Add unique suffix if needed - if self.import_as_unique: - compiled['name'] = self.add_unique_suffix(compiled['name']) - - formats.append(compiled) - import_logger.update_compilation(filename) - - except Exception as e: - import_logger.error(f"{e}", filename, 'compilation') - failed.append(filename) - # Don't count failed compilations - - # Set final compilation count - import_logger.total_compilation = len(formats) - import_logger.current_compilation = len(formats) - import_logger.compilation_complete() - - return {'formats': formats} - - def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]: - """ - Import compiled formats to Arr instance. - - Args: - compiled_data: Dictionary with 'formats' key - dry_run: If True, simulate import without making changes - - Returns: - Import results - """ - # Get existing formats - existing = self.arr.get_all_formats() - existing_map = {f['name']: f['id'] for f in existing} - - results = { - 'added': 0, - 'updated': 0, - 'failed': 0, - 'details': [] - } - - import_logger = get_import_logger() - - # Set import count - import_logger.total_import = len(compiled_data['formats']) - import_logger._import_shown = False # Reset import shown flag - - for format_data in compiled_data['formats']: - format_name = format_data['name'] - - try: - if format_name in existing_map: - # Update existing - if not dry_run: - format_data['id'] = existing_map[format_name] - self.arr.put( - f"/api/v3/customformat/{existing_map[format_name]}", - format_data - ) - - import_logger.update_import(format_name, "updated") - results['updated'] += 1 - results['details'].append({ - 'name': format_name, - 'action': 'updated' - }) - else: - # Add new - if not dry_run: - self.arr.post("/api/v3/customformat", format_data) - - import_logger.update_import(format_name, "added") - results['added'] += 1 - results['details'].append({ - 'name': format_name, - 'action': 'added' - }) - - except Exception as e: - import_logger.update_import(format_name, "failed") - import_logger.error(f"Failed to import format {format_name}: {e}", format_name) - results['failed'] += 1 - results['details'].append({ - 'name': format_name, - 'action': 'failed', - 'error': str(e) - }) - - # Show import summary - import_logger.import_complete() - import_logger._import_shown = True - - return results \ No newline at end of file diff --git a/backend/app/importer/strategies/profile.py b/backend/app/importer/strategies/profile.py deleted file mode 100644 index 6a2dbc8..0000000 --- a/backend/app/importer/strategies/profile.py +++ /dev/null @@ -1,262 +0,0 @@ -"""Profile import 
strategy.""" -import logging -from typing import Dict, List, Any, Set -from .base import ImportStrategy -from ..utils import load_yaml, extract_format_names, generate_language_formats -from ..compiler import compile_format_to_api_structure, compile_profile_to_api_structure -from ..logger import get_import_logger - -logger = logging.getLogger(__name__) - - -class ProfileStrategy(ImportStrategy): - """Strategy for importing quality profiles.""" - - def compile(self, filenames: List[str]) -> Dict[str, Any]: - """ - Compile profile files and their dependent formats to API-ready format. - - Args: - filenames: List of profile filenames (without .yml) - - Returns: - Dictionary with 'profiles' and 'formats' keys - """ - profiles = [] - all_formats = [] - processed_formats: Set[str] = set() - # Cache for language formats to avoid recompiling - language_formats_cache: Dict[str, List[Dict]] = {} - - import_logger = get_import_logger() - - # Don't try to predict - we'll count as we go - import_logger.start(0, 0) # Will update counts as we compile - - for filename in filenames: - try: - # Load profile YAML - profile_yaml = load_yaml(f"profile/{filename}.yml") - - # Extract referenced custom formats (only for the target arr type) - format_names = extract_format_names(profile_yaml, self.arr_type) - - for format_name in format_names: - # Skip if already processed - display_name = self.add_unique_suffix(format_name) if self.import_as_unique else format_name - if display_name in processed_formats: - continue - - try: - format_yaml = load_yaml(f"custom_format/{format_name}.yml") - compiled_format = compile_format_to_api_structure(format_yaml, self.arr_type) - - if self.import_as_unique: - compiled_format['name'] = self.add_unique_suffix(compiled_format['name']) - - all_formats.append(compiled_format) - processed_formats.add(compiled_format['name']) - import_logger.update_compilation(format_name) - - except Exception as e: - # Count the failed attempt - import_logger.update_compilation(f"{format_name} (failed)") - - # Generate language formats if needed - language = profile_yaml.get('language', 'any') - if language != 'any' and '_' in language: - # Check cache first - if language not in language_formats_cache: - language_formats = generate_language_formats(language, self.arr_type) - compiled_langs = [] - - for lang_format in language_formats: - lang_name = lang_format.get('name', 'Language format') - compiled_lang = compile_format_to_api_structure(lang_format, self.arr_type) - - if self.import_as_unique: - compiled_lang['name'] = self.add_unique_suffix(compiled_lang['name']) - - compiled_langs.append(compiled_lang) - - # Add to all_formats only on first compilation - if compiled_lang['name'] not in processed_formats: - all_formats.append(compiled_lang) - processed_formats.add(compiled_lang['name']) - import_logger.update_compilation(lang_name) - - # Store in cache - language_formats_cache[language] = compiled_langs - - # Compile profile - compiled_profile = compile_profile_to_api_structure(profile_yaml, self.arr_type) - - if self.import_as_unique: - compiled_profile['name'] = self.add_unique_suffix(compiled_profile['name']) - - # Update format references in profile - for item in compiled_profile.get('formatItems', []): - item['name'] = self.add_unique_suffix(item['name']) - - profiles.append(compiled_profile) - import_logger.update_compilation(f"Profile: {compiled_profile['name']}") - - except Exception as e: - import_logger.error(f"{str(e)}", f"Profile: {filename}", 'compilation') - 
import_logger.update_compilation(f"Profile: {filename} (failed)") - - # Set total to what we actually attempted - import_logger.total_compilation = import_logger.current_compilation - import_logger.compilation_complete() - - return { - 'profiles': profiles, - 'formats': all_formats - } - - def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]: - """ - Import compiled profiles and formats to Arr instance. - - Args: - compiled_data: Dictionary with 'profiles' and 'formats' keys - dry_run: If True, simulate import without making changes - - Returns: - Import results - """ - results = { - 'added': 0, - 'updated': 0, - 'failed': 0, - 'details': [] - } - - import_logger = get_import_logger() - - # Set total import count - import_logger.total_import = len(compiled_data['formats']) + len(compiled_data['profiles']) - import_logger._import_shown = False # Reset import shown flag - - # Import formats first - if compiled_data['formats']: - existing_formats = self.arr.get_all_formats() - format_map = {f['name']: f['id'] for f in existing_formats} - - formats_failed = [] - - for format_data in compiled_data['formats']: - format_name = format_data['name'] - - try: - if format_name in format_map: - # Update existing - if not dry_run: - format_data['id'] = format_map[format_name] - self.arr.put( - f"/api/v3/customformat/{format_map[format_name]}", - format_data - ) - import_logger.update_import(format_name, "updated") - else: - # Add new - if dry_run: - # In dry run, pretend we got an ID - # Use a predictable fake ID for dry run - fake_id = 999000 + len(format_map) - format_map[format_name] = fake_id - else: - response = self.arr.post("/api/v3/customformat", format_data) - format_map[format_name] = response['id'] - import_logger.update_import(format_name, "added") - - except Exception as e: - import_logger.update_import(format_name, "failed") - import_logger.error(f"Failed to import format {format_name}: {e}", format_name) - formats_failed.append(format_name) - - # Refresh format map for profile syncing (MUST be done after importing formats) - if not dry_run: - # In real mode, get the actual current formats from the server - existing_formats = self.arr.get_all_formats() - format_map = {f['name']: f['id'] for f in existing_formats} - # In dry run mode, format_map already has fake IDs from above - - # Sync format IDs in profiles - for profile in compiled_data['profiles']: - synced_items = [] - processed_formats = set() - - # First add all explicitly defined formats with their scores - for item in profile.get('formatItems', []): - if item['name'] in format_map: - synced_items.append({ - 'format': format_map[item['name']], - 'name': item['name'], - 'score': item.get('score', 0) - }) - processed_formats.add(item['name']) - else: - import_logger.warning(f"Format {item['name']} not found for profile {profile['name']}") - - # Then add ALL other existing formats with score 0 (Arr requirement) - for format_name, format_id in format_map.items(): - if format_name not in processed_formats: - synced_items.append({ - 'format': format_id, - 'name': format_name, - 'score': 0 - }) - - profile['formatItems'] = synced_items - - # Import profiles - existing_profiles = self.arr.get_all_profiles() - profile_map = {p['name']: p['id'] for p in existing_profiles} - - for profile_data in compiled_data['profiles']: - profile_name = profile_data['name'] - - try: - if profile_name in profile_map: - # Update existing - if not dry_run: - profile_data['id'] = profile_map[profile_name] - 
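-                        # Reuse the server-side id so the PUT updates the profile in place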
self.arr.put( - f"/api/v3/qualityprofile/{profile_data['id']}", - profile_data - ) - - import_logger.update_import(f"Profile: {profile_name}", "updated") - results['updated'] += 1 - results['details'].append({ - 'name': profile_name, - 'action': 'updated' - }) - else: - # Add new - if not dry_run: - self.arr.post("/api/v3/qualityprofile", profile_data) - - import_logger.update_import(f"Profile: {profile_name}", "added") - results['added'] += 1 - results['details'].append({ - 'name': profile_name, - 'action': 'added' - }) - - except Exception as e: - import_logger.update_import(f"Profile: {profile_name}", "failed") - import_logger.error(f"Failed to import profile {profile_name}: {e}", profile_name) - results['failed'] += 1 - results['details'].append({ - 'name': profile_name, - 'action': 'failed', - 'error': str(e) - }) - - # Show import summary - import_logger.import_complete() - import_logger._import_shown = True - - return results \ No newline at end of file diff --git a/backend/app/main.py b/backend/app/main.py deleted file mode 100644 index efcb54c..0000000 --- a/backend/app/main.py +++ /dev/null @@ -1,95 +0,0 @@ -# backend/app/main.py - -from flask import Flask, jsonify, send_from_directory -import os -from flask_cors import CORS -from .config import config -from .git import bp as git_bp -from .arr import bp as arr_bp -from .data import bp as data_bp -from .importarr import bp as importarr_bp -from .importer.routes import bp as new_import_bp -from .task import bp as tasks_bp, TaskScheduler -from .backup import bp as backup_bp -from .db import run_migrations, get_settings -from .auth import bp as auth_bp -from .settings import bp as settings_bp -from .logs import bp as logs_bp -from .media_management import media_management_bp -from .middleware import init_middleware -from .init import setup_logging, init_app_config, init_git_user - - -def create_app(): - # Set up logging first - logger = setup_logging() - - logger.info("Creating Flask application") - app = Flask(__name__, static_folder='static') - CORS(app, resources={r"/*": {"origins": "*"}}) - - # Serve static files - @app.route('/', defaults={'path': ''}) - @app.route('/') - def serve_static(path): - if path.startswith('api/'): - return # Let API routes handle these - if path and os.path.exists(os.path.join(app.static_folder, path)): - return send_from_directory(app.static_folder, path) - return send_from_directory(app.static_folder, 'index.html') - - # Initialize directories and database - logger.info("Ensuring required directories exist") - config.ensure_directories() - - logger.info("Initializing database") - run_migrations() - - # Initialize Git user configuration - logger.info("Initializing Git user") - success, message = init_git_user() - if not success: - logger.warning(f"Git user initialization issue: {message}") - else: - logger.info("Git user initialized successfully") - - # Initialize app configuration - init_app_config(app) - - # Initialize and start task scheduler - logger.info("Starting task scheduler") - scheduler = TaskScheduler() - scheduler.load_tasks_from_db() - scheduler.start() - - # Register all blueprints - logger.info("Registering blueprints") - app.register_blueprint(auth_bp, url_prefix='/api/auth') - app.register_blueprint(settings_bp, url_prefix='/api/settings') - app.register_blueprint(backup_bp, url_prefix='/api/backup') - app.register_blueprint(logs_bp, url_prefix='/api/logs') - app.register_blueprint(git_bp, url_prefix='/api/git') - app.register_blueprint(data_bp, url_prefix='/api/data') - 
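-    # The legacy importer stays mounted at /api/import while the rewritten
-    # importer package is exposed in parallel at /api/v2/import.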
app.register_blueprint(importarr_bp, url_prefix='/api/import') - app.register_blueprint(new_import_bp, url_prefix='/api/v2/import') - app.register_blueprint(arr_bp, url_prefix='/api/arr') - app.register_blueprint(tasks_bp, url_prefix='/api/tasks') - app.register_blueprint(media_management_bp) - - # Initialize middleware - logger.info("Initializing middleware") - init_middleware(app) - - # Add settings route - @app.route('/api/settings', methods=['GET']) - def handle_settings(): - settings = get_settings() - return jsonify(settings), 200 - - logger.info("Flask application creation completed") - return app - - -if __name__ == '__main__': - app = create_app() - app.run(debug=True, host='0.0.0.0') diff --git a/frontend/src/api/data.js b/frontend/src/api/data.js deleted file mode 100644 index 526f670..0000000 --- a/frontend/src/api/data.js +++ /dev/null @@ -1,305 +0,0 @@ -import axios from 'axios'; - -const BASE_URL = '/api/data'; - -// Define all special endpoints that could conflict with resource names -const SPECIAL_ENDPOINTS = [ - 'test', - 'validate', - 'search', - 'batch', - 'export', - 'import', - 'stats', - 'metrics', - 'health', - 'status', - 'config', - 'settings', - 'logs', - 'audit', - 'backup', - 'restore', - 'sync', - 'preview', - 'publish', - 'deploy', - 'run', - 'execute', - 'process', - 'analyze', - 'verify', - 'check' -]; - -// Define characters and patterns that could cause routing issues with descriptive messages -const UNSAFE_PATTERNS = [ - {pattern: /[\/\\]/, message: 'Cannot contain forward or backward slashes'}, - { - pattern: /[<>:"|?*]/, - message: 'Cannot contain special characters (<, >, :, ", |, ?, *)' - }, - { - pattern: /^\.+/, - message: 'Cannot start with dots (prevents relative paths)' - }, - {pattern: /\.+$/, message: 'Cannot end with dots'}, - {pattern: /^-/, message: 'Cannot start with a dash'}, - {pattern: /-$/, message: 'Cannot end with a dash'}, - { - pattern: /--|__|\.\./, - message: 'Cannot contain consecutive dashes, underscores, or dots' - } -]; - -// Comprehensive name validation with specific error messages -const validateResourceName = (category, name) => { - // Basic type check - if (!name) { - throw new Error(`${category} name cannot be empty`); - } - - if (typeof name !== 'string') { - throw new Error( - `${category} name must be a string, received ${typeof name}` - ); - } - - // Length check - if (name.length < 1) { - throw new Error(`${category} name must be at least 1 character long`); - } - - if (name.length > 64) { - throw new Error( - `${category} name cannot exceed 64 characters (current length: ${name.length})` - ); - } - - // Check for special endpoints - if (SPECIAL_ENDPOINTS.includes(name.toLowerCase())) { - throw new Error( - `'${name}' is a reserved word and cannot be used as a ${category} name. 
Reserved words: ${SPECIAL_ENDPOINTS.join( - ', ' - )}` - ); - } - - // Check for unsafe patterns with specific messages - for (const {pattern, message} of UNSAFE_PATTERNS) { - if (pattern.test(name)) { - throw new Error(`Invalid ${category} name '${name}': ${message}`); - } - } - - return true; -}; - -// Validate entire path with specific error messages -const validatePath = parts => { - if (!Array.isArray(parts)) { - throw new Error( - `Path must be an array of segments, received ${typeof parts}` - ); - } - - if (parts.length === 0) { - throw new Error('Path cannot be empty'); - } - - const joinedPath = parts.join('/'); - - // Check for path traversal attempts - if (joinedPath.includes('..')) { - throw new Error( - 'Invalid path: Contains parent directory reference (..)' - ); - } - - if (joinedPath.includes('./')) { - throw new Error( - 'Invalid path: Contains current directory reference (./)' - ); - } - - if (joinedPath.includes('/.')) { - throw new Error( - 'Invalid path: Contains hidden directory reference (/.)' - ); - } - - // Check for invalid start/end - if (joinedPath.startsWith('/')) { - throw new Error('Invalid path: Cannot start with a separator (/)'); - } - - if (joinedPath.endsWith('/')) { - throw new Error('Invalid path: Cannot end with a separator (/)'); - } - - // Validate each path segment - parts.forEach((segment, index) => { - if (typeof segment !== 'string') { - throw new Error( - `Path segment at position ${index} must be a string, received ${typeof segment}` - ); - } - - if (segment.length === 0) { - throw new Error( - `Path segment at position ${index} cannot be empty` - ); - } - }); - - return true; -}; - -// Enhanced error handler with more specific messages -const handleError = (error, operation) => { - console.error(`Error during ${operation}:`, error); - - if (error instanceof Error) { - throw error; - } - - if (error.response?.data) { - const errorData = error.response.data; - const message = - errorData.error || - errorData.message || - errorData.detail || - (typeof errorData === 'string' ? 
errorData : null); - - if (message) { - throw new Error(`${operation} failed: ${message}`); - } - } - - // Include HTTP status in generic error if available - if (error.response?.status) { - throw new Error( - `Failed to ${operation} (HTTP ${error.response.status})` - ); - } - - throw new Error(`Failed to ${operation}: Unknown error occurred`); -}; - -// Get all items for a category -export const getAllItems = async category => { - try { - validateResourceName('category', category); - validatePath([category]); - const response = await axios.get(`${BASE_URL}/${category}`); - return response.data; - } catch (error) { - throw handleError(error, `fetch ${category} items`); - } -}; - -// Get single item -export const getItem = async (category, name) => { - try { - validateResourceName('category', category); - validateResourceName(category, name); - validatePath([category, name]); - const response = await axios.get(`${BASE_URL}/${category}/${name}`); - return response.data; - } catch (error) { - throw handleError(error, `fetch ${category} item ${name}`); - } -}; - -// Create new item -export const createItem = async (category, data) => { - try { - validateResourceName('category', category); - validateResourceName(category, data.name); - validatePath([category, data.name]); - const response = await axios.post( - `${BASE_URL}/${category}/${data.name}`, - data - ); - return response.data; - } catch (error) { - throw handleError(error, `create ${category} item`); - } -}; - -// Update existing item -export const updateItem = async (category, name, data, newName) => { - try { - validateResourceName('category', category); - validateResourceName(category, name); - if (newName) { - validateResourceName(category, newName); - validatePath([category, newName]); - } - validatePath([category, name]); - const response = await axios.put(`${BASE_URL}/${category}/${name}`, { - ...data, - ...(newName && {rename: newName}) - }); - return response.data; - } catch (error) { - throw handleError(error, `update ${category} item ${name}`); - } -}; - -// Delete item -export const deleteItem = async (category, name) => { - try { - validateResourceName('category', category); - validateResourceName(category, name); - validatePath([category, name]); - const response = await axios.delete(`${BASE_URL}/${category}/${name}`); - return response.data; - } catch (error) { - throw handleError(error, `delete ${category} item ${name}`); - } -}; - -// Special endpoint wrapper -const createSpecialEndpoint = (category, endpoint) => async data => { - try { - validateResourceName('category', category); - validatePath([category, endpoint]); - console.log(`Sending data to ${endpoint}:`, data); - const response = await axios.post( - `${BASE_URL}/${category}/${endpoint}`, - data - ); - return response.data; - } catch (error) { - throw handleError(error, `execute ${category} ${endpoint}`); - } -}; - -export const Profiles = { - getAll: () => getAllItems('profile'), - get: name => getItem('profile', name), - create: data => createItem('profile', data), - update: (name, data, newName) => updateItem('profile', name, data, newName), - delete: name => deleteItem('profile', name) -}; - -export const CustomFormats = { - getAll: () => getAllItems('custom_format'), - get: name => getItem('custom_format', name), - create: data => createItem('custom_format', data), - update: (name, data, newName) => - updateItem('custom_format', name, data, newName), - delete: name => deleteItem('custom_format', name), - runTests: createSpecialEndpoint('custom_format', 
'test') -}; - -export const RegexPatterns = { - getAll: () => getAllItems('regex_pattern'), - get: name => getItem('regex_pattern', name), - create: data => createItem('regex_pattern', data), - update: (name, data, newName) => - updateItem('regex_pattern', name, data, newName), - delete: name => deleteItem('regex_pattern', name), - runTests: createSpecialEndpoint('regex_pattern', 'test') -}; diff --git a/frontend/src/components/format/FormatCard.jsx b/frontend/src/components/format/FormatCard.jsx deleted file mode 100644 index 506cb43..0000000 --- a/frontend/src/components/format/FormatCard.jsx +++ /dev/null @@ -1,280 +0,0 @@ -import React, {useState} from 'react'; -import PropTypes from 'prop-types'; -import {Copy, Check, FlaskConical, FileText, ListFilter} from 'lucide-react'; -import Tooltip from '@ui/Tooltip'; -import ReactMarkdown from 'react-markdown'; - -function FormatCard({ - format, - onEdit, - onClone, - sortBy, - isSelectionMode, - isSelected, - willBeSelected, - onSelect -}) { - const [showDescription, setShowDescription] = useState(() => { - const saved = localStorage.getItem(`format-view-${format.file_name}`); - return saved !== null ? JSON.parse(saved) : true; - }); - const {content} = format; - const totalTests = content.tests?.length || 0; - const passedTests = content.tests?.filter(t => t.passes)?.length || 0; - const passRate = Math.round((passedTests / totalTests) * 100) || 0; - - const getConditionStyle = condition => { - if (condition.negate) { - return 'bg-red-500/20 text-red-400 border border-red-500/20'; - } - if (condition.required) { - return 'bg-green-500/20 text-green-400 border border-green-500/20'; - } - return 'bg-blue-500/20 text-blue-400 border border-blue-500/20'; - }; - - const handleClick = e => { - if (isSelectionMode) { - onSelect(e); - } else { - onEdit(); - } - }; - - const handleCloneClick = e => { - e.stopPropagation(); - onClone(format); - }; - - const handleViewToggle = e => { - e.stopPropagation(); - setShowDescription(prev => { - const newState = !prev; - localStorage.setItem( - `format-view-${format.file_name}`, - JSON.stringify(newState) - ); - return newState; - }); - }; - - const handleMouseDown = e => { - if (e.shiftKey) { - e.preventDefault(); - } - }; - - return ( -
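-        /* Layout: header with name, tags and view/clone/selection controls,
-           a sliding content area (markdown description or condition chips),
-           and a footer with the test pass-rate badge and modified date. */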
-        <div onClick={handleClick} onMouseDown={handleMouseDown}>
-            {/* Header Section */}
-            <div>
-                <div>
-                    <h3>{content.name}</h3>
-                    <div>
-                        {content.tags?.map(tag => (
-                            <span key={tag}>{tag}</span>
-                        ))}
-                    </div>
-                </div>
-                <div>
-                    <Tooltip
-                        content={
-                            showDescription
-                                ? 'Show conditions'
-                                : 'Show description'
-                        }>
-                        <button onClick={handleViewToggle}>
-                            {showDescription ? (
-                                <ListFilter size={16} />
-                            ) : (
-                                <FileText size={16} />
-                            )}
-                        </button>
-                    </Tooltip>
-                    {!isSelectionMode && (
-                        <Tooltip content='Clone format'>
-                            <button onClick={handleCloneClick}>
-                                <Copy size={16} />
-                            </button>
-                        </Tooltip>
-                    )}
-                    {isSelectionMode && (
-                        <div>
-                            {isSelected && <Check size={16} />}
-                            {willBeSelected && !isSelected && <div />}
-                        </div>
-                    )}
-                </div>
-            </div>
-
-            {/* Content Area with Slide Animation */}
-            <div>
-                {showDescription ? (
-                    <div>
-                        {/* Description */}
-                        {content.description ? (
-                            <ReactMarkdown>
-                                {content.description}
-                            </ReactMarkdown>
-                        ) : (
-                            <span>No description provided</span>
-                        )}
-                    </div>
-                ) : (
-                    <div>
-                        {/* Conditions */}
-                        {content.conditions?.map((condition, index) => (
-                            <span
-                                key={index}
-                                className={getConditionStyle(condition)}>
-                                {condition.name}
-                            </span>
-                        ))}
-                    </div>
-                )}
-            </div>
-
-            {/* Footer - Tests */}
-            <div>
-                {totalTests > 0 ? (
-                    <div
-                        className={
-                            passRate === 100
-                                ? 'bg-green-500/10 text-green-400'
-                                : passRate >= 80
-                                ? 'bg-yellow-500/10 text-yellow-400'
-                                : 'bg-red-500/10 text-red-400'
-                        }>
-                        <FlaskConical size={14} />
-                        <span>
-                            {passedTests}/{totalTests} passing
-                        </span>
-                    </div>
-                ) : (
-                    <div>
-                        <FlaskConical size={14} />
-                        <span>No tests</span>
-                    </div>
-                )}
-                {sortBy === 'dateModified' && format.modified_date && (
-                    <span>
-                        Modified:{' '}
-                        {new Date(format.modified_date).toLocaleString()}
-                    </span>
-                )}
-            </div>
-        </div>
- ); -} - -FormatCard.propTypes = { - format: PropTypes.shape({ - file_name: PropTypes.string.isRequired, - modified_date: PropTypes.string.isRequired, - content: PropTypes.shape({ - name: PropTypes.string.isRequired, - description: PropTypes.string, - conditions: PropTypes.arrayOf( - PropTypes.shape({ - name: PropTypes.string.isRequired, - type: PropTypes.string.isRequired, - pattern: PropTypes.string, - required: PropTypes.bool, - negate: PropTypes.bool - }) - ), - tags: PropTypes.arrayOf(PropTypes.string), - tests: PropTypes.arrayOf( - PropTypes.shape({ - id: PropTypes.number.isRequired, - input: PropTypes.string.isRequired, - expected: PropTypes.bool.isRequired, - passes: PropTypes.bool.isRequired - }) - ) - }).isRequired - }).isRequired, - onEdit: PropTypes.func.isRequired, - onClone: PropTypes.func.isRequired, - sortBy: PropTypes.string.isRequired, - isSelectionMode: PropTypes.bool.isRequired, - isSelected: PropTypes.bool.isRequired, - willBeSelected: PropTypes.bool, - onSelect: PropTypes.func.isRequired -}; - -export default FormatCard; diff --git a/frontend/src/components/regex/AddUnitTestModal.jsx b/frontend/src/components/regex/AddUnitTestModal.jsx deleted file mode 100644 index 4efe4cd..0000000 --- a/frontend/src/components/regex/AddUnitTestModal.jsx +++ /dev/null @@ -1,138 +0,0 @@ -// AddUnitTestModal.jsx -import React, {useState, useEffect} from 'react'; -import PropTypes from 'prop-types'; -import Modal from '../ui/Modal'; - -const AddUnitTestModal = ({isOpen, onClose, onAdd, tests, editTest = null}) => { - const [input, setInput] = useState(''); - const [shouldMatch, setShouldMatch] = useState(true); - - // Reset form when opening modal, handling both new and edit cases - useEffect(() => { - if (isOpen) { - if (editTest) { - setInput(editTest.input); - setShouldMatch(editTest.expected); - } else { - setInput(''); - setShouldMatch(true); - } - } - }, [isOpen, editTest]); - - const handleSubmit = () => { - const getNextTestId = testArray => { - if (!testArray || testArray.length === 0) return 1; - return Math.max(...testArray.map(test => test.id)) + 1; - }; - - const testData = { - id: editTest ? editTest.id : getNextTestId(tests), - input, - expected: shouldMatch, - passes: false, - lastRun: null - }; - - onAdd(testData); - handleClose(); - }; - - const handleClose = () => { - setInput(''); - setShouldMatch(true); - onClose(); - }; - - return ( - - - -
-        <Modal
-            isOpen={isOpen}
-            onClose={handleClose}
-            title={editTest ? 'Edit Unit Test' : 'Add Unit Test'}
-            footer={
-                <>
-                    <button onClick={handleClose}>Cancel</button>
-                    <button onClick={handleSubmit}>
-                        {editTest ? 'Save Test' : 'Add Test'}
-                    </button>
-                </>
-            }>
-            {/* Rest of the modal content remains the same */}
-            <div>
-                <div>
-                    <label>Test Input</label>
-                    <input
-                        type='text'
-                        value={input}
-                        onChange={e => setInput(e.target.value)}
-                        className='w-full px-3 py-2 border border-gray-300 dark:border-gray-600
-                            rounded-md bg-white dark:bg-gray-700 text-gray-900 dark:text-gray-100
-                            placeholder-gray-500 dark:placeholder-gray-400
-                            focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent'
-                        placeholder='Enter string to test against pattern...'
-                        autoFocus
-                    />
-                </div>
-
-                <div>
-                    <label>
-                        <input
-                            type='checkbox'
-                            checked={shouldMatch}
-                            onChange={e => setShouldMatch(e.target.checked)}
-                        />{' '}
-                        Pattern should match this input
-                    </label>
-                </div>
-            </div>
-        </Modal>
- - ); -}; - -AddUnitTestModal.propTypes = { - isOpen: PropTypes.bool.isRequired, - onClose: PropTypes.func.isRequired, - onAdd: PropTypes.func.isRequired, - tests: PropTypes.array.isRequired, - editTest: PropTypes.shape({ - id: PropTypes.number.isRequired, - input: PropTypes.string.isRequired, - expected: PropTypes.bool.isRequired, - passes: PropTypes.bool.isRequired, - lastRun: PropTypes.string - }) -}; - -export default AddUnitTestModal; diff --git a/frontend/src/components/regex/RegexCard.jsx b/frontend/src/components/regex/RegexCard.jsx deleted file mode 100644 index 1fde0ac..0000000 --- a/frontend/src/components/regex/RegexCard.jsx +++ /dev/null @@ -1,215 +0,0 @@ -import React from 'react'; -import PropTypes from 'prop-types'; -import {Copy, Check, FlaskConical} from 'lucide-react'; -import Tooltip from '@ui/Tooltip'; -import ReactMarkdown from 'react-markdown'; - -const RegexCard = ({ - pattern, - onEdit, - onClone, - formatDate, - sortBy, - isSelectionMode, - isSelected, - willBeSelected, - onSelect -}) => { - const totalTests = pattern.tests?.length || 0; - const passedTests = pattern.tests?.filter(t => t.passes)?.length || 0; - const passRate = - totalTests > 0 ? Math.round((passedTests / totalTests) * 100) : 0; - - const handleClick = e => { - if (isSelectionMode) { - onSelect(e); - } else { - onEdit(); - } - }; - - const handleCloneClick = e => { - e.stopPropagation(); - onClone(pattern); - }; - - const handleMouseDown = e => { - if (e.shiftKey) { - e.preventDefault(); - } - }; - - const getTestColor = () => { - if (totalTests === 0) return 'text-gray-400'; - if (passRate === 100) return 'text-green-400'; - if (passRate >= 80) return 'text-yellow-400'; - return 'text-red-400'; - }; - - return ( -
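-        /* Mirrors FormatCard: header with name, tags and clone/selection
-           controls, a monospace pattern preview, an optional markdown
-           description, and a test pass-rate footer. */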
-        <div onClick={handleClick} onMouseDown={handleMouseDown}>
-            {/* Header Section */}
-            <div>
-                <div>
-                    <h3>{pattern.name}</h3>
-                    {pattern.tags && pattern.tags.length > 0 && (
-                        <div>
-                            {pattern.tags.map(tag => (
-                                <span key={tag}>{tag}</span>
-                            ))}
-                        </div>
-                    )}
-                </div>
-                <div>
-                    {isSelectionMode ? (
-                        <div>
-                            {isSelected && <Check size={16} />}
-                            {willBeSelected && !isSelected && <div />}
-                        </div>
-                    ) : (
-                        <Tooltip content='Clone pattern'>
-                            <button onClick={handleCloneClick}>
-                                <Copy size={16} />
-                            </button>
-                        </Tooltip>
-                    )}
-                </div>
-            </div>
-
-            {/* Pattern Display */}
-            <code>{pattern.pattern}</code>
-
-            {/* Description and Footer Section */}
-            <div>
-                {pattern.description && (
-                    <ReactMarkdown>{pattern.description}</ReactMarkdown>
-                )}
-                <div>
-                    {totalTests > 0 ? (
-                        <div
-                            className={
-                                passRate === 100
-                                    ? 'bg-green-500/10 text-green-400'
-                                    : passRate >= 80
-                                    ? 'bg-yellow-500/10 text-yellow-400'
-                                    : 'bg-red-500/10 text-red-400'
-                            }>
-                            <FlaskConical size={14} />
-                            <span>
-                                {passedTests}/{totalTests} passing
-                            </span>
-                        </div>
-                    ) : (
-                        <div>
-                            <FlaskConical size={14} className={getTestColor()} />
-                            <span>No tests</span>
-                        </div>
-                    )}
-                    {sortBy === 'dateModified' && pattern.modified_date && (
-                        <span>
-                            Modified {formatDate(pattern.modified_date)}
-                        </span>
-                    )}
-                </div>
-            </div>
-        </div>
- ); -}; - -RegexCard.propTypes = { - pattern: PropTypes.shape({ - name: PropTypes.string.isRequired, - pattern: PropTypes.string.isRequired, - description: PropTypes.string, - tags: PropTypes.arrayOf(PropTypes.string), - tests: PropTypes.arrayOf( - PropTypes.shape({ - input: PropTypes.string.isRequired, - expected: PropTypes.bool.isRequired, - passes: PropTypes.bool.isRequired - }) - ), - modified_date: PropTypes.string - }).isRequired, - onEdit: PropTypes.func.isRequired, - onClone: PropTypes.func.isRequired, - formatDate: PropTypes.func.isRequired, - sortBy: PropTypes.string.isRequired, - isSelectionMode: PropTypes.bool.isRequired, - isSelected: PropTypes.bool.isRequired, - willBeSelected: PropTypes.bool, - onSelect: PropTypes.func.isRequired -}; - -export default RegexCard; diff --git a/frontend/src/components/regex/RegexGeneralTab.jsx b/frontend/src/components/regex/RegexGeneralTab.jsx deleted file mode 100644 index b18db9f..0000000 --- a/frontend/src/components/regex/RegexGeneralTab.jsx +++ /dev/null @@ -1,215 +0,0 @@ -import React, {useState} from 'react'; -import PropTypes from 'prop-types'; -import MarkdownEditor from '@ui/MarkdownEditor'; -import AddButton from '@ui/DataBar/AddButton'; -import {InfoIcon} from 'lucide-react'; - -const RegexGeneralTab = ({ - name, - description, - pattern, - tags, - onNameChange, - onDescriptionChange, - onPatternChange, - onAddTag, - onRemoveTag, - error, - patternError -}) => { - const [newTag, setNewTag] = useState(''); - - const handleAddTag = () => { - if (newTag.trim() && !tags.includes(newTag.trim())) { - onAddTag(newTag.trim()); - setNewTag(''); - } - }; - - const handleKeyPress = e => { - if (e.key === 'Enter') { - e.preventDefault(); - handleAddTag(); - } - }; - - return ( -
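-        /* Form tab: optional error banner, then name, markdown description
-           and PCRE2 pattern fields, each with a label and helper text; tag
-           management (newTag/handleAddTag) sits further down the form. */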
-        <div>
-            {error && (
-                <div>
-                    <InfoIcon size={16} />
-                    <span>{error}</span>
-                </div>
-            )}
-
-            {/* Name Input */}
-            <div>
-                <label>Name</label>
-                <p>Give your regex pattern a descriptive name</p>
-                <input
-                    type='text'
-                    value={name}
-                    onChange={e => onNameChange(e.target.value)}
-                    placeholder='Enter pattern name'
-                    className='w-full rounded-md border border-gray-300 dark:border-gray-600
-                        bg-gray-50 dark:bg-gray-800 px-3 py-2 text-sm
-                        text-gray-900 dark:text-gray-100
-                        placeholder-gray-500 dark:placeholder-gray-400
-                        focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent
-                        transition-colors duration-200'
-                />
-            </div>
-
-            {/* Description */}
-            <div>
-                <label>Description</label>
-                <p>
-                    Describe what this pattern matches. Use markdown to format
-                    your description.
-                </p>
-                <MarkdownEditor
-                    value={description}
-                    onChange={e => onDescriptionChange(e.target.value)}
-                    placeholder='Describe what this pattern matches...'
-                />
-            </div>
-
-            {/* Pattern Input */}
-            <div>
-                <div>
-                    <label>Pattern</label>
-                    <span>Case insensitive PCRE2</span>
-                </div>
-                <p>Enter your regular expression pattern</p>
-                <input
-                    type='text'
-                    value={pattern}
-                    onChange={e => onPatternChange(e.target.value)}
-                    placeholder='Enter regex pattern'
-                />
-                {patternError && (
-                    <div>
-                        <span>{patternError}</span>
-                    </div>
-                )}
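For reference, the deleted `compiler.py` above maps each YAML condition onto an Arr `specifications` entry. A minimal, self-contained sketch of the `release_title` branch of that mapping (the pattern table, names, and values here are illustrative stand-ins, not the project's real data):

```python
from typing import Optional

# Stand-in for the repository's regex_patterns store (illustrative only).
PATTERNS = {"x265": r"\bx265\b"}


def compile_release_title(condition: dict) -> Optional[dict]:
    """Mirror the release_title branch of _compile_condition: resolve the
    named pattern and emit a ReleaseTitleSpecification with one value field."""
    pattern = PATTERNS.get(condition.get("pattern"))
    if pattern is None:
        return None  # compiler.py logs a warning and skips unknown patterns
    return {
        "name": condition.get("name", ""),
        "negate": condition.get("negate", False),
        "required": condition.get("required", False),
        "implementation": "ReleaseTitleSpecification",
        "fields": [{"name": "value", "value": pattern}],
    }


if __name__ == "__main__":
    print(compile_release_title({
        "name": "x265",
        "type": "release_title",
        "pattern": "x265",
        "required": True,
    }))
```

The real `_compile_condition` resolves patterns from the repository's `regex_patterns` files and handles the other condition types (source, resolution, language, size, and so on) with the same specification shape.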