chore(setup): remove remaining files

This commit is contained in:
Sam Chau
2025-10-18 00:09:59 +10:30
parent 0622046e53
commit 462f0ced94
27 changed files with 0 additions and 4741 deletions

View File

@@ -1,21 +0,0 @@
# Dockerfile
# Release image: serves the pre-built Profilarr backend + static frontend
# with gunicorn on port 6868.
FROM python:3.9-slim
WORKDIR /app
# Install git and gosu for user switching
RUN apt-get update && apt-get install -y git gosu && rm -rf /var/lib/apt/lists/*
# Copy pre-built files from dist directory
COPY dist/backend/app ./app
COPY dist/static ./app/static
COPY dist/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy and setup entrypoint script
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# OCI image metadata
LABEL org.opencontainers.image.authors="Dictionarry dictionarry@pm.me"
LABEL org.opencontainers.image.description="Profilarr - Profile manager for *arr apps"
LABEL org.opencontainers.image.source="https://github.com/Dictionarry-Hub/profilarr"
LABEL org.opencontainers.image.title="Profilarr"
LABEL org.opencontainers.image.version="beta"
EXPOSE 6868
# entrypoint.sh runs first (presumably handles the gosu user switch - see script), then execs CMD
ENTRYPOINT ["/entrypoint.sh"]
# gunicorn with a 600 s (10-minute) worker timeout, app built via the create_app() factory
CMD ["gunicorn", "--bind", "0.0.0.0:6868", "--timeout", "600", "app.main:create_app()"]

View File

@@ -1,69 +0,0 @@
# Profilarr
[![GitHub release](https://img.shields.io/github/v/release/Dictionarry-Hub/profilarr?color=blue)](https://github.com/Dictionarry-Hub/profilarr/releases)
[![Docker Pulls](https://img.shields.io/docker/pulls/santiagosayshey/profilarr?color=blue)](https://hub.docker.com/r/santiagosayshey/profilarr)
[![License](https://img.shields.io/github/license/Dictionarry-Hub/profilarr?color=blue)](https://github.com/Dictionarry-Hub/profilarr/blob/main/LICENSE)
[![Website](https://img.shields.io/badge/Website-dictionarry.dev-blue)](https://dictionarry.dev/)
[![Discord](https://img.shields.io/discord/1202375791556431892?color=blue&logo=discord&logoColor=white)](https://discord.com/invite/Y9TYP6jeYZ)
[![Buy Me A Coffee](https://img.shields.io/badge/Buy%20Me%20A%20Coffee-Support-blue?logo=buy-me-a-coffee)](https://www.buymeacoffee.com/santiagosayshey)
[![GitHub Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Support-blue?logo=github-sponsors)](https://github.com/sponsors/Dictionarry-Hub)
Configuration management tool for Radarr/Sonarr that automates importing and version control of custom formats and quality profiles.
![Profilarr Preview](.github/images/preview.png)
## Features
- 🔄 Automatic synchronization with remote configuration databases
- 🎯 Direct import to Radarr/Sonarr instances
- 🔧 Git-based version control of your configurations
- ⚡ Preserve local customizations during updates
- 🛠️ Built-in conflict resolution
## Getting Started
### Compatibility
| Architecture | Support |
| ------------------------------ | ------------ |
| amd64 (x86_64) | ✅ Supported |
| arm64 (Apple Silicon, RPi 4/5) | ✅ Supported |
### Quick Installation (Docker Compose)
```yaml
services:
profilarr:
image: santiagosayshey/profilarr:latest # Use :beta for early access to new features
container_name: profilarr
ports:
- 6868:6868
volumes:
- /path/to/your/data:/config # Replace with your actual path
environment:
- TZ=UTC # Set your timezone
restart: unless-stopped
```
After deployment, access the web UI at `http://[address]:6868` to begin setup.
> **Note for Windows users:** The database is case-sensitive. Use a docker volume or the WSL file system to avoid issues:
>
> - Docker volume example: `profilarr_data:/config`
> - WSL filesystem example: `/home/username/docker/profilarr:/config`
### Complete Documentation
Visit our comprehensive documentation at [dictionarry.dev](https://dictionarry.dev/profilarr-setup/installation) for detailed installation instructions and usage guides.
## Status
Currently in beta. Part of the [Dictionarry](https://github.com/Dictionarry-Hub) project to simplify media automation.
### Known Issues
- https://github.com/Dictionarry-Hub/profilarr/issues
### Personal Note
Profilarr is maintained in their spare time by a single CS student with no formal development experience. Development happens when time allows, which may affect response times for fixes and new features. The project is continuously improving, and your patience, understanding, and contributions are greatly appreciated as Profilarr grows and matures.

View File

@@ -1,7 +0,0 @@
# Simple image: installs deps and runs the module directly.
FROM python:3.9
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Runs the Flask app via python -m (NOTE: unlike the release image, this does
# NOT use gunicorn - the previous comment claiming a gunicorn timeout was stale)
CMD ["python", "-m", "app.main"]

View File

@@ -1,288 +0,0 @@
# Flask blueprint exposing CRUD + test endpoints for the YAML-backed data
# categories (custom formats, regex patterns, quality profiles).
from flask import Blueprint, request, jsonify
import logging
import os
import yaml
from .utils import (get_category_directory, load_yaml_file, validate,
                    save_yaml_file, update_yaml_file, get_file_modified_date,
                    test_regex_pattern, test_format_conditions,
                    check_delete_constraints, filename_to_display)
from ..db import add_format_to_renames, remove_format_from_renames, is_format_in_renames

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

bp = Blueprint('data', __name__)
@bp.route('/<string:category>', methods=['GET'])
def retrieve_all(category):
    """List every YAML item in the given category.

    Each entry carries the file name, parsed content and modification date;
    files that fail to parse are reported inline with an error entry.
    Custom formats additionally get an ``includeInRename`` metadata flag.
    """
    try:
        directory = get_category_directory(category)
        yaml_files = [entry for entry in os.listdir(directory)
                      if entry.endswith('.yml')]
        logger.debug(f"Found {len(yaml_files)} files in {category}")
        if not yaml_files:
            return jsonify([]), 200
        items = []
        parse_failures = 0
        for yaml_file in yaml_files:
            path = os.path.join(directory, yaml_file)
            try:
                content = load_yaml_file(path)
                if category == 'custom_format':
                    # Custom formats expose whether they participate in renames.
                    content['metadata'] = {
                        'includeInRename':
                        is_format_in_renames(content['name'])
                    }
                items.append({
                    "file_name": yaml_file,
                    "content": content,
                    "modified_date": get_file_modified_date(path)
                })
            except yaml.YAMLError:
                parse_failures += 1
                items.append({
                    "file_name": yaml_file,
                    "error": "Failed to parse YAML"
                })
        logger.info(
            f"Processed {len(yaml_files)} {category} files ({parse_failures} errors)")
        return jsonify(items), 200
    except ValueError as ve:
        # Unknown category.
        logger.error(ve)
        return jsonify({"error": str(ve)}), 400
    except FileNotFoundError as fnfe:
        # Category directory missing on disk.
        logger.error(fnfe)
        return jsonify({"error": str(fnfe)}), 404
    except Exception as e:
        logger.exception("Unexpected error occurred")
        return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<string:category>/<string:name>',
          methods=['GET', 'POST', 'PUT', 'DELETE'])
def handle_item(category, name):
    """CRUD endpoint for a single YAML item in a category.

    GET returns the parsed file (plus rename metadata for custom formats);
    POST creates a new file (409 if it exists); PUT updates or renames an
    existing file; DELETE removes it after reference/constraint checks.
    """
    try:
        directory = get_category_directory(category)
        # Item names map 1:1 onto .yml files in the category directory.
        file_name = f"{name}.yml" if not name.endswith('.yml') else name
        file_path = os.path.join(directory, file_name)
        if request.method == 'GET':
            try:
                content = load_yaml_file(file_path)
                # Add metadata for custom formats
                if category == 'custom_format':
                    content['metadata'] = {
                        'includeInRename':
                        is_format_in_renames(content['name'])
                    }
                return jsonify({
                    "file_name": file_name,
                    "content": content,
                    "modified_date": get_file_modified_date(file_path)
                }), 200
            except FileNotFoundError:
                return jsonify({"error": f"File {file_name} not found"}), 404
            except yaml.YAMLError:
                return jsonify(
                    {"error": f"Failed to parse YAML file {file_name}"}), 500
        elif request.method == 'DELETE':
            if not os.path.exists(file_path):
                return jsonify({"error": f"File {file_name} not found"}), 404
            # Check for references before deleting
            can_delete, error_message = check_delete_constraints(
                category, filename_to_display(name))
            if not can_delete:
                logger.error(
                    f"Delete constraint check failed for {name}: {error_message}"
                )
                # 409: the item is still referenced elsewhere (or protected).
                return jsonify({"error": error_message}), 409
            try:
                # If it's a custom format, remove from renames table first
                if category == 'custom_format':
                    # Get the format name from the file before deleting it
                    content = load_yaml_file(file_path)
                    format_name = content.get('name')
                    if format_name:
                        # Check if it exists in renames before trying to remove
                        if is_format_in_renames(format_name):
                            remove_format_from_renames(format_name)
                            logger.info(
                                f"Removed {format_name} from renames table")
                        else:
                            logger.info(
                                f"{format_name} was not in renames table")
                # Then delete the file
                os.remove(file_path)
                return jsonify(
                    {"message": f"Successfully deleted {file_name}"}), 200
            except OSError as e:
                logger.error(f"Error deleting file {file_path}: {e}")
                return jsonify({"error": f"Failed to delete {file_name}"}), 500
        elif request.method == 'POST':
            # If a file already exists with that name, conflict
            if os.path.exists(file_path):
                return jsonify({"error":
                                f"File {file_name} already exists"}), 409
            try:
                data = request.get_json()
                if data and 'name' in data:
                    data['name'] = data['name'].strip()
                # Handle rename inclusion for custom formats
                if category == 'custom_format':
                    include_in_rename = data.get('metadata', {}).get(
                        'includeInRename', False)
                # Remove metadata before saving YAML (it is UI state, not content)
                if 'metadata' in data:
                    del data['metadata']
                if validate(data, category):
                    # Save YAML
                    save_yaml_file(file_path, data, category)
                    # If custom format, handle rename table
                    if category == 'custom_format' and include_in_rename:
                        add_format_to_renames(data['name'])
                    return jsonify(
                        {"message": f"Successfully created {file_name}"}), 201
                return jsonify({"error": "Validation failed"}), 400
            except Exception as e:
                logger.error(f"Error creating file: {e}")
                return jsonify({"error": str(e)}), 500
        elif request.method == 'PUT':
            if not os.path.exists(file_path):
                return jsonify({"error": f"File {file_name} not found"}), 404
            try:
                data = request.get_json()
                logger.info(f"Received PUT data for {name}: {data}")
                if data and 'name' in data:
                    data['name'] = data['name'].strip()
                if data and 'rename' in data:
                    data['rename'] = data['rename'].strip()
                # Handle rename inclusion for custom formats
                if category == 'custom_format':
                    include_in_rename = data.get('metadata', {}).get(
                        'includeInRename', False)
                    # Get current content to check for rename
                    current_content = load_yaml_file(file_path)
                    old_name = current_content.get('name')
                    new_name = data['name']
                    # Handle renames and toggles
                    if old_name != new_name and include_in_rename:
                        # Handle rename while keeping in table
                        remove_format_from_renames(old_name)
                        add_format_to_renames(new_name)
                    elif include_in_rename:
                        # Just turning it on
                        add_format_to_renames(new_name)
                    else:
                        # Turning it off
                        remove_format_from_renames(data['name'])
                # Remove metadata before saving YAML
                if 'metadata' in data:
                    del data['metadata']
                # Save YAML
                # NOTE(review): unlike POST, PUT does not call validate()
                # before saving - confirm this is intentional.
                update_yaml_file(file_path, data, category)
                return jsonify(
                    {"message": f"Successfully updated {file_name}"}), 200
            except Exception as e:
                logger.error(f"Error updating file: {e}")
                return jsonify({"error": str(e)}), 500
    except ValueError as ve:
        logger.error(ve)
        return jsonify({"error": str(ve)}), 400
    except Exception as e:
        logger.exception("Unexpected error occurred")
        return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<string:category>/test', methods=['POST'])
def run_tests(category):
    """Run user-supplied test cases against a regex pattern or a custom
    format's conditions.

    Expects a JSON body with a non-empty ``tests`` list plus either
    ``pattern`` (for ``regex_pattern``) or ``conditions`` (for
    ``custom_format``). Returns the updated test results on success,
    or an error message with the appropriate status code.
    """
    logger.info(f"Received test request for category: {category}")
    try:
        data = request.get_json()
        if not data:
            logger.warning("Rejected test request - no JSON data provided")
            return jsonify({"error": "No JSON data provided"}), 400
        tests = data.get('tests', [])
        if not tests:
            logger.warning("Rejected test request - no test cases provided")
            return jsonify({"error":
                            "At least one test case is required"}), 400
        if category == 'regex_pattern':
            pattern = data.get('pattern')
            logger.info(f"Processing regex test request - Pattern: {pattern}")
            if not pattern:
                logger.warning("Rejected test request - missing pattern")
                return jsonify({"error": "Pattern is required"}), 400
            success, message, updated_tests = test_regex_pattern(
                pattern, tests)
        elif category == 'custom_format':
            conditions = data.get('conditions', [])
            logger.info(
                f"Processing format test request - Conditions: {len(conditions)}"
            )
            if not conditions:
                logger.warning(
                    "Rejected test request - no conditions provided")
                return jsonify({"error":
                                "At least one condition is required"}), 400
            success, message, updated_tests = test_format_conditions(
                conditions, tests)
        else:
            logger.warning(
                f"Rejected test request - invalid category: {category}")
            return jsonify(
                {"error": "Testing not supported for this category"}), 400
        logger.info(f"Test execution completed - Success: {success}")
        if not success:
            logger.warning(f"Test execution failed - {message}")
            return jsonify({"success": False, "message": message}), 400
        return jsonify({"success": True, "tests": updated_tests}), 200
    except Exception as e:
        # Unexpected failures are genuine errors, not warnings
        # (previously logged at WARNING, hiding them from error monitoring).
        logger.error(f"Unexpected error in test endpoint: {str(e)}",
                     exc_info=True)
        return jsonify({"success": False, "message": str(e)}), 500

View File

@@ -1,725 +0,0 @@
import os
import yaml
import shutil
import logging
from datetime import datetime
from typing import Dict, List, Any, Tuple, Union
import git
import regex

from ..db.queries.arr import update_arr_config_on_rename, update_arr_config_on_delete

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

from ..config.config import config

# Directory constants
REPO_PATH = config.DB_DIR
REGEX_DIR = config.REGEX_DIR
FORMAT_DIR = config.FORMAT_DIR
PROFILE_DIR = config.PROFILE_DIR

# Expected fields for each category
REGEX_FIELDS = ["name", "pattern", "description", "tags", "tests"]
FORMAT_FIELDS = ["name", "description", "tags", "conditions", "tests"]
PROFILE_FIELDS = [
    "name",
    "description",
    "tags",
    "upgradesAllowed",
    "minCustomFormatScore",
    "upgradeUntilScore",
    "minScoreIncrement",
    "custom_formats",  # Array of {name, score} objects (backwards compatible)
    "custom_formats_radarr",  # Array of {name, score} objects for radarr-specific scores
    "custom_formats_sonarr",  # Array of {name, score} objects for sonarr-specific scores
    "qualities",  # Array of strings
    "upgrade_until",
    "language"
]

# Category mappings: category name -> (directory, required fields)
CATEGORY_MAP = {
    "custom_format": (FORMAT_DIR, FORMAT_FIELDS),
    "regex_pattern": (REGEX_DIR, REGEX_FIELDS),
    "profile": (PROFILE_DIR, PROFILE_FIELDS)
}
def display_to_filename(name: str) -> str:
    """Map a display name's square brackets to parentheses and append '.yml'."""
    safe_stem = name.translate(str.maketrans('[]', '()'))
    return safe_stem + '.yml'
def filename_to_display(filename: str) -> str:
    """Strip a trailing '.yml' and map parentheses back to square brackets."""
    stem = filename[:-4] if filename.endswith('.yml') else filename
    return stem.translate(str.maketrans('()', '[]'))
def _setup_yaml_quotes():
    """Configure YAML dumping so string values are single-quoted.

    Registers the representer on ``yaml.SafeDumper`` because this module
    writes files with ``yaml.safe_dump`` (see save_yaml_file). The previous
    call registered on the default ``Dumper``, which ``safe_dump`` never
    uses, so the quoting style silently had no effect.
    """
    def str_presenter(dumper, data):
        return dumper.represent_scalar('tag:yaml.org,2002:str',
                                       data,
                                       style="'")

    yaml.add_representer(str, str_presenter, Dumper=yaml.SafeDumper)
def get_file_modified_date(file_path: str) -> str:
    """Return the file's last-modified time as an ISO-8601 string.

    Returns None (and logs the error) if the file cannot be stat'ed.
    """
    try:
        mtime = os.stat(file_path).st_mtime
        return datetime.fromtimestamp(mtime).isoformat()
    except Exception as e:
        logger.error(f"Error getting modified date for {file_path}: {e}")
        return None
def get_category_directory(category: str) -> str:
    """Resolve a category name to its on-disk directory.

    Raises ValueError for an unknown category and FileNotFoundError when
    the mapped directory does not exist.
    """
    if category not in CATEGORY_MAP:
        logger.error(f"Invalid category requested: {category}")
        raise ValueError(f"Invalid category: {category}")
    directory = CATEGORY_MAP[category][0]
    if not os.path.exists(directory):
        logger.error(f"Directory not found: {directory}")
        raise FileNotFoundError(f"Directory not found: {directory}")
    return directory
def load_yaml_file(file_path: str) -> Dict[str, Any]:
    """Read and parse a YAML file.

    Square brackets in the path are normalised to parentheses first, since
    files are stored on disk with the parenthesised naming scheme.
    Raises FileNotFoundError or yaml.YAMLError on failure.
    """
    normalized = file_path.replace('[', '(').replace(']', ')')
    if not os.path.exists(normalized):
        logger.error(f"File not found: {normalized}")
        raise FileNotFoundError(f"File not found: {normalized}")
    try:
        with open(normalized, 'r') as handle:
            return yaml.safe_load(handle)
    except yaml.YAMLError as e:
        logger.error(f"Error parsing YAML file {normalized}: {e}")
        raise
    except Exception as e:
        logger.error(f"Unexpected error reading file {normalized}: {e}")
        raise
def validate(data: Dict[str, Any], category: str) -> bool:
    """Return True when ``data`` is a dict carrying every field the
    category requires (per CATEGORY_MAP)."""
    if not isinstance(data, dict):
        return False
    required = CATEGORY_MAP[category][1]
    for field in required:
        if field not in data:
            return False
    return True
def save_yaml_file(file_path: str,
                   data: Dict[str, Any],
                   category: str,
                   use_data_name: bool = True) -> None:
    """
    Save YAML data to a file.

    Args:
        file_path: The path where the file should be saved
        data: The data to save
        category: The category of data
        use_data_name: If True, derive the filename from data['name'];
            if False, write to file_path exactly as given.

    Raises:
        ValueError: if ``data`` fails category validation.
    """
    if not validate(data, category):
        raise ValueError("Invalid data format")
    if use_data_name:
        target_path = os.path.join(os.path.dirname(file_path),
                                   display_to_filename(data['name']))
    else:
        target_path = file_path
    # Write keys in the category's canonical field order.
    _, fields = CATEGORY_MAP[category]
    ordered = {field: data[field] for field in fields}
    _setup_yaml_quotes()
    with open(target_path, 'w') as handle:
        yaml.safe_dump(ordered, handle, sort_keys=False)
def update_yaml_file(file_path: str, data: Dict[str, Any],
                     category: str) -> None:
    """Update (and optionally rename) an existing YAML item.

    If ``data`` contains a ``rename`` key, references in sibling files and
    arr configs are updated first, then the file content is saved and the
    file is renamed through git (so history follows the rename). Otherwise
    the file is overwritten in place with a backup/rollback guard.
    """
    try:
        # Check if this is a rename operation
        if 'rename' in data:
            new_name = data['rename']
            # Current display name, derived from the filename on disk.
            old_name = filename_to_display(os.path.basename(file_path)[:-4])
            directory = os.path.dirname(file_path)
            new_file_path = os.path.join(directory,
                                         display_to_filename(new_name))
            # Update references before performing the rename
            try:
                # Update regular references
                updated_files = update_references(category, old_name, new_name)
                logger.info(f"Updated references in: {updated_files}")
                # Update arr configs if this is a format or profile
                if category in ['custom_format', 'profile']:
                    arr_category = 'customFormats' if category == 'custom_format' else 'profiles'
                    updated_configs = update_arr_config_on_rename(
                        arr_category, old_name, new_name)
                    if updated_configs:
                        logger.info(
                            f"Updated arr configs for {category} rename: {updated_configs}"
                        )
            except Exception as e:
                logger.error(f"Failed to update references: {e}")
                raise Exception(f"Failed to update references: {str(e)}")
            # Remove rename field and update the name field in the data
            data_to_save = {k: v for k, v in data.items() if k != 'rename'}
            data_to_save['name'] = new_name
            repo = git.Repo(REPO_PATH)
            rel_old_path = os.path.relpath(file_path, REPO_PATH)
            rel_new_path = os.path.relpath(new_file_path, REPO_PATH)
            try:
                # First, save the content changes to the current file
                save_yaml_file(file_path,
                               data_to_save,
                               category,
                               use_data_name=False)
                # Stage the content changes first
                repo.index.add([rel_old_path])
                # Then perform the rename
                tracked_files = repo.git.ls_files().splitlines()
                is_tracked = rel_old_path in tracked_files
                if is_tracked:
                    # Use git mv for tracked files (git mv also stages the move)
                    repo.git.mv(rel_old_path, rel_new_path)
                else:
                    # For untracked files, manually move
                    os.rename(file_path, new_file_path)
                    # Stage the new file
                    repo.index.add([rel_new_path])
            except git.GitCommandError as e:
                logger.error(f"Git operation failed: {e}")
                raise Exception(f"Failed to rename file: {str(e)}")
            except OSError as e:
                logger.error(f"File operation failed: {e}")
                raise Exception(f"Failed to rename file: {str(e)}")
        else:
            # Normal update without rename: keep a .bak copy so a failed
            # save can be rolled back.
            backup_path = f"{file_path}.bak"
            shutil.copy2(file_path, backup_path)
            try:
                save_yaml_file(file_path, data, category)
                os.remove(backup_path)
            except Exception as e:
                # Restore the original file if the save failed.
                shutil.move(backup_path, file_path)
                raise
    except Exception as e:
        # Propagate to the caller; logging already happened on each path.
        raise
def check_delete_constraints(category: str, name: str) -> Tuple[bool, str]:
    """
    Check if deleting an item would break any references.

    Scans sibling files for references to ``name`` (regex patterns are
    looked up inside custom-format conditions; custom formats are looked
    up inside quality profiles) and blocks deletion of protected formats.
    Returns (can_delete, error_message) tuple.
    """
    try:
        # Protected custom formats that cannot be deleted
        PROTECTED_FORMATS = [
            "Not English", "Not Only English", "Not Only English (Missing)"
        ]
        # Convert the input name to use parentheses for comparison
        # (on-disk names use () instead of []).
        check_name = name.replace('[', '(').replace(']', ')')
        logger.debug(
            f"Checking constraints for {category}: {name} (normalized as {check_name})"
        )
        # Check protected formats first
        if category == 'custom_format' and check_name in [
                f.replace('[', '(').replace(']', ')')
                for f in PROTECTED_FORMATS
        ]:
            return False, "This format cannot be deleted as it's required for language processing functionality"
        references = []
        if category == 'regex_pattern':
            # Check all custom formats for references to this pattern
            format_dir = get_category_directory('custom_format')
            for format_file in os.listdir(format_dir):
                if not format_file.endswith('.yml'):
                    continue
                format_path = os.path.join(format_dir, format_file)
                try:
                    format_data = load_yaml_file(format_path)
                    # Check each condition in the format
                    for condition in format_data.get('conditions', []):
                        if condition['type'] in [
                                'release_title', 'release_group', 'edition'
                        ] and condition.get('pattern') == check_name:
                            references.append(
                                f"custom format: {format_data['name']}")
                except Exception as e:
                    # A broken sibling file should not abort the whole scan.
                    logger.error(
                        f"Error checking format file {format_file}: {e}")
                    continue
        elif category == 'custom_format':
            # Check all quality profiles for references to this format
            profile_dir = get_category_directory('profile')
            for profile_file in os.listdir(profile_dir):
                if not profile_file.endswith('.yml'):
                    continue
                profile_path = os.path.join(profile_dir, profile_file)
                try:
                    profile_data = load_yaml_file(profile_path)
                    # Check custom_formats (both/backwards compatible)
                    custom_formats = profile_data.get('custom_formats', [])
                    if isinstance(custom_formats, list):
                        for format_ref in custom_formats:
                            format_name = format_ref.get('name', '')
                            # Convert format name to use parentheses for comparison
                            format_name = format_name.replace('[', '(').replace(']', ')')
                            logger.debug(f"Comparing '{format_name}' with '{check_name}' in both")
                            if format_name == check_name:
                                references.append(f"quality profile: {profile_data['name']} (both)")
                    # Check custom_formats_radarr
                    custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
                    if isinstance(custom_formats_radarr, list):
                        for format_ref in custom_formats_radarr:
                            format_name = format_ref.get('name', '')
                            # Convert format name to use parentheses for comparison
                            format_name = format_name.replace('[', '(').replace(']', ')')
                            logger.debug(f"Comparing '{format_name}' with '{check_name}' in radarr")
                            if format_name == check_name:
                                references.append(f"quality profile: {profile_data['name']} (radarr)")
                    # Check custom_formats_sonarr
                    custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
                    if isinstance(custom_formats_sonarr, list):
                        for format_ref in custom_formats_sonarr:
                            format_name = format_ref.get('name', '')
                            # Convert format name to use parentheses for comparison
                            format_name = format_name.replace('[', '(').replace(']', ')')
                            logger.debug(f"Comparing '{format_name}' with '{check_name}' in sonarr")
                            if format_name == check_name:
                                references.append(f"quality profile: {profile_data['name']} (sonarr)")
                except Exception as e:
                    logger.error(f"Error checking profile file {profile_file}: {e}")
                    continue
        # Update arr configs for formats and profiles
        # NOTE(review): this mutates arr configs even when the delete is
        # subsequently blocked by the reference check below - confirm that
        # this side effect inside a "check" function is intentional.
        if category in ['custom_format', 'profile']:
            arr_category = 'customFormats' if category == 'custom_format' else 'profiles'
            updated_configs = update_arr_config_on_delete(arr_category, name)
            if updated_configs:
                logger.info(
                    f"Removed {name} from arr configs: {updated_configs}")
        if references:
            error_msg = f"Cannot delete - item is referenced in:\n" + "\n".join(
                f"- {ref}" for ref in references)
            logger.info(f"Found references for {name}: {error_msg}")
            return False, error_msg
        logger.info(f"No references found for {name}")
        return True, ""
    except Exception as e:
        logger.error(f"Error checking delete constraints: {e}")
        return False, f"Error checking references: {str(e)}"
def update_references(category: str, old_name: str,
                      new_name: str) -> List[str]:
    """
    Update references to a renamed item across all relevant files.

    Regex-pattern renames rewrite matching ``pattern`` fields in custom
    formats; custom-format renames rewrite matching entries in quality
    profiles (all three custom_formats lists).
    Returns a list of files that were updated.
    """
    updated_files = []
    try:
        # Convert names to use parentheses for comparison
        old_check_name = old_name.replace('[', '(').replace(']', ')')
        new_check_name = new_name.replace('[', '(').replace(']', ')')
        if category == 'regex_pattern':
            # Update references in custom formats
            format_dir = get_category_directory('custom_format')
            for format_file in os.listdir(format_dir):
                if not format_file.endswith('.yml'):
                    continue
                format_path = os.path.join(format_dir, format_file)
                try:
                    format_data = load_yaml_file(format_path)
                    updated = False
                    # Check and update each condition in the format
                    for condition in format_data.get('conditions', []):
                        if (condition['type'] in [
                                'release_title', 'release_group', 'edition'
                        ] and condition.get('pattern') == old_check_name):
                            condition['pattern'] = new_check_name
                            updated = True
                    if updated:
                        save_yaml_file(format_path,
                                       format_data,
                                       'custom_format',
                                       use_data_name=False)
                        updated_files.append(
                            f"custom format: {format_data['name']}")
                except Exception as e:
                    # Skip files that fail to load/save; keep updating the rest.
                    logger.error(
                        f"Error updating format file {format_file}: {e}")
                    continue
        elif category == 'custom_format':
            # Update references in quality profiles
            profile_dir = get_category_directory('profile')
            for profile_file in os.listdir(profile_dir):
                if not profile_file.endswith('.yml'):
                    continue
                profile_path = os.path.join(profile_dir, profile_file)
                try:
                    profile_data = load_yaml_file(profile_path)
                    updated = False
                    # Update custom_formats (both/backwards compatible)
                    custom_formats = profile_data.get('custom_formats', [])
                    if isinstance(custom_formats, list):
                        for format_ref in custom_formats:
                            format_name = format_ref.get('name', '')
                            # Convert format name to use parentheses for comparison
                            format_name = format_name.replace('[', '(').replace(']', ')')
                            if format_name == old_check_name:
                                # NOTE(review): profiles get the display form
                                # (new_name) while conditions above get the
                                # parenthesised form - confirm intended.
                                format_ref['name'] = new_name
                                updated = True
                    # Update custom_formats_radarr
                    custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
                    if isinstance(custom_formats_radarr, list):
                        for format_ref in custom_formats_radarr:
                            format_name = format_ref.get('name', '')
                            # Convert format name to use parentheses for comparison
                            format_name = format_name.replace('[', '(').replace(']', ')')
                            if format_name == old_check_name:
                                format_ref['name'] = new_name
                                updated = True
                    # Update custom_formats_sonarr
                    custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
                    if isinstance(custom_formats_sonarr, list):
                        for format_ref in custom_formats_sonarr:
                            format_name = format_ref.get('name', '')
                            # Convert format name to use parentheses for comparison
                            format_name = format_name.replace('[', '(').replace(']', ')')
                            if format_name == old_check_name:
                                format_ref['name'] = new_name
                                updated = True
                    if updated:
                        save_yaml_file(profile_path,
                                       profile_data,
                                       'profile',
                                       use_data_name=False)
                        updated_files.append(
                            f"quality profile: {profile_data['name']}")
                except Exception as e:
                    logger.error(
                        f"Error updating profile file {profile_file}: {e}")
                    continue
        return updated_files
    except Exception as e:
        logger.error(f"Error updating references: {e}")
        raise
def test_regex_pattern(
        pattern: str,
        tests: List[Dict[str, Any]]) -> Tuple[bool, str, List[Dict[str, Any]]]:
    """
    Test a regex pattern against a list of test cases using PCRE2 compatible engine.
    Returns match information along with test results.
    """
    logger.info(f"Starting regex pattern test - Pattern: {pattern}")
    try:
        # Compile once up front; an invalid pattern fails the whole run.
        try:
            compiled = regex.compile(pattern, regex.V1 | regex.IGNORECASE)
            logger.info(
                "Pattern compiled successfully with PCRE2 compatibility")
        except regex.error as e:
            logger.warning(f"Invalid regex pattern: {str(e)}")
            return False, f"Invalid regex pattern: {str(e)}", tests

        run_stamp = datetime.now().isoformat()
        logger.info(f"Processing {len(tests)} test cases")
        for case in tests:
            case_id = case.get('id', 'unknown')
            case_input = case.get('input', '')
            expected = case.get('expected', False)
            try:
                found = compiled.search(case_input)
                matches = found is not None
                # A case passes when the match outcome equals the expectation.
                case['passes'] = matches == expected
                case['lastRun'] = run_stamp
                if found:
                    case['matchedContent'] = found.group(0)
                    case['matchSpan'] = {
                        'start': found.start(),
                        'end': found.end()
                    }
                    # Record capture groups, if the pattern defines any.
                    case['matchedGroups'] = list(
                        found.groups()) if found.groups() else []
                else:
                    case['matchedContent'] = None
                    case['matchSpan'] = None
                    case['matchedGroups'] = []
                logger.info(
                    f"Test {case_id} {'passed' if case['passes'] else 'failed'} - Match: {matches}, Expected: {expected}"
                )
            except Exception as e:
                # Record the failure on the case and keep running the rest.
                logger.error(f"Error running test {case_id}: {str(e)}")
                case['passes'] = False
                case['lastRun'] = run_stamp
                case['matchedContent'] = None
                case['matchSpan'] = None
                case['matchedGroups'] = []

        passed = sum(1 for case in tests if case.get('passes', False))
        logger.info(
            f"Test execution complete - {passed}/{len(tests)} tests passed")
        return True, "", tests
    except Exception as e:
        logger.error(f"Unexpected error in test_regex_pattern: {str(e)}",
                     exc_info=True)
        return False, str(e), tests
def test_format_conditions(conditions: List[Dict],
                           tests: List[Dict]) -> Tuple[bool, str, List[Dict]]:
    """
    Test a set of format conditions against a list of test cases.
    Tests only pattern-based conditions (release_title, release_group, edition).

    Note: tracing that was previously emitted at ERROR level (flooding the
    error log on every run) has been demoted to DEBUG/INFO; only genuine
    failures are logged at ERROR.
    """
    logger.info(
        f"Starting format condition test - {len(conditions)} conditions")
    logger.debug(f"Received conditions: {conditions}")
    logger.debug(f"Received tests: {tests}")
    try:
        # First, load all regex patterns from the patterns directory
        patterns_dir = os.path.join(REPO_PATH, 'regex_patterns')
        pattern_map = {}
        logger.debug(f"Loading patterns from directory: {patterns_dir}")
        if not os.path.exists(patterns_dir):
            logger.error(f"Patterns directory not found: {patterns_dir}")
            return False, "Patterns directory not found", tests
        for pattern_file in os.listdir(patterns_dir):
            if pattern_file.endswith('.yml'):
                pattern_path = os.path.join(patterns_dir, pattern_file)
                try:
                    with open(pattern_path, 'r') as f:
                        pattern_data = yaml.safe_load(f)
                    if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
                        pattern_map[
                            pattern_data['name']] = pattern_data['pattern']
                        logger.debug(
                            f"Loaded pattern: {pattern_data['name']} = {pattern_data['pattern']}"
                        )
                except Exception as e:
                    logger.error(
                        f"Error loading pattern file {pattern_file}: {e}")
                    continue
        logger.debug(f"Total patterns loaded: {len(pattern_map)}")
        # Compile all regex patterns first
        compiled_patterns = {}
        for condition in conditions:
            if condition['type'] in [
                    'release_title', 'release_group', 'edition'
            ]:
                logger.debug(f"Processing condition: {condition}")
                try:
                    pattern_name = condition.get('pattern', '')
                    if pattern_name:
                        # Look up the actual pattern using the pattern name
                        actual_pattern = pattern_map.get(pattern_name)
                        if actual_pattern:
                            compiled_patterns[
                                condition['name']] = regex.compile(
                                    actual_pattern,
                                    regex.V1 | regex.IGNORECASE)
                            logger.debug(
                                f"Successfully compiled pattern for {condition['name']}: {actual_pattern}"
                            )
                        else:
                            logger.error(
                                f"Pattern not found for name: {pattern_name}")
                            return False, f"Pattern not found: {pattern_name}", tests
                except regex.error as e:
                    logger.error(
                        f"Invalid regex pattern in condition {condition['name']}: {str(e)}"
                    )
                    return False, f"Invalid regex pattern in condition {condition['name']}: {str(e)}", tests
        logger.debug(f"Total patterns compiled: {len(compiled_patterns)}")
        current_time = datetime.now().isoformat()
        # Process each test
        for test in tests:
            test_input = test.get('input', '')
            expected = test.get('expected', False)
            condition_results = []
            logger.debug(
                f"Processing test input: {test_input}, expected: {expected}")
            # Check each condition
            for condition in conditions:
                if condition['type'] not in [
                        'release_title', 'release_group', 'edition'
                ]:
                    logger.debug(
                        f"Skipping non-pattern condition: {condition['type']}")
                    continue
                pattern = compiled_patterns.get(condition['name'])
                if not pattern:
                    logger.error(
                        f"No compiled pattern found for condition: {condition['name']}"
                    )
                    continue
                # Test if pattern matches input
                matches = bool(pattern.search(test_input))
                logger.debug(
                    f"Condition {condition['name']} match result: {matches}")
                # Add result
                condition_results.append({
                    'name': condition['name'],
                    'type': condition['type'],
                    'pattern': condition.get('pattern', ''),
                    'required': condition.get('required', False),
                    'negate': condition.get('negate', False),
                    'matches': matches
                })
            # Determine if format applies
            format_applies = True
            # Check required conditions: every required condition must match
            # (or must NOT match, when negated).
            for result in condition_results:
                if result['required']:
                    logger.debug(
                        f"Checking required condition: {result['name']}, negate: {result['negate']}, matches: {result['matches']}"
                    )
                    if result['negate']:
                        if result['matches']:
                            format_applies = False
                            logger.debug(
                                f"Required negated condition {result['name']} matched - format does not apply"
                            )
                            break
                    else:
                        if not result['matches']:
                            format_applies = False
                            logger.debug(
                                f"Required condition {result['name']} did not match - format does not apply"
                            )
                            break
            # Check non-required conditions: a matching negated condition
            # still disqualifies the format.
            if format_applies:
                for result in condition_results:
                    if not result['required'] and result['negate'] and result[
                            'matches']:
                        format_applies = False
                        logger.debug(
                            f"Non-required negated condition {result['name']} matched - format does not apply"
                        )
                        break
            test['passes'] = format_applies == expected
            test['lastRun'] = current_time
            test['conditionResults'] = condition_results
            logger.debug(
                f"Test result - format_applies: {format_applies}, expected: {expected}, passes: {test['passes']}"
            )
        # Log final results
        passed_tests = sum(1 for test in tests if test.get('passes', False))
        logger.info(
            f"Final test results - {passed_tests}/{len(tests)} tests passed")
        logger.debug(f"Updated tests: {tests}")
        return True, "", tests
    except Exception as e:
        logger.error(f"Unexpected error in test_format_conditions: {str(e)}",
                     exc_info=True)
        return False, str(e), tests

View File

@@ -1,53 +0,0 @@
# git/branches/checkout.py
import git
import logging
from ...arr.manager import check_active_sync_configs
logger = logging.getLogger(__name__)
def checkout_branch(repo_path, branch_name):
    """Switch the repository at repo_path to branch_name.

    Refuses to run while any arr configuration is set to automatic sync.
    If the branch only exists on a remote, a local tracking branch is
    created for it.

    Args:
        repo_path: Path to the git repository.
        branch_name: Name of the branch to check out.

    Returns:
        tuple: (success: bool, payload: dict | str)
    """
    try:
        # An automatic sync firing mid-checkout could corrupt state, so
        # block the operation up front.
        is_blocked, sync_configs = check_active_sync_configs()
        if is_blocked:
            error_msg = (
                "Cannot checkout branch while automatic sync configurations are active.\n"
                "The following configurations must be set to manual sync first:\n"
            )
            for cfg in sync_configs:
                error_msg += f"- {cfg['name']} (ID: {cfg['id']}, {cfg['sync_method']} sync)\n"
            logger.error(error_msg)
            return False, {
                "error": error_msg,
                "code": "ACTIVE_SYNC_CONFIGS",
                "configs": sync_configs
            }

        logger.debug(f"Attempting to checkout branch: {branch_name}")
        repository = git.Repo(repo_path)

        if branch_name in repository.heads:
            # Branch already exists locally: plain checkout.
            repository.git.checkout(branch_name)
        else:
            # Search the remotes for the branch and create a local
            # tracking branch from the first match.
            tracking_ref = None
            for remote in repository.remotes:
                candidate = f"{remote.name}/{branch_name}"
                if candidate in repository.refs:
                    tracking_ref = candidate
                    break
            if tracking_ref is None:
                return False, f"Branch '{branch_name}' does not exist locally or in any remote."
            repository.git.checkout('-b', branch_name, tracking_ref)

        logger.debug(f"Successfully checked out branch: {branch_name}")
        return True, {
            "message": f"Checked out branch: {branch_name}",
            "current_branch": branch_name
        }
    except Exception as e:
        logger.error(f"Error checking out branch: {str(e)}", exc_info=True)
        return False, {"error": f"Error checking out branch: {str(e)}"}

View File

@@ -1,20 +0,0 @@
# git/operations/delete.py
import os
import logging
logger = logging.getLogger(__name__)
def delete_file(repo_path, file_path):
    """Delete a single file located under the repository root.

    Args:
        repo_path: Path to the git repository.
        file_path: File path relative to repo_path.

    Returns:
        tuple: (success: bool, message: str)
    """
    try:
        target = os.path.join(repo_path, file_path)
        if not os.path.exists(target):
            return False, "File does not exist."
        os.remove(target)
        return True, f"File {file_path} has been deleted."
    except Exception as e:
        logger.error(f"Error deleting file: {str(e)}", exc_info=True)
        return False, f"Error deleting file: {str(e)}"

View File

@@ -1,103 +0,0 @@
# git/operations/merge.py
import git
import logging
import os
from typing import Dict, Any
from ..status.status import GitStatusManager
logger = logging.getLogger(__name__)
def finalize_merge(repo) -> Dict[str, Any]:
    """
    Finalize a merge by committing all staged files after conflict resolution.

    Args:
        repo: git.Repo instance for the repository being merged.

    Returns:
        dict: {'success': True, 'message': ...} on success, otherwise
        {'success': False, 'error': ...}.
    """
    try:
        # MERGE_HEAD exists only while a merge is in progress; nothing to
        # finalize otherwise.
        if not os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD')):
            return {
                'success': False,
                'error': 'Not currently in a merge state'
            }
        # Get unmerged files.
        # Porcelain -z records look like "XY<space>path"; a 'U' in either
        # status column marks an unresolved path.
        unmerged_files = []
        status = repo.git.status('--porcelain', '-z').split('\0')
        for item in status:
            if item and len(item) >= 4:
                x, y, file_path = item[0], item[1], item[3:]
                if 'U' in (x, y):
                    unmerged_files.append(file_path)
        # Force update the index for unmerged files: reset drops the
        # conflict stages, then add restages the resolved worktree copy.
        for file_path in unmerged_files:
            # Remove from index first
            try:
                repo.git.execute(['git', 'reset', '--', file_path])
            except git.GitCommandError:
                pass  # best-effort: the path may already be unstaged
            # Add back to index
            try:
                repo.git.execute(['git', 'add', '--', file_path])
            except git.GitCommandError as e:
                logger.error(f"Error adding file {file_path}: {str(e)}")
                return {
                    'success': False,
                    'error': f"Failed to stage resolved file {file_path}"
                }
        # Create commit message
        commit_message = "Merge complete: resolved conflicts"
        # Commit
        try:
            repo.git.commit('-m', commit_message)
            logger.info("Successfully finalized merge")
            # Update remote status after merge so ahead/behind counts are
            # fresh in the UI.
            repo_path = repo.working_dir
            status_manager = GitStatusManager.get_instance(repo_path)
            if status_manager:
                status_manager.update_remote_status()
            return {'success': True, 'message': 'Merge completed successfully'}
        except git.GitCommandError as e:
            logger.error(f"Git command error during commit: {str(e)}")
            return {
                'success': False,
                'error': f"Failed to commit merge: {str(e)}"
            }
    except Exception as e:
        logger.error(f"Failed to finalize merge: {str(e)}")
        return {
            'success': False,
            'error': f"Failed to finalize merge: {str(e)}"
        }
def abort_merge(repo_path):
    """Abort an in-progress merge, falling back to a hard reset.

    Args:
        repo_path: Path to the git repository.

    Returns:
        tuple: (success: bool, message: str)
    """
    try:
        repository = git.Repo(repo_path)
        try:
            # Preferred path: let git unwind the merge state itself.
            repository.git.execute(['git', 'merge', '--abort'])
            return True, "Merge aborted successfully"
        except git.GitCommandError:
            logger.warning(
                "Error aborting merge with 'git merge --abort'. Trying 'git reset --hard'."
            )
        # Fallback: discard the working tree back to the last commit.
        try:
            repository.git.execute(['git', 'reset', '--hard'])
            return True, "Merge aborted and repository reset to the previous commit"
        except git.GitCommandError as reset_error:
            logger.exception(
                "Error resetting repository with 'git reset --hard'")
            return False, str(reset_error)
    except Exception as e:
        logger.exception("Unexpected error aborting merge")
        return False, str(e)

View File

@@ -1,65 +0,0 @@
# git/operations/pull.py
import git
import logging
from git import GitCommandError
from ..status.status import GitStatusManager
from ...arr.manager import get_pull_configs
from ...importer import handle_pull_import
logger = logging.getLogger(__name__)
def pull_branch(repo_path, branch_name):
    """Pull branch_name from origin and run 'on pull' arr imports.

    Args:
        repo_path: Path to the git repository.
        branch_name: Branch to pull.

    Returns:
        tuple: (success: bool, payload: str | dict).  A merge conflict
        returns True with a dict describing the conflict-resolution state.
    """
    try:
        repo = git.Repo(repo_path)
        # Refuse to pull over local edits; the merge would clobber them.
        if repo.is_dirty(untracked_files=True):
            return False, {
                'type': 'uncommitted_changes',
                'message':
                'Cannot pull: You have uncommitted local changes that would be lost',
                'details': 'Please commit or stash your changes before pulling'
            }
        # Fetch before merging so the remote refs are current.
        repo.remotes.origin.fetch()
        try:
            # Merge (not rebase) so any conflict state is surfaced to the UI.
            repo.git.pull('origin', branch_name, '--no-rebase')
            # Refresh the cached ahead/behind status after the merge.
            status_tracker = GitStatusManager.get_instance(repo_path)
            if status_tracker:
                status_tracker.update_remote_status()
            # "On pull" arr import: run the importer for every arr config
            # whose sync_method is "pull".
            pull_configs = get_pull_configs()
            logger.info(
                f"[Pull] Found {len(pull_configs)} ARR configs to import (sync_method='pull')"
            )
            for config in pull_configs:
                handle_pull_import(config['id'])
            return True, f"Successfully pulled changes for branch {branch_name}"
        except GitCommandError as e:
            if "CONFLICT" in str(e):
                return True, {
                    'state': 'resolve',
                    'type': 'merge_conflict',
                    'message':
                    'Repository is now in conflict resolution state. Please resolve conflicts to continue merge.',
                    'details': 'Please resolve conflicts to continue merge'
                }
            raise e
    except Exception as e:
        logger.error(f"Error pulling branch: {str(e)}", exc_info=True)
        return False, f"Error pulling branch: {str(e)}"

View File

@@ -1,333 +0,0 @@
import yaml
from git import GitCommandError
import logging
from typing import Dict, Any
import os
from copy import deepcopy
from ...data.utils import CATEGORY_MAP
logger = logging.getLogger(__name__)
def determine_type(file_path):
    """Classify a repository file path by the content folder it lives in.

    Returns one of 'Regex Pattern', 'Custom Format', 'Quality Profile',
    or 'Unknown' when no known folder name appears in the path.
    """
    folder_labels = (
        ('regex_patterns', 'Regex Pattern'),
        ('custom_formats', 'Custom Format'),
        ('profiles', 'Quality Profile'),
    )
    for folder, label in folder_labels:
        if folder in file_path:
            return label
    return 'Unknown'
def get_version_data(repo, ref, file_path):
    """Get YAML data from a specific version of a file.

    Args:
        repo: git.Repo instance.
        ref: Git ref (e.g. 'HEAD', 'MERGE_HEAD').
        file_path: Path relative to the repository root.

    Returns:
        Parsed YAML content, or None when the file does not exist at that
        ref or its content is empty.
    """
    try:
        blob = repo.git.show(f'{ref}:{file_path}')
    except GitCommandError:
        # The path does not exist at this ref.
        return None
    if not blob:
        return None
    return yaml.safe_load(blob)
def resolve_conflicts(
        repo, resolutions: Dict[str, Dict[str, str]]) -> Dict[str, Any]:
    """
    Resolve merge conflicts based on provided resolutions.

    Args:
        repo: git.Repo instance currently in a merge with conflicts.
        resolutions: Mapping of conflicted file path -> {field: choice},
            where choice is 'local' or 'incoming'.  Modify/delete
            conflicts use a single 'file' (or 'File') key instead of
            per-field entries.

    Returns:
        dict: {'success': True, 'results': ...} on success, otherwise
        {'success': False, 'error': ...}.  On any error, every file
        touched so far is rolled back to its pre-call content.
    """
    logger.debug(f"Received resolutions for files: {list(resolutions.keys())}")
    logger.debug(f"Full resolutions data: {resolutions}")
    # FIX: bind before the try.  The except handler iterates this dict for
    # rollback; previously a failure before the mid-try assignment (e.g. in
    # `git status`) raised NameError there, masking the original exception.
    initial_states = {}
    try:
        # Parse porcelain -z records ("XY<space>path") into conflicts.
        status = repo.git.status('--porcelain', '-z').split('\0')
        conflicts = []
        for item in status:
            if not item or len(item) < 4:
                continue
            x, y, file_path = item[0], item[1], item[3:]
            # Include modify/delete conflicts
            if 'U' in (x, y) or (x == 'D' and y == 'D') or (
                    x == 'D' and y == 'U') or (x == 'U' and y == 'D'):
                conflicts.append((file_path, x, y))
        # Track which files are modify/delete conflicts
        modify_delete_conflicts = {
            path: (x == 'D' and y == 'U') or (x == 'U' and y == 'D')
            for path, x, y in conflicts
        }
        # Validate resolutions are for actual conflicting files
        for file_path in resolutions:
            if file_path not in {path for path, _, _ in conflicts}:
                return {
                    'success': False,
                    'error': f"File not in conflict: {file_path}"
                }
        # Store initial states for rollback
        for file_path in resolutions:
            try:
                full_path = os.path.join(repo.working_dir, file_path)
                try:
                    with open(full_path, 'r') as f:
                        initial_states[file_path] = f.read()
                except FileNotFoundError:
                    # None marks "file did not exist before we started".
                    initial_states[file_path] = None
            except Exception as e:
                return {
                    'success': False,
                    'error': f"Couldn't read file {file_path}: {str(e)}"
                }
        results = {}
        for file_path, field_resolutions in resolutions.items():
            # Handle modify/delete conflicts differently
            if modify_delete_conflicts[file_path]:
                logger.debug(
                    f"Handling modify/delete conflict for {file_path}")
                logger.debug(f"Field resolutions for modify/delete: {field_resolutions}")
                # Get the existing version (either from HEAD or MERGE_HEAD)
                head_data = get_version_data(repo, 'HEAD', file_path)
                merge_head_data = get_version_data(repo, 'MERGE_HEAD',
                                                   file_path)
                # Determine which version exists
                is_deleted_in_head = head_data is None
                existing_data = merge_head_data if is_deleted_in_head else head_data
                logger.debug(f"Existing version data: {existing_data}")
                logger.debug(f"is_deleted_in_head: {is_deleted_in_head}")
                logger.debug(f"head_data: {head_data}")
                logger.debug(f"merge_head_data: {merge_head_data}")
                # Try both lowercase and capitalized versions of 'file'
                choice = field_resolutions.get('file') or field_resolutions.get('File')
                logger.debug(f"Resolution choice for file: {choice}")
                if not choice:
                    logger.error("No 'file' or 'File' resolution found in field_resolutions!")
                    logger.error(f"Available keys: {list(field_resolutions.keys())}")
                    raise Exception(
                        "No resolution provided for modify/delete conflict")
                full_path = os.path.join(repo.working_dir, file_path)
                if choice == 'local':
                    if is_deleted_in_head:
                        logger.debug(f"Keeping file deleted: {file_path}")
                        # File should stay deleted
                        try:
                            os.remove(full_path)
                        except FileNotFoundError:
                            pass  # File is already gone
                        repo.index.remove([file_path])
                    else:
                        logger.debug(f"Keeping local version: {file_path}")
                        # Keep our version
                        with open(full_path, 'w') as f:
                            yaml.safe_dump(head_data,
                                           f,
                                           default_flow_style=False)
                        repo.index.add([file_path])
                elif choice == 'incoming':
                    if is_deleted_in_head:
                        logger.debug(
                            f"Restoring from incoming version: {file_path}")
                        # Restore the file from MERGE_HEAD
                        with open(full_path, 'w') as f:
                            yaml.safe_dump(merge_head_data,
                                           f,
                                           default_flow_style=False)
                        repo.index.add([file_path])
                    else:
                        logger.debug(f"Accepting deletion: {file_path}")
                        # Accept the deletion
                        try:
                            os.remove(full_path)
                        except FileNotFoundError:
                            pass  # File is already gone
                        repo.index.remove([file_path])
                results[file_path] = {
                    'resolution':
                    choice,
                    'action':
                    'delete' if (choice == 'local' and is_deleted_in_head) or
                    (choice == 'incoming' and not is_deleted_in_head) else
                    'keep'
                }
            else:
                # Regular conflict resolution
                # Get all three versions
                base_data = get_version_data(repo, 'HEAD^', file_path)
                ours_data = get_version_data(repo, 'HEAD', file_path)
                theirs_data = get_version_data(repo, 'MERGE_HEAD', file_path)
                # For files that were previously involved in modify/delete conflicts
                # we may not be able to get all versions
                if not base_data or not ours_data or not theirs_data:
                    logger.warning(f"Couldn't get all versions of {file_path} - may have been previously resolved as a modify/delete conflict")
                    logger.warning(f"base_data: {base_data}, ours_data: {ours_data}, theirs_data: {theirs_data}")
                    # If it was previously resolved as "incoming" but ours_data is missing, use theirs_data
                    if not ours_data and theirs_data:
                        logger.info(f"Using incoming version for {file_path} as base for resolution")
                        ours_data = theirs_data
                    # If it was previously resolved as "local" but theirs_data is missing, use ours_data
                    elif ours_data and not theirs_data:
                        logger.info(f"Using local version for {file_path} as base for resolution")
                        theirs_data = ours_data
                    # If we can't recover either version, we can't proceed
                    else:
                        raise Exception(f"Couldn't get required versions of {file_path}")
                # Start with a deep copy of ours_data to preserve all fields
                resolved_data = deepcopy(ours_data)
                # Track changes
                kept_values = {}
                discarded_values = {}
                # Handle each resolution field
                for field, choice in field_resolutions.items():
                    if field.startswith('custom_format_'):
                        format_name = field[len('custom_format_'):]
                        ours_cf = next(
                            (item
                             for item in ours_data.get('custom_formats', [])
                             if item['name'] == format_name), None)
                        theirs_cf = next(
                            (item
                             for item in theirs_data.get('custom_formats', [])
                             if item['name'] == format_name), None)
                        if choice == 'local' and ours_cf:
                            resolved_cf = ours_cf
                            kept_values[field] = ours_cf
                            discarded_values[field] = theirs_cf
                        elif choice == 'incoming' and theirs_cf:
                            resolved_cf = theirs_cf
                            kept_values[field] = theirs_cf
                            discarded_values[field] = ours_cf
                        else:
                            raise Exception(
                                f"Invalid choice or missing custom format {format_name}"
                            )
                        # Replace in place, or append when the format was
                        # not present in our version at all.
                        resolved_cf_list = resolved_data.get(
                            'custom_formats', [])
                        for idx, item in enumerate(resolved_cf_list):
                            if item['name'] == format_name:
                                resolved_cf_list[idx] = resolved_cf
                                break
                        else:
                            resolved_cf_list.append(resolved_cf)
                        resolved_data['custom_formats'] = resolved_cf_list
                    elif field.startswith('tag_'):
                        tag_name = field[len('tag_'):]
                        current_tags = set(resolved_data.get('tags', []))
                        if choice == 'local':
                            if tag_name in ours_data.get('tags', []):
                                current_tags.add(tag_name)
                                kept_values[field] = 'local'
                                discarded_values[field] = 'incoming'
                            else:
                                current_tags.discard(tag_name)
                                kept_values[field] = 'none'
                                discarded_values[field] = 'incoming'
                        elif choice == 'incoming':
                            if tag_name in theirs_data.get('tags', []):
                                current_tags.add(tag_name)
                                kept_values[field] = 'incoming'
                                discarded_values[field] = 'local'
                            else:
                                current_tags.discard(tag_name)
                                kept_values[field] = 'none'
                                discarded_values[field] = 'local'
                        else:
                            raise Exception(
                                f"Invalid choice for tag field: {field}")
                        resolved_data['tags'] = sorted(current_tags)
                    else:
                        field_key = field
                        if choice == 'local':
                            resolved_data[field_key] = ours_data.get(field_key)
                            kept_values[field_key] = ours_data.get(field_key)
                            discarded_values[field_key] = theirs_data.get(
                                field_key)
                        elif choice == 'incoming':
                            resolved_data[field_key] = theirs_data.get(
                                field_key)
                            kept_values[field_key] = theirs_data.get(field_key)
                            discarded_values[field_key] = ours_data.get(
                                field_key)
                        else:
                            raise Exception(
                                f"Invalid choice for field: {field}")
                # Get file type and apply appropriate field ordering.
                # FIX: previously `fields` was unbound for an 'Unknown'
                # type (NameError) or silently leaked from the previous
                # loop iteration, applying the wrong category's ordering.
                file_type = determine_type(file_path)
                fields = None
                if file_type == 'Quality Profile':
                    _, fields = CATEGORY_MAP['profile']
                elif file_type == 'Custom Format':
                    _, fields = CATEGORY_MAP['custom_format']
                elif file_type == 'Regex Pattern':
                    _, fields = CATEGORY_MAP['regex_pattern']
                if fields is not None:
                    # Order the fields according to the category's field order
                    resolved_data = {
                        field: resolved_data.get(field)
                        for field in fields if field in resolved_data
                    }
                # Write resolved version
                full_path = os.path.join(repo.working_dir, file_path)
                with open(full_path, 'w') as f:
                    yaml.safe_dump(resolved_data, f, default_flow_style=False)
                # Stage the resolved file
                repo.index.add([file_path])
                results[file_path] = {
                    'kept_values': kept_values,
                    'discarded_values': discarded_values
                }
                logger.debug(
                    f"Successfully resolved regular conflict for {file_path}")
        logger.debug("==== Status after resolve_conflicts ====")
        status_output = repo.git.status('--porcelain', '-z').split('\0')
        for item in status_output:
            if item:
                logger.debug(f"File status: {item}")
        logger.debug("=======================================")
        return {'success': True, 'results': results}
    except Exception as e:
        # Rollback on any error: restore every file we captured, deleting
        # files that did not exist before the call.
        for file_path, initial_state in initial_states.items():
            try:
                full_path = os.path.join(repo.working_dir, file_path)
                if initial_state is None:
                    try:
                        os.remove(full_path)
                    except FileNotFoundError:
                        pass
                else:
                    with open(full_path, 'w') as f:
                        f.write(initial_state)
            except Exception as rollback_error:
                logger.error(
                    f"Failed to rollback {file_path}: {str(rollback_error)}")
        logger.error(f"Failed to resolve conflicts: {str(e)}")
        return {'success': False, 'error': str(e)}

View File

@@ -1,109 +0,0 @@
# git/operations/revert.py
import git
import os
import logging
logger = logging.getLogger(__name__)
def revert_file(repo_path, file_path):
    """
    Revert changes in a file, handling tracked files, staged deletions, and new files.

    Args:
        repo_path: Path to the git repository
        file_path: Path to the file to revert

    Returns:
        tuple: (success: bool, message: str)
    """
    try:
        repo = git.Repo(repo_path)
        file_absolute_path = os.path.join(repo_path, file_path)
        # Check if file is untracked (new): "reverting" a brand-new file
        # means deleting it from disk.
        untracked_files = repo.untracked_files
        is_untracked = any(f == file_path for f in untracked_files)
        if is_untracked:
            # For untracked files, we need to remove them
            try:
                os.remove(file_absolute_path)
                message = f"New file {file_path} has been removed."
            except FileNotFoundError:
                message = f"File {file_path} was already removed."
            return True, message
        # Check if file is staged for deletion.
        # NOTE(review): R=True reverses the index/HEAD diff so deletions
        # appear under a_path — confirm against GitPython docs.
        staged_deletions = repo.index.diff("HEAD", R=True)
        is_staged_for_deletion = any(d.a_path == file_path
                                     for d in staged_deletions)
        if is_staged_for_deletion:
            # Restore file staged for deletion: unstage, then re-materialize
            # the HEAD copy in the working tree.
            repo.git.reset("--", file_path)
            repo.git.checkout('HEAD', "--", file_path)
            message = f"File {file_path} has been restored and unstaged from deletion."
        else:
            # Regular revert for tracked files with changes: restore the
            # worktree copy, then drop any staged edits.
            repo.git.restore("--", file_path)
            repo.git.restore('--staged', "--", file_path)
            message = f"File {file_path} has been reverted."
        return True, message
    except git.exc.GitCommandError as e:
        error_msg = str(e)
        # Distinguish "file unknown to git" from other git failures.
        if "pathspec" in error_msg and "did not match any file(s) known to git" in error_msg:
            logger.error(f"File {file_path} not found in git repository")
            return False, f"File {file_path} not found in git repository"
        logger.error(f"Git error reverting file: {error_msg}", exc_info=True)
        return False, f"Git error reverting file: {error_msg}"
    except Exception as e:
        logger.error(f"Error reverting file: {str(e)}", exc_info=True)
        return False, f"Error reverting file: {str(e)}"
def revert_all(repo_path):
    """
    Revert all changes in the repository, including new files.

    Untracked files are deleted first (git restore won't touch them),
    then staged and unstaged changes are restored from the last commit.

    Args:
        repo_path: Path to the git repository

    Returns:
        tuple: (success: bool, message: str)
    """
    try:
        repo = git.Repo(repo_path)
        # Remove untracked (new) files first.
        untracked_files = repo.untracked_files
        for file_path in untracked_files:
            try:
                os.remove(os.path.join(repo_path, file_path))
            except FileNotFoundError:
                continue
            except Exception as e:
                logger.warning(
                    f"Could not remove untracked file {file_path}: {str(e)}")
        # Unstage everything, then restore the working tree from HEAD.
        repo.git.restore('--staged', '.')
        repo.git.restore('.')
        parts = ["All changes have been reverted to the last commit"]
        if untracked_files:
            parts.append(
                f" and {len(untracked_files)} new file(s) have been removed")
        parts.append(".")
        return True, "".join(parts)
    except git.exc.GitCommandError as e:
        logger.error(f"Git error reverting all changes: {str(e)}",
                     exc_info=True)
        return False, f"Git error reverting all changes: {str(e)}"
    except Exception as e:
        logger.error(f"Error reverting all changes: {str(e)}", exc_info=True)
        return False, f"Error reverting all changes: {str(e)}"

View File

@@ -1,156 +0,0 @@
# git/clone_repo.py
import os
import shutil
import logging
import yaml
from git.exc import GitCommandError
import git
from ..auth.authenticate import GitHubAuth
logger = logging.getLogger(__name__)
def clone_repository(repo_url, repo_path):
    """Clone (or initialize) a repository at repo_path, merging any existing local files.

    Flow: clone into a temp dir (retrying with PAT authentication for
    private repos, or initializing a fresh repo when the remote does not
    exist), back up any existing checkout, move the clone into place,
    then merge the backed-up YAML files back in under de-duplicated names.
    On failure the temp dir is removed and the backup is restored.

    Args:
        repo_url: URL of the git remote.
        repo_path: Destination directory for the working copy.

    Returns:
        tuple: (success: bool, message: str)
    """
    temp_dir = f"{repo_path}_temp"
    backup_dir = f"{repo_path}_backup"
    logger = logging.getLogger(__name__)  # shadows the module-level logger
    try:
        # Initial clone attempt
        logger.info(f"Starting clone operation for {repo_url}")
        try:
            # First try without authentication (for public repos)
            repo = git.Repo.clone_from(repo_url, temp_dir)
            logger.info("Repository clone successful")
        except GitCommandError as e:
            error_str = str(e)
            # If authentication error, try with token
            if "could not read Username" in error_str or "Authentication failed" in error_str:
                logger.info("Initial clone failed due to authentication. Trying with token...")
                try:
                    # Verify token availability
                    if not GitHubAuth.verify_token():
                        logger.error("Private repository requires GitHub authentication. Please configure PAT.")
                        return False, "This appears to be a private repository. Please configure PROFILARR_PAT environment variable."
                    # Get authenticated URL for private repositories
                    authenticated_url = GitHubAuth.get_authenticated_url(repo_url)
                    repo = git.Repo.clone_from(authenticated_url, temp_dir)
                    logger.info("Repository clone with authentication successful")
                except GitCommandError as auth_e:
                    logger.error(f"Clone with authentication failed: {str(auth_e)}")
                    return False, f"Failed to clone repository: {str(auth_e)}"
            # If repository not found, create new one
            elif "remote: Repository not found" in error_str:
                logger.info("Creating new repository - remote not found")
                repo = git.Repo.init(temp_dir)
                repo.create_remote('origin', repo_url)
            else:
                logger.error(f"Clone failed: {error_str}")
                return False, f"Failed to clone repository: {error_str}"
        # Check if repo is empty: head.reference raises ValueError when
        # there are no commits yet.
        try:
            repo.head.reference
        except ValueError:
            logger.info("Initializing empty repository with default structure")
            _initialize_empty_repo(repo)
        # Backup handling: keep the old checkout aside so its files can be
        # merged back in below.
        if os.path.exists(repo_path):
            logger.info("Creating backup of existing repository")
            shutil.move(repo_path, backup_dir)
        # Move repo to final location
        logger.info("Moving repository to final location")
        shutil.move(temp_dir, repo_path)
        # Process folders
        for folder_name in ['regex_patterns', 'custom_formats', 'profiles']:
            folder_path = os.path.join(repo_path, folder_name)
            backup_folder_path = os.path.join(backup_dir, folder_name)
            if not os.path.exists(folder_path):
                logger.debug(f"Creating folder: {folder_name}")
                os.makedirs(folder_path)
            # File merging process: track names already in the clone so
            # local files can be renamed instead of overwriting.
            cloned_files = set(
                f.replace('.yml', '') for f in os.listdir(folder_path)
                if f.endswith('.yml'))
            if os.path.exists(backup_folder_path):
                local_files = [
                    f for f in os.listdir(backup_folder_path)
                    if f.endswith('.yml')
                ]
                if local_files:
                    logger.info(
                        f"Merging {len(local_files)} files in {folder_name}")
                for file_name in local_files:
                    old_file_path = os.path.join(backup_folder_path, file_name)
                    with open(old_file_path, 'r') as file:
                        data = yaml.safe_load(file)
                    # NOTE(review): assumes every backed-up YAML file has a
                    # top-level 'name' key — confirm.
                    base_name = data['name']
                    new_name = base_name
                    counter = 1
                    # De-duplicate: "Name (1)", "Name (2)", ...
                    while new_name in cloned_files:
                        new_name = f"{base_name} ({counter})"
                        counter += 1
                    cloned_files.add(new_name)
                    new_file_path = os.path.join(folder_path,
                                                 f"{new_name}.yml")
                    with open(new_file_path, 'w') as file:
                        yaml.dump(data, file)
                    logger.debug(f"Merged file: {file_name}{new_name}.yml")
        # Cleanup
        if os.path.exists(backup_dir):
            logger.info("Removing backup directory")
            shutil.rmtree(backup_dir)
        logger.info("Clone operation completed successfully")
        return True, "Repository cloned and local files merged successfully"
    except Exception as e:
        # Roll back: drop the partial clone and restore the backup.
        logger.exception("Critical error during clone operation")
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
        if os.path.exists(backup_dir):
            shutil.move(backup_dir, repo_path)
        return False, f"Critical error: {str(e)}"
def _initialize_empty_repo(repo):
    """Seed a freshly-initialized repository with the expected layout.

    Creates the three content folders and a README, commits them, then
    pushes the new 'main' branch to origin.

    Args:
        repo: git.Repo instance for an empty (no-commit) repository that
            already has an 'origin' remote configured.
    """
    # Create basic folder structure
    for folder in ('regex_patterns', 'custom_formats', 'quality_profiles'):
        os.makedirs(os.path.join(repo.working_tree_dir, folder),
                    exist_ok=True)
    # Create a README file
    with open(os.path.join(repo.working_tree_dir, 'README.md'), 'w') as f:
        f.write(
            "# Profilarr Repository\n\nThis repository contains regex patterns, custom formats and quality profiles."
        )
    repo.git.add(A=True)
    repo.index.commit("Initial commit: Basic repository structure")
    # Newer git versions may already name the default branch 'main';
    # creating it again would raise, so guard the create_head call.
    if 'main' not in repo.heads:
        repo.create_head('main')
    repo.heads.main.checkout()
    origin = repo.remote(name='origin')
    # A single push suffices; the previous extra push('main:main') was a
    # redundant duplicate of push('main').
    origin.push('main')
    logger.info(
        "Initialized empty repository with basic structure and pushed to main"
    )

View File

@@ -1,74 +0,0 @@
# git/repo/unlink.py
import os
import shutil
import logging
from ...db import save_settings
from ...arr.manager import check_active_sync_configs
logger = logging.getLogger(__name__)
def unlink_repository(repo_path, remove_files=False):
    """Disconnect the local checkout from its git remote.

    Args:
        repo_path: Path to the git repository.
        remove_files: When True, delete everything under repo_path
            (including .git) and recreate the empty content folders;
            when False, remove only the .git folder and keep the files.

    Returns:
        tuple: (success: bool, message: str | dict)
    """
    try:
        # Check for active sync configurations first
        has_active_configs, configs = check_active_sync_configs()
        if has_active_configs:
            error_msg = (
                "Cannot unlink repository while automatic sync configurations are active.\n"
                "The following configurations must be set to manual sync first:\n"
            )
            for config in configs:
                error_msg += f"- {config['name']} (ID: {config['id']}, {config['sync_method']} sync)\n"
            logger.error(error_msg)
            return False, {
                "error": error_msg,
                "code": "ACTIVE_SYNC_CONFIGS",
                "configs": configs
            }
        logger.info(
            f"Starting unlink_repository with repo_path: {repo_path} and remove_files: {remove_files}"
        )
        # Check if repo_path exists
        if not os.path.exists(repo_path):
            logger.error(f"Path {repo_path} does not exist.")
            return False, f"Path {repo_path} does not exist."
        # Remove the .git folder and optionally the repo files
        if remove_files:
            logger.info(f"Removing all files in the repository at {repo_path}")
            # Delete only the top-level entries: rmtree already removes a
            # directory's contents.  The previous os.walk version kept
            # walking into trees it had just deleted and only worked
            # because os.walk silently skips unreadable directories.
            for entry in os.listdir(repo_path):
                entry_path = os.path.join(repo_path, entry)
                if os.path.isdir(entry_path) and not os.path.islink(
                        entry_path):
                    shutil.rmtree(entry_path)
                else:
                    os.remove(entry_path)
            logger.info(
                f"Successfully removed all files in the repository at {repo_path}"
            )
            # Recreate necessary folders
            required_dirs = ['custom_formats', 'profiles', 'regex_patterns']
            for dir_name in required_dirs:
                os.makedirs(os.path.join(repo_path, dir_name), exist_ok=True)
                logger.info(
                    f"Recreated the directory {dir_name} at {repo_path}")
        else:
            git_folder = os.path.join(repo_path, '.git')
            if os.path.exists(git_folder):
                logger.info(f"Removing .git folder at {git_folder}")
                shutil.rmtree(git_folder)
                logger.info(
                    f"Successfully removed .git folder at {git_folder}")
            else:
                logger.warning(f".git folder does not exist at {git_folder}")
        # Clear git settings
        save_settings({'gitRepo': None})
        logger.info("Updated settings to remove git information")
        return True, "Repository successfully unlinked"
    except Exception as e:
        logger.error(f"Error unlinking repository: {str(e)}", exc_info=True)
        return False, f"Error unlinking repository: {str(e)}"

View File

@@ -1,352 +0,0 @@
"""Compilation functions to transform YAML data to Arr API format."""
import logging
from typing import Dict, List, Any, Optional
from .mappings import TargetApp, ValueResolver
from .utils import load_regex_patterns
from ..db.queries.format_renames import is_format_in_renames
from ..db.queries.settings import get_language_import_score
from .logger import get_import_logger
logger = logging.getLogger(__name__)
# Cache patterns at module level to avoid reloading
_CACHED_PATTERNS = None
def get_cached_patterns():
    """Return the module-level regex pattern cache, loading it lazily.

    The patterns file is read at most once per process; subsequent calls
    return the cached mapping.
    """
    global _CACHED_PATTERNS
    if _CACHED_PATTERNS is not None:
        return _CACHED_PATTERNS
    _CACHED_PATTERNS = load_regex_patterns()
    return _CACHED_PATTERNS
def compile_format_to_api_structure(
    format_yaml: Dict[str, Any],
    arr_type: str
) -> Dict[str, Any]:
    """
    Compile a format from YAML to Arr API structure.

    Args:
        format_yaml: Format data from YAML file
        arr_type: 'radarr' or 'sonarr'

    Returns:
        Compiled format ready for API
    """
    is_radarr = arr_type.lower() == 'radarr'
    target_app = TargetApp.RADARR if is_radarr else TargetApp.SONARR
    patterns = get_cached_patterns()

    compiled = {'name': format_yaml.get('name', 'Unknown')}
    # Formats registered for renaming get the extra API flag.
    if is_format_in_renames(format_yaml.get('name', '')):
        compiled['includeCustomFormatWhenRenaming'] = True

    # Each YAML condition becomes one API specification; conditions that
    # fail to compile (unknown pattern/language/type) come back as None
    # and are dropped.
    compiled['specifications'] = [
        spec for spec in (
            _compile_condition(condition, patterns, target_app)
            for condition in format_yaml.get('conditions', [])
        ) if spec
    ]
    return compiled
def _compile_condition(
    condition: Dict[str, Any],
    patterns: Dict[str, str],
    target_app: TargetApp
) -> Optional[Dict[str, Any]]:
    """Compile a single condition to specification.

    Args:
        condition: One condition entry from the format YAML.
        patterns: Mapping of pattern name -> regex string.
        target_app: TargetApp.RADARR or TargetApp.SONARR.

    Returns:
        A specification dict ready for the Arr API, or None when the
        condition cannot be compiled (unknown pattern/language/type) or
        does not apply to the target app.
    """
    condition_type = condition.get('type')
    # Shared skeleton; each branch below fills implementation + fields.
    spec = {
        'name': condition.get('name', ''),
        'negate': condition.get('negate', False),
        'required': condition.get('required', False),
        'fields': []
    }
    if condition_type in ['release_title', 'release_group', 'edition']:
        # Regex-backed conditions: resolve the named pattern to its regex.
        pattern_name = condition.get('pattern')
        pattern = patterns.get(pattern_name)
        if not pattern:
            import_logger = get_import_logger()
            import_logger.warning(f"Pattern not found: {pattern_name}")
            return None
        spec['implementation'] = {
            'release_title': 'ReleaseTitleSpecification',
            'release_group': 'ReleaseGroupSpecification',
            'edition': 'EditionSpecification'
        }[condition_type]
        spec['fields'] = [{'name': 'value', 'value': pattern}]
    elif condition_type == 'source':
        spec['implementation'] = 'SourceSpecification'
        value = ValueResolver.get_source(condition.get('source'), target_app)
        spec['fields'] = [{'name': 'value', 'value': value}]
    elif condition_type == 'resolution':
        spec['implementation'] = 'ResolutionSpecification'
        value = ValueResolver.get_resolution(condition.get('resolution'))
        spec['fields'] = [{'name': 'value', 'value': value}]
    elif condition_type == 'indexer_flag':
        spec['implementation'] = 'IndexerFlagSpecification'
        value = ValueResolver.get_indexer_flag(condition.get('flag', ''), target_app)
        spec['fields'] = [{'name': 'value', 'value': value}]
    elif condition_type == 'quality_modifier':
        # Quality modifiers are a Radarr-only concept; skip for Sonarr.
        if target_app == TargetApp.SONARR:
            return None
        spec['implementation'] = 'QualityModifierSpecification'
        value = ValueResolver.get_quality_modifier(condition.get('qualityModifier'))
        spec['fields'] = [{'name': 'value', 'value': value}]
    elif condition_type == 'size':
        spec['implementation'] = 'SizeSpecification'
        spec['fields'] = [
            {'name': 'min', 'value': condition.get('minSize', 0)},
            {'name': 'max', 'value': condition.get('maxSize', 0)}
        ]
    elif condition_type == 'language':
        spec['implementation'] = 'LanguageSpecification'
        language_name = condition.get('language', '').lower()
        try:
            language_data = ValueResolver.get_language(language_name, target_app, for_profile=False)
            fields = [{'name': 'value', 'value': language_data['id']}]
            # Handle exceptLanguage field if present
            if 'exceptLanguage' in condition:
                except_value = condition['exceptLanguage']
                fields.append({
                    'name': 'exceptLanguage',
                    'value': except_value
                })
            spec['fields'] = fields
        except Exception:
            # Unknown language names are logged and skipped rather than
            # failing the whole format compilation.
            import_logger = get_import_logger()
            import_logger.warning(f"Language not found: {language_name}")
            return None
    elif condition_type == 'release_type':
        # Only supported in Sonarr
        if target_app == TargetApp.RADARR:
            return None
        spec['implementation'] = 'ReleaseTypeSpecification'
        value = ValueResolver.get_release_type(condition.get('releaseType'))
        spec['fields'] = [{'name': 'value', 'value': value}]
    elif condition_type == 'year':
        spec['implementation'] = 'YearSpecification'
        spec['fields'] = [
            {'name': 'min', 'value': condition.get('minYear', 0)},
            {'name': 'max', 'value': condition.get('maxYear', 0)}
        ]
    else:
        import_logger = get_import_logger()
        import_logger.warning(f"Unknown condition type: {condition_type}")
        return None
    return spec
def compile_profile_to_api_structure(
    profile_yaml: Dict[str, Any],
    arr_type: str
) -> Dict[str, Any]:
    """
    Compile a profile from YAML to Arr API structure.

    Args:
        profile_yaml: Profile data from YAML file
        arr_type: 'radarr' or 'sonarr'

    Returns:
        Compiled profile ready for API: quality 'items', 'language',
        upgrade flags, 'formatItems' (scores only, IDs synced later),
        and 'cutoff' when resolvable.
    """
    target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
    quality_mappings = ValueResolver.get_qualities(target_app)
    compiled = {
        'name': profile_yaml.get('name', 'Unknown')
    }
    # Build quality items - following the structure from the working compile/profile_compiler.py
    items = []
    cutoff_id = None
    used_qualities = set()         # upper-cased mapped names already emitted as enabled
    quality_ids_in_groups = set()  # quality IDs that live inside a group entry

    # Convert group IDs (negative to positive with offset).
    # YAML encodes groups with negative IDs; the Arr API needs positive
    # IDs, so groups are shifted into the 1000+ range.
    def convert_group_id(group_id: int) -> int:
        if group_id < 0:
            return 1000 + abs(group_id)
        return group_id

    # First pass: gather quality IDs in groups to avoid duplicates
    for quality_entry in profile_yaml.get('qualities', []):
        if isinstance(quality_entry, dict) and quality_entry.get('id', 0) < 0:
            # It's a group
            for q in quality_entry.get('qualities', []):
                if isinstance(q, dict):
                    q_name = q.get('name', '')
                    mapped_name = ValueResolver.get_quality_name(q_name, target_app)
                    if mapped_name in quality_mappings:
                        quality_ids_in_groups.add(quality_mappings[mapped_name]['id'])

    # Second pass: add groups and individual qualities
    for quality_entry in profile_yaml.get('qualities', []):
        if isinstance(quality_entry, dict):
            if quality_entry.get('id', 0) < 0:
                # It's a group
                group_id = convert_group_id(quality_entry.get('id', 0))
                group_item = {
                    'id': group_id,
                    'name': quality_entry.get('name', 'Group'),
                    'items': [],
                    'allowed': True
                }
                for q in quality_entry.get('qualities', []):
                    if isinstance(q, dict):
                        q_name = q.get('name', '')
                        mapped_name = ValueResolver.get_quality_name(q_name, target_app)
                        if mapped_name in quality_mappings:
                            group_item['items'].append({
                                'quality': quality_mappings[mapped_name].copy(),
                                'items': [],
                                'allowed': True
                            })
                            used_qualities.add(mapped_name.upper())
                # Only emit the group if at least one member quality resolved.
                if group_item['items']:
                    items.append(group_item)
            else:
                # Individual quality
                q_name = quality_entry.get('name', '')
                mapped_name = ValueResolver.get_quality_name(q_name, target_app)
                if mapped_name in quality_mappings:
                    items.append({
                        'quality': quality_mappings[mapped_name].copy(),
                        'items': [],
                        'allowed': True
                    })
                    used_qualities.add(mapped_name.upper())
        elif isinstance(quality_entry, str):
            # Simple quality name string
            mapped_name = ValueResolver.get_quality_name(quality_entry, target_app)
            if mapped_name in quality_mappings:
                items.append({
                    'quality': quality_mappings[mapped_name].copy(),
                    'items': [],
                    'allowed': True
                })
                used_qualities.add(mapped_name.upper())

    # Add all unused qualities as disabled (the Arr API expects every
    # known quality to appear in the profile, enabled or not).
    for quality_name, quality_data in quality_mappings.items():
        if (quality_name.upper() not in used_qualities and
                quality_data['id'] not in quality_ids_in_groups):
            items.append({
                'quality': quality_data.copy(),
                'items': [],
                'allowed': False
            })

    # Handle cutoff/upgrade_until: negative ID means "cut off at a group".
    if 'upgrade_until' in profile_yaml and isinstance(profile_yaml['upgrade_until'], dict):
        cutoff_id_raw = profile_yaml['upgrade_until'].get('id')
        cutoff_name = profile_yaml['upgrade_until'].get('name', '')
        mapped_cutoff_name = ValueResolver.get_quality_name(cutoff_name, target_app)
        if cutoff_id_raw and cutoff_id_raw < 0:
            cutoff_id = convert_group_id(cutoff_id_raw)
        elif mapped_cutoff_name in quality_mappings:
            cutoff_id = quality_mappings[mapped_cutoff_name]['id']

    # Handle language. Two YAML shapes: a plain language name ("simple"
    # mode) or "<behavior>_<language>" ("advanced" mode, e.g. "only_french").
    language = profile_yaml.get('language', 'any')
    if language != 'any' and '_' not in language:
        # Simple language mode
        try:
            language_data = ValueResolver.get_language(language, target_app, for_profile=True)
        except Exception:
            # Unknown language name: fall back to 'any' rather than fail.
            language_data = ValueResolver.get_language('any', target_app, for_profile=True)
    else:
        # Advanced mode or any
        language_data = ValueResolver.get_language('any', target_app, for_profile=True)

    # Build format items (without IDs, those get synced later)
    format_items = []
    # Add language-specific formats for advanced mode
    if language != 'any' and '_' in language:
        behavior, language_code = language.split('_', 1)
        # Get the score from database instead of hardcoding
        language_score = get_language_import_score()
        # Use proper capitalization for the language name
        lang_display = language_code.capitalize()
        # Handle behaviors: 'must' and 'only' (matching old working logic)
        if behavior in ['must', 'only']:
            # Add "Not [Language]" format with score from database
            not_language_name = f"Not {lang_display}"
            format_items.append({
                'name': not_language_name,
                'score': language_score
            })
            # For 'only' behavior, add additional formats
            if behavior == 'only':
                format_items.append({
                    'name': f"Not Only {lang_display}",
                    'score': language_score
                })
                format_items.append({
                    'name': f"Not Only {lang_display} (Missing)",
                    'score': language_score
                })

    # Main custom formats
    for cf in profile_yaml.get('custom_formats', []):
        format_items.append({
            'name': cf.get('name'),
            'score': cf.get('score', 0)
        })
    # App-specific custom formats (e.g. custom_formats_radarr)
    app_key = f'custom_formats_{arr_type.lower()}'
    for cf in profile_yaml.get(app_key, []):
        format_items.append({
            'name': cf.get('name'),
            'score': cf.get('score', 0)
        })

    # Reverse items to match expected order (Arr lists lowest quality first)
    items.reverse()
    compiled['items'] = items
    compiled['language'] = language_data
    # Note the YAML->API key renames below (upgradesAllowed -> upgradeAllowed, etc.)
    compiled['upgradeAllowed'] = profile_yaml.get('upgradesAllowed', True)
    compiled['minFormatScore'] = profile_yaml.get('minCustomFormatScore', 0)
    compiled['cutoffFormatScore'] = profile_yaml.get('upgradeUntilScore', 0)
    compiled['formatItems'] = format_items
    if cutoff_id is not None:
        compiled['cutoff'] = cutoff_id
    # Handle minUpgradeFormatScore with proper default (API rejects < 1)
    compiled['minUpgradeFormatScore'] = max(1, profile_yaml.get('minScoreIncrement', 1))
    return compiled

View File

@@ -1,132 +0,0 @@
"""Format import strategy."""
import logging
from typing import Dict, List, Any
from .base import ImportStrategy
from ..utils import load_yaml
from ..compiler import compile_format_to_api_structure
from ..logger import get_import_logger
logger = logging.getLogger(__name__)
class FormatStrategy(ImportStrategy):
    """Strategy for importing custom formats."""

    def compile(self, filenames: List[str]) -> Dict[str, Any]:
        """
        Compile format files to API-ready format.

        Args:
            filenames: List of format filenames (without .yml)

        Returns:
            Dictionary with 'formats' key containing compiled formats
        """
        formats = []
        failed = []
        import_logger = get_import_logger()
        # Don't try to predict - we'll count as we go
        import_logger.start(0, 0)  # Will update counts as we compile
        for filename in filenames:
            try:
                # Load YAML for this specific format. The path must
                # interpolate the loop variable; a literal path would load
                # the same file for every requested format.
                format_yaml = load_yaml(f"custom_format/{filename}.yml")
                # Compile to API structure
                compiled = compile_format_to_api_structure(format_yaml, self.arr_type)
                # Add unique suffix if needed
                if self.import_as_unique:
                    compiled['name'] = self.add_unique_suffix(compiled['name'])
                formats.append(compiled)
                import_logger.update_compilation(filename)
            except Exception as e:
                import_logger.error(f"{e}", filename, 'compilation')
                failed.append(filename)
                # Don't count failed compilations
        # Set final compilation count
        import_logger.total_compilation = len(formats)
        import_logger.current_compilation = len(formats)
        import_logger.compilation_complete()
        return {'formats': formats}

    def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
        """
        Import compiled formats to Arr instance.

        Args:
            compiled_data: Dictionary with 'formats' key
            dry_run: If True, simulate import without making changes

        Returns:
            Import results: counts for 'added'/'updated'/'failed' plus a
            per-format 'details' list.
        """
        # Get existing formats so we can decide update-vs-create by name.
        existing = self.arr.get_all_formats()
        existing_map = {f['name']: f['id'] for f in existing}
        results = {
            'added': 0,
            'updated': 0,
            'failed': 0,
            'details': []
        }
        import_logger = get_import_logger()
        # Set import count
        import_logger.total_import = len(compiled_data['formats'])
        import_logger._import_shown = False  # Reset import shown flag
        for format_data in compiled_data['formats']:
            format_name = format_data['name']
            try:
                if format_name in existing_map:
                    # Update existing
                    if not dry_run:
                        format_data['id'] = existing_map[format_name]
                        self.arr.put(
                            f"/api/v3/customformat/{existing_map[format_name]}",
                            format_data
                        )
                    import_logger.update_import(format_name, "updated")
                    results['updated'] += 1
                    results['details'].append({
                        'name': format_name,
                        'action': 'updated'
                    })
                else:
                    # Add new
                    if not dry_run:
                        self.arr.post("/api/v3/customformat", format_data)
                    import_logger.update_import(format_name, "added")
                    results['added'] += 1
                    results['details'].append({
                        'name': format_name,
                        'action': 'added'
                    })
            except Exception as e:
                import_logger.update_import(format_name, "failed")
                import_logger.error(f"Failed to import format {format_name}: {e}", format_name)
                results['failed'] += 1
                results['details'].append({
                    'name': format_name,
                    'action': 'failed',
                    'error': str(e)
                })
        # Show import summary
        import_logger.import_complete()
        import_logger._import_shown = True
        return results

View File

@@ -1,262 +0,0 @@
"""Profile import strategy."""
import logging
from typing import Dict, List, Any, Set
from .base import ImportStrategy
from ..utils import load_yaml, extract_format_names, generate_language_formats
from ..compiler import compile_format_to_api_structure, compile_profile_to_api_structure
from ..logger import get_import_logger
logger = logging.getLogger(__name__)
class ProfileStrategy(ImportStrategy):
    """Strategy for importing quality profiles."""

    def compile(self, filenames: List[str]) -> Dict[str, Any]:
        """
        Compile profile files and their dependent formats to API-ready format.

        Args:
            filenames: List of profile filenames (without .yml)

        Returns:
            Dictionary with 'profiles' and 'formats' keys
        """
        profiles = []
        all_formats = []
        processed_formats: Set[str] = set()
        # Cache for language formats to avoid recompiling
        language_formats_cache: Dict[str, List[Dict]] = {}
        import_logger = get_import_logger()
        # Don't try to predict - we'll count as we go
        import_logger.start(0, 0)  # Will update counts as we compile
        for filename in filenames:
            try:
                # Load profile YAML. The path must interpolate the loop
                # variable; a literal path would load the same profile file
                # for every requested name.
                profile_yaml = load_yaml(f"profile/{filename}.yml")
                # Extract referenced custom formats (only for the target arr type)
                format_names = extract_format_names(profile_yaml, self.arr_type)
                for format_name in format_names:
                    # Skip if already processed
                    display_name = self.add_unique_suffix(format_name) if self.import_as_unique else format_name
                    if display_name in processed_formats:
                        continue
                    try:
                        format_yaml = load_yaml(f"custom_format/{format_name}.yml")
                        compiled_format = compile_format_to_api_structure(format_yaml, self.arr_type)
                        if self.import_as_unique:
                            compiled_format['name'] = self.add_unique_suffix(compiled_format['name'])
                        all_formats.append(compiled_format)
                        processed_formats.add(compiled_format['name'])
                        import_logger.update_compilation(format_name)
                    except Exception as e:
                        # Record the failure (the exception was previously
                        # discarded) and count the failed attempt.
                        import_logger.error(f"{e}", format_name, 'compilation')
                        import_logger.update_compilation(f"{format_name} (failed)")
                # Generate language formats if needed (advanced mode:
                # "<behavior>_<language>", e.g. "only_french")
                language = profile_yaml.get('language', 'any')
                if language != 'any' and '_' in language:
                    # Check cache first
                    if language not in language_formats_cache:
                        language_formats = generate_language_formats(language, self.arr_type)
                        compiled_langs = []
                        for lang_format in language_formats:
                            lang_name = lang_format.get('name', 'Language format')
                            compiled_lang = compile_format_to_api_structure(lang_format, self.arr_type)
                            if self.import_as_unique:
                                compiled_lang['name'] = self.add_unique_suffix(compiled_lang['name'])
                            compiled_langs.append(compiled_lang)
                            # Add to all_formats only on first compilation
                            if compiled_lang['name'] not in processed_formats:
                                all_formats.append(compiled_lang)
                                processed_formats.add(compiled_lang['name'])
                                import_logger.update_compilation(lang_name)
                        # Store in cache
                        language_formats_cache[language] = compiled_langs
                # Compile profile
                compiled_profile = compile_profile_to_api_structure(profile_yaml, self.arr_type)
                if self.import_as_unique:
                    compiled_profile['name'] = self.add_unique_suffix(compiled_profile['name'])
                    # Update format references in profile so they point at
                    # the suffixed format names imported above.
                    for item in compiled_profile.get('formatItems', []):
                        item['name'] = self.add_unique_suffix(item['name'])
                profiles.append(compiled_profile)
                import_logger.update_compilation(f"Profile: {compiled_profile['name']}")
            except Exception as e:
                import_logger.error(f"{str(e)}", f"Profile: {filename}", 'compilation')
                import_logger.update_compilation(f"Profile: {filename} (failed)")
        # Set total to what we actually attempted
        import_logger.total_compilation = import_logger.current_compilation
        import_logger.compilation_complete()
        return {
            'profiles': profiles,
            'formats': all_formats
        }

    def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
        """
        Import compiled profiles and formats to Arr instance.

        Args:
            compiled_data: Dictionary with 'profiles' and 'formats' keys
            dry_run: If True, simulate import without making changes

        Returns:
            Import results: counts for 'added'/'updated'/'failed' plus a
            per-profile 'details' list.
        """
        results = {
            'added': 0,
            'updated': 0,
            'failed': 0,
            'details': []
        }
        import_logger = get_import_logger()
        # Set total import count
        import_logger.total_import = len(compiled_data['formats']) + len(compiled_data['profiles'])
        import_logger._import_shown = False  # Reset import shown flag
        # Initialize the name->id map up front so the profile-sync loop
        # below never references an unbound name when there are no formats
        # to import (previously possible in dry-run mode).
        format_map: Dict[str, int] = {}
        # Import formats first
        if compiled_data['formats']:
            existing_formats = self.arr.get_all_formats()
            format_map = {f['name']: f['id'] for f in existing_formats}
            formats_failed = []
            for format_data in compiled_data['formats']:
                format_name = format_data['name']
                try:
                    if format_name in format_map:
                        # Update existing
                        if not dry_run:
                            format_data['id'] = format_map[format_name]
                            self.arr.put(
                                f"/api/v3/customformat/{format_map[format_name]}",
                                format_data
                            )
                        import_logger.update_import(format_name, "updated")
                    else:
                        # Add new
                        if dry_run:
                            # In dry run, pretend we got an ID
                            # Use a predictable fake ID for dry run
                            fake_id = 999000 + len(format_map)
                            format_map[format_name] = fake_id
                        else:
                            response = self.arr.post("/api/v3/customformat", format_data)
                            format_map[format_name] = response['id']
                        import_logger.update_import(format_name, "added")
                except Exception as e:
                    import_logger.update_import(format_name, "failed")
                    import_logger.error(f"Failed to import format {format_name}: {e}", format_name)
                    formats_failed.append(format_name)
        # Refresh format map for profile syncing (MUST be done after importing formats)
        if not dry_run:
            # In real mode, get the actual current formats from the server
            existing_formats = self.arr.get_all_formats()
            format_map = {f['name']: f['id'] for f in existing_formats}
        # In dry run mode, format_map already has fake IDs from above

        # Sync format IDs in profiles
        for profile in compiled_data['profiles']:
            synced_items = []
            processed_formats = set()
            # First add all explicitly defined formats with their scores
            for item in profile.get('formatItems', []):
                if item['name'] in format_map:
                    synced_items.append({
                        'format': format_map[item['name']],
                        'name': item['name'],
                        'score': item.get('score', 0)
                    })
                    processed_formats.add(item['name'])
                else:
                    import_logger.warning(f"Format {item['name']} not found for profile {profile['name']}")
            # Then add ALL other existing formats with score 0 (Arr requirement)
            for format_name, format_id in format_map.items():
                if format_name not in processed_formats:
                    synced_items.append({
                        'format': format_id,
                        'name': format_name,
                        'score': 0
                    })
            profile['formatItems'] = synced_items

        # Import profiles
        existing_profiles = self.arr.get_all_profiles()
        profile_map = {p['name']: p['id'] for p in existing_profiles}
        for profile_data in compiled_data['profiles']:
            profile_name = profile_data['name']
            try:
                if profile_name in profile_map:
                    # Update existing
                    if not dry_run:
                        profile_data['id'] = profile_map[profile_name]
                        self.arr.put(
                            f"/api/v3/qualityprofile/{profile_data['id']}",
                            profile_data
                        )
                    import_logger.update_import(f"Profile: {profile_name}", "updated")
                    results['updated'] += 1
                    results['details'].append({
                        'name': profile_name,
                        'action': 'updated'
                    })
                else:
                    # Add new
                    if not dry_run:
                        self.arr.post("/api/v3/qualityprofile", profile_data)
                    import_logger.update_import(f"Profile: {profile_name}", "added")
                    results['added'] += 1
                    results['details'].append({
                        'name': profile_name,
                        'action': 'added'
                    })
            except Exception as e:
                import_logger.update_import(f"Profile: {profile_name}", "failed")
                import_logger.error(f"Failed to import profile {profile_name}: {e}", profile_name)
                results['failed'] += 1
                results['details'].append({
                    'name': profile_name,
                    'action': 'failed',
                    'error': str(e)
                })
        # Show import summary
        import_logger.import_complete()
        import_logger._import_shown = True
        return results

View File

@@ -1,95 +0,0 @@
# backend/app/main.py
from flask import Flask, jsonify, send_from_directory
import os
from flask_cors import CORS
from .config import config
from .git import bp as git_bp
from .arr import bp as arr_bp
from .data import bp as data_bp
from .importarr import bp as importarr_bp
from .importer.routes import bp as new_import_bp
from .task import bp as tasks_bp, TaskScheduler
from .backup import bp as backup_bp
from .db import run_migrations, get_settings
from .auth import bp as auth_bp
from .settings import bp as settings_bp
from .logs import bp as logs_bp
from .media_management import media_management_bp
from .middleware import init_middleware
from .init import setup_logging, init_app_config, init_git_user
def create_app():
    """Application factory: configures logging, static serving, the
    database, the Git user, the task scheduler and all API blueprints,
    then returns the Flask app (used by gunicorn as `app.main:create_app()`)."""
    # Set up logging first
    logger = setup_logging()
    logger.info("Creating Flask application")
    app = Flask(__name__, static_folder='static')
    CORS(app, resources={r"/*": {"origins": "*"}})

    # Serve static files: catch-all route for the SPA frontend.
    @app.route('/', defaults={'path': ''})
    @app.route('/<path:path>')
    def serve_static(path):
        if path.startswith('api/'):
            # NOTE(review): a bare `return` yields None, which Flask treats
            # as an error — unmatched /api/* paths will 500; confirm intended.
            return  # Let API routes handle these
        if path and os.path.exists(os.path.join(app.static_folder, path)):
            return send_from_directory(app.static_folder, path)
        # SPA fallback: unknown paths get index.html for client-side routing.
        return send_from_directory(app.static_folder, 'index.html')

    # Initialize directories and database
    logger.info("Ensuring required directories exist")
    config.ensure_directories()
    logger.info("Initializing database")
    run_migrations()
    # Initialize Git user configuration
    logger.info("Initializing Git user")
    success, message = init_git_user()
    if not success:
        # Non-fatal: app still starts, Git operations may fail later.
        logger.warning(f"Git user initialization issue: {message}")
    else:
        logger.info("Git user initialized successfully")
    # Initialize app configuration
    init_app_config(app)
    # Initialize and start task scheduler
    logger.info("Starting task scheduler")
    scheduler = TaskScheduler()
    scheduler.load_tasks_from_db()
    scheduler.start()
    # Register all blueprints
    logger.info("Registering blueprints")
    app.register_blueprint(auth_bp, url_prefix='/api/auth')
    app.register_blueprint(settings_bp, url_prefix='/api/settings')
    app.register_blueprint(backup_bp, url_prefix='/api/backup')
    app.register_blueprint(logs_bp, url_prefix='/api/logs')
    app.register_blueprint(git_bp, url_prefix='/api/git')
    app.register_blueprint(data_bp, url_prefix='/api/data')
    app.register_blueprint(importarr_bp, url_prefix='/api/import')
    app.register_blueprint(new_import_bp, url_prefix='/api/v2/import')
    app.register_blueprint(arr_bp, url_prefix='/api/arr')
    app.register_blueprint(tasks_bp, url_prefix='/api/tasks')
    app.register_blueprint(media_management_bp)
    # Initialize middleware
    logger.info("Initializing middleware")
    init_middleware(app)

    # Add settings route (read-only; writes go through settings_bp)
    @app.route('/api/settings', methods=['GET'])
    def handle_settings():
        settings = get_settings()
        return jsonify(settings), 200

    logger.info("Flask application creation completed")
    return app


if __name__ == '__main__':
    # Development entry point only; production runs via gunicorn (see Dockerfile).
    app = create_app()
    app.run(debug=True, host='0.0.0.0')

View File

@@ -1,305 +0,0 @@
import axios from 'axios';
const BASE_URL = '/api/data';

// Reserved action endpoints; a resource named after one of these would
// shadow the corresponding API route.
const SPECIAL_ENDPOINTS = [
    'test',
    'validate',
    'search',
    'batch',
    'export',
    'import',
    'stats',
    'metrics',
    'health',
    'status',
    'config',
    'settings',
    'logs',
    'audit',
    'backup',
    'restore',
    'sync',
    'preview',
    'publish',
    'deploy',
    'run',
    'execute',
    'process',
    'analyze',
    'verify',
    'check'
];

// Each entry pairs a routing-hostile pattern with the human-readable
// reason it is rejected; order determines which message wins.
const UNSAFE_PATTERNS = [
    {pattern: /[\/\\]/, message: 'Cannot contain forward or backward slashes'},
    {
        pattern: /[<>:"|?*]/,
        message: 'Cannot contain special characters (<, >, :, ", |, ?, *)'
    },
    {
        pattern: /^\.+/,
        message: 'Cannot start with dots (prevents relative paths)'
    },
    {pattern: /\.+$/, message: 'Cannot end with dots'},
    {pattern: /^-/, message: 'Cannot start with a dash'},
    {pattern: /-$/, message: 'Cannot end with a dash'},
    {
        pattern: /--|__|\.\./,
        message: 'Cannot contain consecutive dashes, underscores, or dots'
    }
];

// Validate a resource name for use in a URL path. Returns true when the
// name is acceptable; otherwise throws an Error whose message names the
// category and the exact rule that was violated.
const validateResourceName = (category, name) => {
    // Presence and type guards first, so later checks can assume a string.
    if (!name) {
        throw new Error(`${category} name cannot be empty`);
    }
    if (typeof name !== 'string') {
        throw new Error(
            `${category} name must be a string, received ${typeof name}`
        );
    }
    // Length bounds.
    if (name.length < 1) {
        throw new Error(`${category} name must be at least 1 character long`);
    }
    if (name.length > 64) {
        throw new Error(
            `${category} name cannot exceed 64 characters (current length: ${name.length})`
        );
    }
    // Reserved endpoint collision (case-insensitive).
    if (SPECIAL_ENDPOINTS.includes(name.toLowerCase())) {
        throw new Error(
            `'${name}' is a reserved word and cannot be used as a ${category} name. Reserved words: ${SPECIAL_ENDPOINTS.join(
                ', '
            )}`
        );
    }
    // First matching unsafe pattern determines the error message.
    const violation = UNSAFE_PATTERNS.find(({pattern}) => pattern.test(name));
    if (violation) {
        throw new Error(`Invalid ${category} name '${name}': ${violation.message}`);
    }
    return true;
};
// Validate an array of URL path segments. Returns true when the joined
// path is safe; otherwise throws with a message naming the exact problem.
const validatePath = parts => {
    if (!Array.isArray(parts)) {
        throw new Error(
            `Path must be an array of segments, received ${typeof parts}`
        );
    }
    if (parts.length === 0) {
        throw new Error('Path cannot be empty');
    }
    const joined = parts.join('/');
    // Traversal and hidden-path markers anywhere in the joined path.
    if (joined.includes('..')) {
        throw new Error(
            'Invalid path: Contains parent directory reference (..)'
        );
    }
    if (joined.includes('./')) {
        throw new Error(
            'Invalid path: Contains current directory reference (./)'
        );
    }
    if (joined.includes('/.')) {
        throw new Error(
            'Invalid path: Contains hidden directory reference (/.)'
        );
    }
    // Leading/trailing separators are never valid here.
    if (joined.startsWith('/')) {
        throw new Error('Invalid path: Cannot start with a separator (/)');
    }
    if (joined.endsWith('/')) {
        throw new Error('Invalid path: Cannot end with a separator (/)');
    }
    // Per-segment sanity: every segment is a non-empty string.
    for (const [index, segment] of parts.entries()) {
        if (typeof segment !== 'string') {
            throw new Error(
                `Path segment at position ${index} must be a string, received ${typeof segment}`
            );
        }
        if (segment.length === 0) {
            throw new Error(
                `Path segment at position ${index} cannot be empty`
            );
        }
    }
    return true;
};
// Normalize any thrown value into an Error with a descriptive message.
// Always throws: native Errors propagate as-is, axios-style responses are
// mined for a server-provided message, and anything else becomes a
// generic failure tagged with the operation.
const handleError = (error, operation) => {
    console.error(`Error during ${operation}:`, error);
    if (error instanceof Error) {
        throw error;
    }
    const payload = error.response?.data;
    if (payload) {
        // Probe the common server shapes in priority order; a bare string
        // body is used verbatim.
        const detail =
            payload.error ||
            payload.message ||
            payload.detail ||
            (typeof payload === 'string' ? payload : null);
        if (detail) {
            throw new Error(`${operation} failed: ${detail}`);
        }
    }
    // No body to mine — fall back to the HTTP status when we have one.
    const status = error.response?.status;
    if (status) {
        throw new Error(`Failed to ${operation} (HTTP ${status})`);
    }
    throw new Error(`Failed to ${operation}: Unknown error occurred`);
};
// Fetch every item stored under a category.
export const getAllItems = async category => {
    try {
        validateResourceName('category', category);
        validatePath([category]);
        const {data} = await axios.get(`${BASE_URL}/${category}`);
        return data;
    } catch (error) {
        throw handleError(error, `fetch ${category} items`);
    }
};
// Fetch a single named item from a category.
export const getItem = async (category, name) => {
    try {
        validateResourceName('category', category);
        validateResourceName(category, name);
        validatePath([category, name]);
        const {data} = await axios.get(`${BASE_URL}/${category}/${name}`);
        return data;
    } catch (error) {
        throw handleError(error, `fetch ${category} item ${name}`);
    }
};
// Create a new item; the item's own `name` field determines its URL.
export const createItem = async (category, data) => {
    try {
        validateResourceName('category', category);
        validateResourceName(category, data.name);
        validatePath([category, data.name]);
        const url = `${BASE_URL}/${category}/${data.name}`;
        const response = await axios.post(url, data);
        return response.data;
    } catch (error) {
        throw handleError(error, `create ${category} item`);
    }
};
// Update an item in place; pass `newName` to rename it in the same call.
export const updateItem = async (category, name, data, newName) => {
    try {
        validateResourceName('category', category);
        validateResourceName(category, name);
        if (newName) {
            // A rename target must satisfy the same rules as any name.
            validateResourceName(category, newName);
            validatePath([category, newName]);
        }
        validatePath([category, name]);
        const payload = {...data};
        if (newName) {
            payload.rename = newName;
        }
        const response = await axios.put(
            `${BASE_URL}/${category}/${name}`,
            payload
        );
        return response.data;
    } catch (error) {
        throw handleError(error, `update ${category} item ${name}`);
    }
};
// Delete a single named item from a category.
export const deleteItem = async (category, name) => {
    try {
        validateResourceName('category', category);
        validateResourceName(category, name);
        validatePath([category, name]);
        const {data} = await axios.delete(`${BASE_URL}/${category}/${name}`);
        return data;
    } catch (error) {
        throw handleError(error, `delete ${category} item ${name}`);
    }
};
// Factory producing a POST helper for an action endpoint (e.g. 'test')
// scoped to one category. The returned function logs its payload and
// posts it to `<BASE_URL>/<category>/<endpoint>`.
const createSpecialEndpoint = (category, endpoint) => async data => {
    try {
        validateResourceName('category', category);
        validatePath([category, endpoint]);
        console.log(`Sending data to ${endpoint}:`, data);
        const {data: result} = await axios.post(
            `${BASE_URL}/${category}/${endpoint}`,
            data
        );
        return result;
    } catch (error) {
        throw handleError(error, `execute ${category} ${endpoint}`);
    }
};
// CRUD facade for quality profiles (stored under /api/data/profile).
export const Profiles = {
    getAll: () => getAllItems('profile'),
    get: name => getItem('profile', name),
    create: data => createItem('profile', data),
    update: (name, data, newName) => updateItem('profile', name, data, newName),
    delete: name => deleteItem('profile', name)
};

// CRUD facade for custom formats, plus a test-runner action endpoint.
export const CustomFormats = {
    getAll: () => getAllItems('custom_format'),
    get: name => getItem('custom_format', name),
    create: data => createItem('custom_format', data),
    update: (name, data, newName) =>
        updateItem('custom_format', name, data, newName),
    delete: name => deleteItem('custom_format', name),
    runTests: createSpecialEndpoint('custom_format', 'test')
};

// CRUD facade for regex patterns, plus a test-runner action endpoint.
export const RegexPatterns = {
    getAll: () => getAllItems('regex_pattern'),
    get: name => getItem('regex_pattern', name),
    create: data => createItem('regex_pattern', data),
    update: (name, data, newName) =>
        updateItem('regex_pattern', name, data, newName),
    delete: name => deleteItem('regex_pattern', name),
    runTests: createSpecialEndpoint('regex_pattern', 'test')
};

View File

@@ -1,280 +0,0 @@
import React, {useState} from 'react';
import PropTypes from 'prop-types';
import {Copy, Check, FlaskConical, FileText, ListFilter} from 'lucide-react';
import Tooltip from '@ui/Tooltip';
import ReactMarkdown from 'react-markdown';
function FormatCard({
    format,
    onEdit,
    onClone,
    sortBy,
    isSelectionMode,
    isSelected,
    willBeSelected,
    onSelect
}) {
    // Description-vs-conditions toggle, persisted per format in localStorage.
    const [showDescription, setShowDescription] = useState(() => {
        const saved = localStorage.getItem(`format-view-${format.file_name}`);
        return saved !== null ? JSON.parse(saved) : true;
    });
    const {content} = format;
    // Derived test stats; the trailing `|| 0` guards the NaN from 0/0.
    const totalTests = content.tests?.length || 0;
    const passedTests = content.tests?.filter(t => t.passes)?.length || 0;
    const passRate = Math.round((passedTests / totalTests) * 100) || 0;
    // Badge color per condition: negated = red, required = green, else blue.
    const getConditionStyle = condition => {
        if (condition.negate) {
            return 'bg-red-500/20 text-red-400 border border-red-500/20';
        }
        if (condition.required) {
            return 'bg-green-500/20 text-green-400 border border-green-500/20';
        }
        return 'bg-blue-500/20 text-blue-400 border border-blue-500/20';
    };
    // Card click: toggles selection in selection mode, otherwise opens the editor.
    const handleClick = e => {
        if (isSelectionMode) {
            onSelect(e);
        } else {
            onEdit();
        }
    };
    const handleCloneClick = e => {
        e.stopPropagation();
        onClone(format);
    };
    // Flip the description/conditions view and persist the choice.
    const handleViewToggle = e => {
        e.stopPropagation();
        setShowDescription(prev => {
            const newState = !prev;
            localStorage.setItem(
                `format-view-${format.file_name}`,
                JSON.stringify(newState)
            );
            return newState;
        });
    };
    // Prevent text selection while shift-clicking for range selection.
    const handleMouseDown = e => {
        if (e.shiftKey) {
            e.preventDefault();
        }
    };
    return (
        <div
            className={`w-full h-[12rem] bg-gradient-to-br from-gray-800/95 to-gray-900 border ${
                isSelected
                    ? 'border-blue-500'
                    : willBeSelected
                    ? 'border-blue-300'
                    : 'border-gray-700'
            } rounded-lg shadow-lg hover:shadow-xl ${
                isSelectionMode
                    ? isSelected
                        ? 'hover:border-blue-400'
                        : 'hover:border-gray-400'
                    : 'hover:border-blue-400'
            } transition-all cursor-pointer relative`}
            onClick={handleClick}
            onMouseDown={handleMouseDown}>
            <div className='p-4 flex flex-col h-full'>
                {/* Header Section */}
                <div className='flex justify-between items-start'>
                    <div className='flex flex-col min-w-0 flex-1'>
                        <h3 className='text-base font-bold text-gray-100 truncate mb-1.5'>
                            {content.name}
                        </h3>
                        <div className='flex-1 overflow-x-auto scrollbar-none'>
                            <div className='flex items-center gap-2 text-xs'>
                                {content.tags?.map(tag => (
                                    <span
                                        key={tag}
                                        className='bg-blue-600/20 text-blue-400 px-1.5 py-0.5 rounded font-semibold whitespace-nowrap'>
                                        {tag}
                                    </span>
                                ))}
                            </div>
                        </div>
                    </div>
                    <div className='flex items-center gap-2 shrink-0 ml-4'>
                        <Tooltip
                            content={
                                showDescription
                                    ? 'Show Conditions'
                                    : 'Show Description'
                            }>
                            <button
                                onClick={handleViewToggle}
                                className='w-7 h-7 flex items-center justify-center rounded hover:bg-gray-700/50 transition-colors text-gray-400 hover:text-white relative'>
                                {showDescription ? (
                                    <ListFilter className='w-4 h-4' />
                                ) : (
                                    <FileText className='w-4 h-4' />
                                )}
                            </button>
                        </Tooltip>
                        {/* Clone button hidden while selecting */}
                        {!isSelectionMode && (
                            <button
                                onClick={handleCloneClick}
                                className='text-gray-400 hover:text-white transition-colors w-7 h-7 flex items-center justify-center rounded hover:bg-gray-700/50 relative'>
                                <Copy className='w-4 h-4' />
                            </button>
                        )}
                        {/* Selection indicator: filled when selected, dot when pending */}
                        {isSelectionMode && (
                            <Tooltip
                                content={
                                    isSelected
                                        ? 'Selected'
                                        : willBeSelected
                                        ? 'Will be selected'
                                        : 'Select'
                                }>
                                <div
                                    className={`w-6 h-6 rounded-full flex items-center justify-center relative ${
                                        isSelected
                                            ? 'bg-blue-500'
                                            : willBeSelected
                                            ? 'bg-blue-200/20'
                                            : 'bg-gray-200/20'
                                    } transition-colors hover:bg-blue-600`}>
                                    {isSelected && (
                                        <Check
                                            size={14}
                                            className='text-white'
                                        />
                                    )}
                                    {willBeSelected && !isSelected && (
                                        <div className='w-1.5 h-1.5 rounded-full bg-blue-400' />
                                    )}
                                </div>
                            </Tooltip>
                        )}
                    </div>
                </div>
                <hr className='border-gray-700 my-2' />
                {/* Content Area with Slide Animation */}
                <div className='relative flex-1 overflow-hidden'>
                    <div
                        className={`absolute inset-0 w-full h-full transition-transform duration-300 ease-in-out flex ${
                            showDescription
                                ? '-translate-x-full'
                                : 'translate-x-0'
                        }`}>
                        {/* Conditions */}
                        <div className='w-full flex-shrink-0 overflow-y-auto scrollable'>
                            <div className='flex flex-wrap gap-1.5 content-start'>
                                {content.conditions?.map((condition, index) => (
                                    <span
                                        key={index}
                                        className={`px-1.5 py-0.5 rounded text-xs font-medium ${getConditionStyle(
                                            condition
                                        )}`}>
                                        {condition.name}
                                    </span>
                                ))}
                            </div>
                        </div>
                    </div>
                    <div
                        className={`absolute inset-0 w-full h-full transition-transform duration-300 ease-in-out ${
                            showDescription
                                ? 'translate-x-0'
                                : 'translate-x-full'
                        }`}>
                        {/* Description */}
                        <div className='w-full h-full overflow-y-auto scrollable'>
                            {content.description ? (
                                <div className='text-gray-300 text-xs prose prose-invert prose-gray max-w-none'>
                                    <ReactMarkdown>
                                        {content.description}
                                    </ReactMarkdown>
                                </div>
                            ) : (
                                <span className='text-gray-500 text-xs italic'>
                                    No description provided
                                </span>
                            )}
                        </div>
                    </div>
                </div>
                <hr className='border-gray-700 my-2' />
                {/* Footer - Tests: color reflects pass rate (100 / >=80 / below) */}
                <div className='flex items-center justify-between text-xs'>
                    {totalTests > 0 ? (
                        <div
                            className={`px-2.5 py-1 rounded-md flex items-center gap-1.5 ${
                                passRate === 100
                                    ? 'bg-green-500/10 text-green-400'
                                    : passRate >= 80
                                    ? 'bg-yellow-500/10 text-yellow-400'
                                    : 'bg-red-500/10 text-red-400'
                            }`}>
                            <FlaskConical className='w-3.5 h-3.5' />
                            <span className='font-medium'>
                                {passedTests}/{totalTests} passing
                            </span>
                        </div>
                    ) : (
                        <div className='px-2.5 py-1 rounded-md bg-gray-500/10 text-gray-400 flex items-center gap-1.5'>
                            <FlaskConical className='w-3.5 h-3.5' />
                            <span className='font-medium'>No tests</span>
                        </div>
                    )}
                    {sortBy === 'dateModified' && format.modified_date && (
                        <span className='text-gray-400'>
                            Modified:{' '}
                            {new Date(format.modified_date).toLocaleString()}
                        </span>
                    )}
                </div>
            </div>
        </div>
    );
}
// Prop contract for FormatCard.
// NOTE(review): `modified_date` is declared required here but the component
// renders it conditionally (`format.modified_date &&`) — confirm whether it
// should be optional.
FormatCard.propTypes = {
    format: PropTypes.shape({
        file_name: PropTypes.string.isRequired,
        modified_date: PropTypes.string.isRequired,
        content: PropTypes.shape({
            name: PropTypes.string.isRequired,
            description: PropTypes.string,
            conditions: PropTypes.arrayOf(
                PropTypes.shape({
                    name: PropTypes.string.isRequired,
                    type: PropTypes.string.isRequired,
                    pattern: PropTypes.string,
                    required: PropTypes.bool,
                    negate: PropTypes.bool
                })
            ),
            tags: PropTypes.arrayOf(PropTypes.string),
            tests: PropTypes.arrayOf(
                PropTypes.shape({
                    id: PropTypes.number.isRequired,
                    input: PropTypes.string.isRequired,
                    expected: PropTypes.bool.isRequired,
                    passes: PropTypes.bool.isRequired
                })
            )
        }).isRequired
    }).isRequired,
    onEdit: PropTypes.func.isRequired,
    onClone: PropTypes.func.isRequired,
    sortBy: PropTypes.string.isRequired,
    isSelectionMode: PropTypes.bool.isRequired,
    isSelected: PropTypes.bool.isRequired,
    willBeSelected: PropTypes.bool,
    onSelect: PropTypes.func.isRequired
};
export default FormatCard;

View File

@@ -1,138 +0,0 @@
// AddUnitTestModal.jsx
import React, {useState, useEffect} from 'react';
import PropTypes from 'prop-types';
import Modal from '../ui/Modal';
/**
 * Modal dialog for creating or editing a single regex unit test.
 *
 * Props:
 * - isOpen / onClose: standard modal visibility control.
 * - onAdd(testData): invoked with the new or updated test object on submit.
 * - tests: the existing test array, used only to derive the next free id.
 * - editTest: when non-null, the modal pre-fills and acts as an editor for
 *   that test (its id is preserved on save).
 */
const AddUnitTestModal = ({isOpen, onClose, onAdd, tests, editTest = null}) => {
    const [input, setInput] = useState('');
    const [shouldMatch, setShouldMatch] = useState(true);
    // Reset form when opening modal, handling both new and edit cases
    useEffect(() => {
        if (isOpen) {
            if (editTest) {
                setInput(editTest.input);
                setShouldMatch(editTest.expected);
            } else {
                setInput('');
                setShouldMatch(true);
            }
        }
    }, [isOpen, editTest]);
    const handleSubmit = () => {
        // IDs are numeric and 1-based; the next id is max(existing ids) + 1.
        const getNextTestId = testArray => {
            if (!testArray || testArray.length === 0) return 1;
            return Math.max(...testArray.map(test => test.id)) + 1;
        };
        // `passes`/`lastRun` are reset here; the parent re-runs the tests
        // after an add/edit so these fields are refreshed immediately.
        const testData = {
            id: editTest ? editTest.id : getNextTestId(tests),
            input,
            expected: shouldMatch,
            passes: false,
            lastRun: null
        };
        onAdd(testData);
        handleClose();
    };
    // Clear local state before delegating to the parent's onClose.
    const handleClose = () => {
        setInput('');
        setShouldMatch(true);
        onClose();
    };
    return (
        <Modal
            isOpen={isOpen}
            onClose={handleClose}
            title={editTest ? 'Edit Test Case' : 'Add Test Case'}
            width='3xl'
            footer={
                <div className='flex justify-end space-x-3'>
                    <button
                        onClick={handleClose}
                        className='px-3 py-1.5 text-sm font-medium text-gray-700 dark:text-gray-200
                            bg-white dark:bg-gray-800 border border-gray-300 dark:border-gray-600 rounded-md
                            hover:bg-gray-50 dark:hover:bg-gray-700'>
                        Cancel
                    </button>
                    <button
                        onClick={handleSubmit}
                        disabled={!input.trim()}
                        className='px-3 py-1.5 text-sm font-medium text-white bg-blue-600 rounded-md
                            hover:bg-blue-700 disabled:opacity-50 disabled:cursor-not-allowed'>
                        {editTest ? 'Save Changes' : 'Add Test'}
                    </button>
                </div>
            }>
            {/* Rest of the modal content remains the same */}
            <div className='space-y-4'>
                <div className='space-y-2'>
                    <label className='block text-sm font-medium text-gray-700 dark:text-gray-300'>
                        Test String
                    </label>
                    <input
                        type='text'
                        value={input}
                        onChange={e => setInput(e.target.value)}
                        className='w-full px-3 py-2 border border-gray-300 dark:border-gray-600
                            rounded-md bg-white dark:bg-gray-700 text-gray-900 dark:text-gray-100
                            placeholder-gray-500 dark:placeholder-gray-400
                            focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent'
                        placeholder='Enter string to test against pattern...'
                        autoFocus
                    />
                </div>
                <div className='space-y-2'>
                    <label className='block text-sm font-medium text-gray-700 dark:text-gray-300'>
                        Expected Result
                    </label>
                    {/* Radio pair toggling the test's `expected` flag. */}
                    <div className='flex space-x-4'>
                        <label className='flex items-center space-x-2'>
                            <input
                                type='radio'
                                checked={shouldMatch}
                                onChange={() => setShouldMatch(true)}
                                className='text-blue-600 focus:ring-blue-500'
                            />
                            <span className='text-sm text-gray-700 dark:text-gray-300'>
                                Should Match
                            </span>
                        </label>
                        <label className='flex items-center space-x-2'>
                            <input
                                type='radio'
                                checked={!shouldMatch}
                                onChange={() => setShouldMatch(false)}
                                className='text-blue-600 focus:ring-blue-500'
                            />
                            <span className='text-sm text-gray-700 dark:text-gray-300'>
                                Should Not Match
                            </span>
                        </label>
                    </div>
                </div>
            </div>
        </Modal>
    );
};
AddUnitTestModal.propTypes = {
    isOpen: PropTypes.bool.isRequired,
    onClose: PropTypes.func.isRequired,
    onAdd: PropTypes.func.isRequired,
    tests: PropTypes.array.isRequired,
    editTest: PropTypes.shape({
        id: PropTypes.number.isRequired,
        input: PropTypes.string.isRequired,
        expected: PropTypes.bool.isRequired,
        passes: PropTypes.bool.isRequired,
        lastRun: PropTypes.string
    })
};
export default AddUnitTestModal;

View File

@@ -1,215 +0,0 @@
import React from 'react';
import PropTypes from 'prop-types';
import {Copy, Check, FlaskConical} from 'lucide-react';
import Tooltip from '@ui/Tooltip';
import ReactMarkdown from 'react-markdown';
const RegexCard = ({
pattern,
onEdit,
onClone,
formatDate,
sortBy,
isSelectionMode,
isSelected,
willBeSelected,
onSelect
}) => {
const totalTests = pattern.tests?.length || 0;
const passedTests = pattern.tests?.filter(t => t.passes)?.length || 0;
const passRate =
totalTests > 0 ? Math.round((passedTests / totalTests) * 100) : 0;
const handleClick = e => {
if (isSelectionMode) {
onSelect(e);
} else {
onEdit();
}
};
const handleCloneClick = e => {
e.stopPropagation();
onClone(pattern);
};
const handleMouseDown = e => {
if (e.shiftKey) {
e.preventDefault();
}
};
const getTestColor = () => {
if (totalTests === 0) return 'text-gray-400';
if (passRate === 100) return 'text-green-400';
if (passRate >= 80) return 'text-yellow-400';
return 'text-red-400';
};
return (
<div
className={`w-full h-[20rem] bg-gradient-to-br from-gray-800/95 to-gray-900 border ${
isSelected
? 'border-blue-500'
: willBeSelected
? 'border-blue-300'
: 'border-gray-700'
} rounded-lg shadow-lg hover:shadow-xl ${
isSelectionMode
? isSelected
? 'hover:border-blue-400'
: 'hover:border-gray-400'
: 'hover:border-blue-400'
} transition-all cursor-pointer overflow-hidden`}
onClick={handleClick}
onMouseDown={handleMouseDown}>
<div className='p-6 flex flex-col h-full'>
{/* Header Section */}
<div className='flex-none'>
<div className='flex justify-between items-start'>
<div className='flex items-center gap-3 flex-wrap'>
<h3 className='text-base font-bold text-gray-100'>
{pattern.name}
</h3>
{pattern.tags && pattern.tags.length > 0 && (
<div className='flex flex-wrap gap-2'>
{pattern.tags.map(tag => (
<span
key={tag}
className='bg-blue-600/20 text-blue-400 px-1.5 py-0.5 rounded text-xs shadow-sm'>
{tag}
</span>
))}
</div>
)}
</div>
<div className='flex items-center'>
<div className='w-8 h-8 flex items-center justify-center'>
{isSelectionMode ? (
<Tooltip
content={
isSelected
? 'Selected'
: willBeSelected
? 'Will be selected'
: 'Select'
}>
<div
className={`w-6 h-6 rounded-full flex items-center justify-center ${
isSelected
? 'bg-blue-500'
: willBeSelected
? 'bg-blue-200/20'
: 'bg-gray-200/20'
} transition-colors hover:bg-blue-600`}>
{isSelected && (
<Check
size={14}
className='text-white'
/>
)}
{willBeSelected && !isSelected && (
<div className='w-1.5 h-1.5 rounded-full bg-blue-400' />
)}
</div>
</Tooltip>
) : (
<button
onClick={handleCloneClick}
className='text-gray-400 hover:text-white transition-colors'>
<Copy className='w-5 h-5' />
</button>
)}
</div>
</div>
</div>
{/* Pattern Display */}
<div className='mt-4 bg-gray-900/50 rounded-md p-3 font-mono text-xs border border-gray-700/50'>
<code className='text-gray-200 break-all line-clamp-3'>
{pattern.pattern}
</code>
</div>
</div>
<hr className='border-gray-700 my-3' />
{/* Description and Footer Section */}
<div className='flex-1 overflow-hidden'>
{pattern.description && (
<div
className='text-gray-300 text-xs h-full overflow-y-auto prose prose-invert prose-gray max-w-none
[&>ul]:list-disc [&>ul]:ml-4 [&>ul]:mt-2 [&>ul]:mb-4
[&>ol]:list-decimal [&>ol]:ml-4 [&>ol]:mt-2 [&>ol]:mb-4
[&>ul>li]:mt-0.5 [&>ol>li]:mt-0.5
[&_code]:bg-gray-900/50 [&_code]:px-1.5 [&_code]:py-0.5 [&_code]:rounded-md [&_code]:text-blue-300 [&_code]:border [&_code]:border-gray-700/50 scrollable'>
<ReactMarkdown>{pattern.description}</ReactMarkdown>
</div>
)}
</div>
<hr className='border-gray-700 my-3' />
<div className='flex items-center justify-between'>
<div className='flex items-center'>
{totalTests > 0 ? (
<div
className={`px-2.5 py-1 rounded-md flex items-center gap-2 ${
passRate === 100
? 'bg-green-500/10 text-green-400'
: passRate >= 80
? 'bg-yellow-500/10 text-yellow-400'
: 'bg-red-500/10 text-red-400'
}`}>
<FlaskConical className='w-3.5 h-3.5' />
<span className='text-xs font-medium'>
{passedTests}/{totalTests} passing
</span>
</div>
) : (
<div className='px-2.5 py-1 rounded-md bg-gray-500/10 text-gray-400 flex items-center gap-2'>
<FlaskConical className='w-3.5 h-3.5' />
<span className='text-xs font-medium'>
No tests
</span>
</div>
)}
</div>
{sortBy === 'dateModified' && pattern.modified_date && (
<span className='text-xs text-gray-400'>
Modified {formatDate(pattern.modified_date)}
</span>
)}
</div>
</div>
</div>
);
};
RegexCard.propTypes = {
pattern: PropTypes.shape({
name: PropTypes.string.isRequired,
pattern: PropTypes.string.isRequired,
description: PropTypes.string,
tags: PropTypes.arrayOf(PropTypes.string),
tests: PropTypes.arrayOf(
PropTypes.shape({
input: PropTypes.string.isRequired,
expected: PropTypes.bool.isRequired,
passes: PropTypes.bool.isRequired
})
),
modified_date: PropTypes.string
}).isRequired,
onEdit: PropTypes.func.isRequired,
onClone: PropTypes.func.isRequired,
formatDate: PropTypes.func.isRequired,
sortBy: PropTypes.string.isRequired,
isSelectionMode: PropTypes.bool.isRequired,
isSelected: PropTypes.bool.isRequired,
willBeSelected: PropTypes.bool,
onSelect: PropTypes.func.isRequired
};
export default RegexCard;

View File

@@ -1,215 +0,0 @@
import React, {useState} from 'react';
import PropTypes from 'prop-types';
import MarkdownEditor from '@ui/MarkdownEditor';
import AddButton from '@ui/DataBar/AddButton';
import {InfoIcon} from 'lucide-react';
const RegexGeneralTab = ({
name,
description,
pattern,
tags,
onNameChange,
onDescriptionChange,
onPatternChange,
onAddTag,
onRemoveTag,
error,
patternError
}) => {
const [newTag, setNewTag] = useState('');
const handleAddTag = () => {
if (newTag.trim() && !tags.includes(newTag.trim())) {
onAddTag(newTag.trim());
setNewTag('');
}
};
const handleKeyPress = e => {
if (e.key === 'Enter') {
e.preventDefault();
handleAddTag();
}
};
return (
<div className='w-full'>
{error && (
<div className='bg-red-50 dark:bg-red-900/30 border border-red-200 dark:border-red-800 rounded-md p-4 mb-6'>
<p className='text-sm text-red-600 dark:text-red-400'>
{error}
</p>
</div>
)}
<div className='space-y-8'>
{/* Name Input */}
<div className='space-y-2'>
<div className='space-y-1'>
<label className='text-sm font-medium text-gray-700 dark:text-gray-300'>
Pattern Name
</label>
<p className='text-xs text-gray-500 dark:text-gray-400'>
Give your regex pattern a descriptive name
</p>
</div>
<input
type='text'
value={name}
onChange={e => onNameChange(e.target.value)}
placeholder='Enter pattern name'
className='w-full rounded-md border border-gray-300 dark:border-gray-600
bg-gray-50 dark:bg-gray-800 px-3 py-2 text-sm
text-gray-900 dark:text-gray-100
placeholder-gray-500 dark:placeholder-gray-400
focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent
transition-colors duration-200'
/>
</div>
{/* Description */}
<div className='space-y-2'>
<div className='space-y-1'>
<label className='text-sm font-medium text-gray-700 dark:text-gray-300'>
Description
</label>
<p className='text-xs text-gray-500 dark:text-gray-400'>
Describe what this pattern matches. Use markdown to
format your description.
</p>
</div>
<MarkdownEditor
value={description}
onChange={e => onDescriptionChange(e.target.value)}
placeholder='Describe what this pattern matches...'
/>
</div>
{/* Pattern Input */}
<div className='space-y-2'>
<div className='space-y-1'>
<div className='flex items-center justify-between'>
<label className='text-sm font-medium text-gray-700 dark:text-gray-300'>
Pattern
</label>
<div className='flex items-center gap-2 text-xs text-blue-600 dark:text-blue-400'>
<InfoIcon className='h-4 w-4' />
<span>Case insensitive PCRE2</span>
</div>
</div>
<p className='text-xs text-gray-500 dark:text-gray-400'>
Enter your regular expression pattern
</p>
</div>
{patternError && (
<p className='text-sm text-red-600 dark:text-red-400'>
{patternError}
</p>
)}
<textarea
value={pattern}
onChange={e => onPatternChange(e.target.value)}
className='w-full h-24 rounded-md border border-gray-300 dark:border-gray-600
bg-gray-50 dark:bg-gray-800 px-3 py-2
text-gray-900 dark:text-gray-100 font-mono text-sm
focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent
transition-colors duration-200'
placeholder='Enter your regex pattern here...'
/>
</div>
{/* Tags */}
<div className='space-y-4'>
<div className='space-y-1'>
<label className='text-sm font-medium text-gray-700 dark:text-gray-300'>
Tags
</label>
<p className='text-xs text-gray-500 dark:text-gray-400'>
Add tags to organize and categorize this pattern
</p>
</div>
<div className='flex space-x-2'>
<input
type='text'
value={newTag}
onChange={e => setNewTag(e.target.value)}
onKeyPress={handleKeyPress}
placeholder='Add a tag'
className='w-full rounded-md border border-gray-300 dark:border-gray-600
bg-gray-50 dark:bg-gray-800 px-3 py-2 text-sm
text-gray-900 dark:text-gray-100
placeholder-gray-500 dark:placeholder-gray-400
focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent
transition-colors duration-200'
/>
<AddButton
onClick={handleAddTag}
disabled={!newTag.trim()}
label='Add'
/>
</div>
{tags.length > 0 ? (
<div className='flex flex-wrap gap-2 rounded-md'>
{tags.map(tag => (
<span
key={tag}
className='inline-flex items-center px-2.5 py-1 rounded-md
text-xs font-semibold
bg-blue-600/20 text-blue-400
group'>
{tag}
<button
onClick={() => onRemoveTag(tag)}
className='ml-1.5 p-0.5 rounded-md
hover:bg-blue-500/20
focus:outline-none focus:ring-2
focus:ring-blue-500 focus:ring-offset-1
transition-colors duration-200'>
<svg
className='w-3.5 h-3.5 text-blue-400
opacity-60 group-hover:opacity-100 transition-opacity'
fill='none'
stroke='currentColor'
viewBox='0 0 24 24'>
<path
strokeLinecap='round'
strokeLinejoin='round'
strokeWidth='2'
d='M6 18L18 6M6 6l12 12'
/>
</svg>
</button>
</span>
))}
</div>
) : (
<div
className='flex items-center justify-center h-12
text-sm text-gray-500 dark:text-gray-400
rounded-md border border-dashed
border-gray-300 dark:border-gray-700
bg-gray-50 dark:bg-gray-800/50'>
No tags added yet
</div>
)}
</div>
</div>
</div>
);
};
RegexGeneralTab.propTypes = {
name: PropTypes.string.isRequired,
description: PropTypes.string.isRequired,
pattern: PropTypes.string.isRequired,
tags: PropTypes.arrayOf(PropTypes.string).isRequired,
onNameChange: PropTypes.func.isRequired,
onDescriptionChange: PropTypes.func.isRequired,
onPatternChange: PropTypes.func.isRequired,
onAddTag: PropTypes.func.isRequired,
onRemoveTag: PropTypes.func.isRequired,
error: PropTypes.string,
patternError: PropTypes.string
};
export default RegexGeneralTab;

View File

@@ -1,199 +0,0 @@
import React, {useEffect} from 'react';
import PropTypes from 'prop-types';
import Modal from '@ui/Modal';
import RegexGeneralTab from './RegexGeneralTab';
import RegexTestingTab from './RegexTestingTab';
import {useRegexModal} from '@hooks/useRegexModal';
import {RegexPatterns} from '@api/data';
import Alert from '@ui/Alert';
import {Loader, Play} from 'lucide-react';
const RegexModal = ({
pattern: initialPattern,
isOpen,
onClose,
onSave,
isCloning = false
}) => {
const {
// Form state
name,
description,
patternValue,
tags,
tests,
// UI state
error,
patternError,
activeTab,
isDeleting,
isRunningTests,
// Actions
setName,
setDescription,
setPatternValue,
setTags,
setTests,
setActiveTab,
setIsDeleting,
// Main handlers
initializeForm,
handleSave,
handleRunTests
} = useRegexModal(initialPattern, onSave);
const tabs = [
{id: 'general', label: 'General'},
{id: 'testing', label: 'Testing'}
];
useEffect(() => {
if (isOpen) {
initializeForm(initialPattern, isCloning);
}
}, [initialPattern, isOpen, isCloning, initializeForm]);
const handleDelete = async () => {
if (!initialPattern) return;
if (isDeleting) {
try {
await RegexPatterns.delete(
initialPattern.file_name.replace('.yml', '')
);
onSave();
onClose();
} catch (error) {
console.error('Error deleting pattern:', error);
Alert.error(
error.response?.data?.error ||
'Failed to delete pattern. Please try again.'
);
} finally {
setIsDeleting(false);
}
} else {
setIsDeleting(true);
}
};
const footerContent = (
<div className='flex justify-between'>
{initialPattern && !isCloning && (
<button
onClick={handleDelete}
className={`px-4 py-2 text-white rounded transition-colors ${
isDeleting
? 'bg-red-600 hover:bg-red-700'
: 'bg-red-500 hover:bg-red-600'
}`}>
{isDeleting ? 'Confirm Delete' : 'Delete'}
</button>
)}
<div className='flex gap-2'>
{activeTab === 'testing' && tests?.length > 0 && (
<button
onClick={() => handleRunTests(patternValue, tests)}
disabled={isRunningTests}
className='inline-flex items-center px-4 py-2 bg-green-600 hover:bg-green-700
disabled:bg-green-600/50 text-white rounded transition-colors'>
{isRunningTests ? (
<Loader className='w-4 h-4 mr-2 animate-spin' />
) : (
<Play className='w-4 h-4 mr-2' />
)}
Run Tests
</button>
)}
<button
onClick={handleSave}
className='bg-blue-500 hover:bg-blue-600 text-white px-4 py-2 rounded transition-colors'>
Save
</button>
</div>
</div>
);
return (
<Modal
isOpen={isOpen}
onClose={onClose}
title={
isCloning
? 'Clone Pattern'
: initialPattern
? 'Edit Pattern'
: 'Add Pattern'
}
height='6xl'
width='4xl'
tabs={tabs}
activeTab={activeTab}
onTabChange={setActiveTab}
footer={footerContent}>
{activeTab => {
return (
<div className='h-full'>
{activeTab === 'general' && (
<RegexGeneralTab
name={name}
description={description}
pattern={patternValue}
error={error}
patternError={patternError}
tags={tags}
onNameChange={setName}
onDescriptionChange={setDescription}
onPatternChange={newPattern => {
setPatternValue(newPattern);
setPatternError('');
}}
onAddTag={tag => setTags([...tags, tag])}
onRemoveTag={tag =>
setTags(tags.filter(t => t !== tag))
}
/>
)}
{activeTab === 'testing' && (
<RegexTestingTab
pattern={patternValue}
tests={tests || []}
onTestsChange={setTests}
isRunningTests={isRunningTests}
onRunTests={handleRunTests}
/>
)}
</div>
);
}}
</Modal>
);
};
RegexModal.propTypes = {
pattern: PropTypes.shape({
name: PropTypes.string.isRequired,
pattern: PropTypes.string.isRequired,
description: PropTypes.string,
tags: PropTypes.arrayOf(PropTypes.string),
tests: PropTypes.arrayOf(
PropTypes.shape({
id: PropTypes.number.isRequired,
input: PropTypes.string.isRequired,
expected: PropTypes.bool.isRequired,
passes: PropTypes.bool.isRequired,
lastRun: PropTypes.string
})
),
created_date: PropTypes.string,
modified_date: PropTypes.string
}),
isOpen: PropTypes.bool.isRequired,
onClose: PropTypes.func.isRequired,
onSave: PropTypes.func.isRequired,
isCloning: PropTypes.bool
};
export default RegexModal;

View File

@@ -1,192 +0,0 @@
import React, {useState, useCallback, useEffect} from 'react';
import PropTypes from 'prop-types';
import {Plus, Loader, Play} from 'lucide-react';
import UnitTest from './UnitTest';
import AddUnitTestModal from './AddUnitTestModal';
/**
 * "Testing" tab of the regex editor: progress header, run/add buttons,
 * and the list of unit tests. Test state is owned by the parent and
 * mutated only through onTestsChange/onRunTests.
 */
const RegexTestingTab = ({
    pattern,
    tests,
    onTestsChange,
    isRunningTests,
    onRunTests
}) => {
    const [isModalOpen, setIsModalOpen] = useState(false);
    // Test currently being edited, or null when adding a new one.
    const [editingTest, setEditingTest] = useState(null);
    // Mount-only auto-run: tests loaded from disk carry a `passes` flag but
    // no `matchSpan`, so their results are stale — re-run them once.
    // Deps are intentionally empty; this must not re-fire on prop changes.
    useEffect(() => {
        const needsAutoRun =
            tests?.length > 0 &&
            pattern &&
            tests.some(test => test.passes !== undefined && !test.matchSpan);
        if (needsAutoRun && !isRunningTests) {
            onRunTests(pattern, tests);
        }
    }, []);
    // Insert or replace a test; result fields are reset and the whole
    // suite is re-run so the new/edited test gets fresh results.
    const handleAddOrUpdateTest = useCallback(
        testData => {
            let updatedTests;
            if (editingTest) {
                updatedTests = tests.map(test =>
                    test.id === testData.id
                        ? {
                              ...testData,
                              passes: false,
                              lastRun: null,
                              matchedContent: null,
                              matchSpan: null,
                              matchedGroups: []
                          }
                        : test
                );
            } else {
                updatedTests = [
                    ...tests,
                    {
                        ...testData,
                        passes: false,
                        lastRun: null,
                        matchedContent: null,
                        matchSpan: null,
                        matchedGroups: []
                    }
                ];
            }
            onTestsChange(updatedTests);
            onRunTests(pattern, updatedTests);
            setEditingTest(null);
        },
        [tests, onTestsChange, onRunTests, pattern, editingTest]
    );
    // Open the add/edit modal pre-filled with an existing test.
    const handleEditTest = useCallback(test => {
        setEditingTest(test);
        setIsModalOpen(true);
    }, []);
    const handleDeleteTest = useCallback(
        testId => {
            const updatedTests = tests.filter(test => test.id !== testId);
            onTestsChange(updatedTests);
        },
        [tests, onTestsChange]
    );
    const handleCloseModal = useCallback(() => {
        setIsModalOpen(false);
        setEditingTest(null);
    }, []);
    // Counts feeding the progress bar and the summary label.
    const totalTests = tests?.length || 0;
    const passedTests = tests?.filter(test => test.passes)?.length || 0;
    return (
        <div className='flex flex-col h-full'>
            {/* Header with Progress Bar */}
            <div className='flex items-center justify-between pb-4 pr-2'>
                <div>
                    <h2 className='text-xl font-semibold text-gray-900 dark:text-white mb-3'>
                        Unit Tests
                    </h2>
                    <div className='flex items-center gap-3'>
                        <div className='h-1.5 w-32 bg-gray-200 dark:bg-gray-700 rounded-full overflow-hidden'>
                            <div
                                className='h-full bg-emerald-500 rounded-full transition-all duration-300'
                                style={{
                                    width: `${
                                        totalTests
                                            ? (passedTests / totalTests) * 100
                                            : 0
                                    }%`
                                }}
                            />
                        </div>
                        <span className='text-sm text-gray-600 dark:text-gray-300'>
                            {totalTests > 0
                                ? `${passedTests}/${totalTests} tests passing`
                                : 'No tests added yet'}
                        </span>
                    </div>
                </div>
                <div className='flex items-center gap-2'>
                    {tests?.length > 0 && (
                        <button
                            onClick={() => onRunTests(pattern, tests)}
                            disabled={isRunningTests}
                            className='inline-flex items-center px-3 py-2 text-sm font-medium rounded-md bg-green-600 hover:bg-green-700 disabled:bg-green-600/50 text-white'>
                            {isRunningTests ? (
                                <Loader className='w-4 h-4 mr-2 animate-spin' />
                            ) : (
                                <Play className='w-4 h-4 mr-2' />
                            )}
                            Run Tests
                        </button>
                    )}
                    <button
                        onClick={() => setIsModalOpen(true)}
                        className='inline-flex items-center px-3 py-2 text-sm font-medium rounded-md bg-blue-600 hover:bg-blue-700 text-white'>
                        <Plus className='w-4 h-4 mr-2' />
                        Add Test
                    </button>
                </div>
            </div>
            {/* Test List */}
            <div className='flex-1 overflow-y-auto pr-2'>
                {tests?.length > 0 ? (
                    <div className='space-y-3'>
                        {tests.map(test => (
                            <UnitTest
                                key={test.id}
                                test={test}
                                pattern={pattern}
                                onDelete={() => handleDeleteTest(test.id)}
                                onEdit={() => handleEditTest(test)}
                            />
                        ))}
                    </div>
                ) : (
                    <div className='text-center py-12 rounded-lg'>
                        <p className='text-gray-500 dark:text-gray-400'>
                            No tests added yet
                        </p>
                    </div>
                )}
            </div>
            <AddUnitTestModal
                isOpen={isModalOpen}
                onClose={handleCloseModal}
                onAdd={handleAddOrUpdateTest}
                editTest={editingTest}
                tests={tests}
            />
        </div>
    );
};
RegexTestingTab.propTypes = {
    pattern: PropTypes.string.isRequired,
    tests: PropTypes.arrayOf(
        PropTypes.shape({
            id: PropTypes.number.isRequired,
            input: PropTypes.string.isRequired,
            expected: PropTypes.bool.isRequired,
            passes: PropTypes.bool.isRequired,
            lastRun: PropTypes.string,
            matchedContent: PropTypes.string,
            matchedGroups: PropTypes.arrayOf(PropTypes.string),
            matchSpan: PropTypes.shape({
                start: PropTypes.number,
                end: PropTypes.number
            })
        })
    ),
    onTestsChange: PropTypes.func.isRequired,
    isRunningTests: PropTypes.bool.isRequired,
    onRunTests: PropTypes.func.isRequired
};
export default RegexTestingTab;

View File

@@ -1,128 +0,0 @@
import React, {useState} from 'react';
import PropTypes from 'prop-types';
import {Trash2, Pencil} from 'lucide-react';
import DeleteConfirmationModal from '@ui/DeleteConfirmationModal';
const UnitTest = ({test, pattern, onDelete, onEdit}) => {
const [showDeleteModal, setShowDeleteModal] = useState(false);
const renderHighlightedInput = () => {
if (!test.matchSpan) {
return (
<span className='font-mono text-gray-100'>{test.input}</span>
);
}
const preMatch = test.input.slice(0, test.matchSpan.start);
const match = test.input.slice(
test.matchSpan.start,
test.matchSpan.end
);
const postMatch = test.input.slice(test.matchSpan.end);
return (
<span className='font-mono'>
<span className='text-gray-100'>{preMatch}</span>
<span
className={`px-0.5 rounded ${
test.passes
? 'bg-emerald-200 dark:bg-emerald-600 text-emerald-900 dark:text-emerald-100'
: 'bg-red-200 dark:bg-red-600 text-red-900 dark:text-red-100'
}`}>
{match}
</span>
<span className='text-gray-100'>{postMatch}</span>
</span>
);
};
return (
<>
<div
className={`
relative rounded-lg border group border border-gray-200 dark:border-gray-700
`}>
{/* Header */}
<div className='px-4 py-2 pr-2 flex items-center justify-between border-b border-inherit'>
<div className='flex items-center gap-2'>
<div
className={`
w-2 h-2 rounded-full
${
test.passes
? 'bg-emerald-500 shadow-sm shadow-emerald-500/50'
: 'bg-red-500 shadow-sm shadow-red-500/50'
}
`}
/>
<span
className={`text-xs font-medium
${
test.passes
? 'text-emerald-700 dark:text-emerald-300'
: 'text-red-700 dark:text-red-300'
}
`}>
{test.expected
? 'Should Match'
: 'Should Not Match'}
</span>
</div>
<div className='flex items-center gap-2'>
<span className='text-xs text-gray-500 dark:text-gray-400'>
Last run: {test.lastRun}
</span>
<div className='flex gap-2'>
<button
onClick={onEdit}
className='p-1 rounded shrink-0 transition-transform transform hover:scale-110'>
<Pencil className='w-4 h-4 text-gray-500 dark:text-gray-400' />
</button>
<button
onClick={() => setShowDeleteModal(true)}
className='p-1 rounded shrink-0 transition-transform transform hover:scale-110'>
<Trash2 className='w-4 h-4 text-gray-500 dark:text-gray-400' />
</button>
</div>
</div>
</div>
{/* Content */}
<div className='p-2 flex items-start gap-3'>
<div className='flex-1 min-w-0'>
<div className='rounded bg-white/75 dark:bg-black/25 px-2 py-1.5 text-xs'>
{renderHighlightedInput()}
</div>
</div>
</div>
</div>
<DeleteConfirmationModal
isOpen={showDeleteModal}
onClose={() => setShowDeleteModal(false)}
onConfirm={onDelete}
/>
</>
);
};
UnitTest.propTypes = {
test: PropTypes.shape({
id: PropTypes.number.isRequired,
input: PropTypes.string.isRequired,
expected: PropTypes.bool.isRequired,
passes: PropTypes.bool.isRequired,
lastRun: PropTypes.string,
matchedContent: PropTypes.string,
matchedGroups: PropTypes.arrayOf(PropTypes.string),
matchSpan: PropTypes.shape({
start: PropTypes.number,
end: PropTypes.number
})
}).isRequired,
pattern: PropTypes.string.isRequired,
onDelete: PropTypes.func.isRequired,
onEdit: PropTypes.func.isRequired
};
export default UnitTest;

View File

@@ -1,144 +0,0 @@
import {useState, useCallback} from 'react';
import {RegexPatterns} from '@api/data';
import Alert from '@ui/Alert';
import {useRegexTesting} from './useRegexTesting';
/**
 * State/controller hook backing RegexModal: owns the form fields, error
 * state, tab selection, delete-confirm flag, and the save/run-tests flows.
 *
 * Fixes:
 * - The consumer (RegexModal) destructures `error`, `patternError` and calls
 *   `setPatternError`, none of which this hook previously returned — it only
 *   exposed the aggregate `formErrors`. The hook now additionally returns
 *   `error`/`patternError` aliases and a `setPatternError` action
 *   (backward-compatible: `formErrors` is still returned unchanged).
 * - handleSave now surfaces the server-provided error message
 *   (`error.response?.data?.error`) first, consistent with the delete flow.
 */
export const useRegexModal = (initialPattern, onSave) => {
    // Form state
    const [name, setName] = useState('');
    // Tracked separately so renames can be detected on save.
    const [originalName, setOriginalName] = useState('');
    const [description, setDescription] = useState('');
    const [patternValue, setPatternValue] = useState('');
    const [tags, setTags] = useState([]);
    const [tests, setTests] = useState([]);
    const [isCloning, setIsCloning] = useState(false);
    // UI state with more specific error handling
    const [formErrors, setFormErrors] = useState({
        name: '',
        pattern: '',
        general: ''
    });
    const [activeTab, setActiveTab] = useState('general');
    const [isDeleting, setIsDeleting] = useState(false);
    // Initialize testing functionality
    const {isRunningTests, runTests} = useRegexTesting();
    // Update only the pattern-specific error; exposed so consumers can set
    // or clear it (e.g. while the user edits the pattern).
    const setPatternError = useCallback(message => {
        setFormErrors(prev => ({...prev, pattern: message}));
    }, []);
    /**
     * Seed the form from an existing pattern (edit/clone) or reset it (add).
     * When cloning, originalName stays empty so save creates a new file.
     */
    const initializeForm = useCallback((pattern, cloning) => {
        setIsCloning(cloning || false);
        if (pattern) {
            const initialName = cloning ? `${pattern.name}` : pattern.name;
            setName(initialName);
            setOriginalName(cloning ? '' : pattern.name);
            setDescription(pattern.description || '');
            setPatternValue(pattern.pattern || '');
            setTags(pattern.tags || []);
            setTests(pattern.tests || []);
        } else {
            setName('');
            setOriginalName('');
            setDescription('');
            setPatternValue('');
            setTags([]);
            setTests([]);
        }
        setFormErrors({name: '', pattern: '', general: ''});
        setIsDeleting(false);
    }, []);
    /**
     * Validate and persist the pattern: update when editing an existing,
     * non-cloned pattern (passing the new name if it changed), otherwise
     * create a new one. Calls onSave() on success.
     */
    const handleSave = async () => {
        // Name validation
        if (!name.trim()) {
            Alert.error('Name is required');
            return;
        }
        if (name.length > 64) {
            Alert.error('Name must be less than 64 characters');
            return;
        }
        // Pattern validation
        if (!patternValue.trim()) {
            Alert.error('Pattern is required');
            return;
        }
        try {
            const data = {
                name,
                pattern: patternValue,
                description,
                tags,
                tests
            };
            if (initialPattern && !isCloning) {
                const hasNameChanged = name !== originalName;
                await RegexPatterns.update(
                    initialPattern.file_name.replace('.yml', ''),
                    data,
                    hasNameChanged ? name : undefined
                );
                Alert.success('Pattern updated successfully');
            } else {
                await RegexPatterns.create(data);
                Alert.success('Pattern created successfully');
            }
            onSave();
        } catch (error) {
            console.error('Error saving pattern:', error);
            // Prefer the API's error payload, matching the delete flow.
            Alert.error(
                error.response?.data?.error ||
                    error.message ||
                    'Failed to save pattern. Please try again.'
            );
        }
    };
    // Run the test suite and store the refreshed results.
    const handleRunTests = useCallback(
        async (pattern, tests) => {
            try {
                const updatedTests = await runTests(pattern, tests);
                if (updatedTests) {
                    setTests(updatedTests);
                }
            } catch (error) {
                console.error('Error running tests:', error);
                Alert.error(
                    error.message || 'Failed to run tests. Please try again.'
                );
            }
        },
        [runTests]
    );
    return {
        // Form state
        name,
        description,
        patternValue,
        tags,
        tests,
        // UI state
        formErrors,
        // Flattened aliases for consumers reading individual errors.
        error: formErrors.general,
        patternError: formErrors.pattern,
        activeTab,
        isDeleting,
        isRunningTests,
        isCloning,
        // Actions
        setName,
        setDescription,
        setPatternValue,
        setTags,
        setTests,
        setActiveTab,
        setIsDeleting,
        setPatternError,
        // Main handlers
        initializeForm,
        handleSave,
        handleRunTests
    };
};

View File

@@ -1,61 +0,0 @@
// useRegexTesting.js
import {useState, useCallback} from 'react';
import {RegexPatterns} from '@api/data';
import Alert from '@ui/Alert';
/**
 * Hook encapsulating the "run regex tests against the backend" flow.
 *
 * Returns:
 * - isRunningTests: true while a run is in flight (for disabling buttons).
 * - runTests(pattern, tests): posts the pattern + tests to the API, shows a
 *   success/failure toast, optionally notifies `onUpdateTests`, and resolves
 *   to the refreshed test array (or the input array on failure/no-op).
 */
export const useRegexTesting = onUpdateTests => {
    const [isRunningTests, setIsRunningTests] = useState(false);

    const runTests = useCallback(
        async (pattern, tests) => {
            // Nothing to do without a non-blank pattern and at least one test.
            const hasPattern = Boolean(pattern?.trim());
            const hasTests = Boolean(tests?.length);
            if (!hasPattern || !hasTests) {
                return tests;
            }
            setIsRunningTests(true);
            try {
                // Send pattern + tests as a single payload object.
                const result = await RegexPatterns.runTests({pattern, tests});
                if (!result.success) {
                    Alert.error(result.message || 'Failed to run tests');
                    return tests;
                }
                // Summarize the outcome in a toast.
                const total = result.tests.length;
                const passed = result.tests.filter(t => t.passes).length;
                Alert.success(`Tests completed: ${passed}/${total} passed`, {
                    autoClose: 3000,
                    hideProgressBar: false
                });
                // Notify the optional subscriber with the fresh results.
                onUpdateTests?.(result.tests);
                return result.tests;
            } catch (error) {
                console.error('Error running tests:', error);
                Alert.error('An error occurred while running tests');
                return tests;
            } finally {
                setIsRunningTests(false);
            }
        },
        [onUpdateTests]
    );

    return {isRunningTests, runTests};
};