22 Commits
v1.1.1 ... main

Author SHA1 Message Date
santiagosayshey
3edd76224b Merge pull request #285 from Dictionarry-Hub/dev 2026-01-30 01:23:11 +10:30
Sam Chau
cf67a1c985 perf(cache): add proper file caching at the import instantiation 2025-09-04 00:17:55 +09:30
Sam Chau
88f11b65aa fix(revert): add deletion check back 2025-09-03 09:28:38 +09:30
Sam Chau
baba7ad3c5 fix(cache): remove regex compiler cache, reload cache after ANY git revert 2025-09-03 08:43:04 +09:30
Sam Chau
9e5e5ce523 fix(cache): force reinitialize on git ops 2025-08-28 13:24:43 +09:30
Samuel Chau
3a0deb16fa docs(README): update setup link, clean old sections 2025-08-28 10:29:37 +09:30
Sam Chau
5ee22f7201 fix(cache): reload cache on git operations 2025-08-28 01:45:14 +09:30
Sam Chau
61854e3d02 feat(cache): implement in-memory caching for YAML data to improve performance 2025-08-27 03:38:07 +09:30
Sam Chau
666f98c68b feat(card): add visibility handling and loading placeholders for FormatCard and RegexCard components to improve performance 2025-08-27 03:23:24 +09:30
Sam Chau
77f996f8c5 feat(regex): implement .NET regex testing via PowerShell and enhance UI components 2025-08-27 02:38:01 +09:30
Sam Chau
ef86fa251f feat(regex): add .NET regex validation via PowerShell and integrate into frontend 2025-08-27 01:34:10 +09:30
Samuel Chau
bb514b20cc Merge pull request #224 from Dictionarry-Hub/dev
fix(backend): perms env, mm import refactor, deserialize error
2025-08-26 22:11:39 +09:30
Sam Chau
99925be174 Merge branch 'main' of https://github.com/Dictionarry-Hub/profilarr into dev 2025-08-26 22:08:05 +09:30
Sam Chau
21e44d592f fix(entrypoint): simplify umask handling 2025-08-24 17:12:45 +09:30
Sam Chau
212dd695b6 fix(entrypoint): start shell with overriden umask 2025-08-24 16:20:17 +09:30
Sam Chau
6c40d352c9 fix(migration): update default language score 2025-08-24 16:07:38 +09:30
Sam Chau
7270bbfedb chore(docker): add entrypoint script and user permissions 2025-08-24 15:35:42 +09:30
Sam Chau
2e2abb93be feat(task): add update logic for task intervals for backup/sync 2025-08-23 10:12:12 +09:30
Sam Chau
7f5f44cd77 refactor(media-management): replace requests with ArrHandler for API interactions 2025-08-23 09:04:59 +09:30
Sam Chau
c30dc33828 fix(importer): pass arr type to format extractor to only compile/import arr specific formats 2025-08-23 08:23:29 +09:30
Samuel Chau
29a64511b8 fix(conflict): deserialize arr specific score objects when checking for conflicts (#220) 2025-08-21 10:25:26 +09:30
Sam Chau
eb9733807e fix(conflict): deserialize arr specific score objects when checking for conflicts 2025-08-21 10:23:27 +09:30
45 changed files with 1199 additions and 473 deletions

.gitignore vendored (+1)
View File

@@ -20,6 +20,7 @@ backend/app/static/
 # Config data
 config/
+config-test/
 radarr-config/
 sonarr-config/
 test-data/

View File

@@ -1,17 +1,36 @@
 # Dockerfile
 FROM python:3.9-slim
 WORKDIR /app
-# Install git (since we're still using slim)
-RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
+# Install git, gosu, and PowerShell Core
+RUN apt-get update && apt-get install -y \
+    git \
+    gosu \
+    wget \
+    ca-certificates \
+    libicu-dev \
+    && wget -O /tmp/powershell.tar.gz https://github.com/PowerShell/PowerShell/releases/download/v7.4.0/powershell-7.4.0-linux-x64.tar.gz \
+    && mkdir -p /opt/microsoft/powershell/7 \
+    && tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7 \
+    && chmod +x /opt/microsoft/powershell/7/pwsh \
+    && ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh \
+    && rm /tmp/powershell.tar.gz \
+    && rm -rf /var/lib/apt/lists/*
 # Copy pre-built files from dist directory
 COPY dist/backend/app ./app
+COPY dist/backend/scripts ./app/scripts
 COPY dist/static ./app/static
 COPY dist/requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
+# Ensure scripts are executable
+RUN chmod +x /app/scripts/*.ps1 || true
+# Copy and setup entrypoint script
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
 LABEL org.opencontainers.image.authors="Dictionarry dictionarry@pm.me"
 LABEL org.opencontainers.image.description="Profilarr - Profile manager for *arr apps"
 LABEL org.opencontainers.image.source="https://github.com/Dictionarry-Hub/profilarr"
 LABEL org.opencontainers.image.title="Profilarr"
 LABEL org.opencontainers.image.version="beta"
 EXPOSE 6868
+ENTRYPOINT ["/entrypoint.sh"]
 CMD ["gunicorn", "--bind", "0.0.0.0:6868", "--timeout", "600", "app.main:create_app()"]

View File

@@ -54,26 +54,7 @@ After deployment, access the web UI at `http://[address]:6868` to begin setup.
 ### Complete Documentation
-Visit our comprehensive documentation at [dictionarry.dev/wiki/profilarr-setup](https://dictionarry.dev/wiki/profilarr-setup) for detailed installation instructions and usage guides.
+Visit our comprehensive documentation at [dictionarry.dev](https://dictionarry.dev/profilarr-setup/installation) for detailed installation instructions and usage guides.
-## Support
-### Need Help?
-- **Bug Reports & Issues**: Submit technical issues via our [GitHub Issues tracker](https://github.com/Dictionarry-Hub/profilarr/issues)
-- **Community Support**: Join our [Discord community](https://discord.com/invite/Y9TYP6jeYZ) for help from developers and other users
-- **Database Issues**: Please direct database-specific issues to their respective repositories, as this repository focuses exclusively on Profilarr development
-## Contributing
-We welcome contributions from the community! Here's how you can help improve Profilarr:
-- **Pull Requests**: Feel free to submit PRs for bug fixes or new features
-- **Feature Suggestions**: Share your ideas through GitHub issues
-- **Documentation**: Help improve our guides and documentation
-- **Testing**: Try new features and report any issues
-Detailed contributing guidelines will be available soon. Join our Discord to discuss potential contributions with the development team.
 ## Status
@@ -83,11 +64,6 @@ Currently in beta. Part of the [Dictionarry](https://github.com/Dictionarry-Hub)
 - https://github.com/Dictionarry-Hub/profilarr/issues
-### Development
-- Currently focused on fixing bugs found in open beta
-- 1.1 will focus on improving the 'setup' side of profilarr - adding media management / quality settings syncs
 ### Personal Note
 Profilarr is maintained by a single CS student with no formal development experience, in their spare time. Development happens when time allows, which may affect response times for fixes and new features. The project is continuously improving, and your patience, understanding, and contributions are greatly appreciated as Profilarr grows and matures.

View File

@@ -1,7 +1,21 @@
 FROM python:3.9
 WORKDIR /app
+# Install PowerShell Core
+RUN apt-get update && apt-get install -y \
+    wget \
+    ca-certificates \
+    libicu-dev \
+    && wget -O /tmp/powershell.tar.gz https://github.com/PowerShell/PowerShell/releases/download/v7.4.0/powershell-7.4.0-linux-x64.tar.gz \
+    && mkdir -p /opt/microsoft/powershell/7 \
+    && tar zxf /tmp/powershell.tar.gz -C /opt/microsoft/powershell/7 \
+    && chmod +x /opt/microsoft/powershell/7/pwsh \
+    && ln -s /opt/microsoft/powershell/7/pwsh /usr/bin/pwsh \
+    && rm /tmp/powershell.tar.gz \
+    && rm -rf /var/lib/apt/lists/*
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 COPY . .
+# Ensure scripts are executable
+RUN chmod +x /app/scripts/*.ps1 || true
 # Use gunicorn with 10-minute timeout
 CMD ["python", "-m", "app.main"]

View File

@@ -7,6 +7,7 @@ from .utils import (get_category_directory, load_yaml_file, validate,
                     test_regex_pattern, test_format_conditions,
                     check_delete_constraints, filename_to_display)
 from ..db import add_format_to_renames, remove_format_from_renames, is_format_in_renames
+from .cache import data_cache

 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
@@ -16,43 +17,19 @@ bp = Blueprint('data', __name__)
 @bp.route('/<string:category>', methods=['GET'])
 def retrieve_all(category):
     try:
-        directory = get_category_directory(category)
-        files = [f for f in os.listdir(directory) if f.endswith('.yml')]
-        logger.debug(f"Found {len(files)} files in {category}")
-
-        if not files:
-            return jsonify([]), 200
-
-        result = []
-        errors = 0
-        for file_name in files:
-            file_path = os.path.join(directory, file_name)
-            try:
-                content = load_yaml_file(file_path)
-                # Add metadata for custom formats
-                if category == 'custom_format':
-                    content['metadata'] = {
-                        'includeInRename':
-                        is_format_in_renames(content['name'])
-                    }
-                result.append({
-                    "file_name": file_name,
-                    "content": content,
-                    "modified_date": get_file_modified_date(file_path)
-                })
-            except yaml.YAMLError:
-                errors += 1
-                result.append({
-                    "file_name": file_name,
-                    "error": "Failed to parse YAML"
-                })
-        logger.info(
-            f"Processed {len(files)} {category} files ({errors} errors)")
-        return jsonify(result), 200
+        # Use cache instead of reading from disk
+        items = data_cache.get_all(category)
+
+        # Add metadata for custom formats
+        if category == 'custom_format':
+            for item in items:
+                if 'content' in item and 'name' in item['content']:
+                    item['content']['metadata'] = {
+                        'includeInRename': is_format_in_renames(item['content']['name'])
+                    }
+
+        logger.info(f"Retrieved {len(items)} {category} items from cache")
+        return jsonify(items), 200

     except ValueError as ve:
         logger.error(ve)
@@ -127,6 +104,10 @@ def handle_item(category, name):
             # Then delete the file
             os.remove(file_path)

+            # Update cache
+            data_cache.remove_item(category, file_name)
+
             return jsonify(
                 {"message": f"Successfully deleted {file_name}"}), 200
         except OSError as e:
@@ -226,6 +207,32 @@ def handle_item(category, name):
         return jsonify({"error": "An unexpected error occurred"}), 500


+@bp.route('/regex/verify', methods=['POST'])
+def verify_regex():
+    """Verify a regex pattern using .NET regex engine via PowerShell"""
+    try:
+        data = request.get_json()
+        if not data:
+            return jsonify({"error": "No JSON data provided"}), 400
+
+        pattern = data.get('pattern')
+        if not pattern:
+            return jsonify({"error": "Pattern is required"}), 400
+
+        from .utils import verify_dotnet_regex
+        success, message = verify_dotnet_regex(pattern)
+
+        if success:
+            return jsonify({"valid": True, "message": "Pattern is valid"}), 200
+        else:
+            return jsonify({"valid": False, "error": message}), 200
+
+    except Exception as e:
+        logger.exception("Error verifying regex pattern")
+        return jsonify({"valid": False, "error": str(e)}), 500
+
+
 @bp.route('/<string:category>/test', methods=['POST'])
 def run_tests(category):
     logger.info(f"Received test request for category: {category}")
@@ -233,25 +240,29 @@ def run_tests(category):
     try:
         data = request.get_json()
         if not data:
-            logger.warning("Rejected test request - no JSON data provided")
+            logger.warning("Test request rejected: no JSON data")
             return jsonify({"error": "No JSON data provided"}), 400

         tests = data.get('tests', [])
         if not tests:
-            logger.warning("Rejected test request - no test cases provided")
+            logger.warning("Test request rejected: no tests provided")
             return jsonify({"error":
                             "At least one test case is required"}), 400

         if category == 'regex_pattern':
             pattern = data.get('pattern')
-            logger.info(f"Processing regex test request - Pattern: {pattern}")
             if not pattern:
-                logger.warning("Rejected test request - missing pattern")
+                logger.warning("Test request rejected: missing pattern")
                 return jsonify({"error": "Pattern is required"}), 400

             success, message, updated_tests = test_regex_pattern(
                 pattern, tests)
+            if success and updated_tests:
+                passed = sum(1 for t in updated_tests if t.get('passes'))
+                total = len(updated_tests)
+                logger.info(f"Tests completed: {passed}/{total} passed")

         elif category == 'custom_format':
             conditions = data.get('conditions', [])
@@ -274,10 +285,8 @@ def run_tests(category):
             return jsonify(
                 {"error": "Testing not supported for this category"}), 400

-        logger.info(f"Test execution completed - Success: {success}")
         if not success:
-            logger.warning(f"Test execution failed - {message}")
+            logger.error(f"Test execution failed: {message}")
             return jsonify({"success": False, "message": message}), 400

         return jsonify({"success": True, "tests": updated_tests}), 200

backend/app/data/cache.py (new file, +117)
View File

@@ -0,0 +1,117 @@
import os
import yaml
import logging
from typing import Dict, List, Any, Optional
from datetime import datetime
import threading
from .utils import get_category_directory, get_file_modified_date, filename_to_display
logger = logging.getLogger(__name__)
class DataCache:
"""In-memory cache for YAML data"""
def __init__(self):
self._cache = {
'regex_pattern': {},
'custom_format': {},
'profile': {}
}
self._lock = threading.RLock()
self._initialized = False
def initialize(self, force_reload=False):
"""Load all data into memory on startup
Args:
force_reload: If True, force a reload even if already initialized
"""
with self._lock:
if self._initialized and not force_reload:
return
logger.info("Initializing data cache..." if not force_reload else "Reloading data cache...")
for category in self._cache.keys():
self._load_category(category)
self._initialized = True
logger.info("Data cache initialized successfully" if not force_reload else "Data cache reloaded successfully")
def _load_category(self, category: str):
"""Load all items from a category into cache"""
try:
directory = get_category_directory(category)
items = {}
for filename in os.listdir(directory):
if not filename.endswith('.yml'):
continue
file_path = os.path.join(directory, filename)
try:
with open(file_path, 'r') as f:
content = yaml.safe_load(f)
if content:
# Store with metadata
items[filename] = {
'file_name': filename,
'modified_date': get_file_modified_date(file_path),
'content': content
}
except Exception as e:
logger.error(f"Error loading {file_path}: {e}")
self._cache[category] = items
logger.info(f"Loaded {len(items)} items for category {category}")
except Exception as e:
logger.error(f"Error loading category {category}: {e}")
def get_all(self, category: str) -> List[Dict[str, Any]]:
"""Get all items from a category"""
with self._lock:
if not self._initialized:
self.initialize()
return list(self._cache.get(category, {}).values())
def get_item(self, category: str, name: str) -> Optional[Dict[str, Any]]:
"""Get a specific item"""
with self._lock:
if not self._initialized:
self.initialize()
# Convert name to filename
filename = f"{name.replace('[', '(').replace(']', ')')}.yml"
return self._cache.get(category, {}).get(filename)
def update_item(self, category: str, filename: str, content: Dict[str, Any]):
"""Update an item in cache"""
with self._lock:
if category in self._cache:
file_path = os.path.join(get_category_directory(category), filename)
self._cache[category][filename] = {
'file_name': filename,
'modified_date': get_file_modified_date(file_path),
'content': content
}
logger.debug(f"Updated cache for {category}/{filename}")
def remove_item(self, category: str, filename: str):
"""Remove an item from cache"""
with self._lock:
if category in self._cache and filename in self._cache[category]:
del self._cache[category][filename]
logger.debug(f"Removed from cache: {category}/{filename}")
def rename_item(self, category: str, old_filename: str, new_filename: str):
"""Rename an item in cache"""
with self._lock:
if category in self._cache and old_filename in self._cache[category]:
item = self._cache[category].pop(old_filename)
item['file_name'] = new_filename
self._cache[category][new_filename] = item
logger.debug(f"Renamed in cache: {category}/{old_filename} -> {new_filename}")
# Global cache instance
data_cache = DataCache()
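
The cache is a process-wide singleton guarded by an RLock, populated lazily on first read. A short usage sketch of the API above (the filename is illustrative; the import path follows the new module's location):

from app.data.cache import data_cache

# First read populates the cache from the YAML directories.
formats = data_cache.get_all('custom_format')

# Writers keep the cache coherent instead of forcing a disk re-read:
data_cache.update_item('custom_format', 'Example Format.yml', {'name': 'Example Format'})
data_cache.remove_item('custom_format', 'Example Format.yml')

# Git operations (pull, merge, revert, clone, checkout) can rewrite many
# files at once, so they drop everything and reload from disk:
data_cache.initialize(force_reload=True)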

View File

@@ -7,6 +7,8 @@ from typing import Dict, List, Any, Tuple, Union
 import git
 import regex
 import logging
+import subprocess
+import json
 from ..db.queries.arr import update_arr_config_on_rename, update_arr_config_on_delete

 logger = logging.getLogger(__name__)
@@ -152,6 +154,11 @@ def save_yaml_file(file_path: str,
     with open(safe_file_path, 'w') as f:
         yaml.safe_dump(ordered_data, f, sort_keys=False)

+    # Update cache
+    from .cache import data_cache
+    filename = os.path.basename(safe_file_path)
+    data_cache.update_item(category, filename, ordered_data)
+

 def update_yaml_file(file_path: str, data: Dict[str, Any],
@@ -216,6 +223,12 @@ def update_yaml_file(file_path: str, data: Dict[str, Any],
             os.rename(file_path, new_file_path)
             # Stage the new file
             repo.index.add([rel_new_path])

+            # Update cache for rename
+            from .cache import data_cache
+            old_filename = os.path.basename(file_path)
+            new_filename = os.path.basename(new_file_path)
+            data_cache.rename_item(category, old_filename, new_filename)
+
         except git.GitCommandError as e:
             logger.error(f"Git operation failed: {e}")
@@ -360,6 +373,68 @@ def check_delete_constraints(category: str, name: str) -> Tuple[bool, str]:
         return False, f"Error checking references: {str(e)}"


+def verify_dotnet_regex(pattern: str) -> Tuple[bool, str]:
+    """
+    Verify a regex pattern using .NET regex engine via PowerShell.
+    Returns (success, message) tuple.
+    """
+    try:
+        # Get the path to the validate.ps1 script
+        # In Docker, the structure is /app/app/data/utils.py and script is at /app/scripts/validate.ps1
+        script_path = os.path.join('/app', 'scripts', 'validate.ps1')
+        if not os.path.exists(script_path):
+            # Fallback for local development
+            script_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'scripts', 'validate.ps1')
+
+        # Run PowerShell script, passing pattern via stdin to avoid shell escaping issues
+        result = subprocess.run(
+            ['pwsh', '-File', script_path],
+            input=pattern,
+            capture_output=True,
+            text=True,
+            timeout=5
+        )
+
+        if result.returncode != 0 and not result.stdout:
+            logger.error(f"PowerShell script failed: {result.stderr}")
+            return False, "Failed to validate pattern"
+
+        # Log the raw output for debugging
+        logger.debug(f"PowerShell output: {result.stdout}")
+
+        # Parse JSON output
+        try:
+            output = json.loads(result.stdout.strip())
+        except json.JSONDecodeError:
+            # Try to find JSON in the output
+            lines = result.stdout.strip().split('\n')
+            for line in reversed(lines):
+                if line.strip():
+                    try:
+                        output = json.loads(line)
+                        break
+                    except json.JSONDecodeError:
+                        continue
+            else:
+                logger.error(f"No valid JSON found in output: {result.stdout}")
+                return False, "Failed to parse validation result"
+
+        if output.get('valid'):
+            return True, output.get('message', 'Pattern is valid')
+        else:
+            return False, output.get('error', 'Invalid pattern')
+
+    except subprocess.TimeoutExpired:
+        logger.error("Pattern validation timed out")
+        return False, "Pattern validation timed out"
+    except FileNotFoundError:
+        logger.error("PowerShell (pwsh) not found")
+        return False, "PowerShell is not available"
+    except Exception as e:
+        logger.error(f"Error validating pattern: {e}")
+        return False, f"Validation error: {str(e)}"
+
+
 def update_references(category: str, old_name: str,
                       new_name: str) -> List[str]:
     """
@@ -478,76 +553,67 @@ def test_regex_pattern(
         pattern: str,
         tests: List[Dict[str, Any]]) -> Tuple[bool, str, List[Dict[str, Any]]]:
     """
-    Test a regex pattern against a list of test cases using PCRE2 compatible engine.
+    Test a regex pattern against a list of test cases using .NET regex engine via PowerShell.
     Returns match information along with test results.
     """
-    logger.info(f"Starting regex pattern test - Pattern: {pattern}")
     try:
-        try:
-            compiled_pattern = regex.compile(pattern,
-                                             regex.V1 | regex.IGNORECASE)
-            logger.info(
-                "Pattern compiled successfully with PCRE2 compatibility")
-        except regex.error as e:
-            logger.warning(f"Invalid regex pattern: {str(e)}")
-            return False, f"Invalid regex pattern: {str(e)}", tests
-
-        current_time = datetime.now().isoformat()
-        logger.info(f"Processing {len(tests)} test cases")
-        for test in tests:
-            test_id = test.get('id', 'unknown')
-            test_input = test.get('input', '')
-            expected = test.get('expected', False)
-
-            try:
-                match = compiled_pattern.search(test_input)
-                matches = bool(match)
-
-                # Update test result with basic fields
-                test['passes'] = matches == expected
-                test['lastRun'] = current_time
-
-                # Add match information
-                if match:
-                    test['matchedContent'] = match.group(0)
-                    test['matchSpan'] = {
-                        'start': match.start(),
-                        'end': match.end()
-                    }
-                    # Get all capture groups if they exist
-                    test['matchedGroups'] = [g for g in match.groups()
-                                             ] if match.groups() else []
-                else:
-                    test['matchedContent'] = None
-                    test['matchSpan'] = None
-                    test['matchedGroups'] = []
-
-                logger.info(
-                    f"Test {test_id} {'passed' if test['passes'] else 'failed'} - Match: {matches}, Expected: {expected}"
-                )
-            except Exception as e:
-                logger.error(f"Error running test {test_id}: {str(e)}")
-                test['passes'] = False
-                test['lastRun'] = current_time
-                test['matchedContent'] = None
-                test['matchSpan'] = None
-                test['matchedGroups'] = []
-
-        # Log overall results
-        passed_tests = sum(1 for test in tests if test.get('passes', False))
-        logger.info(
-            f"Test execution complete - {passed_tests}/{len(tests)} tests passed"
-        )
-
-        return True, "", tests
+        # Get the path to the test.ps1 script
+        script_path = os.path.join('/app', 'scripts', 'test.ps1')
+        if not os.path.exists(script_path):
+            # Fallback for local development
+            script_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'scripts', 'test.ps1')
+
+        # Prepare the input data
+        input_data = {
+            'pattern': pattern,
+            'tests': tests
+        }
+
+        # Run PowerShell script
+        result = subprocess.run(
+            ['pwsh', '-File', script_path],
+            input=json.dumps(input_data),
+            capture_output=True,
+            text=True,
+            timeout=10
+        )
+
+        if result.returncode != 0 and not result.stdout:
+            logger.error(f"PowerShell script failed: {result.stderr}")
+            return False, "Failed to run tests", tests
+
+        # Parse JSON output
+        try:
+            output = json.loads(result.stdout.strip())
+        except json.JSONDecodeError:
+            # Try to find JSON in the output
+            lines = result.stdout.strip().split('\n')
+            for line in reversed(lines):
+                if line.strip():
+                    try:
+                        output = json.loads(line)
+                        break
+                    except json.JSONDecodeError:
+                        continue
+            else:
+                logger.error(f"No valid JSON found in output: {result.stdout}")
+                return False, "Failed to parse test results", tests
+
+        if output.get('success'):
+            return True, "Tests completed successfully", output.get('tests', tests)
+        else:
+            return False, output.get('message', 'Tests failed'), tests
+
+    except subprocess.TimeoutExpired:
+        logger.error("Test execution timed out")
+        return False, "Test execution timed out", tests
+    except FileNotFoundError:
+        logger.error("PowerShell (pwsh) not found")
+        return False, "PowerShell is not available", tests
     except Exception as e:
-        logger.error(f"Unexpected error in test_regex_pattern: {str(e)}",
-                     exc_info=True)
-        return False, str(e), tests
+        logger.error(f"Error running tests: {e}")
+        return False, f"Test error: {str(e)}", tests


 def test_format_conditions(conditions: List[Dict],
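
Both PowerShell helpers are expected to print a JSON object on stdout, but pwsh can emit extra lines around it, so the parser falls back to scanning the output in reverse for the last line that parses. A condensed sketch of that contract (the script output shown is an assumption; only the stdin/stdout shapes come from the Python side above):

import json

# What the backend writes to test.ps1's stdin:
request = {'pattern': r'remux', 'tests': [{'id': 1, 'input': 'REMUX', 'expected': True}]}

# The final stdout line the script is expected to produce:
response_line = json.dumps({'success': True, 'tests': request['tests']})

def last_json_line(stdout: str):
    """Mirror of the reverse scan used in verify_dotnet_regex/test_regex_pattern."""
    for line in reversed(stdout.strip().split('\n')):
        if line.strip():
            try:
                return json.loads(line)
            except json.JSONDecodeError:
                continue
    return None

assert last_json_line('WARNING: noise\n' + response_line)['success'] is True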

View File

@@ -0,0 +1,33 @@
# backend/app/db/migrations/versions/004_update_language_score_default.py
from ...connection import get_db
version = 4
name = "update_language_score_default"
def up():
"""Update default language import score to -999999."""
with get_db() as conn:
# Update existing record to new default value
conn.execute('''
UPDATE language_import_config
SET score = -999999,
updated_at = CURRENT_TIMESTAMP
WHERE id = 1
''')
conn.commit()
def down():
"""Revert language import score to previous default."""
with get_db() as conn:
# Revert to previous default value
conn.execute('''
UPDATE language_import_config
SET score = -99999,
updated_at = CURRENT_TIMESTAMP
WHERE id = 1
''')
conn.commit()

View File

@@ -44,6 +44,12 @@ def checkout_branch(repo_path, branch_name):
             return False, f"Branch '{branch_name}' does not exist locally or in any remote."

         logger.debug(f"Successfully checked out branch: {branch_name}")

+        # Reload cache after branch checkout since files may have changed
+        from ...data.cache import data_cache
+        logger.info("Reloading data cache after branch checkout")
+        data_cache.initialize(force_reload=True)
+
         return True, {
             "message": f"Checked out branch: {branch_name}",
             "current_branch": branch_name

View File

@@ -11,6 +11,11 @@ def delete_file(repo_path, file_path):
         if os.path.exists(full_file_path):
             os.remove(full_file_path)

+            # Reload cache after file deletion
+            from ...data.cache import data_cache
+            data_cache.initialize(force_reload=True)
+
             message = f"File {file_path} has been deleted."
             return True, message
         else:

View File

@@ -60,6 +60,11 @@ def finalize_merge(repo) -> Dict[str, Any]:
         if status_manager:
             status_manager.update_remote_status()

+        # Reload cache for modified data files
+        from ...data.cache import data_cache
+        logger.info("Reloading data cache after merge completion")
+        data_cache.initialize(force_reload=True)  # This will reload all data
+
         return {'success': True, 'message': 'Merge completed successfully'}
     except git.GitCommandError as e:
         logger.error(f"Git command error during commit: {str(e)}")

View File

@@ -35,6 +35,11 @@ def pull_branch(repo_path, branch_name):
         if status_manager:
             status_manager.update_remote_status()

+        # Reload cache for updated data files
+        from ...data.cache import data_cache
+        logger.info("Reloading data cache after pull")
+        data_cache.initialize(force_reload=True)  # This will reload all data
+
         # -------------------------------
         # *** "On pull" ARR import logic using new importer:
         # 1) Query all ARR configs that have sync_method="pull"

View File

@@ -310,6 +310,11 @@ def resolve_conflicts(
                 logger.debug(f"File status: {item}")
             logger.debug("=======================================")

+        # Reload cache after conflict resolution
+        from ...data.cache import data_cache
+        logger.info("Reloading data cache after conflict resolution")
+        data_cache.initialize(force_reload=True)
+
         return {'success': True, 'results': results}

     except Exception as e:

View File

@@ -26,6 +26,11 @@ def revert_file(repo_path, file_path):
         untracked_files = repo.untracked_files
         is_untracked = any(f == file_path for f in untracked_files)

+        # Check if file is staged for deletion
+        staged_deletions = repo.index.diff("HEAD", R=True)
+        is_staged_for_deletion = any(d.a_path == file_path
+                                     for d in staged_deletions)
+
         if is_untracked:
             # For untracked files, we need to remove them
             try:
@@ -33,14 +38,7 @@ def revert_file(repo_path, file_path):
                 message = f"New file {file_path} has been removed."
             except FileNotFoundError:
                 message = f"File {file_path} was already removed."
-            return True, message
-
-        # Check if file is staged for deletion
-        staged_deletions = repo.index.diff("HEAD", R=True)
-        is_staged_for_deletion = any(d.a_path == file_path
-                                     for d in staged_deletions)
-        if is_staged_for_deletion:
+        elif is_staged_for_deletion:
             # Restore file staged for deletion
             repo.git.reset("--", file_path)
             repo.git.checkout('HEAD', "--", file_path)
@@ -51,6 +49,10 @@ def revert_file(repo_path, file_path):
             repo.git.restore('--staged', "--", file_path)
             message = f"File {file_path} has been reverted."

+        # Reload cache after ANY revert operation
+        from ...data.cache import data_cache
+        data_cache.initialize(force_reload=True)
+
         return True, message

     except git.exc.GitCommandError as e:
@@ -98,6 +100,10 @@ def revert_all(repo_path):
             message += f" and {len(untracked_files)} new file(s) have been removed"
         message += "."

+        # Reload cache after reverting all
+        from ...data.cache import data_cache
+        data_cache.initialize(force_reload=True)
+
         return True, message

     except git.exc.GitCommandError as e:
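
The new staged-deletion branch leans on GitPython's index-to-HEAD diff; with R=True the diff sides are swapped so the deleted file's path lands on a_path, matching the check above. Pulled out as a standalone sketch (paths are illustrative):

import git

def is_staged_for_deletion(repo_path: str, file_path: str) -> bool:
    """Mirror of the check added in revert_file."""
    repo = git.Repo(repo_path)
    staged_deletions = repo.index.diff('HEAD', R=True)
    return any(d.a_path == file_path for d in staged_deletions)

# e.g. after `git rm profile/Example.yml`:
# is_staged_for_deletion('/config/db', 'profile/Example.yml') -> True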

View File

@@ -116,6 +116,11 @@ def clone_repository(repo_url, repo_path):
             logger.info("Removing backup directory")
             shutil.rmtree(backup_dir)

+        # Reload cache after clone operation
+        from ...data.cache import data_cache
+        logger.info("Reloading data cache after clone")
+        data_cache.initialize(force_reload=True)
+
         logger.info("Clone operation completed successfully")
         return True, "Repository cloned and local files merged successfully"

View File

@@ -68,6 +68,12 @@ def unlink_repository(repo_path, remove_files=False):
         save_settings({'gitRepo': None})
         logger.info("Updated settings to remove git information")

+        # Reload cache if files were removed
+        if remove_files:
+            from ...data.cache import data_cache
+            logger.info("Reloading data cache after removing repository files")
+            data_cache.initialize(force_reload=True)
+
         return True, "Repository successfully unlinked"
     except Exception as e:
         logger.error(f"Error unlinking repository: {str(e)}", exc_info=True)

View File

@@ -153,6 +153,30 @@ def compare_primitive_arrays(ours_data: List, theirs_data: List,
     return conflicts


+def format_array_for_display(data):
+    """Format array data for display in conflict resolution"""
+    if isinstance(data, list):
+        if not data:
+            return "[] (empty array)"
+        elif all(isinstance(x, dict) and 'name' in x for x in data):
+            # Array of objects with names - show the names
+            names = [x['name'] for x in data]
+            if len(names) <= 5:
+                return f"[{', '.join(names)}]"
+            else:
+                return f"[{', '.join(names[:5])}, ... and {len(names) - 5} more]"
+        elif all(not isinstance(x, (dict, list)) for x in data):
+            # Array of primitives
+            if len(data) <= 5:
+                return f"[{', '.join(str(x) for x in data)}]"
+            else:
+                return f"[{', '.join(str(x) for x in data[:5])}, ... and {len(data) - 5} more]"
+        else:
+            # Mixed or complex array
+            return f"Array with {len(data)} items"
+    return data
+
+
 def compare_dicts(ours_data: Dict, theirs_data: Dict, path: str) -> List[Dict]:
     """Compare dictionaries recursively"""
     conflicts = []
@@ -164,15 +188,23 @@ def compare_dicts(ours_data: Dict, theirs_data: Dict, path: str) -> List[Dict]:
         new_path = f"{path}.{key}" if path else key

         if key not in ours_data:
+            # Format arrays for better display when field is missing locally
+            incoming_val = theirs_data[key]
+            if isinstance(incoming_val, list):
+                incoming_val = format_array_for_display(incoming_val)
             conflicts.append({
                 'parameter': new_path,
                 'local_value': None,
-                'incoming_value': theirs_data[key]
+                'incoming_value': incoming_val
             })
         elif key not in theirs_data:
+            # Format arrays for better display when field is missing remotely
+            local_val = ours_data[key]
+            if isinstance(local_val, list):
+                local_val = format_array_for_display(local_val)
             conflicts.append({
                 'parameter': new_path,
-                'local_value': ours_data[key],
+                'local_value': local_val,
                 'incoming_value': None
             })
         elif ours_data[key] != theirs_data[key]:
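
For reference, the display helper produces summaries like these (format names are illustrative):

format_array_for_display([])                                    # "[] (empty array)"
format_array_for_display([{'name': 'Remux'}, {'name': 'WEB'}])  # "[Remux, WEB]"
format_array_for_display([10, 20, 30, 40, 50, 60, 70])          # "[10, 20, 30, 40, 50, ... and 2 more]"
format_array_for_display([{'name': 'x'}, [1, 2]])               # "Array with 2 items"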

View File

@@ -9,20 +9,11 @@ from .logger import get_import_logger
 logger = logging.getLogger(__name__)

-# Cache patterns at module level to avoid reloading
-_CACHED_PATTERNS = None
-
-
-def get_cached_patterns():
-    """Get cached regex patterns, loading them once on first access."""
-    global _CACHED_PATTERNS
-    if _CACHED_PATTERNS is None:
-        _CACHED_PATTERNS = load_regex_patterns()
-    return _CACHED_PATTERNS
-

 def compile_format_to_api_structure(
     format_yaml: Dict[str, Any],
-    arr_type: str
+    arr_type: str,
+    patterns: Dict[str, str] = None
 ) -> Dict[str, Any]:
     """
     Compile a format from YAML to Arr API structure.
@@ -30,12 +21,15 @@ def compile_format_to_api_structure(
     Args:
         format_yaml: Format data from YAML file
         arr_type: 'radarr' or 'sonarr'
+        patterns: Pre-loaded regex patterns (if None, will load from disk)

     Returns:
         Compiled format ready for API
     """
     target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR

-    patterns = get_cached_patterns()
+    # Only load patterns if not provided
+    if patterns is None:
+        patterns = load_regex_patterns()

     compiled = {
         'name': format_yaml.get('name', 'Unknown')
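
Threading the pattern map through the call replaces one disk read per format with one read per import run. The intended call shape, per the signature above (module paths are assumptions; the function names are the ones shown in this diff):

from app.importer.utils import load_yaml, load_regex_patterns          # paths assumed
from app.importer.compile import compile_format_to_api_structure       # path assumed

patterns = load_regex_patterns()  # one disk read for the whole run

compiled = [
    compile_format_to_api_structure(
        load_yaml(f"custom_format/{name}.yml"), 'radarr', patterns)
    for name in ('Example Format A', 'Example Format B')  # illustrative names
]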

View File

@@ -22,6 +22,11 @@ class FormatStrategy(ImportStrategy):
         Returns:
             Dictionary with 'formats' key containing compiled formats
         """
+        from ..utils import load_regex_patterns
+
+        # Load all regex patterns once at the start
+        patterns = load_regex_patterns()
+
         formats = []
         failed = []
         import_logger = get_import_logger()
@@ -35,7 +40,7 @@ class FormatStrategy(ImportStrategy):
                 format_yaml = load_yaml(f"custom_format/{filename}.yml")

                 # Compile to API structure
-                compiled = compile_format_to_api_structure(format_yaml, self.arr_type)
+                compiled = compile_format_to_api_structure(format_yaml, self.arr_type, patterns)

                 # Add unique suffix if needed
                 if self.import_as_unique:

View File

@@ -22,6 +22,11 @@ class ProfileStrategy(ImportStrategy):
         Returns:
             Dictionary with 'profiles' and 'formats' keys
         """
+        from ..utils import load_regex_patterns
+
+        # Load all regex patterns once at the start
+        patterns = load_regex_patterns()
+
         profiles = []
         all_formats = []
         processed_formats: Set[str] = set()
@@ -38,8 +43,8 @@ class ProfileStrategy(ImportStrategy):
                 # Load profile YAML
                 profile_yaml = load_yaml(f"profile/{filename}.yml")

-                # Extract referenced custom formats
-                format_names = extract_format_names(profile_yaml)
+                # Extract referenced custom formats (only for the target arr type)
+                format_names = extract_format_names(profile_yaml, self.arr_type)

                 for format_name in format_names:
                     # Skip if already processed
@@ -49,7 +54,7 @@ class ProfileStrategy(ImportStrategy):
                     try:
                         format_yaml = load_yaml(f"custom_format/{format_name}.yml")
-                        compiled_format = compile_format_to_api_structure(format_yaml, self.arr_type)
+                        compiled_format = compile_format_to_api_structure(format_yaml, self.arr_type, patterns)

                         if self.import_as_unique:
                             compiled_format['name'] = self.add_unique_suffix(compiled_format['name'])
@@ -72,7 +77,7 @@ class ProfileStrategy(ImportStrategy):
                 for lang_format in language_formats:
                     lang_name = lang_format.get('name', 'Language format')
-                    compiled_lang = compile_format_to_api_structure(lang_format, self.arr_type)
+                    compiled_lang = compile_format_to_api_structure(lang_format, self.arr_type, patterns)

                     if self.import_as_unique:
                         compiled_lang['name'] = self.add_unique_suffix(compiled_lang['name'])

View File

@@ -46,12 +46,14 @@ def load_yaml(file_path: str) -> Dict[str, Any]:
         return yaml.safe_load(f)


-def extract_format_names(profile_data: Dict[str, Any]) -> Set[str]:
+def extract_format_names(profile_data: Dict[str, Any], arr_type: str = None) -> Set[str]:
     """
     Extract all custom format names referenced in a profile.

     Args:
         profile_data: Profile YAML data
+        arr_type: Target arr type ('radarr' or 'sonarr'). If provided, only extracts
+                  formats for that specific arr type.

     Returns:
         Set of unique format names
@@ -64,10 +66,18 @@ def extract_format_names(profile_data: Dict[str, Any]) -> Set[str]:
             format_names.add(cf['name'])

     # Extract from app-specific custom_formats
-    for key in ['custom_formats_radarr', 'custom_formats_sonarr']:
-        for cf in profile_data.get(key, []):
+    if arr_type:
+        # Only extract formats for the specific arr type
+        app_key = f'custom_formats_{arr_type.lower()}'
+        for cf in profile_data.get(app_key, []):
             if isinstance(cf, dict) and 'name' in cf:
                 format_names.add(cf['name'])
+    else:
+        # Extract from all app-specific sections (backwards compatibility)
+        for key in ['custom_formats_radarr', 'custom_formats_sonarr']:
+            for cf in profile_data.get(key, []):
+                if isinstance(cf, dict) and 'name' in cf:
+                    format_names.add(cf['name'])

     return format_names
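
Concretely, the filter only narrows the app-specific sections; shared custom_formats entries are always included (format names are illustrative):

profile = {
    'custom_formats': [{'name': 'Shared'}],
    'custom_formats_radarr': [{'name': 'Radarr Only'}],
    'custom_formats_sonarr': [{'name': 'Sonarr Only'}],
}

extract_format_names(profile, 'radarr')
# -> {'Shared', 'Radarr Only'}

extract_format_names(profile)
# -> {'Shared', 'Radarr Only', 'Sonarr Only'}   (backwards-compatible path)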

View File

@@ -124,11 +124,14 @@ def setup_logging():
 def init_git_user():
-    """Initialize Git user configuration globally and update PAT status."""
+    """Initialize Git user configuration for the repository and update PAT status."""
     logger = logging.getLogger(__name__)
     logger.info("Starting Git user configuration")

     try:
+        from .config import config
+        repo_path = config.DB_DIR
+
         git_name = os.environ.get('GIT_USER_NAME', 'Profilarr')
         git_email = os.environ.get('GIT_USER_EMAIL',
                                    'profilarr@dictionarry.com')
@@ -139,30 +142,38 @@ def init_git_user():
         if git_name == 'Profilarr' or git_email == 'profilarr@dictionarry.com':
             logger.info("Using default Git user configuration")

-        # Set global Git configuration
-        subprocess.run(['git', 'config', '--global', 'user.name', git_name],
-                       check=True)
-        subprocess.run(['git', 'config', '--global', 'user.email', git_email],
-                       check=True)
+        # Set repository-level Git configuration if repo exists
+        if os.path.exists(os.path.join(repo_path, '.git')):
+            logger.info(f"Setting git config for repository at {repo_path}")
+            subprocess.run(['git', '-C', repo_path, 'config', '--local', 'user.name', git_name],
+                           check=True)
+            subprocess.run(['git', '-C', repo_path, 'config', '--local', 'user.email', git_email],
+                           check=True)
+            # Add safe.directory to prevent ownership issues
+            subprocess.run(['git', '-C', repo_path, 'config', '--local', '--add', 'safe.directory', repo_path],
+                           check=True)
+        else:
+            logger.warning(f"No git repository found at {repo_path}, skipping git config")

         # Update PAT status in database
         update_pat_status()

-        # Verify configuration
-        configured_name = subprocess.run(
-            ['git', 'config', '--global', 'user.name'],
-            capture_output=True,
-            text=True,
-            check=True).stdout.strip()
-        configured_email = subprocess.run(
-            ['git', 'config', '--global', 'user.email'],
-            capture_output=True,
-            text=True,
-            check=True).stdout.strip()
-
-        if configured_name != git_name or configured_email != git_email:
-            logger.error("Git configuration verification failed")
-            return False, "Git configuration verification failed"
+        # Verify configuration if repository exists
+        if os.path.exists(os.path.join(repo_path, '.git')):
+            configured_name = subprocess.run(
+                ['git', '-C', repo_path, 'config', '--local', 'user.name'],
+                capture_output=True,
+                text=True,
+                check=True).stdout.strip()
+            configured_email = subprocess.run(
+                ['git', '-C', repo_path, 'config', '--local', 'user.email'],
+                capture_output=True,
+                text=True,
+                check=True).stdout.strip()
+
+            if configured_name != git_name or configured_email != git_email:
+                logger.error("Git configuration verification failed")
+                return False, "Git configuration verification failed"

         logger.info("Git user configuration completed successfully")
         return True, "Git configuration successful"

View File

@@ -18,6 +18,7 @@ from .logs import bp as logs_bp
 from .media_management import media_management_bp
 from .middleware import init_middleware
 from .init import setup_logging, init_app_config, init_git_user
+from .data.cache import data_cache


 def create_app():
@@ -48,6 +49,10 @@ def create_app():
     # Initialize Git user configuration
     logger.info("Initializing Git user")
     success, message = init_git_user()
+
+    # Initialize data cache
+    logger.info("Initializing data cache")
+    data_cache.initialize()
+
     if not success:
         logger.warning(f"Git user initialization issue: {message}")
     else:

View File

@@ -101,13 +101,12 @@ def sync_media_management():
         try:
             # Get the current media management data for this category
             category_data = get_media_management_data(category)
-            logger.info(f"Raw category_data for {category}: {category_data}")
-            arr_type_data = category_data.get(arr_type, {})
-            logger.info(f"Extracted arr_type_data for {arr_type}: {arr_type_data}")

             if category == 'naming':
+                arr_type_data = category_data.get(arr_type, {})
                 success, message = sync_naming_config(base_url, api_key, arr_type, arr_type_data)
             elif category == 'misc':
+                arr_type_data = category_data.get(arr_type, {})
                 success, message = sync_media_management_config(base_url, api_key, arr_type, arr_type_data)
             elif category == 'quality_definitions':
                 # Quality definitions has a nested structure: qualityDefinitions -> arr_type -> qualities

View File

@@ -1,6 +1,6 @@
import logging import logging
import requests
from typing import Dict, Any, Tuple from typing import Dict, Any, Tuple
from ..importer.arr_handler import ArrHandler, ArrApiError
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -18,22 +18,14 @@ def sync_naming_config(base_url: str, api_key: str, arr_type: str, naming_data:
Returns: Returns:
Tuple of (success, message) Tuple of (success, message)
""" """
arr = None
try: try:
# Construct the endpoint URL # Initialize ArrHandler
endpoint = f"{base_url}/api/v3/config/naming" arr = ArrHandler(base_url, api_key)
headers = { logger.info(f"Syncing naming config to {arr_type}")
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
# GET current naming config # GET current naming config using ArrHandler
logger.info(f"Fetching current naming config from {arr_type} at {base_url}") current_config = arr.get("/api/v3/config/naming")
response = requests.get(endpoint, headers=headers, timeout=10)
response.raise_for_status()
current_config = response.json()
logger.info(f"Current naming config for {arr_type}:")
logger.info(current_config)
# Update current_config with fields from naming_data # Update current_config with fields from naming_data
if arr_type == 'radarr': if arr_type == 'radarr':
@@ -73,24 +65,22 @@ def sync_naming_config(base_url: str, api_key: str, arr_type: str, naming_data:
if 'specialsFolderFormat' in naming_data: if 'specialsFolderFormat' in naming_data:
current_config['specialsFolderFormat'] = naming_data['specialsFolderFormat'] current_config['specialsFolderFormat'] = naming_data['specialsFolderFormat']
# PUT the updated config back # PUT the updated config back using ArrHandler
logger.info(f"Updating naming config for {arr_type}") arr.put("/api/v3/config/naming", current_config)
logger.info(f"Request body for naming sync:") logger.info(f"Successfully synced naming config to {arr_type}")
logger.info(current_config)
put_response = requests.put(endpoint, json=current_config, headers=headers, timeout=10)
put_response.raise_for_status()
logger.info(f"Successfully synced naming config for {arr_type}")
return True, "Naming config sync successful" return True, "Naming config sync successful"
except requests.exceptions.RequestException as e: except ArrApiError as e:
error_msg = f"Failed to sync naming config: {str(e)}" error_msg = f"Failed to sync naming config: {str(e)}"
logger.error(error_msg) logger.error(error_msg)
return False, error_msg return False, error_msg
except Exception as e: except Exception as e:
error_msg = f"Unexpected error syncing naming config: {str(e)}" error_msg = f"Failed to sync naming config: {str(e)}"
logger.error(error_msg) logger.error(error_msg)
return False, error_msg return False, error_msg
finally:
if arr:
arr.close()
def sync_media_management_config(base_url: str, api_key: str, arr_type: str, misc_data: Dict[str, Any]) -> Tuple[bool, str]: def sync_media_management_config(base_url: str, api_key: str, arr_type: str, misc_data: Dict[str, Any]) -> Tuple[bool, str]:
@@ -107,48 +97,37 @@ def sync_media_management_config(base_url: str, api_key: str, arr_type: str, mis
Returns: Returns:
Tuple of (success, message) Tuple of (success, message)
""" """
arr = None
try: try:
# Construct the endpoint URL # Initialize ArrHandler
endpoint = f"{base_url}/api/v3/config/mediamanagement" arr = ArrHandler(base_url, api_key)
headers = { logger.info(f"Syncing media management config to {arr_type}")
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
# GET current media management config # GET current media management config using ArrHandler
logger.info(f"Fetching current media management config from {arr_type} at {base_url}") current_config = arr.get("/api/v3/config/mediamanagement")
response = requests.get(endpoint, headers=headers, timeout=10)
response.raise_for_status()
current_config = response.json()
logger.info(f"Current media management config for {arr_type}:")
logger.info(current_config)
# Update current_config with fields from misc_data # Update current_config with fields from misc_data
# We only manage two fields: propersRepacks and enableMediaInfo
if 'propersRepacks' in misc_data: if 'propersRepacks' in misc_data:
current_config['downloadPropersAndRepacks'] = misc_data['propersRepacks'] current_config['downloadPropersAndRepacks'] = misc_data['propersRepacks']
if 'enableMediaInfo' in misc_data: if 'enableMediaInfo' in misc_data:
current_config['enableMediaInfo'] = misc_data['enableMediaInfo'] current_config['enableMediaInfo'] = misc_data['enableMediaInfo']
# PUT the updated config back # PUT the updated config back using ArrHandler
logger.info(f"Updating media management config for {arr_type}") arr.put("/api/v3/config/mediamanagement", current_config)
logger.info(f"Request body for media management sync:") logger.info(f"Successfully synced media management config to {arr_type}")
logger.info(current_config)
put_response = requests.put(endpoint, json=current_config, headers=headers, timeout=10)
put_response.raise_for_status()
logger.info(f"Successfully synced media management config for {arr_type}")
return True, "Media management config sync successful" return True, "Media management config sync successful"
except requests.exceptions.RequestException as e: except ArrApiError as e:
error_msg = f"Failed to sync media management config: {str(e)}" error_msg = f"Failed to sync media management config: {str(e)}"
logger.error(error_msg) logger.error(error_msg)
return False, error_msg return False, error_msg
except Exception as e: except Exception as e:
error_msg = f"Unexpected error syncing media management config: {str(e)}" error_msg = f"Failed to sync media management config: {str(e)}"
logger.error(error_msg) logger.error(error_msg)
return False, error_msg return False, error_msg
finally:
if arr:
arr.close()
@@ -165,94 +144,43 @@ def sync_quality_definitions(base_url: str, api_key: str, arr_type: str, quality_data: Dict[str, Any]) -> Tuple[bool, str]:
     Returns:
         Tuple of (success, message)
     """
+    arr = None
     try:
-        # Construct the endpoint URL
-        endpoint = f"{base_url}/api/v3/qualitydefinition"
-        headers = {
-            "X-Api-Key": api_key,
-            "Content-Type": "application/json"
-        }
-
-        # GET current quality definitions (for logging/comparison)
-        logger.info(f"Fetching current quality definitions from {arr_type} at {base_url}")
-        response = requests.get(endpoint, headers=headers, timeout=10)
-        response.raise_for_status()
-        current_definitions = response.json()
-        logger.info(f"Current quality definitions for {arr_type}:")
-        logger.info(current_definitions)
-
-        if arr_type == 'sonarr':
-            # Log the quality data we received from YML
-            logger.info(f"Quality data from YML:")
-            logger.info(quality_data)
-
-            # Create a mapping of quality names to current definitions for easier lookup
-            quality_map = {def_['quality']['name']: def_ for def_ in current_definitions}
-
-            # Update each quality definition with our values
-            for quality_name, settings in quality_data.items():
-                if quality_name in quality_map:
-                    definition = quality_map[quality_name]
-                    # Update size limits from our YML data
-                    if 'min' in settings:
-                        definition['minSize'] = settings['min']
-                    if 'preferred' in settings:
-                        definition['preferredSize'] = settings['preferred']
-                    if 'max' in settings:
-                        definition['maxSize'] = settings['max']
-
-            # PUT the updated definitions back
-            logger.info(f"Updating quality definitions for {arr_type}")
-            logger.info(f"Request body for quality definitions sync:")
-            logger.info(current_definitions)
-
-            # Sonarr expects the full array of definitions at the update endpoint
-            update_endpoint = f"{base_url}/api/v3/qualitydefinition/update"
-            put_response = requests.put(update_endpoint, json=current_definitions, headers=headers, timeout=10)
-            put_response.raise_for_status()
-
-            logger.info(f"Successfully synced quality definitions for {arr_type}")
-            return True, "Quality definitions sync successful"
-        else:  # radarr
-            # Log the quality data we received from YML
-            logger.info(f"Quality data from YML:")
-            logger.info(quality_data)
-
-            # Create a mapping of quality names to current definitions for easier lookup
-            quality_map = {def_['quality']['name']: def_ for def_ in current_definitions}
-
-            # Update each quality definition with our values
-            for quality_name, settings in quality_data.items():
-                if quality_name in quality_map:
-                    definition = quality_map[quality_name]
-                    # Update size limits from our YML data
-                    if 'min' in settings:
-                        definition['minSize'] = settings['min']
-                    if 'preferred' in settings:
-                        definition['preferredSize'] = settings['preferred']
-                    if 'max' in settings:
-                        definition['maxSize'] = settings['max']
-
-            # PUT the updated definitions back
-            logger.info(f"Updating quality definitions for {arr_type}")
-            logger.info(f"Request body for quality definitions sync:")
-            logger.info(current_definitions)
-
-            # Radarr expects the full array of definitions at the update endpoint
-            update_endpoint = f"{base_url}/api/v3/qualitydefinition/update"
-            put_response = requests.put(update_endpoint, json=current_definitions, headers=headers, timeout=10)
-            put_response.raise_for_status()
-
-            logger.info(f"Successfully synced quality definitions for {arr_type}")
-            return True, "Quality definitions sync successful"
-    except requests.exceptions.RequestException as e:
+        # Initialize ArrHandler
+        arr = ArrHandler(base_url, api_key)
+        logger.info(f"Syncing quality definitions to {arr_type}")
+
+        # GET current quality definitions using ArrHandler
+        current_definitions = arr.get("/api/v3/qualitydefinition")
+
+        # Create a mapping of quality names to current definitions for easier lookup
+        quality_map = {def_['quality']['name']: def_ for def_ in current_definitions}
+
+        # Update each quality definition with our values
+        for quality_name, settings in quality_data.items():
+            if quality_name in quality_map:
+                definition = quality_map[quality_name]
+                # Update size limits from our YML data
+                if 'min' in settings:
+                    definition['minSize'] = settings['min']
+                if 'preferred' in settings:
+                    definition['preferredSize'] = settings['preferred']
+                if 'max' in settings:
+                    definition['maxSize'] = settings['max']
+
+        # PUT the updated definitions back using ArrHandler
+        arr.put("/api/v3/qualitydefinition/update", current_definitions)
+        logger.info(f"Successfully synced quality definitions to {arr_type}")
+        return True, "Quality definitions sync successful"
+    except ArrApiError as e:
         error_msg = f"Failed to sync quality definitions: {str(e)}"
         logger.error(error_msg)
         return False, error_msg
     except Exception as e:
-        error_msg = f"Unexpected error syncing quality definitions: {str(e)}"
+        error_msg = f"Failed to sync quality definitions: {str(e)}"
         logger.error(error_msg)
         return False, error_msg
+    finally:
+        if arr:
+            arr.close()
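This hunk assumes an ArrHandler class (imported elsewhere in this module) that centralises the API-key header, error handling, and connection reuse. Its real implementation is not shown in this diff; the sketch below is only a plausible reading of the interface the hunk relies on (get, put, close, and an ArrApiError exception), not the project's actual code:

    import requests

    class ArrApiError(Exception):
        """Raised when an Arr API call returns an error response (assumed name)."""

    class ArrHandler:
        """Minimal sketch: session-backed wrapper around an Arr instance's API."""

        def __init__(self, base_url, api_key):
            self.base_url = base_url.rstrip('/')
            self.session = requests.Session()
            self.session.headers.update({
                "X-Api-Key": api_key,
                "Content-Type": "application/json",
            })

        def get(self, path):
            resp = self.session.get(f"{self.base_url}{path}", timeout=10)
            if not resp.ok:
                raise ArrApiError(f"GET {path} failed: {resp.status_code}")
            return resp.json()

        def put(self, path, payload):
            resp = self.session.put(f"{self.base_url}{path}", json=payload, timeout=10)
            if not resp.ok:
                raise ArrApiError(f"PUT {path} failed: {resp.status_code}")
            return resp.json() if resp.content else None

        def close(self):
            # Release the pooled connections held by the session
            self.session.close()

Using a shared session is the natural reason for the new try/finally: the handler holds open connections, so it must be closed whether the sync succeeds or raises.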


@@ -1,5 +1,5 @@
 # app/task/__init__.py
-from flask import Blueprint, jsonify
+from flask import Blueprint, jsonify, request
 import logging
 from ..db import get_db
 from .tasks import TaskScheduler
@@ -78,6 +78,63 @@ def get_task(task_id):
         return jsonify({"error": "An unexpected error occurred"}), 500

+@bp.route('/<int:task_id>', methods=['PUT'])
+def update_task(task_id):
+    try:
+        data = request.get_json()
+        if not data:
+            return jsonify({"error": "No data provided"}), 400
+
+        interval_minutes = data.get('interval_minutes')
+        if interval_minutes is None:
+            return jsonify({"error": "interval_minutes is required"}), 400
+        if not isinstance(interval_minutes, int) or interval_minutes < 1:
+            return jsonify({"error": "interval_minutes must be a positive integer"}), 400
+
+        with get_db() as conn:
+            # Check if task exists
+            task = conn.execute('SELECT * FROM scheduled_tasks WHERE id = ?',
+                                (task_id, )).fetchone()
+            if not task:
+                return jsonify({"error": "Task not found"}), 404
+
+            # Update the interval in database
+            conn.execute(
+                'UPDATE scheduled_tasks SET interval_minutes = ? WHERE id = ?',
+                (interval_minutes, task_id)
+            )
+            conn.commit()
+
+            # Update the scheduler
+            scheduler_instance = TaskScheduler.get_instance()
+            if scheduler_instance and interval_minutes > 0:
+                # Remove old job
+                scheduler_instance.scheduler.remove_job(str(task_id))
+                # Create new task instance with updated interval
+                task_class = TaskScheduler.get_task_class(task['type'])
+                if task_class:
+                    new_task = task_class(
+                        id=task_id,
+                        name=task['name'],
+                        interval_minutes=interval_minutes
+                    )
+                    scheduler_instance.schedule_task(new_task)
+
+            logger.info(f"Updated task {task_id} interval to {interval_minutes} minutes")
+            return jsonify({
+                "success": True,
+                "message": f"Task interval updated to {interval_minutes} minutes"
+            }), 200
+    except Exception as e:
+        logger.exception(f"Failed to update task {task_id}")
+        return jsonify({"error": f"Failed to update task: {str(e)}"}), 500
+
 @bp.route('/<int:task_id>/run', methods=['POST'])
 def trigger_task(task_id):
     try:
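Exercising the new route from a script is straightforward. A hedged sketch using requests, assuming a local instance on the default port (adjust host, port, and task id for your deployment):

    import requests

    # Hypothetical local instance; the base URL is an assumption for illustration.
    BASE = "http://localhost:6868"

    # Set task 1's interval (an example id) to every 30 minutes.
    resp = requests.put(f"{BASE}/api/tasks/1", json={"interval_minutes": 30}, timeout=10)
    print(resp.status_code, resp.json())

    # Validation errors come back as 400s, e.g. for a non-integer interval:
    resp = requests.put(f"{BASE}/api/tasks/1", json={"interval_minutes": "soon"}, timeout=10)
    print(resp.status_code, resp.json())  # {"error": "interval_minutes must be a positive integer"}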

backend/scripts/test.ps1 (new executable file, 107 lines)

@@ -0,0 +1,107 @@
#!/usr/bin/env pwsh
# Run regex tests against a pattern

# Set output encoding to UTF-8
[Console]::OutputEncoding = [System.Text.Encoding]::UTF8
$ErrorActionPreference = "Stop"

# Read from stdin
$inputText = $input
if (-not $inputText) {
    $inputText = [System.Console]::In.ReadToEnd()
}

if (-not $inputText) {
    Write-Output (ConvertTo-Json @{
        success = $false
        message = "No input provided"
    } -Compress)
    exit 0
}

try {
    $data = $inputText | ConvertFrom-Json
    $Pattern = $data.pattern
    $tests = $data.tests
}
catch {
    Write-Output (ConvertTo-Json @{
        success = $false
        message = "Failed to parse input JSON: $_"
    } -Compress)
    exit 0
}

# Ensure we have required inputs
if ([string]::IsNullOrWhiteSpace($Pattern)) {
    Write-Output (ConvertTo-Json @{
        success = $false
        message = "No pattern provided"
    } -Compress)
    exit 0
}

if (-not $tests -or $tests.Count -eq 0) {
    Write-Output (ConvertTo-Json @{
        success = $false
        message = "No tests provided"
    } -Compress)
    exit 0
}

try {
    # Create the regex object with case-insensitive option
    $regex = [System.Text.RegularExpressions.Regex]::new($Pattern, [System.Text.RegularExpressions.RegexOptions]::IgnoreCase)

    # Process each test
    $results = @()
    foreach ($test in $tests) {
        $match = $regex.Match($test.input)
        $passes = ($match.Success -eq $test.expected)

        $result = @{
            id = $test.id
            input = $test.input
            expected = $test.expected
            passes = $passes
        }

        if ($match.Success) {
            # Include match details for highlighting (using original format)
            $result.matchedContent = $match.Value
            $result.matchSpan = @{
                start = $match.Index
                end = $match.Index + $match.Length
            }
            # Include capture groups if any
            $groups = @()
            for ($i = 1; $i -lt $match.Groups.Count; $i++) {
                if ($match.Groups[$i].Success) {
                    $groups += $match.Groups[$i].Value
                }
            }
            $result.matchedGroups = $groups
        }
        else {
            $result.matchedContent = $null
            $result.matchSpan = $null
            $result.matchedGroups = @()
        }

        $results += $result
    }

    Write-Output (ConvertTo-Json @{
        success = $true
        tests = $results
    } -Compress -Depth 10)
}
catch {
    Write-Output (ConvertTo-Json @{
        success = $false
        message = $_.Exception.Message
    } -Compress)
}
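The script speaks JSON over stdin/stdout, which keeps the Python-to-PowerShell boundary simple. How the backend actually invokes it is not part of this diff, so the following is only an illustrative sketch of the contract:

    import json
    import subprocess

    # Illustrative only: the exact backend invocation is not shown in this diff.
    payload = {
        "pattern": r"\b(remux)\b",
        "tests": [
            {"id": 1, "input": "Movie.2160p.Remux.mkv", "expected": True},
            {"id": 2, "input": "Movie.2160p.WEB-DL.mkv", "expected": False},
        ],
    }

    proc = subprocess.run(
        ["pwsh", "-File", "backend/scripts/test.ps1"],
        input=json.dumps(payload),
        capture_output=True,
        text=True,
    )
    result = json.loads(proc.stdout)
    for t in result.get("tests", []):
        print(t["id"], "PASS" if t["passes"] else "FAIL", t.get("matchedContent"))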

backend/scripts/validate.ps1 (new executable file, 73 lines)

@@ -0,0 +1,73 @@
#!/usr/bin/env pwsh
# Validate a .NET regex pattern
param(
    [Parameter(Mandatory=$false)]
    [string]$Pattern
)

# Set output encoding to UTF-8
[Console]::OutputEncoding = [System.Text.Encoding]::UTF8
$ErrorActionPreference = "Stop"

# Read pattern from stdin if not provided as parameter
if (-not $Pattern) {
    $Pattern = [System.Console]::In.ReadToEnd()
}

# Ensure we have a pattern
if ([string]::IsNullOrWhiteSpace($Pattern)) {
    $result = @{
        valid = $false
        error = "No pattern provided"
    }
    Write-Output (ConvertTo-Json $result -Compress)
    exit 0
}

try {
    # Attempt to create a .NET Regex object with the pattern
    # Using IgnoreCase option as per requirement
    $regex = [System.Text.RegularExpressions.Regex]::new($Pattern, [System.Text.RegularExpressions.RegexOptions]::IgnoreCase)

    # If we get here, the pattern is valid
    $result = @{
        valid = $true
        message = "Pattern is valid .NET regex"
    }
    Write-Output (ConvertTo-Json $result -Compress)
    exit 0
}
catch {
    # Pattern is invalid, extract the meaningful part of the error message
    $errorMessage = $_.Exception.Message

    # Try to extract just the useful part of .NET regex errors
    if ($errorMessage -match "Invalid pattern '.*?' at offset (\d+)\. (.+)") {
        $errorMessage = "At position $($matches[1]): $($matches[2])"
    }
    elseif ($errorMessage -match 'parsing ".*?" - (.+)') {
        $errorMessage = $matches[1]
    }
    elseif ($errorMessage -match 'Exception calling .* with .* argument\(s\): "(.+)"') {
        $innerError = $matches[1]
        if ($innerError -match "Invalid pattern '.*?' at offset (\d+)\. (.+)") {
            $errorMessage = "At position $($matches[1]): $($matches[2])"
        }
        else {
            $errorMessage = $innerError
        }
    }

    # Remove any trailing quotes or periods followed by quotes
    $errorMessage = $errorMessage -replace '\."$', '.' -replace '"$', ''

    $result = @{
        valid = $false
        error = $errorMessage
    }
    Write-Output (ConvertTo-Json $result -Compress)
    exit 0
}
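The validator follows the same stdin/stdout JSON contract, taking the raw pattern on stdin rather than a JSON document. A hedged sketch of driving it from Python (the invocation details are assumed, not taken from this diff):

    import json
    import subprocess

    # Illustrative wrapper; the backend's real call site is not shown here.
    def validate_pattern(pattern: str) -> dict:
        proc = subprocess.run(
            ["pwsh", "-File", "backend/scripts/validate.ps1"],
            input=pattern,
            capture_output=True,
            text=True,
        )
        return json.loads(proc.stdout)

    print(validate_pattern(r"(?<=1080p).*"))  # lookbehind: fine in .NET regex
    print(validate_pattern(r"[unclosed"))     # {"valid": false, "error": "..."}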


@@ -1,19 +1,16 @@
-# docker-compose.yml
-version: '3.8'
 services:
   profilarr:
-    image: santiagosayshey/profilarr:beta
+    build:
+      context: .
+      dockerfile: Dockerfile
     container_name: profilarr
     ports:
-      - 6868:6868
+      - 6870:6868
     volumes:
-      - profilarr_data:/config
+      - ./config-test:/config
     environment:
+      - PUID=1000
+      - PGID=1000
+      - UMASK=002
       - TZ=Australia/Adelaide
-    env_file:
-      - .env
     restart: unless-stopped
-volumes:
-  profilarr_data:
-    name: profilarr_data


@@ -17,5 +17,7 @@ services:
       - ./backend:/app
       - ./config:/config
     environment:
+      - PUID=1000
+      - PGID=1000
       - TZ=Australia/Adelaide
     restart: always

entrypoint.sh (new file, 34 lines)

@@ -0,0 +1,34 @@
#!/bin/bash
set -e

# Default to UID/GID 1000 if not provided
PUID=${PUID:-1000}
PGID=${PGID:-1000}

# Default umask to 022 if not provided
UMASK=${UMASK:-022}

echo "Starting with UID: $PUID, GID: $PGID, UMASK: $UMASK"
umask "$UMASK"

# Create group with specified GID
groupadd -g "$PGID" appgroup 2>/dev/null || true

# Create user with specified UID and GID
useradd -u "$PUID" -g "$PGID" -d /home/appuser -s /bin/bash appuser 2>/dev/null || true

# Create home directory if it doesn't exist
mkdir -p /home/appuser
chown "$PUID:$PGID" /home/appuser

# Fix permissions on /config if it exists
if [ -d "/config" ]; then
    echo "Setting up /config directory permissions"
    # Change ownership of /config and all its contents to PUID:PGID
    # This ensures files created by different UIDs are accessible
    chown -R "$PUID:$PGID" /config
fi

# Execute the main command as the specified user
echo "Starting application as user $PUID:$PGID"
exec gosu "$PUID:$PGID" "$@"
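For readers unsure what a given UMASK value yields: permissions are plain bitmasks, with new files starting from 666 and directories from 777 before the umask strips bits. A quick arithmetic check (ordinary octal math, not project code):

    # Effective permissions = base mode with the umask bits masked out.
    def effective_mode(base: int, umask: int) -> str:
        return oct(base & ~umask)

    print(effective_mode(0o666, 0o022))  # 0o644 - files under the default umask
    print(effective_mode(0o777, 0o022))  # 0o755 - directories under the default umask
    print(effective_mode(0o666, 0o002))  # 0o664 - files with UMASK=002 (group-writable)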


@@ -301,5 +301,15 @@ export const RegexPatterns = {
     update: (name, data, newName) =>
         updateItem('regex_pattern', name, data, newName),
     delete: name => deleteItem('regex_pattern', name),
-    runTests: createSpecialEndpoint('regex_pattern', 'test')
+    runTests: createSpecialEndpoint('regex_pattern', 'test'),
+    verify: async pattern => {
+        try {
+            const response = await axios.post(`${BASE_URL}/regex/verify`, {
+                pattern
+            });
+            return response.data;
+        } catch (error) {
+            throw handleError(error, 'verify regex pattern');
+        }
+    }
 };
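For reference, a hedged sketch of what this call exchanges with the backend. The `/regex/verify` path comes from the frontend code above; BASE_URL is defined elsewhere in that module, so the host and prefix below are placeholders, not confirmed values:

    import requests

    # Placeholder URL: the real prefix depends on BASE_URL and your deployment.
    url = "http://localhost:6868/api/data/regex/verify"

    resp = requests.post(url, json={"pattern": r"\bHDR10(\+|Plus)\b"}, timeout=10)
    # Expected shape: {"valid": true, ...} or {"valid": false, "error": "..."}
    print(resp.json())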


@@ -1,4 +1,5 @@
 import axios from 'axios';
+import Alert from '@ui/Alert';

 export const getAllTasks = async () => {
     try {
@@ -37,3 +38,23 @@ export const triggerTask = async taskId => {
         };
     }
 };
+
+export const updateTaskInterval = async (taskId, intervalMinutes) => {
+    try {
+        const response = await axios.put(`/api/tasks/${taskId}`, {
+            interval_minutes: intervalMinutes
+        });
+        Alert.success(response.data.message || 'Task interval updated successfully');
+        return {
+            success: true,
+            data: response.data
+        };
+    } catch (error) {
+        const errorMessage = error.response?.data?.error || 'Failed to update task interval';
+        Alert.error(errorMessage);
+        return {
+            success: false,
+            error: errorMessage
+        };
+    }
+};


@@ -1,4 +1,4 @@
-import React, {useState} from 'react';
+import React, {useState, useEffect, useRef} from 'react';
 import PropTypes from 'prop-types';
 import {Copy, Check, FlaskConical, FileText, ListFilter} from 'lucide-react';
 import Tooltip from '@ui/Tooltip';
@@ -14,6 +14,8 @@ function FormatCard({
     willBeSelected,
     onSelect
 }) {
+    const [isVisible, setIsVisible] = useState(false);
+    const cardRef = useRef(null);
     const [showDescription, setShowDescription] = useState(() => {
         const saved = localStorage.getItem(`format-view-${format.file_name}`);
         return saved !== null ? JSON.parse(saved) : true;
@@ -64,8 +66,27 @@ function FormatCard({
         }
     };

+    useEffect(() => {
+        const observer = new IntersectionObserver(
+            ([entry]) => {
+                setIsVisible(entry.isIntersecting);
+            },
+            {
+                threshold: 0,
+                rootMargin: '100px' // Keep cards rendered 100px outside viewport
+            }
+        );
+        if (cardRef.current) {
+            observer.observe(cardRef.current);
+        }
+        return () => observer.disconnect();
+    }, []);
+
     return (
         <div
+            ref={cardRef}
             className={`w-full h-[12rem] bg-gradient-to-br from-gray-800/95 to-gray-900 border ${
                 isSelected
                     ? 'border-blue-500'
@@ -81,7 +102,8 @@ function FormatCard({
             } transition-all cursor-pointer relative`}
             onClick={handleClick}
             onMouseDown={handleMouseDown}>
-            <div className='p-4 flex flex-col h-full'>
+            {isVisible ? (
+                <div className='p-4 flex flex-col h-full'>
                 {/* Header Section */}
                 <div className='flex justify-between items-start'>
                     <div className='flex flex-col min-w-0 flex-1'>
@@ -237,6 +259,15 @@ function FormatCard({
                     )}
                 </div>
             </div>
+            ) : (
+                <div className='p-4 flex items-center justify-center h-full'>
+                    <div className='w-full space-y-2'>
+                        <div className='h-5 bg-gray-700/50 rounded animate-pulse'/>
+                        <div className='h-3 bg-gray-700/50 rounded animate-pulse w-3/4'/>
+                        <div className='h-3 bg-gray-700/50 rounded animate-pulse w-1/2'/>
+                    </div>
+                </div>
+            )}
         </div>
     );
 }


@@ -23,15 +23,13 @@ const AddUnitTestModal = ({isOpen, onClose, onAdd, tests, editTest = null}) => {
     const handleSubmit = () => {
         const getNextTestId = testArray => {
             if (!testArray || testArray.length === 0) return 1;
-            return Math.max(...testArray.map(test => test.id)) + 1;
+            return Math.max(...testArray.map(test => test.id || 0)) + 1;
         };

         const testData = {
             id: editTest ? editTest.id : getNextTestId(tests),
             input,
-            expected: shouldMatch,
-            passes: false,
-            lastRun: null
+            expected: shouldMatch
         };

         onAdd(testData);


@@ -1,4 +1,4 @@
-import React from 'react';
+import React, {useState, useEffect, useRef} from 'react';
 import PropTypes from 'prop-types';
 import {Copy, Check, FlaskConical} from 'lucide-react';
 import Tooltip from '@ui/Tooltip';
@@ -15,6 +15,9 @@ const RegexCard = ({
     willBeSelected,
     onSelect
 }) => {
+    const [isVisible, setIsVisible] = useState(false);
+    const cardRef = useRef(null);
+
     const totalTests = pattern.tests?.length || 0;
     const passedTests = pattern.tests?.filter(t => t.passes)?.length || 0;
     const passRate =
@@ -46,8 +49,27 @@ const RegexCard = ({
         return 'text-red-400';
     };

+    useEffect(() => {
+        const observer = new IntersectionObserver(
+            ([entry]) => {
+                setIsVisible(entry.isIntersecting);
+            },
+            {
+                threshold: 0,
+                rootMargin: '100px' // Keep cards rendered 100px outside viewport
+            }
+        );
+        if (cardRef.current) {
+            observer.observe(cardRef.current);
+        }
+        return () => observer.disconnect();
+    }, []);
+
     return (
         <div
+            ref={cardRef}
             className={`w-full h-[20rem] bg-gradient-to-br from-gray-800/95 to-gray-900 border ${
                 isSelected
                     ? 'border-blue-500'
@@ -63,7 +85,8 @@ const RegexCard = ({
             } transition-all cursor-pointer overflow-hidden`}
             onClick={handleClick}
             onMouseDown={handleMouseDown}>
-            <div className='p-6 flex flex-col h-full'>
+            {isVisible ? (
+                <div className='p-6 flex flex-col h-full'>
                 {/* Header Section */}
                 <div className='flex-none'>
                     <div className='flex justify-between items-start'>
@@ -183,6 +206,15 @@ const RegexCard = ({
                     )}
                 </div>
             </div>
+            ) : (
+                <div className='p-6 flex items-center justify-center h-full'>
+                    <div className='w-full space-y-3'>
+                        <div className='h-6 bg-gray-700/50 rounded animate-pulse'/>
+                        <div className='h-20 bg-gray-700/50 rounded animate-pulse'/>
+                        <div className='h-4 bg-gray-700/50 rounded animate-pulse w-3/4'/>
+                    </div>
+                </div>
+            )}
         </div>
     );
 };


@@ -2,7 +2,9 @@ import React, {useState} from 'react';
 import PropTypes from 'prop-types';
 import MarkdownEditor from '@ui/MarkdownEditor';
 import AddButton from '@ui/DataBar/AddButton';
-import {InfoIcon} from 'lucide-react';
+import {Regex, Loader} from 'lucide-react';
+import {RegexPatterns} from '@api/data';
+import Alert from '@ui/Alert';

 const RegexGeneralTab = ({
     name,
@@ -18,6 +20,7 @@ const RegexGeneralTab = ({
     patternError
 }) => {
     const [newTag, setNewTag] = useState('');
+    const [validating, setValidating] = useState(false);

     const handleAddTag = () => {
         if (newTag.trim() && !tags.includes(newTag.trim())) {
@@ -33,6 +36,30 @@ const RegexGeneralTab = ({
         }
     };

+    const handleValidatePattern = async () => {
+        if (!pattern?.trim()) {
+            Alert.warning('Please enter a pattern to validate');
+            return;
+        }
+
+        setValidating(true);
+        try {
+            const result = await RegexPatterns.verify(pattern);
+            if (result.valid) {
+                Alert.success('Pattern is valid .NET regex');
+            } else {
+                Alert.error(result.error || 'Invalid pattern');
+            }
+        } catch (error) {
+            console.error('Validation error:', error);
+            Alert.error('Failed to validate pattern');
+        } finally {
+            setValidating(false);
+        }
+    };
+
     return (
         <div className='w-full'>
             {error && (
@@ -89,17 +116,28 @@ const RegexGeneralTab = ({
                 <div className='space-y-2'>
                     <div className='space-y-1'>
                         <div className='flex items-center justify-between'>
-                            <label className='text-sm font-medium text-gray-700 dark:text-gray-300'>
-                                Pattern
-                            </label>
-                            <div className='flex items-center gap-2 text-xs text-blue-600 dark:text-blue-400'>
-                                <InfoIcon className='h-4 w-4' />
-                                <span>Case insensitive PCRE2</span>
+                            <div>
+                                <label className='text-sm font-medium text-gray-700 dark:text-gray-300'>
+                                    Pattern
+                                </label>
+                                <p className='text-xs text-gray-500 dark:text-gray-400'>
+                                    Enter your regular expression pattern (case-insensitive .NET)
+                                </p>
                             </div>
+                            <button
+                                onClick={handleValidatePattern}
+                                disabled={validating || !pattern?.trim()}
+                                className='inline-flex items-center px-3 py-1.5 text-sm font-medium rounded-md
+                                    bg-blue-600 hover:bg-blue-700 disabled:bg-blue-600/50 text-white
+                                    transition-colors duration-200'>
+                                {validating ? (
+                                    <Loader className='w-4 h-4 mr-2 animate-spin' />
+                                ) : (
+                                    <Regex className='w-4 h-4 mr-2' />
+                                )}
+                                Validate
+                            </button>
                         </div>
-                        <p className='text-xs text-gray-500 dark:text-gray-400'>
-                            Enter your regular expression pattern
-                        </p>
                     </div>
                     {patternError && (
                         <p className='text-sm text-red-600 dark:text-red-400'>


@@ -6,7 +6,7 @@ import RegexTestingTab from './RegexTestingTab';
 import {useRegexModal} from '@hooks/useRegexModal';
 import {RegexPatterns} from '@api/data';
 import Alert from '@ui/Alert';
-import {Loader, Play} from 'lucide-react';
+import {Loader, Play, Save, Trash2, Check} from 'lucide-react';

 const RegexModal = ({
     pattern: initialPattern,
@@ -84,12 +84,13 @@ const RegexModal = ({
                 {initialPattern && !isCloning && (
                     <button
                         onClick={handleDelete}
-                        className={`px-4 py-2 text-white rounded transition-colors ${
-                            isDeleting
-                                ? 'bg-red-600 hover:bg-red-700'
-                                : 'bg-red-500 hover:bg-red-600'
-                        }`}>
-                        {isDeleting ? 'Confirm Delete' : 'Delete'}
+                        className='inline-flex items-center gap-2 px-4 py-2 rounded bg-gray-800 border border-gray-700 text-gray-200 hover:bg-gray-700 transition-colors'>
+                        {isDeleting ? (
+                            <Check className="w-4 h-4 text-green-500" />
+                        ) : (
+                            <Trash2 className="w-4 h-4 text-red-500" />
+                        )}
+                        <span>Delete</span>
                     </button>
                 )}
                 <div className='flex gap-2'>
@@ -97,20 +98,20 @@ const RegexModal = ({
                         <button
                             onClick={() => handleRunTests(patternValue, tests)}
                             disabled={isRunningTests}
-                            className='inline-flex items-center px-4 py-2 bg-green-600 hover:bg-green-700
-                                disabled:bg-green-600/50 text-white rounded transition-colors'>
+                            className='inline-flex items-center gap-2 px-4 py-2 rounded bg-gray-800 border border-gray-700 text-gray-200 hover:bg-gray-700 disabled:opacity-50 transition-colors'>
                             {isRunningTests ? (
-                                <Loader className='w-4 h-4 mr-2 animate-spin' />
+                                <Loader className="w-4 h-4 text-yellow-500 animate-spin" />
                             ) : (
-                                <Play className='w-4 h-4 mr-2' />
+                                <Play className="w-4 h-4 text-green-500" />
                             )}
-                            Run Tests
+                            <span>Run Tests</span>
                         </button>
                     )}
                     <button
                         onClick={handleSave}
-                        className='bg-blue-500 hover:bg-blue-600 text-white px-4 py-2 rounded transition-colors'>
-                        Save
+                        className='inline-flex items-center gap-2 px-4 py-2 rounded bg-gray-800 border border-gray-700 text-gray-200 hover:bg-gray-700 transition-colors'>
+                        <Save className="w-4 h-4 text-blue-500" />
+                        <span>Save</span>
                     </button>
                 </div>
             </div>


@@ -13,52 +13,47 @@ const RegexTestingTab = ({
 }) => {
     const [isModalOpen, setIsModalOpen] = useState(false);
     const [editingTest, setEditingTest] = useState(null);
+    const [testResults, setTestResults] = useState({});
+
+    // Wrapped run tests function that stores results
+    const handleRunTests = useCallback(async (testPattern, testData) => {
+        const results = await onRunTests(testPattern, testData);
+        if (results && Array.isArray(results)) {
+            // Store results by test ID
+            const resultsMap = {};
+            results.forEach(result => {
+                resultsMap[result.id] = result;
+            });
+            setTestResults(resultsMap);
+        }
+        return results;
+    }, [onRunTests]);

     useEffect(() => {
-        const needsAutoRun =
-            tests?.length > 0 &&
-            pattern &&
-            tests.some(test => test.passes !== undefined && !test.matchSpan);
-        if (needsAutoRun && !isRunningTests) {
-            onRunTests(pattern, tests);
+        // Run tests when pattern or tests change
+        if (tests?.length > 0 && pattern && !isRunningTests) {
+            handleRunTests(pattern, tests);
         }
-    }, []);
+    }, [pattern]); // Only re-run when pattern changes

     const handleAddOrUpdateTest = useCallback(
         testData => {
             let updatedTests;
             if (editingTest) {
                 updatedTests = tests.map(test =>
-                    test.id === testData.id
-                        ? {
-                              ...testData,
-                              passes: false,
-                              lastRun: null,
-                              matchedContent: null,
-                              matchSpan: null,
-                              matchedGroups: []
-                          }
-                        : test
+                    test.id === testData.id ? testData : test
                 );
             } else {
-                updatedTests = [
-                    ...tests,
-                    {
-                        ...testData,
-                        passes: false,
-                        lastRun: null,
-                        matchedContent: null,
-                        matchSpan: null,
-                        matchedGroups: []
-                    }
-                ];
+                updatedTests = [...tests, testData];
             }
             onTestsChange(updatedTests);
-            onRunTests(pattern, updatedTests);
+            // Run tests automatically after adding/updating
+            if (pattern) {
+                handleRunTests(pattern, updatedTests);
+            }
             setEditingTest(null);
         },
-        [tests, onTestsChange, onRunTests, pattern, editingTest]
+        [tests, onTestsChange, handleRunTests, pattern, editingTest]
     );

     const handleEditTest = useCallback(test => {
@@ -80,72 +75,81 @@ const RegexTestingTab = ({
     }, []);

     const totalTests = tests?.length || 0;
-    const passedTests = tests?.filter(test => test.passes)?.length || 0;
+    const passedTests = tests?.filter(test => {
+        const result = testResults[test.id];
+        return result?.passes;
+    })?.length || 0;

     return (
         <div className='flex flex-col h-full'>
-            {/* Header with Progress Bar */}
-            <div className='flex items-center justify-between pb-4 pr-2'>
+            {/* Header */}
+            <div className='flex items-center justify-between pb-4'>
                 <div>
-                    <h2 className='text-xl font-semibold text-gray-900 dark:text-white mb-3'>
+                    <h2 className='text-xl font-semibold text-gray-900 dark:text-white mb-1'>
                         Unit Tests
                     </h2>
-                    <div className='flex items-center gap-3'>
-                        <div className='h-1.5 w-32 bg-gray-200 dark:bg-gray-700 rounded-full overflow-hidden'>
-                            <div
-                                className='h-full bg-emerald-500 rounded-full transition-all duration-300'
-                                style={{
-                                    width: `${
-                                        totalTests
-                                            ? (passedTests / totalTests) * 100
-                                            : 0
-                                    }%`
-                                }}
-                            />
-                        </div>
-                        <span className='text-sm text-gray-600 dark:text-gray-300'>
-                            {totalTests > 0
-                                ? `${passedTests}/${totalTests} tests passing`
-                                : 'No tests added yet'}
-                        </span>
-                    </div>
+                    {totalTests > 0 && (
+                        <p className='text-sm text-gray-600 dark:text-gray-400'>
+                            {passedTests} of {totalTests} tests passing
+                            {totalTests > 0 && ` (${Math.round((passedTests / totalTests) * 100)}%)`}
+                        </p>
+                    )}
                 </div>
                 <div className='flex items-center gap-2'>
                     {tests?.length > 0 && (
                         <button
-                            onClick={() => onRunTests(pattern, tests)}
+                            onClick={() => handleRunTests(pattern, tests)}
                             disabled={isRunningTests}
-                            className='inline-flex items-center px-3 py-2 text-sm font-medium rounded-md bg-green-600 hover:bg-green-700 disabled:bg-green-600/50 text-white'>
+                            className='inline-flex items-center gap-2 px-3 py-1.5 text-sm rounded bg-gray-800 border border-gray-700 text-gray-200 hover:bg-gray-700 disabled:opacity-50 transition-colors'>
                             {isRunningTests ? (
-                                <Loader className='w-4 h-4 mr-2 animate-spin' />
+                                <Loader className='w-3.5 h-3.5 text-yellow-500 animate-spin' />
                             ) : (
-                                <Play className='w-4 h-4 mr-2' />
+                                <Play className='w-3.5 h-3.5 text-green-500' />
                             )}
-                            Run Tests
+                            <span>Run Tests</span>
                         </button>
                     )}
                     <button
                         onClick={() => setIsModalOpen(true)}
-                        className='inline-flex items-center px-3 py-2 text-sm font-medium rounded-md bg-blue-600 hover:bg-blue-700 text-white'>
-                        <Plus className='w-4 h-4 mr-2' />
-                        Add Test
+                        className='inline-flex items-center gap-2 px-3 py-1.5 text-sm rounded bg-gray-800 border border-gray-700 text-gray-200 hover:bg-gray-700 transition-colors'>
+                        <Plus className='w-3.5 h-3.5 text-blue-500' />
+                        <span>Add Test</span>
                     </button>
                 </div>
             </div>

+            {/* Progress Bar */}
+            {totalTests > 0 && (
+                <div className='mb-4'>
+                    <div className='h-2 bg-gray-200 dark:bg-gray-700 rounded-full overflow-hidden'>
+                        <div
+                            className='h-full bg-emerald-500 transition-all duration-500 ease-out'
+                            style={{width: `${(passedTests / totalTests) * 100}%`}}
+                        />
+                    </div>
+                </div>
+            )}
+
             {/* Test List */}
             <div className='flex-1 overflow-y-auto pr-2'>
                 {tests?.length > 0 ? (
                     <div className='space-y-3'>
-                        {tests.map(test => (
-                            <UnitTest
-                                key={test.id}
-                                test={test}
-                                pattern={pattern}
-                                onDelete={() => handleDeleteTest(test.id)}
-                                onEdit={() => handleEditTest(test)}
-                            />
-                        ))}
+                        {tests.map(test => {
+                            // Merge saved test with runtime results
+                            const testWithResults = {
+                                ...test,
+                                ...testResults[test.id]
+                            };
+                            return (
+                                <UnitTest
+                                    key={test.id}
+                                    test={testWithResults}
+                                    pattern={pattern}
+                                    onDelete={() => handleDeleteTest(test.id)}
+                                    onEdit={() => handleEditTest(test)}
+                                />
+                            );
+                        })}
                     </div>
                 ) : (
                     <div className='text-center py-12 rounded-lg'>
@@ -173,15 +177,7 @@ RegexTestingTab.propTypes = {
         PropTypes.shape({
             id: PropTypes.number.isRequired,
             input: PropTypes.string.isRequired,
-            expected: PropTypes.bool.isRequired,
-            passes: PropTypes.bool.isRequired,
-            lastRun: PropTypes.string,
-            matchedContent: PropTypes.string,
-            matchedGroups: PropTypes.arrayOf(PropTypes.string),
-            matchSpan: PropTypes.shape({
-                start: PropTypes.number,
-                end: PropTypes.number
-            })
+            expected: PropTypes.bool.isRequired
         })
     ),
     onTestsChange: PropTypes.func.isRequired,


@@ -68,11 +68,7 @@ const UnitTest = ({test, pattern, onDelete, onEdit}) => {
                         : 'Should Not Match'}
                 </span>
             </div>
-            <div className='flex items-center gap-2'>
-                <span className='text-xs text-gray-500 dark:text-gray-400'>
-                    Last run: {test.lastRun}
-                </span>
-                <div className='flex gap-2'>
+            <div className='flex gap-2'>
                 <button
                     onClick={onEdit}
                     className='p-1 rounded shrink-0 transition-transform transform hover:scale-110'>
@@ -83,7 +79,6 @@ const UnitTest = ({test, pattern, onDelete, onEdit}) => {
                     className='p-1 rounded shrink-0 transition-transform transform hover:scale-110'>
                     <Trash2 className='w-4 h-4 text-gray-500 dark:text-gray-400' />
                 </button>
-                </div>
             </div>
         </div>
@@ -112,7 +107,6 @@ UnitTest.propTypes = {
     input: PropTypes.string.isRequired,
     expected: PropTypes.bool.isRequired,
     passes: PropTypes.bool.isRequired,
-    lastRun: PropTypes.string,
     matchedContent: PropTypes.string,
     matchedGroups: PropTypes.arrayOf(PropTypes.string),
     matchSpan: PropTypes.shape({


@@ -1,8 +1,15 @@
 // components/settings/TaskCard.jsx
-import React from 'react';
-import {Play, Loader} from 'lucide-react';
+import React, {useState, useEffect} from 'react';
+import {Play, Loader, Edit2, Check, X} from 'lucide-react';
+import NumberInput from '@ui/NumberInput';
+import {updateTaskInterval} from '@/api/task';

-const TaskCard = ({task, onTrigger, isTriggering}) => {
+const TaskCard = ({task, onTrigger, isTriggering, isLast, onIntervalUpdate}) => {
+    const [intervalValue, setIntervalValue] = useState(task.interval_minutes);
+    const [originalValue, setOriginalValue] = useState(task.interval_minutes);
+
+    // Only allow editing for Repository Sync and Backup tasks
+    const isEditable = task.type === 'Sync' || task.type === 'Backup';
+
     const formatDateTime = dateString => {
         if (!dateString) return 'Never';
         return new Date(dateString).toLocaleString();
@@ -13,8 +20,32 @@ const TaskCard = ({task, onTrigger, isTriggering}) => {
         return `${duration}s`;
     };

+    useEffect(() => {
+        setIntervalValue(task.interval_minutes);
+        setOriginalValue(task.interval_minutes);
+    }, [task.interval_minutes]);
+
+    useEffect(() => {
+        if (intervalValue !== originalValue && intervalValue > 0) {
+            const updateInterval = async () => {
+                const result = await updateTaskInterval(task.id, intervalValue);
+                if (result.success) {
+                    setOriginalValue(intervalValue);
+                    // Refresh task data to get new next_run time
+                    if (onIntervalUpdate) {
+                        onIntervalUpdate();
+                    }
+                } else {
+                    // Reset to original value if update failed
+                    setIntervalValue(originalValue);
+                }
+            };
+            updateInterval();
+        }
+    }, [intervalValue]);
+
     return (
-        <tr className='bg-gray-900 border-b border-gray-700'>
+        <tr className={`bg-gray-900 ${!isLast ? 'border-b border-gray-700' : ''}`}>
             <td className='py-4 px-4'>
                 <div className='flex items-center space-x-3'>
                     <span className='font-medium text-gray-100'>
@@ -23,7 +54,21 @@ const TaskCard = ({task, onTrigger, isTriggering}) => {
                 </div>
             </td>
             <td className='py-4 px-4 text-gray-300'>
-                {task.interval_minutes} minutes
+                {isEditable ? (
+                    <div className='flex items-center space-x-2'>
+                        <NumberInput
+                            value={intervalValue}
+                            onChange={setIntervalValue}
+                            min={1}
+                            max={43200}
+                            step={1}
+                            className='w-24'
+                        />
+                        <span className='text-gray-400 text-sm'>minutes</span>
+                    </div>
+                ) : (
+                    <span>{task.interval_minutes} minutes</span>
+                )}
             </td>
             <td className='py-4 px-4 text-gray-300'>
                 {formatDateTime(task.last_run)}


@@ -77,12 +77,14 @@ const TaskContainer = () => {
                     </tr>
                 </thead>
                 <tbody>
-                    {tasks.map(task => (
+                    {tasks.map((task, index) => (
                         <TaskCard
                             key={task.id}
                             task={task}
                             onTrigger={handleTriggerTask}
                             isTriggering={triggeringTask === task.id}
+                            isLast={index === tasks.length - 1}
+                            onIntervalUpdate={fetchTasks}
                         />
                     ))}
                 </tbody>


@@ -5,6 +5,8 @@ import {ChevronUp, ChevronDown} from 'lucide-react';
 const NumberInput = ({
     value,
     onChange,
+    onBlur = () => {},
+    onFocus = () => {},
     className = '',
     step = 1,
     disabled = false,
@@ -24,26 +26,26 @@ const NumberInput = ({
         }
     };

-    const handleBlur = () => {
+    const handleBlur = (e) => {
         setIsFocused(false);
         const numValue =
             localValue === '' || localValue === '-' ? 0 : parseInt(localValue);
         if (min !== undefined && numValue < min) {
             onChange(min);
-            return;
-        }
-        if (max !== undefined && numValue > max) {
+        } else if (max !== undefined && numValue > max) {
             onChange(max);
-            return;
+        } else {
+            onChange(numValue);
         }
-        onChange(numValue);
+        onBlur(e);
     };

-    const handleFocus = () => {
+    const handleFocus = (e) => {
         setIsFocused(true);
         setLocalValue(value.toString());
+        onFocus(e);
     };

     const increment = () => {


@@ -65,13 +65,33 @@ export const useRegexModal = (initialPattern, onSave) => {
             return;
         }

+        // Validate pattern with .NET regex engine
         try {
+            const validationResult = await RegexPatterns.verify(patternValue);
+            if (!validationResult.valid) {
+                Alert.error(`Invalid regex pattern: ${validationResult.error || 'Pattern validation failed'}`);
+                return;
+            }
+        } catch (error) {
+            console.error('Pattern validation error:', error);
+            Alert.error('Failed to validate pattern. Please check the pattern and try again.');
+            return;
+        }
+
+        try {
+            // Clean tests to only include saved data
+            const cleanTests = tests.map((test, index) => ({
+                id: test.id || index + 1,
+                input: test.input,
+                expected: test.expected
+            }));
+
             const data = {
                 name,
                 pattern: patternValue,
                 description,
                 tags,
-                tests
+                tests: cleanTests
             };

             if (initialPattern && !isCloning) {
@@ -98,15 +118,16 @@ export const useRegexModal = (initialPattern, onSave) => {
     const handleRunTests = useCallback(
         async (pattern, tests) => {
             try {
-                const updatedTests = await runTests(pattern, tests);
-                if (updatedTests) {
-                    setTests(updatedTests);
-                }
+                const testResults = await runTests(pattern, tests);
+                // We don't update the tests state with results
+                // Results are only used for display, not saved
+                return testResults;
             } catch (error) {
                 console.error('Error running tests:', error);
                 Alert.error(
                     error.message || 'Failed to run tests. Please try again.'
                 );
+                return null;
             }
         },
         [runTests]


@@ -34,14 +34,12 @@ export const useRegexTesting = onUpdateTests => {
                 }
             );

-            // Update tests through the callback
-            if (onUpdateTests) {
-                onUpdateTests(result.tests);
-            }
+            // Return the test results (with match information)
+            // Don't save these results, just return them for display
             return result.tests;
         } else {
             Alert.error(result.message || 'Failed to run tests');
-            return tests;
+            return null;
         }
     } catch (error) {
         console.error('Error running tests:', error);