261 Commits
v1.1.2 ... v2

Author SHA1 Message Date
Sam Chau
7bd2ee2493 fix: fallback to execCommand to copy logs to clipboard 2026-01-22 18:01:46 +10:30
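A minimal sketch of the clipboard fallback described in the commit above, assuming a browser context; the function name is illustrative, not the actual implementation.

async function copyText(text: string): Promise<boolean> {
    // Prefer the async Clipboard API (requires a secure context and a user gesture).
    if (navigator.clipboard && window.isSecureContext) {
        try {
            await navigator.clipboard.writeText(text);
            return true;
        } catch {
            // fall through to the legacy path
        }
    }
    // Legacy fallback: copy via a hidden textarea and document.execCommand('copy').
    const textarea = document.createElement('textarea');
    textarea.value = text;
    textarea.style.position = 'fixed';
    textarea.style.opacity = '0';
    document.body.appendChild(textarea);
    textarea.select();
    const ok = document.execCommand('copy');
    textarea.remove();
    return ok;
}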
Sam Chau
97c21b9572 feat: condition improvements
- refactor cards into unified component with modes
- add placeholders to dropdown selects
- style autocomplete similar to other ui components
- add placeholders to number inputs
- show any in language conditions
- add boolean for except language
2026-01-22 15:17:18 +10:30
Sam Chau
4efefe63ca feat: simplify language support in quality profiles
- moved language field in quality profile general page
- simplify transformation for sonarr by making languages optional
2026-01-22 14:02:43 +10:30
Sam Chau
12ba7540f7 docs: font/squish 2026-01-22 12:15:33 +10:30
Sam Chau
7ad2da8739 feat: github caching with ttl, improves loading times on databases/changes/about pages 2026-01-22 12:13:04 +10:30
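One plausible shape for the GitHub response cache with TTL mentioned above; class and field names are invented for illustration.

class TtlCache<T> {
    private entries = new Map<string, { value: T; expiresAt: number }>();

    constructor(private ttlMs: number) {}

    get(key: string): T | undefined {
        const hit = this.entries.get(key);
        if (!hit) return undefined;
        if (Date.now() > hit.expiresAt) {
            this.entries.delete(key); // expired: evict and report a miss
            return undefined;
        }
        return hit.value;
    }

    set(key: string, value: T): void {
        this.entries.set(key, { value, expiresAt: Date.now() + this.ttlMs });
    }
}

// e.g. cache GitHub release lookups for 10 minutes
const releaseCache = new TtlCache<unknown>(10 * 60 * 1000);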
Sam Chau
e011c2df1a fix: ocd 2026-01-22 11:49:47 +10:30
Sam Chau
6577174a22 feat: implement basic cooldown, remove old time based one 2026-01-22 11:37:05 +10:30
Sam Chau
ac9dea7186 feat: default new filter is upgradinatorr-like 2026-01-22 10:45:10 +10:30
Sam Chau
46c4c4e4ff feat: add tag to filter options in upgrades 2026-01-22 09:56:15 +10:30
Sam Chau
d41971dfc1 feat: add pagination to settings > logs 2026-01-22 09:33:07 +10:30
Sam Chau
46e5e2a059 refactor: moved upgrade/filter info into a separate page on upgrades/info 2026-01-22 09:25:39 +10:30
Sam Chau
e6d16d76be feat: apply default delay profile to arrs when adding a new one 2026-01-22 09:05:30 +10:30
Sam Chau
76e51c9194 feat: add default delay profiles for arrs, with feedback mechanism 2026-01-22 08:24:58 +10:30
Sam Chau
1043c2a2e7 fix: handle unlimited values for quality defs. unlimited -> 0 saved. arr API expects 0 = unlimited for some bizarre reason 2026-01-22 08:02:12 +10:30
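A tiny illustration of the mapping described above, where the arr API treats 0 as "unlimited" for quality definition sizes; the helper name is hypothetical.

// Convert the UI's "unlimited" (commonly modelled as null/undefined) to the 0 the arr API expects.
function toArrMaxSize(maxSize: number | null | undefined): number {
    return maxSize == null || !Number.isFinite(maxSize) ? 0 : maxSize;
}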
Sam Chau
9a55932500 docs: quality definition requests 2026-01-22 08:01:33 +10:30
Sam Chau
14cfbdc60c fix: add FKs to sync tables, remove dangling references 2026-01-22 06:22:34 +10:30
Sam Chau
0db641e6ed fix: qp sync debugging. also disabling arm build until stable release 2026-01-22 06:07:37 +10:30
Sam Chau
627a673453 style: use dropdown select instead of toggle on upgrades / rename page 2026-01-21 17:18:20 +10:30
Sam Chau
ebced3e5b6 style: add better default sorting to custom format conditions 2026-01-21 10:00:56 +10:30
Sam Chau
353fe3832f refactor: delay profile handling. remove tags, only allow 1 delay profile to be synced at once. simplified dp sync config 2026-01-21 09:30:48 +10:30
Sam Chau
64a4656b1c fix: qp name instead of qp id for sync. Fixes mm and qp sync 2026-01-21 09:29:54 +10:30
Sam Chau
0d1e95532b docs(scratchpad): adding feedback tasks from seraphys 2026-01-21 07:54:20 +10:30
Sam Chau
a1b0d9c7f0 fix: same fixes for user in entry 2026-01-21 03:51:13 +10:30
Sam Chau
9b5b00fd99 fix: entrypoint - only create the group if one does not already exist at GID 100. Also other docs rambling because I can't be bothered to make another commit 2026-01-21 03:36:41 +10:30
Sam Chau
793b2b6bce docs(scratchpad): floating some thoughts for more fleshed out filter cooldown (exponential backoff) 2026-01-21 00:53:19 +10:30
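The scratchpad idea above (exponential backoff for filter cooldowns) could look roughly like this; the base and cap values are made up for illustration.

// Cooldown grows as base * 2^failures, capped so repeated failures don't push the next try out forever.
function nextCooldownMs(consecutiveFailures: number, baseMs = 60_000, capMs = 24 * 60 * 60 * 1000): number {
    return Math.min(baseMs * 2 ** consecutiveFailures, capMs);
}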
Sam Chau
894e91a413 fix: weird hmr env var behaviour for isDev on upgrades page resolved 2026-01-21 00:19:45 +10:30
Sam Chau
caf33abcd7 docs: consistent roles in about 2026-01-21 00:19:06 +10:30
Sam Chau
a0ba39bb35 feat(api): add bruno Radarr API requests for movies and quality profiles (we stan bruno) 2026-01-21 00:06:06 +10:30
Sam Chau
4c90c729e4 style(ui): add compact versions of button, input, number input and a combined button + dropdown component 2026-01-21 00:05:38 +10:30
Sam Chau
51d382754a feat(upgrades): add digital and physical release fields, introduce ordinal operators and availability order for (better) minimum availability filtering 2026-01-21 00:04:56 +10:30
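A sketch of what "ordinal operators and availability order" could mean for minimum-availability filtering; the order array mirrors Radarr's announced -> inCinemas -> released progression and is an assumption here.

const AVAILABILITY_ORDER = ['announced', 'inCinemas', 'released'] as const;
type Availability = (typeof AVAILABILITY_ORDER)[number];

// Negative: a comes before b; zero: equal; positive: a comes after b.
function compareAvailability(a: Availability, b: Availability): number {
    return AVAILABILITY_ORDER.indexOf(a) - AVAILABILITY_ORDER.indexOf(b);
}

// "minimum availability is at least inCinemas"
const passes = compareAvailability('released', 'inCinemas') >= 0; // true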
Sam Chau
7aaebf7dfe feat(tests): implement test runner script for specific test execution 2026-01-21 00:03:53 +10:30
Sam Chau
a64dc8a858 feat(tests): add ordinal operators tests for minimum_availability filtering 2026-01-21 00:03:39 +10:30
Sam Chau
ac963cd083 fix(platform): enhance macOS platform detection for Intel architecture 2026-01-20 22:36:28 +10:30
Sam Chau
90d3257424 chore: correct license detection to AGPL-3.0 2026-01-20 19:08:52 +10:30
Sam Chau
f0853990c2 chore(parser): remove local build config before publishing 2026-01-20 18:46:51 +10:30
Sam Chau
7534553ba2 feat(dev): add platform detection and update environment variables for Vite 2026-01-20 18:46:37 +10:30
Sam Chau
ff6a9be8c1 refactor(parser): restructure service with proper logging and separation of concerns 2026-01-20 18:40:28 +10:30
Sam Chau
facf391f16 style: improve platform/version card in sidebar 2026-01-20 00:56:51 +10:30
Sam Chau
ec0db073d2 feat: add dev mode override for manual upgrade runs 2026-01-20 00:23:12 +10:30
Sam Chau
b5ab5ec285 style: x/y toggle 2026-01-20 00:23:01 +10:30
Sam Chau
dc36b06366 fix: ensure value change dispatch in NumberInput component 2026-01-20 00:22:37 +10:30
Sam Chau
6a09d9093e style: card based selection for quality / delay profile syncing 2026-01-19 23:18:11 +10:30
Sam Chau
1ec8da5742 fix: update IconCheckbox event binding from onclick to on:click 2026-01-19 23:15:12 +10:30
Sam Chau
8026bc93c9 fix: add uuid fallback for non-secure contexts (HTTP) 2026-01-19 22:28:49 +10:30
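crypto.randomUUID() is only available in secure contexts, so a fallback along these lines is the usual fix; this is a generic sketch, not the project's exact code.

function safeRandomUUID(): string {
    if (globalThis.crypto?.randomUUID) {
        return crypto.randomUUID();
    }
    // RFC 4122 v4 fallback built from getRandomValues, which works over plain HTTP too.
    const bytes = crypto.getRandomValues(new Uint8Array(16));
    bytes[6] = (bytes[6] & 0x0f) | 0x40; // version 4
    bytes[8] = (bytes[8] & 0x3f) | 0x80; // variant 10xx
    const hex = [...bytes].map((b) => b.toString(16).padStart(2, '0')).join('');
    return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20)}`;
}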
Sam Chau
e1c0a66603 docs: add Docker configuration example and update parser service notes 2026-01-19 22:27:15 +10:30
Sam Chau
f29d80c7ab chore: docker build workflows 2026-01-19 21:56:39 +10:30
Sam Chau
fd20cd84e8 feat: add Docker deployment support 2026-01-19 20:23:10 +10:30
Sam Chau
dd77d1af35 refactor: replace PowerShell regex testing with parser service integration 2026-01-19 20:21:15 +10:30
Sam Chau
9a925b19f1 chore: rename dev:vite task to dev:server 2026-01-19 19:05:34 +10:30
Sam Chau
fc56a67b28 fix: various dirty tracking bug fixes, unused variables 2026-01-19 18:18:22 +10:30
Sam Chau
c476775bc2 feat: add SQL validation method to PCDCache and integrate it into writeOperation as final lod 2026-01-19 10:41:21 +10:30
Sam Chau
78c7cc19a2 fix: update condition type filtering to show all options 2026-01-19 10:40:58 +10:30
Sam Chau
487b043278 fix: clear dirty state for read-only testing page 2026-01-19 09:59:14 +10:30
Sam Chau
221d0fffb4 chore: formatting 2026-01-19 09:54:41 +10:30
Sam Chau
10244efe91 fix: cf/qp names instead of ids for entity testing 2026-01-19 07:58:49 +10:30
Sam Chau
6ffc9cd5ce refactor: the great ID purge of 2026 2026-01-19 07:45:03 +10:30
Sam Chau
4f565ebd6f fix: use name+tmdbid primary key instead of auto-incremented id 2026-01-19 04:12:37 +10:30
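A hypothetical Kysely migration sketch for the name+tmdbid primary key mentioned above; the table and constraint names are invented, not taken from the repository.

import { Kysely } from 'kysely';

export async function up(db: Kysely<unknown>): Promise<void> {
    await db.schema
        .createTable('test_entities')
        .addColumn('name', 'text', (col) => col.notNull())
        .addColumn('tmdb_id', 'integer', (col) => col.notNull())
        // Natural composite key instead of an auto-incremented surrogate id.
        .addPrimaryKeyConstraint('test_entities_pk', ['name', 'tmdb_id'])
        .execute();
}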
Sam Chau
95930edc53 feat(conditions): add per-condition arr_type support 2026-01-19 03:37:26 +10:30
Sam Chau
9b2f0d393c feat(arr): add logs page for viewing Radarr/Sonarr logs 2026-01-19 02:39:59 +10:30
Sam Chau
d11e060c91 feat: flexible response parsing in HTTP client (allows text now) 2026-01-19 02:28:32 +10:30
Sam Chau
8d3e20d3c3 refactor: use base http client for notifications, parser, autocomp 2026-01-19 02:26:12 +10:30
Sam Chau
f6d99bc267 feat: run history for rename jobs 2026-01-19 01:52:26 +10:30
Sam Chau
1b3a5828c4 style: make rename page look more like upgrades 2026-01-19 01:44:36 +10:30
Sam Chau
ebf1ee781f fix: sync page dirty state tracking, remove old upgrade logs page *content* (will be refactored for something else) 2026-01-19 01:31:56 +10:30
Sam Chau
99c5046ed8 feat: upgrade improvements - more compact settings page, improved filter builder, clearer buttons, searching / filtering, improved run history, notifications 2026-01-19 01:27:28 +10:30
Sam Chau
5c529e665a fix: require media management settings before quality profile sync 2026-01-18 18:10:53 +10:30
Sam Chau
6bc72b032d chore: add --allow-sys flag to build commands 2026-01-18 17:54:09 +10:30
Sam Chau
0e762e3470 refactor: backups page to use reusable table and actions bar 2026-01-18 17:38:13 +10:30
Sam Chau
ee65444717 refactor: remove old card view for jobs, replace with expandable table. Use table comp for history 2026-01-18 17:32:57 +10:30
Sam Chau
efcc30f8c9 feat: simplify rename info, add stuff about rich/summary notifs 2026-01-18 02:38:10 +10:30
Sam Chau
0ab315d4a9 feat: add summary notifs for rename jobs, rename original to rich notifs 2026-01-18 02:24:18 +10:30
Sam Chau
de39481d4d feat: renaminatorr logic, jobs, long notif support 2026-01-18 02:06:51 +10:30
Sam Chau
47ba9dd7e9 feat: manual incoming changes handling
- Enhanced Git class to include method for fetching incoming changes from remote repository.
- Implemented logic to retrieve and display incoming commits in the changes page.
- Updated API routes to handle incoming changes and pull requests.
- Modified UI components to show incoming changes and allow users to pull updates.
- Improved actions bar to disable commit actions when there are incoming changes.
- Added sync button to refresh repository status and check for updates.
2026-01-17 15:25:24 +10:30
Sam Chau
b13ec91e32 fix: set last sync after *every* sync, not just when changes are pulled 2026-01-17 14:33:46 +10:30
Sam Chau
7c1952d264 refactor: add date utility for consistent UTC handling 2026-01-17 14:31:48 +10:30
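The date utility for consistent UTC handling presumably centres on helpers like these (illustrative only):

// Store and compare timestamps as UTC ISO strings so values round-trip identically
// regardless of the server's local timezone.
export function nowUtc(): string {
    return new Date().toISOString();
}

// SQLite-style "YYYY-MM-DD HH:MM:SS" strings carry no timezone; treat them as UTC explicitly.
export function parseSqliteUtc(value: string): Date {
    return new Date(value.replace(' ', 'T') + 'Z');
}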
Sam Chau
3d1e55e46c feat: backend support for manifest/readme updates 2026-01-17 00:40:03 +10:30
Sam Chau
4186e1413a feat: add database configuration page with manifest and README support 2026-01-17 00:06:08 +10:30
Sam Chau
e104676f77 fix: search filtering on arr and databases pages 2026-01-16 21:30:00 +10:30
Sam Chau
bab8eeb946 refactor: update arr and databases pages with action bars, badges, and info modals 2026-01-16 21:26:55 +10:30
Sam Chau
ad7e77dcea style: reduce top padding above tabs on entity pages
fix: remove unfinished arr types from instance form
2026-01-16 21:02:45 +10:30
Sam Chau
1f97a96e34 refactor: shared CustomFormatBadge and Score components to be used in library/testing views 2026-01-16 20:47:47 +10:30
Sam Chau
643ba8ce00 refactor: move arr header buttons into action bar, improve breadcrumbing 2026-01-16 20:35:29 +10:30
Sam Chau
2efb006482 feat: add emoji toggle option for theme switcher 2026-01-16 19:54:08 +10:30
Sam Chau
4217fabebf fix: add warning message for items not found in library selection 2026-01-16 19:39:46 +10:30
Sam Chau
656a3e3114 feat: enhance entity testing with auto import releases functionality 2026-01-16 19:05:57 +10:30
Sam Chau
d892b30be2 fix: persist expanded row state in release table by defining variable in entity table and binding inside release table 2026-01-16 13:21:56 +10:30
Sam Chau
621b051eeb docs: center banner, badges, add V2 warning 2026-01-15 19:42:04 +10:30
Sam Chau
008455fada style/docs: add custom banner with logo and header 2026-01-15 19:38:37 +10:30
Sam Chau
71d804064c style: maybe improved tabular layout? 2026-01-15 19:27:38 +10:30
Sam Chau
93259d3ce0 chore: license (AGPL-3.0), contributing and readme complete
- removed old dev docs
2026-01-15 19:25:42 +10:30
Sam Chau
c4ea8cfe4a style: update logo asset 2026-01-15 18:56:33 +10:30
Sam Chau
728d0f8aef refactor(parser): move parser into src/services, remove docker setup for dev environment, add dev startup script 2026-01-15 17:04:24 +10:30
Sam Chau
bcf0a3ba55 feat: add yaml dependency and update import in OpenAPI server handler 2026-01-15 16:53:07 +10:30
Sam Chau
456ecc298b feat(api): add health check and OpenAPI docs
- Implemented health check endpoint to monitor application status and components.
- Added OpenAPI specification endpoint to serve the API documentation.
- Introduced new TypeScript definitions for API paths and components.
2026-01-15 16:50:28 +10:30
Sam Chau
55c5886125 feat: remove 'none' trigger from sync strategies, add dirty tracking / unsaved changes to sync page 2026-01-15 15:39:55 +10:30
Sam Chau
cabf1ff815 fix: handle empty response body in HTTP client 2026-01-15 15:20:02 +10:30
Sam Chau
f35a01f111 feat: sync transformers and quality profile handling
- Introduced new sync transformers for custom formats and quality profiles.
- Implemented the `transformQualityProfile` function to convert PCD quality profile data to ARR API format.
- Added functions to fetch quality profiles and custom formats from PCD cache.
- Enhanced `BaseArrClient` with methods for managing custom formats and quality profiles.
- Updated types to include custom format specifications and quality profile payloads.
- Modified sync page server logic to calculate next run time for scheduled syncs.
2026-01-15 15:14:54 +10:30
Sam Chau
27835c3426 feat: add next_run_at column to sync config tables and update job schedules to cron expressions 2026-01-15 15:14:21 +10:30
Sam Chau
f73d08c5b3 fix: update label for entity testing -> testing 2026-01-15 01:04:25 +10:30
Sam Chau
847cd8a1dc feat: add conceptual tweaks management page 2026-01-15 00:59:10 +10:30
Sam Chau
9203f79ad2 style: colored icons for entity type, reorder action bar buttons to actually make sense 2026-01-15 00:38:10 +10:30
Sam Chau
9517832129 feat: implement layer selection for entity and release crud 2026-01-15 00:20:05 +10:30
Sam Chau
c3a9d23b91 fix: add personal access token handling in database update action 2026-01-15 00:19:42 +10:30
Sam Chau
eb22e0385c feat: implement navigation icon style store and UI settings for emoji/icon toggle 2026-01-15 00:06:02 +10:30
Sam Chau
74b38df686 feat: add entity and release management components
- Created EntityTable component for displaying test entities with expandable rows for releases.
- Implemented ReleaseTable component to manage and display test releases with actions for editing and deleting.
- Added ReleaseModal component for creating and editing releases
- Introduced types for TestEntity, TestRelease, and related evaluations
- Enhanced general settings page to include TMDB API configuration with connection testing functionality.
- Added TMDBSettings component for managing TMDB API access token with reset and test connection features.
2026-01-14 23:50:20 +10:30
Sam Chau
aec6d79695 feat: updateLanguages, updateQualities functionality 2026-01-14 16:03:14 +10:30
Sam Chau
f4b531b61a feat: Refactor quality profile management and scoring
- Removed unsaved changes modal and related logic from qualities page.
- Enhanced scoring page with improved state management and save functionality.
- Introduced a new GeneralForm component for creating and editing quality profiles.
- Added server-side validation for duplicate quality profile names.
- Implemented delete confirmation modals for both quality profiles and regular expressions.
- Improved handling of custom format scores and enabled states in scoring table.
- Added new page for creating quality profiles with appropriate server-side logic.
- Updated regular expressions to check for duplicate names during creation and editing.
2026-01-05 01:40:23 +10:30
Sam Chau
aa071e50cf feat: remove databaseName prop from GeneralForm in edit and create modes 2026-01-04 04:00:47 +10:30
Sam Chau
8deef25c9e feat: add create and delete custom format functionality
- Implemented `create.ts` for creating custom formats with associated tags.
- Added `delete.ts` for deleting custom formats with cascading deletes for related entities.
- Updated `index.ts` to export new create and delete functions.
- Enhanced the server-side logic in `+page.server.ts` for handling new custom format creation.
- Created a new Svelte component `GeneralForm.svelte` for managing custom format details.
- Updated the UI in `+page.svelte` for creating new custom formats and handling form submissions.
- Integrated dirty state management for form inputs in `TestForm.svelte` and `GeneralForm.svelte`.
- Added delete functionality in the UI for custom formats with confirmation modals.
2026-01-03 04:07:08 +10:30
Sam Chau
08710ffcb4 feat: implement condition management with draft support and layer permissions
- Added server-side actions for updating conditions with layer permissions.
- Enhanced the conditions page to handle draft conditions and validation.
- Introduced a modal for selecting save targets based on user permissions.
- Refactored condition and draft condition components to emit changes immutably.
- Updated general page to manage form data more reactively and validate inputs.
2026-01-03 03:22:29 +10:30
Sam Chau
3462c8b84d feat(custom-formats): add updateGeneral functionality and integrate with the general page 2026-01-03 02:17:02 +10:30
Sam Chau
9ffc08b76e feat(description): replace FormInput with MarkdownInput for improved description handling 2026-01-03 02:07:12 +10:30
Sam Chau
2250dd8460 feat(description): replace FormInput with MarkdownInput for enhanced description formatting 2026-01-03 02:03:30 +10:30
Sam Chau
e44228d147 feat(logging): implement change tracking and logging for media settings and naming updates 2026-01-03 02:01:38 +10:30
Sam Chau
fc2211c146 feat(dirty-tracking): implement dirty tracking and unsaved changes modal for media settings and naming sections 2026-01-03 00:39:11 +10:30
Sam Chau
d8b650a145 feat(table): expose expandedRows as a prop for external control 2026-01-03 00:09:39 +10:30
Sam Chau
76b864a05f feat(table): add flushBottom prop for conditional styling in ExpandableTable
feat(media-settings): update class for better overflow handling in MediaSettingsSection
feat(naming): enhance overflow handling in NamingSection component
feat(quality-definitions): improve overflow handling in QualityDefinitionsSection
2026-01-03 00:07:58 +10:30
Sam Chau
e79a2babe9 feat(delay-profiles, regular-expressions): add created_at and updated_at fields to profiles and expressions 2026-01-02 23:43:24 +10:30
Sam Chau
897cfa6b06 feat(logging): add logging for regular expression updates and creation actions 2026-01-02 23:35:04 +10:30
Sam Chau
9948782cc2 feat(regular-expressions): integrate MarkdownInput for description field with markdown support 2026-01-02 23:27:49 +10:30
Sam Chau
1a9b2bb1c6 feat(delay-profiles): implement explicit ordering for delay profiles and add info modal for user guidance 2026-01-02 22:24:38 +10:30
Sam Chau
c7bd63aaff fix(media-settings): change label to paragraph for better semantics in Propers and Repacks section 2026-01-02 22:16:19 +10:30
Sam Chau
1ff6208ba7 refactor(logging): change log level from info to debug for upgrade logging 2026-01-02 22:16:11 +10:30
Sam Chau
bc55d0c7bb feat(migrations): add app_info table and change default log level to DEBUG
refactor(logging): update logging levels and improve log messages across jobs
feat(jobs): enhance job initialization and sync with database
2026-01-02 22:13:04 +10:30
Sam Chau
8c12de7f89 feat(cache): enhance cache build process with detailed stats and improve logging 2026-01-02 20:41:25 +10:30
Sam Chau
bca25af6dc refactor(scoring): remove debug logging from scoring query 2026-01-02 20:29:06 +10:30
Sam Chau
59b032aab0 feat(highlight): integrate Highlight.js for syntax highlighting in JSON and SQL views 2026-01-02 20:21:03 +10:30
Sam Chau
77237b54ac feat(logging): enhance logging for delay profile updates and write operations 2026-01-02 20:20:55 +10:30
Sam Chau
77a3375889 feat: refactor RegularExpressionForm and RegexPatternField to track changes 2026-01-01 17:09:14 +10:30
Sam Chau
ca1b276327 fix(build): correct output paths for Linux and preview tasks 2026-01-01 16:58:01 +10:30
Sam Chau
0cdec6d19a feat(dirty): implement form dirty state tracking and navigation confirmation 2025-12-31 17:15:01 +10:30
Sam Chau
95795d5f0e feat: add conditions management for custom formats
- Introduced a new `listConditions` query to fetch conditions for custom formats.
- Created a new `ConditionListItem` type for better type safety.
- Added a new route for managing conditions under custom formats.
- Implemented UI components for displaying and managing conditions, including `ConditionCard` and `DraftConditionCard`.
- Enhanced the layout to include a new tab for conditions.
- Added support for various condition types and their respective options.
2025-12-31 16:40:41 +10:30
Sam Chau
56cf061a4b feat(customFormats): implement general queries and update related types and components 2025-12-31 03:31:59 +10:30
Sam Chau
445ebf1a39 feat(tests): add test count to custom formats in list and views 2025-12-31 03:13:53 +10:30
Sam Chau
5d82cc910b feat: add testing functionality for custom formats
- Implemented server-side logic for loading and managing tests in custom formats.
- Created new page for editing existing tests with form handling.
- Developed a reusable TestForm component for creating and editing test cases.
- Added functionality to create new tests with validation and error handling.
- Integrated layer permission checks for writing to base layer.
- Enhanced user experience with modals for save and delete actions.
2025-12-31 03:05:09 +10:30
Sam Chau
af269b030f feat(cache): implement regex101 cache table and queries for API response caching 2025-12-31 00:58:17 +10:30
Sam Chau
b360dfbcae feat(docs): add database schema and manifest specification 2025-12-31 00:02:50 +10:30
Sam Chau
5c26d6d7b2 feat(parser): implement C# parser microservice with regex-based title parsing
- Added RegexReplace class for handling regex replacements.
- Created ReleaseGroupParser for extracting release groups from titles.
- Developed TitleParser for parsing movie titles, including editions and IDs.
- Introduced QualitySource, Resolution, QualityModifier enums and QualityResult class for quality metadata.
- Set up Dockerfile and docker-compose for containerized deployment.
- Implemented ASP.NET Core web API for parsing requests.
- Added TypeScript client for interacting with the parser service.
- Enhanced configuration to support dynamic parser service URL.
2025-12-30 10:33:52 +10:30
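The TypeScript client for the C# parser microservice described in the commit above presumably boils down to a small HTTP wrapper like this; the endpoint path and response shape are assumptions.

interface ParseResult {
    title?: string;
    releaseGroup?: string;
    quality?: unknown;
}

export async function parseReleaseTitle(baseUrl: string, releaseTitle: string): Promise<ParseResult> {
    const res = await fetch(`${baseUrl}/parse`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ title: releaseTitle })
    });
    if (!res.ok) {
        throw new Error(`Parser service returned ${res.status}`);
    }
    return (await res.json()) as ParseResult;
}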
Sam Chau
8a3f266593 feat(custom-formats): implement custom formats management with list, detail views, and search functionality 2025-12-30 08:23:36 +10:30
Sam Chau
f8c62c51ba feat(QualityDefinitions): bind expandedRows for shared state in tables 2025-12-30 07:25:08 +10:30
Sam Chau
bd702f126c feat(media-management): enhance MediaManagementSyncer to support multiple config types and improve sync functionality 2025-12-30 07:15:44 +10:30
Sam Chau
17d3d756f6 feat(settings): enhance UI components with IconCheckbox and update styling for better accessibility 2025-12-30 05:40:18 +10:30
Sam Chau
9119afdf49 feat(jobs): update styling to use accent colors for job description and schedule inputs 2025-12-30 05:39:14 +10:30
Sam Chau
e39a3195f6 fix(logs): update check icon color in source filter to use accent colors 2025-12-30 05:26:53 +10:30
Sam Chau
aad21f494d feat(backups): update backup button icon and styling to use accent colors 2025-12-30 05:26:47 +10:30
Sam Chau
f63188082c fix(notification-history): correct date formatting to handle UTC properly 2025-12-30 05:26:40 +10:30
Sam Chau
bf1ccb956d feat(notifications): update styles for notification service forms and Discord configuration to use accent coloring 2025-12-30 05:20:23 +10:30
Sam Chau
3b14b300d5 feat(sidebar): implement collapsible sidebar functionality with localStorage support 2025-12-30 05:07:01 +10:30
Sam Chau
4aa914664e feat(media-management): add Quality Definitions, Media, Naming sections for Radarr and Sonarr
- Implemented QualityDefinitionsSection component to manage quality definitions for Radarr and Sonarr.
- Added server-side logic for loading and updating quality definitions in Radarr and Sonarr.
- Created new pages for Radarr and Sonarr media management, integrating the QualityDefinitionsSection.
- Enhanced media settings and naming settings management for both Radarr and Sonarr.
- Introduced validation and logging for media settings updates.
2025-12-30 04:56:54 +10:30
Sam Chau
7e7561e35a feat: Implement regular expression management features
- Add server-side logic for loading, updating, and deleting regular expressions in +page.server.ts.
- Create a new Svelte component for editing regular expressions, including form handling and validation.
- Introduce a RegexPatternField component for managing regex patterns and associated unit tests.
- Develop a RegularExpressionForm component for both creating and editing regex entries.
- Implement a SearchFilterAction component for filtering regex entries.
- Add new routes for creating and managing regular expressions, including a preset feature for common patterns.
- Enhance the UI with CardView and TableView components for displaying regex entries in different formats.
- Integrate markdown parsing for descriptions in the UI.
2025-12-29 21:06:49 +10:30
Sam Chau
ac0cc7d4c9 feat(database): add database info display in StatusCard and update form action for editing 2025-12-29 21:06:34 +10:30
Sam Chau
8035820156 feat(ops): implement natural sorting for uncommitted operation files 2025-12-29 21:06:17 +10:30
Sam Chau
912861abca fix(writer): improve SQL query conversion to handle multiple placeholders correctly 2025-12-29 21:06:00 +10:30
Sam Chau
7a6f8cfd08 feat(docs): add comprehensive architecture guide for Profilarr 2025-12-29 19:00:20 +10:30
Sam Chau
54e64a2ed4 feat(config): enhance base path logic to use executable directory as fallback 2025-12-29 19:00:12 +10:30
Sam Chau
473a0cf474 fix(preview): update preview task to include PORT environment variable 2025-12-29 06:26:03 +10:30
Sam Chau
1e8fc7a42d feat(sync): implement sync functionality for delay profiles
- Added syncArrJob to handle syncing of PCD profiles and settings to arr instances.
- Created syncArr logic to process pending syncs and log results.
- Introduced BaseSyncer class for common sync operations and specific syncers for delay profiles
- Implemented fetch, transform, and push methods for delay profiles
- Added manual sync actions in the UI for delay profiles
- Enhanced logging for sync operations and error handling.
2025-12-29 05:37:55 +10:30
Sam Chau
ea5c543647 feat: add sync configuration for ARR instances
- Introduced a new sync page for ARR instances, allowing users to configure quality profiles, delay profiles, and media management settings.
- Implemented backend logic to handle saving sync configurations.
- Enhanced the cache management system to support debouncing based on specific paths.
- Updated the layout to include a new "Sync" tab in the navigation.
- Added UI components for managing quality profiles, delay profiles, and media management settings with appropriate state management.
- Included informative modals to guide users on how the sync process works.
2025-12-29 04:39:52 +10:30
Sam Chau
aef58ea804 feat(ai): implement AI settings management and commit message generation 2025-12-29 04:39:41 +10:30
Sam Chau
4aa1c0c8e3 feat(git): add isFileUncommitted utility and update cancelOutCreate logic
fix(repo): change pull command to standard without rebase
fix(changes): ensure UI refresh after discarding and adding changes
fix(delay-profile): correct label structure for tags in DelayProfileForm
2025-12-29 01:35:50 +10:30
Sam Chau
7db49af4a2 fix(dropdown): remove unused 'open' property from Dropdown component 2025-12-29 01:35:30 +10:30
Sam Chau
d120aa5d02 fix(alerts): adjust alert container position for improved visibility 2025-12-29 01:35:23 +10:30
Sam Chau
3ae82153d9 feat(commits): implement commit history retrieval and display in the database view 2025-12-29 01:13:10 +10:30
Sam Chau
def987d8e9 feat: enhance accent color support across the application
- Introduced new accent colors (green, orange, teal, purple, rose) in the accent store.
- Updated CSS variables for accent colors in app.css.
- Refactored components to utilize accent colors for buttons, inputs, dropdowns, and tags.
- Improved accessibility and visual consistency by replacing hardcoded colors with accent variables.
- Adjusted styles in various components including modals, tables, and forms to reflect the new accent color scheme.
2025-12-29 01:12:59 +10:30
Sam Chau
0af19ed7ea feat(accent): implement accent color store and picker for app theming 2025-12-29 00:55:40 +10:30
Sam Chau
862dc0c097 feat(databases): streamline navigation and remove sync page; redirect to changes for all databases 2025-12-29 00:22:07 +10:30
Sam Chau
1eb2e983a5 feat: enhance Git operations and add changes management for databases
- Updated Git class to include options for fetching status and retrieving branches.
- Introduced getRepoInfo function to fetch repository details from GitHub API.
- Implemented changes page for database instances, allowing users to view and manage uncommitted changes.
- Added actions for discarding and adding changes with appropriate logging.
- Created UI components for displaying changes and managing actions.
- Implemented server-side redirects based on database access token presence.
- Enhanced delay profile management with improved delete functionality and logging.
2025-12-29 00:15:00 +10:30
Sam Chau
9ddb426f13 feat(git): refactor git utilities and introduce Git class for repository operations 2025-12-28 22:37:14 +10:30
Sam Chau
7c07f87d7c feat(delay-profiles): add delay profiles management functionality
- Create a new page for displaying delay profiles with an empty state when no databases are linked.
- Implement server-side loading for delay profiles based on the selected database.
- Add a detailed view for editing and deleting delay profiles, including form validation and error handling.
- Introduce a form component for creating and editing delay profiles with appropriate fields and validation.
- Implement table and card views for displaying delay profiles, allowing users to navigate to detailed views.
- Add functionality for creating new delay profiles with validation and error handling.
2025-12-28 21:28:17 +10:30
Sam Chau
3d27fbf411 fix(dependencies): downgrade kysely to version 0.27.6 for compatibility 2025-12-28 21:27:41 +10:30
Sam Chau
a5a12f1658 feat(delay-profiles): add delay profile queries and types for database integration 2025-12-28 20:14:28 +10:30
Sam Chau
d2e098d412 feat(data-page): implement data page store for search and view management 2025-12-28 20:08:35 +10:30
Sam Chau
b6f3263f74 feat(settings): update VSCode settings for improved development experience 2025-12-28 19:54:52 +10:30
Sam Chau
4e15ffa168 feat(notifications): refactor notification system to use a fluent builder pattern 2025-12-28 19:43:51 +10:30
Sam Chau
b7efaa567c feat(upgrades): enhance upgrade manager with detailed notifications for success and failure 2025-12-28 19:30:50 +10:30
Sam Chau
5b82b4305c feat(upgrades): add test run button for existing configurations 2025-12-28 19:30:41 +10:30
Sam Chau
8a52f1db9d feat(jobs): enhance job name formatting in JobCard and JobHistory components 2025-12-28 19:14:36 +10:30
Sam Chau
64cd5d7d04 feat(logs): refactor log actions into separate LogsActionsBar component 2025-12-28 19:11:37 +10:30
Sam Chau
66095f6be1 feat(dependencies): implement syncDependencies to update dependency versions from manifest 2025-12-28 18:59:06 +10:30
Sam Chau
8066df5f92 feat(library): implement client-side library cache and API integration for fetching library data 2025-12-27 11:52:29 +10:30
Sam Chau
92d7a812a5 fix(tests): remove unused import in selectors test file 2025-12-27 11:24:09 +10:30
Sam Chau
0ce195ce36 Add unit tests for normalization and selector logic
- Implement tests for normalization functions in `normalize.test.ts`, covering various scenarios including field mapping, size conversion, ratings, cutoff calculations, date handling, and batch normalization.
- Create tests for selector functions in `selectors.test.ts`, validating the behavior of different selectors such as random, oldest, newest, lowest score, most popular, and least popular, along with edge cases and integration scenarios.
2025-12-27 11:23:48 +10:30
Sam Chau
926da00858 feat(upgrades): enhance upgrade logs and configuration management
- Added filtering options for upgrade runs based on their status (all, success, partial, failed, skipped).
- Implemented a refresh button to reload the logs.
- Created a new component `UpgradeRunCard` to display individual upgrade run details.
- Introduced a cooldown tracker to show the next scheduled run time and progress.
- Added a dry run toggle to the upgrade configuration settings.
- Implemented clipboard functionality to copy and paste filter configurations.
- Updated the upgrade run action to support dry run mode and validate configurations.
- Refactored various components for improved readability and maintainability.
2025-12-27 11:23:36 +10:30
Sam Chau
6dbdd9a0f0 feat(upgrades): add last_run_at tracking to upgrade_configs and implement upgrade manager job 2025-12-27 06:43:57 +10:30
Sam Chau
3a2d98491c feat(upgrades): add upgrade configuration management with CRUD operations 2025-12-27 06:31:27 +10:30
Sam Chau
a740937246 feat(upgrades): implement frontend upgrade configuration and filtering system
- Added shared selectors for item selection methods in upgrades.
- Updated navigation to point to the new upgrades page.
- Removed obsolete search priority page.
- Created server-side loading for upgrades page to fetch instance data.
- Developed upgrades page layout with core settings and filter settings components.
- Implemented core settings component for upgrade scheduling and filter mode selection.
- Added filter group component to manage complex filtering rules.
- Created filter settings component to manage multiple filters with detailed configurations.
- Introduced info modals for filters and upgrades to guide users on functionality.
2025-12-27 06:04:06 +10:30
Sam Chau
0d99680414 feat: add Delay Profiles group to navigation 2025-12-27 06:03:07 +10:30
Sam Chau
119131bab6 feat: add expandable table component for displaying Radarr library items with detailed views
feat: implement caching mechanism for library data with TTL
feat: enhance Radarr client with methods to fetch movies and quality profiles
feat: update library page to support profile changing and improved UI elements
fix: update navigation icons and improve layout for better user experience
fix: correct cache handling and error management in library loading
2025-12-26 07:41:04 +10:30
Sam Chau
85b594cdf1 feat: implement arr instances management with CRUD operations and navigation
- Added new routes and components for managing Arr instances, including library, logs, and search priority.
- Implemented server-side logic for loading, creating, updating, and deleting instances.
- Enhanced the InstanceForm component to include an enabled toggle for instance activation.
- Updated navigation to consolidate Arr instances under a single route.
- Removed deprecated routes and streamlined the instance management process.
2025-12-26 06:00:21 +10:30
Sam Chau
e9ce6a76bc fix: change font style to mono for navigation components 2025-12-26 05:28:09 +10:30
Sam Chau
ec7616c7a1 fix: remove scoring streaming 2025-11-09 07:41:20 +11:00
Sam Chau
1181729da5 fix: update active state logic to include nested routes in navigation components 2025-11-09 07:11:12 +11:00
Sam Chau
78f33aae43 feat: implement drag-and-drop functionality for quality page 2025-11-09 07:07:03 +11:00
Sam Chau
53a74a98e8 fix: enhance unsaved changes tracking and improve state management in scoring components 2025-11-09 05:26:42 +11:00
Sam Chau
a0fc3e7ece fix(+page.svelte): add unsaved changes tracking to scoring options 2025-11-09 05:19:51 +11:00
Sam Chau
55e0c9eb67 feat: add scoring page with custom format management
- Implemented server-side loading for scoring data in `+page.server.ts`.
- Created a new Svelte component for the scoring page in `+page.svelte`, including UI for managing custom formats, profiles, and display options.
- Added a `ScoringTable` component to display custom formats and their scores.
- Introduced local storage management for user preferences such as grouping, tiling, and profiles.
- Enhanced user experience with modals for information and unsaved changes.
2025-11-09 05:12:36 +11:00
Sam Chau
e1de8f88cf feat: Add Kysely integration for quality profile queries and refactor database interactions
- Updated package.json to include Kysely and Deno Vite plugin dependencies.
- Introduced new types for sorting in table component.
- Refactored PCDCache to utilize Kysely for type-safe database queries.
- Created new query files for quality profiles, including general information, languages, and list queries.
- Removed outdated qualityProfiles.ts file and replaced it with modular query structure.
- Updated routes to use new query functions for loading quality profile data.
- Enhanced Vite configuration to include Deno plugin and improved watch settings.
2025-11-09 05:07:48 +11:00
Sam Chau
d69064803a fix: update Dropdown.svelte to properly close empty div tag and enhance markdown sanitizer
- Fixed an empty div tag in Dropdown.svelte to ensure proper HTML structure.
- Implemented a custom HTML sanitizer in markdown.ts to avoid postcss dependency issues, allowing only specific tags and attributes to enhance security against XSS attacks.
2025-11-09 01:11:19 +11:00
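The custom sanitizer in the commit above is described as an allowlist of tags and attributes; a toy version of that idea (not robust, and not the project's actual code) might look like:

const ALLOWED_TAGS = new Set(['p', 'em', 'strong', 'code', 'pre', 'a', 'ul', 'ol', 'li', 'br']);

export function sanitizeHtml(html: string): string {
    return html.replace(/<\/?([a-zA-Z][a-zA-Z0-9-]*)\b([^>]*)>/g, (match: string, rawName: string, attrs: string) => {
        const name = rawName.toLowerCase();
        if (!ALLOWED_TAGS.has(name)) return ''; // drop disallowed tags entirely
        if (match.startsWith('</')) return `</${name}>`;
        if (name === 'a') {
            // Keep only http(s) hrefs; drop every other attribute (including on* handlers).
            const href = /href\s*=\s*"(https?:\/\/[^"]*)"/i.exec(attrs);
            return href ? `<a href="${href[1]}" rel="noopener noreferrer">` : '<a>';
        }
        return `<${name}>`; // allowed tag, attributes stripped
    });
}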
Sam Chau
92035e8fc5 fix(+page.svelte): add type annotation for typeOptions array for better type safety 2025-11-05 21:44:51 +10:30
Sam Chau
4ce966a41d feat(quality-profiles): implement quality profile management with detailed views, including languages and general settings 2025-11-05 21:41:49 +10:30
Sam Chau
80019b72a6 fix(ActionButton): add z-index class to dropdown for proper stacking 2025-11-05 21:41:31 +10:30
Sam Chau
697d241adf feat(tabs): enhance tab navigation with back button support and layout adjustments 2025-11-05 21:40:33 +10:30
Sam Chau
ee35e335d7 feat(unsaved-changes): implement utility for detecting and handling unsaved changes 2025-11-05 21:40:14 +10:30
Sam Chau
f274c9900f refactor(table): move Column interface to separate types file for better organization 2025-11-05 07:35:48 +10:30
Sam Chau
e7fac48962 feat(quality-profiles): add quality profile management with views and queries 2025-11-05 07:30:42 +10:30
Sam Chau
2abc9aa86a feat(pcd): implement PCD cache management and initialization logic 2025-11-05 07:30:33 +10:30
Sam Chau
4280403cfa fix(navbar): adjust margin for brand name container in navigation 2025-11-05 07:30:02 +10:30
Sam Chau
bba8590b07 feat(table): add header icon support and enhance cell rendering options 2025-11-05 07:29:55 +10:30
Sam Chau
4bcbdd77c8 feat(ui): implement search and dropdown components with actions 2025-11-05 07:29:38 +10:30
Sam Chau
9b6b746ed6 chore(css): remove unused tailwindcss forms plugin import 2025-11-05 07:29:14 +10:30
Sam Chau
b8588b84f4 Add marked library as a dependency in package.json 2025-11-05 07:29:06 +10:30
Sam Chau
302a80d1cd feat(markdown): add utility functions for parsing and stripping markdown 2025-11-05 07:28:52 +10:30
Sam Chau
58bd036a3a chore(pcd): cleanup unused files for schema deps 2025-11-04 07:02:05 +10:30
Sam Chau
37ae5164e6 feat(pcd): add database linking functionality 2025-11-04 06:58:54 +10:30
Sam Chau
a7d9685ed9 fix: instance form path in new arr page 2025-11-03 22:25:28 +10:30
Sam Chau
f2d9a8fa76 style(logo): use temp firefox logo 2025-11-03 21:10:05 +10:30
Sam Chau
ed8f81619e refactor(state): move empty state into reusable ui comp 2025-11-03 21:09:26 +10:30
Sam Chau
c17ae8eac4 perf(about): progressively stream data for instant page load 2025-11-03 20:57:17 +10:30
Sam Chau
849d1ec1b6 chore: move tests into src, temp config into dist, cleanup tasks 2025-11-03 20:44:43 +10:30
Sam Chau
64bde2017b chore(logs): Cleanup double logger alias 2025-11-03 17:32:45 +10:30
Sam Chau
fcdd45952b refactor(everything): properly moved into lib 2025-11-03 17:27:33 +10:30
Sam Chau
7df6d1eec3 refactor(alerts): move to lib/client
- also remove reusable request wrapper
2025-11-03 17:05:48 +10:30
Sam Chau
3a2e778b98 refactor(stores): move to lib/client 2025-11-03 16:50:18 +10:30
Sam Chau
b18012b032 refactor(assets): move to lib/client 2025-11-03 16:48:31 +10:30
Sam Chau
51eaa2cdca style(arrInstance): transition to card styling 2025-10-26 10:30:48 +10:30
Sam Chau
77cff2de8f style(scrollbar): add custom safari/chromium scrollbars 2025-10-26 10:00:33 +10:30
Sam Chau
c83217a72a feat(notifications): add notification module with Discord webhook support
- Database schema for notification services and history tracking
  - Notifier interface with Discord webhook implementation
  - UI for creating/editing/managing notification services
  - Integration with job completion events
  - Service-level enable/disable and notification type filtering
  - Test notifications and notification history view
2025-10-22 04:07:03 +10:30
Sam Chau
5cd1bf82ff refactor(createBackup): extract logic and add comprehensive tests
- Extract pure backup logic to /src/jobs/logic/createBackup.ts
  - Update job definition to use extracted logic
  - Fix absolute path parsing for tar command
  - Add 7 tests: success cases, filename format, file extraction,
    error handling (non-existent dir, file instead of dir), empty dir
  - Uses BaseTest framework with temp directories and tar extraction
2025-10-21 09:02:58 +10:30
Sam Chau
b8949b5148 test(cleanupLogs): add comprehensive test suite
- 8 tests covering deletion, retention, parsing, and edge cases
  - Tests empty/non-existent directories and boundary conditions
  - Uses BaseTest framework for isolated temp directories
2025-10-21 08:25:22 +10:30
Sam Chau
97ad75f238 chore(paths): move jobs into src 2025-10-21 08:24:50 +10:30
Sam Chau
7e8068f1fb test: add BaseTest framework with utilities
- Create abstract BaseTest class with lifecycle hooks
  - Add beforeAll/afterAll and beforeEach/afterEach hooks
  - Implement temp directory management (auto cleanup)
  - Add file assertion helpers (exists, contains, matches)
  - Add async utilities (waitFor, waitForFile, sleep)
  - Include example test demonstrating usage
  - Add test tasks to deno.json (test, test:watch)
  - Add @std/assert dependency
2025-10-21 08:04:46 +10:30
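A condensed sketch of what a BaseTest class with temp-directory lifecycle hooks can look like on Deno; this is a hypothetical shape, not the actual framework.

import { assert } from '@std/assert';

export abstract class BaseTest {
    protected tempDir = '';

    // Override in subclasses as needed.
    async beforeAll(): Promise<void> {}
    async afterAll(): Promise<void> {}

    async beforeEach(): Promise<void> {
        this.tempDir = await Deno.makeTempDir({ prefix: 'profilarr_test_' });
    }

    async afterEach(): Promise<void> {
        if (this.tempDir) await Deno.remove(this.tempDir, { recursive: true });
    }

    async assertFileExists(path: string): Promise<void> {
        const info = await Deno.stat(path); // throws if the path is missing
        assert(info.isFile, `${path} exists but is not a regular file`);
    }
}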
Sam Chau
1884e9308f refactor(logger): make Logger independently testable
- Add LoggerConfig interface for dependency injection
  - Logger class now accepts optional config in constructor
  - Default singleton still uses system config/database (no breaking changes)
  - Enables standalone usage for testing without full system init
  - Add auto-creation of logs directory if missing
2025-10-21 07:57:13 +10:30
Sam Chau
0a9b287825 refactor(logging): simplify to daily-only rotation
- Remove rotation_strategy and max_file_size settings
  - Always use YYYY-MM-DD.log format for log files
  - Update cleanup job to parse dates from filenames
  - Simplify settings UI to retention days only
2025-10-21 07:44:37 +10:30
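With daily-only rotation, the cleanup job just parses the date out of each YYYY-MM-DD.log filename; a minimal sketch using Deno APIs, with an invented function name.

async function cleanupLogs(logDir: string, retentionDays: number): Promise<void> {
    const cutoff = Date.now() - retentionDays * 24 * 60 * 60 * 1000;
    for await (const entry of Deno.readDir(logDir)) {
        const match = /^(\d{4}-\d{2}-\d{2})\.log$/.exec(entry.name);
        if (!match) continue; // ignore anything that isn't a daily log file
        const fileDate = Date.parse(`${match[1]}T00:00:00Z`); // filename date treated as UTC
        if (fileDate < cutoff) {
            await Deno.remove(`${logDir}/${entry.name}`);
        }
    }
}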
Sam Chau
303e81507f stack(logs, jobs, backups): implemented 2025-10-21 06:10:00 +10:30
Sam Chau
357c5023e2 fix(arr): parameterise api version to let clients override 2025-10-20 04:31:31 +10:30
Sam Chau
c7f0698f2d refactor(arr): create shared instance component for new / edit instance
- add ability to edit existing instances
- add ability to delete existing instances
2025-10-20 04:02:56 +10:30
Sam Chau
ea9a01c0d6 frontend(about): add dev team / dedication 2025-10-20 02:29:55 +10:30
Sam Chau
e24410f6f3 stack(arrConfig): implemented arr config handling
- database module + migrations handler
- http client class + arr client child (with connection pooling, retries, backoff)
- toast alerts
- add new arr configs
2025-10-20 02:13:09 +10:30
Sam Chau
24e5571a66 chore(style): frontend formatting 2025-10-19 22:56:05 +10:30
Sam Chau
8386d8bacb chore(codebase): add ignore stuff for ai related files, add more build commands 2025-10-19 22:27:23 +10:30
Sam Chau
77e8f011b2 frontend(version): add version info to pageNav 2025-10-18 06:26:53 +10:30
Sam Chau
7e25d844e9 style(pageNav): thicker, rounded vertical lines 2025-10-18 05:53:38 +10:30
Sam Chau
d6a9be58bc frontend(error): add error page 2025-10-18 05:50:42 +10:30
Sam Chau
86e9590337 frontend(nav): pageNav 2025-10-18 05:28:47 +10:30
Sam Chau
309297d263 style(nav): fix the nav to top of page 2025-10-18 04:48:12 +10:30
Sam Chau
24a20fcf76 frontend(nav): add navbar, themeToggle, theme store 2025-10-18 04:37:47 +10:30
Sam Chau
f3379b9ea4 server(utils): config, logger, startup 2025-10-18 03:55:11 +10:30
Sam Chau
72415af8a5 chore(setup): sveltekit + deno 2025-10-18 00:39:02 +10:30
Sam Chau
462f0ced94 chore(setup): remove remaining files 2025-10-18 00:09:59 +10:30
Sam Chau
0622046e53 chore(setup): remove existing codebase 2025-10-18 00:09:35 +10:30
Samuel Chau
3a0deb16fa docs(README): update setup link, clean old sections 2025-08-28 10:29:37 +09:30
Samuel Chau
bb514b20cc Merge pull request #224 from Dictionarry-Hub/dev
fix(backend): perms env, mm import refactor, deserialize error
2025-08-26 22:11:39 +09:30
Sam Chau
99925be174 Merge branch 'main' of https://github.com/Dictionarry-Hub/profilarr into dev 2025-08-26 22:08:05 +09:30
Sam Chau
21e44d592f fix(entrypoint): simplify umask handling 2025-08-24 17:12:45 +09:30
Sam Chau
212dd695b6 fix(entrypoint): start shell with overridden umask 2025-08-24 16:20:17 +09:30
Sam Chau
6c40d352c9 fix(migration): update default language score 2025-08-24 16:07:38 +09:30
Sam Chau
7270bbfedb chore(docker): add entrypoint script and user permissions 2025-08-24 15:35:42 +09:30
Sam Chau
2e2abb93be feat(task): add update logic for task intervals for backup/sync 2025-08-23 10:12:12 +09:30
Sam Chau
7f5f44cd77 refactor(media-management): replace requests with ArrHandler for API interactions 2025-08-23 09:04:59 +09:30
Sam Chau
c30dc33828 fix(importer): pass arr type to format extractor to only compile/import arr specific formats 2025-08-23 08:23:29 +09:30
Sam Chau
eb9733807e fix(conflict): deserialize arr specific score objects when checking for conflicts 2025-08-21 10:23:27 +09:30
858 changed files with 88455 additions and 44049 deletions

85
.dockerignore Normal file
View File

@@ -0,0 +1,85 @@
# =============================================================================
# Docker Build Exclusions
# =============================================================================
# These files are NOT sent to Docker during build, making builds faster
# and images smaller.
# -----------------------------------------------------------------------------
# Dependencies (reinstalled during build)
# -----------------------------------------------------------------------------
node_modules/
.npm/
.pnpm-store/
# -----------------------------------------------------------------------------
# Build outputs (rebuilt during build)
# -----------------------------------------------------------------------------
dist/
.svelte-kit/
# -----------------------------------------------------------------------------
# .NET build artifacts
# -----------------------------------------------------------------------------
src/services/parser/bin/
src/services/parser/obj/
# -----------------------------------------------------------------------------
# Git (not needed in image)
# -----------------------------------------------------------------------------
.git/
.gitignore
.gitattributes
# -----------------------------------------------------------------------------
# IDE and editor files
# -----------------------------------------------------------------------------
.vscode/
.idea/
*.swp
*.swo
*~
# -----------------------------------------------------------------------------
# Documentation (not needed in image)
# -----------------------------------------------------------------------------
*.md
!README.md
docs/
LICENSE
# -----------------------------------------------------------------------------
# Development and test files
# -----------------------------------------------------------------------------
.env
.env.*
*.log
*.tmp
temp/
coverage/
.nyc_output/
# -----------------------------------------------------------------------------
# Docker files themselves (prevent recursion)
# -----------------------------------------------------------------------------
Dockerfile*
compose.yml
compose.yaml
docker-compose.yml
docker-compose.yaml
# Keep entrypoint script, ignore the rest
!docker/entrypoint.sh
# -----------------------------------------------------------------------------
# CI/CD
# -----------------------------------------------------------------------------
.github/
.gitlab-ci.yml
.travis.yml
Jenkinsfile
# -----------------------------------------------------------------------------
# Misc
# -----------------------------------------------------------------------------
*.tgz
*.tar.gz
*.zip

Binary file not shown (previous image: 863 KiB).

View File

@@ -1 +0,0 @@
* @santiagosayshey

View File

@@ -1,57 +0,0 @@
name: Build Beta Docker Image
on:
push:
branches:
- dev
pull_request:
branches:
- dev
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64/v8
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Build frontend
working-directory: ./frontend
run: |
npm ci
npm run build
- name: Prepare dist directory
run: |
mkdir -p dist/backend dist/static
cp -r frontend/dist/* dist/static/
cp -r backend/* dist/backend/
cp backend/requirements.txt dist/
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
platforms: linux/amd64,linux/arm64/v8
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: santiagosayshey/profilarr:beta

99
.github/workflows/docker.yml vendored Normal file
View File

@@ -0,0 +1,99 @@
name: Docker
on:
push:
branches:
- v2
tags:
- 'v*'
env:
REGISTRY: ghcr.io
IMAGE_BASE: ghcr.io/dictionarry-hub
jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
attestations: write
id-token: write
strategy:
matrix:
include:
- image: profilarr
dockerfile: Dockerfile
- image: profilarr-parser
dockerfile: Dockerfile.parser
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to GHCR
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Determine channel
id: channel
run: |
if [[ "${{ github.ref }}" == "refs/heads/v2" ]]; then
echo "value=develop" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == refs/tags/v*-beta* ]]; then
echo "value=beta" >> $GITHUB_OUTPUT
elif [[ "${{ github.ref }}" == refs/tags/v* ]]; then
echo "value=stable" >> $GITHUB_OUTPUT
else
echo "value=develop" >> $GITHUB_OUTPUT
fi
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.IMAGE_BASE }}/${{ matrix.image }}
labels: |
org.opencontainers.image.licenses=AGPL-3.0
tags: |
# Branch push -> develop
type=raw,value=develop,enable=${{ github.ref == 'refs/heads/v2' }}
# Beta tag -> beta + version
type=raw,value=beta,enable=${{ startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '-beta') }}
type=semver,pattern={{version}},enable=${{ startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '-beta') }}
# Stable tag -> latest + version
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-beta') }}
type=semver,pattern={{version}},enable=${{ startsWith(github.ref, 'refs/tags/v') && !contains(github.ref, '-beta') }}
- name: Build and push
id: push
uses: docker/build-push-action@v6
with:
context: .
file: ${{ matrix.dockerfile }}
# FOR NOW: ARM disabled for faster debugging - re-enable linux/arm64 when stable
platforms: linux/amd64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
VITE_CHANNEL=${{ steps.channel.outputs.value }}
cache-from: type=registry,ref=${{ env.IMAGE_BASE }}/${{ matrix.image }}:buildcache
cache-to: type=registry,ref=${{ env.IMAGE_BASE }}/${{ matrix.image }}:buildcache,mode=max
- name: Generate attestation
uses: actions/attest-build-provenance@v2
with:
subject-name: ${{ env.IMAGE_BASE }}/${{ matrix.image }}
subject-digest: ${{ steps.push.outputs.digest }}
push-to-registry: true

View File

@@ -1,11 +1,12 @@
name: Release Notification
name: Notify
on:
release:
types: [published]
push:
branches:
- 'v2'
- 'stable'
- 'dev'
jobs:
call-notify-release:
uses: Dictionarry-Hub/parrot/.github/workflows/notify-release.yml@v1
secrets:
PARROT_URL: ${{ secrets.PARROT_URL }}
call-notify-commit:
uses: Dictionarry-Hub/parrot/.github/workflows/notify-commit.yml@v1
secrets:
PARROT_URL: ${{ secrets.PARROT_URL }}

View File

@@ -1,59 +0,0 @@
name: Build Release Docker Image
on:
push:
tags:
- 'v*'
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Get tag
id: tag
run: echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64/v8
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Build frontend
working-directory: ./frontend
run: |
npm ci
npm run build
- name: Prepare dist directory
run: |
mkdir -p dist/backend dist/static
cp -r frontend/dist/* dist/static/
cp -r backend/* dist/backend/
cp backend/requirements.txt dist/
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64/v8
push: true
tags: |
santiagosayshey/profilarr:latest
santiagosayshey/profilarr:${{ steps.tag.outputs.tag }}

54
.gitignore vendored
View File

@@ -1,25 +1,41 @@
# Node
node_modules/
dist/
node_modules
# Python
__pycache__/
*.pyc
# Claude context
CLAUDE.md
.claudeignore
# Environment variables
.env
.env.prod
.env.1
.env.2
# Output
.output
.vercel
.netlify
.wrangler
/dist
/static
# OS files
# Application
/temp
/config
# OS
.DS_Store
Thumbs.db
# build files
backend/app/static/
# Env
.env
.env.*
!.env.example
!.env.test
# Config data
config/
radarr-config/
sonarr-config/
test-data/
# Vite
vite.config.js.timestamp-*
vite.config.ts.timestamp-*
# .NET
bin/
obj/
*.user
*.suo
.vs/
# Bruno environments (contain API keys)
bruno/environments/

2
.npmrc Normal file
View File

@@ -0,0 +1,2 @@
engine-strict=true
@jsr:registry=https://npm.jsr.io

9
.prettierignore Normal file
View File

@@ -0,0 +1,9 @@
# Package Managers
package-lock.json
pnpm-lock.yaml
yarn.lock
bun.lock
bun.lockb
# Miscellaneous
/static/

View File

@@ -1,12 +1,16 @@
{
"tabWidth": 4,
"useTabs": false,
"printWidth": 80,
"singleQuote": true,
"trailingComma": "none",
"bracketSpacing": false,
"jsxSingleQuote": true,
"arrowParens": "avoid",
"proseWrap": "preserve",
"bracketSameLine": true
"useTabs": true,
"singleQuote": true,
"trailingComma": "none",
"printWidth": 100,
"plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"],
"overrides": [
{
"files": "*.svelte",
"options": {
"parser": "svelte"
}
}
],
"tailwindStylesheet": "./src/app.css"
}

25
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,25 @@
{
"css.lint.unknownAtRules": "ignore",
"editor.tabSize": 4,
"editor.fontFamily": "JetBrains Mono, Consolas, monospace",
"editor.fontLigatures": true,
"editor.minimap.enabled": false,
"terminal.integrated.fontFamily": "JetBrains Mono",
"debug.console.fontFamily": "JetBrains Mono",
"scm.inputFontFamily": "JetBrains Mono",
"apc.font.family": "JetBrains Mono",
"apc.stylesheet": {
".monaco-workbench": "font-family: 'JetBrains Mono'"
},
"editor.smoothScrolling": true,
"workbench.list.smoothScrolling": true,
"editor.cursorBlinking": "smooth",
"editor.cursorSmoothCaretAnimation": "on",
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs": true,
"editor.renderWhitespace": "none",
"breadcrumbs.enabled": false,
"files.autoSave": "onFocusChange",
"editor.stickyScroll.enabled": true,
"editor.formatOnSave": true
}

View File

@@ -1,25 +0,0 @@
# Profilarr Development Guide
## Commands
- **Frontend**: `cd frontend && npm run dev` - Start React dev server
- **Backend**: `cd backend && gunicorn -b 0.0.0.0:5000 app.main:app` - Run Flask server
- **Docker**: `docker compose up` - Start both frontend/backend in dev mode
- **Lint**: `cd frontend && npx eslint 'src/**/*.{js,jsx}'` - Check frontend code style
- **Build**: `cd frontend && npm run build` - Build for production
## Code Style
### Frontend (React)
- **Imports**: React first, third-party libs next, components, then utils
- **Components**: Functional components with hooks, PascalCase naming
- **Props**: PropTypes for validation, destructure props in component signature
- **State**: Group related state, useCallback for memoized handlers
- **JSX**: 4-space indentation, attributes on new lines for readability
- **Error Handling**: try/catch for async operations, toast notifications
### Backend (Python)
- **Imports**: Standard lib first, third-party next, local modules last
- **Naming**: snake_case for functions/vars/files, PascalCase for classes
- **Functions**: Single responsibility, descriptive docstrings
- **Error Handling**: Specific exception catches, return (success, message) tuples (see the sketch below)
- **Indentation**: 4 spaces consistently
- **Modularity**: Related functionality grouped in directories
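As a minimal sketch of the `(success, message)` convention mentioned above (the function and table names here are hypothetical, not taken from the codebase):

```python
# Hypothetical example of the (success, message) tuple convention described
# above; the function and table names are illustrative only.
import logging
import sqlite3

logger = logging.getLogger(__name__)


def delete_example_row(conn: sqlite3.Connection, row_id: int) -> tuple[bool, str]:
    """Delete a row and report the outcome as a (success, message) tuple."""
    try:
        cursor = conn.execute('DELETE FROM example_table WHERE id = ?', (row_id,))
        conn.commit()
        if cursor.rowcount == 0:
            return False, f'Row {row_id} not found'
        return True, f'Deleted row {row_id}'
    except sqlite3.Error as e:  # specific exception catch, per the guide
        logger.error(f'Failed to delete row {row_id}: {e}')
        return False, str(e)
```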


@@ -1,17 +1,106 @@
# Dockerfile
FROM python:3.9-slim
WORKDIR /app
# Install git (since we're still using slim)
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
# Copy pre-built files from dist directory
COPY dist/backend/app ./app
COPY dist/static ./app/static
COPY dist/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
LABEL org.opencontainers.image.authors="Dictionarry dictionarry@pm.me"
LABEL org.opencontainers.image.description="Profilarr - Profile manager for *arr apps"
LABEL org.opencontainers.image.source="https://github.com/Dictionarry-Hub/profilarr"
# =============================================================================
# Profilarr Dockerfile
# =============================================================================
# Multi-stage build for minimal final image size
#
# Build: docker build -t profilarr .
# Run: docker run -v ./config:/config -p 6868:6868 profilarr
# -----------------------------------------------------------------------------
# Stage 1: Build
# -----------------------------------------------------------------------------
FROM denoland/deno:2.5.6 AS builder
WORKDIR /build
# Copy everything
COPY . .
# Install dependencies (creates node_modules for npm packages)
RUN deno install --node-modules-dir
# Build the application
# 1. Vite builds SvelteKit to dist/build/
# 2. Deno compiles to standalone binary
# Build-time variables for version card
# TARGETARCH is automatically set by Docker buildx (amd64 or arm64)
ARG TARGETARCH
ARG VITE_CHANNEL=stable
ENV VITE_PLATFORM=docker-${TARGETARCH}
ENV VITE_CHANNEL=${VITE_CHANNEL}
ENV APP_BASE_PATH=/build/dist/build
RUN deno run -A npm:vite build
RUN deno compile \
--no-check \
--allow-net \
--allow-read \
--allow-write \
--allow-env \
--allow-ffi \
--allow-run \
--allow-sys \
--target x86_64-unknown-linux-gnu \
--output dist/build/profilarr \
dist/build/mod.ts
# -----------------------------------------------------------------------------
# Stage 2: Runtime
# -----------------------------------------------------------------------------
FROM debian:12-slim
# Labels for container metadata
LABEL org.opencontainers.image.title="Profilarr"
LABEL org.opencontainers.image.version="beta"
LABEL org.opencontainers.image.description="Configuration management for Radarr and Sonarr"
LABEL org.opencontainers.image.source="https://github.com/Dictionarry-Hub/profilarr"
LABEL org.opencontainers.image.licenses="AGPL-3.0"
# Install runtime dependencies
# - git: PCD repository operations (clone, pull, push)
# - tar: Backup creation and restoration
# - curl: Health checks
# - gosu: Drop privileges to non-root user
# - ca-certificates: HTTPS connections
RUN apt-get update && apt-get install -y --no-install-recommends \
git \
tar \
curl \
gosu \
ca-certificates \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
# Create application directory
WORKDIR /app
# Copy built application from builder stage
COPY --from=builder /build/dist/build/profilarr /app/profilarr
COPY --from=builder /build/dist/build/server.js /app/server.js
COPY --from=builder /build/dist/build/static /app/static
# Copy entrypoint script
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Create config directory
RUN mkdir -p /config
# Environment variables
ENV PORT=6868
ENV HOST=0.0.0.0
ENV APP_BASE_PATH=/config
ENV TZ=UTC
# Expose port
EXPOSE 6868
CMD ["gunicorn", "--bind", "0.0.0.0:6868", "--timeout", "600", "app.main:create_app()"]
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD curl -sf http://localhost:${PORT}/api/v1/health || exit 1
# Volume for persistent data
VOLUME /config
# Entrypoint handles PUID/PGID/UMASK then runs the app
ENTRYPOINT ["/entrypoint.sh"]

61
Dockerfile.parser Normal file

@@ -0,0 +1,61 @@
# =============================================================================
# Profilarr Parser Dockerfile
# =============================================================================
# .NET 8.0 microservice for parsing release titles
# This service is OPTIONAL - only needed for custom format/quality profile testing
#
# Build: docker build -f Dockerfile.parser -t profilarr-parser .
# Run: docker run -p 5000:5000 profilarr-parser
# -----------------------------------------------------------------------------
# Stage 1: Build
# -----------------------------------------------------------------------------
FROM mcr.microsoft.com/dotnet/sdk:8.0-alpine AS builder
WORKDIR /build
# Copy project file first for better layer caching
COPY src/services/parser/Parser.csproj ./
RUN dotnet restore
# Copy source and build
COPY src/services/parser/ ./
# Remove local build config (uses paths that don't exist in container)
RUN rm -f Directory.Build.props && dotnet publish -c Release -o /app --no-restore
# -----------------------------------------------------------------------------
# Stage 2: Runtime
# -----------------------------------------------------------------------------
FROM mcr.microsoft.com/dotnet/aspnet:8.0-alpine
# Labels for container metadata
LABEL org.opencontainers.image.title="Profilarr Parser"
LABEL org.opencontainers.image.description="Release title parser for Profilarr (optional)"
LABEL org.opencontainers.image.source="https://github.com/Dictionarry-Hub/profilarr"
LABEL org.opencontainers.image.licenses="AGPL-3.0"
WORKDIR /app
# Copy built application
COPY --from=builder /app ./
# Create non-root user
RUN addgroup -g 1000 parser && \
adduser -u 1000 -G parser -D -h /app parser
# Switch to non-root user
USER parser
# Environment variables
ENV ASPNETCORE_URLS=http://+:5000
ENV ASPNETCORE_ENVIRONMENT=Production
# Expose port
EXPOSE 5000
# Health check
HEALTHCHECK --interval=30s --timeout=5s --start-period=5s --retries=3 \
CMD wget -qO- http://localhost:5000/health || exit 1
# Run the application
ENTRYPOINT ["dotnet", "Parser.dll"]

141
LICENSE

@@ -1,5 +1,5 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
@@ -7,17 +7,15 @@
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
@@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
@@ -72,7 +60,7 @@ modification follow.
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
@@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
@@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found.
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
GNU Affero General Public License for more details.
You should have received a copy of the GNU General Public License
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

198
README.md

@@ -1,93 +1,143 @@
# Profilarr
<br>
[![GitHub release](https://img.shields.io/github/v/release/Dictionarry-Hub/profilarr?color=blue)](https://github.com/Dictionarry-Hub/profilarr/releases)
[![Docker Pulls](https://img.shields.io/docker/pulls/santiagosayshey/profilarr?color=blue)](https://hub.docker.com/r/santiagosayshey/profilarr)
[![License](https://img.shields.io/github/license/Dictionarry-Hub/profilarr?color=blue)](https://github.com/Dictionarry-Hub/profilarr/blob/main/LICENSE)
[![Website](https://img.shields.io/badge/Website-dictionarry.dev-blue)](https://dictionarry.dev/)
[![Discord](https://img.shields.io/discord/1202375791556431892?color=blue&logo=discord&logoColor=white)](https://discord.com/invite/Y9TYP6jeYZ)
[![Buy Me A Coffee](https://img.shields.io/badge/Buy%20Me%20A%20Coffee-Support-blue?logo=buy-me-a-coffee)](https://www.buymeacoffee.com/santiagosayshey)
[![GitHub Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Support-blue?logo=github-sponsors)](https://github.com/sponsors/Dictionarry-Hub)
<p align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="src/lib/client/assets/banner-light.svg">
<source media="(prefers-color-scheme: light)" srcset="src/lib/client/assets/banner-dark.svg">
<img alt="Profilarr" src="src/lib/client/assets/banner-dark.svg" width="500">
</picture>
</p>
Configuration management tool for Radarr/Sonarr that automates importing and version control of custom formats and quality profiles.
<br>
![Profilarr Preview](.github/images/preview.png)
<p align="center">
<a href="https://github.com/Dictionarry-Hub/profilarr/releases"><img src="https://img.shields.io/github/v/release/Dictionarry-Hub/profilarr?color=blue" alt="GitHub release"></a>
<a href="https://hub.docker.com/r/santiagosayshey/profilarr"><img src="https://img.shields.io/docker/pulls/santiagosayshey/profilarr?color=blue" alt="Docker Pulls"></a>
<a href="https://github.com/Dictionarry-Hub/profilarr/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-AGPL--3.0-blue" alt="License"></a>
<a href="https://dictionarry.dev/"><img src="https://img.shields.io/badge/Website-dictionarry.dev-blue" alt="Website"></a>
<a href="https://discord.com/invite/Y9TYP6jeYZ"><img src="https://img.shields.io/discord/1202375791556431892?color=blue&logo=discord&logoColor=white" alt="Discord"></a>
<a href="https://www.buymeacoffee.com/santiagosayshey"><img src="https://img.shields.io/badge/Buy%20Me%20A%20Coffee-Support-blue?logo=buy-me-a-coffee" alt="Buy Me A Coffee"></a>
<a href="https://github.com/sponsors/Dictionarry-Hub"><img src="https://img.shields.io/badge/GitHub%20Sponsors-Support-blue?logo=github-sponsors" alt="GitHub Sponsors"></a>
</p>
<p>Manage quality profiles, custom formats, and release profiles across your Radarr and Sonarr instances. Define your profiles once with a Git-backed configuration database, then sync them to any number of *arr instances.</p>
> [!WARNING]
> V2 is under heavy development and is _NOT_ ready for production use. Use
> [Profilarr V1](https://github.com/Dictionarry-Hub/profilarr/tree/main) until
> V2 is ready.
## Features
- 🔄 Automatic synchronization with remote configuration databases
- 🎯 Direct import to Radarr/Sonarr instances
- 🔧 Git-based version control of your configurations
- ⚡ Preserve local customizations during updates
- 🛠️ Built-in conflict resolution
**Core**
- **Link** - Connect to configuration databases like the
[Dictionarry database](https://github.com/Dictionarry-Hub/db) or any Profilarr
Compliant Database (PCD)
- **Bridge** - Add your Radarr and Sonarr instances by URL and API key (see the connection sketch after this list)
- **Sync** - Push configurations to your instances. Profilarr compiles
everything to the right format automatically
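A trimmed sketch of the connection test behind the Bridge step, loosely based on the old `ping_service` shown further down in this diff; both Radarr and Sonarr expose `/api/v3/system/status` and authenticate via the `X-Api-Key` header. The function name and return shape are illustrative:

```python
# Trimmed connection-test sketch, loosely based on the old ping_service shown
# later in this diff. Function name and return shape are illustrative.
import requests


def test_connection(url: str, api_key: str, arr_type: str) -> tuple[bool, str]:
    response = requests.get(f"{url.rstrip('/')}/api/v3/system/status",
                            headers={'X-Api-Key': api_key},
                            timeout=10)
    if response.status_code != 200:
        return False, f'Service returned status code: {response.status_code}'
    data = response.json()
    app_name = data.get('appName', '').lower()
    if app_name != arr_type:
        return False, f'Expected {arr_type} but found {app_name}'
    return True, 'Connection successful'
```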
**For Users**
- **Ready-to-Use Configurations** - Stop spending hours piecing together
settings from forum posts. Get complete, tested quality profiles, custom
formats, and media settings designed around specific goals
- **Stay Updated** - Make local tweaks that persist across upstream updates.
View changelogs, diffs, and revert changes when needed. Merge conflicts are
handled transparently
- **Automated Upgrades** - The *arrs don't search for the best release; they grab
the first RSS item that qualifies. Profilarr triggers intelligent searches
based on filters and selectors
**For Developers**
- **Unified Architecture** - One configuration language that compiles to
Radarr/Sonarr-specific formats on sync. No more maintaining separate configs
for each app
- **Reusable Components** - Regular expressions are separate entities shared
across custom formats. Change once, update everywhere (see the sketch after
this list)
- **OSQL** - Configurations stored as append-only SQL operations. Readable,
auditable, diffable. Git-native version control with complete history
- **Testing** - Validate regex patterns, custom format conditions, and quality
profile behavior before syncing
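To illustrate the reusable-components idea, here is a toy sketch of one named regex shared by two custom formats and resolved at compile time. The data shapes loosely mirror the old format compiler that appears later in this diff, but the names are made up:

```python
# Toy illustration of pattern sharing: names and shapes are made up, loosely
# mirroring the old format compiler shown later in this diff.
patterns = {
    'remux': r'\bremux\b',
}

custom_formats = [
    {'name': 'Remux Tier 1', 'conditions': [{'type': 'release_title', 'pattern': 'remux'}]},
    {'name': 'Remux Tier 2', 'conditions': [{'type': 'release_title', 'pattern': 'remux'}]},
]


def compile_format(cf: dict) -> dict:
    """Swap pattern names for the shared regex at compile time."""
    return {
        'name': cf['name'],
        'specifications': [
            {
                'implementation': 'ReleaseTitleSpecification',
                'fields': [{'name': 'value', 'value': patterns[c['pattern']]}],
            }
            for c in cf['conditions']
            if c['type'] == 'release_title'
        ],
    }


compiled = [compile_format(cf) for cf in custom_formats]
# Editing patterns['remux'] once updates every compiled format that uses it.
```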
## Documentation
See **[dictionarry.dev](https://dictionarry.dev/)** for complete installation,
usage, and API documentation.
## Getting Started
### Compatibility
| Architecture | Support |
| ------------------------------ | ------------ |
| amd64 (x86_64) | ✅ Supported |
| arm64 (Apple Silicon, RPi 4/5) | ✅ Supported |
### Quick Installation (Docker Compose)
```yaml
services:
    profilarr:
        image: santiagosayshey/profilarr:latest # Use :beta for early access to new features
        container_name: profilarr
        ports:
            - 6868:6868
        volumes:
            - /path/to/your/data:/config # Replace with your actual path
        environment:
            - TZ=UTC # Set your timezone
        restart: unless-stopped
```
### Production
```yaml
services:
    profilarr:
        image: ghcr.io/dictionarry-hub/profilarr:latest
        container_name: profilarr
        ports:
            - "6868:6868"
        volumes:
            - ./config:/config
        environment:
            - PUID=1000
            - PGID=1000
            - TZ=Etc/UTC
            - PARSER_HOST=parser
            - PARSER_PORT=5000
        depends_on:
            parser:
                condition: service_healthy

    # Optional - only needed for CF/QP testing
    parser:
        image: ghcr.io/dictionarry-hub/profilarr-parser:latest
        container_name: profilarr-parser
        expose:
            - "5000"
```
After deployment, access the web UI at `http://[address]:6868` to begin setup.
> **Note for Windows users:** The database is case-sensitive. Use a docker volume or the WSL file system to avoid issues:
>
> - Docker volume example: `profilarr_data:/config`
> - WSL filesystem example: `/home/username/docker/profilarr:/config`
### Complete Documentation
Visit our comprehensive documentation at [dictionarry.dev/wiki/profilarr-setup](https://dictionarry.dev/wiki/profilarr-setup) for detailed installation instructions and usage guides.
## Support
### Need Help?
- **Bug Reports & Issues**: Submit technical issues via our [GitHub Issues tracker](https://github.com/Dictionarry-Hub/profilarr/issues)
- **Community Support**: Join our [Discord community](https://discord.com/invite/Y9TYP6jeYZ) for help from developers and other users
- **Database Issues**: Please direct database-specific issues to their respective repositories, as this repository focuses exclusively on Profilarr development
## Contributing
We welcome contributions from the community! Here's how you can help improve Profilarr:
- **Pull Requests**: Feel free to submit PRs for bug fixes or new features
- **Feature Suggestions**: Share your ideas through GitHub issues
- **Documentation**: Help improve our guides and documentation
- **Testing**: Try new features and report any issues
Detailed contributing guidelines will be available soon. Join our Discord to discuss potential contributions with the development team.
## Status
Currently in beta. Part of the [Dictionarry](https://github.com/Dictionarry-Hub) project to simplify media automation.
### Known Issues
- https://github.com/Dictionarry-Hub/profilarr/issues
> [!NOTE]
> The parser service is only required for custom format and quality profile
> testing. Linking, syncing, and all other features work without it. Remove the
> `parser` service and related environment variables if you don't need it.
### Development
- Currently focused on fixing bugs found in open beta
- 1.1 will focus on improving the 'setup' side of profilarr - adding media management / quality settings syncs
### Personal Note
Profilarr is maintained by a single CS student with no formal development experience, in their spare time. Development happens when time allows, which may affect response times for fixes and new features. The project is continuously improving, and your patience, understanding, and contributions are greatly appreciated as Profilarr grows and matures.
**Prerequisites**
- [Git](https://git-scm.com/) (for PCD operations)
- [Deno](https://deno.com/) 2.x
- [.NET SDK](https://dotnet.microsoft.com/) 8.0+ (optional, for parser)
```bash
git clone https://github.com/Dictionarry-Hub/profilarr.git
cd profilarr
deno task dev
```
This runs the parser service and Vite dev server concurrently. See
[CONTRIBUTING.md](docs/CONTRIBUTING.md) for architecture documentation.
### Environment Variables
| Variable | Default | Description |
| --------------- | -------------------- | --------------------------------- |
| `PUID` | `1000` | User ID for file permissions |
| `PGID` | `1000` | Group ID for file permissions |
| `UMASK` | `022` | File creation mask |
| `TZ` | `Etc/UTC` | Timezone for scheduling |
| `PORT` | `6868` | Web UI port |
| `HOST` | `0.0.0.0` | Bind address |
| `APP_BASE_PATH` | `/config` | Base path for data, logs, backups |
| `PARSER_HOST` | `localhost` | Parser service host |
| `PARSER_PORT` | `5000` | Parser service port |
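Purely as an illustration of the defaults in the table above (this is not Profilarr's actual startup code), the variables could be consumed like this:

```python
# Illustrative only - not Profilarr's startup code. Reads the documented
# environment variables, falling back to the defaults from the table above.
import os

settings = {
    'puid': int(os.environ.get('PUID', '1000')),
    'pgid': int(os.environ.get('PGID', '1000')),
    'umask': os.environ.get('UMASK', '022'),
    'tz': os.environ.get('TZ', 'Etc/UTC'),
    'port': int(os.environ.get('PORT', '6868')),
    'host': os.environ.get('HOST', '0.0.0.0'),
    'app_base_path': os.environ.get('APP_BASE_PATH', '/config'),
    'parser_host': os.environ.get('PARSER_HOST', 'localhost'),
    'parser_port': int(os.environ.get('PARSER_PORT', '5000')),
}

print(f"Listening on {settings['host']}:{settings['port']}, data under {settings['app_base_path']}")
```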
## License
[AGPL-3.0](LICENSE)
Profilarr is free and open source. You do not need to pay anyone to use it. If
someone is charging you for access to Profilarr, they are violating the spirit
of this project.


@@ -1,7 +0,0 @@
FROM python:3.9
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Use gunicorn with 10-minute timeout
CMD ["python", "-m", "app.main"]


@@ -1,188 +0,0 @@
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import logging
from .status.ping import ping_service
from .manager import (save_arr_config, get_all_arr_configs, get_arr_config,
update_arr_config, delete_arr_config)
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
bp = Blueprint('arr', __name__)
@bp.route('/ping', methods=['POST', 'OPTIONS'])
@cross_origin()
def ping():
if request.method == 'OPTIONS':
return jsonify({}), 200
data = request.get_json()
url = data.get('url')
api_key = data.get('apiKey')
arr_type = data.get('type')
if not url or not api_key or not arr_type:
return jsonify({
'success': False,
'error': 'URL, API key, and type are required'
}), 400
logger.error(f"Attempting to ping URL: {url} of type: {arr_type}")
success, message = ping_service(url, api_key, arr_type)
logger.error(f"Ping result - Success: {success}, Message: {message}")
return jsonify({
'success': success,
'message': message
}), 200 if success else 400
@bp.route('/config', methods=['POST', 'OPTIONS'])
@cross_origin()
def add_config():
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
config = request.json
# Validate sync_interval if schedule method
if config.get('sync_method') == 'schedule':
sync_interval = config.get('sync_interval', 0)
if sync_interval < 60 or sync_interval > 43200:
return jsonify({
'success': False,
'error': 'Sync interval must be between 60 minutes (1 hour) and 43200 minutes (1 month)'
}), 400
result = save_arr_config(config)
# Handle the conflict case first
if not result['success'] and result.get('status_code') == 409:
return jsonify({'success': False, 'error': result['error']}), 409
# Handle other failure cases
if not result['success']:
return jsonify(result), 400
return jsonify(result), 200
except Exception as e:
logger.error(f"Error saving arr config: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
@bp.route('/config', methods=['GET', 'OPTIONS'])
@cross_origin()
def get_configs():
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
configs = get_all_arr_configs()
logger.debug(f"Retrieved {len(configs)} arr configs")
return jsonify(configs), 200
except Exception as e:
logger.error(f"Error getting arr configs: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
@bp.route('/config/<int:id>', methods=['GET', 'PUT', 'DELETE', 'OPTIONS'])
@cross_origin()
def handle_config(id):
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
if request.method == 'GET':
config = get_arr_config(id)
if config:
logger.debug(f"Retrieved arr config: {id}")
return jsonify({'success': True, 'data': config}), 200
logger.debug(f"Arr config not found: {id}")
return jsonify({
'success': False,
'error': 'Config not found'
}), 404
elif request.method == 'PUT':
config = request.json
# Validate sync_interval if schedule method
if config.get('sync_method') == 'schedule':
sync_interval = config.get('sync_interval', 0)
if sync_interval < 60 or sync_interval > 43200:
return jsonify({
'success': False,
'error': 'Sync interval must be between 60 minutes (1 hour) and 43200 minutes (1 month)'
}), 400
result = update_arr_config(id, config)
# Handle the conflict case first
if not result['success'] and result.get('status_code') == 409:
return jsonify({
'success': False,
'error': result['error']
}), 409
# Handle other failure cases
if not result['success']:
logger.debug(f"Arr config not found for update: {id}")
return jsonify({
'success': False,
'error': 'Config not found'
}), 404
logger.debug(f"Updated arr config: {id}")
return jsonify({'success': True}), 200
elif request.method == 'DELETE':
success = delete_arr_config(id)
if success:
logger.debug(f"Deleted arr config: {id}")
return jsonify({'success': True}), 200
logger.debug(f"Arr config not found for deletion: {id}")
return jsonify({
'success': False,
'error': 'Config not found'
}), 404
except Exception as e:
logger.error(f"Error handling arr config {id}: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
@bp.route('/config/<int:id>/sync', methods=['POST', 'OPTIONS'])
@cross_origin()
def trigger_sync(id):
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
# Get the config first
config_result = get_arr_config(id)
if not config_result.get('success'):
logger.error(f"Config not found for sync: {id}")
return jsonify({
'success': False,
'error': 'Configuration not found'
}), 404
config_data = config_result.get('data')
if not config_data:
logger.error(f"Invalid config data for sync: {id}")
return jsonify({
'success': False,
'error': 'Invalid configuration data'
}), 400
# Run the import
from ..importer import handle_pull_import
handle_pull_import(id)
logger.debug(f"Manual sync triggered for arr config: {id}")
return jsonify({'success': True}), 200
except Exception as e:
logger.error(f"Error triggering sync for arr config {id}: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400


@@ -1,428 +0,0 @@
# arr/manager.py
from ..db import get_db
import json
import logging
# Import our task-utils that handle DB insertion for scheduled tasks
from .task_utils import (create_import_task_for_arr_config,
update_import_task_for_arr_config,
delete_import_task_for_arr_config)
from ..task.tasks import TaskScheduler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def save_arr_config(config):
"""
Create a new arr_config row, then create a corresponding scheduled task (if sync_method != manual).
Store the newly created task's ID in arr_config.import_task_id.
"""
with get_db() as conn:
cursor = conn.cursor()
try:
# Check if name already exists
existing = cursor.execute(
'SELECT id FROM arr_config WHERE name = ?',
(config['name'], )).fetchone()
if existing:
logger.warning(
f"[save_arr_config] Attempted to create duplicate config name: {config['name']}"
)
return {
'success': False,
'error': 'Configuration with this name already exists',
'status_code': 409
}
# 1) Insert the arr_config row
logger.debug(
f"[save_arr_config] Attempting to create new arr_config with name={config['name']} sync_method={config.get('sync_method')}"
)
cursor.execute(
'''
INSERT INTO arr_config (
name, type, tags, arr_server, api_key,
data_to_sync, last_sync_time, sync_percentage,
sync_method, sync_interval, import_as_unique,
import_task_id
)
VALUES (?, ?, ?, ?, ?, ?, NULL, 0, ?, ?, ?, NULL)
''', (
config['name'],
config['type'],
json.dumps(config.get('tags', [])),
config['arrServer'],
config['apiKey'],
json.dumps(config.get('data_to_sync', {})),
config.get('sync_method', 'manual'),
config.get('sync_interval', 0),
config.get('import_as_unique', False),
))
conn.commit()
new_config_id = cursor.lastrowid
logger.info(
f"[save_arr_config] Created new arr_config row #{new_config_id} for '{config['name']}'"
)
# 2) Create a scheduled task row if needed
sync_method = config.get('sync_method', 'manual')
sync_interval = config.get('sync_interval', 0)
task_id = create_import_task_for_arr_config(
config_id=new_config_id,
config_name=config['name'],
sync_method=sync_method,
sync_interval=sync_interval)
# 3) Update arr_config.import_task_id if a task was created
if task_id:
logger.debug(
f"[save_arr_config] Updating arr_config #{new_config_id} with import_task_id={task_id}"
)
cursor.execute(
'UPDATE arr_config SET import_task_id = ? WHERE id = ?',
(task_id, new_config_id))
conn.commit()
scheduler = TaskScheduler.get_instance()
if scheduler:
logger.debug("[save_arr_config] Reloading tasks from DB...")
scheduler.load_tasks_from_db()
return {'success': True, 'id': new_config_id}
except Exception as e:
logger.error(
f"[save_arr_config] Error saving arr config: {str(e)}")
return {'success': False, 'error': str(e)}
def update_arr_config(id, config):
"""
Update an existing arr_config row, then create/update/remove the corresponding scheduled task as needed.
"""
with get_db() as conn:
cursor = conn.cursor()
try:
# Check if name already exists (excluding current config)
existing = cursor.execute(
'SELECT id FROM arr_config WHERE name = ? AND id != ?',
(config['name'], id)).fetchone()
if existing:
logger.warning(
f"[update_arr_config] Attempted to update config #{id} to duplicate name: {config['name']}"
)
return {
'success': False,
'error': 'Configuration with this name already exists',
'status_code': 409
}
# 1) Grab existing row so we know the existing import_task_id
existing_row = cursor.execute(
'SELECT * FROM arr_config WHERE id = ?', (id, )).fetchone()
if not existing_row:
logger.debug(
f"[update_arr_config] No arr_config row found with id={id}"
)
return {'success': False, 'error': 'Configuration not found'}
existing_task_id = existing_row['import_task_id']
# 2) Update the arr_config row itself
logger.debug(
f"[update_arr_config] Updating arr_config #{id} name={config['name']} sync_method={config.get('sync_method')}"
)
cursor.execute(
'''
UPDATE arr_config
SET name = ?,
type = ?,
tags = ?,
arr_server = ?,
api_key = ?,
data_to_sync = ?,
sync_method = ?,
sync_interval = ?,
import_as_unique = ?
WHERE id = ?
''',
(config['name'], config['type'],
json.dumps(config.get('tags', [])), config['arrServer'],
config['apiKey'], json.dumps(config.get(
'data_to_sync', {})), config.get('sync_method', 'manual'),
config.get('sync_interval',
0), config.get('import_as_unique', False), id))
conn.commit()
if cursor.rowcount == 0:
logger.debug(
f"[update_arr_config] arr_config #{id} not found for update"
)
return {'success': False, 'error': 'Configuration not found'}
logger.info(f"[update_arr_config] Updated arr_config row #{id}")
# 3) Create/Update/Remove the scheduled task row
new_task_id = update_import_task_for_arr_config(
config_id=id,
config_name=config['name'],
sync_method=config.get('sync_method', 'manual'),
sync_interval=config.get('sync_interval', 0),
existing_task_id=existing_task_id)
# 4) Store new_task_id in arr_config.import_task_id
logger.debug(
f"[update_arr_config] Setting arr_config #{id} import_task_id to {new_task_id}"
)
cursor.execute(
'UPDATE arr_config SET import_task_id = ? WHERE id = ?',
(new_task_id, id))
conn.commit()
scheduler = TaskScheduler.get_instance()
if scheduler:
logger.debug("[update_arr_config] Reloading tasks from DB...")
scheduler.load_tasks_from_db()
return {'success': True}
except Exception as e:
logger.error(
f"[update_arr_config] Error updating arr config: {str(e)}")
return {'success': False, 'error': str(e)}
def delete_arr_config(id):
"""
Delete an arr_config row, plus remove its scheduled_task if any.
"""
with get_db() as conn:
cursor = conn.cursor()
try:
# 1) Fetch the row so we know which task to remove
existing_row = cursor.execute(
'SELECT * FROM arr_config WHERE id = ?', (id, )).fetchone()
if not existing_row:
logger.debug(
f"[delete_arr_config] No arr_config row found with id={id}"
)
return {'success': False, 'error': 'Configuration not found'}
existing_task_id = existing_row['import_task_id']
# 2) Delete the arr_config
logger.debug(f"[delete_arr_config] Removing arr_config #{id}")
cursor.execute('DELETE FROM arr_config WHERE id = ?', (id, ))
conn.commit()
if cursor.rowcount == 0:
logger.debug(
f"[delete_arr_config] arr_config #{id} not found for deletion"
)
return {'success': False, 'error': 'Configuration not found'}
logger.info(f"[delete_arr_config] Deleted arr_config #{id}")
# 3) If there's a scheduled task, remove it
if existing_task_id:
delete_import_task_for_arr_config(existing_task_id)
scheduler = TaskScheduler.get_instance()
if scheduler:
logger.debug("[delete_arr_config] Reloading tasks from DB...")
scheduler.load_tasks_from_db()
return {'success': True}
except Exception as e:
logger.error(
f"[delete_arr_config] Error deleting arr config: {str(e)}")
return {'success': False, 'error': str(e)}
def get_all_arr_configs():
with get_db() as conn:
cursor = conn.execute('SELECT * FROM arr_config')
rows = cursor.fetchall()
try:
configs = []
for row in rows:
configs.append({
'id':
row['id'],
'name':
row['name'],
'type':
row['type'],
'tags':
json.loads(row['tags']) if row['tags'] else [],
'arrServer':
row['arr_server'],
'apiKey':
row['api_key'],
'data_to_sync': (json.loads(row['data_to_sync'])
if row['data_to_sync'] else {}),
'last_sync_time':
row['last_sync_time'],
'sync_percentage':
row['sync_percentage'],
'sync_method':
row['sync_method'],
'sync_interval':
row['sync_interval'],
'import_as_unique':
bool(row['import_as_unique']),
'import_task_id':
row['import_task_id']
})
return {'success': True, 'data': configs}
except Exception as e:
logger.error(f"[get_all_arr_configs] Error: {str(e)}")
return {'success': False, 'error': str(e)}
def get_arr_config(id):
with get_db() as conn:
cursor = conn.execute('SELECT * FROM arr_config WHERE id = ?', (id, ))
row = cursor.fetchone()
try:
if row:
return {
'success': True,
'data': {
'id':
row['id'],
'name':
row['name'],
'type':
row['type'],
'tags':
json.loads(row['tags']) if row['tags'] else [],
'arrServer':
row['arr_server'],
'apiKey':
row['api_key'],
'data_to_sync': (json.loads(row['data_to_sync'])
if row['data_to_sync'] else {}),
'last_sync_time':
row['last_sync_time'],
'sync_percentage':
row['sync_percentage'],
# Keep these as-is
'sync_method':
row['sync_method'],
'sync_interval':
row['sync_interval'],
'import_as_unique':
bool(row['import_as_unique']),
'import_task_id':
row['import_task_id']
}
}
logger.debug(
f"[get_arr_config] No arr_config row found with id={id}")
return {'success': False, 'error': 'Configuration not found'}
except Exception as e:
logger.error(f"[get_arr_config] Error: {str(e)}")
return {'success': False, 'error': str(e)}
def get_scheduled_configs():
"""
Return all arr_configs where sync_method='schedule'.
Potentially used if you want to see scheduled ones explicitly.
"""
with get_db() as conn:
cursor = conn.execute('SELECT * FROM arr_config WHERE sync_method = ?',
('schedule', ))
rows = cursor.fetchall()
try:
configs = []
for row in rows:
configs.append({
'id': row['id'],
'name': row['name'],
'sync_interval': row['sync_interval'],
'import_task_id': row['import_task_id']
})
return {'success': True, 'data': configs}
except Exception as e:
logger.error(f"[get_scheduled_configs] Error: {str(e)}")
return {'success': False, 'error': str(e)}
def get_pull_configs():
with get_db() as conn:
rows = conn.execute(
'SELECT * FROM arr_config WHERE sync_method = "pull"').fetchall()
results = []
for row in rows:
results.append({
'id':
row['id'],
'name':
row['name'],
'type':
row['type'],
'tags':
json.loads(row['tags']) if row['tags'] else [],
'arrServer':
row['arr_server'],
'apiKey':
row['api_key'],
'data_to_sync': (json.loads(row['data_to_sync'])
if row['data_to_sync'] else {}),
'last_sync_time':
row['last_sync_time'],
'sync_percentage':
row['sync_percentage'],
'sync_method':
row['sync_method'],
'sync_interval':
row['sync_interval'],
'import_as_unique':
bool(row['import_as_unique']),
'import_task_id':
row['import_task_id']
})
return results
def check_active_sync_configs():
"""
Check if there are any ARR configurations with non-manual sync methods.
Returns (has_active_configs, details) tuple.
"""
with get_db() as conn:
cursor = conn.execute('''
SELECT id, name, sync_method, data_to_sync
FROM arr_config
WHERE sync_method != 'manual'
''')
active_configs = cursor.fetchall()
if not active_configs:
return False, None
details = []
for config in active_configs:
data_to_sync = json.loads(
config['data_to_sync'] if config['data_to_sync'] else '{}')
if data_to_sync.get('profiles') or data_to_sync.get(
'customFormats'):
details.append({
'id': config['id'],
'name': config['name'],
'sync_method': config['sync_method'],
'data': data_to_sync
})
return bool(details), details


@@ -1,78 +0,0 @@
# app/arr/status/ping.py
import socket
import requests
import logging
logger = logging.getLogger(__name__)
REQUIRED_VERSIONS = {'radarr': '5.10.4', 'sonarr': '4.0.10'}
def check_version_compatibility(installed_version, required_version):
"""
Check if installed version meets minimum required version for Radarr/Sonarr.
"""
installed_parts = [int(x) for x in installed_version.split('.')]
required_parts = [int(x) for x in required_version.split('.')]
# Only compare the parts we care about (first 3 numbers for Radarr/Sonarr)
for installed, required in zip(installed_parts[:3], required_parts[:3]):
if installed < required:
return False
if installed > required:
return True
return True
def ping_service(url, api_key, arr_type):
"""
Ping an Arr service and verify its type and version
"""
try:
base_url = url.rstrip('/')
headers = {'X-Api-Key': api_key}
logger.warning(f"Attempting to connect to {base_url} for {arr_type}")
response = requests.get(f"{base_url}/api/v3/system/status",
headers=headers,
timeout=10)
logger.warning(f"Response status: {response.status_code}")
logger.warning(f"Response content: {response.text}")
if response.status_code != 200:
return False, f"Service returned status code: {response.status_code}"
data = response.json()
logger.warning(f"Parsed response data: {data}")
# First check app type
app_name = data.get('appName', '').lower()
version = data.get('version')
logger.warning(f"Found app: {app_name} version: {version}")
# Check app type
if arr_type == 'radarr' and app_name != 'radarr':
return False, f"Expected Radarr but found {app_name}"
elif arr_type == 'sonarr' and app_name != 'sonarr':
return False, f"Expected Sonarr but found {app_name}"
# Check version
if not version:
return False, "Could not determine application version"
required_version = REQUIRED_VERSIONS.get(arr_type)
if not check_version_compatibility(version, required_version):
return False, f"{app_name.title()} version {version} is not supported. Minimum required version is {required_version}"
return True, "Connection successful and application type and version verified"
except requests.exceptions.Timeout:
return False, "Connection timed out"
except requests.exceptions.ConnectionError:
return False, "Failed to connect to service"
except Exception as e:
logger.error(f"Error pinging service: {str(e)}")
return False, f"Error: {str(e)}"


@@ -1,140 +0,0 @@
# arr/task_utils.py
import logging
from ..db import get_db
logger = logging.getLogger(__name__)
def create_import_task_for_arr_config(config_id, config_name, sync_method,
sync_interval):
"""
Create a scheduled task for the given ARR config (if needed).
Returns the newly-created task id or None.
"""
if sync_method == 'manual':
logger.debug(
f"[ARR Tasks] No import task created for {config_name} because sync_method=manual"
)
return None
with get_db() as conn:
cursor = conn.cursor()
# pull: not scheduled; on-demand during git pull
if sync_method == 'pull':
logger.debug(
f"[ARR Tasks] No scheduled task created for {config_name} because sync_method=pull (runs on git pull)"
)
return None
# schedule: create an interval-based task
task_type = 'ImportSchedule'
interval_minutes = sync_interval or 0
# Insert into scheduled_tasks table
cursor.execute(
'''
INSERT INTO scheduled_tasks (name, type, interval_minutes, status)
VALUES (?, ?, ?, ?)
''', (f"Import for ARR #{config_id} - {config_name}", task_type,
interval_minutes, 'pending'))
new_task_id = cursor.lastrowid
conn.commit()
logger.debug(
f"[ARR Tasks] Created new {task_type} task with ID {new_task_id} for ARR config {config_id}"
)
return new_task_id
def update_import_task_for_arr_config(config_id, config_name, sync_method,
sync_interval, existing_task_id):
"""
Update the existing scheduled task for the given ARR config (if needed).
If the sync_method changes from 'pull' or 'manual' to 'schedule', we create or update.
If it changes from 'schedule' to 'pull' (or 'manual'), we delete the old scheduled row.
"""
with get_db() as conn:
cursor = conn.cursor()
# If user changed to manual or pull => remove the old row (if any)
if sync_method in ['manual', 'pull']:
if existing_task_id:
logger.debug(
f"[update_import_task_for_arr_config] Removing old task {existing_task_id} because sync_method={sync_method}"
)
cursor.execute('DELETE FROM scheduled_tasks WHERE id = ?',
(existing_task_id, ))
deleted_count = cursor.rowcount
conn.commit()
if deleted_count:
logger.info(
f"[update_import_task_for_arr_config] Deleted old task {existing_task_id} for ARR #{config_id}"
)
# For 'pull' or 'manual', we do NOT create a new row in `scheduled_tasks`
return None
# Otherwise, sync_method='schedule' => create or update
# (We keep the same logic as before if user wants a scheduled import)
task_type = 'ImportSchedule'
interval_minutes = sync_interval or 0
# If there's NO existing task, create a new one
if not existing_task_id:
logger.debug(
f"[update_import_task_for_arr_config] No existing task for ARR #{config_id}; creating new schedule."
)
return create_import_task_for_arr_config(config_id, config_name,
sync_method,
sync_interval)
# If we DO have an existing scheduled task => update it
logger.debug(
f"[update_import_task_for_arr_config] Updating existing task {existing_task_id} for ARR #{config_id}, interval={interval_minutes}"
)
cursor.execute(
'''
UPDATE scheduled_tasks
SET name = ?, type = ?, interval_minutes = ?
WHERE id = ?
''', (
f"Import for ARR #{config_id} - {config_name}",
task_type,
interval_minutes,
existing_task_id,
))
updated_count = cursor.rowcount
conn.commit()
if updated_count == 0:
logger.warning(
f"[update_import_task_for_arr_config] Could not find scheduled task {existing_task_id} for ARR #{config_id}, creating new."
)
return create_import_task_for_arr_config(config_id, config_name,
sync_method,
sync_interval)
logger.debug(
f"[update_import_task_for_arr_config] Successfully updated scheduled task {existing_task_id} for ARR #{config_id}"
)
return existing_task_id
def delete_import_task_for_arr_config(task_id):
"""
Delete the import task if it exists.
"""
if not task_id:
return
with get_db() as conn:
cursor = conn.cursor()
cursor.execute('DELETE FROM scheduled_tasks WHERE id = ?', (task_id, ))
conn.commit()
if cursor.rowcount > 0:
logger.debug(f"[ARR Tasks] Deleted import task with ID {task_id}")
else:
logger.debug(
f"[ARR Tasks] No import task found to delete with ID {task_id}"
)


@@ -1,118 +0,0 @@
# backend/app/auth/__init__.py
from flask import Blueprint, jsonify, request, session
from werkzeug.security import generate_password_hash, check_password_hash
import secrets
import logging
from ..db import get_db
logger = logging.getLogger(__name__)
bp = Blueprint('auth', __name__)
@bp.route('/setup', methods=['GET', 'POST'])
def setup():
db = get_db()
# Handle GET request to check if setup is needed
if request.method == 'GET':
if db.execute('SELECT 1 FROM auth').fetchone():
return jsonify({'error': 'Auth already configured'}), 400
return jsonify({'needs_setup': True}), 200
# Handle POST request for actual setup
if db.execute('SELECT 1 FROM auth').fetchone():
logger.warning('Failed setup attempt - auth already configured')
return jsonify({'error': 'Auth already configured'}), 400
data = request.get_json()
username = data.get('username', 'admin')
password = data.get('password')
if not password:
logger.error('Setup failed - no password provided')
return jsonify({'error': 'Password is required'}), 400
api_key = secrets.token_urlsafe(32)
password_hash = generate_password_hash(password)
session_id = secrets.token_urlsafe(32) # Generate a new session ID
try:
db.execute(
'INSERT INTO auth (username, password_hash, api_key, session_id) VALUES (?, ?, ?, ?)',
(username, password_hash, api_key, session_id))
db.commit()
logger.info('Initial auth setup completed successfully')
# Set up session after successful creation
session['authenticated'] = True
session['session_id'] = session_id
session.permanent = True
return jsonify({
'message': 'Auth configured successfully',
'username': username,
'api_key': api_key,
'authenticated': True
})
except Exception as e:
logger.error(f'Setup failed - database error: {str(e)}')
return jsonify({'error': 'Failed to setup authentication'}), 500
@bp.route('/authenticate', methods=['POST'])
def authenticate():
db = get_db()
data = request.get_json()
username = data.get('username')
password = data.get('password')
ip_address = request.remote_addr
# Check recent failed attempts
recent_attempts = db.execute(
'''
SELECT COUNT(*) as count FROM failed_attempts
WHERE ip_address = ?
AND attempt_time > datetime('now', '-15 minutes')
''', (ip_address, )).fetchone()['count']
if recent_attempts >= 5:
logger.warning(f'Too many failed attempts from IP: {ip_address}')
return jsonify({'error':
'Too many failed attempts. Try again later.'}), 429
if not username or not password:
logger.warning('Authentication attempt with missing credentials')
return jsonify({'error': 'Username and password required'}), 400
user = db.execute('SELECT * FROM auth WHERE username = ?',
(username, )).fetchone()
if user and check_password_hash(user['password_hash'], password):
# Generate a new session ID
new_session_id = secrets.token_urlsafe(32)
db.execute('UPDATE auth SET session_id = ? WHERE username = ?',
(new_session_id, username))
db.commit()
# Set up session
session['authenticated'] = True
session[
'session_id'] = new_session_id # Store session ID in the session
session.permanent = True
# Clear failed attempts on success
db.execute('DELETE FROM failed_attempts WHERE ip_address = ?',
(ip_address, ))
db.commit()
logger.info(f'Successful authentication for user: {username}')
return jsonify({'authenticated': True})
# Record failed attempt
db.execute('INSERT INTO failed_attempts (ip_address) VALUES (?)',
(ip_address, ))
db.commit()
logger.warning(f'Failed authentication attempt for user: {username}')
return jsonify({'error': 'Invalid credentials'}), 401


@@ -1,179 +0,0 @@
# app/backup/__init__.py
from flask import Blueprint, request, jsonify, send_file
import logging
from ..task.backup.backup import BackupManager
from ..db import get_db
import os
from datetime import datetime
import tempfile
import zipfile
logger = logging.getLogger(__name__)
bp = Blueprint('backup', __name__)
@bp.route('', methods=['GET'])
def list_backups():
"""Get list of all backups"""
try:
manager = BackupManager()
backups = manager.list_backups()
# Add file size and last modified time to each backup
for backup in backups:
file_path = os.path.join(manager.backup_dir, backup['filename'])
if os.path.exists(file_path):
backup['size'] = os.path.getsize(file_path)
backup['created_at'] = datetime.fromtimestamp(
os.path.getmtime(file_path)).isoformat()
else:
backup['size'] = None
backup['created_at'] = None
return jsonify(backups), 200
except Exception as e:
logger.error(f'Error listing backups: {str(e)}')
return jsonify({'error': 'Failed to list backups'}), 500
@bp.route('', methods=['POST'])
def create_backup():
"""Create a new backup manually"""
try:
manager = BackupManager()
success, result = manager.create_backup()
if success:
return jsonify({
'message': 'Backup created successfully',
'filename': result
}), 201
else:
return jsonify({'error':
f'Failed to create backup: {result}'}), 500
except Exception as e:
logger.error(f'Error creating backup: {str(e)}')
return jsonify({'error': 'Failed to create backup'}), 500
@bp.route('/<path:filename>', methods=['GET'])
def download_backup(filename):
"""Download a specific backup file"""
try:
manager = BackupManager()
file_path = os.path.join(manager.backup_dir, filename)
if not os.path.exists(file_path):
return jsonify({'error': 'Backup file not found'}), 404
return send_file(file_path,
mimetype='application/zip',
as_attachment=True,
download_name=filename)
except Exception as e:
logger.error(f'Error downloading backup: {str(e)}')
return jsonify({'error': 'Failed to download backup'}), 500
@bp.route('/<path:filename>/restore', methods=['POST'])
def restore_backup(filename):
"""Restore from a specific backup"""
try:
manager = BackupManager()
success, message = manager.restore_backup(filename)
if success:
return jsonify({'message': 'Backup restored successfully'}), 200
else:
return jsonify({'error':
f'Failed to restore backup: {message}'}), 500
except Exception as e:
logger.error(f'Error restoring backup: {str(e)}')
return jsonify({'error': 'Failed to restore backup'}), 500
@bp.route('/<path:filename>', methods=['DELETE'])
def delete_backup(filename):
"""Delete a specific backup"""
try:
manager = BackupManager()
file_path = os.path.join(manager.backup_dir, filename)
if not os.path.exists(file_path):
return jsonify({'error': 'Backup file not found'}), 404
# Remove the file
os.remove(file_path)
# Remove from database
with get_db() as conn:
conn.execute('DELETE FROM backups WHERE filename = ?',
(filename, ))
conn.commit()
return jsonify({'message': 'Backup deleted successfully'}), 200
except Exception as e:
logger.error(f'Error deleting backup: {str(e)}')
return jsonify({'error': 'Failed to delete backup'}), 500
@bp.route('/import', methods=['POST'])
def import_backup():
"""Import and restore from an uploaded backup file"""
if 'file' not in request.files:
return jsonify({'error': 'No file part in the request'}), 400
file = request.files['file']
if file.filename == '':
return jsonify({'error': 'No file selected for uploading'}), 400
if not file.filename.endswith('.zip'):
return jsonify({'error': 'File must be a zip archive'}), 400
try:
# Create a temporary file to store the upload
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
file.save(temp_file.name)
# Validate the zip file
validation_result = is_valid_backup_zip(temp_file.name)
if not validation_result[0]:
os.unlink(temp_file.name)
return jsonify({'error': validation_result[1]}), 400
# Use the BackupManager to restore from this file
manager = BackupManager()
success, message = manager.restore_backup_from_file(temp_file.name)
# Delete the temporary file
os.unlink(temp_file.name)
if success:
return jsonify(
{'message': 'Backup imported and restored successfully'}), 200
else:
return jsonify(
{'error':
f'Failed to import and restore backup: {message}'}), 500
except Exception as e:
logger.error(f'Error importing and restoring backup: {str(e)}')
return jsonify({'error': 'Failed to import and restore backup'}), 500
def is_valid_backup_zip(file_path):
"""Check if the zip file is a valid backup"""
try:
if os.path.getsize(file_path) > 100 * 1024 * 1024: # 100 MB
return False, "Backup file is too large (max 100 MB)"
with zipfile.ZipFile(file_path, 'r') as zipf:
file_list = zipf.namelist()
if 'profilarr.db' not in file_list:
return False, "Backup file does not contain profilarr.db"
return True, "Valid backup file"
except zipfile.BadZipFile:
return False, "Invalid zip file"

View File

@@ -1,12 +0,0 @@
# app/compile/__init__.py
from .mappings import TargetApp, ValueResolver
from .format_compiler import (CustomFormat, FormatConverter, FormatProcessor,
compile_custom_format)
from .profile_compiler import (ProfileConverter, ProfileProcessor,
compile_quality_profile)
__all__ = [
'TargetApp', 'ValueResolver', 'CustomFormat', 'FormatConverter',
'FormatProcessor', 'compile_custom_format', 'ProfileConverter',
'ProfileProcessor', 'compile_quality_profile'
]

View File

@@ -1,224 +0,0 @@
# app/compile/format_compiler.py
"""Format compilation module for converting custom formats"""
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
import json
import yaml
from .mappings import TargetApp, ValueResolver
@dataclass
class Specification:
"""Data class for format specifications"""
name: str
implementation: str
negate: bool = False
required: bool = False
fields: Optional[List[Dict[str, str]]] = None
def __post_init__(self):
if self.fields is None:
self.fields = []
@dataclass
class CustomFormat:
"""Data class for custom format definitions"""
name: str
description: str
tags: List[str]
conditions: List[Dict]
tests: List[Dict]
@dataclass
class ConvertedFormat:
"""Data class for converted format output"""
name: str
specifications: List[Specification]
class FormatConverter:
"""Converts between different format types"""
def __init__(self, patterns: Dict[str, str]):
self.patterns = patterns
def _create_specification(
self, condition: Dict,
target_app: TargetApp) -> Optional[Specification]:
condition_type = condition['type']
if condition_type in ['release_title', 'release_group', 'edition']:
pattern_name = condition['pattern']
pattern = self.patterns.get(pattern_name)
if not pattern:
return None
implementation = ('ReleaseTitleSpecification'
if condition_type == 'release_title' else
'ReleaseGroupSpecification' if condition_type
== 'release_group' else 'EditionSpecification')
fields = [{'name': 'value', 'value': pattern}]
elif condition_type == 'source':
implementation = 'SourceSpecification'
value = ValueResolver.get_source(condition['source'], target_app)
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'resolution':
implementation = 'ResolutionSpecification'
value = ValueResolver.get_resolution(condition['resolution'])
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'indexer_flag':
implementation = 'IndexerFlagSpecification'
value = ValueResolver.get_indexer_flag(condition.get('flag', ''),
target_app)
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'quality_modifier':
if target_app == TargetApp.SONARR:
return None
implementation = 'QualityModifierSpecification'
value = ValueResolver.get_quality_modifier(
condition['qualityModifier'])
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'size':
implementation = 'SizeSpecification'
min_size = condition.get('minSize')
max_size = condition.get('maxSize')
fields = [{
'name': 'min',
'value': min_size
}, {
'name': 'max',
'value': max_size
}]
elif condition_type == 'year':
implementation = 'YearSpecification'
min_year = condition.get('minYear')
max_year = condition.get('maxYear')
fields = [{
'name': 'min',
'value': min_year
}, {
'name': 'max',
'value': max_year
}]
elif condition_type == 'release_type':
if target_app == TargetApp.RADARR:
return None
implementation = 'ReleaseTypeSpecification'
value = ValueResolver.get_release_type(condition['releaseType'])
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'language':
implementation = 'LanguageSpecification'
language_name = condition['language'].lower()
try:
language_data = ValueResolver.get_language(language_name,
target_app,
for_profile=False)
fields = [{'name': 'value', 'value': language_data['id']}]
if 'exceptLanguage' in condition:
except_value = condition['exceptLanguage']
fields.append({
'name': 'exceptLanguage',
'value': except_value
})
except Exception:
return None
else:
return None
return Specification(name=condition.get('name', ''),
implementation=implementation,
negate=condition.get('negate', False),
required=condition.get('required', False),
fields=fields)
def convert_format(self, custom_format: CustomFormat,
target_app: TargetApp) -> ConvertedFormat:
specifications = []
for condition in custom_format.conditions:
try:
spec = self._create_specification(condition, target_app)
if spec:
specifications.append(spec)
except Exception:
continue
return ConvertedFormat(name=custom_format.name,
specifications=specifications)
class FormatProcessor:
"""Main class for processing format files"""
def __init__(self, input_dir: Path, output_dir: Path, patterns_dir: Path):
self.input_dir = input_dir
self.output_dir = output_dir
self.patterns = self._load_patterns(patterns_dir)
self.converter = FormatConverter(self.patterns)
@staticmethod
def _load_patterns(patterns_dir: Path) -> Dict[str, str]:
patterns = {}
for file_path in patterns_dir.glob('*.yml'):
with file_path.open('r') as f:
pattern_data = yaml.safe_load(f)
patterns[pattern_data['name']] = pattern_data['pattern']
return patterns
def _load_custom_format(self, format_name: str) -> Optional[CustomFormat]:
format_path = self.input_dir / f"{format_name}.yml"
if not format_path.exists():
return None
with format_path.open('r') as f:
raw_data = yaml.safe_load(f)
return CustomFormat(**raw_data)
def process_format(self,
format_name: str,
target_app: TargetApp,
return_data: bool = False) -> Optional[ConvertedFormat]:
custom_format = self._load_custom_format(format_name)
if not custom_format:
return None
converted_format = self.converter.convert_format(
custom_format, target_app)
output_data = [{
'name':
converted_format.name,
'specifications':
[vars(spec) for spec in converted_format.specifications]
}]
if not return_data:
output_path = self.output_dir / f"{format_name}.json"
with output_path.open('w') as f:
json.dump(output_data, f, indent=2)
return converted_format
def compile_custom_format(format_data: Dict) -> List[Dict]:
custom_format = CustomFormat(**format_data)
patterns = {}
converter = FormatConverter(patterns)
converted = converter.convert_format(custom_format, TargetApp.RADARR)
output_data = [{
'name':
converted.name,
'specifications': [vars(spec) for spec in converted.specifications]
}]
return output_data
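A hypothetical usage sketch of compile_custom_format with an inline format definition; real definitions are loaded from the custom_formats YAML files, and the import path below is assumed from the module header comment.

from app.compile.format_compiler import compile_custom_format  # path assumed

example_format = {
    'name': 'Example 2160p',
    'description': 'Matches 2160p releases',
    'tags': [],
    'conditions': [
        {'type': 'resolution', 'name': 'Is 2160p', 'resolution': '2160p'}
    ],
    'tests': []
}

# Produces a single Radarr-shaped entry containing one ResolutionSpecification
print(compile_custom_format(example_format))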

View File

@@ -1,990 +0,0 @@
# app/compile/mappings.py
"""Centralized constants and mappings for arr applications"""
from enum import Enum, auto
from typing import Dict, Any
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TargetApp(Enum):
"""Enum for target application types"""
RADARR = auto()
SONARR = auto()
class IndexerFlags:
"""Indexer flag mappings for both applications"""
RADARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 32,
'scene': 128,
'freeleech_75': 256,
'freeleech_25': 512,
'nuked': 2048,
'ptp_golden': 8,
'ptp_approved': 16
}
SONARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 8,
'scene': 16,
'freeleech_75': 32,
'freeleech_25': 64,
'nuked': 128
}
class Sources:
"""Source mappings for both applications"""
RADARR = {
'cam': 1,
'telesync': 2,
'telecine': 3,
'workprint': 4,
'dvd': 5,
'tv': 6,
'web_dl': 7,
'webrip': 8,
'bluray': 9
}
SONARR = {
'television': 1,
'television_raw': 2,
'web_dl': 3,
'webrip': 4,
'dvd': 5,
'bluray': 6,
'bluray_raw': 7
}
class Quality_Modifiers:
"""Quality modifier mappings for Radarr ONLY"""
RADARR = {
'none': 0,
'regional': 1,
'screener': 2,
'rawhd': 3,
'brdisk': 4,
'remux': 5,
}
class Release_Types:
"""Release type mappings for Sonarr ONLY"""
SONARR = {
'none': 0,
'single_episode': 1,
'multi_episode': 2,
'season_pack': 3,
}
class Qualities:
"""Quality mappings for both applications"""
COMMON_RESOLUTIONS = {
'360p': 360,
'480p': 480,
'540p': 540,
'576p': 576,
'720p': 720,
'1080p': 1080,
'2160p': 2160
}
RADARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "tv",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "webdl",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "tv",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "webdl",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "webdl",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "tv",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "tv",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webrip",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webrip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webrip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "tv",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webrip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "webdl",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-480p": {
"id": 20,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"Bluray-576p": {
"id": 21,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
},
"BR-DISK": {
"id": 22,
"name": "BR-DISK",
"source": "bluray",
"resolution": 1080
},
"DVD-R": {
"id": 23,
"name": "DVD-R",
"source": "dvd",
"resolution": 480
},
"WORKPRINT": {
"id": 24,
"name": "WORKPRINT",
"source": "workprint",
"resolution": 0
},
"CAM": {
"id": 25,
"name": "CAM",
"source": "cam",
"resolution": 0
},
"TELESYNC": {
"id": 26,
"name": "TELESYNC",
"source": "telesync",
"resolution": 0
},
"TELECINE": {
"id": 27,
"name": "TELECINE",
"source": "telecine",
"resolution": 0
},
"DVDSCR": {
"id": 28,
"name": "DVDSCR",
"source": "dvd",
"resolution": 480
},
"REGIONAL": {
"id": 29,
"name": "REGIONAL",
"source": "dvd",
"resolution": 480
},
"Remux-1080p": {
"id": 30,
"name": "Remux-1080p",
"source": "bluray",
"resolution": 1080
},
"Remux-2160p": {
"id": 31,
"name": "Remux-2160p",
"source": "bluray",
"resolution": 2160
}
}
SONARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "television",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "web",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "television",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "web",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "web",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "television",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "televisionRaw",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webRip",
"resolution": 480
},
"Bluray-480p": {
"id": 13,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webRip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webRip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "television",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webRip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "web",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-1080p Remux": {
"id": 20,
"name": "Bluray-1080p Remux",
"source": "blurayRaw",
"resolution": 1080
},
"Bluray-2160p Remux": {
"id": 21,
"name": "Bluray-2160p Remux",
"source": "blurayRaw",
"resolution": 2160
},
"Bluray-576p": {
"id": 22,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
}
}
class Languages:
"""Language mappings for both applications"""
RADARR = {
'any': {
'id': -1,
'name': 'Any'
},
'original': {
'id': -2,
'name': 'Original'
},
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'hindi': {
'id': 26,
'name': 'Hindi'
},
'romanian': {
'id': 27,
'name': 'Romanian'
},
'thai': {
'id': 28,
'name': 'Thai'
},
'bulgarian': {
'id': 29,
'name': 'Bulgarian'
},
'portuguese_br': {
'id': 30,
'name': 'Portuguese (Brazil)'
},
'arabic': {
'id': 31,
'name': 'Arabic'
},
'ukrainian': {
'id': 32,
'name': 'Ukrainian'
},
'persian': {
'id': 33,
'name': 'Persian'
},
'bengali': {
'id': 34,
'name': 'Bengali'
},
'slovak': {
'id': 35,
'name': 'Slovak'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'spanish_latino': {
'id': 37,
'name': 'Spanish (Latino)'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'telugu': {
'id': 45,
'name': 'Telugu'
},
'macedonian': {
'id': 46,
'name': 'Macedonian'
},
'slovenian': {
'id': 47,
'name': 'Slovenian'
},
'malayalam': {
'id': 48,
'name': 'Malayalam'
},
'kannada': {
'id': 49,
'name': 'Kannada'
},
'albanian': {
'id': 50,
'name': 'Albanian'
},
'afrikaans': {
'id': 51,
'name': 'Afrikaans'
}
}
SONARR = {
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'arabic': {
'id': 26,
'name': 'Arabic'
},
'hindi': {
'id': 27,
'name': 'Hindi'
},
'bulgarian': {
'id': 28,
'name': 'Bulgarian'
},
'malayalam': {
'id': 29,
'name': 'Malayalam'
},
'ukrainian': {
'id': 30,
'name': 'Ukrainian'
},
'slovak': {
'id': 31,
'name': 'Slovak'
},
'thai': {
'id': 32,
'name': 'Thai'
},
'portuguese_br': {
'id': 33,
'name': 'Portuguese (Brazil)'
},
'spanish_latino': {
'id': 34,
'name': 'Spanish (Latino)'
},
'romanian': {
'id': 35,
'name': 'Romanian'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'persian': {
'id': 37,
'name': 'Persian'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'macedonian': {
'id': 45,
'name': 'Macedonian'
},
'slovenian': {
'id': 46,
'name': 'Slovenian'
},
'original': {
'id': -2,
'name': 'Original'
}
}
class QualityNameMapper:
"""Maps between different quality naming conventions"""
REMUX_MAPPINGS = {
TargetApp.SONARR: {
"Remux-1080p": "Bluray-1080p Remux",
"Remux-2160p": "Bluray-2160p Remux"
},
TargetApp.RADARR: {
"Remux-1080p": "Remux-1080p",
"Remux-2160p": "Remux-2160p"
}
}
ALTERNATE_NAMES = {
"BR-Disk": "BR-DISK",
"BR-DISK": "BR-DISK",
"BRDISK": "BR-DISK",
"BR_DISK": "BR-DISK",
"BLURAY-DISK": "BR-DISK",
"BLURAY_DISK": "BR-DISK",
"BLURAYDISK": "BR-DISK",
"Telecine": "TELECINE",
"TELECINE": "TELECINE",
"TeleCine": "TELECINE",
"Telesync": "TELESYNC",
"TELESYNC": "TELESYNC",
"TeleSync": "TELESYNC",
}
@classmethod
def map_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""
Maps quality names between different formats based on target app
Args:
name: The quality name to map
target_app: The target application (RADARR or SONARR)
Returns:
The mapped quality name
"""
# Handle empty or None cases
if not name:
return name
# First check for remux mappings
if name in cls.REMUX_MAPPINGS.get(target_app, {}):
return cls.REMUX_MAPPINGS[target_app][name]
# Then check for alternate spellings
normalized_name = name.upper().replace("-", "").replace("_", "")
for alt_name, standard_name in cls.ALTERNATE_NAMES.items():
if normalized_name == alt_name.upper().replace("-", "").replace(
"_", ""):
return standard_name
return name
class LanguageNameMapper:
"""Maps between different language naming conventions"""
ALTERNATE_NAMES = {
"spanish-latino": "spanish_latino",
"spanish_latino": "spanish_latino",
"spanishlatino": "spanish_latino",
"portuguese-br": "portuguese_br",
"portuguese_br": "portuguese_br",
"portuguesebr": "portuguese_br",
"portuguese-brazil": "portuguese_br",
"portuguese_brazil": "portuguese_br"
}
@classmethod
def normalize_language_name(cls, name: str) -> str:
"""
Normalizes language names to a consistent format
Args:
name: The language name to normalize
Returns:
The normalized language name
"""
if not name:
return name
normalized = name.lower().replace(" ", "_")
return cls.ALTERNATE_NAMES.get(normalized, normalized)
class ValueResolver:
"""Helper class to resolve values based on target app"""
@classmethod
def get_indexer_flag(cls, flag: str, target_app: TargetApp) -> int:
flags = IndexerFlags.RADARR if target_app == TargetApp.RADARR else IndexerFlags.SONARR
return flags.get(flag.lower(), 0)
@classmethod
def get_source(cls, source: str, target_app: TargetApp) -> int:
sources = Sources.RADARR if target_app == TargetApp.RADARR else Sources.SONARR
return sources.get(source.lower(), 0)
@classmethod
def get_resolution(cls, resolution: str) -> int:
return Qualities.COMMON_RESOLUTIONS.get(resolution.lower(), 0)
@classmethod
def get_qualities(cls, target_app: TargetApp) -> Dict[str, Any]:
qualities = Qualities.RADARR if target_app == TargetApp.RADARR else Qualities.SONARR
return qualities
@classmethod
def get_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""Maps quality names between different formats based on target app"""
return QualityNameMapper.map_quality_name(name, target_app)
@classmethod
def get_quality_modifier(cls, quality_modifier: str) -> int:
return Quality_Modifiers.RADARR.get(quality_modifier.lower(), 0)
@classmethod
def get_release_type(cls, release_type: str) -> int:
return Release_Types.SONARR.get(release_type.lower(), 0)
@classmethod
def get_language(cls,
language_name: str,
target_app: TargetApp,
for_profile: bool = True) -> Dict[str, Any]:
"""
Get language mapping based on target app and context
Args:
language_name: Name of the language to look up
target_app: Target application (RADARR or SONARR)
for_profile: If True, this is for a quality profile. If False, this is for a custom format.
"""
languages = Languages.RADARR if target_app == TargetApp.RADARR else Languages.SONARR
# For profiles, only Radarr uses language settings
if for_profile and target_app == TargetApp.SONARR:
return {'id': -2, 'name': 'Original'}
# Normalize the language name
normalized_name = LanguageNameMapper.normalize_language_name(
language_name)
language_data = languages.get(normalized_name)
if not language_data:
logger.warning(
f"Language '{language_name}' (normalized: '{normalized_name}') "
f"not found in {target_app} mappings, falling back to Unknown")
language_data = languages['unknown']
return language_data
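A few illustrative lookups against ValueResolver, with expected results taken from the tables above; the import path is assumed from the module header comment.

from app.compile.mappings import TargetApp, ValueResolver

ValueResolver.get_source('bluray', TargetApp.RADARR)             # -> 9
ValueResolver.get_source('bluray', TargetApp.SONARR)             # -> 6
ValueResolver.get_resolution('1080p')                            # -> 1080
ValueResolver.get_quality_name('Remux-1080p', TargetApp.SONARR)  # -> 'Bluray-1080p Remux'
ValueResolver.get_language('portuguese-br', TargetApp.RADARR, for_profile=False)
# -> {'id': 30, 'name': 'Portuguese (Brazil)'}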

View File

@@ -1,536 +0,0 @@
"""Profile compilation module for converting quality profiles"""
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
import json
import yaml
import logging
import asyncio
import aiohttp
from .mappings import TargetApp, ValueResolver
from ..data.utils import load_yaml_file, get_category_directory
from ..importarr.format_memory import import_format_from_memory, async_import_format_from_memory
from ..db.queries.settings import get_language_import_score
logger = logging.getLogger(__name__)
@dataclass
class ConvertedProfile:
"""Data class for converted profile output"""
name: str
items: List[Dict]
format_items: List[Dict]
upgrade_allowed: bool
min_format_score: int
cutoff_format_score: int
min_upgrade_format_score: int
language: Dict
cutoff: Optional[int] = None
class ProfileConverter:
"""Converts quality profiles between different formats"""
def __init__(self,
target_app: TargetApp,
base_url: str = None,
api_key: str = None,
format_importer: Callable = None,
import_as_unique: bool = False):
self.target_app = target_app
self.base_url = base_url
self.api_key = api_key
self.format_importer = format_importer
self.import_as_unique = import_as_unique
self.quality_mappings = ValueResolver.get_qualities(target_app)
def _convert_group_id(self, group_id: int) -> int:
if group_id < 0:
return 1000 + abs(group_id)
return group_id
def _create_all_qualities(self,
allowed_qualities: List[str]) -> List[Dict]:
qualities = []
for quality_name in allowed_qualities:
if quality_name in self.quality_mappings:
qualities.append({
"quality":
self.quality_mappings[quality_name].copy(),
"items": [],
"allowed":
True
})
return qualities
def _generate_language_formats(self,
behaviour: str,
language: str) -> List[Dict]:
"""
Generate language-specific format configurations without importing them.
This is useful for pre-loading and caching language formats.
Args:
behaviour: Language behavior ('must', 'prefer', 'only')
language: Language code ('english', 'french', etc.)
Returns:
List of format configurations for the specified language
"""
try:
formats_to_import = []
# Get the base format as a template
base_format_path = f"{get_category_directory('custom_format')}/Not English.yml"
base_format = load_yaml_file(base_format_path)
# Get language data for translations
language_data = ValueResolver.get_language(
language, self.target_app, for_profile=False
)
# Create the main "Not X" format (e.g., "Not French")
modified_format = base_format.copy()
base_name = f"Not {language_data['name']}"
modified_format['name'] = base_name
# Update conditions to refer to the specific language
for condition in modified_format['conditions']:
if condition.get('type') == 'language':
condition['language'] = language
if condition.get('name') == 'Not English':
condition['name'] = f"Not {language_data['name']}"
elif condition.get('name') == 'Includes English':
condition['name'] = f"Includes {language_data['name']}"
formats_to_import.append(modified_format)
# Add additional formats for 'only' behavior
if behaviour == 'only':
additional_formats = [
"Not Only English", "Not Only English (Missing)"
]
for format_name in additional_formats:
format_path = f"{get_category_directory('custom_format')}/{format_name}.yml"
format_data = load_yaml_file(format_path)
format_data['name'] = format_data['name'].replace(
'English', language_data['name'])
for c in format_data.get('conditions', []):
if c.get('type') == 'language':
c['language'] = language
if c.get('name') == 'Not English':
c['name'] = f"Not {language_data['name']}"
elif c.get('name') == 'Includes English':
c['name'] = f"Includes {language_data['name']}"
formats_to_import.append(format_data)
return formats_to_import
except Exception as e:
logger.error(f"Error generating language formats: {str(e)}")
raise
def _process_language_formats(
self,
behaviour: str,
language: str,
import_as_unique: bool = False) -> List[Dict]:
"""
Process language formats by either importing them directly or using the format_importer.
When using the cached profile import, the format_importer will be a dummy function that
just returns success without actually importing, since the formats were already imported.
"""
try:
# Generate the format configurations
formats_to_import = self._generate_language_formats(behaviour, language)
format_configs = []
# Check if we're using a format importer (might be None for direct format returns)
if self.format_importer is None:
# No importer provided - we're in the special caching mode
# Just create the format configs directly without importing
logger.info(f"Using pre-cached language formats for {behaviour}_{language}")
for format_data in formats_to_import:
format_name = format_data['name']
if import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': format_name,
'score': get_language_import_score()
})
return format_configs
# Regular mode with an importer - check if it's our dummy cached importer
if self.format_importer and hasattr(self.format_importer, '__name__') and self.format_importer.__name__ == 'cached_format_importer':
logger.info(f"Using cached importer for language formats {behaviour}_{language}")
# Simply call the dummy importer just to keep the flow consistent,
# but we'll generate our own format configs
self.format_importer()
# Create format configs directly
for format_data in formats_to_import:
format_name = format_data['name']
if import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': format_name,
'score': get_language_import_score()
})
return format_configs
# If we've reached here, we're doing a regular import
if not self.base_url or not self.api_key or not self.format_importer:
logger.error("Missing required credentials or format importer")
raise ValueError(
"base_url, api_key, and format_importer are required for language format processing"
)
arr_type = 'radarr' if self.target_app == TargetApp.RADARR else 'sonarr'
# Use asyncio if there are multiple formats to import
if len(formats_to_import) > 1:
# Run in event loop
return asyncio.run(self._async_process_language_formats(
formats_to_import=formats_to_import,
arr_type=arr_type,
import_as_unique=import_as_unique
))
# For single format, use regular synchronous version
for format_data in formats_to_import:
try:
result = import_format_from_memory(
format_data,
self.base_url,
self.api_key,
arr_type,
import_as_unique=self.import_as_unique)
if not result.get('success', False):
logger.error(
f"Format import failed for: {format_data['name']}")
raise Exception(
f"Failed to import format {format_data['name']}")
format_name = format_data['name']
if import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': format_name,
'score': get_language_import_score()
})
except Exception as e:
logger.error(
f"Error importing format {format_data['name']}: {str(e)}"
)
raise
return format_configs
except Exception as e:
logger.error(f"Error processing language formats: {str(e)}")
raise
async def _async_process_language_formats(
self,
formats_to_import: List[Dict],
arr_type: str,
import_as_unique: bool = False) -> List[Dict]:
"""
Asynchronous version of _process_language_formats for concurrent imports
"""
logger.info(f"Processing language formats asynchronously: {len(formats_to_import)} formats")
format_configs = []
tasks = []
# Create tasks for all formats
for format_data in formats_to_import:
task = asyncio.create_task(
async_import_format_from_memory(
format_data=format_data,
base_url=self.base_url,
api_key=self.api_key,
arr_type=arr_type,
import_as_unique=self.import_as_unique
)
)
tasks.append((format_data['name'], task))
# Process all format import results
for format_name, task in tasks:
try:
result = await task
if not result.get('success', False):
logger.error(f"Format import failed for: {format_name} (async)")
raise Exception(f"Failed to import format {format_name}")
display_name = format_name
if import_as_unique:
display_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': display_name,
'score': get_language_import_score()
})
except Exception as e:
logger.error(f"Error importing format {format_name}: {str(e)} (async)")
raise
return format_configs
def convert_quality_group(self, group: Dict) -> Dict:
original_id = group.get("id", 0)
converted_id = self._convert_group_id(original_id)
allowed_qualities = []
for q_item in group.get("qualities", []):
input_name = q_item.get("name", "")
# First map the quality name to handle remux qualities properly
mapped_name = ValueResolver.get_quality_name(
input_name, self.target_app)
# Create a case-insensitive lookup map
quality_map = {k.lower(): k for k in self.quality_mappings}
# Try to find the mapped name in quality mappings
if mapped_name.lower() in quality_map:
allowed_qualities.append(quality_map[mapped_name.lower()])
# Fallback to the original name
elif input_name.lower() in quality_map:
allowed_qualities.append(quality_map[input_name.lower()])
converted_group = {
"name": group["name"],
"items": self._create_all_qualities(allowed_qualities),
"allowed": True,
"id": converted_id
}
return converted_group
def convert_profile(self, profile: Dict) -> ConvertedProfile:
language = profile.get('language', 'any')
# Handle language processing for advanced mode (with behavior_language format)
if language != 'any' and '_' in language:
language_parts = language.split('_', 1)
behaviour, language_code = language_parts
# Check if we're using a special importer with cached formats
if self.format_importer and hasattr(self.format_importer, '__name__') and self.format_importer.__name__ == 'cached_format_importer':
# If we're using the cached importer, skip processing
# The formats were already added directly to the profile
pass # Using pre-added language formats
else:
# Normal processing path
try:
language_formats = self._process_language_formats(
behaviour, language_code)
if 'custom_formats' not in profile:
profile['custom_formats'] = []
profile['custom_formats'].extend(language_formats)
except Exception as e:
logger.error(f"Failed to process language formats: {e}")
# Simple mode: just use the language directly without custom formats
# This lets the Arr application's built-in language filter handle it
# Get the appropriate language data for the profile
if language != 'any' and '_' not in language:
# Simple mode - use the language directly
selected_language = ValueResolver.get_language(language,
self.target_app,
for_profile=True)
# Using simple language mode
else:
# Advanced mode or 'any' - set language to 'any' as filtering is done via formats
selected_language = ValueResolver.get_language('any',
self.target_app,
for_profile=True)
# Using advanced mode, setting language to 'any'
converted_profile = ConvertedProfile(
name=profile["name"],
upgrade_allowed=profile.get("upgradesAllowed", True),
items=[],
format_items=[],
min_format_score=profile.get("minCustomFormatScore", 0),
cutoff_format_score=profile.get("upgradeUntilScore", 0),
min_upgrade_format_score=max(1,
profile.get("minScoreIncrement", 1)),
language=selected_language)
used_qualities = set()
quality_ids_in_groups = set()
# First pass: Gather all quality IDs in groups to avoid duplicates
for quality_entry in profile.get("qualities", []):
if quality_entry.get("id", 0) < 0: # It's a group
# Process this group to collect quality IDs
converted_group = self.convert_quality_group(quality_entry)
for item in converted_group["items"]:
if "quality" in item and "id" in item["quality"]:
quality_ids_in_groups.add(item["quality"]["id"])
# Second pass: Add groups and individual qualities to the profile
for quality_entry in profile.get("qualities", []):
if quality_entry.get("id", 0) < 0: # It's a group
converted_group = self.convert_quality_group(quality_entry)
if converted_group["items"]:
converted_profile.items.append(converted_group)
for q in quality_entry.get("qualities", []):
used_qualities.add(q.get("name", "").upper())
else: # It's a single quality
quality_name = quality_entry.get("name")
mapped_name = ValueResolver.get_quality_name(
quality_name, self.target_app)
if mapped_name in self.quality_mappings:
converted_profile.items.append({
"quality": self.quality_mappings[mapped_name],
"items": [],
"allowed": True
})
used_qualities.add(mapped_name.upper())
# Add all unused qualities as disabled, but skip those already in groups
for quality_name, quality_data in self.quality_mappings.items():
if (quality_name.upper() not in used_qualities and
quality_data["id"] not in quality_ids_in_groups):
converted_profile.items.append({
"quality": quality_data,
"items": [],
"allowed": False
})
if "upgrade_until" in profile and "id" in profile["upgrade_until"]:
cutoff_id = profile["upgrade_until"]["id"]
cutoff_name = profile["upgrade_until"]["name"]
mapped_cutoff_name = ValueResolver.get_quality_name(
cutoff_name, self.target_app)
if cutoff_id < 0:
converted_profile.cutoff = self._convert_group_id(cutoff_id)
else:
converted_profile.cutoff = self.quality_mappings[
mapped_cutoff_name]["id"]
for cf in profile.get("custom_formats", []):
format_item = {"name": cf["name"], "score": cf["score"]}
converted_profile.format_items.append(format_item)
# Process app-specific custom formats based on target app
app_specific_field = None
if self.target_app == TargetApp.RADARR:
app_specific_field = "custom_formats_radarr"
elif self.target_app == TargetApp.SONARR:
app_specific_field = "custom_formats_sonarr"
if app_specific_field and app_specific_field in profile:
for cf in profile[app_specific_field]:
format_name = cf["name"]
# Apply [Dictionarry] suffix if import_as_unique is enabled
if self.import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_item = {"name": format_name, "score": cf["score"]}
converted_profile.format_items.append(format_item)
converted_profile.items.reverse()
return converted_profile
class ProfileProcessor:
"""Main class for processing profile files"""
def __init__(self,
input_dir: Path,
output_dir: Path,
target_app: TargetApp,
base_url: str = None,
api_key: str = None,
format_importer: Callable = None):
self.input_dir = input_dir
self.output_dir = output_dir
self.converter = ProfileConverter(target_app, base_url, api_key,
format_importer)
def _load_profile(self, profile_name: str) -> Optional[Dict]:
profile_path = self.input_dir / f"{profile_name}.yml"
if not profile_path.exists():
return None
with profile_path.open('r') as f:
return yaml.safe_load(f)
def process_profile(
self,
profile_name: str,
return_data: bool = False) -> Optional[ConvertedProfile]:
profile_data = self._load_profile(profile_name)
if not profile_data:
return None
converted = self.converter.convert_profile(profile_data)
if return_data:
return converted
output_data = [{
'name': converted.name,
'upgradeAllowed': converted.upgrade_allowed,
'items': converted.items,
'formatItems': converted.format_items,
'minFormatScore': converted.min_format_score,
'cutoffFormatScore': converted.cutoff_format_score,
'minUpgradeFormatScore': converted.min_upgrade_format_score,
'language': converted.language
}]
if converted.cutoff is not None:
output_data[0]['cutoff'] = converted.cutoff
output_path = self.output_dir / f"{profile_name}.json"
with output_path.open('w') as f:
json.dump(output_data, f, indent=2)
return converted
def compile_quality_profile(profile_data: Dict,
target_app: TargetApp,
base_url: str = None,
api_key: str = None,
format_importer: Callable = None,
import_as_unique: bool = False) -> List[Dict]:
converter = ProfileConverter(target_app,
base_url,
api_key,
format_importer,
import_as_unique=import_as_unique)
converted = converter.convert_profile(profile_data)
output = {
'name': converted.name,
'upgradeAllowed': converted.upgrade_allowed,
'items': converted.items,
'formatItems': converted.format_items,
'minFormatScore': converted.min_format_score,
'cutoffFormatScore': converted.cutoff_format_score,
'minUpgradeFormatScore': converted.min_upgrade_format_score,
'language': converted.language
}
if converted.cutoff is not None:
output['cutoff'] = converted.cutoff
return [output]
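A minimal sketch of compiling a quality profile in simple language mode ('any'), which skips the language-format import path entirely, so no base_url, api_key or format_importer is needed. The field values are illustrative only, and the import path is assumed from the package __init__ shown earlier.

from app.compile import TargetApp, compile_quality_profile

example_profile = {
    'name': 'Example HD',
    'upgradesAllowed': True,
    'minCustomFormatScore': 0,
    'upgradeUntilScore': 200,
    'minScoreIncrement': 10,
    'language': 'any',
    'qualities': [
        {'id': 3, 'name': 'WEBDL-1080p'},
        {'id': 7, 'name': 'Bluray-1080p'}
    ],
    'upgrade_until': {'id': 7, 'name': 'Bluray-1080p'},
    'custom_formats': []
}

compiled = compile_quality_profile(example_profile, TargetApp.RADARR)
# compiled[0]['cutoff'] resolves to the Radarr id for Bluray-1080p (7)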

View File

@@ -1,3 +0,0 @@
from .config import config
__all__ = ['config']

View File

@@ -1,56 +0,0 @@
import os
import logging
class Config:
# Base Paths
CONFIG_DIR = '/config'
DB_PATH = os.path.join(CONFIG_DIR, 'profilarr.db')
DB_DIR = os.path.join(CONFIG_DIR, 'db')
REGEX_DIR = os.path.join(DB_DIR, 'regex_patterns')
FORMAT_DIR = os.path.join(DB_DIR, 'custom_formats')
PROFILE_DIR = os.path.join(DB_DIR, 'profiles')
MEDIA_MANAGEMENT_DIR = os.path.join(DB_DIR, 'media_management')
# Logging
LOG_DIR = os.path.join(CONFIG_DIR, 'log')
GENERAL_LOG_FILE = os.path.join(LOG_DIR, 'profilarr.log')
IMPORTARR_LOG_FILE = os.path.join(LOG_DIR, 'importarr.log')
HASH_LOG_FILE = os.path.join(LOG_DIR, 'hash.log')
# Flask Configuration
FLASK_ENV = os.getenv('FLASK_ENV', 'production')
DEBUG = FLASK_ENV == 'development'
# CORS Configuration
CORS_ORIGINS = "*"
# Session Configuration
SESSION_LIFETIME_DAYS = 30
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SAMESITE = 'Lax'
# Git Configuration
GIT_USER_NAME = os.getenv('GIT_USER_NAME')
GIT_USER_EMAIL = os.getenv('GIT_USER_EMAIL')
@staticmethod
def ensure_directories():
"""Create all required directories if they don't exist."""
directories = [
Config.CONFIG_DIR, Config.DB_DIR, Config.REGEX_DIR,
Config.FORMAT_DIR, Config.PROFILE_DIR, Config.MEDIA_MANAGEMENT_DIR, Config.LOG_DIR
]
logger = logging.getLogger(__name__)
for directory in directories:
try:
os.makedirs(directory, exist_ok=True)
logger.info(f"Ensured directory exists: {directory}")
except Exception as e:
logger.error(
f"Failed to create directory {directory}: {str(e)}")
config = Config()
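A small sketch of how this module might be consumed at startup; the actual app factory is not shown in this diff, so the wiring below is an assumption.

from app.config import config  # re-exported in app/config/__init__.py

config.ensure_directories()    # create /config, /config/db/... and /config/log if missing
print(config.DB_PATH)          # /config/profilarr.db
print(config.DEBUG)            # True only when FLASK_ENV=development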

View File

@@ -1,288 +0,0 @@
from flask import Blueprint, request, jsonify
import logging
import os
import yaml
from .utils import (get_category_directory, load_yaml_file, validate,
save_yaml_file, update_yaml_file, get_file_modified_date,
test_regex_pattern, test_format_conditions,
check_delete_constraints, filename_to_display)
from ..db import add_format_to_renames, remove_format_from_renames, is_format_in_renames
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
bp = Blueprint('data', __name__)
@bp.route('/<string:category>', methods=['GET'])
def retrieve_all(category):
try:
directory = get_category_directory(category)
files = [f for f in os.listdir(directory) if f.endswith('.yml')]
logger.debug(f"Found {len(files)} files in {category}")
if not files:
return jsonify([]), 200
result = []
errors = 0
for file_name in files:
file_path = os.path.join(directory, file_name)
try:
content = load_yaml_file(file_path)
# Add metadata for custom formats
if category == 'custom_format':
content['metadata'] = {
'includeInRename':
is_format_in_renames(content['name'])
}
result.append({
"file_name":
file_name,
"content":
content,
"modified_date":
get_file_modified_date(file_path)
})
except yaml.YAMLError:
errors += 1
result.append({
"file_name": file_name,
"error": "Failed to parse YAML"
})
logger.info(
f"Processed {len(files)} {category} files ({errors} errors)")
return jsonify(result), 200
except ValueError as ve:
logger.error(ve)
return jsonify({"error": str(ve)}), 400
except FileNotFoundError as fnfe:
logger.error(fnfe)
return jsonify({"error": str(fnfe)}), 404
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<string:category>/<string:name>',
methods=['GET', 'POST', 'PUT', 'DELETE'])
def handle_item(category, name):
try:
directory = get_category_directory(category)
file_name = f"{name}.yml" if not name.endswith('.yml') else name
file_path = os.path.join(directory, file_name)
if request.method == 'GET':
try:
content = load_yaml_file(file_path)
# Add metadata for custom formats
if category == 'custom_format':
content['metadata'] = {
'includeInRename':
is_format_in_renames(content['name'])
}
return jsonify({
"file_name":
file_name,
"content":
content,
"modified_date":
get_file_modified_date(file_path)
}), 200
except FileNotFoundError:
return jsonify({"error": f"File {file_name} not found"}), 404
except yaml.YAMLError:
return jsonify(
{"error": f"Failed to parse YAML file {file_name}"}), 500
elif request.method == 'DELETE':
if not os.path.exists(file_path):
return jsonify({"error": f"File {file_name} not found"}), 404
# Check for references before deleting
can_delete, error_message = check_delete_constraints(
category, filename_to_display(name))
if not can_delete:
logger.error(
f"Delete constraint check failed for {name}: {error_message}"
)
return jsonify({"error": error_message}), 409
try:
# If it's a custom format, remove from renames table first
if category == 'custom_format':
# Get the format name from the file before deleting it
content = load_yaml_file(file_path)
format_name = content.get('name')
if format_name:
# Check if it exists in renames before trying to remove
if is_format_in_renames(format_name):
remove_format_from_renames(format_name)
logger.info(
f"Removed {format_name} from renames table")
else:
logger.info(
f"{format_name} was not in renames table")
# Then delete the file
os.remove(file_path)
return jsonify(
{"message": f"Successfully deleted {file_name}"}), 200
except OSError as e:
logger.error(f"Error deleting file {file_path}: {e}")
return jsonify({"error": f"Failed to delete {file_name}"}), 500
elif request.method == 'POST':
# If a file already exists with that name, conflict
if os.path.exists(file_path):
return jsonify({"error":
f"File {file_name} already exists"}), 409
try:
data = request.get_json()
if data and 'name' in data:
data['name'] = data['name'].strip()
# Handle rename inclusion for custom formats
if category == 'custom_format':
include_in_rename = data.get('metadata', {}).get(
'includeInRename', False)
# Remove metadata before saving YAML
if 'metadata' in data:
del data['metadata']
if validate(data, category):
# Save YAML
save_yaml_file(file_path, data, category)
# If custom format, handle rename table
if category == 'custom_format' and include_in_rename:
add_format_to_renames(data['name'])
return jsonify(
{"message": f"Successfully created {file_name}"}), 201
return jsonify({"error": "Validation failed"}), 400
except Exception as e:
logger.error(f"Error creating file: {e}")
return jsonify({"error": str(e)}), 500
elif request.method == 'PUT':
if not os.path.exists(file_path):
return jsonify({"error": f"File {file_name} not found"}), 404
try:
data = request.get_json()
logger.info(f"Received PUT data for {name}: {data}")
if data and 'name' in data:
data['name'] = data['name'].strip()
if data and 'rename' in data:
data['rename'] = data['rename'].strip()
# Handle rename inclusion for custom formats
if category == 'custom_format':
include_in_rename = data.get('metadata', {}).get(
'includeInRename', False)
# Get current content to check for rename
current_content = load_yaml_file(file_path)
old_name = current_content.get('name')
new_name = data['name']
# Handle renames and toggles
if old_name != new_name and include_in_rename:
# Handle rename while keeping in table
remove_format_from_renames(old_name)
add_format_to_renames(new_name)
elif include_in_rename:
# Just turning it on
add_format_to_renames(new_name)
else:
# Turning it off
remove_format_from_renames(data['name'])
# Remove metadata before saving YAML
if 'metadata' in data:
del data['metadata']
# Save YAML
update_yaml_file(file_path, data, category)
return jsonify(
{"message": f"Successfully updated {file_name}"}), 200
except Exception as e:
logger.error(f"Error updating file: {e}")
return jsonify({"error": str(e)}), 500
except ValueError as ve:
logger.error(ve)
return jsonify({"error": str(ve)}), 400
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<string:category>/test', methods=['POST'])
def run_tests(category):
logger.info(f"Received test request for category: {category}")
try:
data = request.get_json()
if not data:
logger.warning("Rejected test request - no JSON data provided")
return jsonify({"error": "No JSON data provided"}), 400
tests = data.get('tests', [])
if not tests:
logger.warning("Rejected test request - no test cases provided")
return jsonify({"error":
"At least one test case is required"}), 400
if category == 'regex_pattern':
pattern = data.get('pattern')
logger.info(f"Processing regex test request - Pattern: {pattern}")
if not pattern:
logger.warning("Rejected test request - missing pattern")
return jsonify({"error": "Pattern is required"}), 400
success, message, updated_tests = test_regex_pattern(
pattern, tests)
elif category == 'custom_format':
conditions = data.get('conditions', [])
logger.info(
f"Processing format test request - Conditions: {len(conditions)}"
)
if not conditions:
logger.warning(
"Rejected test request - no conditions provided")
return jsonify({"error":
"At least one condition is required"}), 400
success, message, updated_tests = test_format_conditions(
conditions, tests)
else:
logger.warning(
f"Rejected test request - invalid category: {category}")
return jsonify(
{"error": "Testing not supported for this category"}), 400
logger.info(f"Test execution completed - Success: {success}")
if not success:
logger.warning(f"Test execution failed - {message}")
return jsonify({"success": False, "message": message}), 400
return jsonify({"success": True, "tests": updated_tests}), 200
except Exception as e:
logger.warning(f"Unexpected error in test endpoint: {str(e)}",
exc_info=True)
return jsonify({"success": False, "message": str(e)}), 500
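A hypothetical request against the test endpoint for regex patterns. The '/api/data' prefix is assumed, and so is the shape of each test case -- test_regex_pattern() lives in .utils and its expected keys are not visible in this file.

import requests

payload = {
    'pattern': r'\b(?:2160p|4k)\b',
    'tests': [
        # assumed test-case keys; adjust to whatever test_regex_pattern() expects
        {'input': 'Movie.2023.2160p.WEB-DL.x265', 'expected': True}
    ]
}
resp = requests.post('http://localhost:6868/api/data/regex_pattern/test', json=payload)
print(resp.json())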

View File

@@ -1,725 +0,0 @@
import os
import yaml
import shutil
import logging
from datetime import datetime
from typing import Dict, List, Any, Tuple, Union
import git
import regex
from ..db.queries.arr import update_arr_config_on_rename, update_arr_config_on_delete
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from ..config.config import config
# Directory constants
REPO_PATH = config.DB_DIR
REGEX_DIR = config.REGEX_DIR
FORMAT_DIR = config.FORMAT_DIR
PROFILE_DIR = config.PROFILE_DIR
# Expected fields for each category
REGEX_FIELDS = ["name", "pattern", "description", "tags", "tests"]
FORMAT_FIELDS = ["name", "description", "tags", "conditions", "tests"]
PROFILE_FIELDS = [
"name",
"description",
"tags",
"upgradesAllowed",
"minCustomFormatScore",
"upgradeUntilScore",
"minScoreIncrement",
"custom_formats", # Array of {name, score} objects (backwards compatible)
"custom_formats_radarr", # Array of {name, score} objects for radarr-specific scores
"custom_formats_sonarr", # Array of {name, score} objects for sonarr-specific scores
"qualities", # Array of strings
"upgrade_until",
"language"
]
# Category mappings
CATEGORY_MAP = {
"custom_format": (FORMAT_DIR, FORMAT_FIELDS),
"regex_pattern": (REGEX_DIR, REGEX_FIELDS),
"profile": (PROFILE_DIR, PROFILE_FIELDS)
}
def display_to_filename(name: str) -> str:
"""Convert display name (with []) to filename (with ())"""
return f"{name.replace('[', '(').replace(']', ')')}.yml"
def filename_to_display(filename: str) -> str:
"""Convert filename (with ()) back to display name (with [])"""
name = filename[:-4] if filename.endswith('.yml') else filename
return name.replace('(', '[').replace(')', ']')
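# Illustrative round-trip between display names and on-disk filenames:
#   display_to_filename('1080p Remux [Dictionarry]') -> '1080p Remux (Dictionarry).yml'
#   filename_to_display('1080p Remux (Dictionarry).yml') -> '1080p Remux [Dictionarry]'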
def _setup_yaml_quotes():
"""Configure YAML to quote string values"""
def str_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str',
data,
style="'")
yaml.add_representer(str, str_presenter)
def get_file_modified_date(file_path: str) -> str:
"""Get file last modified date in ISO format"""
try:
stats = os.stat(file_path)
return datetime.fromtimestamp(stats.st_mtime).isoformat()
except Exception as e:
logger.error(f"Error getting modified date for {file_path}: {e}")
return None
def get_category_directory(category: str) -> str:
try:
directory, _ = CATEGORY_MAP[category]
except KeyError:
logger.error(f"Invalid category requested: {category}")
raise ValueError(f"Invalid category: {category}")
if not os.path.exists(directory):
logger.error(f"Directory not found: {directory}")
raise FileNotFoundError(f"Directory not found: {directory}")
return directory
def load_yaml_file(file_path: str) -> Dict[str, Any]:
file_path = file_path.replace('[', '(').replace(']', ')')
if not os.path.exists(file_path):
logger.error(f"File not found: {file_path}")
raise FileNotFoundError(f"File not found: {file_path}")
try:
with open(file_path, 'r') as f:
content = yaml.safe_load(f)
return content
except yaml.YAMLError as e:
logger.error(f"Error parsing YAML file {file_path}: {e}")
raise
except Exception as e:
logger.error(f"Unexpected error reading file {file_path}: {e}")
raise
def validate(data: Dict[str, Any], category: str) -> bool:
if not isinstance(data, dict):
return False
_, fields = CATEGORY_MAP[category]
return all(field in data for field in fields)
def save_yaml_file(file_path: str,
data: Dict[str, Any],
category: str,
use_data_name: bool = True) -> None:
"""
Save YAML data to a file
Args:
file_path: The path where the file should be saved
data: The data to save
category: The category of data
use_data_name: If True, use the name from data to create filename. If False, use the provided file_path as is.
"""
if not validate(data, category):
raise ValueError("Invalid data format")
directory = os.path.dirname(file_path)
if use_data_name:
filename = display_to_filename(data['name'])
safe_file_path = os.path.join(directory, filename)
else:
safe_file_path = file_path
_, fields = CATEGORY_MAP[category]
ordered_data = {field: data[field] for field in fields}
_setup_yaml_quotes()
with open(safe_file_path, 'w') as f:
yaml.safe_dump(ordered_data, f, sort_keys=False)
def update_yaml_file(file_path: str, data: Dict[str, Any],
category: str) -> None:
try:
# Check if this is a rename operation
if 'rename' in data:
new_name = data['rename']
old_name = filename_to_display(os.path.basename(file_path)[:-4])
directory = os.path.dirname(file_path)
new_file_path = os.path.join(directory,
display_to_filename(new_name))
# Update references before performing the rename
try:
# Update regular references
updated_files = update_references(category, old_name, new_name)
logger.info(f"Updated references in: {updated_files}")
# Update arr configs if this is a format or profile
if category in ['custom_format', 'profile']:
arr_category = 'customFormats' if category == 'custom_format' else 'profiles'
updated_configs = update_arr_config_on_rename(
arr_category, old_name, new_name)
if updated_configs:
logger.info(
f"Updated arr configs for {category} rename: {updated_configs}"
)
except Exception as e:
logger.error(f"Failed to update references: {e}")
raise Exception(f"Failed to update references: {str(e)}")
# Remove rename field and update the name field in the data
data_to_save = {k: v for k, v in data.items() if k != 'rename'}
data_to_save['name'] = new_name
repo = git.Repo(REPO_PATH)
rel_old_path = os.path.relpath(file_path, REPO_PATH)
rel_new_path = os.path.relpath(new_file_path, REPO_PATH)
try:
# First, save the content changes to the current file
save_yaml_file(file_path,
data_to_save,
category,
use_data_name=False)
# Stage the content changes first
repo.index.add([rel_old_path])
# Then perform the rename
tracked_files = repo.git.ls_files().splitlines()
is_tracked = rel_old_path in tracked_files
if is_tracked:
# Use git mv for tracked files
repo.git.mv(rel_old_path, rel_new_path)
else:
# For untracked files, manually move
os.rename(file_path, new_file_path)
# Stage the new file
repo.index.add([rel_new_path])
except git.GitCommandError as e:
logger.error(f"Git operation failed: {e}")
raise Exception(f"Failed to rename file: {str(e)}")
except OSError as e:
logger.error(f"File operation failed: {e}")
raise Exception(f"Failed to rename file: {str(e)}")
else:
# Normal update without rename
backup_path = f"{file_path}.bak"
shutil.copy2(file_path, backup_path)
try:
save_yaml_file(file_path, data, category)
os.remove(backup_path)
except Exception as e:
shutil.move(backup_path, file_path)
raise
except Exception as e:
raise
def check_delete_constraints(category: str, name: str) -> Tuple[bool, str]:
"""
Check if deleting an item would break any references.
Returns (can_delete, error_message) tuple.
"""
try:
# Protected custom formats that cannot be deleted
PROTECTED_FORMATS = [
"Not English", "Not Only English", "Not Only English (Missing)"
]
# Convert the input name to use parentheses for comparison
check_name = name.replace('[', '(').replace(']', ')')
logger.debug(
f"Checking constraints for {category}: {name} (normalized as {check_name})"
)
# Check protected formats first
if category == 'custom_format' and check_name in [
f.replace('[', '(').replace(']', ')')
for f in PROTECTED_FORMATS
]:
return False, "This format cannot be deleted as it's required for language processing functionality"
references = []
if category == 'regex_pattern':
# Check all custom formats for references to this pattern
format_dir = get_category_directory('custom_format')
for format_file in os.listdir(format_dir):
if not format_file.endswith('.yml'):
continue
format_path = os.path.join(format_dir, format_file)
try:
format_data = load_yaml_file(format_path)
# Check each condition in the format
for condition in format_data.get('conditions', []):
if condition['type'] in [
'release_title', 'release_group', 'edition'
] and condition.get('pattern') == check_name:
references.append(
f"custom format: {format_data['name']}")
except Exception as e:
logger.error(
f"Error checking format file {format_file}: {e}")
continue
elif category == 'custom_format':
# Check all quality profiles for references to this format
profile_dir = get_category_directory('profile')
for profile_file in os.listdir(profile_dir):
if not profile_file.endswith('.yml'):
continue
profile_path = os.path.join(profile_dir, profile_file)
try:
profile_data = load_yaml_file(profile_path)
# Check custom_formats (both/backwards compatible)
custom_formats = profile_data.get('custom_formats', [])
if isinstance(custom_formats, list):
for format_ref in custom_formats:
format_name = format_ref.get('name', '')
# Convert format name to use parentheses for comparison
format_name = format_name.replace('[', '(').replace(']', ')')
logger.debug(f"Comparing '{format_name}' with '{check_name}' in both")
if format_name == check_name:
references.append(f"quality profile: {profile_data['name']} (both)")
# Check custom_formats_radarr
custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
if isinstance(custom_formats_radarr, list):
for format_ref in custom_formats_radarr:
format_name = format_ref.get('name', '')
# Convert format name to use parentheses for comparison
format_name = format_name.replace('[', '(').replace(']', ')')
logger.debug(f"Comparing '{format_name}' with '{check_name}' in radarr")
if format_name == check_name:
references.append(f"quality profile: {profile_data['name']} (radarr)")
# Check custom_formats_sonarr
custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
if isinstance(custom_formats_sonarr, list):
for format_ref in custom_formats_sonarr:
format_name = format_ref.get('name', '')
# Convert format name to use parentheses for comparison
format_name = format_name.replace('[', '(').replace(']', ')')
logger.debug(f"Comparing '{format_name}' with '{check_name}' in sonarr")
if format_name == check_name:
references.append(f"quality profile: {profile_data['name']} (sonarr)")
except Exception as e:
logger.error(f"Error checking profile file {profile_file}: {e}")
continue
# Update arr configs for formats and profiles
if category in ['custom_format', 'profile']:
arr_category = 'customFormats' if category == 'custom_format' else 'profiles'
updated_configs = update_arr_config_on_delete(arr_category, name)
if updated_configs:
logger.info(
f"Removed {name} from arr configs: {updated_configs}")
if references:
error_msg = f"Cannot delete - item is referenced in:\n" + "\n".join(
f"- {ref}" for ref in references)
logger.info(f"Found references for {name}: {error_msg}")
return False, error_msg
logger.info(f"No references found for {name}")
return True, ""
except Exception as e:
logger.error(f"Error checking delete constraints: {e}")
return False, f"Error checking references: {str(e)}"
def update_references(category: str, old_name: str,
new_name: str) -> List[str]:
"""
Update references to a renamed item across all relevant files.
Returns a list of files that were updated.
"""
updated_files = []
try:
# Convert names to use parentheses for comparison
old_check_name = old_name.replace('[', '(').replace(']', ')')
new_check_name = new_name.replace('[', '(').replace(']', ')')
if category == 'regex_pattern':
# Update references in custom formats
format_dir = get_category_directory('custom_format')
for format_file in os.listdir(format_dir):
if not format_file.endswith('.yml'):
continue
format_path = os.path.join(format_dir, format_file)
try:
format_data = load_yaml_file(format_path)
updated = False
# Check and update each condition in the format
for condition in format_data.get('conditions', []):
if (condition['type'] in [
'release_title', 'release_group', 'edition'
] and condition.get('pattern') == old_check_name):
condition['pattern'] = new_check_name
updated = True
if updated:
save_yaml_file(format_path,
format_data,
'custom_format',
use_data_name=False)
updated_files.append(
f"custom format: {format_data['name']}")
except Exception as e:
logger.error(
f"Error updating format file {format_file}: {e}")
continue
elif category == 'custom_format':
# Update references in quality profiles
profile_dir = get_category_directory('profile')
for profile_file in os.listdir(profile_dir):
if not profile_file.endswith('.yml'):
continue
profile_path = os.path.join(profile_dir, profile_file)
try:
profile_data = load_yaml_file(profile_path)
updated = False
# Update custom_formats (both/backwards compatible)
custom_formats = profile_data.get('custom_formats', [])
if isinstance(custom_formats, list):
for format_ref in custom_formats:
format_name = format_ref.get('name', '')
# Convert format name to use parentheses for comparison
format_name = format_name.replace('[', '(').replace(']', ')')
if format_name == old_check_name:
format_ref['name'] = new_name
updated = True
# Update custom_formats_radarr
custom_formats_radarr = profile_data.get('custom_formats_radarr', [])
if isinstance(custom_formats_radarr, list):
for format_ref in custom_formats_radarr:
format_name = format_ref.get('name', '')
# Convert format name to use parentheses for comparison
format_name = format_name.replace('[', '(').replace(']', ')')
if format_name == old_check_name:
format_ref['name'] = new_name
updated = True
# Update custom_formats_sonarr
custom_formats_sonarr = profile_data.get('custom_formats_sonarr', [])
if isinstance(custom_formats_sonarr, list):
for format_ref in custom_formats_sonarr:
format_name = format_ref.get('name', '')
# Convert format name to use parentheses for comparison
format_name = format_name.replace('[', '(').replace(']', ')')
if format_name == old_check_name:
format_ref['name'] = new_name
updated = True
if updated:
save_yaml_file(profile_path,
profile_data,
'profile',
use_data_name=False)
updated_files.append(
f"quality profile: {profile_data['name']}")
except Exception as e:
logger.error(
f"Error updating profile file {profile_file}: {e}")
continue
return updated_files
except Exception as e:
logger.error(f"Error updating references: {e}")
raise
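# Minimal sketch (not part of the original file): propagating a regex pattern rename
# through every custom format that references it and logging the touched files.
def _example_rename_pattern_references(old_name: str, new_name: str) -> None:
    for entry in update_references('regex_pattern', old_name, new_name):
        logger.info(f"Updated reference in {entry}")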
def test_regex_pattern(
pattern: str,
tests: List[Dict[str, Any]]) -> Tuple[bool, str, List[Dict[str, Any]]]:
"""
    Test a regex pattern against a list of test cases using a PCRE2-compatible engine.
    Returns match information along with the test results.
"""
logger.info(f"Starting regex pattern test - Pattern: {pattern}")
try:
try:
compiled_pattern = regex.compile(pattern,
regex.V1 | regex.IGNORECASE)
logger.info(
"Pattern compiled successfully with PCRE2 compatibility")
except regex.error as e:
logger.warning(f"Invalid regex pattern: {str(e)}")
return False, f"Invalid regex pattern: {str(e)}", tests
current_time = datetime.now().isoformat()
logger.info(f"Processing {len(tests)} test cases")
for test in tests:
test_id = test.get('id', 'unknown')
test_input = test.get('input', '')
expected = test.get('expected', False)
try:
match = compiled_pattern.search(test_input)
matches = bool(match)
# Update test result with basic fields
test['passes'] = matches == expected
test['lastRun'] = current_time
# Add match information
if match:
test['matchedContent'] = match.group(0)
test['matchSpan'] = {
'start': match.start(),
'end': match.end()
}
# Get all capture groups if they exist
test['matchedGroups'] = [g for g in match.groups()
] if match.groups() else []
else:
test['matchedContent'] = None
test['matchSpan'] = None
test['matchedGroups'] = []
logger.info(
f"Test {test_id} {'passed' if test['passes'] else 'failed'} - Match: {matches}, Expected: {expected}"
)
except Exception as e:
logger.error(f"Error running test {test_id}: {str(e)}")
test['passes'] = False
test['lastRun'] = current_time
test['matchedContent'] = None
test['matchSpan'] = None
test['matchedGroups'] = []
# Log overall results
passed_tests = sum(1 for test in tests if test.get('passes', False))
logger.info(
f"Test execution complete - {passed_tests}/{len(tests)} tests passed"
)
return True, "", tests
except Exception as e:
logger.error(f"Unexpected error in test_regex_pattern: {str(e)}",
exc_info=True)
return False, str(e), tests
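# Usage sketch with fabricated inputs (the pattern and release titles are examples,
# not repository data): each test dict comes back annotated with passes/lastRun and
# the match details attached above.
def _example_run_pattern_tests():
    sample_tests = [
        {'id': 1, 'input': 'Movie.2023.2160p.Remux.mkv', 'expected': True},
        {'id': 2, 'input': 'Movie.2023.1080p.WEB-DL.mkv', 'expected': False},
    ]
    ok, error, results = test_regex_pattern(r'\bremux\b', sample_tests)
    if not ok:
        logger.error(f"Pattern rejected: {error}")
        return []
    return [(t['id'], t['passes'], t['matchedContent']) for t in results]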
def test_format_conditions(conditions: List[Dict],
tests: List[Dict]) -> Tuple[bool, str, List[Dict]]:
"""
Test a set of format conditions against a list of test cases.
Tests only pattern-based conditions (release_title, release_group, edition).
"""
logger.info(
f"Starting format condition test - {len(conditions)} conditions")
logger.error(f"Received conditions: {conditions}")
logger.error(f"Received tests: {tests}")
try:
# First, load all regex patterns from the patterns directory
patterns_dir = os.path.join(REPO_PATH, 'regex_patterns')
pattern_map = {}
logger.error(f"Loading patterns from directory: {patterns_dir}")
if not os.path.exists(patterns_dir):
logger.error(f"Patterns directory not found: {patterns_dir}")
return False, "Patterns directory not found", tests
for pattern_file in os.listdir(patterns_dir):
if pattern_file.endswith('.yml'):
pattern_path = os.path.join(patterns_dir, pattern_file)
try:
with open(pattern_path, 'r') as f:
pattern_data = yaml.safe_load(f)
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
pattern_map[
pattern_data['name']] = pattern_data['pattern']
logger.error(
f"Loaded pattern: {pattern_data['name']} = {pattern_data['pattern']}"
)
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {e}")
continue
logger.error(f"Total patterns loaded: {len(pattern_map)}")
# Compile all regex patterns first
compiled_patterns = {}
for condition in conditions:
if condition['type'] in [
'release_title', 'release_group', 'edition'
]:
logger.error(f"Processing condition: {condition}")
try:
pattern_name = condition.get('pattern', '')
if pattern_name:
# Look up the actual pattern using the pattern name
actual_pattern = pattern_map.get(pattern_name)
if actual_pattern:
compiled_patterns[
condition['name']] = regex.compile(
actual_pattern,
regex.V1 | regex.IGNORECASE)
logger.error(
f"Successfully compiled pattern for {condition['name']}: {actual_pattern}"
)
else:
logger.error(
f"Pattern not found for name: {pattern_name}")
return False, f"Pattern not found: {pattern_name}", tests
except regex.error as e:
logger.error(
f"Invalid regex pattern in condition {condition['name']}: {str(e)}"
)
return False, f"Invalid regex pattern in condition {condition['name']}: {str(e)}", tests
logger.error(f"Total patterns compiled: {len(compiled_patterns)}")
current_time = datetime.now().isoformat()
# Process each test
for test in tests:
test_input = test.get('input', '')
expected = test.get('expected', False)
condition_results = []
logger.error(
f"Processing test input: {test_input}, expected: {expected}")
# Check each condition
for condition in conditions:
if condition['type'] not in [
'release_title', 'release_group', 'edition'
]:
logger.error(
f"Skipping non-pattern condition: {condition['type']}")
continue
pattern = compiled_patterns.get(condition['name'])
if not pattern:
logger.error(
f"No compiled pattern found for condition: {condition['name']}"
)
continue
# Test if pattern matches input
matches = bool(pattern.search(test_input))
logger.error(
f"Condition {condition['name']} match result: {matches}")
# Add result
condition_results.append({
'name':
condition['name'],
'type':
condition['type'],
'pattern':
condition.get('pattern', ''),
'required':
condition.get('required', False),
'negate':
condition.get('negate', False),
'matches':
matches
})
# Determine if format applies
format_applies = True
# Check required conditions
for result in condition_results:
if result['required']:
logger.error(
f"Checking required condition: {result['name']}, negate: {result['negate']}, matches: {result['matches']}"
)
if result['negate']:
if result['matches']:
format_applies = False
logger.error(
f"Required negated condition {result['name']} matched - format does not apply"
)
break
else:
if not result['matches']:
format_applies = False
logger.error(
f"Required condition {result['name']} did not match - format does not apply"
)
break
# Check non-required conditions
if format_applies:
for result in condition_results:
if not result['required'] and result['negate'] and result[
'matches']:
format_applies = False
logger.error(
f"Non-required negated condition {result['name']} matched - format does not apply"
)
break
test['passes'] = format_applies == expected
test['lastRun'] = current_time
test['conditionResults'] = condition_results
logger.error(
f"Test result - format_applies: {format_applies}, expected: {expected}, passes: {test['passes']}"
)
# Log final results
passed_tests = sum(1 for test in tests if test.get('passes', False))
logger.error(
f"Final test results - {passed_tests}/{len(tests)} tests passed")
logger.error(f"Updated tests: {tests}")
return True, "", tests
except Exception as e:
logger.error(f"Unexpected error in test_format_conditions: {str(e)}",
exc_info=True)
return False, str(e), tests
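# Sketch of the expected input shapes (illustrative; assumes a regex pattern named
# "Remux" exists under regex_patterns/ so the condition name can be resolved):
def _example_run_condition_tests():
    conditions = [{'name': 'Remux', 'type': 'release_title', 'pattern': 'Remux',
                   'required': True, 'negate': False}]
    tests = [{'input': 'Movie.2023.2160p.Remux.mkv', 'expected': True}]
    ok, error, results = test_format_conditions(conditions, tests)
    return results[0].get('conditionResults') if ok else error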

View File

@@ -1,15 +0,0 @@
from .connection import get_db
from .queries.settings import get_settings, get_secret_key, save_settings, update_pat_status
from .queries.arr import (get_unique_arrs, update_arr_config_on_rename,
update_arr_config_on_delete)
from .queries.format_renames import (add_format_to_renames,
remove_format_from_renames,
is_format_in_renames)
from .migrations.runner import run_migrations
__all__ = [
'get_db', 'get_settings', 'get_secret_key', 'save_settings',
'get_unique_arrs', 'update_arr_config_on_rename',
'update_arr_config_on_delete', 'run_migrations', 'add_format_to_renames',
'remove_format_from_renames', 'is_format_in_renames', 'update_pat_status'
]

View File

@@ -1,12 +0,0 @@
# backend/app/db/connection.py
import sqlite3
from ..config import config
DB_PATH = config.DB_PATH
def get_db():
"""Create and return a database connection with Row factory."""
conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
return conn
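# Usage sketch (query is illustrative): the sqlite3 connection doubles as a
# transaction context manager, and the Row factory allows access by column name.
def _example_read_setting(key: str):
    with get_db() as conn:
        row = conn.execute('SELECT value FROM settings WHERE key = ?',
                           (key, )).fetchone()
        return row['value'] if row else None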

View File

@@ -1,64 +0,0 @@
# backend/app/db/migrations/runner.py
import os
import importlib
from pathlib import Path
from ..connection import get_db
def init_migrations():
"""Create migrations table if it doesn't exist."""
with get_db() as conn:
conn.execute('''
CREATE TABLE IF NOT EXISTS migrations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
version INTEGER NOT NULL,
name TEXT NOT NULL,
applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
conn.commit()
def get_applied_migrations():
"""Get list of already applied migrations."""
with get_db() as conn:
result = conn.execute(
'SELECT version FROM migrations ORDER BY version')
return [row[0] for row in result.fetchall()]
def get_available_migrations():
"""Get all migration files from versions directory."""
versions_dir = Path(__file__).parent / 'versions'
migrations = []
for file in versions_dir.glob('[0-9]*.py'):
if file.stem != '__init__':
# Import the migration module
module = importlib.import_module(f'.versions.{file.stem}',
package='app.db.migrations')
migrations.append((module.version, module.name, module))
return sorted(migrations, key=lambda x: x[0])
def run_migrations():
"""Run all pending migrations in order."""
init_migrations()
applied = set(get_applied_migrations())
available = get_available_migrations()
for version, name, module in available:
if version not in applied:
print(f"Applying migration {version}: {name}")
try:
module.up()
with get_db() as conn:
conn.execute(
'INSERT INTO migrations (version, name) VALUES (?, ?)',
(version, name))
conn.commit()
print(f"Successfully applied migration {version}")
except Exception as e:
print(f"Error applying migration {version}: {str(e)}")
raise

View File

@@ -1,145 +0,0 @@
# backend/app/db/migrations/versions/001_initial_schema.py
import os
import secrets
from ...connection import get_db
version = 1
name = "initial_schema"
def up():
"""Apply the initial database schema."""
with get_db() as conn:
# Create backups table
conn.execute('''
CREATE TABLE IF NOT EXISTS backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
filename TEXT NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
status TEXT DEFAULT 'pending'
)
''')
# Create arr_config table
conn.execute('''
CREATE TABLE IF NOT EXISTS arr_config (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT UNIQUE NOT NULL,
type TEXT NOT NULL,
tags TEXT,
arr_server TEXT NOT NULL,
api_key TEXT NOT NULL,
data_to_sync TEXT,
last_sync_time TIMESTAMP,
sync_percentage INTEGER DEFAULT 0,
sync_method TEXT DEFAULT 'manual',
sync_interval INTEGER DEFAULT 0,
import_as_unique BOOLEAN DEFAULT 0,
import_task_id INTEGER DEFAULT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create scheduled_tasks table
conn.execute('''
CREATE TABLE IF NOT EXISTS scheduled_tasks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
type TEXT NOT NULL,
interval_minutes INTEGER NOT NULL,
last_run TIMESTAMP,
status TEXT DEFAULT 'pending',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create settings table
conn.execute('''
CREATE TABLE IF NOT EXISTS settings (
id INTEGER PRIMARY KEY AUTOINCREMENT,
key TEXT UNIQUE NOT NULL,
value TEXT,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create auth table
conn.execute('''
CREATE TABLE IF NOT EXISTS auth (
username TEXT NOT NULL,
password_hash TEXT NOT NULL,
api_key TEXT,
session_id TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create failed_attempts table
conn.execute('''
CREATE TABLE IF NOT EXISTS failed_attempts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
ip_address TEXT NOT NULL,
attempt_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Insert initial required data
required_tasks = [
('Repository Sync', 'Sync', 2),
('Backup', 'Backup', 1440),
]
for task_name, task_type, interval in required_tasks:
cursor = conn.execute(
'SELECT COUNT(*) FROM scheduled_tasks WHERE name = ?',
(task_name, ))
if cursor.fetchone()[0] == 0:
conn.execute(
'''
INSERT INTO scheduled_tasks (name, type, interval_minutes)
VALUES (?, ?, ?)
''', (task_name, task_type, interval))
# Insert initial settings
conn.execute('''
INSERT OR IGNORE INTO settings (key, value, updated_at)
VALUES ('auto_pull_enabled', '0', CURRENT_TIMESTAMP)
''')
# Handle profilarr_pat setting
profilarr_pat = os.environ.get('PROFILARR_PAT')
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES ('has_profilarr_pat', ?, CURRENT_TIMESTAMP)
ON CONFLICT(key) DO UPDATE SET
value = ?,
updated_at = CURRENT_TIMESTAMP
''', (str(bool(profilarr_pat)).lower(), str(
bool(profilarr_pat)).lower()))
# Handle secret_key setting
secret_key = conn.execute(
'SELECT value FROM settings WHERE key = "secret_key"').fetchone()
if not secret_key:
new_secret_key = secrets.token_hex(32)
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES ('secret_key', ?, CURRENT_TIMESTAMP)
''', (new_secret_key, ))
conn.commit()
def down():
"""Revert the initial schema migration."""
with get_db() as conn:
# Drop all tables in reverse order of creation
tables = [
'failed_attempts', 'auth', 'settings', 'scheduled_tasks',
'arr_config', 'backups'
]
for table in tables:
conn.execute(f'DROP TABLE IF EXISTS {table}')
conn.commit()

View File

@@ -1,23 +0,0 @@
# backend/app/db/migrations/versions/002_format_renames.py
from ...connection import get_db
version = 2
name = "format_renames"
def up():
"""Add table for tracking which formats to include in renames"""
with get_db() as conn:
conn.execute('''
CREATE TABLE IF NOT EXISTS format_renames (
format_name TEXT PRIMARY KEY NOT NULL
)
''')
conn.commit()
def down():
"""Remove the format_renames table"""
with get_db() as conn:
conn.execute('DROP TABLE IF EXISTS format_renames')
conn.commit()

View File

@@ -1,33 +0,0 @@
# backend/app/db/migrations/versions/003_language_import_score.py
from ...connection import get_db
version = 3
name = "language_import_score"
def up():
"""Add language_import_config table."""
with get_db() as conn:
# Create language_import_config table
conn.execute('''
CREATE TABLE IF NOT EXISTS language_import_config (
id INTEGER PRIMARY KEY AUTOINCREMENT,
score INTEGER NOT NULL DEFAULT -99999,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Insert default record
conn.execute('''
INSERT INTO language_import_config (score, updated_at)
VALUES (-99999, CURRENT_TIMESTAMP)
''')
conn.commit()
def down():
"""Remove language_import_config table."""
with get_db() as conn:
conn.execute('DROP TABLE IF EXISTS language_import_config')
conn.commit()

View File

@@ -1,119 +0,0 @@
from ..connection import get_db
import json
import logging
logger = logging.getLogger(__name__)
def get_unique_arrs(arr_ids):
"""
Get import_as_unique settings for a list of arr IDs.
Args:
arr_ids (list): List of arr configuration IDs
Returns:
dict: Dictionary mapping arr IDs to their import_as_unique settings and names
"""
if not arr_ids:
return {}
with get_db() as conn:
placeholders = ','.join('?' * len(arr_ids))
query = f'''
SELECT id, name, import_as_unique
FROM arr_config
WHERE id IN ({placeholders})
'''
results = conn.execute(query, arr_ids).fetchall()
return {
row['id']: {
'import_as_unique': bool(row['import_as_unique']),
'name': row['name']
}
for row in results
}
def update_arr_config_on_rename(category, old_name, new_name):
"""
Update arr_config data_to_sync when a format or profile is renamed.
Args:
category (str): Either 'customFormats' or 'profiles'
old_name (str): Original name being changed
new_name (str): New name to change to
Returns:
list: IDs of arr_config rows that were updated
"""
updated_ids = []
with get_db() as conn:
# Get all configs that might reference this name
rows = conn.execute(
'SELECT id, data_to_sync FROM arr_config WHERE data_to_sync IS NOT NULL'
).fetchall()
for row in rows:
try:
data = json.loads(row['data_to_sync'])
# Check if this config has the relevant category data
if category in data:
# Update any matching names
if old_name in data[category]:
# Replace old name with new name
data[category] = [
new_name if x == old_name else x
for x in data[category]
]
# Save changes back to database
conn.execute(
'UPDATE arr_config SET data_to_sync = ? WHERE id = ?',
(json.dumps(data), row['id']))
updated_ids.append(row['id'])
except json.JSONDecodeError:
logger.error(f"Invalid JSON in arr_config id={row['id']}")
continue
if updated_ids:
conn.commit()
return updated_ids
def update_arr_config_on_delete(category, name):
"""
Update arr_config data_to_sync when a format or profile is deleted.
Args:
category (str): Either 'customFormats' or 'profiles'
name (str): Name being deleted
Returns:
list: IDs of arr_config rows that were updated
"""
updated_ids = []
with get_db() as conn:
# Get all configs that might reference this name
rows = conn.execute(
'SELECT id, data_to_sync FROM arr_config WHERE data_to_sync IS NOT NULL'
).fetchall()
for row in rows:
try:
data = json.loads(row['data_to_sync'])
# Check if this config has the relevant category data
if category in data:
# Remove any matching names
if name in data[category]:
data[category].remove(name)
# Save changes back to database
conn.execute(
'UPDATE arr_config SET data_to_sync = ? WHERE id = ?',
(json.dumps(data), row['id']))
updated_ids.append(row['id'])
except json.JSONDecodeError:
logger.error(f"Invalid JSON in arr_config id={row['id']}")
continue
if updated_ids:
conn.commit()
return updated_ids
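# Illustrative sketch (the names below are examples, not real rows): renaming a
# custom format rewrites the data_to_sync JSON of every arr_config that lists it,
# e.g. '{"customFormats": ["Old Name"], "profiles": ["1080p"]}'.
def _example_propagate_rename():
    updated = update_arr_config_on_rename('customFormats', 'Old Name', 'New Name')
    logger.info(f"Rewrote data_to_sync for arr_config ids: {updated}")
    return updated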

View File

@@ -1,33 +0,0 @@
# backend/app/db/queries/format_renames.py
import logging
from ..connection import get_db
logger = logging.getLogger(__name__)
def add_format_to_renames(format_name: str) -> None:
"""Add a format to the renames table"""
with get_db() as conn:
conn.execute(
'INSERT OR REPLACE INTO format_renames (format_name) VALUES (?)',
(format_name, ))
conn.commit()
logger.info(f"Added format to renames table: {format_name}")
def remove_format_from_renames(format_name: str) -> None:
"""Remove a format from the renames table"""
with get_db() as conn:
conn.execute('DELETE FROM format_renames WHERE format_name = ?',
(format_name, ))
conn.commit()
logger.info(f"Removed format from renames table: {format_name}")
def is_format_in_renames(format_name: str) -> bool:
"""Check if a format is in the renames table"""
with get_db() as conn:
result = conn.execute(
'SELECT 1 FROM format_renames WHERE format_name = ?',
(format_name, )).fetchone()
return bool(result)

View File

@@ -1,111 +0,0 @@
# backend/app/db/queries/settings.py
from ..connection import get_db
import logging
import os
logger = logging.getLogger(__name__)
def get_settings():
with get_db() as conn:
result = conn.execute(
'SELECT key, value FROM settings WHERE key NOT IN ("secret_key")'
).fetchall()
settings = {row['key']: row['value'] for row in result}
return settings if 'gitRepo' in settings else None
def get_secret_key():
with get_db() as conn:
result = conn.execute(
'SELECT value FROM settings WHERE key = "secret_key"').fetchone()
return result['value'] if result else None
def save_settings(settings_dict):
with get_db() as conn:
for key, value in settings_dict.items():
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES (?, ?, CURRENT_TIMESTAMP)
ON CONFLICT(key) DO UPDATE SET
value = excluded.value,
updated_at = CURRENT_TIMESTAMP
''', (key, value))
conn.commit()
def update_pat_status():
"""Update the has_profilarr_pat setting based on current environment."""
with get_db() as conn:
profilarr_pat = os.environ.get('PROFILARR_PAT')
pat_exists = str(bool(profilarr_pat)).lower()
# Get current value
current = conn.execute('SELECT value FROM settings WHERE key = ?',
('has_profilarr_pat', )).fetchone()
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES ('has_profilarr_pat', ?, CURRENT_TIMESTAMP)
ON CONFLICT(key) DO UPDATE SET
value = ?,
updated_at = CURRENT_TIMESTAMP
''', (pat_exists, pat_exists))
conn.commit()
if current is None:
logger.info(f"PAT status created: {pat_exists}")
elif current[0] != pat_exists:
logger.info(
f"PAT status updated from {current[0]} to {pat_exists}")
else:
logger.debug("PAT status unchanged")
def get_language_import_score():
"""Get the current language import score."""
with get_db() as conn:
result = conn.execute(
'SELECT score FROM language_import_config ORDER BY id DESC LIMIT 1'
).fetchone()
return result['score'] if result else -99999
def update_language_import_score(score):
"""Update the language import score."""
with get_db() as conn:
# Get current score first
current = conn.execute(
'SELECT score FROM language_import_config ORDER BY id DESC LIMIT 1'
).fetchone()
current_score = current['score'] if current else None
# Check if record exists
existing = conn.execute(
'SELECT id FROM language_import_config ORDER BY id DESC LIMIT 1'
).fetchone()
if existing:
# Update existing record
conn.execute(
'''
UPDATE language_import_config
SET score = ?, updated_at = CURRENT_TIMESTAMP
WHERE id = ?
''', (score, existing['id']))
else:
# Insert new record
conn.execute(
'''
INSERT INTO language_import_config (score, updated_at)
VALUES (?, CURRENT_TIMESTAMP)
''', (score,))
conn.commit()
if current_score is not None:
logger.info(f"Language import score updated from {current_score} to {score}")
else:
logger.info(f"Language import score set to: {score}")

View File

@@ -1,403 +0,0 @@
# git/__init__.py
from flask import Blueprint, request, jsonify
from .status.status import get_git_status
from .status.commit_history import get_git_commit_history
from .branches.manager import Branch_Manager
from .operations.manager import GitOperations
from .repo.unlink import unlink_repository
from .repo.clone import clone_repository
from ..db import save_settings, get_settings
from ..config.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
bp = Blueprint('git', __name__)
REPO_PATH = config.DB_DIR
branch_manager = Branch_Manager(REPO_PATH)
git_operations = GitOperations(REPO_PATH)
@bp.route('/clone', methods=['POST'])
def handle_clone_repository():
try:
new_settings = request.json
logger.info(f"Received new settings: {new_settings}")
if 'gitRepo' not in new_settings:
logger.error("Missing required field: gitRepo")
return jsonify({"error": "Missing required field: gitRepo"}), 400
success, message = clone_repository(new_settings['gitRepo'], REPO_PATH)
if success:
# Store repository URL in database
save_settings({'gitRepo': new_settings['gitRepo']})
logger.info("Settings updated and repository cloned successfully")
return jsonify({
"message":
"Repository cloned and settings updated successfully"
}), 200
else:
logger.error(f"Failed to clone repository: {message}")
return jsonify({"error": message}), 400
except Exception as e:
logger.exception("Unexpected error in clone_repository")
return jsonify({"error": f"Failed to clone repository: {str(e)}"}), 500
@bp.route('/status', methods=['GET'])
def get_status():
logger.debug("Received request for git status")
success, message = get_git_status(REPO_PATH)
if isinstance(message, str) and "No git repository" in message:
return jsonify({'success': True, 'data': None}), 200
if success:
logger.debug("Successfully retrieved git status")
return jsonify({'success': True, 'data': message}), 200
else:
logger.error(f"Failed to retrieve git status: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/branch', methods=['POST'])
def create_branch():
branch_name = request.json.get('name')
base_branch = request.json.get('base', 'main')
logger.debug(
f"Received request to create branch {branch_name} from {base_branch}")
success, result = branch_manager.create(branch_name, base_branch)
if success:
logger.debug(f"Successfully created branch: {branch_name}")
return jsonify({'success': True, **result}), 200
else:
logger.error(f"Failed to create branch: {result}")
if 'merging' in result.get('error', '').lower():
return jsonify({'success': False, 'error': result}), 409
return jsonify({'success': False, 'error': result}), 400
@bp.route('/branches', methods=['GET'])
def get_branches():
logger.debug("Received request for branches")
success, result = branch_manager.get_all()
if success:
logger.debug("Successfully retrieved branches")
return jsonify({'success': True, 'data': result}), 200
else:
logger.error(f"Failed to retrieve branches: {result}")
return jsonify({'success': False, 'error': result}), 400
@bp.route('/checkout', methods=['POST'])
def checkout_branch():
branch_name = request.json.get('branch')
logger.debug(f"Received request to checkout branch: {branch_name}")
success, result = branch_manager.checkout(branch_name)
if success:
logger.debug(f"Successfully checked out branch: {branch_name}")
return jsonify({'success': True, **result}), 200
else:
logger.error(f"Failed to checkout branch: {result}")
if 'merging' in result.get('error', '').lower():
return jsonify({'success': False, 'error': result}), 409
return jsonify({'success': False, 'error': result}), 400
@bp.route('/branch/<branch_name>', methods=['DELETE'])
def delete_branch(branch_name):
logger.debug(f"Received request to delete branch: {branch_name}")
success, result = branch_manager.delete(branch_name)
if success:
logger.debug(f"Successfully deleted branch: {branch_name}")
return jsonify({'success': True, **result}), 200
else:
logger.error(f"Failed to delete branch: {result}")
if 'merging' in result.get('error', '').lower():
return jsonify({'success': False, 'error': result}), 409
return jsonify({'success': False, 'error': result}), 400
@bp.route('/branch/push', methods=['POST'])
def push_branch():
data = request.json
logger.debug(f"Received request to push branch: {data}")
branch_name = data.get('branch')
if not branch_name:
return jsonify({
"success": False,
"error": "Branch name is required"
}), 400
success, result = branch_manager.push(branch_name)
if success:
return jsonify({"success": True, "message": result}), 200
else:
if isinstance(result, str):
return jsonify({"success": False, "error": result}), 400
return jsonify({
"success": False,
"error": result.get('error', 'Unknown error occurred')
}), 400
@bp.route('/commit', methods=['POST'])
def commit_files():
files = request.json.get('files', [])
user_commit_message = request.json.get('commit_message', "Commit changes")
logger.debug(f"Received request to commit files: {files}")
commit_message = generate_commit_message(user_commit_message, files)
success, message = git_operations.commit(files, commit_message)
if success:
logger.debug("Successfully committed files")
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error committing files: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/push', methods=['POST'])
def push_files():
logger.debug("Received request to push changes")
success, message = git_operations.push()
if success:
logger.debug("Successfully pushed changes")
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error pushing changes: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/revert', methods=['POST'])
def revert_file():
file_path = request.json.get('file_path')
if not file_path:
return jsonify({
'success': False,
'error': "File path is required."
}), 400
success, message = git_operations.revert(file_path)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error reverting file: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/revert-all', methods=['POST'])
def revert_all():
success, message = git_operations.revert_all()
if success:
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error reverting all changes: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/file', methods=['DELETE'])
def delete_file():
file_path = request.json.get('file_path')
if not file_path:
return jsonify({
'success': False,
'error': "File path is required."
}), 400
success, message = git_operations.delete(file_path)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error deleting file: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/pull', methods=['POST'])
def pull_branch():
branch_name = request.json.get('branch')
success, response = git_operations.pull(branch_name)
# Handle different response types
if isinstance(response, dict):
if response.get('state') == 'resolve':
# Merge conflict is now a success case with state='resolve'
return jsonify({
'success': True,
'state': 'resolve',
'message': response['message'],
'details': response['details']
}), 200
elif response.get('state') == 'error':
# Handle error states
return jsonify({
'success': False,
'state': 'error',
'message': response['message'],
'details': response.get('details', {})
}), 409 if response.get('type') in [
'merge_conflict', 'uncommitted_changes'
] else 400
elif response.get('state') == 'complete':
# Normal success case
return jsonify({
'success': True,
'state': 'complete',
'message': response['message'],
'details': response.get('details', {})
}), 200
# Fallback for string responses or unexpected formats
if success:
return jsonify({
'success': True,
'state': 'complete',
'message': response
}), 200
return jsonify({
'success': False,
'state': 'error',
'message': str(response)
}), 400
@bp.route('/stage', methods=['POST'])
def handle_stage_files():
files = request.json.get('files', [])
success, message = git_operations.stage(files)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
return jsonify({'success': False, 'error': message}), 400
@bp.route('/unstage', methods=['POST'])
def handle_unstage_files():
files = request.json.get('files', [])
success, message = git_operations.unstage(files)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
return jsonify({'success': False, 'error': message}), 400
@bp.route('/unlink', methods=['POST'])
def unlink():
data = request.get_json()
remove_files = data.get('removeFiles', False)
success, message = unlink_repository(REPO_PATH, remove_files)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
return jsonify({'success': False, 'error': message}), 400
def generate_commit_message(user_message, files):
return user_message
@bp.route('/resolve', methods=['POST'])
def resolve_conflicts():
logger.debug("Received request to resolve conflicts")
resolutions = request.json.get('resolutions')
if not resolutions:
return jsonify({
'success': False,
'error': "Resolutions are required"
}), 400
result = git_operations.resolve(resolutions)
if result.get('success'):
logger.debug("Successfully resolved conflicts")
return jsonify(result), 200
else:
logger.error(f"Error resolving conflicts: {result.get('error')}")
return jsonify(result), 400
@bp.route('/merge/finalize', methods=['POST'])
def finalize_merge():
"""
Route to finalize a merge after all conflicts have been resolved.
Expected to be called only after all conflicts are resolved and changes are staged.
"""
logger.debug("Received request to finalize merge")
result = git_operations.finalize_merge()
if result.get('success'):
logger.debug(
f"Successfully finalized merge with files: {result.get('committed_files', [])}"
)
return jsonify({
'success': True,
'message': result.get('message'),
'committed_files': result.get('committed_files', [])
}), 200
else:
logger.error(f"Error finalizing merge: {result.get('error')}")
return jsonify({'success': False, 'error': result.get('error')}), 400
@bp.route('/merge/abort', methods=['POST'])
def abort_merge():
logger.debug("Received request to abort merge")
success, message = git_operations.abort_merge()
if success:
logger.debug("Successfully aborted merge")
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error aborting merge: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/commits', methods=['GET'])
def get_commit_history():
logger.debug("Received request for commit history")
branch = request.args.get('branch') # Optional branch parameter
success, result = get_git_commit_history(REPO_PATH, branch)
if success:
logger.debug("Successfully retrieved commit history")
return jsonify({'success': True, 'data': result}), 200
else:
logger.error(f"Failed to retrieve commit history: {result}")
return jsonify({'success': False, 'error': result}), 400
@bp.route('/autopull', methods=['GET', 'POST'])
def handle_auto_pull():
try:
if request.method == 'GET':
settings = get_settings()
return jsonify({
'success':
True,
'enabled':
bool(int(settings.get('auto_pull_enabled', 0)))
}), 200
# POST handling
data = request.json
enabled = data.get('enabled')
if enabled is None:
return jsonify({
'success': False,
'error': 'enabled field is required'
}), 400
save_settings({'auto_pull_enabled': 1 if enabled else 0})
logger.info(
f"Auto-pull has been {'enabled' if enabled else 'disabled'}")
return jsonify({'success': True}), 200
except Exception as e:
logger.error(f"Error handling auto pull setting: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 500
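# Illustrative call sketch (the '/git' URL prefix is an assumption about how the app
# registers this blueprint, not taken from this file):
def _example_toggle_auto_pull(app):
    client = app.test_client()
    return client.post('/git/autopull', json={'enabled': True}).get_json()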

View File

@@ -1,49 +0,0 @@
# git/auth/authenticate.py
import os
import logging
logger = logging.getLogger(__name__)
class GitHubAuth:
"""
A modular authentication handler for GitHub repositories.
Supports Personal Access Tokens (PAT) for HTTPS authentication.
"""
@staticmethod
def get_authenticated_url(https_url):
"""
Convert an HTTPS URL to include authentication via PAT.
Ensures the token is not duplicated in the URL.
"""
token = os.getenv("PROFILARR_PAT")
if not token:
raise ValueError(
"PROFILARR_PAT is not set in environment variables")
# Check if the URL already contains authentication
if "@" in https_url:
# Already has some form of authentication, remove it to add our token
# This handles URLs that might have a token already
protocol_part, rest = https_url.split("://", 1)
if "@" in rest:
# Remove any existing authentication
_, server_part = rest.split("@", 1)
https_url = f"{protocol_part}://{server_part}"
# Now add our token
authenticated_url = https_url.replace("https://", f"https://{token}@")
return authenticated_url
@staticmethod
def verify_token():
"""
Verify if the Personal Access Token is valid.
"""
token = os.getenv("PROFILARR_PAT")
if not token:
logger.error("PROFILARR_PAT is not set")
return False
logger.info("Token verification skipped (assume valid)")
return True
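# Sketch of the URL rewrite (the repository URL is a placeholder and PROFILARR_PAT
# must be set): credentials already embedded in the URL are stripped before the PAT
# is inserted after the scheme.
def _example_authenticated_url():
    url = 'https://olduser@github.com/example/repo.git'
    return GitHubAuth.get_authenticated_url(url)
    # -> 'https://<PROFILARR_PAT>@github.com/example/repo.git'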

View File

@@ -1,53 +0,0 @@
# git/branches/checkout.py
import git
import logging
from ...arr.manager import check_active_sync_configs
logger = logging.getLogger(__name__)
def checkout_branch(repo_path, branch_name):
try:
# Check for active sync configurations first
has_active_configs, configs = check_active_sync_configs()
if has_active_configs:
error_msg = (
"Cannot checkout branch while automatic sync configurations are active.\n"
"The following configurations must be set to manual sync first:\n"
)
for config in configs:
error_msg += f"- {config['name']} (ID: {config['id']}, {config['sync_method']} sync)\n"
logger.error(error_msg)
return False, {
"error": error_msg,
"code": "ACTIVE_SYNC_CONFIGS",
"configs": configs
}
logger.debug(f"Attempting to checkout branch: {branch_name}")
repo = git.Repo(repo_path)
# Check if the branch exists locally
if branch_name in repo.heads:
repo.git.checkout(branch_name)
else:
# Check if the branch exists in any of the remotes
for remote in repo.remotes:
remote_branch = f"{remote.name}/{branch_name}"
if remote_branch in repo.refs:
# Create a new local branch tracking the remote branch
repo.git.checkout('-b', branch_name, remote_branch)
break
else:
return False, f"Branch '{branch_name}' does not exist locally or in any remote."
logger.debug(f"Successfully checked out branch: {branch_name}")
return True, {
"message": f"Checked out branch: {branch_name}",
"current_branch": branch_name
}
except Exception as e:
logger.error(f"Error checking out branch: {str(e)}", exc_info=True)
return False, {"error": f"Error checking out branch: {str(e)}"}

View File

@@ -1,24 +0,0 @@
# git/branches/create.py
import git
import logging
logger = logging.getLogger(__name__)
def create_branch(repo_path, branch_name, base_branch='main'):
try:
logger.debug(f"Attempting to create branch {branch_name} from {base_branch}")
repo = git.Repo(repo_path)
# Check if the branch already exists
if branch_name in repo.heads:
return False, f"Branch '{branch_name}' already exists."
# Create and checkout the new branch
new_branch = repo.create_head(branch_name, commit=base_branch)
new_branch.checkout()
logger.debug(f"Successfully created branch: {branch_name}")
return True, {"message": f"Created branch: {branch_name}", "current_branch": branch_name}
except Exception as e:
logger.error(f"Error creating branch: {str(e)}", exc_info=True)
return False, {"error": f"Error creating branch: {str(e)}"}

View File

@@ -1,46 +0,0 @@
# git/branches/delete.py
import git
from git.exc import GitCommandError
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def delete_branch(repo_path, branch_name):
try:
logger.debug(f"Attempting to delete branch: {branch_name}")
logger.debug(
f"Attempting to delete branch from repo at path: {repo_path}")
repo = git.Repo(repo_path)
# Fetch updates from remote
logger.debug("Fetching updates from remote...")
repo.git.fetch('--all')
# Update local repository state
logger.debug("Updating local repository state...")
repo.git.remote('update', 'origin', '--prune')
# Check if it's a local branch
if branch_name in repo.heads:
logger.debug(f"Deleting local branch: {branch_name}")
if repo.active_branch.name == branch_name:
return False, f"Cannot delete the current branch: {branch_name}"
repo.delete_head(branch_name, force=True)
logger.debug(f"Local branch {branch_name} deleted")
# Check if remote branch exists
remote_branch = f"origin/{branch_name}"
if remote_branch in repo.refs:
pass
return True, {
"message": f"Deleted branch: {branch_name}",
"current_branch": repo.active_branch.name
}
except Exception as e:
logger.error(f"Error deleting branch: {str(e)}", exc_info=True)
return False, {"error": f"Error deleting branch: {str(e)}"}

View File

@@ -1,48 +0,0 @@
import git
import logging
from flask import Blueprint, jsonify
logger = logging.getLogger(__name__)
def get_branches(repo_path):
try:
logger.debug("Attempting to get branches")
repo = git.Repo(repo_path)
# Get local branches
local_branches = [{'name': branch.name, 'isLocal': True, 'isRemote': False} for branch in repo.heads]
logger.debug(f"Local branches found: {[branch['name'] for branch in local_branches]}")
# Get remote branches
remote_branches = [{'name': ref.remote_head, 'isLocal': False, 'isRemote': True} for ref in repo.remote().refs if not ref.remote_head == 'HEAD']
logger.debug(f"Remote branches found: {[branch['name'] for branch in remote_branches]}")
# Combine and update status for branches that are both local and remote
all_branches = local_branches + remote_branches
branch_dict = {}
for branch in all_branches:
if branch['name'] in branch_dict:
branch_dict[branch['name']]['isLocal'] = branch_dict[branch['name']]['isLocal'] or branch['isLocal']
branch_dict[branch['name']]['isRemote'] = branch_dict[branch['name']]['isRemote'] or branch['isRemote']
else:
branch_dict[branch['name']] = branch
all_branches = list(branch_dict.values())
logger.debug(f"All branches combined (local and remote): {[branch['name'] for branch in all_branches]}")
logger.info(f"Branches being sent: {[branch['name'] for branch in all_branches]}")
return True, {"branches": all_branches}
except Exception as e:
logger.error(f"Error getting branches: {str(e)}", exc_info=True)
return False, {"error": f"Error getting branches: {str(e)}"}
def get_current_branch(repo_path):
try:
repo = git.Repo(repo_path)
current_branch = repo.active_branch.name
logger.debug(f"Current branch: {current_branch}")
return current_branch
except Exception as e:
logger.error(f"Error getting current branch: {str(e)}", exc_info=True)
return None

View File

@@ -1,56 +0,0 @@
# git/branches/branches.py
import git
import os
from .create import create_branch
from .checkout import checkout_branch
from .delete import delete_branch
from .get import get_branches, get_current_branch
from .push import push_branch_to_remote
class Branch_Manager:
def __init__(self, repo_path):
self.repo_path = repo_path
def is_merging(self):
repo = git.Repo(self.repo_path)
return os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD'))
def create(self, branch_name, base_branch='main'):
if self.is_merging():
return False, {
'error':
'Cannot create branch while merging. Resolve conflicts first.'
}
return create_branch(self.repo_path, branch_name, base_branch)
def checkout(self, branch_name):
if self.is_merging():
return False, {
'error':
'Cannot checkout while merging. Resolve conflicts first.'
}
return checkout_branch(self.repo_path, branch_name)
def delete(self, branch_name):
if self.is_merging():
return False, {
'error':
'Cannot delete branch while merging. Resolve conflicts first.'
}
return delete_branch(self.repo_path, branch_name)
def get_all(self):
return get_branches(self.repo_path)
def get_current(self):
return get_current_branch(self.repo_path)
def push(self, branch_name):
if self.is_merging():
return False, {
'error': 'Cannot push while merging. Resolve conflicts first.'
}
return push_branch_to_remote(self.repo_path, branch_name)
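# Usage sketch (the repo path argument is illustrative): every mutating operation
# refuses to run mid-merge, so callers can rely on the (success, result) pair.
def _example_list_branches(repo_path):
    manager = Branch_Manager(repo_path)
    ok, result = manager.get_all()
    return result['branches'] if ok else []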

View File

@@ -1,59 +0,0 @@
# git/branches/push.py
import git
import logging
from ..auth.authenticate import GitHubAuth
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def _handle_git_error(error):
"""Helper function to process git errors and return user-friendly messages"""
error_msg = str(error)
if "403" in error_msg:
return "Authentication failed: The provided PAT doesn't have sufficient permissions or is invalid."
elif "401" in error_msg:
return "Authentication failed: No PAT provided or the token is invalid."
elif "non-fast-forward" in error_msg:
return "Push rejected: Remote contains work that you do not have locally. Please pull the latest changes first."
return f"Git error: {error_msg}"
def push_branch_to_remote(repo_path, branch_name):
try:
logger.debug(f"Attempting to push branch {branch_name} to remote")
# Verify token before attempting push
if not GitHubAuth.verify_token():
return False, "Push operation requires GitHub authentication. Please configure PAT."
repo = git.Repo(repo_path)
# Check if the branch exists locally
if branch_name not in repo.heads:
return False, f"Branch '{branch_name}' does not exist locally."
origin = repo.remote(name='origin')
original_url = origin.url
try:
# Set authenticated URL
auth_url = GitHubAuth.get_authenticated_url(original_url)
origin.set_url(auth_url)
# Push the branch to remote and set the upstream branch
origin.push(refspec=f"{branch_name}:{branch_name}",
set_upstream=True)
return True, f"Pushed branch to remote: {branch_name}"
except git.GitCommandError as e:
return False, _handle_git_error(e)
finally:
# Always restore original URL
origin.set_url(original_url)
except Exception as e:
logger.error(f"Error pushing branch to remote: {str(e)}",
exc_info=True)
return False, str(e)

View File

@@ -1,135 +0,0 @@
# git/operations/commit.py
import git
import os
import logging
from ..status.status import GitStatusManager
logger = logging.getLogger(__name__)
def parse_git_status(status_output):
"""
Parse git status --porcelain output into a structured format.
Returns dict with staged and unstaged changes, identifying status of each file.
"""
changes = {}
for line in status_output:
if not line:
continue
index_status = line[0] # First character: staged status
worktree_status = line[1] # Second character: unstaged status
file_path = line[3:]
changes[file_path] = {
'staged': index_status != ' ',
'staged_status': index_status,
'unstaged_status': worktree_status
}
return changes
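# Sketch with fabricated porcelain lines (paths are examples): the first column is
# the index (staged) status, the second the worktree (unstaged) status.
def _example_parse_status():
    sample = ['M  profiles/1080p.yml', ' D custom_formats/old.yml']
    return parse_git_status(sample)
    # -> {'profiles/1080p.yml': {'staged': True, 'staged_status': 'M', 'unstaged_status': ' '},
    #     'custom_formats/old.yml': {'staged': False, 'staged_status': ' ', 'unstaged_status': 'D'}}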
def commit_changes(repo_path, files, message):
"""
Commit changes to git repository, optimizing staging operations.
Only re-stages files if their current staging status is incorrect.
Args:
repo_path: Path to git repository
files: List of files to commit, or None/empty for all staged changes
message: Commit message
Returns:
tuple: (success: bool, message: str)
"""
try:
repo = git.Repo(repo_path)
# If no specific files provided, commit all staged changes
if not files:
commit = repo.index.commit(message)
# Update remote status after commit
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
return True, "Successfully committed all staged changes."
# Get current status of the repository
status_output = repo.git.status('--porcelain').splitlines()
status = parse_git_status(status_output)
# Track files that need staging operations
to_add = []
to_remove = []
already_staged = []
for file_path in files:
if file_path in status:
file_status = status[file_path]
# File is already properly staged
if file_status['staged']:
if file_status['staged_status'] == 'D':
already_staged.append(('deleted', file_path))
else:
already_staged.append(('modified', file_path))
continue
# File needs to be staged
if file_status['unstaged_status'] == 'D':
to_remove.append(file_path)
else:
to_add.append(file_path)
else:
logger.warning(f"File not found in git status: {file_path}")
# Perform necessary staging operations
if to_add:
logger.debug(f"Staging modified files: {to_add}")
repo.index.add(to_add)
if to_remove:
logger.debug(f"Staging deleted files: {to_remove}")
repo.index.remove(to_remove, working_tree=True)
# Commit the changes
commit = repo.index.commit(message)
# Update remote status after commit
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
# Build detailed success message
staged_counts = {
'added/modified': len(to_add),
'deleted': len(to_remove),
'already_staged': len(already_staged)
}
message_parts = []
if staged_counts['added/modified']:
message_parts.append(
f"{staged_counts['added/modified']} files staged")
if staged_counts['deleted']:
message_parts.append(
f"{staged_counts['deleted']} deletions staged")
if staged_counts['already_staged']:
message_parts.append(
f"{staged_counts['already_staged']} files already staged")
if message_parts:
details = " and ".join(message_parts)
return True, f"Successfully committed changes ({details})"
else:
return True, "Successfully committed changes (no files needed staging)"
except git.exc.GitCommandError as e:
logger.error(f"Git command error committing changes: {str(e)}",
exc_info=True)
return False, f"Error committing changes: {str(e)}"
except Exception as e:
logger.error(f"Error committing changes: {str(e)}", exc_info=True)
return False, f"Error committing changes: {str(e)}"

View File

@@ -1,20 +0,0 @@
# git/operations/delete.py
import os
import logging
logger = logging.getLogger(__name__)
def delete_file(repo_path, file_path):
try:
full_file_path = os.path.join(repo_path, file_path)
if os.path.exists(full_file_path):
os.remove(full_file_path)
message = f"File {file_path} has been deleted."
return True, message
else:
return False, "File does not exist."
except Exception as e:
logger.error(f"Error deleting file: {str(e)}", exc_info=True)
return False, f"Error deleting file: {str(e)}"

View File

@@ -1,54 +0,0 @@
import git
from .stage import stage_files
from .commit import commit_changes
from .push import push_changes
from .revert import revert_file, revert_all
from .delete import delete_file
from .pull import pull_branch
from .unstage import unstage_files
from .merge import abort_merge, finalize_merge
from .resolve import resolve_conflicts
import logging
logger = logging.getLogger(__name__)
class GitOperations:
def __init__(self, repo_path):
self.repo_path = repo_path
def stage(self, files):
return stage_files(self.repo_path, files)
def unstage(self, files):
return unstage_files(self.repo_path, files)
def commit(self, files, message):
return commit_changes(self.repo_path, files, message)
def push(self):
return push_changes(self.repo_path)
def revert(self, file_path):
return revert_file(self.repo_path, file_path)
def revert_all(self):
return revert_all(self.repo_path)
def delete(self, file_path):
return delete_file(self.repo_path, file_path)
def pull(self, branch_name):
return pull_branch(self.repo_path, branch_name)
def finalize_merge(self):
repo = git.Repo(self.repo_path)
return finalize_merge(repo)
def abort_merge(self):
return abort_merge(self.repo_path)
def resolve(self, resolutions):
repo = git.Repo(self.repo_path)
return resolve_conflicts(repo, resolutions)

View File

@@ -1,103 +0,0 @@
# git/operations/merge.py
import git
import logging
import os
from typing import Dict, Any
from ..status.status import GitStatusManager
logger = logging.getLogger(__name__)
def finalize_merge(repo) -> Dict[str, Any]:
"""
Finalize a merge by committing all staged files after conflict resolution.
"""
try:
if not os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD')):
return {
'success': False,
'error': 'Not currently in a merge state'
}
# Get unmerged files
unmerged_files = []
status = repo.git.status('--porcelain', '-z').split('\0')
for item in status:
if item and len(item) >= 4:
x, y, file_path = item[0], item[1], item[3:]
if 'U' in (x, y):
unmerged_files.append(file_path)
# Force update the index for unmerged files
for file_path in unmerged_files:
# Remove from index first
try:
repo.git.execute(['git', 'reset', '--', file_path])
except git.GitCommandError:
pass
# Add back to index
try:
repo.git.execute(['git', 'add', '--', file_path])
except git.GitCommandError as e:
logger.error(f"Error adding file {file_path}: {str(e)}")
return {
'success': False,
'error': f"Failed to stage resolved file {file_path}"
}
# Create commit message
commit_message = "Merge complete: resolved conflicts"
# Commit
try:
repo.git.commit('-m', commit_message)
logger.info("Successfully finalized merge")
# Update remote status after merge
repo_path = repo.working_dir
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
return {'success': True, 'message': 'Merge completed successfully'}
except git.GitCommandError as e:
logger.error(f"Git command error during commit: {str(e)}")
return {
'success': False,
'error': f"Failed to commit merge: {str(e)}"
}
except Exception as e:
logger.error(f"Failed to finalize merge: {str(e)}")
return {
'success': False,
'error': f"Failed to finalize merge: {str(e)}"
}
def abort_merge(repo_path):
try:
repo = git.Repo(repo_path)
# Try aborting the merge using git merge --abort
try:
repo.git.execute(['git', 'merge', '--abort'])
return True, "Merge aborted successfully"
except git.GitCommandError as e:
logger.warning(
"Error aborting merge with 'git merge --abort'. Trying 'git reset --hard'."
)
# If git merge --abort fails, try resetting to the previous commit using git reset --hard
try:
repo.git.execute(['git', 'reset', '--hard'])
return True, "Merge aborted and repository reset to the previous commit"
except git.GitCommandError as e:
logger.exception(
"Error resetting repository with 'git reset --hard'")
return False, str(e)
except Exception as e:
logger.exception("Unexpected error aborting merge")
return False, str(e)

View File

@@ -1,65 +0,0 @@
# git/operations/pull.py
import git
import logging
from git import GitCommandError
from ..status.status import GitStatusManager
from ...arr.manager import get_pull_configs
from ...importer import handle_pull_import
logger = logging.getLogger(__name__)
def pull_branch(repo_path, branch_name):
try:
repo = git.Repo(repo_path)
# Check for uncommitted changes first
if repo.is_dirty(untracked_files=True):
return False, {
'type': 'uncommitted_changes',
'message':
'Cannot pull: You have uncommitted local changes that would be lost',
'details': 'Please commit or stash your changes before pulling'
}
# Fetch first to get remote changes
repo.remotes.origin.fetch()
try:
# Pull with explicit merge strategy
repo.git.pull('origin', branch_name, '--no-rebase')
# Update remote status
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
# -------------------------------
# *** "On pull" ARR import logic using new importer:
# 1) Query all ARR configs that have sync_method="pull"
# 2) For each, run the importer pull handler
# -------------------------------
pull_configs = get_pull_configs()
logger.info(
f"[Pull] Found {len(pull_configs)} ARR configs to import (sync_method='pull')"
)
for cfg in pull_configs:
handle_pull_import(cfg['id'])
return True, f"Successfully pulled changes for branch {branch_name}"
except GitCommandError as e:
if "CONFLICT" in str(e):
return True, {
'state': 'resolve',
'type': 'merge_conflict',
'message':
'Repository is now in conflict resolution state. Please resolve conflicts to continue merge.',
'details': 'Please resolve conflicts to continue merge'
}
raise e
except Exception as e:
logger.error(f"Error pulling branch: {str(e)}", exc_info=True)
return False, f"Error pulling branch: {str(e)}"
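
pull_branch returns a (success, result) pair where result is either a plain string or a dict describing an uncommitted-changes block or a conflict state. A minimal sketch of how a caller might branch on those shapes; the handler name is mine, not from the project.

def describe_pull_result(success, result):
    # Dict payloads carry the structured states produced by pull_branch above.
    if isinstance(result, dict):
        if result.get('type') == 'uncommitted_changes':
            return f"Blocked: {result['message']}"
        if result.get('state') == 'resolve':
            return "Merge conflict: resolve conflicts to finish the pull."
        return str(result)
    return result if success else f"Pull failed: {result}"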

View File

@@ -1,59 +0,0 @@
# git/operations/push.py
import git
import logging
from ..auth.authenticate import GitHubAuth
from ..status.status import GitStatusManager
logger = logging.getLogger(__name__)
def _handle_git_error(error):
"""Helper function to process git errors and return user-friendly messages"""
error_msg = str(error)
if "403" in error_msg:
return "Authentication failed: The provided PAT doesn't have sufficient permissions or is invalid."
elif "401" in error_msg:
return "Authentication failed: No PAT provided or the token is invalid."
elif "non-fast-forward" in error_msg:
return "Push rejected: Remote contains work that you do not have locally. Please pull the latest changes first."
return f"Git error: {error_msg}"
def push_changes(repo_path):
try:
# Verify token before attempting push
if not GitHubAuth.verify_token():
return False, "Push operation requires GitHub authentication. Please configure PAT."
repo = git.Repo(repo_path)
origin = repo.remote(name='origin')
original_url = origin.url
try:
# Set authenticated URL
auth_url = GitHubAuth.get_authenticated_url(original_url)
origin.set_url(auth_url)
# Push changes
push_info = origin.push()
if push_info and push_info[0].flags & push_info[0].ERROR:
raise git.GitCommandError("git push", push_info[0].summary)
# Update remote status after successful push
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
return True, "Successfully pushed changes."
finally:
# Always restore original URL
origin.set_url(original_url)
except git.GitCommandError as e:
logger.error(f"Git command error during push: {str(e)}")
return False, _handle_git_error(e)
except Exception as e:
logger.error(f"Error pushing changes: {str(e)}", exc_info=True)
return False, str(e)
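
The push path temporarily swaps in an authenticated remote URL and restores the original in a finally block so credentials never persist in .git/config. A generic GitPython sketch of that pattern; the token handling here is a simplification, not the project's GitHubAuth logic.

import git

def push_with_token(repo_path, token):
    repo = git.Repo(repo_path)
    origin = repo.remote(name='origin')
    original_url = origin.url
    try:
        # e.g. https://github.com/user/repo.git -> https://<token>@github.com/user/repo.git
        origin.set_url(original_url.replace('https://', f'https://{token}@', 1))
        origin.push()
    finally:
        origin.set_url(original_url)  # always restore the clean URL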

View File

@@ -1,333 +0,0 @@
import yaml
from git import GitCommandError
import logging
from typing import Dict, Any
import os
from copy import deepcopy
from ...data.utils import CATEGORY_MAP
logger = logging.getLogger(__name__)
def determine_type(file_path):
if 'regex_patterns' in file_path:
return 'Regex Pattern'
elif 'custom_formats' in file_path:
return 'Custom Format'
elif 'profiles' in file_path:
return 'Quality Profile'
return 'Unknown'
def get_version_data(repo, ref, file_path):
"""Get YAML data from a specific version of a file."""
try:
content = repo.git.show(f'{ref}:{file_path}')
return yaml.safe_load(content) if content else None
except GitCommandError:
return None
def resolve_conflicts(
repo, resolutions: Dict[str, Dict[str, str]]) -> Dict[str, Any]:
"""
Resolve merge conflicts based on provided resolutions.
"""
logger.debug(f"Received resolutions for files: {list(resolutions.keys())}")
logger.debug(f"Full resolutions data: {resolutions}")
try:
status = repo.git.status('--porcelain', '-z').split('\0')
conflicts = []
for item in status:
if not item or len(item) < 4:
continue
x, y, file_path = item[0], item[1], item[3:]
# Include modify/delete conflicts
if 'U' in (x, y) or (x == 'D' and y == 'D') or (
x == 'D' and y == 'U') or (x == 'U' and y == 'D'):
conflicts.append((file_path, x, y))
# Track which files are modify/delete conflicts
modify_delete_conflicts = {
path: (x == 'D' and y == 'U') or (x == 'U' and y == 'D')
for path, x, y in conflicts
}
# Validate resolutions are for actual conflicting files
for file_path in resolutions:
if file_path not in {path for path, _, _ in conflicts}:
return {
'success': False,
'error': f"File not in conflict: {file_path}"
}
# Store initial states for rollback
initial_states = {}
for file_path in resolutions:
try:
full_path = os.path.join(repo.working_dir, file_path)
try:
with open(full_path, 'r') as f:
initial_states[file_path] = f.read()
except FileNotFoundError:
initial_states[file_path] = None
except Exception as e:
return {
'success': False,
'error': f"Couldn't read file {file_path}: {str(e)}"
}
results = {}
for file_path, field_resolutions in resolutions.items():
# Handle modify/delete conflicts differently
if modify_delete_conflicts[file_path]:
logger.debug(
f"Handling modify/delete conflict for {file_path}")
logger.debug(f"Field resolutions for modify/delete: {field_resolutions}")
# Get the existing version (either from HEAD or MERGE_HEAD)
head_data = get_version_data(repo, 'HEAD', file_path)
merge_head_data = get_version_data(repo, 'MERGE_HEAD',
file_path)
# Determine which version exists
is_deleted_in_head = head_data is None
existing_data = merge_head_data if is_deleted_in_head else head_data
logger.debug(f"Existing version data: {existing_data}")
logger.debug(f"is_deleted_in_head: {is_deleted_in_head}")
logger.debug(f"head_data: {head_data}")
logger.debug(f"merge_head_data: {merge_head_data}")
# Try both lowercase and capitalized versions of 'file'
choice = field_resolutions.get('file') or field_resolutions.get('File')
logger.debug(f"Resolution choice for file: {choice}")
if not choice:
logger.error("No 'file' or 'File' resolution found in field_resolutions!")
logger.error(f"Available keys: {list(field_resolutions.keys())}")
raise Exception(
"No resolution provided for modify/delete conflict")
full_path = os.path.join(repo.working_dir, file_path)
if choice == 'local':
if is_deleted_in_head:
logger.debug(f"Keeping file deleted: {file_path}")
# File should stay deleted
try:
os.remove(full_path)
except FileNotFoundError:
pass # File is already gone
repo.index.remove([file_path])
else:
logger.debug(f"Keeping local version: {file_path}")
# Keep our version
with open(full_path, 'w') as f:
yaml.safe_dump(head_data,
f,
default_flow_style=False)
repo.index.add([file_path])
elif choice == 'incoming':
if is_deleted_in_head:
logger.debug(
f"Restoring from incoming version: {file_path}")
# Restore the file from MERGE_HEAD
with open(full_path, 'w') as f:
yaml.safe_dump(merge_head_data,
f,
default_flow_style=False)
repo.index.add([file_path])
else:
logger.debug(f"Accepting deletion: {file_path}")
# Accept the deletion
try:
os.remove(full_path)
except FileNotFoundError:
pass # File is already gone
repo.index.remove([file_path])
results[file_path] = {
'resolution':
choice,
'action':
'delete' if (choice == 'local' and is_deleted_in_head) or
(choice == 'incoming' and not is_deleted_in_head) else
'keep'
}
else:
# Regular conflict resolution
# Get all three versions
base_data = get_version_data(repo, 'HEAD^', file_path)
ours_data = get_version_data(repo, 'HEAD', file_path)
theirs_data = get_version_data(repo, 'MERGE_HEAD', file_path)
# For files that were previously involved in modify/delete conflicts
# we may not be able to get all versions
if not base_data or not ours_data or not theirs_data:
logger.warning(f"Couldn't get all versions of {file_path} - may have been previously resolved as a modify/delete conflict")
logger.warning(f"base_data: {base_data}, ours_data: {ours_data}, theirs_data: {theirs_data}")
# If it was previously resolved as "incoming" but ours_data is missing, use theirs_data
if not ours_data and theirs_data:
logger.info(f"Using incoming version for {file_path} as base for resolution")
ours_data = theirs_data
# If it was previously resolved as "local" but theirs_data is missing, use ours_data
elif ours_data and not theirs_data:
logger.info(f"Using local version for {file_path} as base for resolution")
theirs_data = ours_data
# If we can't recover either version, we can't proceed
else:
raise Exception(f"Couldn't get required versions of {file_path}")
# Start with a deep copy of ours_data to preserve all fields
resolved_data = deepcopy(ours_data)
# Track changes
kept_values = {}
discarded_values = {}
# Handle each resolution field
for field, choice in field_resolutions.items():
if field.startswith('custom_format_'):
format_name = field[len('custom_format_'):]
ours_cf = next(
(item
for item in ours_data.get('custom_formats', [])
if item['name'] == format_name), None)
theirs_cf = next(
(item
for item in theirs_data.get('custom_formats', [])
if item['name'] == format_name), None)
if choice == 'local' and ours_cf:
resolved_cf = ours_cf
kept_values[field] = ours_cf
discarded_values[field] = theirs_cf
elif choice == 'incoming' and theirs_cf:
resolved_cf = theirs_cf
kept_values[field] = theirs_cf
discarded_values[field] = ours_cf
else:
raise Exception(
f"Invalid choice or missing custom format {format_name}"
)
resolved_cf_list = resolved_data.get(
'custom_formats', [])
for idx, item in enumerate(resolved_cf_list):
if item['name'] == format_name:
resolved_cf_list[idx] = resolved_cf
break
else:
resolved_cf_list.append(resolved_cf)
resolved_data['custom_formats'] = resolved_cf_list
elif field.startswith('tag_'):
tag_name = field[len('tag_'):]
current_tags = set(resolved_data.get('tags', []))
if choice == 'local':
if tag_name in ours_data.get('tags', []):
current_tags.add(tag_name)
kept_values[field] = 'local'
discarded_values[field] = 'incoming'
else:
current_tags.discard(tag_name)
kept_values[field] = 'none'
discarded_values[field] = 'incoming'
elif choice == 'incoming':
if tag_name in theirs_data.get('tags', []):
current_tags.add(tag_name)
kept_values[field] = 'incoming'
discarded_values[field] = 'local'
else:
current_tags.discard(tag_name)
kept_values[field] = 'none'
discarded_values[field] = 'local'
else:
raise Exception(
f"Invalid choice for tag field: {field}")
resolved_data['tags'] = sorted(current_tags)
else:
field_key = field
if choice == 'local':
resolved_data[field_key] = ours_data.get(field_key)
kept_values[field_key] = ours_data.get(field_key)
discarded_values[field_key] = theirs_data.get(
field_key)
elif choice == 'incoming':
resolved_data[field_key] = theirs_data.get(
field_key)
kept_values[field_key] = theirs_data.get(field_key)
discarded_values[field_key] = ours_data.get(
field_key)
else:
raise Exception(
f"Invalid choice for field: {field}")
# Get file type and apply appropriate field ordering
file_type = determine_type(file_path)
if file_type == 'Quality Profile':
_, fields = CATEGORY_MAP['profile']
elif file_type == 'Custom Format':
_, fields = CATEGORY_MAP['custom_format']
elif file_type == 'Regex Pattern':
_, fields = CATEGORY_MAP['regex_pattern']
# Order the fields according to the category's field order
ordered_data = {
field: resolved_data.get(field)
for field in fields if field in resolved_data
}
resolved_data = ordered_data
# Write resolved version
full_path = os.path.join(repo.working_dir, file_path)
with open(full_path, 'w') as f:
yaml.safe_dump(resolved_data, f, default_flow_style=False)
# Stage the resolved file
repo.index.add([file_path])
results[file_path] = {
'kept_values': kept_values,
'discarded_values': discarded_values
}
logger.debug(
f"Successfully resolved regular conflict for {file_path}")
logger.debug("==== Status after resolve_conflicts ====")
status_output = repo.git.status('--porcelain', '-z').split('\0')
for item in status_output:
if item:
logger.debug(f"File status: {item}")
logger.debug("=======================================")
return {'success': True, 'results': results}
except Exception as e:
# Rollback on any error
for file_path, initial_state in initial_states.items():
try:
full_path = os.path.join(repo.working_dir, file_path)
if initial_state is None:
try:
os.remove(full_path)
except FileNotFoundError:
pass
else:
with open(full_path, 'w') as f:
f.write(initial_state)
except Exception as rollback_error:
logger.error(
f"Failed to rollback {file_path}: {str(rollback_error)}")
logger.error(f"Failed to resolve conflicts: {str(e)}")
return {'success': False, 'error': str(e)}
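
resolve_conflicts expects resolutions keyed by conflicted file path, with per-field choices of 'local' or 'incoming'; field keys use the prefixes handled above ('custom_format_<name>', 'tag_<name>', a plain field name, or 'file' for modify/delete conflicts). A hypothetical payload with invented file and field names:

resolutions = {
    # Regular conflict: pick a winner per field.
    "profiles/1080p Transparent.yml": {
        "upgradesAllowed": "local",        # plain top-level field
        "custom_format_DON": "incoming",   # scored custom format, matched by name
        "tag_1080p": "local",              # tag membership
    },
    # Modify/delete conflict: a single whole-file choice.
    "custom_formats/BHDStudio.yml": {"file": "incoming"},
}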

View File

@@ -1,109 +0,0 @@
# git/operations/revert.py
import git
import os
import logging
logger = logging.getLogger(__name__)
def revert_file(repo_path, file_path):
"""
Revert changes in a file, handling tracked files, staged deletions, and new files.
Args:
repo_path: Path to the git repository
file_path: Path to the file to revert
Returns:
tuple: (success: bool, message: str)
"""
try:
repo = git.Repo(repo_path)
file_absolute_path = os.path.join(repo_path, file_path)
# Check if file is untracked (new)
untracked_files = repo.untracked_files
is_untracked = any(f == file_path for f in untracked_files)
if is_untracked:
# For untracked files, we need to remove them
try:
os.remove(file_absolute_path)
message = f"New file {file_path} has been removed."
except FileNotFoundError:
message = f"File {file_path} was already removed."
return True, message
# Check if file is staged for deletion
staged_deletions = repo.index.diff("HEAD", R=True)
is_staged_for_deletion = any(d.a_path == file_path
for d in staged_deletions)
if is_staged_for_deletion:
# Restore file staged for deletion
repo.git.reset("--", file_path)
repo.git.checkout('HEAD', "--", file_path)
message = f"File {file_path} has been restored and unstaged from deletion."
else:
# Regular revert for tracked files with changes
repo.git.restore("--", file_path)
repo.git.restore('--staged', "--", file_path)
message = f"File {file_path} has been reverted."
return True, message
except git.exc.GitCommandError as e:
error_msg = str(e)
if "pathspec" in error_msg and "did not match any file(s) known to git" in error_msg:
logger.error(f"File {file_path} not found in git repository")
return False, f"File {file_path} not found in git repository"
logger.error(f"Git error reverting file: {error_msg}", exc_info=True)
return False, f"Git error reverting file: {error_msg}"
except Exception as e:
logger.error(f"Error reverting file: {str(e)}", exc_info=True)
return False, f"Error reverting file: {str(e)}"
def revert_all(repo_path):
"""
Revert all changes in the repository, including new files.
Args:
repo_path: Path to the git repository
Returns:
tuple: (success: bool, message: str)
"""
try:
repo = git.Repo(repo_path)
# First, clean untracked files
untracked_files = repo.untracked_files
for file_path in untracked_files:
try:
os.remove(os.path.join(repo_path, file_path))
except FileNotFoundError:
continue
except Exception as e:
logger.warning(
f"Could not remove untracked file {file_path}: {str(e)}")
# Then restore tracked files
repo.git.restore('--staged', '.')
repo.git.restore('.')
message = "All changes have been reverted to the last commit"
if untracked_files:
message += f" and {len(untracked_files)} new file(s) have been removed"
message += "."
return True, message
except git.exc.GitCommandError as e:
logger.error(f"Git error reverting all changes: {str(e)}",
exc_info=True)
return False, f"Git error reverting all changes: {str(e)}"
except Exception as e:
logger.error(f"Error reverting all changes: {str(e)}", exc_info=True)
return False, f"Error reverting all changes: {str(e)}"
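
Both helpers return a (success, message) tuple, so single-file and whole-repository reverts can be handled the same way. A usage sketch; the import path and repository location are assumptions, not taken from this diff.

# Hypothetical import path and repo location, shown only to illustrate the return shape.
from app.git.operations.revert import revert_file, revert_all

ok, msg = revert_file("/config/db/repo", "custom_formats/DON.yml")
print(msg if ok else f"Revert failed: {msg}")

ok, msg = revert_all("/config/db/repo")
print(msg)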

View File

@@ -1,71 +0,0 @@
# git/operations/stage.py
import git
import os
import logging
logger = logging.getLogger(__name__)
def stage_files(repo_path, files):
"""
Stage files in git repository, properly handling both existing and deleted files.
Args:
repo_path: Path to git repository
files: List of files to stage, or None/empty list to stage all changes
Returns:
tuple: (success: bool, message: str)
"""
try:
repo = git.Repo(repo_path)
# Stage all changes if no specific files provided
if not files:
repo.git.add(A=True)
return True, "All changes have been staged."
# Handle specific files
existing_files = []
deleted_files = []
# Separate existing and deleted files
for file_path in files:
full_path = os.path.join(repo_path, file_path)
if os.path.exists(full_path):
existing_files.append(file_path)
else:
# Check if file is tracked but deleted
try:
repo.git.ls_files(file_path, error_unmatch=True)
deleted_files.append(file_path)
except git.exc.GitCommandError:
logger.warning(f"Untracked file not found: {file_path}")
continue
# Stage existing files
if existing_files:
repo.index.add(existing_files)
# Stage deleted files
if deleted_files:
repo.index.remove(deleted_files, working_tree=True)
message_parts = []
if existing_files:
message_parts.append(
f"{len(existing_files)} existing files staged")
if deleted_files:
message_parts.append(f"{len(deleted_files)} deleted files staged")
message = " and ".join(
message_parts) if message_parts else "No files staged"
return True, message
except git.exc.GitCommandError as e:
logger.error(f"Git command error staging files: {str(e)}",
exc_info=True)
return False, f"Error staging files: {str(e)}"
except Exception as e:
logger.error(f"Error staging files: {str(e)}", exc_info=True)
return False, f"Error staging files: {str(e)}"
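
The existing/deleted split above exists because the two cases go through different index calls: paths still on disk are staged with index.add, while tracked paths that were deleted are staged with index.remove(working_tree=True). The same decision as a stand-alone helper; paths are illustrative.

import os
import git

def stage_path(repo, file_path):
    # Mirror of the add-vs-remove split used above.
    if os.path.exists(os.path.join(repo.working_dir, file_path)):
        repo.index.add([file_path])                        # new or modified file
    else:
        repo.index.remove([file_path], working_tree=True)  # staged deletion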

View File

@@ -1,52 +0,0 @@
from dataclasses import dataclass
from typing import List, Dict, Optional, Literal
from enum import Enum
class FileType(str, Enum):
REGEX = "regex"
CUSTOM_FORMAT = "custom format"
QUALITY_PROFILE = "quality profile"
class ResolutionChoice(str, Enum):
LOCAL = "local"
INCOMING = "incoming"
@dataclass
class TagConflict:
tag: str
local_status: Literal["Present", "Absent"]
incoming_status: Literal["Present", "Absent"]
resolution: Optional[ResolutionChoice] = None
@dataclass
class FormatConflict:
format_id: str
local_score: Optional[int]
incoming_score: Optional[int]
resolution: Optional[ResolutionChoice] = None
@dataclass
class GeneralConflict:
key: str
local_value: any
incoming_value: any
resolution: Optional[ResolutionChoice] = None
@dataclass
class FileResolution:
file_type: FileType
filename: str
tags: List[TagConflict]
formats: List[FormatConflict]
general: List[GeneralConflict]
@dataclass
class ResolutionRequest:
resolutions: Dict[str, FileResolution]
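
A hypothetical instance of these models, assuming the dataclasses above are in scope; the profile and format names are invented.

resolution = FileResolution(
    file_type=FileType.QUALITY_PROFILE,
    filename="profiles/1080p Transparent.yml",
    tags=[TagConflict(tag="1080p", local_status="Present",
                      incoming_status="Absent", resolution=ResolutionChoice.LOCAL)],
    formats=[FormatConflict(format_id="DON", local_score=80,
                            incoming_score=100, resolution=ResolutionChoice.INCOMING)],
    general=[GeneralConflict(key="upgradesAllowed", local_value=True,
                             incoming_value=False, resolution=None)],
)
request = ResolutionRequest(resolutions={resolution.filename: resolution})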

View File

@@ -1,15 +0,0 @@
# git/operations/unstage.py
import git
import logging
logger = logging.getLogger(__name__)
def unstage_files(repo_path, files):
try:
repo = git.Repo(repo_path)
repo.index.reset(files=files)
return True, "Successfully unstaged files."
except Exception as e:
logger.error(f"Error unstaging files: {str(e)}", exc_info=True)
return False, f"Error unstaging files: {str(e)}"

View File

@@ -1,156 +0,0 @@
# git/clone_repo.py
import os
import shutil
import logging
import yaml
from git.exc import GitCommandError
import git
from ..auth.authenticate import GitHubAuth
logger = logging.getLogger(__name__)
def clone_repository(repo_url, repo_path):
temp_dir = f"{repo_path}_temp"
backup_dir = f"{repo_path}_backup"
try:
# Initial clone attempt
logger.info(f"Starting clone operation for {repo_url}")
try:
# First try without authentication (for public repos)
repo = git.Repo.clone_from(repo_url, temp_dir)
logger.info("Repository clone successful")
except GitCommandError as e:
error_str = str(e)
# If authentication error, try with token
if "could not read Username" in error_str or "Authentication failed" in error_str:
logger.info("Initial clone failed due to authentication. Trying with token...")
try:
# Verify token availability
if not GitHubAuth.verify_token():
logger.error("Private repository requires GitHub authentication. Please configure PAT.")
return False, "This appears to be a private repository. Please configure PROFILARR_PAT environment variable."
# Get authenticated URL for private repositories
authenticated_url = GitHubAuth.get_authenticated_url(repo_url)
repo = git.Repo.clone_from(authenticated_url, temp_dir)
logger.info("Repository clone with authentication successful")
except GitCommandError as auth_e:
logger.error(f"Clone with authentication failed: {str(auth_e)}")
return False, f"Failed to clone repository: {str(auth_e)}"
# If repository not found, create new one
elif "remote: Repository not found" in error_str:
logger.info("Creating new repository - remote not found")
repo = git.Repo.init(temp_dir)
repo.create_remote('origin', repo_url)
else:
logger.error(f"Clone failed: {error_str}")
return False, f"Failed to clone repository: {error_str}"
# Check if repo is empty
try:
repo.head.reference
except ValueError:
logger.info("Initializing empty repository with default structure")
_initialize_empty_repo(repo)
# Backup handling
if os.path.exists(repo_path):
logger.info("Creating backup of existing repository")
shutil.move(repo_path, backup_dir)
# Move repo to final location
logger.info("Moving repository to final location")
shutil.move(temp_dir, repo_path)
# Process folders
for folder_name in ['regex_patterns', 'custom_formats', 'profiles']:
folder_path = os.path.join(repo_path, folder_name)
backup_folder_path = os.path.join(backup_dir, folder_name)
if not os.path.exists(folder_path):
logger.debug(f"Creating folder: {folder_name}")
os.makedirs(folder_path)
# File merging process
cloned_files = set(
f.replace('.yml', '') for f in os.listdir(folder_path)
if f.endswith('.yml'))
if os.path.exists(backup_folder_path):
local_files = [
f for f in os.listdir(backup_folder_path)
if f.endswith('.yml')
]
if local_files:
logger.info(
f"Merging {len(local_files)} files in {folder_name}")
for file_name in local_files:
old_file_path = os.path.join(backup_folder_path, file_name)
with open(old_file_path, 'r') as file:
data = yaml.safe_load(file)
base_name = data['name']
new_name = base_name
counter = 1
while new_name in cloned_files:
new_name = f"{base_name} ({counter})"
counter += 1
cloned_files.add(new_name)
new_file_path = os.path.join(folder_path,
f"{new_name}.yml")
with open(new_file_path, 'w') as file:
yaml.dump(data, file)
logger.debug(f"Merged file: {file_name} → {new_name}.yml")
# Cleanup
if os.path.exists(backup_dir):
logger.info("Removing backup directory")
shutil.rmtree(backup_dir)
logger.info("Clone operation completed successfully")
return True, "Repository cloned and local files merged successfully"
except Exception as e:
logger.exception("Critical error during clone operation")
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
if os.path.exists(backup_dir):
shutil.move(backup_dir, repo_path)
return False, f"Critical error: {str(e)}"
def _initialize_empty_repo(repo):
# Create basic folder structure
os.makedirs(os.path.join(repo.working_tree_dir, 'regex_patterns'),
exist_ok=True)
os.makedirs(os.path.join(repo.working_tree_dir, 'custom_formats'),
exist_ok=True)
os.makedirs(os.path.join(repo.working_tree_dir, 'quality_profiles'),
exist_ok=True)
# Create a README file
with open(os.path.join(repo.working_tree_dir, 'README.md'), 'w') as f:
f.write(
"# Profilarr Repository\n\nThis repository contains regex patterns, custom formats and quality profiles."
)
repo.git.add(A=True)
repo.index.commit("Initial commit: Basic repository structure")
repo.create_head('main')
repo.heads.main.checkout()
origin = repo.remote(name='origin')
origin.push('main')
logger.info(
f"Initialized empty repository with basic structure and pushed to main"
)
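
When local files are merged back in after the clone, name collisions are resolved by appending a counter ("Name", "Name (1)", "Name (2)", ...). That loop in isolation, with invented sample names:

def dedupe_name(base_name, existing_names):
    # Same collision-renaming loop as in clone_repository above.
    new_name = base_name
    counter = 1
    while new_name in existing_names:
        new_name = f"{base_name} ({counter})"
        counter += 1
    existing_names.add(new_name)
    return new_name

print(dedupe_name("DON", {"DON", "DON (1)"}))  # DON (2)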

View File

@@ -1,74 +0,0 @@
# git/repo/unlink.py
import os
import shutil
import logging
from ...db import save_settings
from ...arr.manager import check_active_sync_configs
logger = logging.getLogger(__name__)
def unlink_repository(repo_path, remove_files=False):
try:
# Check for active sync configurations first
has_active_configs, configs = check_active_sync_configs()
if has_active_configs:
error_msg = (
"Cannot unlink repository while automatic sync configurations are active.\n"
"The following configurations must be set to manual sync first:\n"
)
for config in configs:
error_msg += f"- {config['name']} (ID: {config['id']}, {config['sync_method']} sync)\n"
logger.error(error_msg)
return False, {
"error": error_msg,
"code": "ACTIVE_SYNC_CONFIGS",
"configs": configs
}
logger.info(
f"Starting unlink_repository with repo_path: {repo_path} and remove_files: {remove_files}"
)
# Check if repo_path exists
if not os.path.exists(repo_path):
logger.error(f"Path {repo_path} does not exist.")
return False, f"Path {repo_path} does not exist."
# Remove the .git folder and optionally the repo files
if remove_files:
logger.info(f"Removing all files in the repository at {repo_path}")
for root, dirs, files in os.walk(repo_path):
for file in files:
os.remove(os.path.join(root, file))
for dir in dirs:
shutil.rmtree(os.path.join(root, dir))
logger.info(
f"Successfully removed all files in the repository at {repo_path}"
)
# Recreate necessary folders
required_dirs = ['custom_formats', 'profiles', 'regex_patterns']
for dir_name in required_dirs:
os.makedirs(os.path.join(repo_path, dir_name), exist_ok=True)
logger.info(
f"Recreated the directory {dir_name} at {repo_path}")
else:
git_folder = os.path.join(repo_path, '.git')
if os.path.exists(git_folder):
logger.info(f"Removing .git folder at {git_folder}")
shutil.rmtree(git_folder)
logger.info(
f"Successfully removed .git folder at {git_folder}")
else:
logger.warning(f".git folder does not exist at {git_folder}")
# Clear git settings
save_settings({'gitRepo': None})
logger.info("Updated settings to remove git information")
return True, "Repository successfully unlinked"
except Exception as e:
logger.error(f"Error unlinking repository: {str(e)}", exc_info=True)
return False, f"Error unlinking repository: {str(e)}"

View File

@@ -1,159 +0,0 @@
# status/commit_history.py
import git
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
def format_commit(commit, repo, tracking_branch=None):
"""Helper function to format a single commit's information"""
# Check if it's a merge commit
is_merge = len(commit.parents) > 1
# Get the remote URL for the commit if possible
remote_url = None
if tracking_branch:
remote_url = repo.remote().url
if remote_url.endswith('.git'):
remote_url = remote_url[:-4]
remote_url += f"/commit/{commit.hexsha}"
commit_info = {
'hash': commit.hexsha,
'message': commit.message.strip(),
'author': f"{commit.author.name} <{commit.author.email}>",
'date': commit.committed_datetime.isoformat(),
'isMerge': is_merge,
'remoteUrl': remote_url,
'details': {
'files_changed': [],
'insertions': 0,
'deletions': 0
}
}
# Get detailed stats
try:
if len(commit.parents) > 0:
# Get the diff between this commit and its first parent
diff = commit.parents[0].diff(commit)
# Initialize stats
stats = {'files_changed': [], 'insertions': 0, 'deletions': 0}
# Get the total diff stats using git diff --numstat
raw_stats = repo.git.diff(commit.parents[0].hexsha,
commit.hexsha,
numstat=True).splitlines()
for line in raw_stats:
if not line.strip():
continue
adds, dels, file_path = line.split('\t')
# Handle binary files which show up as '-' in numstat
if adds != '-' and dels != '-':
stats['insertions'] += int(adds)
stats['deletions'] += int(dels)
stats['files_changed'].append(file_path)
commit_info['details'] = stats
except Exception as e:
logger.debug(f"Error getting commit details: {e}")
return commit_info
def get_git_commit_history(repo_path, branch=None):
"""
Get both local and remote commit history for the repository.
Args:
repo_path (str): Path to the git repository
branch (str, optional): Branch name to get history for. Defaults to current branch.
Returns:
tuple: (success: bool, result: dict/str)
On success, returns (True, {
'local_commits': [...],
'remote_commits': [...],
'ahead_count': int,
'behind_count': int,
'branch': str,
'has_remote': bool
})
On failure, returns (False, error_message)
"""
try:
repo = git.Repo(repo_path)
current_branch = repo.active_branch
branch_to_check = branch if branch else current_branch.name
# Get the tracking branch
tracking_branch = None
try:
tracking_branch = repo.active_branch.tracking_branch()
except Exception as e:
logger.debug(f"No tracking branch found: {e}")
local_commits = []
remote_commits = []
ahead_count = 0
behind_count = 0
if tracking_branch:
try:
# Find the merge base (common ancestor)
merge_base = repo.merge_base(tracking_branch,
current_branch)[0]
# Get commits that are in local but not in remote (ahead)
local_commits = [
format_commit(commit, repo, tracking_branch)
for commit in repo.iter_commits(
f"{tracking_branch.name}..{current_branch.name}")
]
ahead_count = len(local_commits)
# Get commits that are in remote but not in local (behind)
remote_commits = [
format_commit(commit, repo, tracking_branch)
for commit in repo.iter_commits(
f"{current_branch.name}..{tracking_branch.name}")
]
behind_count = len(remote_commits)
# If no divergence, get recent commits from current branch
if not local_commits and not remote_commits:
local_commits = [
format_commit(commit, repo, tracking_branch)
for commit in repo.iter_commits(current_branch.name,
max_count=50)
]
except git.GitCommandError as e:
logger.error(f"Git command error while getting commits: {e}")
return False, f"Error getting commits: {str(e)}"
else:
# If no tracking branch, just get recent local commits
local_commits = [
format_commit(commit, repo)
for commit in repo.iter_commits(current_branch.name,
max_count=50)
]
return True, {
'local_commits': local_commits,
'remote_commits': remote_commits,
'ahead_count': ahead_count,
'behind_count': behind_count,
'branch': branch_to_check,
'has_remote': tracking_branch is not None
}
except Exception as e:
logger.exception("Error getting commit history")
return False, f"Unexpected error getting commit history: {str(e)}"
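
Ahead/behind counting relies on git's two-dot ranges: `tracking..local` yields commits only on the local branch (ahead), `local..tracking` yields commits only on the remote (behind). A minimal GitPython sketch of just that counting; it assumes an existing clone whose current branch has a tracking branch.

import git

def ahead_behind(repo_path):
    repo = git.Repo(repo_path)
    local = repo.active_branch
    tracking = local.tracking_branch()
    if tracking is None:
        return 0, 0
    ahead = sum(1 for _ in repo.iter_commits(f"{tracking.name}..{local.name}"))
    behind = sum(1 for _ in repo.iter_commits(f"{local.name}..{tracking.name}"))
    return ahead, behind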

View File

@@ -1,232 +0,0 @@
import logging
import os
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
def compare_yaml(old_data: Any,
new_data: Any,
path: str = "") -> List[Dict[str, Any]]:
"""
Recursively compare two YAML structures and generate a list of changes.
Handles nested structures including:
- Simple values (strings, numbers, booleans)
- Lists of primitives (like tags: ['1080p', 'x264'])
- Lists of objects (like custom_formats: [{name: 'DON', score: 80}])
- Nested objects (like qualities: {id: 1, name: 'HD', qualities: [...]})
Args:
old_data: Original data structure
new_data: New data structure to compare against
path: Current path in the data structure (for tracking nested changes)
Returns:
List of changes, where each change is a dict containing:
{
key: Path to the changed field (e.g. "custom_formats[DON].score")
change: 'added' | 'removed' | 'modified'
from: Original value (for modified/removed)
to: New value (for modified/added)
value: List of values (for array additions/removals)
}
"""
logger.debug(f"Comparing path: {path or 'root'}")
changes = []
if old_data is None and new_data is None:
return changes
if old_data is None and new_data is not None:
if isinstance(new_data, dict):
old_data = {}
elif isinstance(new_data, list):
old_data = []
else:
old_data = None
if old_data is not None and new_data is None:
logger.debug(f"Path {path} removed")
return [{"key": path, "change": "removed", "from": old_data}]
if type(old_data) != type(new_data):
logger.debug(
f"Type mismatch at {path}: {type(old_data)} → {type(new_data)}")
return [{
"key": path,
"change": "modified",
"from": old_data,
"to": new_data
}]
if isinstance(old_data, list):
has_objects = any(
isinstance(x, dict) for x in old_data + new_data if x is not None)
if has_objects:
try:
old_dict = {x.get("name"): x for x in old_data if x}
new_dict = {x.get("name"): x for x in new_data if x}
added = set(new_dict) - set(old_dict)
removed = set(old_dict) - set(new_dict)
common = set(old_dict) & set(new_dict)
if added:
logger.debug(f"Added items at {path}: {added}")
if removed:
logger.debug(f"Removed items at {path}: {removed}")
for key in added:
changes.append({
"key": f"{path}[{key}]",
"change": "added",
"to": new_dict[key]
})
for key in removed:
changes.append({
"key": f"{path}[{key}]",
"change": "removed",
"from": old_dict[key]
})
for key in common:
if old_dict[key] != new_dict[key]:
logger.debug(
f"Found changes in common item {key} at {path}")
changes.extend(
compare_yaml(old_dict[key], new_dict[key],
f"{path}[{key}]"))
except Exception as e:
logger.warning(
f"Failed to compare by name at {path}, falling back to index comparison: {str(e)}"
)
for i, (old_item,
new_item) in enumerate(zip(old_data, new_data)):
if old_item != new_item:
changes.extend(
compare_yaml(old_item, new_item, f"{path}[{i}]"))
else:
old_set = set(old_data)
new_set = set(new_data)
if added := new_set - old_set:
logger.debug(f"Added values at {path}: {added}")
changes.append({
"key": path,
"change": "added",
"value": sorted([x for x in added if x is not None])
})
if removed := old_set - new_set:
logger.debug(f"Removed values at {path}: {removed}")
changes.append({
"key": path,
"change": "removed",
"value": sorted([x for x in removed if x is not None])
})
elif isinstance(old_data, dict):
all_keys = set(old_data) | set(new_data)
for key in all_keys:
new_path = f"{path}.{key}" if path else key
if key not in old_data:
logger.debug(f"Added key at {new_path}")
changes.append({
"key": new_path,
"change": "added",
"to": new_data[key]
})
elif key not in new_data:
logger.debug(f"Removed key at {new_path}")
changes.append({
"key": new_path,
"change": "removed",
"from": old_data[key]
})
else:
changes.extend(
compare_yaml(old_data[key], new_data[key], new_path))
else:
if old_data != new_data:
logger.debug(f"Modified value at {path}: {old_data} → {new_data}")
changes.append({
"key": path,
"change": "modified",
"from": old_data,
"to": new_data
})
for c in changes:
if c["change"] == "added" and "from" not in c:
c["from"] = "~"
return changes
def normalize_yaml_keys(data):
"""Convert boolean keys to strings in YAML data to avoid JSON serialization issues"""
if isinstance(data, dict):
return {str(k): normalize_yaml_keys(v) for k, v in data.items()}
elif isinstance(data, list):
return [normalize_yaml_keys(item) for item in data]
else:
return data
def create_change_summary(old_data: Optional[Dict], new_data: Optional[Dict],
file_path: str) -> Dict[str, Any]:
"""
Create a summary of changes between two YAML structures with file metadata.
This wrapper adds git-specific fields like name, status, and file path.
Args:
old_data: Original YAML data (from git HEAD)
new_data: New YAML data (from working directory)
file_path: Path to the file being compared
Returns:
Dict containing:
- name: Current name (from new_data or filename)
- prior_name: Previous name (from old_data)
- outgoing_name: New name if changed, else None
- status: 'New' | 'Modified' | 'Deleted'
- file_path: Path to the file
- modified: True if file was modified/added
- deleted: True if file was deleted
- changes: Detailed changes from compare_yaml
"""
try:
# Normalize keys to avoid JSON serialization issues with boolean keys
old_data = normalize_yaml_keys(old_data) if old_data else None
new_data = normalize_yaml_keys(new_data) if new_data else None
filename = os.path.basename(file_path)
new_name = new_data.get("name") if new_data else None
old_name = old_data.get("name") if old_data else None
current_name = new_name or filename
if old_data is None and new_data is not None:
status = "New"
logger.info(f"New file detected: {file_path}")
elif old_data is not None and new_data is None:
status = "Deleted"
logger.info(f"Deleted file detected: {file_path}")
else:
status = "Modified"
logger.info(f"Modified file detected: {file_path}")
detailed_changes = compare_yaml(old_data, new_data)
if detailed_changes:
logger.info(
f"Found {len(detailed_changes)} changes in {file_path}")
logger.debug(f"Detailed changes: {detailed_changes}")
return {
"name": current_name,
"prior_name": old_name,
"outgoing_name": new_name if new_name != old_name else None,
"status": status,
"file_path": file_path,
"modified": status != "Deleted",
"deleted": status == "Deleted",
"changes": detailed_changes
}
except Exception as e:
logger.error(
f"Error creating change summary for {file_path}: {str(e)}",
exc_info=True)
raise
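
A small worked example of the change records described in the docstring above, using an invented profile; the exact ordering of the records is not guaranteed.

# Invented inputs and the records compare_yaml would be expected to produce for them.
old_profile = {"name": "1080p", "tags": ["x264"],
               "custom_formats": [{"name": "DON", "score": 80}]}
new_profile = {"name": "1080p", "tags": ["x264", "1080p"],
               "custom_formats": [{"name": "DON", "score": 100}]}

expected = [
    {"key": "tags", "change": "added", "value": ["1080p"], "from": "~"},
    {"key": "custom_formats[DON].score", "change": "modified", "from": 80, "to": 100},
]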

View File

@@ -1,283 +0,0 @@
import os
import yaml
import logging
from typing import Any, Dict, List, Optional, Union
logger = logging.getLogger(__name__)
# Define conflict states
UNRESOLVED = "UNRESOLVED"
RESOLVED = "RESOLVED"
MODIFY_DELETE = "MODIFY_DELETE"
def compare_conflict_yaml(ours_data: Any,
theirs_data: Any,
path: str = "") -> List[Dict[str, Any]]:
"""
Compare two YAML structures and generate conflict information.
Handles nested structures and produces conflict records in the format:
{
'parameter': 'Field Name',
'local_value': value_from_ours,
'incoming_value': value_from_theirs
}
"""
conflicts = []
# Handle None/deletion cases
if ours_data is None and theirs_data is None:
return conflicts
if ours_data is None:
# Local version deleted
param_name = path or 'File'
return [{
'parameter': param_name,
'local_value': '🗑️ File deleted in local version',
'incoming_value': '📄 File exists in incoming version'
}]
if theirs_data is None:
# Incoming version deleted
param_name = path or 'File'
return [{
'parameter': param_name,
'local_value': '📄 File exists in local version',
'incoming_value': '🗑️ File deleted in incoming version'
}]
# Handle different types as conflicts
if type(ours_data) != type(theirs_data):
return [{
'parameter': path,
'local_value': ours_data,
'incoming_value': theirs_data
}]
# Handle lists
if isinstance(ours_data, list):
# Check if list contains objects
has_objects = any(
isinstance(x, dict) for x in ours_data + theirs_data
if x is not None)
if has_objects:
return compare_object_arrays(ours_data, theirs_data, path)
else:
return compare_primitive_arrays(ours_data, theirs_data, path)
# Handle dictionaries
elif isinstance(ours_data, dict):
return compare_dicts(ours_data, theirs_data, path)
# Handle primitive values
elif ours_data != theirs_data:
return [{
'parameter': path,
'local_value': ours_data,
'incoming_value': theirs_data
}]
return conflicts
def compare_object_arrays(ours_data: List[Dict], theirs_data: List[Dict],
path: str) -> List[Dict]:
"""Compare arrays of objects using name field as identifier"""
conflicts = []
try:
# Build lookup dictionaries
ours_dict = {x.get('name'): x for x in ours_data if x}
theirs_dict = {x.get('name'): x for x in theirs_data if x}
# Find additions/removals
ours_keys = set(ours_dict.keys())
theirs_keys = set(theirs_dict.keys())
# Handle added items
for key in (theirs_keys - ours_keys):
conflicts.append({
'parameter': f"{path}[{key}]" if path else key,
'local_value': None,
'incoming_value': theirs_dict[key]
})
# Handle removed items
for key in (ours_keys - theirs_keys):
conflicts.append({
'parameter': f"{path}[{key}]" if path else key,
'local_value': ours_dict[key],
'incoming_value': None
})
# Compare common items
for key in (ours_keys & theirs_keys):
if ours_dict[key] != theirs_dict[key]:
new_path = f"{path}[{key}]" if path else key
conflicts.extend(
compare_conflict_yaml(ours_dict[key], theirs_dict[key],
new_path))
except Exception as e:
logger.warning(
f"Failed to compare objects by name at {path}, using positional comparison: {str(e)}"
)
# Fallback to positional comparison
for i, (ours_item,
theirs_item) in enumerate(zip(ours_data, theirs_data)):
if ours_item != theirs_item:
new_path = f"{path}[{i}]" if path else str(i)
conflicts.extend(
compare_conflict_yaml(ours_item, theirs_item, new_path))
return conflicts
def compare_primitive_arrays(ours_data: List, theirs_data: List,
path: str) -> List[Dict]:
"""Compare arrays of primitive values"""
conflicts = []
ours_set = set(ours_data)
theirs_set = set(theirs_data)
# Handle additions
added = theirs_set - ours_set
if added:
conflicts.append({
'parameter': path or 'Array',
'local_value': sorted(list(ours_set)),
'incoming_value': sorted(list(theirs_set))
})
return conflicts
def format_array_for_display(data):
"""Format array data for display in conflict resolution"""
if isinstance(data, list):
if not data:
return "[] (empty array)"
elif all(isinstance(x, dict) and 'name' in x for x in data):
# Array of objects with names - show the names
names = [x['name'] for x in data]
if len(names) <= 5:
return f"[{', '.join(names)}]"
else:
return f"[{', '.join(names[:5])}, ... and {len(names) - 5} more]"
elif all(not isinstance(x, (dict, list)) for x in data):
# Array of primitives
if len(data) <= 5:
return f"[{', '.join(str(x) for x in data)}]"
else:
return f"[{', '.join(str(x) for x in data[:5])}, ... and {len(data) - 5} more]"
else:
# Mixed or complex array
return f"Array with {len(data)} items"
return data
def compare_dicts(ours_data: Dict, theirs_data: Dict, path: str) -> List[Dict]:
"""Compare dictionaries recursively"""
conflicts = []
# Get all keys from both dictionaries
all_keys = set(ours_data.keys()) | set(theirs_data.keys())
for key in all_keys:
new_path = f"{path}.{key}" if path else key
if key not in ours_data:
# Format arrays for better display when field is missing locally
incoming_val = theirs_data[key]
if isinstance(incoming_val, list):
incoming_val = format_array_for_display(incoming_val)
conflicts.append({
'parameter': new_path,
'local_value': None,
'incoming_value': incoming_val
})
elif key not in theirs_data:
# Format arrays for better display when field is missing remotely
local_val = ours_data[key]
if isinstance(local_val, list):
local_val = format_array_for_display(local_val)
conflicts.append({
'parameter': new_path,
'local_value': local_val,
'incoming_value': None
})
elif ours_data[key] != theirs_data[key]:
conflicts.extend(
compare_conflict_yaml(ours_data[key], theirs_data[key],
new_path))
return conflicts
def create_conflict_summary(file_path: str,
ours_data: Optional[Dict],
theirs_data: Optional[Dict],
status: str = UNRESOLVED) -> Dict[str, Any]:
"""
Create a summary of conflicts between two versions of a file.
Args:
file_path: Path to the file in conflict
ours_data: Our version of the YAML data
theirs_data: Their version of the YAML data
status: Conflict status (UNRESOLVED, RESOLVED, or MODIFY_DELETE)
Returns:
Dict containing:
- file_path: Path to the conflicted file
- type: Type of item
- name: Name from our version or filename
- incoming_name: Name from their version (if available)
- status: Current conflict status
- conflict_details: List of specific conflicts
"""
try:
from .utils import determine_type # Import here to avoid circular imports
# Generate conflict details
conflict_details = {
'conflicting_parameters':
compare_conflict_yaml(ours_data, theirs_data)
}
# Get local name
local_name = None
if ours_data and isinstance(ours_data, dict) and 'name' in ours_data:
local_name = ours_data.get('name')
if not local_name:
# Strip the extension to get a cleaner name
basename = os.path.basename(file_path)
local_name = os.path.splitext(basename)[0]
# Get incoming name
incoming_name = None
if theirs_data and isinstance(theirs_data, dict) and 'name' in theirs_data:
incoming_name = theirs_data.get('name')
if not incoming_name:
# Strip the extension to get a cleaner name
basename = os.path.basename(file_path)
incoming_name = os.path.splitext(basename)[0]
result = {
'file_path': file_path,
'type': determine_type(file_path),
'name': local_name,
'incoming_name': incoming_name,
'status': status,
'conflict_details': conflict_details
}
return result
except Exception as e:
logger.error(
f"Failed to create conflict summary for {file_path}: {str(e)}")
return None
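
create_conflict_summary produces one record per conflicted file. A hypothetical example of its shape, with invented values:

conflict_summary = {
    "file_path": "custom_formats/DON.yml",
    "type": "Custom Format",
    "name": "DON",
    "incoming_name": "DON",
    "status": "UNRESOLVED",
    "conflict_details": {
        "conflicting_parameters": [
            {"parameter": "description", "local_value": "Old text", "incoming_value": "New text"},
        ],
    },
}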

View File

@@ -1,229 +0,0 @@
import os
import yaml
import logging
from git import GitCommandError
from .comparison import create_change_summary
from .utils import determine_type, extract_name_from_path
logger = logging.getLogger(__name__)
# Use the centralized extract_name_from_path function from utils
extract_name = extract_name_from_path
def check_merge_conflict(repo, branch, file_path):
"""Check if pulling a file would cause merge conflicts"""
try:
# Check for local changes (uncommitted or unpushed)
status = repo.git.status('--porcelain', file_path).strip()
if status:
status_code = status[:2] if len(status) >= 2 else ''
has_changes = 'M' in status_code or 'A' in status_code or 'D' in status_code or 'R' in status_code
else:
# Check for unpushed commits
merge_base = repo.git.merge_base('HEAD',
f'origin/{branch}').strip()
committed_changes = repo.git.log(f'{merge_base}..HEAD',
'--',
file_path,
ignore_missing=True).strip()
has_changes = bool(committed_changes)
if has_changes:
# Test if merge would cause conflicts
try:
merge_test = repo.git.merge_tree('--write-tree', 'HEAD',
f'origin/{branch}')
return any(
line.startswith('<<<<<<< ')
for line in merge_test.splitlines() if file_path in line)
except GitCommandError:
return True # Assume conflict if merge test fails
return False
except Exception as e:
logger.error(f"Failed to check conflicts for {file_path}: {str(e)}")
return False
def get_commit_message(repo, branch, file_path):
"""Get commit message for incoming changes to a file"""
try:
raw_message = repo.git.show(f'HEAD...origin/{branch}', '--format=%B',
'-s', '--', file_path).strip()
return parse_commit_message(raw_message)
except GitCommandError as e:
logger.error(
f"Git command error getting commit message for {file_path}: {str(e)}"
)
return {
"body": "",
"footer": "",
"scope": "",
"subject": f"Error retrieving commit message: {str(e)}",
"type": ""
}
def parse_commit_message(message):
"""Parse a commit message into its components"""
try:
# Default structure
parsed = {
"type": "Unknown Type",
"scope": "Unknown Scope",
"subject": "",
"body": "",
"footer": ""
}
if not message:
return parsed
# Split message into lines
lines = message.strip().split('\n')
# Parse first line (header)
if lines:
header = lines[0]
# Try to parse conventional commit format: type(scope): subject
import re
conventional_format = re.match(r'^(\w+)(?:\(([^)]+)\))?: (.+)$',
header)
if conventional_format:
groups = conventional_format.groups()
parsed.update({
"type": groups[0] or "Unknown Type",
"scope": groups[1] or "Unknown Scope",
"subject": groups[2]
})
else:
parsed["subject"] = header
# Parse body and footer
if len(lines) > 1:
# Find the divider between body and footer (if any)
footer_start = -1
for i, line in enumerate(lines[1:], 1):
if re.match(r'^[A-Z_-]+:', line):
footer_start = i
break
# Extract body and footer
if footer_start != -1:
parsed["body"] = '\n'.join(lines[1:footer_start]).strip()
parsed["footer"] = '\n'.join(lines[footer_start:]).strip()
else:
parsed["body"] = '\n'.join(lines[1:]).strip()
return parsed
except Exception as e:
logger.error(f"Error parsing commit message: {str(e)}")
return {
"type": "Unknown Type",
"scope": "Unknown Scope",
"subject": "Error parsing commit message",
"body": "",
"footer": ""
}
def get_incoming_changes(repo, branch):
"""Get list of changes that would come in from origin"""
try:
# Get status including renames
diff_output = repo.git.diff(f'HEAD...origin/{branch}', '--name-status',
'-M').split('\n')
changed_files = []
rename_mapping = {}
# Process status to identify renames
for line in diff_output:
if not line:
continue
parts = line.split('\t')
if len(parts) < 2:
continue
status = parts[0]
if status.startswith('R'):
old_path, new_path = parts[1], parts[2]
rename_mapping[new_path] = old_path
changed_files.append(new_path)
else:
changed_files.append(parts[1])
logger.info(f"Processing {len(changed_files)} incoming changes")
incoming_changes = []
for file_path in changed_files:
try:
# Handle renamed files
old_path = rename_mapping.get(file_path, file_path)
is_rename = file_path in rename_mapping
# Get local and remote versions
try:
local_content = repo.git.show(f'HEAD:{old_path}')
local_data = yaml.safe_load(local_content)
except (GitCommandError, yaml.YAMLError):
local_data = None
try:
remote_content = repo.git.show(
f'origin/{branch}:{file_path}')
remote_data = yaml.safe_load(remote_content)
except (GitCommandError, yaml.YAMLError):
remote_data = None
# Skip if no actual changes
if local_data == remote_data and not is_rename:
continue
# Check for conflicts and get commit info
will_conflict = check_merge_conflict(repo, branch, file_path)
commit_message = get_commit_message(repo, branch, file_path)
# Generate change summary
change = create_change_summary(local_data, remote_data,
file_path)
# Add incoming-specific fields
change.update({
'commit_message':
commit_message,
'type':
determine_type(file_path),
'will_conflict':
will_conflict,
'id':
remote_data.get('id') if remote_data else None,
'local_name':
extract_name(old_path)
if is_rename else extract_name(file_path),
'incoming_name':
extract_name(file_path),
'staged':
False
})
if is_rename:
change['status'] = 'Renamed'
incoming_changes.append(change)
except Exception as e:
logger.error(
f"Failed to process incoming change for {file_path}: {str(e)}"
)
continue
return incoming_changes
except Exception as e:
logger.error(f"Failed to get incoming changes: {str(e)}")
return []
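
The rename mapping above is built from `git diff --name-status -M`, where each line is status<TAB>path, or R<score><TAB>old<TAB>new for renames. That parsing as a stand-alone helper, with invented sample lines:

def parse_name_status(lines):
    changed_files, rename_mapping = [], {}
    for line in lines:
        parts = line.split('\t')
        if len(parts) < 2:
            continue
        status = parts[0]
        if status.startswith('R'):           # e.g. "R100\told.yml\tnew.yml"
            old_path, new_path = parts[1], parts[2]
            rename_mapping[new_path] = old_path
            changed_files.append(new_path)
        else:
            changed_files.append(parts[1])
    return changed_files, rename_mapping

sample = ["M\tprofiles/1080p.yml", "R100\tcustom_formats/old.yml\tcustom_formats/new.yml"]
print(parse_name_status(sample))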

View File

@@ -1,141 +0,0 @@
import os
import yaml
import logging
from git import GitCommandError
from .conflict_comparison import create_conflict_summary, UNRESOLVED, RESOLVED, MODIFY_DELETE
logger = logging.getLogger(__name__)
def get_version_data(repo, ref, file_path):
"""Get YAML data from a specific version of a file"""
try:
content = repo.git.show(f'{ref}:{file_path}')
return yaml.safe_load(content) if content else None
except GitCommandError:
return None
def process_modify_delete_conflict(repo, file_path, deleted_in_head):
"""Handle case where one side modified while other deleted"""
try:
# Check if conflict is resolved
status_output = repo.git.status('--porcelain', file_path)
file_exists = os.path.exists(os.path.join(repo.working_dir, file_path))
is_staged = status_output and status_output[0] in ['M', 'A']
# Determine status
if (file_exists and is_staged) or (not file_exists
and status_output.startswith('D ')):
status = RESOLVED
else:
status = MODIFY_DELETE
# For delete conflicts, we need to extract the name for display purposes
# This will be the name of the actual file before it was deleted
basename = os.path.basename(file_path)
filename = os.path.splitext(basename)[0] # Strip extension
# Get metadata from existing version to extract name if possible
if file_exists:
# File exists locally, read it
try:
with open(os.path.join(repo.working_dir, file_path), 'r') as f:
existing_data = yaml.safe_load(f.read())
except Exception as read_error:
logger.warning(f"Could not read existing file {file_path}: {str(read_error)}")
existing_data = {'name': filename}
else:
# File was deleted locally, try to get from merge head
try:
existing_data = get_version_data(repo, 'MERGE_HEAD', file_path)
except Exception as merge_error:
logger.warning(f"Could not get merge head for {file_path}: {str(merge_error)}")
existing_data = {'name': filename}
# Simplified placeholder data for deleted version
if deleted_in_head:
# File was deleted in HEAD (local) but exists in MERGE_HEAD (incoming)
local_data = None # This indicates deleted
try:
# Try to get name from incoming
incoming_data = existing_data if existing_data else {'name': filename}
except Exception:
incoming_data = {'name': filename}
else:
# File exists in HEAD (local) but deleted in MERGE_HEAD (incoming)
try:
local_data = existing_data if existing_data else {'name': filename}
except Exception:
local_data = {'name': filename}
incoming_data = None # This indicates deleted
return create_conflict_summary(file_path, local_data, incoming_data, status)
except Exception as e:
logger.error(
f"Failed to process modify/delete conflict for {file_path}: {str(e)}"
)
return None
def process_regular_conflict(repo, file_path):
"""Handle standard merge conflict between two versions"""
try:
# Get both versions
ours_data = get_version_data(repo, 'HEAD', file_path)
theirs_data = get_version_data(repo, 'MERGE_HEAD', file_path)
if not ours_data and not theirs_data:
return None
# Check if conflict is resolved
status_output = repo.git.status('--porcelain', file_path)
status = UNRESOLVED if status_output.startswith('UU') else RESOLVED
return create_conflict_summary(file_path, ours_data, theirs_data,
status)
except Exception as e:
logger.error(f"Failed to process conflict for {file_path}: {str(e)}")
return None
def get_merge_conflicts(repo):
"""Get all merge conflicts in the repository"""
try:
# Check if we're in a merge state
if not os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD')):
return []
conflicts = []
status = repo.git.status('--porcelain', '-z').split('\0')
# Process each status entry
for item in status:
if not item or len(item) < 4:
continue
x, y = item[0], item[1]
file_path = item[3:]
# Handle modify/delete conflicts
if (x == 'D' and y == 'U') or (x == 'U'
and y == 'D') or (x == 'A'
and y == 'U'):
conflict = process_modify_delete_conflict(
repo, file_path, x == 'D')
if conflict:
conflicts.append(conflict)
# Handle regular conflicts
elif 'U' in (x, y) or (x == 'D' and y == 'D'):
conflict = process_regular_conflict(repo, file_path)
if conflict:
conflicts.append(conflict)
return conflicts
except Exception as e:
logger.error(f"Failed to get merge conflicts: {str(e)}")
return []

View File

@@ -1,110 +0,0 @@
import os
import yaml
import logging
from git import GitCommandError
from .comparison import create_change_summary
from .utils import determine_type, extract_name_from_path
logger = logging.getLogger(__name__)
# Use the centralized extract_name_from_path function from utils
extract_name = extract_name_from_path
def get_outgoing_changes(repo):
"""Get list of changes in working directory"""
try:
status = repo.git.status('--porcelain', '-z').split('\0')
logger.info(f"Processing {len(status)} changes from git status")
changes = []
i = 0
while i < len(status):
item = status[i]
if not item:
i += 1
continue
if len(item) < 4:
logger.warning(f"Invalid status item format: {item}")
i += 1
continue
x, y = item[0], item[1]
file_path = item[3:]
# Skip files in conflict state
if x == 'U' or y == 'U':
i += 1
continue
# Handle renamed files
if x == 'R' or y == 'R':
if i + 1 < len(status) and status[i + 1]:
outgoing_name = extract_name(file_path)
prior_name = extract_name(status[i + 1])
original_path = status[i + 1] # Path for old content
new_path = file_path # Path for new content
is_staged = x == 'R'
status_value = 'Renamed'
i += 2
else:
i += 1
else:
name = extract_name(file_path)
prior_name = name
outgoing_name = name
original_path = file_path
new_path = file_path
is_staged = x != ' ' and x != '?'
status_value = None
i += 1
try:
# Get old content (from HEAD)
try:
old_content = repo.git.show(f'HEAD:{original_path}')
old_data = yaml.safe_load(old_content)
except GitCommandError:
old_data = None
except yaml.YAMLError as e:
logger.warning(
f"Failed to parse old YAML for {original_path}: {str(e)}"
)
old_data = None
# Get new content (from working directory)
try:
full_path = os.path.join(repo.working_dir, new_path)
with open(full_path, 'r') as f:
new_data = yaml.safe_load(f.read())
except (IOError, yaml.YAMLError) as e:
logger.warning(
f"Failed to read/parse current file {new_path}: {str(e)}"
)
new_data = None
# Generate change summary
change = create_change_summary(old_data, new_data, new_path)
change['type'] = determine_type(new_path)
change['staged'] = is_staged
change['prior_name'] = prior_name
change['outgoing_name'] = outgoing_name
if status_value:
change['status'] = status_value
changes.append(change)
except Exception as e:
logger.error(f"Failed to process {file_path}: {str(e)}",
exc_info=True)
return changes
except Exception as e:
logger.error(f"Failed to get outgoing changes: {str(e)}",
exc_info=True)
return []

View File

@@ -1,302 +0,0 @@
# git/status/status.py
import git
from git.exc import GitCommandError, InvalidGitRepositoryError
import logging
from .incoming_changes import get_incoming_changes
from .outgoing_changes import get_outgoing_changes
from .merge_conflicts import get_merge_conflicts
from .utils import determine_type
import os
import yaml
import threading
from datetime import datetime
import json
from ...db import get_settings
logger = logging.getLogger(__name__)
class GitStatusManager:
_instance = None
_lock = threading.Lock()
def __init__(self, repo_path):
self.repo_path = repo_path
self.repo = git.Repo(repo_path)
self.status = {
# Local status
"branch": "",
"outgoing_changes": [],
"is_merging": False,
"merge_conflicts": [],
"has_conflicts": False,
# Remote status
"remote_branch_exists": False,
"commits_behind": 0,
"commits_ahead": 0,
"incoming_changes": [],
"has_unpushed_commits": False,
"unpushed_files": [],
# Metadata
"last_local_update": None,
"last_remote_update": None
}
@classmethod
def get_instance(cls, repo_path=None):
if not cls._instance and repo_path:
with cls._lock:
if not cls._instance:
cls._instance = cls(repo_path)
return cls._instance
def update_local_status(self):
"""Update only local repository status"""
try:
self.repo = git.Repo(self.repo_path) # Refresh repo instance
with self._lock:
# Update branch
self.status["branch"] = self.repo.active_branch.name
# Check merge status
self.status["is_merging"] = os.path.exists(
os.path.join(self.repo.git_dir, 'MERGE_HEAD'))
# Get local changes
self.status["outgoing_changes"] = get_outgoing_changes(
self.repo)
# Get merge conflicts if merging
self.status["merge_conflicts"] = (get_merge_conflicts(
self.repo) if self.status["is_merging"] else [])
self.status["has_conflicts"] = bool(
self.status["merge_conflicts"])
# Update timestamp
self.status["last_local_update"] = datetime.now().isoformat()
return True
except Exception as e:
logger.error(f"Error updating local status: {str(e)}")
return False
def update_remote_status(self):
"""Update remote repository status - called by scheduled task"""
try:
logger.info(
f"Updating remote status for branch: {self.status['branch']}")
# Do the fetch outside the lock
self.repo.remotes.origin.fetch()
# Get branch name safely
with self._lock:
branch = self.status["branch"]
# Do git operations outside lock
remote_refs = [ref.name for ref in self.repo.remotes.origin.refs]
remote_branch_exists = f"origin/{branch}" in remote_refs
if remote_branch_exists:
commits_behind = list(
self.repo.iter_commits(f'{branch}..origin/{branch}'))
commits_ahead = list(
self.repo.iter_commits(f'origin/{branch}..{branch}'))
# Handle auto-pull before updating status
if len(commits_behind) > 0:
logger.info(
f"Branch is {len(commits_behind)} commits behind")
try:
settings = get_settings()
if int(settings.get('auto_pull_enabled', 0)):
logger.info("Auto-pull enabled, pulling changes")
from ..operations.manager import GitOperations
git_ops = GitOperations(self.repo_path)
pull_result = git_ops.pull(branch)
logger.info(f"Auto-pull result: {pull_result}")
success, message = pull_result
if not success:
logger.error(f"Auto-pull failed: {message}")
# Refresh counts after pull
commits_behind = list(
self.repo.iter_commits(
f'{branch}..origin/{branch}'))
commits_ahead = list(
self.repo.iter_commits(
f'origin/{branch}..{branch}'))
except Exception as e:
logger.error(f"Error during auto-pull: {str(e)}")
# Prepare the status update
incoming = get_incoming_changes(self.repo, branch)
unpushed = self._get_unpushed_changes(
branch) if commits_ahead else []
# Only lock when updating the status
with self._lock:
self.status.update({
"remote_branch_exists":
remote_branch_exists,
"commits_behind":
len(commits_behind),
"commits_ahead":
len(commits_ahead),
"has_unpushed_commits":
len(commits_ahead) > 0,
"incoming_changes":
incoming,
"unpushed_files":
unpushed,
"last_remote_update":
datetime.now().isoformat()
})
else:
with self._lock:
self.status.update({
"remote_branch_exists":
False,
"commits_behind":
0,
"commits_ahead":
0,
"has_unpushed_commits":
False,
"incoming_changes": [],
"unpushed_files": [],
"last_remote_update":
datetime.now().isoformat()
})
return True
except Exception as e:
logger.error(f"Error updating remote status: {str(e)}")
return False
def _get_unpushed_changes(self, branch):
"""Get detailed info about files modified in unpushed commits"""
try:
unpushed_files = self.repo.git.diff(f'origin/{branch}..{branch}',
'--name-only').split('\n')
unpushed_files = [f for f in unpushed_files if f]
detailed_changes = []
for file_path in unpushed_files:
try:
with open(os.path.join(self.repo.working_dir, file_path),
'r') as f:
content = yaml.safe_load(f.read())
detailed_changes.append({
'type':
determine_type(file_path),
'name':
content.get('name', os.path.basename(file_path)),
'file_path':
file_path
})
except Exception as e:
logger.warning(
f"Could not get details for {file_path}: {str(e)}")
detailed_changes.append({
'type': determine_type(file_path),
'name': os.path.basename(file_path),
'file_path': file_path
})
return detailed_changes
except Exception as e:
logger.error(f"Error getting unpushed changes: {str(e)}")
return []
def get_status(self):
"""Get the current status without updating"""
with self._lock:
return self.status.copy()
def format_git_status(status):
"""Format git status for logging with truncation and pretty printing.
Args:
status (dict): The git status dictionary to format
Returns:
str: Formatted status string
"""
def truncate_list(lst, max_items=3):
"""Truncate a list and add count of remaining items."""
if len(lst) <= max_items:
return lst
return lst[:max_items] + [f"... and {len(lst) - max_items} more items"]
def truncate_string(s, max_length=50):
"""Truncate a string if it's too long."""
if not s or len(s) <= max_length:
return s
return s[:max_length] + "..."
# Create a copy to modify
formatted_status = status.copy()
# Truncate lists
for key in [
'outgoing_changes', 'merge_conflicts', 'incoming_changes',
'unpushed_files'
]:
if key in formatted_status and isinstance(formatted_status[key], list):
formatted_status[key] = truncate_list(formatted_status[key])
# Format any nested dictionaries in the lists
for key in formatted_status:
if isinstance(formatted_status[key], list):
formatted_status[key] = [{
k: truncate_string(str(v))
for k, v in item.items()
} if isinstance(item, dict) else item
for item in formatted_status[key]]
# Convert to JSON with nice formatting
formatted_json = json.dumps(formatted_status, indent=2, default=str)
# Add a timestamp header
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return f"=== Git Status at {timestamp} ===\n{formatted_json}"
def get_git_status(repo_path):
try:
status_manager = GitStatusManager.get_instance(repo_path)
status_manager.update_local_status()
success, status = True, status_manager.get_status()
# Log the formatted status
logger.info("\n" + format_git_status(status))
return success, status
except git.exc.InvalidGitRepositoryError:
logger.info(f"No git repository found at {repo_path}")
empty_status = {
"branch": "",
"outgoing_changes": [],
"is_merging": False,
"merge_conflicts": [],
"has_conflicts": False,
"remote_branch_exists": False,
"commits_behind": 0,
"commits_ahead": 0,
"incoming_changes": [],
"has_unpushed_commits": False,
"unpushed_files": [],
"last_local_update": None,
"last_remote_update": None,
"has_repo": False
}
return True, empty_status
except Exception as e:
logger.error(f"Error in get_git_status: {str(e)}", exc_info=True)
return False, str(e)
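A minimal usage sketch of the manager above; the repository path and the idea of calling update_remote_status() from a scheduler are assumptions for illustration only.

manager = GitStatusManager.get_instance('/config/db')  # hypothetical repo path; first call builds the singleton
manager.update_local_status()                          # cheap: branch, merge state, working-dir changes
manager.update_remote_status()                         # fetch + ahead/behind counts, meant for a scheduled task

ok, status = get_git_status('/config/db')
if ok and status.get('has_conflicts'):
    print(format_git_status(status))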

View File

@@ -1,173 +0,0 @@
# git/status/utils.py
import os
import yaml
import logging
import re
logger = logging.getLogger(__name__)
def extract_data_from_yaml(file_path):
logger.debug(f"Extracting data from file: {file_path}")
try:
with open(file_path, 'r') as f:
content = yaml.safe_load(f)
logger.debug(
f"File content: {content}") # Log the full file content
if content is None:
logger.error(
f"Failed to parse YAML file or file is empty: {file_path}")
return None
# Check if expected keys are in the content
if 'name' not in content or 'id' not in content:
logger.warning(
f"'name' or 'id' not found in file: {file_path}")
return {'name': content.get('name'), 'id': content.get('id')}
except Exception as e:
logger.warning(f"Error reading file {file_path}: {str(e)}")
return None
def determine_type(file_path):
if 'regex_patterns' in file_path:
return 'Regex Pattern'
elif 'custom_formats' in file_path:
return 'Custom Format'
elif 'profiles' in file_path:
return 'Quality Profile'
elif 'media_management' in file_path:
return 'Media Management'
return 'Unknown'
def format_media_management_name(name):
"""Format media management category names for display"""
name_mapping = {
'misc': 'Miscellaneous',
'naming': 'Naming',
'quality_definitions': 'Quality Definitions'
}
return name_mapping.get(name, name)
def extract_name_from_path(file_path):
"""Extract and format name from file path"""
# Remove the file extension
name = os.path.splitext(file_path)[0]
# Remove the type prefix (everything before the first '/')
if '/' in name:
name = name.split('/', 1)[1]
# Format media management names
if 'media_management' in file_path:
return format_media_management_name(name)
return name
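# A small self-contained check of the helpers above (paths are illustrative):
if __name__ == '__main__':
    assert determine_type('profiles/HD Bluray.yml') == 'Quality Profile'
    assert extract_name_from_path('custom_formats/1080p Remux.yml') == '1080p Remux'
    assert extract_name_from_path(
        'media_management/quality_definitions.yml') == 'Quality Definitions'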
def interpret_git_status(x, y):
if x == 'D' or y == 'D':
return 'Deleted'
elif x == 'A':
return 'Added'
elif x == 'M' or y == 'M':
return 'Modified'
elif x == 'R':
return 'Renamed'
elif x == 'C':
return 'Copied'
elif x == 'U':
return 'Updated but unmerged'
elif x == '?' and y == '?':
return 'Untracked'
else:
return 'Unknown'
def parse_commit_message(commit_message):
# Default placeholders for missing parts of the commit message
placeholders = {
'type': 'Unknown Type',
'scope': 'Unknown Scope',
'subject': 'No subject provided',
'body': 'No body provided',
'footer': ''
}
# Mapping of commit types and scopes to canonical forms
type_mapping = {
'feat': 'New Feature',
'feature': 'New Feature',
'new': 'New Feature',
'fix': 'BugFix',
'bugfix': 'BugFix',
'bug': 'BugFix',
'docs': 'Documentation',
'documentation': 'Documentation',
'doc': 'Documentation',
'style': 'Style Change',
'formatting': 'Style Change',
'format': 'Style Change',
'lint': 'Style Change',
'refactor': 'Refactor',
'refactoring': 'Refactor',
'restructure': 'Refactor',
'redesign': 'Refactor',
'perf': 'Performance Improvement',
'performance': 'Performance Improvement',
'optimize': 'Performance Improvement',
'optimisation': 'Performance Improvement',
'test': 'Test',
'testing': 'Test',
'chore': 'Maintenance',
'maintenance': 'Maintenance',
'maintain': 'Maintenance'
}
scope_mapping = {
'regex': 'Regex Pattern',
'regex pattern': 'Regex Pattern',
'format': 'Custom Format',
'custom format': 'Custom Format',
'profile': 'Quality Profile',
'quality profile': 'Quality Profile'
}
# Regex patterns for each part of the commit message
type_pattern = r'^(?P<type>feat|feature|new|fix|bugfix|bug|docs|documentation|doc|style|formatting|format|lint|refactor|refactoring|restructure|redesign|perf|performance|optimize|optimisation|test|testing|chore|maintenance|maintain)'
scope_pattern = r'\((?P<scope>regex|regex pattern|format|custom format|profile|quality profile)\)'
subject_pattern = r':\s(?P<subject>.+)'
body_pattern = r'(?P<body>(?:- .+\n?)+)' # Handles multiple lines in the body
footer_pattern = r'(?P<footer>(Fixes|Resolves|See also|Relates to)\s.+)'
# Initialize result with placeholders
parsed_message = placeholders.copy()
# Parse the type and scope
type_scope_match = re.match(
f'{type_pattern}{scope_pattern}{subject_pattern}', commit_message,
re.IGNORECASE)
if type_scope_match:
matched_type = type_scope_match.group('type').lower()
matched_scope = type_scope_match.group('scope').lower()
# Map the matched values to their canonical forms
parsed_message['type'] = type_mapping.get(matched_type, 'Unknown Type')
parsed_message['scope'] = scope_mapping.get(matched_scope,
'Unknown Scope')
parsed_message['subject'] = type_scope_match.group('subject').strip()
# Match and extract the body part
body_match = re.search(body_pattern, commit_message, re.MULTILINE)
if body_match:
parsed_message['body'] = body_match.group('body').strip()
# Match and extract the footer (if present)
footer_match = re.search(footer_pattern, commit_message)
if footer_match:
parsed_message['footer'] = footer_match.group('footer').strip()
return parsed_message
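A worked example of the parser above; the commit message is made up, but the mapped values come from the type_mapping and scope_mapping defined in this file.

msg = ("feat(profile): add 1080p profile\n"
       "- add bluray tiers\n"
       "- rescore HDR formats\n"
       "Fixes #42")
parsed = parse_commit_message(msg)
# parsed['type']    -> 'New Feature'      (via type_mapping['feat'])
# parsed['scope']   -> 'Quality Profile'  (via scope_mapping['profile'])
# parsed['subject'] -> 'add 1080p profile'
# parsed['body']    -> '- add bluray tiers\n- rescore HDR formats'
# parsed['footer']  -> 'Fixes #42'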

View File

@@ -1,259 +0,0 @@
# app/importarr/__init__.py
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import logging
import asyncio
from pathlib import Path
from ..arr.manager import get_arr_config
from ..data.utils import get_category_directory, load_yaml_file
from .format import import_formats_to_arr, async_import_formats_to_arr
from .profile import import_profiles_to_arr, async_import_profiles_to_arr
from ..db import get_unique_arrs
logger = logging.getLogger('importarr')
bp = Blueprint('import', __name__)
@bp.route('/format', methods=['POST', 'OPTIONS'])
@cross_origin()
def import_formats():
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
data = request.get_json()
arr_id = data.get('arrId')
all_formats = data.get('all', False)
format_names = data.get('formatNames', [])
if not arr_id:
return jsonify({
'success': False,
'error': 'Arr ID is required'
}), 400
if not all_formats and not format_names:
return jsonify({
'success':
False,
'error':
'Either formatNames or all=true is required'
}), 400
# Get import_as_unique setting using the new function
import_settings = get_unique_arrs([arr_id])
arr_settings = import_settings.get(arr_id, {
'import_as_unique': False,
'name': 'Unknown'
})
import_as_unique = arr_settings['import_as_unique']
if import_as_unique:
logger.info(
f"Unique imports for {arr_settings['name']} are on, adjusting names for custom formats"
)
else:
logger.info(
f"Unique imports for {arr_settings['name']} is off, using original names"
)
# Get arr configuration
arr_config = get_arr_config(arr_id)
if not arr_config['success']:
return jsonify({
'success': False,
'error': 'Arr configuration not found'
}), 404
arr_data = arr_config['data']
# If all=true, get all format names from the custom_format directory
if all_formats:
try:
format_dir = Path(get_category_directory('custom_format'))
format_names = [f.stem for f in format_dir.glob('*.yml')]
if not format_names:
return jsonify({
'success': False,
'error': 'No custom formats found'
}), 404
except Exception as e:
logger.error(
f"Error reading custom formats directory: {str(e)}")
return jsonify({
'success':
False,
'error':
'Failed to read custom formats directory'
}), 500
# Store original names for file lookups
original_names = format_names.copy()
# Modify format names if import_as_unique is true
if import_as_unique:
format_names = [f"{name} [Dictionarry]" for name in format_names]
logger.info(
f"Modified format names for unique import: {format_names}")
# Import formats with arr type from config, but use original names for file lookups
result = import_formats_to_arr(format_names=format_names,
original_names=original_names,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type'])
return jsonify(result), 200 if result['success'] else 400
except Exception as e:
logger.error(f"Error importing custom formats: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
@bp.route('/profile', methods=['POST', 'OPTIONS'])
@cross_origin()
def import_profiles():
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
data = request.get_json()
arr_id = data.get('arrId')
all_profiles = data.get('all', False)
profile_names = data.get('profileNames', [])
if not arr_id:
return jsonify({
'success': False,
'error': 'Arr ID is required'
}), 400
if not all_profiles and not profile_names:
return jsonify({
'success':
False,
'error':
'Either profileNames or all=true is required'
}), 400
# Get import_as_unique setting
import_settings = get_unique_arrs([arr_id])
arr_settings = import_settings.get(arr_id, {
'import_as_unique': False,
'name': 'Unknown'
})
import_as_unique = arr_settings['import_as_unique']
if import_as_unique:
logger.info(
f"Unique imports for {arr_settings['name']} are on, adjusting names for quality profiles"
)
else:
logger.info(
f"Unique imports for {arr_settings['name']} is off, using original names"
)
# Get arr configuration
arr_config = get_arr_config(arr_id)
if not arr_config['success']:
return jsonify({
'success': False,
'error': 'Arr configuration not found'
}), 404
arr_data = arr_config['data']
# If all=true, get all profile names
if all_profiles:
try:
profile_dir = Path(get_category_directory('profile'))
profile_names = [f.stem for f in profile_dir.glob('*.yml')]
if not profile_names:
return jsonify({
'success': False,
'error': 'No quality profiles found'
}), 404
except Exception as e:
logger.error(f"Error reading profiles directory: {str(e)}")
return jsonify({
'success': False,
'error': 'Failed to read profiles directory'
}), 500
# Store original names for file lookups
original_names = profile_names.copy()
# Modify profile names if import_as_unique is true
if import_as_unique:
profile_names = [f"{name} [Dictionarry]" for name in profile_names]
logger.info(
f"Modified profile names for unique import: {profile_names}")
logger.debug(
f"Attempting to import profiles: {profile_names} for {arr_data['type']}: {arr_data['name']}"
)
# Get any custom formats referenced in these profiles
format_names = set()
for profile_name in original_names: # Use original names for file lookup
try:
profile_file = f"{get_category_directory('profile')}/{profile_name}.yml"
format_data = load_yaml_file(profile_file)
# Extract from main custom_formats
for cf in format_data.get('custom_formats', []):
format_names.add(cf['name'])
# Extract from app-specific custom_formats
for cf in format_data.get('custom_formats_radarr', []):
format_names.add(cf['name'])
for cf in format_data.get('custom_formats_sonarr', []):
format_names.add(cf['name'])
except Exception as e:
logger.error(f"Error loading profile {profile_name}: {str(e)}")
continue
# Import/Update formats first - use async version for larger batch sizes
if format_names:
format_names_list = list(format_names)
# With more than a few formats, import_formats_to_arr switches to its
# async path internally and parallelizes the requests
if import_as_unique:
modified_format_names = [
f"{name} [Dictionarry]" for name in format_names_list
]
# Use the regular import function which will detect large batches
# and automatically use async when appropriate
import_formats_to_arr(
format_names=modified_format_names,
original_names=format_names_list,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type']
)
else:
# Use the regular import function which will detect large batches
# and automatically use async when appropriate
import_formats_to_arr(
format_names=format_names_list,
original_names=format_names_list,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type']
)
# Import profiles
result = import_profiles_to_arr(profile_names=profile_names,
original_names=original_names,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type'],
arr_id=arr_id,
import_as_unique=import_as_unique)
return jsonify(result), 200 if result['success'] else 400
except Exception as e:
logger.error(f"Error importing quality profiles: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
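For reference, a hedged sketch of how these two routes might be called; the URL prefix is an assumption, since the blueprint is registered elsewhere, and the arr ID is illustrative.

import requests

BASE = "http://localhost:5000/import"   # assumed prefix; adjust to the real blueprint registration

resp = requests.post(f"{BASE}/format",
                     json={"arrId": 1, "formatNames": ["1080p Remux"]})
print(resp.json())                      # success / added / updated / failed / details

resp = requests.post(f"{BASE}/profile",
                     json={"arrId": 1, "all": True})   # or "profileNames": [...]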

View File

@@ -1,398 +0,0 @@
import requests
import logging
import json
import yaml
import asyncio
import aiohttp
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from ..data.utils import (load_yaml_file, get_category_directory, REGEX_DIR,
FORMAT_DIR)
from ..compile import CustomFormat, FormatConverter, TargetApp
from ..db.queries.format_renames import is_format_in_renames
logger = logging.getLogger('importarr')
def import_formats_to_arr(format_names, base_url, api_key, arr_type,
original_names):
"""
Import custom formats to arr instance.
This function supports bulk importing of formats, processing small batches sequentially and handing larger batches to the async path.
"""
logger.info(
f"Received {len(format_names)} formats to import for {arr_type}")
# For larger imports, use the async version to improve performance
if len(format_names) > 5:
# Run async function within the event loop
return asyncio.run(
async_import_formats_to_arr(
format_names=format_names,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
original_names=original_names
)
)
# For smaller imports, use the regular synchronous version
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
logger.info("Looking for existing formats...")
existing_formats = get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_names = {fmt['name']: fmt['id'] for fmt in existing_formats}
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
converter = FormatConverter(patterns)
target_app = TargetApp.RADARR if arr_type.lower(
) == 'radarr' else TargetApp.SONARR
for i, format_name in enumerate(format_names):
try:
# Use original name for file lookup
original_name = original_names[i]
format_file = f"{get_category_directory('custom_format')}/{original_name}.yml"
format_data = load_yaml_file(format_file)
custom_format = CustomFormat(**format_data)
converted_format = converter.convert_format(
custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Create base compiled data with ordered fields
compiled_data = {'name': format_name} # Start with name
# Check rename status and add field right after name if true
if is_format_in_renames(original_name):
compiled_data['includeCustomFormatWhenRenaming'] = True
logger.info(
f"Format {original_name} has renames enabled, including field"
)
# Add specifications last
compiled_data['specifications'] = [
vars(spec) for spec in converted_format.specifications
]
result = process_format(compiled_data, existing_names,
base_url, api_key)
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
except Exception as e:
logger.error(
f"Error processing format {format_name}: {str(e)}")
results['failed'] += 1
results['success'] = False
results['details'].append({
'name': format_name,
'action': 'failed',
'success': False,
'error': str(e)
})
logger.info(
f"Importing {len(format_names)} formats complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in import_formats_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
async def async_import_formats_to_arr(format_names: List[str],
base_url: str,
api_key: str,
arr_type: str,
original_names: List[str]) -> Dict:
"""
Asynchronous version of import_formats_to_arr that processes formats concurrently.
This significantly improves performance for large batches.
"""
logger.info(
f"Received {len(format_names)} formats to import (async) for {arr_type}")
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
logger.info("Looking for existing formats (async)...")
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_names = {fmt['name']: fmt['id'] for fmt in existing_formats}
# Load patterns - plain file-system reads, so there is no need for this step to be async
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
converter = FormatConverter(patterns)
target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
# Process all formats into API-ready format first
compiled_formats = []
format_tasks = []
for i, format_name in enumerate(format_names):
try:
# Use original name for file lookup
original_name = original_names[i]
format_file = f"{get_category_directory('custom_format')}/{original_name}.yml"
format_data = load_yaml_file(format_file)
custom_format = CustomFormat(**format_data)
converted_format = converter.convert_format(custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Create base compiled data with ordered fields
compiled_data = {'name': format_name} # Start with name
# Check rename status and add field right after name if true
if is_format_in_renames(original_name):
compiled_data['includeCustomFormatWhenRenaming'] = True
logger.info(
f"Format {original_name} has renames enabled, including field"
)
# Add specifications last
compiled_data['specifications'] = [
vars(spec) for spec in converted_format.specifications
]
compiled_formats.append((format_name, compiled_data))
except Exception as e:
logger.error(f"Error processing format {format_name}: {str(e)}")
results['failed'] += 1
results['success'] = False
results['details'].append({
'name': format_name,
'action': 'failed',
'success': False,
'error': str(e)
})
# Now create async tasks for all formats to upload them concurrently
for format_name, compiled_data in compiled_formats:
task = asyncio.ensure_future(
async_process_format(
format_data=compiled_data,
existing_names=existing_names,
base_url=base_url,
api_key=api_key
)
)
format_tasks.append((format_name, task))
# Wait for all format uploads to complete
for format_name, task in format_tasks:
try:
result = await task
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
except Exception as e:
logger.error(f"Error waiting for format task {format_name}: {str(e)}")
results['failed'] += 1
results['success'] = False
results['details'].append({
'name': format_name,
'action': 'failed',
'success': False,
'error': str(e)
})
logger.info(
f"Async importing {len(format_names)} formats complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in async_import_formats_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
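# The concurrency pattern used above, in miniature: schedule every coroutine
# first, then await them, so their waits overlap instead of running back to
# back. Self-contained demo only; no arr calls involved.
async def _demo_concurrent_uploads():
    async def _fake_upload(name):
        await asyncio.sleep(0.1)        # stands in for the aiohttp request
        return {'name': name, 'success': True}

    tasks = [asyncio.ensure_future(_fake_upload(n)) for n in ('A', 'B', 'C')]
    return [await t for t in tasks]     # finishes in roughly 0.1s, not 0.3s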
def get_existing_formats(base_url, api_key):
try:
response = requests.get(f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats: {str(e)}")
return None
async def async_get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_formats"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key}
) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats (async): {str(e)}")
return None
def process_format(format_data, existing_names, base_url, api_key):
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
async def async_process_format(format_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
"""Async version of process_format"""
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = await async_update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = await async_add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
def update_format(base_url, api_key, format_data):
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
response = requests.put(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Update format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format: {str(e)}")
return False
async def async_update_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of update_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
async with aiohttp.ClientSession() as session:
async with session.put(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Update format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format (async): {str(e)}")
return False
def add_format(base_url, api_key, format_data):
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
response = requests.post(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Add format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format: {str(e)}")
return False
async def async_add_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of add_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
async with aiohttp.ClientSession() as session:
async with session.post(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Add format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format (async): {str(e)}")
return False
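A minimal call sketch for the importer above; the server URL, API key, and format names are placeholders.

names = ["1080p Remux", "DV HDR10", "x265 HD"]
result = import_formats_to_arr(
    format_names=names,
    original_names=names,               # same names when import_as_unique is off
    base_url="http://radarr:7878",      # hypothetical arr instance
    api_key="<radarr-api-key>",
    arr_type="radarr",
)
# Three names is under the threshold of five, so this runs the synchronous loop;
# six or more would be dispatched through asyncio.run(async_import_formats_to_arr(...)).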

View File

@@ -1,365 +0,0 @@
# app/importarr/format_memory.py
"""Imports custom formats from memory, not YML files"""
import requests
import logging
import json
import asyncio
import aiohttp
from typing import Dict, List, Optional
from pathlib import Path
from ..data.utils import (load_yaml_file, get_category_directory, REGEX_DIR,
FORMAT_DIR)
from ..compile import CustomFormat, FormatConverter, TargetApp
logger = logging.getLogger('importarr')
def get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
"""Get existing custom formats from arr instance"""
try:
response = requests.get(f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats: {str(e)}")
return None
async def async_get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_formats"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key}
) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats (async): {str(e)}")
return None
def process_format(format_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
"""Process single format - either update or add new"""
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
async def async_process_format(format_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
"""Async version of process_format"""
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = await async_update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = await async_add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
def update_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Update existing custom format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
response = requests.put(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Update format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format: {str(e)}")
return False
async def async_update_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of update_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
async with aiohttp.ClientSession() as session:
async with session.put(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Update format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format (async): {str(e)}")
return False
def add_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Add new custom format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
response = requests.post(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Add format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format: {str(e)}")
return False
async def async_add_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of add_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
async with aiohttp.ClientSession() as session:
async with session.post(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Add format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format (async): {str(e)}")
return False
def import_format_from_memory(format_data: Dict,
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool = False) -> Dict:
"""
Import a format directly from memory without requiring file loading.
Args:
format_data: Dictionary containing the format specification
base_url: Arr instance base URL
api_key: API key for arr instance
arr_type: Type of arr instance (radarr/sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
Dict containing import results
"""
# For memory-based imports, no need to check size threshold
# as these are typically used for language formats which are few
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
# Modify format name if import_as_unique is true
original_name = format_data['name']
if import_as_unique:
format_data['name'] = f"{original_name} [Dictionarry]"
logger.info(
f"Modified format name for unique import: {format_data['name']}"
)
logger.info("Looking for existing formats (memory-based import)...")
existing_formats = get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_format_map = {
fmt['name']: fmt['id']
for fmt in existing_formats
}
# Convert from raw data into a CustomFormat object
custom_format = CustomFormat(**format_data)
# Load patterns from regex directory
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
target_app = TargetApp.RADARR if arr_type.lower(
) == 'radarr' else TargetApp.SONARR
converter = FormatConverter(patterns)
converted_format = converter.convert_format(custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Prepare final JSON data
api_format = {
'name':
converted_format.name,
'specifications':
[vars(spec) for spec in converted_format.specifications]
}
# Format compiled successfully
# Process the compiled format (update/add)
result = process_format(api_format, existing_format_map, base_url,
api_key)
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
return results
except Exception as e:
logger.error(f"Error importing format data: {str(e)}")
return {
'success':
False,
'error':
str(e),
'details': [{
'name': format_data.get('name', 'unknown'),
'action': 'failed',
'success': False,
'error': str(e)
}]
}
async def async_import_format_from_memory(format_data: Dict,
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool = False) -> Dict:
"""
Asynchronous version of import_format_from_memory
Args:
format_data: Dictionary containing the format specification
base_url: Arr instance base URL
api_key: API key for arr instance
arr_type: Type of arr instance (radarr/sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
Dict containing import results
"""
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
# Modify format name if import_as_unique is true
original_name = format_data['name']
if import_as_unique:
format_data['name'] = f"{original_name} [Dictionarry]"
logger.info(
f"Modified format name for unique import: {format_data['name']}"
)
logger.info("Looking for existing formats (memory-based import, async)...")
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_format_map = {
fmt['name']: fmt['id']
for fmt in existing_formats
}
# Convert from raw data into a CustomFormat object
custom_format = CustomFormat(**format_data)
# Load patterns from regex directory (file system operations, no need for async)
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
converter = FormatConverter(patterns)
converted_format = converter.convert_format(custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Prepare final JSON data
api_format = {
'name': converted_format.name,
'specifications': [vars(spec) for spec in converted_format.specifications]
}
# Format compiled successfully
# Process the compiled format (update/add) using async methods
result = await async_process_format(api_format, existing_format_map, base_url, api_key)
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
return results
except Exception as e:
logger.error(f"Error importing format data (async): {str(e)}")
return {
'success': False,
'error': str(e),
'details': [{
'name': format_data.get('name', 'unknown'),
'action': 'failed',
'success': False,
'error': str(e)
}]
}
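A hedged sketch of driving the memory-based importer above; the format dict is a minimal assumption about what CustomFormat accepts, and the server details are placeholders.

import asyncio

format_data = {"name": "Not English"}   # minimal, assumed shape; real formats carry their conditions too
result = asyncio.run(
    async_import_format_from_memory(format_data=format_data,
                                    base_url="http://radarr:7878",   # placeholder
                                    api_key="<radarr-api-key>",
                                    arr_type="radarr",
                                    import_as_unique=False))
# result mirrors the dict built above: success/added/updated/failed/details,
# or a top-level 'error' key if the existing formats could not be fetched.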

View File

@@ -1,861 +0,0 @@
# app/importarr/profile.py
import requests
import logging
import json
import yaml
import asyncio
import aiohttp
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from ..data.utils import load_yaml_file, get_category_directory
from ..compile.profile_compiler import compile_quality_profile
from ..compile.mappings import TargetApp
from .format import import_formats_to_arr
from .format_memory import import_format_from_memory, async_import_format_from_memory
from ..arr.manager import get_arr_config
logger = logging.getLogger('importarr')
def import_profiles_to_arr(profile_names: List[str], original_names: List[str],
base_url: str, api_key: str, arr_type: str,
arr_id: str, import_as_unique: bool) -> Dict:
"""
Import quality profiles to arr instance.
This function supports bulk importing of profiles with sequential or concurrent processing.
"""
logger.info(
f"Received {len(profile_names)} profiles to import for {arr_type}")
# For larger imports, use the async version to improve performance
if len(profile_names) > 1:
# Run async function within the event loop
return asyncio.run(
async_import_profiles_to_arr(profile_names=profile_names,
original_names=original_names,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
arr_id=arr_id,
import_as_unique=import_as_unique))
# For smaller imports, use the regular synchronous version
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
arr_config_response = get_arr_config(arr_id)
if not arr_config_response['success']:
return {
'success': False,
'error': 'Failed to get arr configuration'
}
arr_config = arr_config_response['data']
logger.info("Looking for existing profiles...")
existing_profiles = get_existing_profiles(base_url, api_key)
if existing_profiles is None:
return {
'success': False,
'error': 'Failed to get existing profiles'
}
# Create mapping for existing profiles
existing_profile_map = {}
for profile in existing_profiles:
existing_profile_map[profile['name']] = profile['id']
target_app = TargetApp.RADARR if arr_type.lower(
) == 'radarr' else TargetApp.SONARR
for i, profile_name in enumerate(profile_names):
try:
# Use original name for file lookup
original_name = original_names[i]
profile_file = f"{get_category_directory('profile')}/{original_name}.yml"
profile_data = load_yaml_file(profile_file)
# Set the potentially modified profile name
profile_data['name'] = profile_name
# Modify custom format names if import_as_unique is true
if import_as_unique and 'custom_formats' in profile_data:
for cf in profile_data['custom_formats']:
cf['name'] = f"{cf['name']} [Dictionarry]"
# Profile loaded
profile_language = profile_data.get('language', 'any')
if profile_language != 'any':
# Detect if we're using simple or advanced mode
is_simple_mode = '_' not in profile_language
if is_simple_mode:
logger.info(
f"Profile '{profile_name}' has simple mode language: {profile_language}"
)
logger.info(
f"Simple mode will set language filter to: {profile_language}"
)
else:
logger.info(
f"Profile '{profile_name}' has advanced mode language: {profile_language}"
)
compiled_profiles = compile_quality_profile(
profile_data=profile_data,
target_app=target_app,
base_url=base_url,
api_key=api_key,
format_importer=import_formats_to_arr,
import_as_unique=import_as_unique)
if not compiled_profiles:
raise ValueError("Profile compilation returned no data")
profile_data = compiled_profiles[0]
logger.info(
"Looking for existing custom formats to sync format IDs..."
)
existing_formats = get_existing_formats(base_url, api_key)
if existing_formats is None:
raise ValueError("Failed to get updated format list")
format_id_map = {
fmt['name']: fmt['id']
for fmt in existing_formats
}
logger.debug(
f"Found {len(format_id_map)} existing custom formats")
profile_data = sync_format_ids(profile_data, format_id_map)
logger.debug("Format items after sync:")
for item in profile_data.get('formatItems', []):
logger.debug(
f" {item['name']} => Score: {item.get('score', 0)}, "
f"Format ID: {item.get('format', 'missing')}")
# Profile compiled successfully
result = process_profile(profile_data=profile_data,
existing_names=existing_profile_map,
base_url=base_url,
api_key=api_key)
results[result['action']] += 1
results['details'].append(result['detail'])
if not result['success']:
results['success'] = False
except Exception as e:
logger.error(
f"Error processing profile {profile_name}: {str(e)}, type: {type(e).__name__}"
)
logger.exception("Full traceback:")
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
})
results['success'] = False
logger.info(
f"Importing {len(profile_names)} profiles complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in import_profiles_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
async def async_import_profiles_to_arr(profile_names: List[str],
original_names: List[str],
base_url: str, api_key: str,
arr_type: str, arr_id: str,
import_as_unique: bool) -> Dict:
"""
Asynchronous version of import_profiles_to_arr that processes profiles concurrently.
This significantly improves performance for larger batches of profile imports.
"""
logger.info(
f"Received {len(profile_names)} profiles to import (async) for {arr_type}"
)
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
arr_config_response = get_arr_config(arr_id)
if not arr_config_response['success']:
return {
'success': False,
'error': 'Failed to get arr configuration'
}
arr_config = arr_config_response['data']
logger.info("Looking for existing profiles (async)...")
existing_profiles = await async_get_existing_profiles(
base_url, api_key)
if existing_profiles is None:
return {
'success': False,
'error': 'Failed to get existing profiles'
}
# Create mapping for existing profiles
existing_profile_map = {}
for profile in existing_profiles:
existing_profile_map[profile['name']] = profile['id']
target_app = TargetApp.RADARR if arr_type.lower(
) == 'radarr' else TargetApp.SONARR
# Fetch all existing formats once upfront
logger.info("Pre-fetching existing custom formats for all profiles...")
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing custom formats'
}
format_id_map = {fmt['name']: fmt['id'] for fmt in existing_formats}
logger.info(f"Successfully pre-fetched {len(format_id_map)} existing custom formats")
# Pre-scan all profiles to identify and cache language formats
needed_language_formats = set()
initial_profiles_data = []
# First, load and analyze all profile files
for i, profile_name in enumerate(profile_names):
try:
# Use original name for file lookup
original_name = original_names[i]
profile_file = f"{get_category_directory('profile')}/{original_name}.yml"
profile_data = load_yaml_file(profile_file)
# Store original profile data for later processing
initial_profiles_data.append((i, profile_name, original_name, profile_data))
# Extract language from profile data
profile_language = profile_data.get('language', 'any')
if profile_language != 'any' and '_' in profile_language:
# This is an advanced mode language that needs special format handling
needed_language_formats.add(profile_language)
# Language format identified
except Exception as e:
logger.error(f"Error pre-scanning profile {profile_name}: {str(e)}")
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': f"Error pre-scanning profile: {str(e)}"
})
results['success'] = False
# Pre-load all language formats if any exist
language_format_cache = {}
if needed_language_formats:
logger.info(f"Pre-importing {len(needed_language_formats)} unique language formats for {len(profile_names)} profiles")
language_format_cache = await preload_language_formats(
language_formats=list(needed_language_formats),
target_app=target_app,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
import_as_unique=import_as_unique
)
logger.info(f"Successfully pre-loaded language formats for {len(language_format_cache)} languages")
# Process each profile with the cached language formats
profile_tasks = []
for i, profile_name, original_name, profile_data in initial_profiles_data:
try:
# Set the potentially modified profile name
profile_data['name'] = profile_name
# Modify custom format names if import_as_unique is true
if import_as_unique and 'custom_formats' in profile_data:
for cf in profile_data['custom_formats']:
cf['name'] = f"{cf['name']} [Dictionarry]"
# Profile loaded
profile_language = profile_data.get('language', 'any')
if profile_language != 'any':
# Detect if we're using simple or advanced mode
is_simple_mode = '_' not in profile_language
# Language mode detected
# Setup the profile compilation with the cached language formats
# By default, use normal import
format_importer = import_formats_to_arr
# For profiles with language formats, attach the cached formats
if language_format_cache and profile_language != 'any' and '_' in profile_language:
language_format_configs = language_format_cache.get(profile_language, [])
if language_format_configs:
# Using cached language formats
# Define a special function that will be detected by the profile compiler
# The function name is checked in _process_language_formats
def cached_format_importer(*args, **kwargs):
# Using cached formats from importer
return {
'success': True,
'added': 0,
'updated': len(language_format_configs),
'failed': 0,
'details': []
}
# Add the cached formats to the function so they can be accessed by the compiler
cached_format_importer.cached_formats = language_format_configs
format_importer = cached_format_importer
else:
logger.warning(f"No cached formats found for language {profile_language}")
# Add language formats from cache directly to the profile for the compiler
# This way we don't need to modify the compiler code at all
if profile_language != 'any' and '_' in profile_language and profile_language in language_format_cache:
# Add the cached language formats directly to the profile
if 'custom_formats' not in profile_data:
profile_data['custom_formats'] = []
# Add the cached formats - these are already imported, we just need to reference them
profile_data['custom_formats'].extend(language_format_cache[profile_language])
compiled_profiles = compile_quality_profile(
profile_data=profile_data,
target_app=target_app,
base_url=base_url,
api_key=api_key,
format_importer=format_importer,
import_as_unique=import_as_unique
)
if not compiled_profiles:
raise ValueError("Profile compilation returned no data")
compiled_profile = compiled_profiles[0]
# Sync format IDs upfront using the cached format_id_map
synced_profile = sync_format_ids(compiled_profile, format_id_map)
# Create a task for processing this profile (without fetching formats again)
task = asyncio.create_task(
async_process_profile(
profile_data=synced_profile,
existing_names=existing_profile_map,
base_url=base_url,
api_key=api_key
)
)
profile_tasks.append((profile_name, task))
except Exception as e:
logger.error(
f"Error processing profile {profile_name}: {str(e)}, type: {type(e).__name__} (async)"
)
logger.exception("Full traceback:")
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
})
results['success'] = False
# Process all profile upload results
for profile_name, task in profile_tasks:
try:
result = await task
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
except Exception as e:
logger.error(
f"Error waiting for profile task {profile_name}: {str(e)} (async)"
)
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
})
results['success'] = False
logger.info(
f"Async importing {len(profile_names)} profiles complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in async_import_profiles_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
def get_existing_profiles(base_url: str, api_key: str) -> Optional[List[Dict]]:
try:
response = requests.get(
f"{base_url.rstrip('/')}/api/v3/qualityprofile",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing profiles: {str(e)}")
return None
async def async_get_existing_profiles(base_url: str,
api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_profiles"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/qualityprofile",
headers={'X-Api-Key': api_key}) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing profiles (async): {str(e)}")
return None
def get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
try:
response = requests.get(f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats: {str(e)}")
return None
async def async_get_existing_formats(base_url: str,
api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_formats"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key}) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats (async): {str(e)}")
return None
async def preload_language_formats(language_formats: List[str],
target_app: TargetApp,
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool) -> Dict[str, List[Dict]]:
"""
Pre-load all language formats for the specified languages to avoid
duplicate imports when multiple profiles use the same language settings.
Args:
language_formats: List of language identifiers (e.g. ["must_english", "prefer_french"])
target_app: TargetApp enum value (RADARR or SONARR)
base_url: API base URL
api_key: API key for the arr instance
arr_type: Type of arr (radarr or sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
Dictionary mapping language IDs to their imported format configs
"""
from ..compile.profile_compiler import ProfileConverter
language_format_cache = {}
# Create a single ProfileConverter instance for all languages
converter = ProfileConverter(
target_app=target_app,
base_url=base_url,
api_key=api_key,
format_importer=None, # We'll handle importing manually
import_as_unique=import_as_unique
)
# For each unique language, process and cache its formats
for language_id in language_formats:
try:
# Skip if we've already processed this language
if language_id in language_format_cache:
continue
# Parse the language behavior and code
if '_' in language_id:
behavior, language_code = language_id.split('_', 1)
else:
# Skip simple language modes - they don't need special format imports
continue
logger.info(f"Pre-importing language formats for {language_id} (async batch)")
# First generate format data for this language
formats_data = converter._generate_language_formats(behavior, language_code)
# Import these language formats just once
format_results = await import_language_formats_once(
formats_data=formats_data,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
import_as_unique=import_as_unique
)
# Store the format configs for this language
language_format_cache[language_id] = format_results
logger.info(f"Successfully cached {len(format_results)} formats for language {language_id}")
except Exception as e:
logger.error(f"Error pre-loading language formats for {language_id}: {str(e)}")
language_format_cache[language_id] = [] # Empty list to indicate failure
return language_format_cache
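# Shape of the language identifiers consumed above (examples mirror the docstring):
#   "must_english"  -> behavior "must", language_code "english"  (advanced mode)
#   "english"       -> no "_", i.e. simple mode, which the loop above skips
assert "must_english".split('_', 1) == ["must", "english"]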
async def import_language_formats_once(formats_data: List[Dict],
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool) -> List[Dict]:
"""
Helper function to import language formats once and return the results.
Args:
formats_data: List of format data dictionaries to import
base_url: API base URL
api_key: API key for arr instance
arr_type: Type of arr (radarr or sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
List of format configs ready to be added to profiles
"""
# Create tasks for concurrent format imports
format_configs = []
import_tasks = []
for format_data in formats_data:
# Setup task for importing this format
task = asyncio.create_task(
async_import_format_from_memory(
format_data=format_data,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
import_as_unique=import_as_unique
)
)
import_tasks.append((format_data['name'], task))
# Process all format imports
for format_name, task in import_tasks:
try:
result = await task
if not result.get('success', False):
logger.error(f"Format import failed for cached language format: {format_name}")
continue
# Determine final format name (after any [Dictionarry] suffix)
display_name = format_name
if import_as_unique:
display_name = f"{format_name} [Dictionarry]"
# Create format config exactly as needed by profile compiler
format_configs.append({
'name': display_name,
'score': -9999
})
except Exception as e:
logger.error(f"Error importing cached language format {format_name}: {str(e)}")
return format_configs
def use_cached_language_formats(language_cache: Dict[str, List[Dict]],
format_names: List[str],
base_url: str,
api_key: str,
arr_type: str,
original_names: List[str]) -> Dict:
"""
Custom format importer that returns cached language formats instead
of re-importing them. This is used by the profile compiler when we've
already pre-loaded the language formats.
This is a replacement for the regular import_formats_to_arr function.
"""
# Extract the language ID from the original profile data
# This is passed from the profile compiler's context when calling this function
language_id = getattr(use_cached_language_formats, 'current_language_id', None)
if language_id and language_id in language_cache:
logger.info(f"Using cached language formats for {language_id}")
return {
'success': True,
'added': 0,
'updated': len(language_cache[language_id]),
'failed': 0,
'details': [
{'name': fmt['name'], 'action': 'updated', 'success': True}
for fmt in language_cache[language_id]
]
}
else:
# Fall back to normal import if no cache entry exists
# or if this isn't a language format import
logger.info(f"No cached formats for language ID {language_id}, using normal import")
return import_formats_to_arr(
format_names=format_names,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
original_names=original_names
)
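# Illustrative sketch (not part of the original file) of the function-attribute
# handoff described in the docstring above. The cache contents and connection
# details are hypothetical; the cached branch is taken, so no request is made.
example_cache = {"must_english": [{"name": "Not English", "score": -9999}]}
use_cached_language_formats.current_language_id = "must_english"
cached_result = use_cached_language_formats(
    language_cache=example_cache,
    format_names=["Not English"],
    base_url="http://localhost:7878",   # hypothetical, unused on the cached path
    api_key="<radarr-api-key>",         # hypothetical, unused on the cached path
    arr_type="radarr",
    original_names=["Not English"],
)
# -> {'success': True, 'added': 0, 'updated': 1, 'failed': 0, 'details': [...]}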
def sync_format_ids(profile_data: Dict, format_id_map: Dict[str, int]) -> Dict:
if 'formatItems' not in profile_data:
profile_data['formatItems'] = []
# Create a set to track format names we've already processed
processed_formats = set()
synced_items = []
# First process existing items
for item in profile_data.get('formatItems', []):
if item['name'] not in processed_formats:
if item['name'] in format_id_map:
synced_items.append({
'format': format_id_map[item['name']],
'name': item['name'],
'score': item['score']
})
processed_formats.add(item['name'])
else:
logger.warning(
f"Custom format not found in arr: {item['name']}")
# Only add formats that haven't been processed yet
for format_name, format_id in format_id_map.items():
if format_name not in processed_formats:
synced_items.append({
'format': format_id,
'name': format_name,
'score': 0 # Default score for new formats
})
processed_formats.add(format_name)
profile_data['formatItems'] = synced_items
return profile_data
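# Illustrative sketch (not part of the original file): merging a profile's
# formatItems with the id map fetched from the arr. Names, ids and scores are hypothetical.
example_profile = {
    "name": "Example Profile",
    "formatItems": [
        {"name": "x265", "score": 100},
        {"name": "Missing CF", "score": 50},
    ],
}
example_id_map = {"x265": 7, "Remaster": 12}
synced = sync_format_ids(example_profile, example_id_map)
# synced["formatItems"] ==
#   [{"format": 7, "name": "x265", "score": 100},      # existing item, id attached
#    {"format": 12, "name": "Remaster", "score": 0}]   # new format, default score 0
# "Missing CF" is dropped with a warning because the arr has no matching format.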
# This function is now deprecated and replaced by direct use of sync_format_ids and async_process_profile
# We're keeping the signature for backward compatibility but not using it in the optimized code path
async def async_process_profile_with_formats(profile_name: str,
profile_data: Dict,
existing_profile_map: Dict[str,
int],
base_url: str,
api_key: str) -> Dict:
"""
Asynchronous function that handles getting formats and processing a profile in one go.
This allows for concurrent profile processing.
Note: This function is deprecated and should not be used in new code.
It's better to fetch formats once upfront for all profiles.
"""
try:
# Get formats for profile synchronization
logger.info(
f"Looking for existing custom formats to sync format IDs (async)..."
)
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
raise ValueError("Failed to get updated format list")
format_id_map = {fmt['name']: fmt['id'] for fmt in existing_formats}
logger.debug(
f"Found {len(format_id_map)} existing custom formats (async)")
# Sync format IDs in the profile
synced_profile = sync_format_ids(profile_data, format_id_map)
# Process the profile (add or update)
return await async_process_profile(profile_data=synced_profile,
existing_names=existing_profile_map,
base_url=base_url,
api_key=api_key)
except Exception as e:
logger.error(
f"Error in async_process_profile_with_formats for {profile_name}: {str(e)}"
)
return {
'success': False,
'action': 'failed',
'detail': {
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
}
}
def process_profile(profile_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
profile_name = profile_data['name']
if profile_name in existing_names:
profile_data['id'] = existing_names[profile_name]
success = update_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'updated' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'updated',
'success': success
}
}
else:
success = add_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'added' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'added',
'success': success
}
}
async def async_process_profile(profile_data: Dict, existing_names: Dict[str,
int],
base_url: str, api_key: str) -> Dict:
"""Async version of process_profile"""
profile_name = profile_data['name']
if profile_name in existing_names:
profile_data['id'] = existing_names[profile_name]
success = await async_update_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'updated' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'updated',
'success': success
}
}
else:
success = await async_add_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'added' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'added',
'success': success
}
}
def update_profile(base_url: str, api_key: str, profile_data: Dict) -> bool:
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile/{profile_data['id']}"
response = requests.put(url,
headers={'X-Api-Key': api_key},
json=profile_data)
logger.info(f"Update profile '{profile_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating profile: {str(e)}")
return False
async def async_update_profile(base_url: str, api_key: str,
profile_data: Dict) -> bool:
"""Async version of update_profile"""
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile/{profile_data['id']}"
async with aiohttp.ClientSession() as session:
async with session.put(url,
headers={'X-Api-Key': api_key},
json=profile_data) as response:
logger.info(f"Update profile '{profile_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating profile (async): {str(e)}")
return False
def add_profile(base_url: str, api_key: str, profile_data: Dict) -> bool:
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile"
response = requests.post(url,
headers={'X-Api-Key': api_key},
json=profile_data)
logger.info(f"Add profile '{profile_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding profile: {str(e)}")
return False
async def async_add_profile(base_url: str, api_key: str,
profile_data: Dict) -> bool:
"""Async version of add_profile"""
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile"
async with aiohttp.ClientSession() as session:
async with session.post(url,
headers={'X-Api-Key': api_key},
json=profile_data) as response:
logger.info(f"Add profile '{profile_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding profile (async): {str(e)}")
return False


@@ -1,325 +0,0 @@
"""Main import module entry point."""
import sys
import logging
from typing import Dict, Any, List
from .strategies import FormatStrategy, ProfileStrategy
from .logger import reset_import_logger
logger = logging.getLogger(__name__)
def handle_import_request(request: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle an import request.
Args:
request: Request dictionary containing:
- arrID: ID of the arr_config to use
- strategy: 'format' or 'profile'
- filenames: List of filenames to import
- dryRun: Optional boolean for dry-run mode (default: false)
Returns:
Import results with added/updated/failed counts
"""
from ..db import get_db
try:
# Extract request parameters
arr_id = request.get('arrID')
strategy_type = request.get('strategy')
filenames = request.get('filenames', [])
dry_run = request.get('dryRun', False)
# Validate inputs
if not arr_id:
return {'success': False, 'error': 'arrID is required'}
if strategy_type not in ['format', 'profile']:
return {
'success': False,
'error': 'strategy must be "format" or "profile"'
}
if not filenames:
return {'success': False, 'error': 'filenames list is required'}
# Load arr_config from database
with get_db() as conn:
cursor = conn.execute("SELECT * FROM arr_config WHERE id = ?",
(arr_id, ))
arr_config = cursor.fetchone()
if not arr_config:
return {
'success': False,
'error': f'arr_config {arr_id} not found'
}
# Select strategy
strategy_map = {'format': FormatStrategy, 'profile': ProfileStrategy}
strategy_class = strategy_map[strategy_type]
strategy = strategy_class(arr_config)
# Execute import with new logger
import_logger = reset_import_logger()
# Show start message
dry_run_text = " [DRY RUN]" if dry_run else ""
print(f"Starting {strategy_type} import for {arr_config['name']} ({arr_config['type']}): {len(filenames)} items{dry_run_text}", file=sys.stderr)
result = strategy.execute(filenames, dry_run=dry_run)
added = result.get('added', 0)
updated = result.get('updated', 0)
failed = result.get('failed', 0)
# Determine status
is_partial = failed > 0 and (added > 0 or updated > 0)
is_success = failed == 0
result['success'] = is_success or is_partial
if is_partial:
result['status'] = "partial"
elif is_success:
result['status'] = "success"
else:
result['status'] = "failed"
result['arr_config_id'] = arr_id
result['arr_config_name'] = arr_config['name']
result['strategy'] = strategy_type
# Complete logging
import_logger.complete()
return result
except Exception as e:
logger.exception("Import request failed")
return {'success': False, 'error': str(e)}
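# Illustrative sketch (not part of the original file). The arr id, strategy and
# filenames are hypothetical; a real call needs a matching arr_config row.
example_request = {
    "arrID": 1,
    "strategy": "profile",
    "filenames": ["1080p Example"],   # profile filename without the .yml extension
    "dryRun": True,
}
example_result = handle_import_request(example_request)
# On success the result carries counts plus context, e.g.
# {"success": True, "status": "success", "added": 3, "updated": 1, "failed": 0,
#  "arr_config_id": 1, "arr_config_name": "Radarr", "strategy": "profile", ...}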
def handle_scheduled_import(task_id: int) -> Dict[str, Any]:
"""
Handle a scheduled import task.
Args:
task_id: ID from scheduled_tasks table
Returns:
Import results
"""
from ..db import get_db
import json
try:
# Find arr_config for this task
with get_db() as conn:
cursor = conn.execute(
"SELECT * FROM arr_config WHERE import_task_id = ?",
(task_id, ))
arr_config = cursor.fetchone()
if not arr_config:
return {
'success': False,
'error': f'No arr_config found for task {task_id}'
}
# Parse data_to_sync
data_to_sync = json.loads(arr_config['data_to_sync'] or '{}')
# Build import requests
results = []
# Import custom formats
format_names = data_to_sync.get('customFormats', [])
if format_names:
# Remove .yml extension if present
format_names = [f.replace('.yml', '') for f in format_names]
request = {
'arrID': arr_config['id'],
'strategy': 'format',
'filenames': format_names
}
result = handle_import_request(request)
results.append(result)
# Import profiles
profile_names = data_to_sync.get('profiles', [])
if profile_names:
# Remove .yml extension if present
profile_names = [p.replace('.yml', '') for p in profile_names]
request = {
'arrID': arr_config['id'],
'strategy': 'profile',
'filenames': profile_names
}
result = handle_import_request(request)
results.append(result)
# Combine results
total_added = sum(r.get('added', 0) for r in results)
total_updated = sum(r.get('updated', 0) for r in results)
total_failed = sum(r.get('failed', 0) for r in results)
is_partial = total_failed > 0 and (total_added > 0
or total_updated > 0)
is_success = total_failed == 0
status = "failed"
if is_partial:
status = "partial"
elif is_success:
status = "success"
combined_result = {
'success': is_success or is_partial,
'status': status,
'task_id': task_id,
'arr_config_id': arr_config['id'],
'arr_config_name': arr_config['name'],
'added': total_added,
'updated': total_updated,
'failed': total_failed,
'results': results
}
# Update sync status
_update_sync_status(arr_config['id'], combined_result)
return combined_result
except Exception as e:
logger.exception(f"Scheduled import {task_id} failed")
return {'success': False, 'error': str(e)}
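# Illustrative sketch (not part of the original file): the data_to_sync JSON shape
# that both scheduled and pull imports parse. The entries are hypothetical; a
# trailing ".yml" on any entry is stripped before the import request is built.
import json
example_data_to_sync = json.dumps({
    "customFormats": ["x265.yml", "Remaster"],
    "profiles": ["1080p Example"],
})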
def handle_pull_import(arr_config_id: int) -> Dict[str, Any]:
"""
Handle an on-pull import for a specific ARR config.
This mirrors scheduled import behavior but is triggered immediately
during a git pull (not scheduled).
"""
from ..db import get_db
import json
try:
# Load arr_config by id
with get_db() as conn:
cursor = conn.execute("SELECT * FROM arr_config WHERE id = ?",
(arr_config_id, ))
arr_config = cursor.fetchone()
if not arr_config:
return {
'success': False,
'error': f'arr_config {arr_config_id} not found'
}
# Parse data_to_sync
data_to_sync = json.loads(arr_config['data_to_sync'] or '{}')
results: List[Dict[str, Any]] = []
# Import custom formats
format_names = data_to_sync.get('customFormats', [])
if format_names:
format_names = [f.replace('.yml', '') for f in format_names]
request = {
'arrID': arr_config['id'],
'strategy': 'format',
'filenames': format_names,
}
result = handle_import_request(request)
results.append(result)
# Import profiles
profile_names = data_to_sync.get('profiles', [])
if profile_names:
profile_names = [p.replace('.yml', '') for p in profile_names]
request = {
'arrID': arr_config['id'],
'strategy': 'profile',
'filenames': profile_names,
}
result = handle_import_request(request)
results.append(result)
# Combine results
total_added = sum(r.get('added', 0) for r in results)
total_updated = sum(r.get('updated', 0) for r in results)
total_failed = sum(r.get('failed', 0) for r in results)
is_partial = total_failed > 0 and (total_added > 0
or total_updated > 0)
is_success = total_failed == 0
status = "failed"
if is_partial:
status = "partial"
elif is_success:
status = "success"
combined_result = {
'success': is_success or is_partial,
'status': status,
'arr_config_id': arr_config['id'],
'arr_config_name': arr_config['name'],
'added': total_added,
'updated': total_updated,
'failed': total_failed,
'results': results,
}
# Update sync status
_update_sync_status(arr_config['id'], combined_result)
return combined_result
except Exception as e:
logger.exception(f"Pull import for arr_config {arr_config_id} failed")
return {
'success': False,
'error': str(e),
}
def _update_sync_status(config_id: int, result: Dict[str, Any]) -> None:
"""Update arr_config sync status after scheduled import."""
from ..db import get_db
from datetime import datetime
try:
total = result.get('added', 0) + result.get('updated', 0) + result.get(
'failed', 0)
successful = result.get('added', 0) + result.get('updated', 0)
sync_percentage = int((successful / total * 100) if total > 0 else 0)
with get_db() as conn:
conn.execute(
"""
UPDATE arr_config
SET last_sync_time = ?,
sync_percentage = ?
WHERE id = ?
""", (datetime.now(), sync_percentage, config_id))
conn.commit()
logger.info(
f"Updated sync status for arr_config #{config_id}: {sync_percentage}%"
)
except Exception as e:
logger.error(f"Failed to update sync status: {e}")
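# Illustrative sketch (not part of the original file): the stored percentage is
# successful / total, truncated to an int. Hypothetical counts:
added, updated, failed = 8, 1, 1
total = added + updated + failed            # 10
successful = added + updated                # 9
sync_percentage = int(successful / total * 100) if total > 0 else 0   # 90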
# Export main functions
__all__ = [
'handle_import_request', 'handle_scheduled_import', 'handle_pull_import'
]


@@ -1,150 +0,0 @@
"""ArrHandler class - manages all Arr API communication."""
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
class ArrApiError(Exception):
"""Custom exception for Arr API errors."""
def __init__(self, message: str, status_code: Optional[int] = None):
super().__init__(message)
self.status_code = status_code
class ArrHandler:
"""Manages all communication with Radarr/Sonarr API."""
def __init__(self, base_url: str, api_key: str):
"""
Initialize the Arr API handler.
Args:
base_url: Base URL of the Arr instance
api_key: API key for authentication
"""
self.base_url = base_url.rstrip('/')
self.headers = {
'X-Api-Key': api_key,
'Content-Type': 'application/json'
}
self.session = self._create_session()
def _create_session(self) -> requests.Session:
"""Create a session with connection pooling and retry logic."""
session = requests.Session()
# Configure retry strategy
retry = Retry(
total=3,
backoff_factor=0.5,
status_forcelist=[500, 502, 503, 504]
)
# Configure connection pooling
adapter = HTTPAdapter(
pool_connections=5,
pool_maxsize=5,
max_retries=retry
)
session.mount('http://', adapter)
session.mount('https://', adapter)
session.headers.update(self.headers)
return session
def get(self, endpoint: str) -> Any:
"""
Make a GET request to the Arr API.
Args:
endpoint: API endpoint path
Returns:
JSON response data
Raises:
ArrApiError: If request fails
"""
url = f"{self.base_url}{endpoint}"
try:
response = self.session.get(url, timeout=30)
if response.status_code != 200:
raise ArrApiError(
f"GET {endpoint} failed: {response.text}",
response.status_code
)
return response.json()
except requests.RequestException as e:
raise ArrApiError(f"GET {endpoint} failed: {str(e)}")
def post(self, endpoint: str, data: Dict[str, Any]) -> Any:
"""
Make a POST request to the Arr API.
Args:
endpoint: API endpoint path
data: JSON data to send
Returns:
JSON response data
Raises:
ArrApiError: If request fails
"""
url = f"{self.base_url}{endpoint}"
try:
response = self.session.post(url, json=data, timeout=30)
if response.status_code not in [200, 201]:
raise ArrApiError(
f"POST {endpoint} failed: {response.text}",
response.status_code
)
return response.json()
except requests.RequestException as e:
raise ArrApiError(f"POST {endpoint} failed: {str(e)}")
def put(self, endpoint: str, data: Dict[str, Any]) -> Any:
"""
Make a PUT request to the Arr API.
Args:
endpoint: API endpoint path
data: JSON data to send
Returns:
JSON response data (if any)
Raises:
ArrApiError: If request fails
"""
url = f"{self.base_url}{endpoint}"
try:
response = self.session.put(url, json=data, timeout=30)
if response.status_code not in [200, 202, 204]:
raise ArrApiError(
f"PUT {endpoint} failed: {response.text}",
response.status_code
)
# 204 No Content won't have JSON
if response.status_code == 204:
return {}
return response.json()
except requests.RequestException as e:
raise ArrApiError(f"PUT {endpoint} failed: {str(e)}")
def get_all_formats(self) -> List[Dict[str, Any]]:
"""Get all custom formats from the Arr instance."""
return self.get("/api/v3/customformat")
def get_all_profiles(self) -> List[Dict[str, Any]]:
"""Get all quality profiles from the Arr instance."""
return self.get("/api/v3/qualityprofile")
def close(self):
"""Close the session."""
self.session.close()
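# Illustrative usage sketch (not part of the original file). The URL and API key
# are hypothetical; failures surface as ArrApiError carrying the response status.
handler = ArrHandler("http://localhost:7878", "<radarr-api-key>")
try:
    formats = handler.get_all_formats()     # GET /api/v3/customformat
    profiles = handler.get_all_profiles()   # GET /api/v3/qualityprofile
    print(f"{len(formats)} formats, {len(profiles)} profiles")
except ArrApiError as exc:
    print(f"arr request failed ({exc.status_code}): {exc}")
finally:
    handler.close()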


@@ -1,352 +0,0 @@
"""Compilation functions to transform YAML data to Arr API format."""
import logging
from typing import Dict, List, Any, Optional
from .mappings import TargetApp, ValueResolver
from .utils import load_regex_patterns
from ..db.queries.format_renames import is_format_in_renames
from ..db.queries.settings import get_language_import_score
from .logger import get_import_logger
logger = logging.getLogger(__name__)
# Cache patterns at module level to avoid reloading
_CACHED_PATTERNS = None
def get_cached_patterns():
"""Get cached regex patterns, loading them once on first access."""
global _CACHED_PATTERNS
if _CACHED_PATTERNS is None:
_CACHED_PATTERNS = load_regex_patterns()
return _CACHED_PATTERNS
def compile_format_to_api_structure(
format_yaml: Dict[str, Any],
arr_type: str
) -> Dict[str, Any]:
"""
Compile a format from YAML to Arr API structure.
Args:
format_yaml: Format data from YAML file
arr_type: 'radarr' or 'sonarr'
Returns:
Compiled format ready for API
"""
target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
patterns = get_cached_patterns()
compiled = {
'name': format_yaml.get('name', 'Unknown')
}
# Check if format should be included in renames
if is_format_in_renames(format_yaml.get('name', '')):
compiled['includeCustomFormatWhenRenaming'] = True
# Compile specifications from conditions
specifications = []
for condition in format_yaml.get('conditions', []):
spec = _compile_condition(condition, patterns, target_app)
if spec:
specifications.append(spec)
compiled['specifications'] = specifications
return compiled
def _compile_condition(
condition: Dict[str, Any],
patterns: Dict[str, str],
target_app: TargetApp
) -> Optional[Dict[str, Any]]:
"""Compile a single condition to specification."""
condition_type = condition.get('type')
spec = {
'name': condition.get('name', ''),
'negate': condition.get('negate', False),
'required': condition.get('required', False),
'fields': []
}
if condition_type in ['release_title', 'release_group', 'edition']:
pattern_name = condition.get('pattern')
pattern = patterns.get(pattern_name)
if not pattern:
import_logger = get_import_logger()
import_logger.warning(f"Pattern not found: {pattern_name}")
return None
spec['implementation'] = {
'release_title': 'ReleaseTitleSpecification',
'release_group': 'ReleaseGroupSpecification',
'edition': 'EditionSpecification'
}[condition_type]
spec['fields'] = [{'name': 'value', 'value': pattern}]
elif condition_type == 'source':
spec['implementation'] = 'SourceSpecification'
value = ValueResolver.get_source(condition.get('source'), target_app)
spec['fields'] = [{'name': 'value', 'value': value}]
elif condition_type == 'resolution':
spec['implementation'] = 'ResolutionSpecification'
value = ValueResolver.get_resolution(condition.get('resolution'))
spec['fields'] = [{'name': 'value', 'value': value}]
elif condition_type == 'indexer_flag':
spec['implementation'] = 'IndexerFlagSpecification'
value = ValueResolver.get_indexer_flag(condition.get('flag', ''), target_app)
spec['fields'] = [{'name': 'value', 'value': value}]
elif condition_type == 'quality_modifier':
if target_app == TargetApp.SONARR:
return None
spec['implementation'] = 'QualityModifierSpecification'
value = ValueResolver.get_quality_modifier(condition.get('qualityModifier'))
spec['fields'] = [{'name': 'value', 'value': value}]
elif condition_type == 'size':
spec['implementation'] = 'SizeSpecification'
spec['fields'] = [
{'name': 'min', 'value': condition.get('minSize', 0)},
{'name': 'max', 'value': condition.get('maxSize', 0)}
]
elif condition_type == 'language':
spec['implementation'] = 'LanguageSpecification'
language_name = condition.get('language', '').lower()
try:
language_data = ValueResolver.get_language(language_name, target_app, for_profile=False)
fields = [{'name': 'value', 'value': language_data['id']}]
# Handle exceptLanguage field if present
if 'exceptLanguage' in condition:
except_value = condition['exceptLanguage']
fields.append({
'name': 'exceptLanguage',
'value': except_value
})
spec['fields'] = fields
except Exception:
import_logger = get_import_logger()
import_logger.warning(f"Language not found: {language_name}")
return None
elif condition_type == 'release_type':
# Only supported in Sonarr
if target_app == TargetApp.RADARR:
return None
spec['implementation'] = 'ReleaseTypeSpecification'
value = ValueResolver.get_release_type(condition.get('releaseType'))
spec['fields'] = [{'name': 'value', 'value': value}]
elif condition_type == 'year':
spec['implementation'] = 'YearSpecification'
spec['fields'] = [
{'name': 'min', 'value': condition.get('minYear', 0)},
{'name': 'max', 'value': condition.get('maxYear', 0)}
]
else:
import_logger = get_import_logger()
import_logger.warning(f"Unknown condition type: {condition_type}")
return None
return spec
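# Illustrative sketch (not part of the original file): compiling a single condition.
# The pattern name and regex are hypothetical; in the real flow the patterns dict
# comes from get_cached_patterns().
example_condition = {
    "name": "x265 title",
    "type": "release_title",
    "pattern": "x265_pattern",
    "negate": False,
    "required": True,
}
example_patterns = {"x265_pattern": r"[xh]\.?265|HEVC"}
example_spec = _compile_condition(example_condition, example_patterns, TargetApp.RADARR)
# -> {'name': 'x265 title', 'negate': False, 'required': True,
#     'fields': [{'name': 'value', 'value': '[xh]\\.?265|HEVC'}],
#     'implementation': 'ReleaseTitleSpecification'}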
def compile_profile_to_api_structure(
profile_yaml: Dict[str, Any],
arr_type: str
) -> Dict[str, Any]:
"""
Compile a profile from YAML to Arr API structure.
Args:
profile_yaml: Profile data from YAML file
arr_type: 'radarr' or 'sonarr'
Returns:
Compiled profile ready for API
"""
target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
quality_mappings = ValueResolver.get_qualities(target_app)
compiled = {
'name': profile_yaml.get('name', 'Unknown')
}
# Build quality items - following the structure from the working compile/profile_compiler.py
items = []
cutoff_id = None
used_qualities = set()
quality_ids_in_groups = set()
# Convert group IDs (negative to positive with offset)
def convert_group_id(group_id: int) -> int:
if group_id < 0:
return 1000 + abs(group_id)
return group_id
# First pass: gather quality IDs in groups to avoid duplicates
for quality_entry in profile_yaml.get('qualities', []):
if isinstance(quality_entry, dict) and quality_entry.get('id', 0) < 0:
# It's a group
for q in quality_entry.get('qualities', []):
if isinstance(q, dict):
q_name = q.get('name', '')
mapped_name = ValueResolver.get_quality_name(q_name, target_app)
if mapped_name in quality_mappings:
quality_ids_in_groups.add(quality_mappings[mapped_name]['id'])
# Second pass: add groups and individual qualities
for quality_entry in profile_yaml.get('qualities', []):
if isinstance(quality_entry, dict):
if quality_entry.get('id', 0) < 0:
# It's a group
group_id = convert_group_id(quality_entry.get('id', 0))
group_item = {
'id': group_id,
'name': quality_entry.get('name', 'Group'),
'items': [],
'allowed': True
}
for q in quality_entry.get('qualities', []):
if isinstance(q, dict):
q_name = q.get('name', '')
mapped_name = ValueResolver.get_quality_name(q_name, target_app)
if mapped_name in quality_mappings:
group_item['items'].append({
'quality': quality_mappings[mapped_name].copy(),
'items': [],
'allowed': True
})
used_qualities.add(mapped_name.upper())
if group_item['items']:
items.append(group_item)
else:
# Individual quality
q_name = quality_entry.get('name', '')
mapped_name = ValueResolver.get_quality_name(q_name, target_app)
if mapped_name in quality_mappings:
items.append({
'quality': quality_mappings[mapped_name].copy(),
'items': [],
'allowed': True
})
used_qualities.add(mapped_name.upper())
elif isinstance(quality_entry, str):
# Simple quality name string
mapped_name = ValueResolver.get_quality_name(quality_entry, target_app)
if mapped_name in quality_mappings:
items.append({
'quality': quality_mappings[mapped_name].copy(),
'items': [],
'allowed': True
})
used_qualities.add(mapped_name.upper())
# Add all unused qualities as disabled
for quality_name, quality_data in quality_mappings.items():
if (quality_name.upper() not in used_qualities and
quality_data['id'] not in quality_ids_in_groups):
items.append({
'quality': quality_data.copy(),
'items': [],
'allowed': False
})
# Handle cutoff/upgrade_until
if 'upgrade_until' in profile_yaml and isinstance(profile_yaml['upgrade_until'], dict):
cutoff_id_raw = profile_yaml['upgrade_until'].get('id')
cutoff_name = profile_yaml['upgrade_until'].get('name', '')
mapped_cutoff_name = ValueResolver.get_quality_name(cutoff_name, target_app)
if cutoff_id_raw and cutoff_id_raw < 0:
cutoff_id = convert_group_id(cutoff_id_raw)
elif mapped_cutoff_name in quality_mappings:
cutoff_id = quality_mappings[mapped_cutoff_name]['id']
# Handle language
language = profile_yaml.get('language', 'any')
if language != 'any' and '_' not in language:
# Simple language mode
try:
language_data = ValueResolver.get_language(language, target_app, for_profile=True)
except Exception:
language_data = ValueResolver.get_language('any', target_app, for_profile=True)
else:
# Advanced mode or any
language_data = ValueResolver.get_language('any', target_app, for_profile=True)
# Build format items (without IDs, those get synced later)
format_items = []
# Add language-specific formats for advanced mode
if language != 'any' and '_' in language:
behavior, language_code = language.split('_', 1)
# Get the score from database instead of hardcoding
language_score = get_language_import_score()
# Use proper capitalization for the language name
lang_display = language_code.capitalize()
# Handle behaviors: 'must' and 'only' (matching old working logic)
if behavior in ['must', 'only']:
# Add "Not [Language]" format with score from database
not_language_name = f"Not {lang_display}"
format_items.append({
'name': not_language_name,
'score': language_score
})
# For 'only' behavior, add additional formats
if behavior == 'only':
format_items.append({
'name': f"Not Only {lang_display}",
'score': language_score
})
format_items.append({
'name': f"Not Only {lang_display} (Missing)",
'score': language_score
})
# Main custom formats
for cf in profile_yaml.get('custom_formats', []):
format_items.append({
'name': cf.get('name'),
'score': cf.get('score', 0)
})
# App-specific custom formats
app_key = f'custom_formats_{arr_type.lower()}'
for cf in profile_yaml.get(app_key, []):
format_items.append({
'name': cf.get('name'),
'score': cf.get('score', 0)
})
# Reverse items to match expected order
items.reverse()
compiled['items'] = items
compiled['language'] = language_data
compiled['upgradeAllowed'] = profile_yaml.get('upgradesAllowed', True)
compiled['minFormatScore'] = profile_yaml.get('minCustomFormatScore', 0)
compiled['cutoffFormatScore'] = profile_yaml.get('upgradeUntilScore', 0)
compiled['formatItems'] = format_items
if cutoff_id is not None:
compiled['cutoff'] = cutoff_id
# Handle minUpgradeFormatScore with proper default
compiled['minUpgradeFormatScore'] = max(1, profile_yaml.get('minScoreIncrement', 1))
return compiled
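# Illustrative sketch (not part of the original file): a minimal profile YAML dict
# and the key points of its compiled form. Names, scores and the group are hypothetical.
example_profile_yaml = {
    "name": "Example Profile",
    "language": "any",
    "upgradesAllowed": True,
    "minCustomFormatScore": 0,
    "upgradeUntilScore": 200,
    "minScoreIncrement": 10,
    "qualities": [
        {"id": -5, "name": "WEB 1080p", "qualities": [
            {"name": "WEBDL-1080p"},
            {"name": "WEBRip-1080p"},
        ]},
        "Bluray-1080p",
    ],
    "upgrade_until": {"id": -5, "name": "WEB 1080p"},
    "custom_formats": [{"name": "x265", "score": 100}],
}
example_compiled = compile_profile_to_api_structure(example_profile_yaml, "radarr")
# Group id -5 becomes 1000 + abs(-5) = 1005 and, via upgrade_until, the cutoff:
# example_compiled["cutoff"] == 1005
# example_compiled["formatItems"] == [{"name": "x265", "score": 100}]
# example_compiled["minUpgradeFormatScore"] == 10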


@@ -1,138 +0,0 @@
"""Custom logger for importer with progress tracking and colored output."""
import sys
from typing import List, Dict, Any
from datetime import datetime
class ImportLogger:
"""Custom logger with progress tracking and colored error output."""
def __init__(self):
"""Initialize the import logger."""
self.compilation_errors: List[Dict[str, str]] = []
self.import_errors: List[Dict[str, str]] = []
self.warnings: List[str] = []
self.current_compilation = 0
self.total_compilation = 0
self.current_import = 0
self.total_import = 0
self.added = 0
self.updated = 0
self.failed = 0
self.start_time = None
self.compilation_items: List[str] = []
self.import_items: List[Dict[str, str]] = []
def _write_colored(self, text: str, color: str = None):
"""Write colored text to stderr."""
if color == 'red':
text = f"\033[91m{text}\033[0m"
elif color == 'yellow':
text = f"\033[93m{text}\033[0m"
elif color == 'green':
text = f"\033[92m{text}\033[0m"
print(text, file=sys.stderr)
def start(self, total_compilation: int, total_import: int):
"""Start the import process."""
self.start_time = datetime.now()
self.total_compilation = total_compilation
self.total_import = total_import
self.current_compilation = 0
self.current_import = 0
def update_compilation(self, item_name: str):
"""Track compilation progress."""
self.current_compilation += 1
self.compilation_items.append(item_name)
def compilation_complete(self):
"""Show compilation summary."""
if self.total_compilation > 0:
print(f"Compiled: {self.current_compilation}/{self.total_compilation}", file=sys.stderr)
# Show compilation errors if any
if self.compilation_errors:
for error in self.compilation_errors:
self._write_colored(f"ERROR: Failed to compile {error['item']}: {error['message']}", 'red')
def update_import(self, item_name: str, action: str):
"""Track import progress."""
self.import_items.append({'name': item_name, 'action': action})
# Update counts based on action
if action == 'added':
self.added += 1
self.current_import += 1 # Only count successful imports
elif action == 'updated':
self.updated += 1
self.current_import += 1 # Only count successful imports
elif action == 'failed':
self.failed += 1
# Don't increment current_import for failures
def import_complete(self):
"""Show import summary."""
if self.total_import > 0:
print(f"Imported: {self.current_import}/{self.total_import}", file=sys.stderr)
# Show import errors if any
if self.import_errors:
for error in self.import_errors:
self._write_colored(f"ERROR: {error['message']}", 'red')
# Show warnings if any
if self.warnings:
for warning in self.warnings:
self._write_colored(f"WARNING: {warning}", 'yellow')
def error(self, message: str, item_name: str = None, phase: str = 'import'):
"""Log an error."""
if phase == 'compilation':
self.compilation_errors.append({'item': item_name or 'unknown', 'message': message})
else:
self.import_errors.append({'item': item_name or 'unknown', 'message': message})
def warning(self, message: str):
"""Log a warning."""
self.warnings.append(message)
def complete(self):
"""Complete the import and show final summary."""
# Show import summary first if not already shown
if self.current_import > 0 and not hasattr(self, '_import_shown'):
self.import_complete()
# Calculate duration
if self.start_time:
duration = (datetime.now() - self.start_time).total_seconds()
duration_str = f"{duration:.1f}s"
else:
duration_str = "N/A"
# Simple final summary
print(f"\n{'='*50}", file=sys.stderr)
print(f"Import Complete in {duration_str}", file=sys.stderr)
print(f"Added: {self.added}, Updated: {self.updated}, Failed: {self.failed}", file=sys.stderr)
print(f"{'='*50}\n", file=sys.stderr)
# Global instance
_logger = None
def get_import_logger() -> ImportLogger:
"""Get the import logger instance."""
global _logger
if _logger is None:
_logger = ImportLogger()
return _logger
def reset_import_logger() -> ImportLogger:
"""Reset and return a new import logger."""
global _logger
_logger = ImportLogger()
return _logger
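# Illustrative usage sketch (not part of the original file): the lifecycle a
# strategy typically drives. The item name is hypothetical; output goes to stderr.
il = reset_import_logger()
il.start(total_compilation=0, total_import=0)   # counts are filled in as work happens
il.update_compilation("x265")
il.total_compilation = il.current_compilation
il.compilation_complete()                       # "Compiled: 1/1"
il.total_import = 1
il.update_import("x265", "added")
il.complete()                                   # "Imported: 1/1" plus the final summary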


@@ -1,990 +0,0 @@
# app/compile/mappings.py
"""Centralized constants and mappings for arr applications"""
from enum import Enum, auto
from typing import Dict, Any
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TargetApp(Enum):
"""Enum for target application types"""
RADARR = auto()
SONARR = auto()
class IndexerFlags:
"""Indexer flag mappings for both applications"""
RADARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 32,
'scene': 128,
'freeleech_75': 256,
'freeleech_25': 512,
'nuked': 2048,
'ptp_golden': 8,
'ptp_approved': 16
}
SONARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 8,
'scene': 16,
'freeleech_75': 32,
'freeleech_25': 64,
'nuked': 128
}
class Sources:
"""Source mappings for both applications"""
RADARR = {
'cam': 1,
'telesync': 2,
'telecine': 3,
'workprint': 4,
'dvd': 5,
'tv': 6,
'web_dl': 7,
'webrip': 8,
'bluray': 9
}
SONARR = {
'television': 1,
'television_raw': 2,
'web_dl': 3,
'webrip': 4,
'dvd': 5,
'bluray': 6,
'bluray_raw': 7
}
class Quality_Modifiers:
"""Quality modifier mappings for Radarr ONLY"""
RADARR = {
'none': 0,
'regional': 1,
'screener': 2,
'rawhd': 3,
'brdisk': 4,
'remux': 5,
}
class Release_Types:
"""Release type mappings for Sonarr ONLY"""
SONARR = {
'none': 0,
'single_episode': 1,
'multi_episode': 2,
'season_pack': 3,
}
class Qualities:
"""Quality mappings for both applications"""
COMMON_RESOLUTIONS = {
'360p': 360,
'480p': 480,
'540p': 540,
'576p': 576,
'720p': 720,
'1080p': 1080,
'2160p': 2160
}
RADARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "tv",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "webdl",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "tv",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "webdl",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "webdl",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "tv",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "tv",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webrip",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webrip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webrip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "tv",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webrip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "webdl",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-480p": {
"id": 20,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"Bluray-576p": {
"id": 21,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
},
"BR-DISK": {
"id": 22,
"name": "BR-DISK",
"source": "bluray",
"resolution": 1080
},
"DVD-R": {
"id": 23,
"name": "DVD-R",
"source": "dvd",
"resolution": 480
},
"WORKPRINT": {
"id": 24,
"name": "WORKPRINT",
"source": "workprint",
"resolution": 0
},
"CAM": {
"id": 25,
"name": "CAM",
"source": "cam",
"resolution": 0
},
"TELESYNC": {
"id": 26,
"name": "TELESYNC",
"source": "telesync",
"resolution": 0
},
"TELECINE": {
"id": 27,
"name": "TELECINE",
"source": "telecine",
"resolution": 0
},
"DVDSCR": {
"id": 28,
"name": "DVDSCR",
"source": "dvd",
"resolution": 480
},
"REGIONAL": {
"id": 29,
"name": "REGIONAL",
"source": "dvd",
"resolution": 480
},
"Remux-1080p": {
"id": 30,
"name": "Remux-1080p",
"source": "bluray",
"resolution": 1080
},
"Remux-2160p": {
"id": 31,
"name": "Remux-2160p",
"source": "bluray",
"resolution": 2160
}
}
SONARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "television",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "web",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "television",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "web",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "web",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "television",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "televisionRaw",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webRip",
"resolution": 480
},
"Bluray-480p": {
"id": 13,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webRip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webRip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "television",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webRip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "web",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-1080p Remux": {
"id": 20,
"name": "Bluray-1080p Remux",
"source": "blurayRaw",
"resolution": 1080
},
"Bluray-2160p Remux": {
"id": 21,
"name": "Bluray-2160p Remux",
"source": "blurayRaw",
"resolution": 2160
},
"Bluray-576p": {
"id": 22,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
}
}
class Languages:
"""Language mappings for both applications"""
RADARR = {
'any': {
'id': -1,
'name': 'Any'
},
'original': {
'id': -2,
'name': 'Original'
},
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'hindi': {
'id': 26,
'name': 'Hindi'
},
'romanian': {
'id': 27,
'name': 'Romanian'
},
'thai': {
'id': 28,
'name': 'Thai'
},
'bulgarian': {
'id': 29,
'name': 'Bulgarian'
},
'portuguese_br': {
'id': 30,
'name': 'Portuguese (Brazil)'
},
'arabic': {
'id': 31,
'name': 'Arabic'
},
'ukrainian': {
'id': 32,
'name': 'Ukrainian'
},
'persian': {
'id': 33,
'name': 'Persian'
},
'bengali': {
'id': 34,
'name': 'Bengali'
},
'slovak': {
'id': 35,
'name': 'Slovak'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'spanish_latino': {
'id': 37,
'name': 'Spanish (Latino)'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'telugu': {
'id': 45,
'name': 'Telugu'
},
'macedonian': {
'id': 46,
'name': 'Macedonian'
},
'slovenian': {
'id': 47,
'name': 'Slovenian'
},
'malayalam': {
'id': 48,
'name': 'Malayalam'
},
'kannada': {
'id': 49,
'name': 'Kannada'
},
'albanian': {
'id': 50,
'name': 'Albanian'
},
'afrikaans': {
'id': 51,
'name': 'Afrikaans'
}
}
SONARR = {
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'arabic': {
'id': 26,
'name': 'Arabic'
},
'hindi': {
'id': 27,
'name': 'Hindi'
},
'bulgarian': {
'id': 28,
'name': 'Bulgarian'
},
'malayalam': {
'id': 29,
'name': 'Malayalam'
},
'ukrainian': {
'id': 30,
'name': 'Ukrainian'
},
'slovak': {
'id': 31,
'name': 'Slovak'
},
'thai': {
'id': 32,
'name': 'Thai'
},
'portuguese_br': {
'id': 33,
'name': 'Portuguese (Brazil)'
},
'spanish_latino': {
'id': 34,
'name': 'Spanish (Latino)'
},
'romanian': {
'id': 35,
'name': 'Romanian'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'persian': {
'id': 37,
'name': 'Persian'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'macedonian': {
'id': 45,
'name': 'Macedonian'
},
'slovenian': {
'id': 46,
'name': 'Slovenian'
},
'original': {
'id': -2,
'name': 'Original'
}
}
class QualityNameMapper:
"""Maps between different quality naming conventions"""
REMUX_MAPPINGS = {
TargetApp.SONARR: {
"Remux-1080p": "Bluray-1080p Remux",
"Remux-2160p": "Bluray-2160p Remux"
},
TargetApp.RADARR: {
"Remux-1080p": "Remux-1080p",
"Remux-2160p": "Remux-2160p"
}
}
ALTERNATE_NAMES = {
"BR-Disk": "BR-DISK",
"BR-DISK": "BR-DISK",
"BRDISK": "BR-DISK",
"BR_DISK": "BR-DISK",
"BLURAY-DISK": "BR-DISK",
"BLURAY_DISK": "BR-DISK",
"BLURAYDISK": "BR-DISK",
"Telecine": "TELECINE",
"TELECINE": "TELECINE",
"TeleCine": "TELECINE",
"Telesync": "TELESYNC",
"TELESYNC": "TELESYNC",
"TeleSync": "TELESYNC",
}
@classmethod
def map_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""
Maps quality names between different formats based on target app
Args:
name: The quality name to map
target_app: The target application (RADARR or SONARR)
Returns:
The mapped quality name
"""
# Handle empty or None cases
if not name:
return name
# First check for remux mappings
if name in cls.REMUX_MAPPINGS.get(target_app, {}):
return cls.REMUX_MAPPINGS[target_app][name]
# Then check for alternate spellings
normalized_name = name.upper().replace("-", "").replace("_", "")
for alt_name, standard_name in cls.ALTERNATE_NAMES.items():
if normalized_name == alt_name.upper().replace("-", "").replace(
"_", ""):
return standard_name
return name
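# Illustrative sketch (not part of the original file): remux names are
# app-specific, while alternate spellings collapse to the canonical form.
assert QualityNameMapper.map_quality_name("Remux-1080p", TargetApp.SONARR) == "Bluray-1080p Remux"
assert QualityNameMapper.map_quality_name("Remux-1080p", TargetApp.RADARR) == "Remux-1080p"
assert QualityNameMapper.map_quality_name("BR_Disk", TargetApp.RADARR) == "BR-DISK"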
class LanguageNameMapper:
"""Maps between different language naming conventions"""
ALTERNATE_NAMES = {
"spanish-latino": "spanish_latino",
"spanish_latino": "spanish_latino",
"spanishlatino": "spanish_latino",
"portuguese-br": "portuguese_br",
"portuguese_br": "portuguese_br",
"portuguesebr": "portuguese_br",
"portuguese-brazil": "portuguese_br",
"portuguese_brazil": "portuguese_br"
}
@classmethod
def normalize_language_name(cls, name: str) -> str:
"""
Normalizes language names to a consistent format
Args:
name: The language name to normalize
Returns:
The normalized language name
"""
if not name:
return name
normalized = name.lower().replace(" ", "_")
return cls.ALTERNATE_NAMES.get(normalized, normalized)
class ValueResolver:
"""Helper class to resolve values based on target app"""
@classmethod
def get_indexer_flag(cls, flag: str, target_app: TargetApp) -> int:
flags = IndexerFlags.RADARR if target_app == TargetApp.RADARR else IndexerFlags.SONARR
return flags.get(flag.lower(), 0)
@classmethod
def get_source(cls, source: str, target_app: TargetApp) -> int:
sources = Sources.RADARR if target_app == TargetApp.RADARR else Sources.SONARR
return sources.get(source.lower(), 0)
@classmethod
def get_resolution(cls, resolution: str) -> int:
return Qualities.COMMON_RESOLUTIONS.get(resolution.lower(), 0)
@classmethod
def get_qualities(cls, target_app: TargetApp) -> Dict[str, Any]:
qualities = Qualities.RADARR if target_app == TargetApp.RADARR else Qualities.SONARR
return qualities
@classmethod
def get_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""Maps quality names between different formats based on target app"""
return QualityNameMapper.map_quality_name(name, target_app)
@classmethod
def get_quality_modifier(cls, quality_modifier: str) -> int:
return Quality_Modifiers.RADARR.get(quality_modifier.lower(), 0)
@classmethod
def get_release_type(cls, release_type: str) -> int:
return Release_Types.SONARR.get(release_type.lower(), 0)
@classmethod
def get_language(cls,
language_name: str,
target_app: TargetApp,
for_profile: bool = True) -> Dict[str, Any]:
"""
Get language mapping based on target app and context
Args:
language_name: Name of the language to look up
target_app: Target application (RADARR or SONARR)
for_profile: If True, this is for a quality profile. If False, this is for a custom format.
"""
languages = Languages.RADARR if target_app == TargetApp.RADARR else Languages.SONARR
# For profiles, only Radarr uses language settings
if for_profile and target_app == TargetApp.SONARR:
return {'id': -2, 'name': 'Original'}
# Normalize the language name
normalized_name = LanguageNameMapper.normalize_language_name(
language_name)
language_data = languages.get(normalized_name)
if not language_data:
logger.warning(
f"Language '{language_name}' (normalized: '{normalized_name}') "
f"not found in {target_app} mappings, falling back to Unknown")
language_data = languages['unknown']
return language_data
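# Illustrative sketch (not part of the original file): profile lookups on Sonarr
# always resolve to Original, while custom-format lookups normalise alternate
# spellings before resolving ids.
assert ValueResolver.get_language("english", TargetApp.SONARR, for_profile=True) == {'id': -2, 'name': 'Original'}
assert ValueResolver.get_language("Portuguese-BR", TargetApp.RADARR, for_profile=False) == {'id': 30, 'name': 'Portuguese (Brazil)'}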


@@ -1,59 +0,0 @@
"""Routes for the new import module."""
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import logging
from . import handle_import_request
logger = logging.getLogger(__name__)
bp = Blueprint('new_import', __name__)
@bp.route('', methods=['POST', 'OPTIONS'])
@cross_origin()
def import_items():
"""
Import formats or profiles to an Arr instance.
Request body:
{
"arrID": int, # ID of arr_config to use
"strategy": str, # "format" or "profile"
"filenames": [str], # List of filenames to import
"dryRun": bool # Optional: simulate import without changes (default: false)
}
"""
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
data = request.get_json()
# Validate request
if not data:
return jsonify({
'success': False,
'error': 'Request body is required'
}), 400
# Call the import handler
result = handle_import_request(data)
# Return appropriate status code
status_code = 200
if result.get('status') == 'partial':
status_code = 207
elif not result.get('success'):
if 'not found' in result.get('error', '').lower():
status_code = 404
else:
status_code = 400
return jsonify(result), status_code
except Exception as e:
logger.error(f"Error handling import request: {str(e)}")
return jsonify({
'success': False,
'error': str(e)
}), 500


@@ -1,6 +0,0 @@
"""Import strategies."""
from .base import ImportStrategy
from .format import FormatStrategy
from .profile import ProfileStrategy
__all__ = ['ImportStrategy', 'FormatStrategy', 'ProfileStrategy']


@@ -1,103 +0,0 @@
"""Base strategy class for import operations."""
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Any
from ..arr_handler import ArrHandler
from ..logger import get_import_logger
logger = logging.getLogger(__name__)
class ImportStrategy(ABC):
"""Base class for import strategies."""
def __init__(self, arr_config):
"""
Initialize the import strategy.
Args:
arr_config: Database row from arr_config table containing:
- type: 'radarr' or 'sonarr'
- arr_server: Base URL
- api_key: API key
- import_as_unique: Whether to add [Dictionarry] suffix
"""
# Handle sqlite3.Row objects (they support dict-like access)
self.arr_type = arr_config['type']
self.base_url = arr_config['arr_server']
self.api_key = arr_config['api_key']
# sqlite3.Row doesn't have .get() method, so we need to handle None
import_as_unique = arr_config['import_as_unique'] if 'import_as_unique' in arr_config.keys() else False
self.import_as_unique = bool(import_as_unique) if import_as_unique is not None else False
self.arr = ArrHandler(self.base_url, self.api_key)
@abstractmethod
def compile(self, filenames: List[str]) -> Dict[str, Any]:
"""
Compile files to API-ready format.
Args:
filenames: List of filenames to compile
Returns:
Dictionary with compiled data
"""
pass
@abstractmethod
def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
"""
Import compiled data to Arr instance.
Args:
compiled_data: Data from compile() method
dry_run: If True, simulate import without making changes
Returns:
Import results with added/updated/failed counts
"""
pass
def execute(self, filenames: List[str], dry_run: bool = False) -> Dict[str, Any]:
"""
Execute the full import process.
Args:
filenames: List of filenames to import
dry_run: If True, simulate import without making changes
Returns:
Import results
"""
try:
# Compile
compiled = self.compile(filenames)
# Import
results = self.import_data(compiled, dry_run=dry_run)
# Add dry_run flag and compiled data to results
if dry_run:
results['dry_run'] = True
results['compiled_data'] = compiled
return results
except Exception as e:
import_logger = get_import_logger()
import_logger.error(f"Strategy execution failed: {e}", phase='import')
return {
'added': 0,
'updated': 0,
'failed': len(filenames),
'error': str(e)
}
finally:
# Clean up
self.arr.close()
def add_unique_suffix(self, name: str) -> str:
"""Add [Dictionarry] suffix if unique import is enabled."""
if self.import_as_unique and not name.endswith('[Dictionarry]'):
return f"{name} [Dictionarry]"
return name


@@ -1,132 +0,0 @@
"""Format import strategy."""
import logging
from typing import Dict, List, Any
from .base import ImportStrategy
from ..utils import load_yaml
from ..compiler import compile_format_to_api_structure
from ..logger import get_import_logger
logger = logging.getLogger(__name__)
class FormatStrategy(ImportStrategy):
"""Strategy for importing custom formats."""
def compile(self, filenames: List[str]) -> Dict[str, Any]:
"""
Compile format files to API-ready format.
Args:
filenames: List of format filenames (without .yml)
Returns:
Dictionary with 'formats' key containing compiled formats
"""
formats = []
failed = []
import_logger = get_import_logger()
# Don't try to predict - we'll count as we go
import_logger.start(0, 0) # Will update counts as we compile
for filename in filenames:
try:
# Load YAML
format_yaml = load_yaml(f"custom_format/{filename}.yml")
# Compile to API structure
compiled = compile_format_to_api_structure(format_yaml, self.arr_type)
# Add unique suffix if needed
if self.import_as_unique:
compiled['name'] = self.add_unique_suffix(compiled['name'])
formats.append(compiled)
import_logger.update_compilation(filename)
except Exception as e:
import_logger.error(f"{e}", filename, 'compilation')
failed.append(filename)
# Don't count failed compilations
# Set final compilation count
import_logger.total_compilation = len(formats)
import_logger.current_compilation = len(formats)
import_logger.compilation_complete()
return {'formats': formats}
def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
"""
Import compiled formats to Arr instance.
Args:
compiled_data: Dictionary with 'formats' key
dry_run: If True, simulate import without making changes
Returns:
Import results
"""
# Get existing formats
existing = self.arr.get_all_formats()
existing_map = {f['name']: f['id'] for f in existing}
results = {
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
import_logger = get_import_logger()
# Set import count
import_logger.total_import = len(compiled_data['formats'])
import_logger._import_shown = False # Reset import shown flag
for format_data in compiled_data['formats']:
format_name = format_data['name']
try:
if format_name in existing_map:
# Update existing
if not dry_run:
format_data['id'] = existing_map[format_name]
self.arr.put(
f"/api/v3/customformat/{existing_map[format_name]}",
format_data
)
import_logger.update_import(format_name, "updated")
results['updated'] += 1
results['details'].append({
'name': format_name,
'action': 'updated'
})
else:
# Add new
if not dry_run:
self.arr.post("/api/v3/customformat", format_data)
import_logger.update_import(format_name, "added")
results['added'] += 1
results['details'].append({
'name': format_name,
'action': 'added'
})
except Exception as e:
import_logger.update_import(format_name, "failed")
import_logger.error(f"Failed to import format {format_name}: {e}", format_name)
results['failed'] += 1
results['details'].append({
'name': format_name,
'action': 'failed',
'error': str(e)
})
# Show import summary
import_logger.import_complete()
import_logger._import_shown = True
return results


@@ -1,262 +0,0 @@
"""Profile import strategy."""
import logging
from typing import Dict, List, Any, Set
from .base import ImportStrategy
from ..utils import load_yaml, extract_format_names, generate_language_formats
from ..compiler import compile_format_to_api_structure, compile_profile_to_api_structure
from ..logger import get_import_logger
logger = logging.getLogger(__name__)
class ProfileStrategy(ImportStrategy):
"""Strategy for importing quality profiles."""
def compile(self, filenames: List[str]) -> Dict[str, Any]:
"""
Compile profile files and their dependent formats to API-ready format.
Args:
filenames: List of profile filenames (without .yml)
Returns:
Dictionary with 'profiles' and 'formats' keys
"""
profiles = []
all_formats = []
processed_formats: Set[str] = set()
# Cache for language formats to avoid recompiling
language_formats_cache: Dict[str, List[Dict]] = {}
import_logger = get_import_logger()
# Don't try to predict - we'll count as we go
import_logger.start(0, 0) # Will update counts as we compile
for filename in filenames:
try:
# Load profile YAML
profile_yaml = load_yaml(f"profile/{filename}.yml")
# Extract referenced custom formats
format_names = extract_format_names(profile_yaml)
for format_name in format_names:
# Skip if already processed
display_name = self.add_unique_suffix(format_name) if self.import_as_unique else format_name
if display_name in processed_formats:
continue
try:
format_yaml = load_yaml(f"custom_format/{format_name}.yml")
compiled_format = compile_format_to_api_structure(format_yaml, self.arr_type)
if self.import_as_unique:
compiled_format['name'] = self.add_unique_suffix(compiled_format['name'])
all_formats.append(compiled_format)
processed_formats.add(compiled_format['name'])
import_logger.update_compilation(format_name)
except Exception as e:
# Count the failed attempt
import_logger.update_compilation(f"{format_name} (failed)")
# Generate language formats if needed
language = profile_yaml.get('language', 'any')
if language != 'any' and '_' in language:
# Check cache first
if language not in language_formats_cache:
language_formats = generate_language_formats(language, self.arr_type)
compiled_langs = []
for lang_format in language_formats:
lang_name = lang_format.get('name', 'Language format')
compiled_lang = compile_format_to_api_structure(lang_format, self.arr_type)
if self.import_as_unique:
compiled_lang['name'] = self.add_unique_suffix(compiled_lang['name'])
compiled_langs.append(compiled_lang)
# Add to all_formats only on first compilation
if compiled_lang['name'] not in processed_formats:
all_formats.append(compiled_lang)
processed_formats.add(compiled_lang['name'])
import_logger.update_compilation(lang_name)
# Store in cache
language_formats_cache[language] = compiled_langs
# Compile profile
compiled_profile = compile_profile_to_api_structure(profile_yaml, self.arr_type)
if self.import_as_unique:
compiled_profile['name'] = self.add_unique_suffix(compiled_profile['name'])
# Update format references in profile
for item in compiled_profile.get('formatItems', []):
item['name'] = self.add_unique_suffix(item['name'])
profiles.append(compiled_profile)
import_logger.update_compilation(f"Profile: {compiled_profile['name']}")
except Exception as e:
import_logger.error(f"{str(e)}", f"Profile: {filename}", 'compilation')
import_logger.update_compilation(f"Profile: {filename} (failed)")
# Set total to what we actually attempted
import_logger.total_compilation = import_logger.current_compilation
import_logger.compilation_complete()
return {
'profiles': profiles,
'formats': all_formats
}
def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
"""
Import compiled profiles and formats to Arr instance.
Args:
compiled_data: Dictionary with 'profiles' and 'formats' keys
dry_run: If True, simulate import without making changes
Returns:
Import results
"""
results = {
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
import_logger = get_import_logger()
# Set total import count
import_logger.total_import = len(compiled_data['formats']) + len(compiled_data['profiles'])
import_logger._import_shown = False # Reset import shown flag
# Import formats first
if compiled_data['formats']:
existing_formats = self.arr.get_all_formats()
format_map = {f['name']: f['id'] for f in existing_formats}
formats_failed = []
for format_data in compiled_data['formats']:
format_name = format_data['name']
try:
if format_name in format_map:
# Update existing
if not dry_run:
format_data['id'] = format_map[format_name]
self.arr.put(
f"/api/v3/customformat/{format_map[format_name]}",
format_data
)
import_logger.update_import(format_name, "updated")
else:
# Add new
if dry_run:
# In dry run, pretend we got an ID
# Use a predictable fake ID for dry run
fake_id = 999000 + len(format_map)
format_map[format_name] = fake_id
else:
response = self.arr.post("/api/v3/customformat", format_data)
format_map[format_name] = response['id']
import_logger.update_import(format_name, "added")
except Exception as e:
import_logger.update_import(format_name, "failed")
import_logger.error(f"Failed to import format {format_name}: {e}", format_name)
formats_failed.append(format_name)
# Refresh format map for profile syncing (MUST be done after importing formats)
if not dry_run:
# In real mode, get the actual current formats from the server
existing_formats = self.arr.get_all_formats()
format_map = {f['name']: f['id'] for f in existing_formats}
# In dry run mode, format_map already has fake IDs from above
# Sync format IDs in profiles
for profile in compiled_data['profiles']:
synced_items = []
processed_formats = set()
# First add all explicitly defined formats with their scores
for item in profile.get('formatItems', []):
if item['name'] in format_map:
synced_items.append({
'format': format_map[item['name']],
'name': item['name'],
'score': item.get('score', 0)
})
processed_formats.add(item['name'])
else:
import_logger.warning(f"Format {item['name']} not found for profile {profile['name']}")
# Then add ALL other existing formats with score 0 (Arr requirement)
for format_name, format_id in format_map.items():
if format_name not in processed_formats:
synced_items.append({
'format': format_id,
'name': format_name,
'score': 0
})
profile['formatItems'] = synced_items
# Import profiles
existing_profiles = self.arr.get_all_profiles()
profile_map = {p['name']: p['id'] for p in existing_profiles}
for profile_data in compiled_data['profiles']:
profile_name = profile_data['name']
try:
if profile_name in profile_map:
# Update existing
if not dry_run:
profile_data['id'] = profile_map[profile_name]
self.arr.put(
f"/api/v3/qualityprofile/{profile_data['id']}",
profile_data
)
import_logger.update_import(f"Profile: {profile_name}", "updated")
results['updated'] += 1
results['details'].append({
'name': profile_name,
'action': 'updated'
})
else:
# Add new
if not dry_run:
self.arr.post("/api/v3/qualityprofile", profile_data)
import_logger.update_import(f"Profile: {profile_name}", "added")
results['added'] += 1
results['details'].append({
'name': profile_name,
'action': 'added'
})
except Exception as e:
import_logger.update_import(f"Profile: {profile_name}", "failed")
import_logger.error(f"Failed to import profile {profile_name}: {e}", profile_name)
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'error': str(e)
})
# Show import summary
import_logger.import_complete()
import_logger._import_shown = True
return results
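
The formatItems rebuild is the subtle step here: as the comment notes, the Arr API expects every custom format on the server to appear in a quality profile's formatItems, so unscored formats are padded in with score 0. A small illustrative sketch of that padding (the profile and format_map values are made up):

def build_format_items(profile: dict, format_map: dict) -> list:
    """Return formatItems covering every server-side format, keeping explicit scores."""
    scored = {item["name"]: item.get("score", 0) for item in profile.get("formatItems", [])}
    return [{"format": fmt_id, "name": name, "score": scored.get(name, 0)}
            for name, fmt_id in format_map.items()]

profile = {"formatItems": [{"name": "x265", "score": 100}]}
format_map = {"x265": 12, "Remux": 13}
print(build_format_items(profile, format_map))
# [{'format': 12, 'name': 'x265', 'score': 100}, {'format': 13, 'name': 'Remux', 'score': 0}]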


@@ -1,169 +0,0 @@
"""Utility functions for import operations."""
import logging
import yaml
from pathlib import Path
from typing import Dict, List, Any, Set
from ..data.utils import get_category_directory
logger = logging.getLogger(__name__)
def load_yaml(file_path: str) -> Dict[str, Any]:
"""
Load a YAML file.
Args:
file_path: Path to YAML file (relative to data directory)
Returns:
Parsed YAML data
Raises:
FileNotFoundError: If file doesn't exist
yaml.YAMLError: If YAML is invalid
"""
# Handle both absolute and relative paths
if file_path.startswith('/'):
full_path = Path(file_path)
else:
# Check if it starts with a category
if file_path.startswith('custom_format/'):
base_dir = get_category_directory('custom_format')
filename = file_path.replace('custom_format/', '')
full_path = Path(base_dir) / filename
elif file_path.startswith('profile/'):
base_dir = get_category_directory('profile')
filename = file_path.replace('profile/', '')
full_path = Path(base_dir) / filename
else:
# Assume it's just a filename, figure out category
full_path = Path(file_path)
if not full_path.exists():
raise FileNotFoundError(f"File not found: {full_path}")
with open(full_path, 'r', encoding='utf-8') as f:
return yaml.safe_load(f)
def extract_format_names(profile_data: Dict[str, Any]) -> Set[str]:
"""
Extract all custom format names referenced in a profile.
Args:
profile_data: Profile YAML data
Returns:
Set of unique format names
"""
format_names = set()
# Extract from main custom_formats
for cf in profile_data.get('custom_formats', []):
if isinstance(cf, dict) and 'name' in cf:
format_names.add(cf['name'])
# Extract from app-specific custom_formats
for key in ['custom_formats_radarr', 'custom_formats_sonarr']:
for cf in profile_data.get(key, []):
if isinstance(cf, dict) and 'name' in cf:
format_names.add(cf['name'])
return format_names
def generate_language_formats(language: str, arr_type: str) -> List[Dict[str, Any]]:
"""
Generate language-specific format configurations.
Args:
language: Language string (e.g., 'must_english', 'only_french')
arr_type: 'radarr' or 'sonarr'
Returns:
List of format configurations for language handling
"""
if language == 'any' or '_' not in language:
return []
behavior, language_code = language.split('_', 1)
formats = []
# Handle behaviors: 'must' and 'only' (matching old working logic)
if behavior in ['must', 'only']:
# Load base "Not English" format as template
try:
base_format = load_yaml('custom_format/Not English.yml')
# Create "Not [Language]" format
not_format = base_format.copy()
lang_display = language_code.capitalize()
not_format['name'] = f"Not {lang_display}"
# Update conditions for the specific language
for condition in not_format.get('conditions', []):
if condition.get('type') == 'language':
condition['language'] = language_code
if 'name' in condition:
condition['name'] = condition['name'].replace('English', lang_display)
# Note: exceptLanguage field is preserved from the base format
formats.append(not_format)
# For 'only' behavior, add additional formats
if behavior == 'only':
additional_format_names = [
"Not Only English",
"Not Only English (Missing)"
]
for format_name in additional_format_names:
try:
additional = load_yaml(f'custom_format/{format_name}.yml')
additional['name'] = additional['name'].replace('English', lang_display)
for condition in additional.get('conditions', []):
if condition.get('type') == 'language':
condition['language'] = language_code
if 'name' in condition:
condition['name'] = condition['name'].replace('English', lang_display)
# Note: exceptLanguage field is preserved from the base format
formats.append(additional)
except Exception as e:
# Silent fail - format doesn't exist
pass
except Exception as e:
# Silent fail - will be caught at higher level
pass
return formats
def load_regex_patterns() -> Dict[str, str]:
"""
Load all regex patterns from the regex directory.
Returns:
Dictionary mapping pattern names to regex patterns
"""
from ..data.utils import REGEX_DIR
patterns = {}
pattern_dir = Path(REGEX_DIR)
if not pattern_dir.exists():
return patterns
for pattern_file in pattern_dir.glob('*.yml'):
try:
with open(pattern_file, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
if data and 'name' in data and 'pattern' in data:
patterns[data['name']] = data['pattern']
except Exception as e:
# Silent fail for individual pattern files
pass
return patterns
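
For reference, extract_format_names only looks at dicts carrying a name key across the shared and app-specific lists, and duplicates collapse into the set. A quick illustrative call (the profile dict is invented):

profile = {
    "custom_formats": [{"name": "x265", "score": 50}, {"name": "Remux", "score": 100}],
    "custom_formats_radarr": [{"name": "IMAX Enhanced", "score": 25}],
    "custom_formats_sonarr": [{"name": "Remux", "score": 100}],  # duplicate, collapses
}
print(sorted(extract_format_names(profile)))
# ['IMAX Enhanced', 'Remux', 'x265']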


@@ -1,191 +0,0 @@
from datetime import timedelta
import os
import subprocess
import logging
import logging.config
from .config import config
from .db import get_secret_key, update_pat_status
def setup_logging():
log_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'detailed': {
'format':
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'detailed',
'stream': 'ext://sys.stdout'
},
# general_file handler
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'detailed',
'filename': config.GENERAL_LOG_FILE,
'maxBytes': 1048576, # 1MB
'backupCount': 20
},
# importarr_file handler
'importarr_file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'DEBUG',
'formatter': 'detailed',
'filename': config.IMPORTARR_LOG_FILE,
'maxBytes': 1048576,
'backupCount': 20
},
# hash_file handler
'hash_file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'detailed',
'filename': config.HASH_LOG_FILE,
'maxBytes': 1048576, # 1MB
'backupCount': 20
}
},
'root': {
'level': 'DEBUG',
'handlers': ['console', 'file']
},
'loggers': {
# The 'importarr' logger uses all three handlers
'importarr': {
'level': 'DEBUG',
'handlers': ['console', 'file', 'importarr_file'],
'propagate': False
},
# The 'importer' logger (new import module) - reduce verbosity
'importer': {
'level': 'WARNING',
'handlers': ['file'],
'propagate': False
},
# The 'hash' logger uses all three handlers
'hash': {
'level': 'INFO',
'handlers': ['console', 'file', 'hash_file'],
'propagate': False
},
# Third-party loggers configured separately so their verbosity can be controlled
'werkzeug': {
'level': 'INFO',
'handlers': ['console', 'file'],
'propagate': False
},
'flask': {
'level': 'INFO',
'handlers': ['console', 'file'],
'propagate': False
},
'git': {
'level': 'ERROR',
'handlers': ['console', 'file'],
'propagate': False
},
'urllib3': {
'level': 'WARNING',
'handlers': ['console', 'file'],
'propagate': False
},
'urllib3.connectionpool': {
'level': 'WARNING',
'handlers': ['console', 'file'],
'propagate': False
}
}
}
# Make sure the log directory exists
os.makedirs(os.path.dirname(config.GENERAL_LOG_FILE), exist_ok=True)
# Apply the configuration
logging.config.dictConfig(log_config)
# Create a logger for this module
logger = logging.getLogger(__name__)
logger.info("Logging system initialized")
return logger
def init_git_user():
"""Initialize Git user configuration globally and update PAT status."""
logger = logging.getLogger(__name__)
logger.info("Starting Git user configuration")
try:
git_name = os.environ.get('GIT_USER_NAME', 'Profilarr')
git_email = os.environ.get('GIT_USER_EMAIL',
'profilarr@dictionarry.com')
logger.debug(
f"Retrieved Git config - Name: {git_name}, Email: {git_email}")
if git_name == 'Profilarr' or git_email == 'profilarr@dictionarry.com':
logger.info("Using default Git user configuration")
# Set global Git configuration
subprocess.run(['git', 'config', '--global', 'user.name', git_name],
check=True)
subprocess.run(['git', 'config', '--global', 'user.email', git_email],
check=True)
# Update PAT status in database
update_pat_status()
# Verify configuration
configured_name = subprocess.run(
['git', 'config', '--global', 'user.name'],
capture_output=True,
text=True,
check=True).stdout.strip()
configured_email = subprocess.run(
['git', 'config', '--global', 'user.email'],
capture_output=True,
text=True,
check=True).stdout.strip()
if configured_name != git_name or configured_email != git_email:
logger.error("Git configuration verification failed")
return False, "Git configuration verification failed"
logger.info("Git user configuration completed successfully")
return True, "Git configuration successful"
except subprocess.CalledProcessError as e:
logger.error(f"Error configuring git: {str(e)}", exc_info=True)
return False, f"Failed to configure git: {str(e)}"
except Exception as e:
logger.error(f"Unexpected error configuring git: {str(e)}",
exc_info=True)
return False, f"Unexpected error configuring git: {str(e)}"
def init_app_config(app):
"""Initialize Flask app configuration."""
logger = logging.getLogger(__name__)
logger.info("Initializing app configuration")
app.config['SECRET_KEY'] = get_secret_key()
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(
days=config.SESSION_LIFETIME_DAYS)
app.config['SESSION_COOKIE_SECURE'] = config.SESSION_COOKIE_SECURE
app.config['SESSION_COOKIE_HTTPONLY'] = config.SESSION_COOKIE_HTTPONLY
app.config['SESSION_COOKIE_SAMESITE'] = config.SESSION_COOKIE_SAMESITE
logger.info("App configuration initialized")


@@ -1,148 +0,0 @@
from flask import Blueprint, jsonify, request, send_file
import os
from ..config import config
import logging
bp = Blueprint('logs', __name__)
logger = logging.getLogger(__name__)
@bp.route('/', methods=['GET'])
def get_logs():
"""Get list of available log files."""
try:
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
log_files = []
# Get all log files including rotated ones
for filename in os.listdir(log_dir):
if filename.endswith('.log') or filename.endswith('.log.1'):
file_path = os.path.join(log_dir, filename)
file_stat = os.stat(file_path)
log_files.append({
'filename': filename,
'size': file_stat.st_size,
'last_modified': file_stat.st_mtime
})
return jsonify(log_files), 200
except Exception as e:
logger.error(f"Error getting log files: {str(e)}")
return jsonify({'error': str(e)}), 500
@bp.route('/<filename>', methods=['GET'])
def get_log_content(filename):
"""Get content of a specific log file."""
try:
log_dir = os.path.realpath(os.path.dirname(config.GENERAL_LOG_FILE))
file_path = os.path.realpath(os.path.join(log_dir, filename))
# Ensure the file exists and resolves inside the log directory
# (realpath collapses '..' and symlinks so a crafted filename can't escape)
if not file_path.startswith(log_dir + os.sep) or not os.path.exists(file_path):
return jsonify({'error': 'Log file not found'}), 404
# Get query parameters for filtering
lines = request.args.get('lines',
type=int) # Number of lines to return
level = request.args.get('level') # Log level filter
search = request.args.get('search') # Search term
# If no filters, return the whole file
if not any([lines, level, search]):
return send_file(file_path, mimetype='text/plain')
# Read and filter log content
with open(file_path, 'r') as f:
content = f.readlines()
# Apply filters
filtered_content = content
if level:
filtered_content = [
line for line in filtered_content
if f' - {level.upper()} - ' in line
]
if search:
filtered_content = [
line for line in filtered_content
if search.lower() in line.lower()
]
if lines:
filtered_content = filtered_content[-lines:]
return jsonify({
'filename': filename,
'total_lines': len(content),
'filtered_lines': len(filtered_content),
'content': filtered_content
}), 200
except Exception as e:
logger.error(f"Error reading log file {filename}: {str(e)}")
return jsonify({'error': str(e)}), 500
@bp.route('/level/<level>', methods=['GET'])
def get_logs_by_level(level):
"""Get all logs of a specific level."""
try:
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
results = []
for filename in os.listdir(log_dir):
if filename.endswith('.log'):
file_path = os.path.join(log_dir, filename)
with open(file_path, 'r') as f:
matching_lines = [
line.strip() for line in f
if f' - {level.upper()} - ' in line
]
if matching_lines:
results.extend(matching_lines)
return jsonify({
'level': level.upper(),
'count': len(results),
'logs': results
}), 200
except Exception as e:
logger.error(f"Error getting logs for level {level}: {str(e)}")
return jsonify({'error': str(e)}), 500
@bp.route('/search', methods=['GET'])
def search_logs():
"""Search all logs for a specific term."""
try:
term = request.args.get('q')
if not term:
return jsonify({'error': 'Search term required'}), 400
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
results = []
for filename in os.listdir(log_dir):
if filename.endswith('.log'):
file_path = os.path.join(log_dir, filename)
with open(file_path, 'r') as f:
matching_lines = [
line.strip() for line in f
if term.lower() in line.lower()
]
if matching_lines:
results.extend(matching_lines)
return jsonify({
'term': term,
'count': len(results),
'logs': results
}), 200
except Exception as e:
logger.error(f"Error searching logs: {str(e)}")
return jsonify({'error': str(e)}), 500
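
Since main.py mounts this blueprint at /api/logs, the filters compose as plain query parameters. A hedged client-side example (host, API key, and log filename are placeholders):

import requests

resp = requests.get(
    "http://localhost:6868/api/logs/profilarr.log",
    params={"level": "error", "lines": 50, "search": "sync"},
    headers={"X-Api-Key": "<your-api-key>"},
    timeout=10,
)
resp.raise_for_status()
data = resp.json()
print(f"{data['filtered_lines']} of {data['total_lines']} lines matched")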


@@ -1,95 +0,0 @@
# backend/app/main.py
from flask import Flask, jsonify, send_from_directory
import os
from flask_cors import CORS
from .config import config
from .git import bp as git_bp
from .arr import bp as arr_bp
from .data import bp as data_bp
from .importarr import bp as importarr_bp
from .importer.routes import bp as new_import_bp
from .task import bp as tasks_bp, TaskScheduler
from .backup import bp as backup_bp
from .db import run_migrations, get_settings
from .auth import bp as auth_bp
from .settings import bp as settings_bp
from .logs import bp as logs_bp
from .media_management import media_management_bp
from .middleware import init_middleware
from .init import setup_logging, init_app_config, init_git_user
def create_app():
# Set up logging first
logger = setup_logging()
logger.info("Creating Flask application")
app = Flask(__name__, static_folder='static')
CORS(app, resources={r"/*": {"origins": "*"}})
# Serve static files
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve_static(path):
if path.startswith('api/'):
return # Let API routes handle these
if path and os.path.exists(os.path.join(app.static_folder, path)):
return send_from_directory(app.static_folder, path)
return send_from_directory(app.static_folder, 'index.html')
# Initialize directories and database
logger.info("Ensuring required directories exist")
config.ensure_directories()
logger.info("Initializing database")
run_migrations()
# Initialize Git user configuration
logger.info("Initializing Git user")
success, message = init_git_user()
if not success:
logger.warning(f"Git user initialization issue: {message}")
else:
logger.info("Git user initialized successfully")
# Initialize app configuration
init_app_config(app)
# Initialize and start task scheduler
logger.info("Starting task scheduler")
scheduler = TaskScheduler()
scheduler.load_tasks_from_db()
scheduler.start()
# Register all blueprints
logger.info("Registering blueprints")
app.register_blueprint(auth_bp, url_prefix='/api/auth')
app.register_blueprint(settings_bp, url_prefix='/api/settings')
app.register_blueprint(backup_bp, url_prefix='/api/backup')
app.register_blueprint(logs_bp, url_prefix='/api/logs')
app.register_blueprint(git_bp, url_prefix='/api/git')
app.register_blueprint(data_bp, url_prefix='/api/data')
app.register_blueprint(importarr_bp, url_prefix='/api/import')
app.register_blueprint(new_import_bp, url_prefix='/api/v2/import')
app.register_blueprint(arr_bp, url_prefix='/api/arr')
app.register_blueprint(tasks_bp, url_prefix='/api/tasks')
app.register_blueprint(media_management_bp)
# Initialize middleware
logger.info("Initializing middleware")
init_middleware(app)
# Add settings route
@app.route('/api/settings', methods=['GET'])
def handle_settings():
settings = get_settings()
return jsonify(settings), 200
logger.info("Flask application creation completed")
return app
if __name__ == '__main__':
app = create_app()
app.run(debug=True, host='0.0.0.0')
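
To run the factory under a WSGI server instead of the __main__ guard, one common arrangement is a tiny module that builds the app at import time; a sketch (the wsgi.py name and gunicorn command are assumptions, not taken from this repo):

# wsgi.py -- hypothetical entry point
from app.main import create_app

app = create_app()

# e.g.  gunicorn --bind 0.0.0.0:5000 wsgi:app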


@@ -1,141 +0,0 @@
from flask import Blueprint, jsonify, request
import logging
from .utils import (
get_media_management_data,
save_media_management_data,
update_media_management_data,
get_all_media_management_data,
MEDIA_MANAGEMENT_CATEGORIES
)
from .sync import (
sync_naming_config,
sync_media_management_config,
sync_quality_definitions
)
from ..arr.manager import get_arr_config
logger = logging.getLogger(__name__)
media_management_bp = Blueprint('media_management', __name__)
@media_management_bp.route('/api/media-management', methods=['GET'])
def get_all_media_management():
"""Get all media management data for all categories"""
try:
data = get_all_media_management_data()
return jsonify(data), 200
except Exception as e:
logger.error(f"Error retrieving media management data: {e}")
return jsonify({'error': str(e)}), 500
@media_management_bp.route('/api/media-management/<category>', methods=['GET'])
def get_media_management(category):
"""Get media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
return jsonify({'error': f'Invalid category: {category}'}), 400
try:
data = get_media_management_data(category)
return jsonify(data), 200
except Exception as e:
logger.error(f"Error retrieving {category}: {e}")
return jsonify({'error': str(e)}), 500
@media_management_bp.route('/api/media-management/<category>', methods=['PUT'])
def update_media_management(category):
"""Update media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
return jsonify({'error': f'Invalid category: {category}'}), 400
try:
data = request.get_json()
if not data:
return jsonify({'error': 'No data provided'}), 400
updated_data = update_media_management_data(category, data)
return jsonify(updated_data), 200
except Exception as e:
logger.error(f"Error updating {category}: {e}")
return jsonify({'error': str(e)}), 500
@media_management_bp.route('/api/media-management/sync', methods=['POST'])
def sync_media_management():
"""Sync media management data to arr instance"""
try:
data = request.get_json()
if not data:
return jsonify({'error': 'No data provided'}), 400
arr_id = data.get('arr_id')
categories = data.get('categories', [])
if not arr_id:
return jsonify({'error': 'arr_id is required'}), 400
if not categories:
return jsonify({'error': 'categories list is required'}), 400
# Validate categories
invalid_categories = [cat for cat in categories if cat not in MEDIA_MANAGEMENT_CATEGORIES]
if invalid_categories:
return jsonify({'error': f'Invalid categories: {invalid_categories}'}), 400
# Get arr config
arr_result = get_arr_config(arr_id)
if not arr_result.get('success'):
return jsonify({'error': 'Arr configuration not found'}), 404
arr_config = arr_result.get('data')
base_url = arr_config['arrServer']
api_key = arr_config['apiKey']
arr_type = arr_config['type']
results = {}
# Sync each requested category
for category in categories:
try:
# Get the current media management data for this category
category_data = get_media_management_data(category)
logger.info(f"Raw category_data for {category}: {category_data}")
arr_type_data = category_data.get(arr_type, {})
logger.info(f"Extracted arr_type_data for {arr_type}: {arr_type_data}")
if category == 'naming':
success, message = sync_naming_config(base_url, api_key, arr_type, arr_type_data)
elif category == 'misc':
success, message = sync_media_management_config(base_url, api_key, arr_type, arr_type_data)
elif category == 'quality_definitions':
# Quality definitions has a nested structure: qualityDefinitions -> arr_type -> qualities
quality_defs = category_data.get('qualityDefinitions', {}).get(arr_type, {})
success, message = sync_quality_definitions(base_url, api_key, arr_type, quality_defs)
else:
success, message = False, f"Unknown category: {category}"
results[category] = {
'success': success,
'message': message
}
except Exception as e:
logger.error(f"Error syncing {category}: {e}")
results[category] = {
'success': False,
'message': str(e)
}
# Determine overall success
overall_success = all(result['success'] for result in results.values())
return jsonify({
'success': overall_success,
'results': results
}), 200
except Exception as e:
logger.error(f"Error in media management sync: {e}")
return jsonify({'error': str(e)}), 500
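
The sync endpoint takes the arr instance id plus the categories to push, and validates both before doing any work. A hedged client example (host, API key, and arr_id are placeholders):

import requests

payload = {"arr_id": 1, "categories": ["naming", "quality_definitions"]}
resp = requests.post(
    "http://localhost:6868/api/media-management/sync",
    json=payload,
    headers={"X-Api-Key": "<your-api-key>"},
    timeout=30,
)
print(resp.json())
# {'success': True, 'results': {'naming': {'success': True, 'message': '...'}, ...}}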


@@ -1,258 +0,0 @@
import logging
import requests
from typing import Dict, Any, Tuple
logger = logging.getLogger(__name__)
def sync_naming_config(base_url: str, api_key: str, arr_type: str, naming_data: Dict[str, Any]) -> Tuple[bool, str]:
"""
Sync naming configuration to arr instance.
First GET current config, update with our data, then PUT back.
Args:
base_url: The arr instance base URL
api_key: The arr instance API key
arr_type: Either 'radarr' or 'sonarr'
naming_data: The naming configuration from our YML file
Returns:
Tuple of (success, message)
"""
try:
# Construct the endpoint URL
endpoint = f"{base_url}/api/v3/config/naming"
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
# GET current naming config
logger.info(f"Fetching current naming config from {arr_type} at {base_url}")
response = requests.get(endpoint, headers=headers, timeout=10)
response.raise_for_status()
current_config = response.json()
logger.info(f"Current naming config for {arr_type}:")
logger.info(current_config)
# Update current_config with fields from naming_data
if arr_type == 'radarr':
# Map our YML fields to Radarr API fields
if 'rename' in naming_data:
current_config['renameMovies'] = naming_data['rename']
if 'replaceIllegalCharacters' in naming_data:
current_config['replaceIllegalCharacters'] = naming_data['replaceIllegalCharacters']
if 'colonReplacementFormat' in naming_data:
current_config['colonReplacementFormat'] = naming_data['colonReplacementFormat']
if 'movieFormat' in naming_data:
current_config['standardMovieFormat'] = naming_data['movieFormat']
if 'movieFolderFormat' in naming_data:
current_config['movieFolderFormat'] = naming_data['movieFolderFormat']
else: # sonarr
# Map our YML fields to Sonarr API fields
if 'rename' in naming_data:
current_config['renameEpisodes'] = naming_data['rename']
if 'replaceIllegalCharacters' in naming_data:
current_config['replaceIllegalCharacters'] = naming_data['replaceIllegalCharacters']
if 'colonReplacementFormat' in naming_data:
current_config['colonReplacementFormat'] = naming_data['colonReplacementFormat']
if 'customColonReplacementFormat' in naming_data:
current_config['customColonReplacementFormat'] = naming_data['customColonReplacementFormat']
if 'multiEpisodeStyle' in naming_data:
current_config['multiEpisodeStyle'] = naming_data['multiEpisodeStyle']
if 'standardEpisodeFormat' in naming_data:
current_config['standardEpisodeFormat'] = naming_data['standardEpisodeFormat']
if 'dailyEpisodeFormat' in naming_data:
current_config['dailyEpisodeFormat'] = naming_data['dailyEpisodeFormat']
if 'animeEpisodeFormat' in naming_data:
current_config['animeEpisodeFormat'] = naming_data['animeEpisodeFormat']
if 'seriesFolderFormat' in naming_data:
current_config['seriesFolderFormat'] = naming_data['seriesFolderFormat']
if 'seasonFolderFormat' in naming_data:
current_config['seasonFolderFormat'] = naming_data['seasonFolderFormat']
if 'specialsFolderFormat' in naming_data:
current_config['specialsFolderFormat'] = naming_data['specialsFolderFormat']
# PUT the updated config back
logger.info(f"Updating naming config for {arr_type}")
logger.info(f"Request body for naming sync:")
logger.info(current_config)
put_response = requests.put(endpoint, json=current_config, headers=headers, timeout=10)
put_response.raise_for_status()
logger.info(f"Successfully synced naming config for {arr_type}")
return True, "Naming config sync successful"
except requests.exceptions.RequestException as e:
error_msg = f"Failed to sync naming config: {str(e)}"
logger.error(error_msg)
return False, error_msg
except Exception as e:
error_msg = f"Unexpected error syncing naming config: {str(e)}"
logger.error(error_msg)
return False, error_msg
def sync_media_management_config(base_url: str, api_key: str, arr_type: str, misc_data: Dict[str, Any]) -> Tuple[bool, str]:
"""
Sync media management (misc) configuration to arr instance.
First GET current config, update with our data, then PUT back.
Args:
base_url: The arr instance base URL
api_key: The arr instance API key
arr_type: Either 'radarr' or 'sonarr'
misc_data: The misc configuration from our YML file
Returns:
Tuple of (success, message)
"""
try:
# Construct the endpoint URL
endpoint = f"{base_url}/api/v3/config/mediamanagement"
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
# GET current media management config
logger.info(f"Fetching current media management config from {arr_type} at {base_url}")
response = requests.get(endpoint, headers=headers, timeout=10)
response.raise_for_status()
current_config = response.json()
logger.info(f"Current media management config for {arr_type}:")
logger.info(current_config)
# Update current_config with fields from misc_data
# We only manage two fields: propersRepacks and enableMediaInfo
if 'propersRepacks' in misc_data:
current_config['downloadPropersAndRepacks'] = misc_data['propersRepacks']
if 'enableMediaInfo' in misc_data:
current_config['enableMediaInfo'] = misc_data['enableMediaInfo']
# PUT the updated config back
logger.info(f"Updating media management config for {arr_type}")
logger.info(f"Request body for media management sync:")
logger.info(current_config)
put_response = requests.put(endpoint, json=current_config, headers=headers, timeout=10)
put_response.raise_for_status()
logger.info(f"Successfully synced media management config for {arr_type}")
return True, "Media management config sync successful"
except requests.exceptions.RequestException as e:
error_msg = f"Failed to sync media management config: {str(e)}"
logger.error(error_msg)
return False, error_msg
except Exception as e:
error_msg = f"Unexpected error syncing media management config: {str(e)}"
logger.error(error_msg)
return False, error_msg
def sync_quality_definitions(base_url: str, api_key: str, arr_type: str, quality_data: Dict[str, Any]) -> Tuple[bool, str]:
"""
Sync quality definitions to arr instance.
Quality definitions contain all required data, so we can directly PUT.
Args:
base_url: The arr instance base URL
api_key: The arr instance API key
arr_type: Either 'radarr' or 'sonarr'
quality_data: The quality definitions from our YML file
Returns:
Tuple of (success, message)
"""
try:
# Construct the endpoint URL
endpoint = f"{base_url}/api/v3/qualitydefinition"
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
# GET current quality definitions (for logging/comparison)
logger.info(f"Fetching current quality definitions from {arr_type} at {base_url}")
response = requests.get(endpoint, headers=headers, timeout=10)
response.raise_for_status()
current_definitions = response.json()
logger.info(f"Current quality definitions for {arr_type}:")
logger.info(current_definitions)
if arr_type == 'sonarr':
# Log the quality data we received from YML
logger.info(f"Quality data from YML:")
logger.info(quality_data)
# Create a mapping of quality names to current definitions for easier lookup
quality_map = {def_['quality']['name']: def_ for def_ in current_definitions}
# Update each quality definition with our values
for quality_name, settings in quality_data.items():
if quality_name in quality_map:
definition = quality_map[quality_name]
# Update size limits from our YML data
if 'min' in settings:
definition['minSize'] = settings['min']
if 'preferred' in settings:
definition['preferredSize'] = settings['preferred']
if 'max' in settings:
definition['maxSize'] = settings['max']
# PUT the updated definitions back
logger.info(f"Updating quality definitions for {arr_type}")
logger.info(f"Request body for quality definitions sync:")
logger.info(current_definitions)
# Sonarr expects the full array of definitions at the update endpoint
update_endpoint = f"{base_url}/api/v3/qualitydefinition/update"
put_response = requests.put(update_endpoint, json=current_definitions, headers=headers, timeout=10)
put_response.raise_for_status()
logger.info(f"Successfully synced quality definitions for {arr_type}")
return True, "Quality definitions sync successful"
else: # radarr
# Log the quality data we received from YML
logger.info(f"Quality data from YML:")
logger.info(quality_data)
# Create a mapping of quality names to current definitions for easier lookup
quality_map = {def_['quality']['name']: def_ for def_ in current_definitions}
# Update each quality definition with our values
for quality_name, settings in quality_data.items():
if quality_name in quality_map:
definition = quality_map[quality_name]
# Update size limits from our YML data
if 'min' in settings:
definition['minSize'] = settings['min']
if 'preferred' in settings:
definition['preferredSize'] = settings['preferred']
if 'max' in settings:
definition['maxSize'] = settings['max']
# PUT the updated definitions back
logger.info(f"Updating quality definitions for {arr_type}")
logger.info(f"Request body for quality definitions sync:")
logger.info(current_definitions)
# Radarr expects the full array of definitions at the update endpoint
update_endpoint = f"{base_url}/api/v3/qualitydefinition/update"
put_response = requests.put(update_endpoint, json=current_definitions, headers=headers, timeout=10)
put_response.raise_for_status()
logger.info(f"Successfully synced quality definitions for {arr_type}")
return True, "Quality definitions sync successful"
except requests.exceptions.RequestException as e:
error_msg = f"Failed to sync quality definitions: {str(e)}"
logger.error(error_msg)
return False, error_msg
except Exception as e:
error_msg = f"Unexpected error syncing quality definitions: {str(e)}"
logger.error(error_msg)
return False, error_msg
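
All three helpers follow the same GET, overlay the managed fields, PUT-back shape; only the endpoint and field mapping differ. A condensed sketch of that pattern (the example field values are illustrative, not a complete mapping):

import requests

def get_merge_put(base_url: str, api_key: str, endpoint: str, updates: dict) -> dict:
    """Fetch the current arr config, overlay our managed fields, and write it back."""
    headers = {"X-Api-Key": api_key, "Content-Type": "application/json"}
    url = f"{base_url}{endpoint}"
    current = requests.get(url, headers=headers, timeout=10)
    current.raise_for_status()
    config = current.json()
    config.update(updates)  # only the keys we manage are overwritten
    resp = requests.put(url, json=config, headers=headers, timeout=10)
    resp.raise_for_status()
    return config

# e.g. the misc sync reduces to:
# get_merge_put(base_url, api_key, "/api/v3/config/mediamanagement",
#               {"downloadPropersAndRepacks": "doNotPrefer", "enableMediaInfo": True})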


@@ -1,211 +0,0 @@
import os
import yaml
import logging
from typing import Dict, Any
from datetime import datetime
from ..config.config import config
logger = logging.getLogger(__name__)
# Media management directory
MEDIA_MANAGEMENT_DIR = config.MEDIA_MANAGEMENT_DIR
# Media management categories
MEDIA_MANAGEMENT_CATEGORIES = ["misc", "naming", "quality_definitions"]
def _preserve_order(data: Dict[str, Any], category: str) -> Dict[str, Any]:
"""Preserve the desired key order based on category"""
if category == "misc":
# Order: radarr, sonarr
ordered = {}
for arr_type in ["radarr", "sonarr"]:
if arr_type in data:
arr_data = data[arr_type]
# Order within each: propersRepacks, enableMediaInfo
ordered_arr = {}
for key in ["propersRepacks", "enableMediaInfo"]:
if key in arr_data:
ordered_arr[key] = arr_data[key]
# Add any remaining keys
for key, value in arr_data.items():
if key not in ordered_arr:
ordered_arr[key] = value
ordered[arr_type] = ordered_arr
# Add any remaining top-level keys
for key, value in data.items():
if key not in ordered:
ordered[key] = value
return ordered
elif category == "naming":
# Order: radarr, sonarr
ordered = {}
for arr_type in ["radarr", "sonarr"]:
if arr_type in data:
arr_data = data[arr_type]
ordered_arr = {}
if arr_type == "radarr":
# Radarr order: rename, movieFormat, movieFolderFormat, replaceIllegalCharacters, colonReplacementFormat
for key in ["rename", "movieFormat", "movieFolderFormat", "replaceIllegalCharacters", "colonReplacementFormat"]:
if key in arr_data:
ordered_arr[key] = arr_data[key]
elif arr_type == "sonarr":
# Sonarr order: rename, standardEpisodeFormat, dailyEpisodeFormat, animeEpisodeFormat, seriesFolderFormat, seasonFolderFormat, replaceIllegalCharacters, colonReplacementFormat, customColonReplacementFormat, multiEpisodeStyle
for key in ["rename", "standardEpisodeFormat", "dailyEpisodeFormat", "animeEpisodeFormat", "seriesFolderFormat", "seasonFolderFormat", "replaceIllegalCharacters", "colonReplacementFormat", "customColonReplacementFormat", "multiEpisodeStyle"]:
if key in arr_data:
ordered_arr[key] = arr_data[key]
# Add any remaining keys
for key, value in arr_data.items():
if key not in ordered_arr:
ordered_arr[key] = value
ordered[arr_type] = ordered_arr
# Add any remaining top-level keys
for key, value in data.items():
if key not in ordered:
ordered[key] = value
return ordered
elif category == "quality_definitions":
# For quality_definitions, preserve the structure: qualityDefinitions -> radarr/sonarr -> qualities
return data
return data
def _get_file_path(category: str) -> str:
"""Get the file path for a media management category"""
return os.path.join(MEDIA_MANAGEMENT_DIR, f"{category}.yml")
def _load_yaml_file(file_path: str) -> Dict[str, Any]:
"""Load YAML file and return contents"""
if not os.path.exists(file_path):
logger.error(f"File not found: {file_path}")
raise FileNotFoundError(f"File not found: {file_path}")
try:
with open(file_path, 'r') as f:
return yaml.safe_load(f) or {}
except Exception as e:
logger.error(f"Error loading {file_path}: {e}")
raise
def _save_yaml_file(file_path: str, data: Dict[str, Any], category: str = None) -> None:
"""Save data to YAML file"""
try:
# Preserve key order if category is specified
if category:
data = _preserve_order(data, category)
with open(file_path, 'w') as f:
yaml.safe_dump(
data,
f,
sort_keys=False,
default_flow_style=False,
width=1000, # Prevent line wrapping
allow_unicode=True
)
except Exception as e:
logger.error(f"Error saving {file_path}: {e}")
raise
def get_media_management_data(category: str) -> Dict[str, Any]:
"""Get media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
raise ValueError(f"Invalid category: {category}")
file_path = _get_file_path(category)
# If file doesn't exist, return empty dict
if not os.path.exists(file_path):
logger.info(f"Media management file not found: {file_path}")
return {}
try:
data = _load_yaml_file(file_path)
return data
except Exception as e:
logger.error(f"Error reading {category}: {e}")
# Return empty dict on error
return {}
def save_media_management_data(category: str, data: Dict[str, Any]) -> Dict[str, Any]:
"""Save media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
raise ValueError(f"Invalid category: {category}")
file_path = _get_file_path(category)
try:
_save_yaml_file(file_path, data, category)
logger.info(f"Saved {category} data")
return get_media_management_data(category)
except Exception as e:
logger.error(f"Error saving {category}: {e}")
raise
def update_media_management_data(category: str, data: Dict[str, Any]) -> Dict[str, Any]:
"""Update media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
raise ValueError(f"Invalid category: {category}")
# For media management, update is the same as save
# since these files can't be deleted
return save_media_management_data(category, data)
def get_all_media_management_data() -> Dict[str, Any]:
"""Get all media management data for all categories, transformed to have arr type at top level"""
# First get all data in original structure
original_data = {}
for category in MEDIA_MANAGEMENT_CATEGORIES:
try:
data = get_media_management_data(category)
# Only include if data exists
if data:
original_data[category] = data
except Exception as e:
logger.error(f"Error getting {category} data: {e}")
# Transform to have radarr/sonarr at top level
result = {
"radarr": {},
"sonarr": {}
}
for category, data in original_data.items():
if category == "misc":
# misc has radarr/sonarr subdivisions
if "radarr" in data and data["radarr"]:
result["radarr"]["misc"] = data["radarr"]
if "sonarr" in data and data["sonarr"]:
result["sonarr"]["misc"] = data["sonarr"]
elif category == "naming":
# naming has radarr/sonarr subdivisions
if "radarr" in data and data["radarr"]:
result["radarr"]["naming"] = data["radarr"]
if "sonarr" in data and data["sonarr"]:
result["sonarr"]["naming"] = data["sonarr"]
elif category == "quality_definitions":
# quality_definitions has qualityDefinitions.radarr/sonarr
quality_defs = data.get("qualityDefinitions", {})
if "radarr" in quality_defs and quality_defs["radarr"]:
result["radarr"]["quality_definitions"] = quality_defs["radarr"]
if "sonarr" in quality_defs and quality_defs["sonarr"]:
result["sonarr"]["quality_definitions"] = quality_defs["sonarr"]
# Remove empty arr types
if not result["radarr"]:
del result["radarr"]
if not result["sonarr"]:
del result["sonarr"]
return result
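
For illustration, the naming category stores per-arr keys matching the fields the sync layer maps onto the arr API; a hedged sketch of writing one by hand (the format strings are examples only, not recommended settings):

naming = {
    "radarr": {
        "rename": True,
        "movieFormat": "{Movie Title} ({Release Year}) {Quality Full}",
        "movieFolderFormat": "{Movie Title} ({Release Year})",
    },
    "sonarr": {
        "rename": True,
        "standardEpisodeFormat": "{Series Title} - S{season:00}E{episode:00} - {Episode Title}",
        "seasonFolderFormat": "Season {season:00}",
    },
}
save_media_management_data("naming", naming)  # _preserve_order keeps the key layout stable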


@@ -1,60 +0,0 @@
# backend/app/middleware.py
from flask import request, session, jsonify, send_from_directory
from .db import get_db
import logging
logger = logging.getLogger(__name__)
def init_middleware(app):
"""Initialize authentication middleware for the Flask app"""
@app.before_request
def authenticate_request():
# Skip authentication for OPTIONS requests (CORS preflight)
if request.method == 'OPTIONS':
return
# Always allow auth endpoints
if request.path.startswith('/api/auth/'):
return
# Allow static assets needed for auth pages
if request.path.startswith(
('/assets/',
'/static/')) or request.path in ['/', '/regex.svg', '/clone.svg']:
return
# For API routes, require auth
if request.path.startswith('/api/'):
# Check session authentication (for web users)
if session.get('authenticated'):
db = get_db()
user = db.execute('SELECT session_id FROM auth').fetchone()
if user and session.get('session_id') == user['session_id']:
return
# Check API key authentication (for API users)
api_key = request.headers.get('X-Api-Key')
if api_key:
db = get_db()
try:
user = db.execute('SELECT 1 FROM auth WHERE api_key = ?',
(api_key, )).fetchone()
if user:
return
logger.warning(
f'Invalid API key attempt: {api_key[:10]}...')
except Exception as e:
logger.error(
f'Database error during API key check: {str(e)}')
return jsonify({'error': 'Internal server error'}), 500
# If no valid authentication is found, return 401
logger.warning(f'Unauthorized access attempt to {request.path}')
return jsonify({'error': 'Unauthorized'}), 401
# For all other routes (frontend routes), serve index.html
# This lets React handle auth and routing
return send_from_directory(app.static_folder, 'index.html')
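
In practice this middleware means any API call outside /api/auth must carry either the logged-in session cookie or the X-Api-Key header. A small check against one of the protected routes (host and key are placeholders):

import requests

base = "http://localhost:6868"

# No credentials: the middleware short-circuits with 401 before any blueprint runs.
print(requests.get(f"{base}/api/settings/general", timeout=10).status_code)  # 401

# Same request with a valid API key passes straight through to the settings blueprint.
ok = requests.get(f"{base}/api/settings/general",
                  headers={"X-Api-Key": "<your-api-key>"}, timeout=10)
print(ok.status_code)  # 200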


@@ -1,158 +0,0 @@
# backend/app/settings/__init__.py
from flask import Blueprint, jsonify, request, session
from werkzeug.security import generate_password_hash, check_password_hash
import secrets
from ..db import get_db
from ..db.queries.settings import get_language_import_score, update_language_import_score
import logging
logger = logging.getLogger(__name__)
bp = Blueprint('settings', __name__)
@bp.route('/general', methods=['GET'])
def get_general_settings():
db = get_db()
try:
user = db.execute('SELECT username, api_key FROM auth').fetchone()
if not user:
logger.error('No user found in auth table')
return jsonify({'error': 'No user configuration found'}), 500
return jsonify({
'username': user['username'],
'api_key': user['api_key']
})
except Exception as e:
logger.error(f'Error fetching general settings: {str(e)}')
return jsonify({'error': 'Failed to fetch settings'}), 500
@bp.route('/username', methods=['PUT'])
def update_username():
db = get_db()
data = request.get_json()
new_username = data.get('username')
current_password = data.get('current_password')
if not new_username or not current_password:
return jsonify({'error':
'Username and current password are required'}), 400
try:
# Verify current password
user = db.execute('SELECT password_hash FROM auth').fetchone()
if not check_password_hash(user['password_hash'], current_password):
logger.warning('Failed username change - invalid password')
return jsonify({'error': 'Invalid password'}), 401
db.execute('UPDATE auth SET username = ?', (new_username, ))
db.commit()
logger.info(f'Username updated to: {new_username}')
return jsonify({'message': 'Username updated successfully'})
except Exception as e:
logger.error(f'Failed to update username: {str(e)}')
return jsonify({'error': 'Failed to update username'}), 500
@bp.route('/password', methods=['PUT'])
def update_password():
db = get_db()
data = request.get_json()
current_password = data.get('current_password')
new_password = data.get('new_password')
if not current_password or not new_password:
return jsonify({'error':
'Current and new passwords are required'}), 400
try:
# Verify current password
user = db.execute(
'SELECT password_hash, session_id FROM auth').fetchone()
if not check_password_hash(user['password_hash'], current_password):
logger.warning('Failed password change - invalid current password')
return jsonify({'error': 'Invalid current password'}), 401
# Update password and generate a new session ID
password_hash = generate_password_hash(new_password)
new_session_id = secrets.token_urlsafe(32)
db.execute('UPDATE auth SET password_hash = ?, session_id = ?',
(password_hash, new_session_id))
db.commit()
# Clear the current session to force re-login
session.clear()
logger.info('Password updated successfully')
return jsonify({
'message': 'Password updated successfully. Please log in again.',
'requireRelogin': True
})
except Exception as e:
logger.error(f'Failed to update password: {str(e)}')
return jsonify({'error': 'Failed to update password'}), 500
@bp.route('/api-key', methods=['POST'])
def reset_api_key():
db = get_db()
data = request.get_json()
current_password = data.get('current_password')
if not current_password:
return jsonify({'error': 'Current password is required'}), 400
try:
# Verify current password
user = db.execute('SELECT password_hash FROM auth').fetchone()
if not check_password_hash(user['password_hash'], current_password):
logger.warning('Failed API key reset - invalid password')
return jsonify({'error': 'Invalid password'}), 401
# Generate and save new API key
new_api_key = secrets.token_urlsafe(32)
db.execute('UPDATE auth SET api_key = ?', (new_api_key, ))
db.commit()
logger.info('API key reset successfully')
return jsonify({
'message': 'API key reset successfully',
'api_key': new_api_key
})
except Exception as e:
logger.error(f'Failed to reset API key: {str(e)}')
return jsonify({'error': 'Failed to reset API key'}), 500
@bp.route('/language-import-score', methods=['GET'])
def get_language_import_score_route():
try:
score = get_language_import_score()
return jsonify({'score': score})
except Exception as e:
logger.error(f'Failed to get language import score: {str(e)}')
return jsonify({'error': 'Failed to get language import score'}), 500
@bp.route('/language-import-score', methods=['PUT'])
def update_language_import_score_route():
data = request.get_json()
score = data.get('score')
if score is None:
return jsonify({'error': 'Score is required'}), 400
try:
score = int(score)
except (ValueError, TypeError):
return jsonify({'error': 'Score must be an integer'}), 400
try:
update_language_import_score(score)
return jsonify({'message': 'Language import score updated successfully'})
except Exception as e:
logger.error(f'Failed to update language import score: {str(e)}')
return jsonify({'error': 'Failed to update language import score'}), 500
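
A hedged example of the password flow: the PUT requires the current password, and a successful response flags requireRelogin because the session id is rotated server-side (host and credentials are placeholders):

import requests

resp = requests.put(
    "http://localhost:6868/api/settings/password",
    json={"current_password": "old-secret", "new_password": "new-secret"},
    headers={"X-Api-Key": "<your-api-key>"},
    timeout=10,
)
body = resp.json()
if resp.ok and body.get("requireRelogin"):
    print("Password changed; existing sessions are invalid, log in again.")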


@@ -1,117 +0,0 @@
# app/task/__init__.py
from flask import Blueprint, jsonify
import logging
from ..db import get_db
from .tasks import TaskScheduler
bp = Blueprint('tasks', __name__)
logger = logging.getLogger(__name__)
@bp.route('', methods=['GET'])
def get_all_tasks():
try:
with get_db() as conn:
tasks = conn.execute('SELECT * FROM scheduled_tasks').fetchall()
result = []
scheduler_instance = TaskScheduler.get_instance()
if scheduler_instance:
for task in tasks:
# Get the job from scheduler
job = scheduler_instance.scheduler.get_job(str(task['id']))
next_run = job.next_run_time if job else None
result.append({
'id':
task['id'],
'name':
task['name'],
'type':
task['type'],
'interval_minutes':
task['interval_minutes'],
'last_run':
task['last_run'],
'next_run':
next_run.isoformat() if next_run else None,
'status':
task['status']
})
return jsonify(result), 200
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<int:task_id>', methods=['GET'])
def get_task(task_id):
try:
with get_db() as conn:
task = conn.execute('SELECT * FROM scheduled_tasks WHERE id = ?',
(task_id, )).fetchone()
if not task:
return jsonify({"error": "Task not found"}), 404
scheduler_instance = TaskScheduler.get_instance()
if scheduler_instance:
job = scheduler_instance.scheduler.get_job(str(task['id']))
next_run = job.next_run_time if job else None
else:
next_run = None
return jsonify({
'id': task['id'],
'name': task['name'],
'type': task['type'],
'interval_minutes': task['interval_minutes'],
'last_run': task['last_run'],
'next_run': next_run.isoformat() if next_run else None,
'status': task['status']
}), 200
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<int:task_id>/run', methods=['POST'])
def trigger_task(task_id):
try:
with get_db() as conn:
task = conn.execute('SELECT * FROM scheduled_tasks WHERE id = ?',
(task_id, )).fetchone()
if not task:
return jsonify({"error": "Task not found"}), 404
# Get the task class and run it
task_class = TaskScheduler.get_task_class(task['type'])
if not task_class:
return jsonify({"error": "Invalid task type"}), 400
task_instance = task_class(
id=task['id'],
name=task['name'],
interval_minutes=task['interval_minutes'])
try:
task_instance.update_status('running')
task_instance.run_job()
task_instance.update_status('success')
return jsonify(
{"message": f"Task {task_id} triggered successfully"}), 200
except Exception as e:
task_instance.update_status('failed')
logger.error(f"Task {task_id} failed: {str(e)}")
return jsonify({"error": f"Task failed: {str(e)}"}), 500
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
__all__ = ['bp', 'TaskScheduler']
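
Triggering a task outside its schedule is a POST to its id; the handler resolves the scheduler class from the stored type and runs it synchronously. A hedged example (host, API key, and task id are placeholders):

import requests

task_id = 1
resp = requests.post(
    f"http://localhost:6868/api/tasks/{task_id}/run",
    headers={"X-Api-Key": "<your-api-key>"},
    timeout=60,
)
print(resp.status_code, resp.json())
# 200 {'message': 'Task 1 triggered successfully'}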


@@ -1,4 +0,0 @@
# app/task/backup/__init__.py
from .backup import BackupManager
__all__ = ['BackupManager']


@@ -1,186 +0,0 @@
# app/task/backup/backup.py
import os
import shutil
from datetime import datetime, timedelta
import logging
from pathlib import Path
import zipfile
import tempfile
from ...config.config import config
from ...db import get_db
logger = logging.getLogger(__name__)
class BackupManager:
def __init__(self):
self.backup_dir = os.path.join(config.CONFIG_DIR, 'backups')
self.retention_days = 30
self._ensure_backup_directory()
def _ensure_backup_directory(self):
"""Ensure backup directory exists"""
os.makedirs(self.backup_dir, exist_ok=True)
def create_backup(self):
"""Create a new backup of the config directory"""
try:
# Generate backup filename with timestamp
timestamp = datetime.now().strftime('%Y_%m_%d_%H%M%S')
backup_filename = f'backup_{timestamp}.zip'
backup_path = os.path.join(self.backup_dir, backup_filename)
# Create zip file
with zipfile.ZipFile(backup_path, 'w',
zipfile.ZIP_DEFLATED) as zipf:
# Walk through all files in config directory
for root, dirs, files in os.walk(config.CONFIG_DIR):
# Skip the backups directory itself
if 'backups' in root:
continue
for file in files:
file_path = os.path.join(root, file)
# Calculate path relative to config directory
arc_path = os.path.relpath(file_path,
config.CONFIG_DIR)
zipf.write(file_path, arc_path)
# Record backup in database
with get_db() as conn:
conn.execute(
'''
INSERT INTO backups (filename, created_at, status)
VALUES (?, CURRENT_TIMESTAMP, 'completed')
''', (backup_filename, ))
conn.commit()
logger.info(f'Backup created successfully: {backup_filename}')
return True, backup_filename
except Exception as e:
logger.error(f'Error creating backup: {str(e)}')
return False, str(e)
def restore_backup(self, backup_filename):
"""Restore from a backup file"""
backup_path = os.path.join(self.backup_dir, backup_filename)
if not os.path.exists(backup_path):
return False, "Backup file not found"
try:
# Create a temporary directory for extraction
temp_dir = os.path.join(self.backup_dir, 'temp_restore')
os.makedirs(temp_dir, exist_ok=True)
# Extract backup to temporary directory
with zipfile.ZipFile(backup_path, 'r') as zipf:
zipf.extractall(temp_dir)
# Move files to config directory
for item in os.listdir(temp_dir):
s = os.path.join(temp_dir, item)
d = os.path.join(config.CONFIG_DIR, item)
if os.path.isdir(s):
# Skip backups directory if it exists in the backup
if item == 'backups':
continue
shutil.rmtree(d, ignore_errors=True)
shutil.copytree(s, d, dirs_exist_ok=True)
else:
shutil.copy2(s, d)
# Clean up temporary directory
shutil.rmtree(temp_dir)
logger.info(f'Backup restored successfully: {backup_filename}')
return True, "Backup restored successfully"
except Exception as e:
logger.error(f'Error restoring backup: {str(e)}')
return False, str(e)
def cleanup_old_backups(self):
"""Remove backups older than retention period"""
try:
cutoff_date = datetime.now() - timedelta(days=self.retention_days)
with get_db() as conn:
# Get list of old backups
old_backups = conn.execute(
'''
SELECT filename FROM backups
WHERE created_at < ?
''', (cutoff_date.isoformat(), )).fetchall()
# Remove old backup files and database entries
for backup in old_backups:
backup_path = os.path.join(self.backup_dir,
backup['filename'])
if os.path.exists(backup_path):
os.remove(backup_path)
conn.execute('DELETE FROM backups WHERE filename = ?',
(backup['filename'], ))
conn.commit()
logger.info('Old backups cleaned up successfully')
return True, "Cleanup completed successfully"
except Exception as e:
logger.error(f'Error cleaning up old backups: {str(e)}')
return False, str(e)
def list_backups(self):
"""List all available backups"""
try:
with get_db() as conn:
backups = conn.execute('''
SELECT filename, created_at, status
FROM backups
ORDER BY created_at DESC
''').fetchall()
return [{
'filename': backup['filename'],
'created_at': backup['created_at'],
'status': backup['status']
} for backup in backups]
except Exception as e:
logger.error(f'Error listing backups: {str(e)}')
return []
def restore_backup_from_file(self, file_path):
"""Restore from a backup file path"""
try:
# Create a temporary directory for extraction
with tempfile.TemporaryDirectory() as temp_dir:
# Extract backup to temporary directory
with zipfile.ZipFile(file_path, 'r') as zipf:
zipf.extractall(temp_dir)
# Move files to config directory
for item in os.listdir(temp_dir):
s = os.path.join(temp_dir, item)
d = os.path.join(config.CONFIG_DIR, item)
if os.path.isdir(s):
# Skip backups directory if it exists in the backup
if item == 'backups':
continue
shutil.rmtree(d, ignore_errors=True)
shutil.copytree(s, d, dirs_exist_ok=True)
else:
shutil.copy2(s, d)
logger.info(f'Backup imported and restored successfully')
return True, "Backup imported and restored successfully"
except Exception as e:
logger.error(f'Error importing and restoring backup: {str(e)}')
return False, str(e)
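
BackupManager can also be driven directly, outside the scheduler, for a one-off backup plus cleanup; a sketch assuming the same package layout and an initialised config/database:

from app.task.backup import BackupManager

manager = BackupManager()

ok, result = manager.create_backup()
print("created:" if ok else "failed:", result)

for backup in manager.list_backups():
    print(backup["filename"], backup["created_at"], backup["status"])

manager.cleanup_old_backups()  # drops anything past the 30-day retention window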

Some files were not shown because too many files have changed in this diff.