chore(setup): remove existing codebase

Author: Sam Chau
Date:   2025-10-18 00:09:35 +10:30
Parent: 3a0deb16fa
Commit: 0622046e53

257 changed files with 0 additions and 40058 deletions

Binary file not shown (image, 863 KiB).


@@ -1 +0,0 @@
* @santiagosayshey


@@ -1,57 +0,0 @@
name: Build Beta Docker Image

on:
    push:
        branches:
            - dev
    pull_request:
        branches:
            - dev

jobs:
    build:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout code
              uses: actions/checkout@v4

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3
              with:
                  platforms: linux/amd64,linux/arm64/v8

            - name: Set up Node.js
              uses: actions/setup-node@v4
              with:
                  node-version: '18'

            - name: Build frontend
              working-directory: ./frontend
              run: |
                  npm ci
                  npm run build

            - name: Prepare dist directory
              run: |
                  mkdir -p dist/backend dist/static
                  cp -r frontend/dist/* dist/static/
                  cp -r backend/* dist/backend/
                  cp backend/requirements.txt dist/

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Login to Docker Hub
              if: github.event_name != 'pull_request'
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            - name: Build and push
              uses: docker/build-push-action@v5
              with:
                  platforms: linux/amd64,linux/arm64/v8
                  context: .
                  push: ${{ github.event_name != 'pull_request' }}
                  tags: santiagosayshey/profilarr:beta


@@ -1,11 +0,0 @@
name: Release Notification

on:
    release:
        types: [published]

jobs:
    call-notify-release:
        uses: Dictionarry-Hub/parrot/.github/workflows/notify-release.yml@v1
        secrets:
            PARROT_URL: ${{ secrets.PARROT_URL }}


@@ -1,59 +0,0 @@
name: Build Release Docker Image

on:
    push:
        tags:
            - 'v*'

jobs:
    build:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout code
              uses: actions/checkout@v4

            - name: Get tag
              id: tag
              run: echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3
              with:
                  platforms: linux/amd64,linux/arm64/v8

            - name: Set up Node.js
              uses: actions/setup-node@v4
              with:
                  node-version: '18'

            - name: Build frontend
              working-directory: ./frontend
              run: |
                  npm ci
                  npm run build

            - name: Prepare dist directory
              run: |
                  mkdir -p dist/backend dist/static
                  cp -r frontend/dist/* dist/static/
                  cp -r backend/* dist/backend/
                  cp backend/requirements.txt dist/

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3

            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            - name: Build and push
              uses: docker/build-push-action@v5
              with:
                  context: .
                  platforms: linux/amd64,linux/arm64/v8
                  push: true
                  tags: |
                      santiagosayshey/profilarr:latest
                      santiagosayshey/profilarr:${{ steps.tag.outputs.tag }}

.gitignore

@@ -1,26 +0,0 @@
# Node
node_modules/
dist/
# Python
__pycache__/
*.pyc
# Environment variables
.env
.env.prod
.env.1
.env.2
# OS files
.DS_Store
# build files
backend/app/static/
# Config data
config/
config-test/
radarr-config/
sonarr-config/
test-data/


@@ -1,12 +0,0 @@
{
"tabWidth": 4,
"useTabs": false,
"printWidth": 80,
"singleQuote": true,
"trailingComma": "none",
"bracketSpacing": false,
"jsxSingleQuote": true,
"arrowParens": "avoid",
"proseWrap": "preserve",
"bracketSameLine": true
}


@@ -1,25 +0,0 @@
# Profilarr Development Guide
## Commands
- **Frontend**: `cd frontend && npm run dev` - Start React dev server
- **Backend**: `cd backend && gunicorn -b 0.0.0.0:5000 app.main:app` - Run Flask server
- **Docker**: `docker compose up` - Start both frontend/backend in dev mode
- **Lint**: `cd frontend && npx eslint 'src/**/*.{js,jsx}'` - Check frontend code style
- **Build**: `cd frontend && npm run build` - Build for production
## Code Style
### Frontend (React)
- **Imports**: React first, third-party libs next, components, then utils
- **Components**: Functional components with hooks, PascalCase naming
- **Props**: PropTypes for validation, destructure props in component signature
- **State**: Group related state, useCallback for memoized handlers
- **JSX**: 4-space indentation, attributes on new lines for readability
- **Error Handling**: try/catch for async operations, toast notifications
### Backend (Python)
- **Imports**: Standard lib first, third-party next, local modules last
- **Naming**: snake_case for functions/vars/files, PascalCase for classes
- **Functions**: Single responsibility, descriptive docstrings
- **Error Handling**: Specific exception catches, return (success, message) tuples
- **Indentation**: 4 spaces consistently
- **Modularity**: Related functionality grouped in directories
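
The backend error-handling convention above (specific exception catches that produce a `(success, message)` tuple) is worth a quick illustration. This is only a sketch: the helper name is hypothetical, and it simply mirrors the pattern used by the deleted `ping.py` further down in this diff.

```python
import requests


def check_service_reachable(base_url, api_key):
    """Hypothetical helper illustrating the (success, message) convention."""
    try:
        response = requests.get(
            f"{base_url.rstrip('/')}/api/v3/system/status",
            headers={'X-Api-Key': api_key},
            timeout=10,
        )
        response.raise_for_status()
        return True, "Connection successful"
    except requests.exceptions.Timeout:
        # Specific exception catches, as the guide above prescribes
        return False, "Connection timed out"
    except requests.exceptions.ConnectionError:
        return False, "Failed to connect to service"
    except requests.exceptions.HTTPError as e:
        return False, f"Service returned an error: {e}"
```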

LICENSE

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.


@@ -1,188 +0,0 @@
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import logging
from .status.ping import ping_service
from .manager import (save_arr_config, get_all_arr_configs, get_arr_config,
                      update_arr_config, delete_arr_config)

logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)

bp = Blueprint('arr', __name__)


@bp.route('/ping', methods=['POST', 'OPTIONS'])
@cross_origin()
def ping():
    if request.method == 'OPTIONS':
        return jsonify({}), 200

    data = request.get_json()
    url = data.get('url')
    api_key = data.get('apiKey')
    arr_type = data.get('type')

    if not url or not api_key or not arr_type:
        return jsonify({
            'success': False,
            'error': 'URL, API key, and type are required'
        }), 400

    logger.error(f"Attempting to ping URL: {url} of type: {arr_type}")
    success, message = ping_service(url, api_key, arr_type)
    logger.error(f"Ping result - Success: {success}, Message: {message}")

    return jsonify({
        'success': success,
        'message': message
    }), 200 if success else 400


@bp.route('/config', methods=['POST', 'OPTIONS'])
@cross_origin()
def add_config():
    if request.method == 'OPTIONS':
        return jsonify({}), 200
    try:
        config = request.json

        # Validate sync_interval if schedule method
        if config.get('sync_method') == 'schedule':
            sync_interval = config.get('sync_interval', 0)
            if sync_interval < 60 or sync_interval > 43200:
                return jsonify({
                    'success': False,
                    'error': 'Sync interval must be between 60 minutes (1 hour) and 43200 minutes (1 month)'
                }), 400

        result = save_arr_config(config)

        # Handle the conflict case first
        if not result['success'] and result.get('status_code') == 409:
            return jsonify({'success': False, 'error': result['error']}), 409

        # Handle other failure cases
        if not result['success']:
            return jsonify(result), 400

        return jsonify(result), 200
    except Exception as e:
        logger.error(f"Error saving arr config: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 400


@bp.route('/config', methods=['GET', 'OPTIONS'])
@cross_origin()
def get_configs():
    if request.method == 'OPTIONS':
        return jsonify({}), 200
    try:
        configs = get_all_arr_configs()
        logger.debug(f"Retrieved {len(configs)} arr configs")
        return jsonify(configs), 200
    except Exception as e:
        logger.error(f"Error getting arr configs: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 400


@bp.route('/config/<int:id>', methods=['GET', 'PUT', 'DELETE', 'OPTIONS'])
@cross_origin()
def handle_config(id):
    if request.method == 'OPTIONS':
        return jsonify({}), 200
    try:
        if request.method == 'GET':
            config = get_arr_config(id)
            if config:
                logger.debug(f"Retrieved arr config: {id}")
                return jsonify({'success': True, 'data': config}), 200
            logger.debug(f"Arr config not found: {id}")
            return jsonify({
                'success': False,
                'error': 'Config not found'
            }), 404

        elif request.method == 'PUT':
            config = request.json

            # Validate sync_interval if schedule method
            if config.get('sync_method') == 'schedule':
                sync_interval = config.get('sync_interval', 0)
                if sync_interval < 60 or sync_interval > 43200:
                    return jsonify({
                        'success': False,
                        'error': 'Sync interval must be between 60 minutes (1 hour) and 43200 minutes (1 month)'
                    }), 400

            result = update_arr_config(id, config)

            # Handle the conflict case first
            if not result['success'] and result.get('status_code') == 409:
                return jsonify({
                    'success': False,
                    'error': result['error']
                }), 409

            # Handle other failure cases
            if not result['success']:
                logger.debug(f"Arr config not found for update: {id}")
                return jsonify({
                    'success': False,
                    'error': 'Config not found'
                }), 404

            logger.debug(f"Updated arr config: {id}")
            return jsonify({'success': True}), 200

        elif request.method == 'DELETE':
            success = delete_arr_config(id)
            if success:
                logger.debug(f"Deleted arr config: {id}")
                return jsonify({'success': True}), 200
            logger.debug(f"Arr config not found for deletion: {id}")
            return jsonify({
                'success': False,
                'error': 'Config not found'
            }), 404
    except Exception as e:
        logger.error(f"Error handling arr config {id}: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 400


@bp.route('/config/<int:id>/sync', methods=['POST', 'OPTIONS'])
@cross_origin()
def trigger_sync(id):
    if request.method == 'OPTIONS':
        return jsonify({}), 200
    try:
        # Get the config first
        config_result = get_arr_config(id)
        if not config_result.get('success'):
            logger.error(f"Config not found for sync: {id}")
            return jsonify({
                'success': False,
                'error': 'Configuration not found'
            }), 404

        config_data = config_result.get('data')
        if not config_data:
            logger.error(f"Invalid config data for sync: {id}")
            return jsonify({
                'success': False,
                'error': 'Invalid configuration data'
            }), 400

        # Run the import
        from ..importer import handle_pull_import
        handle_pull_import(id)

        logger.debug(f"Manual sync triggered for arr config: {id}")
        return jsonify({'success': True}), 200
    except Exception as e:
        logger.error(f"Error triggering sync for arr config {id}: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 400
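
For reference, the ping route above could be exercised from a small client script. This is only a sketch: the URL prefix under which the `arr` blueprint is registered is not part of this diff, so the base URL, prefix, and credentials below are placeholders.

```python
import requests

# Placeholder base URL and prefix; adjust to however the 'arr' blueprint is mounted.
PROFILARR_URL = "http://localhost:6868/api/arr"

payload = {
    "url": "http://localhost:7878",      # the Radarr/Sonarr instance to test
    "apiKey": "<your-arr-api-key>",
    "type": "radarr",
}

resp = requests.post(f"{PROFILARR_URL}/ping", json=payload, timeout=15)
# 200 with {'success': True, ...} on success, 400 with an error message otherwise
print(resp.status_code, resp.json())
```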


@@ -1,428 +0,0 @@
# arr/manager.py
from ..db import get_db
import json
import logging

# Import our task-utils that handle DB insertion for scheduled tasks
from .task_utils import (create_import_task_for_arr_config,
                         update_import_task_for_arr_config,
                         delete_import_task_for_arr_config)
from ..task.tasks import TaskScheduler

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


def save_arr_config(config):
    """
    Create a new arr_config row, then create a corresponding scheduled task
    (if sync_method != manual). Store the newly created task's ID in
    arr_config.import_task_id.
    """
    with get_db() as conn:
        cursor = conn.cursor()
        try:
            # Check if name already exists
            existing = cursor.execute(
                'SELECT id FROM arr_config WHERE name = ?',
                (config['name'], )).fetchone()
            if existing:
                logger.warning(
                    f"[save_arr_config] Attempted to create duplicate config name: {config['name']}"
                )
                return {
                    'success': False,
                    'error': 'Configuration with this name already exists',
                    'status_code': 409
                }

            # 1) Insert the arr_config row
            logger.debug(
                f"[save_arr_config] Attempting to create new arr_config with name={config['name']} sync_method={config.get('sync_method')}"
            )
            cursor.execute(
                '''
                INSERT INTO arr_config (
                    name, type, tags, arr_server, api_key,
                    data_to_sync, last_sync_time, sync_percentage,
                    sync_method, sync_interval, import_as_unique,
                    import_task_id
                )
                VALUES (?, ?, ?, ?, ?, ?, NULL, 0, ?, ?, ?, NULL)
                ''', (
                    config['name'],
                    config['type'],
                    json.dumps(config.get('tags', [])),
                    config['arrServer'],
                    config['apiKey'],
                    json.dumps(config.get('data_to_sync', {})),
                    config.get('sync_method', 'manual'),
                    config.get('sync_interval', 0),
                    config.get('import_as_unique', False),
                ))
            conn.commit()
            new_config_id = cursor.lastrowid
            logger.info(
                f"[save_arr_config] Created new arr_config row #{new_config_id} for '{config['name']}'"
            )

            # 2) Create a scheduled task row if needed
            sync_method = config.get('sync_method', 'manual')
            sync_interval = config.get('sync_interval', 0)
            task_id = create_import_task_for_arr_config(
                config_id=new_config_id,
                config_name=config['name'],
                sync_method=sync_method,
                sync_interval=sync_interval)

            # 3) Update arr_config.import_task_id if a task was created
            if task_id:
                logger.debug(
                    f"[save_arr_config] Updating arr_config #{new_config_id} with import_task_id={task_id}"
                )
                cursor.execute(
                    'UPDATE arr_config SET import_task_id = ? WHERE id = ?',
                    (task_id, new_config_id))
                conn.commit()

            scheduler = TaskScheduler.get_instance()
            if scheduler:
                logger.debug("[save_arr_config] Reloading tasks from DB...")
                scheduler.load_tasks_from_db()

            return {'success': True, 'id': new_config_id}
        except Exception as e:
            logger.error(
                f"[save_arr_config] Error saving arr config: {str(e)}")
            return {'success': False, 'error': str(e)}


def update_arr_config(id, config):
    """
    Update an existing arr_config row, then create/update/remove the
    corresponding scheduled task as needed.
    """
    with get_db() as conn:
        cursor = conn.cursor()
        try:
            # Check if name already exists (excluding current config)
            existing = cursor.execute(
                'SELECT id FROM arr_config WHERE name = ? AND id != ?',
                (config['name'], id)).fetchone()
            if existing:
                logger.warning(
                    f"[update_arr_config] Attempted to update config #{id} to duplicate name: {config['name']}"
                )
                return {
                    'success': False,
                    'error': 'Configuration with this name already exists',
                    'status_code': 409
                }

            # 1) Grab existing row so we know the existing import_task_id
            existing_row = cursor.execute(
                'SELECT * FROM arr_config WHERE id = ?', (id, )).fetchone()
            if not existing_row:
                logger.debug(
                    f"[update_arr_config] No arr_config row found with id={id}"
                )
                return {'success': False, 'error': 'Configuration not found'}
            existing_task_id = existing_row['import_task_id']

            # 2) Update the arr_config row itself
            logger.debug(
                f"[update_arr_config] Updating arr_config #{id} name={config['name']} sync_method={config.get('sync_method')}"
            )
            cursor.execute(
                '''
                UPDATE arr_config
                SET name = ?,
                    type = ?,
                    tags = ?,
                    arr_server = ?,
                    api_key = ?,
                    data_to_sync = ?,
                    sync_method = ?,
                    sync_interval = ?,
                    import_as_unique = ?
                WHERE id = ?
                ''',
                (config['name'], config['type'],
                 json.dumps(config.get('tags', [])), config['arrServer'],
                 config['apiKey'], json.dumps(config.get('data_to_sync', {})),
                 config.get('sync_method', 'manual'),
                 config.get('sync_interval', 0),
                 config.get('import_as_unique', False), id))
            conn.commit()
            if cursor.rowcount == 0:
                logger.debug(
                    f"[update_arr_config] arr_config #{id} not found for update"
                )
                return {'success': False, 'error': 'Configuration not found'}
            logger.info(f"[update_arr_config] Updated arr_config row #{id}")

            # 3) Create/Update/Remove the scheduled task row
            new_task_id = update_import_task_for_arr_config(
                config_id=id,
                config_name=config['name'],
                sync_method=config.get('sync_method', 'manual'),
                sync_interval=config.get('sync_interval', 0),
                existing_task_id=existing_task_id)

            # 4) Store new_task_id in arr_config.import_task_id
            logger.debug(
                f"[update_arr_config] Setting arr_config #{id} import_task_id to {new_task_id}"
            )
            cursor.execute(
                'UPDATE arr_config SET import_task_id = ? WHERE id = ?',
                (new_task_id, id))
            conn.commit()

            scheduler = TaskScheduler.get_instance()
            if scheduler:
                logger.debug("[update_arr_config] Reloading tasks from DB...")
                scheduler.load_tasks_from_db()

            return {'success': True}
        except Exception as e:
            logger.error(
                f"[update_arr_config] Error updating arr config: {str(e)}")
            return {'success': False, 'error': str(e)}


def delete_arr_config(id):
    """
    Delete an arr_config row, plus remove its scheduled_task if any.
    """
    with get_db() as conn:
        cursor = conn.cursor()
        try:
            # 1) Fetch the row so we know which task to remove
            existing_row = cursor.execute(
                'SELECT * FROM arr_config WHERE id = ?', (id, )).fetchone()
            if not existing_row:
                logger.debug(
                    f"[delete_arr_config] No arr_config row found with id={id}"
                )
                return {'success': False, 'error': 'Configuration not found'}
            existing_task_id = existing_row['import_task_id']

            # 2) Delete the arr_config
            logger.debug(f"[delete_arr_config] Removing arr_config #{id}")
            cursor.execute('DELETE FROM arr_config WHERE id = ?', (id, ))
            conn.commit()
            if cursor.rowcount == 0:
                logger.debug(
                    f"[delete_arr_config] arr_config #{id} not found for deletion"
                )
                return {'success': False, 'error': 'Configuration not found'}
            logger.info(f"[delete_arr_config] Deleted arr_config #{id}")

            # 3) If there's a scheduled task, remove it
            if existing_task_id:
                delete_import_task_for_arr_config(existing_task_id)

            scheduler = TaskScheduler.get_instance()
            if scheduler:
                logger.debug("[delete_arr_config] Reloading tasks from DB...")
                scheduler.load_tasks_from_db()

            return {'success': True}
        except Exception as e:
            logger.error(
                f"[delete_arr_config] Error deleting arr config: {str(e)}")
            return {'success': False, 'error': str(e)}


def get_all_arr_configs():
    with get_db() as conn:
        cursor = conn.execute('SELECT * FROM arr_config')
        rows = cursor.fetchall()
        try:
            configs = []
            for row in rows:
                configs.append({
                    'id': row['id'],
                    'name': row['name'],
                    'type': row['type'],
                    'tags': json.loads(row['tags']) if row['tags'] else [],
                    'arrServer': row['arr_server'],
                    'apiKey': row['api_key'],
                    'data_to_sync': (json.loads(row['data_to_sync'])
                                     if row['data_to_sync'] else {}),
                    'last_sync_time': row['last_sync_time'],
                    'sync_percentage': row['sync_percentage'],
                    'sync_method': row['sync_method'],
                    'sync_interval': row['sync_interval'],
                    'import_as_unique': bool(row['import_as_unique']),
                    'import_task_id': row['import_task_id']
                })
            return {'success': True, 'data': configs}
        except Exception as e:
            logger.error(f"[get_all_arr_configs] Error: {str(e)}")
            return {'success': False, 'error': str(e)}


def get_arr_config(id):
    with get_db() as conn:
        cursor = conn.execute('SELECT * FROM arr_config WHERE id = ?', (id, ))
        row = cursor.fetchone()
        try:
            if row:
                return {
                    'success': True,
                    'data': {
                        'id': row['id'],
                        'name': row['name'],
                        'type': row['type'],
                        'tags': json.loads(row['tags']) if row['tags'] else [],
                        'arrServer': row['arr_server'],
                        'apiKey': row['api_key'],
                        'data_to_sync': (json.loads(row['data_to_sync'])
                                         if row['data_to_sync'] else {}),
                        'last_sync_time': row['last_sync_time'],
                        'sync_percentage': row['sync_percentage'],
                        # Keep these as-is
                        'sync_method': row['sync_method'],
                        'sync_interval': row['sync_interval'],
                        'import_as_unique': bool(row['import_as_unique']),
                        'import_task_id': row['import_task_id']
                    }
                }
            logger.debug(
                f"[get_arr_config] No arr_config row found with id={id}")
            return {'success': False, 'error': 'Configuration not found'}
        except Exception as e:
            logger.error(f"[get_arr_config] Error: {str(e)}")
            return {'success': False, 'error': str(e)}


def get_scheduled_configs():
    """
    Return all arr_configs where sync_method='schedule'.
    Potentially used if you want to see scheduled ones explicitly.
    """
    with get_db() as conn:
        cursor = conn.execute('SELECT * FROM arr_config WHERE sync_method = ?',
                              ('schedule', ))
        rows = cursor.fetchall()
        try:
            configs = []
            for row in rows:
                configs.append({
                    'id': row['id'],
                    'name': row['name'],
                    'sync_interval': row['sync_interval'],
                    'import_task_id': row['import_task_id']
                })
            return {'success': True, 'data': configs}
        except Exception as e:
            logger.error(f"[get_scheduled_configs] Error: {str(e)}")
            return {'success': False, 'error': str(e)}


def get_pull_configs():
    with get_db() as conn:
        rows = conn.execute(
            'SELECT * FROM arr_config WHERE sync_method = "pull"').fetchall()
        results = []
        for row in rows:
            results.append({
                'id': row['id'],
                'name': row['name'],
                'type': row['type'],
                'tags': json.loads(row['tags']) if row['tags'] else [],
                'arrServer': row['arr_server'],
                'apiKey': row['api_key'],
                'data_to_sync': (json.loads(row['data_to_sync'])
                                 if row['data_to_sync'] else {}),
                'last_sync_time': row['last_sync_time'],
                'sync_percentage': row['sync_percentage'],
                'sync_method': row['sync_method'],
                'sync_interval': row['sync_interval'],
                'import_as_unique': bool(row['import_as_unique']),
                'import_task_id': row['import_task_id']
            })
        return results


def check_active_sync_configs():
    """
    Check if there are any ARR configurations with non-manual sync methods.
    Returns (has_active_configs, details) tuple.
    """
    with get_db() as conn:
        cursor = conn.execute('''
            SELECT id, name, sync_method, data_to_sync
            FROM arr_config
            WHERE sync_method != 'manual'
        ''')
        active_configs = cursor.fetchall()

        if not active_configs:
            return False, None

        details = []
        for config in active_configs:
            data_to_sync = json.loads(
                config['data_to_sync'] if config['data_to_sync'] else '{}')
            if data_to_sync.get('profiles') or data_to_sync.get('customFormats'):
                details.append({
                    'id': config['id'],
                    'name': config['name'],
                    'sync_method': config['sync_method'],
                    'data': data_to_sync
                })

        return bool(details), details


@@ -1,78 +0,0 @@
# app/arr/status/ping.py
import socket
import requests
import logging
logger = logging.getLogger(__name__)
REQUIRED_VERSIONS = {'radarr': '5.10.4', 'sonarr': '4.0.10'}
def check_version_compatibility(installed_version, required_version):
"""
Check if installed version meets minimum required version for Radarr/Sonarr.
"""
installed_parts = [int(x) for x in installed_version.split('.')]
required_parts = [int(x) for x in required_version.split('.')]
# Only compare the parts we care about (first 3 numbers for Radarr/Sonarr)
for installed, required in zip(installed_parts[:3], required_parts[:3]):
if installed < required:
return False
if installed > required:
return True
return True
def ping_service(url, api_key, arr_type):
"""
Ping an Arr service and verify its type and version
"""
try:
base_url = url.rstrip('/')
headers = {'X-Api-Key': api_key}
logger.warning(f"Attempting to connect to {base_url} for {arr_type}")
response = requests.get(f"{base_url}/api/v3/system/status",
headers=headers,
timeout=10)
logger.warning(f"Response status: {response.status_code}")
logger.warning(f"Response content: {response.text}")
if response.status_code != 200:
return False, f"Service returned status code: {response.status_code}"
data = response.json()
logger.warning(f"Parsed response data: {data}")
# First check app type
app_name = data.get('appName', '').lower()
version = data.get('version')
logger.warning(f"Found app: {app_name} version: {version}")
# Check app type
if arr_type == 'radarr' and app_name != 'radarr':
return False, f"Expected Radarr but found {app_name}"
elif arr_type == 'sonarr' and app_name != 'sonarr':
return False, f"Expected Sonarr but found {app_name}"
# Check version
if not version:
return False, "Could not determine application version"
required_version = REQUIRED_VERSIONS.get(arr_type)
if not check_version_compatibility(version, required_version):
return False, f"{app_name.title()} version {version} is not supported. Minimum required version is {required_version}"
return True, "Connection successful and application type and version verified"
except requests.exceptions.Timeout:
return False, "Connection timed out"
except requests.exceptions.ConnectionError:
return False, "Failed to connect to service"
except Exception as e:
logger.error(f"Error pinging service: {str(e)}")
return False, f"Error: {str(e)}"

View File

@@ -1,140 +0,0 @@
# arr/task_utils.py
import logging
from ..db import get_db
logger = logging.getLogger(__name__)
def create_import_task_for_arr_config(config_id, config_name, sync_method,
sync_interval):
"""
Create a scheduled task for the given ARR config (if needed).
Returns the newly-created task id or None.
"""
if sync_method == 'manual':
logger.debug(
f"[ARR Tasks] No import task created for {config_name} because sync_method=manual"
)
return None
with get_db() as conn:
cursor = conn.cursor()
# pull: not scheduled; on-demand during git pull
if sync_method == 'pull':
logger.debug(
f"[ARR Tasks] No scheduled task created for {config_name} because sync_method=pull (runs on git pull)"
)
return None
# schedule: create an interval-based task
task_type = 'ImportSchedule'
interval_minutes = sync_interval or 0
# Insert into scheduled_tasks table
cursor.execute(
'''
INSERT INTO scheduled_tasks (name, type, interval_minutes, status)
VALUES (?, ?, ?, ?)
''', (f"Import for ARR #{config_id} - {config_name}", task_type,
interval_minutes, 'pending'))
new_task_id = cursor.lastrowid
conn.commit()
logger.debug(
f"[ARR Tasks] Created new {task_type} task with ID {new_task_id} for ARR config {config_id}"
)
return new_task_id
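# --- Illustrative sketch (not part of the original module) ---
# Expected behaviour per sync_method (config values are hypothetical):
#   'manual'   -> returns None, nothing is scheduled
#   'pull'     -> returns None, the import runs during git pull instead
#   'schedule' -> inserts an ImportSchedule row and returns its id
def _example_schedule_import():
    return create_import_task_for_arr_config(config_id=1,
                                              config_name='Radarr 4K',
                                              sync_method='schedule',
                                              sync_interval=60)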
def update_import_task_for_arr_config(config_id, config_name, sync_method,
sync_interval, existing_task_id):
"""
Update the existing scheduled task for the given ARR config (if needed).
If the sync_method changes from 'pull' or 'manual' to 'schedule', we create or update.
If it changes from 'schedule' to 'pull' (or 'manual'), we delete the old scheduled row.
"""
with get_db() as conn:
cursor = conn.cursor()
# If user changed to manual or pull => remove the old row (if any)
if sync_method in ['manual', 'pull']:
if existing_task_id:
logger.debug(
f"[update_import_task_for_arr_config] Removing old task {existing_task_id} because sync_method={sync_method}"
)
cursor.execute('DELETE FROM scheduled_tasks WHERE id = ?',
(existing_task_id, ))
deleted_count = cursor.rowcount
conn.commit()
if deleted_count:
logger.info(
f"[update_import_task_for_arr_config] Deleted old task {existing_task_id} for ARR #{config_id}"
)
# For 'pull' or 'manual', we do NOT create a new row in `scheduled_tasks`
return None
# Otherwise, sync_method='schedule' => create or update
# (We keep the same logic as before if user wants a scheduled import)
task_type = 'ImportSchedule'
interval_minutes = sync_interval or 0
# If there's NO existing task, create a new one
if not existing_task_id:
logger.debug(
f"[update_import_task_for_arr_config] No existing task for ARR #{config_id}; creating new schedule."
)
return create_import_task_for_arr_config(config_id, config_name,
sync_method,
sync_interval)
# If we DO have an existing scheduled task => update it
logger.debug(
f"[update_import_task_for_arr_config] Updating existing task {existing_task_id} for ARR #{config_id}, interval={interval_minutes}"
)
cursor.execute(
'''
UPDATE scheduled_tasks
SET name = ?, type = ?, interval_minutes = ?
WHERE id = ?
''', (
f"Import for ARR #{config_id} - {config_name}",
task_type,
interval_minutes,
existing_task_id,
))
updated_count = cursor.rowcount
conn.commit()
if updated_count == 0:
logger.warning(
f"[update_import_task_for_arr_config] Could not find scheduled task {existing_task_id} for ARR #{config_id}, creating new."
)
return create_import_task_for_arr_config(config_id, config_name,
sync_method,
sync_interval)
logger.debug(
f"[update_import_task_for_arr_config] Successfully updated scheduled task {existing_task_id} for ARR #{config_id}"
)
return existing_task_id
def delete_import_task_for_arr_config(task_id):
"""
Delete the import task if it exists.
"""
if not task_id:
return
with get_db() as conn:
cursor = conn.cursor()
cursor.execute('DELETE FROM scheduled_tasks WHERE id = ?', (task_id, ))
conn.commit()
if cursor.rowcount > 0:
logger.debug(f"[ARR Tasks] Deleted import task with ID {task_id}")
else:
logger.debug(
f"[ARR Tasks] No import task found to delete with ID {task_id}"
)

View File

@@ -1,118 +0,0 @@
# backend/app/auth/__init__.py
from flask import Blueprint, jsonify, request, session
from werkzeug.security import generate_password_hash, check_password_hash
import secrets
import logging
from ..db import get_db
logger = logging.getLogger(__name__)
bp = Blueprint('auth', __name__)
@bp.route('/setup', methods=['GET', 'POST'])
def setup():
db = get_db()
# Handle GET request to check if setup is needed
if request.method == 'GET':
if db.execute('SELECT 1 FROM auth').fetchone():
return jsonify({'error': 'Auth already configured'}), 400
return jsonify({'needs_setup': True}), 200
# Handle POST request for actual setup
if db.execute('SELECT 1 FROM auth').fetchone():
logger.warning('Failed setup attempt - auth already configured')
return jsonify({'error': 'Auth already configured'}), 400
data = request.get_json()
username = data.get('username', 'admin')
password = data.get('password')
if not password:
logger.error('Setup failed - no password provided')
return jsonify({'error': 'Password is required'}), 400
api_key = secrets.token_urlsafe(32)
password_hash = generate_password_hash(password)
session_id = secrets.token_urlsafe(32) # Generate a new session ID
try:
db.execute(
'INSERT INTO auth (username, password_hash, api_key, session_id) VALUES (?, ?, ?, ?)',
(username, password_hash, api_key, session_id))
db.commit()
logger.info('Initial auth setup completed successfully')
# Set up session after successful creation
session['authenticated'] = True
session['session_id'] = session_id
session.permanent = True
return jsonify({
'message': 'Auth configured successfully',
'username': username,
'api_key': api_key,
'authenticated': True
})
except Exception as e:
logger.error(f'Setup failed - database error: {str(e)}')
return jsonify({'error': 'Failed to setup authentication'}), 500
@bp.route('/authenticate', methods=['POST'])
def authenticate():
db = get_db()
data = request.get_json()
username = data.get('username')
password = data.get('password')
ip_address = request.remote_addr
# Check recent failed attempts
recent_attempts = db.execute(
'''
SELECT COUNT(*) as count FROM failed_attempts
WHERE ip_address = ?
AND attempt_time > datetime('now', '-15 minutes')
''', (ip_address, )).fetchone()['count']
if recent_attempts >= 5:
logger.warning(f'Too many failed attempts from IP: {ip_address}')
return jsonify({'error':
'Too many failed attempts. Try again later.'}), 429
if not username or not password:
logger.warning('Authentication attempt with missing credentials')
return jsonify({'error': 'Username and password required'}), 400
user = db.execute('SELECT * FROM auth WHERE username = ?',
(username, )).fetchone()
if user and check_password_hash(user['password_hash'], password):
# Generate a new session ID
new_session_id = secrets.token_urlsafe(32)
db.execute('UPDATE auth SET session_id = ? WHERE username = ?',
(new_session_id, username))
db.commit()
# Set up session
session['authenticated'] = True
session[
'session_id'] = new_session_id # Store session ID in the session
session.permanent = True
# Clear failed attempts on success
db.execute('DELETE FROM failed_attempts WHERE ip_address = ?',
(ip_address, ))
db.commit()
logger.info(f'Successful authentication for user: {username}')
return jsonify({'authenticated': True})
# Record failed attempt
db.execute('INSERT INTO failed_attempts (ip_address) VALUES (?)',
(ip_address, ))
db.commit()
logger.warning(f'Failed authentication attempt for user: {username}')
return jsonify({'error': 'Invalid credentials'}), 401
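# --- Illustrative client sketch (not part of the original module) ---
# Intended call order for this blueprint: one-time /setup, then /authenticate
# with the chosen credentials. The base URL and the '/api/auth' prefix are
# assumptions; the blueprint is registered elsewhere in the application.
def _example_client_flow(base='http://localhost:6868/api/auth'):
    import requests
    client = requests.Session()
    r = client.post(f'{base}/setup',
                    json={'username': 'admin', 'password': 'secret'})
    api_key = r.json().get('api_key')  # returned once, at setup time
    r = client.post(f'{base}/authenticate',
                    json={'username': 'admin', 'password': 'secret'})
    return api_key, r.json().get('authenticated')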

View File

@@ -1,179 +0,0 @@
# app/backup/__init__.py
from flask import Blueprint, request, jsonify, send_file
import logging
from ..task.backup.backup import BackupManager
from ..db import get_db
import os
from datetime import datetime
import tempfile
import zipfile
logger = logging.getLogger(__name__)
bp = Blueprint('backup', __name__)
@bp.route('', methods=['GET'])
def list_backups():
"""Get list of all backups"""
try:
manager = BackupManager()
backups = manager.list_backups()
# Add file size and last modified time to each backup
for backup in backups:
file_path = os.path.join(manager.backup_dir, backup['filename'])
if os.path.exists(file_path):
backup['size'] = os.path.getsize(file_path)
backup['created_at'] = datetime.fromtimestamp(
os.path.getmtime(file_path)).isoformat()
else:
backup['size'] = None
backup['created_at'] = None
return jsonify(backups), 200
except Exception as e:
logger.error(f'Error listing backups: {str(e)}')
return jsonify({'error': 'Failed to list backups'}), 500
@bp.route('', methods=['POST'])
def create_backup():
"""Create a new backup manually"""
try:
manager = BackupManager()
success, result = manager.create_backup()
if success:
return jsonify({
'message': 'Backup created successfully',
'filename': result
}), 201
else:
return jsonify({'error':
f'Failed to create backup: {result}'}), 500
except Exception as e:
logger.error(f'Error creating backup: {str(e)}')
return jsonify({'error': 'Failed to create backup'}), 500
@bp.route('/<path:filename>', methods=['GET'])
def download_backup(filename):
"""Download a specific backup file"""
try:
manager = BackupManager()
file_path = os.path.join(manager.backup_dir, filename)
if not os.path.exists(file_path):
return jsonify({'error': 'Backup file not found'}), 404
return send_file(file_path,
mimetype='application/zip',
as_attachment=True,
download_name=filename)
except Exception as e:
logger.error(f'Error downloading backup: {str(e)}')
return jsonify({'error': 'Failed to download backup'}), 500
@bp.route('/<path:filename>/restore', methods=['POST'])
def restore_backup(filename):
"""Restore from a specific backup"""
try:
manager = BackupManager()
success, message = manager.restore_backup(filename)
if success:
return jsonify({'message': 'Backup restored successfully'}), 200
else:
return jsonify({'error':
f'Failed to restore backup: {message}'}), 500
except Exception as e:
logger.error(f'Error restoring backup: {str(e)}')
return jsonify({'error': 'Failed to restore backup'}), 500
@bp.route('/<path:filename>', methods=['DELETE'])
def delete_backup(filename):
"""Delete a specific backup"""
try:
manager = BackupManager()
file_path = os.path.join(manager.backup_dir, filename)
if not os.path.exists(file_path):
return jsonify({'error': 'Backup file not found'}), 404
# Remove the file
os.remove(file_path)
# Remove from database
with get_db() as conn:
conn.execute('DELETE FROM backups WHERE filename = ?',
(filename, ))
conn.commit()
return jsonify({'message': 'Backup deleted successfully'}), 200
except Exception as e:
logger.error(f'Error deleting backup: {str(e)}')
return jsonify({'error': 'Failed to delete backup'}), 500
@bp.route('/import', methods=['POST'])
def import_backup():
"""Import and restore from an uploaded backup file"""
if 'file' not in request.files:
return jsonify({'error': 'No file part in the request'}), 400
file = request.files['file']
if file.filename == '':
return jsonify({'error': 'No file selected for uploading'}), 400
if not file.filename.endswith('.zip'):
return jsonify({'error': 'File must be a zip archive'}), 400
try:
# Create a temporary file to store the upload
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
file.save(temp_file.name)
# Validate the zip file
validation_result = is_valid_backup_zip(temp_file.name)
if not validation_result[0]:
os.unlink(temp_file.name)
return jsonify({'error': validation_result[1]}), 400
# Use the BackupManager to restore from this file
manager = BackupManager()
success, message = manager.restore_backup_from_file(temp_file.name)
# Delete the temporary file
os.unlink(temp_file.name)
if success:
return jsonify(
{'message': 'Backup imported and restored successfully'}), 200
else:
return jsonify(
{'error':
f'Failed to import and restore backup: {message}'}), 500
except Exception as e:
logger.error(f'Error importing and restoring backup: {str(e)}')
return jsonify({'error': 'Failed to import and restore backup'}), 500
def is_valid_backup_zip(file_path):
"""Check if the zip file is a valid backup"""
try:
if os.path.getsize(file_path) > 100 * 1024 * 1024: # 100 MB
return False, "Backup file is too large (max 100 MB)"
with zipfile.ZipFile(file_path, 'r') as zipf:
file_list = zipf.namelist()
if 'profilarr.db' not in file_list:
return False, "Backup file does not contain profilarr.db"
return True, "Valid backup file"
except zipfile.BadZipFile:
return False, "Invalid zip file"

View File

@@ -1,12 +0,0 @@
# app/compile/__init__.py
from .mappings import TargetApp, ValueResolver
from .format_compiler import (CustomFormat, FormatConverter, FormatProcessor,
compile_custom_format)
from .profile_compiler import (ProfileConverter, ProfileProcessor,
compile_quality_profile)
__all__ = [
'TargetApp', 'ValueResolver', 'CustomFormat', 'FormatConverter',
'FormatProcessor', 'compile_custom_format', 'ProfileConverter',
'ProfileProcessor', 'compile_quality_profile'
]

View File

@@ -1,224 +0,0 @@
# app/compile/format_compiler.py
"""Format compilation module for converting custom formats"""
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
import json
import yaml
from .mappings import TargetApp, ValueResolver
@dataclass
class Specification:
"""Data class for format specifications"""
name: str
implementation: str
negate: bool = False
required: bool = False
    fields: Optional[List[Dict[str, str]]] = None
def __post_init__(self):
if self.fields is None:
self.fields = []
@dataclass
class CustomFormat:
"""Data class for custom format definitions"""
name: str
description: str
tags: List[str]
conditions: List[Dict]
tests: List[Dict]
@dataclass
class ConvertedFormat:
"""Data class for converted format output"""
name: str
specifications: List[Specification]
class FormatConverter:
"""Converts between different format types"""
def __init__(self, patterns: Dict[str, str]):
self.patterns = patterns
def _create_specification(
self, condition: Dict,
target_app: TargetApp) -> Optional[Specification]:
condition_type = condition['type']
if condition_type in ['release_title', 'release_group', 'edition']:
pattern_name = condition['pattern']
pattern = self.patterns.get(pattern_name)
if not pattern:
return None
implementation = ('ReleaseTitleSpecification'
if condition_type == 'release_title' else
'ReleaseGroupSpecification' if condition_type
== 'release_group' else 'EditionSpecification')
fields = [{'name': 'value', 'value': pattern}]
elif condition_type == 'source':
implementation = 'SourceSpecification'
value = ValueResolver.get_source(condition['source'], target_app)
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'resolution':
implementation = 'ResolutionSpecification'
value = ValueResolver.get_resolution(condition['resolution'])
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'indexer_flag':
implementation = 'IndexerFlagSpecification'
value = ValueResolver.get_indexer_flag(condition.get('flag', ''),
target_app)
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'quality_modifier':
if target_app == TargetApp.SONARR:
return None
implementation = 'QualityModifierSpecification'
value = ValueResolver.get_quality_modifier(
condition['qualityModifier'])
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'size':
implementation = 'SizeSpecification'
min_size = condition.get('minSize')
max_size = condition.get('maxSize')
fields = [{
'name': 'min',
'value': min_size
}, {
'name': 'max',
'value': max_size
}]
elif condition_type == 'year':
implementation = 'YearSpecification'
min_year = condition.get('minYear')
max_year = condition.get('maxYear')
fields = [{
'name': 'min',
'value': min_year
}, {
'name': 'max',
'value': max_year
}]
elif condition_type == 'release_type':
if target_app == TargetApp.RADARR:
return None
implementation = 'ReleaseTypeSpecification'
value = ValueResolver.get_release_type(condition['releaseType'])
fields = [{'name': 'value', 'value': value}]
elif condition_type == 'language':
implementation = 'LanguageSpecification'
language_name = condition['language'].lower()
try:
language_data = ValueResolver.get_language(language_name,
target_app,
for_profile=False)
fields = [{'name': 'value', 'value': language_data['id']}]
if 'exceptLanguage' in condition:
except_value = condition['exceptLanguage']
fields.append({
'name': 'exceptLanguage',
'value': except_value
})
except Exception:
return None
else:
return None
return Specification(name=condition.get('name', ''),
implementation=implementation,
negate=condition.get('negate', False),
required=condition.get('required', False),
fields=fields)
def convert_format(self, custom_format: CustomFormat,
target_app: TargetApp) -> ConvertedFormat:
specifications = []
for condition in custom_format.conditions:
try:
spec = self._create_specification(condition, target_app)
if spec:
specifications.append(spec)
except Exception:
continue
return ConvertedFormat(name=custom_format.name,
specifications=specifications)
class FormatProcessor:
"""Main class for processing format files"""
def __init__(self, input_dir: Path, output_dir: Path, patterns_dir: Path):
self.input_dir = input_dir
self.output_dir = output_dir
self.patterns = self._load_patterns(patterns_dir)
self.converter = FormatConverter(self.patterns)
@staticmethod
def _load_patterns(patterns_dir: Path) -> Dict[str, str]:
patterns = {}
for file_path in patterns_dir.glob('*.yml'):
with file_path.open('r') as f:
pattern_data = yaml.safe_load(f)
patterns[pattern_data['name']] = pattern_data['pattern']
return patterns
def _load_custom_format(self, format_name: str) -> Optional[CustomFormat]:
format_path = self.input_dir / f"{format_name}.yml"
if not format_path.exists():
return None
with format_path.open('r') as f:
raw_data = yaml.safe_load(f)
return CustomFormat(**raw_data)
def process_format(self,
format_name: str,
target_app: TargetApp,
return_data: bool = False) -> Optional[ConvertedFormat]:
custom_format = self._load_custom_format(format_name)
if not custom_format:
return None
converted_format = self.converter.convert_format(
custom_format, target_app)
output_data = [{
'name':
converted_format.name,
'specifications':
[vars(spec) for spec in converted_format.specifications]
}]
if not return_data:
output_path = self.output_dir / f"{format_name}.json"
with output_path.open('w') as f:
json.dump(output_data, f, indent=2)
return converted_format
def compile_custom_format(format_data: Dict) -> List[Dict]:
custom_format = CustomFormat(**format_data)
patterns = {}
converter = FormatConverter(patterns)
converted = converter.convert_format(custom_format, TargetApp.RADARR)
output_data = [{
'name':
converted.name,
'specifications': [vars(spec) for spec in converted.specifications]
}]
return output_data
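# --- Illustrative sketch (not part of the original module) ---
# Minimal input for compile_custom_format(). It builds the converter with an
# empty pattern map, so pattern-based condition types (release_title,
# release_group, edition) would be skipped; a resolution condition needs no
# pattern. All values below are hypothetical.
_EXAMPLE_FORMAT = {
    'name': '1080p Only',
    'description': 'Matches 1080p releases',
    'tags': [],
    'tests': [],
    'conditions': [{
        'name': 'Is 1080p',
        'type': 'resolution',
        'resolution': '1080p',
        'negate': False,
        'required': True
    }]
}
# compile_custom_format(_EXAMPLE_FORMAT) returns a single-element list such as:
# [{'name': '1080p Only',
#   'specifications': [{'name': 'Is 1080p',
#                       'implementation': 'ResolutionSpecification',
#                       'negate': False, 'required': True,
#                       'fields': [{'name': 'value', 'value': 1080}]}]}]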

View File

@@ -1,990 +0,0 @@
# app/compile/mappings.py
"""Centralized constants and mappings for arr applications"""
from enum import Enum, auto
from typing import Dict, Any
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TargetApp(Enum):
"""Enum for target application types"""
RADARR = auto()
SONARR = auto()
class IndexerFlags:
"""Indexer flag mappings for both applications"""
RADARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 32,
'scene': 128,
'freeleech_75': 256,
'freeleech_25': 512,
'nuked': 2048,
'ptp_golden': 8,
'ptp_approved': 16
}
SONARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 8,
'scene': 16,
'freeleech_75': 32,
'freeleech_25': 64,
'nuked': 128
}
class Sources:
"""Source mappings for both applications"""
RADARR = {
'cam': 1,
'telesync': 2,
'telecine': 3,
'workprint': 4,
'dvd': 5,
'tv': 6,
'web_dl': 7,
'webrip': 8,
'bluray': 9
}
SONARR = {
'television': 1,
'television_raw': 2,
'web_dl': 3,
'webrip': 4,
'dvd': 5,
'bluray': 6,
'bluray_raw': 7
}
class Quality_Modifiers:
"""Quality modifier mappings for Radarr ONLY"""
RADARR = {
'none': 0,
'regional': 1,
'screener': 2,
'rawhd': 3,
'brdisk': 4,
'remux': 5,
}
class Release_Types:
"""Release type mappings for Sonarr ONLY"""
SONARR = {
'none': 0,
'single_episode': 1,
'multi_episode': 2,
'season_pack': 3,
}
class Qualities:
"""Quality mappings for both applications"""
COMMON_RESOLUTIONS = {
'360p': 360,
'480p': 480,
'540p': 540,
'576p': 576,
'720p': 720,
'1080p': 1080,
'2160p': 2160
}
RADARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "tv",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "webdl",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "tv",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "webdl",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "webdl",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "tv",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "tv",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webrip",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webrip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webrip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "tv",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webrip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "webdl",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-480p": {
"id": 20,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"Bluray-576p": {
"id": 21,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
},
"BR-DISK": {
"id": 22,
"name": "BR-DISK",
"source": "bluray",
"resolution": 1080
},
"DVD-R": {
"id": 23,
"name": "DVD-R",
"source": "dvd",
"resolution": 480
},
"WORKPRINT": {
"id": 24,
"name": "WORKPRINT",
"source": "workprint",
"resolution": 0
},
"CAM": {
"id": 25,
"name": "CAM",
"source": "cam",
"resolution": 0
},
"TELESYNC": {
"id": 26,
"name": "TELESYNC",
"source": "telesync",
"resolution": 0
},
"TELECINE": {
"id": 27,
"name": "TELECINE",
"source": "telecine",
"resolution": 0
},
"DVDSCR": {
"id": 28,
"name": "DVDSCR",
"source": "dvd",
"resolution": 480
},
"REGIONAL": {
"id": 29,
"name": "REGIONAL",
"source": "dvd",
"resolution": 480
},
"Remux-1080p": {
"id": 30,
"name": "Remux-1080p",
"source": "bluray",
"resolution": 1080
},
"Remux-2160p": {
"id": 31,
"name": "Remux-2160p",
"source": "bluray",
"resolution": 2160
}
}
SONARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "television",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "web",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "television",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "web",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "web",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "television",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "televisionRaw",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webRip",
"resolution": 480
},
"Bluray-480p": {
"id": 13,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webRip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webRip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "television",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webRip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "web",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-1080p Remux": {
"id": 20,
"name": "Bluray-1080p Remux",
"source": "blurayRaw",
"resolution": 1080
},
"Bluray-2160p Remux": {
"id": 21,
"name": "Bluray-2160p Remux",
"source": "blurayRaw",
"resolution": 2160
},
"Bluray-576p": {
"id": 22,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
}
}
class Languages:
"""Language mappings for both applications"""
RADARR = {
'any': {
'id': -1,
'name': 'Any'
},
'original': {
'id': -2,
'name': 'Original'
},
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'hindi': {
'id': 26,
'name': 'Hindi'
},
'romanian': {
'id': 27,
'name': 'Romanian'
},
'thai': {
'id': 28,
'name': 'Thai'
},
'bulgarian': {
'id': 29,
'name': 'Bulgarian'
},
'portuguese_br': {
'id': 30,
'name': 'Portuguese (Brazil)'
},
'arabic': {
'id': 31,
'name': 'Arabic'
},
'ukrainian': {
'id': 32,
'name': 'Ukrainian'
},
'persian': {
'id': 33,
'name': 'Persian'
},
'bengali': {
'id': 34,
'name': 'Bengali'
},
'slovak': {
'id': 35,
'name': 'Slovak'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'spanish_latino': {
'id': 37,
'name': 'Spanish (Latino)'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'telugu': {
'id': 45,
'name': 'Telugu'
},
'macedonian': {
'id': 46,
'name': 'Macedonian'
},
'slovenian': {
'id': 47,
'name': 'Slovenian'
},
'malayalam': {
'id': 48,
'name': 'Malayalam'
},
'kannada': {
'id': 49,
'name': 'Kannada'
},
'albanian': {
'id': 50,
'name': 'Albanian'
},
'afrikaans': {
'id': 51,
'name': 'Afrikaans'
}
}
SONARR = {
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'arabic': {
'id': 26,
'name': 'Arabic'
},
'hindi': {
'id': 27,
'name': 'Hindi'
},
'bulgarian': {
'id': 28,
'name': 'Bulgarian'
},
'malayalam': {
'id': 29,
'name': 'Malayalam'
},
'ukrainian': {
'id': 30,
'name': 'Ukrainian'
},
'slovak': {
'id': 31,
'name': 'Slovak'
},
'thai': {
'id': 32,
'name': 'Thai'
},
'portuguese_br': {
'id': 33,
'name': 'Portuguese (Brazil)'
},
'spanish_latino': {
'id': 34,
'name': 'Spanish (Latino)'
},
'romanian': {
'id': 35,
'name': 'Romanian'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'persian': {
'id': 37,
'name': 'Persian'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'macedonian': {
'id': 45,
'name': 'Macedonian'
},
'slovenian': {
'id': 46,
'name': 'Slovenian'
},
'original': {
'id': -2,
'name': 'Original'
}
}
class QualityNameMapper:
"""Maps between different quality naming conventions"""
REMUX_MAPPINGS = {
TargetApp.SONARR: {
"Remux-1080p": "Bluray-1080p Remux",
"Remux-2160p": "Bluray-2160p Remux"
},
TargetApp.RADARR: {
"Remux-1080p": "Remux-1080p",
"Remux-2160p": "Remux-2160p"
}
}
ALTERNATE_NAMES = {
"BR-Disk": "BR-DISK",
"BR-DISK": "BR-DISK",
"BRDISK": "BR-DISK",
"BR_DISK": "BR-DISK",
"BLURAY-DISK": "BR-DISK",
"BLURAY_DISK": "BR-DISK",
"BLURAYDISK": "BR-DISK",
"Telecine": "TELECINE",
"TELECINE": "TELECINE",
"TeleCine": "TELECINE",
"Telesync": "TELESYNC",
"TELESYNC": "TELESYNC",
"TeleSync": "TELESYNC",
}
@classmethod
def map_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""
Maps quality names between different formats based on target app
Args:
name: The quality name to map
target_app: The target application (RADARR or SONARR)
Returns:
The mapped quality name
"""
# Handle empty or None cases
if not name:
return name
# First check for remux mappings
if name in cls.REMUX_MAPPINGS.get(target_app, {}):
return cls.REMUX_MAPPINGS[target_app][name]
# Then check for alternate spellings
normalized_name = name.upper().replace("-", "").replace("_", "")
for alt_name, standard_name in cls.ALTERNATE_NAMES.items():
if normalized_name == alt_name.upper().replace("-", "").replace(
"_", ""):
return standard_name
return name
class LanguageNameMapper:
"""Maps between different language naming conventions"""
ALTERNATE_NAMES = {
"spanish-latino": "spanish_latino",
"spanish_latino": "spanish_latino",
"spanishlatino": "spanish_latino",
"portuguese-br": "portuguese_br",
"portuguese_br": "portuguese_br",
"portuguesebr": "portuguese_br",
"portuguese-brazil": "portuguese_br",
"portuguese_brazil": "portuguese_br"
}
@classmethod
def normalize_language_name(cls, name: str) -> str:
"""
Normalizes language names to a consistent format
Args:
name: The language name to normalize
Returns:
The normalized language name
"""
if not name:
return name
normalized = name.lower().replace(" ", "_")
return cls.ALTERNATE_NAMES.get(normalized, normalized)
class ValueResolver:
"""Helper class to resolve values based on target app"""
@classmethod
def get_indexer_flag(cls, flag: str, target_app: TargetApp) -> int:
flags = IndexerFlags.RADARR if target_app == TargetApp.RADARR else IndexerFlags.SONARR
return flags.get(flag.lower(), 0)
@classmethod
def get_source(cls, source: str, target_app: TargetApp) -> int:
sources = Sources.RADARR if target_app == TargetApp.RADARR else Sources.SONARR
return sources.get(source.lower(), 0)
@classmethod
def get_resolution(cls, resolution: str) -> int:
return Qualities.COMMON_RESOLUTIONS.get(resolution.lower(), 0)
@classmethod
def get_qualities(cls, target_app: TargetApp) -> Dict[str, Any]:
qualities = Qualities.RADARR if target_app == TargetApp.RADARR else Qualities.SONARR
return qualities
@classmethod
def get_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""Maps quality names between different formats based on target app"""
return QualityNameMapper.map_quality_name(name, target_app)
@classmethod
def get_quality_modifier(cls, quality_modifier: str) -> int:
return Quality_Modifiers.RADARR.get(quality_modifier.lower(), 0)
@classmethod
def get_release_type(cls, release_type: str) -> int:
return Release_Types.SONARR.get(release_type.lower(), 0)
@classmethod
def get_language(cls,
language_name: str,
target_app: TargetApp,
for_profile: bool = True) -> Dict[str, Any]:
"""
Get language mapping based on target app and context
Args:
language_name: Name of the language to look up
target_app: Target application (RADARR or SONARR)
for_profile: If True, this is for a quality profile. If False, this is for a custom format.
"""
languages = Languages.RADARR if target_app == TargetApp.RADARR else Languages.SONARR
# For profiles, only Radarr uses language settings
if for_profile and target_app == TargetApp.SONARR:
return {'id': -2, 'name': 'Original'}
# Normalize the language name
normalized_name = LanguageNameMapper.normalize_language_name(
language_name)
language_data = languages.get(normalized_name)
if not language_data:
logger.warning(
f"Language '{language_name}' (normalized: '{normalized_name}') "
f"not found in {target_app} mappings, falling back to Unknown")
language_data = languages['unknown']
return language_data
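# --- Illustrative sketch (not part of the original module) ---
# ValueResolver turns the names used in the YAML definitions into the
# app-specific values tabulated above; every expected value below comes from
# the mappings in this module.
def _example_value_lookups():
    assert ValueResolver.get_source('bluray', TargetApp.RADARR) == 9
    assert ValueResolver.get_source('bluray', TargetApp.SONARR) == 6
    assert ValueResolver.get_resolution('2160p') == 2160
    assert ValueResolver.get_quality_name(
        'Remux-1080p', TargetApp.SONARR) == 'Bluray-1080p Remux'
    # Sonarr quality profiles have no language setting, so 'Original' is forced
    assert ValueResolver.get_language('french', TargetApp.SONARR) == {
        'id': -2,
        'name': 'Original'
    }
    assert ValueResolver.get_language('french', TargetApp.RADARR) == {
        'id': 2,
        'name': 'French'
    }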

View File

@@ -1,536 +0,0 @@
"""Profile compilation module for converting quality profiles"""
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Any, Callable
import json
import yaml
import logging
import asyncio
import aiohttp
from .mappings import TargetApp, ValueResolver
from ..data.utils import load_yaml_file, get_category_directory
from ..importarr.format_memory import import_format_from_memory, async_import_format_from_memory
from ..db.queries.settings import get_language_import_score
logger = logging.getLogger(__name__)
@dataclass
class ConvertedProfile:
"""Data class for converted profile output"""
name: str
items: List[Dict]
format_items: List[Dict]
upgrade_allowed: bool
min_format_score: int
cutoff_format_score: int
min_upgrade_format_score: int
language: Dict
cutoff: Optional[int] = None
class ProfileConverter:
"""Converts quality profiles between different formats"""
def __init__(self,
target_app: TargetApp,
base_url: str = None,
api_key: str = None,
format_importer: Callable = None,
import_as_unique: bool = False):
self.target_app = target_app
self.base_url = base_url
self.api_key = api_key
self.format_importer = format_importer
self.import_as_unique = import_as_unique
self.quality_mappings = ValueResolver.get_qualities(target_app)
def _convert_group_id(self, group_id: int) -> int:
if group_id < 0:
return 1000 + abs(group_id)
return group_id
def _create_all_qualities(self,
allowed_qualities: List[str]) -> List[Dict]:
qualities = []
for quality_name in allowed_qualities:
if quality_name in self.quality_mappings:
qualities.append({
"quality":
self.quality_mappings[quality_name].copy(),
"items": [],
"allowed":
True
})
return qualities
def _generate_language_formats(self,
behaviour: str,
language: str) -> List[Dict]:
"""
Generate language-specific format configurations without importing them.
This is useful for pre-loading and caching language formats.
Args:
behaviour: Language behavior ('must', 'prefer', 'only')
language: Language code ('english', 'french', etc.)
Returns:
List of format configurations for the specified language
"""
try:
formats_to_import = []
# Get the base format as a template
base_format_path = f"{get_category_directory('custom_format')}/Not English.yml"
base_format = load_yaml_file(base_format_path)
# Get language data for translations
language_data = ValueResolver.get_language(
language, self.target_app, for_profile=False
)
# Create the main "Not X" format (e.g., "Not French")
modified_format = base_format.copy()
base_name = f"Not {language_data['name']}"
modified_format['name'] = base_name
# Update conditions to refer to the specific language
for condition in modified_format['conditions']:
if condition.get('type') == 'language':
condition['language'] = language
if condition.get('name') == 'Not English':
condition['name'] = f"Not {language_data['name']}"
elif condition.get('name') == 'Includes English':
condition['name'] = f"Includes {language_data['name']}"
formats_to_import.append(modified_format)
# Add additional formats for 'only' behavior
if behaviour == 'only':
additional_formats = [
"Not Only English", "Not Only English (Missing)"
]
for format_name in additional_formats:
format_path = f"{get_category_directory('custom_format')}/{format_name}.yml"
format_data = load_yaml_file(format_path)
format_data['name'] = format_data['name'].replace(
'English', language_data['name'])
for c in format_data.get('conditions', []):
if c.get('type') == 'language':
c['language'] = language
if c.get('name') == 'Not English':
c['name'] = f"Not {language_data['name']}"
elif c.get('name') == 'Includes English':
c['name'] = f"Includes {language_data['name']}"
formats_to_import.append(format_data)
return formats_to_import
except Exception as e:
logger.error(f"Error generating language formats: {str(e)}")
raise
def _process_language_formats(
self,
behaviour: str,
language: str,
import_as_unique: bool = False) -> List[Dict]:
"""
Process language formats by either importing them directly or using the format_importer.
When using the cached profile import, the format_importer will be a dummy function that
just returns success without actually importing, since the formats were already imported.
"""
try:
# Generate the format configurations
formats_to_import = self._generate_language_formats(behaviour, language)
format_configs = []
# Check if we're using a format importer (might be None for direct format returns)
if self.format_importer is None:
# No importer provided - we're in the special caching mode
# Just create the format configs directly without importing
logger.info(f"Using pre-cached language formats for {behaviour}_{language}")
for format_data in formats_to_import:
format_name = format_data['name']
if import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': format_name,
'score': get_language_import_score()
})
return format_configs
# Regular mode with an importer - check if it's our dummy cached importer
if self.format_importer and hasattr(self.format_importer, '__name__') and self.format_importer.__name__ == 'cached_format_importer':
logger.info(f"Using cached importer for language formats {behaviour}_{language}")
# Simply call the dummy importer just to keep the flow consistent,
# but we'll generate our own format configs
self.format_importer()
# Create format configs directly
for format_data in formats_to_import:
format_name = format_data['name']
if import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': format_name,
'score': get_language_import_score()
})
return format_configs
# If we've reached here, we're doing a regular import
if not self.base_url or not self.api_key or not self.format_importer:
logger.error("Missing required credentials or format importer")
raise ValueError(
"base_url, api_key, and format_importer are required for language format processing"
)
arr_type = 'radarr' if self.target_app == TargetApp.RADARR else 'sonarr'
# Use asyncio if there are multiple formats to import
if len(formats_to_import) > 1:
# Run in event loop
return asyncio.run(self._async_process_language_formats(
formats_to_import=formats_to_import,
arr_type=arr_type,
import_as_unique=import_as_unique
))
# For single format, use regular synchronous version
for format_data in formats_to_import:
try:
result = import_format_from_memory(
format_data,
self.base_url,
self.api_key,
arr_type,
import_as_unique=self.import_as_unique)
if not result.get('success', False):
logger.error(
f"Format import failed for: {format_data['name']}")
raise Exception(
f"Failed to import format {format_data['name']}")
format_name = format_data['name']
if import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': format_name,
'score': get_language_import_score()
})
except Exception as e:
logger.error(
f"Error importing format {format_data['name']}: {str(e)}"
)
raise
return format_configs
except Exception as e:
logger.error(f"Error processing language formats: {str(e)}")
raise
async def _async_process_language_formats(
self,
formats_to_import: List[Dict],
arr_type: str,
import_as_unique: bool = False) -> List[Dict]:
"""
Asynchronous version of _process_language_formats for concurrent imports
"""
logger.info(f"Processing language formats asynchronously: {len(formats_to_import)} formats")
format_configs = []
tasks = []
# Create tasks for all formats
for format_data in formats_to_import:
task = asyncio.create_task(
async_import_format_from_memory(
format_data=format_data,
base_url=self.base_url,
api_key=self.api_key,
arr_type=arr_type,
import_as_unique=self.import_as_unique
)
)
tasks.append((format_data['name'], task))
# Process all format import results
for format_name, task in tasks:
try:
result = await task
if not result.get('success', False):
logger.error(f"Format import failed for: {format_name} (async)")
raise Exception(f"Failed to import format {format_name}")
display_name = format_name
if import_as_unique:
display_name = f"{format_name} [Dictionarry]"
format_configs.append({
'name': display_name,
'score': get_language_import_score()
})
except Exception as e:
logger.error(f"Error importing format {format_name}: {str(e)} (async)")
raise
return format_configs
def convert_quality_group(self, group: Dict) -> Dict:
original_id = group.get("id", 0)
converted_id = self._convert_group_id(original_id)
allowed_qualities = []
for q_item in group.get("qualities", []):
input_name = q_item.get("name", "")
# First map the quality name to handle remux qualities properly
mapped_name = ValueResolver.get_quality_name(
input_name, self.target_app)
# Create a case-insensitive lookup map
quality_map = {k.lower(): k for k in self.quality_mappings}
# Try to find the mapped name in quality mappings
if mapped_name.lower() in quality_map:
allowed_qualities.append(quality_map[mapped_name.lower()])
# Fallback to the original name
elif input_name.lower() in quality_map:
allowed_qualities.append(quality_map[input_name.lower()])
converted_group = {
"name": group["name"],
"items": self._create_all_qualities(allowed_qualities),
"allowed": True,
"id": converted_id
}
return converted_group
def convert_profile(self, profile: Dict) -> ConvertedProfile:
language = profile.get('language', 'any')
# Handle language processing for advanced mode (with behavior_language format)
if language != 'any' and '_' in language:
language_parts = language.split('_', 1)
behaviour, language_code = language_parts
# Check if we're using a special importer with cached formats
if self.format_importer and hasattr(self.format_importer, '__name__') and self.format_importer.__name__ == 'cached_format_importer':
# If we're using the cached importer, skip processing
# The formats were already added directly to the profile
pass # Using pre-added language formats
else:
# Normal processing path
try:
language_formats = self._process_language_formats(
behaviour, language_code)
if 'custom_formats' not in profile:
profile['custom_formats'] = []
profile['custom_formats'].extend(language_formats)
except Exception as e:
logger.error(f"Failed to process language formats: {e}")
# Simple mode: just use the language directly without custom formats
# This lets the Arr application's built-in language filter handle it
# Get the appropriate language data for the profile
if language != 'any' and '_' not in language:
# Simple mode - use the language directly
selected_language = ValueResolver.get_language(language,
self.target_app,
for_profile=True)
# Using simple language mode
else:
# Advanced mode or 'any' - set language to 'any' as filtering is done via formats
selected_language = ValueResolver.get_language('any',
self.target_app,
for_profile=True)
# Using advanced mode, setting language to 'any'
converted_profile = ConvertedProfile(
name=profile["name"],
upgrade_allowed=profile.get("upgradesAllowed", True),
items=[],
format_items=[],
min_format_score=profile.get("minCustomFormatScore", 0),
cutoff_format_score=profile.get("upgradeUntilScore", 0),
min_upgrade_format_score=max(1,
profile.get("minScoreIncrement", 1)),
language=selected_language)
used_qualities = set()
quality_ids_in_groups = set()
# First pass: Gather all quality IDs in groups to avoid duplicates
for quality_entry in profile.get("qualities", []):
if quality_entry.get("id", 0) < 0: # It's a group
# Process this group to collect quality IDs
converted_group = self.convert_quality_group(quality_entry)
for item in converted_group["items"]:
if "quality" in item and "id" in item["quality"]:
quality_ids_in_groups.add(item["quality"]["id"])
# Second pass: Add groups and individual qualities to the profile
for quality_entry in profile.get("qualities", []):
if quality_entry.get("id", 0) < 0: # It's a group
converted_group = self.convert_quality_group(quality_entry)
if converted_group["items"]:
converted_profile.items.append(converted_group)
for q in quality_entry.get("qualities", []):
used_qualities.add(q.get("name", "").upper())
else: # It's a single quality
quality_name = quality_entry.get("name")
mapped_name = ValueResolver.get_quality_name(
quality_name, self.target_app)
if mapped_name in self.quality_mappings:
converted_profile.items.append({
"quality": self.quality_mappings[mapped_name],
"items": [],
"allowed": True
})
used_qualities.add(mapped_name.upper())
# Add all unused qualities as disabled, but skip those already in groups
for quality_name, quality_data in self.quality_mappings.items():
if (quality_name.upper() not in used_qualities and
quality_data["id"] not in quality_ids_in_groups):
converted_profile.items.append({
"quality": quality_data,
"items": [],
"allowed": False
})
if "upgrade_until" in profile and "id" in profile["upgrade_until"]:
cutoff_id = profile["upgrade_until"]["id"]
cutoff_name = profile["upgrade_until"]["name"]
mapped_cutoff_name = ValueResolver.get_quality_name(
cutoff_name, self.target_app)
if cutoff_id < 0:
converted_profile.cutoff = self._convert_group_id(cutoff_id)
else:
converted_profile.cutoff = self.quality_mappings[
mapped_cutoff_name]["id"]
for cf in profile.get("custom_formats", []):
format_item = {"name": cf["name"], "score": cf["score"]}
converted_profile.format_items.append(format_item)
# Process app-specific custom formats based on target app
app_specific_field = None
if self.target_app == TargetApp.RADARR:
app_specific_field = "custom_formats_radarr"
elif self.target_app == TargetApp.SONARR:
app_specific_field = "custom_formats_sonarr"
if app_specific_field and app_specific_field in profile:
for cf in profile[app_specific_field]:
format_name = cf["name"]
# Apply [Dictionarry] suffix if import_as_unique is enabled
if self.import_as_unique:
format_name = f"{format_name} [Dictionarry]"
format_item = {"name": format_name, "score": cf["score"]}
converted_profile.format_items.append(format_item)
converted_profile.items.reverse()
return converted_profile
class ProfileProcessor:
"""Main class for processing profile files"""
def __init__(self,
input_dir: Path,
output_dir: Path,
target_app: TargetApp,
base_url: str = None,
api_key: str = None,
format_importer: Callable = None):
self.input_dir = input_dir
self.output_dir = output_dir
self.converter = ProfileConverter(target_app, base_url, api_key,
format_importer)
def _load_profile(self, profile_name: str) -> Optional[Dict]:
profile_path = self.input_dir / f"{profile_name}.yml"
if not profile_path.exists():
return None
with profile_path.open('r') as f:
return yaml.safe_load(f)
def process_profile(
self,
profile_name: str,
return_data: bool = False) -> Optional[ConvertedProfile]:
profile_data = self._load_profile(profile_name)
if not profile_data:
return None
converted = self.converter.convert_profile(profile_data)
if return_data:
return converted
output_data = [{
'name': converted.name,
'upgradeAllowed': converted.upgrade_allowed,
'items': converted.items,
'formatItems': converted.format_items,
'minFormatScore': converted.min_format_score,
'cutoffFormatScore': converted.cutoff_format_score,
'minUpgradeFormatScore': converted.min_upgrade_format_score,
'language': converted.language
}]
if converted.cutoff is not None:
output_data[0]['cutoff'] = converted.cutoff
output_path = self.output_dir / f"{profile_name}.json"
with output_path.open('w') as f:
json.dump(output_data, f, indent=2)
return converted
def compile_quality_profile(profile_data: Dict,
target_app: TargetApp,
base_url: str = None,
api_key: str = None,
format_importer: Callable = None,
import_as_unique: bool = False) -> List[Dict]:
converter = ProfileConverter(target_app,
base_url,
api_key,
format_importer,
import_as_unique=import_as_unique)
converted = converter.convert_profile(profile_data)
output = {
'name': converted.name,
'upgradeAllowed': converted.upgrade_allowed,
'items': converted.items,
'formatItems': converted.format_items,
'minFormatScore': converted.min_format_score,
'cutoffFormatScore': converted.cutoff_format_score,
'minUpgradeFormatScore': converted.min_upgrade_format_score,
'language': converted.language
}
if converted.cutoff is not None:
output['cutoff'] = converted.cutoff
return [output]
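# --- Illustrative sketch (not part of the original module) ---
# Minimal profile dict accepted by compile_quality_profile(). With the default
# language ('any') no language formats are generated, so base_url, api_key and
# format_importer can stay None. Names and scores below are hypothetical.
_EXAMPLE_PROFILE = {
    'name': 'Example 1080p',
    'upgradesAllowed': True,
    'minCustomFormatScore': 0,
    'upgradeUntilScore': 100,
    'minScoreIncrement': 10,
    'qualities': [{
        'id': 1,
        'name': 'Bluray-1080p'
    }, {
        'id': 2,
        'name': 'WEBDL-1080p'
    }],
    'upgrade_until': {
        'id': 1,
        'name': 'Bluray-1080p'
    },
    'custom_formats': [{
        'name': 'x265',
        'score': 50
    }]
}
# compile_quality_profile(_EXAMPLE_PROFILE, TargetApp.RADARR) returns a
# single-element list with arr-shaped keys: 'items' (every known quality, the
# unused ones marked allowed=False), 'formatItems', 'cutoff', 'language', etc.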

View File

@@ -1,3 +0,0 @@
from .config import config
__all__ = ['config']

View File

@@ -1,56 +0,0 @@
import os
import logging
class Config:
# Base Paths
CONFIG_DIR = '/config'
DB_PATH = os.path.join(CONFIG_DIR, 'profilarr.db')
DB_DIR = os.path.join(CONFIG_DIR, 'db')
REGEX_DIR = os.path.join(DB_DIR, 'regex_patterns')
FORMAT_DIR = os.path.join(DB_DIR, 'custom_formats')
PROFILE_DIR = os.path.join(DB_DIR, 'profiles')
MEDIA_MANAGEMENT_DIR = os.path.join(DB_DIR, 'media_management')
# Logging
LOG_DIR = os.path.join(CONFIG_DIR, 'log')
GENERAL_LOG_FILE = os.path.join(LOG_DIR, 'profilarr.log')
IMPORTARR_LOG_FILE = os.path.join(LOG_DIR, 'importarr.log')
HASH_LOG_FILE = os.path.join(LOG_DIR, 'hash.log')
# Flask Configuration
FLASK_ENV = os.getenv('FLASK_ENV', 'production')
DEBUG = FLASK_ENV == 'development'
# CORS Configuration
CORS_ORIGINS = "*"
# Session Configuration
SESSION_LIFETIME_DAYS = 30
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SAMESITE = 'Lax'
# Git Configuration
GIT_USER_NAME = os.getenv('GIT_USER_NAME')
GIT_USER_EMAIL = os.getenv('GIT_USER_EMAIL')
@staticmethod
def ensure_directories():
"""Create all required directories if they don't exist."""
directories = [
Config.CONFIG_DIR, Config.DB_DIR, Config.REGEX_DIR,
Config.FORMAT_DIR, Config.PROFILE_DIR, Config.MEDIA_MANAGEMENT_DIR, Config.LOG_DIR
]
logger = logging.getLogger(__name__)
for directory in directories:
try:
os.makedirs(directory, exist_ok=True)
logger.info(f"Ensured directory exists: {directory}")
except Exception as e:
logger.error(
f"Failed to create directory {directory}: {str(e)}")
config = Config()
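# --- Illustrative sketch (not part of the original module) ---
# Typical startup usage: make sure the /config tree exists before the database
# or git repositories are touched. The helper itself is hypothetical.
def _example_bootstrap():
    Config.ensure_directories()
    return config.DB_PATH  # '/config/profilarr.db'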

View File

@@ -1,15 +0,0 @@
from .connection import get_db
from .queries.settings import get_settings, get_secret_key, save_settings, update_pat_status
from .queries.arr import (get_unique_arrs, update_arr_config_on_rename,
update_arr_config_on_delete)
from .queries.format_renames import (add_format_to_renames,
remove_format_from_renames,
is_format_in_renames)
from .migrations.runner import run_migrations
__all__ = [
'get_db', 'get_settings', 'get_secret_key', 'save_settings',
'get_unique_arrs', 'update_arr_config_on_rename',
'update_arr_config_on_delete', 'run_migrations', 'add_format_to_renames',
'remove_format_from_renames', 'is_format_in_renames', 'update_pat_status'
]

View File

@@ -1,12 +0,0 @@
# backend/app/db/connection.py
import sqlite3
from ..config import config
DB_PATH = config.DB_PATH
def get_db():
"""Create and return a database connection with Row factory."""
conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
return conn
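# --- Illustrative sketch (not part of the original module) ---
# The Row factory makes columns addressable by name, and using the connection
# as a context manager wraps the statements in a transaction (it does not
# close the connection). The settings table queried below is created by the
# initial migration.
def _example_query():
    with get_db() as conn:
        row = conn.execute('SELECT key, value FROM settings LIMIT 1').fetchone()
    return dict(row) if row else None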

View File

@@ -1,64 +0,0 @@
# backend/app/db/migrations/runner.py
import os
import importlib
from pathlib import Path
from ..connection import get_db
def init_migrations():
"""Create migrations table if it doesn't exist."""
with get_db() as conn:
conn.execute('''
CREATE TABLE IF NOT EXISTS migrations (
id INTEGER PRIMARY KEY AUTOINCREMENT,
version INTEGER NOT NULL,
name TEXT NOT NULL,
applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
conn.commit()
def get_applied_migrations():
"""Get list of already applied migrations."""
with get_db() as conn:
result = conn.execute(
'SELECT version FROM migrations ORDER BY version')
return [row[0] for row in result.fetchall()]
def get_available_migrations():
"""Get all migration files from versions directory."""
versions_dir = Path(__file__).parent / 'versions'
migrations = []
for file in versions_dir.glob('[0-9]*.py'):
if file.stem != '__init__':
# Import the migration module
module = importlib.import_module(f'.versions.{file.stem}',
package='app.db.migrations')
migrations.append((module.version, module.name, module))
return sorted(migrations, key=lambda x: x[0])
def run_migrations():
"""Run all pending migrations in order."""
init_migrations()
applied = set(get_applied_migrations())
available = get_available_migrations()
for version, name, module in available:
if version not in applied:
print(f"Applying migration {version}: {name}")
try:
module.up()
with get_db() as conn:
conn.execute(
'INSERT INTO migrations (version, name) VALUES (?, ?)',
(version, name))
conn.commit()
print(f"Successfully applied migration {version}")
except Exception as e:
print(f"Error applying migration {version}: {str(e)}")
raise
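# --- Illustrative sketch (not part of the original module) ---
# Shape of a migration the runner discovers: a versions/NNN_description.py
# file exposing module-level `version`, `name`, `up()` and, by convention,
# `down()`. The table created below is hypothetical.
#
#     # backend/app/db/migrations/versions/005_example.py
#     from ...connection import get_db
#
#     version = 5
#     name = "example"
#
#     def up():
#         with get_db() as conn:
#             conn.execute(
#                 'CREATE TABLE IF NOT EXISTS example (id INTEGER PRIMARY KEY)')
#             conn.commit()
#
#     def down():
#         with get_db() as conn:
#             conn.execute('DROP TABLE IF EXISTS example')
#             conn.commit()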

View File

@@ -1,145 +0,0 @@
# backend/app/db/migrations/versions/001_initial_schema.py
import os
import secrets
from ...connection import get_db
version = 1
name = "initial_schema"
def up():
"""Apply the initial database schema."""
with get_db() as conn:
# Create backups table
conn.execute('''
CREATE TABLE IF NOT EXISTS backups (
id INTEGER PRIMARY KEY AUTOINCREMENT,
filename TEXT NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
status TEXT DEFAULT 'pending'
)
''')
# Create arr_config table
conn.execute('''
CREATE TABLE IF NOT EXISTS arr_config (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT UNIQUE NOT NULL,
type TEXT NOT NULL,
tags TEXT,
arr_server TEXT NOT NULL,
api_key TEXT NOT NULL,
data_to_sync TEXT,
last_sync_time TIMESTAMP,
sync_percentage INTEGER DEFAULT 0,
sync_method TEXT DEFAULT 'manual',
sync_interval INTEGER DEFAULT 0,
import_as_unique BOOLEAN DEFAULT 0,
import_task_id INTEGER DEFAULT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create scheduled_tasks table
conn.execute('''
CREATE TABLE IF NOT EXISTS scheduled_tasks (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
type TEXT NOT NULL,
interval_minutes INTEGER NOT NULL,
last_run TIMESTAMP,
status TEXT DEFAULT 'pending',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create settings table
conn.execute('''
CREATE TABLE IF NOT EXISTS settings (
id INTEGER PRIMARY KEY AUTOINCREMENT,
key TEXT UNIQUE NOT NULL,
value TEXT,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create auth table
conn.execute('''
CREATE TABLE IF NOT EXISTS auth (
username TEXT NOT NULL,
password_hash TEXT NOT NULL,
api_key TEXT,
session_id TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Create failed_attempts table
conn.execute('''
CREATE TABLE IF NOT EXISTS failed_attempts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
ip_address TEXT NOT NULL,
attempt_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Insert initial required data
required_tasks = [
('Repository Sync', 'Sync', 2),
('Backup', 'Backup', 1440),
]
for task_name, task_type, interval in required_tasks:
cursor = conn.execute(
'SELECT COUNT(*) FROM scheduled_tasks WHERE name = ?',
(task_name, ))
if cursor.fetchone()[0] == 0:
conn.execute(
'''
INSERT INTO scheduled_tasks (name, type, interval_minutes)
VALUES (?, ?, ?)
''', (task_name, task_type, interval))
# Insert initial settings
conn.execute('''
INSERT OR IGNORE INTO settings (key, value, updated_at)
VALUES ('auto_pull_enabled', '0', CURRENT_TIMESTAMP)
''')
# Handle profilarr_pat setting
profilarr_pat = os.environ.get('PROFILARR_PAT')
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES ('has_profilarr_pat', ?, CURRENT_TIMESTAMP)
ON CONFLICT(key) DO UPDATE SET
value = ?,
updated_at = CURRENT_TIMESTAMP
''', (str(bool(profilarr_pat)).lower(), str(
bool(profilarr_pat)).lower()))
# Handle secret_key setting
secret_key = conn.execute(
'SELECT value FROM settings WHERE key = "secret_key"').fetchone()
if not secret_key:
new_secret_key = secrets.token_hex(32)
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES ('secret_key', ?, CURRENT_TIMESTAMP)
''', (new_secret_key, ))
conn.commit()
def down():
"""Revert the initial schema migration."""
with get_db() as conn:
# Drop all tables in reverse order of creation
tables = [
'failed_attempts', 'auth', 'settings', 'scheduled_tasks',
'arr_config', 'backups'
]
for table in tables:
conn.execute(f'DROP TABLE IF EXISTS {table}')
conn.commit()

View File

@@ -1,23 +0,0 @@
# backend/app/db/migrations/versions/002_format_renames.py
from ...connection import get_db
version = 2
name = "format_renames"
def up():
"""Add table for tracking which formats to include in renames"""
with get_db() as conn:
conn.execute('''
CREATE TABLE IF NOT EXISTS format_renames (
format_name TEXT PRIMARY KEY NOT NULL
)
''')
conn.commit()
def down():
"""Remove the format_renames table"""
with get_db() as conn:
conn.execute('DROP TABLE IF EXISTS format_renames')
conn.commit()

View File

@@ -1,33 +0,0 @@
# backend/app/db/migrations/versions/003_language_import_score.py
from ...connection import get_db
version = 3
name = "language_import_score"
def up():
"""Add language_import_config table."""
with get_db() as conn:
# Create language_import_config table
conn.execute('''
CREATE TABLE IF NOT EXISTS language_import_config (
id INTEGER PRIMARY KEY AUTOINCREMENT,
score INTEGER NOT NULL DEFAULT -99999,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
# Insert default record
conn.execute('''
INSERT INTO language_import_config (score, updated_at)
VALUES (-99999, CURRENT_TIMESTAMP)
''')
conn.commit()
def down():
"""Remove language_import_config table."""
with get_db() as conn:
conn.execute('DROP TABLE IF EXISTS language_import_config')
conn.commit()

View File

@@ -1,33 +0,0 @@
# backend/app/db/migrations/versions/004_update_language_score_default.py
from ...connection import get_db
version = 4
name = "update_language_score_default"
def up():
"""Update default language import score to -999999."""
with get_db() as conn:
# Update existing record to new default value
conn.execute('''
UPDATE language_import_config
SET score = -999999,
updated_at = CURRENT_TIMESTAMP
WHERE id = 1
''')
conn.commit()
def down():
"""Revert language import score to previous default."""
with get_db() as conn:
# Revert to previous default value
conn.execute('''
UPDATE language_import_config
SET score = -99999,
updated_at = CURRENT_TIMESTAMP
WHERE id = 1
''')
conn.commit()

View File

@@ -1,119 +0,0 @@
from ..connection import get_db
import json
import logging
logger = logging.getLogger(__name__)
def get_unique_arrs(arr_ids):
"""
Get import_as_unique settings for a list of arr IDs.
Args:
arr_ids (list): List of arr configuration IDs
Returns:
dict: Dictionary mapping arr IDs to their import_as_unique settings and names
"""
if not arr_ids:
return {}
with get_db() as conn:
placeholders = ','.join('?' * len(arr_ids))
query = f'''
SELECT id, name, import_as_unique
FROM arr_config
WHERE id IN ({placeholders})
'''
results = conn.execute(query, arr_ids).fetchall()
return {
row['id']: {
'import_as_unique': bool(row['import_as_unique']),
'name': row['name']
}
for row in results
}
def update_arr_config_on_rename(category, old_name, new_name):
"""
Update arr_config data_to_sync when a format or profile is renamed.
Args:
category (str): Either 'customFormats' or 'profiles'
old_name (str): Original name being changed
new_name (str): New name to change to
Returns:
list: IDs of arr_config rows that were updated
"""
updated_ids = []
with get_db() as conn:
# Get all configs that might reference this name
rows = conn.execute(
'SELECT id, data_to_sync FROM arr_config WHERE data_to_sync IS NOT NULL'
).fetchall()
for row in rows:
try:
data = json.loads(row['data_to_sync'])
# Check if this config has the relevant category data
if category in data:
# Update any matching names
if old_name in data[category]:
# Replace old name with new name
data[category] = [
new_name if x == old_name else x
for x in data[category]
]
# Save changes back to database
conn.execute(
'UPDATE arr_config SET data_to_sync = ? WHERE id = ?',
(json.dumps(data), row['id']))
updated_ids.append(row['id'])
except json.JSONDecodeError:
logger.error(f"Invalid JSON in arr_config id={row['id']}")
continue
if updated_ids:
conn.commit()
return updated_ids
def update_arr_config_on_delete(category, name):
"""
Update arr_config data_to_sync when a format or profile is deleted.
Args:
category (str): Either 'customFormats' or 'profiles'
name (str): Name being deleted
Returns:
list: IDs of arr_config rows that were updated
"""
updated_ids = []
with get_db() as conn:
# Get all configs that might reference this name
rows = conn.execute(
'SELECT id, data_to_sync FROM arr_config WHERE data_to_sync IS NOT NULL'
).fetchall()
for row in rows:
try:
data = json.loads(row['data_to_sync'])
# Check if this config has the relevant category data
if category in data:
# Remove any matching names
if name in data[category]:
data[category].remove(name)
# Save changes back to database
conn.execute(
'UPDATE arr_config SET data_to_sync = ? WHERE id = ?',
(json.dumps(data), row['id']))
updated_ids.append(row['id'])
except json.JSONDecodeError:
logger.error(f"Invalid JSON in arr_config id={row['id']}")
continue
if updated_ids:
conn.commit()
return updated_ids
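# Illustration (hypothetical caller; the module path and the example names
# below are assumptions for this sketch, not values from the codebase):
from app.db.queries.arr import (update_arr_config_on_delete,
                                update_arr_config_on_rename)

# A quality profile was renamed on disk: point synced configs at the new name.
updated = update_arr_config_on_rename('profiles', 'HD Bluray + WEB',
                                      'HD Bluray + WEB v2')
print(f"Rewrote data_to_sync for arr_config ids: {updated}")

# A custom format was deleted: drop its reference from any synced config.
removed = update_arr_config_on_delete('customFormats', 'x265 (HD)')
print(f"Removed format reference from arr_config ids: {removed}")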

View File

@@ -1,33 +0,0 @@
# backend/app/db/queries/format_renames.py
import logging
from ..connection import get_db
logger = logging.getLogger(__name__)
def add_format_to_renames(format_name: str) -> None:
"""Add a format to the renames table"""
with get_db() as conn:
conn.execute(
'INSERT OR REPLACE INTO format_renames (format_name) VALUES (?)',
(format_name, ))
conn.commit()
logger.info(f"Added format to renames table: {format_name}")
def remove_format_from_renames(format_name: str) -> None:
"""Remove a format from the renames table"""
with get_db() as conn:
conn.execute('DELETE FROM format_renames WHERE format_name = ?',
(format_name, ))
conn.commit()
logger.info(f"Removed format from renames table: {format_name}")
def is_format_in_renames(format_name: str) -> bool:
"""Check if a format is in the renames table"""
with get_db() as conn:
result = conn.execute(
'SELECT 1 FROM format_renames WHERE format_name = ?',
(format_name, )).fetchone()
return bool(result)
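# Illustration (hypothetical caller; the import path is taken from the header
# comment above and assumed to be importable as a package):
from app.db.queries.format_renames import (add_format_to_renames,
                                           is_format_in_renames,
                                           remove_format_from_renames)

add_format_to_renames('x265 (HD)')        # INSERT OR REPLACE keeps this idempotent
assert is_format_in_renames('x265 (HD)')
remove_format_from_renames('x265 (HD)')
assert not is_format_in_renames('x265 (HD)')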

View File

@@ -1,111 +0,0 @@
# backend/app/db/queries/settings.py
from ..connection import get_db
import logging
import os
logger = logging.getLogger(__name__)
def get_settings():
with get_db() as conn:
result = conn.execute(
'SELECT key, value FROM settings WHERE key NOT IN ("secret_key")'
).fetchall()
settings = {row['key']: row['value'] for row in result}
return settings if 'gitRepo' in settings else None
def get_secret_key():
with get_db() as conn:
result = conn.execute(
'SELECT value FROM settings WHERE key = "secret_key"').fetchone()
return result['value'] if result else None
def save_settings(settings_dict):
with get_db() as conn:
for key, value in settings_dict.items():
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES (?, ?, CURRENT_TIMESTAMP)
ON CONFLICT(key) DO UPDATE SET
value = excluded.value,
updated_at = CURRENT_TIMESTAMP
''', (key, value))
conn.commit()
def update_pat_status():
"""Update the has_profilarr_pat setting based on current environment."""
with get_db() as conn:
profilarr_pat = os.environ.get('PROFILARR_PAT')
pat_exists = str(bool(profilarr_pat)).lower()
# Get current value
current = conn.execute('SELECT value FROM settings WHERE key = ?',
('has_profilarr_pat', )).fetchone()
conn.execute(
'''
INSERT INTO settings (key, value, updated_at)
VALUES ('has_profilarr_pat', ?, CURRENT_TIMESTAMP)
ON CONFLICT(key) DO UPDATE SET
value = ?,
updated_at = CURRENT_TIMESTAMP
''', (pat_exists, pat_exists))
conn.commit()
if current is None:
logger.info(f"PAT status created: {pat_exists}")
elif current[0] != pat_exists:
logger.info(
f"PAT status updated from {current[0]} to {pat_exists}")
else:
logger.debug("PAT status unchanged")
def get_language_import_score():
"""Get the current language import score."""
with get_db() as conn:
result = conn.execute(
'SELECT score FROM language_import_config ORDER BY id DESC LIMIT 1'
).fetchone()
return result['score'] if result else -99999
def update_language_import_score(score):
"""Update the language import score."""
with get_db() as conn:
# Get current score first
current = conn.execute(
'SELECT score FROM language_import_config ORDER BY id DESC LIMIT 1'
).fetchone()
current_score = current['score'] if current else None
# Check if record exists
existing = conn.execute(
'SELECT id FROM language_import_config ORDER BY id DESC LIMIT 1'
).fetchone()
if existing:
# Update existing record
conn.execute(
'''
UPDATE language_import_config
SET score = ?, updated_at = CURRENT_TIMESTAMP
WHERE id = ?
''', (score, existing['id']))
else:
# Insert new record
conn.execute(
'''
INSERT INTO language_import_config (score, updated_at)
VALUES (?, CURRENT_TIMESTAMP)
''', (score,))
conn.commit()
if current_score is not None:
logger.info(f"Language import score updated from {current_score} to {score}")
else:
logger.info(f"Language import score set to: {score}")
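# Illustration (hypothetical caller; the module path is taken from the header
# comment above and the repository URL is a placeholder):
from app.db.queries.settings import (get_settings, save_settings,
                                     update_language_import_score)

# save_settings upserts each key, so repeated calls keep one row per key and
# bump updated_at on every change.
save_settings({'gitRepo': 'https://github.com/example/profilarr-db',
               'auto_pull_enabled': '1'})

settings = get_settings()            # None until a 'gitRepo' key has been saved
print(settings)

update_language_import_score(-5000)  # overwrites the single language_import_config row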

View File

@@ -1,403 +0,0 @@
# git/__init__.py
from flask import Blueprint, request, jsonify
from .status.status import get_git_status
from .status.commit_history import get_git_commit_history
from .branches.manager import Branch_Manager
from .operations.manager import GitOperations
from .repo.unlink import unlink_repository
from .repo.clone import clone_repository
from ..db import save_settings, get_settings
from ..config.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
bp = Blueprint('git', __name__)
REPO_PATH = config.DB_DIR
branch_manager = Branch_Manager(REPO_PATH)
git_operations = GitOperations(REPO_PATH)
@bp.route('/clone', methods=['POST'])
def handle_clone_repository():
try:
new_settings = request.json
logger.info(f"Received new settings: {new_settings}")
if 'gitRepo' not in new_settings:
logger.error("Missing required field: gitRepo")
return jsonify({"error": "Missing required field: gitRepo"}), 400
success, message = clone_repository(new_settings['gitRepo'], REPO_PATH)
if success:
# Store repository URL in database
save_settings({'gitRepo': new_settings['gitRepo']})
logger.info("Settings updated and repository cloned successfully")
return jsonify({
"message":
"Repository cloned and settings updated successfully"
}), 200
else:
logger.error(f"Failed to clone repository: {message}")
return jsonify({"error": message}), 400
except Exception as e:
logger.exception("Unexpected error in clone_repository")
return jsonify({"error": f"Failed to clone repository: {str(e)}"}), 500
@bp.route('/status', methods=['GET'])
def get_status():
logger.debug("Received request for git status")
success, message = get_git_status(REPO_PATH)
if isinstance(message, str) and "No git repository" in message:
return jsonify({'success': True, 'data': None}), 200
if success:
logger.debug("Successfully retrieved git status")
return jsonify({'success': True, 'data': message}), 200
else:
logger.error(f"Failed to retrieve git status: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/branch', methods=['POST'])
def create_branch():
branch_name = request.json.get('name')
base_branch = request.json.get('base', 'main')
logger.debug(
f"Received request to create branch {branch_name} from {base_branch}")
success, result = branch_manager.create(branch_name, base_branch)
if success:
logger.debug(f"Successfully created branch: {branch_name}")
return jsonify({'success': True, **result}), 200
else:
logger.error(f"Failed to create branch: {result}")
if 'merging' in result.get('error', '').lower():
return jsonify({'success': False, 'error': result}), 409
return jsonify({'success': False, 'error': result}), 400
@bp.route('/branches', methods=['GET'])
def get_branches():
logger.debug("Received request for branches")
success, result = branch_manager.get_all()
if success:
logger.debug("Successfully retrieved branches")
return jsonify({'success': True, 'data': result}), 200
else:
logger.error(f"Failed to retrieve branches: {result}")
return jsonify({'success': False, 'error': result}), 400
@bp.route('/checkout', methods=['POST'])
def checkout_branch():
branch_name = request.json.get('branch')
logger.debug(f"Received request to checkout branch: {branch_name}")
success, result = branch_manager.checkout(branch_name)
if success:
logger.debug(f"Successfully checked out branch: {branch_name}")
return jsonify({'success': True, **result}), 200
else:
logger.error(f"Failed to checkout branch: {result}")
if 'merging' in result.get('error', '').lower():
return jsonify({'success': False, 'error': result}), 409
return jsonify({'success': False, 'error': result}), 400
@bp.route('/branch/<branch_name>', methods=['DELETE'])
def delete_branch(branch_name):
logger.debug(f"Received request to delete branch: {branch_name}")
success, result = branch_manager.delete(branch_name)
if success:
logger.debug(f"Successfully deleted branch: {branch_name}")
return jsonify({'success': True, **result}), 200
else:
logger.error(f"Failed to delete branch: {result}")
if 'merging' in result.get('error', '').lower():
return jsonify({'success': False, 'error': result}), 409
return jsonify({'success': False, 'error': result}), 400
@bp.route('/branch/push', methods=['POST'])
def push_branch():
data = request.json
logger.debug(f"Received request to push branch: {data}")
branch_name = data.get('branch')
if not branch_name:
return jsonify({
"success": False,
"error": "Branch name is required"
}), 400
success, result = branch_manager.push(branch_name)
if success:
return jsonify({"success": True, "message": result}), 200
else:
if isinstance(result, str):
return jsonify({"success": False, "error": result}), 400
return jsonify({
"success": False,
"error": result.get('error', 'Unknown error occurred')
}), 400
@bp.route('/commit', methods=['POST'])
def commit_files():
files = request.json.get('files', [])
user_commit_message = request.json.get('commit_message', "Commit changes")
logger.debug(f"Received request to commit files: {files}")
commit_message = generate_commit_message(user_commit_message, files)
success, message = git_operations.commit(files, commit_message)
if success:
logger.debug("Successfully committed files")
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error committing files: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/push', methods=['POST'])
def push_files():
logger.debug("Received request to push changes")
success, message = git_operations.push()
if success:
logger.debug("Successfully pushed changes")
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error pushing changes: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/revert', methods=['POST'])
def revert_file():
file_path = request.json.get('file_path')
if not file_path:
return jsonify({
'success': False,
'error': "File path is required."
}), 400
success, message = git_operations.revert(file_path)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error reverting file: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/revert-all', methods=['POST'])
def revert_all():
success, message = git_operations.revert_all()
if success:
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error reverting all changes: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/file', methods=['DELETE'])
def delete_file():
file_path = request.json.get('file_path')
if not file_path:
return jsonify({
'success': False,
'error': "File path is required."
}), 400
success, message = git_operations.delete(file_path)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error deleting file: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/pull', methods=['POST'])
def pull_branch():
branch_name = request.json.get('branch')
success, response = git_operations.pull(branch_name)
# Handle different response types
if isinstance(response, dict):
if response.get('state') == 'resolve':
# Merge conflict is now a success case with state='resolve'
return jsonify({
'success': True,
'state': 'resolve',
'message': response['message'],
'details': response['details']
}), 200
elif response.get('state') == 'error':
# Handle error states
return jsonify({
'success': False,
'state': 'error',
'message': response['message'],
'details': response.get('details', {})
}), 409 if response.get('type') in [
'merge_conflict', 'uncommitted_changes'
] else 400
elif response.get('state') == 'complete':
# Normal success case
return jsonify({
'success': True,
'state': 'complete',
'message': response['message'],
'details': response.get('details', {})
}), 200
# Fallback for string responses or unexpected formats
if success:
return jsonify({
'success': True,
'state': 'complete',
'message': response
}), 200
return jsonify({
'success': False,
'state': 'error',
'message': str(response)
}), 400
@bp.route('/stage', methods=['POST'])
def handle_stage_files():
files = request.json.get('files', [])
success, message = git_operations.stage(files)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
return jsonify({'success': False, 'error': message}), 400
@bp.route('/unstage', methods=['POST'])
def handle_unstage_files():
files = request.json.get('files', [])
success, message = git_operations.unstage(files)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
return jsonify({'success': False, 'error': message}), 400
@bp.route('/unlink', methods=['POST'])
def unlink():
data = request.get_json()
remove_files = data.get('removeFiles', False)
success, message = unlink_repository(REPO_PATH, remove_files)
if success:
return jsonify({'success': True, 'message': message}), 200
else:
return jsonify({'success': False, 'error': message}), 400
def generate_commit_message(user_message, files):
return user_message
@bp.route('/resolve', methods=['POST'])
def resolve_conflicts():
logger.debug("Received request to resolve conflicts")
resolutions = request.json.get('resolutions')
if not resolutions:
return jsonify({
'success': False,
'error': "Resolutions are required"
}), 400
result = git_operations.resolve(resolutions)
if result.get('success'):
logger.debug("Successfully resolved conflicts")
return jsonify(result), 200
else:
logger.error(f"Error resolving conflicts: {result.get('error')}")
return jsonify(result), 400
@bp.route('/merge/finalize', methods=['POST'])
def finalize_merge():
"""
Route to finalize a merge after all conflicts have been resolved.
Expected to be called only after all conflicts are resolved and changes are staged.
"""
logger.debug("Received request to finalize merge")
result = git_operations.finalize_merge()
if result.get('success'):
logger.debug(
f"Successfully finalized merge with files: {result.get('committed_files', [])}"
)
return jsonify({
'success': True,
'message': result.get('message'),
'committed_files': result.get('committed_files', [])
}), 200
else:
logger.error(f"Error finalizing merge: {result.get('error')}")
return jsonify({'success': False, 'error': result.get('error')}), 400
@bp.route('/merge/abort', methods=['POST'])
def abort_merge():
logger.debug("Received request to abort merge")
success, message = git_operations.abort_merge()
if success:
logger.debug("Successfully aborted merge")
return jsonify({'success': True, 'message': message}), 200
else:
logger.error(f"Error aborting merge: {message}")
return jsonify({'success': False, 'error': message}), 400
@bp.route('/commits', methods=['GET'])
def get_commit_history():
logger.debug("Received request for commit history")
branch = request.args.get('branch') # Optional branch parameter
success, result = get_git_commit_history(REPO_PATH, branch)
if success:
logger.debug("Successfully retrieved commit history")
return jsonify({'success': True, 'data': result}), 200
else:
logger.error(f"Failed to retrieve commit history: {result}")
return jsonify({'success': False, 'error': result}), 400
@bp.route('/autopull', methods=['GET', 'POST'])
def handle_auto_pull():
try:
if request.method == 'GET':
settings = get_settings()
return jsonify({
'success':
True,
'enabled':
bool(int(settings.get('auto_pull_enabled', 0)))
}), 200
# POST handling
data = request.json
enabled = data.get('enabled')
if enabled is None:
return jsonify({
'success': False,
'error': 'enabled field is required'
}), 400
save_settings({'auto_pull_enabled': 1 if enabled else 0})
logger.info(
f"Auto-pull has been {'enabled' if enabled else 'disabled'}")
return jsonify({'success': True}), 200
except Exception as e:
logger.error(f"Error handling auto pull setting: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 500
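# Illustration (hypothetical client; the '/api/git' prefix is an assumption,
# since the blueprint registration is not part of this excerpt):
import requests

BASE = 'http://localhost:5000/api/git'

resp = requests.post(f'{BASE}/clone',
                     json={'gitRepo': 'https://github.com/example/profilarr-db'})
print(resp.status_code, resp.json())

status = requests.get(f'{BASE}/status').json()
print(status['data'])   # None while no repository has been cloned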

View File

@@ -1,49 +0,0 @@
# git/auth/authenticate.py
import os
import logging
logger = logging.getLogger(__name__)
class GitHubAuth:
"""
A modular authentication handler for GitHub repositories.
Supports Personal Access Tokens (PAT) for HTTPS authentication.
"""
@staticmethod
def get_authenticated_url(https_url):
"""
Convert an HTTPS URL to include authentication via PAT.
Ensures the token is not duplicated in the URL.
"""
token = os.getenv("PROFILARR_PAT")
if not token:
raise ValueError(
"PROFILARR_PAT is not set in environment variables")
# Check if the URL already contains authentication
if "@" in https_url:
# Already has some form of authentication, remove it to add our token
# This handles URLs that might have a token already
protocol_part, rest = https_url.split("://", 1)
if "@" in rest:
# Remove any existing authentication
_, server_part = rest.split("@", 1)
https_url = f"{protocol_part}://{server_part}"
# Now add our token
authenticated_url = https_url.replace("https://", f"https://{token}@")
return authenticated_url
@staticmethod
def verify_token():
"""
Verify if the Personal Access Token is valid.
"""
token = os.getenv("PROFILARR_PAT")
if not token:
logger.error("PROFILARR_PAT is not set")
return False
logger.info("Token verification skipped (assume valid)")
return True
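# Illustration (placeholder token; the import path is assumed from the header
# comment and may differ in the real package layout):
import os
os.environ['PROFILARR_PAT'] = 'ghp_exampletoken'   # placeholder, not a real token

from app.git.auth.authenticate import GitHubAuth

url = GitHubAuth.get_authenticated_url(
    'https://github.com/example/profilarr-db.git')
print(url)   # https://ghp_exampletoken@github.com/example/profilarr-db.git

# Any credential already embedded in the URL is stripped first, so the token
# never appears twice.
url = GitHubAuth.get_authenticated_url(
    'https://stale_token@github.com/example/profilarr-db.git')
print(url)   # https://ghp_exampletoken@github.com/example/profilarr-db.git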

View File

@@ -1,24 +0,0 @@
# git/branches/create.py
import git
import logging
logger = logging.getLogger(__name__)
def create_branch(repo_path, branch_name, base_branch='main'):
try:
logger.debug(f"Attempting to create branch {branch_name} from {base_branch}")
repo = git.Repo(repo_path)
# Check if the branch already exists
if branch_name in repo.heads:
return False, f"Branch '{branch_name}' already exists."
# Create and checkout the new branch
new_branch = repo.create_head(branch_name, commit=base_branch)
new_branch.checkout()
logger.debug(f"Successfully created branch: {branch_name}")
return True, {"message": f"Created branch: {branch_name}", "current_branch": branch_name}
except Exception as e:
logger.error(f"Error creating branch: {str(e)}", exc_info=True)
return False, {"error": f"Error creating branch: {str(e)}"}

View File

@@ -1,46 +0,0 @@
# git/branches/delete.py
import git
from git.exc import GitCommandError
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def delete_branch(repo_path, branch_name):
try:
logger.debug(f"Attempting to delete branch: {branch_name}")
logger.debug(
f"Attempting to delete branch from repo at path: {repo_path}")
repo = git.Repo(repo_path)
# Fetch updates from remote
logger.debug("Fetching updates from remote...")
repo.git.fetch('--all')
# Update local repository state
logger.debug("Updating local repository state...")
repo.git.remote('update', 'origin', '--prune')
# Check if it's a local branch
if branch_name in repo.heads:
logger.debug(f"Deleting local branch: {branch_name}")
if repo.active_branch.name == branch_name:
return False, f"Cannot delete the current branch: {branch_name}"
repo.delete_head(branch_name, force=True)
logger.debug(f"Local branch {branch_name} deleted")
        # Check whether a matching remote branch exists; only the local branch
        # is deleted here, so any remote branch is left untouched.
        remote_branch = f"origin/{branch_name}"
        if remote_branch in repo.refs:
            logger.debug(f"Remote branch {remote_branch} is left untouched")
return True, {
"message": f"Deleted branch: {branch_name}",
"current_branch": repo.active_branch.name
}
except Exception as e:
logger.error(f"Error deleting branch: {str(e)}", exc_info=True)
return False, {"error": f"Error deleting branch: {str(e)}"}

View File

@@ -1,48 +0,0 @@
import git
import logging
logger = logging.getLogger(__name__)
def get_branches(repo_path):
try:
logger.debug("Attempting to get branches")
repo = git.Repo(repo_path)
# Get local branches
local_branches = [{'name': branch.name, 'isLocal': True, 'isRemote': False} for branch in repo.heads]
logger.debug(f"Local branches found: {[branch['name'] for branch in local_branches]}")
# Get remote branches
remote_branches = [{'name': ref.remote_head, 'isLocal': False, 'isRemote': True} for ref in repo.remote().refs if not ref.remote_head == 'HEAD']
logger.debug(f"Remote branches found: {[branch['name'] for branch in remote_branches]}")
# Combine and update status for branches that are both local and remote
all_branches = local_branches + remote_branches
branch_dict = {}
for branch in all_branches:
if branch['name'] in branch_dict:
branch_dict[branch['name']]['isLocal'] = branch_dict[branch['name']]['isLocal'] or branch['isLocal']
branch_dict[branch['name']]['isRemote'] = branch_dict[branch['name']]['isRemote'] or branch['isRemote']
else:
branch_dict[branch['name']] = branch
all_branches = list(branch_dict.values())
logger.debug(f"All branches combined (local and remote): {[branch['name'] for branch in all_branches]}")
logger.info(f"Branches being sent: {[branch['name'] for branch in all_branches]}")
return True, {"branches": all_branches}
except Exception as e:
logger.error(f"Error getting branches: {str(e)}", exc_info=True)
return False, {"error": f"Error getting branches: {str(e)}"}
def get_current_branch(repo_path):
try:
repo = git.Repo(repo_path)
current_branch = repo.active_branch.name
logger.debug(f"Current branch: {current_branch}")
return current_branch
except Exception as e:
logger.error(f"Error getting current branch: {str(e)}", exc_info=True)
return None

View File

@@ -1,56 +0,0 @@
# git/branches/branches.py
import git
import os
from .create import create_branch
from .checkout import checkout_branch
from .delete import delete_branch
from .get import get_branches, get_current_branch
from .push import push_branch_to_remote
class Branch_Manager:
def __init__(self, repo_path):
self.repo_path = repo_path
def is_merging(self):
repo = git.Repo(self.repo_path)
return os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD'))
def create(self, branch_name, base_branch='main'):
if self.is_merging():
return False, {
'error':
'Cannot create branch while merging. Resolve conflicts first.'
}
return create_branch(self.repo_path, branch_name, base_branch)
def checkout(self, branch_name):
if self.is_merging():
return False, {
'error':
'Cannot checkout while merging. Resolve conflicts first.'
}
return checkout_branch(self.repo_path, branch_name)
def delete(self, branch_name):
if self.is_merging():
return False, {
'error':
'Cannot delete branch while merging. Resolve conflicts first.'
}
return delete_branch(self.repo_path, branch_name)
def get_all(self):
return get_branches(self.repo_path)
def get_current(self):
return get_current_branch(self.repo_path)
def push(self, branch_name):
if self.is_merging():
return False, {
'error': 'Cannot push while merging. Resolve conflicts first.'
}
return push_branch_to_remote(self.repo_path, branch_name)

View File

@@ -1,59 +0,0 @@
# git/branches/push.py
import git
import logging
from ..auth.authenticate import GitHubAuth
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def _handle_git_error(error):
"""Helper function to process git errors and return user-friendly messages"""
error_msg = str(error)
if "403" in error_msg:
return "Authentication failed: The provided PAT doesn't have sufficient permissions or is invalid."
elif "401" in error_msg:
return "Authentication failed: No PAT provided or the token is invalid."
elif "non-fast-forward" in error_msg:
return "Push rejected: Remote contains work that you do not have locally. Please pull the latest changes first."
return f"Git error: {error_msg}"
def push_branch_to_remote(repo_path, branch_name):
try:
logger.debug(f"Attempting to push branch {branch_name} to remote")
# Verify token before attempting push
if not GitHubAuth.verify_token():
return False, "Push operation requires GitHub authentication. Please configure PAT."
repo = git.Repo(repo_path)
# Check if the branch exists locally
if branch_name not in repo.heads:
return False, f"Branch '{branch_name}' does not exist locally."
origin = repo.remote(name='origin')
original_url = origin.url
try:
# Set authenticated URL
auth_url = GitHubAuth.get_authenticated_url(original_url)
origin.set_url(auth_url)
# Push the branch to remote and set the upstream branch
origin.push(refspec=f"{branch_name}:{branch_name}",
set_upstream=True)
return True, f"Pushed branch to remote: {branch_name}"
except git.GitCommandError as e:
return False, _handle_git_error(e)
finally:
# Always restore original URL
origin.set_url(original_url)
except Exception as e:
logger.error(f"Error pushing branch to remote: {str(e)}",
exc_info=True)
return False, str(e)

View File

@@ -1,135 +0,0 @@
# git/operations/commit.py
import git
import os
import logging
from ..status.status import GitStatusManager
logger = logging.getLogger(__name__)
def parse_git_status(status_output):
"""
Parse git status --porcelain output into a structured format.
Returns dict with staged and unstaged changes, identifying status of each file.
"""
changes = {}
for line in status_output:
if not line:
continue
index_status = line[0] # First character: staged status
worktree_status = line[1] # Second character: unstaged status
file_path = line[3:]
changes[file_path] = {
'staged': index_status != ' ',
'staged_status': index_status,
'unstaged_status': worktree_status
}
return changes
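# Worked example of the parsing above, with invented porcelain lines (run with
# parse_git_status in scope):
sample = [
    'M  profiles/hd.yml',     # staged modification
    ' M regex/x265.yml',      # unstaged modification
    'D  formats/old.yml',     # staged deletion
]
print(parse_git_status(sample))
# {'profiles/hd.yml': {'staged': True, 'staged_status': 'M', 'unstaged_status': ' '},
#  'regex/x265.yml': {'staged': False, 'staged_status': ' ', 'unstaged_status': 'M'},
#  'formats/old.yml': {'staged': True, 'staged_status': 'D', 'unstaged_status': ' '}}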
def commit_changes(repo_path, files, message):
"""
Commit changes to git repository, optimizing staging operations.
Only re-stages files if their current staging status is incorrect.
Args:
repo_path: Path to git repository
files: List of files to commit, or None/empty for all staged changes
message: Commit message
Returns:
tuple: (success: bool, message: str)
"""
try:
repo = git.Repo(repo_path)
# If no specific files provided, commit all staged changes
if not files:
commit = repo.index.commit(message)
# Update remote status after commit
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
return True, "Successfully committed all staged changes."
# Get current status of the repository
status_output = repo.git.status('--porcelain').splitlines()
status = parse_git_status(status_output)
# Track files that need staging operations
to_add = []
to_remove = []
already_staged = []
for file_path in files:
if file_path in status:
file_status = status[file_path]
# File is already properly staged
if file_status['staged']:
if file_status['staged_status'] == 'D':
already_staged.append(('deleted', file_path))
else:
already_staged.append(('modified', file_path))
continue
# File needs to be staged
if file_status['unstaged_status'] == 'D':
to_remove.append(file_path)
else:
to_add.append(file_path)
else:
logger.warning(f"File not found in git status: {file_path}")
# Perform necessary staging operations
if to_add:
logger.debug(f"Staging modified files: {to_add}")
repo.index.add(to_add)
if to_remove:
logger.debug(f"Staging deleted files: {to_remove}")
repo.index.remove(to_remove, working_tree=True)
# Commit the changes
commit = repo.index.commit(message)
# Update remote status after commit
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
# Build detailed success message
staged_counts = {
'added/modified': len(to_add),
'deleted': len(to_remove),
'already_staged': len(already_staged)
}
message_parts = []
if staged_counts['added/modified']:
message_parts.append(
f"{staged_counts['added/modified']} files staged")
if staged_counts['deleted']:
message_parts.append(
f"{staged_counts['deleted']} deletions staged")
if staged_counts['already_staged']:
message_parts.append(
f"{staged_counts['already_staged']} files already staged")
if message_parts:
details = " and ".join(message_parts)
return True, f"Successfully committed changes ({details})"
else:
return True, "Successfully committed changes (no files needed staging)"
except git.exc.GitCommandError as e:
logger.error(f"Git command error committing changes: {str(e)}",
exc_info=True)
return False, f"Error committing changes: {str(e)}"
except Exception as e:
logger.error(f"Error committing changes: {str(e)}", exc_info=True)
return False, f"Error committing changes: {str(e)}"

View File

@@ -1,54 +0,0 @@
import git
from .stage import stage_files
from .commit import commit_changes
from .push import push_changes
from .revert import revert_file, revert_all
from .delete import delete_file
from .pull import pull_branch
from .unstage import unstage_files
from .merge import abort_merge, finalize_merge
from .resolve import resolve_conflicts
import logging
logger = logging.getLogger(__name__)
class GitOperations:
def __init__(self, repo_path):
self.repo_path = repo_path
def stage(self, files):
return stage_files(self.repo_path, files)
def unstage(self, files):
return unstage_files(self.repo_path, files)
def commit(self, files, message):
return commit_changes(self.repo_path, files, message)
def push(self):
return push_changes(self.repo_path)
def revert(self, file_path):
return revert_file(self.repo_path, file_path)
def revert_all(self):
return revert_all(self.repo_path)
def delete(self, file_path):
return delete_file(self.repo_path, file_path)
def pull(self, branch_name):
return pull_branch(self.repo_path, branch_name)
def finalize_merge(self):
repo = git.Repo(self.repo_path)
return finalize_merge(repo)
def abort_merge(self):
return abort_merge(self.repo_path)
def resolve(self, resolutions):
repo = git.Repo(self.repo_path)
return resolve_conflicts(repo, resolutions)

View File

@@ -1,59 +0,0 @@
# git/operations/push.py
import git
import logging
from ..auth.authenticate import GitHubAuth
from ..status.status import GitStatusManager
logger = logging.getLogger(__name__)
def _handle_git_error(error):
"""Helper function to process git errors and return user-friendly messages"""
error_msg = str(error)
if "403" in error_msg:
return "Authentication failed: The provided PAT doesn't have sufficient permissions or is invalid."
elif "401" in error_msg:
return "Authentication failed: No PAT provided or the token is invalid."
elif "non-fast-forward" in error_msg:
return "Push rejected: Remote contains work that you do not have locally. Please pull the latest changes first."
return f"Git error: {error_msg}"
def push_changes(repo_path):
try:
# Verify token before attempting push
if not GitHubAuth.verify_token():
return False, "Push operation requires GitHub authentication. Please configure PAT."
repo = git.Repo(repo_path)
origin = repo.remote(name='origin')
original_url = origin.url
try:
# Set authenticated URL
auth_url = GitHubAuth.get_authenticated_url(original_url)
origin.set_url(auth_url)
# Push changes
push_info = origin.push()
if push_info and push_info[0].flags & push_info[0].ERROR:
raise git.GitCommandError("git push", push_info[0].summary)
# Update remote status after successful push
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
status_manager.update_remote_status()
return True, "Successfully pushed changes."
finally:
# Always restore original URL
origin.set_url(original_url)
except git.GitCommandError as e:
logger.error(f"Git command error during push: {str(e)}")
return False, _handle_git_error(e)
except Exception as e:
logger.error(f"Error pushing changes: {str(e)}", exc_info=True)
return False, str(e)

View File

@@ -1,71 +0,0 @@
# git/operations/stage.py
import git
import os
import logging
logger = logging.getLogger(__name__)
def stage_files(repo_path, files):
"""
Stage files in git repository, properly handling both existing and deleted files.
Args:
repo_path: Path to git repository
files: List of files to stage, or None/empty list to stage all changes
Returns:
tuple: (success: bool, message: str)
"""
try:
repo = git.Repo(repo_path)
# Stage all changes if no specific files provided
if not files:
repo.git.add(A=True)
return True, "All changes have been staged."
# Handle specific files
existing_files = []
deleted_files = []
# Separate existing and deleted files
for file_path in files:
full_path = os.path.join(repo_path, file_path)
if os.path.exists(full_path):
existing_files.append(file_path)
else:
# Check if file is tracked but deleted
try:
repo.git.ls_files(file_path, error_unmatch=True)
deleted_files.append(file_path)
except git.exc.GitCommandError:
logger.warning(f"Untracked file not found: {file_path}")
continue
# Stage existing files
if existing_files:
repo.index.add(existing_files)
# Stage deleted files
if deleted_files:
repo.index.remove(deleted_files, working_tree=True)
message_parts = []
if existing_files:
message_parts.append(
f"{len(existing_files)} existing files staged")
if deleted_files:
message_parts.append(f"{len(deleted_files)} deleted files staged")
message = " and ".join(
message_parts) if message_parts else "No files staged"
return True, message
except git.exc.GitCommandError as e:
logger.error(f"Git command error staging files: {str(e)}",
exc_info=True)
return False, f"Error staging files: {str(e)}"
except Exception as e:
logger.error(f"Error staging files: {str(e)}", exc_info=True)
return False, f"Error staging files: {str(e)}"

View File

@@ -1,52 +0,0 @@
from dataclasses import dataclass
from typing import Any, Dict, List, Literal, Optional
from enum import Enum
class FileType(str, Enum):
REGEX = "regex"
CUSTOM_FORMAT = "custom format"
QUALITY_PROFILE = "quality profile"
class ResolutionChoice(str, Enum):
LOCAL = "local"
INCOMING = "incoming"
@dataclass
class TagConflict:
tag: str
local_status: Literal["Present", "Absent"]
incoming_status: Literal["Present", "Absent"]
resolution: Optional[ResolutionChoice] = None
@dataclass
class FormatConflict:
format_id: str
local_score: Optional[int]
incoming_score: Optional[int]
resolution: Optional[ResolutionChoice] = None
@dataclass
class GeneralConflict:
key: str
    local_value: Any
    incoming_value: Any
resolution: Optional[ResolutionChoice] = None
@dataclass
class FileResolution:
file_type: FileType
filename: str
tags: List[TagConflict]
formats: List[FormatConflict]
general: List[GeneralConflict]
@dataclass
class ResolutionRequest:
resolutions: Dict[str, FileResolution]
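# Illustration: building a resolution payload from the dataclasses above (the
# file name, tag and format values are invented for this sketch).
resolution = FileResolution(
    file_type=FileType.QUALITY_PROFILE,
    filename='profiles/hd-bluray.yml',
    tags=[TagConflict(tag='1080p',
                      local_status='Present',
                      incoming_status='Absent',
                      resolution=ResolutionChoice.LOCAL)],
    formats=[FormatConflict(format_id='x265 (HD)',
                            local_score=80,
                            incoming_score=100,
                            resolution=ResolutionChoice.INCOMING)],
    general=[])
request = ResolutionRequest(resolutions={'profiles/hd-bluray.yml': resolution})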

View File

@@ -1,15 +0,0 @@
# git/operations/unstage.py
import git
import logging
logger = logging.getLogger(__name__)
def unstage_files(repo_path, files):
try:
repo = git.Repo(repo_path)
repo.index.reset(files=files)
return True, "Successfully unstaged files."
except Exception as e:
logger.error(f"Error unstaging files: {str(e)}", exc_info=True)
return False, f"Error unstaging files: {str(e)}"

View File

@@ -1,159 +0,0 @@
# status/commit_history.py
import git
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
def format_commit(commit, repo, tracking_branch=None):
"""Helper function to format a single commit's information"""
# Check if it's a merge commit
is_merge = len(commit.parents) > 1
# Get the remote URL for the commit if possible
remote_url = None
if tracking_branch:
remote_url = repo.remote().url
if remote_url.endswith('.git'):
remote_url = remote_url[:-4]
remote_url += f"/commit/{commit.hexsha}"
commit_info = {
'hash': commit.hexsha,
'message': commit.message.strip(),
'author': f"{commit.author.name} <{commit.author.email}>",
'date': commit.committed_datetime.isoformat(),
'isMerge': is_merge,
'remoteUrl': remote_url,
'details': {
'files_changed': [],
'insertions': 0,
'deletions': 0
}
}
# Get detailed stats
try:
if len(commit.parents) > 0:
# Get the diff between this commit and its first parent
diff = commit.parents[0].diff(commit)
# Initialize stats
stats = {'files_changed': [], 'insertions': 0, 'deletions': 0}
# Get the total diff stats using git diff --numstat
raw_stats = repo.git.diff(commit.parents[0].hexsha,
commit.hexsha,
numstat=True).splitlines()
for line in raw_stats:
if not line.strip():
continue
adds, dels, file_path = line.split('\t')
# Handle binary files which show up as '-' in numstat
if adds != '-' and dels != '-':
stats['insertions'] += int(adds)
stats['deletions'] += int(dels)
stats['files_changed'].append(file_path)
commit_info['details'] = stats
except Exception as e:
logger.debug(f"Error getting commit details: {e}")
return commit_info
def get_git_commit_history(repo_path, branch=None):
"""
Get both local and remote commit history for the repository.
Args:
repo_path (str): Path to the git repository
branch (str, optional): Branch name to get history for. Defaults to current branch.
Returns:
tuple: (success: bool, result: dict/str)
On success, returns (True, {
'local_commits': [...],
'remote_commits': [...],
'ahead_count': int,
'behind_count': int,
'branch': str,
'has_remote': bool
})
On failure, returns (False, error_message)
"""
try:
repo = git.Repo(repo_path)
current_branch = repo.active_branch
branch_to_check = branch if branch else current_branch.name
# Get the tracking branch
tracking_branch = None
try:
tracking_branch = repo.active_branch.tracking_branch()
except Exception as e:
logger.debug(f"No tracking branch found: {e}")
local_commits = []
remote_commits = []
ahead_count = 0
behind_count = 0
if tracking_branch:
try:
# Find the merge base (common ancestor)
merge_base = repo.merge_base(tracking_branch,
current_branch)[0]
# Get commits that are in local but not in remote (ahead)
local_commits = [
format_commit(commit, repo, tracking_branch)
for commit in repo.iter_commits(
f"{tracking_branch.name}..{current_branch.name}")
]
ahead_count = len(local_commits)
# Get commits that are in remote but not in local (behind)
remote_commits = [
format_commit(commit, repo, tracking_branch)
for commit in repo.iter_commits(
f"{current_branch.name}..{tracking_branch.name}")
]
behind_count = len(remote_commits)
# If no divergence, get recent commits from current branch
if not local_commits and not remote_commits:
local_commits = [
format_commit(commit, repo, tracking_branch)
for commit in repo.iter_commits(current_branch.name,
max_count=50)
]
except git.GitCommandError as e:
logger.error(f"Git command error while getting commits: {e}")
return False, f"Error getting commits: {str(e)}"
else:
# If no tracking branch, just get recent local commits
local_commits = [
format_commit(commit, repo)
for commit in repo.iter_commits(current_branch.name,
max_count=50)
]
return True, {
'local_commits': local_commits,
'remote_commits': remote_commits,
'ahead_count': ahead_count,
'behind_count': behind_count,
'branch': branch_to_check,
'has_remote': tracking_branch is not None
}
except Exception as e:
logger.exception("Error getting commit history")
return False, f"Unexpected error getting commit history: {str(e)}"

View File

@@ -1,232 +0,0 @@
import logging
import os
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
def compare_yaml(old_data: Any,
new_data: Any,
path: str = "") -> List[Dict[str, Any]]:
"""
Recursively compare two YAML structures and generate a list of changes.
Handles nested structures including:
- Simple values (strings, numbers, booleans)
- Lists of primitives (like tags: ['1080p', 'x264'])
- Lists of objects (like custom_formats: [{name: 'DON', score: 80}])
- Nested objects (like qualities: {id: 1, name: 'HD', qualities: [...]})
Args:
old_data: Original data structure
new_data: New data structure to compare against
path: Current path in the data structure (for tracking nested changes)
Returns:
List of changes, where each change is a dict containing:
{
key: Path to the changed field (e.g. "custom_formats[DON].score")
change: 'added' | 'removed' | 'modified'
from: Original value (for modified/removed)
to: New value (for modified/added)
value: List of values (for array additions/removals)
}
"""
logger.debug(f"Comparing path: {path or 'root'}")
changes = []
if old_data is None and new_data is None:
return changes
if old_data is None and new_data is not None:
if isinstance(new_data, dict):
old_data = {}
elif isinstance(new_data, list):
old_data = []
else:
old_data = None
if old_data is not None and new_data is None:
logger.debug(f"Path {path} removed")
return [{"key": path, "change": "removed", "from": old_data}]
if type(old_data) != type(new_data):
        logger.debug(
            f"Type mismatch at {path}: {type(old_data)} -> {type(new_data)}")
return [{
"key": path,
"change": "modified",
"from": old_data,
"to": new_data
}]
if isinstance(old_data, list):
has_objects = any(
isinstance(x, dict) for x in old_data + new_data if x is not None)
if has_objects:
try:
old_dict = {x.get("name"): x for x in old_data if x}
new_dict = {x.get("name"): x for x in new_data if x}
added = set(new_dict) - set(old_dict)
removed = set(old_dict) - set(new_dict)
common = set(old_dict) & set(new_dict)
if added:
logger.debug(f"Added items at {path}: {added}")
if removed:
logger.debug(f"Removed items at {path}: {removed}")
for key in added:
changes.append({
"key": f"{path}[{key}]",
"change": "added",
"to": new_dict[key]
})
for key in removed:
changes.append({
"key": f"{path}[{key}]",
"change": "removed",
"from": old_dict[key]
})
for key in common:
if old_dict[key] != new_dict[key]:
logger.debug(
f"Found changes in common item {key} at {path}")
changes.extend(
compare_yaml(old_dict[key], new_dict[key],
f"{path}[{key}]"))
except Exception as e:
logger.warning(
f"Failed to compare by name at {path}, falling back to index comparison: {str(e)}"
)
for i, (old_item,
new_item) in enumerate(zip(old_data, new_data)):
if old_item != new_item:
changes.extend(
compare_yaml(old_item, new_item, f"{path}[{i}]"))
else:
old_set = set(old_data)
new_set = set(new_data)
if added := new_set - old_set:
logger.debug(f"Added values at {path}: {added}")
changes.append({
"key": path,
"change": "added",
"value": sorted([x for x in added if x is not None])
})
if removed := old_set - new_set:
logger.debug(f"Removed values at {path}: {removed}")
changes.append({
"key": path,
"change": "removed",
"value": sorted([x for x in removed if x is not None])
})
elif isinstance(old_data, dict):
all_keys = set(old_data) | set(new_data)
for key in all_keys:
new_path = f"{path}.{key}" if path else key
if key not in old_data:
logger.debug(f"Added key at {new_path}")
changes.append({
"key": new_path,
"change": "added",
"to": new_data[key]
})
elif key not in new_data:
logger.debug(f"Removed key at {new_path}")
changes.append({
"key": new_path,
"change": "removed",
"from": old_data[key]
})
else:
changes.extend(
compare_yaml(old_data[key], new_data[key], new_path))
else:
if old_data != new_data:
            logger.debug(f"Modified value at {path}: {old_data} -> {new_data}")
changes.append({
"key": path,
"change": "modified",
"from": old_data,
"to": new_data
})
for c in changes:
if c["change"] == "added" and "from" not in c:
c["from"] = "~"
return changes
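# Worked example on two small invented structures (run with compare_yaml in
# scope; dict iteration means the order of emitted changes may vary):
old = {'name': 'HD Profile',
       'tags': ['1080p'],
       'custom_formats': [{'name': 'DON', 'score': 80}]}
new = {'name': 'HD Profile',
       'tags': ['1080p', 'x264'],
       'custom_formats': [{'name': 'DON', 'score': 100}]}
for change in compare_yaml(old, new):
    print(change)
# {'key': 'tags', 'change': 'added', 'value': ['x264'], 'from': '~'}
# {'key': 'custom_formats[DON].score', 'change': 'modified', 'from': 80, 'to': 100}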
def normalize_yaml_keys(data):
"""Convert boolean keys to strings in YAML data to avoid JSON serialization issues"""
if isinstance(data, dict):
return {str(k): normalize_yaml_keys(v) for k, v in data.items()}
elif isinstance(data, list):
return [normalize_yaml_keys(item) for item in data]
else:
return data
def create_change_summary(old_data: Optional[Dict], new_data: Optional[Dict],
file_path: str) -> Dict[str, Any]:
"""
Create a summary of changes between two YAML structures with file metadata.
This wrapper adds git-specific fields like name, status, and file path.
Args:
old_data: Original YAML data (from git HEAD)
new_data: New YAML data (from working directory)
file_path: Path to the file being compared
Returns:
Dict containing:
- name: Current name (from new_data or filename)
- prior_name: Previous name (from old_data)
- outgoing_name: New name if changed, else None
- status: 'New' | 'Modified' | 'Deleted'
- file_path: Path to the file
- modified: True if file was modified/added
- deleted: True if file was deleted
- changes: Detailed changes from compare_yaml
"""
try:
# Normalize keys to avoid JSON serialization issues with boolean keys
old_data = normalize_yaml_keys(old_data) if old_data else None
new_data = normalize_yaml_keys(new_data) if new_data else None
filename = os.path.basename(file_path)
new_name = new_data.get("name") if new_data else None
old_name = old_data.get("name") if old_data else None
current_name = new_name or filename
if old_data is None and new_data is not None:
status = "New"
logger.info(f"New file detected: {file_path}")
elif old_data is not None and new_data is None:
status = "Deleted"
logger.info(f"Deleted file detected: {file_path}")
else:
status = "Modified"
logger.info(f"Modified file detected: {file_path}")
detailed_changes = compare_yaml(old_data, new_data)
if detailed_changes:
logger.info(
f"Found {len(detailed_changes)} changes in {file_path}")
logger.debug(f"Detailed changes: {detailed_changes}")
return {
"name": current_name,
"prior_name": old_name,
"outgoing_name": new_name if new_name != old_name else None,
"status": status,
"file_path": file_path,
"modified": status != "Deleted",
"deleted": status == "Deleted",
"changes": detailed_changes
}
except Exception as e:
logger.error(
f"Error creating change summary for {file_path}: {str(e)}",
exc_info=True)
raise

View File

@@ -1,283 +0,0 @@
import os
import yaml
import logging
from typing import Any, Dict, List, Optional, Union
logger = logging.getLogger(__name__)
# Define conflict states
UNRESOLVED = "UNRESOLVED"
RESOLVED = "RESOLVED"
MODIFY_DELETE = "MODIFY_DELETE"
def compare_conflict_yaml(ours_data: Any,
theirs_data: Any,
path: str = "") -> List[Dict[str, Any]]:
"""
Compare two YAML structures and generate conflict information.
Handles nested structures and produces conflict records in the format:
{
'parameter': 'Field Name',
'local_value': value_from_ours,
'incoming_value': value_from_theirs
}
"""
conflicts = []
# Handle None/deletion cases
if ours_data is None and theirs_data is None:
return conflicts
if ours_data is None:
# Local version deleted
param_name = path or 'File'
return [{
'parameter': param_name,
'local_value': '🗑️ File deleted in local version',
'incoming_value': '📄 File exists in incoming version'
}]
if theirs_data is None:
# Incoming version deleted
param_name = path or 'File'
return [{
'parameter': param_name,
'local_value': '📄 File exists in local version',
'incoming_value': '🗑️ File deleted in incoming version'
}]
# Handle different types as conflicts
if type(ours_data) != type(theirs_data):
return [{
'parameter': path,
'local_value': ours_data,
'incoming_value': theirs_data
}]
# Handle lists
if isinstance(ours_data, list):
# Check if list contains objects
has_objects = any(
isinstance(x, dict) for x in ours_data + theirs_data
if x is not None)
if has_objects:
return compare_object_arrays(ours_data, theirs_data, path)
else:
return compare_primitive_arrays(ours_data, theirs_data, path)
# Handle dictionaries
elif isinstance(ours_data, dict):
return compare_dicts(ours_data, theirs_data, path)
# Handle primitive values
elif ours_data != theirs_data:
return [{
'parameter': path,
'local_value': ours_data,
'incoming_value': theirs_data
}]
return conflicts
def compare_object_arrays(ours_data: List[Dict], theirs_data: List[Dict],
path: str) -> List[Dict]:
"""Compare arrays of objects using name field as identifier"""
conflicts = []
try:
# Build lookup dictionaries
ours_dict = {x.get('name'): x for x in ours_data if x}
theirs_dict = {x.get('name'): x for x in theirs_data if x}
# Find additions/removals
ours_keys = set(ours_dict.keys())
theirs_keys = set(theirs_dict.keys())
# Handle added items
for key in (theirs_keys - ours_keys):
conflicts.append({
'parameter': f"{path}[{key}]" if path else key,
'local_value': None,
'incoming_value': theirs_dict[key]
})
# Handle removed items
for key in (ours_keys - theirs_keys):
conflicts.append({
'parameter': f"{path}[{key}]" if path else key,
'local_value': ours_dict[key],
'incoming_value': None
})
# Compare common items
for key in (ours_keys & theirs_keys):
if ours_dict[key] != theirs_dict[key]:
new_path = f"{path}[{key}]" if path else key
conflicts.extend(
compare_conflict_yaml(ours_dict[key], theirs_dict[key],
new_path))
except Exception as e:
logger.warning(
f"Failed to compare objects by name at {path}, using positional comparison: {str(e)}"
)
# Fallback to positional comparison
for i, (ours_item,
theirs_item) in enumerate(zip(ours_data, theirs_data)):
if ours_item != theirs_item:
new_path = f"{path}[{i}]" if path else str(i)
conflicts.extend(
compare_conflict_yaml(ours_item, theirs_item, new_path))
return conflicts
def compare_primitive_arrays(ours_data: List, theirs_data: List,
path: str) -> List[Dict]:
"""Compare arrays of primitive values"""
conflicts = []
ours_set = set(ours_data)
theirs_set = set(theirs_data)
# Handle additions
added = theirs_set - ours_set
if added:
conflicts.append({
'parameter': path or 'Array',
'local_value': sorted(list(ours_set)),
'incoming_value': sorted(list(theirs_set))
})
return conflicts
def format_array_for_display(data):
"""Format array data for display in conflict resolution"""
if isinstance(data, list):
if not data:
return "[] (empty array)"
elif all(isinstance(x, dict) and 'name' in x for x in data):
# Array of objects with names - show the names
names = [x['name'] for x in data]
if len(names) <= 5:
return f"[{', '.join(names)}]"
else:
return f"[{', '.join(names[:5])}, ... and {len(names) - 5} more]"
elif all(not isinstance(x, (dict, list)) for x in data):
# Array of primitives
if len(data) <= 5:
return f"[{', '.join(str(x) for x in data)}]"
else:
return f"[{', '.join(str(x) for x in data[:5])}, ... and {len(data) - 5} more]"
else:
# Mixed or complex array
return f"Array with {len(data)} items"
return data
def compare_dicts(ours_data: Dict, theirs_data: Dict, path: str) -> List[Dict]:
"""Compare dictionaries recursively"""
conflicts = []
# Get all keys from both dictionaries
all_keys = set(ours_data.keys()) | set(theirs_data.keys())
for key in all_keys:
new_path = f"{path}.{key}" if path else key
if key not in ours_data:
# Format arrays for better display when field is missing locally
incoming_val = theirs_data[key]
if isinstance(incoming_val, list):
incoming_val = format_array_for_display(incoming_val)
conflicts.append({
'parameter': new_path,
'local_value': None,
'incoming_value': incoming_val
})
elif key not in theirs_data:
# Format arrays for better display when field is missing remotely
local_val = ours_data[key]
if isinstance(local_val, list):
local_val = format_array_for_display(local_val)
conflicts.append({
'parameter': new_path,
'local_value': local_val,
'incoming_value': None
})
elif ours_data[key] != theirs_data[key]:
conflicts.extend(
compare_conflict_yaml(ours_data[key], theirs_data[key],
new_path))
return conflicts
def create_conflict_summary(file_path: str,
ours_data: Optional[Dict],
theirs_data: Optional[Dict],
status: str = UNRESOLVED) -> Dict[str, Any]:
"""
Create a summary of conflicts between two versions of a file.
Args:
file_path: Path to the file in conflict
ours_data: Our version of the YAML data
theirs_data: Their version of the YAML data
status: Conflict status (UNRESOLVED, RESOLVED, or MODIFY_DELETE)
Returns:
Dict containing:
- file_path: Path to the conflicted file
- type: Type of item
- name: Name from our version or filename
- incoming_name: Name from their version (if available)
- status: Current conflict status
- conflict_details: List of specific conflicts
"""
try:
from .utils import determine_type # Import here to avoid circular imports
# Generate conflict details
conflict_details = {
'conflicting_parameters':
compare_conflict_yaml(ours_data, theirs_data)
}
# Get local name
local_name = None
if ours_data and isinstance(ours_data, dict) and 'name' in ours_data:
local_name = ours_data.get('name')
if not local_name:
# Strip the extension to get a cleaner name
basename = os.path.basename(file_path)
local_name = os.path.splitext(basename)[0]
# Get incoming name
incoming_name = None
if theirs_data and isinstance(theirs_data, dict) and 'name' in theirs_data:
incoming_name = theirs_data.get('name')
if not incoming_name:
# Strip the extension to get a cleaner name
basename = os.path.basename(file_path)
incoming_name = os.path.splitext(basename)[0]
result = {
'file_path': file_path,
'type': determine_type(file_path),
'name': local_name,
'incoming_name': incoming_name,
'status': status,
'conflict_details': conflict_details
}
return result
except Exception as e:
logger.error(
f"Failed to create conflict summary for {file_path}: {str(e)}")
return None
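# Illustrative sketch (not part of the original module), using made-up YAML
# payloads: a summary for two diverging versions of a custom format file.
_ours_example = {'name': 'Example Format', 'score': 100}
_theirs_example = {'name': 'Example Format', 'score': 150}
_example_summary = create_conflict_summary(
    'custom_formats/Example Format.yml', _ours_example, _theirs_example,
    UNRESOLVED)
# _example_summary['type'] is 'Custom Format', both name fields resolve to
# 'Example Format', and conflict_details would list the differing 'score' values.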

View File

@@ -1,229 +0,0 @@
import os
import yaml
import logging
from git import GitCommandError
from .comparison import create_change_summary
from .utils import determine_type, parse_commit_message, extract_name_from_path
logger = logging.getLogger(__name__)
# Use the centralized extract_name_from_path function from utils
extract_name = extract_name_from_path
def check_merge_conflict(repo, branch, file_path):
"""Check if pulling a file would cause merge conflicts"""
try:
# Check for local changes (uncommitted or unpushed)
status = repo.git.status('--porcelain', file_path).strip()
if status:
status_code = status[:2] if len(status) >= 2 else ''
            has_changes = any(code in status_code for code in ('M', 'A', 'D', 'R'))
else:
# Check for unpushed commits
merge_base = repo.git.merge_base('HEAD',
f'origin/{branch}').strip()
committed_changes = repo.git.log(f'{merge_base}..HEAD',
'--',
file_path,
ignore_missing=True).strip()
has_changes = bool(committed_changes)
if has_changes:
# Test if merge would cause conflicts
try:
merge_test = repo.git.merge_tree('--write-tree', 'HEAD',
f'origin/{branch}')
return any(
line.startswith('<<<<<<< ')
for line in merge_test.splitlines() if file_path in line)
except GitCommandError:
return True # Assume conflict if merge test fails
return False
except Exception as e:
logger.error(f"Failed to check conflicts for {file_path}: {str(e)}")
return False
def get_commit_message(repo, branch, file_path):
"""Get commit message for incoming changes to a file"""
try:
raw_message = repo.git.show(f'HEAD...origin/{branch}', '--format=%B',
'-s', '--', file_path).strip()
return parse_commit_message(raw_message)
except GitCommandError as e:
logger.error(
f"Git command error getting commit message for {file_path}: {str(e)}"
)
return {
"body": "",
"footer": "",
"scope": "",
"subject": f"Error retrieving commit message: {str(e)}",
"type": ""
}
def parse_commit_message(message):
"""Parse a commit message into its components"""
try:
# Default structure
parsed = {
"type": "Unknown Type",
"scope": "Unknown Scope",
"subject": "",
"body": "",
"footer": ""
}
if not message:
return parsed
# Split message into lines
lines = message.strip().split('\n')
# Parse first line (header)
if lines:
header = lines[0]
# Try to parse conventional commit format: type(scope): subject
import re
conventional_format = re.match(r'^(\w+)(?:\(([^)]+)\))?: (.+)$',
header)
if conventional_format:
groups = conventional_format.groups()
parsed.update({
"type": groups[0] or "Unknown Type",
"scope": groups[1] or "Unknown Scope",
"subject": groups[2]
})
else:
parsed["subject"] = header
# Parse body and footer
if len(lines) > 1:
# Find the divider between body and footer (if any)
footer_start = -1
for i, line in enumerate(lines[1:], 1):
if re.match(r'^[A-Z_-]+:', line):
footer_start = i
break
# Extract body and footer
if footer_start != -1:
parsed["body"] = '\n'.join(lines[1:footer_start]).strip()
parsed["footer"] = '\n'.join(lines[footer_start:]).strip()
else:
parsed["body"] = '\n'.join(lines[1:]).strip()
return parsed
except Exception as e:
logger.error(f"Error parsing commit message: {str(e)}")
return {
"type": "Unknown Type",
"scope": "Unknown Scope",
"subject": "Error parsing commit message",
"body": "",
"footer": ""
}
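# Illustrative sketch (not part of the original module): parsing a
# conventional-commit style message with this module-local parser (unlike the
# utils version, type and scope are kept verbatim).
_parsed_example = parse_commit_message(
    'fix(profile): correct upgrade cutoff\n'
    '\n'
    'Adjust the cutoff so upgrades stop at the intended quality.\n'
    'BREAKING-CHANGE: cutoff ids are remapped')
# -> {'type': 'fix', 'scope': 'profile',
#     'subject': 'correct upgrade cutoff',
#     'body': 'Adjust the cutoff so upgrades stop at the intended quality.',
#     'footer': 'BREAKING-CHANGE: cutoff ids are remapped'}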
def get_incoming_changes(repo, branch):
"""Get list of changes that would come in from origin"""
try:
# Get status including renames
diff_output = repo.git.diff(f'HEAD...origin/{branch}', '--name-status',
'-M').split('\n')
changed_files = []
rename_mapping = {}
# Process status to identify renames
for line in diff_output:
if not line:
continue
parts = line.split('\t')
if len(parts) < 2:
continue
status = parts[0]
if status.startswith('R'):
old_path, new_path = parts[1], parts[2]
rename_mapping[new_path] = old_path
changed_files.append(new_path)
else:
changed_files.append(parts[1])
logger.info(f"Processing {len(changed_files)} incoming changes")
incoming_changes = []
for file_path in changed_files:
try:
# Handle renamed files
old_path = rename_mapping.get(file_path, file_path)
is_rename = file_path in rename_mapping
# Get local and remote versions
try:
local_content = repo.git.show(f'HEAD:{old_path}')
local_data = yaml.safe_load(local_content)
except (GitCommandError, yaml.YAMLError):
local_data = None
try:
remote_content = repo.git.show(
f'origin/{branch}:{file_path}')
remote_data = yaml.safe_load(remote_content)
except (GitCommandError, yaml.YAMLError):
remote_data = None
# Skip if no actual changes
if local_data == remote_data and not is_rename:
continue
# Check for conflicts and get commit info
will_conflict = check_merge_conflict(repo, branch, file_path)
commit_message = get_commit_message(repo, branch, file_path)
# Generate change summary
change = create_change_summary(local_data, remote_data,
file_path)
# Add incoming-specific fields
                change.update({
                    'commit_message': commit_message,
                    'type': determine_type(file_path),
                    'will_conflict': will_conflict,
                    'id': remote_data.get('id') if remote_data else None,
                    'local_name': extract_name(old_path) if is_rename else extract_name(file_path),
                    'incoming_name': extract_name(file_path),
                    'staged': False
                })
if is_rename:
change['status'] = 'Renamed'
incoming_changes.append(change)
except Exception as e:
logger.error(
f"Failed to process incoming change for {file_path}: {str(e)}"
)
continue
return incoming_changes
except Exception as e:
logger.error(f"Failed to get incoming changes: {str(e)}")
return []

View File

@@ -1,141 +0,0 @@
import os
import yaml
import logging
from git import GitCommandError
from .conflict_comparison import create_conflict_summary, UNRESOLVED, RESOLVED, MODIFY_DELETE
logger = logging.getLogger(__name__)
def get_version_data(repo, ref, file_path):
"""Get YAML data from a specific version of a file"""
try:
content = repo.git.show(f'{ref}:{file_path}')
return yaml.safe_load(content) if content else None
except GitCommandError:
return None
def process_modify_delete_conflict(repo, file_path, deleted_in_head):
"""Handle case where one side modified while other deleted"""
try:
# Check if conflict is resolved
status_output = repo.git.status('--porcelain', file_path)
file_exists = os.path.exists(os.path.join(repo.working_dir, file_path))
is_staged = status_output and status_output[0] in ['M', 'A']
# Determine status
        if (file_exists and is_staged) or (not file_exists and status_output.startswith('D ')):
status = RESOLVED
else:
status = MODIFY_DELETE
# For delete conflicts, we need to extract the name for display purposes
# This will be the name of the actual file before it was deleted
basename = os.path.basename(file_path)
filename = os.path.splitext(basename)[0] # Strip extension
# Get metadata from existing version to extract name if possible
if file_exists:
# File exists locally, read it
try:
with open(os.path.join(repo.working_dir, file_path), 'r') as f:
existing_data = yaml.safe_load(f.read())
except Exception as read_error:
logger.warning(f"Could not read existing file {file_path}: {str(read_error)}")
existing_data = {'name': filename}
else:
# File was deleted locally, try to get from merge head
try:
existing_data = get_version_data(repo, 'MERGE_HEAD', file_path)
except Exception as merge_error:
logger.warning(f"Could not get merge head for {file_path}: {str(merge_error)}")
existing_data = {'name': filename}
# Simplified placeholder data for deleted version
if deleted_in_head:
# File was deleted in HEAD (local) but exists in MERGE_HEAD (incoming)
local_data = None # This indicates deleted
try:
# Try to get name from incoming
incoming_data = existing_data if existing_data else {'name': filename}
except Exception:
incoming_data = {'name': filename}
else:
# File exists in HEAD (local) but deleted in MERGE_HEAD (incoming)
try:
local_data = existing_data if existing_data else {'name': filename}
except Exception:
local_data = {'name': filename}
incoming_data = None # This indicates deleted
return create_conflict_summary(file_path, local_data, incoming_data, status)
except Exception as e:
logger.error(
f"Failed to process modify/delete conflict for {file_path}: {str(e)}"
)
return None
def process_regular_conflict(repo, file_path):
"""Handle standard merge conflict between two versions"""
try:
# Get both versions
ours_data = get_version_data(repo, 'HEAD', file_path)
theirs_data = get_version_data(repo, 'MERGE_HEAD', file_path)
if not ours_data and not theirs_data:
return None
# Check if conflict is resolved
status_output = repo.git.status('--porcelain', file_path)
status = UNRESOLVED if status_output.startswith('UU') else RESOLVED
return create_conflict_summary(file_path, ours_data, theirs_data,
status)
except Exception as e:
logger.error(f"Failed to process conflict for {file_path}: {str(e)}")
return None
def get_merge_conflicts(repo):
"""Get all merge conflicts in the repository"""
try:
# Check if we're in a merge state
if not os.path.exists(os.path.join(repo.git_dir, 'MERGE_HEAD')):
return []
conflicts = []
status = repo.git.status('--porcelain', '-z').split('\0')
# Process each status entry
for item in status:
if not item or len(item) < 4:
continue
x, y = item[0], item[1]
file_path = item[3:]
# Handle modify/delete conflicts
            if (x == 'D' and y == 'U') or (x == 'U' and y == 'D') or (x == 'A' and y == 'U'):
conflict = process_modify_delete_conflict(
repo, file_path, x == 'D')
if conflict:
conflicts.append(conflict)
# Handle regular conflicts
elif 'U' in (x, y) or (x == 'D' and y == 'D'):
conflict = process_regular_conflict(repo, file_path)
if conflict:
conflicts.append(conflict)
return conflicts
except Exception as e:
logger.error(f"Failed to get merge conflicts: {str(e)}")
return []
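# Illustrative sketch (not part of the original module): how a porcelain -z
# entry is split above. 'DU', 'UD' and 'AU' pairs take the modify/delete path;
# other pairs containing 'U' (e.g. 'UU'), plus 'DD', are treated as regular
# two-sided conflicts.
_example_entry = 'UU custom_formats/Example Format.yml'
_x, _y, _example_path = _example_entry[0], _example_entry[1], _example_entry[3:]
# -> _x = 'U', _y = 'U', _example_path = 'custom_formats/Example Format.yml'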

View File

@@ -1,110 +0,0 @@
import os
import yaml
import logging
from git import GitCommandError
from .comparison import create_change_summary
from .utils import determine_type, extract_name_from_path
logger = logging.getLogger(__name__)
# Use the centralized extract_name_from_path function from utils
extract_name = extract_name_from_path
def get_outgoing_changes(repo):
"""Get list of changes in working directory"""
try:
status = repo.git.status('--porcelain', '-z').split('\0')
logger.info(f"Processing {len(status)} changes from git status")
changes = []
i = 0
while i < len(status):
item = status[i]
if not item:
i += 1
continue
if len(item) < 4:
logger.warning(f"Invalid status item format: {item}")
i += 1
continue
x, y = item[0], item[1]
file_path = item[3:]
# Skip files in conflict state
if x == 'U' or y == 'U':
i += 1
continue
# Handle renamed files
if x == 'R' or y == 'R':
if i + 1 < len(status) and status[i + 1]:
outgoing_name = extract_name(file_path)
prior_name = extract_name(status[i + 1])
original_path = status[i + 1] # Path for old content
new_path = file_path # Path for new content
is_staged = x == 'R'
status_value = 'Renamed'
i += 2
else:
i += 1
else:
name = extract_name(file_path)
prior_name = name
outgoing_name = name
original_path = file_path
new_path = file_path
is_staged = x != ' ' and x != '?'
status_value = None
i += 1
try:
# Get old content (from HEAD)
try:
old_content = repo.git.show(f'HEAD:{original_path}')
old_data = yaml.safe_load(old_content)
except GitCommandError:
old_data = None
except yaml.YAMLError as e:
logger.warning(
f"Failed to parse old YAML for {original_path}: {str(e)}"
)
old_data = None
# Get new content (from working directory)
try:
full_path = os.path.join(repo.working_dir, new_path)
with open(full_path, 'r') as f:
new_data = yaml.safe_load(f.read())
except (IOError, yaml.YAMLError) as e:
logger.warning(
f"Failed to read/parse current file {new_path}: {str(e)}"
)
new_data = None
# Generate change summary
change = create_change_summary(old_data, new_data, new_path)
change['type'] = determine_type(new_path)
change['staged'] = is_staged
change['prior_name'] = prior_name
change['outgoing_name'] = outgoing_name
if status_value:
change['status'] = status_value
changes.append(change)
except Exception as e:
logger.error(f"Failed to process {file_path}: {str(e)}",
exc_info=True)
return changes
except Exception as e:
logger.error(f"Failed to get outgoing changes: {str(e)}",
exc_info=True)
return []
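# Illustrative usage sketch (not part of the original module); the repository
# path is an assumption.
if __name__ == '__main__':
    import git
    _repo = git.Repo('/config/db')  # hypothetical Profilarr database repo path
    for _change in get_outgoing_changes(_repo):
        print(_change['type'], _change['outgoing_name'], _change['staged'])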

View File

@@ -1,302 +0,0 @@
# git/status/status.py
import git
from git.exc import GitCommandError, InvalidGitRepositoryError
import logging
from .incoming_changes import get_incoming_changes
from .outgoing_changes import get_outgoing_changes
from .merge_conflicts import get_merge_conflicts
from .utils import determine_type
import os
import yaml
import threading
from datetime import datetime
import json
from ...db import get_settings
logger = logging.getLogger(__name__)
class GitStatusManager:
_instance = None
_lock = threading.Lock()
def __init__(self, repo_path):
self.repo_path = repo_path
self.repo = git.Repo(repo_path)
self.status = {
# Local status
"branch": "",
"outgoing_changes": [],
"is_merging": False,
"merge_conflicts": [],
"has_conflicts": False,
# Remote status
"remote_branch_exists": False,
"commits_behind": 0,
"commits_ahead": 0,
"incoming_changes": [],
"has_unpushed_commits": False,
"unpushed_files": [],
# Metadata
"last_local_update": None,
"last_remote_update": None
}
@classmethod
def get_instance(cls, repo_path=None):
if not cls._instance and repo_path:
with cls._lock:
if not cls._instance:
cls._instance = cls(repo_path)
return cls._instance
def update_local_status(self):
"""Update only local repository status"""
try:
self.repo = git.Repo(self.repo_path) # Refresh repo instance
with self._lock:
# Update branch
self.status["branch"] = self.repo.active_branch.name
# Check merge status
self.status["is_merging"] = os.path.exists(
os.path.join(self.repo.git_dir, 'MERGE_HEAD'))
# Get local changes
self.status["outgoing_changes"] = get_outgoing_changes(
self.repo)
# Get merge conflicts if merging
self.status["merge_conflicts"] = (get_merge_conflicts(
self.repo) if self.status["is_merging"] else [])
self.status["has_conflicts"] = bool(
self.status["merge_conflicts"])
# Update timestamp
self.status["last_local_update"] = datetime.now().isoformat()
return True
except Exception as e:
logger.error(f"Error updating local status: {str(e)}")
return False
def update_remote_status(self):
"""Update remote repository status - called by scheduled task"""
try:
logger.info(
f"Updating remote status for branch: {self.status['branch']}")
# Do the fetch outside the lock
self.repo.remotes.origin.fetch()
# Get branch name safely
with self._lock:
branch = self.status["branch"]
# Do git operations outside lock
remote_refs = [ref.name for ref in self.repo.remotes.origin.refs]
remote_branch_exists = f"origin/{branch}" in remote_refs
if remote_branch_exists:
commits_behind = list(
self.repo.iter_commits(f'{branch}..origin/{branch}'))
commits_ahead = list(
self.repo.iter_commits(f'origin/{branch}..{branch}'))
# Handle auto-pull before updating status
if len(commits_behind) > 0:
logger.info(
f"Branch is {len(commits_behind)} commits behind")
try:
settings = get_settings()
if int(settings.get('auto_pull_enabled', 0)):
logger.info("Auto-pull enabled, pulling changes")
from ..operations.manager import GitOperations
git_ops = GitOperations(self.repo_path)
pull_result = git_ops.pull(branch)
logger.info(f"Auto-pull result: {pull_result}")
success, message = pull_result
if not success:
logger.error(f"Auto-pull failed: {message}")
# Refresh counts after pull
commits_behind = list(
self.repo.iter_commits(
f'{branch}..origin/{branch}'))
commits_ahead = list(
self.repo.iter_commits(
f'origin/{branch}..{branch}'))
except Exception as e:
logger.error(f"Error during auto-pull: {str(e)}")
# Prepare the status update
incoming = get_incoming_changes(self.repo, branch)
unpushed = self._get_unpushed_changes(
branch) if commits_ahead else []
# Only lock when updating the status
with self._lock:
                    self.status.update({
                        "remote_branch_exists": remote_branch_exists,
                        "commits_behind": len(commits_behind),
                        "commits_ahead": len(commits_ahead),
                        "has_unpushed_commits": len(commits_ahead) > 0,
                        "incoming_changes": incoming,
                        "unpushed_files": unpushed,
                        "last_remote_update": datetime.now().isoformat()
                    })
else:
with self._lock:
                    self.status.update({
                        "remote_branch_exists": False,
                        "commits_behind": 0,
                        "commits_ahead": 0,
                        "has_unpushed_commits": False,
                        "incoming_changes": [],
                        "unpushed_files": [],
                        "last_remote_update": datetime.now().isoformat()
                    })
return True
except Exception as e:
logger.error(f"Error updating remote status: {str(e)}")
return False
def _get_unpushed_changes(self, branch):
"""Get detailed info about files modified in unpushed commits"""
try:
unpushed_files = self.repo.git.diff(f'origin/{branch}..{branch}',
'--name-only').split('\n')
unpushed_files = [f for f in unpushed_files if f]
detailed_changes = []
for file_path in unpushed_files:
try:
with open(os.path.join(self.repo.working_dir, file_path),
'r') as f:
content = yaml.safe_load(f.read())
detailed_changes.append({
'type':
determine_type(file_path),
'name':
content.get('name', os.path.basename(file_path)),
'file_path':
file_path
})
except Exception as e:
logger.warning(
f"Could not get details for {file_path}: {str(e)}")
detailed_changes.append({
'type': determine_type(file_path),
'name': os.path.basename(file_path),
'file_path': file_path
})
return detailed_changes
except Exception as e:
logger.error(f"Error getting unpushed changes: {str(e)}")
return []
def get_status(self):
"""Get the current status without updating"""
with self._lock:
return self.status.copy()
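# Illustrative usage sketch (not part of the original module); the repository
# path is an assumption. The manager is a singleton: created once, then reused
# by later get_instance() calls.
if __name__ == '__main__':
    _manager = GitStatusManager.get_instance('/config/db')  # hypothetical path
    _manager.update_local_status()
    _snapshot = _manager.get_status()
    print(_snapshot['branch'], len(_snapshot['outgoing_changes']))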
def format_git_status(status):
"""Format git status for logging with truncation and pretty printing.
Args:
status (dict): The git status dictionary to format
Returns:
str: Formatted status string
"""
def truncate_list(lst, max_items=3):
"""Truncate a list and add count of remaining items."""
if len(lst) <= max_items:
return lst
return lst[:max_items] + [f"... and {len(lst) - max_items} more items"]
def truncate_string(s, max_length=50):
"""Truncate a string if it's too long."""
if not s or len(s) <= max_length:
return s
return s[:max_length] + "..."
# Create a copy to modify
formatted_status = status.copy()
# Truncate lists
for key in [
'outgoing_changes', 'merge_conflicts', 'incoming_changes',
'unpushed_files'
]:
if key in formatted_status and isinstance(formatted_status[key], list):
formatted_status[key] = truncate_list(formatted_status[key])
# Format any nested dictionaries in the lists
for key in formatted_status:
if isinstance(formatted_status[key], list):
            formatted_status[key] = [
                {k: truncate_string(str(v)) for k, v in item.items()}
                if isinstance(item, dict) else item
                for item in formatted_status[key]
            ]
# Convert to JSON with nice formatting
formatted_json = json.dumps(formatted_status, indent=2, default=str)
# Add a timestamp header
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return f"=== Git Status at {timestamp} ===\n{formatted_json}"
def get_git_status(repo_path):
try:
status_manager = GitStatusManager.get_instance(repo_path)
status_manager.update_local_status()
success, status = True, status_manager.get_status()
# Log the formatted status
logger.info("\n" + format_git_status(status))
return success, status
except git.exc.InvalidGitRepositoryError:
logger.info(f"No git repository found at {repo_path}")
empty_status = {
"branch": "",
"outgoing_changes": [],
"is_merging": False,
"merge_conflicts": [],
"has_conflicts": False,
"remote_branch_exists": False,
"commits_behind": 0,
"commits_ahead": 0,
"incoming_changes": [],
"has_unpushed_commits": False,
"unpushed_files": [],
"last_local_update": None,
"last_remote_update": None,
"has_repo": False
}
return True, empty_status
except Exception as e:
logger.error(f"Error in get_git_status: {str(e)}", exc_info=True)
return False, str(e)

View File

@@ -1,173 +0,0 @@
# git/status/utils.py
import os
import yaml
import logging
import re
logger = logging.getLogger(__name__)
def extract_data_from_yaml(file_path):
logger.debug(f"Extracting data from file: {file_path}")
try:
with open(file_path, 'r') as f:
content = yaml.safe_load(f)
logger.debug(
f"File content: {content}") # Log the full file content
if content is None:
logger.error(
f"Failed to parse YAML file or file is empty: {file_path}")
return None
# Check if expected keys are in the content
if 'name' not in content or 'id' not in content:
logger.warning(
f"'name' or 'id' not found in file: {file_path}")
return {'name': content.get('name'), 'id': content.get('id')}
except Exception as e:
logger.warning(f"Error reading file {file_path}: {str(e)}")
return None
def determine_type(file_path):
if 'regex_patterns' in file_path:
return 'Regex Pattern'
elif 'custom_formats' in file_path:
return 'Custom Format'
elif 'profiles' in file_path:
return 'Quality Profile'
elif 'media_management' in file_path:
return 'Media Management'
return 'Unknown'
def format_media_management_name(name):
"""Format media management category names for display"""
name_mapping = {
'misc': 'Miscellaneous',
'naming': 'Naming',
'quality_definitions': 'Quality Definitions'
}
return name_mapping.get(name, name)
def extract_name_from_path(file_path):
"""Extract and format name from file path"""
# Remove the file extension
name = os.path.splitext(file_path)[0]
# Remove the type prefix (everything before the first '/')
if '/' in name:
name = name.split('/', 1)[1]
# Format media management names
if 'media_management' in file_path:
return format_media_management_name(name)
return name
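# Illustrative sketch (not part of the original module): expected results for
# the path helpers above; the file names are made up.
_example_paths = [
    'custom_formats/x265 (HD).yml',
    'profiles/1080p Transparent.yml',
    'media_management/misc.yml',
]
_example_labels = [(determine_type(p), extract_name_from_path(p))
                   for p in _example_paths]
# -> [('Custom Format', 'x265 (HD)'),
#     ('Quality Profile', '1080p Transparent'),
#     ('Media Management', 'Miscellaneous')]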
def interpret_git_status(x, y):
if x == 'D' or y == 'D':
return 'Deleted'
elif x == 'A':
return 'Added'
elif x == 'M' or y == 'M':
return 'Modified'
elif x == 'R':
return 'Renamed'
elif x == 'C':
return 'Copied'
elif x == 'U':
return 'Updated but unmerged'
elif x == '?' and y == '?':
return 'Untracked'
else:
return 'Unknown'
def parse_commit_message(commit_message):
# Default placeholders for missing parts of the commit message
placeholders = {
'type': 'Unknown Type',
'scope': 'Unknown Scope',
'subject': 'No subject provided',
'body': 'No body provided',
'footer': ''
}
# Mapping of commit types and scopes to canonical forms
type_mapping = {
'feat': 'New Feature',
'feature': 'New Feature',
'new': 'New Feature',
'fix': 'BugFix',
'bugfix': 'BugFix',
'bug': 'BugFix',
'docs': 'Documentation',
'documentation': 'Documentation',
'doc': 'Documentation',
'style': 'Style Change',
'formatting': 'Style Change',
'format': 'Style Change',
'lint': 'Style Change',
'refactor': 'Refactor',
'refactoring': 'Refactor',
'restructure': 'Refactor',
'redesign': 'Refactor',
'perf': 'Performance Improvement',
'performance': 'Performance Improvement',
'optimize': 'Performance Improvement',
'optimisation': 'Performance Improvement',
'test': 'Test',
'testing': 'Test',
'chore': 'Maintenance',
'maintenance': 'Maintenance',
'maintain': 'Maintenance'
}
scope_mapping = {
'regex': 'Regex Pattern',
'regex pattern': 'Regex Pattern',
'format': 'Custom Format',
'custom format': 'Custom Format',
'profile': 'Quality Profile',
'quality profile': 'Quality Profile'
}
# Regex patterns for each part of the commit message
type_pattern = r'^(?P<type>feat|feature|new|fix|bugfix|bug|docs|documentation|doc|style|formatting|format|lint|refactor|refactoring|restructure|redesign|perf|performance|optimize|optimisation|test|testing|chore|maintenance|maintain)'
scope_pattern = r'\((?P<scope>regex|regex pattern|format|custom format|profile|quality profile)\)'
subject_pattern = r':\s(?P<subject>.+)'
body_pattern = r'(?P<body>(?:- .+\n?)+)' # Handles multiple lines in the body
footer_pattern = r'(?P<footer>(Fixes|Resolves|See also|Relates to)\s.+)'
# Initialize result with placeholders
parsed_message = placeholders.copy()
# Parse the type and scope
type_scope_match = re.match(
f'{type_pattern}{scope_pattern}{subject_pattern}', commit_message,
re.IGNORECASE)
if type_scope_match:
matched_type = type_scope_match.group('type').lower()
matched_scope = type_scope_match.group('scope').lower()
# Map the matched values to their canonical forms
parsed_message['type'] = type_mapping.get(matched_type, 'Unknown Type')
parsed_message['scope'] = scope_mapping.get(matched_scope,
'Unknown Scope')
parsed_message['subject'] = type_scope_match.group('subject').strip()
# Match and extract the body part
body_match = re.search(body_pattern, commit_message, re.MULTILINE)
if body_match:
parsed_message['body'] = body_match.group('body').strip()
# Match and extract the footer (if present)
footer_match = re.search(footer_pattern, commit_message)
if footer_match:
parsed_message['footer'] = footer_match.group('footer').strip()
return parsed_message
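# Illustrative sketch (not part of the original module): this parser maps the
# raw type and scope onto the canonical labels defined above.
_canonical_example = parse_commit_message(
    'feat(format): add HDR10+ detection\n'
    '- handle new release group tags\n'
    'Fixes #123')
# -> {'type': 'New Feature', 'scope': 'Custom Format',
#     'subject': 'add HDR10+ detection',
#     'body': '- handle new release group tags',
#     'footer': 'Fixes #123'}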

View File

@@ -1,259 +0,0 @@
# app/importarr/__init__.py
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import logging
import asyncio
from pathlib import Path
from ..arr.manager import get_arr_config
from ..data.utils import get_category_directory, load_yaml_file
from .format import import_formats_to_arr, async_import_formats_to_arr
from .profile import import_profiles_to_arr, async_import_profiles_to_arr
from ..db import get_unique_arrs
logger = logging.getLogger('importarr')
bp = Blueprint('import', __name__)
@bp.route('/format', methods=['POST', 'OPTIONS'])
@cross_origin()
def import_formats():
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
data = request.get_json()
arr_id = data.get('arrId')
all_formats = data.get('all', False)
format_names = data.get('formatNames', [])
if not arr_id:
return jsonify({
'success': False,
'error': 'Arr ID is required'
}), 400
if not all_formats and not format_names:
            return jsonify({
                'success': False,
                'error': 'Either formatNames or all=true is required'
            }), 400
# Get import_as_unique setting using the new function
import_settings = get_unique_arrs([arr_id])
arr_settings = import_settings.get(arr_id, {
'import_as_unique': False,
'name': 'Unknown'
})
import_as_unique = arr_settings['import_as_unique']
if import_as_unique:
logger.info(
f"Unique imports for {arr_settings['name']} are on, adjusting names for custom formats"
)
else:
logger.info(
f"Unique imports for {arr_settings['name']} is off, using original names"
)
# Get arr configuration
arr_config = get_arr_config(arr_id)
if not arr_config['success']:
return jsonify({
'success': False,
'error': 'Arr configuration not found'
}), 404
arr_data = arr_config['data']
# If all=true, get all format names from the custom_format directory
if all_formats:
try:
format_dir = Path(get_category_directory('custom_format'))
format_names = [f.stem for f in format_dir.glob('*.yml')]
if not format_names:
return jsonify({
'success': False,
'error': 'No custom formats found'
}), 404
except Exception as e:
logger.error(
f"Error reading custom formats directory: {str(e)}")
                return jsonify({
                    'success': False,
                    'error': 'Failed to read custom formats directory'
                }), 500
# Store original names for file lookups
original_names = format_names.copy()
# Modify format names if import_as_unique is true
if import_as_unique:
format_names = [f"{name} [Dictionarry]" for name in format_names]
logger.info(
f"Modified format names for unique import: {format_names}")
# Import formats with arr type from config, but use original names for file lookups
result = import_formats_to_arr(format_names=format_names,
original_names=original_names,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type'])
return jsonify(result), 200 if result['success'] else 400
except Exception as e:
logger.error(f"Error importing custom formats: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
@bp.route('/profile', methods=['POST', 'OPTIONS'])
@cross_origin()
def import_profiles():
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
data = request.get_json()
arr_id = data.get('arrId')
all_profiles = data.get('all', False)
profile_names = data.get('profileNames', [])
if not arr_id:
return jsonify({
'success': False,
'error': 'Arr ID is required'
}), 400
if not all_profiles and not profile_names:
            return jsonify({
                'success': False,
                'error': 'Either profileNames or all=true is required'
            }), 400
# Get import_as_unique setting
import_settings = get_unique_arrs([arr_id])
arr_settings = import_settings.get(arr_id, {
'import_as_unique': False,
'name': 'Unknown'
})
import_as_unique = arr_settings['import_as_unique']
if import_as_unique:
logger.info(
f"Unique imports for {arr_settings['name']} are on, adjusting names for quality profiles"
)
else:
logger.info(
f"Unique imports for {arr_settings['name']} is off, using original names"
)
# Get arr configuration
arr_config = get_arr_config(arr_id)
if not arr_config['success']:
return jsonify({
'success': False,
'error': 'Arr configuration not found'
}), 404
arr_data = arr_config['data']
# If all=true, get all profile names
if all_profiles:
try:
profile_dir = Path(get_category_directory('profile'))
profile_names = [f.stem for f in profile_dir.glob('*.yml')]
if not profile_names:
return jsonify({
'success': False,
'error': 'No quality profiles found'
}), 404
except Exception as e:
logger.error(f"Error reading profiles directory: {str(e)}")
return jsonify({
'success': False,
'error': 'Failed to read profiles directory'
}), 500
# Store original names for file lookups
original_names = profile_names.copy()
# Modify profile names if import_as_unique is true
if import_as_unique:
profile_names = [f"{name} [Dictionarry]" for name in profile_names]
logger.info(
f"Modified profile names for unique import: {profile_names}")
logger.debug(
f"Attempting to import profiles: {profile_names} for {arr_data['type']}: {arr_data['name']}"
)
# Get any custom formats referenced in these profiles
format_names = set()
for profile_name in original_names: # Use original names for file lookup
try:
profile_file = f"{get_category_directory('profile')}/{profile_name}.yml"
format_data = load_yaml_file(profile_file)
# Extract from main custom_formats
for cf in format_data.get('custom_formats', []):
format_names.add(cf['name'])
# Extract from app-specific custom_formats
for cf in format_data.get('custom_formats_radarr', []):
format_names.add(cf['name'])
for cf in format_data.get('custom_formats_sonarr', []):
format_names.add(cf['name'])
except Exception as e:
logger.error(f"Error loading profile {profile_name}: {str(e)}")
continue
# Import/Update formats first - use async version for larger batch sizes
if format_names:
format_names_list = list(format_names)
# When we have more than a few formats, use the async import path
# which will parallelize the requests
if import_as_unique:
modified_format_names = [
f"{name} [Dictionarry]" for name in format_names_list
]
# Use the regular import function which will detect large batches
# and automatically use async when appropriate
import_formats_to_arr(
format_names=modified_format_names,
original_names=format_names_list,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type']
)
else:
# Use the regular import function which will detect large batches
# and automatically use async when appropriate
import_formats_to_arr(
format_names=format_names_list,
original_names=format_names_list,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type']
)
# Import profiles
result = import_profiles_to_arr(profile_names=profile_names,
original_names=original_names,
base_url=arr_data['arrServer'],
api_key=arr_data['apiKey'],
arr_type=arr_data['type'],
arr_id=arr_id,
import_as_unique=import_as_unique)
return jsonify(result), 200 if result['success'] else 400
except Exception as e:
logger.error(f"Error importing quality profiles: {str(e)}")
return jsonify({'success': False, 'error': str(e)}), 400
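# Illustrative sketch (not part of the original blueprint): example JSON bodies
# accepted by the /format and /profile routes above; the arr id and names are
# made up.
_example_format_request = {'arrId': 1, 'formatNames': ['x265 (HD)']}
_example_format_request_all = {'arrId': 1, 'all': True}
_example_profile_request = {'arrId': 1, 'profileNames': ['1080p Transparent']}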

View File

@@ -1,398 +0,0 @@
import requests
import logging
import json
import yaml
import asyncio
import aiohttp
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from ..data.utils import (load_yaml_file, get_category_directory, REGEX_DIR,
FORMAT_DIR)
from ..compile import CustomFormat, FormatConverter, TargetApp
from ..db.queries.format_renames import is_format_in_renames
logger = logging.getLogger('importarr')
def import_formats_to_arr(format_names, base_url, api_key, arr_type,
original_names):
"""
Import custom formats to arr instance.
This function supports bulk importing of formats with sequential processing.
"""
logger.info(
f"Received {len(format_names)} formats to import for {arr_type}")
# For larger imports, use the async version to improve performance
if len(format_names) > 5:
# Run async function within the event loop
return asyncio.run(
async_import_formats_to_arr(
format_names=format_names,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
original_names=original_names
)
)
# For smaller imports, use the regular synchronous version
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
logger.info("Looking for existing formats...")
existing_formats = get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_names = {fmt['name']: fmt['id'] for fmt in existing_formats}
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
converter = FormatConverter(patterns)
        target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
for i, format_name in enumerate(format_names):
try:
# Use original name for file lookup
original_name = original_names[i]
format_file = f"{get_category_directory('custom_format')}/{original_name}.yml"
format_data = load_yaml_file(format_file)
custom_format = CustomFormat(**format_data)
converted_format = converter.convert_format(
custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Create base compiled data with ordered fields
compiled_data = {'name': format_name} # Start with name
# Check rename status and add field right after name if true
if is_format_in_renames(original_name):
compiled_data['includeCustomFormatWhenRenaming'] = True
logger.info(
f"Format {original_name} has renames enabled, including field"
)
# Add specifications last
compiled_data['specifications'] = [
vars(spec) for spec in converted_format.specifications
]
result = process_format(compiled_data, existing_names,
base_url, api_key)
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
except Exception as e:
logger.error(
f"Error processing format {format_name}: {str(e)}")
results['failed'] += 1
results['success'] = False
results['details'].append({
'name': format_name,
'action': 'failed',
'success': False,
'error': str(e)
})
logger.info(
f"Importing {len(format_names)} formats complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in import_formats_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
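# Illustrative usage sketch (not part of the original module); the server URL,
# API key and format name are assumptions. Batches of more than five names are
# routed through the async variant defined next.
if __name__ == '__main__':
    _result = import_formats_to_arr(
        format_names=['x265 (HD)'],
        original_names=['x265 (HD)'],
        base_url='http://localhost:7878',  # hypothetical Radarr instance
        api_key='REPLACE_ME',
        arr_type='radarr')
    print(_result)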
async def async_import_formats_to_arr(format_names: List[str],
base_url: str,
api_key: str,
arr_type: str,
original_names: List[str]) -> Dict:
"""
Asynchronous version of import_formats_to_arr that processes formats concurrently.
This significantly improves performance for large batches.
"""
logger.info(
f"Received {len(format_names)} formats to import (async) for {arr_type}")
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
logger.info("Looking for existing formats (async)...")
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_names = {fmt['name']: fmt['id'] for fmt in existing_formats}
# Load patterns - this doesn't need to be async as it's file system operations
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
converter = FormatConverter(patterns)
target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
# Process all formats into API-ready format first
compiled_formats = []
format_tasks = []
for i, format_name in enumerate(format_names):
try:
# Use original name for file lookup
original_name = original_names[i]
format_file = f"{get_category_directory('custom_format')}/{original_name}.yml"
format_data = load_yaml_file(format_file)
custom_format = CustomFormat(**format_data)
converted_format = converter.convert_format(custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Create base compiled data with ordered fields
compiled_data = {'name': format_name} # Start with name
# Check rename status and add field right after name if true
if is_format_in_renames(original_name):
compiled_data['includeCustomFormatWhenRenaming'] = True
logger.info(
f"Format {original_name} has renames enabled, including field"
)
# Add specifications last
compiled_data['specifications'] = [
vars(spec) for spec in converted_format.specifications
]
compiled_formats.append((format_name, compiled_data))
except Exception as e:
logger.error(f"Error processing format {format_name}: {str(e)}")
results['failed'] += 1
results['success'] = False
results['details'].append({
'name': format_name,
'action': 'failed',
'success': False,
'error': str(e)
})
# Now create async tasks for all formats to upload them concurrently
for format_name, compiled_data in compiled_formats:
task = asyncio.ensure_future(
async_process_format(
format_data=compiled_data,
existing_names=existing_names,
base_url=base_url,
api_key=api_key
)
)
format_tasks.append((format_name, task))
# Wait for all format uploads to complete
for format_name, task in format_tasks:
try:
result = await task
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
except Exception as e:
logger.error(f"Error waiting for format task {format_name}: {str(e)}")
results['failed'] += 1
results['success'] = False
results['details'].append({
'name': format_name,
'action': 'failed',
'success': False,
'error': str(e)
})
logger.info(
f"Async importing {len(format_names)} formats complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in async_import_formats_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
def get_existing_formats(base_url, api_key):
try:
response = requests.get(f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats: {str(e)}")
return None
async def async_get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_formats"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key}
) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats (async): {str(e)}")
return None
def process_format(format_data, existing_names, base_url, api_key):
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
async def async_process_format(format_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
"""Async version of process_format"""
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = await async_update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = await async_add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
def update_format(base_url, api_key, format_data):
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
response = requests.put(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Update format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format: {str(e)}")
return False
async def async_update_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of update_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
async with aiohttp.ClientSession() as session:
async with session.put(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Update format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format (async): {str(e)}")
return False
def add_format(base_url, api_key, format_data):
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
response = requests.post(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Add format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format: {str(e)}")
return False
async def async_add_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of add_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
async with aiohttp.ClientSession() as session:
async with session.post(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Add format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format (async): {str(e)}")
return False

View File

@@ -1,365 +0,0 @@
# app/importarr/format_memory.py
"""Imports custom formats from memory, not YML files"""
import requests
import logging
import json
import asyncio
import aiohttp
from typing import Dict, List, Optional
from pathlib import Path
from ..data.utils import (load_yaml_file, get_category_directory, REGEX_DIR,
FORMAT_DIR)
from ..compile import CustomFormat, FormatConverter, TargetApp
logger = logging.getLogger('importarr')
def get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
"""Get existing custom formats from arr instance"""
try:
response = requests.get(f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats: {str(e)}")
return None
async def async_get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_formats"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key}
) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats (async): {str(e)}")
return None
def process_format(format_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
"""Process single format - either update or add new"""
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
async def async_process_format(format_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
"""Async version of process_format"""
format_name = format_data['name']
if format_name in existing_names:
format_data['id'] = existing_names[format_name]
success = await async_update_format(base_url, api_key, format_data)
action = 'updated'
else:
success = await async_add_format(base_url, api_key, format_data)
action = 'added'
return {
'success': success,
'action': action if success else 'failed',
'detail': {
'name': format_name,
'action': action if success else 'failed',
'success': success
}
}
def update_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Update existing custom format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
response = requests.put(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Update format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format: {str(e)}")
return False
async def async_update_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of update_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat/{format_data['id']}"
async with aiohttp.ClientSession() as session:
async with session.put(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Update format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating format (async): {str(e)}")
return False
def add_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Add new custom format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
response = requests.post(url,
headers={'X-Api-Key': api_key},
json=format_data)
logger.info(f"Add format '{format_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format: {str(e)}")
return False
async def async_add_format(base_url: str, api_key: str, format_data: Dict) -> bool:
"""Async version of add_format"""
try:
url = f"{base_url.rstrip('/')}/api/v3/customformat"
async with aiohttp.ClientSession() as session:
async with session.post(
url,
headers={'X-Api-Key': api_key},
json=format_data
) as response:
logger.info(f"Add format '{format_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding format (async): {str(e)}")
return False
def import_format_from_memory(format_data: Dict,
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool = False) -> Dict:
"""
Import a format directly from memory without requiring file loading.
Args:
format_data: Dictionary containing the format specification
base_url: Arr instance base URL
api_key: API key for arr instance
arr_type: Type of arr instance (radarr/sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
Dict containing import results
"""
# For memory-based imports, no need to check size threshold
# as these are typically used for language formats which are few
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
# Modify format name if import_as_unique is true
original_name = format_data['name']
if import_as_unique:
format_data['name'] = f"{original_name} [Dictionarry]"
logger.info(
f"Modified format name for unique import: {format_data['name']}"
)
logger.info("Looking for existing formats (memory-based import)...")
existing_formats = get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_format_map = {
fmt['name']: fmt['id']
for fmt in existing_formats
}
# Convert from raw data into a CustomFormat object
custom_format = CustomFormat(**format_data)
# Load patterns from regex directory
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
        target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
converter = FormatConverter(patterns)
converted_format = converter.convert_format(custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Prepare final JSON data
        api_format = {
            'name': converted_format.name,
            'specifications': [vars(spec) for spec in converted_format.specifications]
        }
# Format compiled successfully
# Process the compiled format (update/add)
result = process_format(api_format, existing_format_map, base_url,
api_key)
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
return results
except Exception as e:
logger.error(f"Error importing format data: {str(e)}")
        return {
            'success': False,
            'error': str(e),
            'details': [{
                'name': format_data.get('name', 'unknown'),
                'action': 'failed',
                'success': False,
                'error': str(e)
            }]
        }
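# Illustrative usage sketch (not part of the original module); the in-memory
# format dict, server URL and API key are made up, and the exact fields that
# CustomFormat expects are defined in the compile package, so treat this
# payload shape as an assumption.
if __name__ == '__main__':
    _language_format = {
        'name': 'Not English',
        'conditions': [{
            'name': 'Not English Language',
            'type': 'language',
            'language': 'english',
            'negate': True,
            'required': True
        }]
    }
    _outcome = import_format_from_memory(
        _language_format,
        base_url='http://localhost:8989',  # hypothetical Sonarr instance
        api_key='REPLACE_ME',
        arr_type='sonarr')
    print(_outcome)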
async def async_import_format_from_memory(format_data: Dict,
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool = False) -> Dict:
"""
Asynchronous version of import_format_from_memory
Args:
format_data: Dictionary containing the format specification
base_url: Arr instance base URL
api_key: API key for arr instance
arr_type: Type of arr instance (radarr/sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
Dict containing import results
"""
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
# Modify format name if import_as_unique is true
original_name = format_data['name']
if import_as_unique:
format_data['name'] = f"{original_name} [Dictionarry]"
logger.info(
f"Modified format name for unique import: {format_data['name']}"
)
logger.info("Looking for existing formats (memory-based import, async)...")
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing formats'
}
existing_format_map = {
fmt['name']: fmt['id']
for fmt in existing_formats
}
# Convert from raw data into a CustomFormat object
custom_format = CustomFormat(**format_data)
# Load patterns from regex directory (file system operations, no need for async)
patterns = {}
for pattern_file in Path(REGEX_DIR).glob('*.yml'):
try:
pattern_data = load_yaml_file(str(pattern_file))
if pattern_data and 'name' in pattern_data and 'pattern' in pattern_data:
patterns[pattern_data['name']] = pattern_data['pattern']
except Exception as e:
logger.error(
f"Error loading pattern file {pattern_file}: {str(e)}")
continue
target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
converter = FormatConverter(patterns)
converted_format = converter.convert_format(custom_format, target_app)
if not converted_format:
raise ValueError("Format conversion failed")
# Prepare final JSON data
api_format = {
'name': converted_format.name,
'specifications': [vars(spec) for spec in converted_format.specifications]
}
# Format compiled successfully
# Process the compiled format (update/add) using async methods
result = await async_process_format(api_format, existing_format_map, base_url, api_key)
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
return results
except Exception as e:
logger.error(f"Error importing format data (async): {str(e)}")
return {
'success': False,
'error': str(e),
'details': [{
'name': format_data.get('name', 'unknown'),
'action': 'failed',
'success': False,
'error': str(e)
}]
}

View File

@@ -1,861 +0,0 @@
# app/importarr/profile.py
import requests
import logging
import json
import yaml
import asyncio
import aiohttp
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from ..data.utils import load_yaml_file, get_category_directory
from ..compile.profile_compiler import compile_quality_profile
from ..compile.mappings import TargetApp
from .format import import_formats_to_arr
from .format_memory import import_format_from_memory, async_import_format_from_memory
from ..arr.manager import get_arr_config
logger = logging.getLogger('importarr')
def import_profiles_to_arr(profile_names: List[str], original_names: List[str],
base_url: str, api_key: str, arr_type: str,
arr_id: str, import_as_unique: bool) -> Dict:
"""
Import quality profiles to arr instance.
This function supports bulk importing of profiles with sequential or concurrent processing.
"""
logger.info(
f"Received {len(profile_names)} profiles to import for {arr_type}")
# For larger imports, use the async version to improve performance
if len(profile_names) > 1:
# Run async function within the event loop
return asyncio.run(
async_import_profiles_to_arr(profile_names=profile_names,
original_names=original_names,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
arr_id=arr_id,
import_as_unique=import_as_unique))
# For smaller imports, use the regular synchronous version
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
arr_config_response = get_arr_config(arr_id)
if not arr_config_response['success']:
return {
'success': False,
'error': 'Failed to get arr configuration'
}
arr_config = arr_config_response['data']
logger.info("Looking for existing profiles...")
existing_profiles = get_existing_profiles(base_url, api_key)
if existing_profiles is None:
return {
'success': False,
'error': 'Failed to get existing profiles'
}
# Create mapping for existing profiles
existing_profile_map = {}
for profile in existing_profiles:
existing_profile_map[profile['name']] = profile['id']
        target_app = TargetApp.RADARR if arr_type.lower() == 'radarr' else TargetApp.SONARR
for i, profile_name in enumerate(profile_names):
try:
# Use original name for file lookup
original_name = original_names[i]
profile_file = f"{get_category_directory('profile')}/{original_name}.yml"
profile_data = load_yaml_file(profile_file)
# Set the potentially modified profile name
profile_data['name'] = profile_name
# Modify custom format names if import_as_unique is true
if import_as_unique and 'custom_formats' in profile_data:
for cf in profile_data['custom_formats']:
cf['name'] = f"{cf['name']} [Dictionarry]"
# Profile loaded
profile_language = profile_data.get('language', 'any')
if profile_language != 'any':
# Detect if we're using simple or advanced mode
is_simple_mode = '_' not in profile_language
if is_simple_mode:
logger.info(
f"Profile '{profile_name}' has simple mode language: {profile_language}"
)
logger.info(
f"Simple mode will set language filter to: {profile_language}"
)
else:
logger.info(
f"Profile '{profile_name}' has advanced mode language: {profile_language}"
)
compiled_profiles = compile_quality_profile(
profile_data=profile_data,
target_app=target_app,
base_url=base_url,
api_key=api_key,
format_importer=import_formats_to_arr,
import_as_unique=import_as_unique)
if not compiled_profiles:
raise ValueError("Profile compilation returned no data")
profile_data = compiled_profiles[0]
logger.info(
"Looking for existing custom formats to sync format IDs..."
)
existing_formats = get_existing_formats(base_url, api_key)
if existing_formats is None:
raise ValueError("Failed to get updated format list")
format_id_map = {
fmt['name']: fmt['id']
for fmt in existing_formats
}
logger.debug(
f"Found {len(format_id_map)} existing custom formats")
profile_data = sync_format_ids(profile_data, format_id_map)
logger.debug("Format items after sync:")
for item in profile_data.get('formatItems', []):
logger.debug(
f" {item['name']} => Score: {item.get('score', 0)}, "
f"Format ID: {item.get('format', 'missing')}")
# Profile compiled successfully
result = process_profile(profile_data=profile_data,
existing_names=existing_profile_map,
base_url=base_url,
api_key=api_key)
results[result['action']] += 1
results['details'].append(result['detail'])
if not result['success']:
results['success'] = False
except Exception as e:
logger.error(
f"Error processing profile {profile_name}: {str(e)}, type: {type(e).__name__}"
)
logger.exception("Full traceback:")
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
})
results['success'] = False
logger.info(
f"Importing {len(profile_names)} profiles complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in import_profiles_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
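# Illustrative usage sketch (not from the original call sites; the URL, API key and
# profile name below are hypothetical). The returned dict mirrors the `results`
# structure built above:
#
#   result = import_profiles_to_arr(
#       profile_names=['1080p Example'], original_names=['1080p Example'],
#       base_url='http://radarr:7878', api_key='REDACTED',
#       arr_type='radarr', arr_id='1', import_as_unique=False)
#   # -> {'success': True, 'added': 1, 'updated': 0, 'failed': 0, 'details': [...]}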
async def async_import_profiles_to_arr(profile_names: List[str],
original_names: List[str],
base_url: str, api_key: str,
arr_type: str, arr_id: str,
import_as_unique: bool) -> Dict:
"""
Asynchronous version of import_profiles_to_arr that processes profiles concurrently.
This significantly improves performance for larger batches of profile imports.
"""
logger.info(
f"Received {len(profile_names)} profiles to import (async) for {arr_type}"
)
results = {
'success': True,
'added': 0,
'updated': 0,
'failed': 0,
'details': []
}
try:
arr_config_response = get_arr_config(arr_id)
if not arr_config_response['success']:
return {
'success': False,
'error': 'Failed to get arr configuration'
}
arr_config = arr_config_response['data']
logger.info("Looking for existing profiles (async)...")
existing_profiles = await async_get_existing_profiles(
base_url, api_key)
if existing_profiles is None:
return {
'success': False,
'error': 'Failed to get existing profiles'
}
# Create mapping for existing profiles
existing_profile_map = {}
for profile in existing_profiles:
existing_profile_map[profile['name']] = profile['id']
target_app = TargetApp.RADARR if arr_type.lower(
) == 'radarr' else TargetApp.SONARR
# Fetch all existing formats once upfront
logger.info("Pre-fetching existing custom formats for all profiles...")
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
return {
'success': False,
'error': 'Failed to get existing custom formats'
}
format_id_map = {fmt['name']: fmt['id'] for fmt in existing_formats}
logger.info(f"Successfully pre-fetched {len(format_id_map)} existing custom formats")
# Pre-scan all profiles to identify and cache language formats
needed_language_formats = set()
initial_profiles_data = []
# First, load and analyze all profile files
for i, profile_name in enumerate(profile_names):
try:
# Use original name for file lookup
original_name = original_names[i]
profile_file = f"{get_category_directory('profile')}/{original_name}.yml"
profile_data = load_yaml_file(profile_file)
# Store original profile data for later processing
initial_profiles_data.append((i, profile_name, original_name, profile_data))
# Extract language from profile data
profile_language = profile_data.get('language', 'any')
if profile_language != 'any' and '_' in profile_language:
# This is an advanced mode language that needs special format handling
needed_language_formats.add(profile_language)
# Language format identified
except Exception as e:
logger.error(f"Error pre-scanning profile {profile_name}: {str(e)}")
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': f"Error pre-scanning profile: {str(e)}"
})
results['success'] = False
# Pre-load all language formats if any exist
language_format_cache = {}
if needed_language_formats:
logger.info(f"Pre-importing {len(needed_language_formats)} unique language formats for {len(profile_names)} profiles")
language_format_cache = await preload_language_formats(
language_formats=list(needed_language_formats),
target_app=target_app,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
import_as_unique=import_as_unique
)
logger.info(f"Successfully pre-loaded language formats for {len(language_format_cache)} languages")
# Process each profile with the cached language formats
profile_tasks = []
for i, profile_name, original_name, profile_data in initial_profiles_data:
try:
# Set the potentially modified profile name
profile_data['name'] = profile_name
# Modify custom format names if import_as_unique is true
if import_as_unique and 'custom_formats' in profile_data:
for cf in profile_data['custom_formats']:
cf['name'] = f"{cf['name']} [Dictionarry]"
# Profile loaded
profile_language = profile_data.get('language', 'any')
                # Simple-mode languages (no underscore) need no special handling here;
                # advanced modes such as 'must_english' are resolved via the pre-loaded
                # language format cache below
# Setup the profile compilation with the cached language formats
# By default, use normal import
format_importer = import_formats_to_arr
# For profiles with language formats, attach the cached formats
if language_format_cache and profile_language != 'any' and '_' in profile_language:
language_format_configs = language_format_cache.get(profile_language, [])
if language_format_configs:
# Using cached language formats
# Define a special function that will be detected by the profile compiler
# The function name is checked in _process_language_formats
def cached_format_importer(*args, **kwargs):
# Using cached formats from importer
return {
'success': True,
'added': 0,
'updated': len(language_format_configs),
'failed': 0,
'details': []
}
# Add the cached formats to the function so they can be accessed by the compiler
cached_format_importer.cached_formats = language_format_configs
format_importer = cached_format_importer
else:
logger.warning(f"No cached formats found for language {profile_language}")
# Add language formats from cache directly to the profile for the compiler
# This way we don't need to modify the compiler code at all
if profile_language != 'any' and '_' in profile_language and profile_language in language_format_cache:
# Add the cached language formats directly to the profile
if 'custom_formats' not in profile_data:
profile_data['custom_formats'] = []
# Add the cached formats - these are already imported, we just need to reference them
profile_data['custom_formats'].extend(language_format_cache[profile_language])
compiled_profiles = compile_quality_profile(
profile_data=profile_data,
target_app=target_app,
base_url=base_url,
api_key=api_key,
format_importer=format_importer,
import_as_unique=import_as_unique
)
if not compiled_profiles:
raise ValueError("Profile compilation returned no data")
compiled_profile = compiled_profiles[0]
# Sync format IDs upfront using the cached format_id_map
synced_profile = sync_format_ids(compiled_profile, format_id_map)
# Create a task for processing this profile (without fetching formats again)
task = asyncio.create_task(
async_process_profile(
profile_data=synced_profile,
existing_names=existing_profile_map,
base_url=base_url,
api_key=api_key
)
)
profile_tasks.append((profile_name, task))
except Exception as e:
logger.error(
f"Error processing profile {profile_name}: {str(e)}, type: {type(e).__name__} (async)"
)
logger.exception("Full traceback:")
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
})
results['success'] = False
# Process all profile upload results
for profile_name, task in profile_tasks:
try:
result = await task
if result['success']:
results[result['action']] += 1
else:
results['failed'] += 1
results['success'] = False
results['details'].append(result['detail'])
except Exception as e:
logger.error(
f"Error waiting for profile task {profile_name}: {str(e)} (async)"
)
results['failed'] += 1
results['details'].append({
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
})
results['success'] = False
logger.info(
f"Async importing {len(profile_names)} profiles complete. "
f"Added: {results['added']}, Updated: {results['updated']}, "
f"Failed: {results['failed']}")
return results
except Exception as e:
logger.error(f"Error in async_import_profiles_to_arr: {str(e)}")
return {'success': False, 'error': str(e)}
def get_existing_profiles(base_url: str, api_key: str) -> Optional[List[Dict]]:
try:
response = requests.get(
f"{base_url.rstrip('/')}/api/v3/qualityprofile",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing profiles: {str(e)}")
return None
async def async_get_existing_profiles(base_url: str,
api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_profiles"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/qualityprofile",
headers={'X-Api-Key': api_key}) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing profiles (async): {str(e)}")
return None
def get_existing_formats(base_url: str, api_key: str) -> Optional[List[Dict]]:
try:
response = requests.get(f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key})
if response.status_code == 200:
return response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats: {str(e)}")
return None
async def async_get_existing_formats(base_url: str,
api_key: str) -> Optional[List[Dict]]:
"""Async version of get_existing_formats"""
try:
async with aiohttp.ClientSession() as session:
async with session.get(
f"{base_url.rstrip('/')}/api/v3/customformat",
headers={'X-Api-Key': api_key}) as response:
if response.status == 200:
return await response.json()
return None
except Exception as e:
logger.error(f"Error getting existing formats (async): {str(e)}")
return None
async def preload_language_formats(language_formats: List[str],
target_app: TargetApp,
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool) -> Dict[str, List[Dict]]:
"""
Pre-load all language formats for the specified languages to avoid
duplicate imports when multiple profiles use the same language settings.
Args:
language_formats: List of language identifiers (e.g. ["must_english", "prefer_french"])
target_app: TargetApp enum value (RADARR or SONARR)
base_url: API base URL
api_key: API key for the arr instance
arr_type: Type of arr (radarr or sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
Dictionary mapping language IDs to their imported format configs
"""
from ..compile.profile_compiler import ProfileConverter
language_format_cache = {}
# Create a single ProfileConverter instance for all languages
converter = ProfileConverter(
target_app=target_app,
base_url=base_url,
api_key=api_key,
format_importer=None, # We'll handle importing manually
import_as_unique=import_as_unique
)
# For each unique language, process and cache its formats
for language_id in language_formats:
try:
# Skip if we've already processed this language
if language_id in language_format_cache:
continue
# Parse the language behavior and code
if '_' in language_id:
behavior, language_code = language_id.split('_', 1)
else:
# Skip simple language modes - they don't need special format imports
continue
logger.info(f"Pre-importing language formats for {language_id} (async batch)")
# First generate format data for this language
formats_data = converter._generate_language_formats(behavior, language_code)
# Import these language formats just once
format_results = await import_language_formats_once(
formats_data=formats_data,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
import_as_unique=import_as_unique
)
# Store the format configs for this language
language_format_cache[language_id] = format_results
logger.info(f"Successfully cached {len(format_results)} formats for language {language_id}")
except Exception as e:
logger.error(f"Error pre-loading language formats for {language_id}: {str(e)}")
language_format_cache[language_id] = [] # Empty list to indicate failure
return language_format_cache
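# Illustrative shape of the returned cache (the key and format name are hypothetical;
# the -9999 score is the one assigned in import_language_formats_once below):
#   {'must_english': [{'name': 'Not English', 'score': -9999}]}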
async def import_language_formats_once(formats_data: List[Dict],
base_url: str,
api_key: str,
arr_type: str,
import_as_unique: bool) -> List[Dict]:
"""
Helper function to import language formats once and return the results.
Args:
formats_data: List of format data dictionaries to import
base_url: API base URL
api_key: API key for arr instance
arr_type: Type of arr (radarr or sonarr)
import_as_unique: Whether to append [Dictionarry] to format names
Returns:
List of format configs ready to be added to profiles
"""
# Create tasks for concurrent format imports
format_configs = []
import_tasks = []
for format_data in formats_data:
# Setup task for importing this format
task = asyncio.create_task(
async_import_format_from_memory(
format_data=format_data,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
import_as_unique=import_as_unique
)
)
import_tasks.append((format_data['name'], task))
# Process all format imports
for format_name, task in import_tasks:
try:
result = await task
if not result.get('success', False):
logger.error(f"Format import failed for cached language format: {format_name}")
continue
# Determine final format name (after any [Dictionarry] suffix)
display_name = format_name
if import_as_unique:
display_name = f"{format_name} [Dictionarry]"
# Create format config exactly as needed by profile compiler
format_configs.append({
'name': display_name,
'score': -9999
})
except Exception as e:
logger.error(f"Error importing cached language format {format_name}: {str(e)}")
return format_configs
def use_cached_language_formats(language_cache: Dict[str, List[Dict]],
format_names: List[str],
base_url: str,
api_key: str,
arr_type: str,
original_names: List[str]) -> Dict:
"""
Custom format importer that returns cached language formats instead
of re-importing them. This is used by the profile compiler when we've
already pre-loaded the language formats.
This is a replacement for the regular import_formats_to_arr function.
"""
# Extract the language ID from the original profile data
# This is passed from the profile compiler's context when calling this function
language_id = getattr(use_cached_language_formats, 'current_language_id', None)
if language_id and language_id in language_cache:
logger.info(f"Using cached language formats for {language_id}")
return {
'success': True,
'added': 0,
'updated': len(language_cache[language_id]),
'failed': 0,
'details': [
{'name': fmt['name'], 'action': 'updated', 'success': True}
for fmt in language_cache[language_id]
]
}
else:
# Fall back to normal import if no cache entry exists
# or if this isn't a language format import
logger.info(f"No cached formats for language ID {language_id}, using normal import")
return import_formats_to_arr(
format_names=format_names,
base_url=base_url,
api_key=api_key,
arr_type=arr_type,
original_names=original_names
)
def sync_format_ids(profile_data: Dict, format_id_map: Dict[str, int]) -> Dict:
if 'formatItems' not in profile_data:
profile_data['formatItems'] = []
# Create a set to track format names we've already processed
processed_formats = set()
synced_items = []
# First process existing items
for item in profile_data.get('formatItems', []):
if item['name'] not in processed_formats:
if item['name'] in format_id_map:
synced_items.append({
'format': format_id_map[item['name']],
'name': item['name'],
'score': item['score']
})
processed_formats.add(item['name'])
else:
logger.warning(
f"Custom format not found in arr: {item['name']}")
# Only add formats that haven't been processed yet
for format_name, format_id in format_id_map.items():
if format_name not in processed_formats:
synced_items.append({
'format': format_id,
'name': format_name,
'score': 0 # Default score for new formats
})
processed_formats.add(format_name)
profile_data['formatItems'] = synced_items
return profile_data
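# Worked example of the sync above (names and ids are hypothetical): given
#   profile_data   = {'name': 'P', 'formatItems': [{'name': 'x265', 'score': 100}]}
#   format_id_map  = {'x265': 7, 'HDR': 9}
# the returned profile's formatItems become
#   [{'format': 7, 'name': 'x265', 'score': 100},
#    {'format': 9, 'name': 'HDR', 'score': 0}]
# i.e. every format known to the arr is listed, with score 0 for unreferenced ones.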
# This function is now deprecated and replaced by direct use of sync_format_ids and async_process_profile
# We're keeping the signature for backward compatibility but not using it in the optimized code path
async def async_process_profile_with_formats(profile_name: str,
profile_data: Dict,
existing_profile_map: Dict[str,
int],
base_url: str,
api_key: str) -> Dict:
"""
Asynchronous function that handles getting formats and processing a profile in one go.
This allows for concurrent profile processing.
Note: This function is deprecated and should not be used in new code.
It's better to fetch formats once upfront for all profiles.
"""
try:
# Get formats for profile synchronization
logger.info(
f"Looking for existing custom formats to sync format IDs (async)..."
)
existing_formats = await async_get_existing_formats(base_url, api_key)
if existing_formats is None:
raise ValueError("Failed to get updated format list")
format_id_map = {fmt['name']: fmt['id'] for fmt in existing_formats}
logger.debug(
f"Found {len(format_id_map)} existing custom formats (async)")
# Sync format IDs in the profile
synced_profile = sync_format_ids(profile_data, format_id_map)
# Process the profile (add or update)
return await async_process_profile(profile_data=synced_profile,
existing_names=existing_profile_map,
base_url=base_url,
api_key=api_key)
except Exception as e:
logger.error(
f"Error in async_process_profile_with_formats for {profile_name}: {str(e)}"
)
return {
'success': False,
'action': 'failed',
'detail': {
'name': profile_name,
'action': 'failed',
'success': False,
'error': str(e)
}
}
def process_profile(profile_data: Dict, existing_names: Dict[str, int],
base_url: str, api_key: str) -> Dict:
profile_name = profile_data['name']
if profile_name in existing_names:
profile_data['id'] = existing_names[profile_name]
success = update_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'updated' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'updated',
'success': success
}
}
else:
success = add_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'added' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'added',
'success': success
}
}
async def async_process_profile(profile_data: Dict, existing_names: Dict[str,
int],
base_url: str, api_key: str) -> Dict:
"""Async version of process_profile"""
profile_name = profile_data['name']
if profile_name in existing_names:
profile_data['id'] = existing_names[profile_name]
success = await async_update_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'updated' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'updated',
'success': success
}
}
else:
success = await async_add_profile(base_url, api_key, profile_data)
return {
'success': success,
'action': 'added' if success else 'failed',
'detail': {
'name': profile_name,
'action': 'added',
'success': success
}
}
def update_profile(base_url: str, api_key: str, profile_data: Dict) -> bool:
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile/{profile_data['id']}"
response = requests.put(url,
headers={'X-Api-Key': api_key},
json=profile_data)
logger.info(f"Update profile '{profile_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating profile: {str(e)}")
return False
async def async_update_profile(base_url: str, api_key: str,
profile_data: Dict) -> bool:
"""Async version of update_profile"""
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile/{profile_data['id']}"
async with aiohttp.ClientSession() as session:
async with session.put(url,
headers={'X-Api-Key': api_key},
json=profile_data) as response:
logger.info(f"Update profile '{profile_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error updating profile (async): {str(e)}")
return False
def add_profile(base_url: str, api_key: str, profile_data: Dict) -> bool:
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile"
response = requests.post(url,
headers={'X-Api-Key': api_key},
json=profile_data)
logger.info(f"Add profile '{profile_data['name']}' response: {response.status_code}")
return response.status_code in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding profile: {str(e)}")
return False
async def async_add_profile(base_url: str, api_key: str,
profile_data: Dict) -> bool:
"""Async version of add_profile"""
try:
url = f"{base_url.rstrip('/')}/api/v3/qualityprofile"
async with aiohttp.ClientSession() as session:
async with session.post(url,
headers={'X-Api-Key': api_key},
json=profile_data) as response:
logger.info(f"Add profile '{profile_data['name']}' response: {response.status} (async)")
return response.status in [200, 201, 202, 204]
except Exception as e:
logger.error(f"Error adding profile (async): {str(e)}")
return False

View File

@@ -1,325 +0,0 @@
"""Main import module entry point."""
import sys
import logging
from typing import Dict, Any, List
from .strategies import FormatStrategy, ProfileStrategy
from .logger import reset_import_logger
logger = logging.getLogger(__name__)
def handle_import_request(request: Dict[str, Any]) -> Dict[str, Any]:
"""
Handle an import request.
Args:
request: Request dictionary containing:
- arrID: ID of the arr_config to use
- strategy: 'format' or 'profile'
- filenames: List of filenames to import
- dryRun: Optional boolean for dry-run mode (default: false)
Returns:
Import results with added/updated/failed counts
"""
from ..db import get_db
try:
# Extract request parameters
arr_id = request.get('arrID')
strategy_type = request.get('strategy')
filenames = request.get('filenames', [])
dry_run = request.get('dryRun', False)
# Validate inputs
if not arr_id:
return {'success': False, 'error': 'arrID is required'}
if strategy_type not in ['format', 'profile']:
return {
'success': False,
'error': 'strategy must be "format" or "profile"'
}
if not filenames:
return {'success': False, 'error': 'filenames list is required'}
# Load arr_config from database
with get_db() as conn:
cursor = conn.execute("SELECT * FROM arr_config WHERE id = ?",
(arr_id, ))
arr_config = cursor.fetchone()
if not arr_config:
return {
'success': False,
'error': f'arr_config {arr_id} not found'
}
# Select strategy
strategy_map = {'format': FormatStrategy, 'profile': ProfileStrategy}
strategy_class = strategy_map[strategy_type]
strategy = strategy_class(arr_config)
# Execute import with new logger
import_logger = reset_import_logger()
# Show start message
dry_run_text = " [DRY RUN]" if dry_run else ""
print(f"Starting {strategy_type} import for {arr_config['name']} ({arr_config['type']}): {len(filenames)} items{dry_run_text}", file=sys.stderr)
result = strategy.execute(filenames, dry_run=dry_run)
added = result.get('added', 0)
updated = result.get('updated', 0)
failed = result.get('failed', 0)
# Determine status
is_partial = failed > 0 and (added > 0 or updated > 0)
is_success = failed == 0
result['success'] = is_success or is_partial
if is_partial:
result['status'] = "partial"
elif is_success:
result['status'] = "success"
else:
result['status'] = "failed"
result['arr_config_id'] = arr_id
result['arr_config_name'] = arr_config['name']
result['strategy'] = strategy_type
# Complete logging
import_logger.complete()
return result
except Exception as e:
logger.exception("Import request failed")
return {'success': False, 'error': str(e)}
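# Illustrative request shape (values are hypothetical; see the docstring above):
#   handle_import_request({
#       'arrID': 1,
#       'strategy': 'profile',
#       'filenames': ['1080p Example'],
#       'dryRun': True,
#   })
#   # -> {'success': True, 'status': 'success', 'added': ..., 'updated': ..., 'failed': ..., ...}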
def handle_scheduled_import(task_id: int) -> Dict[str, Any]:
"""
Handle a scheduled import task.
Args:
task_id: ID from scheduled_tasks table
Returns:
Import results
"""
from ..db import get_db
import json
try:
# Find arr_config for this task
with get_db() as conn:
cursor = conn.execute(
"SELECT * FROM arr_config WHERE import_task_id = ?",
(task_id, ))
arr_config = cursor.fetchone()
if not arr_config:
return {
'success': False,
'error': f'No arr_config found for task {task_id}'
}
# Parse data_to_sync
data_to_sync = json.loads(arr_config['data_to_sync'] or '{}')
# Build import requests
results = []
# Import custom formats
format_names = data_to_sync.get('customFormats', [])
if format_names:
# Remove .yml extension if present
format_names = [f.replace('.yml', '') for f in format_names]
request = {
'arrID': arr_config['id'],
'strategy': 'format',
'filenames': format_names
}
result = handle_import_request(request)
results.append(result)
# Import profiles
profile_names = data_to_sync.get('profiles', [])
if profile_names:
# Remove .yml extension if present
profile_names = [p.replace('.yml', '') for p in profile_names]
request = {
'arrID': arr_config['id'],
'strategy': 'profile',
'filenames': profile_names
}
result = handle_import_request(request)
results.append(result)
# Combine results
total_added = sum(r.get('added', 0) for r in results)
total_updated = sum(r.get('updated', 0) for r in results)
total_failed = sum(r.get('failed', 0) for r in results)
is_partial = total_failed > 0 and (total_added > 0
or total_updated > 0)
is_success = total_failed == 0
status = "failed"
if is_partial:
status = "partial"
elif is_success:
status = "success"
combined_result = {
'success': is_success or is_partial,
'status': status,
'task_id': task_id,
'arr_config_id': arr_config['id'],
'arr_config_name': arr_config['name'],
'added': total_added,
'updated': total_updated,
'failed': total_failed,
'results': results
}
# Update sync status
_update_sync_status(arr_config['id'], combined_result)
return combined_result
except Exception as e:
logger.exception(f"Scheduled import {task_id} failed")
return {'success': False, 'error': str(e)}
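# Illustrative data_to_sync payload stored on arr_config (filenames are hypothetical);
# the keys below are the ones read above:
#   {"customFormats": ["x265.yml"], "profiles": ["1080p Example.yml"]}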
def handle_pull_import(arr_config_id: int) -> Dict[str, Any]:
"""
Handle an on-pull import for a specific ARR config.
This mirrors scheduled import behavior but is triggered immediately
during a git pull (not scheduled).
"""
from ..db import get_db
import json
try:
# Load arr_config by id
with get_db() as conn:
cursor = conn.execute("SELECT * FROM arr_config WHERE id = ?",
(arr_config_id, ))
arr_config = cursor.fetchone()
if not arr_config:
return {
'success': False,
'error': f'arr_config {arr_config_id} not found'
}
# Parse data_to_sync
data_to_sync = json.loads(arr_config['data_to_sync'] or '{}')
results: List[Dict[str, Any]] = []
# Import custom formats
format_names = data_to_sync.get('customFormats', [])
if format_names:
format_names = [f.replace('.yml', '') for f in format_names]
request = {
'arrID': arr_config['id'],
'strategy': 'format',
'filenames': format_names,
}
result = handle_import_request(request)
results.append(result)
# Import profiles
profile_names = data_to_sync.get('profiles', [])
if profile_names:
profile_names = [p.replace('.yml', '') for p in profile_names]
request = {
'arrID': arr_config['id'],
'strategy': 'profile',
'filenames': profile_names,
}
result = handle_import_request(request)
results.append(result)
# Combine results
total_added = sum(r.get('added', 0) for r in results)
total_updated = sum(r.get('updated', 0) for r in results)
total_failed = sum(r.get('failed', 0) for r in results)
is_partial = total_failed > 0 and (total_added > 0
or total_updated > 0)
is_success = total_failed == 0
status = "failed"
if is_partial:
status = "partial"
elif is_success:
status = "success"
combined_result = {
'success': is_success or is_partial,
'status': status,
'arr_config_id': arr_config['id'],
'arr_config_name': arr_config['name'],
'added': total_added,
'updated': total_updated,
'failed': total_failed,
'results': results,
}
# Update sync status
_update_sync_status(arr_config['id'], combined_result)
return combined_result
except Exception as e:
logger.exception(f"Pull import for arr_config {arr_config_id} failed")
return {
'success': False,
'error': str(e),
}
def _update_sync_status(config_id: int, result: Dict[str, Any]) -> None:
"""Update arr_config sync status after scheduled import."""
from ..db import get_db
from datetime import datetime
try:
total = result.get('added', 0) + result.get('updated', 0) + result.get(
'failed', 0)
successful = result.get('added', 0) + result.get('updated', 0)
sync_percentage = int((successful / total * 100) if total > 0 else 0)
with get_db() as conn:
conn.execute(
"""
UPDATE arr_config
SET last_sync_time = ?,
sync_percentage = ?
WHERE id = ?
""", (datetime.now(), sync_percentage, config_id))
conn.commit()
logger.info(
f"Updated sync status for arr_config #{config_id}: {sync_percentage}%"
)
except Exception as e:
logger.error(f"Failed to update sync status: {e}")
# Export main functions
__all__ = [
'handle_import_request', 'handle_scheduled_import', 'handle_pull_import'
]

View File

@@ -1,150 +0,0 @@
"""ArrHandler class - manages all Arr API communication."""
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from typing import Dict, List, Any, Optional
logger = logging.getLogger(__name__)
class ArrApiError(Exception):
"""Custom exception for Arr API errors."""
def __init__(self, message: str, status_code: Optional[int] = None):
super().__init__(message)
self.status_code = status_code
class ArrHandler:
"""Manages all communication with Radarr/Sonarr API."""
def __init__(self, base_url: str, api_key: str):
"""
Initialize the Arr API handler.
Args:
base_url: Base URL of the Arr instance
api_key: API key for authentication
"""
self.base_url = base_url.rstrip('/')
self.headers = {
'X-Api-Key': api_key,
'Content-Type': 'application/json'
}
self.session = self._create_session()
def _create_session(self) -> requests.Session:
"""Create a session with connection pooling and retry logic."""
session = requests.Session()
# Configure retry strategy
retry = Retry(
total=3,
backoff_factor=0.5,
status_forcelist=[500, 502, 503, 504]
)
# Configure connection pooling
adapter = HTTPAdapter(
pool_connections=5,
pool_maxsize=5,
max_retries=retry
)
session.mount('http://', adapter)
session.mount('https://', adapter)
session.headers.update(self.headers)
return session
def get(self, endpoint: str) -> Any:
"""
Make a GET request to the Arr API.
Args:
endpoint: API endpoint path
Returns:
JSON response data
Raises:
ArrApiError: If request fails
"""
url = f"{self.base_url}{endpoint}"
try:
response = self.session.get(url, timeout=30)
if response.status_code != 200:
raise ArrApiError(
f"GET {endpoint} failed: {response.text}",
response.status_code
)
return response.json()
except requests.RequestException as e:
raise ArrApiError(f"GET {endpoint} failed: {str(e)}")
def post(self, endpoint: str, data: Dict[str, Any]) -> Any:
"""
Make a POST request to the Arr API.
Args:
endpoint: API endpoint path
data: JSON data to send
Returns:
JSON response data
Raises:
ArrApiError: If request fails
"""
url = f"{self.base_url}{endpoint}"
try:
response = self.session.post(url, json=data, timeout=30)
if response.status_code not in [200, 201]:
raise ArrApiError(
f"POST {endpoint} failed: {response.text}",
response.status_code
)
return response.json()
except requests.RequestException as e:
raise ArrApiError(f"POST {endpoint} failed: {str(e)}")
def put(self, endpoint: str, data: Dict[str, Any]) -> Any:
"""
Make a PUT request to the Arr API.
Args:
endpoint: API endpoint path
data: JSON data to send
Returns:
JSON response data (if any)
Raises:
ArrApiError: If request fails
"""
url = f"{self.base_url}{endpoint}"
try:
response = self.session.put(url, json=data, timeout=30)
if response.status_code not in [200, 202, 204]:
raise ArrApiError(
f"PUT {endpoint} failed: {response.text}",
response.status_code
)
# 204 No Content won't have JSON
if response.status_code == 204:
return {}
return response.json()
except requests.RequestException as e:
raise ArrApiError(f"PUT {endpoint} failed: {str(e)}")
def get_all_formats(self) -> List[Dict[str, Any]]:
"""Get all custom formats from the Arr instance."""
return self.get("/api/v3/customformat")
def get_all_profiles(self) -> List[Dict[str, Any]]:
"""Get all quality profiles from the Arr instance."""
return self.get("/api/v3/qualityprofile")
def close(self):
"""Close the session."""
self.session.close()
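# Illustrative usage sketch (URL and API key are hypothetical):
#   arr = ArrHandler('http://radarr:7878', 'REDACTED')
#   try:
#       formats = arr.get_all_formats()      # GET /api/v3/customformat
#       profiles = arr.get_all_profiles()    # GET /api/v3/qualityprofile
#   finally:
#       arr.close()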

View File

@@ -1,138 +0,0 @@
"""Custom logger for importer with progress tracking and colored output."""
import sys
from typing import List, Dict, Any
from datetime import datetime
class ImportLogger:
"""Custom logger with progress tracking and colored error output."""
def __init__(self):
"""Initialize the import logger."""
self.compilation_errors: List[Dict[str, str]] = []
self.import_errors: List[Dict[str, str]] = []
self.warnings: List[str] = []
self.current_compilation = 0
self.total_compilation = 0
self.current_import = 0
self.total_import = 0
self.added = 0
self.updated = 0
self.failed = 0
self.start_time = None
self.compilation_items: List[str] = []
self.import_items: List[Dict[str, str]] = []
def _write_colored(self, text: str, color: str = None):
"""Write colored text to stderr."""
if color == 'red':
text = f"\033[91m{text}\033[0m"
elif color == 'yellow':
text = f"\033[93m{text}\033[0m"
elif color == 'green':
text = f"\033[92m{text}\033[0m"
print(text, file=sys.stderr)
def start(self, total_compilation: int, total_import: int):
"""Start the import process."""
self.start_time = datetime.now()
self.total_compilation = total_compilation
self.total_import = total_import
self.current_compilation = 0
self.current_import = 0
def update_compilation(self, item_name: str):
"""Track compilation progress."""
self.current_compilation += 1
self.compilation_items.append(item_name)
def compilation_complete(self):
"""Show compilation summary."""
if self.total_compilation > 0:
print(f"Compiled: {self.current_compilation}/{self.total_compilation}", file=sys.stderr)
# Show compilation errors if any
if self.compilation_errors:
for error in self.compilation_errors:
self._write_colored(f"ERROR: Failed to compile {error['item']}: {error['message']}", 'red')
def update_import(self, item_name: str, action: str):
"""Track import progress."""
self.import_items.append({'name': item_name, 'action': action})
# Update counts based on action
if action == 'added':
self.added += 1
self.current_import += 1 # Only count successful imports
elif action == 'updated':
self.updated += 1
self.current_import += 1 # Only count successful imports
elif action == 'failed':
self.failed += 1
# Don't increment current_import for failures
def import_complete(self):
"""Show import summary."""
if self.total_import > 0:
print(f"Imported: {self.current_import}/{self.total_import}", file=sys.stderr)
# Show import errors if any
if self.import_errors:
for error in self.import_errors:
self._write_colored(f"ERROR: {error['message']}", 'red')
        # Show warnings if any
        if self.warnings:
            for warning in self.warnings:
                self._write_colored(f"WARNING: {warning}", 'yellow')
        # Mark the summary as shown so complete() does not print it a second time
        self._import_shown = True
def error(self, message: str, item_name: str = None, phase: str = 'import'):
"""Log an error."""
if phase == 'compilation':
self.compilation_errors.append({'item': item_name or 'unknown', 'message': message})
else:
self.import_errors.append({'item': item_name or 'unknown', 'message': message})
def warning(self, message: str):
"""Log a warning."""
self.warnings.append(message)
def complete(self):
"""Complete the import and show final summary."""
# Show import summary first if not already shown
if self.current_import > 0 and not hasattr(self, '_import_shown'):
self.import_complete()
# Calculate duration
if self.start_time:
duration = (datetime.now() - self.start_time).total_seconds()
duration_str = f"{duration:.1f}s"
else:
duration_str = "N/A"
# Simple final summary
print(f"\n{'='*50}", file=sys.stderr)
print(f"Import Complete in {duration_str}", file=sys.stderr)
print(f"Added: {self.added}, Updated: {self.updated}, Failed: {self.failed}", file=sys.stderr)
print(f"{'='*50}\n", file=sys.stderr)
# Global instance
_logger = None
def get_import_logger() -> ImportLogger:
"""Get the import logger instance."""
global _logger
if _logger is None:
_logger = ImportLogger()
return _logger
def reset_import_logger() -> ImportLogger:
"""Reset and return a new import logger."""
global _logger
_logger = ImportLogger()
return _logger
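# Illustrative usage sketch (item names are hypothetical):
#   log = reset_import_logger()
#   log.start(total_compilation=1, total_import=1)
#   log.update_compilation('Example Format')
#   log.compilation_complete()
#   log.update_import('Example Format', 'added')
#   log.import_complete()
#   log.complete()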

View File

@@ -1,990 +0,0 @@
# app/compile/mappings.py
"""Centralized constants and mappings for arr applications"""
from enum import Enum, auto
from typing import Dict, Any
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TargetApp(Enum):
"""Enum for target application types"""
RADARR = auto()
SONARR = auto()
class IndexerFlags:
"""Indexer flag mappings for both applications"""
RADARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 32,
'scene': 128,
'freeleech_75': 256,
'freeleech_25': 512,
'nuked': 2048,
'ptp_golden': 8,
'ptp_approved': 16
}
SONARR = {
'freeleech': 1,
'halfleech': 2,
'double_upload': 4,
'internal': 8,
'scene': 16,
'freeleech_75': 32,
'freeleech_25': 64,
'nuked': 128
}
class Sources:
"""Source mappings for both applications"""
RADARR = {
'cam': 1,
'telesync': 2,
'telecine': 3,
'workprint': 4,
'dvd': 5,
'tv': 6,
'web_dl': 7,
'webrip': 8,
'bluray': 9
}
SONARR = {
'television': 1,
'television_raw': 2,
'web_dl': 3,
'webrip': 4,
'dvd': 5,
'bluray': 6,
'bluray_raw': 7
}
class Quality_Modifiers:
"""Quality modifier mappings for Radarr ONLY"""
RADARR = {
'none': 0,
'regional': 1,
'screener': 2,
'rawhd': 3,
'brdisk': 4,
'remux': 5,
}
class Release_Types:
"""Release type mappings for Sonarr ONLY"""
SONARR = {
'none': 0,
'single_episode': 1,
'multi_episode': 2,
'season_pack': 3,
}
class Qualities:
"""Quality mappings for both applications"""
COMMON_RESOLUTIONS = {
'360p': 360,
'480p': 480,
'540p': 540,
'576p': 576,
'720p': 720,
'1080p': 1080,
'2160p': 2160
}
RADARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "tv",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "webdl",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "tv",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "webdl",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "webdl",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "tv",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "tv",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webrip",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webrip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webrip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "tv",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webrip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "webdl",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-480p": {
"id": 20,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"Bluray-576p": {
"id": 21,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
},
"BR-DISK": {
"id": 22,
"name": "BR-DISK",
"source": "bluray",
"resolution": 1080
},
"DVD-R": {
"id": 23,
"name": "DVD-R",
"source": "dvd",
"resolution": 480
},
"WORKPRINT": {
"id": 24,
"name": "WORKPRINT",
"source": "workprint",
"resolution": 0
},
"CAM": {
"id": 25,
"name": "CAM",
"source": "cam",
"resolution": 0
},
"TELESYNC": {
"id": 26,
"name": "TELESYNC",
"source": "telesync",
"resolution": 0
},
"TELECINE": {
"id": 27,
"name": "TELECINE",
"source": "telecine",
"resolution": 0
},
"DVDSCR": {
"id": 28,
"name": "DVDSCR",
"source": "dvd",
"resolution": 480
},
"REGIONAL": {
"id": 29,
"name": "REGIONAL",
"source": "dvd",
"resolution": 480
},
"Remux-1080p": {
"id": 30,
"name": "Remux-1080p",
"source": "bluray",
"resolution": 1080
},
"Remux-2160p": {
"id": 31,
"name": "Remux-2160p",
"source": "bluray",
"resolution": 2160
}
}
SONARR = {
"Unknown": {
"id": 0,
"name": "Unknown",
"source": "unknown",
"resolution": 0
},
"SDTV": {
"id": 1,
"name": "SDTV",
"source": "television",
"resolution": 480
},
"DVD": {
"id": 2,
"name": "DVD",
"source": "dvd",
"resolution": 480
},
"WEBDL-1080p": {
"id": 3,
"name": "WEBDL-1080p",
"source": "web",
"resolution": 1080
},
"HDTV-720p": {
"id": 4,
"name": "HDTV-720p",
"source": "television",
"resolution": 720
},
"WEBDL-720p": {
"id": 5,
"name": "WEBDL-720p",
"source": "web",
"resolution": 720
},
"Bluray-720p": {
"id": 6,
"name": "Bluray-720p",
"source": "bluray",
"resolution": 720
},
"Bluray-1080p": {
"id": 7,
"name": "Bluray-1080p",
"source": "bluray",
"resolution": 1080
},
"WEBDL-480p": {
"id": 8,
"name": "WEBDL-480p",
"source": "web",
"resolution": 480
},
"HDTV-1080p": {
"id": 9,
"name": "HDTV-1080p",
"source": "television",
"resolution": 1080
},
"Raw-HD": {
"id": 10,
"name": "Raw-HD",
"source": "televisionRaw",
"resolution": 1080
},
"WEBRip-480p": {
"id": 12,
"name": "WEBRip-480p",
"source": "webRip",
"resolution": 480
},
"Bluray-480p": {
"id": 13,
"name": "Bluray-480p",
"source": "bluray",
"resolution": 480
},
"WEBRip-720p": {
"id": 14,
"name": "WEBRip-720p",
"source": "webRip",
"resolution": 720
},
"WEBRip-1080p": {
"id": 15,
"name": "WEBRip-1080p",
"source": "webRip",
"resolution": 1080
},
"HDTV-2160p": {
"id": 16,
"name": "HDTV-2160p",
"source": "television",
"resolution": 2160
},
"WEBRip-2160p": {
"id": 17,
"name": "WEBRip-2160p",
"source": "webRip",
"resolution": 2160
},
"WEBDL-2160p": {
"id": 18,
"name": "WEBDL-2160p",
"source": "web",
"resolution": 2160
},
"Bluray-2160p": {
"id": 19,
"name": "Bluray-2160p",
"source": "bluray",
"resolution": 2160
},
"Bluray-1080p Remux": {
"id": 20,
"name": "Bluray-1080p Remux",
"source": "blurayRaw",
"resolution": 1080
},
"Bluray-2160p Remux": {
"id": 21,
"name": "Bluray-2160p Remux",
"source": "blurayRaw",
"resolution": 2160
},
"Bluray-576p": {
"id": 22,
"name": "Bluray-576p",
"source": "bluray",
"resolution": 576
}
}
class Languages:
"""Language mappings for both applications"""
RADARR = {
'any': {
'id': -1,
'name': 'Any'
},
'original': {
'id': -2,
'name': 'Original'
},
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'hindi': {
'id': 26,
'name': 'Hindi'
},
'romanian': {
'id': 27,
'name': 'Romanian'
},
'thai': {
'id': 28,
'name': 'Thai'
},
'bulgarian': {
'id': 29,
'name': 'Bulgarian'
},
'portuguese_br': {
'id': 30,
'name': 'Portuguese (Brazil)'
},
'arabic': {
'id': 31,
'name': 'Arabic'
},
'ukrainian': {
'id': 32,
'name': 'Ukrainian'
},
'persian': {
'id': 33,
'name': 'Persian'
},
'bengali': {
'id': 34,
'name': 'Bengali'
},
'slovak': {
'id': 35,
'name': 'Slovak'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'spanish_latino': {
'id': 37,
'name': 'Spanish (Latino)'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'telugu': {
'id': 45,
'name': 'Telugu'
},
'macedonian': {
'id': 46,
'name': 'Macedonian'
},
'slovenian': {
'id': 47,
'name': 'Slovenian'
},
'malayalam': {
'id': 48,
'name': 'Malayalam'
},
'kannada': {
'id': 49,
'name': 'Kannada'
},
'albanian': {
'id': 50,
'name': 'Albanian'
},
'afrikaans': {
'id': 51,
'name': 'Afrikaans'
}
}
SONARR = {
'unknown': {
'id': 0,
'name': 'Unknown'
},
'english': {
'id': 1,
'name': 'English'
},
'french': {
'id': 2,
'name': 'French'
},
'spanish': {
'id': 3,
'name': 'Spanish'
},
'german': {
'id': 4,
'name': 'German'
},
'italian': {
'id': 5,
'name': 'Italian'
},
'danish': {
'id': 6,
'name': 'Danish'
},
'dutch': {
'id': 7,
'name': 'Dutch'
},
'japanese': {
'id': 8,
'name': 'Japanese'
},
'icelandic': {
'id': 9,
'name': 'Icelandic'
},
'chinese': {
'id': 10,
'name': 'Chinese'
},
'russian': {
'id': 11,
'name': 'Russian'
},
'polish': {
'id': 12,
'name': 'Polish'
},
'vietnamese': {
'id': 13,
'name': 'Vietnamese'
},
'swedish': {
'id': 14,
'name': 'Swedish'
},
'norwegian': {
'id': 15,
'name': 'Norwegian'
},
'finnish': {
'id': 16,
'name': 'Finnish'
},
'turkish': {
'id': 17,
'name': 'Turkish'
},
'portuguese': {
'id': 18,
'name': 'Portuguese'
},
'flemish': {
'id': 19,
'name': 'Flemish'
},
'greek': {
'id': 20,
'name': 'Greek'
},
'korean': {
'id': 21,
'name': 'Korean'
},
'hungarian': {
'id': 22,
'name': 'Hungarian'
},
'hebrew': {
'id': 23,
'name': 'Hebrew'
},
'lithuanian': {
'id': 24,
'name': 'Lithuanian'
},
'czech': {
'id': 25,
'name': 'Czech'
},
'arabic': {
'id': 26,
'name': 'Arabic'
},
'hindi': {
'id': 27,
'name': 'Hindi'
},
'bulgarian': {
'id': 28,
'name': 'Bulgarian'
},
'malayalam': {
'id': 29,
'name': 'Malayalam'
},
'ukrainian': {
'id': 30,
'name': 'Ukrainian'
},
'slovak': {
'id': 31,
'name': 'Slovak'
},
'thai': {
'id': 32,
'name': 'Thai'
},
'portuguese_br': {
'id': 33,
'name': 'Portuguese (Brazil)'
},
'spanish_latino': {
'id': 34,
'name': 'Spanish (Latino)'
},
'romanian': {
'id': 35,
'name': 'Romanian'
},
'latvian': {
'id': 36,
'name': 'Latvian'
},
'persian': {
'id': 37,
'name': 'Persian'
},
'catalan': {
'id': 38,
'name': 'Catalan'
},
'croatian': {
'id': 39,
'name': 'Croatian'
},
'serbian': {
'id': 40,
'name': 'Serbian'
},
'bosnian': {
'id': 41,
'name': 'Bosnian'
},
'estonian': {
'id': 42,
'name': 'Estonian'
},
'tamil': {
'id': 43,
'name': 'Tamil'
},
'indonesian': {
'id': 44,
'name': 'Indonesian'
},
'macedonian': {
'id': 45,
'name': 'Macedonian'
},
'slovenian': {
'id': 46,
'name': 'Slovenian'
},
'original': {
'id': -2,
'name': 'Original'
}
}
class QualityNameMapper:
"""Maps between different quality naming conventions"""
REMUX_MAPPINGS = {
TargetApp.SONARR: {
"Remux-1080p": "Bluray-1080p Remux",
"Remux-2160p": "Bluray-2160p Remux"
},
TargetApp.RADARR: {
"Remux-1080p": "Remux-1080p",
"Remux-2160p": "Remux-2160p"
}
}
ALTERNATE_NAMES = {
"BR-Disk": "BR-DISK",
"BR-DISK": "BR-DISK",
"BRDISK": "BR-DISK",
"BR_DISK": "BR-DISK",
"BLURAY-DISK": "BR-DISK",
"BLURAY_DISK": "BR-DISK",
"BLURAYDISK": "BR-DISK",
"Telecine": "TELECINE",
"TELECINE": "TELECINE",
"TeleCine": "TELECINE",
"Telesync": "TELESYNC",
"TELESYNC": "TELESYNC",
"TeleSync": "TELESYNC",
}
@classmethod
def map_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""
Maps quality names between different formats based on target app
Args:
name: The quality name to map
target_app: The target application (RADARR or SONARR)
Returns:
The mapped quality name
"""
# Handle empty or None cases
if not name:
return name
# First check for remux mappings
if name in cls.REMUX_MAPPINGS.get(target_app, {}):
return cls.REMUX_MAPPINGS[target_app][name]
# Then check for alternate spellings
normalized_name = name.upper().replace("-", "").replace("_", "")
for alt_name, standard_name in cls.ALTERNATE_NAMES.items():
if normalized_name == alt_name.upper().replace("-", "").replace(
"_", ""):
return standard_name
return name
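# Illustrative mappings drawn from the tables above:
#   QualityNameMapper.map_quality_name('Remux-1080p', TargetApp.SONARR)  -> 'Bluray-1080p Remux'
#   QualityNameMapper.map_quality_name('BR-Disk', TargetApp.RADARR)      -> 'BR-DISK'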
class LanguageNameMapper:
"""Maps between different language naming conventions"""
ALTERNATE_NAMES = {
"spanish-latino": "spanish_latino",
"spanish_latino": "spanish_latino",
"spanishlatino": "spanish_latino",
"portuguese-br": "portuguese_br",
"portuguese_br": "portuguese_br",
"portuguesebr": "portuguese_br",
"portuguese-brazil": "portuguese_br",
"portuguese_brazil": "portuguese_br"
}
@classmethod
def normalize_language_name(cls, name: str) -> str:
"""
Normalizes language names to a consistent format
Args:
name: The language name to normalize
Returns:
The normalized language name
"""
if not name:
return name
normalized = name.lower().replace(" ", "_")
return cls.ALTERNATE_NAMES.get(normalized, normalized)
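# Illustrative normalisations drawn from the table above:
#   LanguageNameMapper.normalize_language_name('Spanish Latino')    -> 'spanish_latino'
#   LanguageNameMapper.normalize_language_name('Portuguese-Brazil') -> 'portuguese_br'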
class ValueResolver:
"""Helper class to resolve values based on target app"""
@classmethod
def get_indexer_flag(cls, flag: str, target_app: TargetApp) -> int:
flags = IndexerFlags.RADARR if target_app == TargetApp.RADARR else IndexerFlags.SONARR
return flags.get(flag.lower(), 0)
@classmethod
def get_source(cls, source: str, target_app: TargetApp) -> int:
sources = Sources.RADARR if target_app == TargetApp.RADARR else Sources.SONARR
return sources.get(source.lower(), 0)
@classmethod
def get_resolution(cls, resolution: str) -> int:
return Qualities.COMMON_RESOLUTIONS.get(resolution.lower(), 0)
@classmethod
def get_qualities(cls, target_app: TargetApp) -> Dict[str, Any]:
qualities = Qualities.RADARR if target_app == TargetApp.RADARR else Qualities.SONARR
return qualities
@classmethod
def get_quality_name(cls, name: str, target_app: TargetApp) -> str:
"""Maps quality names between different formats based on target app"""
return QualityNameMapper.map_quality_name(name, target_app)
@classmethod
def get_quality_modifier(cls, quality_modifier: str) -> int:
return Quality_Modifiers.RADARR.get(quality_modifier.lower(), 0)
@classmethod
def get_release_type(cls, release_type: str) -> int:
return Release_Types.SONARR.get(release_type.lower(), 0)
@classmethod
def get_language(cls,
language_name: str,
target_app: TargetApp,
for_profile: bool = True) -> Dict[str, Any]:
"""
Get language mapping based on target app and context
Args:
language_name: Name of the language to look up
target_app: Target application (RADARR or SONARR)
for_profile: If True, this is for a quality profile. If False, this is for a custom format.
"""
languages = Languages.RADARR if target_app == TargetApp.RADARR else Languages.SONARR
# For profiles, only Radarr uses language settings
if for_profile and target_app == TargetApp.SONARR:
return {'id': -2, 'name': 'Original'}
# Normalize the language name
normalized_name = LanguageNameMapper.normalize_language_name(
language_name)
language_data = languages.get(normalized_name)
if not language_data:
logger.warning(
f"Language '{language_name}' (normalized: '{normalized_name}') "
f"not found in {target_app} mappings, falling back to Unknown")
language_data = languages['unknown']
return language_data
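# Illustrative lookups drawn from the tables above:
#   ValueResolver.get_language('french', TargetApp.RADARR)   -> {'id': 2, 'name': 'French'}
#   ValueResolver.get_language('english', TargetApp.SONARR)  -> {'id': -2, 'name': 'Original'}  (profiles ignore language on Sonarr)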

View File

@@ -1,59 +0,0 @@
"""Routes for the new import module."""
from flask import Blueprint, request, jsonify
from flask_cors import cross_origin
import logging
from . import handle_import_request
logger = logging.getLogger(__name__)
bp = Blueprint('new_import', __name__)
@bp.route('', methods=['POST', 'OPTIONS'])
@cross_origin()
def import_items():
"""
Import formats or profiles to an Arr instance.
Request body:
{
"arrID": int, # ID of arr_config to use
"strategy": str, # "format" or "profile"
"filenames": [str], # List of filenames to import
"dryRun": bool # Optional: simulate import without changes (default: false)
}
"""
if request.method == 'OPTIONS':
return jsonify({}), 200
try:
data = request.get_json()
# Validate request
if not data:
return jsonify({
'success': False,
'error': 'Request body is required'
}), 400
# Call the import handler
result = handle_import_request(data)
# Return appropriate status code
status_code = 200
if result.get('status') == 'partial':
status_code = 207
elif not result.get('success'):
if 'not found' in result.get('error', '').lower():
status_code = 404
else:
status_code = 400
return jsonify(result), status_code
except Exception as e:
logger.error(f"Error handling import request: {str(e)}")
return jsonify({
'success': False,
'error': str(e)
}), 500
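# Illustrative request (the URL prefix depends on where this blueprint is registered,
# which is outside this file; the body values are hypothetical):
#   POST <blueprint-prefix>  with JSON:
#   {"arrID": 1, "strategy": "format", "filenames": ["x265"], "dryRun": false}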

View File

@@ -1,6 +0,0 @@
"""Import strategies."""
from .base import ImportStrategy
from .format import FormatStrategy
from .profile import ProfileStrategy
__all__ = ['ImportStrategy', 'FormatStrategy', 'ProfileStrategy']

View File

@@ -1,103 +0,0 @@
"""Base strategy class for import operations."""
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Any
from ..arr_handler import ArrHandler
from ..logger import get_import_logger
logger = logging.getLogger(__name__)
class ImportStrategy(ABC):
"""Base class for import strategies."""
def __init__(self, arr_config):
"""
Initialize the import strategy.
Args:
arr_config: Database row from arr_config table containing:
- type: 'radarr' or 'sonarr'
- arr_server: Base URL
- api_key: API key
- import_as_unique: Whether to add [Dictionarry] suffix
"""
# Handle sqlite3.Row objects (they support dict-like access)
self.arr_type = arr_config['type']
self.base_url = arr_config['arr_server']
self.api_key = arr_config['api_key']
# sqlite3.Row doesn't have .get() method, so we need to handle None
import_as_unique = arr_config['import_as_unique'] if 'import_as_unique' in arr_config.keys() else False
self.import_as_unique = bool(import_as_unique) if import_as_unique is not None else False
self.arr = ArrHandler(self.base_url, self.api_key)
@abstractmethod
def compile(self, filenames: List[str]) -> Dict[str, Any]:
"""
Compile files to API-ready format.
Args:
filenames: List of filenames to compile
Returns:
Dictionary with compiled data
"""
pass
@abstractmethod
def import_data(self, compiled_data: Dict[str, Any], dry_run: bool = False) -> Dict[str, Any]:
"""
Import compiled data to Arr instance.
Args:
compiled_data: Data from compile() method
dry_run: If True, simulate import without making changes
Returns:
Import results with added/updated/failed counts
"""
pass
def execute(self, filenames: List[str], dry_run: bool = False) -> Dict[str, Any]:
"""
Execute the full import process.
Args:
filenames: List of filenames to import
dry_run: If True, simulate import without making changes
Returns:
Import results
"""
try:
# Compile
compiled = self.compile(filenames)
# Import
results = self.import_data(compiled, dry_run=dry_run)
# Add dry_run flag and compiled data to results
if dry_run:
results['dry_run'] = True
results['compiled_data'] = compiled
return results
except Exception as e:
import_logger = get_import_logger()
import_logger.error(f"Strategy execution failed: {e}", phase='import')
return {
'added': 0,
'updated': 0,
'failed': len(filenames),
'error': str(e)
}
finally:
# Clean up
self.arr.close()
def add_unique_suffix(self, name: str) -> str:
"""Add [Dictionarry] suffix if unique import is enabled."""
if self.import_as_unique and not name.endswith('[Dictionarry]'):
return f"{name} [Dictionarry]"
return name
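# Minimal illustrative subclass (a sketch only, not one of the real strategies in
# format.py / profile.py; the compiled shape is hypothetical):
#   class NoopStrategy(ImportStrategy):
#       def compile(self, filenames):
#           return {'items': filenames}
#       def import_data(self, compiled_data, dry_run=False):
#           return {'added': 0, 'updated': 0, 'failed': 0, 'details': []}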

View File

@@ -1,179 +0,0 @@
"""Utility functions for import operations."""
import logging
import yaml
from pathlib import Path
from typing import Dict, List, Any, Set
from ..data.utils import get_category_directory
logger = logging.getLogger(__name__)
def load_yaml(file_path: str) -> Dict[str, Any]:
"""
Load a YAML file.
Args:
file_path: Path to YAML file (relative to data directory)
Returns:
Parsed YAML data
Raises:
FileNotFoundError: If file doesn't exist
yaml.YAMLError: If YAML is invalid
"""
# Handle both absolute and relative paths
if file_path.startswith('/'):
full_path = Path(file_path)
else:
# Check if it starts with a category
if file_path.startswith('custom_format/'):
base_dir = get_category_directory('custom_format')
filename = file_path.replace('custom_format/', '')
full_path = Path(base_dir) / filename
elif file_path.startswith('profile/'):
base_dir = get_category_directory('profile')
filename = file_path.replace('profile/', '')
full_path = Path(base_dir) / filename
else:
            # No recognised category prefix; treat the value as a path as-is
full_path = Path(file_path)
if not full_path.exists():
raise FileNotFoundError(f"File not found: {full_path}")
with open(full_path, 'r', encoding='utf-8') as f:
return yaml.safe_load(f)
def extract_format_names(profile_data: Dict[str, Any], arr_type: str = None) -> Set[str]:
"""
Extract all custom format names referenced in a profile.
Args:
profile_data: Profile YAML data
arr_type: Target arr type ('radarr' or 'sonarr'). If provided, only extracts
formats for that specific arr type.
Returns:
Set of unique format names
"""
format_names = set()
# Extract from main custom_formats
for cf in profile_data.get('custom_formats', []):
if isinstance(cf, dict) and 'name' in cf:
format_names.add(cf['name'])
# Extract from app-specific custom_formats
if arr_type:
# Only extract formats for the specific arr type
app_key = f'custom_formats_{arr_type.lower()}'
for cf in profile_data.get(app_key, []):
if isinstance(cf, dict) and 'name' in cf:
format_names.add(cf['name'])
else:
# Extract from all app-specific sections (backwards compatibility)
for key in ['custom_formats_radarr', 'custom_formats_sonarr']:
for cf in profile_data.get(key, []):
if isinstance(cf, dict) and 'name' in cf:
format_names.add(cf['name'])
return format_names
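A quick usage sketch of extract_format_names (the format names are illustrative): with an arr_type it merges the shared list with that app's section only; without one it merges every section.

```python
profile = {
    'custom_formats': [{'name': 'Remux Tier 01', 'score': 100}],
    'custom_formats_radarr': [{'name': 'x265 (HD)', 'score': -10000}],
    'custom_formats_sonarr': [{'name': 'Season Pack', 'score': 50}],
}

extract_format_names(profile, arr_type='radarr')
# -> {'Remux Tier 01', 'x265 (HD)'}

extract_format_names(profile)
# -> {'Remux Tier 01', 'x265 (HD)', 'Season Pack'}
```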
def generate_language_formats(language: str, arr_type: str) -> List[Dict[str, Any]]:
"""
Generate language-specific format configurations.
Args:
language: Language string (e.g., 'must_english', 'only_french')
arr_type: 'radarr' or 'sonarr'
Returns:
List of format configurations for language handling
"""
if language == 'any' or '_' not in language:
return []
behavior, language_code = language.split('_', 1)
formats = []
# Handle behaviors: 'must' and 'only' (matching old working logic)
if behavior in ['must', 'only']:
# Load base "Not English" format as template
try:
base_format = load_yaml('custom_format/Not English.yml')
# Create "Not [Language]" format
not_format = base_format.copy()
lang_display = language_code.capitalize()
not_format['name'] = f"Not {lang_display}"
# Update conditions for the specific language
for condition in not_format.get('conditions', []):
if condition.get('type') == 'language':
condition['language'] = language_code
if 'name' in condition:
condition['name'] = condition['name'].replace('English', lang_display)
# Note: exceptLanguage field is preserved from the base format
formats.append(not_format)
# For 'only' behavior, add additional formats
if behavior == 'only':
additional_format_names = [
"Not Only English",
"Not Only English (Missing)"
]
for format_name in additional_format_names:
try:
additional = load_yaml(f'custom_format/{format_name}.yml')
additional['name'] = additional['name'].replace('English', lang_display)
for condition in additional.get('conditions', []):
if condition.get('type') == 'language':
condition['language'] = language_code
if 'name' in condition:
condition['name'] = condition['name'].replace('English', lang_display)
# Note: exceptLanguage field is preserved from the base format
formats.append(additional)
except Exception:
# Skip silently - the optional format file doesn't exist
pass
except Exception:
# Skip silently - handled at a higher level
pass
return formats
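The expansion is easiest to see with an example. Assuming the base 'Not English' and 'Not Only English' format files exist in the custom_format directory, a profile language of 'only_french' yields three derived formats:

```python
for fmt in generate_language_formats('only_french', 'radarr'):
    print(fmt['name'])
# Not French
# Not Only French
# Not Only French (Missing)
```

A 'must_french' language stops after the first format, since only the 'only' behaviour pulls in the two additional ones.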
def load_regex_patterns() -> Dict[str, str]:
"""
Load all regex patterns from the regex directory.
Returns:
Dictionary mapping pattern names to regex patterns
"""
from ..data.utils import REGEX_DIR
patterns = {}
pattern_dir = Path(REGEX_DIR)
if not pattern_dir.exists():
return patterns
for pattern_file in pattern_dir.glob('*.yml'):
try:
with open(pattern_file, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f)
if data and 'name' in data and 'pattern' in data:
patterns[data['name']] = data['pattern']
except Exception:
# Skip silently - ignore individual malformed pattern files
pass
return patterns


@@ -1,202 +0,0 @@
from datetime import timedelta
import os
import subprocess
import logging
import logging.config
from .config import config
from .db import get_secret_key, update_pat_status
def setup_logging():
log_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'detailed': {
'format':
'%(asctime)s - %(name)s - %(levelname)s - %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'detailed',
'stream': 'ext://sys.stdout'
},
# general_file handler
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'detailed',
'filename': config.GENERAL_LOG_FILE,
'maxBytes': 1048576, # 1MB
'backupCount': 20
},
# importarr_file handler
'importarr_file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'DEBUG',
'formatter': 'detailed',
'filename': config.IMPORTARR_LOG_FILE,
'maxBytes': 1048576,
'backupCount': 20
},
# hash_file handler
'hash_file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'detailed',
'filename': config.HASH_LOG_FILE,
'maxBytes': 1048576, # 1MB
'backupCount': 20
}
},
'root': {
'level': 'DEBUG',
'handlers': ['console', 'file']
},
'loggers': {
# The 'importarr' logger uses all three handlers
'importarr': {
'level': 'DEBUG',
'handlers': ['console', 'file', 'importarr_file'],
'propagate': False
},
# The 'importer' logger (new import module) - reduce verbosity
'importer': {
'level': 'WARNING',
'handlers': ['file'],
'propagate': False
},
# The 'hash' logger uses all three handlers
'hash': {
'level': 'INFO',
'handlers': ['console', 'file', 'hash_file'],
'propagate': False
},
# Third-party loggers kept separate so their verbosity can be controlled
'werkzeug': {
'level': 'INFO',
'handlers': ['console', 'file'],
'propagate': False
},
'flask': {
'level': 'INFO',
'handlers': ['console', 'file'],
'propagate': False
},
'git': {
'level': 'ERROR',
'handlers': ['console', 'file'],
'propagate': False
},
'urllib3': {
'level': 'WARNING',
'handlers': ['console', 'file'],
'propagate': False
},
'urllib3.connectionpool': {
'level': 'WARNING',
'handlers': ['console', 'file'],
'propagate': False
}
}
}
# Make sure the log directory exists
os.makedirs(os.path.dirname(config.GENERAL_LOG_FILE), exist_ok=True)
# Apply the configuration
logging.config.dictConfig(log_config)
# Create a logger for this module
logger = logging.getLogger(__name__)
logger.info("Logging system initialized")
return logger
def init_git_user():
"""Initialize Git user configuration for the repository and update PAT status."""
logger = logging.getLogger(__name__)
logger.info("Starting Git user configuration")
try:
from .config import config
repo_path = config.DB_DIR
git_name = os.environ.get('GIT_USER_NAME', 'Profilarr')
git_email = os.environ.get('GIT_USER_EMAIL',
'profilarr@dictionarry.com')
logger.debug(
f"Retrieved Git config - Name: {git_name}, Email: {git_email}")
if git_name == 'Profilarr' or git_email == 'profilarr@dictionarry.com':
logger.info("Using default Git user configuration")
# Set repository-level Git configuration if repo exists
if os.path.exists(os.path.join(repo_path, '.git')):
logger.info(f"Setting git config for repository at {repo_path}")
subprocess.run(['git', '-C', repo_path, 'config', '--local', 'user.name', git_name],
check=True)
subprocess.run(['git', '-C', repo_path, 'config', '--local', 'user.email', git_email],
check=True)
# Add safe.directory to prevent ownership issues
subprocess.run(['git', '-C', repo_path, 'config', '--local', '--add', 'safe.directory', repo_path],
check=True)
else:
logger.warning(f"No git repository found at {repo_path}, skipping git config")
# Update PAT status in database
update_pat_status()
# Verify configuration if repository exists
if os.path.exists(os.path.join(repo_path, '.git')):
configured_name = subprocess.run(
['git', '-C', repo_path, 'config', '--local', 'user.name'],
capture_output=True,
text=True,
check=True).stdout.strip()
configured_email = subprocess.run(
['git', '-C', repo_path, 'config', '--local', 'user.email'],
capture_output=True,
text=True,
check=True).stdout.strip()
if configured_name != git_name or configured_email != git_email:
logger.error("Git configuration verification failed")
return False, "Git configuration verification failed"
logger.info("Git user configuration completed successfully")
return True, "Git configuration successful"
except subprocess.CalledProcessError as e:
logger.error(f"Error configuring git: {str(e)}", exc_info=True)
return False, f"Failed to configure git: {str(e)}"
except Exception as e:
logger.error(f"Unexpected error configuring git: {str(e)}",
exc_info=True)
return False, f"Unexpected error configuring git: {str(e)}"
def init_app_config(app):
"""Initialize Flask app configuration."""
logger = logging.getLogger(__name__)
logger.info("Initializing app configuration")
app.config['SECRET_KEY'] = get_secret_key()
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(
days=config.SESSION_LIFETIME_DAYS)
app.config['SESSION_COOKIE_SECURE'] = config.SESSION_COOKIE_SECURE
app.config['SESSION_COOKIE_HTTPONLY'] = config.SESSION_COOKIE_HTTPONLY
app.config['SESSION_COOKIE_SAMESITE'] = config.SESSION_COOKIE_SAMESITE
logger.info("App configuration initialized")


@@ -1,148 +0,0 @@
from flask import Blueprint, jsonify, request, send_file
import os
from ..config import config
import logging
bp = Blueprint('logs', __name__)
logger = logging.getLogger(__name__)
@bp.route('/', methods=['GET'])
def get_logs():
"""Get list of available log files."""
try:
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
log_files = []
# Get all log files including rotated ones
for filename in os.listdir(log_dir):
if filename.endswith('.log') or '.log.' in filename:
file_path = os.path.join(log_dir, filename)
file_stat = os.stat(file_path)
log_files.append({
'filename': filename,
'size': file_stat.st_size,
'last_modified': file_stat.st_mtime
})
return jsonify(log_files), 200
except Exception as e:
logger.error(f"Error getting log files: {str(e)}")
return jsonify({'error': str(e)}), 500
@bp.route('/<filename>', methods=['GET'])
def get_log_content(filename):
"""Get content of a specific log file."""
try:
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
file_path = os.path.realpath(os.path.join(log_dir, filename))
# Ensure the file exists and resolves to a path inside the log directory
if not os.path.exists(file_path) or not file_path.startswith(os.path.realpath(log_dir) + os.sep):
return jsonify({'error': 'Log file not found'}), 404
# Get query parameters for filtering
lines = request.args.get('lines',
type=int) # Number of lines to return
level = request.args.get('level') # Log level filter
search = request.args.get('search') # Search term
# If no filters, return the whole file
if not any([lines, level, search]):
return send_file(file_path, mimetype='text/plain')
# Read and filter log content
with open(file_path, 'r') as f:
content = f.readlines()
# Apply filters
filtered_content = content
if level:
filtered_content = [
line for line in filtered_content
if f' - {level.upper()} - ' in line
]
if search:
filtered_content = [
line for line in filtered_content
if search.lower() in line.lower()
]
if lines:
filtered_content = filtered_content[-lines:]
return jsonify({
'filename': filename,
'total_lines': len(content),
'filtered_lines': len(filtered_content),
'content': filtered_content
}), 200
except Exception as e:
logger.error(f"Error reading log file {filename}: {str(e)}")
return jsonify({'error': str(e)}), 500
@bp.route('/level/<level>', methods=['GET'])
def get_logs_by_level(level):
"""Get all logs of a specific level."""
try:
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
results = []
for filename in os.listdir(log_dir):
if filename.endswith('.log'):
file_path = os.path.join(log_dir, filename)
with open(file_path, 'r') as f:
matching_lines = [
line.strip() for line in f
if f' - {level.upper()} - ' in line
]
if matching_lines:
results.extend(matching_lines)
return jsonify({
'level': level.upper(),
'count': len(results),
'logs': results
}), 200
except Exception as e:
logger.error(f"Error getting logs for level {level}: {str(e)}")
return jsonify({'error': str(e)}), 500
@bp.route('/search', methods=['GET'])
def search_logs():
"""Search all logs for a specific term."""
try:
term = request.args.get('q')
if not term:
return jsonify({'error': 'Search term required'}), 400
log_dir = os.path.dirname(config.GENERAL_LOG_FILE)
results = []
for filename in os.listdir(log_dir):
if filename.endswith('.log'):
file_path = os.path.join(log_dir, filename)
with open(file_path, 'r') as f:
matching_lines = [
line.strip() for line in f
if term.lower() in line.lower()
]
if matching_lines:
results.extend(matching_lines)
return jsonify({
'term': term,
'count': len(results),
'logs': results
}), 200
except Exception as e:
logger.error(f"Error searching logs: {str(e)}")
return jsonify({'error': str(e)}), 500
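All of these routes accept the same API-key header the middleware checks. A hedged example of the filtered read (the base URL, API key, log filename, and the /api/logs mount prefix are placeholders; the lines/level/search parameters match the handler above):

```python
import requests

BASE = 'http://localhost:6868'             # placeholder host/port
HEADERS = {'X-Api-Key': '<your-api-key>'}  # placeholder key

# Last 100 ERROR lines of one log file, filtered server-side
resp = requests.get(
    f'{BASE}/api/logs/profilarr.log',      # assumed mount prefix and filename
    params={'lines': 100, 'level': 'error'},
    headers=HEADERS,
)
print(resp.json()['filtered_lines'])
```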


@@ -1,140 +0,0 @@
from flask import Blueprint, jsonify, request
import logging
from .utils import (
get_media_management_data,
save_media_management_data,
update_media_management_data,
get_all_media_management_data,
MEDIA_MANAGEMENT_CATEGORIES
)
from .sync import (
sync_naming_config,
sync_media_management_config,
sync_quality_definitions
)
from ..arr.manager import get_arr_config
logger = logging.getLogger(__name__)
media_management_bp = Blueprint('media_management', __name__)
@media_management_bp.route('/api/media-management', methods=['GET'])
def get_all_media_management():
"""Get all media management data for all categories"""
try:
data = get_all_media_management_data()
return jsonify(data), 200
except Exception as e:
logger.error(f"Error retrieving media management data: {e}")
return jsonify({'error': str(e)}), 500
@media_management_bp.route('/api/media-management/<category>', methods=['GET'])
def get_media_management(category):
"""Get media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
return jsonify({'error': f'Invalid category: {category}'}), 400
try:
data = get_media_management_data(category)
return jsonify(data), 200
except Exception as e:
logger.error(f"Error retrieving {category}: {e}")
return jsonify({'error': str(e)}), 500
@media_management_bp.route('/api/media-management/<category>', methods=['PUT'])
def update_media_management(category):
"""Update media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
return jsonify({'error': f'Invalid category: {category}'}), 400
try:
data = request.get_json()
if not data:
return jsonify({'error': 'No data provided'}), 400
updated_data = update_media_management_data(category, data)
return jsonify(updated_data), 200
except Exception as e:
logger.error(f"Error updating {category}: {e}")
return jsonify({'error': str(e)}), 500
@media_management_bp.route('/api/media-management/sync', methods=['POST'])
def sync_media_management():
"""Sync media management data to arr instance"""
try:
data = request.get_json()
if not data:
return jsonify({'error': 'No data provided'}), 400
arr_id = data.get('arr_id')
categories = data.get('categories', [])
if not arr_id:
return jsonify({'error': 'arr_id is required'}), 400
if not categories:
return jsonify({'error': 'categories list is required'}), 400
# Validate categories
invalid_categories = [cat for cat in categories if cat not in MEDIA_MANAGEMENT_CATEGORIES]
if invalid_categories:
return jsonify({'error': f'Invalid categories: {invalid_categories}'}), 400
# Get arr config
arr_result = get_arr_config(arr_id)
if not arr_result.get('success'):
return jsonify({'error': 'Arr configuration not found'}), 404
arr_config = arr_result.get('data')
base_url = arr_config['arrServer']
api_key = arr_config['apiKey']
arr_type = arr_config['type']
results = {}
# Sync each requested category
for category in categories:
try:
# Get the current media management data for this category
category_data = get_media_management_data(category)
if category == 'naming':
arr_type_data = category_data.get(arr_type, {})
success, message = sync_naming_config(base_url, api_key, arr_type, arr_type_data)
elif category == 'misc':
arr_type_data = category_data.get(arr_type, {})
success, message = sync_media_management_config(base_url, api_key, arr_type, arr_type_data)
elif category == 'quality_definitions':
# Quality definitions has a nested structure: qualityDefinitions -> arr_type -> qualities
quality_defs = category_data.get('qualityDefinitions', {}).get(arr_type, {})
success, message = sync_quality_definitions(base_url, api_key, arr_type, quality_defs)
else:
success, message = False, f"Unknown category: {category}"
results[category] = {
'success': success,
'message': message
}
except Exception as e:
logger.error(f"Error syncing {category}: {e}")
results[category] = {
'success': False,
'message': str(e)
}
# Determine overall success
overall_success = all(result['success'] for result in results.values())
return jsonify({
'success': overall_success,
'results': results
}), 200
except Exception as e:
logger.error(f"Error in media management sync: {e}")
return jsonify({'error': str(e)}), 500
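The sync endpoint takes an arr_id plus the list of categories to push. A minimal request sketch (host, key, and the arr_id value are placeholders; the path and payload keys are exactly what the handler above expects):

```python
import requests

resp = requests.post(
    'http://localhost:6868/api/media-management/sync',   # placeholder host
    json={'arr_id': 1, 'categories': ['naming', 'quality_definitions']},
    headers={'X-Api-Key': '<your-api-key>'},
)
print(resp.json())
# {'success': True, 'results': {'naming': {...}, 'quality_definitions': {...}}}
```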


@@ -1,186 +0,0 @@
import logging
from typing import Dict, Any, Tuple
from ..importer.arr_handler import ArrHandler, ArrApiError
logger = logging.getLogger(__name__)
def sync_naming_config(base_url: str, api_key: str, arr_type: str, naming_data: Dict[str, Any]) -> Tuple[bool, str]:
"""
Sync naming configuration to arr instance.
First GET current config, update with our data, then PUT back.
Args:
base_url: The arr instance base URL
api_key: The arr instance API key
arr_type: Either 'radarr' or 'sonarr'
naming_data: The naming configuration from our YML file
Returns:
Tuple of (success, message)
"""
arr = None
try:
# Initialize ArrHandler
arr = ArrHandler(base_url, api_key)
logger.info(f"Syncing naming config to {arr_type}")
# GET current naming config using ArrHandler
current_config = arr.get("/api/v3/config/naming")
# Update current_config with fields from naming_data
if arr_type == 'radarr':
# Map our YML fields to Radarr API fields
if 'rename' in naming_data:
current_config['renameMovies'] = naming_data['rename']
if 'replaceIllegalCharacters' in naming_data:
current_config['replaceIllegalCharacters'] = naming_data['replaceIllegalCharacters']
if 'colonReplacementFormat' in naming_data:
current_config['colonReplacementFormat'] = naming_data['colonReplacementFormat']
if 'movieFormat' in naming_data:
current_config['standardMovieFormat'] = naming_data['movieFormat']
if 'movieFolderFormat' in naming_data:
current_config['movieFolderFormat'] = naming_data['movieFolderFormat']
else: # sonarr
# Map our YML fields to Sonarr API fields
if 'rename' in naming_data:
current_config['renameEpisodes'] = naming_data['rename']
if 'replaceIllegalCharacters' in naming_data:
current_config['replaceIllegalCharacters'] = naming_data['replaceIllegalCharacters']
if 'colonReplacementFormat' in naming_data:
current_config['colonReplacementFormat'] = naming_data['colonReplacementFormat']
if 'customColonReplacementFormat' in naming_data:
current_config['customColonReplacementFormat'] = naming_data['customColonReplacementFormat']
if 'multiEpisodeStyle' in naming_data:
current_config['multiEpisodeStyle'] = naming_data['multiEpisodeStyle']
if 'standardEpisodeFormat' in naming_data:
current_config['standardEpisodeFormat'] = naming_data['standardEpisodeFormat']
if 'dailyEpisodeFormat' in naming_data:
current_config['dailyEpisodeFormat'] = naming_data['dailyEpisodeFormat']
if 'animeEpisodeFormat' in naming_data:
current_config['animeEpisodeFormat'] = naming_data['animeEpisodeFormat']
if 'seriesFolderFormat' in naming_data:
current_config['seriesFolderFormat'] = naming_data['seriesFolderFormat']
if 'seasonFolderFormat' in naming_data:
current_config['seasonFolderFormat'] = naming_data['seasonFolderFormat']
if 'specialsFolderFormat' in naming_data:
current_config['specialsFolderFormat'] = naming_data['specialsFolderFormat']
# PUT the updated config back using ArrHandler
arr.put("/api/v3/config/naming", current_config)
logger.info(f"Successfully synced naming config to {arr_type}")
return True, "Naming config sync successful"
except ArrApiError as e:
error_msg = f"Failed to sync naming config: {str(e)}"
logger.error(error_msg)
return False, error_msg
except Exception as e:
error_msg = f"Failed to sync naming config: {str(e)}"
logger.error(error_msg)
return False, error_msg
finally:
if arr:
arr.close()
def sync_media_management_config(base_url: str, api_key: str, arr_type: str, misc_data: Dict[str, Any]) -> Tuple[bool, str]:
"""
Sync media management (misc) configuration to arr instance.
First GET current config, update with our data, then PUT back.
Args:
base_url: The arr instance base URL
api_key: The arr instance API key
arr_type: Either 'radarr' or 'sonarr'
misc_data: The misc configuration from our YML file
Returns:
Tuple of (success, message)
"""
arr = None
try:
# Initialize ArrHandler
arr = ArrHandler(base_url, api_key)
logger.info(f"Syncing media management config to {arr_type}")
# GET current media management config using ArrHandler
current_config = arr.get("/api/v3/config/mediamanagement")
# Update current_config with fields from misc_data
if 'propersRepacks' in misc_data:
current_config['downloadPropersAndRepacks'] = misc_data['propersRepacks']
if 'enableMediaInfo' in misc_data:
current_config['enableMediaInfo'] = misc_data['enableMediaInfo']
# PUT the updated config back using ArrHandler
arr.put("/api/v3/config/mediamanagement", current_config)
logger.info(f"Successfully synced media management config to {arr_type}")
return True, "Media management config sync successful"
except ArrApiError as e:
error_msg = f"Failed to sync media management config: {str(e)}"
logger.error(error_msg)
return False, error_msg
except Exception as e:
error_msg = f"Failed to sync media management config: {str(e)}"
logger.error(error_msg)
return False, error_msg
finally:
if arr:
arr.close()
def sync_quality_definitions(base_url: str, api_key: str, arr_type: str, quality_data: Dict[str, Any]) -> Tuple[bool, str]:
"""
Sync quality definitions to arr instance.
Quality definitions contain all required data, so we can directly PUT.
Args:
base_url: The arr instance base URL
api_key: The arr instance API key
arr_type: Either 'radarr' or 'sonarr'
quality_data: The quality definitions from our YML file
Returns:
Tuple of (success, message)
"""
arr = None
try:
# Initialize ArrHandler
arr = ArrHandler(base_url, api_key)
logger.info(f"Syncing quality definitions to {arr_type}")
# GET current quality definitions using ArrHandler
current_definitions = arr.get("/api/v3/qualitydefinition")
# Create a mapping of quality names to current definitions for easier lookup
quality_map = {def_['quality']['name']: def_ for def_ in current_definitions}
# Update each quality definition with our values
for quality_name, settings in quality_data.items():
if quality_name in quality_map:
definition = quality_map[quality_name]
# Update size limits from our YML data
if 'min' in settings:
definition['minSize'] = settings['min']
if 'preferred' in settings:
definition['preferredSize'] = settings['preferred']
if 'max' in settings:
definition['maxSize'] = settings['max']
# PUT the updated definitions back using ArrHandler
arr.put("/api/v3/qualitydefinition/update", current_definitions)
logger.info(f"Successfully synced quality definitions to {arr_type}")
return True, "Quality definitions sync successful"
except ArrApiError as e:
error_msg = f"Failed to sync quality definitions: {str(e)}"
logger.error(error_msg)
return False, error_msg
except Exception as e:
error_msg = f"Failed to sync quality definitions: {str(e)}"
logger.error(error_msg)
return False, error_msg
finally:
if arr:
arr.close()
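Each sync helper follows the same get-merge-put pattern; quality definitions are the simplest to call directly. A sketch with illustrative sizes (the quality names must match what the arr instance reports from /api/v3/qualitydefinition):

```python
quality_data = {
    'Bluray-1080p': {'min': 10, 'preferred': 30, 'max': 60},
    'WEBDL-1080p': {'min': 5, 'preferred': 20, 'max': 40},
}

ok, message = sync_quality_definitions(
    'http://localhost:7878',      # placeholder Radarr URL
    '<radarr-api-key>',           # placeholder key
    'radarr',
    quality_data,
)
print(ok, message)
```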


@@ -1,211 +0,0 @@
import os
import yaml
import logging
from typing import Dict, Any
from datetime import datetime
from ..config.config import config
logger = logging.getLogger(__name__)
# Media management directory
MEDIA_MANAGEMENT_DIR = config.MEDIA_MANAGEMENT_DIR
# Media management categories
MEDIA_MANAGEMENT_CATEGORIES = ["misc", "naming", "quality_definitions"]
def _preserve_order(data: Dict[str, Any], category: str) -> Dict[str, Any]:
"""Preserve the desired key order based on category"""
if category == "misc":
# Order: radarr, sonarr
ordered = {}
for arr_type in ["radarr", "sonarr"]:
if arr_type in data:
arr_data = data[arr_type]
# Order within each: propersRepacks, enableMediaInfo
ordered_arr = {}
for key in ["propersRepacks", "enableMediaInfo"]:
if key in arr_data:
ordered_arr[key] = arr_data[key]
# Add any remaining keys
for key, value in arr_data.items():
if key not in ordered_arr:
ordered_arr[key] = value
ordered[arr_type] = ordered_arr
# Add any remaining top-level keys
for key, value in data.items():
if key not in ordered:
ordered[key] = value
return ordered
elif category == "naming":
# Order: radarr, sonarr
ordered = {}
for arr_type in ["radarr", "sonarr"]:
if arr_type in data:
arr_data = data[arr_type]
ordered_arr = {}
if arr_type == "radarr":
# Radarr order: rename, movieFormat, movieFolderFormat, replaceIllegalCharacters, colonReplacementFormat
for key in ["rename", "movieFormat", "movieFolderFormat", "replaceIllegalCharacters", "colonReplacementFormat"]:
if key in arr_data:
ordered_arr[key] = arr_data[key]
elif arr_type == "sonarr":
# Sonarr order: rename, standardEpisodeFormat, dailyEpisodeFormat, animeEpisodeFormat, seriesFolderFormat, seasonFolderFormat, replaceIllegalCharacters, colonReplacementFormat, customColonReplacementFormat, multiEpisodeStyle
for key in ["rename", "standardEpisodeFormat", "dailyEpisodeFormat", "animeEpisodeFormat", "seriesFolderFormat", "seasonFolderFormat", "replaceIllegalCharacters", "colonReplacementFormat", "customColonReplacementFormat", "multiEpisodeStyle"]:
if key in arr_data:
ordered_arr[key] = arr_data[key]
# Add any remaining keys
for key, value in arr_data.items():
if key not in ordered_arr:
ordered_arr[key] = value
ordered[arr_type] = ordered_arr
# Add any remaining top-level keys
for key, value in data.items():
if key not in ordered:
ordered[key] = value
return ordered
elif category == "quality_definitions":
# For quality_definitions, preserve the structure: qualityDefinitions -> radarr/sonarr -> qualities
return data
return data
def _get_file_path(category: str) -> str:
"""Get the file path for a media management category"""
return os.path.join(MEDIA_MANAGEMENT_DIR, f"{category}.yml")
def _load_yaml_file(file_path: str) -> Dict[str, Any]:
"""Load YAML file and return contents"""
if not os.path.exists(file_path):
logger.error(f"File not found: {file_path}")
raise FileNotFoundError(f"File not found: {file_path}")
try:
with open(file_path, 'r') as f:
return yaml.safe_load(f) or {}
except Exception as e:
logger.error(f"Error loading {file_path}: {e}")
raise
def _save_yaml_file(file_path: str, data: Dict[str, Any], category: str = None) -> None:
"""Save data to YAML file"""
try:
# Preserve key order if category is specified
if category:
data = _preserve_order(data, category)
with open(file_path, 'w') as f:
yaml.safe_dump(
data,
f,
sort_keys=False,
default_flow_style=False,
width=1000, # Prevent line wrapping
allow_unicode=True
)
except Exception as e:
logger.error(f"Error saving {file_path}: {e}")
raise
def get_media_management_data(category: str) -> Dict[str, Any]:
"""Get media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
raise ValueError(f"Invalid category: {category}")
file_path = _get_file_path(category)
# If file doesn't exist, return empty dict
if not os.path.exists(file_path):
logger.info(f"Media management file not found: {file_path}")
return {}
try:
data = _load_yaml_file(file_path)
return data
except Exception as e:
logger.error(f"Error reading {category}: {e}")
# Return empty dict on error
return {}
def save_media_management_data(category: str, data: Dict[str, Any]) -> Dict[str, Any]:
"""Save media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
raise ValueError(f"Invalid category: {category}")
file_path = _get_file_path(category)
try:
_save_yaml_file(file_path, data, category)
logger.info(f"Saved {category} data")
return get_media_management_data(category)
except Exception as e:
logger.error(f"Error saving {category}: {e}")
raise
def update_media_management_data(category: str, data: Dict[str, Any]) -> Dict[str, Any]:
"""Update media management data for a specific category"""
if category not in MEDIA_MANAGEMENT_CATEGORIES:
raise ValueError(f"Invalid category: {category}")
# For media management, update is the same as save
# since these files can't be deleted
return save_media_management_data(category, data)
def get_all_media_management_data() -> Dict[str, Any]:
"""Get all media management data for all categories, transformed to have arr type at top level"""
# First get all data in original structure
original_data = {}
for category in MEDIA_MANAGEMENT_CATEGORIES:
try:
data = get_media_management_data(category)
# Only include if data exists
if data:
original_data[category] = data
except Exception as e:
logger.error(f"Error getting {category} data: {e}")
# Transform to have radarr/sonarr at top level
result = {
"radarr": {},
"sonarr": {}
}
for category, data in original_data.items():
if category == "misc":
# misc has radarr/sonarr subdivisions
if "radarr" in data and data["radarr"]:
result["radarr"]["misc"] = data["radarr"]
if "sonarr" in data and data["sonarr"]:
result["sonarr"]["misc"] = data["sonarr"]
elif category == "naming":
# naming has radarr/sonarr subdivisions
if "radarr" in data and data["radarr"]:
result["radarr"]["naming"] = data["radarr"]
if "sonarr" in data and data["sonarr"]:
result["sonarr"]["naming"] = data["sonarr"]
elif category == "quality_definitions":
# quality_definitions has qualityDefinitions.radarr/sonarr
quality_defs = data.get("qualityDefinitions", {})
if "radarr" in quality_defs and quality_defs["radarr"]:
result["radarr"]["quality_definitions"] = quality_defs["radarr"]
if "sonarr" in quality_defs and quality_defs["sonarr"]:
result["sonarr"]["quality_definitions"] = quality_defs["sonarr"]
# Remove empty arr types
if not result["radarr"]:
del result["radarr"]
if not result["sonarr"]:
del result["sonarr"]
return result
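The transform flips the on-disk, category-first YAML into an arr-type-first structure. A shape sketch of the return value (all concrete values are placeholders):

```python
example_output = {
    'radarr': {
        'misc': {'propersRepacks': 'preferAndUpgrade', 'enableMediaInfo': True},
        'naming': {'rename': True, 'movieFormat': '{Movie Title} ({Release Year})'},
        'quality_definitions': {'Bluray-1080p': {'min': 10, 'preferred': 30, 'max': 60}},
    },
    'sonarr': {
        'naming': {'rename': True, 'standardEpisodeFormat': '...'},
    },
}
```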


@@ -1,60 +0,0 @@
# backend/app/middleware.py
from flask import request, session, jsonify, send_from_directory
from .db import get_db
import logging
logger = logging.getLogger(__name__)
def init_middleware(app):
"""Initialize authentication middleware for the Flask app"""
@app.before_request
def authenticate_request():
# Skip authentication for OPTIONS requests (CORS preflight)
if request.method == 'OPTIONS':
return
# Always allow auth endpoints
if request.path.startswith('/api/auth/'):
return
# Allow static assets needed for auth pages
if request.path.startswith(
('/assets/',
'/static/')) or request.path in ['/', '/regex.svg', '/clone.svg']:
return
# For API routes, require auth
if request.path.startswith('/api/'):
# Check session authentication (for web users)
if session.get('authenticated'):
db = get_db()
user = db.execute('SELECT session_id FROM auth').fetchone()
if user and session.get('session_id') == user['session_id']:
return
# Check API key authentication (for API users)
api_key = request.headers.get('X-Api-Key')
if api_key:
db = get_db()
try:
user = db.execute('SELECT 1 FROM auth WHERE api_key = ?',
(api_key, )).fetchone()
if user:
return
logger.warning(
f'Invalid API key attempt: {api_key[:10]}...')
except Exception as e:
logger.error(
f'Database error during API key check: {str(e)}')
return jsonify({'error': 'Internal server error'}), 500
# If no valid authentication is found, return 401
logger.warning(f'Unauthorized access attempt to {request.path}')
return jsonify({'error': 'Unauthorized'}), 401
# For all other routes (frontend routes), serve index.html
# This lets React handle auth and routing
return send_from_directory(app.static_folder, 'index.html')
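In practice the middleware means every /api/ route accepts either the browser session or an X-Api-Key header, and everything else falls through to the React bundle. A quick illustration (base URL and the /api/tasks mount prefix are placeholders):

```python
import requests

BASE = 'http://localhost:6868'   # placeholder

# No credentials: API routes are rejected by the middleware
print(requests.get(f'{BASE}/api/tasks').status_code)   # 401

# A key stored in the auth table gets through to the route handler
print(requests.get(f'{BASE}/api/tasks',
                   headers={'X-Api-Key': '<your-api-key>'}).status_code)   # 200
```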


@@ -1,158 +0,0 @@
# backend/app/settings/__init__.py
from flask import Blueprint, jsonify, request, session
from werkzeug.security import generate_password_hash, check_password_hash
import secrets
from ..db import get_db
from ..db.queries.settings import get_language_import_score, update_language_import_score
import logging
logger = logging.getLogger(__name__)
bp = Blueprint('settings', __name__)
@bp.route('/general', methods=['GET'])
def get_general_settings():
db = get_db()
try:
user = db.execute('SELECT username, api_key FROM auth').fetchone()
if not user:
logger.error('No user found in auth table')
return jsonify({'error': 'No user configuration found'}), 500
return jsonify({
'username': user['username'],
'api_key': user['api_key']
})
except Exception as e:
logger.error(f'Error fetching general settings: {str(e)}')
return jsonify({'error': 'Failed to fetch settings'}), 500
@bp.route('/username', methods=['PUT'])
def update_username():
db = get_db()
data = request.get_json()
new_username = data.get('username')
current_password = data.get('current_password')
if not new_username or not current_password:
return jsonify({'error':
'Username and current password are required'}), 400
try:
# Verify current password
user = db.execute('SELECT password_hash FROM auth').fetchone()
if not check_password_hash(user['password_hash'], current_password):
logger.warning('Failed username change - invalid password')
return jsonify({'error': 'Invalid password'}), 401
db.execute('UPDATE auth SET username = ?', (new_username, ))
db.commit()
logger.info(f'Username updated to: {new_username}')
return jsonify({'message': 'Username updated successfully'})
except Exception as e:
logger.error(f'Failed to update username: {str(e)}')
return jsonify({'error': 'Failed to update username'}), 500
@bp.route('/password', methods=['PUT'])
def update_password():
db = get_db()
data = request.get_json()
current_password = data.get('current_password')
new_password = data.get('new_password')
if not current_password or not new_password:
return jsonify({'error':
'Current and new passwords are required'}), 400
try:
# Verify current password
user = db.execute(
'SELECT password_hash, session_id FROM auth').fetchone()
if not check_password_hash(user['password_hash'], current_password):
logger.warning('Failed password change - invalid current password')
return jsonify({'error': 'Invalid current password'}), 401
# Update password and generate a new session ID
password_hash = generate_password_hash(new_password)
new_session_id = secrets.token_urlsafe(32)
db.execute('UPDATE auth SET password_hash = ?, session_id = ?',
(password_hash, new_session_id))
db.commit()
# Clear the current session to force re-login
session.clear()
logger.info('Password updated successfully')
return jsonify({
'message': 'Password updated successfully. Please log in again.',
'requireRelogin': True
})
except Exception as e:
logger.error(f'Failed to update password: {str(e)}')
return jsonify({'error': 'Failed to update password'}), 500
@bp.route('/api-key', methods=['POST'])
def reset_api_key():
db = get_db()
data = request.get_json()
current_password = data.get('current_password')
if not current_password:
return jsonify({'error': 'Current password is required'}), 400
try:
# Verify current password
user = db.execute('SELECT password_hash FROM auth').fetchone()
if not check_password_hash(user['password_hash'], current_password):
logger.warning('Failed API key reset - invalid password')
return jsonify({'error': 'Invalid password'}), 401
# Generate and save new API key
new_api_key = secrets.token_urlsafe(32)
db.execute('UPDATE auth SET api_key = ?', (new_api_key, ))
db.commit()
logger.info('API key reset successfully')
return jsonify({
'message': 'API key reset successfully',
'api_key': new_api_key
})
except Exception as e:
logger.error(f'Failed to reset API key: {str(e)}')
return jsonify({'error': 'Failed to reset API key'}), 500
@bp.route('/language-import-score', methods=['GET'])
def get_language_import_score_route():
try:
score = get_language_import_score()
return jsonify({'score': score})
except Exception as e:
logger.error(f'Failed to get language import score: {str(e)}')
return jsonify({'error': 'Failed to get language import score'}), 500
@bp.route('/language-import-score', methods=['PUT'])
def update_language_import_score_route():
data = request.get_json()
score = data.get('score')
if score is None:
return jsonify({'error': 'Score is required'}), 400
try:
score = int(score)
except (ValueError, TypeError):
return jsonify({'error': 'Score must be an integer'}), 400
try:
update_language_import_score(score)
return jsonify({'message': 'Language import score updated successfully'})
except Exception as e:
logger.error(f'Failed to update language import score: {str(e)}')
return jsonify({'error': 'Failed to update language import score'}), 500
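Password changes require the current password and rotate the session ID, so web clients must log in again. A request sketch (host, credentials, and the /api/settings mount prefix are placeholders; the body keys match update_password above):

```python
import requests

resp = requests.put(
    'http://localhost:6868/api/settings/password',   # assumed mount prefix
    json={'current_password': 'old-pass', 'new_password': 'new-pass'},
    headers={'X-Api-Key': '<your-api-key>'},
)
print(resp.json())
# {'message': 'Password updated successfully. Please log in again.', 'requireRelogin': True}
```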


@@ -1,174 +0,0 @@
# app/task/__init__.py
from flask import Blueprint, jsonify, request
import logging
from ..db import get_db
from .tasks import TaskScheduler
bp = Blueprint('tasks', __name__)
logger = logging.getLogger(__name__)
@bp.route('', methods=['GET'])
def get_all_tasks():
try:
with get_db() as conn:
tasks = conn.execute('SELECT * FROM scheduled_tasks').fetchall()
result = []
scheduler_instance = TaskScheduler.get_instance()
if scheduler_instance:
for task in tasks:
# Get the job from scheduler
job = scheduler_instance.scheduler.get_job(str(task['id']))
next_run = job.next_run_time if job else None
result.append({
'id':
task['id'],
'name':
task['name'],
'type':
task['type'],
'interval_minutes':
task['interval_minutes'],
'last_run':
task['last_run'],
'next_run':
next_run.isoformat() if next_run else None,
'status':
task['status']
})
return jsonify(result), 200
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<int:task_id>', methods=['GET'])
def get_task(task_id):
try:
with get_db() as conn:
task = conn.execute('SELECT * FROM scheduled_tasks WHERE id = ?',
(task_id, )).fetchone()
if not task:
return jsonify({"error": "Task not found"}), 404
scheduler_instance = TaskScheduler.get_instance()
if scheduler_instance:
job = scheduler_instance.scheduler.get_job(str(task['id']))
next_run = job.next_run_time if job else None
else:
next_run = None
return jsonify({
'id': task['id'],
'name': task['name'],
'type': task['type'],
'interval_minutes': task['interval_minutes'],
'last_run': task['last_run'],
'next_run': next_run.isoformat() if next_run else None,
'status': task['status']
}), 200
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
@bp.route('/<int:task_id>', methods=['PUT'])
def update_task(task_id):
try:
data = request.get_json()
if not data:
return jsonify({"error": "No data provided"}), 400
interval_minutes = data.get('interval_minutes')
if interval_minutes is None:
return jsonify({"error": "interval_minutes is required"}), 400
if not isinstance(interval_minutes, int) or interval_minutes < 1:
return jsonify({"error": "interval_minutes must be a positive integer"}), 400
with get_db() as conn:
# Check if task exists
task = conn.execute('SELECT * FROM scheduled_tasks WHERE id = ?',
(task_id, )).fetchone()
if not task:
return jsonify({"error": "Task not found"}), 404
# Update the interval in database
conn.execute(
'UPDATE scheduled_tasks SET interval_minutes = ? WHERE id = ?',
(interval_minutes, task_id)
)
conn.commit()
# Update the scheduler
scheduler_instance = TaskScheduler.get_instance()
if scheduler_instance and interval_minutes > 0:
# Remove old job
scheduler_instance.scheduler.remove_job(str(task_id))
# Create new task instance with updated interval
task_class = TaskScheduler.get_task_class(task['type'])
if task_class:
new_task = task_class(
id=task_id,
name=task['name'],
interval_minutes=interval_minutes
)
scheduler_instance.schedule_task(new_task)
logger.info(f"Updated task {task_id} interval to {interval_minutes} minutes")
return jsonify({
"success": True,
"message": f"Task interval updated to {interval_minutes} minutes"
}), 200
except Exception as e:
logger.exception(f"Failed to update task {task_id}")
return jsonify({"error": f"Failed to update task: {str(e)}"}), 500
@bp.route('/<int:task_id>/run', methods=['POST'])
def trigger_task(task_id):
try:
with get_db() as conn:
task = conn.execute('SELECT * FROM scheduled_tasks WHERE id = ?',
(task_id, )).fetchone()
if not task:
return jsonify({"error": "Task not found"}), 404
# Get the task class and run it
task_class = TaskScheduler.get_task_class(task['type'])
if not task_class:
return jsonify({"error": "Invalid task type"}), 400
task_instance = task_class(
id=task['id'],
name=task['name'],
interval_minutes=task['interval_minutes'])
try:
task_instance.update_status('running')
task_instance.run_job()
task_instance.update_status('success')
return jsonify(
{"message": f"Task {task_id} triggered successfully"}), 200
except Exception as e:
task_instance.update_status('failed')
logger.error(f"Task {task_id} failed: {str(e)}")
return jsonify({"error": f"Task failed: {str(e)}"}), 500
except Exception as e:
logger.exception("Unexpected error occurred")
return jsonify({"error": "An unexpected error occurred"}), 500
__all__ = ['bp', 'TaskScheduler']
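Changing a task's interval both updates the database row and reschedules the live APScheduler job. A request sketch (host, key, the task id, and the /api/tasks mount prefix are placeholders):

```python
import requests

resp = requests.put(
    'http://localhost:6868/api/tasks/1',      # assumed mount prefix, task id 1
    json={'interval_minutes': 60},
    headers={'X-Api-Key': '<your-api-key>'},
)
print(resp.json())
# {'success': True, 'message': 'Task interval updated to 60 minutes'}
```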


@@ -1,4 +0,0 @@
# app/task/backup/__init__.py
from .backup import BackupManager
__all__ = ['BackupManager']


@@ -1,186 +0,0 @@
# app/task/backup/backup.py
import os
import shutil
from datetime import datetime, timedelta
import logging
from pathlib import Path
import zipfile
import tempfile
from ...config.config import config
from ...db import get_db
logger = logging.getLogger(__name__)
class BackupManager:
def __init__(self):
self.backup_dir = os.path.join(config.CONFIG_DIR, 'backups')
self.retention_days = 30
self._ensure_backup_directory()
def _ensure_backup_directory(self):
"""Ensure backup directory exists"""
os.makedirs(self.backup_dir, exist_ok=True)
def create_backup(self):
"""Create a new backup of the config directory"""
try:
# Generate backup filename with timestamp
timestamp = datetime.now().strftime('%Y_%m_%d_%H%M%S')
backup_filename = f'backup_{timestamp}.zip'
backup_path = os.path.join(self.backup_dir, backup_filename)
# Create zip file
with zipfile.ZipFile(backup_path, 'w',
zipfile.ZIP_DEFLATED) as zipf:
# Walk through all files in config directory
for root, dirs, files in os.walk(config.CONFIG_DIR):
# Skip the backups directory itself
if 'backups' in root:
continue
for file in files:
file_path = os.path.join(root, file)
# Calculate path relative to config directory
arc_path = os.path.relpath(file_path,
config.CONFIG_DIR)
zipf.write(file_path, arc_path)
# Record backup in database
with get_db() as conn:
conn.execute(
'''
INSERT INTO backups (filename, created_at, status)
VALUES (?, CURRENT_TIMESTAMP, 'completed')
''', (backup_filename, ))
conn.commit()
logger.info(f'Backup created successfully: {backup_filename}')
return True, backup_filename
except Exception as e:
logger.error(f'Error creating backup: {str(e)}')
return False, str(e)
def restore_backup(self, backup_filename):
"""Restore from a backup file"""
backup_path = os.path.join(self.backup_dir, backup_filename)
if not os.path.exists(backup_path):
return False, "Backup file not found"
try:
# Create a temporary directory for extraction
temp_dir = os.path.join(self.backup_dir, 'temp_restore')
os.makedirs(temp_dir, exist_ok=True)
# Extract backup to temporary directory
with zipfile.ZipFile(backup_path, 'r') as zipf:
zipf.extractall(temp_dir)
# Move files to config directory
for item in os.listdir(temp_dir):
s = os.path.join(temp_dir, item)
d = os.path.join(config.CONFIG_DIR, item)
if os.path.isdir(s):
# Skip backups directory if it exists in the backup
if item == 'backups':
continue
shutil.rmtree(d, ignore_errors=True)
shutil.copytree(s, d, dirs_exist_ok=True)
else:
shutil.copy2(s, d)
# Clean up temporary directory
shutil.rmtree(temp_dir)
logger.info(f'Backup restored successfully: {backup_filename}')
return True, "Backup restored successfully"
except Exception as e:
logger.error(f'Error restoring backup: {str(e)}')
return False, str(e)
def cleanup_old_backups(self):
"""Remove backups older than retention period"""
try:
cutoff_date = datetime.now() - timedelta(days=self.retention_days)
with get_db() as conn:
# Get list of old backups
old_backups = conn.execute(
'''
SELECT filename FROM backups
WHERE created_at < ?
''', (cutoff_date.isoformat(), )).fetchall()
# Remove old backup files and database entries
for backup in old_backups:
backup_path = os.path.join(self.backup_dir,
backup['filename'])
if os.path.exists(backup_path):
os.remove(backup_path)
conn.execute('DELETE FROM backups WHERE filename = ?',
(backup['filename'], ))
conn.commit()
logger.info('Old backups cleaned up successfully')
return True, "Cleanup completed successfully"
except Exception as e:
logger.error(f'Error cleaning up old backups: {str(e)}')
return False, str(e)
def list_backups(self):
"""List all available backups"""
try:
with get_db() as conn:
backups = conn.execute('''
SELECT filename, created_at, status
FROM backups
ORDER BY created_at DESC
''').fetchall()
return [{
'filename': backup['filename'],
'created_at': backup['created_at'],
'status': backup['status']
} for backup in backups]
except Exception as e:
logger.error(f'Error listing backups: {str(e)}')
return []
def restore_backup_from_file(self, file_path):
"""Restore from a backup file path"""
try:
# Create a temporary directory for extraction
with tempfile.TemporaryDirectory() as temp_dir:
# Extract backup to temporary directory
with zipfile.ZipFile(file_path, 'r') as zipf:
zipf.extractall(temp_dir)
# Move files to config directory
for item in os.listdir(temp_dir):
s = os.path.join(temp_dir, item)
d = os.path.join(config.CONFIG_DIR, item)
if os.path.isdir(s):
# Skip backups directory if it exists in the backup
if item == 'backups':
continue
shutil.rmtree(d, ignore_errors=True)
shutil.copytree(s, d, dirs_exist_ok=True)
else:
shutil.copy2(s, d)
logger.info('Backup imported and restored successfully')
return True, "Backup imported and restored successfully"
except Exception as e:
logger.error(f'Error importing and restoring backup: {str(e)}')
return False, str(e)
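BackupManager is self-contained apart from the config paths and the backups table, so it can also be driven directly. A sketch assuming it runs inside the application environment (the printed filename is illustrative):

```python
manager = BackupManager()

ok, name = manager.create_backup()
if ok:
    print(f'created {name}')          # e.g. backup_2025_10_18_000935.zip

for entry in manager.list_backups():
    print(entry['filename'], entry['status'])

manager.cleanup_old_backups()         # removes anything past the 30-day retention
```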


@@ -1,181 +0,0 @@
# app/task/tasks.py
from abc import ABC, abstractmethod
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime
import logging
import re
from ..db import get_db
task_logger = logging.getLogger('task_system')
task_logger.setLevel(logging.DEBUG)
class Task(ABC):
def __init__(self, id=None, name=None, interval_minutes=None):
self.id = id
self.name = name
self.interval_minutes = interval_minutes
self.last_run = None
self.status = 'pending'
@abstractmethod
def run_job(self):
pass
def update_status(self, status):
task_logger.info(
f"Task {self.name} (ID: {self.id}) status changed to: {status}")
with get_db() as conn:
conn.execute(
'''
UPDATE scheduled_tasks
SET status = ?, last_run = ?
WHERE id = ?
''', (status, datetime.now(), self.id))
conn.commit()
class TaskScheduler:
_instance = None
def __init__(self):
self.scheduler = BackgroundScheduler()
self.logger = logging.getLogger('TaskScheduler')
TaskScheduler._instance = self
@classmethod
def get_instance(cls):
return cls._instance
def load_tasks_from_db(self):
"""
Reload tasks from the DB, removing all old jobs first
so we don't collide with existing job IDs.
"""
self.logger.debug(
"[TaskScheduler] remove_all_jobs to avoid duplicates")
self.scheduler.remove_all_jobs()
with get_db() as conn:
task_rows = conn.execute(
'SELECT * FROM scheduled_tasks').fetchall()
for row in task_rows:
task_class = self.get_task_class(row['type'])
if task_class:
task = task_class(id=row['id'],
name=row['name'],
interval_minutes=row['interval_minutes'])
self.schedule_task(task)
def schedule_task(self, task):
self.scheduler.add_job(self._run_task_wrapper(task),
'interval',
minutes=task.interval_minutes,
id=str(task.id))
def _run_task_wrapper(self, task):
def wrapped():
task_logger.info(f"Starting task: {task.name} (ID: {task.id})")
start_time = datetime.now()
try:
task.update_status('running')
task.run_job()
end_time = datetime.now()
duration = (end_time - start_time).total_seconds()
task_logger.info(
f"Task {task.name} completed successfully in {duration:.2f} seconds"
)
task.update_status('success')
except Exception as e:
end_time = datetime.now()
duration = (end_time - start_time).total_seconds()
task_logger.error(
f"Task {task.name} failed after {duration:.2f} seconds: {str(e)}"
)
task.update_status('failed')
return wrapped
def start(self):
self.scheduler.start()
@staticmethod
def get_task_class(task_type):
task_classes = {
'Sync': SyncTask,
'Backup': BackupTask,
'ImportSchedule': ImportScheduleTask,
}
return task_classes.get(task_type)
class SyncTask(Task):
def run_job(self):
"""Updates remote git status and performs other sync operations"""
from ..git.status.status import GitStatusManager
import os
from ..config.config import config
repo_path = config.DB_DIR
# Quick check if there's a valid git repo
if not os.path.exists(os.path.join(repo_path, '.git')):
task_logger.info("No valid git repository found - skipping sync")
return
# If we have a valid repo, proceed with sync
status_manager = GitStatusManager.get_instance(repo_path)
if status_manager:
success = status_manager.update_remote_status()
if not success:
task_logger.error("Failed to update remote git status")
class BackupTask(Task):
def run_job(self):
"""Performs configuration backup and cleanup"""
from .backup.backup import BackupManager
logger = logging.getLogger(__name__)
logger.info(f"Running backup task {self.name}")
manager = BackupManager()
success, backup_name = manager.create_backup()
if success:
logger.info(f"Backup created successfully: {backup_name}")
# Run cleanup to remove old backups
manager.cleanup_old_backups()
else:
logger.error(f"Backup failed: {backup_name}"
) # backup_name contains error message in this case
class ImportScheduleTask(Task):
"""
A scheduled task that runs the "run_import_for_config" logic for a specific ARR config
(inferred by parsing the config ID from the task's 'name').
For example, if the scheduled_tasks.name is 'Import for ARR #1 - radarr',
we parse '1' out of that string to know which arr_config to import.
"""
def run_job(self):
from ..importer import handle_scheduled_import
task_logger.info(
f"[ImportScheduleTask] Running scheduled import for task_id={self.id} ({self.name})"
)
result = handle_scheduled_import(self.id)
if not result.get('success'):
task_logger.error(
f"[ImportScheduleTask] Scheduled import failed for task_id={self.id}: {result}"
)
else:
task_logger.info(
f"[ImportScheduleTask] Scheduled import completed for task_id={self.id}: added={result.get('added', 0)}, updated={result.get('updated', 0)}, failed={result.get('failed', 0)}"
)
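The scheduler is a thin wrapper over APScheduler: load_tasks_from_db() turns scheduled_tasks rows into Task objects and schedule_task() registers each as an interval job. A sketch, assuming the database and scheduled_tasks table already exist (PruneLogsTask is hypothetical and not part of the type mapping above):

```python
scheduler = TaskScheduler()
scheduler.load_tasks_from_db()   # one interval job per scheduled_tasks row
scheduler.start()                # BackgroundScheduler begins firing jobs


class PruneLogsTask(Task):       # hypothetical extra task
    def run_job(self):
        print('pruning old log files...')


# Ad-hoc scheduling reuses the same wrapper (status updates still hit the DB)
scheduler.schedule_task(PruneLogsTask(id=999, name='Prune logs', interval_minutes=1440))
```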


@@ -1,58 +0,0 @@
# app/utils/hash.py
import hashlib
from typing import Dict, Any, Optional
def generate_format_hash(format_name: str, profile_name: str,
arr_config: Dict[str, Any]) -> str:
"""
Generate a unique hash for a format based on its name, parent profile, and arr config.
"""
arr_identifier = f"{arr_config['name']}-{arr_config['type']}"
hash_input = f"{format_name}:{profile_name}:{arr_identifier}".encode(
'utf-8')
return hashlib.sha256(hash_input).hexdigest()[:8]
def process_format_name(format_name: str, profile_name: Optional[str],
arr_config: Dict[str, Any]) -> str:
"""
Process a format name and generate a unique version if needed.
If profile_name is None, appends [Profilarr] tag instead of hash.
"""
if not arr_config.get('import_as_unique', False):
return format_name
if profile_name:
# Format is part of a profile - use hash
hash_value = generate_format_hash(format_name, profile_name,
arr_config)
return f"{format_name} [{hash_value}]"
else:
# Standalone format - use Profilarr tag
return f"{format_name} [Profilarr]"
def generate_profile_hash(profile_data: Dict[str, Any],
arr_config: Dict[str, Any]) -> str:
"""
Generate a unique hash for a profile based on profile name and arr name.
"""
profile_name = profile_data.get('name', '')
arr_name = arr_config['name']
hash_input = f"{profile_name}:{arr_name}".encode('utf-8')
return hashlib.sha256(hash_input).hexdigest()[:8]
def process_profile_name(profile_data: Dict[str, Any],
arr_config: Dict[str, Any]) -> str:
"""
Process a profile name and generate a unique hashed version if needed.
"""
profile_name = profile_data['name']
if not arr_config.get('import_as_unique', False):
return profile_name
hash_value = generate_profile_hash(profile_data, arr_config)
return f"{profile_name} [{hash_value}]"


@@ -1,11 +0,0 @@
Flask==2.0.1
Flask-CORS==3.0.10
PyYAML==5.4.1
requests==2.26.0
Werkzeug==2.0.1
GitPython==3.1.24
regex==2023.10.3
APScheduler==3.10.4
gunicorn==21.2.0
aiohttp==3.8.5
asyncio==3.4.3


@@ -1,16 +0,0 @@
services:
profilarr:
build:
context: .
dockerfile: Dockerfile
container_name: profilarr
ports:
- 6870:6868
volumes:
- ./config-test:/config
environment:
- PUID=1000
- PGID=1000
- UMASK=002
- TZ=Australia/Adelaide
restart: unless-stopped


@@ -1,23 +0,0 @@
services:
frontend:
build: ./frontend
ports:
- '3000:3000'
volumes:
- ./frontend:/app
- /app/node_modules
environment:
- CHOKIDAR_USEPOLLING=true
backend:
build: ./backend
ports:
- '5000:5000'
volumes:
- ./backend:/app
- ./config:/config
environment:
- PUID=1000
- PGID=1000
- TZ=Australia/Adelaide
restart: always


@@ -1,41 +0,0 @@
stateDiagram-v2
[*] --> CheckingForUpdates: User Initiates Pull
CheckingForUpdates --> NormalPull: No Conflicts Detected
CheckingForUpdates --> ConflictDetected: Conflicts Found
NormalPull --> [*]: Pull Complete
ConflictDetected --> ResolutionState: Enter Resolution Mode
note right of ResolutionState
System returns conflict object
containing all conflicted files
end note
state ResolutionState {
[*] --> FileSelection
FileSelection --> FileResolution: Select Unresolved File
FileResolution --> ConflictChoice
state ConflictChoice {
[*] --> DecisionMaking
DecisionMaking --> KeepLocal: User Keeps Local
DecisionMaking --> AcceptIncoming: User Accepts Incoming
DecisionMaking --> CustomMerge: User Combines/Modifies
KeepLocal --> MarkResolved
AcceptIncoming --> MarkResolved
CustomMerge --> MarkResolved
}
ConflictChoice --> AddFile: File Resolved
AddFile --> FileSelection: More Files\nto Resolve
AddFile --> AllFilesResolved: No More\nConflicts
}
ResolutionState --> CommitChanges: All Files Resolved
CommitChanges --> [*]: Resolution Complete


@@ -1,24 +0,0 @@
Profilarr Sync Flow
```mermaid
flowchart TD
A[User Opens App] --> B[Check Git Status]
B --> C{Changes Detected?}
C -->|No Changes| D[Up to Date]
C -->|Changes Exist| E{Type of Change}
E -->|Incoming Only| F[Fast Forward Available]
E -->|Outgoing Only| G[Push Available*]
E -->|Both| H{Conflicts?}
H -->|Yes| I[Show Conflict UI]
H -->|No| J[Auto-merge]
I --> K[User Resolves]
K --> L[Apply Resolution]
L --> M[Update Git State]
J --> M
F --> M
G --> M
%% Add note about push restrictions
N[*Push only available for developers<br/>on specific branches]
N -.- G
```


@@ -1,34 +0,0 @@
#!/bin/bash
set -e
# Default to UID/GID 1000 if not provided
PUID=${PUID:-1000}
PGID=${PGID:-1000}
# Default umask to 022 if not provided
UMASK=${UMASK:-022}
echo "Starting with UID: $PUID, GID: $PGID, UMASK: $UMASK"
umask "$UMASK"
# Create group with specified GID
groupadd -g "$PGID" appgroup 2>/dev/null || true
# Create user with specified UID and GID
useradd -u "$PUID" -g "$PGID" -d /home/appuser -s /bin/bash appuser 2>/dev/null || true
# Create home directory if it doesn't exist
mkdir -p /home/appuser
chown "$PUID:$PGID" /home/appuser
# Fix permissions on /config if it exists
if [ -d "/config" ]; then
echo "Setting up /config directory permissions"
# Change ownership of /config and all its contents to PUID:PGID
# This ensures files created by different UIDs are accessible
chown -R "$PUID:$PGID" /config
fi
# Execute the main command as the specified user
echo "Starting application as user $PUID:$PGID"
exec gosu "$PUID:$PGID" "$@"

frontend/.gitignore

@@ -1,26 +0,0 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
/backend/data


@@ -1,10 +0,0 @@
FROM node:18
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
CMD ["npm", "run", "dev", "--", "--host", "0.0.0.0"]


@@ -1,8 +0,0 @@
# React + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh


@@ -1,38 +0,0 @@
import js from '@eslint/js'
import globals from 'globals'
import react from 'eslint-plugin-react'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
export default [
{
files: ['**/*.{js,jsx}'],
ignores: ['dist'],
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,
parserOptions: {
ecmaVersion: 'latest',
ecmaFeatures: { jsx: true },
sourceType: 'module',
},
},
settings: { react: { version: '18.3' } },
plugins: {
react,
'react-hooks': reactHooks,
'react-refresh': reactRefresh,
},
rules: {
...js.configs.recommended.rules,
...react.configs.recommended.rules,
...react.configs['jsx-runtime'].rules,
...reactHooks.configs.recommended.rules,
'react/jsx-no-target-blank': 'off',
'react-refresh/only-export-components': [
'warn',
{ allowConstantExport: true },
],
},
},
]

View File

@@ -1,14 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/regex.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Profilarr</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>

File diff suppressed because it is too large

View File

@@ -1,39 +0,0 @@
{
"name": "frontend",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/modifiers": "^7.0.0",
"@dnd-kit/sortable": "^8.0.0",
"@radix-ui/react-slot": "^1.1.0",
"axios": "^0.21.1",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"lucide-react": "^0.428.0",
"prop-types": "^15.8.1",
"rc-slider": "^11.1.8",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-markdown": "^9.0.3",
"react-router-dom": "^6.26.1",
"react-syntax-highlighter": "^15.5.0",
"react-toastify": "^10.0.5",
"tailwind-merge": "^2.5.2"
},
"devDependencies": {
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
"@vitejs/plugin-react": "^3.1.0",
"autoprefixer": "^10.4.14",
"postcss": "^8.4.21",
"tailwindcss": "^3.3.1",
"vite": "^4.2.0"
}
}

View File

@@ -1,6 +0,0 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
}

View File

@@ -1 +0,0 @@
<svg viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <path opacity="0.4" d="M17.0998 2H12.8998C9.44976 2 8.04977 3.37 8.00977 6.75H11.0998C15.2998 6.75 17.2498 8.7 17.2498 12.9V15.99C20.6298 15.95 21.9998 14.55 21.9998 11.1V6.9C21.9998 3.4 20.5998 2 17.0998 2Z" fill="#ffffff"></path> <path d="M11.1 8H6.9C3.4 8 2 9.4 2 12.9V17.1C2 20.6 3.4 22 6.9 22H11.1C14.6 22 16 20.6 16 17.1V12.9C16 9.4 14.6 8 11.1 8ZM12.29 13.65L8.58 17.36C8.44 17.5 8.26 17.57 8.07 17.57C7.88 17.57 7.7 17.5 7.56 17.36L5.7 15.5C5.42 15.22 5.42 14.77 5.7 14.49C5.98 14.21 6.43 14.21 6.71 14.49L8.06 15.84L11.27 12.63C11.55 12.35 12 12.35 12.28 12.63C12.56 12.91 12.57 13.37 12.29 13.65Z" fill="#ffffff"></path> </g></svg>

Before

Width:  |  Height:  |  Size: 870 B

View File

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="#ffffff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><g id="SVGRepo_bgCarrier" stroke-width="0"></g><g id="SVGRepo_tracerCarrier" stroke-linecap="round" stroke-linejoin="round"></g><g id="SVGRepo_iconCarrier"> <path d="M17 3v10"></path> <path d="M12.67 5.5l8.66 5"></path> <path d="M12.67 10.5l8.66-5"></path> <path d="M9 17a2 2 0 00-2-2H5a2 2 0 00-2 2v2a2 2 0 002 2h2a2 2 0 002-2v-2z"></path> </g></svg>

Before

Width:  |  Height:  |  Size: 504 B

View File

@@ -1,191 +0,0 @@
import {
BrowserRouter as Router,
Routes,
Route,
Navigate
} from 'react-router-dom';
import {useState, useEffect} from 'react';
import RegexPage from './components/regex/RegexPage';
import FormatPage from './components/format/FormatPage';
import ProfilePage from './components/profile/ProfilePage';
import SettingsPage from './components/settings/SettingsPage';
import MediaManagementPage from './components/media-management/MediaManagementPage';
import SetupPage from './components/auth/SetupPage';
import LoginPage from './components/auth/LoginPage';
import Navbar from '@ui/Navbar';
import Footer from '@ui/Footer';
import {ToastContainer} from 'react-toastify';
import {checkSetupStatus} from '@api/auth';
import 'react-toastify/dist/ReactToastify.css';
import ErrorBoundary from '@ui/ErrorBoundary';
function App() {
const [darkMode, setDarkMode] = useState(true);
const [authState, setAuthState] = useState({
checking: true,
needsSetup: false,
needsLogin: false,
error: null
});
// Prevent layout shifts from scrollbar
useEffect(() => {
document.body.style.overflowY = 'scroll';
return () => {
document.body.style.overflowY = '';
};
}, []);
useEffect(() => {
if (darkMode) {
document.documentElement.classList.add('dark');
} else {
document.documentElement.classList.remove('dark');
}
}, [darkMode]);
useEffect(() => {
const checkAuth = async () => {
try {
const status = await checkSetupStatus();
setAuthState({
checking: false,
needsSetup: status.needsSetup,
needsLogin: status.needsLogin,
error: status.error
});
} catch (error) {
setAuthState({
checking: false,
needsSetup: false,
needsLogin: false,
error: 'Unable to connect to server'
});
}
};
checkAuth();
}, []);
if (authState.checking) {
return (
<>
<div>Loading...</div>
<ToastContainer
position='top-right'
autoClose={5000}
hideProgressBar={false}
newestOnTop={false}
closeOnClick
rtl={false}
pauseOnFocusLoss
draggable
pauseOnHover
theme='dark'
/>
</>
);
}
if (authState.needsSetup) {
return (
<>
<SetupPage
onSetupComplete={() =>
setAuthState({
...authState,
needsSetup: false,
needsLogin: false
})
}
/>
<ToastContainer
position='top-right'
autoClose={5000}
hideProgressBar={false}
newestOnTop={false}
closeOnClick
rtl={false}
pauseOnFocusLoss
draggable
pauseOnHover
theme='dark'
/>
</>
);
}
if (authState.needsLogin) {
return (
<>
<LoginPage
onLoginComplete={() =>
setAuthState({...authState, needsLogin: false})
}
/>
<ToastContainer
position='top-right'
autoClose={5000}
hideProgressBar={false}
newestOnTop={false}
closeOnClick
rtl={false}
pauseOnFocusLoss
draggable
pauseOnHover
theme='dark'
/>
</>
);
}
return (
<>
<Router>
<ErrorBoundary>
<div className='min-h-screen flex flex-col bg-gray-900 text-gray-100'>
<Navbar darkMode={darkMode} setDarkMode={setDarkMode} />
<div className='max-w-screen-2xl mx-auto px-4 sm:px-6 lg:px-8 mt-2 flex-grow flex-1 w-full'>
<Routes>
<Route path='/regex' element={<RegexPage />} />
<Route
path='/format'
element={<FormatPage />}
/>
<Route
path='/profile'
element={<ProfilePage />}
/>
<Route
path='/media-management'
element={<MediaManagementPage />}
/>
<Route
path='/settings'
element={<SettingsPage />}
/>
<Route
path='/'
element={<Navigate to='/settings' />}
/>
</Routes>
</div>
<Footer />
</div>
</ErrorBoundary>
</Router>
<ToastContainer
position='top-right'
autoClose={5000}
hideProgressBar={false}
newestOnTop={false}
closeOnClick
rtl={false}
pauseOnFocusLoss
draggable
pauseOnHover
theme='dark'
/>
</>
);
}
export default App;

View File

@@ -1,405 +0,0 @@
import axios from 'axios';
export const getSettings = async () => {
try {
const response = await axios.get(`/api/settings`);
return response.data;
} catch (error) {
console.error('Error fetching settings:', error);
throw error;
}
};
export const getGitStatus = async () => {
try {
const response = await axios.get(`/api/git/status`);
// Ensure has_unpushed_commits is included in the response
return {
...response.data,
data: {
...response.data.data,
has_unpushed_commits:
response.data.data.has_unpushed_commits || false
}
};
} catch (error) {
console.error('Error fetching Git status:', error);
throw error;
}
};
export const getBranches = async () => {
try {
const response = await axios.get(`/api/git/branches`);
return response.data;
} catch (error) {
console.error('Error fetching branches:', error);
throw error;
}
};
export const checkoutBranch = async branchName => {
try {
const response = await axios.post(
`/api/git/checkout`,
{
branch: branchName
},
{
validateStatus: status => {
return (
(status >= 200 && status < 300) ||
status === 400 ||
status === 409
);
}
}
);
return response.data;
} catch (error) {
console.error('Error checking out branch:', error);
throw error;
}
};
export const createBranch = async (branchName, baseBranch) => {
try {
const response = await axios.post(
`/api/git/branch`,
{
name: branchName,
base: baseBranch
},
{
validateStatus: status => {
return (
(status >= 200 && status < 300) ||
status === 400 ||
status === 409
);
}
}
);
return response.data;
} catch (error) {
console.error('Error creating branch:', error);
throw error;
}
};
export const deleteBranch = async branchName => {
try {
const response = await axios.delete(`/api/git/branch/${branchName}`, {
validateStatus: status => {
return (
(status >= 200 && status < 300) ||
status === 400 ||
status === 409
);
}
});
return response.data;
} catch (error) {
console.error('Error deleting branch:', error);
throw error;
}
};
export const pushBranchToRemote = async branchName => {
try {
const response = await axios.post(
`/api/git/branch/push`,
{
branch: branchName
},
{
validateStatus: status => {
return (
(status >= 200 && status < 300) ||
status === 400 ||
status === 409
);
}
}
);
return response.data;
} catch (error) {
console.error('Error pushing branch to remote:', error);
return {
success: false,
error:
error.response?.data?.error || 'Failed to push branch to remote'
};
}
};
export const addFiles = async files => {
try {
const response = await axios.post(`/api/git/stage`, {files});
return response.data;
} catch (error) {
console.error('Error staging files:', error);
throw error;
}
};
export const unstageFiles = async files => {
try {
const response = await axios.post(`/api/git/unstage`, {
files
});
return response.data;
} catch (error) {
console.error('Error unstaging files:', error);
throw error;
}
};
export const commitFiles = async (files, commitMessage) => {
try {
const response = await axios.post(`/api/git/commit`, {
files,
commit_message: commitMessage
});
return response.data;
} catch (error) {
console.error('Error committing files:', error);
throw error;
}
};
export const pushFiles = async () => {
try {
const response = await axios.post(`/api/git/push`);
return response.data;
} catch (error) {
if (error.response?.data?.error) {
return error.response.data;
}
return {
success: false,
error: error.message || 'Failed to push changes'
};
}
};
export const revertFile = async filePath => {
try {
const response = await axios.post(`/api/git/revert`, {
file_path: filePath
});
return response.data;
} catch (error) {
console.error('Error reverting file:', error);
throw error;
}
};
export const revertAll = async () => {
try {
const response = await axios.post(`/api/git/revert-all`);
return response.data;
} catch (error) {
console.error('Error reverting all changes:', error);
throw error;
}
};
export const deleteFile = async filePath => {
try {
const response = await axios.delete(`/api/git/file`, {
data: {file_path: filePath}
});
return response.data;
} catch (error) {
console.error('Error deleting file:', error);
return {success: false, error: 'Error deleting file'};
}
};
export const pullBranch = async branchName => {
try {
const response = await axios.post(`/api/git/pull`, {
branch: branchName
});
return response.data;
} catch (error) {
if (error.response?.data) {
return {
success: false,
state: error.response.data.state || 'error',
message: error.response.data.message,
details: error.response.data.details
};
}
return {
success: false,
state: 'error',
message: 'Failed to pull changes'
};
}
};
export const cloneRepo = async gitRepo => {
try {
const response = await axios.post(`/api/git/clone`, {
gitRepo
});
return response.data;
} catch (error) {
console.error('Error cloning repository:', error);
throw error;
}
};
export const getProfiles = async () => {
try {
const response = await axios.get(`/api/profile`);
return response.data;
} catch (error) {
console.error('Error fetching profiles:', error);
throw error;
}
};
export const saveProfile = async profile => {
try {
const response = await axios.post(`/api/profile`, profile);
return response.data;
} catch (error) {
console.error('Error saving profile:', error);
throw error;
}
};
export const updateProfile = async (id, profile) => {
try {
const response = await axios.put(`/api/profile/${id}`, profile);
return response.data;
} catch (error) {
console.error('Error updating profile:', error);
throw error;
}
};
export const deleteProfile = async id => {
try {
const response = await axios.delete(`/api/profile/${id}`);
return response.data;
} catch (error) {
console.error('Error deleting profile:', error);
throw error;
}
};
export const unlinkRepo = async (removeFiles = false) => {
try {
const response = await axios.post(`/api/git/unlink`, {
removeFiles
});
return response.data;
} catch (error) {
if (error.response?.data) {
// Return the error response directly
return error.response.data;
}
throw error;
}
};
export const checkDevMode = async () => {
try {
const response = await axios.get(`/api/git/dev`);
return response.data;
} catch (error) {
console.error('Error checking dev mode:', error);
throw error;
}
};
export const resolveConflict = async resolutions => {
try {
const response = await axios.post(`/api/git/resolve`, {
resolutions
});
return response.data;
} catch (error) {
console.error('Error resolving conflicts:', error);
throw error;
}
};
export const finalizeMerge = async () => {
try {
const response = await axios.post(`/api/git/merge/finalize`);
return response.data;
} catch (error) {
console.error('Error finalizing merge:', error);
if (error.response?.data) {
return {
success: false,
error: error.response.data.error
};
}
return {
success: false,
error: 'Failed to finalize merge'
};
}
};
export const abortMerge = async () => {
try {
const response = await axios.post(`/api/git/merge/abort`);
return response.data;
} catch (error) {
console.error('Error aborting merge:', error);
throw error;
}
};
export const getCommitHistory = async () => {
try {
const response = await axios.get('/api/git/commits', {
validateStatus: status => {
return (status >= 200 && status < 300) || status === 400;
}
});
return response.data;
} catch (error) {
console.error('Error fetching commit history:', error);
if (error.response?.data) {
return {
success: false,
error: error.response.data.error
};
}
return {
success: false,
error: 'Failed to fetch commit history'
};
}
};
export const getAutoPullStatus = async () => {
try {
const response = await axios.get('/api/git/autopull');
return response.data;
} catch (error) {
console.error('Error getting auto pull status:', error);
throw error;
}
};
export const setAutoPullStatus = async enabled => {
try {
const response = await axios.post('/api/git/autopull', {
enabled
});
return response.data;
} catch (error) {
console.error('Error setting auto pull status:', error);
throw error;
}
};
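For reference, a typical stage-commit-push round trip with the exports above might look like this; the import path, file paths, and commit message are placeholders, and `result.success` on the happy path is an assumption:

```javascript
// Example usage of the Git API client above (placeholder data).
import {addFiles, commitFiles, pushFiles} from './api';

async function commitAndPush(changedFiles, message) {
    await addFiles(changedFiles); // POST /api/git/stage
    await commitFiles(changedFiles, message); // POST /api/git/commit
    const result = await pushFiles(); // POST /api/git/push
    if (result?.success === false) {
        console.error('Push failed:', result.error);
    }
    return result;
}

// commitAndPush(['profiles/1080p.yml'], 'Update 1080p profile');
```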

View File

@@ -1,129 +0,0 @@
import axios from 'axios';
export const pingService = async (url, apiKey, type) => {
try {
const response = await axios.post(
`/api/arr/ping`,
{
url,
apiKey,
type
},
{
validateStatus: status => {
return (status >= 200 && status < 300) || status === 400;
}
}
);
return response.data;
} catch (error) {
console.error('Error pinging service:', error);
if (error.response?.data) {
return {
success: false,
message: error.response.data.error
};
}
return {
success: false,
message: 'Failed to ping service'
};
}
};
export const saveArrConfig = async config => {
try {
// Validate and auto-correct sync_interval if schedule method
const validatedConfig = {...config};
if (validatedConfig.sync_method === 'schedule' && validatedConfig.sync_interval) {
if (validatedConfig.sync_interval < 60) {
validatedConfig.sync_interval = 60;
} else if (validatedConfig.sync_interval > 43200) {
validatedConfig.sync_interval = 43200;
}
}
const response = await axios.post(`/api/arr/config`, validatedConfig, {
validateStatus: status => {
return (status >= 200 && status < 300) || status === 409;
}
});
if (response.status === 409) {
return {
success: false,
error: 'Configuration with this name already exists'
};
}
return response.data;
} catch (error) {
console.error('Error saving arr config:', error);
throw error;
}
};
export const updateArrConfig = async (id, config) => {
try {
// Validate and auto-correct sync_interval if schedule method
const validatedConfig = {...config};
if (validatedConfig.sync_method === 'schedule' && validatedConfig.sync_interval) {
if (validatedConfig.sync_interval < 60) {
validatedConfig.sync_interval = 60;
} else if (validatedConfig.sync_interval > 43200) {
validatedConfig.sync_interval = 43200;
}
}
const response = await axios.put(`/api/arr/config/${id}`, validatedConfig, {
validateStatus: status => {
return (status >= 200 && status < 300) || status === 409;
}
});
if (response.status === 409) {
return {
success: false,
error: 'Configuration with this name already exists'
};
}
return response.data;
} catch (error) {
console.error('Error updating arr config:', error);
throw error;
}
};
export const getArrConfigs = async () => {
try {
const response = await axios.get(`/api/arr/config`);
return response.data;
} catch (error) {
console.error('Error fetching arr configs:', error);
throw error;
}
};
export const deleteArrConfig = async id => {
try {
const response = await axios.delete(`/api/arr/config/${id}`);
return response.data;
} catch (error) {
console.error('Error deleting arr config:', error);
throw error;
}
};
export const triggerSync = async configId => {
try {
const response = await fetch(`/api/arr/config/${configId}/sync`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
}
});
return await response.json();
} catch (error) {
console.error('Error triggering sync:', error);
return {success: false, error: error.message};
}
};
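A short usage sketch for the helpers above — the import path and the config fields other than `sync_method` and `sync_interval` are assumptions, and the ping response is assumed to carry a `success` flag as its error path suggests:

```javascript
// Example: verify connectivity, then save a scheduled-sync config.
import {pingService, saveArrConfig} from './arr';

async function addRadarrInstance(url, apiKey) {
    const ping = await pingService(url, apiKey, 'radarr');
    if (ping?.success === false) {
        throw new Error(ping.message || 'Could not reach Radarr');
    }
    // saveArrConfig clamps sync_interval to 60..43200, so 30 becomes 60.
    return saveArrConfig({
        name: 'Radarr 1080p',
        type: 'radarr',
        url,
        apiKey,
        sync_method: 'schedule',
        sync_interval: 30
    });
}
```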

Some files were not shown because too many files have changed in this diff