Compare commits
99 Commits
Commits in this comparison (SHA1 only):

2a69a83d71, 0ec4ccbe40, 0d864867cd, 3ff0afe828, bd74ad41f9, fa2a92481d, 6a1e001fc2, 60afa92e09, 212f3ce5eb, 0d79537033,
72fc69c2f8, 6d8c6deae8, 6c116a029e, 3eb7c81fa1, 0334f477fd, 8bb99c99b7, 587cb2e516, fcf9d4b59b, b483dbfaad, 9630917570,
6a4432dd04, cfb91d825a, a3b21f23fc, e49dd85200, c9dec5ecd6, f3c5460e48, 39b16b87a8, 26c9d79814, 2776d18a42, 7057ccfb95,
1807949c6f, d611720b8f, bf871650a8, 5ca1adda7b, acb18adf76, c18490f5d3, eeda944b73, 52cfbebba4, f4385807f1, e9e083c9dd,
3218b2b39f, ba296a79c9, 62e05e2f5b, 77d8b68ba5, bb0a801396, ee968efc4b, 644b2b8fa0, 0f74907f82, 5a8b1b11de, 389ec40163,
1d03055491, 7775c6d974, a24a819511, 0a6c2f2988, 0c90e984ad, 0a0cbbfe6d, 15c44cd484, 6d7ee6fc04, 5a022db0db, 37ac22e0b4,
bcea440e40, 6edde2d65b, 74189c1e14, b5ddf7402a, 900224ed2e, e290043089, a7fd37d646, d4b00046d3, 545d345ea4, a29b831e41,
bc9ca140bd, ad8e3cd07c, 22efe0b32e, d23a0a94d5, e42b79c9d8, 3b2c657bfa, e335ab05a1, 75f963d6e2, 94b998741f, 172c734866,
1b483e178d, 78693225f1, ca08c84789, e930b422e5, 0833d04376, 55f36d76ec, 6a838ee84f, 4285bf4a54, 640b1042c2, 9357c4632e,
ca5d0d22f3, 3875338fb7, 196f55c58e, 9a149715f6, bf40533469, 7bc7259988, 66b96ac3a5, f974e0b14a, de8c3f768d
```
@@ -25,7 +25,5 @@ venv/
.DS_Store
Thumbs.db

# Arch pkg artifacts
*.pkg.tar.*
*.log
package-manager-*
# Logs
*.log
```
`.github/workflows/ci.yml` — new file (vendored), 26 lines

```yaml
name: CI

on:
  push:
    branches-ignore:
      - main
  pull_request:

jobs:
  test-unit:
    uses: ./.github/workflows/test-unit.yml

  test-integration:
    uses: ./.github/workflows/test-integration.yml

  test-container:
    uses: ./.github/workflows/test-container.yml

  test-e2e:
    uses: ./.github/workflows/test-e2e.yml

  test-virgin-user:
    uses: ./.github/workflows/test-virgin-user.yml

  test-virgin-root:
    uses: ./.github/workflows/test-virgin-root.yml
```
`.github/workflows/mark-stable.yml` — new file (vendored), 98 lines

```yaml
name: Mark stable commit

on:
  push:
    branches:
      - main          # still run tests for main
    tags:
      - 'v*'          # run tests for version tags (e.g. v0.9.1)

jobs:
  test-unit:
    uses: ./.github/workflows/test-unit.yml

  test-integration:
    uses: ./.github/workflows/test-integration.yml

  test-container:
    uses: ./.github/workflows/test-container.yml

  test-e2e:
    uses: ./.github/workflows/test-e2e.yml

  test-virgin-user:
    uses: ./.github/workflows/test-virgin-user.yml

  test-virgin-root:
    uses: ./.github/workflows/test-virgin-root.yml

  mark-stable:
    needs:
      - test-unit
      - test-integration
      - test-container
      - test-e2e
      - test-virgin-user
      - test-virgin-root
    runs-on: ubuntu-latest

    # Only run this job if the push is for a version tag (v*)
    if: startsWith(github.ref, 'refs/tags/v')

    permissions:
      contents: write  # Required to move/update the tag

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true  # We need all tags for version comparison

      - name: Move 'stable' tag only if this version is the highest
        run: |
          set -euo pipefail

          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          echo "Ref: $GITHUB_REF"
          echo "SHA: $GITHUB_SHA"

          VERSION="${GITHUB_REF#refs/tags/}"
          echo "Current version tag: ${VERSION}"

          echo "Collecting all version tags..."
          ALL_V_TAGS="$(git tag --list 'v*' || true)"

          if [[ -z "${ALL_V_TAGS}" ]]; then
            echo "No version tags found. Skipping stable update."
            exit 0
          fi

          echo "All version tags:"
          echo "${ALL_V_TAGS}"

          # Determine highest version using natural version sorting
          LATEST_TAG="$(printf '%s\n' ${ALL_V_TAGS} | sort -V | tail -n1)"

          echo "Highest version tag: ${LATEST_TAG}"

          if [[ "${VERSION}" != "${LATEST_TAG}" ]]; then
            echo "Current version ${VERSION} is NOT the highest version."
            echo "Stable tag will NOT be updated."
            exit 0
          fi

          echo "Current version ${VERSION} IS the highest version."
          echo "Updating 'stable' tag..."

          # Delete existing stable tag (local + remote)
          git tag -d stable 2>/dev/null || true
          git push origin :refs/tags/stable || true

          # Create new stable tag
          git tag stable "$GITHUB_SHA"
          git push origin stable

          echo "✅ Stable tag updated to ${VERSION}."
```
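The tag-promotion step above hinges on GNU `sort -V` picking the highest version tag. A minimal local sanity check (with made-up tag names) shows the ordering the job relies on:

```bash
# Hypothetical tags; sort -V orders naturally, so v0.10.0 ranks above v0.9.1.
printf '%s\n' v0.9.1 v0.10.0 v1.2.0 | sort -V | tail -n1
# -> v1.2.0  (only a push of this tag would move the 'stable' tag)
```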
`.github/workflows/test-container.yml` — modified (vendored), 23 lines changed

```
@@ -1,25 +1,28 @@
name: Test Distribution Containers
name: Test OS Containers

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-container:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show commit SHA
        run: git rev-parse HEAD

      - name: Show Docker version
        run: docker version

      - name: Run container tests
        run: make test-container
      - name: Run container tests (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-container
```
`.github/workflows/test-e2e.yml` — modified (vendored), 22 lines changed

```
@@ -1,18 +1,16 @@
name: Test package-manager (e2e)
name: Test End-To-End

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-e2e:
    runs-on: ubuntu-latest
    timeout-minutes: 60 # E2E + all distros can be heavier
    timeout-minutes: 60 # E2E can be heavier
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -21,5 +19,7 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Run E2E tests via make (all distros)
        run: make test-e2e
      - name: Run E2E tests via make (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-e2e
```
`.github/workflows/test-integration.yml` — modified (vendored), 16 lines changed

```
@@ -1,13 +1,7 @@
name: Test package-manager (integration)
name: Test Code Integration

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-integration:
@@ -21,9 +15,5 @@ jobs:
      - name: Show Docker version
        run: docker version

      # Build Arch test image (same as used in test-unit and test-e2e)
      - name: Build test images
        run: make build

      - name: Run integration tests via make (Arch container)
        run: make test-integration
        run: make test-integration distro="arch"
```
`.github/workflows/test-unit.yml` — modified (vendored), 12 lines changed

```
@@ -1,13 +1,7 @@
name: Test package-manager (unit)
name: Test Units

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-unit:
@@ -22,4 +16,4 @@ jobs:
        run: docker version

      - name: Run unit tests via make (Arch container)
        run: make test-unit
        run: make test-unit distro="arch"
```
`.github/workflows/test-virgin-root.yml` — new file (vendored), 58 lines

```yaml
name: Test Virgin Root

on:
  workflow_call:

jobs:
  test-virgin-root:
    runs-on: ubuntu-latest
    timeout-minutes: 45

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show Docker version
        run: docker version

      - name: Virgin Arch pkgmgr flake test (root)
        run: |
          set -euo pipefail

          echo ">>> Starting virgin ArchLinux container test (root, with shared caches)..."

          docker run --rm \
            -v "$PWD":/src \
            -v pkgmgr_repos:/root/Repositories \
            -v pkgmgr_pip_cache:/root/.cache/pip \
            -w /src \
            archlinux:latest \
            bash -lc '
              set -euo pipefail

              echo ">>> Updating and upgrading Arch system..."
              pacman -Syu --noconfirm git python python-pip nix >/dev/null

              echo ">>> Creating isolated virtual environment for pkgmgr..."
              python -m venv /tmp/pkgmgr-venv

              echo ">>> Activating virtual environment..."
              source /tmp/pkgmgr-venv/bin/activate

              echo ">>> Upgrading pip (cached)..."
              python -m pip install --upgrade pip >/dev/null

              echo ">>> Installing pkgmgr from current source tree (cached pip)..."
              python -m pip install /src >/dev/null

              echo ">>> Enabling Nix experimental features..."
              export NIX_CONFIG="experimental-features = nix-command flakes"

              echo ">>> Running: pkgmgr update pkgmgr --clone-mode shallow --no-verification"
              pkgmgr update pkgmgr --clone-mode shallow --no-verification

              echo ">>> Running: pkgmgr version pkgmgr"
              pkgmgr version pkgmgr

              echo ">>> Virgin Arch (root) test completed successfully."
            '
```
`.github/workflows/test-virgin-user.yml` — new file (vendored), 73 lines

```yaml
name: Test Virgin User

on:
  workflow_call:

jobs:
  test-virgin-user:
    runs-on: ubuntu-latest
    timeout-minutes: 45

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show Docker version
        run: docker version

      - name: Virgin Arch pkgmgr user test (non-root with sudo)
        run: |
          set -euo pipefail

          echo ">>> Starting virgin ArchLinux container test (non-root user with sudo)..."

          docker run --rm \
            -v "$PWD":/src \
            archlinux:latest \
            bash -lc '
              set -euo pipefail

              echo ">>> [root] Updating and upgrading Arch system..."
              pacman -Syu --noconfirm git python python-pip sudo base-devel debugedit

              echo ">>> [root] Creating non-root user dev..."
              useradd -m dev

              echo ">>> [root] Allowing passwordless sudo for dev..."
              echo "dev ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/dev
              chmod 0440 /etc/sudoers.d/dev

              echo ">>> [root] Adjusting ownership of /src for dev..."
              chown -R dev:dev /src

              echo ">>> [root] Running pkgmgr flow as non-root user dev..."
              sudo -u dev env PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 bash -lc "
                set -euo pipefail
                cd /src

                echo \">>> [dev] Using user: \$(whoami)\"
                echo \">>> [dev] Running scripts/installation/main.sh...\"
                bash scripts/installation/main.sh

                echo \">>> [dev] Activating venv...\"
                . \"\$HOME/.venvs/pkgmgr/bin/activate\"

                echo \">>> [dev] Installing pkgmgr into venv via pip...\"
                python -m pip install /src >/dev/null

                echo \">>> [dev] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=\$PKGMGR_DISABLE_NIX_FLAKE_INSTALLER\"
                echo \">>> [dev] Updating managed repo package-manager via pkgmgr...\"
                pkgmgr update pkgmgr --clone-mode shallow --no-verification

                echo \">>> [dev] PATH:\"
                echo \"\$PATH\"

                echo \">>> [dev] which pkgmgr:\"
                which pkgmgr || echo \">>> [dev] pkgmgr not found in PATH\"

                echo \">>> [dev] Running: pkgmgr version pkgmgr\"
                pkgmgr version pkgmgr
              "

              echo ">>> [root] Container flow finished."
            '
```
`.gitignore` — modified (vendored), 14 lines changed

```
@@ -1,9 +1,6 @@

# Prevents unwanted files from being committed to version control.

# Custom Config file
config/config.yaml

# Python bytecode
__pycache__/
*.pyc
@@ -17,6 +14,7 @@ venv/
dist/
build/*
*.egg-info/
package-manager-*

# Editor files
.vscode/
@@ -28,14 +26,10 @@ Thumbs.db

# Nix Cache to speed up tests
.nix/
.nix-dev-installed
flake.lock

# Ignore logs
*.log
package-manager-*

# debian
debian/package-manager/
debian/debhelper-build-stamp
debian/files
debian/.debhelper/
debian/package-manager.substvars
result
```
`CHANGELOG.md` — modified, 200 lines added

```
@@ -1,3 +1,203 @@
## [1.2.0] - 2025-12-12

* **Release workflow overhaul**

  * Introduced a fully structured release workflow with clear phases and safeguards
  * Added preview-first releases with explicit confirmation before execution
  * Automatic handling of *latest* tag when a release is the newest version
  * Optional branch closing after successful releases with interactive confirmation
  * Improved safety by syncing with remote before any changes
  * Clear separation of concerns (workflow, git handling, prompts, versioning)


## [1.1.0] - 2025-12-12

* Added *branch drop* for destructive branch deletion and introduced *--force/-f* flags for branch close and branch drop to skip confirmation prompts.


## [1.0.0] - 2025-12-11

* **1.0.0 – Official Stable Release 🎉**
  *First stable release of PKGMGR, the multi-distro development and package workflow manager.*

  ---

  **Key Features**

  **Core Functionality**

  * Manage many repositories with one CLI: `clone`, `update`, `install`, `list`, `path`, `config`
  * Proxy wrappers for Git, Docker/Compose and Make
  * Multi-repo execution with safe *preview mode*
  * Mirror management: `mirror list/diff/merge/setup`

  **Releases & Versioning**

  * Automated SemVer bumps, tagging and changelog generation
  * Supports PKGBUILD, Debian, RPM, pyproject.toml, flake.nix

  **Developer Tools**

  * Open repositories in VS Code, file manager or terminal
  * Unified workflows across all major Linux distros

  **Nix Integration**

  * Cross-distro reproducible builds via Nix flakes
  * CI-tested across all supported environments

  ---

  **Summary**
  PKGMGR 1.0.0 unifies repository management, build tooling, release automation and reproducible multi-distro workflows into one cohesive CLI tool.

  *This is the first official stable release.*


## [0.10.2] - 2025-12-11

* * Stable tag now updates only when a new highest version is released.
  * Debian package now includes sudo to ensure privilege escalation works reliably.
  * Nix setup is significantly more resilient with retries, correct permissions, and better environment handling.
  * AUR builder setup uses retries so yay installs succeed even under network instability.
  * Nix flake installation now fails only on mandatory parts; optional outputs no longer block installation.


## [0.10.1] - 2025-12-11

* Fixed Debian\Ubuntu to pass container e2e tests


## [0.10.0] - 2025-12-11

**Mirror System**

* Added SSH mirror support including multi-push and remote probing
* Introduced mirror management commands and refactored the CLI parser into modules

**CI/CD**

* Migrated to reusable workflows with improved debugging instrumentation
* Made stable-tag automation reliable for workflow_run events and permissions
* Ensured deterministic test results by rebuilding all test containers with no-cache

**E2E and Container Tests**

* Fixed Git safe.directory handling across all containers
* Restored Dockerfile ENTRYPOINT to resolve Nix TLS issues
* Fixed missing volume errors and hardened the E2E runner
* Added full Nix flake E2E test matrix across all distro containers
* Disabled Nix sandboxing for cross-distro builds where required

**Nix and Python Environment**

* Unified Nix Python environment and introduced lazy CLI imports
* Ensured PyYAML availability and improved Python 3.13 compatibility
* Refactored flake.nix to remove side effects and rely on generic python3

**Packaging**

* Removed Debian’s hard dependency on Nix
* Restructured packaging layout and refined build paths
* Excluded assets from Arch PKGBUILD rsync
* Cleaned up obsolete ignore files

**Repository Layout**

* Restructured repository to align local, Nix-based, and distro-based build workflows
* Added Arch support and refined build/purge scripts


## [0.9.1] - 2025-12-10

* * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
  * Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
  * Fixed repository directory resolution; improved `pkgmgr path` and `pkgmgr shell`; added full unit/E2E coverage.
  * Removed deprecated files and updated `.gitignore`.


## [0.9.0] - 2025-12-10

* Introduce a virgin Arch-based Nix flake E2E workflow that validates pkgmgr’s full flake installation path using shared caches for faster and reproducible CI runs.


## [0.8.0] - 2025-12-10

* **v0.7.15 — Installer & Command Resolution Improvements**

  * Introduced a unified **layer-based installer pipeline** with clear precedence (OS-packages, Nix, Python, Makefile).
  * Reworked installer structure and improved Python/Nix/Makefile installers, including isolated Python venvs and refined flake-output handling.
  * Fully rewrote **command resolution** with stronger typing, safer fallbacks, and explicit support for `command: null` to mark library-only repositories.
  * Added extensive **unit and integration tests** for installer capability ordering, command resolution, and Nix/Python installer behavior.
  * Expanded documentation with capability hierarchy diagrams and scenario matrices.
  * Removed deprecated repository entries and obsolete configuration files.


## [0.7.14] - 2025-12-10

* Fixed the clone-all integration test so that `SystemExit(0)` from the proxy is treated as a successful command instead of a failure.


## [0.7.13] - 2025-12-10

### Fix tools path resolution and add tests

- Fixed a crash in `pkgmgr code` caused by missing `directory` metadata by introducing `_resolve_repository_path()` with proper fallbacks to `repositories_base_dir` / `repositories_dir`.
- Updated `explore`, `terminal` and `code` tool commands to use the new resolver.
- Improved VS Code workspace generation and path handling.
- Added unit & E2E tests for tool commands.


## [0.7.12] - 2025-12-09

* Fixed self refering alias during setup


## [0.7.11] - 2025-12-09

* test: fix installer unit tests for OS packages and Nix dev shell


## [0.7.10] - 2025-12-09

* Fixed test_install_pkgmgr_shallow.py


## [0.7.9] - 2025-12-09

* 'main' and 'master' are now both accepted as branches for branch close merge


## [0.7.8] - 2025-12-09

* Missing pyproject.toml doesn't lead to an error during release


## [0.7.7] - 2025-12-09

* Added TEST_PATTERN parameter to execute dedicated tests


## [0.7.6] - 2025-12-09

* Fixed pull --preview bug in e2e test


## [0.7.5] - 2025-12-09

* Fixed wrong directory permissions for nix


## [0.7.4] - 2025-12-09

* Fixed missing build in test workflow -> Tests pass now


## [0.7.3] - 2025-12-09

* Fixed bug: Ignored packages are now ignored


## [0.7.2] - 2025-12-09

* Implemented Changelog Support for Fedora and Debian
```
```
@@ -1,9 +1,12 @@
# ------------------------------------------------------------
# Base image selector — overridden by Makefile
# ------------------------------------------------------------
ARG BASE_IMAGE=archlinux:latest
ARG BASE_IMAGE
FROM ${BASE_IMAGE}

RUN echo "BASE_IMAGE=${BASE_IMAGE}" && \
    cat /etc/os-release || true

# ------------------------------------------------------------
# Nix environment defaults
#
```
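Since the default value of `ARG BASE_IMAGE` is dropped here, the base image must now be supplied at build time. A minimal sketch of such an invocation (the image tag and the `-t` name are only illustrative; the actual wiring lives in the Makefile and `scripts/build`):

```bash
# Build the test image against a non-Arch base; BASE_IMAGE is now a required build arg.
docker build --build-arg BASE_IMAGE=debian:stable-slim -t pkgmgr-test-debian .
```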
`MIRRORS` — new file, 3 lines

```
git@github.com:kevinveenbirkenbach/package-manager.git
ssh://git@git.veen.world:2201/kevinveenbirkenbach/pkgmgr.git
ssh://git@code.cymais.cloud:2201/kevinveenbirkenbach/pkgmgr.git
```
`Makefile` — modified, 34 lines changed

```
@@ -2,17 +2,15 @@
test build build-no-cache test-unit test-e2e test-integration \
test-container

# ------------------------------------------------------------
# Local Nix cache directories in the repo
# ------------------------------------------------------------
NIX_STORE_VOLUME := pkgmgr_nix_store
NIX_CACHE_VOLUME := pkgmgr_nix_cache
# Distro
# Options: arch debian ubuntu fedora centos
distro ?= arch
export distro

# ------------------------------------------------------------
# Distro list and base images
# Base images
# (kept for documentation/reference; actual build logic is in scripts/build)
# ------------------------------------------------------------
DISTROS := arch debian ubuntu fedora centos
BASE_IMAGE_ARCH := archlinux:latest
BASE_IMAGE_DEBIAN := debian:stable-slim
BASE_IMAGE_UBUNTU := ubuntu:latest
@@ -20,13 +18,16 @@ BASE_IMAGE_FEDORA := fedora:latest
BASE_IMAGE_CENTOS := quay.io/centos/centos:stream9

# Make them available in scripts
export DISTROS
export BASE_IMAGE_ARCH
export BASE_IMAGE_DEBIAN
export BASE_IMAGE_UBUNTU
export BASE_IMAGE_FEDORA
export BASE_IMAGE_CENTOS

# PYthon Unittest Pattern
TEST_PATTERN := test_*.py
export TEST_PATTERN

# ------------------------------------------------------------
# PKGMGR setup (developer wrapper -> scripts/installation/main.sh)
# ------------------------------------------------------------
@@ -46,16 +47,16 @@ build:
# Test targets (delegated to scripts/test)
# ------------------------------------------------------------

test-unit:
test-unit: build-missing
	@bash scripts/test/test-unit.sh

test-integration:
test-integration: build-missing
	@bash scripts/test/test-integration.sh

test-e2e:
test-e2e: build-missing
	@bash scripts/test/test-e2e.sh

test-container:
test-container: build-missing
	@bash scripts/test/test-container.sh

# ------------------------------------------------------------
@@ -64,8 +65,13 @@ test-container:
build-missing:
	@bash scripts/build/build-image-missing.sh

# Combined test target for local + CI (unit + e2e + integration)
test: build-missing test-container test-unit test-e2e test-integration
# Combined test target for local + CI (unit + integration + e2e)
test: test-container test-unit test-integration test-e2e

delete-volumes:
	@docker volume rm pkgmgr_nix_store_${distro} pkgmgr_nix_cache_${distro} || true

purge: delete-volumes build-no-cache

# ------------------------------------------------------------
# System install (native packages, calls scripts/installation/run-package.sh)
```
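Because `distro` defaults to `arch` and is exported, and `TEST_PATTERN` can be overridden on the command line, the same targets can be pointed at any of the listed distros or at a subset of tests. A usage sketch (the distro value comes from the `DISTROS` list above; the test pattern is only an example):

```bash
# Run the container tests against Fedora instead of the default Arch image.
make test-container distro=fedora

# Restrict unit tests to files matching a pattern (example pattern shown).
make test-unit TEST_PATTERN="test_install_*.py"
```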
`README.md` — modified, 216 lines changed

````
@@ -1,116 +1,188 @@
# Package Manager🤖📦
# Package Manager 🤖📦

[](https://github.com/sponsors/kevinveenbirkenbach)
[](https://www.patreon.com/c/kevinveenbirkenbach)
[](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
[](https://www.patreon.com/c/kevinveenbirkenbach)
[](https://buymeacoffee.com/kevinveenbirkenbach)
[](https://s.veen.world/paypaldonate)
[](LICENSE)
[](https://github.com/kevinveenbirkenbach/package-manager)

*Kevins's* Package Manager is a configurable Python tool designed to manage multiple repositories via Bash. It automates common Git operations such as clone, pull, push, status, and more. Additionally, it handles the creation of executable wrappers and alias links for your repositories.
**Kevin's Package Manager (PKGMGR)** is a *multi-distro* package manager and workflow orchestrator.
It helps you **develop, package, release and manage projects across multiple Linux-based
operating systems** (Arch, Debian, Ubuntu, Fedora, CentOS, …).

PKGMGR is implemented in **Python** and uses **Nix (flakes)** as a foundation for
distribution-independent builds and tooling. On top of that it provides a rich
CLI that proxies common developer tools (Git, Docker, Make, …) and glues them
together into repeatable development workflows.

---

## Why PKGMGR? 🧠

Traditional distro package managers like `apt`, `pacman` or `dnf` focus on a
single operating system. PKGMGR instead focuses on **your repositories and
development lifecycle**:

* one configuration for all your repos,
* one CLI to interact with them,
* one Nix-based layer to keep tooling reproducible across distros.

You keep using your native package manager where it makes sense – PKGMGR
coordinates the *development and release flow* around it.

---

## Features 🚀

- **Installation & Setup:**
  Create executable wrappers with auto-detected commands (e.g. `main.sh` or `main.py`).

- **Git Operations:**
  Easily perform `git pull`, `push`, `status`, `commit`, `diff`, `add`, `show`, and `checkout` with extra parameters passed through.

- **Configuration Management:**
  Manage repository configurations via a default file (`config/defaults.yaml`) and a user-specific file (`config/config.yaml`). Initialize, add, delete, or ignore entries using subcommands.

- **Path & Listing:**
  Display repository paths or list all configured packages with their details.

- **Custom Aliases:**
  Generate and manage custom aliases for easy command invocation.
### Multi-distro development & packaging

* Manage **many repositories at once** from a single `config/config.yaml`.
* Drive full **release pipelines** across Linux distributions using:

  * Nix flakes (`flake.nix`)
  * PyPI style builds (`pyproject.toml`)
  * OS packages (PKGBUILD, Debian control/changelog, RPM spec)
  * Ansible Galaxy metadata and more.

### Rich CLI for daily work

All commands are exposed via the `pkgmgr` CLI and are available on every distro:

* **Repository management**

  * `clone`, `update`, `install`, `delete`, `deinstall`, `path`, `list`, `config`
* **Git proxies**

  * `pull`, `push`, `status`, `diff`, `add`, `show`, `checkout`,
    `reset`, `revert`, `rebase`, `commit`, `branch`
* **Docker & Compose orchestration**

  * `build`, `up`, `down`, `exec`, `ps`, `start`, `stop`, `restart`
* **Release toolchain**

  * `version`, `release`, `changelog`, `make`
* **Mirror & workflow helpers**

  * `mirror` (list/diff/merge/setup), `shell`, `terminal`, `code`, `explore`

Many of these commands support `--preview` mode so you can inspect the
underlying Git or Docker calls without executing them.

### Full development workflows

PKGMGR is not just a helper around Git commands. Combined with its release and
versioning features it can drive **end-to-end workflows**:

1. Clone and mirror repositories.
2. Run tests and builds through `make` or Nix.
3. Bump versions, update changelogs and tags.
4. Build distro-specific packages.
5. Keep all mirrors and working copies in sync.

The extensive E2E tests (`tests/e2e/`) and GitHub Actions workflows (including
“virgin user” and “virgin root” Arch tests) validate these flows across
different Linux environments.

---

## Architecture & Setup Map 🗺️

The following diagram gives a full overview of:

* PKGMGR’s package structure,
* the layered installers (OS, foundation, Python, Makefile),
* and the setup controller that decides which layer to use on a given system.

**Diagram status:** 11 December 2025
**Always-up-to-date version:** [https://s.veen.world/pkgmgrmp](https://s.veen.world/pkgmgrmp)

---

## Installation ⚙️

Clone the repository and ensure your `~/.local/bin` is in your system PATH:
### 1. Get the latest stable version

For a stable setup, use the **latest tagged release** (the tag pointed to by
`latest`):

```bash
git clone https://github.com/kevinveenbirkenbach/package-manager.git
cd package-manager

# Optional but recommended: checkout the latest stable tag
git fetch --tags
git checkout "$(git describe --tags --abbrev=0)"
```

Install make and pip if not installed yet:
### 2. Install via Make

The project ships with a Makefile that encapsulates the typical installation
flow. On most systems you only need:

```bash
pacman -S make python-pip
# Ensure make, Python and pip are installed via your distro package manager
# (e.g. pacman -S make python python-pip, apt install make python3-pip, ...)

make install
```

Then, run the following command to set up the project:
This will:

* create or reuse a Python virtual environment,
* install PKGMGR (and its Python dependencies) into that environment,
* expose the `pkgmgr` executable on your PATH (usually via `~/.local/bin`),
* prepare Nix-based integration where available so PKGMGR can build and manage
  packages distribution-independently.

For development use, you can also run:

```bash
make setup
```

The `make setup` command will:
- Make `main.py` executable.
- Install required packages from `requirements.txt`.
- Execute `python main.py install` to complete the installation.
which prepares the environment and leaves you with a fully wired development
workspace (including Nix, tests and scripts).

## Docker Quickstart 🐳
---

Alternatively to installing locally, you can use Docker: build the image with
## Usage 🧰

After installation, the main entry point is:

```bash
docker build --no-cache -t pkgmgr .
pkgmgr --help
```

or alternativ pull it via
This prints a list of all available subcommands, for example:

* `pkgmgr list --all` – show all repositories in the config
* `pkgmgr update --all --clone-mode https` – update every repository
* `pkgmgr release patch --preview` – simulate a patch release
* `pkgmgr version --all` – show version information for all repositories
* `pkgmgr mirror setup --preview --all` – prepare Git mirrors (no changes in preview)
* `pkgmgr make install --preview pkgmgr` – preview make install for the pkgmgr repo

The help for each command is available via:

```bash
docker pull kevinveenbirkenbach/pkgmgr:latest
pkgmgr <command> --help
```

and then run

```bash
docker run --rm pkgmgr --help
```

## Usage 📖

Run the script with different commands. For example:

- **Install all packages:**
  ```bash
  pkgmgr install --all
  ```
- **Pull updates for a specific repository:**
  ```bash
  pkgmgr pull pkgmgr
  ```
- **Commit changes with extra Git parameters:**
  ```bash
  pkgmgr commit pkgmgr -- -m "Your commit message"
  ```
- **List all configured packages:**
  ```bash
  pkgmgr config show
  ```
- **Manage configuration:**
  ```bash
  pkgmgr config init
  pkgmgr config add
  pkgmgr config edit
  pkgmgr config delete <identifier>
  pkgmgr config ignore <identifier> --set true
  ```
---

## License 📄

This project is licensed under the MIT License.

## Author 👤

Kevin Veen-Birkenbach
[https://www.veen.world](https://www.veen.world)
See the [LICENSE](LICENSE) file for details.

---

**Repository:** [github.com/kevinveenbirkenbach/package-manager](https://github.com/kevinveenbirkenbach/package-manager)
## Author 👤

*Created with AI 🤖 - [View conversation](https://chatgpt.com/share/67c728c4-92d0-800f-8945-003fa9bf27c6)*
Kevin Veen-Birkenbach
[https://www.veen.world](https://www.veen.world)
````
`TODO.md` — new file, 6 lines

```markdown
# to-dos

For the following checkout the implementation map:

- Implement TAGS
- Implement SIGNING_KEY
```

Removed file, 4 lines:

```
@@ -1,4 +0,0 @@
# Legacy file used only if pip still installs from requirements.txt.
# You may delete this file once you switch entirely to pyproject.toml.

PyYAML
```
`assets/banner.jpg` — new binary file, not shown (63 KiB)
`assets/map.png` — new binary file, not shown (1.9 MiB)
```
@@ -380,17 +380,6 @@ repositories:
      - 44D8F11FD62F878E
      - B5690EEEBB952194

- account: kevinveenbirkenbach
  alias: infinito-presentation
  description: This repository contains a Infinito.Nexus presentation designed for customers, end-users, investors, developers, and administrators, offering tailored content and insights for each group.
  homepage: https://github.com/kevinveenbirkenbach/infinito-presentation
  provider: github.com
  repository: infinito-presentation
  verified:
    gpg_keys:
      - 44D8F11FD62F878E
      - B5690EEEBB952194

- account: kevinveenbirkenbach
  description: A lightweight Python utility to generate dynamic color schemes from a single base color. Provides HSL-based color transformations for theming, UI design, and CSS variable generation. Optimized for integration in Python projects, Flask applications, and Ansible roles.
  homepage: https://github.com/kevinveenbirkenbach/colorscheme-generator
@@ -599,17 +588,6 @@ repositories:
      - 44D8F11FD62F878E
      - B5690EEEBB952194

- account: kevinveenbirkenbach
  desciption: Infinito Inventory Builder — a containerized web application that dynamically generates Ansible inventory files from invokable Infinito.Nexus roles through an interactive, browser-based interface.
  homepage: https://github.com/kevinveenbirkenbach/infinito-inventory-builder
  alias: invbuild
  provider: github.com
  repository: infinito-inventory-builder
  verified:
    gpg_keys:
      - 44D8F11FD62F878E
      - B5690EEEBB952194

- account: kevinveenbirkenbach
  desciption: A simple Python CLI tool to safely rename Linux user accounts using usermod — including home directory migration and validation checks.
  homepage: https://github.com/kevinveenbirkenbach/user-rename
```

Removed file, 7 lines:

```
@@ -1,7 +0,0 @@
- account: kevinveenbirkenbach
  alias: gkfdrtdtcntr
  provider: github.com
  repository: federated-to-central-social-network-bridge
  verified:
    gpg_keys:
      - 44D8F11FD62F878E
```
`flake.nix` — modified, 39 lines changed

```
@@ -26,12 +26,17 @@
    packages = forAllSystems (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};

        # Single source of truth for pkgmgr: Python 3.11
        # - Matches pyproject.toml: requires-python = ">=3.11"
        # - Uses python311Packages so that PyYAML etc. are available
        python = pkgs.python311;
        pyPkgs = pkgs.python311Packages;
      in
      rec {
        pkgmgr = pyPkgs.buildPythonApplication {
          pname = "package-manager";
          version = "0.7.2";
          version = "1.2.0";

          # Use the git repo as source
          src = ./.;
@@ -45,18 +50,17 @@
            pyPkgs.wheel
          ];

          # Runtime dependencies (matches [project.dependencies])
          # Runtime dependencies (matches [project.dependencies] in pyproject.toml)
          propagatedBuildInputs = [
            pyPkgs.pyyaml
            # Add more here if needed, e.g.:
            # pyPkgs.click
            # pyPkgs.rich
            pyPkgs.pip
          ];

          doCheck = false;

          pythonImportsCheck = [ "pkgmgr" ];
        };

        default = pkgmgr;
      }
    );
@@ -67,23 +71,42 @@
    devShells = forAllSystems (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};
        pkgmgrPkg = self.packages.${system}.pkgmgr;

        ansiblePkg =
          if pkgs ? ansible-core then pkgs.ansible-core
          else pkgs.ansible;

        # Use the same Python version as the package (3.11)
        python = pkgs.python311;

        pythonWithDeps = python.withPackages (ps: [
          ps.pip
          ps.pyyaml
        ]);
      in
      {
        default = pkgs.mkShell {
          buildInputs = [
            pkgmgrPkg
            pythonWithDeps
            pkgs.git
            ansiblePkg
          ];

          shellHook = ''
            # Ensure our Python with dependencies is preferred on PATH
            export PATH=${pythonWithDeps}/bin:$PATH

            # Ensure src/ layout is importable:
            # pkgmgr lives in ./src/pkgmgr
            export PYTHONPATH="$PWD/src:${PYTHONPATH:-}"
            # Also add repo root in case tools/tests rely on it
            export PYTHONPATH="$PWD:$PYTHONPATH"

            echo "Entered pkgmgr development shell for ${system}"
            echo "pkgmgr CLI is available via the flake build"
            echo "Python used in this shell:"
            python --version
            echo "pkgmgr CLI (from source) is available via:"
            echo "  python -m pkgmgr.cli --help"
          '';
        };
      }
```
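With the dev shell defined above, entering the environment and exercising the CLI from the source tree looks roughly like this (assuming flakes are enabled in your Nix configuration):

```bash
# Enter the flake's default dev shell; the shellHook puts ./src on PYTHONPATH.
nix develop

# Run the CLI straight from the checked-out sources (command echoed by the shellHook).
python -m pkgmgr.cli --help
```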
`main.py` — modified, 10 lines changed

```
@@ -1,6 +1,14 @@
#!/usr/bin/env python3
import sys
from pathlib import Path

# Ensure local src/ overrides installed package
ROOT = Path(__file__).resolve().parent
SRC = ROOT / "src"
if SRC.is_dir():
    sys.path.insert(0, str(SRC))

from pkgmgr.cli import main

if __name__ == "__main__":
    main()
    main()
```
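The `sys.path` shim above makes `./main.py` prefer the checked-out `src/` tree over any installed copy of pkgmgr, which is handy for trying local changes without reinstalling. A quick way to confirm which code is picked up (the `--help` invocation is only an example):

```bash
# Runs the CLI from the working tree; src/pkgmgr shadows an installed pkgmgr package.
python main.py --help
```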
Removed file, 84 lines (RPM spec):

```
@@ -1,84 +0,0 @@
Name: package-manager
Version: 0.7.2
Release: 1%{?dist}
Summary: Wrapper that runs Kevin's package-manager via Nix flake

License: MIT
URL: https://github.com/kevinveenbirkenbach/package-manager
Source0: %{name}-%{version}.tar.gz

BuildArch: noarch

# NOTE:
# Nix is a runtime requirement, but it is *not* declared here as a hard
# RPM dependency, because many distributions do not ship a "nix" RPM.
# Instead, Nix is installed and initialized by init-nix.sh, which is
# called in the %post scriptlet below.

%description
This package provides the `pkgmgr` command, which runs Kevin's package
manager via a local Nix flake:

    nix run /usr/lib/package-manager#pkgmgr -- ...

Nix is a runtime requirement and is installed/initialized by the
init-nix.sh helper during package installation if it is not yet
available on the system.

%prep
%setup -q

%build
# No build step required; we ship the project tree as-is.
:

%install
rm -rf %{buildroot}
install -d %{buildroot}%{_bindir}
# Install project tree into a fixed, architecture-independent location.
install -d %{buildroot}/usr/lib/package-manager

# Copy full project source into /usr/lib/package-manager
cp -a . %{buildroot}/usr/lib/package-manager/

# Wrapper
install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr

# Shared Nix init script (ensure it is executable in the installed tree)
install -m0755 scripts/init-nix.sh %{buildroot}/usr/lib/package-manager/init-nix.sh

# Remove packaging-only and development artefacts from the installed tree
rm -rf \
  %{buildroot}/usr/lib/package-manager/PKGBUILD \
  %{buildroot}/usr/lib/package-manager/Dockerfile \
  %{buildroot}/usr/lib/package-manager/debian \
  %{buildroot}/usr/lib/package-manager/.git \
  %{buildroot}/usr/lib/package-manager/.github \
  %{buildroot}/usr/lib/package-manager/tests \
  %{buildroot}/usr/lib/package-manager/.gitignore \
  %{buildroot}/usr/lib/package-manager/__pycache__ \
  %{buildroot}/usr/lib/package-manager/.gitkeep || true

%post
# Initialize Nix (if needed) after installing the package-manager files.
if [ -x /usr/lib/package-manager/init-nix.sh ]; then
  /usr/lib/package-manager/init-nix.sh || true
else
  echo ">>> Warning: /usr/lib/package-manager/init-nix.sh not found or not executable."
fi

%postun
echo ">>> package-manager removed. Nix itself was not removed."

%files
%doc README.md
%license LICENSE
%{_bindir}/pkgmgr
/usr/lib/package-manager/

%changelog
* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.2-1
- Implemented Changelog Support for Fedora and Debian

* Sat Dec 06 2025 Kevin Veen-Birkenbach <info@veen.world> - 0.1.1-1
- Initial RPM packaging for package-manager
```
`packaging/arch/.gitignore` — new file (vendored), 6 lines

```
# Arch pkg artifacts
*.pkg.tar.*
*.log
package-manager-*
src/
pkg/
```

PKGBUILD — modified:

```
@@ -1,7 +1,7 @@
# Maintainer: Kevin Veen-Birkenbach <info@veen.world>

pkgname=package-manager
pkgver=0.7.2
pkgver=0.9.1
pkgrel=1
pkgdesc="Local-flake wrapper for Kevin's package-manager (Nix-based)."
arch=('any')
@@ -15,7 +15,7 @@ makedepends=('rsync')
install=${pkgname}.install

# Local source checkout — avoids the tarball requirement.
# This assumes you build the package from inside the main project repository.
# We build from the project root (two levels above packaging/arch/).
source=()
sha256sums=()

@@ -24,12 +24,18 @@ _srcdir_name="source"

prepare() {
  mkdir -p "$srcdir/$_srcdir_name"

  local project_root
  project_root="$(cd "$startdir/../.." && pwd)"

  rsync -a \
    --exclude=".git" \
    --exclude=".github" \
    --exclude="pkg" \
    --exclude="srcpkg" \
    "$startdir/" "$srcdir/$_srcdir_name/"
    --exclude="packaging" \
    --exclude="assets" \
    "$project_root/" "$srcdir/$_srcdir_name/"
}

build() {
@@ -62,7 +68,8 @@ package() {
    "$pkgdir/usr/lib/package-manager/PKGBUILD" \
    "$pkgdir/usr/lib/package-manager/Dockerfile" \
    "$pkgdir/usr/lib/package-manager/debian" \
    "$pkgdir/usr/lib/package-manager/packaging" \
    "$pkgdir/usr/lib/package-manager/.gitignore" \
    "$pkgdir/usr/lib/package-manager/__pycache__" \
    "$pkgdir/usr/lib/package-manager/.gitkeep"
    "$pkgdir/usr/lib/package-manager/.gitkeep" || true
}
```
`packaging/debian/.gitignore` — new file (vendored), 6 lines

```
# debian
package-manager/
debhelper-build-stamp
files
.debhelper/
package-manager.substvars
```

Debian changelog — modified, 100 lines added:

```
@@ -1,3 +1,103 @@
package-manager (0.9.1-1) unstable; urgency=medium

  * * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
    * Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
    * Fixed repository directory resolution; improved `pkgmgr path` and `pkgmgr shell`; added full unit/E2E coverage.
    * Removed deprecated files and updated `.gitignore`.

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Wed, 10 Dec 2025 22:56:01 +0100

package-manager (0.9.0-1) unstable; urgency=medium

  * Introduce a virgin Arch-based Nix flake E2E workflow that validates pkgmgr’s full flake installation path using shared caches for faster and reproducible CI runs.

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Wed, 10 Dec 2025 18:38:07 +0100

package-manager (0.8.0-1) unstable; urgency=medium

  * **v0.7.15 — Installer & Command Resolution Improvements**

    * Introduced a unified **layer-based installer pipeline** with clear precedence (OS-packages, Nix, Python, Makefile).
    * Reworked installer structure and improved Python/Nix/Makefile installers, including isolated Python venvs and refined flake-output handling.
    * Fully rewrote **command resolution** with stronger typing, safer fallbacks, and explicit support for `command: null` to mark library-only repositories.
    * Added extensive **unit and integration tests** for installer capability ordering, command resolution, and Nix/Python installer behavior.
    * Expanded documentation with capability hierarchy diagrams and scenario matrices.
    * Removed deprecated repository entries and obsolete configuration files.

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Wed, 10 Dec 2025 17:31:57 +0100

package-manager (0.7.14-1) unstable; urgency=medium

  * Fixed the clone-all integration test so that `SystemExit(0)` from the proxy is treated as a successful command instead of a failure.

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Wed, 10 Dec 2025 10:38:33 +0100

package-manager (0.7.13-1) unstable; urgency=medium

  * Automated release.

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Wed, 10 Dec 2025 10:27:24 +0100

package-manager (0.7.12-1) unstable; urgency=medium

  * Fixed self refering alias during setup

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 23:36:35 +0100

package-manager (0.7.11-1) unstable; urgency=medium

  * test: fix installer unit tests for OS packages and Nix dev shell

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 23:16:46 +0100

package-manager (0.7.10-1) unstable; urgency=medium

  * Fixed test_install_pkgmgr_shallow.py

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 22:57:08 +0100

package-manager (0.7.9-1) unstable; urgency=medium

  * 'main' and 'master' are now both accepted as branches for branch close merge

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 21:19:13 +0100

package-manager (0.7.8-1) unstable; urgency=medium

  * Missing pyproject.toml doesn't lead to an error during release

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 21:03:24 +0100

package-manager (0.7.7-1) unstable; urgency=medium

  * Added TEST_PATTERN parameter to execute dedicated tests

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 17:54:38 +0100

package-manager (0.7.6-1) unstable; urgency=medium

  * Fixed pull --preview bug in e2e test

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 17:14:19 +0100

package-manager (0.7.5-1) unstable; urgency=medium

  * Fixed wrong directory permissions for nix

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 16:45:42 +0100

package-manager (0.7.4-1) unstable; urgency=medium

  * Fixed missing build in test workflow -> Tests pass now

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 16:22:00 +0100

package-manager (0.7.3-1) unstable; urgency=medium

  * Fixed bug: Ignored packages are now ignored

 -- Kevin Veen-Birkenbach <kevin@veen.world>  Tue, 09 Dec 2025 16:08:31 +0100

package-manager (0.7.2-1) unstable; urgency=medium

  * Implemented Changelog Support for Fedora and Debian
```
```
@@ -9,7 +9,7 @@ Homepage: https://github.com/kevinveenbirkenbach/package-manager

Package: package-manager
Architecture: any
Depends: nix, ${misc:Depends}
Depends: sudo, ${misc:Depends}
Description: Wrapper that runs Kevin's package-manager via Nix flake
 This package provides the `pkgmgr` command, which runs Kevin's package
 manager via a local Nix flake
```
`packaging/fedora/package-manager.spec` — new file, 139 lines

```
Name: package-manager
Version: 0.9.1
Release: 1%{?dist}
Summary: Wrapper that runs Kevin's package-manager via Nix flake

License: MIT
URL: https://github.com/kevinveenbirkenbach/package-manager
Source0: %{name}-%{version}.tar.gz

BuildArch: noarch

# NOTE:
# Nix is a runtime requirement, but it is *not* declared here as a hard
# RPM dependency, because many distributions do not ship a "nix" RPM.
# Instead, Nix is installed and initialized by init-nix.sh, which is
# called in the %post scriptlet below.

%description
This package provides the `pkgmgr` command, which runs Kevin's package
manager via a local Nix flake:

    nix run /usr/lib/package-manager#pkgmgr -- ...

Nix is a runtime requirement and is installed/initialized by the
init-nix.sh helper during package installation if it is not yet
available on the system.

%prep
%setup -q

%build
# No build step required; we ship the project tree as-is.
:

%install
rm -rf %{buildroot}
install -d %{buildroot}%{_bindir}
# Install project tree into a fixed, architecture-independent location.
install -d %{buildroot}/usr/lib/package-manager

# Copy full project source into /usr/lib/package-manager
cp -a . %{buildroot}/usr/lib/package-manager/

# Wrapper
install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr

# Shared Nix init script (ensure it is executable in the installed tree)
install -m0755 scripts/init-nix.sh %{buildroot}/usr/lib/package-manager/init-nix.sh

# Remove packaging-only and development artefacts from the installed tree
rm -rf \
  %{buildroot}/usr/lib/package-manager/PKGBUILD \
  %{buildroot}/usr/lib/package-manager/Dockerfile \
  %{buildroot}/usr/lib/package-manager/debian \
  %{buildroot}/usr/lib/package-manager/.git \
  %{buildroot}/usr/lib/package-manager/.github \
  %{buildroot}/usr/lib/package-manager/tests \
  %{buildroot}/usr/lib/package-manager/.gitignore \
  %{buildroot}/usr/lib/package-manager/__pycache__ \
  %{buildroot}/usr/lib/package-manager/.gitkeep || true

%post
# Initialize Nix (if needed) after installing the package-manager files.
if [ -x /usr/lib/package-manager/init-nix.sh ]; then
  /usr/lib/package-manager/init-nix.sh || true
else
  echo ">>> Warning: /usr/lib/package-manager/init-nix.sh not found or not executable."
fi

%postun
echo ">>> package-manager removed. Nix itself was not removed."

%files
%doc README.md
%license LICENSE
%{_bindir}/pkgmgr
/usr/lib/package-manager/

%changelog
* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.9.1-1
- * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
  * Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
  * Fixed repository directory resolution; improved `pkgmgr path` and `pkgmgr shell`; added full unit/E2E coverage.
  * Removed deprecated files and updated `.gitignore`.

* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.9.0-1
- Introduce a virgin Arch-based Nix flake E2E workflow that validates pkgmgr’s full flake installation path using shared caches for faster and reproducible CI runs.

* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.8.0-1
- **v0.7.15 — Installer & Command Resolution Improvements**

  * Introduced a unified **layer-based installer pipeline** with clear precedence (OS-packages, Nix, Python, Makefile).
  * Reworked installer structure and improved Python/Nix/Makefile installers, including isolated Python venvs and refined flake-output handling.
  * Fully rewrote **command resolution** with stronger typing, safer fallbacks, and explicit support for `command: null` to mark library-only repositories.
  * Added extensive **unit and integration tests** for installer capability ordering, command resolution, and Nix/Python installer behavior.
  * Expanded documentation with capability hierarchy diagrams and scenario matrices.
  * Removed deprecated repository entries and obsolete configuration files.

* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.14-1
- Fixed the clone-all integration test so that `SystemExit(0)` from the proxy is treated as a successful command instead of a failure.

* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.13-1
- Automated release.

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.12-1
- Fixed self refering alias during setup

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.11-1
- test: fix installer unit tests for OS packages and Nix dev shell

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.10-1
- Fixed test_install_pkgmgr_shallow.py

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.9-1
- 'main' and 'master' are now both accepted as branches for branch close merge

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.8-1
- Missing pyproject.toml doesn't lead to an error during release

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.7-1
- Added TEST_PATTERN parameter to execute dedicated tests

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.6-1
- Fixed pull --preview bug in e2e test

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.5-1
- Fixed wrong directory permissions for nix

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.4-1
- Fixed missing build in test workflow -> Tests pass now

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.3-1
- Fixed bug: Ignored packages are now ignored

* Tue Dec 09 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.7.2-1
- Implemented Changelog Support for Fedora and Debian

* Sat Dec 06 2025 Kevin Veen-Birkenbach <info@veen.world> - 0.1.1-1
- Initial RPM packaging for package-manager
```
@@ -1,7 +0,0 @@
version: 1

author: "Kevin Veen-Birkenbach"
url: "https://github.com/kevinveenbirkenbach/package-manager"
description: "A configurable Python-based package manager for managing multiple repositories via Bash."

dependencies: []
@@ -1,214 +0,0 @@
|
||||
# pkgmgr/branch_commands.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
High-level helpers for branch-related operations.
|
||||
|
||||
This module encapsulates the actual Git logic so the CLI layer
|
||||
(pkgmgr.cli.commands.branch) stays thin and testable.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from pkgmgr.core.git import run_git, GitError, get_current_branch
|
||||
|
||||
|
||||
def open_branch(
|
||||
name: Optional[str],
|
||||
base_branch: str = "main",
|
||||
cwd: str = ".",
|
||||
) -> None:
|
||||
"""
|
||||
Create and push a new feature branch on top of `base_branch`.
|
||||
|
||||
Steps:
|
||||
1) git fetch origin
|
||||
2) git checkout <base_branch>
|
||||
3) git pull origin <base_branch>
|
||||
4) git checkout -b <name>
|
||||
5) git push -u origin <name>
|
||||
|
||||
If `name` is None or empty, the user is prompted on stdin.
|
||||
"""
|
||||
|
||||
if not name:
|
||||
name = input("Enter new branch name: ").strip()
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
# 1) Fetch from origin
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before creating branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 2) Checkout base branch
|
||||
try:
|
||||
run_git(["checkout", base_branch], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {base_branch!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 3) Pull latest changes on base
|
||||
try:
|
||||
run_git(["pull", "origin", base_branch], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {base_branch!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 4) Create new branch
|
||||
try:
|
||||
run_git(["checkout", "-b", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to create new branch {name!r} from base {base_branch!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 5) Push and set upstream
|
||||
try:
|
||||
run_git(["push", "-u", "origin", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push new branch {name!r} to origin: {exc}"
|
||||
) from exc
|
||||
|
||||
|
||||
def _resolve_base_branch(
|
||||
preferred: str,
|
||||
fallback: str,
|
||||
cwd: str,
|
||||
) -> str:
|
||||
"""
|
||||
Resolve the base branch to use for merging.
|
||||
|
||||
Try `preferred` (default: main) first, then `fallback` (default: master).
|
||||
Raise RuntimeError if neither exists.
|
||||
"""
|
||||
for candidate in (preferred, fallback):
|
||||
try:
|
||||
run_git(["rev-parse", "--verify", candidate], cwd=cwd)
|
||||
return candidate
|
||||
except GitError:
|
||||
continue
|
||||
|
||||
raise RuntimeError(
|
||||
f"Neither {preferred!r} nor {fallback!r} exist in this repository."
|
||||
)
|
||||
|
||||
|
||||
def close_branch(
|
||||
name: Optional[str],
|
||||
base_branch: str = "main",
|
||||
fallback_base: str = "master",
|
||||
cwd: str = ".",
|
||||
) -> None:
|
||||
"""
|
||||
Merge a feature branch into the main/master branch and optionally delete it.
|
||||
|
||||
Steps:
|
||||
1) Determine branch name (argument or current branch)
|
||||
2) Resolve base branch (prefers `base_branch`, falls back to `fallback_base`)
|
||||
3) Ask for confirmation (y/N)
|
||||
4) git fetch origin
|
||||
5) git checkout <base>
|
||||
6) git pull origin <base>
|
||||
7) git merge --no-ff <name>
|
||||
8) git push origin <base>
|
||||
9) Delete branch locally and on origin
|
||||
|
||||
If the user does not confirm with 'y', the operation is aborted.
|
||||
"""
|
||||
|
||||
# 1) Determine which branch to close
|
||||
if not name:
|
||||
try:
|
||||
name = get_current_branch(cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(f"Failed to detect current branch: {exc}") from exc
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
# 2) Resolve base branch (main/master)
|
||||
target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
if name == target_base:
|
||||
raise RuntimeError(
|
||||
f"Refusing to close base branch {target_base!r}. "
|
||||
"Please specify a feature branch."
|
||||
)
|
||||
|
||||
# 3) Confirmation prompt
|
||||
prompt = (
|
||||
f"Merge branch '{name}' into '{target_base}' and delete it afterwards? "
|
||||
"(y/N): "
|
||||
)
|
||||
answer = input(prompt).strip().lower()
|
||||
if answer != "y":
|
||||
print("Aborted closing branch.")
|
||||
return
|
||||
|
||||
# 4) Fetch from origin
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before closing branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 5) Checkout base branch
|
||||
try:
|
||||
run_git(["checkout", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 6) Pull latest base
|
||||
try:
|
||||
run_git(["pull", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 7) Merge feature branch into base
|
||||
try:
|
||||
run_git(["merge", "--no-ff", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 8) Push updated base
|
||||
try:
|
||||
run_git(["push", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push base branch {target_base!r} to origin after merge: {exc}"
|
||||
) from exc
|
||||
|
||||
# 9) Delete feature branch locally
|
||||
try:
|
||||
run_git(["branch", "-d", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to delete local branch {name!r} after merge: {exc}"
|
||||
) from exc
|
||||
|
||||
# 10) Delete feature branch on origin (best effort)
|
||||
try:
|
||||
run_git(["push", "origin", "--delete", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
# Remote delete is nice-to-have; surface as RuntimeError for clarity.
|
||||
raise RuntimeError(
|
||||
f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
|
||||
) from exc
|
||||
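The two helpers above were the whole public surface of this (now removed) module. Below is a minimal usage sketch, not part of the original file; the import path comes from the header comment, while the repository path and branch name are placeholders.

```python
# Illustrative only: how a thin CLI layer could drive these helpers.
from pkgmgr.branch_commands import open_branch, close_branch

repo_dir = "/home/user/Repositories/package-manager"  # placeholder path

try:
    open_branch("feature/docs", base_branch="main", cwd=repo_dir)
    # ... commit and push work on the new branch ...
    close_branch("feature/docs", base_branch="main", fallback_base="master", cwd=repo_dir)
except RuntimeError as exc:
    # Every failed git step above surfaces as a RuntimeError with context.
    print(f"[ERROR] {exc}")
```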
@@ -1,95 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Git-related helpers for the release workflow.
|
||||
|
||||
Responsibilities:
|
||||
- Run Git (or shell) commands with basic error reporting.
|
||||
- Ensure main/master are synchronized with origin before tagging.
|
||||
- Maintain the floating 'latest' tag that always points to the newest
|
||||
release tag.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
|
||||
from pkgmgr.core.git import GitError
|
||||
|
||||
|
||||
def run_git_command(cmd: str) -> None:
|
||||
"""
|
||||
Run a Git (or shell) command with basic error reporting.
|
||||
|
||||
The command is executed via the shell, primarily for readability
|
||||
when printed (as in 'git commit -am "msg"').
|
||||
"""
|
||||
print(f"[GIT] {cmd}")
|
||||
try:
|
||||
subprocess.run(cmd, shell=True, check=True)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
print(f"[ERROR] Git command failed: {cmd}")
|
||||
print(f" Exit code: {exc.returncode}")
|
||||
if exc.stdout:
|
||||
print("--- stdout ---")
|
||||
print(exc.stdout)
|
||||
if exc.stderr:
|
||||
print("--- stderr ---")
|
||||
print(exc.stderr)
|
||||
raise GitError(f"Git command failed: {cmd}") from exc
|
||||
|
||||
|
||||
def sync_branch_with_remote(branch: str, preview: bool = False) -> None:
|
||||
"""
|
||||
Ensure the local main/master branch is up-to-date before tagging.
|
||||
|
||||
Behaviour:
|
||||
- For main/master: run 'git fetch origin' and 'git pull origin <branch>'.
|
||||
- For all other branches: only log that no automatic sync is performed.
|
||||
"""
|
||||
if branch not in ("main", "master"):
|
||||
print(
|
||||
f"[INFO] Skipping automatic git pull for non-main/master branch "
|
||||
f"{branch}."
|
||||
)
|
||||
return
|
||||
|
||||
print(
|
||||
f"[INFO] Updating branch {branch} from origin before creating tags..."
|
||||
)
|
||||
|
||||
if preview:
|
||||
print("[PREVIEW] Would run: git fetch origin")
|
||||
print(f"[PREVIEW] Would run: git pull origin {branch}")
|
||||
return
|
||||
|
||||
run_git_command("git fetch origin")
|
||||
run_git_command(f"git pull origin {branch}")
|
||||
|
||||
|
||||
def update_latest_tag(new_tag: str, preview: bool = False) -> None:
|
||||
"""
|
||||
Move the floating 'latest' tag to the newly created release tag.
|
||||
|
||||
Implementation details:
|
||||
- We explicitly dereference the tag object via `<tag>^{}` so that
|
||||
'latest' always points at the underlying commit, not at another tag.
|
||||
- We create/update 'latest' as an annotated tag with a short message so
|
||||
Git configurations that enforce annotated/signed tags do not fail
|
||||
with "no tag message".
|
||||
"""
|
||||
target_ref = f"{new_tag}^{{}}"
|
||||
print(f"[INFO] Updating 'latest' tag to point at {new_tag} (commit {target_ref})...")
|
||||
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run: git tag -f -a latest {target_ref} "
|
||||
f'-m "Floating latest tag for {new_tag}"')
|
||||
print("[PREVIEW] Would run: git push origin latest --force")
|
||||
return
|
||||
|
||||
run_git_command(
|
||||
f'git tag -f -a latest {target_ref} '
|
||||
f'-m "Floating latest tag for {new_tag}"'
|
||||
)
|
||||
run_git_command("git push origin latest --force")
|
||||
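A short sketch of the preview behaviour of these release helpers. The hunk does not show the module path, so the import below is an assumption; with `preview=True` the functions only print the git commands they would run.

```python
# Assumed import path (not shown in this hunk) - adjust to the real module location.
from pkgmgr.actions.release.git import sync_branch_with_remote, update_latest_tag

sync_branch_with_remote("main", preview=True)   # would run: git fetch origin; git pull origin main
update_latest_tag("v0.9.1", preview=True)       # would re-point 'latest' and force-push it
```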
@@ -1,294 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Repository installation pipeline for pkgmgr.
|
||||
|
||||
This module orchestrates the installation of repositories by:
|
||||
|
||||
1. Ensuring the repository directory exists (cloning if necessary).
|
||||
2. Verifying the repository according to the configured policies.
|
||||
3. Creating executable links using create_ink(), after resolving the
|
||||
appropriate command via resolve_command_for_repo().
|
||||
4. Running a sequence of modular installer components that handle
|
||||
specific technologies or manifests (PKGBUILD, Nix flakes, Python
|
||||
via pyproject.toml, Makefile, OS-specific package metadata).
|
||||
|
||||
The goal is to keep this file thin and delegate most logic to small,
|
||||
focused installer classes.
|
||||
"""
|
||||
|
||||
import os
|
||||
from typing import List, Dict, Any
|
||||
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
from pkgmgr.core.command.ink import create_ink
|
||||
from pkgmgr.core.repository.verify import verify_repository
|
||||
from pkgmgr.actions.repository.clone import clone_repos
|
||||
from pkgmgr.actions.repository.install.context import RepoContext
|
||||
from pkgmgr.core.command.resolve import resolve_command_for_repo
|
||||
|
||||
# Installer implementations
|
||||
from pkgmgr.actions.repository.install.installers.os_packages import (
|
||||
ArchPkgbuildInstaller,
|
||||
DebianControlInstaller,
|
||||
RpmSpecInstaller,
|
||||
)
|
||||
from pkgmgr.actions.repository.install.installers.nix_flake import NixFlakeInstaller
|
||||
from pkgmgr.actions.repository.install.installers.python import PythonInstaller
|
||||
from pkgmgr.actions.repository.install.installers.makefile import MakefileInstaller
|
||||
|
||||
|
||||
# Layering:
|
||||
# 1) OS packages: PKGBUILD / debian/control / RPM spec → os-deps.*
|
||||
# 2) Nix flakes (flake.nix) → e.g. python-runtime, make-install
|
||||
# 3) Python (pyproject.toml) → e.g. python-runtime, make-install
|
||||
# 4) Makefile fallback → e.g. make-install
|
||||
INSTALLERS = [
|
||||
ArchPkgbuildInstaller(), # Arch
|
||||
DebianControlInstaller(), # Debian/Ubuntu
|
||||
RpmSpecInstaller(), # Fedora/RHEL/CentOS
|
||||
NixFlakeInstaller(), # flake.nix (Nix layer)
|
||||
PythonInstaller(), # pyproject.toml
|
||||
MakefileInstaller(), # generic 'make install'
|
||||
]
|
||||
|
||||
|
||||
def _ensure_repo_dir(
|
||||
repo: Dict[str, Any],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Dict[str, Any]],
|
||||
preview: bool,
|
||||
no_verification: bool,
|
||||
clone_mode: str,
|
||||
identifier: str,
|
||||
) -> str:
|
||||
"""
|
||||
Ensure the repository directory exists. If not, attempt to clone it.
|
||||
|
||||
Returns the repository directory path or an empty string if cloning failed.
|
||||
"""
|
||||
repo_dir = get_repo_dir(repositories_base_dir, repo)
|
||||
|
||||
if not os.path.exists(repo_dir):
|
||||
print(f"Repository directory '{repo_dir}' does not exist. Cloning it now...")
|
||||
clone_repos(
|
||||
[repo],
|
||||
repositories_base_dir,
|
||||
all_repos,
|
||||
preview,
|
||||
no_verification,
|
||||
clone_mode,
|
||||
)
|
||||
if not os.path.exists(repo_dir):
|
||||
print(f"Cloning failed for repository {identifier}. Skipping installation.")
|
||||
return ""
|
||||
|
||||
return repo_dir
|
||||
|
||||
|
||||
def _verify_repo(
|
||||
repo: Dict[str, Any],
|
||||
repo_dir: str,
|
||||
no_verification: bool,
|
||||
identifier: str,
|
||||
) -> bool:
|
||||
"""
|
||||
Verify the repository using verify_repository().
|
||||
|
||||
Returns True if installation should proceed, False if it should be skipped.
|
||||
"""
|
||||
verified_info = repo.get("verified")
|
||||
verified_ok, errors, commit_hash, signing_key = verify_repository(
|
||||
repo,
|
||||
repo_dir,
|
||||
mode="local",
|
||||
no_verification=no_verification,
|
||||
)
|
||||
|
||||
if not no_verification and verified_info and not verified_ok:
|
||||
print(f"Warning: Verification failed for {identifier}:")
|
||||
for err in errors:
|
||||
print(f" - {err}")
|
||||
choice = input("Proceed with installation? (y/N): ").strip().lower()
|
||||
if choice != "y":
|
||||
print(f"Skipping installation for {identifier}.")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def _create_context(
|
||||
repo: Dict[str, Any],
|
||||
identifier: str,
|
||||
repo_dir: str,
|
||||
repositories_base_dir: str,
|
||||
bin_dir: str,
|
||||
all_repos: List[Dict[str, Any]],
|
||||
no_verification: bool,
|
||||
preview: bool,
|
||||
quiet: bool,
|
||||
clone_mode: str,
|
||||
update_dependencies: bool,
|
||||
) -> RepoContext:
|
||||
"""
|
||||
Build a RepoContext for the given repository and parameters.
|
||||
"""
|
||||
return RepoContext(
|
||||
repo=repo,
|
||||
identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
bin_dir=bin_dir,
|
||||
all_repos=all_repos,
|
||||
no_verification=no_verification,
|
||||
preview=preview,
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
)
|
||||
|
||||
|
||||
def install_repos(
|
||||
selected_repos: List[Dict[str, Any]],
|
||||
repositories_base_dir: str,
|
||||
bin_dir: str,
|
||||
all_repos: List[Dict[str, Any]],
|
||||
no_verification: bool,
|
||||
preview: bool,
|
||||
quiet: bool,
|
||||
clone_mode: str,
|
||||
update_dependencies: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Install repositories by creating symbolic links and processing standard
|
||||
manifest files (PKGBUILD, flake.nix, Python manifests, Makefile, etc.)
|
||||
via dedicated installer components.
|
||||
|
||||
Any installer failure (SystemExit) is treated as fatal and will abort
|
||||
the current installation.
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
identifier = get_repo_identifier(repo, all_repos)
|
||||
repo_dir = _ensure_repo_dir(
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
no_verification=no_verification,
|
||||
clone_mode=clone_mode,
|
||||
identifier=identifier,
|
||||
)
|
||||
if not repo_dir:
|
||||
continue
|
||||
|
||||
if not _verify_repo(
|
||||
repo=repo,
|
||||
repo_dir=repo_dir,
|
||||
no_verification=no_verification,
|
||||
identifier=identifier,
|
||||
):
|
||||
continue
|
||||
|
||||
ctx = _create_context(
|
||||
repo=repo,
|
||||
identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
bin_dir=bin_dir,
|
||||
all_repos=all_repos,
|
||||
no_verification=no_verification,
|
||||
preview=preview,
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Resolve the command for this repository before creating the link.
|
||||
# If no command is resolved, no link will be created.
|
||||
# ------------------------------------------------------------
|
||||
resolved_command = resolve_command_for_repo(
|
||||
repo=repo,
|
||||
repo_identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
)
|
||||
|
||||
if resolved_command:
|
||||
repo["command"] = resolved_command
|
||||
else:
|
||||
repo.pop("command", None)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Create the symlink using create_ink (if a command is set).
|
||||
# ------------------------------------------------------------
|
||||
create_ink(
|
||||
repo,
|
||||
repositories_base_dir,
|
||||
bin_dir,
|
||||
all_repos,
|
||||
quiet=quiet,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
# Track which logical capabilities have already been provided by
|
||||
# earlier installers for this repository. This allows us to skip
|
||||
# installers that would only duplicate work (e.g. Python runtime
|
||||
# already provided by Nix flake → skip pyproject/Makefile).
|
||||
provided_capabilities: set[str] = set()
|
||||
|
||||
# Run all installers that support this repository, but only if they
|
||||
# provide at least one capability that is not yet satisfied.
|
||||
for installer in INSTALLERS:
|
||||
if not installer.supports(ctx):
|
||||
continue
|
||||
|
||||
caps = installer.discover_capabilities(ctx)
|
||||
|
||||
# If the installer declares capabilities and *all* of them are
|
||||
# already provided, we can safely skip it.
|
||||
if caps and caps.issubset(provided_capabilities):
|
||||
if not quiet:
|
||||
print(
|
||||
f"Skipping installer {installer.__class__.__name__} "
|
||||
f"for {identifier} – capabilities {caps} already provided."
|
||||
)
|
||||
continue
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Debug output + clear error if an installer fails
|
||||
# ------------------------------------------------------------
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or '∅'})..."
|
||||
)
|
||||
|
||||
try:
|
||||
installer.run(ctx)
|
||||
except SystemExit as exc:
|
||||
exit_code = exc.code if isinstance(exc.code, int) else str(exc.code)
|
||||
|
||||
print(
|
||||
f"[ERROR] Installer {installer.__class__.__name__} failed "
|
||||
f"for repository {identifier} (dir: {repo_dir}) "
|
||||
f"with exit code {exit_code}."
|
||||
)
|
||||
print(
|
||||
"[ERROR] This usually means an underlying command failed "
|
||||
"(e.g. 'make install', 'nix build', 'pip install', ...)."
|
||||
)
|
||||
print(
|
||||
"[ERROR] Check the log above for the exact command output. "
|
||||
"You can also run this repository in isolation via:\n"
|
||||
f" pkgmgr install {identifier} --clone-mode shallow --no-verification"
|
||||
)
|
||||
|
||||
# Re-raise so that CLI/tests fail clearly,
|
||||
# but now with much more context.
|
||||
raise
|
||||
|
||||
# Only merge capabilities if the installer succeeded
|
||||
provided_capabilities.update(caps)
|
||||
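The capability bookkeeping in the loop above is the core of the layer ordering. The following standalone sketch uses toy installers instead of the real classes to show why a later installer is skipped once all of its capabilities are already provided.

```python
# Toy reimplementation of the skip logic above; names and capabilities are illustrative only.
class ToyInstaller:
    def __init__(self, name: str, caps: set[str]) -> None:
        self.name = name
        self.caps = caps

    def discover_capabilities(self) -> set[str]:
        return self.caps

    def run(self) -> None:
        print(f"running {self.name}")


installers = [
    ToyInstaller("nix-flake", {"python-runtime"}),
    ToyInstaller("python-pip", {"python-runtime"}),  # duplicate capability
    ToyInstaller("makefile", {"make-install"}),
]

provided: set[str] = set()
for installer in installers:
    caps = installer.discover_capabilities()
    if caps and caps.issubset(provided):
        continue            # "python-pip" is skipped here
    installer.run()
    provided.update(caps)   # only merged after a successful run
```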
@@ -1,19 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installer package for pkgmgr.

This exposes all installer classes so users can import them directly from
pkgmgr.actions.repository.install.installers.
"""

from pkgmgr.actions.repository.install.installers.base import BaseInstaller  # noqa: F401
from pkgmgr.actions.repository.install.installers.nix_flake import NixFlakeInstaller  # noqa: F401
from pkgmgr.actions.repository.install.installers.python import PythonInstaller  # noqa: F401
from pkgmgr.actions.repository.install.installers.makefile import MakefileInstaller  # noqa: F401

# OS-specific installers
from pkgmgr.actions.repository.install.installers.os_packages.arch_pkgbuild import ArchPkgbuildInstaller  # noqa: F401
from pkgmgr.actions.repository.install.installers.os_packages.debian_control import DebianControlInstaller  # noqa: F401
from pkgmgr.actions.repository.install.installers.os_packages.rpm_spec import RpmSpecInstaller  # noqa: F401
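As the docstring says, the re-exports let callers import every installer from the package root instead of the individual modules, for example:

```python
from pkgmgr.actions.repository.install.installers import (
    MakefileInstaller,
    NixFlakeInstaller,
    RpmSpecInstaller,
)
```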
@@ -1,93 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installer that triggers `make install` if a Makefile is present and
|
||||
the Makefile actually defines an 'install' target.
|
||||
|
||||
This is useful for repositories that expose a standard Makefile-based
|
||||
installation step.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from pkgmgr.actions.repository.install.context import RepoContext
|
||||
from pkgmgr.actions.repository.install.installers.base import BaseInstaller
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
|
||||
class MakefileInstaller(BaseInstaller):
|
||||
"""Run `make install` if a Makefile with an 'install' target exists."""
|
||||
|
||||
# Logical layer name, used by capability matchers.
|
||||
layer = "makefile"
|
||||
|
||||
MAKEFILE_NAME = "Makefile"
|
||||
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
"""Return True if a Makefile exists in the repository directory."""
|
||||
makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
|
||||
return os.path.exists(makefile_path)
|
||||
|
||||
def _has_install_target(self, makefile_path: str) -> bool:
|
||||
"""
|
||||
Check whether the Makefile defines an 'install' target.
|
||||
|
||||
We treat the presence of a real install target as either:
|
||||
- a line starting with 'install:' (optionally preceded by whitespace), or
|
||||
- a .PHONY line that lists 'install' as one of the targets.
|
||||
"""
|
||||
try:
|
||||
with open(makefile_path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
content = f.read()
|
||||
except OSError:
|
||||
# If we cannot read the Makefile for some reason, assume no target.
|
||||
return False
|
||||
|
||||
# install: ...
|
||||
if re.search(r"^\s*install\s*:", content, flags=re.MULTILINE):
|
||||
return True
|
||||
|
||||
# .PHONY: ... install ...
|
||||
if re.search(r"^\s*\.PHONY\s*:\s*.*\binstall\b", content, flags=re.MULTILINE):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Execute `make install` in the repository directory, but only if an
|
||||
'install' target is actually defined in the Makefile.
|
||||
|
||||
Any failure in `make install` is treated as a fatal error and will
|
||||
propagate as SystemExit from run_command().
|
||||
"""
|
||||
makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
|
||||
|
||||
if not os.path.exists(makefile_path):
|
||||
# Should normally not happen if supports() was checked before,
|
||||
# but keep this guard for robustness.
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] Makefile '{makefile_path}' not found, "
|
||||
"skipping make install."
|
||||
)
|
||||
return
|
||||
|
||||
if not self._has_install_target(makefile_path):
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
"[pkgmgr] Skipping Makefile install: no 'install' target "
|
||||
f"found in {makefile_path}."
|
||||
)
|
||||
return
|
||||
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running 'make install' in {ctx.repo_dir} "
|
||||
"(install target detected in Makefile)."
|
||||
)
|
||||
|
||||
cmd = "make install"
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
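The two regular expressions above are the whole install-target detection. A quick self-contained check of what they match; the Makefile snippets are made up for illustration.

```python
import re

with_target = "install:\n\tinstall -m0755 pkgmgr /usr/local/bin/pkgmgr\n"
phony_decl = ".PHONY: clean test install\n"
no_install = "build:\n\tgcc main.c\n"

print(bool(re.search(r"^\s*install\s*:", with_target, flags=re.MULTILINE)))                  # True
print(bool(re.search(r"^\s*\.PHONY\s*:\s*.*\binstall\b", phony_decl, flags=re.MULTILINE)))   # True
print(bool(re.search(r"^\s*install\s*:", no_install, flags=re.MULTILINE)))                   # False -> skipped
```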
@@ -1,106 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installer for Nix flakes.
|
||||
|
||||
If a repository contains flake.nix and the 'nix' command is available, this
|
||||
installer will try to install profile outputs from the flake.
|
||||
|
||||
Behavior:
|
||||
- If flake.nix is present and `nix` exists on PATH:
|
||||
* First remove any existing `package-manager` profile entry (best-effort).
|
||||
* Then install the flake outputs (`pkgmgr`, `default`) via `nix profile install`.
|
||||
- Failure installing `pkgmgr` is treated as fatal.
|
||||
- Failure installing `default` is logged as an error/warning but does not abort.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pkgmgr.actions.repository.install.installers.base import BaseInstaller
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.repository.install.context import RepoContext
|
||||
from pkgmgr.actions.repository.install import InstallContext
|
||||
|
||||
|
||||
class NixFlakeInstaller(BaseInstaller):
|
||||
"""Install Nix flake profiles for repositories that define flake.nix."""
|
||||
|
||||
# Logical layer name, used by capability matchers.
|
||||
layer = "nix"
|
||||
|
||||
FLAKE_FILE = "flake.nix"
|
||||
PROFILE_NAME = "package-manager"
|
||||
|
||||
def supports(self, ctx: "RepoContext") -> bool:
|
||||
"""
|
||||
Only support repositories that:
|
||||
- Have a flake.nix
|
||||
- And have the `nix` command available.
|
||||
"""
|
||||
if shutil.which("nix") is None:
|
||||
return False
|
||||
flake_path = os.path.join(ctx.repo_dir, self.FLAKE_FILE)
|
||||
return os.path.exists(flake_path)
|
||||
|
||||
def _ensure_old_profile_removed(self, ctx: "RepoContext") -> None:
|
||||
"""
|
||||
Best-effort removal of an existing profile entry.
|
||||
|
||||
This handles the "already provides the following file" conflict by
|
||||
removing previous `package-manager` installations before we install
|
||||
the new one.
|
||||
|
||||
Any error in `nix profile remove` is intentionally ignored, because
|
||||
a missing profile entry is not a fatal condition.
|
||||
"""
|
||||
if shutil.which("nix") is None:
|
||||
return
|
||||
|
||||
cmd = f"nix profile remove {self.PROFILE_NAME} || true"
|
||||
try:
|
||||
# NOTE: no allow_failure here → matches the existing unit tests
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
except SystemExit:
|
||||
# Unit tests explicitly assert this is swallowed
|
||||
pass
|
||||
|
||||
def run(self, ctx: "InstallContext") -> None:
|
||||
"""
|
||||
Install Nix flake profile outputs (pkgmgr, default).
|
||||
|
||||
Any failure installing `pkgmgr` is treated as fatal (SystemExit).
|
||||
A failure installing `default` is logged but does not abort.
|
||||
"""
|
||||
# Reuse supports() to keep logic in one place
|
||||
if not self.supports(ctx): # type: ignore[arg-type]
|
||||
return
|
||||
|
||||
print("Nix flake detected, attempting to install profile outputs...")
|
||||
|
||||
# Handle the "already installed" case up-front:
|
||||
self._ensure_old_profile_removed(ctx) # type: ignore[arg-type]
|
||||
|
||||
for output in ("pkgmgr", "default"):
|
||||
cmd = f"nix profile install {ctx.repo_dir}#{output}"
|
||||
|
||||
try:
|
||||
# For 'default' we don't want the process to exit on error
|
||||
allow_failure = output == "default"
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview, allow_failure=allow_failure)
|
||||
print(f"Nix flake output '{output}' successfully installed.")
|
||||
except SystemExit as e:
|
||||
print(f"[Error] Failed to install Nix flake output '{output}': {e}")
|
||||
if output == "pkgmgr":
|
||||
# Broken main CLI install → fatal
|
||||
raise
|
||||
# For 'default' we log and continue
|
||||
print(
|
||||
"[Warning] Continuing despite failure to install 'default' "
|
||||
"because 'pkgmgr' is already installed."
|
||||
)
|
||||
break
|
||||
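Condensed, the installer above boils down to three commands per repository. This sketch only prints them; the repository path is a placeholder.

```python
repo_dir = "/home/user/Repositories/package-manager"  # placeholder

commands = [
    "nix profile remove package-manager || true",   # best effort, failures swallowed
    f"nix profile install {repo_dir}#pkgmgr",        # fatal if it fails
    f"nix profile install {repo_dir}#default",       # failure is only logged
]
for cmd in commands:
    print(cmd)
```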
@@ -1,160 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installer for RPM-based packages defined in *.spec files.
|
||||
|
||||
This installer:
|
||||
|
||||
1. Installs build dependencies via dnf/yum builddep (where available)
|
||||
2. Uses rpmbuild to build RPMs from the provided .spec file
|
||||
3. Installs the resulting RPMs via `rpm -i`
|
||||
|
||||
It targets RPM-based systems (Fedora / RHEL / CentOS / Rocky / Alma, etc.).
|
||||
"""
|
||||
|
||||
import glob
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from typing import List, Optional
|
||||
|
||||
from pkgmgr.actions.repository.install.context import RepoContext
|
||||
from pkgmgr.actions.repository.install.installers.base import BaseInstaller
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
|
||||
class RpmSpecInstaller(BaseInstaller):
|
||||
"""
|
||||
Build and install RPM-based packages from *.spec files.
|
||||
|
||||
This installer is responsible for the full build + install of the
|
||||
application on RPM-like systems.
|
||||
"""
|
||||
|
||||
# Logical layer name, used by capability matchers.
|
||||
layer = "os-packages"
|
||||
|
||||
def _is_rpm_like(self) -> bool:
|
||||
"""
|
||||
Basic RPM-like detection:
|
||||
|
||||
- rpmbuild must be available
|
||||
- at least one of dnf / yum / yum-builddep must be present
|
||||
"""
|
||||
if shutil.which("rpmbuild") is None:
|
||||
return False
|
||||
|
||||
has_dnf = shutil.which("dnf") is not None
|
||||
has_yum = shutil.which("yum") is not None
|
||||
has_yum_builddep = shutil.which("yum-builddep") is not None
|
||||
|
||||
return has_dnf or has_yum or has_yum_builddep
|
||||
|
||||
def _spec_path(self, ctx: RepoContext) -> Optional[str]:
|
||||
"""Return the first *.spec file in the repository root, if any."""
|
||||
pattern = os.path.join(ctx.repo_dir, "*.spec")
|
||||
matches = sorted(glob.glob(pattern))
|
||||
if not matches:
|
||||
return None
|
||||
return matches[0]
|
||||
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
"""
|
||||
This installer is supported if:
|
||||
- we are on an RPM-based system (rpmbuild + dnf/yum/yum-builddep available), and
|
||||
- a *.spec file exists in the repository root.
|
||||
"""
|
||||
if not self._is_rpm_like():
|
||||
return False
|
||||
|
||||
return self._spec_path(ctx) is not None
|
||||
|
||||
def _find_built_rpms(self) -> List[str]:
|
||||
"""
|
||||
Find RPMs built by rpmbuild.
|
||||
|
||||
By default, rpmbuild outputs RPMs into:
|
||||
~/rpmbuild/RPMS/*/*.rpm
|
||||
"""
|
||||
home = os.path.expanduser("~")
|
||||
pattern = os.path.join(home, "rpmbuild", "RPMS", "**", "*.rpm")
|
||||
return sorted(glob.glob(pattern, recursive=True))
|
||||
|
||||
def _install_build_dependencies(self, ctx: RepoContext, spec_path: str) -> None:
|
||||
"""
|
||||
Install build dependencies for the given .spec file.
|
||||
|
||||
Strategy (best-effort):
|
||||
|
||||
1. If dnf is available:
|
||||
sudo dnf builddep -y <spec>
|
||||
2. Else if yum-builddep is available:
|
||||
sudo yum-builddep -y <spec>
|
||||
3. Else if yum is available:
|
||||
sudo yum-builddep -y <spec> # Some systems provide it via yum plugin
|
||||
4. Otherwise: print a warning and skip automatic builddep install.
|
||||
|
||||
Any failure in builddep installation is treated as fatal (SystemExit),
|
||||
consistent with other installer steps.
|
||||
"""
|
||||
spec_basename = os.path.basename(spec_path)
|
||||
|
||||
if shutil.which("dnf") is not None:
|
||||
cmd = f"sudo dnf builddep -y {spec_basename}"
|
||||
elif shutil.which("yum-builddep") is not None:
|
||||
cmd = f"sudo yum-builddep -y {spec_basename}"
|
||||
elif shutil.which("yum") is not None:
|
||||
# Some distributions ship yum-builddep as a plugin.
|
||||
cmd = f"sudo yum-builddep -y {spec_basename}"
|
||||
else:
|
||||
print(
|
||||
"[Warning] No suitable RPM builddep tool (dnf/yum-builddep/yum) found. "
|
||||
"Skipping automatic build dependency installation for RPM."
|
||||
)
|
||||
return
|
||||
|
||||
# Run builddep in the repository directory so relative spec paths work.
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Build and install RPM-based packages.
|
||||
|
||||
Steps:
|
||||
1. dnf/yum builddep <spec> (automatic build dependency installation)
|
||||
2. rpmbuild -ba path/to/spec
|
||||
3. sudo rpm -i ~/rpmbuild/RPMS/*/*.rpm
|
||||
"""
|
||||
spec_path = self._spec_path(ctx)
|
||||
if not spec_path:
|
||||
return
|
||||
|
||||
# 1) Install build dependencies
|
||||
self._install_build_dependencies(ctx, spec_path)
|
||||
|
||||
# 2) Build RPMs
|
||||
# Use the full spec path, but run in the repo directory.
|
||||
spec_basename = os.path.basename(spec_path)
|
||||
build_cmd = f"rpmbuild -ba {spec_basename}"
|
||||
run_command(build_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
# 3) Find built RPMs
|
||||
rpms = self._find_built_rpms()
|
||||
if not rpms:
|
||||
print(
|
||||
"[Warning] No RPM files found after rpmbuild. "
|
||||
"Skipping RPM package installation."
|
||||
)
|
||||
return
|
||||
|
||||
# 4) Install RPMs
|
||||
if shutil.which("rpm") is None:
|
||||
print(
|
||||
"[Warning] rpm binary not found on PATH. "
|
||||
"Cannot install built RPMs."
|
||||
)
|
||||
return
|
||||
|
||||
install_cmd = "sudo rpm -i " + " ".join(rpms)
|
||||
run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
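The tool-selection chain in `_install_build_dependencies()` can be summarized in a few lines. A standalone sketch of that chain; the function name here is mine, not from the module.

```python
import shutil
from typing import Optional


def builddep_command(spec_basename: str) -> Optional[str]:
    """Return the builddep command the installer above would run, or None to skip."""
    if shutil.which("dnf"):
        return f"sudo dnf builddep -y {spec_basename}"
    if shutil.which("yum-builddep") or shutil.which("yum"):
        return f"sudo yum-builddep -y {spec_basename}"
    return None  # the real installer prints a warning and skips


print(builddep_command("package-manager.spec"))
```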
@@ -1,68 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installer for Python projects based on pyproject.toml.
|
||||
|
||||
Strategy:
|
||||
- Determine a pip command in this order:
|
||||
1. $PKGMGR_PIP (explicit override, e.g. ~/.venvs/pkgmgr/bin/pip)
|
||||
2. sys.executable -m pip (current interpreter)
|
||||
3. "pip" from PATH as last resort
|
||||
- If pyproject.toml exists: pip install .
|
||||
|
||||
All installation failures are treated as fatal errors (SystemExit).
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from pkgmgr.actions.repository.install.installers.base import BaseInstaller
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
|
||||
class PythonInstaller(BaseInstaller):
|
||||
"""Install Python projects and dependencies via pip."""
|
||||
|
||||
# Logical layer name, used by capability matchers.
|
||||
layer = "python"
|
||||
|
||||
def supports(self, ctx) -> bool:
|
||||
"""
|
||||
Return True if this installer should handle the given repository.
|
||||
|
||||
Only pyproject.toml is supported as the single source of truth
|
||||
for Python dependencies and packaging metadata.
|
||||
"""
|
||||
repo_dir = ctx.repo_dir
|
||||
return os.path.exists(os.path.join(repo_dir, "pyproject.toml"))
|
||||
|
||||
def _pip_cmd(self) -> str:
|
||||
"""
|
||||
Resolve the pip command to use.
|
||||
"""
|
||||
explicit = os.environ.get("PKGMGR_PIP", "").strip()
|
||||
if explicit:
|
||||
return explicit
|
||||
|
||||
if sys.executable:
|
||||
return f"{sys.executable} -m pip"
|
||||
|
||||
return "pip"
|
||||
|
||||
def run(self, ctx) -> None:
|
||||
"""
|
||||
Install Python project defined via pyproject.toml.
|
||||
|
||||
Any pip failure is propagated as SystemExit.
|
||||
"""
|
||||
pip_cmd = self._pip_cmd()
|
||||
|
||||
pyproject = os.path.join(ctx.repo_dir, "pyproject.toml")
|
||||
if os.path.exists(pyproject):
|
||||
print(
|
||||
f"pyproject.toml found in {ctx.identifier}, "
|
||||
f"installing Python project..."
|
||||
)
|
||||
cmd = f"{pip_cmd} install ."
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
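The pip resolution order in `_pip_cmd()` is worth seeing in isolation; this sketch mirrors it one-to-one.

```python
import os
import sys


def pip_cmd() -> str:
    explicit = os.environ.get("PKGMGR_PIP", "").strip()
    if explicit:
        return explicit                    # 1) explicit override
    if sys.executable:
        return f"{sys.executable} -m pip"  # 2) current interpreter
    return "pip"                           # 3) PATH fallback


print(pip_cmd())  # e.g. "/usr/bin/python3 -m pip"
```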
@@ -1,110 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from pkgmgr.core.config.load import load_config
|
||||
|
||||
from .context import CLIContext
|
||||
from .parser import create_parser
|
||||
from .dispatch import dispatch_command
|
||||
|
||||
__all__ = ["CLIContext", "create_parser", "dispatch_command", "main"]
|
||||
|
||||
|
||||
# User config lives in the home directory:
|
||||
# ~/.config/pkgmgr/config.yaml
|
||||
USER_CONFIG_PATH = os.path.expanduser("~/.config/pkgmgr/config.yaml")
|
||||
|
||||
DESCRIPTION_TEXT = """\
|
||||
\033[1;32mPackage Manager 🤖📦\033[0m
|
||||
\033[3mKevin's Package Manager is a multi-repository, multi-package, and multi-format
|
||||
development tool crafted by and designed for:\033[0m
|
||||
\033[1;34mKevin Veen-Birkenbach\033[0m
|
||||
\033[4mhttps://www.veen.world/\033[0m
|
||||
|
||||
\033[1mOverview:\033[0m
|
||||
A powerful toolchain that unifies and automates workflows across heterogeneous
|
||||
project ecosystems. pkgmgr is not only a package manager — it is a full
|
||||
developer-oriented orchestration tool.
|
||||
|
||||
It automatically detects, merges, and processes metadata from multiple
|
||||
dependency formats, including:
|
||||
• \033[1;33mPython:\033[0m pyproject.toml, requirements.txt
|
||||
• \033[1;33mNix:\033[0m flake.nix
|
||||
• \033[1;33mArch Linux:\033[0m PKGBUILD
|
||||
• \033[1;33mAnsible:\033[0m requirements.yml
|
||||
• \033[1;33mpkgmgr-native:\033[0m pkgmgr.yml
|
||||
|
||||
This allows pkgmgr to perform installation, updates, verification, dependency
|
||||
resolution, and synchronization across complex multi-repo environments — with a
|
||||
single unified command-line interface.
|
||||
|
||||
\033[1mDeveloper Tools:\033[0m
|
||||
pkgmgr includes an integrated toolbox to enhance daily development workflows:
|
||||
|
||||
• \033[1;33mVS Code integration:\033[0m Auto-generate and open multi-repo workspaces
|
||||
• \033[1;33mTerminal integration:\033[0m Open repositories in new GNOME Terminal tabs
|
||||
• \033[1;33mExplorer integration:\033[0m Open repositories in your file manager
|
||||
• \033[1;33mRelease automation:\033[0m Version bumping, changelog updates, and tagging
|
||||
• \033[1;33mBatch operations:\033[0m Execute shell commands across multiple repositories
|
||||
• \033[1;33mGit/Docker/Make wrappers:\033[0m Unified command proxying for many tools
|
||||
|
||||
\033[1mCapabilities:\033[0m
|
||||
• Clone, pull, verify, update, and manage many repositories at once
|
||||
• Resolve dependencies across languages and ecosystems
|
||||
• Standardize install/update workflows
|
||||
• Create symbolic executable wrappers for any project
|
||||
• Merge configuration from default + user config layers
|
||||
|
||||
Use pkgmgr as both a robust package management framework and a versatile
|
||||
development orchestration tool.
|
||||
|
||||
For detailed help on each command, use:
|
||||
\033[1mpkgmgr <command> --help\033[0m
|
||||
"""
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""
|
||||
Entry point for the pkgmgr CLI.
|
||||
"""
|
||||
|
||||
config_merged = load_config(USER_CONFIG_PATH)
|
||||
|
||||
# Directories: be robust and provide sane defaults if missing
|
||||
directories = config_merged.get("directories") or {}
|
||||
repositories_dir = os.path.expanduser(
|
||||
directories.get("repositories", "~/Repositories")
|
||||
)
|
||||
binaries_dir = os.path.expanduser(
|
||||
directories.get("binaries", "~/.local/bin")
|
||||
)
|
||||
|
||||
# Ensure the merged config actually contains the resolved directories
|
||||
config_merged.setdefault("directories", {})
|
||||
config_merged["directories"]["repositories"] = repositories_dir
|
||||
config_merged["directories"]["binaries"] = binaries_dir
|
||||
|
||||
all_repositories = config_merged.get("repositories", [])
|
||||
|
||||
ctx = CLIContext(
|
||||
config_merged=config_merged,
|
||||
repositories_base_dir=repositories_dir,
|
||||
all_repositories=all_repositories,
|
||||
binaries_dir=binaries_dir,
|
||||
user_config_path=USER_CONFIG_PATH,
|
||||
)
|
||||
|
||||
parser = create_parser(DESCRIPTION_TEXT)
|
||||
args = parser.parse_args()
|
||||
|
||||
if not getattr(args, "command", None):
|
||||
parser.print_help()
|
||||
return
|
||||
|
||||
dispatch_command(args, ctx)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
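The directory handling in main() is defensive: missing keys fall back to defaults and the resolved values are written back into the merged config. A small sketch with a hand-written config dict instead of load_config():

```python
import os

config_merged = {"directories": {"repositories": "~/Repos"}}  # "binaries" intentionally missing

directories = config_merged.get("directories") or {}
repositories_dir = os.path.expanduser(directories.get("repositories", "~/Repositories"))
binaries_dir = os.path.expanduser(directories.get("binaries", "~/.local/bin"))

config_merged.setdefault("directories", {})
config_merged["directories"]["repositories"] = repositories_dir
config_merged["directories"]["binaries"] = binaries_dir

print(config_merged["directories"])  # both keys now present and expanded
```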
@@ -1,83 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pkgmgr.cli.context import CLIContext
|
||||
from pkgmgr.core.command.run import run_command
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
|
||||
|
||||
Repository = Dict[str, Any]
|
||||
|
||||
|
||||
def handle_tools_command(
|
||||
args,
|
||||
ctx: CLIContext,
|
||||
selected: List[Repository],
|
||||
) -> None:
|
||||
"""
|
||||
Handle integration commands:
|
||||
- explore (file manager)
|
||||
- terminal (GNOME Terminal)
|
||||
- code (VS Code workspace)
|
||||
"""
|
||||
|
||||
# --------------------------------------------------------
|
||||
# explore
|
||||
# --------------------------------------------------------
|
||||
if args.command == "explore":
|
||||
for repository in selected:
|
||||
run_command(
|
||||
f"nautilus {repository['directory']} & disown"
|
||||
)
|
||||
return
|
||||
|
||||
# --------------------------------------------------------
|
||||
# terminal
|
||||
# --------------------------------------------------------
|
||||
if args.command == "terminal":
|
||||
for repository in selected:
|
||||
run_command(
|
||||
f'gnome-terminal --tab --working-directory="{repository["directory"]}"'
|
||||
)
|
||||
return
|
||||
|
||||
# --------------------------------------------------------
|
||||
# code
|
||||
# --------------------------------------------------------
|
||||
if args.command == "code":
|
||||
if not selected:
|
||||
print("No repositories selected.")
|
||||
return
|
||||
|
||||
identifiers = [
|
||||
get_repo_identifier(repo, ctx.all_repositories)
|
||||
for repo in selected
|
||||
]
|
||||
sorted_identifiers = sorted(identifiers)
|
||||
workspace_name = "_".join(sorted_identifiers) + ".code-workspace"
|
||||
|
||||
workspaces_dir = os.path.expanduser(
|
||||
ctx.config_merged.get("directories").get("workspaces")
|
||||
)
|
||||
os.makedirs(workspaces_dir, exist_ok=True)
|
||||
workspace_file = os.path.join(workspaces_dir, workspace_name)
|
||||
|
||||
folders = [{"path": repository["directory"]} for repository in selected]
|
||||
|
||||
workspace_data = {
|
||||
"folders": folders,
|
||||
"settings": {},
|
||||
}
|
||||
if not os.path.exists(workspace_file):
|
||||
with open(workspace_file, "w") as f:
|
||||
json.dump(workspace_data, f, indent=4)
|
||||
print(f"Created workspace file: {workspace_file}")
|
||||
else:
|
||||
print(f"Using existing workspace file: {workspace_file}")
|
||||
|
||||
run_command(f'code "{workspace_file}"')
|
||||
return
|
||||
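For the `code` subcommand, the workspace file name is derived deterministically from the sorted identifiers, so the same selection always reuses the same file. A sketch with hypothetical identifiers and paths:

```python
import json

identifiers = ["pkgmgr", "cymais"]  # hypothetical repository identifiers
workspace_name = "_".join(sorted(identifiers)) + ".code-workspace"
print(workspace_name)  # cymais_pkgmgr.code-workspace

workspace_data = {
    "folders": [
        {"path": "/home/user/Repositories/cymais"},
        {"path": "/home/user/Repositories/pkgmgr"},
    ],
    "settings": {},
}
print(json.dumps(workspace_data, indent=4))
```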
@@ -1,505 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
|
||||
from pkgmgr.cli.proxy import register_proxy_commands
|
||||
|
||||
|
||||
class SortedSubParsersAction(argparse._SubParsersAction):
|
||||
"""
|
||||
Subparsers action that keeps choices sorted alphabetically.
|
||||
"""
|
||||
|
||||
def add_parser(self, name, **kwargs):
|
||||
parser = super().add_parser(name, **kwargs)
|
||||
# Sort choices alphabetically by dest (subcommand name)
|
||||
self._choices_actions.sort(key=lambda a: a.dest)
|
||||
return parser
|
||||
|
||||
|
||||
def add_identifier_arguments(subparser: argparse.ArgumentParser) -> None:
|
||||
"""
|
||||
Common identifier / selection arguments for many subcommands.
|
||||
|
||||
Selection modes (mutual intent, not hard-enforced):
|
||||
- identifiers (positional): select by alias / provider/account/repo
|
||||
- --all: select all repositories
|
||||
- --category / --string / --tag: filter-based selection on top
|
||||
of the full repository set
|
||||
"""
|
||||
subparser.add_argument(
|
||||
"identifiers",
|
||||
nargs="*",
|
||||
help=(
|
||||
"Identifier(s) for repositories. "
|
||||
"Default: Repository of current folder."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--all",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"Apply the subcommand to all repositories in the config. "
|
||||
"Some subcommands ask for confirmation. If you want to give this "
|
||||
"confirmation for all repositories, pipe 'yes'. E.g: "
|
||||
"yes | pkgmgr {subcommand} --all"
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--category",
|
||||
nargs="+",
|
||||
default=[],
|
||||
help=(
|
||||
"Filter repositories by category patterns derived from config "
|
||||
"filenames or repo metadata (use filename without .yml/.yaml, "
|
||||
"or /regex/ to use a regular expression)."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--string",
|
||||
default="",
|
||||
help=(
|
||||
"Filter repositories whose identifier / name / path contains this "
|
||||
"substring (case-insensitive). Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--tag",
|
||||
action="append",
|
||||
default=[],
|
||||
help=(
|
||||
"Filter repositories by tag. Matches tags from the repository "
|
||||
"collector and category tags. Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--preview",
|
||||
action="store_true",
|
||||
help="Preview changes without executing commands",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--list",
|
||||
action="store_true",
|
||||
help="List affected repositories (with preview or status)",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"-a",
|
||||
"--args",
|
||||
nargs=argparse.REMAINDER,
|
||||
dest="extra_args",
|
||||
help="Additional parameters to be attached.",
|
||||
default=[],
|
||||
)
|
||||
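A quick sketch (not part of the original file) of what the selection arguments above produce when parsed, assuming `add_identifier_arguments` is in scope:

```python
import argparse

p = argparse.ArgumentParser()
add_identifier_arguments(p)
args = p.parse_args(["pkgmgr", "--tag", "tooling", "--preview"])
print(args.identifiers, args.tag, args.preview)  # ['pkgmgr'] ['tooling'] True
```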
|
||||
|
||||
def add_install_update_arguments(subparser: argparse.ArgumentParser) -> None:
|
||||
"""
|
||||
Common arguments for install/update commands.
|
||||
"""
|
||||
add_identifier_arguments(subparser)
|
||||
subparser.add_argument(
|
||||
"-q",
|
||||
"--quiet",
|
||||
action="store_true",
|
||||
help="Suppress warnings and info messages",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--no-verification",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Disable verification via commit/gpg",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--dependencies",
|
||||
action="store_true",
|
||||
help="Also pull and update dependencies",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--clone-mode",
|
||||
choices=["ssh", "https", "shallow"],
|
||||
default="ssh",
|
||||
help=(
|
||||
"Specify the clone mode: ssh, https, or shallow "
|
||||
"(HTTPS shallow clone; default: ssh)"
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def create_parser(description_text: str) -> argparse.ArgumentParser:
|
||||
"""
|
||||
Create the top-level argument parser for pkgmgr.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description=description_text,
|
||||
formatter_class=argparse.RawTextHelpFormatter,
|
||||
)
|
||||
subparsers = parser.add_subparsers(
|
||||
dest="command",
|
||||
help="Subcommands",
|
||||
action=SortedSubParsersAction,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# install / update / deinstall / delete
|
||||
# ------------------------------------------------------------
|
||||
install_parser = subparsers.add_parser(
|
||||
"install",
|
||||
help="Setup repository/repositories alias links to executables",
|
||||
)
|
||||
add_install_update_arguments(install_parser)
|
||||
|
||||
update_parser = subparsers.add_parser(
|
||||
"update",
|
||||
help="Update (pull + install) repository/repositories",
|
||||
)
|
||||
add_install_update_arguments(update_parser)
|
||||
update_parser.add_argument(
|
||||
"--system",
|
||||
action="store_true",
|
||||
help="Include system update commands",
|
||||
)
|
||||
|
||||
deinstall_parser = subparsers.add_parser(
|
||||
"deinstall",
|
||||
help="Remove alias links to repository/repositories",
|
||||
)
|
||||
add_identifier_arguments(deinstall_parser)
|
||||
|
||||
delete_parser = subparsers.add_parser(
|
||||
"delete",
|
||||
help="Delete repository/repositories alias links to executables",
|
||||
)
|
||||
add_identifier_arguments(delete_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# create
|
||||
# ------------------------------------------------------------
|
||||
create_cmd_parser = subparsers.add_parser(
|
||||
"create",
|
||||
help=(
|
||||
"Create new repository entries: add them to the config if not "
|
||||
"already present, initialize the local repository, and push "
|
||||
"remotely if --remote is set."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(create_cmd_parser)
|
||||
create_cmd_parser.add_argument(
|
||||
"--remote",
|
||||
action="store_true",
|
||||
help="If set, add the remote and push the initial commit.",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# status
|
||||
# ------------------------------------------------------------
|
||||
status_parser = subparsers.add_parser(
|
||||
"status",
|
||||
help="Show status for repository/repositories or system",
|
||||
)
|
||||
add_identifier_arguments(status_parser)
|
||||
status_parser.add_argument(
|
||||
"--system",
|
||||
action="store_true",
|
||||
help="Show system status",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# config
|
||||
# ------------------------------------------------------------
|
||||
config_parser = subparsers.add_parser(
|
||||
"config",
|
||||
help="Manage configuration",
|
||||
)
|
||||
config_subparsers = config_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Config subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
config_show = config_subparsers.add_parser(
|
||||
"show",
|
||||
help="Show configuration",
)
add_identifier_arguments(config_show)

config_subparsers.add_parser(
"add",
help="Interactively add a new repository entry",
)

config_subparsers.add_parser(
"edit",
help="Edit configuration file with nano",
)

config_subparsers.add_parser(
"init",
help="Initialize user configuration by scanning the base directory",
)

config_delete = config_subparsers.add_parser(
"delete",
help="Delete repository entry from user config",
)
add_identifier_arguments(config_delete)

config_ignore = config_subparsers.add_parser(
"ignore",
help="Set ignore flag for repository entries in user config",
)
add_identifier_arguments(config_ignore)
config_ignore.add_argument(
"--set",
choices=["true", "false"],
required=True,
help="Set ignore to true or false",
)

config_subparsers.add_parser(
"update",
help=(
"Update default config files in ~/.config/pkgmgr/ from the "
"installed pkgmgr package (does not touch config.yaml)."
),
)

# ------------------------------------------------------------
# path / explore / terminal / code / shell
# ------------------------------------------------------------
path_parser = subparsers.add_parser(
"path",
help="Print the path(s) of repository/repositories",
)
add_identifier_arguments(path_parser)

explore_parser = subparsers.add_parser(
"explore",
help="Open repository in Nautilus file manager",
)
add_identifier_arguments(explore_parser)

terminal_parser = subparsers.add_parser(
"terminal",
help="Open repository in a new GNOME Terminal tab",
)
add_identifier_arguments(terminal_parser)

code_parser = subparsers.add_parser(
"code",
help="Open repository workspace with VS Code",
)
add_identifier_arguments(code_parser)

shell_parser = subparsers.add_parser(
"shell",
help="Execute a shell command in each repository",
)
add_identifier_arguments(shell_parser)
shell_parser.add_argument(
"-c",
"--command",
nargs=argparse.REMAINDER,
dest="shell_command",
help=(
"The shell command (and its arguments) to execute in each "
"repository"
),
default=[],
)

# ------------------------------------------------------------
# branch
# ------------------------------------------------------------
branch_parser = subparsers.add_parser(
"branch",
help="Branch-related utilities (e.g. open/close feature branches)",
)
branch_subparsers = branch_parser.add_subparsers(
dest="subcommand",
help="Branch subcommands",
required=True,
)

branch_open = branch_subparsers.add_parser(
"open",
help="Create and push a new branch on top of a base branch",
)
branch_open.add_argument(
"name",
nargs="?",
help=(
"Name of the new branch (optional; will be asked interactively "
"if omitted)"
),
)
branch_open.add_argument(
"--base",
default="main",
help="Base branch to create the new branch from (default: main)",
)

branch_close = branch_subparsers.add_parser(
"close",
help="Merge a feature branch into base and delete it",
)
branch_close.add_argument(
"name",
nargs="?",
help=(
"Name of the branch to close (optional; current branch is used "
"if omitted)"
),
)
branch_close.add_argument(
"--base",
default="main",
help=(
"Base branch to merge into (default: main; falls back to master "
"internally if main does not exist)"
),
)

# ------------------------------------------------------------
# release
# ------------------------------------------------------------
release_parser = subparsers.add_parser(
"release",
help=(
"Create a release for repository/ies by incrementing version "
"and updating the changelog."
),
)
release_parser.add_argument(
"release_type",
choices=["major", "minor", "patch"],
help="Type of version increment for the release (major, minor, patch).",
)
release_parser.add_argument(
"-m",
"--message",
default=None,
help=(
"Optional release message to add to the changelog and tag."
),
)
# Generic selection / preview / list / extra_args
add_identifier_arguments(release_parser)
# Close current branch after successful release
release_parser.add_argument(
"--close",
action="store_true",
help=(
"Close the current branch after a successful release in each "
"repository, if it is not main/master."
),
)
# Force: skip preview+confirmation and run release directly
release_parser.add_argument(
"-f",
"--force",
action="store_true",
help=(
"Skip the interactive preview+confirmation step and run the "
"release directly."
),
)

# ------------------------------------------------------------
# version
# ------------------------------------------------------------
version_parser = subparsers.add_parser(
"version",
help=(
"Show version information for repository/ies "
"(git tags, pyproject.toml, flake.nix, PKGBUILD, debian, spec, "
"Ansible Galaxy)."
),
)
add_identifier_arguments(version_parser)

# ------------------------------------------------------------
# changelog
# ------------------------------------------------------------
changelog_parser = subparsers.add_parser(
"changelog",
help=(
"Show changelog derived from Git history. "
"By default, shows the changes between the last two SemVer tags."
),
)
changelog_parser.add_argument(
"range",
nargs="?",
default="",
help=(
"Optional tag or range (e.g. v1.2.3 or v1.2.0..v1.2.3). "
"If omitted, the changelog between the last two SemVer "
"tags is shown."
),
)
add_identifier_arguments(changelog_parser)

# ------------------------------------------------------------
# list
# ------------------------------------------------------------
list_parser = subparsers.add_parser(
"list",
help="List all repositories with details and status",
)
# same selection logic as for install/update/etc.:
add_identifier_arguments(list_parser)
list_parser.add_argument(
"--status",
type=str,
default="",
help=(
"Filter repositories by status (case insensitive). "
"Use /regex/ for regular expressions."
),
)
list_parser.add_argument(
"--description",
action="store_true",
help=(
"Show an additional detailed section per repository "
"(description, homepage, tags, categories, paths)."
),
)

# ------------------------------------------------------------
# make
# ------------------------------------------------------------
make_parser = subparsers.add_parser(
"make",
help="Executes make commands",
)
add_identifier_arguments(make_parser)
make_subparsers = make_parser.add_subparsers(
dest="subcommand",
help="Make subcommands",
required=True,
)

make_install = make_subparsers.add_parser(
"install",
help="Executes the make install command",
)
add_identifier_arguments(make_install)

make_deinstall = make_subparsers.add_parser(
"deinstall",
help="Executes the make deinstall command",
)
add_identifier_arguments(make_deinstall)

# ------------------------------------------------------------
# Proxy commands (git, docker, docker compose, ...)
# ------------------------------------------------------------
register_proxy_commands(subparsers)

return parser
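For orientation, a few example invocations of the subcommands defined above (illustrative only; repository identifiers are placeholders, and the selection flags themselves come from add_identifier_arguments(), which is not shown here):

# Show the changelog between the last two SemVer tags of a repository
pkgmgr changelog pkgmgr

# Open a feature branch on top of main, then merge and delete it again
pkgmgr branch open feature/my-change --base main
pkgmgr branch close feature/my-change --base main

# Cut a patch release without the interactive preview, closing the branch afterwards
pkgmgr release patch -m "Bugfix release" --close --force

# List repositories whose status matches a regex, with detailed descriptions
pkgmgr list --status "/dirty|ahead/" --description

# Run a shell command in every selected repository
pkgmgr shell -c git status --short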
@@ -1,113 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Command resolver for repositories.

This module determines the correct command to expose via symlink.
It implements the following priority:

1. Explicit command in repo config → command
2. System package manager binary (/usr/...) → NO LINK (respect OS)
3. Nix profile binary (~/.nix-profile/bin/<id>) → command
4. Python / non-system console script on PATH → command
5. Fallback: repository's main.sh or main.py → command
6. If nothing is available → raise error

The actual symlink creation is handled by create_ink(). This resolver
only decides *what* should be used as the entrypoint, or whether no
link should be created at all.
"""

import os
import shutil
from typing import Optional


def resolve_command_for_repo(repo, repo_identifier: str, repo_dir: str) -> Optional[str]:
"""
Determine the command for this repository.

Returns:
str → path to the command (a symlink should be created)
None → do NOT create a link (e.g. system package already provides it)

On total failure (no suitable command found at any layer), this function
raises SystemExit with a descriptive error message.
"""
# ------------------------------------------------------------
# 1. Explicit command defined by repository config
# ------------------------------------------------------------
explicit = repo.get("command")
if explicit:
return explicit

home = os.path.expanduser("~")

def is_executable(path: str) -> bool:
return os.path.exists(path) and os.access(path, os.X_OK)

# ------------------------------------------------------------
# 2. System package manager binary via PATH
#
# If the binary lives under /usr/, we treat it as a system-managed
# package (e.g. installed via pacman/apt/yum). In that case, pkgmgr
# does NOT create a link at all and defers entirely to the OS.
# ------------------------------------------------------------
path_candidate = shutil.which(repo_identifier)
system_binary: Optional[str] = None
non_system_binary: Optional[str] = None

if path_candidate:
if path_candidate.startswith("/usr/"):
system_binary = path_candidate
else:
non_system_binary = path_candidate

if system_binary:
# Respect system package manager: do not create a link.
if repo.get("debug", False):
print(
f"[pkgmgr] System binary for '{repo_identifier}' found at "
f"{system_binary}; no symlink will be created."
)
return None

# ------------------------------------------------------------
# 3. Nix profile binary (~/.nix-profile/bin/<identifier>)
# ------------------------------------------------------------
nix_candidate = os.path.join(home, ".nix-profile", "bin", repo_identifier)
if is_executable(nix_candidate):
return nix_candidate

# ------------------------------------------------------------
# 4. Python / non-system console script on PATH
#
# Here we reuse the non-system PATH candidate (e.g. from a venv or
# a user-local install like ~/.local/bin). This is treated as a
# valid command target.
# ------------------------------------------------------------
if non_system_binary and is_executable(non_system_binary):
return non_system_binary

# ------------------------------------------------------------
# 5. Fallback: main.sh / main.py inside the repository
# ------------------------------------------------------------
main_sh = os.path.join(repo_dir, "main.sh")
main_py = os.path.join(repo_dir, "main.py")

if is_executable(main_sh):
return main_sh

if is_executable(main_py) or os.path.exists(main_py):
return main_py

# ------------------------------------------------------------
# 6. Nothing found → treat as a hard error
# ------------------------------------------------------------
raise SystemExit(
f"No executable command could be resolved for repository '{repo_identifier}'. "
"No explicit 'command' configured, no system-managed binary under /usr/, "
"no Nix profile binary, no non-system console script on PATH, and no "
"main.sh/main.py found in the repository."
)
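The same resolution order can be reproduced manually with standard shell tools. This is only an illustration of the priority above, not part of pkgmgr itself; the identifier my-tool and the repository path are made up:

id="my-tool"; repo="$HOME/Repositories/my-tool"   # hypothetical identifier and checkout

if bin="$(command -v "$id")" && [[ "$bin" == /usr/* ]]; then
    echo "system package provides $bin -> pkgmgr would create no link"
elif [[ -x "$HOME/.nix-profile/bin/$id" ]]; then
    echo "nix profile binary: $HOME/.nix-profile/bin/$id"
elif [[ -n "${bin:-}" ]]; then
    echo "non-system console script: $bin"
elif [[ -x "$repo/main.sh" ]]; then
    echo "fallback entrypoint: $repo/main.sh"
else
    echo "nothing found -> pkgmgr aborts with an error" >&2
fi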
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "package-manager"
version = "0.7.2"
version = "1.2.0"
description = "Kevin's package-manager tool (pkgmgr)"
readme = "README.md"
requires-python = ">=3.11"
@@ -39,13 +39,13 @@ pkgmgr = "pkgmgr.cli:main"
# -----------------------------
# setuptools configuration
# -----------------------------
# We use find_packages(), not a fixed list,
# and explicitly include pkgmgr* and config*
# Source layout: all packages live under "src/"
[tool.setuptools]
package-dir = { "" = "src", "config" = "config" }

[tool.setuptools.packages.find]
where = ["."]
where = ["src", "."]
include = ["pkgmgr*", "config*"]

# Ensure defaults.yaml is shipped inside wheels & nix builds
[tool.setuptools.package-data]
"config" = ["defaults.yaml"]
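A quick way to check that the package-data declaration actually lands in the artifact is to build a wheel and list its contents. This is a sketch, assuming the PyPA build package is available and the wheel file name follows the usual normalization:

python3 -m pip install --upgrade build                      # assumption: 'build' is installed via pip
python3 -m build --wheel
unzip -l dist/package_manager-1.2.0-py3-none-any.whl | grep defaults.yaml   # file name is illustrative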
@@ -4,32 +4,21 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "${SCRIPT_DIR}/resolve-base-image.sh"

echo "============================================================"
echo ">>> Building ONLY missing container images"
echo "============================================================"
IMAGE="package-manager-test-$distro"
BASE_IMAGE="$(resolve_base_image "$distro")"

for distro in $DISTROS; do
IMAGE="package-manager-test-$distro"
BASE_IMAGE="$(resolve_base_image "$distro")"

if docker image inspect "$IMAGE" >/dev/null 2>&1; then
echo "[build-missing] Image already exists: $IMAGE (skipping)"
continue
fi

echo
echo "------------------------------------------------------------"
echo "[build-missing] Building missing image: $IMAGE"
echo "BASE_IMAGE = $BASE_IMAGE"
echo "------------------------------------------------------------"

docker build \
--build-arg BASE_IMAGE="$BASE_IMAGE" \
-t "$IMAGE" \
.
done
if docker image inspect "$IMAGE" >/dev/null 2>&1; then
echo "[build-missing] Image already exists: $IMAGE (skipping)"
exit 0
fi

echo
echo "============================================================"
echo ">>> build-missing: Done"
echo "============================================================"
echo "------------------------------------------------------------"
echo "[build-missing] Building missing image: $IMAGE"
echo "BASE_IMAGE = $BASE_IMAGE"
echo "------------------------------------------------------------"

docker build \
--build-arg BASE_IMAGE="$BASE_IMAGE" \
-t "$IMAGE" \
.
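The rewritten script now builds exactly one image per invocation and expects the distro name in $distro, provided by the caller (for example a Makefile loop). A hedged usage sketch; the script path and distro list are assumptions:

for distro in arch debian ubuntu fedora centos; do
    distro="$distro" bash scripts/test/build-missing.sh   # skips distros whose image already exists
done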
@@ -4,14 +4,12 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "${SCRIPT_DIR}/resolve-base-image.sh"

for distro in $DISTROS; do
base_image="$(resolve_base_image "$distro")"
base_image="$(resolve_base_image "$distro")"

echo ">>> Building test image for distro '$distro' with NO CACHE (BASE_IMAGE=$base_image)..."
echo ">>> Building test image for distro '$distro' with NO CACHE (BASE_IMAGE=$base_image)..."

docker build \
--no-cache \
--build-arg BASE_IMAGE="$base_image" \
-t "package-manager-test-$distro" \
.
done
docker build \
--no-cache \
--build-arg BASE_IMAGE="$base_image" \
-t "package-manager-test-$distro" \
.
@@ -4,13 +4,11 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "${SCRIPT_DIR}/resolve-base-image.sh"

for distro in $DISTROS; do
base_image="$(resolve_base_image "$distro")"
base_image="$(resolve_base_image "$distro")"

echo ">>> Building test image for distro '$distro' (BASE_IMAGE=$base_image)..."
echo ">>> Building test image for distro '$distro' (BASE_IMAGE=$base_image)..."

docker build \
--build-arg BASE_IMAGE="$base_image" \
-t "package-manager-test-$distro" \
.
done
docker build \
--build-arg BASE_IMAGE="$base_image" \
-t "package-manager-test-$distro" \
.
@@ -2,28 +2,59 @@
set -euo pipefail

# ---------------------------------------------------------------------------
# Ensure Nix has access to a valid CA bundle (TLS trust store)
# Detect and export a valid CA bundle so Nix, Git, curl and Python tooling
# can successfully perform HTTPS requests on all distros (Debian, Ubuntu,
# Fedora, RHEL, CentOS, etc.)
# ---------------------------------------------------------------------------
if [[ -z "${NIX_SSL_CERT_FILE:-}" ]]; then
if [[ -f /etc/ssl/certs/ca-certificates.crt ]]; then
# Debian/Ubuntu-style path
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
echo "[docker] Using CA bundle: ${NIX_SSL_CERT_FILE}"
elif [[ -f /etc/pki/tls/certs/ca-bundle.crt ]]; then
# Fedora/RHEL/CentOS-style path
export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
echo "[docker] Using CA bundle: ${NIX_SSL_CERT_FILE}"
else
echo "[docker] WARNING: No CA bundle found for Nix (NIX_SSL_CERT_FILE not set)."
echo "[docker] HTTPS access for Nix flakes may fail."
fi
detect_ca_bundle() {
# Common CA bundle locations across major Linux distributions
local candidates=(
/etc/ssl/certs/ca-certificates.crt # Debian/Ubuntu
/etc/ssl/cert.pem # Some distros
/etc/pki/tls/certs/ca-bundle.crt # Fedora/RHEL/CentOS
/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem # CentOS/RHEL extracted bundle
/etc/ssl/ca-bundle.pem # Generic fallback
)

for path in "${candidates[@]}"; do
if [[ -f "$path" ]]; then
echo "$path"
return 0
fi
done

return 1
}

# Use existing NIX_SSL_CERT_FILE if provided, otherwise auto-detect
CA_BUNDLE="${NIX_SSL_CERT_FILE:-}"

if [[ -z "${CA_BUNDLE}" ]]; then
CA_BUNDLE="$(detect_ca_bundle || true)"
fi

if [[ -n "${CA_BUNDLE}" ]]; then
# Export for Nix (critical)
export NIX_SSL_CERT_FILE="${CA_BUNDLE}"

# Export for Git, Python requests, curl, etc.
export SSL_CERT_FILE="${CA_BUNDLE}"
export REQUESTS_CA_BUNDLE="${CA_BUNDLE}"
export GIT_SSL_CAINFO="${CA_BUNDLE}"

echo "[docker] Using CA bundle: ${CA_BUNDLE}"
else
echo "[docker] WARNING: No CA certificate bundle found."
echo "[docker] HTTPS access for Nix flakes and other tools may fail."
fi

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

echo "[docker] Starting package-manager container"

# Distro info for logging
# ---------------------------------------------------------------------------
# Log distribution info
# ---------------------------------------------------------------------------
if [[ -f /etc/os-release ]]; then
# shellcheck disable=SC1091
. /etc/os-release
@@ -34,9 +65,9 @@ fi
echo "[docker] Using /src as working directory"
cd /src

# ------------------------------------------------------------
# DEV mode: build/install package-manager from current /src
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# DEV mode: rebuild package-manager from the mounted /src tree
# ---------------------------------------------------------------------------
if [[ "${PKGMGR_DEV:-0}" == "1" ]]; then
echo "[docker] DEV mode enabled (PKGMGR_DEV=1)"
echo "[docker] Rebuilding package-manager from /src via scripts/installation/run-package.sh..."
@@ -49,9 +80,9 @@ if [[ "${PKGMGR_DEV:-0}" == "1" ]]; then
fi
fi

# ------------------------------------------------------------
# Hand-off to pkgmgr / arbitrary command
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# Hand off to pkgmgr or arbitrary command
# ---------------------------------------------------------------------------
if [[ $# -eq 0 ]]; then
echo "[docker] No arguments provided. Showing pkgmgr help..."
exec pkgmgr --help
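To confirm that the exported bundle is actually picked up inside the container, the exported variables can be exercised directly. A minimal check, assuming network access and that curl and git are installed in the image:

echo "NIX_SSL_CERT_FILE=${NIX_SSL_CERT_FILE:-<unset>}"
curl -fsSI https://nixos.org >/dev/null && echo "curl: TLS OK"
git ls-remote https://github.com/NixOS/nixpkgs.git HEAD >/dev/null && echo "git: TLS OK"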
@@ -3,21 +3,22 @@ set -euo pipefail

echo "[init-nix] Starting Nix initialization..."

NIX_INSTALL_URL="${NIX_INSTALL_URL:-https://nixos.org/nix/install}"
NIX_DOWNLOAD_MAX_TIME=300 # 5 minutes
NIX_DOWNLOAD_SLEEP_INTERVAL=20 # 20 seconds

# ---------------------------------------------------------------------------
# Helper: detect whether we are inside a container (Docker/Podman/etc.)
# Detect whether we are inside a container (Docker/Podman/etc.)
# ---------------------------------------------------------------------------
is_container() {
# Docker / Podman markers
if [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]]; then
return 0
fi

# cgroup hints
if grep -qiE 'docker|container|podman|lxc' /proc/1/cgroup 2>/dev/null; then
return 0
fi

# Environment variable used by some runtimes
if [[ -n "${container:-}" ]]; then
return 0
fi
@@ -26,171 +27,206 @@ is_container() {
}

# ---------------------------------------------------------------------------
# Helper: ensure Nix binaries are on PATH (multi-user or single-user)
# Ensure Nix binaries are on PATH (multi-user or single-user)
# ---------------------------------------------------------------------------
ensure_nix_on_path() {
# Multi-user profile (daemon install)
if [[ -x /nix/var/nix/profiles/default/bin/nix ]]; then
export PATH="/nix/var/nix/profiles/default/bin:${PATH}"
fi

# Single-user profile (current user)
if [[ -x "${HOME}/.nix-profile/bin/nix" ]]; then
export PATH="${HOME}/.nix-profile/bin:${PATH}"
fi

# Single-user profile for dedicated "nix" user (container case)
if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
export PATH="/home/nix/.nix-profile/bin:${PATH}"
fi
}

# ---------------------------------------------------------------------------
# Fast path: Nix already available
# Ensure Nix build group and users exist (build-users-group = nixbld)
# ---------------------------------------------------------------------------
if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
exit 0
fi

ensure_nix_on_path

if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix found after adjusting PATH: $(command -v nix)"
exit 0
fi

echo "[init-nix] Nix not found, starting installation logic..."

IN_CONTAINER=0
if is_container; then
IN_CONTAINER=1
echo "[init-nix] Detected container environment."
else
echo "[init-nix] No container detected."
fi

# ---------------------------------------------------------------------------
# Container + root: install Nix as dedicated "nix" user (single-user)
# ---------------------------------------------------------------------------
if [[ "${IN_CONTAINER}" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] Running as root inside a container – using dedicated 'nix' user."

# Ensure nixbld group (required by Nix)
ensure_nix_build_group() {
if ! getent group nixbld >/dev/null 2>&1; then
echo "[init-nix] Creating group 'nixbld'..."
groupadd -r nixbld
fi

# Ensure Nix build users (nixbld1..nixbld10) as members of nixbld
for i in $(seq 1 10); do
if ! id "nixbld$i" >/dev/null 2>&1; then
echo "[init-nix] Creating build user nixbld$i..."
# -r: system account, -g: primary group, -G: supplementary (ensures membership is listed)
useradd -r -g nixbld -G nixbld -s /usr/sbin/nologin "nixbld$i"
fi
done
}

# Ensure "nix" user (home at /home/nix)
if ! id nix >/dev/null 2>&1; then
echo "[init-nix] Creating user 'nix'..."
useradd -m -r -g nixbld -s /usr/bin/bash nix
fi
# ---------------------------------------------------------------------------
# Download and run Nix installer with retry
# Usage: install_nix_with_retry daemon|no-daemon [run_as_user]
# ---------------------------------------------------------------------------
install_nix_with_retry() {
local mode="$1"
local run_as="${2:-}"
local installer elapsed=0 mode_flag

# Create /nix directory and hand it to nix user (prevents installer sudo prompt)
if [[ ! -d /nix ]]; then
echo "[init-nix] Creating /nix with owner nix:nixbld..."
mkdir -m 0755 /nix
chown nix:nixbld /nix
fi
case "${mode}" in
daemon) mode_flag="--daemon" ;;
no-daemon) mode_flag="--no-daemon" ;;
*)
echo "[init-nix] ERROR: Invalid mode '${mode}', expected 'daemon' or 'no-daemon'."
exit 1
;;
esac

# Run Nix single-user installer as "nix"
echo "[init-nix] Installing Nix as user 'nix' (single-user, --no-daemon)..."
if command -v sudo >/dev/null 2>&1; then
sudo -u nix bash -lc 'sh <(curl -L https://nixos.org/nix/install) --no-daemon'
else
su - nix -c 'sh <(curl -L https://nixos.org/nix/install) --no-daemon'
fi
installer="$(mktemp -t nix-installer.XXXXXX)"

# After installation, expose nix to root via PATH and symlink
ensure_nix_on_path
echo "[init-nix] Downloading Nix installer from ${NIX_INSTALL_URL} with retry (max ${NIX_DOWNLOAD_MAX_TIME}s)..."

if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
if [[ ! -e /usr/local/bin/nix ]]; then
echo "[init-nix] Creating /usr/local/bin/nix symlink -> /home/nix/.nix-profile/bin/nix"
ln -s /home/nix/.nix-profile/bin/nix /usr/local/bin/nix
while true; do
if curl -fL "${NIX_INSTALL_URL}" -o "${installer}"; then
echo "[init-nix] Successfully downloaded Nix installer to ${installer}"
break
fi

local curl_exit=$?
echo "[init-nix] WARNING: Failed to download Nix installer (curl exit code ${curl_exit})."

elapsed=$((elapsed + NIX_DOWNLOAD_SLEEP_INTERVAL))
if (( elapsed >= NIX_DOWNLOAD_MAX_TIME )); then
echo "[init-nix] ERROR: Giving up after ${elapsed}s trying to download Nix installer."
rm -f "${installer}"
exit 1
fi

echo "[init-nix] Retrying in ${NIX_DOWNLOAD_SLEEP_INTERVAL}s (elapsed: ${elapsed}s/${NIX_DOWNLOAD_MAX_TIME}s)..."
sleep "${NIX_DOWNLOAD_SLEEP_INTERVAL}"
done

if [[ -n "${run_as}" ]]; then
echo "[init-nix] Running installer as user '${run_as}' with mode '${mode}'..."
if command -v sudo >/dev/null 2>&1; then
sudo -u "${run_as}" bash -lc "sh '${installer}' ${mode_flag}"
else
su - "${run_as}" -c "sh '${installer}' ${mode_flag}"
fi
else
echo "[init-nix] Running installer as current user with mode '${mode}'..."
sh "${installer}" "${mode_flag}"
fi

rm -f "${installer}"
}

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
main() {
# Fast path: Nix already available
if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
return 0
fi

ensure_nix_on_path

if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix successfully installed (container mode) at: $(command -v nix)"
else
echo "[init-nix] WARNING: Nix installation finished in container, but 'nix' is still not on PATH."
echo "[init-nix] Nix found after adjusting PATH: $(command -v nix)"
return 0
fi

# Optionally add PATH hints to /etc/profile (best effort)
if [[ -w /etc/profile ]]; then
if ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
cat <<'EOF' >> /etc/profile
echo "[init-nix] Nix not found, starting installation logic..."

# Nix profiles (added by package-manager init-nix.sh)
if [ -d /nix/var/nix/profiles/default/bin ]; then
PATH="/nix/var/nix/profiles/default/bin:$PATH"
fi
if [ -d "$HOME/.nix-profile/bin" ]; then
PATH="$HOME/.nix-profile/bin:$PATH"
fi
EOF
echo "[init-nix] Appended Nix PATH setup to /etc/profile (container mode)."
local IN_CONTAINER=0
if is_container; then
IN_CONTAINER=1
echo "[init-nix] Detected container environment."
else
echo "[init-nix] No container detected."
fi

# -------------------------------------------------------------------------
# Container + root: dedicated "nix" user, single-user install
# -------------------------------------------------------------------------
if [[ "${IN_CONTAINER}" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] Container + root – installing as 'nix' user (single-user)."

ensure_nix_build_group

if ! id nix >/dev/null 2>&1; then
echo "[init-nix] Creating user 'nix'..."
local BASH_SHELL
BASH_SHELL="$(command -v bash || true)"
[[ -z "${BASH_SHELL}" ]] && BASH_SHELL="/bin/sh"
useradd -m -r -g nixbld -s "${BASH_SHELL}" nix
fi
fi

echo "[init-nix] Nix initialization complete (container root mode)."
exit 0
fi

# ---------------------------------------------------------------------------
# Non-container or non-root container: normal installer paths
# ---------------------------------------------------------------------------
if [[ "${IN_CONTAINER}" -eq 0 ]]; then
# Real host
if command -v systemctl >/dev/null 2>&1; then
echo "[init-nix] Host with systemd – using multi-user install (--daemon)."
sh <(curl -L https://nixos.org/nix/install) --daemon
else
if [[ "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] WARNING: Running as root without systemd on host."
echo "[init-nix] Falling back to single-user install (--no-daemon), but this is not recommended."
sh <(curl -L https://nixos.org/nix/install) --no-daemon
if [[ ! -d /nix ]]; then
echo "[init-nix] Creating /nix with owner nix:nixbld..."
mkdir -m 0755 /nix
chown nix:nixbld /nix
else
echo "[init-nix] Non-root host without systemd – using single-user install (--no-daemon)."
sh <(curl -L https://nixos.org/nix/install) --no-daemon
local current_owner current_group
current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
if [[ "${current_owner}" != "nix" || "${current_group}" != "nixbld" ]]; then
echo "[init-nix] Fixing /nix ownership from ${current_owner}:${current_group} to nix:nixbld..."
chown -R nix:nixbld /nix
fi
if [[ ! -w /nix ]]; then
echo "[init-nix] WARNING: /nix is not writable after chown; Nix installer may fail."
fi
fi
fi
else

install_nix_with_retry "no-daemon" "nix"

ensure_nix_on_path

if [[ -x /home/nix/.nix-profile/bin/nix && ! -e /usr/local/bin/nix ]]; then
echo "[init-nix] Creating /usr/local/bin/nix symlink -> /home/nix/.nix-profile/bin/nix"
ln -s /home/nix/.nix-profile/bin/nix /usr/local/bin/nix
fi

# -------------------------------------------------------------------------
# Host (no container)
# -------------------------------------------------------------------------
elif [[ "${IN_CONTAINER}" -eq 0 ]]; then
if command -v systemctl >/dev/null 2>&1; then
echo "[init-nix] Host with systemd – using multi-user install (--daemon)."
if [[ "${EUID:-0}" -eq 0 ]]; then
ensure_nix_build_group
fi
install_nix_with_retry "daemon"
else
if [[ "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] Host without systemd as root – using single-user install (--no-daemon)."
ensure_nix_build_group
else
echo "[init-nix] Host without systemd as non-root – using single-user install (--no-daemon)."
fi
install_nix_with_retry "no-daemon"
fi

# -------------------------------------------------------------------------
# Container, but not root (rare)
echo "[init-nix] Container as non-root user – using single-user install (--no-daemon)."
sh <(curl -L https://nixos.org/nix/install) --no-daemon
fi
# -------------------------------------------------------------------------
else
echo "[init-nix] Container as non-root – using single-user install (--no-daemon)."
install_nix_with_retry "no-daemon"
fi

# ---------------------------------------------------------------------------
# After installation: fix PATH (runtime + shell profiles)
# ---------------------------------------------------------------------------
ensure_nix_on_path
# -------------------------------------------------------------------------
# After installation: PATH + /etc/profile
# -------------------------------------------------------------------------
ensure_nix_on_path

if ! command -v nix >/dev/null 2>&1; then
echo "[init-nix] WARNING: Nix installation finished, but 'nix' is still not on PATH."
echo "[init-nix] You may need to source your shell profile manually."
exit 0
fi
if ! command -v nix >/dev/null 2>&1; then
echo "[init-nix] WARNING: Nix installation finished, but 'nix' is still not on PATH."
echo "[init-nix] You may need to source your shell profile manually."
else
echo "[init-nix] Nix successfully installed at: $(command -v nix)"
fi

echo "[init-nix] Nix successfully installed at: $(command -v nix)"

# Update global /etc/profile if writable (helps especially on minimal systems)
if [[ -w /etc/profile ]]; then
if ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
if [[ -w /etc/profile ]] && ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
cat <<'EOF' >> /etc/profile

# Nix profiles (added by package-manager init-nix.sh)
@@ -203,6 +239,8 @@ fi
EOF
echo "[init-nix] Appended Nix PATH setup to /etc/profile"
fi
fi

echo "[init-nix] Nix initialization complete."
echo "[init-nix] Nix initialization complete."
}

main "$@"
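A typical smoke test for the installer logic is to run the script and verify that nix ends up on PATH afterwards. A minimal sketch, assuming the script lives at scripts/installation/init-nix.sh and that curl and xz are already present:

bash scripts/installation/init-nix.sh
command -v nix && nix --version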
@@ -45,8 +45,42 @@ else
fi

echo "[aur-builder-setup] Ensuring yay is installed for aur_builder..."

if ! "${RUN_AS_AUR[@]}" 'command -v yay >/dev/null 2>&1'; then
"${RUN_AS_AUR[@]}" 'cd ~ && rm -rf yay && git clone https://aur.archlinux.org/yay.git && cd yay && makepkg -si --noconfirm'
echo "[aur-builder-setup] yay not found – starting retry sequence for download..."

MAX_TIME=300
SLEEP_INTERVAL=20
ELAPSED=0

while true; do
if "${RUN_AS_AUR[@]}" '
set -euo pipefail
cd ~
rm -rf yay || true
git clone https://aur.archlinux.org/yay.git yay
'; then
echo "[aur-builder-setup] yay repository cloned successfully."
break
fi

echo "[aur-builder-setup] git clone failed (likely 504). Retrying in ${SLEEP_INTERVAL}s..."
sleep "${SLEEP_INTERVAL}"
ELAPSED=$((ELAPSED + SLEEP_INTERVAL))

if (( ELAPSED >= MAX_TIME )); then
echo "[aur-builder-setup] ERROR: Aborted after 5 minutes of retry attempts."
exit 1
fi
done

# Now build yay after successful clone
"${RUN_AS_AUR[@]}" '
set -euo pipefail
cd ~/yay
makepkg -si --noconfirm
'

else
echo "[aur-builder-setup] yay already installed."
fi
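The same bounded-retry pattern (20 s interval, 5 minute budget) also appears in the Nix installer download above. A generic helper in that spirit could look like the sketch below; it is not part of the repository, just an illustration of the pattern:

retry_for() {
    # retry_for <max_seconds> <sleep_seconds> <command...>
    local max="$1" interval="$2"; shift 2
    local elapsed=0
    until "$@"; do
        elapsed=$((elapsed + interval))
        if (( elapsed >= max )); then
            echo "ERROR: giving up after ${elapsed}s: $*" >&2
            return 1
        fi
        echo "Retrying in ${interval}s (${elapsed}s/${max}s elapsed)..." >&2
        sleep "$interval"
    done
}

retry_for 300 20 git clone https://aur.archlinux.org/yay.git yay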
@@ -3,10 +3,21 @@ set -euo pipefail

echo "[arch/package] Building Arch package (makepkg --nodeps)..."

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
PKG_DIR="${PROJECT_ROOT}/packaging/arch"

if [[ ! -f "${PKG_DIR}/PKGBUILD" ]]; then
echo "[arch/package] ERROR: PKGBUILD not found in ${PKG_DIR}"
exit 1
fi

cd "${PKG_DIR}"

if id aur_builder >/dev/null 2>&1; then
echo "[arch/package] Using 'aur_builder' user for makepkg..."
chown -R aur_builder:aur_builder "$(pwd)"
su aur_builder -c "cd '$(pwd)' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
chown -R aur_builder:aur_builder "${PKG_DIR}"
su aur_builder -c "cd '${PKG_DIR}' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
else
echo "[arch/package] WARNING: user 'aur_builder' not found, running makepkg as current user..."
rm -f package-manager-*.pkg.tar.*
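After a successful makepkg run the resulting artifact can be inspected before installing it; the exact file name depends on the pkgver in PKGBUILD, so the glob below is illustrative:

ls packaging/arch/package-manager-*.pkg.tar.*
pacman -Qlp packaging/arch/package-manager-*.pkg.tar.*   # list packaged files without installing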
@@ -13,6 +13,7 @@ dnf -y install \
bash \
curl-minimal \
ca-certificates \
sudo \
xz

dnf clean all
@@ -4,8 +4,17 @@ set -euo pipefail
echo "[centos/package] Setting up rpmbuild directories..."
mkdir -p /root/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
SPEC_PATH="${PROJECT_ROOT}/packaging/fedora/package-manager.spec"

if [[ ! -f "${SPEC_PATH}" ]]; then
echo "[centos/package] ERROR: SPEC file not found: ${SPEC_PATH}"
exit 1
fi

echo "[centos/package] Extracting version from package-manager.spec..."
version="$(grep -E '^Version:' package-manager.spec | awk '{print $2}')"
version="$(grep -E '^Version:' "${SPEC_PATH}" | awk '{print $2}')"
if [[ -z "${version}" ]]; then
echo "ERROR: Version missing!"
exit 1
@@ -15,13 +24,13 @@ srcdir="package-manager-${version}"
echo "[centos/package] Preparing source tree: ${srcdir}"
rm -rf "/tmp/${srcdir}"
mkdir -p "/tmp/${srcdir}"
cp -a . "/tmp/${srcdir}/"
cp -a "${PROJECT_ROOT}/." "/tmp/${srcdir}/"

echo "[centos/package] Creating source tarball..."
tar czf "/root/rpmbuild/SOURCES/${srcdir}.tar.gz" -C /tmp "${srcdir}"

echo "[centos/package] Copying SPEC..."
cp package-manager.spec /root/rpmbuild/SPECS/
cp "${SPEC_PATH}" /root/rpmbuild/SPECS/

echo "[centos/package] Running rpmbuild..."
cd /root/rpmbuild/SPECS
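Once rpmbuild finishes, the produced binary RPM can be inspected before installation. The architecture subdirectory (noarch) and the version in the file name are illustrative:

ls /root/rpmbuild/RPMS/noarch/
rpm -qlp /root/rpmbuild/RPMS/noarch/package-manager-*.rpm   # list packaged files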
@@ -3,6 +3,25 @@ set -euo pipefail

echo "[debian/package] Building Debian package..."

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"

BUILD_ROOT="/tmp/package-manager-debian-build"
rm -rf "${BUILD_ROOT}"
mkdir -p "${BUILD_ROOT}"

echo "[debian/package] Syncing project sources to ${BUILD_ROOT}..."
rsync -a \
--exclude 'packaging/debian' \
"${PROJECT_ROOT}/" "${BUILD_ROOT}/"

echo "[debian/package] Overlaying debian/ metadata from packaging/debian..."
mkdir -p "${BUILD_ROOT}/debian"
cp -a "${PROJECT_ROOT}/packaging/debian/." "${BUILD_ROOT}/debian/"

cd "${BUILD_ROOT}"

echo "[debian/package] Running dpkg-buildpackage..."
dpkg-buildpackage -us -uc -b

echo "[debian/package] Installing generated DEB package..."
@@ -4,8 +4,17 @@ set -euo pipefail
echo "[fedora/package] Setting up rpmbuild directories..."
mkdir -p /root/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
SPEC_PATH="${PROJECT_ROOT}/packaging/fedora/package-manager.spec"

if [[ ! -f "${SPEC_PATH}" ]]; then
echo "[fedora/package] ERROR: SPEC file not found: ${SPEC_PATH}"
exit 1
fi

echo "[fedora/package] Extracting version from package-manager.spec..."
version="$(grep -E '^Version:' package-manager.spec | awk '{print $2}')"
version="$(grep -E '^Version:' "${SPEC_PATH}" | awk '{print $2}')"
if [[ -z "${version}" ]]; then
echo "ERROR: Version missing!"
exit 1
@@ -15,13 +24,13 @@ srcdir="package-manager-${version}"
echo "[fedora/package] Preparing source tree: ${srcdir}"
rm -rf "/tmp/${srcdir}"
mkdir -p "/tmp/${srcdir}"
cp -a . "/tmp/${srcdir}/"
cp -a "${PROJECT_ROOT}/." "/tmp/${srcdir}/"

echo "[fedora/package] Creating source tarball..."
tar czf "/root/rpmbuild/SOURCES/${srcdir}.tar.gz" -C /tmp "${srcdir}"

echo "[fedora/package] Copying SPEC..."
cp package-manager.spec /root/rpmbuild/SPECS/
cp "${SPEC_PATH}" /root/rpmbuild/SPECS/

echo "[fedora/package] Running rpmbuild..."
cd /root/rpmbuild/SPECS
@@ -4,20 +4,22 @@ set -euo pipefail
# ------------------------------------------------------------
# main.sh
#
# Developer setup entrypoint.
# Developer / system setup entrypoint.
#
# Responsibilities:
# - If inside a Nix shell (IN_NIX_SHELL=1):
# * Skip venv creation and dependency installation
# * Run `python3 main.py install`
# - Otherwise:
# - If running as root (EUID=0):
# * Run system-level installer (run-package.sh)
# - Otherwise (normal user):
# * Create ~/.venvs/pkgmgr virtual environment if missing
# * Install Python dependencies into that venv
# * Append auto-activation to ~/.bashrc and ~/.zshrc
# * Run `main.py install` using the venv Python
# ------------------------------------------------------------

echo "[installation/main] Starting developer setup..."
echo "[installation/main] Starting setup..."

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"
@@ -26,20 +28,34 @@ VENV_DIR="${HOME}/.venvs/pkgmgr"
RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/activate"; if [ -n "${PS1:-}" ]; then echo "Global Python virtual environment '\''~/.venvs/pkgmgr'\'' activated."; fi; fi'

# ------------------------------------------------------------
# Nix shell mode: do not touch venv, only run main.py install
# 1) Nix shell mode: do not touch venv, only run main.py install
# ------------------------------------------------------------
if [[ -n "${IN_NIX_SHELL:-}" ]]; then
echo "[installation/main] Nix shell detected (IN_NIX_SHELL=1)."
echo "[installation/main] Skipping virtualenv creation and dependency installation."
echo "[installation/main] Running main.py install via system python3..."
python3 main.py install
echo "[installation/main] Developer setup finished (Nix mode)."
echo "[installation/main] Setup finished (Nix mode)."
exit 0
fi

# ------------------------------------------------------------
# Normal host mode: create/update venv and run main.py install
# 2) Root mode: system / distro-level installation
# ------------------------------------------------------------
if [[ "${EUID:-$(id -u)}" -eq 0 ]]; then
echo "[installation/main] Running as root (EUID=0)."
echo "[installation/main] Skipping user virtualenv and shell RC modifications."
echo "[installation/main] Delegating to scripts/installation/run-package.sh..."
bash scripts/installation/run-package.sh
echo "[installation/main] Root/system setup complete."
exit 0
fi

# ------------------------------------------------------------
# 3) Normal user mode: dev setup with venv
# ------------------------------------------------------------

echo "[installation/main] Running in normal user mode (developer setup)."

echo "[installation/main] Ensuring main.py is executable..."
chmod +x main.py || true
@@ -47,26 +63,8 @@ chmod +x main.py || true
echo "[installation/main] Ensuring global virtualenv root: ${HOME}/.venvs"
mkdir -p "${HOME}/.venvs"

if [[ ! -d "${VENV_DIR}" ]]; then
echo "[installation/main] Creating virtual environment at: ${VENV_DIR}"
python3 -m venv "${VENV_DIR}"
else
echo "[installation/main] Virtual environment already exists at: ${VENV_DIR}"
fi

echo "[installation/main] Installing Python tooling into venv..."
"${VENV_DIR}/bin/python" -m ensurepip --upgrade
"${VENV_DIR}/bin/pip" install --upgrade pip setuptools wheel

if [[ -f "requirements.txt" ]]; then
echo "[installation/main] Installing dependencies from requirements.txt..."
"${VENV_DIR}/bin/pip" install -r requirements.txt
elif [[ -f "_requirements.txt" ]]; then
echo "[installation/main] Installing dependencies from _requirements.txt..."
"${VENV_DIR}/bin/pip" install -r _requirements.txt
else
echo "[installation/main] No requirements.txt or _requirements.txt found. Skipping dependency installation."
fi
echo "[installation/main] Creating/updating virtualenv via helper..."
PKGMGR_VENV_DIR="${VENV_DIR}" bash scripts/installation/venv-create.sh

echo "[installation/main] Ensuring ~/.bashrc and ~/.zshrc exist..."
touch "${HOME}/.bashrc" "${HOME}/.zshrc"
@@ -8,6 +8,12 @@ source "${SCRIPT_DIR}/lib.sh"

OS_ID="$(detect_os_id)"

# Map Manjaro to Arch
if [[ "${OS_ID}" == "manjaro" ]]; then
echo "[run-package] Mapping OS 'manjaro' → 'arch'"
OS_ID="arch"
fi

echo "[run-package] Detected OS: ${OS_ID}"

case "${OS_ID}" in
@@ -3,6 +3,25 @@ set -euo pipefail

echo "[ubuntu/package] Building Ubuntu (Debian-style) package..."

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"

BUILD_ROOT="/tmp/package-manager-ubuntu-build"
rm -rf "${BUILD_ROOT}"
mkdir -p "${BUILD_ROOT}"

echo "[ubuntu/package] Syncing project sources to ${BUILD_ROOT}..."
rsync -a \
--exclude 'packaging/debian' \
"${PROJECT_ROOT}/" "${BUILD_ROOT}/"

echo "[ubuntu/package] Overlaying debian/ metadata from packaging/debian..."
mkdir -p "${BUILD_ROOT}/debian"
cp -a "${PROJECT_ROOT}/packaging/debian/." "${BUILD_ROOT}/debian/"

cd "${BUILD_ROOT}"

echo "[ubuntu/package] Running dpkg-buildpackage..."
dpkg-buildpackage -us -uc -b

echo "[ubuntu/package] Installing generated DEB package..."
scripts/installation/venv-create.sh (new file, 44 lines)
@@ -0,0 +1,44 @@
#!/usr/bin/env bash
set -euo pipefail

# venv-create.sh
#
# Small helper to create/update a Python virtual environment for pkgmgr.
#
# Usage:
# PKGMGR_VENV_DIR=/home/dev/.venvs/pkgmgr bash scripts/installation/venv-create.sh
# or
# bash scripts/installation/venv-create.sh /home/dev/.venvs/pkgmgr

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

VENV_DIR="${PKGMGR_VENV_DIR:-${1:-${HOME}/.venvs/pkgmgr}}"

echo "[venv-create] Using VENV_DIR=${VENV_DIR}"

echo "[venv-create] Ensuring virtualenv parent directory exists..."
mkdir -p "$(dirname "${VENV_DIR}")"

if [[ ! -d "${VENV_DIR}" ]]; then
echo "[venv-create] Creating virtual environment at: ${VENV_DIR}"
python3 -m venv "${VENV_DIR}"
else
echo "[venv-create] Virtual environment already exists at: ${VENV_DIR}"
fi

echo "[venv-create] Installing Python tooling into venv..."
"${VENV_DIR}/bin/python" -m ensurepip --upgrade
"${VENV_DIR}/bin/pip" install --upgrade pip setuptools wheel

if [[ -f "requirements.txt" ]]; then
echo "[venv-create] Installing dependencies from requirements.txt..."
"${VENV_DIR}/bin/pip" install -r requirements.txt
elif [[ -f "_requirements.txt" ]]; then
echo "[venv-create] Installing dependencies from _requirements.txt..."
"${VENV_DIR}/bin/pip" install -r _requirements.txt
else
echo "[venv-create] No requirements.txt or _requirements.txt found. Skipping dependency installation."
fi

echo "[venv-create] Done."
@@ -8,19 +8,18 @@ fi

FLAKE_DIR="/usr/lib/package-manager"

# ------------------------------------------------------------
# Try to ensure that "nix" is on PATH
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# Try to ensure that "nix" is on PATH (common locations + container user)
# ---------------------------------------------------------------------------
if ! command -v nix >/dev/null 2>&1; then
# Common locations for Nix installations
CANDIDATES=(
"/nix/var/nix/profiles/default/bin/nix"
"${HOME:-/root}/.nix-profile/bin/nix"
"/home/nix/.nix-profile/bin/nix"
)

for candidate in "${CANDIDATES[@]}"; do
if [[ -x "$candidate" ]]; then
# Prepend the directory of the candidate to PATH
PATH="$(dirname "$candidate"):${PATH}"
export PATH
break
@@ -28,13 +27,22 @@ if ! command -v nix >/dev/null 2>&1; then
done
fi

# ------------------------------------------------------------
# Primary (and only) path: use Nix flake if available
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# If nix is still missing, try to run init-nix.sh once
# ---------------------------------------------------------------------------
if ! command -v nix >/dev/null 2>&1; then
if [[ -x "${FLAKE_DIR}/init-nix.sh" ]]; then
"${FLAKE_DIR}/init-nix.sh" || true
fi
fi

# ---------------------------------------------------------------------------
# Primary path: use Nix flake if available
# ---------------------------------------------------------------------------
if command -v nix >/dev/null 2>&1; then
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
fi

echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH."
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
exit 1
@@ -1,40 +1,32 @@
#!/usr/bin/env bash
set -euo pipefail

echo "============================================================"
echo ">>> Running sanity test: verifying test containers start"
echo "============================================================"

for distro in $DISTROS; do
IMAGE="package-manager-test-$distro"

echo
echo "------------------------------------------------------------"
echo ">>> Testing container: $IMAGE"
echo "------------------------------------------------------------"

echo "[test-container] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
echo

# Run the command and capture the output
if OUTPUT=$(docker run --rm \
-e PKGMGR_DEV=1 \
-v "$(pwd):/src" \
-v "pkgmgr_nix_cache:/root/.cache/nix" \
"$IMAGE" 2>&1); then
echo "$OUTPUT"
echo
echo "[test-container] SUCCESS: $IMAGE responded to 'pkgmgr --help'"

else
echo "$OUTPUT"
echo
echo "[test-container] ERROR: $IMAGE failed to run 'pkgmgr --help'"
exit 1
fi
done
IMAGE="package-manager-test-$distro"

echo
echo "============================================================"
echo ">>> All containers passed the sanity check"
echo "============================================================"
echo "------------------------------------------------------------"
echo ">>> Testing container: $IMAGE"
echo "------------------------------------------------------------"
echo "[test-container] Inspect image metadata:"
docker image inspect "$IMAGE" | sed -n '1,40p'

echo "[test-container] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
echo

# Run the command and capture the output
if OUTPUT=$(docker run --rm \
-e PKGMGR_DEV=1 \
-v pkgmgr_nix_store_${distro}:/nix \
-v "$(pwd):/src" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
"$IMAGE" 2>&1); then
echo "$OUTPUT"
echo
echo "[test-container] SUCCESS: $IMAGE responded to 'pkgmgr --help'"

else
echo "$OUTPUT"
echo
echo "[test-container] ERROR: $IMAGE failed to run 'pkgmgr --help'"
exit 1
fi
@@ -1,56 +1,60 @@
#!/usr/bin/env bash
set -euo pipefail

echo ">>> Running E2E tests in all distros: $DISTROS"
echo "============================================================"
echo ">>> Running E2E tests: $distro"
echo "============================================================"

for distro in $DISTROS; do
echo "============================================================"
echo ">>> Running E2E tests: $distro"
echo "============================================================"
docker run --rm \
-v "$(pwd):/src" \
-v "pkgmgr_nix_store_${distro}:/nix" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
-e PKGMGR_DEV=1 \
-e TEST_PATTERN="${TEST_PATTERN}" \
--workdir /src \
"package-manager-test-${distro}" \
bash -lc '
set -euo pipefail

MOUNT_NIX=""
if [[ "$distro" == "arch" ]]; then
MOUNT_NIX="-v pkgmgr_nix_store:/nix"
fi
# Load distro info
if [ -f /etc/os-release ]; then
. /etc/os-release
fi

docker run --rm \
-v "$(pwd):/src" \
$MOUNT_NIX \
-v "pkgmgr_nix_cache:/root/.cache/nix" \
-e PKGMGR_DEV=1 \
--workdir /src \
--entrypoint bash \
"package-manager-test-$distro" \
-c '
set -e;
echo "Running tests inside distro: ${ID:-unknown}"

if [ -f /etc/os-release ]; then
. /etc/os-release;
fi;
# Load Nix environment if available
if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then
. "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
fi

echo "Running tests inside distro: $ID";
if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
fi

# Try to load nix environment
if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then
. "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh";
fi
PATH="/nix/var/nix/profiles/default/bin:$HOME/.nix-profile/bin:$PATH"

if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
. "$HOME/.nix-profile/etc/profile.d/nix.sh";
fi
command -v nix >/dev/null || {
echo "ERROR: nix not found."
exit 1
}

PATH="/nix/var/nix/profiles/default/bin:$HOME/.nix-profile/bin:$PATH";
# Mark the mounted repository as safe to avoid Git ownership errors.
# Newer Git (e.g. on Ubuntu) complains about the gitdir (/src/.git),
# older versions about the worktree (/src). Nix turns "." into the
# flake input "git+file:///src", which then uses Git under the hood.
if command -v git >/dev/null 2>&1; then
# Worktree path
git config --global --add safe.directory /src || true
# Gitdir path shown in the "dubious ownership" error
git config --global --add safe.directory /src/.git || true
# Ephemeral CI containers: allow all paths as a last resort
git config --global --add safe.directory '*' || true
fi

command -v nix >/dev/null || {
echo "ERROR: nix not found.";
exit 1;
}

git config --global --add safe.directory /src || true;

nix develop .#default --no-write-lock-file -c \
python3 -m unittest discover \
-s /src/tests/e2e \
-p "test_*.py";
'
done
# Run the E2E tests inside the Nix development shell
nix develop .#default --no-write-lock-file -c \
python3 -m unittest discover \
-s /src/tests/e2e \
-p "$TEST_PATTERN"
'
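The TEST_PATTERN variable now controls which test modules unittest discovers in the container runs. A hedged usage sketch; the script paths are assumptions and the distro is expected to be set by the caller as above:

TEST_PATTERN="test_install*.py" distro=debian bash scripts/test/e2e.sh   # narrow a run to matching modules
TEST_PATTERN="test_*.py"        distro=arch   bash scripts/test/unit.sh  # default pattern, all unit tests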
@@ -2,22 +2,23 @@
set -euo pipefail

echo "============================================================"
echo ">>> Running INTEGRATION tests in Arch container"
echo ">>> Running INTEGRATION tests in ${distro} container"
echo "============================================================"

docker run --rm \
-v "$(pwd):/src" \
-v "pkgmgr_nix_cache:/root/.cache/nix" \
-v pkgmgr_nix_store_${distro}:/nix \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
--workdir /src \
-e PKGMGR_DEV=1 \
--entrypoint bash \
"package-manager-test-arch" \
-c '
-e TEST_PATTERN="${TEST_PATTERN}" \
"package-manager-test-${distro}" \
bash -lc '
set -e;
git config --global --add safe.directory /src || true;
nix develop .#default --no-write-lock-file -c \
python -m unittest discover \
python3 -m unittest discover \
-s tests/integration \
-t /src \
-p "test_*.py";
-p "$TEST_PATTERN";
'
@@ -2,22 +2,23 @@
 set -euo pipefail
 
 echo "============================================================"
-echo ">>> Running UNIT tests in Arch container"
+echo ">>> Running UNIT tests in ${distro} container"
 echo "============================================================"
 
 docker run --rm \
     -v "$(pwd):/src" \
-    -v "pkgmgr_nix_cache:/root/.cache/nix" \
+    -v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
+    -v pkgmgr_nix_store_${distro}:/nix \
    --workdir /src \
     -e PKGMGR_DEV=1 \
-    --entrypoint bash \
-    "package-manager-test-arch" \
-    -c '
+    -e TEST_PATTERN="${TEST_PATTERN}" \
+    "package-manager-test-${distro}" \
+    bash -lc '
         set -e;
         git config --global --add safe.directory /src || true;
         nix develop .#default --no-write-lock-file -c \
-            python -m unittest discover \
+            python3 -m unittest discover \
                 -s tests/unit \
                 -t /src \
-                -p "test_*.py";
+                -p "$TEST_PATTERN";
     '
36
src/pkgmgr/__init__.py
Normal file
@@ -0,0 +1,36 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Top-level pkgmgr package.

We deliberately avoid importing heavy submodules (like the CLI)
on import to keep unit tests fast and to not require optional
dependencies (like PyYAML) unless they are actually used.

Accessing ``pkgmgr.cli`` will load the CLI module lazily via
``__getattr__``. This keeps patterns like

    from pkgmgr import cli

working as expected in tests and entry points.
"""

from __future__ import annotations

from importlib import import_module
from typing import Any

__all__ = ["cli"]


def __getattr__(name: str) -> Any:
    """
    Lazily expose ``pkgmgr.cli`` as attribute on the top-level package.

    This keeps ``import pkgmgr`` lightweight while still allowing
    ``from pkgmgr import cli`` in tests and entry points.
    """
    if name == "cli":
        return import_module("pkgmgr.cli")
    raise AttributeError(f"module 'pkgmgr' has no attribute {name!r}")
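
A tiny usage sketch of the lazy attribute access described in the docstring above; nothing beyond the module itself is assumed:

import pkgmgr

# `import pkgmgr` stays lightweight; pkgmgr.cli is only imported when
# the attribute is first accessed via __getattr__.
cli = pkgmgr.cli  # equivalent to: from pkgmgr import cli
print(cli.__name__)  # -> "pkgmgr.cli"
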
14
src/pkgmgr/actions/branch/__init__.py
Normal file
@@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
"""
Public API for branch actions.
"""

from .open_branch import open_branch
from .close_branch import close_branch
from .drop_branch import drop_branch

__all__ = [
    "open_branch",
    "close_branch",
    "drop_branch",
]
100
src/pkgmgr/actions/branch/close_branch.py
Normal file
@@ -0,0 +1,100 @@
from __future__ import annotations

from typing import Optional
from pkgmgr.core.git import run_git, GitError, get_current_branch
from .utils import _resolve_base_branch


def close_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
    force: bool = False,
) -> None:
    """
    Merge a feature branch into the base branch and delete it afterwards.
    """

    # Determine branch name
    if not name:
        try:
            name = get_current_branch(cwd=cwd)
        except GitError as exc:
            raise RuntimeError(f"Failed to detect current branch: {exc}") from exc

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    if name == target_base:
        raise RuntimeError(
            f"Refusing to close base branch {target_base!r}. "
            "Please specify a feature branch."
        )

    # Confirmation
    if not force:
        answer = input(
            f"Merge branch '{name}' into '{target_base}' and delete it afterwards? (y/N): "
        ).strip().lower()
        if answer != "y":
            print("Aborted closing branch.")
            return

    # Fetch
    try:
        run_git(["fetch", "origin"], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to fetch from origin before closing branch {name!r}: {exc}"
        ) from exc

    # Checkout base
    try:
        run_git(["checkout", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to checkout base branch {target_base!r}: {exc}"
        ) from exc

    # Pull latest
    try:
        run_git(["pull", "origin", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
        ) from exc

    # Merge
    try:
        run_git(["merge", "--no-ff", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
        ) from exc

    # Push result
    try:
        run_git(["push", "origin", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to push base branch {target_base!r} after merge: {exc}"
        ) from exc

    # Delete local
    try:
        run_git(["branch", "-d", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to delete local branch {name!r}: {exc}"
        ) from exc

    # Delete remote
    try:
        run_git(["push", "origin", "--delete", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Branch {name!r} deleted locally, but remote deletion failed: {exc}"
        ) from exc
56
src/pkgmgr/actions/branch/drop_branch.py
Normal file
@@ -0,0 +1,56 @@
from __future__ import annotations

from typing import Optional
from pkgmgr.core.git import run_git, GitError, get_current_branch
from .utils import _resolve_base_branch


def drop_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
    force: bool = False,
) -> None:
    """
    Delete a branch locally and remotely without merging.
    """

    if not name:
        try:
            name = get_current_branch(cwd=cwd)
        except GitError as exc:
            raise RuntimeError(f"Failed to detect current branch: {exc}") from exc

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    if name == target_base:
        raise RuntimeError(
            f"Refusing to drop base branch {target_base!r}. It cannot be deleted."
        )

    # Confirmation
    if not force:
        answer = input(
            f"Delete branch '{name}' locally and on origin? This is destructive! (y/N): "
        ).strip().lower()
        if answer != "y":
            print("Aborted dropping branch.")
            return

    # Local delete
    try:
        run_git(["branch", "-d", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(f"Failed to delete local branch {name!r}: {exc}") from exc

    # Remote delete
    try:
        run_git(["push", "origin", "--delete", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
        ) from exc
65
src/pkgmgr/actions/branch/open_branch.py
Normal file
@@ -0,0 +1,65 @@
from __future__ import annotations

from typing import Optional
from pkgmgr.core.git import run_git, GitError
from .utils import _resolve_base_branch


def open_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
) -> None:
    """
    Create and push a new feature branch on top of a base branch.
    """

    # Request name interactively if not provided
    if not name:
        name = input("Enter new branch name: ").strip()

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    resolved_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    # 1) Fetch from origin
    try:
        run_git(["fetch", "origin"], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to fetch from origin before creating branch {name!r}: {exc}"
        ) from exc

    # 2) Checkout base branch
    try:
        run_git(["checkout", resolved_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to checkout base branch {resolved_base!r}: {exc}"
        ) from exc

    # 3) Pull latest changes
    try:
        run_git(["pull", "origin", resolved_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to pull latest changes for base branch {resolved_base!r}: {exc}"
        ) from exc

    # 4) Create new branch
    try:
        run_git(["checkout", "-b", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to create new branch {name!r} from base {resolved_base!r}: {exc}"
        ) from exc

    # 5) Push new branch
    try:
        run_git(["push", "-u", "origin", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to push new branch {name!r} to origin: {exc}"
        ) from exc
27
src/pkgmgr/actions/branch/utils.py
Normal file
@@ -0,0 +1,27 @@
from __future__ import annotations
from pkgmgr.core.git import run_git, GitError


def _resolve_base_branch(
    preferred: str,
    fallback: str,
    cwd: str,
) -> str:
    """
    Resolve the base branch to use.

    Try `preferred` first (default: main),
    fall back to `fallback` (default: master).

    Raise RuntimeError if neither exists.
    """
    for candidate in (preferred, fallback):
        try:
            run_git(["rev-parse", "--verify", candidate], cwd=cwd)
            return candidate
        except GitError:
            continue

    raise RuntimeError(
        f"Neither {preferred!r} nor {fallback!r} exist in this repository."
    )
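
For orientation, a minimal usage sketch of the branch API above; the branch name, repository path, and `force=True` flag are illustrative assumptions, not taken from the diff:

# Hypothetical usage of the branch actions defined above.
from pkgmgr.actions.branch import open_branch, close_branch

# Create and push feature/docs on top of main (or master as fallback).
open_branch("feature/docs", cwd="/path/to/repo")

# Later: merge it back into the resolved base branch and delete it,
# skipping the interactive confirmation.
close_branch("feature/docs", cwd="/path/to/repo", force=True)
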
218
src/pkgmgr/actions/install/__init__.py
Normal file
@@ -0,0 +1,218 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
High-level entry point for repository installation.

Responsibilities:

- Ensure the repository directory exists (clone if necessary).
- Verify the repository (GPG / commit checks).
- Build a RepoContext object.
- Delegate the actual installation decision logic to InstallationPipeline.
"""

from __future__ import annotations

import os
from typing import Any, Dict, List

from pkgmgr.core.repository.identifier import get_repo_identifier
from pkgmgr.core.repository.dir import get_repo_dir
from pkgmgr.core.repository.verify import verify_repository
from pkgmgr.actions.repository.clone import clone_repos
from pkgmgr.actions.install.context import RepoContext
from pkgmgr.actions.install.installers.os_packages import (
    ArchPkgbuildInstaller,
    DebianControlInstaller,
    RpmSpecInstaller,
)
from pkgmgr.actions.install.installers.nix_flake import (
    NixFlakeInstaller,
)
from pkgmgr.actions.install.installers.python import PythonInstaller
from pkgmgr.actions.install.installers.makefile import (
    MakefileInstaller,
)
from pkgmgr.actions.install.pipeline import InstallationPipeline


Repository = Dict[str, Any]

# All available installers, in the order they should be considered.
INSTALLERS = [
    ArchPkgbuildInstaller(),
    DebianControlInstaller(),
    RpmSpecInstaller(),
    NixFlakeInstaller(),
    PythonInstaller(),
    MakefileInstaller(),
]


# ---------------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------------


def _ensure_repo_dir(
    repo: Repository,
    repositories_base_dir: str,
    all_repos: List[Repository],
    preview: bool,
    no_verification: bool,
    clone_mode: str,
    identifier: str,
) -> str | None:
    """
    Compute and, if necessary, clone the repository directory.

    Returns the absolute repository path or None if cloning ultimately failed.
    """
    repo_dir = get_repo_dir(repositories_base_dir, repo)

    if not os.path.exists(repo_dir):
        print(
            f"Repository directory '{repo_dir}' does not exist. "
            f"Cloning it now..."
        )
        clone_repos(
            [repo],
            repositories_base_dir,
            all_repos,
            preview,
            no_verification,
            clone_mode,
        )
        if not os.path.exists(repo_dir):
            print(
                f"Cloning failed for repository {identifier}. "
                f"Skipping installation."
            )
            return None

    return repo_dir


def _verify_repo(
    repo: Repository,
    repo_dir: str,
    no_verification: bool,
    identifier: str,
) -> bool:
    """
    Verify a repository using the configured verification data.

    Returns True if verification is considered okay and installation may continue.
    """
    verified_info = repo.get("verified")
    verified_ok, errors, _commit_hash, _signing_key = verify_repository(
        repo,
        repo_dir,
        mode="local",
        no_verification=no_verification,
    )

    if not no_verification and verified_info and not verified_ok:
        print(f"Warning: Verification failed for {identifier}:")
        for err in errors:
            print(f"  - {err}")
        choice = input("Continue anyway? [y/N]: ").strip().lower()
        if choice != "y":
            print(f"Skipping installation for {identifier}.")
            return False

    return True


def _create_context(
    repo: Repository,
    identifier: str,
    repo_dir: str,
    repositories_base_dir: str,
    bin_dir: str,
    all_repos: List[Repository],
    no_verification: bool,
    preview: bool,
    quiet: bool,
    clone_mode: str,
    update_dependencies: bool,
) -> RepoContext:
    """
    Build a RepoContext instance for the given repository.
    """
    return RepoContext(
        repo=repo,
        identifier=identifier,
        repo_dir=repo_dir,
        repositories_base_dir=repositories_base_dir,
        bin_dir=bin_dir,
        all_repos=all_repos,
        no_verification=no_verification,
        preview=preview,
        quiet=quiet,
        clone_mode=clone_mode,
        update_dependencies=update_dependencies,
    )


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------


def install_repos(
    selected_repos: List[Repository],
    repositories_base_dir: str,
    bin_dir: str,
    all_repos: List[Repository],
    no_verification: bool,
    preview: bool,
    quiet: bool,
    clone_mode: str,
    update_dependencies: bool,
) -> None:
    """
    Install one or more repositories according to the configured installers
    and the CLI layer precedence rules.
    """
    pipeline = InstallationPipeline(INSTALLERS)

    for repo in selected_repos:
        identifier = get_repo_identifier(repo, all_repos)

        repo_dir = _ensure_repo_dir(
            repo=repo,
            repositories_base_dir=repositories_base_dir,
            all_repos=all_repos,
            preview=preview,
            no_verification=no_verification,
            clone_mode=clone_mode,
            identifier=identifier,
        )
        if not repo_dir:
            continue

        if not _verify_repo(
            repo=repo,
            repo_dir=repo_dir,
            no_verification=no_verification,
            identifier=identifier,
        ):
            continue

        ctx = _create_context(
            repo=repo,
            identifier=identifier,
            repo_dir=repo_dir,
            repositories_base_dir=repositories_base_dir,
            bin_dir=bin_dir,
            all_repos=all_repos,
            no_verification=no_verification,
            preview=preview,
            quiet=quiet,
            clone_mode=clone_mode,
            update_dependencies=update_dependencies,
        )

        pipeline.run(ctx)
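
A minimal call sketch for `install_repos`; the repository dictionary fields and paths are illustrative assumptions, only the function and its parameters come from the module above:

from pkgmgr.actions.install import install_repos

# Hypothetical repository entry; real entries come from the pkgmgr config.
repos = [{"provider": "github.com", "account": "example", "repository": "demo-tool"}]

install_repos(
    selected_repos=repos,
    repositories_base_dir="~/Repositories",
    bin_dir="~/.local/bin",
    all_repos=repos,
    no_verification=False,
    preview=True,   # pass preview through to the installers instead of executing
    quiet=False,
    clone_mode="https",
    update_dependencies=False,
)
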
@@ -38,7 +38,7 @@ from abc import ABC, abstractmethod
 from typing import Iterable, TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from pkgmgr.actions.repository.install.context import RepoContext
+    from pkgmgr.actions.install.context import RepoContext
 
 
 # ---------------------------------------------------------------------------
19
src/pkgmgr/actions/install/installers/__init__.py
Normal file
@@ -0,0 +1,19 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installer package for pkgmgr.

This exposes all installer classes so users can import them directly from
pkgmgr.actions.install.installers.
"""

from pkgmgr.actions.install.installers.base import BaseInstaller  # noqa: F401
from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller  # noqa: F401
from pkgmgr.actions.install.installers.python import PythonInstaller  # noqa: F401
from pkgmgr.actions.install.installers.makefile import MakefileInstaller  # noqa: F401

# OS-specific installers
from pkgmgr.actions.install.installers.os_packages.arch_pkgbuild import ArchPkgbuildInstaller  # noqa: F401
from pkgmgr.actions.install.installers.os_packages.debian_control import DebianControlInstaller  # noqa: F401
from pkgmgr.actions.install.installers.os_packages.rpm_spec import RpmSpecInstaller  # noqa: F401
@@ -8,8 +8,8 @@ Base interface for all installer components in the pkgmgr installation pipeline.
 from abc import ABC, abstractmethod
 from typing import Set
 
-from pkgmgr.actions.repository.install.context import RepoContext
-from pkgmgr.actions.repository.install.capabilities import CAPABILITY_MATCHERS
+from pkgmgr.actions.install.context import RepoContext
+from pkgmgr.actions.install.capabilities import CAPABILITY_MATCHERS
 
 
 class BaseInstaller(ABC):
97
src/pkgmgr/actions/install/installers/makefile.py
Normal file
@@ -0,0 +1,97 @@
from __future__ import annotations

import os
import re

from pkgmgr.actions.install.context import RepoContext
from pkgmgr.actions.install.installers.base import BaseInstaller
from pkgmgr.core.command.run import run_command


class MakefileInstaller(BaseInstaller):
    """
    Generic installer that runs `make install` if a Makefile with an
    install target is present.

    Safety rules:
    - If PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 is set, this installer
      is globally disabled.
    - The higher-level InstallationPipeline ensures that Makefile
      installation does not run if a stronger CLI layer already owns
      the command (e.g. Nix or OS packages).
    """

    layer = "makefile"
    MAKEFILE_NAME = "Makefile"

    def supports(self, ctx: RepoContext) -> bool:
        """
        Return True if this repository has a Makefile and the installer
        is not globally disabled.
        """
        # Optional global kill switch.
        if os.environ.get("PKGMGR_DISABLE_MAKEFILE_INSTALLER") == "1":
            if not ctx.quiet:
                print(
                    "[INFO] MakefileInstaller is disabled via "
                    "PKGMGR_DISABLE_MAKEFILE_INSTALLER."
                )
            return False

        makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
        return os.path.exists(makefile_path)

    def _has_install_target(self, makefile_path: str) -> bool:
        """
        Heuristically check whether the Makefile defines an install target.

        We look for:

        - a plain 'install:' target, or
        - any 'install-*:' style target.
        """
        try:
            with open(makefile_path, "r", encoding="utf-8", errors="ignore") as f:
                content = f.read()
        except OSError:
            return False

        # Simple heuristics: look for "install:" or targets starting with "install-"
        if re.search(r"^install\s*:", content, flags=re.MULTILINE):
            return True

        if re.search(r"^install-[a-zA-Z0-9_-]*\s*:", content, flags=re.MULTILINE):
            return True

        return False

    def run(self, ctx: RepoContext) -> None:
        """
        Execute `make install` in the repository directory if an install
        target exists.
        """
        makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)

        if not os.path.exists(makefile_path):
            if not ctx.quiet:
                print(
                    f"[pkgmgr] Makefile '{makefile_path}' not found, "
                    "skipping MakefileInstaller."
                )
            return

        if not self._has_install_target(makefile_path):
            if not ctx.quiet:
                print(
                    f"[pkgmgr] No 'install' target found in {makefile_path}."
                )
            return

        if not ctx.quiet:
            print(
                f"[pkgmgr] Running 'make install' in {ctx.repo_dir} "
                f"(MakefileInstaller)"
            )

        cmd = "make install"
        run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
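
A quick sanity check of the install-target heuristic above; the sample Makefile text is made up purely for illustration:

import re

# Made-up Makefile contents to exercise the regexes used by
# MakefileInstaller._has_install_target().
sample = "build:\n\tgo build ./...\n\ninstall-bin:\n\tinstall -m 0755 bin/tool /usr/local/bin/tool\n"

has_install = bool(
    re.search(r"^install\s*:", sample, flags=re.MULTILINE)
    or re.search(r"^install-[a-zA-Z0-9_-]*\s*:", sample, flags=re.MULTILINE)
)
print(has_install)  # True: "install-bin:" matches the install-* pattern
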
165
src/pkgmgr/actions/install/installers/nix_flake.py
Normal file
@@ -0,0 +1,165 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installer for Nix flakes.

If a repository contains flake.nix and the 'nix' command is available, this
installer will try to install profile outputs from the flake.

Behavior:
- If flake.nix is present and `nix` exists on PATH:
    * First remove any existing `package-manager` profile entry (best-effort).
    * Then install one or more flake outputs via `nix profile install`.
- For the package-manager repo:
    * `pkgmgr` is mandatory (CLI), `default` is optional.
- For all other repos:
    * `default` is mandatory.

Special handling:
- If PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 is set, the installer is
  globally disabled (useful for CI or debugging).

The higher-level InstallationPipeline and CLI-layer model decide when this
installer is allowed to run, based on where the current CLI comes from
(e.g. Nix, OS packages, Python, Makefile).
"""

import os
import shutil
from typing import TYPE_CHECKING, List, Tuple

from pkgmgr.actions.install.installers.base import BaseInstaller
from pkgmgr.core.command.run import run_command

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
    from pkgmgr.actions.install import InstallContext


class NixFlakeInstaller(BaseInstaller):
    """Install Nix flake profiles for repositories that define flake.nix."""

    # Logical layer name, used by capability matchers.
    layer = "nix"

    FLAKE_FILE = "flake.nix"
    PROFILE_NAME = "package-manager"

    def supports(self, ctx: "RepoContext") -> bool:
        """
        Only support repositories that:
        - Are NOT explicitly disabled via PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1,
        - Have a flake.nix,
        - And have the `nix` command available.
        """
        # Optional global kill-switch for CI or debugging.
        if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
            print(
                "[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
                "NixFlakeInstaller is disabled."
            )
            return False

        # Nix must be available.
        if shutil.which("nix") is None:
            return False

        # flake.nix must exist in the repository.
        flake_path = os.path.join(ctx.repo_dir, self.FLAKE_FILE)
        return os.path.exists(flake_path)

    def _ensure_old_profile_removed(self, ctx: "RepoContext") -> None:
        """
        Best-effort removal of an existing profile entry.

        This handles the "already provides the following file" conflict by
        removing previous `package-manager` installations before we install
        the new one.

        Any error in `nix profile remove` is intentionally ignored, because
        a missing profile entry is not a fatal condition.
        """
        if shutil.which("nix") is None:
            return

        cmd = f"nix profile remove {self.PROFILE_NAME} || true"
        try:
            # NOTE: no allow_failure here → matches the existing unit tests
            run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
        except SystemExit:
            # Unit tests explicitly assert this is swallowed
            pass

    def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
        """
        Decide which flake outputs to install and whether failures are fatal.

        Returns a list of (output_name, allow_failure) tuples.

        Rules:
        - For the package-manager repo (identifier 'pkgmgr' or 'package-manager'):
            [("pkgmgr", False), ("default", True)]
        - For all other repos:
            [("default", False)]
        """
        ident = ctx.identifier

        if ident in {"pkgmgr", "package-manager"}:
            # pkgmgr: main CLI output is "pkgmgr" (mandatory),
            # "default" is nice-to-have (non-fatal).
            return [("pkgmgr", False), ("default", True)]

        # Generic repos: we expect a sensible "default" package/app.
        # Failure to install it is considered fatal.
        return [("default", False)]

    def run(self, ctx: "InstallContext") -> None:
        """
        Install Nix flake profile outputs.

        For the package-manager repo, failure installing 'pkgmgr' is fatal,
        failure installing 'default' is non-fatal.
        For other repos, failure installing 'default' is fatal.
        """
        # Reuse supports() to keep logic in one place.
        if not self.supports(ctx):  # type: ignore[arg-type]
            return

        outputs = self._profile_outputs(ctx)  # list of (name, allow_failure)

        print(
            "Nix flake detected in "
            f"{ctx.identifier}, attempting to install profile outputs: "
            + ", ".join(name for name, _ in outputs)
        )

        # Handle the "already installed" case up-front for the shared profile.
        self._ensure_old_profile_removed(ctx)  # type: ignore[arg-type]

        for output, allow_failure in outputs:
            cmd = f"nix profile install {ctx.repo_dir}#{output}"
            print(f"[INFO] Running: {cmd}")
            ret = os.system(cmd)

            # Extract real exit code from os.system() result
            if os.WIFEXITED(ret):
                exit_code = os.WEXITSTATUS(ret)
            else:
                # abnormal termination (signal etc.) – keep raw value
                exit_code = ret

            if exit_code == 0:
                print(f"Nix flake output '{output}' successfully installed.")
                continue

            print(f"[Error] Failed to install Nix flake output '{output}'")
            print(f"[Error] Command exited with code {exit_code}")

            if not allow_failure:
                raise SystemExit(exit_code)

            print(
                "[Warning] Continuing despite failure to install "
                f"optional output '{output}'."
            )
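
As a rough illustration of the output-selection rule, a sketch using a minimal stand-in context object (the real RepoContext carries more fields; only `identifier` is read here):

from types import SimpleNamespace
from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller

installer = NixFlakeInstaller()

# Package-manager repo: "pkgmgr" output is mandatory, "default" is optional.
print(installer._profile_outputs(SimpleNamespace(identifier="package-manager")))
# -> [('pkgmgr', False), ('default', True)]

# Any other repo: only "default", and a failure is fatal.
print(installer._profile_outputs(SimpleNamespace(identifier="some-other-tool")))
# -> [('default', False)]
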
@@ -3,8 +3,8 @@
 import os
 import shutil
 
-from pkgmgr.actions.repository.install.context import RepoContext
-from pkgmgr.actions.repository.install.installers.base import BaseInstaller
+from pkgmgr.actions.install.context import RepoContext
+from pkgmgr.actions.install.installers.base import BaseInstaller
 from pkgmgr.core.command.run import run_command
 
 
@@ -17,11 +17,10 @@ apt/dpkg tooling are available.
 import glob
 import os
 import shutil
 
 from typing import List
 
-from pkgmgr.actions.repository.install.context import RepoContext
-from pkgmgr.actions.repository.install.installers.base import BaseInstaller
+from pkgmgr.actions.install.context import RepoContext
+from pkgmgr.actions.install.installers.base import BaseInstaller
 from pkgmgr.core.command.run import run_command
@@ -68,6 +67,32 @@ class DebianControlInstaller(BaseInstaller):
         pattern = os.path.join(parent, "*.deb")
         return sorted(glob.glob(pattern))
 
+    def _privileged_prefix(self) -> str | None:
+        """
+        Determine how to run privileged commands:
+
+        - If 'sudo' is available, return 'sudo '.
+        - If we are running as root (e.g. inside CI/container), return ''.
+        - Otherwise, return None, meaning we cannot safely elevate.
+
+        Callers are responsible for handling the None case (usually by
+        warning and skipping automatic installation).
+        """
+        sudo_path = shutil.which("sudo")
+
+        is_root = False
+        try:
+            is_root = os.geteuid() == 0
+        except AttributeError:  # pragma: no cover - non-POSIX platforms
+            # On non-POSIX systems, fall back to assuming "not root".
+            is_root = False
+
+        if sudo_path is not None:
+            return "sudo "
+        if is_root:
+            return ""
+        return None
+
     def _install_build_dependencies(self, ctx: RepoContext) -> None:
         """
         Install build dependencies using `apt-get build-dep ./`.
@@ -86,12 +111,25 @@ class DebianControlInstaller(BaseInstaller):
             )
             return
 
+        prefix = self._privileged_prefix()
+        if prefix is None:
+            print(
+                "[Warning] Neither 'sudo' is available nor running as root. "
+                "Skipping automatic build-dep installation for Debian. "
+                "Please install build dependencies from debian/control manually."
+            )
+            return
+
         # Update package lists first for reliable build-dep resolution.
-        run_command("sudo apt-get update", cwd=ctx.repo_dir, preview=ctx.preview)
+        run_command(
+            f"{prefix}apt-get update",
+            cwd=ctx.repo_dir,
+            preview=ctx.preview,
+        )
 
         # Install build dependencies based on debian/control in the current tree.
         # `apt-get build-dep ./` uses the source in the current directory.
-        builddep_cmd = "sudo apt-get build-dep -y ./"
+        builddep_cmd = f"{prefix}apt-get build-dep -y ./"
         run_command(builddep_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
 
     def run(self, ctx: RepoContext) -> None:
@@ -101,7 +139,7 @@ class DebianControlInstaller(BaseInstaller):
         Steps:
           1. apt-get build-dep ./   (automatic build dependency installation)
           2. dpkg-buildpackage -b -us -uc
-          3. sudo dpkg -i ../*.deb
+          3. sudo dpkg -i ../*.deb  (or plain dpkg -i when running as root)
         """
         control_path = self._control_path(ctx)
         if not os.path.exists(control_path):
@@ -123,7 +161,17 @@ class DebianControlInstaller(BaseInstaller):
             )
             return
 
+        prefix = self._privileged_prefix()
+        if prefix is None:
+            print(
+                "[Warning] Neither 'sudo' is available nor running as root. "
+                "Skipping automatic .deb installation. "
+                "You can manually install the following files with dpkg -i:\n    "
+                + "\n    ".join(debs)
+            )
+            return
+
         # 4) Install .deb files
-        install_cmd = "sudo dpkg -i " + " ".join(os.path.basename(d) for d in debs)
+        install_cmd = prefix + "dpkg -i " + " ".join(os.path.basename(d) for d in debs)
         parent = os.path.dirname(ctx.repo_dir)
         run_command(install_cmd, cwd=parent, preview=ctx.preview)
282
src/pkgmgr/actions/install/installers/os_packages/rpm_spec.py
Normal file
@@ -0,0 +1,282 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installer for RPM-based packages defined in *.spec files.

This installer:

1. Installs build dependencies via dnf/yum builddep (where available)
2. Prepares a source tarball in ~/rpmbuild/SOURCES based on the .spec
3. Uses rpmbuild to build RPMs from the provided .spec file
4. Installs the resulting RPMs via the system package manager (dnf/yum)
   or rpm as a fallback.

It targets RPM-based systems (Fedora / RHEL / CentOS / Rocky / Alma, etc.).
"""

import glob
import os
import shutil
import tarfile
from typing import List, Optional, Tuple

from pkgmgr.actions.install.context import RepoContext
from pkgmgr.actions.install.installers.base import BaseInstaller
from pkgmgr.core.command.run import run_command


class RpmSpecInstaller(BaseInstaller):
    """
    Build and install RPM-based packages from *.spec files.

    This installer is responsible for the full build + install of the
    application on RPM-like systems.
    """

    # Logical layer name, used by capability matchers.
    layer = "os-packages"

    def _is_rpm_like(self) -> bool:
        """
        Basic RPM-like detection:

        - rpmbuild must be available
        - at least one of dnf / yum / yum-builddep must be present
        """
        if shutil.which("rpmbuild") is None:
            return False

        has_dnf = shutil.which("dnf") is not None
        has_yum = shutil.which("yum") is not None
        has_yum_builddep = shutil.which("yum-builddep") is not None

        return has_dnf or has_yum or has_yum_builddep

    def _spec_path(self, ctx: RepoContext) -> Optional[str]:
        """Return the first *.spec file in the repository root, if any."""
        pattern = os.path.join(ctx.repo_dir, "*.spec")
        matches = sorted(glob.glob(pattern))
        if not matches:
            return None
        return matches[0]

    # ------------------------------------------------------------------
    # Helpers for preparing rpmbuild topdir and source tarball
    # ------------------------------------------------------------------
    def _rpmbuild_topdir(self) -> str:
        """
        Return the rpmbuild topdir that rpmbuild will use by default.

        By default this is: ~/rpmbuild

        In the self-install tests, $HOME is set to /tmp/pkgmgr-self-install,
        so this becomes /tmp/pkgmgr-self-install/rpmbuild which matches the
        paths in the RPM build logs.
        """
        home = os.path.expanduser("~")
        return os.path.join(home, "rpmbuild")

    def _ensure_rpmbuild_tree(self, topdir: str) -> None:
        """
        Ensure the standard rpmbuild directory tree exists:

            <topdir>/
                BUILD/
                BUILDROOT/
                RPMS/
                SOURCES/
                SPECS/
                SRPMS/
        """
        for sub in ("BUILD", "BUILDROOT", "RPMS", "SOURCES", "SPECS", "SRPMS"):
            os.makedirs(os.path.join(topdir, sub), exist_ok=True)

    def _parse_name_version(self, spec_path: str) -> Optional[Tuple[str, str]]:
        """
        Parse Name and Version from the given .spec file.

        Returns (name, version) or None if either cannot be determined.
        """
        name = None
        version = None

        with open(spec_path, "r", encoding="utf-8") as f:
            for raw_line in f:
                line = raw_line.strip()
                # Ignore comments
                if not line or line.startswith("#"):
                    continue

                lower = line.lower()
                if lower.startswith("name:"):
                    # e.g. "Name: package-manager"
                    parts = line.split(":", 1)
                    if len(parts) == 2:
                        name = parts[1].strip()
                elif lower.startswith("version:"):
                    # e.g. "Version: 0.7.7"
                    parts = line.split(":", 1)
                    if len(parts) == 2:
                        version = parts[1].strip()

                if name and version:
                    break

        if not name or not version:
            print(
                "[Warning] Could not determine Name/Version from spec file "
                f"'{spec_path}'. Skipping RPM source tarball preparation."
            )
            return None

        return name, version

    def _prepare_source_tarball(self, ctx: RepoContext, spec_path: str) -> None:
        """
        Prepare a source tarball in <HOME>/rpmbuild/SOURCES that matches
        the Name/Version in the .spec file.
        """
        parsed = self._parse_name_version(spec_path)
        if parsed is None:
            return

        name, version = parsed
        topdir = self._rpmbuild_topdir()
        self._ensure_rpmbuild_tree(topdir)

        build_dir = os.path.join(topdir, "BUILD")
        sources_dir = os.path.join(topdir, "SOURCES")

        source_root = os.path.join(build_dir, f"{name}-{version}")
        tarball_path = os.path.join(sources_dir, f"{name}-{version}.tar.gz")

        # Clean any previous build directory for this name/version.
        if os.path.exists(source_root):
            shutil.rmtree(source_root)

        # Copy the repository tree into BUILD/<name>-<version>.
        shutil.copytree(ctx.repo_dir, source_root)

        # Create the tarball with the top-level directory <name>-<version>.
        if os.path.exists(tarball_path):
            os.remove(tarball_path)

        with tarfile.open(tarball_path, "w:gz") as tar:
            tar.add(source_root, arcname=f"{name}-{version}")

        print(
            f"[INFO] Prepared RPM source tarball at '{tarball_path}' "
            f"from '{ctx.repo_dir}'."
        )

    # ------------------------------------------------------------------

    def supports(self, ctx: RepoContext) -> bool:
        """
        This installer is supported if:
        - we are on an RPM-based system (rpmbuild + dnf/yum/yum-builddep available), and
        - a *.spec file exists in the repository root.
        """
        if not self._is_rpm_like():
            return False

        return self._spec_path(ctx) is not None

    def _find_built_rpms(self) -> List[str]:
        """
        Find RPMs built by rpmbuild.

        By default, rpmbuild outputs RPMs into:
            ~/rpmbuild/RPMS/*/*.rpm
        """
        topdir = self._rpmbuild_topdir()
        pattern = os.path.join(topdir, "RPMS", "**", "*.rpm")
        return sorted(glob.glob(pattern, recursive=True))

    def _install_build_dependencies(self, ctx: RepoContext, spec_path: str) -> None:
        """
        Install build dependencies for the given .spec file.
        """
        spec_basename = os.path.basename(spec_path)

        if shutil.which("dnf") is not None:
            cmd = f"sudo dnf builddep -y {spec_basename}"
        elif shutil.which("yum-builddep") is not None:
            cmd = f"sudo yum-builddep -y {spec_basename}"
        elif shutil.which("yum") is not None:
            cmd = f"sudo yum-builddep -y {spec_basename}"
        else:
            print(
                "[Warning] No suitable RPM builddep tool (dnf/yum-builddep/yum) found. "
                "Skipping automatic build dependency installation for RPM."
            )
            return

        run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)

    def _install_built_rpms(self, ctx: RepoContext, rpms: List[str]) -> None:
        """
        Install or upgrade the built RPMs.

        Strategy:
        - Prefer dnf install -y <rpms>  (handles upgrades cleanly)
        - Else yum install -y <rpms>
        - Else fallback to rpm -Uvh <rpms>  (upgrade/replace existing)
        """
        if not rpms:
            print(
                "[Warning] No RPM files found after rpmbuild. "
                "Skipping RPM package installation."
            )
            return

        dnf = shutil.which("dnf")
        yum = shutil.which("yum")
        rpm = shutil.which("rpm")

        if dnf is not None:
            install_cmd = "sudo dnf install -y " + " ".join(rpms)
        elif yum is not None:
            install_cmd = "sudo yum install -y " + " ".join(rpms)
        elif rpm is not None:
            # Fallback: use rpm in upgrade mode so an existing older
            # version is replaced instead of causing file conflicts.
            install_cmd = "sudo rpm -Uvh " + " ".join(rpms)
        else:
            print(
                "[Warning] No suitable RPM installer (dnf/yum/rpm) found. "
                "Cannot install built RPMs."
            )
            return

        run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)

    def run(self, ctx: RepoContext) -> None:
        """
        Build and install RPM-based packages.

        Steps:
          1. Prepare source tarball in ~/rpmbuild/SOURCES matching Name/Version
          2. dnf/yum builddep <spec>   (automatic build dependency installation)
          3. rpmbuild -ba path/to/spec
          4. Install built RPMs via dnf/yum (or rpm as fallback)
        """
        spec_path = self._spec_path(ctx)
        if not spec_path:
            return

        # 1) Prepare source tarball so rpmbuild finds Source0 in SOURCES.
        self._prepare_source_tarball(ctx, spec_path)

        # 2) Install build dependencies
        self._install_build_dependencies(ctx, spec_path)

        # 3) Build RPMs
        spec_basename = os.path.basename(spec_path)
        build_cmd = f"rpmbuild -ba {spec_basename}"
        run_command(build_cmd, cwd=ctx.repo_dir, preview=ctx.preview)

        # 4) Find and install built RPMs
        rpms = self._find_built_rpms()
        self._install_built_rpms(ctx, rpms)
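
To illustrate the Name/Version parsing above, a small sketch against a made-up spec snippet (written to a temporary file, since the parser reads from a path):

import tempfile
from pkgmgr.actions.install.installers.os_packages.rpm_spec import RpmSpecInstaller

# Made-up minimal spec contents; only the Name: and Version: tags matter here.
spec_text = "# Example spec\nName:    package-manager\nVersion: 0.9.1\nRelease: 1%{?dist}\n"

with tempfile.NamedTemporaryFile("w", suffix=".spec", delete=False) as tmp:
    tmp.write(spec_text)
    path = tmp.name

print(RpmSpecInstaller()._parse_name_version(path))  # -> ('package-manager', '0.9.1')
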
139
src/pkgmgr/actions/install/installers/python.py
Normal file
@@ -0,0 +1,139 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
PythonInstaller — install Python projects defined via pyproject.toml.

Installation rules:

1. pip command resolution:
   a) If PKGMGR_PIP is set → use it exactly as provided.
   b) Else if running inside a virtualenv → use `sys.executable -m pip`.
   c) Else → create/use a per-repository virtualenv under ~/.venvs/<repo>/.

2. Installation target:
   - Always install into the resolved pip environment.
   - Never modify system Python, never rely on --user.
   - Nix-immutable systems (PEP 668) are automatically avoided because we
     never touch system Python.

3. The installer is skipped when:
   - PKGMGR_DISABLE_PYTHON_INSTALLER=1 is set.
   - The repository has no pyproject.toml.

All pip failures are treated as fatal.
"""

from __future__ import annotations

import os
import sys
import subprocess
from typing import TYPE_CHECKING

from pkgmgr.actions.install.installers.base import BaseInstaller
from pkgmgr.core.command.run import run_command

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
    from pkgmgr.actions.install import InstallContext


class PythonInstaller(BaseInstaller):
    """Install Python projects and dependencies via pip using isolated environments."""

    layer = "python"

    # ----------------------------------------------------------------------
    # Installer activation logic
    # ----------------------------------------------------------------------
    def supports(self, ctx: "RepoContext") -> bool:
        """
        Return True if this installer should handle this repository.

        The installer is active only when:
        - A pyproject.toml exists in the repo, and
        - PKGMGR_DISABLE_PYTHON_INSTALLER is not set.
        """
        if os.environ.get("PKGMGR_DISABLE_PYTHON_INSTALLER") == "1":
            print("[INFO] PythonInstaller disabled via PKGMGR_DISABLE_PYTHON_INSTALLER.")
            return False

        return os.path.exists(os.path.join(ctx.repo_dir, "pyproject.toml"))

    # ----------------------------------------------------------------------
    # Virtualenv handling
    # ----------------------------------------------------------------------
    def _in_virtualenv(self) -> bool:
        """Detect whether the current interpreter is inside a venv."""
        if os.environ.get("VIRTUAL_ENV"):
            return True

        base = getattr(sys, "base_prefix", sys.prefix)
        return sys.prefix != base

    def _ensure_repo_venv(self, ctx: "InstallContext") -> str:
        """
        Ensure that ~/.venvs/<identifier>/ exists and contains a minimal venv.

        Returns the venv directory path.
        """
        venv_dir = os.path.expanduser(f"~/.venvs/{ctx.identifier}")
        python = sys.executable

        if not os.path.isdir(venv_dir):
            print(f"[python-installer] Creating virtualenv: {venv_dir}")
            subprocess.check_call([python, "-m", "venv", venv_dir])

        return venv_dir

    # ----------------------------------------------------------------------
    # pip command resolution
    # ----------------------------------------------------------------------
    def _pip_cmd(self, ctx: "InstallContext") -> str:
        """
        Determine which pip command to use.

        Priority:
        1. PKGMGR_PIP override given by user or automation.
        2. Active virtualenv → use sys.executable -m pip.
        3. Per-repository venv → ~/.venvs/<repo>/bin/pip
        """
        explicit = os.environ.get("PKGMGR_PIP", "").strip()
        if explicit:
            return explicit

        if self._in_virtualenv():
            return f"{sys.executable} -m pip"

        venv_dir = self._ensure_repo_venv(ctx)
        pip_path = os.path.join(venv_dir, "bin", "pip")
        return pip_path

    # ----------------------------------------------------------------------
    # Execution
    # ----------------------------------------------------------------------
    def run(self, ctx: "InstallContext") -> None:
        """
        Install the project defined by pyproject.toml.

        Uses the resolved pip environment. Installation is isolated and never
        touches system Python.
        """
        if not self.supports(ctx):  # type: ignore[arg-type]
            return

        pyproject = os.path.join(ctx.repo_dir, "pyproject.toml")
        if not os.path.exists(pyproject):
            return

        print(f"[python-installer] Installing Python project for {ctx.identifier}...")

        pip_cmd = self._pip_cmd(ctx)

        # Final install command: ALWAYS isolated, never system-wide.
        install_cmd = f"{pip_cmd} install ."

        run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)

        print(f"[python-installer] Installation finished for {ctx.identifier}.")
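
To make the pip resolution order tangible, a small sketch; the override path is an illustrative assumption:

import os
from pkgmgr.actions.install.installers.python import PythonInstaller

installer = PythonInstaller()

# 1) An explicit PKGMGR_PIP override wins and is used exactly as given;
#    the context is not consulted in this branch, so None is fine here.
os.environ["PKGMGR_PIP"] = "/opt/tooling/bin/pip"  # hypothetical path
print(installer._pip_cmd(None))  # -> "/opt/tooling/bin/pip"
del os.environ["PKGMGR_PIP"]

# 2) Without the override, an active virtualenv resolves to "<python> -m pip";
# 3) otherwise a per-repo venv under ~/.venvs/<identifier>/bin/pip is created.
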
91
src/pkgmgr/actions/install/layers.py
Normal file
@@ -0,0 +1,91 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
CLI layer model for the pkgmgr installation pipeline.

We treat CLI entry points as coming from one of four conceptual layers:

- os-packages : system package managers (pacman/apt/dnf/…)
- nix         : Nix flake / nix profile
- python      : pip / virtualenv / user-local scripts
- makefile    : repo-local Makefile / scripts inside the repo

The layer order defines precedence: higher layers "own" the CLI and
lower layers will not be executed once a higher-priority CLI exists.
"""

from __future__ import annotations

import os
from enum import Enum
from typing import Optional


class CliLayer(str, Enum):
    OS_PACKAGES = "os-packages"
    NIX = "nix"
    PYTHON = "python"
    MAKEFILE = "makefile"


# Highest priority first
CLI_LAYERS: list[CliLayer] = [
    CliLayer.OS_PACKAGES,
    CliLayer.NIX,
    CliLayer.PYTHON,
    CliLayer.MAKEFILE,
]


def layer_priority(layer: Optional[CliLayer]) -> int:
    """
    Return a numeric priority index for a given layer.

    Lower index → higher priority.
    Unknown / None → very low priority.
    """
    if layer is None:
        return len(CLI_LAYERS)
    try:
        return CLI_LAYERS.index(layer)
    except ValueError:
        return len(CLI_LAYERS)


def classify_command_layer(command: str, repo_dir: str) -> CliLayer:
    """
    Heuristically classify a resolved command path into a CLI layer.

    Rules (best effort):

    - /usr/... or /bin/...             → os-packages
    - /nix/store/... or ~/.nix-profile → nix
    - ~/.local/bin/...                 → python
    - inside repo_dir                  → makefile
    - everything else                  → python (user/venv scripts, etc.)
    """
    command_abs = os.path.abspath(os.path.expanduser(command))
    repo_abs = os.path.abspath(repo_dir)
    home = os.path.expanduser("~")

    # OS package managers
    if command_abs.startswith("/usr/") or command_abs.startswith("/bin/"):
        return CliLayer.OS_PACKAGES

    # Nix store / profile
    if command_abs.startswith("/nix/store/") or command_abs.startswith(
        os.path.join(home, ".nix-profile")
    ):
        return CliLayer.NIX

    # User-local bin
    if command_abs.startswith(os.path.join(home, ".local", "bin")):
        return CliLayer.PYTHON

    # Inside the repository → usually a Makefile/script
    if command_abs.startswith(repo_abs):
        return CliLayer.MAKEFILE

    # Fallback: treat as Python-style/user-level script
    return CliLayer.PYTHON
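
A short sketch of the layer classification and precedence helpers above; the example paths are illustrative:

from pkgmgr.actions.install.layers import classify_command_layer, layer_priority

# Illustrative command paths on a typical Linux setup.
print(classify_command_layer("/usr/bin/pkgmgr", "/home/user/repos/package-manager"))
# -> CliLayer.OS_PACKAGES
print(classify_command_layer("~/.nix-profile/bin/pkgmgr", "/home/user/repos/package-manager"))
# -> CliLayer.NIX

# Lower index means higher priority, so os-packages (0) outranks nix (1).
print(layer_priority(classify_command_layer("/usr/bin/pkgmgr", "/tmp")))  # -> 0
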
257
src/pkgmgr/actions/install/pipeline.py
Normal file
@@ -0,0 +1,257 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installation pipeline orchestration for repositories.

This module implements the "Setup Controller" logic:

1. Detect current CLI command for the repo (if any).
2. Classify it into a layer (os-packages, nix, python, makefile).
3. Iterate over installers in layer order:
   - Skip installers whose layer is weaker than an already-loaded one.
   - Run only installers that support() the repo and add new capabilities.
   - After each installer, re-resolve the command and update the layer.
4. Maintain the repo["command"] field and create/update symlinks via create_ink().

The goal is to prevent conflicting installations and make the layering
behaviour explicit and testable.
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import Optional, Sequence, Set

from pkgmgr.actions.install.context import RepoContext
from pkgmgr.actions.install.installers.base import BaseInstaller
from pkgmgr.actions.install.layers import (
    CliLayer,
    classify_command_layer,
    layer_priority,
)
from pkgmgr.core.command.ink import create_ink
from pkgmgr.core.command.resolve import resolve_command_for_repo


@dataclass
class CommandState:
    """
    Represents the current CLI state for a repository:

    - command: absolute or relative path to the CLI entry point
    - layer: which conceptual layer this command belongs to
    """

    command: Optional[str]
    layer: Optional[CliLayer]


class CommandResolver:
    """
    Small helper responsible for resolving the current command for a repo
    and mapping it into a CommandState.
    """

    def __init__(self, ctx: RepoContext) -> None:
        self._ctx = ctx

    def resolve(self) -> CommandState:
        """
        Resolve the current command for this repository.

        If resolve_command_for_repo raises SystemExit (e.g. Python package
        without installed entry point), we treat this as "no command yet"
        from the point of view of the installers.
        """
        repo = self._ctx.repo
        identifier = self._ctx.identifier
        repo_dir = self._ctx.repo_dir

        try:
            cmd = resolve_command_for_repo(
                repo=repo,
                repo_identifier=identifier,
                repo_dir=repo_dir,
            )
        except SystemExit:
            cmd = None

        if not cmd:
            return CommandState(command=None, layer=None)

        layer = classify_command_layer(cmd, repo_dir)
        return CommandState(command=cmd, layer=layer)


class InstallationPipeline:
    """
    High-level orchestrator that applies a sequence of installers
    to a repository based on CLI layer precedence.
    """

    def __init__(self, installers: Sequence[BaseInstaller]) -> None:
        self._installers = list(installers)

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------
    def run(self, ctx: RepoContext) -> None:
        """
        Execute the installation pipeline for a single repository.

        - Detect initial command & layer.
        - Optionally create a symlink.
        - Run installers in order, skipping those whose layer is weaker
          than an already-loaded CLI.
        - After each installer, re-resolve the command and refresh the
          symlink if needed.
        """
        repo = ctx.repo
        repo_dir = ctx.repo_dir
        identifier = ctx.identifier
        repositories_base_dir = ctx.repositories_base_dir
        bin_dir = ctx.bin_dir
        all_repos = ctx.all_repos
        quiet = ctx.quiet
        preview = ctx.preview

        resolver = CommandResolver(ctx)
        state = resolver.resolve()

        # Persist initial command (if any) and create a symlink.
        if state.command:
            repo["command"] = state.command
            create_ink(
                repo,
                repositories_base_dir,
                bin_dir,
                all_repos,
                quiet=quiet,
                preview=preview,
            )
        else:
            repo.pop("command", None)

        provided_capabilities: Set[str] = set()

        # Main installer loop
        for installer in self._installers:
            layer_name = getattr(installer, "layer", None)

            # Installers without a layer participate without precedence logic.
            if layer_name is None:
                self._run_installer(installer, ctx, identifier, repo_dir, quiet)
                continue

            try:
                installer_layer = CliLayer(layer_name)
            except ValueError:
                # Unknown layer string → treat as lowest priority.
                installer_layer = None

            # "Previous/Current layer already loaded?"
            if state.layer is not None and installer_layer is not None:
                current_prio = layer_priority(state.layer)
                installer_prio = layer_priority(installer_layer)

                if current_prio < installer_prio:
                    # Current CLI comes from a higher-priority layer,
                    # so we skip this installer entirely.
                    if not quiet:
                        print(
                            f"[pkgmgr] Skipping installer "
                            f"{installer.__class__.__name__} for {identifier} – "
                            f"CLI already provided by layer {state.layer.value!r}."
                        )
                    continue

                if current_prio == installer_prio:
                    # Same layer already provides a CLI; usually there is no
                    # need to run another installer on top of it.
                    if not quiet:
                        print(
                            f"[pkgmgr] Skipping installer "
                            f"{installer.__class__.__name__} for {identifier} – "
                            f"layer {installer_layer.value!r} is already loaded."
                        )
                    continue

            # Check if this installer is applicable at all.
            if not installer.supports(ctx):
                continue

            # Capabilities: if everything this installer would provide is already
            # covered, we can safely skip it.
            caps = installer.discover_capabilities(ctx)
            if caps and caps.issubset(provided_capabilities):
                if not quiet:
                    print(
                        f"Skipping installer {installer.__class__.__name__} "
                        f"for {identifier} – capabilities {caps} already provided."
                    )
                continue

            if not quiet:
                print(
                    f"[pkgmgr] Running installer {installer.__class__.__name__} "
                    f"for {identifier} in '{repo_dir}' "
                    f"(new capabilities: {caps or set()})..."
                )

            # Run the installer with error reporting.
            self._run_installer(installer, ctx, identifier, repo_dir, quiet)

            provided_capabilities.update(caps)

            # After running an installer, re-resolve the command and layer.
            new_state = resolver.resolve()
            if new_state.command:
                repo["command"] = new_state.command
                create_ink(
                    repo,
                    repositories_base_dir,
                    bin_dir,
                    all_repos,
                    quiet=quiet,
                    preview=preview,
                )
            else:
                repo.pop("command", None)

            state = new_state

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------
    @staticmethod
    def _run_installer(
        installer: BaseInstaller,
        ctx: RepoContext,
        identifier: str,
        repo_dir: str,
        quiet: bool,
    ) -> None:
        """
        Execute a single installer with unified error handling.
        """
        try:
            installer.run(ctx)
        except SystemExit as exc:
            exit_code = exc.code if isinstance(exc.code, int) else str(exc.code)
            print(
                f"[ERROR] Installer {installer.__class__.__name__} failed "
                f"for repository {identifier} (dir: {repo_dir}) "
                f"with exit code {exit_code}."
            )
print(
|
||||
"[ERROR] This usually means an underlying command failed "
|
||||
"(e.g. 'make install', 'nix build', 'pip install', ...)."
|
||||
)
|
||||
print(
|
||||
"[ERROR] Check the log above for the exact command output. "
|
||||
"You can also run this repository in isolation via:\n"
|
||||
f" pkgmgr install {identifier} "
|
||||
"--clone-mode shallow --no-verification"
|
||||
)
|
||||
raise
|
||||
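To illustrate how the pieces above fit together, a minimal usage sketch follows; the installer class names and the context collection are hypothetical placeholders, not part of this diff.

    # Hypothetical wiring of the pipeline (installer names are placeholders):
    installers = [NixInstaller(), PythonInstaller(), MakefileInstaller()]
    pipeline = InstallationPipeline(installers)

    for ctx in repo_contexts:  # one RepoContext per selected repository
        pipeline.run(ctx)      # resolves the CLI, then runs installers by layer precedence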
26
src/pkgmgr/actions/mirror/__init__.py
Normal file
@@ -0,0 +1,26 @@
"""
High-level mirror actions.

Public API:
- list_mirrors
- diff_mirrors
- merge_mirrors
- setup_mirrors
"""

from __future__ import annotations

from .types import Repository, MirrorMap
from .list_cmd import list_mirrors
from .diff_cmd import diff_mirrors
from .merge_cmd import merge_mirrors
from .setup_cmd import setup_mirrors

__all__ = [
    "Repository",
    "MirrorMap",
    "list_mirrors",
    "diff_mirrors",
    "merge_mirrors",
    "setup_mirrors",
]
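Callers can then import the high-level commands from the package root rather than the individual submodules, for example:

    from pkgmgr.actions.mirror import list_mirrors, diff_mirrors, merge_mirrors, setup_mirrors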
31
src/pkgmgr/actions/mirror/context.py
Normal file
@@ -0,0 +1,31 @@
from __future__ import annotations

from typing import List

from pkgmgr.core.repository.dir import get_repo_dir
from pkgmgr.core.repository.identifier import get_repo_identifier

from .io import load_config_mirrors, read_mirrors_file
from .types import MirrorMap, RepoMirrorContext, Repository


def build_context(
    repo: Repository,
    repositories_base_dir: str,
    all_repos: List[Repository],
) -> RepoMirrorContext:
    """
    Build a RepoMirrorContext for a single repository.
    """
    identifier = get_repo_identifier(repo, all_repos)
    repo_dir = get_repo_dir(repositories_base_dir, repo)

    config_mirrors: MirrorMap = load_config_mirrors(repo)
    file_mirrors: MirrorMap = read_mirrors_file(repo_dir)

    return RepoMirrorContext(
        identifier=identifier,
        repo_dir=repo_dir,
        config_mirrors=config_mirrors,
        file_mirrors=file_mirrors,
    )
60
src/pkgmgr/actions/mirror/diff_cmd.py
Normal file
@@ -0,0 +1,60 @@
from __future__ import annotations

from typing import List

from .context import build_context
from .printing import print_header
from .types import Repository


def diff_mirrors(
    selected_repos: List[Repository],
    repositories_base_dir: str,
    all_repos: List[Repository],
) -> None:
    """
    Show differences between config mirrors and MIRRORS file.

    - Mirrors present only in config are reported as "ONLY IN CONFIG".
    - Mirrors present only in MIRRORS file are reported as "ONLY IN FILE".
    - Mirrors with same name but different URLs are reported as "URL MISMATCH".
    """
    for repo in selected_repos:
        ctx = build_context(repo, repositories_base_dir, all_repos)

        print_header("[MIRROR DIFF]", ctx)

        config_m = ctx.config_mirrors
        file_m = ctx.file_mirrors

        if not config_m and not file_m:
            print(" No mirrors configured in config or MIRRORS file.")
            print()
            continue

        # Mirrors only in config
        for name, url in sorted(config_m.items()):
            if name not in file_m:
                print(f" [ONLY IN CONFIG] {name}: {url}")

        # Mirrors only in MIRRORS file
        for name, url in sorted(file_m.items()):
            if name not in config_m:
                print(f" [ONLY IN FILE] {name}: {url}")

        # Mirrors with same name but different URLs
        shared = set(config_m) & set(file_m)
        for name in sorted(shared):
            url_cfg = config_m.get(name)
            url_file = file_m.get(name)
            if url_cfg != url_file:
                print(
                    f" [URL MISMATCH] {name}:\n"
                    f" config: {url_cfg}\n"
                    f" file: {url_file}"
                )

        if config_m and file_m and config_m == file_m:
            print(" [OK] Mirrors in config and MIRRORS file are in sync.")

        print()
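As a rough illustration (repository and URLs invented, header line from print_header omitted): for a repo whose config defines only origin while its MIRRORS file also lists backup and carries a diverging origin URL, the body of the report would look roughly like this:

    [ONLY IN FILE] backup: git@backup.example.org:acme/tool.git
    [URL MISMATCH] origin:
    config: git@github.com:acme/tool.git
    file: git@git.example.org:acme/tool.git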
179
src/pkgmgr/actions/mirror/git_remote.py
Normal file
@@ -0,0 +1,179 @@
from __future__ import annotations

import os
from typing import List, Optional, Set

from pkgmgr.core.command.run import run_command
from pkgmgr.core.git import GitError, run_git

from .types import MirrorMap, RepoMirrorContext, Repository


def build_default_ssh_url(repo: Repository) -> Optional[str]:
    """
    Build a simple SSH URL from repo config if no explicit mirror is defined.

    Example: git@github.com:account/repository.git
    """
    provider = repo.get("provider")
    account = repo.get("account")
    name = repo.get("repository")
    port = repo.get("port")

    if not provider or not account or not name:
        return None

    provider = str(provider)
    account = str(account)
    name = str(name)

    if port:
        return f"ssh://git@{provider}:{port}/{account}/{name}.git"

    # GitHub-style shorthand
    return f"git@{provider}:{account}/{name}.git"


def determine_primary_remote_url(
    repo: Repository,
    resolved_mirrors: MirrorMap,
) -> Optional[str]:
    """
    Determine the primary remote URL in a consistent way:

    1. resolved_mirrors["origin"]
    2. any resolved mirror (first by name)
    3. default SSH URL from provider/account/repository
    """
    if "origin" in resolved_mirrors:
        return resolved_mirrors["origin"]

    if resolved_mirrors:
        first_name = sorted(resolved_mirrors.keys())[0]
        return resolved_mirrors[first_name]

    return build_default_ssh_url(repo)


def _safe_git_output(args: List[str], cwd: str) -> Optional[str]:
    """
    Run a Git command via run_git and return its stdout, or None on failure.
    """
    try:
        return run_git(args, cwd=cwd)
    except GitError:
        return None


def current_origin_url(repo_dir: str) -> Optional[str]:
    """
    Return the current URL for remote 'origin', or None if not present.
    """
    output = _safe_git_output(["remote", "get-url", "origin"], cwd=repo_dir)
    if not output:
        return None
    url = output.strip()
    return url or None


def has_origin_remote(repo_dir: str) -> bool:
    """
    Check whether a remote called 'origin' exists in the repository.
    """
    output = _safe_git_output(["remote"], cwd=repo_dir)
    if not output:
        return False
    names = output.split()
    return "origin" in names


def _ensure_push_urls_for_origin(
    repo_dir: str,
    mirrors: MirrorMap,
    preview: bool,
) -> None:
    """
    Ensure that all mirror URLs are present as push URLs on 'origin'.
    """
    desired: Set[str] = {url for url in mirrors.values() if url}
    if not desired:
        return

    existing_output = _safe_git_output(
        ["remote", "get-url", "--push", "--all", "origin"],
        cwd=repo_dir,
    )
    existing = set(existing_output.splitlines()) if existing_output else set()

    missing = sorted(desired - existing)
    for url in missing:
        cmd = f"git remote set-url --add --push origin {url}"
        if preview:
            print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
        else:
            print(f"[INFO] Adding push URL to 'origin': {url}")
            run_command(cmd, cwd=repo_dir, preview=False)


def ensure_origin_remote(
    repo: Repository,
    ctx: RepoMirrorContext,
    preview: bool,
) -> None:
    """
    Ensure that a usable 'origin' remote exists and has all push URLs.
    """
    repo_dir = ctx.repo_dir
    resolved_mirrors = ctx.resolved_mirrors

    if not os.path.isdir(os.path.join(repo_dir, ".git")):
        print(f"[WARN] {repo_dir} is not a Git repository (no .git directory).")
        return

    url = determine_primary_remote_url(repo, resolved_mirrors)

    if not has_origin_remote(repo_dir):
        if not url:
            print(
                "[WARN] Could not determine URL for 'origin' remote. "
                "Please configure mirrors or provider/account/repository."
            )
            return

        cmd = f"git remote add origin {url}"
        if preview:
            print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
        else:
            print(f"[INFO] Adding 'origin' remote in {repo_dir}: {url}")
            run_command(cmd, cwd=repo_dir, preview=False)
    else:
        current = current_origin_url(repo_dir)
        if current == url or not url:
            print(
                f"[INFO] 'origin' already points to "
                f"{current or '<unknown>'} (no change needed)."
            )
        else:
            # We do not auto-change origin here, only log the mismatch.
            print(
                "[INFO] 'origin' exists with URL "
                f"{current or '<unknown>'}; not changing to {url}."
            )

    # Ensure all mirrors are present as push URLs
    _ensure_push_urls_for_origin(repo_dir, resolved_mirrors, preview)


def is_remote_reachable(url: str, cwd: Optional[str] = None) -> bool:
    """
    Check whether a remote repository is reachable via `git ls-remote`.

    This does NOT modify anything; it only probes the remote.
    """
    workdir = cwd or os.getcwd()
    try:
        # --exit-code → non-zero exit code if the remote does not exist
        run_git(["ls-remote", "--exit-code", url], cwd=workdir)
        return True
    except GitError:
        return False
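For reference, build_default_ssh_url yields URLs of the following shape (the provider, account, and repository values here are invented):

    build_default_ssh_url({"provider": "github.com", "account": "acme", "repository": "tool"})
    # -> "git@github.com:acme/tool.git"

    build_default_ssh_url({"provider": "git.example.org", "account": "acme", "repository": "tool", "port": 2222})
    # -> "ssh://git@git.example.org:2222/acme/tool.git"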
98
src/pkgmgr/actions/mirror/io.py
Normal file
@@ -0,0 +1,98 @@
from __future__ import annotations

import os
from urllib.parse import urlparse
from typing import List, Mapping

from .types import MirrorMap, Repository


def load_config_mirrors(repo: Repository) -> MirrorMap:
    """
    Normalise repo["mirrors"] (dict or list of name/url entries) into a MirrorMap.
    """
    mirrors = repo.get("mirrors") or {}
    result: MirrorMap = {}

    if isinstance(mirrors, dict):
        for name, url in mirrors.items():
            if url:
                result[str(name)] = str(url)
        return result

    if isinstance(mirrors, list):
        for entry in mirrors:
            if isinstance(entry, dict):
                name = entry.get("name")
                url = entry.get("url")
                if name and url:
                    result[str(name)] = str(url)

    return result


def read_mirrors_file(repo_dir: str, filename: str = "MIRRORS") -> MirrorMap:
    """
    Supports:
        NAME URL
        URL → auto name = hostname
    """
    path = os.path.join(repo_dir, filename)
    mirrors: MirrorMap = {}

    if not os.path.exists(path):
        return mirrors

    try:
        with open(path, "r", encoding="utf-8") as fh:
            for line in fh:
                stripped = line.strip()
                if not stripped or stripped.startswith("#"):
                    continue

                parts = stripped.split(None, 1)

                # Case 1: "name url"
                if len(parts) == 2:
                    name, url = parts
                # Case 2: "url" → auto-generate name
                elif len(parts) == 1:
                    url = parts[0]
                    parsed = urlparse(url)
                    host = (parsed.netloc or "").split(":")[0]
                    base = host or "mirror"
                    name = base
                    i = 2
                    while name in mirrors:
                        name = f"{base}{i}"
                        i += 1
                else:
                    continue

                mirrors[name] = url
    except OSError as exc:
        print(f"[WARN] Could not read MIRRORS file at {path}: {exc}")

    return mirrors


def write_mirrors_file(
    repo_dir: str,
    mirrors: Mapping[str, str],
    filename: str = "MIRRORS",
    preview: bool = False,
) -> None:
    """
    Write the mirrors as sorted "NAME URL" lines to the MIRRORS file,
    or only print the intended content when preview is True.
    """
    path = os.path.join(repo_dir, filename)
    lines = [f"{name} {url}" for name, url in sorted(mirrors.items())]
    content = "\n".join(lines) + ("\n" if lines else "")

    if preview:
        print(f"[PREVIEW] Would write MIRRORS file at {path}:")
        print(content or "(empty)")
        return

    try:
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w", encoding="utf-8") as fh:
            fh.write(content)
        print(f"[INFO] Wrote MIRRORS file at {path}")
    except OSError as exc:
        print(f"[ERROR] Failed to write MIRRORS file at {path}: {exc}")
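A small example of the two line formats read_mirrors_file accepts (file contents invented):

    # MIRRORS file:
    #     origin git@github.com:acme/tool.git
    #     https://codeberg.org/acme/tool.git
    #
    # read_mirrors_file(repo_dir) then returns:
    #     {
    #         "origin": "git@github.com:acme/tool.git",
    #         "codeberg.org": "https://codeberg.org/acme/tool.git",
    #     }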
46
src/pkgmgr/actions/mirror/list_cmd.py
Normal file
@@ -0,0 +1,46 @@
from __future__ import annotations

from typing import List

from .context import build_context
from .printing import print_header, print_named_mirrors
from .types import Repository


def list_mirrors(
    selected_repos: List[Repository],
    repositories_base_dir: str,
    all_repos: List[Repository],
    source: str = "all",
) -> None:
    """
    List mirrors for the selected repositories.

    source:
    - "config" → only mirrors from configuration
    - "file" → only mirrors from MIRRORS file
    - "resolved" → merged view (config + file, file wins)
    - "all" → show config + file + resolved
    """
    for repo in selected_repos:
        ctx = build_context(repo, repositories_base_dir, all_repos)
        resolved_m = ctx.resolved_mirrors

        print_header("[MIRROR]", ctx)

        if source in ("config", "all"):
            print_named_mirrors("config mirrors", ctx.config_mirrors)
            if source == "config":
                print()
                continue  # next repo

        if source in ("file", "all"):
            print_named_mirrors("MIRRORS file", ctx.file_mirrors)
            if source == "file":
                print()
                continue  # next repo

        if source in ("resolved", "all"):
            print_named_mirrors("resolved mirrors", resolved_m)

        print()
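A typical invocation might look as follows (the base directory value is illustrative):

    list_mirrors(selected_repos, repositories_base_dir="~/Repositories", all_repos=all_repos, source="resolved")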
162
src/pkgmgr/actions/mirror/merge_cmd.py
Normal file
@@ -0,0 +1,162 @@
from __future__ import annotations

import os
from typing import Dict, List, Tuple, Optional

import yaml

from pkgmgr.core.config.save import save_user_config

from .context import build_context
from .io import write_mirrors_file
from .types import MirrorMap, Repository


# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------

def _repo_key(repo: Repository) -> Tuple[str, str, str]:
    """
    Normalised key for identifying a repository in config files.
    """
    return (
        str(repo.get("provider", "")),
        str(repo.get("account", "")),
        str(repo.get("repository", "")),
    )


def _load_user_config(path: str) -> Dict[str, object]:
    """
    Load a user config YAML file as dict.
    Non-dicts yield {}.
    """
    if not os.path.exists(path):
        return {}

    try:
        with open(path, "r", encoding="utf-8") as f:
            data = yaml.safe_load(f) or {}
        return data if isinstance(data, dict) else {}
    except Exception:
        return {}


# -----------------------------------------------------------------------------
# Main merge command
# -----------------------------------------------------------------------------

def merge_mirrors(
    selected_repos: List[Repository],
    repositories_base_dir: str,
    all_repos: List[Repository],
    source: str,
    target: str,
    preview: bool = False,
    user_config_path: Optional[str] = None,
) -> None:
    """
    Merge mirrors between config and MIRRORS file.

    Rules:
    - source, target ∈ {"config", "file"}.
    - merged = (target_mirrors overridden by source_mirrors)
    - If target == "file" → write MIRRORS file.
    - If target == "config":
        * update the user config YAML directly
        * write it using save_user_config()

    The merge strategy is:
        dst + src (src wins on same name)
    """

    # Load user config once if we intend to write to it.
    user_cfg: Optional[Dict[str, object]] = None
    user_cfg_path_expanded: Optional[str] = None

    if target == "config" and user_config_path and not preview:
        user_cfg_path_expanded = os.path.expanduser(user_config_path)
        user_cfg = _load_user_config(user_cfg_path_expanded)
        if not isinstance(user_cfg.get("repositories"), list):
            user_cfg["repositories"] = []

    for repo in selected_repos:
        ctx = build_context(repo, repositories_base_dir, all_repos)

        print("============================================================")
        print(f"[MIRROR MERGE] Repository: {ctx.identifier}")
        print(f"[MIRROR MERGE] Directory: {ctx.repo_dir}")
        print(f"[MIRROR MERGE] {source} → {target}")
        print("============================================================")

        # Pick the correct source/target maps
        if source == "config":
            src = ctx.config_mirrors
            dst = ctx.file_mirrors
        else:  # source == "file"
            src = ctx.file_mirrors
            dst = ctx.config_mirrors

        # Merge (src overrides dst)
        merged: MirrorMap = dict(dst)
        merged.update(src)

        # ---------------------------------------------------------
        # WRITE TO FILE
        # ---------------------------------------------------------
        if target == "file":
            write_mirrors_file(ctx.repo_dir, merged, preview=preview)
            print()
            continue

        # ---------------------------------------------------------
        # WRITE TO CONFIG
        # ---------------------------------------------------------
        if target == "config":
            # If preview or no config path → show intended output
            if preview or not user_cfg:
                print("[INFO] The following mirrors would be written to config:")
                if not merged:
                    print(" (no mirrors)")
                else:
                    for name, url in sorted(merged.items()):
                        print(f" - {name}: {url}")
                print(" (Config not modified due to preview or missing path.)")
                print()
                continue

            repos = user_cfg.get("repositories")
            target_key = _repo_key(repo)
            existing_repo: Optional[Repository] = None

            # Find existing repo entry
            for entry in repos:
                if isinstance(entry, dict) and _repo_key(entry) == target_key:
                    existing_repo = entry
                    break

            # Create entry if missing
            if existing_repo is None:
                existing_repo = {
                    "provider": repo.get("provider"),
                    "account": repo.get("account"),
                    "repository": repo.get("repository"),
                }
                repos.append(existing_repo)

            # Write or delete mirrors
            if merged:
                existing_repo["mirrors"] = dict(merged)
            else:
                existing_repo.pop("mirrors", None)

            print(" [OK] Updated repo['mirrors'] in user config.")
            print()

    # -------------------------------------------------------------
    # SAVE CONFIG (once at the end)
    # -------------------------------------------------------------
    if user_cfg is not None and user_cfg_path_expanded is not None and not preview:
        save_user_config(user_cfg, user_cfg_path_expanded)
        print(f"[OK] Saved updated config: {user_cfg_path_expanded}")
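A worked example of the "src wins" merge rule (mirror maps invented, URLs abbreviated to A/B/C):

    #   config mirrors (dst): {"origin": "A"}
    #   MIRRORS file   (src): {"origin": "B", "backup": "C"}
    #   source="file", target="config"  ->  merged == {"origin": "B", "backup": "C"}
    # i.e. the file's URL for "origin" overrides the config's, and "backup" is added.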
Some files were not shown because too many files have changed in this diff