Compare commits: eeda944b73...v1.3.1

62 Commits (SHA1):

f950bb493c, fb0b81954d, b9b4c3fa59, 3642f92776, 8f38edde67, 5875441b23,
9190f0d901, f227734185, c7ef77559c, 2385601ed5, ac5ae95369, 31f7f47fe2,
c8bf1c91ad, f2caa68e3d, 03c232c308, e882e17737, b9edcf7101, 8b8ebf329f,
9598c17ea0, 67bd358e12, 340c1700dc, 0dfbaa0f6b, 08ab9fb142, 804245325d,
c05e77658a, 324f6db1f3, 2a69a83d71, 0ec4ccbe40, 0d864867cd, 3ff0afe828,
bd74ad41f9, fa2a92481d, 6a1e001fc2, 60afa92e09, 212f3ce5eb, 0d79537033,
72fc69c2f8, 6d8c6deae8, 6c116a029e, 3eb7c81fa1, 0334f477fd, 8bb99c99b7,
587cb2e516, fcf9d4b59b, b483dbfaad, 9630917570, 6a4432dd04, cfb91d825a,
a3b21f23fc, e49dd85200, c9dec5ecd6, f3c5460e48, 39b16b87a8, 26c9d79814,
2776d18a42, 7057ccfb95, 1807949c6f, d611720b8f, bf871650a8, 5ca1adda7b,
acb18adf76, c18490f5d3
.github/workflows/ci.yml (vendored, 7 changed lines)

@@ -13,8 +13,11 @@ jobs:
  test-integration:
    uses: ./.github/workflows/test-integration.yml

  test-container:
    uses: ./.github/workflows/test-container.yml
  test-env-virtual:
    uses: ./.github/workflows/test-env-virtual.yml

  test-env-nix:
    uses: ./.github/workflows/test-env-nix.yml

  test-e2e:
    uses: ./.github/workflows/test-e2e.yml
.github/workflows/mark-stable.yml (vendored, 60 changed lines)

@@ -3,7 +3,9 @@ name: Mark stable commit
on:
  push:
    branches:
      - main
      - main # still run tests for main
    tags:
      - 'v*' # run tests for version tags (e.g. v0.9.1)

jobs:
  test-unit:
@@ -12,8 +14,11 @@ jobs:
  test-integration:
    uses: ./.github/workflows/test-integration.yml

  test-container:
    uses: ./.github/workflows/test-container.yml
  test-env-virtual:
    uses: ./.github/workflows/test-env-virtual.yml

  test-env-nix:
    uses: ./.github/workflows/test-env-nix.yml

  test-e2e:
    uses: ./.github/workflows/test-e2e.yml
@@ -28,37 +33,70 @@ jobs:
    needs:
      - test-unit
      - test-integration
      - test-container
      - test-env-nix
      - test-env-virtual
      - test-e2e
      - test-virgin-user
      - test-virgin-root
    runs-on: ubuntu-latest

    # Only run this job if the push is for a version tag (v*)
    if: startsWith(github.ref, 'refs/tags/v')

    permissions:
      contents: write # to move the tag
      contents: write # Required to move/update the tag

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true # We need all tags for version comparison

      - name: Move 'stable' tag to this commit
      - name: Move 'stable' tag only if this version is the highest
        run: |
          set -euo pipefail

          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          echo "Tagging commit $GITHUB_SHA as stable…"
          echo "Ref: $GITHUB_REF"
          echo "SHA: $GITHUB_SHA"

          # delete local tag if exists
          VERSION="${GITHUB_REF#refs/tags/}"
          echo "Current version tag: ${VERSION}"

          echo "Collecting all version tags..."
          ALL_V_TAGS="$(git tag --list 'v*' || true)"

          if [[ -z "${ALL_V_TAGS}" ]]; then
            echo "No version tags found. Skipping stable update."
            exit 0
          fi

          echo "All version tags:"
          echo "${ALL_V_TAGS}"

          # Determine highest version using natural version sorting
          LATEST_TAG="$(printf '%s\n' ${ALL_V_TAGS} | sort -V | tail -n1)"

          echo "Highest version tag: ${LATEST_TAG}"

          if [[ "${VERSION}" != "${LATEST_TAG}" ]]; then
            echo "Current version ${VERSION} is NOT the highest version."
            echo "Stable tag will NOT be updated."
            exit 0
          fi

          echo "Current version ${VERSION} IS the highest version."
          echo "Updating 'stable' tag..."

          # Delete existing stable tag (local + remote)
          git tag -d stable 2>/dev/null || true
          # delete remote tag if exists
          git push origin :refs/tags/stable || true

          # create new tag on this commit
          # Create new stable tag
          git tag stable "$GITHUB_SHA"
          git push origin stable

          echo "✅ Stable tag updated."
          echo "✅ Stable tag updated to ${VERSION}."
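The key change in this workflow is that the `stable` tag is now moved only when the pushed tag is the highest existing `v*` tag. A minimal standalone sketch of that comparison, assuming the same `v*` SemVer tag naming (the script name and invocation are hypothetical):

```bash
#!/usr/bin/env bash
# Sketch: decide whether a given tag is the highest v* tag in the repository.
set -euo pipefail

VERSION="${1:?usage: check-highest.sh vX.Y.Z}"   # e.g. v1.3.1

# Collect all version tags, one per line.
ALL_V_TAGS="$(git tag --list 'v*')"

# sort -V applies natural version ordering, so v1.10.0 sorts after v1.9.0.
LATEST_TAG="$(printf '%s\n' "${ALL_V_TAGS}" | sort -V | tail -n1)"

if [[ "${VERSION}" == "${LATEST_TAG}" ]]; then
  echo "${VERSION} is the highest version tag; 'stable' can be moved here."
else
  echo "${VERSION} is older than ${LATEST_TAG}; 'stable' stays where it is."
fi
```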
.github/workflows/test-container.yml (vendored, 19 changed lines)

@@ -1,19 +0,0 @@
name: Test OS Containers

on:
  workflow_call:

jobs:
  test-container:
    runs-on: ubuntu-latest
    timeout-minutes: 30

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show Docker version
        run: docker version

      - name: Run container tests
        run: make test-container
.github/workflows/test-e2e.yml (vendored, 12 changed lines)

@@ -6,7 +6,11 @@ on:
jobs:
  test-e2e:
    runs-on: ubuntu-latest
    timeout-minutes: 60 # E2E + all distros can be heavier
    timeout-minutes: 60 # E2E can be heavier
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -15,5 +19,7 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Run E2E tests via make (all distros)
        run: make test-e2e
      - name: Run E2E tests via make (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-e2e
.github/workflows/test-env-nix.yml (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
name: Test Virgin Nix (flake only)

on:
  workflow_call:

jobs:
  test-env-nix:
    runs-on: ubuntu-latest
    timeout-minutes: 45

    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show Docker version
        run: docker version

      - name: Nix flake-only test (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-env-nix
.github/workflows/test-env-virtual.yml (vendored, new file, 28 lines)

@@ -0,0 +1,28 @@
name: Test OS Containers

on:
  workflow_call:

jobs:
  test-env-virtual:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show commit SHA
        run: git rev-parse HEAD

      - name: Show Docker version
        run: docker version

      - name: Run container tests (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-env-virtual
.github/workflows/test-integration.yml (vendored, 2 changed lines)

@@ -16,4 +16,4 @@ jobs:
        run: docker version

      - name: Run integration tests via make (Arch container)
        run: make test-integration DISTROS="arch"
        run: make test-integration distro="arch"
.github/workflows/test-unit.yml (vendored, 2 changed lines)

@@ -16,4 +16,4 @@ jobs:
        run: docker version

      - name: Run unit tests via make (Arch container)
        run: make test-unit DISTROS="arch"
        run: make test-unit distro="arch"
.github/workflows/test-virgin-root.yml (vendored, 38 changed lines)

@@ -7,6 +7,10 @@ jobs:
  test-virgin-root:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -15,44 +19,38 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Virgin Arch pkgmgr flake test (root)
      # 🔹 BUILD virgin image if missing
      - name: Build virgin container (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make build-missing-virgin

          echo ">>> Starting virgin ArchLinux container test (root, with shared caches)..."
      # 🔹 RUN test inside virgin image
      - name: Virgin ${{ matrix.distro }} pkgmgr test (root)
        run: |
          set -euo pipefail

          docker run --rm \
            -v "$PWD":/src \
            -v pkgmgr_repos:/root/Repositories \
            -v pkgmgr_pip_cache:/root/.cache/pip \
            -w /src \
            archlinux:latest \
            "pkgmgr-${{ matrix.distro }}-virgin" \
            bash -lc '
              set -euo pipefail

              echo ">>> Updating and upgrading Arch system..."
              pacman -Syu --noconfirm git python python-pip nix >/dev/null
              git config --global --add safe.directory /src

              echo ">>> Creating isolated virtual environment for pkgmgr..."
              python -m venv /tmp/pkgmgr-venv
              make install
              make setup

              echo ">>> Activating virtual environment..."
              source /tmp/pkgmgr-venv/bin/activate
              . "$HOME/.venvs/pkgmgr/bin/activate"

              echo ">>> Upgrading pip (cached)..."
              python -m pip install --upgrade pip >/dev/null

              echo ">>> Installing pkgmgr from current source tree (cached pip)..."
              python -m pip install /src >/dev/null

              echo ">>> Enabling Nix experimental features..."
              export NIX_CONFIG="experimental-features = nix-command flakes"

              echo ">>> Running: pkgmgr update pkgmgr --clone-mode shallow --no-verification"
              pkgmgr update pkgmgr --clone-mode shallow --no-verification

              echo ">>> Running: pkgmgr version pkgmgr"
              pkgmgr version pkgmgr

              echo ">>> Virgin Arch (root) test completed successfully."
              echo ">>> Running Nix-based: nix run .#pkgmgr -- version pkgmgr"
              nix run /src#pkgmgr -- version pkgmgr
            '
.github/workflows/test-virgin-user.yml (vendored, 60 changed lines)

@@ -7,6 +7,10 @@ jobs:
  test-virgin-user:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -15,59 +19,47 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Virgin Arch pkgmgr user test (non-root with sudo)
      # 🔹 BUILD virgin image if missing
      - name: Build virgin container (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make build-missing-virgin

      # 🔹 RUN test inside virgin image as non-root
      - name: Virgin ${{ matrix.distro }} pkgmgr test (user)
        run: |
          set -euo pipefail

          echo ">>> Starting virgin ArchLinux container test (non-root user with sudo)..."

          docker run --rm \
            -v "$PWD":/src \
            archlinux:latest \
            -w /src \
            "pkgmgr-${{ matrix.distro }}-virgin" \
            bash -lc '
              set -euo pipefail

              echo ">>> [root] Updating and upgrading Arch system..."
              pacman -Syu --noconfirm git python python-pip sudo base-devel debugedit
              make install

              echo ">>> [root] Creating non-root user dev..."
              useradd -m dev

              echo ">>> [root] Allowing passwordless sudo for dev..."
              echo "dev ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/dev
              chmod 0440 /etc/sudoers.d/dev

              echo ">>> [root] Adjusting ownership of /src for dev..."
              chown -R dev:dev /src

              echo ">>> [root] Running pkgmgr flow as non-root user dev..."
              sudo -u dev env PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 bash -lc "
              mkdir -p /nix/store /nix/var/nix /nix/var/log/nix /nix/var/nix/profiles
              chown -R dev:dev /nix
              chmod 0755 /nix
              chmod 1777 /nix/store

              sudo -H -u dev env HOME=/home/dev PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 bash -lc "
                set -euo pipefail
                cd /src

                echo \">>> [dev] Using user: \$(whoami)\"
                echo \">>> [dev] Running scripts/installation/main.sh...\"
                bash scripts/installation/main.sh

                echo \">>> [dev] Activating venv...\"
                make setup-venv
                . \"\$HOME/.venvs/pkgmgr/bin/activate\"

                echo \">>> [dev] Installing pkgmgr into venv via pip...\"
                python -m pip install /src >/dev/null

                echo \">>> [dev] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=\$PKGMGR_DISABLE_NIX_FLAKE_INSTALLER\"
                echo \">>> [dev] Updating managed repo package-manager via pkgmgr...\"
                pkgmgr update pkgmgr --clone-mode shallow --no-verification

                echo \">>> [dev] PATH:\"
                echo \"\$PATH\"

                echo \">>> [dev] which pkgmgr:\"
                which pkgmgr || echo \">>> [dev] pkgmgr not found in PATH\"

                echo \">>> [dev] Running: pkgmgr version pkgmgr\"
                pkgmgr version pkgmgr
              "

              echo ">>> [root] Container flow finished."
              export NIX_REMOTE=local
              export NIX_CONFIG=\"experimental-features = nix-command flakes\"
              nix run /src#pkgmgr -- version pkgmgr
              "
            '
.gitignore (vendored, 3 changed lines)

@@ -27,8 +27,9 @@ Thumbs.db
# Nix Cache to speed up tests
.nix/
.nix-dev-installed
flake.lock

# Ignore logs
*.log

result
result
CHANGELOG.md (144 changed lines)

@@ -1,3 +1,147 @@
## [1.3.1] - 2025-12-12

* Updated documentation with better run and installation instructions


## [1.3.0] - 2025-12-12

* **Minor release – Stability & CI hardening**

* Stabilized Nix resolution and global symlink handling across Arch, CentOS, Debian, and Ubuntu
* Ensured Nix works reliably in CI, sudo, login, and non-login shells without overriding distro-managed paths
* Improved error handling and deterministic behavior for non-root environments
* Refactored Docker and CI workflows for reproducible multi-distro virgin tests
* Made E2E tests more realistic by executing real CLI commands
* Fixed Python compatibility and missing dependencies on affected distros


## [1.2.1] - 2025-12-12

* **Changed**

* Split container tests into *virtualenv* and *Nix flake* environments to clearly separate Python and Nix responsibilities.

**Fixed**

* Fixed Nix installer permission issues when running under a different user in containers.
* Improved reliability of post-install Nix initialization across all distro packages.

**CI**

* Replaced generic container tests with explicit environment checks.
* Validate Nix availability via *nix flake* tests instead of Docker build-time side effects.


## [1.2.0] - 2025-12-12

* **Release workflow overhaul**

* Introduced a fully structured release workflow with clear phases and safeguards
* Added preview-first releases with explicit confirmation before execution
* Automatic handling of *latest* tag when a release is the newest version
* Optional branch closing after successful releases with interactive confirmation
* Improved safety by syncing with remote before any changes
* Clear separation of concerns (workflow, git handling, prompts, versioning)


## [1.1.0] - 2025-12-12

* Added *branch drop* for destructive branch deletion and introduced *--force/-f* flags for branch close and branch drop to skip confirmation prompts.


## [1.0.0] - 2025-12-11

* **1.0.0 – Official Stable Release 🎉**
  *First stable release of PKGMGR, the multi-distro development and package workflow manager.*

---

**Key Features**

**Core Functionality**

* Manage many repositories with one CLI: `clone`, `update`, `install`, `list`, `path`, `config`
* Proxy wrappers for Git, Docker/Compose and Make
* Multi-repo execution with safe *preview mode*
* Mirror management: `mirror list/diff/merge/setup`

**Releases & Versioning**

* Automated SemVer bumps, tagging and changelog generation
* Supports PKGBUILD, Debian, RPM, pyproject.toml, flake.nix

**Developer Tools**

* Open repositories in VS Code, file manager or terminal
* Unified workflows across all major Linux distros

**Nix Integration**

* Cross-distro reproducible builds via Nix flakes
* CI-tested across all supported environments

---

**Summary**
PKGMGR 1.0.0 unifies repository management, build tooling, release automation and reproducible multi-distro workflows into one cohesive CLI tool.

*This is the first official stable release.*


## [0.10.2] - 2025-12-11

* Stable tag now updates only when a new highest version is released.
* Debian package now includes sudo to ensure privilege escalation works reliably.
* Nix setup is significantly more resilient with retries, correct permissions, and better environment handling.
* AUR builder setup uses retries so yay installs succeed even under network instability.
* Nix flake installation now fails only on mandatory parts; optional outputs no longer block installation.


## [0.10.1] - 2025-12-11

* Fixed Debian/Ubuntu to pass container e2e tests


## [0.10.0] - 2025-12-11

**Mirror System**

* Added SSH mirror support including multi-push and remote probing
* Introduced mirror management commands and refactored the CLI parser into modules

**CI/CD**

* Migrated to reusable workflows with improved debugging instrumentation
* Made stable-tag automation reliable for workflow_run events and permissions
* Ensured deterministic test results by rebuilding all test containers with no-cache

**E2E and Container Tests**

* Fixed Git safe.directory handling across all containers
* Restored Dockerfile ENTRYPOINT to resolve Nix TLS issues
* Fixed missing volume errors and hardened the E2E runner
* Added full Nix flake E2E test matrix across all distro containers
* Disabled Nix sandboxing for cross-distro builds where required

**Nix and Python Environment**

* Unified Nix Python environment and introduced lazy CLI imports
* Ensured PyYAML availability and improved Python 3.13 compatibility
* Refactored flake.nix to remove side effects and rely on generic python3

**Packaging**

* Removed Debian's hard dependency on Nix
* Restructured packaging layout and refined build paths
* Excluded assets from Arch PKGBUILD rsync
* Cleaned up obsolete ignore files

**Repository Layout**

* Restructured repository to align local, Nix-based, and distro-based build workflows
* Added Arch support and refined build/purge scripts


## [0.9.1] - 2025-12-10

* Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
Dockerfile (84 changed lines)

@@ -1,58 +1,58 @@
# ------------------------------------------------------------
# Base image selector — overridden by Makefile
# ------------------------------------------------------------
ARG BASE_IMAGE=archlinux:latest
FROM ${BASE_IMAGE}
# syntax=docker/dockerfile:1

# ------------------------------------------------------------
# Nix environment defaults
#
# Nix itself is installed by your system packages (via init-nix.sh).
# Here we only define default configuration options.
# Base image selector — overridden by build args / Makefile
# ------------------------------------------------------------
ENV NIX_CONFIG="experimental-features = nix-command flakes"
ARG BASE_IMAGE

# ------------------------------------------------------------
# Unprivileged user for Arch package build (makepkg)
# ------------------------------------------------------------
RUN useradd -m aur_builder || true
# ============================================================
# Target: virgin
#   - installs distro deps (incl. make)
#   - no pkgmgr build
#   - no entrypoint
# ============================================================
FROM ${BASE_IMAGE} AS virgin
SHELL ["/bin/bash", "-lc"]

RUN echo "BASE_IMAGE=${BASE_IMAGE}" && cat /etc/os-release || true

# ------------------------------------------------------------
# Copy scripts and install distro dependencies
# ------------------------------------------------------------
WORKDIR /build

# Copy only scripts first so dependency installation can run early
COPY scripts/ scripts/
RUN find scripts -type f -name '*.sh' -exec chmod +x {} \;
# Copy scripts first so dependency installation can be cached
COPY scripts/installation/ scripts/installation/

# Install distro-specific build dependencies (and AUR builder on Arch)
RUN scripts/installation/run-dependencies.sh
# Install distro-specific build dependencies (including make)
RUN bash scripts/installation/dependencies.sh

# ------------------------------------------------------------
# Select distro-specific Docker entrypoint
# ------------------------------------------------------------
# Docker entrypoint (distro-agnostic, uses run-package.sh)
# ------------------------------------------------------------
COPY scripts/docker/entry.sh /usr/local/bin/docker-entry.sh
RUN chmod +x /usr/local/bin/docker-entry.sh
# Virgin default
CMD ["bash"]

# ------------------------------------------------------------
# Build and install distro-native package-manager package
# via Makefile `install` target (calls scripts/installation/run-package.sh)
# ------------------------------------------------------------

# ============================================================
# Target: full
#   - inherits from virgin
#   - builds + installs pkgmgr
#   - sets entrypoint + default cmd
# ============================================================
FROM virgin AS full

# Nix environment defaults (only config; nix itself comes from deps/install flow)
ENV NIX_CONFIG="experimental-features = nix-command flakes"

WORKDIR /build

# Copy full repository for build
COPY . .
RUN find scripts -type f -name '*.sh' -exec chmod +x {} \;

RUN set -e; \
    echo "Building and installing package-manager via make install..."; \
    make install; \
    rm -rf /build
# Build and install distro-native package-manager package
RUN set -euo pipefail; \
    echo "Building and installing package-manager via make install..."; \
    make install; \
    cd /; rm -rf /build

# Entry point
COPY scripts/docker/entry.sh /usr/local/bin/docker-entry.sh

# ------------------------------------------------------------
# Runtime working directory and dev entrypoint
# ------------------------------------------------------------
WORKDIR /src

ENTRYPOINT ["/usr/local/bin/docker-entry.sh"]
CMD ["pkgmgr", "--help"]
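The Dockerfile now exposes two stages, `virgin` (distro dependencies only) and `full` (pkgmgr built and installed), selected via `--target`, with the base image injected through `BASE_IMAGE`. A hedged example of invoking it directly, using the Debian base image from the Makefile and the `pkgmgr-<distro>-<target>` tag scheme used by the CI workflows (normally this is driven by `make build` / `scripts/build/image.sh`):

```bash
# Build only the "virgin" stage for Debian.
docker build \
  --build-arg BASE_IMAGE=debian:stable-slim \
  --target virgin \
  -t pkgmgr-debian-virgin \
  .

# Build the "full" stage (the default final stage), which additionally runs `make install`.
docker build \
  --build-arg BASE_IMAGE=debian:stable-slim \
  -t pkgmgr-debian \
  .
```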
MIRRORS (new file, 3 lines)

@@ -0,0 +1,3 @@
git@github.com:kevinveenbirkenbach/package-manager.git
ssh://git@git.veen.world:2201/kevinveenbirkenbach/pkgmgr.git
ssh://git@code.cymais.cloud:2201/kevinveenbirkenbach/pkgmgr.git
Makefile (84 changed lines)

@@ -1,12 +1,19 @@
.PHONY: install setup uninstall \
	test build build-no-cache test-unit test-e2e test-integration \
	test-container
.PHONY: install uninstall \
	build build-no-cache build-no-cache-all build-missing \
	delete-volumes purge \
	test test-unit test-e2e test-integration test-env-virtual test-env-nix \
	setup setup-venv setup-nix

# Distro
# Options: arch debian ubuntu fedora centos
DISTROS ?= arch debian ubuntu fedora centos
distro ?= arch
export distro

# ------------------------------------------------------------
# Distro list and base images
# Base images
# (kept for documentation/reference; actual build logic is in scripts/build)
# ------------------------------------------------------------
DISTROS := arch debian ubuntu fedora centos
BASE_IMAGE_ARCH := archlinux:latest
BASE_IMAGE_DEBIAN := debian:stable-slim
BASE_IMAGE_UBUNTU := ubuntu:latest
@@ -14,7 +21,6 @@ BASE_IMAGE_FEDORA := fedora:latest
BASE_IMAGE_CENTOS := quay.io/centos/centos:stream9

# Make them available in scripts
export DISTROS
export BASE_IMAGE_ARCH
export BASE_IMAGE_DEBIAN
export BASE_IMAGE_UBUNTU
@@ -26,19 +32,50 @@ TEST_PATTERN := test_*.py
export TEST_PATTERN

# ------------------------------------------------------------
# PKGMGR setup (developer wrapper -> scripts/installation/main.sh)
# System install
# ------------------------------------------------------------
setup:
install:
	@echo "Building and installing distro-native package-manager for this system..."
	@bash scripts/installation/main.sh

# ------------------------------------------------------------
# PKGMGR setup
# ------------------------------------------------------------

# Default: keep current auto-detection behavior
setup: setup-nix setup-venv

# Explicit: developer setup (Python venv + shell RC + main.py install)
setup-venv: setup-nix
	@bash scripts/setup/venv.sh

# Explicit: Nix shell mode (no venv, no RC changes)
setup-nix:
	@bash scripts/setup/nix.sh

# ------------------------------------------------------------
# Docker build targets (delegated to scripts/build)
# ------------------------------------------------------------
build-no-cache:
	@bash scripts/build/build-image-no-cache.sh

build:
	@bash scripts/build/build-image.sh
	@bash scripts/build/image.sh --target virgin
	@bash scripts/build/image.sh

build-missing-virgin:
	@bash scripts/build/image.sh --target virgin --missing

build-missing: build-missing-virgin
	@bash scripts/build/image.sh --missing

build-no-cache:
	@bash scripts/build/image.sh --target virgin --no-cache
	@bash scripts/build/image.sh --no-cache

build-no-cache-all:
	@set -e; \
	for d in $(DISTROS); do \
		echo "=== build-no-cache: $$d ==="; \
		distro="$$d" $(MAKE) build-no-cache; \
	done

# ------------------------------------------------------------
# Test targets (delegated to scripts/test)
@@ -53,24 +90,19 @@ test-integration: build-missing
test-e2e: build-missing
	@bash scripts/test/test-e2e.sh

test-container: build-missing
	@bash scripts/test/test-container.sh
test-env-virtual: build-missing
	@bash scripts/test/test-env-virtual.sh

# ------------------------------------------------------------
# Build only missing container images
# ------------------------------------------------------------
build-missing:
	@bash scripts/build/build-image-missing.sh
test-env-nix: build-missing
	@bash scripts/test/test-env-nix.sh

# Combined test target for local + CI (unit + integration + e2e)
test: test-container test-unit test-integration test-e2e
test: test-env-virtual test-unit test-integration test-e2e

# ------------------------------------------------------------
# System install (native packages, calls scripts/installation/run-package.sh)
# ------------------------------------------------------------
install:
	@echo "Building and installing distro-native package-manager for this system..."
	@bash scripts/installation/run-package.sh
delete-volumes:
	@docker volume rm pkgmgr_nix_store_${distro} pkgmgr_nix_cache_${distro} || true

purge: delete-volumes build-no-cache

# ------------------------------------------------------------
# Uninstall target
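The targets above are all driven by the exported lower-case `distro` variable (default `arch`). A short example of how they are typically combined locally, using only the targets defined in this Makefile:

```bash
# Build the missing virgin/full images for one distro, then run its environment tests.
distro=debian make build-missing
distro=debian make test-env-virtual   # Python venv environment test
distro=debian make test-env-nix       # Nix flake-only environment test

# Rebuild the images for every distro from scratch (loops over $(DISTROS)).
make build-no-cache-all

# Remove the per-distro Nix volumes and force a clean rebuild.
distro=debian make purge
```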
README.md (202 changed lines)

@@ -1,70 +1,202 @@
# Package Manager🤖📦
# Package Manager 🤖📦



[](https://github.com/sponsors/kevinveenbirkenbach)
[](https://www.patreon.com/c/kevinveenbirkenbach)
[](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
[](https://www.patreon.com/c/kevinveenbirkenbach)
[](https://buymeacoffee.com/kevinveenbirkenbach)
[](https://s.veen.world/paypaldonate)
[](LICENSE)
[](https://github.com/kevinveenbirkenbach/package-manager)
[](https://github.com/kevinveenbirkenbach/package-manager/actions/workflows/mark-stable.yml)

*Kevins's* Package Manager is a configurable Python tool designed to manage multiple repositories via Bash. It automates common Git operations such as clone, pull, push, status, and more. Additionally, it handles the creation of executable wrappers and alias links for your repositories.
[**Kevin's Package Manager (PKGMGR)**](https://s.veen.world/pkgmgr) is a *multi-distro* package manager and workflow orchestrator.
It helps you **develop, package, release and manage projects across multiple Linux-based
operating systems** (Arch, Debian, Ubuntu, Fedora, CentOS, …).

PKGMGR is implemented in **Python** and uses **Nix (flakes)** as a foundation for
distribution-independent builds and tooling. On top of that it provides a rich
CLI that proxies common developer tools (Git, Docker, Make, …) and glues them
together into repeatable development workflows.

---

## Why PKGMGR? 🧠

Traditional distro package managers like `apt`, `pacman` or `dnf` focus on a
single operating system. PKGMGR instead focuses on **your repositories and
development lifecycle**:

* one configuration for all your repos,
* one CLI to interact with them,
* one Nix-based layer to keep tooling reproducible across distros.

You keep using your native package manager where it makes sense – PKGMGR
coordinates the *development and release flow* around it.

---

## Features 🚀

- **Installation & Setup:**
  Create executable wrappers with auto-detected commands (e.g. `main.sh` or `main.py`).

- **Git Operations:**
  Easily perform `git pull`, `push`, `status`, `commit`, `diff`, `add`, `show`, and `checkout` with extra parameters passed through.

- **Configuration Management:**
  Manage repository configurations via a default file (`config/defaults.yaml`) and a user-specific file (`config/config.yaml`). Initialize, add, delete, or ignore entries using subcommands.

- **Path & Listing:**
  Display repository paths or list all configured packages with their details.

- **Custom Aliases:**
  Generate and manage custom aliases for easy command invocation.
### Multi-distro development & packaging

* Manage **many repositories at once** from a single `config/config.yaml`.
* Drive full **release pipelines** across Linux distributions using:

  * Nix flakes (`flake.nix`)
  * PyPI style builds (`pyproject.toml`)
  * OS packages (PKGBUILD, Debian control/changelog, RPM spec)
  * Ansible Galaxy metadata and more.

### Rich CLI for daily work

All commands are exposed via the `pkgmgr` CLI and are available on every distro:

* **Repository management**

  * `clone`, `update`, `install`, `delete`, `deinstall`, `path`, `list`, `config`
* **Git proxies**

  * `pull`, `push`, `status`, `diff`, `add`, `show`, `checkout`,
    `reset`, `revert`, `rebase`, `commit`, `branch`
* **Docker & Compose orchestration**

  * `build`, `up`, `down`, `exec`, `ps`, `start`, `stop`, `restart`
* **Release toolchain**

  * `version`, `release`, `changelog`, `make`
* **Mirror & workflow helpers**

  * `mirror` (list/diff/merge/setup), `shell`, `terminal`, `code`, `explore`

Many of these commands support `--preview` mode so you can inspect the
underlying Git or Docker calls without executing them.

### Full development workflows

PKGMGR is not just a helper around Git commands. Combined with its release and
versioning features it can drive **end-to-end workflows**:

1. Clone and mirror repositories.
2. Run tests and builds through `make` or Nix.
3. Bump versions, update changelogs and tags.
4. Build distro-specific packages.
5. Keep all mirrors and working copies in sync.

The extensive E2E tests (`tests/e2e/`) and GitHub Actions workflows (including
"virgin user" and "virgin root" Arch tests) validate these flows across
different Linux environments.

---

## Architecture & Setup Map 🗺️

The following diagram provides a full overview of PKGMGR's package structure,
installation layers, and setup controller flow:
The following diagram gives a full overview of:

* PKGMGR's package structure,
* the layered installers (OS, foundation, Python, Makefile),
* and the setup controller that decides which layer to use on a given system.



**Diagram status:** *As of 11 December 2025*
**Always-up-to-date version:** https://s.veen.world/pkgmgrmp
**Diagram status:** 12 December 2025
**Always-up-to-date version:** [https://s.veen.world/pkgmgrmp](https://s.veen.world/pkgmgrmp)

---

Perfect, so here is the **even more compact and correctly differentiated version**, which distinguishes **only** between
**`make setup`** and **`make setup-venv`** and matches your behaviour exactly.

README-ready, without over-engineering.

---

## Installation ⚙️

Clone the repository and ensure your `~/.local/bin` is in your system PATH:
PKGMGR can be installed using `make`.
The setup mode defines **which runtime layers are prepared**.

---

### Setup modes

| Command             | Prepares                | Use case              |
| ------------------- | ----------------------- | --------------------- |
| **make setup**      | Python venv **and** Nix | Full development & CI |
| **make setup-venv** | Python venv only        | Local user setup      |

---

### Install & setup

```bash
git clone https://github.com/kevinveenbirkenbach/package-manager.git
cd package-manager
make install
```

Install make and pip if not installed yet:

```bash
pacman -S make python-pip
```

Then, run the following command to set up the project:
#### Full setup (venv + Nix)

```bash
make setup
```

The `make setup` command will:
- Make `main.py` executable.
- Install required packages from `requirements.txt`.
- Execute `python main.py install` to complete the installation.
Use this for CI, servers, containers and full development workflows.

#### Venv-only setup

```bash
make setup-venv
source ~/.venvs/pkgmgr/bin/activate
```

Use this if you want PKGMGR isolated without Nix integration.

---

## Run without installation (Nix)

Run PKGMGR directly via Nix Flakes.

```bash
nix run github:kevinveenbirkenbach/package-manager#pkgmgr -- --help
```

Example:

```bash
nix run github:kevinveenbirkenbach/package-manager#pkgmgr -- version pkgmgr
```

Notes:

* full flake URL required
* `--` separates Nix and PKGMGR arguments
* can be used alongside any setup mode

---

## Usage 🧰

After installation, the main entry point is:

```bash
pkgmgr --help
```

This prints a list of all available subcommands.
The help for each command is available via:
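Presumably each subcommand follows the usual `--help` convention, for example:

```bash
pkgmgr <command> --help   # e.g. pkgmgr update --help
```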

---

## License 📄

This project is licensed under the MIT License.
See the [LICENSE](LICENSE) file for details.

---

## Author 👤

Kevin Veen-Birkenbach
Kevin Veen-Birkenbach
[https://www.veen.world](https://www.veen.world)
TODO.md (1 changed line)

@@ -3,5 +3,4 @@
For the following, check out the implementation map:

- Implement TAGS
- Implement MIRROR
- Implement SIGNING_KEY

requirements.txt (deleted)

@@ -1,4 +0,0 @@
# Legacy file used only if pip still installs from requirements.txt.
# You may delete this file once you switch entirely to pyproject.toml.

PyYAML

assets/banner.jpg (new binary file, not shown; after: 63 KiB)
assets/map.png (binary file changed, not shown; before: 1.9 MiB, after: 1.9 MiB)
27
flake.lock
generated
27
flake.lock
generated
@@ -1,27 +0,0 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1765186076,
|
||||
"narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
@@ -36,7 +36,7 @@
|
||||
rec {
|
||||
pkgmgr = pyPkgs.buildPythonApplication {
|
||||
pname = "package-manager";
|
||||
version = "0.9.1";
|
||||
version = "1.3.1";
|
||||
|
||||
# Use the git repo as source
|
||||
src = ./.;
|
||||
|
||||
@@ -50,9 +50,10 @@ package() {
|
||||
install -Dm0755 "scripts/pkgmgr-wrapper.sh" \
|
||||
"$pkgdir/usr/bin/pkgmgr"
|
||||
|
||||
# Install Nix init helper
|
||||
install -Dm0755 "scripts/init-nix.sh" \
|
||||
"$pkgdir/usr/lib/package-manager/init-nix.sh"
|
||||
# Install Nix bootstrap (init + lib)
|
||||
install -d "$pkgdir/usr/lib/package-manager/nix"
|
||||
cp -a scripts/nix/* "$pkgdir/usr/lib/package-manager/nix/"
|
||||
chmod 0755 "$pkgdir/usr/lib/package-manager/nix/init.sh"
|
||||
|
||||
# Install the full repository into /usr/lib/package-manager
|
||||
mkdir -p "$pkgdir/usr/lib/package-manager"
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
post_install() {
|
||||
/usr/lib/package-manager/init-nix.sh || true
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
}
|
||||
|
||||
post_upgrade() {
|
||||
/usr/lib/package-manager/init-nix.sh || true
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
}
|
||||
|
||||
post_remove() {
|
||||
|
||||
@@ -9,7 +9,7 @@ Homepage: https://github.com/kevinveenbirkenbach/package-manager
|
||||
|
||||
Package: package-manager
|
||||
Architecture: any
|
||||
Depends: nix, ${misc:Depends}
|
||||
Depends: sudo, ${misc:Depends}
|
||||
Description: Wrapper that runs Kevin's package-manager via Nix flake
|
||||
This package provides the `pkgmgr` command, which runs Kevin's package
|
||||
manager via a local Nix flake
|
||||
|
||||
@@ -3,11 +3,7 @@ set -e
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
if [ -x /usr/lib/package-manager/init-nix.sh ]; then
|
||||
/usr/lib/package-manager/init-nix.sh || true
|
||||
else
|
||||
echo ">>> Warning: /usr/lib/package-manager/init-nix.sh not found or not executable."
|
||||
fi
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ override_dh_auto_test:
|
||||
:
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Install phase: copy wrapper + init script + full project source
|
||||
# Install phase: copy wrapper + Nix bootstrap (init + lib) + full project source
|
||||
# ---------------------------------------------------------------------------
|
||||
override_dh_auto_install:
|
||||
# Create target directories
|
||||
@@ -31,9 +31,11 @@ override_dh_auto_install:
|
||||
install -m0755 scripts/pkgmgr-wrapper.sh \
|
||||
debian/package-manager/usr/bin/pkgmgr
|
||||
|
||||
# Install shared Nix init script
|
||||
install -m0755 scripts/init-nix.sh \
|
||||
debian/package-manager/usr/lib/package-manager/init-nix.sh
|
||||
# Install Nix bootstrap (init + lib)
|
||||
install -d debian/package-manager/usr/lib/package-manager/nix
|
||||
cp -a scripts/nix/* \
|
||||
debian/package-manager/usr/lib/package-manager/nix/
|
||||
chmod 0755 debian/package-manager/usr/lib/package-manager/nix/init.sh
|
||||
|
||||
# Copy full project source into /usr/lib/package-manager,
|
||||
# but do not include the debian/ directory itself.
|
||||
|
||||
@@ -12,7 +12,7 @@ BuildArch: noarch
|
||||
# NOTE:
|
||||
# Nix is a runtime requirement, but it is *not* declared here as a hard
|
||||
# RPM dependency, because many distributions do not ship a "nix" RPM.
|
||||
# Instead, Nix is installed and initialized by init-nix.sh, which is
|
||||
# Instead, Nix is installed and initialized by nix/init.sh, which is
|
||||
# called in the %post scriptlet below.
|
||||
|
||||
%description
|
||||
@@ -22,7 +22,7 @@ manager via a local Nix flake:
|
||||
nix run /usr/lib/package-manager#pkgmgr -- ...
|
||||
|
||||
Nix is a runtime requirement and is installed/initialized by the
|
||||
init-nix.sh helper during package installation if it is not yet
|
||||
nix/init.sh helper during package installation if it is not yet
|
||||
available on the system.
|
||||
|
||||
%prep
|
||||
@@ -34,8 +34,8 @@ available on the system.
|
||||
|
||||
%install
|
||||
rm -rf %{buildroot}
|
||||
|
||||
install -d %{buildroot}%{_bindir}
|
||||
# Install project tree into a fixed, architecture-independent location.
|
||||
install -d %{buildroot}/usr/lib/package-manager
|
||||
|
||||
# Copy full project source into /usr/lib/package-manager
|
||||
@@ -44,8 +44,10 @@ cp -a . %{buildroot}/usr/lib/package-manager/
|
||||
# Wrapper
|
||||
install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr
|
||||
|
||||
# Shared Nix init script (ensure it is executable in the installed tree)
|
||||
install -m0755 scripts/init-nix.sh %{buildroot}/usr/lib/package-manager/init-nix.sh
|
||||
# Nix bootstrap (init + lib)
|
||||
install -d %{buildroot}/usr/lib/package-manager/nix
|
||||
cp -a scripts/nix/* %{buildroot}/usr/lib/package-manager/nix/
|
||||
chmod 0755 %{buildroot}/usr/lib/package-manager/nix/init.sh
|
||||
|
||||
# Remove packaging-only and development artefacts from the installed tree
|
||||
rm -rf \
|
||||
@@ -60,12 +62,7 @@ rm -rf \
|
||||
%{buildroot}/usr/lib/package-manager/.gitkeep || true
|
||||
|
||||
%post
|
||||
# Initialize Nix (if needed) after installing the package-manager files.
|
||||
if [ -x /usr/lib/package-manager/init-nix.sh ]; then
|
||||
/usr/lib/package-manager/init-nix.sh || true
|
||||
else
|
||||
echo ">>> Warning: /usr/lib/package-manager/init-nix.sh not found or not executable."
|
||||
fi
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
|
||||
%postun
|
||||
echo ">>> package-manager removed. Nix itself was not removed."
|
||||
|
||||
@@ -7,10 +7,10 @@ build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "package-manager"
|
||||
version = "0.9.1"
|
||||
version = "1.3.1"
|
||||
description = "Kevin's package-manager tool (pkgmgr)"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.11"
|
||||
requires-python = ">=3.9"
|
||||
license = { text = "MIT" }
|
||||
|
||||
authors = [
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
source "${SCRIPT_DIR}/resolve-base-image.sh"
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Building ONLY missing container images"
|
||||
echo "============================================================"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
IMAGE="package-manager-test-$distro"
|
||||
BASE_IMAGE="$(resolve_base_image "$distro")"
|
||||
|
||||
if docker image inspect "$IMAGE" >/dev/null 2>&1; then
|
||||
echo "[build-missing] Image already exists: $IMAGE (skipping)"
|
||||
continue
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[build-missing] Building missing image: $IMAGE"
|
||||
echo "BASE_IMAGE = $BASE_IMAGE"
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE="$BASE_IMAGE" \
|
||||
-t "$IMAGE" \
|
||||
.
|
||||
done
|
||||
|
||||
echo
|
||||
echo "============================================================"
|
||||
echo ">>> build-missing: Done"
|
||||
echo "============================================================"
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
source "${SCRIPT_DIR}/resolve-base-image.sh"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
base_image="$(resolve_base_image "$distro")"
|
||||
|
||||
echo ">>> Building test image for distro '$distro' with NO CACHE (BASE_IMAGE=$base_image)..."
|
||||
|
||||
docker build \
|
||||
--no-cache \
|
||||
--build-arg BASE_IMAGE="$base_image" \
|
||||
-t "package-manager-test-$distro" \
|
||||
.
|
||||
done
|
||||
@@ -1,16 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
source "${SCRIPT_DIR}/resolve-base-image.sh"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
base_image="$(resolve_base_image "$distro")"
|
||||
|
||||
echo ">>> Building test image for distro '$distro' (BASE_IMAGE=$base_image)..."
|
||||
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE="$base_image" \
|
||||
-t "package-manager-test-$distro" \
|
||||
.
|
||||
done
|
||||
120
scripts/build/image.sh
Executable file
120
scripts/build/image.sh
Executable file
@@ -0,0 +1,120 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Unified docker image builder for all distros.
|
||||
#
|
||||
# Supports:
|
||||
# --missing Build only if image does not exist
|
||||
# --no-cache Disable docker layer cache
|
||||
# --target Dockerfile target (e.g. virgin|full)
|
||||
# --tag Override image tag (default: pkgmgr-$distro[-$target])
|
||||
#
|
||||
# Requires:
|
||||
# - env var: distro (arch|debian|ubuntu|fedora|centos)
|
||||
# - base.sh in same dir
|
||||
#
|
||||
# Examples:
|
||||
# distro=arch bash scripts/build/image.sh
|
||||
# distro=arch bash scripts/build/image.sh --no-cache
|
||||
# distro=arch bash scripts/build/image.sh --missing
|
||||
# distro=arch bash scripts/build/image.sh --target virgin
|
||||
# distro=arch bash scripts/build/image.sh --target virgin --missing
|
||||
# distro=arch bash scripts/build/image.sh --tag myimg:arch
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
# shellcheck source=/dev/null
|
||||
source "${SCRIPT_DIR}/base.sh"
|
||||
|
||||
: "${distro:?Environment variable 'distro' must be set (arch|debian|ubuntu|fedora|centos)}"
|
||||
|
||||
NO_CACHE=0
|
||||
MISSING_ONLY=0
|
||||
TARGET=""
|
||||
IMAGE_TAG="" # derive later unless --tag is provided
|
||||
|
||||
usage() {
|
||||
local default_tag="pkgmgr-${distro}"
|
||||
if [[ -n "${TARGET:-}" ]]; then
|
||||
default_tag="${default_tag}-${TARGET}"
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
Usage: distro=<distro> $0 [--missing] [--no-cache] [--target <name>] [--tag <image>]
|
||||
|
||||
Options:
|
||||
--missing Build only if the image does not already exist
|
||||
--no-cache Build with --no-cache
|
||||
--target <name> Build a specific Dockerfile target (e.g. virgin|full)
|
||||
--tag <image> Override the output image tag (default: ${default_tag})
|
||||
-h, --help Show help
|
||||
EOF
|
||||
}
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--no-cache) NO_CACHE=1; shift ;;
|
||||
--missing) MISSING_ONLY=1; shift ;;
|
||||
--target)
|
||||
TARGET="${2:-}"
|
||||
if [[ -z "${TARGET}" ]]; then
|
||||
echo "ERROR: --target requires a value (e.g. virgin|full)" >&2
|
||||
exit 2
|
||||
fi
|
||||
shift 2
|
||||
;;
|
||||
--tag)
|
||||
IMAGE_TAG="${2:-}"
|
||||
if [[ -z "${IMAGE_TAG}" ]]; then
|
||||
echo "ERROR: --tag requires a value" >&2
|
||||
exit 2
|
||||
fi
|
||||
shift 2
|
||||
;;
|
||||
-h|--help) usage; exit 0 ;;
|
||||
*)
|
||||
echo "ERROR: Unknown argument: $1" >&2
|
||||
usage
|
||||
exit 2
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Auto-tag: if --tag not provided, derive from distro (+ target suffix)
|
||||
if [[ -z "${IMAGE_TAG}" ]]; then
|
||||
IMAGE_TAG="pkgmgr-${distro}"
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
IMAGE_TAG="${IMAGE_TAG}-${TARGET}"
|
||||
fi
|
||||
fi
|
||||
|
||||
BASE_IMAGE="$(resolve_base_image "$distro")"
|
||||
|
||||
if [[ "${MISSING_ONLY}" == "1" ]]; then
|
||||
if docker image inspect "${IMAGE_TAG}" >/dev/null 2>&1; then
|
||||
echo "[build] Image already exists: ${IMAGE_TAG} (skipping due to --missing)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[build] Building image: ${IMAGE_TAG}"
|
||||
echo "distro = ${distro}"
|
||||
echo "BASE_IMAGE = ${BASE_IMAGE}"
|
||||
if [[ -n "${TARGET}" ]]; then echo "target = ${TARGET}"; fi
|
||||
if [[ "${NO_CACHE}" == "1" ]]; then echo "cache = disabled"; fi
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
build_args=(--build-arg "BASE_IMAGE=${BASE_IMAGE}")
|
||||
|
||||
if [[ "${NO_CACHE}" == "1" ]]; then
|
||||
build_args+=(--no-cache)
|
||||
fi
|
||||
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
build_args+=(--target "${TARGET}")
|
||||
fi
|
||||
|
||||
build_args+=(-t "${IMAGE_TAG}" .)
|
||||
|
||||
docker build "${build_args[@]}"
|
||||
@@ -1,53 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Detect and export a valid CA bundle so Nix, Git, curl and Python tooling
|
||||
# can successfully perform HTTPS requests on all distros (Debian, Ubuntu,
|
||||
# Fedora, RHEL, CentOS, etc.)
|
||||
# ---------------------------------------------------------------------------
|
||||
detect_ca_bundle() {
|
||||
# Common CA bundle locations across major Linux distributions
|
||||
local candidates=(
|
||||
/etc/ssl/certs/ca-certificates.crt # Debian/Ubuntu
|
||||
/etc/ssl/cert.pem # Some distros
|
||||
/etc/pki/tls/certs/ca-bundle.crt # Fedora/RHEL/CentOS
|
||||
/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem # CentOS/RHEL extracted bundle
|
||||
/etc/ssl/ca-bundle.pem # Generic fallback
|
||||
)
|
||||
|
||||
for path in "${candidates[@]}"; do
|
||||
if [[ -f "$path" ]]; then
|
||||
echo "$path"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Use existing NIX_SSL_CERT_FILE if provided, otherwise auto-detect
|
||||
CA_BUNDLE="${NIX_SSL_CERT_FILE:-}"
|
||||
|
||||
if [[ -z "${CA_BUNDLE}" ]]; then
|
||||
CA_BUNDLE="$(detect_ca_bundle || true)"
|
||||
fi
|
||||
|
||||
if [[ -n "${CA_BUNDLE}" ]]; then
|
||||
# Export for Nix (critical)
|
||||
export NIX_SSL_CERT_FILE="${CA_BUNDLE}"
|
||||
|
||||
# Export for Git, Python requests, curl, etc.
|
||||
export SSL_CERT_FILE="${CA_BUNDLE}"
|
||||
export REQUESTS_CA_BUNDLE="${CA_BUNDLE}"
|
||||
export GIT_SSL_CAINFO="${CA_BUNDLE}"
|
||||
|
||||
echo "[docker] Using CA bundle: ${CA_BUNDLE}"
|
||||
else
|
||||
echo "[docker] WARNING: No CA certificate bundle found."
|
||||
echo "[docker] HTTPS access for Nix flakes and other tools may fail."
|
||||
fi
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
echo "[docker] Starting package-manager container"
|
||||
@@ -68,16 +21,10 @@ cd /src
|
||||
# ---------------------------------------------------------------------------
|
||||
# DEV mode: rebuild package-manager from the mounted /src tree
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ "${PKGMGR_DEV:-0}" == "1" ]]; then
|
||||
echo "[docker] DEV mode enabled (PKGMGR_DEV=1)"
|
||||
echo "[docker] Rebuilding package-manager from /src via scripts/installation/run-package.sh..."
|
||||
|
||||
if [[ -x scripts/installation/run-package.sh ]]; then
|
||||
bash scripts/installation/run-package.sh
|
||||
else
|
||||
echo "[docker] ERROR: scripts/installation/run-package.sh not found or not executable"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "${REINSTALL_PKGMGR:-0}" == "1" ]]; then
|
||||
echo "[docker] DEV mode enabled (REINSTALL_PKGMGR=1)"
|
||||
echo "[docker] Rebuilding package-manager from /src via scripts/installation/package.sh..."
|
||||
bash scripts/installation/package.sh || exit 1
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@@ -1,237 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

echo "[init-nix] Starting Nix initialization..."

# ---------------------------------------------------------------------------
# Helper: detect whether we are inside a container (Docker/Podman/etc.)
# ---------------------------------------------------------------------------
is_container() {
# Docker / Podman markers
if [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]]; then
return 0
fi

# cgroup hints
if grep -qiE 'docker|container|podman|lxc' /proc/1/cgroup 2>/dev/null; then
return 0
fi

# Environment variable used by some runtimes
if [[ -n "${container:-}" ]]; then
return 0
fi

return 1
}

# ---------------------------------------------------------------------------
# Helper: ensure Nix binaries are on PATH (multi-user or single-user)
# ---------------------------------------------------------------------------
ensure_nix_on_path() {
# Multi-user profile (daemon install)
if [[ -x /nix/var/nix/profiles/default/bin/nix ]]; then
export PATH="/nix/var/nix/profiles/default/bin:${PATH}"
fi

# Single-user profile (current user)
if [[ -x "${HOME}/.nix-profile/bin/nix" ]]; then
export PATH="${HOME}/.nix-profile/bin:${PATH}"
fi

# Single-user profile for dedicated "nix" user (container case)
if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
export PATH="/home/nix/.nix-profile/bin:${PATH}"
fi
}

# ---------------------------------------------------------------------------
# Fast path: Nix already available
# ---------------------------------------------------------------------------
if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
exit 0
fi

ensure_nix_on_path

if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix found after adjusting PATH: $(command -v nix)"
exit 0
fi

echo "[init-nix] Nix not found, starting installation logic..."

IN_CONTAINER=0
if is_container; then
IN_CONTAINER=1
echo "[init-nix] Detected container environment."
else
echo "[init-nix] No container detected."
fi

# ---------------------------------------------------------------------------
# Container + root: install Nix as dedicated "nix" user (single-user)
# ---------------------------------------------------------------------------
if [[ "${IN_CONTAINER}" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] Running as root inside a container – using dedicated 'nix' user."

# Ensure nixbld group (required by Nix)
if ! getent group nixbld >/dev/null 2>&1; then
echo "[init-nix] Creating group 'nixbld'..."
groupadd -r nixbld
fi

# Ensure Nix build users (nixbld1..nixbld10) as members of nixbld
for i in $(seq 1 10); do
if ! id "nixbld$i" >/dev/null 2>&1; then
echo "[init-nix] Creating build user nixbld$i..."
# -r: system account, -g: primary group, -G: supplementary (ensures membership is listed)
useradd -r -g nixbld -G nixbld -s /usr/sbin/nologin "nixbld$i"
fi
done

# Ensure "nix" user (home at /home/nix)
if ! id nix >/dev/null 2>&1; then
echo "[init-nix] Creating user 'nix'..."
# Resolve a valid shell path across distros:
# - Debian/Ubuntu: /bin/bash
# - Arch: /usr/bin/bash (often symlinked)
# Fall back to /bin/sh on ultra-minimal systems.
BASH_SHELL="$(command -v bash || true)"
if [[ -z "${BASH_SHELL}" ]]; then
BASH_SHELL="/bin/sh"
fi
useradd -m -r -g nixbld -s "${BASH_SHELL}" nix
fi

# Ensure /nix exists and is writable by the "nix" user.
#
# In some base images (or previous runs), /nix may already exist and be
# owned by root. In that case the Nix single-user installer will abort with:
#
# "directory /nix exists, but is not writable by you"
#
# To keep container runs idempotent and robust, we always enforce
# ownership nix:nixbld here.
if [[ ! -d /nix ]]; then
echo "[init-nix] Creating /nix with owner nix:nixbld..."
mkdir -m 0755 /nix
chown nix:nixbld /nix
else
current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
if [[ "${current_owner}" != "nix" || "${current_group}" != "nixbld" ]]; then
echo "[init-nix] /nix already exists with owner ${current_owner}:${current_group} – fixing to nix:nixbld..."
chown -R nix:nixbld /nix
else
echo "[init-nix] /nix already exists with correct owner nix:nixbld."
fi

if [[ ! -w /nix ]]; then
echo "[init-nix] WARNING: /nix is still not writable after chown; Nix installer may fail."
fi
fi

# Run Nix single-user installer as "nix"
echo "[init-nix] Installing Nix as user 'nix' (single-user, --no-daemon)..."
if command -v sudo >/dev/null 2>&1; then
sudo -u nix bash -lc 'sh <(curl -L https://nixos.org/nix/install) --no-daemon'
else
su - nix -c 'sh <(curl -L https://nixos.org/nix/install) --no-daemon'
fi

# After installation, expose nix to root via PATH and symlink
ensure_nix_on_path

if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
if [[ ! -e /usr/local/bin/nix ]]; then
echo "[init-nix] Creating /usr/local/bin/nix symlink -> /home/nix/.nix-profile/bin/nix"
ln -s /home/nix/.nix-profile/bin/nix /usr/local/bin/nix
fi
fi

ensure_nix_on_path

if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix successfully installed (container mode) at: $(command -v nix)"
else
echo "[init-nix] WARNING: Nix installation finished in container, but 'nix' is still not on PATH."
fi

# Optionally add PATH hints to /etc/profile (best effort)
if [[ -w /etc/profile ]]; then
if ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
cat <<'EOF' >> /etc/profile

# Nix profiles (added by package-manager init-nix.sh)
if [ -d /nix/var/nix/profiles/default/bin ]; then
PATH="/nix/var/nix/profiles/default/bin:$PATH"
fi
if [ -d "$HOME/.nix-profile/bin" ]; then
PATH="$HOME/.nix-profile/bin:$PATH"
fi
EOF
echo "[init-nix] Appended Nix PATH setup to /etc/profile (container mode)."
fi
fi

echo "[init-nix] Nix initialization complete (container root mode)."
exit 0
fi

# ---------------------------------------------------------------------------
# Non-container or non-root container: normal installer paths
# ---------------------------------------------------------------------------
if [[ "${IN_CONTAINER}" -eq 0 ]]; then
# Real host
if command -v systemctl >/dev/null 2>&1; then
echo "[init-nix] Host with systemd – using multi-user install (--daemon)."
sh <(curl -L https://nixos.org/nix/install) --daemon
else
if [[ "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] WARNING: Running as root without systemd on host."
echo "[init-nix] Falling back to single-user install (--no-daemon), but this is not recommended."
sh <(curl -L https://nixos.org/nix/install) --no-daemon
else
echo "[init-nix] Non-root host without systemd – using single-user install (--no-daemon)."
sh <(curl -L https://nixos.org/nix/install) --no-daemon
fi
fi
else
# Container, but not root (rare)
echo "[init-nix] Container as non-root user – using single-user install (--no-daemon)."
sh <(curl -L https://nixos.org/nix/install) --no-daemon
fi

# ---------------------------------------------------------------------------
# After installation: fix PATH (runtime + shell profiles)
# ---------------------------------------------------------------------------
ensure_nix_on_path

if ! command -v nix >/dev/null 2>&1; then
echo "[init-nix] WARNING: Nix installation finished, but 'nix' is still not on PATH."
echo "[init-nix] You may need to source your shell profile manually."
exit 0
fi

echo "[init-nix] Nix successfully installed at: $(command -v nix)"

# Update global /etc/profile if writable (helps especially on minimal systems)
if [[ -w /etc/profile ]]; then
if ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
cat <<'EOF' >> /etc/profile

# Nix profiles (added by package-manager init-nix.sh)
if [ -d /nix/var/nix/profiles/default/bin ]; then
PATH="/nix/var/nix/profiles/default/bin:$PATH"
fi
if [ -d "$HOME/.nix-profile/bin" ]; then
PATH="$HOME/.nix-profile/bin:$PATH"
fi
EOF
echo "[init-nix] Appended Nix PATH setup to /etc/profile"
fi
fi

echo "[init-nix] Nix initialization complete."
@@ -45,8 +45,42 @@ else
fi

echo "[aur-builder-setup] Ensuring yay is installed for aur_builder..."

if ! "${RUN_AS_AUR[@]}" 'command -v yay >/dev/null 2>&1'; then
"${RUN_AS_AUR[@]}" 'cd ~ && rm -rf yay && git clone https://aur.archlinux.org/yay.git && cd yay && makepkg -si --noconfirm'
echo "[aur-builder-setup] yay not found – starting retry sequence for download..."

MAX_TIME=300
SLEEP_INTERVAL=20
ELAPSED=0

while true; do
if "${RUN_AS_AUR[@]}" '
set -euo pipefail
cd ~
rm -rf yay || true
git clone https://aur.archlinux.org/yay.git yay
'; then
echo "[aur-builder-setup] yay repository cloned successfully."
break
fi

echo "[aur-builder-setup] git clone failed (likely 504). Retrying in ${SLEEP_INTERVAL}s..."
sleep "${SLEEP_INTERVAL}"
ELAPSED=$((ELAPSED + SLEEP_INTERVAL))

if (( ELAPSED >= MAX_TIME )); then
echo "[aur-builder-setup] ERROR: Aborted after 5 minutes of retry attempts."
exit 1
fi
done

# Now build yay after successful clone
"${RUN_AS_AUR[@]}" '
set -euo pipefail
cd ~/yay
makepkg -si --noconfirm
'

else
echo "[aur-builder-setup] yay already installed."
fi

@@ -12,6 +12,7 @@ pacman -S --noconfirm --needed \
rsync \
curl \
ca-certificates \
python \
xz

pacman -Scc --noconfirm

@@ -1,30 +1,64 @@
#!/usr/bin/env bash
set -euo pipefail

echo "[arch/package] Building Arch package (makepkg --nodeps)..."
echo "[arch/package] Building Arch package (makepkg --nodeps) in an isolated build dir..."

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
PKG_DIR="${PROJECT_ROOT}/packaging/arch"

if [[ ! -f "${PKG_DIR}/PKGBUILD" ]]; then
echo "[arch/package] ERROR: PKGBUILD not found in ${PKG_DIR}"
# We must not build inside /src (mounted repo). Build in /tmp to avoid permission issues.
BUILD_ROOT="/tmp/package-manager-arch-build"
PKG_SRC_DIR="${PROJECT_ROOT}/packaging/arch"
PKG_BUILD_DIR="${BUILD_ROOT}/packaging/arch"

if [[ ! -f "${PKG_SRC_DIR}/PKGBUILD" ]]; then
echo "[arch/package] ERROR: PKGBUILD not found in ${PKG_SRC_DIR}"
exit 1
fi

cd "${PKG_DIR}"
echo "[arch/package] Preparing build directory: ${BUILD_ROOT}"
rm -rf "${BUILD_ROOT}"
mkdir -p "${BUILD_ROOT}"

if id aur_builder >/dev/null 2>&1; then
echo "[arch/package] Using 'aur_builder' user for makepkg..."
chown -R aur_builder:aur_builder "${PKG_DIR}"
su aur_builder -c "cd '${PKG_DIR}' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
else
echo "[arch/package] WARNING: user 'aur_builder' not found, running makepkg as current user..."
rm -f package-manager-*.pkg.tar.*
makepkg --noconfirm --clean --nodeps
echo "[arch/package] Syncing project sources to ${BUILD_ROOT}..."
# Keep it simple: copy everything; adjust excludes if needed later.
rsync -a --delete \
--exclude '.git' \
--exclude '.venv' \
--exclude '.venvs' \
--exclude '__pycache__' \
--exclude '*.pyc' \
"${PROJECT_ROOT}/" "${BUILD_ROOT}/"

if [[ ! -d "${PKG_BUILD_DIR}" ]]; then
echo "[arch/package] ERROR: Build PKG dir missing: ${PKG_BUILD_DIR}"
exit 1
fi

# ------------------------------------------------------------
# Unprivileged user for Arch package build (makepkg)
# ------------------------------------------------------------
if ! id aur_builder >/dev/null 2>&1; then
echo "[arch/package] ERROR: user 'aur_builder' not found. Run scripts/installation/arch/aur-builder-setup.sh first."
exit 1
fi

echo "[arch/package] Using 'aur_builder' user for makepkg..."
chown -R aur_builder:aur_builder "${BUILD_ROOT}"

echo "[arch/package] Running makepkg in: ${PKG_BUILD_DIR}"
su aur_builder -c "cd '${PKG_BUILD_DIR}' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"

echo "[arch/package] Installing generated Arch package..."
pacman -U --noconfirm package-manager-*.pkg.tar.*
pkg_path="$(find "${PKG_BUILD_DIR}" -maxdepth 1 -type f -name 'package-manager-*.pkg.tar.*' | head -n1)"
if [[ -z "${pkg_path}" ]]; then
echo "[arch/package] ERROR: Built package not found in ${PKG_BUILD_DIR}"
exit 1
fi

pacman -U --noconfirm "${pkg_path}"

echo "[arch/package] Cleanup build directory..."
rm -rf "${BUILD_ROOT}"

echo "[arch/package] Done."

@@ -13,9 +13,64 @@ dnf -y install \
bash \
curl-minimal \
ca-certificates \
python3 \
sudo \
xz

dnf clean all

# -----------------------------------------------------------------------------
# Persist CA bundle configuration system-wide (virgin-compatible)
# -----------------------------------------------------------------------------
detect_ca_bundle() {
local candidates=(
/etc/pki/tls/certs/ca-bundle.crt
/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
/etc/ssl/certs/ca-certificates.crt
/etc/ssl/cert.pem
/etc/ssl/ca-bundle.pem
)

for path in "${candidates[@]}"; do
if [[ -f "$path" ]]; then
echo "$path"
return 0
fi
done

return 1
}

CA_BUNDLE="$(detect_ca_bundle || true)"

if [[ -n "${CA_BUNDLE}" ]]; then
echo "[centos/dependencies] Persisting CA bundle: ${CA_BUNDLE}"

# 1) Make it available for login shells
cat >/etc/profile.d/pkgmgr-ca.sh <<EOF
# Generated by package-manager
export NIX_SSL_CERT_FILE="${CA_BUNDLE}"
export SSL_CERT_FILE="${CA_BUNDLE}"
export REQUESTS_CA_BUNDLE="${CA_BUNDLE}"
export GIT_SSL_CAINFO="${CA_BUNDLE}"
EOF
chmod 0644 /etc/profile.d/pkgmgr-ca.sh

# 2) Ensure Nix uses it even without environment variables
mkdir -p /etc/nix
if [[ -f /etc/nix/nix.conf ]]; then
# Replace existing ssl-cert-file or append it
if grep -qE '^\s*ssl-cert-file\s*=' /etc/nix/nix.conf; then
sed -i "s|^\s*ssl-cert-file\s*=.*|ssl-cert-file = ${CA_BUNDLE}|" /etc/nix/nix.conf
else
echo "ssl-cert-file = ${CA_BUNDLE}" >>/etc/nix/nix.conf
fi
else
echo "ssl-cert-file = ${CA_BUNDLE}" >/etc/nix/nix.conf
fi

else
echo "[centos/dependencies] WARNING: No CA bundle found after installing ca-certificates."
fi

echo "[centos/dependencies] Done."
@@ -13,6 +13,8 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bash \
curl \
ca-certificates \
python3 \
python3-venv \
xz-utils

rm -rf /var/lib/apt/lists/*

@@ -1,87 +1,15 @@
#!/usr/bin/env bash
set -euo pipefail

# ------------------------------------------------------------
# main.sh
#
# Developer / system setup entrypoint.
#
# Responsibilities:
# - If inside a Nix shell (IN_NIX_SHELL=1):
# * Skip venv creation and dependency installation
# * Run `python3 main.py install`
# - If running as root (EUID=0):
# * Run system-level installer (run-package.sh)
# - Otherwise (normal user):
# * Create ~/.venvs/pkgmgr virtual environment if missing
# * Install Python dependencies into that venv
# * Append auto-activation to ~/.bashrc and ~/.zshrc
# * Run `main.py install` using the venv Python
# ------------------------------------------------------------

echo "[installation/main] Starting setup..."

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

VENV_DIR="${HOME}/.venvs/pkgmgr"
RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/activate"; if [ -n "${PS1:-}" ]; then echo "Global Python virtual environment '\''~/.venvs/pkgmgr'\'' activated."; fi; fi'

# ------------------------------------------------------------
# 1) Nix shell mode: do not touch venv, only run main.py install
# ------------------------------------------------------------
if [[ -n "${IN_NIX_SHELL:-}" ]]; then
echo "[installation/main] Nix shell detected (IN_NIX_SHELL=1)."
echo "[installation/main] Skipping virtualenv creation and dependency installation."
echo "[installation/main] Running main.py install via system python3..."
python3 main.py install
echo "[installation/main] Setup finished (Nix mode)."
exit 0
if [[ "${EUID:-$(id -u)}" -ne 0 ]]; then
echo "[installation/install] Warning: Installation is only possible as root."
exit 0
fi

# ------------------------------------------------------------
# 2) Root mode: system / distro-level installation
# ------------------------------------------------------------
if [[ "${EUID:-$(id -u)}" -eq 0 ]]; then
echo "[installation/main] Running as root (EUID=0)."
echo "[installation/main] Skipping user virtualenv and shell RC modifications."
echo "[installation/main] Delegating to scripts/installation/run-package.sh..."
bash scripts/installation/run-package.sh
echo "[installation/main] Root/system setup complete."
exit 0
fi

# ------------------------------------------------------------
# 3) Normal user mode: dev setup with venv
# ------------------------------------------------------------

echo "[installation/main] Running in normal user mode (developer setup)."

echo "[installation/main] Ensuring main.py is executable..."
chmod +x main.py || true

echo "[installation/main] Ensuring global virtualenv root: ${HOME}/.venvs"
mkdir -p "${HOME}/.venvs"

echo "[installation/main] Creating/updating virtualenv via helper..."
PKGMGR_VENV_DIR="${VENV_DIR}" bash scripts/installation/venv-create.sh

echo "[installation/main] Ensuring ~/.bashrc and ~/.zshrc exist..."
touch "${HOME}/.bashrc" "${HOME}/.zshrc"

echo "[installation/main] Ensuring venv auto-activation is present in shell rc files..."
for rc in "${HOME}/.bashrc" "${HOME}/.zshrc"; do
if ! grep -qxF "${RC_LINE}" "$rc"; then
echo "${RC_LINE}" >> "$rc"
echo "[installation/main] Appended auto-activation to $rc"
else
echo "[installation/main] Auto-activation already present in $rc"
fi
done

echo "[installation/main] Running main.py install via venv Python..."
"${VENV_DIR}/bin/python" main.py install

echo
echo "[installation/main] Developer setup complete."
echo "Restart your shell (or run 'exec bash' or 'exec zsh') to activate the environment."
echo "[installation] Running as root (EUID=0)."
echo "[installation] Install Package Dependencies..."
bash scripts/installation/dependencies.sh
echo "[installation] Install Distribution Package..."
bash scripts/installation/package.sh
echo "[installation] Root/system setup complete."
exit 0
@@ -8,22 +8,28 @@ source "${SCRIPT_DIR}/lib.sh"

OS_ID="$(detect_os_id)"

echo "[run-package] Detected OS: ${OS_ID}"
# Map Manjaro to Arch
if [[ "${OS_ID}" == "manjaro" ]]; then
echo "[package] Mapping OS 'manjaro' → 'arch'"
OS_ID="arch"
fi

echo "[package] Detected OS: ${OS_ID}"

case "${OS_ID}" in
arch|debian|ubuntu|fedora|centos)
PKG_SCRIPT="${SCRIPT_DIR}/${OS_ID}/package.sh"
;;
*)
echo "[run-package] Unsupported OS: ${OS_ID}"
echo "[package] Unsupported OS: ${OS_ID}"
exit 1
;;
esac

if [[ ! -f "${PKG_SCRIPT}" ]]; then
echo "[run-package] Package script not found: ${PKG_SCRIPT}"
echo "[package] Package script not found: ${PKG_SCRIPT}"
exit 1
fi

echo "[run-package] Executing: ${PKG_SCRIPT}"
echo "[package] Executing: ${PKG_SCRIPT}"
exec bash "${PKG_SCRIPT}"
@@ -14,6 +14,9 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
rsync \
bash \
curl \
make \
python3 \
python3-venv \
ca-certificates \
xz-utils

@@ -1,44 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# venv-create.sh
#
# Small helper to create/update a Python virtual environment for pkgmgr.
#
# Usage:
# PKGMGR_VENV_DIR=/home/dev/.venvs/pkgmgr bash scripts/installation/venv-create.sh
# or
# bash scripts/installation/venv-create.sh /home/dev/.venvs/pkgmgr

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

VENV_DIR="${PKGMGR_VENV_DIR:-${1:-${HOME}/.venvs/pkgmgr}}"

echo "[venv-create] Using VENV_DIR=${VENV_DIR}"

echo "[venv-create] Ensuring virtualenv parent directory exists..."
mkdir -p "$(dirname "${VENV_DIR}")"

if [[ ! -d "${VENV_DIR}" ]]; then
echo "[venv-create] Creating virtual environment at: ${VENV_DIR}"
python3 -m venv "${VENV_DIR}"
else
echo "[venv-create] Virtual environment already exists at: ${VENV_DIR}"
fi

echo "[venv-create] Installing Python tooling into venv..."
"${VENV_DIR}/bin/python" -m ensurepip --upgrade
"${VENV_DIR}/bin/pip" install --upgrade pip setuptools wheel

if [[ -f "requirements.txt" ]]; then
echo "[venv-create] Installing dependencies from requirements.txt..."
"${VENV_DIR}/bin/pip" install -r requirements.txt
elif [[ -f "_requirements.txt" ]]; then
echo "[venv-create] Installing dependencies from _requirements.txt..."
"${VENV_DIR}/bin/pip" install -r _requirements.txt
else
echo "[venv-create] No requirements.txt or _requirements.txt found. Skipping dependency installation."
fi

echo "[venv-create] Done."
53
scripts/nix/README.md
Normal file
@@ -0,0 +1,53 @@
# Nix Bootstrap (package-manager)

This directory contains the **Nix initialization and bootstrap logic** used by *package-manager* to ensure the `nix` command is available on supported systems (host machines and CI containers).

It is invoked during package installation (Arch/Debian/Fedora scriptlets) and can also be called manually, as sketched after this file.

---

## Entry Point

- *scripts/nix/init.sh*
Main bootstrap script. It:
- checks whether `nix` is already available
- adjusts `PATH` for common Nix locations
- installs Nix when missing (daemon install on systemd hosts, single-user in containers)
- ensures predictable `nix` availability via symlinks (without overwriting distro-managed paths)
- validates that `nix` is usable at the end (CI-safe)

---

## Library Layout

The entry point sources small, focused modules from *scripts/nix/lib/*:

- *config.sh* — configuration defaults (installer URL, retry timing)
- *detect.sh* — container detection helpers
- *path.sh* — PATH adjustments and `nix` binary resolution helpers
- *symlinks.sh* — user/global symlink helpers for stable `nix` discovery
- *users.sh* — build group/users and container ownership/perms helpers
- *install.sh* — installer download + retry logic and execution helpers

Each library file includes a simple guard to prevent double-sourcing.

---

## When It Runs

This bootstrap is typically executed automatically:

- Arch: post-install / post-upgrade hook
- Debian: `postinst`
- Fedora/RPM: `%post`

---

## Notes / Design Goals

- **Cross-distro compatibility:** supports common Linux layouts (including Arch placing `nix` in */usr/sbin*).
- **Non-destructive behavior:** avoids overwriting distro-managed `nix` binaries.
- **CI robustness:** retry logic for downloads and a final `nix` availability check.
- **Container-safe defaults:** single-user install as a dedicated `nix` user when running as root in containers.
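As the README notes, the bootstrap can also be run by hand. A minimal sketch of a manual invocation from the repository root (assumption: current directory is the checked-out repo) could be:

```bash
# Run the bootstrap, then confirm nix is reachable.
bash scripts/nix/init.sh
command -v nix && nix --version
```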
130
scripts/nix/init.sh
Executable file
@@ -0,0 +1,130 @@
#!/usr/bin/env bash
set -euo pipefail

# shellcheck source=lib/config.sh
# shellcheck source=lib/detect.sh
# shellcheck source=lib/path.sh
# shellcheck source=lib/symlinks.sh
# shellcheck source=lib/users.sh
# shellcheck source=lib/install.sh

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

source "${SCRIPT_DIR}/lib/config.sh"
source "${SCRIPT_DIR}/lib/detect.sh"
source "${SCRIPT_DIR}/lib/path.sh"
source "${SCRIPT_DIR}/lib/symlinks.sh"
source "${SCRIPT_DIR}/lib/users.sh"
source "${SCRIPT_DIR}/lib/install.sh"

echo "[init-nix] Starting Nix initialization..."

main() {
# Fast path: already available
if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
ensure_nix_on_path

if [[ "${EUID:-0}" -eq 0 ]]; then
ensure_global_nix_symlinks "$(resolve_nix_bin 2>/dev/null || true)"
else
ensure_user_nix_symlink "$(resolve_nix_bin 2>/dev/null || true)"
fi

return 0
fi

ensure_nix_on_path

if command -v nix >/dev/null 2>&1; then
echo "[init-nix] Nix found after PATH adjustment: $(command -v nix)"
if [[ "${EUID:-0}" -eq 0 ]]; then
ensure_global_nix_symlinks "$(resolve_nix_bin 2>/dev/null || true)"
else
ensure_user_nix_symlink "$(resolve_nix_bin 2>/dev/null || true)"
fi
return 0
fi

local IN_CONTAINER=0
if is_container; then
IN_CONTAINER=1
echo "[init-nix] Detected container environment."
else
echo "[init-nix] No container detected."
fi

# -------------------------------------------------------------------------
# Container + root: dedicated "nix" user, single-user install
# -------------------------------------------------------------------------
if [[ "$IN_CONTAINER" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
echo "[init-nix] Container + root: installing as 'nix' user (single-user)."

ensure_nix_build_group

if ! id nix >/dev/null 2>&1; then
echo "[init-nix] Creating user 'nix'..."
local BASH_SHELL
BASH_SHELL="$(command -v bash || true)"
[[ -z "$BASH_SHELL" ]] && BASH_SHELL="/bin/sh"
useradd -m -r -g nixbld -s "$BASH_SHELL" nix
fi

ensure_nix_store_dir_for_container_user

install_nix_with_retry "no-daemon" "nix"

ensure_nix_on_path

# Ensure stable global symlink(s) (sudo secure_path friendly)
ensure_global_nix_symlinks "/home/nix/.nix-profile/bin/nix"

# Ensure non-root users can traverse and execute nix user profile
ensure_container_profile_perms

# -------------------------------------------------------------------------
# Host (no container)
# -------------------------------------------------------------------------
else
if command -v systemctl >/dev/null 2>&1; then
echo "[init-nix] Host with systemd: using multi-user install (--daemon)."
if [[ "${EUID:-0}" -eq 0 ]]; then
ensure_nix_build_group
fi
install_nix_with_retry "daemon"
else
echo "[init-nix] No systemd detected: using single-user install (--no-daemon)."
if [[ "${EUID:-0}" -eq 0 ]]; then
ensure_nix_build_group
fi
install_nix_with_retry "no-daemon"
fi
fi

# -------------------------------------------------------------------------
# After install: PATH + symlink(s)
# -------------------------------------------------------------------------
ensure_nix_on_path

local nix_bin_post
nix_bin_post="$(resolve_nix_bin 2>/dev/null || true)"

if [[ "${EUID:-0}" -eq 0 ]]; then
ensure_global_nix_symlinks "$nix_bin_post"
else
ensure_user_nix_symlink "$nix_bin_post"
fi

# Final verification (must succeed for CI)
if ! command -v nix >/dev/null 2>&1; then
echo "[init-nix] ERROR: nix not found after installation."
echo "[init-nix] DEBUG: resolved nix path = ${nix_bin_post:-<empty>}"
echo "[init-nix] DEBUG: PATH = $PATH"
exit 1
fi

echo "[init-nix] Nix successfully available at: $(command -v nix)"
echo "[init-nix] Nix initialization complete."
}

main "$@"
11
scripts/nix/lib/config.sh
Normal file
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

# Prevent double-sourcing
if [[ -n "${PKGMGR_NIX_CONFIG_SH:-}" ]]; then
return 0
fi
PKGMGR_NIX_CONFIG_SH=1

NIX_INSTALL_URL="${NIX_INSTALL_URL:-https://nixos.org/nix/install}"
NIX_DOWNLOAD_MAX_TIME="${NIX_DOWNLOAD_MAX_TIME:-300}"
NIX_DOWNLOAD_SLEEP_INTERVAL="${NIX_DOWNLOAD_SLEEP_INTERVAL:-20}"
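Because every value in config.sh uses the `${VAR:-default}` pattern, the defaults can be overridden from the environment before the bootstrap runs. A hedged sketch (the numbers are illustrative, not project recommendations):

```bash
# Give slow mirrors more time and retry less aggressively.
NIX_DOWNLOAD_MAX_TIME=600 \
NIX_DOWNLOAD_SLEEP_INTERVAL=30 \
bash scripts/nix/init.sh
```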
14
scripts/nix/lib/detect.sh
Normal file
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_DETECT_SH:-}" ]]; then
return 0
fi
PKGMGR_NIX_DETECT_SH=1

# Detect whether we are inside a container (Docker/Podman/etc.)
is_container() {
[[ -f /.dockerenv || -f /run/.containerenv ]] && return 0
grep -qiE 'docker|container|podman|lxc' /proc/1/cgroup 2>/dev/null && return 0
[[ -n "${container:-}" ]] && return 0
return 1
}
63
scripts/nix/lib/install.sh
Normal file
@@ -0,0 +1,63 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_INSTALL_SH:-}" ]]; then
return 0
fi
PKGMGR_NIX_INSTALL_SH=1

# Requires: NIX_INSTALL_URL, NIX_DOWNLOAD_MAX_TIME, NIX_DOWNLOAD_SLEEP_INTERVAL

# Download and run Nix installer with retry
# Usage: install_nix_with_retry daemon|no-daemon [run_as_user]
install_nix_with_retry() {
local mode="$1"
local run_as="${2:-}"
local installer elapsed=0 mode_flag

case "$mode" in
daemon) mode_flag="--daemon" ;;
no-daemon) mode_flag="--no-daemon" ;;
*)
echo "[init-nix] ERROR: Invalid mode '$mode' (expected 'daemon' or 'no-daemon')."
exit 1
;;
esac

installer="$(mktemp -t nix-installer.XXXXXX)"
chmod 0644 "$installer"

echo "[init-nix] Downloading Nix installer from $NIX_INSTALL_URL (max ${NIX_DOWNLOAD_MAX_TIME}s)..."

while true; do
if curl -fL "$NIX_INSTALL_URL" -o "$installer"; then
echo "[init-nix] Successfully downloaded installer to $installer"
break
fi

elapsed=$((elapsed + NIX_DOWNLOAD_SLEEP_INTERVAL))
echo "[init-nix] WARNING: Download failed. Retrying in ${NIX_DOWNLOAD_SLEEP_INTERVAL}s (elapsed ${elapsed}s)..."

if (( elapsed >= NIX_DOWNLOAD_MAX_TIME )); then
echo "[init-nix] ERROR: Giving up after ${elapsed}s trying to download Nix installer."
rm -f "$installer"
exit 1
fi

sleep "$NIX_DOWNLOAD_SLEEP_INTERVAL"
done

if [[ -n "$run_as" ]]; then
chown "$run_as:$run_as" "$installer" 2>/dev/null || true
echo "[init-nix] Running installer as user '$run_as' ($mode_flag)..."
if command -v sudo >/dev/null 2>&1; then
sudo -u "$run_as" bash -lc "sh '$installer' $mode_flag"
else
su - "$run_as" -c "sh '$installer' $mode_flag"
fi
else
echo "[init-nix] Running installer as current user ($mode_flag)..."
sh "$installer" "$mode_flag"
fi

rm -f "$installer"
}
68
scripts/nix/lib/path.sh
Normal file
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_PATH_SH:-}" ]]; then
return 0
fi
PKGMGR_NIX_PATH_SH=1

# Ensure Nix binaries are on PATH (additive, never destructive)
ensure_nix_on_path() {
if [[ -x /nix/var/nix/profiles/default/bin/nix ]]; then
PATH="/nix/var/nix/profiles/default/bin:$PATH"
fi
if [[ -x "$HOME/.nix-profile/bin/nix" ]]; then
PATH="$HOME/.nix-profile/bin:$PATH"
fi
if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
PATH="/home/nix/.nix-profile/bin:$PATH"
fi
if [[ -d "$HOME/.local/bin" ]]; then
PATH="$HOME/.local/bin:$PATH"
fi
export PATH
}

# Resolve a path to a real executable (follows symlinks)
real_exe() {
local p="${1:-}"
[[ -z "$p" ]] && return 1

local r
r="$(readlink -f "$p" 2>/dev/null || echo "$p")"

[[ -x "$r" ]] && { echo "$r"; return 0; }
return 1
}

# Resolve nix binary path robustly (works across distros + Arch /usr/sbin)
resolve_nix_bin() {
local nix_cmd=""
nix_cmd="$(command -v nix 2>/dev/null || true)"
[[ -n "$nix_cmd" ]] && real_exe "$nix_cmd" && return 0

# IMPORTANT: prefer system locations before /usr/local to avoid self-symlink traps
[[ -x /usr/sbin/nix ]] && { echo "/usr/sbin/nix"; return 0; } # Arch package can land here
[[ -x /usr/bin/nix ]] && { echo "/usr/bin/nix"; return 0; }
[[ -x /bin/nix ]] && { echo "/bin/nix"; return 0; }

# /usr/local last, and only if it resolves to a real executable
[[ -e /usr/local/bin/nix ]] && real_exe "/usr/local/bin/nix" && return 0

[[ -x /nix/var/nix/profiles/default/bin/nix ]] && {
echo "/nix/var/nix/profiles/default/bin/nix"; return 0;
}

[[ -x "$HOME/.nix-profile/bin/nix" ]] && {
echo "$HOME/.nix-profile/bin/nix"; return 0;
}

[[ -x "$HOME/.local/bin/nix" ]] && {
echo "$HOME/.local/bin/nix"; return 0;
}

[[ -x /home/nix/.nix-profile/bin/nix ]] && {
echo "/home/nix/.nix-profile/bin/nix"; return 0;
}

return 1
}
95
scripts/nix/lib/symlinks.sh
Normal file
@@ -0,0 +1,95 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_SYMLINKS_SH:-}" ]]; then
return 0
fi
PKGMGR_NIX_SYMLINKS_SH=1

# Requires: real_exe, resolve_nix_bin
# shellcheck disable=SC2034

# Ensure globally reachable nix symlink(s) (CI / non-login shells) - root only
ensure_global_nix_symlinks() {
local nix_bin="${1:-}"

[[ -z "$nix_bin" ]] && nix_bin="$(resolve_nix_bin 2>/dev/null || true)"

if [[ -z "$nix_bin" || ! -x "$nix_bin" ]]; then
echo "[init-nix] WARNING: nix binary not found, cannot create global symlink(s)."
return 0
fi

# Always link to the real executable to avoid /usr/local/bin/nix -> /usr/local/bin/nix
nix_bin="$(real_exe "$nix_bin" 2>/dev/null || echo "$nix_bin")"

local targets=()

# Always provide /usr/local/bin/nix for CI shells
mkdir -p /usr/local/bin 2>/dev/null || true
targets+=("/usr/local/bin/nix")

# Provide sudo-friendly locations only if they are NOT present (do not override distro paths)
if [[ ! -e /usr/bin/nix ]]; then
targets+=("/usr/bin/nix")
fi
if [[ ! -e /usr/sbin/nix ]]; then
targets+=("/usr/sbin/nix")
fi

local target current_real
for target in "${targets[@]}"; do
current_real=""
if [[ -e "$target" ]]; then
current_real="$(real_exe "$target" 2>/dev/null || true)"
fi

if [[ -n "$current_real" && "$current_real" == "$nix_bin" ]]; then
echo "[init-nix] $target already points to: $nix_bin"
continue
fi

# If something exists but is not the same (and we promised not to override), skip.
if [[ -e "$target" && "$target" != "/usr/local/bin/nix" ]]; then
echo "[init-nix] WARNING: $target exists; not overwriting."
continue
fi

if ln -sf "$nix_bin" "$target" 2>/dev/null; then
echo "[init-nix] Ensured $target -> $nix_bin"
else
echo "[init-nix] WARNING: Failed to ensure $target symlink."
fi
done
}

# Ensure user-level nix symlink (works without root; CI-safe)
ensure_user_nix_symlink() {
local nix_bin="${1:-}"

[[ -z "$nix_bin" ]] && nix_bin="$(resolve_nix_bin 2>/dev/null || true)"

if [[ -z "$nix_bin" || ! -x "$nix_bin" ]]; then
echo "[init-nix] WARNING: nix binary not found, cannot create user symlink."
return 0
fi

nix_bin="$(real_exe "$nix_bin" 2>/dev/null || echo "$nix_bin")"

mkdir -p "$HOME/.local/bin" 2>/dev/null || true
ln -sf "$nix_bin" "$HOME/.local/bin/nix"

echo "[init-nix] Ensured $HOME/.local/bin/nix -> $nix_bin"

PATH="$HOME/.local/bin:$PATH"
export PATH

if [[ -w "$HOME/.profile" ]] && ! grep -q 'nix/init.sh' "$HOME/.profile" 2>/dev/null; then
cat >>"$HOME/.profile" <<'EOF'

# PATH for nix (added by package-manager nix/init.sh)
if [ -d "$HOME/.local/bin" ]; then
PATH="$HOME/.local/bin:$PATH"
fi
EOF
fi
}
49
scripts/nix/lib/users.sh
Normal file
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_USERS_SH:-}" ]]; then
return 0
fi
PKGMGR_NIX_USERS_SH=1

# Ensure Nix build group and users exist (build-users-group = nixbld) - root only
ensure_nix_build_group() {
if ! getent group nixbld >/dev/null 2>&1; then
echo "[init-nix] Creating group 'nixbld'..."
groupadd -r nixbld
fi

for i in $(seq 1 10); do
if ! id "nixbld$i" >/dev/null 2>&1; then
echo "[init-nix] Creating build user nixbld$i..."
useradd -r -g nixbld -G nixbld -s /usr/sbin/nologin "nixbld$i"
fi
done
}

# Container-only helper: /nix ownership + perms for single-user install as 'nix'
ensure_nix_store_dir_for_container_user() {
if [[ ! -d /nix ]]; then
echo "[init-nix] Creating /nix with owner nix:nixbld..."
mkdir -m 0755 /nix
chown nix:nixbld /nix
return 0
fi

local current_owner current_group
current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
if [[ "$current_owner" != "nix" || "$current_group" != "nixbld" ]]; then
echo "[init-nix] Fixing /nix ownership from $current_owner:$current_group to nix:nixbld..."
chown -R nix:nixbld /nix
fi
}

# Container-only helper: make nix profile executable/traversable for non-root
ensure_container_profile_perms() {
if [[ -d /home/nix ]]; then
chmod o+rx /home/nix 2>/dev/null || true
fi
if [[ -d /home/nix/.nix-profile ]]; then
chmod -R o+rx /home/nix/.nix-profile 2>/dev/null || true
fi
}
@@ -8,19 +8,18 @@ fi

FLAKE_DIR="/usr/lib/package-manager"

# ------------------------------------------------------------
# Try to ensure that "nix" is on PATH
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# Try to ensure that "nix" is on PATH (common locations + container user)
# ---------------------------------------------------------------------------
if ! command -v nix >/dev/null 2>&1; then
# Common locations for Nix installations
CANDIDATES=(
"/nix/var/nix/profiles/default/bin/nix"
"${HOME:-/root}/.nix-profile/bin/nix"
"/home/nix/.nix-profile/bin/nix"
)

for candidate in "${CANDIDATES[@]}"; do
if [[ -x "$candidate" ]]; then
# Prepend the directory of the candidate to PATH
PATH="$(dirname "$candidate"):${PATH}"
export PATH
break
@@ -28,13 +27,22 @@ if ! command -v nix >/dev/null 2>&1; then
done
fi

# ------------------------------------------------------------
# Primary (and only) path: use Nix flake if available
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# If nix is still missing, try to run nix/init.sh once
# ---------------------------------------------------------------------------
if ! command -v nix >/dev/null 2>&1; then
if [[ -x "${FLAKE_DIR}/nix/init.sh" ]]; then
"${FLAKE_DIR}/nix/init.sh" || true
fi
fi

# ---------------------------------------------------------------------------
# Primary path: use Nix flake if available
# ---------------------------------------------------------------------------
if command -v nix >/dev/null 2>&1; then
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
fi

echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH."
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
exit 1
9
scripts/setup/nix.sh
Executable file
@@ -0,0 +1,9 @@
# ------------------------------------------------------------
# Nix shell mode: do not touch venv, only run main.py install
# ------------------------------------------------------------

echo "[setup] Nix mode enabled (NIX_ENABLED=1)."
echo "[setup] Skipping virtualenv creation and dependency installation."
echo "[setup] Running main.py install via system python3..."
python3 main.py install
echo "[setup] Setup finished (Nix mode)."
98
scripts/setup/venv.sh
Executable file
@@ -0,0 +1,98 @@
#!/usr/bin/env bash
set -euo pipefail

echo "[setup] Starting setup..."

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

VENV_DIR="${HOME}/.venvs/pkgmgr"
RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/activate"; if [ -n "${PS1:-}" ]; then echo "Global Python virtual environment '\''~/.venvs/pkgmgr'\'' activated."; fi; fi'

# ------------------------------------------------------------
# Normal user mode: dev setup with venv
# ------------------------------------------------------------

echo "[setup] Running in normal user mode (developer setup)."

echo "[setup] Ensuring main.py is executable..."
chmod +x main.py || true

echo "[setup] Ensuring global virtualenv root: ${HOME}/.venvs"
mkdir -p "${HOME}/.venvs"

echo "[setup] Creating/updating virtualenv via helper..."
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

PIP_EDITABLE="${PKGMGR_PIP_EDITABLE:-1}"
PIP_EXTRAS="${PKGMGR_PIP_EXTRAS:-}"
PREFER_NIX="${PKGMGR_PREFER_NIX:-0}"

echo "[venv] Using VENV_DIR=${VENV_DIR}"

if [[ "${PREFER_NIX}" == "1" ]]; then
echo "[venv] PKGMGR_PREFER_NIX=1 set."
echo "[venv] Hint: Use Nix instead of a venv for reproducible installs:"
echo "[venv] nix develop"
echo "[venv] nix run .#pkgmgr -- --help"
exit 2
fi

echo "[venv] Ensuring virtualenv parent directory exists..."
mkdir -p "$(dirname "${VENV_DIR}")"

if [[ ! -d "${VENV_DIR}" ]]; then
echo "[venv] Creating virtual environment at: ${VENV_DIR}"
python3 -m venv "${VENV_DIR}"
else
echo "[venv] Virtual environment already exists at: ${VENV_DIR}"
fi

echo "[venv] Installing Python tooling into venv..."
"${VENV_DIR}/bin/python" -m ensurepip --upgrade
"${VENV_DIR}/bin/pip" install --upgrade pip setuptools wheel

# ---------------------------------------------------------------------------
# Install dependencies
# ---------------------------------------------------------------------------
if [[ -f "pyproject.toml" ]]; then
echo "[venv] Detected pyproject.toml. Installing project via pip..."

target="."
if [[ -n "${PIP_EXTRAS}" ]]; then
target=".[${PIP_EXTRAS}]"
fi

if [[ "${PIP_EDITABLE}" == "1" ]]; then
echo "[venv] pip install -e ${target}"
"${VENV_DIR}/bin/pip" install -e "${target}"
else
echo "[venv] pip install ${target}"
"${VENV_DIR}/bin/pip" install "${target}"
fi
else
echo "[venv] No pyproject.toml found. Skipping dependency installation."
fi

echo "[venv] Done."

echo "[setup] Ensuring ~/.bashrc and ~/.zshrc exist..."
touch "${HOME}/.bashrc" "${HOME}/.zshrc"

echo "[setup] Ensuring venv auto-activation is present in shell rc files..."
for rc in "${HOME}/.bashrc" "${HOME}/.zshrc"; do
if ! grep -qxF "${RC_LINE}" "$rc"; then
echo "${RC_LINE}" >> "$rc"
echo "[setup] Appended auto-activation to $rc"
else
echo "[setup] Auto-activation already present in $rc"
fi
done

echo "[setup] Running main.py install via venv Python..."
"${VENV_DIR}/bin/python" main.py install

echo
echo "[setup] Developer setup complete."
echo "Restart your shell (or run 'exec bash' or 'exec zsh') to activate the environment."
@@ -1,41 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

echo "============================================================"
echo ">>> Running sanity test: verifying test containers start"
echo "============================================================"

for distro in $DISTROS; do
IMAGE="package-manager-test-$distro"

echo
echo "------------------------------------------------------------"
echo ">>> Testing container: $IMAGE"
echo "------------------------------------------------------------"

echo "[test-container] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
echo

# Run the command and capture the output
if OUTPUT=$(docker run --rm \
-e PKGMGR_DEV=1 \
-v pkgmgr_nix_store_${distro}:/nix \
-v "$(pwd):/src" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
"$IMAGE" 2>&1); then
echo "$OUTPUT"
echo
echo "[test-container] SUCCESS: $IMAGE responded to 'pkgmgr --help'"

else
echo "$OUTPUT"
echo
echo "[test-container] ERROR: $IMAGE failed to run 'pkgmgr --help'"
exit 1
fi
done

echo
echo "============================================================"
echo ">>> All containers passed the sanity check"
echo "============================================================"
@@ -1,65 +1,60 @@
#!/usr/bin/env bash
set -euo pipefail

echo ">>> Running E2E tests in all distros: $DISTROS"
echo "============================================================"
echo ">>> Running E2E tests: $distro"
echo "============================================================"

for distro in $DISTROS; do
echo "============================================================"
echo ">>> Running E2E tests: $distro"
echo "============================================================"
docker run --rm \
-v "$(pwd):/src" \
-v "pkgmgr_nix_store_${distro}:/nix" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
-e REINSTALL_PKGMGR=1 \
-e TEST_PATTERN="${TEST_PATTERN}" \
--workdir /src \
"pkgmgr-${distro}" \
bash -lc '
set -euo pipefail

docker run --rm \
-v "$(pwd):/src" \
-v "pkgmgr_nix_store_${distro}:/nix" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
-e PKGMGR_DEV=1 \
-e TEST_PATTERN="${TEST_PATTERN}" \
--workdir /src \
--entrypoint bash \
"package-manager-test-${distro}" \
-c '
set -euo pipefail
# Load distro info
if [ -f /etc/os-release ]; then
. /etc/os-release
fi

# Load distro info
if [ -f /etc/os-release ]; then
. /etc/os-release
fi
echo "Running tests inside distro: ${ID:-unknown}"

echo "Running tests inside distro: ${ID:-unknown}"
# Load Nix environment if available
if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then
. "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
fi

# Load Nix environment if available
if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then
. "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
fi
if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
fi

if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
fi
PATH="/nix/var/nix/profiles/default/bin:$HOME/.nix-profile/bin:$PATH"

PATH="/nix/var/nix/profiles/default/bin:$HOME/.nix-profile/bin:$PATH"
command -v nix >/dev/null || {
echo "ERROR: nix not found."
exit 1
}

command -v nix >/dev/null || {
echo "ERROR: nix not found."
exit 1
}
# Mark the mounted repository as safe to avoid Git ownership errors.
# Newer Git (e.g. on Ubuntu) complains about the gitdir (/src/.git),
# older versions about the worktree (/src). Nix turns "." into the
# flake input "git+file:///src", which then uses Git under the hood.
if command -v git >/dev/null 2>&1; then
# Worktree path
git config --global --add safe.directory /src || true
# Gitdir path shown in the "dubious ownership" error
git config --global --add safe.directory /src/.git || true
# Ephemeral CI containers: allow all paths as a last resort
git config --global --add safe.directory '*' || true
fi

# Mark the mounted repository as safe to avoid Git ownership errors.
# Newer Git (e.g. on Ubuntu) complains about the gitdir (/src/.git),
# older versions about the worktree (/src). Nix turns "." into the
# flake input "git+file:///src", which then uses Git under the hood.
if command -v git >/dev/null 2>&1; then
# Worktree path
git config --global --add safe.directory /src || true
# Gitdir path shown in the "dubious ownership" error
git config --global --add safe.directory /src/.git || true
# Ephemeral CI containers: allow all paths as a last resort
git config --global --add safe.directory '*' || true
fi

# Run the E2E tests inside the Nix development shell
nix develop .#default --no-write-lock-file -c \
python3 -m unittest discover \
-s /src/tests/e2e \
-p "$TEST_PATTERN"
'
done
# Run the E2E tests inside the Nix development shell
nix develop .#default --no-write-lock-file -c \
python3 -m unittest discover \
-s /src/tests/e2e \
-p "$TEST_PATTERN"
'

48
scripts/test/test-env-nix.sh
Executable file
@@ -0,0 +1,48 @@
#!/usr/bin/env bash
set -euo pipefail

IMAGE="pkgmgr-${distro}"

echo "============================================================"
echo ">>> Running Nix flake-only test in ${distro} container"
echo ">>> Image: ${IMAGE}"
echo "============================================================"

docker run --rm \
-v "$(pwd):/src" \
-v "pkgmgr_nix_store_${distro}:/nix" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
--workdir /src \
-e REINSTALL_PKGMGR=1 \
"${IMAGE}" \
bash -lc '
set -euo pipefail

if command -v git >/dev/null 2>&1; then
git config --global --add safe.directory /src || true
git config --global --add safe.directory /src/.git || true
git config --global --add safe.directory "*" || true
fi

echo ">>> preflight: nix must exist in image"
if ! command -v nix >/dev/null 2>&1; then
echo "NO_NIX"
echo "ERROR: nix not found in image '\'''"${IMAGE}"''\'' (distro='"${distro}"')"
echo "HINT: Ensure Nix is installed during image build for this distro."
exit 1
fi

echo ">>> nix version"
nix --version

echo ">>> nix flake show"
nix flake show . --no-write-lock-file >/dev/null

echo ">>> nix build .#default"
nix build .#default --no-link --no-write-lock-file

echo ">>> nix run .#pkgmgr -- --help"
nix run .#pkgmgr -- --help --no-write-lock-file

echo ">>> OK: Nix flake-only test succeeded."
'
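Unlike the unit/integration test scripts further down (which default `distro` to `arch`), this script reads `${distro}` from the environment with no fallback, so a plausible local invocation looks like the following sketch; the `pkgmgr-<distro>` image names follow the pattern used in the script itself:

```bash
# Run the flake-only smoke test against a specific distro image.
distro=debian bash scripts/test/test-env-nix.sh
distro=arch bash scripts/test/test-env-virtual.sh
```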
32
scripts/test/test-env-virtual.sh
Executable file
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -euo pipefail

IMAGE="pkgmgr-$distro"

echo
echo "------------------------------------------------------------"
echo ">>> Testing VENV: $IMAGE"
echo "------------------------------------------------------------"
echo "[test-env-virtual] Inspect image metadata:"
docker image inspect "$IMAGE" | sed -n '1,40p'

echo "[test-env-virtual] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
echo

# Run the command and capture the output
if OUTPUT=$(docker run --rm \
-e REINSTALL_PKGMGR=1 \
-v pkgmgr_nix_store_${distro}:/nix \
-v "$(pwd):/src" \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
"$IMAGE" 2>&1); then
echo "$OUTPUT"
echo
echo "[test-env-virtual] SUCCESS: $IMAGE responded to 'pkgmgr --help'"

else
echo "$OUTPUT"
echo
echo "[test-env-virtual] ERROR: $IMAGE failed to run 'pkgmgr --help'"
exit 1
fi
@@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail

: "${distro:=arch}"

echo "============================================================"
echo ">>> Running INTEGRATION tests in ${distro} container"
echo "============================================================"
@@ -12,11 +10,10 @@ docker run --rm \
-v pkgmgr_nix_store_${distro}:/nix \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
--workdir /src \
-e PKGMGR_DEV=1 \
-e REINSTALL_PKGMGR=1 \
-e TEST_PATTERN="${TEST_PATTERN}" \
--entrypoint bash \
"package-manager-test-${distro}" \
-c '
"pkgmgr-${distro}" \
bash -lc '
set -e;
git config --global --add safe.directory /src || true;
nix develop .#default --no-write-lock-file -c \

@@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail

: "${distro:=arch}"

echo "============================================================"
echo ">>> Running UNIT tests in ${distro} container"
echo "============================================================"
@@ -12,11 +10,10 @@ docker run --rm \
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
-v pkgmgr_nix_store_${distro}:/nix \
--workdir /src \
-e PKGMGR_DEV=1 \
-e REINSTALL_PKGMGR=1 \
-e TEST_PATTERN="${TEST_PATTERN}" \
--entrypoint bash \
"package-manager-test-${distro}" \
-c '
"pkgmgr-${distro}" \
bash -lc '
set -e;
git config --global --add safe.directory /src || true;
nix develop .#default --no-write-lock-file -c \

@@ -1,235 +1,14 @@
# pkgmgr/actions/branch/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
High-level helpers for branch-related operations.

This module encapsulates the actual Git logic so the CLI layer
(pkgmgr.cli.commands.branch) stays thin and testable.
Public API for branch actions.
"""

from __future__ import annotations
from .open_branch import open_branch
from .close_branch import close_branch
from .drop_branch import drop_branch

from typing import Optional

from pkgmgr.core.git import run_git, GitError, get_current_branch

# ---------------------------------------------------------------------------
# Branch creation (open)
# ---------------------------------------------------------------------------

def open_branch(
name: Optional[str],
base_branch: str = "main",
fallback_base: str = "master",
cwd: str = ".",
) -> None:
"""
Create and push a new feature branch on top of a base branch.

The base branch is resolved by:
1. Trying 'base_branch' (default: 'main')
2. Falling back to 'fallback_base' (default: 'master')
|
||||
|
||||
Steps:
|
||||
1) git fetch origin
|
||||
2) git checkout <resolved_base>
|
||||
3) git pull origin <resolved_base>
|
||||
4) git checkout -b <name>
|
||||
5) git push -u origin <name>
|
||||
|
||||
If `name` is None or empty, the user is prompted to enter one.
|
||||
"""
|
||||
|
||||
# Request name interactively if not provided
|
||||
if not name:
|
||||
name = input("Enter new branch name: ").strip()
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
# Resolve which base branch to use (main or master)
|
||||
resolved_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
# 1) Fetch from origin
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before creating branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 2) Checkout base branch
|
||||
try:
|
||||
run_git(["checkout", resolved_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 3) Pull latest changes for base branch
|
||||
try:
|
||||
run_git(["pull", "origin", resolved_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 4) Create new branch
|
||||
try:
|
||||
run_git(["checkout", "-b", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to create new branch {name!r} from base {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 5) Push new branch to origin
|
||||
try:
|
||||
run_git(["push", "-u", "origin", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push new branch {name!r} to origin: {exc}"
|
||||
) from exc
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Base branch resolver (shared by open/close)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _resolve_base_branch(
|
||||
preferred: str,
|
||||
fallback: str,
|
||||
cwd: str,
|
||||
) -> str:
|
||||
"""
|
||||
Resolve the base branch to use.
|
||||
|
||||
Try `preferred` first (default: main),
|
||||
fall back to `fallback` (default: master).
|
||||
|
||||
Raise RuntimeError if neither exists.
|
||||
"""
|
||||
for candidate in (preferred, fallback):
|
||||
try:
|
||||
run_git(["rev-parse", "--verify", candidate], cwd=cwd)
|
||||
return candidate
|
||||
except GitError:
|
||||
continue
|
||||
|
||||
raise RuntimeError(
|
||||
f"Neither {preferred!r} nor {fallback!r} exist in this repository."
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Branch closing (merge + deletion)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def close_branch(
|
||||
name: Optional[str],
|
||||
base_branch: str = "main",
|
||||
fallback_base: str = "master",
|
||||
cwd: str = ".",
|
||||
) -> None:
|
||||
"""
|
||||
Merge a feature branch into the base branch and delete it afterwards.
|
||||
|
||||
Steps:
|
||||
1) Determine the branch name (argument or current branch)
|
||||
2) Resolve base branch (main/master)
|
||||
3) Ask for confirmation
|
||||
4) git fetch origin
|
||||
5) git checkout <base>
|
||||
6) git pull origin <base>
|
||||
7) git merge --no-ff <name>
|
||||
8) git push origin <base>
|
||||
9) Delete branch locally
|
||||
10) Delete branch on origin (best effort)
|
||||
"""
|
||||
|
||||
# 1) Determine which branch should be closed
|
||||
if not name:
|
||||
try:
|
||||
name = get_current_branch(cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(f"Failed to detect current branch: {exc}") from exc
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
# 2) Resolve base branch
|
||||
target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
if name == target_base:
|
||||
raise RuntimeError(
|
||||
f"Refusing to close base branch {target_base!r}. "
|
||||
"Please specify a feature branch."
|
||||
)
|
||||
|
||||
# 3) Ask user for confirmation
|
||||
prompt = (
|
||||
f"Merge branch '{name}' into '{target_base}' and delete it afterwards? "
|
||||
"(y/N): "
|
||||
)
|
||||
answer = input(prompt).strip().lower()
|
||||
if answer != "y":
|
||||
print("Aborted closing branch.")
|
||||
return
|
||||
|
||||
# 4) Fetch from origin
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before closing branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 5) Checkout base
|
||||
try:
|
||||
run_git(["checkout", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 6) Pull latest base state
|
||||
try:
|
||||
run_git(["pull", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 7) Merge the feature branch
|
||||
try:
|
||||
run_git(["merge", "--no-ff", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 8) Push updated base
|
||||
try:
|
||||
run_git(["push", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push base branch {target_base!r} after merge: {exc}"
|
||||
) from exc
|
||||
|
||||
# 9) Delete branch locally
|
||||
try:
|
||||
run_git(["branch", "-d", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to delete local branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 10) Delete branch on origin (best effort)
|
||||
try:
|
||||
run_git(["push", "origin", "--delete", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
|
||||
) from exc
|
||||
__all__ = [
|
||||
"open_branch",
|
||||
"close_branch",
|
||||
"drop_branch",
|
||||
]
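# Usage sketch (illustrative only; the repository path and branch name are
# hypothetical, and callers normally go through the pkgmgr CLI):
#
#   from pkgmgr.actions.branch import open_branch, close_branch
#
#   open_branch("feature/docs", cwd="/path/to/repo")
#   close_branch("feature/docs", cwd="/path/to/repo", force=True)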
|
||||
|
||||
99
src/pkgmgr/actions/branch/close_branch.py
Normal file
@@ -0,0 +1,99 @@
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
from pkgmgr.core.git import run_git, GitError, get_current_branch
|
||||
from .utils import _resolve_base_branch
|
||||
|
||||
|
||||
def close_branch(
|
||||
name: Optional[str],
|
||||
base_branch: str = "main",
|
||||
fallback_base: str = "master",
|
||||
cwd: str = ".",
|
||||
force: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Merge a feature branch into the base branch and delete it afterwards.
|
||||
"""
|
||||
|
||||
# Determine branch name
|
||||
if not name:
|
||||
try:
|
||||
name = get_current_branch(cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(f"Failed to detect current branch: {exc}") from exc
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
if name == target_base:
|
||||
raise RuntimeError(
|
||||
f"Refusing to close base branch {target_base!r}. "
|
||||
"Please specify a feature branch."
|
||||
)
|
||||
|
||||
# Confirmation
|
||||
if not force:
|
||||
answer = input(
|
||||
f"Merge branch '{name}' into '{target_base}' and delete it afterwards? (y/N): "
|
||||
).strip().lower()
|
||||
if answer != "y":
|
||||
print("Aborted closing branch.")
|
||||
return
|
||||
|
||||
# Fetch
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before closing branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Checkout base
|
||||
try:
|
||||
run_git(["checkout", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Pull latest
|
||||
try:
|
||||
run_git(["pull", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Merge
|
||||
try:
|
||||
run_git(["merge", "--no-ff", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Push result
|
||||
try:
|
||||
run_git(["push", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push base branch {target_base!r} after merge: {exc}"
|
||||
) from exc
|
||||
|
||||
# Delete local
|
||||
try:
|
||||
run_git(["branch", "-d", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to delete local branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Delete remote
|
||||
try:
|
||||
run_git(["push", "origin", "--delete", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Branch {name!r} deleted locally, but remote deletion failed: {exc}"
|
||||
) from exc
|
||||
55
src/pkgmgr/actions/branch/drop_branch.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
from pkgmgr.core.git import run_git, GitError, get_current_branch
|
||||
from .utils import _resolve_base_branch
|
||||
|
||||
|
||||
def drop_branch(
|
||||
name: Optional[str],
|
||||
base_branch: str = "main",
|
||||
fallback_base: str = "master",
|
||||
cwd: str = ".",
|
||||
force: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Delete a branch locally and remotely without merging.
|
||||
"""
|
||||
|
||||
if not name:
|
||||
try:
|
||||
name = get_current_branch(cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(f"Failed to detect current branch: {exc}") from exc
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
if name == target_base:
|
||||
raise RuntimeError(
|
||||
f"Refusing to drop base branch {target_base!r}. It cannot be deleted."
|
||||
)
|
||||
|
||||
# Confirmation
|
||||
if not force:
|
||||
answer = input(
|
||||
f"Delete branch '{name}' locally and on origin? This is destructive! (y/N): "
|
||||
).strip().lower()
|
||||
if answer != "y":
|
||||
print("Aborted dropping branch.")
|
||||
return
|
||||
|
||||
# Local delete
|
||||
try:
|
||||
run_git(["branch", "-d", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(f"Failed to delete local branch {name!r}: {exc}") from exc
|
||||
|
||||
# Remote delete
|
||||
try:
|
||||
run_git(["push", "origin", "--delete", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
|
||||
) from exc
|
||||
64
src/pkgmgr/actions/branch/open_branch.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
from .utils import _resolve_base_branch
|
||||
|
||||
|
||||
def open_branch(
|
||||
name: Optional[str],
|
||||
base_branch: str = "main",
|
||||
fallback_base: str = "master",
|
||||
cwd: str = ".",
|
||||
) -> None:
|
||||
"""
|
||||
Create and push a new feature branch on top of a base branch.
|
||||
"""
|
||||
|
||||
# Request name interactively if not provided
|
||||
if not name:
|
||||
name = input("Enter new branch name: ").strip()
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
resolved_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
# 1) Fetch from origin
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before creating branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 2) Checkout base branch
|
||||
try:
|
||||
run_git(["checkout", resolved_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 3) Pull latest changes
|
||||
try:
|
||||
run_git(["pull", "origin", resolved_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 4) Create new branch
|
||||
try:
|
||||
run_git(["checkout", "-b", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to create new branch {name!r} from base {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 5) Push new branch
|
||||
try:
|
||||
run_git(["push", "-u", "origin", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push new branch {name!r} to origin: {exc}"
|
||||
) from exc
|
||||
27
src/pkgmgr/actions/branch/utils.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from __future__ import annotations
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
|
||||
|
||||
def _resolve_base_branch(
|
||||
preferred: str,
|
||||
fallback: str,
|
||||
cwd: str,
|
||||
) -> str:
|
||||
"""
|
||||
Resolve the base branch to use.
|
||||
|
||||
Try `preferred` first (default: main),
|
||||
fall back to `fallback` (default: master).
|
||||
|
||||
Raise RuntimeError if neither exists.
|
||||
"""
|
||||
for candidate in (preferred, fallback):
|
||||
try:
|
||||
run_git(["rev-parse", "--verify", candidate], cwd=cwd)
|
||||
return candidate
|
||||
except GitError:
|
||||
continue
|
||||
|
||||
raise RuntimeError(
|
||||
f"Neither {preferred!r} nor {fallback!r} exist in this repository."
|
||||
)
|
||||
@@ -15,7 +15,7 @@ Responsibilities:
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any, Dict, List
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
@@ -63,7 +63,7 @@ def _ensure_repo_dir(
|
||||
no_verification: bool,
|
||||
clone_mode: str,
|
||||
identifier: str,
|
||||
) -> str | None:
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Compute and, if necessary, clone the repository directory.
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ from __future__ import annotations
|
||||
import glob
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Iterable, TYPE_CHECKING
|
||||
from typing import Iterable, TYPE_CHECKING, Optional
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
@@ -46,7 +46,7 @@ if TYPE_CHECKING:
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _read_text_if_exists(path: str) -> str | None:
|
||||
def _read_text_if_exists(path: str) -> Optional[str]:
|
||||
"""Read a file as UTF-8 text, returning None if it does not exist or fails."""
|
||||
if not os.path.exists(path):
|
||||
return None
|
||||
@@ -75,7 +75,7 @@ def _scan_files_for_patterns(files: Iterable[str], patterns: Iterable[str]) -> b
|
||||
return False
|
||||
|
||||
|
||||
def _first_spec_file(repo_dir: str) -> str | None:
|
||||
def _first_spec_file(repo_dir: str) -> Optional[str]:
|
||||
"""Return the first *.spec file in repo_dir, if any."""
|
||||
matches = glob.glob(os.path.join(repo_dir, "*.spec"))
|
||||
if not matches:
|
||||
@@ -360,7 +360,7 @@ def detect_capabilities(
|
||||
|
||||
def resolve_effective_capabilities(
|
||||
ctx: "RepoContext",
|
||||
layers: Iterable[str] | None = None,
|
||||
layers: Optional[Iterable[str]] = None,
|
||||
) -> dict[str, set[str]]:
|
||||
"""
|
||||
Resolve *effective* capabilities for each layer using a bottom-up strategy.
|
||||
|
||||
@@ -6,7 +6,7 @@ Base interface for all installer components in the pkgmgr installation pipeline.
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Set
|
||||
from typing import Set, Optional
|
||||
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install.capabilities import CAPABILITY_MATCHERS
|
||||
@@ -24,7 +24,7 @@ class BaseInstaller(ABC):
|
||||
# Examples: "nix", "python", "makefile".
|
||||
# This is used by capability matchers to decide which patterns to
|
||||
# search for in the repository.
|
||||
layer: str | None = None
|
||||
layer: Optional[str] = None
|
||||
|
||||
def discover_capabilities(self, ctx: RepoContext) -> Set[str]:
|
||||
"""
|
||||
|
||||
@@ -139,22 +139,27 @@ class NixFlakeInstaller(BaseInstaller):
|
||||
|
||||
for output, allow_failure in outputs:
|
||||
cmd = f"nix profile install {ctx.repo_dir}#{output}"
|
||||
print(f"[INFO] Running: {cmd}")
|
||||
ret = os.system(cmd)
|
||||
|
||||
try:
|
||||
run_command(
|
||||
cmd,
|
||||
cwd=ctx.repo_dir,
|
||||
preview=ctx.preview,
|
||||
allow_failure=allow_failure,
|
||||
)
|
||||
# Extract real exit code from os.system() result
|
||||
if os.WIFEXITED(ret):
|
||||
exit_code = os.WEXITSTATUS(ret)
|
||||
else:
|
||||
# abnormal termination (signal etc.) – keep raw value
|
||||
exit_code = ret
|
||||
|
||||
if exit_code == 0:
|
||||
print(f"Nix flake output '{output}' successfully installed.")
|
||||
except SystemExit as e:
|
||||
print(f"[Error] Failed to install Nix flake output '{output}': {e}")
|
||||
if not allow_failure:
|
||||
# Mandatory output failed → fatal for the pipeline.
|
||||
raise
|
||||
# Optional output failed → log and continue.
|
||||
print(
|
||||
"[Warning] Continuing despite failure to install "
|
||||
f"optional output '{output}'."
|
||||
)
|
||||
continue
|
||||
|
||||
print(f"[Error] Failed to install Nix flake output '{output}'")
|
||||
print(f"[Error] Command exited with code {exit_code}")
|
||||
|
||||
if not allow_failure:
|
||||
raise SystemExit(exit_code)
|
||||
|
||||
print(
|
||||
"[Warning] Continuing despite failure to install "
|
||||
f"optional output '{output}'."
|
||||
)
|
||||
|
||||
@@ -17,7 +17,7 @@ apt/dpkg tooling are available.
|
||||
import glob
|
||||
import os
|
||||
import shutil
|
||||
from typing import List
|
||||
from typing import List, Optional
|
||||
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
@@ -67,7 +67,7 @@ class DebianControlInstaller(BaseInstaller):
|
||||
pattern = os.path.join(parent, "*.deb")
|
||||
return sorted(glob.glob(pattern))
|
||||
|
||||
def _privileged_prefix(self) -> str | None:
|
||||
def _privileged_prefix(self) -> Optional[str]:
|
||||
"""
|
||||
Determine how to run privileged commands:
|
||||
|
||||
|
||||
26
src/pkgmgr/actions/mirror/__init__.py
Normal file
@@ -0,0 +1,26 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""
|
||||
High-level mirror actions.
|
||||
|
||||
Public API:
|
||||
- list_mirrors
|
||||
- diff_mirrors
|
||||
- merge_mirrors
|
||||
- setup_mirrors
|
||||
"""
|
||||
|
||||
from .types import Repository, MirrorMap
|
||||
from .list_cmd import list_mirrors
|
||||
from .diff_cmd import diff_mirrors
|
||||
from .merge_cmd import merge_mirrors
|
||||
from .setup_cmd import setup_mirrors
|
||||
|
||||
__all__ = [
|
||||
"Repository",
|
||||
"MirrorMap",
|
||||
"list_mirrors",
|
||||
"diff_mirrors",
|
||||
"merge_mirrors",
|
||||
"setup_mirrors",
|
||||
]
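# Usage sketch (illustrative; the argument values are hypothetical and the
# repository dictionaries normally come from the pkgmgr configuration):
#
#   from pkgmgr.actions.mirror import list_mirrors
#
#   list_mirrors(
#       selected_repos=repos,
#       repositories_base_dir="~/Repositories",
#       all_repos=repos,
#       source="all",
#   )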
|
||||
31
src/pkgmgr/actions/mirror/context.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
|
||||
from .io import load_config_mirrors, read_mirrors_file
|
||||
from .types import MirrorMap, RepoMirrorContext, Repository
|
||||
|
||||
|
||||
def build_context(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
) -> RepoMirrorContext:
|
||||
"""
|
||||
Build a RepoMirrorContext for a single repository.
|
||||
"""
|
||||
identifier = get_repo_identifier(repo, all_repos)
|
||||
repo_dir = get_repo_dir(repositories_base_dir, repo)
|
||||
|
||||
config_mirrors: MirrorMap = load_config_mirrors(repo)
|
||||
file_mirrors: MirrorMap = read_mirrors_file(repo_dir)
|
||||
|
||||
return RepoMirrorContext(
|
||||
identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
config_mirrors=config_mirrors,
|
||||
file_mirrors=file_mirrors,
|
||||
)
|
||||
60
src/pkgmgr/actions/mirror/diff_cmd.py
Normal file
@@ -0,0 +1,60 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from .context import build_context
|
||||
from .printing import print_header
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def diff_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
) -> None:
|
||||
"""
|
||||
Show differences between config mirrors and MIRRORS file.
|
||||
|
||||
- Mirrors present only in config are reported as "ONLY IN CONFIG".
|
||||
- Mirrors present only in MIRRORS file are reported as "ONLY IN FILE".
|
||||
- Mirrors with same name but different URLs are reported as "URL MISMATCH".
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print_header("[MIRROR DIFF]", ctx)
|
||||
|
||||
config_m = ctx.config_mirrors
|
||||
file_m = ctx.file_mirrors
|
||||
|
||||
if not config_m and not file_m:
|
||||
print(" No mirrors configured in config or MIRRORS file.")
|
||||
print()
|
||||
continue
|
||||
|
||||
# Mirrors only in config
|
||||
for name, url in sorted(config_m.items()):
|
||||
if name not in file_m:
|
||||
print(f" [ONLY IN CONFIG] {name}: {url}")
|
||||
|
||||
# Mirrors only in MIRRORS file
|
||||
for name, url in sorted(file_m.items()):
|
||||
if name not in config_m:
|
||||
print(f" [ONLY IN FILE] {name}: {url}")
|
||||
|
||||
# Mirrors with same name but different URLs
|
||||
shared = set(config_m) & set(file_m)
|
||||
for name in sorted(shared):
|
||||
url_cfg = config_m.get(name)
|
||||
url_file = file_m.get(name)
|
||||
if url_cfg != url_file:
|
||||
print(
|
||||
f" [URL MISMATCH] {name}:\n"
|
||||
f" config: {url_cfg}\n"
|
||||
f" file: {url_file}"
|
||||
)
|
||||
|
||||
if config_m and file_m and config_m == file_m:
|
||||
print(" [OK] Mirrors in config and MIRRORS file are in sync.")
|
||||
|
||||
print()
|
||||
179
src/pkgmgr/actions/mirror/git_remote.py
Normal file
@@ -0,0 +1,179 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from pkgmgr.core.command.run import run_command
|
||||
from pkgmgr.core.git import GitError, run_git
|
||||
from typing import List, Optional, Set
|
||||
|
||||
from .types import MirrorMap, RepoMirrorContext, Repository
|
||||
|
||||
|
||||
def build_default_ssh_url(repo: Repository) -> Optional[str]:
|
||||
"""
|
||||
Build a simple SSH URL from repo config if no explicit mirror is defined.
|
||||
|
||||
Example: git@github.com:account/repository.git
|
||||
"""
|
||||
provider = repo.get("provider")
|
||||
account = repo.get("account")
|
||||
name = repo.get("repository")
|
||||
port = repo.get("port")
|
||||
|
||||
if not provider or not account or not name:
|
||||
return None
|
||||
|
||||
provider = str(provider)
|
||||
account = str(account)
|
||||
name = str(name)
|
||||
|
||||
if port:
|
||||
return f"ssh://git@{provider}:{port}/{account}/{name}.git"
|
||||
|
||||
# GitHub-style shorthand
|
||||
return f"git@{provider}:{account}/{name}.git"
|
||||
|
||||
|
||||
def determine_primary_remote_url(
|
||||
repo: Repository,
|
||||
resolved_mirrors: MirrorMap,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Determine the primary remote URL in a consistent way:
|
||||
|
||||
1. resolved_mirrors["origin"]
|
||||
2. any resolved mirror (first by name)
|
||||
3. default SSH URL from provider/account/repository
|
||||
"""
|
||||
if "origin" in resolved_mirrors:
|
||||
return resolved_mirrors["origin"]
|
||||
|
||||
if resolved_mirrors:
|
||||
first_name = sorted(resolved_mirrors.keys())[0]
|
||||
return resolved_mirrors[first_name]
|
||||
|
||||
return build_default_ssh_url(repo)
|
||||
|
||||
|
||||
def _safe_git_output(args: List[str], cwd: str) -> Optional[str]:
|
||||
"""
|
||||
Run a Git command via run_git and return its stdout, or None on failure.
|
||||
"""
|
||||
try:
|
||||
return run_git(args, cwd=cwd)
|
||||
except GitError:
|
||||
return None
|
||||
|
||||
|
||||
def current_origin_url(repo_dir: str) -> Optional[str]:
|
||||
"""
|
||||
Return the current URL for remote 'origin', or None if not present.
|
||||
"""
|
||||
output = _safe_git_output(["remote", "get-url", "origin"], cwd=repo_dir)
|
||||
if not output:
|
||||
return None
|
||||
url = output.strip()
|
||||
return url or None
|
||||
|
||||
|
||||
def has_origin_remote(repo_dir: str) -> bool:
|
||||
"""
|
||||
Check whether a remote called 'origin' exists in the repository.
|
||||
"""
|
||||
output = _safe_git_output(["remote"], cwd=repo_dir)
|
||||
if not output:
|
||||
return False
|
||||
names = output.split()
|
||||
return "origin" in names
|
||||
|
||||
|
||||
def _ensure_push_urls_for_origin(
|
||||
repo_dir: str,
|
||||
mirrors: MirrorMap,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that all mirror URLs are present as push URLs on 'origin'.
|
||||
"""
|
||||
desired: Set[str] = {url for url in mirrors.values() if url}
|
||||
if not desired:
|
||||
return
|
||||
|
||||
existing_output = _safe_git_output(
|
||||
["remote", "get-url", "--push", "--all", "origin"],
|
||||
cwd=repo_dir,
|
||||
)
|
||||
existing = set(existing_output.splitlines()) if existing_output else set()
|
||||
|
||||
missing = sorted(desired - existing)
|
||||
for url in missing:
|
||||
cmd = f"git remote set-url --add --push origin {url}"
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
|
||||
else:
|
||||
print(f"[INFO] Adding push URL to 'origin': {url}")
|
||||
run_command(cmd, cwd=repo_dir, preview=False)
|
||||
|
||||
|
||||
def ensure_origin_remote(
|
||||
repo: Repository,
|
||||
ctx: RepoMirrorContext,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that a usable 'origin' remote exists and has all push URLs.
|
||||
"""
|
||||
repo_dir = ctx.repo_dir
|
||||
resolved_mirrors = ctx.resolved_mirrors
|
||||
|
||||
if not os.path.isdir(os.path.join(repo_dir, ".git")):
|
||||
print(f"[WARN] {repo_dir} is not a Git repository (no .git directory).")
|
||||
return
|
||||
|
||||
url = determine_primary_remote_url(repo, resolved_mirrors)
|
||||
|
||||
if not has_origin_remote(repo_dir):
|
||||
if not url:
|
||||
print(
|
||||
"[WARN] Could not determine URL for 'origin' remote. "
|
||||
"Please configure mirrors or provider/account/repository."
|
||||
)
|
||||
return
|
||||
|
||||
cmd = f"git remote add origin {url}"
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
|
||||
else:
|
||||
print(f"[INFO] Adding 'origin' remote in {repo_dir}: {url}")
|
||||
run_command(cmd, cwd=repo_dir, preview=False)
|
||||
else:
|
||||
current = current_origin_url(repo_dir)
|
||||
if current == url or not url:
|
||||
print(
|
||||
f"[INFO] 'origin' already points to "
|
||||
f"{current or '<unknown>'} (no change needed)."
|
||||
)
|
||||
else:
|
||||
# We do not auto-change origin here, only log the mismatch.
|
||||
print(
|
||||
"[INFO] 'origin' exists with URL "
|
||||
f"{current or '<unknown>'}; not changing to {url}."
|
||||
)
|
||||
|
||||
# Ensure all mirrors are present as push URLs
|
||||
_ensure_push_urls_for_origin(repo_dir, resolved_mirrors, preview)
|
||||
|
||||
|
||||
def is_remote_reachable(url: str, cwd: Optional[str] = None) -> bool:
|
||||
"""
|
||||
Check whether a remote repository is reachable via `git ls-remote`.
|
||||
|
||||
This does NOT modify anything; it only probes the remote.
|
||||
"""
|
||||
workdir = cwd or os.getcwd()
|
||||
try:
|
||||
# --exit-code → non-zero exit code if the remote does not exist
|
||||
run_git(["ls-remote", "--exit-code", url], cwd=workdir)
|
||||
return True
|
||||
except GitError:
|
||||
return False
|
||||
98
src/pkgmgr/actions/mirror/io.py
Normal file
@@ -0,0 +1,98 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
from typing import List, Mapping
|
||||
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
|
||||
def load_config_mirrors(repo: Repository) -> MirrorMap:
|
||||
mirrors = repo.get("mirrors") or {}
|
||||
result: MirrorMap = {}
|
||||
|
||||
if isinstance(mirrors, dict):
|
||||
for name, url in mirrors.items():
|
||||
if url:
|
||||
result[str(name)] = str(url)
|
||||
return result
|
||||
|
||||
if isinstance(mirrors, list):
|
||||
for entry in mirrors:
|
||||
if isinstance(entry, dict):
|
||||
name = entry.get("name")
|
||||
url = entry.get("url")
|
||||
if name and url:
|
||||
result[str(name)] = str(url)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def read_mirrors_file(repo_dir: str, filename: str = "MIRRORS") -> MirrorMap:
|
||||
"""
|
||||
Supports:
|
||||
NAME URL
|
||||
URL → auto name = hostname
|
||||
"""
|
||||
path = os.path.join(repo_dir, filename)
|
||||
mirrors: MirrorMap = {}
|
||||
|
||||
if not os.path.exists(path):
|
||||
return mirrors
|
||||
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as fh:
|
||||
for line in fh:
|
||||
stripped = line.strip()
|
||||
if not stripped or stripped.startswith("#"):
|
||||
continue
|
||||
|
||||
parts = stripped.split(None, 1)
|
||||
|
||||
# Case 1: "name url"
|
||||
if len(parts) == 2:
|
||||
name, url = parts
|
||||
# Case 2: "url" → auto-generate name
|
||||
elif len(parts) == 1:
|
||||
url = parts[0]
|
||||
parsed = urlparse(url)
|
||||
host = (parsed.netloc or "").split(":")[0]
|
||||
base = host or "mirror"
|
||||
name = base
|
||||
i = 2
|
||||
while name in mirrors:
|
||||
name = f"{base}{i}"
|
||||
i += 1
|
||||
else:
|
||||
continue
|
||||
|
||||
mirrors[name] = url
|
||||
except OSError as exc:
|
||||
print(f"[WARN] Could not read MIRRORS file at {path}: {exc}")
|
||||
|
||||
return mirrors
|
||||
|
||||
|
||||
def write_mirrors_file(
|
||||
repo_dir: str,
|
||||
mirrors: Mapping[str, str],
|
||||
filename: str = "MIRRORS",
|
||||
preview: bool = False,
|
||||
) -> None:
|
||||
|
||||
path = os.path.join(repo_dir, filename)
|
||||
lines = [f"{name} {url}" for name, url in sorted(mirrors.items())]
|
||||
content = "\n".join(lines) + ("\n" if lines else "")
|
||||
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would write MIRRORS file at {path}:")
|
||||
print(content or "(empty)")
|
||||
return
|
||||
|
||||
try:
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
with open(path, "w", encoding="utf-8") as fh:
|
||||
fh.write(content)
|
||||
print(f"[INFO] Wrote MIRRORS file at {path}")
|
||||
except OSError as exc:
|
||||
print(f"[ERROR] Failed to write MIRRORS file at {path}: {exc}")
|
||||
46
src/pkgmgr/actions/mirror/list_cmd.py
Normal file
@@ -0,0 +1,46 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from .context import build_context
|
||||
from .printing import print_header, print_named_mirrors
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def list_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
source: str = "all",
|
||||
) -> None:
|
||||
"""
|
||||
List mirrors for the selected repositories.
|
||||
|
||||
source:
|
||||
- "config" → only mirrors from configuration
|
||||
- "file" → only mirrors from MIRRORS file
|
||||
- "resolved" → merged view (config + file, file wins)
|
||||
- "all" → show config + file + resolved
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_m = ctx.resolved_mirrors
|
||||
|
||||
print_header("[MIRROR]", ctx)
|
||||
|
||||
if source in ("config", "all"):
|
||||
print_named_mirrors("config mirrors", ctx.config_mirrors)
|
||||
if source == "config":
|
||||
print()
|
||||
continue # next repo
|
||||
|
||||
if source in ("file", "all"):
|
||||
print_named_mirrors("MIRRORS file", ctx.file_mirrors)
|
||||
if source == "file":
|
||||
print()
|
||||
continue # next repo
|
||||
|
||||
if source in ("resolved", "all"):
|
||||
print_named_mirrors("resolved mirrors", resolved_m)
|
||||
|
||||
print()
|
||||
162
src/pkgmgr/actions/mirror/merge_cmd.py
Normal file
@@ -0,0 +1,162 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
|
||||
from .context import build_context
|
||||
from .io import write_mirrors_file
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
def _repo_key(repo: Repository) -> Tuple[str, str, str]:
|
||||
"""
|
||||
Normalised key for identifying a repository in config files.
|
||||
"""
|
||||
return (
|
||||
str(repo.get("provider", "")),
|
||||
str(repo.get("account", "")),
|
||||
str(repo.get("repository", "")),
|
||||
)
|
||||
|
||||
|
||||
def _load_user_config(path: str) -> Dict[str, object]:
|
||||
"""
|
||||
Load a user config YAML file as dict.
|
||||
Non-dicts yield {}.
|
||||
"""
|
||||
if not os.path.exists(path):
|
||||
return {}
|
||||
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
data = yaml.safe_load(f) or {}
|
||||
return data if isinstance(data, dict) else {}
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Main merge command
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
def merge_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
source: str,
|
||||
target: str,
|
||||
preview: bool = False,
|
||||
user_config_path: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Merge mirrors between config and MIRRORS file.
|
||||
|
||||
Rules:
|
||||
- source, target ∈ {"config", "file"}.
|
||||
- merged = (target_mirrors overridden by source_mirrors)
|
||||
- If target == "file" → write MIRRORS file.
|
||||
- If target == "config":
|
||||
* update the user config YAML directly
|
||||
* write it using save_user_config()
|
||||
|
||||
The merge strategy is:
|
||||
dst + src (src wins on same name)
|
||||
"""
|
||||
|
||||
# Load user config once if we intend to write to it.
|
||||
user_cfg: Optional[Dict[str, object]] = None
|
||||
user_cfg_path_expanded: Optional[str] = None
|
||||
|
||||
if target == "config" and user_config_path and not preview:
|
||||
user_cfg_path_expanded = os.path.expanduser(user_config_path)
|
||||
user_cfg = _load_user_config(user_cfg_path_expanded)
|
||||
if not isinstance(user_cfg.get("repositories"), list):
|
||||
user_cfg["repositories"] = []
|
||||
|
||||
for repo in selected_repos:
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print("============================================================")
|
||||
print(f"[MIRROR MERGE] Repository: {ctx.identifier}")
|
||||
print(f"[MIRROR MERGE] Directory: {ctx.repo_dir}")
|
||||
print(f"[MIRROR MERGE] {source} → {target}")
|
||||
print("============================================================")
|
||||
|
||||
# Pick the correct source/target maps
|
||||
if source == "config":
|
||||
src = ctx.config_mirrors
|
||||
dst = ctx.file_mirrors
|
||||
else: # source == "file"
|
||||
src = ctx.file_mirrors
|
||||
dst = ctx.config_mirrors
|
||||
|
||||
# Merge (src overrides dst)
|
||||
merged: MirrorMap = dict(dst)
|
||||
merged.update(src)
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# WRITE TO FILE
|
||||
# ---------------------------------------------------------
|
||||
if target == "file":
|
||||
write_mirrors_file(ctx.repo_dir, merged, preview=preview)
|
||||
print()
|
||||
continue
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# WRITE TO CONFIG
|
||||
# ---------------------------------------------------------
|
||||
if target == "config":
|
||||
# If preview or no config path → show intended output
|
||||
if preview or not user_cfg:
|
||||
print("[INFO] The following mirrors would be written to config:")
|
||||
if not merged:
|
||||
print(" (no mirrors)")
|
||||
else:
|
||||
for name, url in sorted(merged.items()):
|
||||
print(f" - {name}: {url}")
|
||||
print(" (Config not modified due to preview or missing path.)")
|
||||
print()
|
||||
continue
|
||||
|
||||
repos = user_cfg.get("repositories")
|
||||
target_key = _repo_key(repo)
|
||||
existing_repo: Optional[Repository] = None
|
||||
|
||||
# Find existing repo entry
|
||||
for entry in repos:
|
||||
if isinstance(entry, dict) and _repo_key(entry) == target_key:
|
||||
existing_repo = entry
|
||||
break
|
||||
|
||||
# Create entry if missing
|
||||
if existing_repo is None:
|
||||
existing_repo = {
|
||||
"provider": repo.get("provider"),
|
||||
"account": repo.get("account"),
|
||||
"repository": repo.get("repository"),
|
||||
}
|
||||
repos.append(existing_repo)
|
||||
|
||||
# Write or delete mirrors
|
||||
if merged:
|
||||
existing_repo["mirrors"] = dict(merged)
|
||||
else:
|
||||
existing_repo.pop("mirrors", None)
|
||||
|
||||
print(" [OK] Updated repo['mirrors'] in user config.")
|
||||
print()
|
||||
|
||||
# -------------------------------------------------------------
|
||||
# SAVE CONFIG (once at the end)
|
||||
# -------------------------------------------------------------
|
||||
if user_cfg is not None and user_cfg_path_expanded is not None and not preview:
|
||||
save_user_config(user_cfg, user_cfg_path_expanded)
|
||||
print(f"[OK] Saved updated config: {user_cfg_path_expanded}")
|
||||
35
src/pkgmgr/actions/mirror/printing.py
Normal file
@@ -0,0 +1,35 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .types import MirrorMap, RepoMirrorContext
|
||||
|
||||
|
||||
def print_header(
|
||||
title_prefix: str,
|
||||
ctx: RepoMirrorContext,
|
||||
) -> None:
|
||||
"""
|
||||
Print a standard header for mirror-related output.
|
||||
|
||||
title_prefix examples:
|
||||
- "[MIRROR]"
|
||||
- "[MIRROR DIFF]"
|
||||
- "[MIRROR MERGE]"
|
||||
- "[MIRROR SETUP:LOCAL]"
|
||||
- "[MIRROR SETUP:REMOTE]"
|
||||
"""
|
||||
print("============================================================")
|
||||
print(f"{title_prefix} Repository: {ctx.identifier}")
|
||||
print(f"{title_prefix} Directory: {ctx.repo_dir}")
|
||||
print("============================================================")
|
||||
|
||||
|
||||
def print_named_mirrors(label: str, mirrors: MirrorMap) -> None:
|
||||
"""
|
||||
Print a labeled mirror block (e.g. '[config mirrors]').
|
||||
"""
|
||||
print(f" [{label}]")
|
||||
if mirrors:
|
||||
for name, url in sorted(mirrors.items()):
|
||||
print(f" - {name}: {url}")
|
||||
else:
|
||||
print(" (none)")
|
||||
165
src/pkgmgr/actions/mirror/setup_cmd.py
Normal file
@@ -0,0 +1,165 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Tuple
|
||||
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url, ensure_origin_remote
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def _setup_local_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure local Git state is sane (currently: 'origin' remote).
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
print(f"[MIRROR SETUP:LOCAL] {ctx.identifier}")
|
||||
print(f"[MIRROR SETUP:LOCAL] dir: {ctx.repo_dir}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
ensure_origin_remote(repo, ctx, preview=preview)
|
||||
print()
|
||||
|
||||
|
||||
def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
|
||||
"""
|
||||
Probe a remote mirror by running `git ls-remote <url>`.
|
||||
|
||||
Returns:
|
||||
(True, "") on success,
|
||||
(False, error_message) on failure.
|
||||
|
||||
Important:
- Only the exit code is evaluated.
- STDERR may contain hints/warnings and is NOT automatically an error.
|
||||
"""
|
||||
try:
|
||||
# We ignore stdout entirely; all that matters is that the command
# completes without a GitError (i.e. with exit code 0).
|
||||
run_git(["ls-remote", url], cwd=repo_dir)
|
||||
return True, ""
|
||||
except GitError as exc:
|
||||
return False, str(exc)
|
||||
|
||||
|
||||
def _setup_remote_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Remote-side setup / validation.
|
||||
|
||||
Currently only **non-destructive checks** are performed:

- For every mirror (from config + MIRRORS file, file wins):
  * `git ls-remote <url>` is executed.
  * Exit code 0 → [OK]
  * On failure → [WARN] + details from the GitError exception

**No** provider APIs are called and no repositories are created.
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_m = ctx.resolved_mirrors
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
print(f"[MIRROR SETUP:REMOTE] {ctx.identifier}")
|
||||
print(f"[MIRROR SETUP:REMOTE] dir: {ctx.repo_dir}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
if not resolved_m:
|
||||
# Optional: fall back to a heuristically determined URL, in case we
# ever want to implement automatic creation.
|
||||
primary_url = determine_primary_remote_url(repo, resolved_m)
|
||||
if not primary_url:
|
||||
print(
|
||||
"[INFO] No mirrors configured (config or MIRRORS file), and no "
|
||||
"primary URL could be derived from provider/account/repository."
|
||||
)
|
||||
print()
|
||||
return
|
||||
|
||||
ok, error_message = _probe_mirror(primary_url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror (primary) is reachable: {primary_url}")
|
||||
else:
|
||||
print("[WARN] Primary remote URL is NOT reachable:")
|
||||
print(f" {primary_url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
return
|
||||
|
||||
# Normal case: we have named mirrors from config/MIRRORS
|
||||
for name, url in sorted(resolved_m.items()):
|
||||
ok, error_message = _probe_mirror(url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror '{name}' is reachable: {url}")
|
||||
else:
|
||||
print(f"[WARN] Remote mirror '{name}' is NOT reachable:")
|
||||
print(f" {url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
|
||||
|
||||
def setup_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool = False,
|
||||
local: bool = True,
|
||||
remote: bool = True,
|
||||
) -> None:
|
||||
"""
|
||||
Setup mirrors for the selected repositories.
|
||||
|
||||
local:
|
||||
- Configure local Git remotes (currently: ensure 'origin' is present and
|
||||
points to a reasonable URL).
|
||||
|
||||
remote:
|
||||
- Non-destructive remote checks using `git ls-remote` for each mirror URL.
|
||||
No repositories are created on the provider.
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
if local:
|
||||
_setup_local_mirrors_for_repo(
|
||||
repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
if remote:
|
||||
_setup_remote_mirrors_for_repo(
|
||||
repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
)
|
||||
32
src/pkgmgr/actions/mirror/types.py
Normal file
@@ -0,0 +1,32 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict
|
||||
|
||||
Repository = Dict[str, Any]
|
||||
MirrorMap = Dict[str, str]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RepoMirrorContext:
|
||||
"""
|
||||
Bundle mirror-related information for a single repository.
|
||||
"""
|
||||
|
||||
identifier: str
|
||||
repo_dir: str
|
||||
config_mirrors: MirrorMap
|
||||
file_mirrors: MirrorMap
|
||||
|
||||
@property
|
||||
def resolved_mirrors(self) -> MirrorMap:
|
||||
"""
|
||||
Combined mirrors from config and MIRRORS file.
|
||||
|
||||
Strategy:
|
||||
- Start from config mirrors
|
||||
- Overlay MIRRORS file (file wins on same name)
|
||||
"""
|
||||
merged: MirrorMap = dict(self.config_mirrors)
|
||||
merged.update(self.file_mirrors)
|
||||
return merged
|
||||
218
src/pkgmgr/actions/release/README.md
Normal file
@@ -0,0 +1,218 @@
|
||||
# Release Action
|
||||
|
||||
This module implements the `pkgmgr release` workflow.
|
||||
|
||||
It provides a controlled, reproducible release process that:
|
||||
- bumps the project version
|
||||
- updates all supported packaging formats
|
||||
- creates and pushes Git tags
|
||||
- optionally maintains a floating `latest` tag
|
||||
- optionally closes the current branch
|
||||
|
||||
The implementation is intentionally explicit and conservative to avoid
|
||||
accidental releases or broken Git states.
|
||||
|
||||
---
|
||||
|
||||
## What the Release Command Does
|
||||
|
||||
A release performs the following high-level steps:
|
||||
|
||||
1. Synchronize the current branch with its upstream (fast-forward only)
|
||||
2. Determine the next semantic version
|
||||
3. Update all versioned files
|
||||
4. Commit the release
|
||||
5. Create and push a version tag
|
||||
6. Optionally update and push the floating `latest` tag
|
||||
7. Optionally close the current branch
|
||||
|
||||
All steps support **preview (dry-run)** mode.
|
||||
|
||||
---
|
||||
|
||||
## Supported Files Updated During a Release
|
||||
|
||||
If present, the following files are updated automatically:
|
||||
|
||||
- `pyproject.toml`
|
||||
- `CHANGELOG.md`
|
||||
- `flake.nix`
|
||||
- `PKGBUILD`
|
||||
- `package-manager.spec`
|
||||
- `debian/changelog`
|
||||
|
||||
Missing files are skipped gracefully.
|
||||
|
||||
---
|
||||
|
||||
## Git Safety Rules
|
||||
|
||||
The release workflow enforces strict Git safety guarantees:
|
||||
|
||||
- A `git pull --ff-only` is executed **before any file modifications**
|
||||
- No merge commits are ever created automatically
|
||||
- Only the current branch and the newly created version tag are pushed
|
||||
- `git push --tags` is intentionally **not** used
|
||||
- The floating `latest` tag is force-pushed only when required
|
||||
|
||||
---
|
||||
|
||||
## Semantic Versioning
|
||||
|
||||
The next version is calculated from existing Git tags:
|
||||
|
||||
- Tags must follow the format `vX.Y.Z`
|
||||
- The release type controls the version bump:
|
||||
- `patch`
|
||||
- `minor`
|
||||
- `major`
|
||||
|
||||
The new tag is always created as an **annotated tag**.
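
For illustration, the bump logic described above can be sketched as follows
(a minimal sketch that assumes tags are readable via `git tag`; the actual
implementation lives in `versioning.py` and may differ in naming and error
handling):

```python
import re
import subprocess

def next_version(release_type: str = "patch") -> str:
    """Compute the next vX.Y.Z tag from the highest existing tag."""
    tags = subprocess.run(
        ["git", "tag", "--list", "v*"],
        capture_output=True, text=True, check=True,
    ).stdout.split()
    # Keep only well-formed vX.Y.Z tags and sort them numerically.
    versions = sorted(
        tuple(int(part) for part in tag[1:].split("."))
        for tag in tags
        if re.fullmatch(r"v\d+\.\d+\.\d+", tag)
    )
    major, minor, patch = versions[-1] if versions else (0, 0, 0)
    if release_type == "major":
        major, minor, patch = major + 1, 0, 0
    elif release_type == "minor":
        minor, patch = minor + 1, 0
    else:  # "patch"
        patch += 1
    return f"v{major}.{minor}.{patch}"
```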
|
||||
|
||||
---
|
||||
|
||||
## Floating `latest` Tag
|
||||
|
||||
The floating `latest` tag is handled explicitly:
|
||||
|
||||
- `latest` is updated **only if** the new version is the highest existing version
|
||||
- Version comparison uses natural version sorting (`sort -V`)
|
||||
- `latest` always points to the commit behind the version tag
|
||||
- Updating `latest` uses a forced push by design
|
||||
|
||||
This guarantees that `latest` always represents the highest released version,
|
||||
never an older release.
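
The decision behind this rule can be pictured in a few lines (an illustrative
sketch only; the shipped workflow performs the comparison with `sort -V`):

```python
def is_highest(new_tag: str, existing_tags: list[str]) -> bool:
    """True if new_tag is >= every existing vX.Y.Z tag in natural version order."""
    def key(tag: str) -> tuple[int, ...]:
        return tuple(int(part) for part in tag.lstrip("v").split("."))
    return all(key(new_tag) >= key(tag) for tag in existing_tags)

# is_highest("v1.3.1", ["v1.2.9", "v1.3.0"])  -> True  (latest moves)
# is_highest("v1.2.10", ["v1.3.0"])           -> False (latest stays)
```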
|
||||
|
||||
---
|
||||
|
||||
## Preview Mode
|
||||
|
||||
Preview mode (`--preview`) performs a full dry-run:
|
||||
|
||||
- No files are modified
|
||||
- No Git commands are executed
|
||||
- All intended actions are printed
|
||||
|
||||
Example preview output includes:
|
||||
- version bump
|
||||
- file updates
|
||||
- commit message
|
||||
- tag creation
|
||||
- branch and tag pushes
|
||||
- `latest` update (if applicable)
|
||||
|
||||
---
|
||||
|
||||
## Interactive vs Forced Mode
|
||||
|
||||
### Interactive (default)
|
||||
|
||||
1. Run a preview
|
||||
2. Ask for confirmation
|
||||
3. Execute the real release
|
||||
|
||||
### Forced (`--force`)
|
||||
|
||||
- Skips preview and confirmation
|
||||
- Skips branch deletion prompts
|
||||
- Executes the release immediately
|
||||
|
||||
---
|
||||
|
||||
## Branch Closing (`--close`)
|
||||
|
||||
When `--close` is enabled:
|
||||
|
||||
- `main` and `master` are **never** deleted
|
||||
- Other branches:
|
||||
- prompt for confirmation (`y/N`)
|
||||
- can be skipped using `--force`
|
||||
- Branch deletion happens **only after** a successful release
|
||||
|
||||
---

## Execution Flow (ASCII Diagram)

```
+---------------------+
|   pkgmgr release    |
+----------+----------+
           |
           v
+---------------------+
|   Detect branch     |
+----------+----------+
           |
           v
+------------------------------+
|  git fetch / pull --ff-only  |
+----------+-------------------+
           |
           v
+------------------------------+
|  Determine next version      |
+----------+-------------------+
           |
           v
+------------------------------+
|  Update versioned files      |
+----------+-------------------+
           |
           v
+------------------------------+
|  Commit release              |
+----------+-------------------+
           |
           v
+------------------------------+
|  Create version tag (vX.Y.Z) |
+----------+-------------------+
           |
           v
+------------------------------+
|  Push branch + version tag   |
+----------+-------------------+
           |
           v
+---------------------------------------+
|  Is this the highest version?         |
+----------+----------------------------+
           |
     yes   |   no
           |
           v
+------------------------------+   +----------------------+
|  Update & push `latest` tag  |   | Skip `latest` update |
+----------+-------------------+   +----------------------+
           |
           v
+------------------------------+
|  Close branch (optional)     |
+------------------------------+
```

---

## Design Goals

- Deterministic and reproducible releases
- No implicit Git side effects
- Explicit tag handling
- Safe defaults for interactive usage
- Automation-friendly forced mode
- Clear separation of concerns:
  - `workflow.py` – orchestration
  - `git_ops.py` – Git operations
  - `prompts.py` – user interaction
  - `versioning.py` – SemVer logic

---
## Summary

`pkgmgr release` is a **deliberately strict** release mechanism.

It trades convenience for safety, traceability, and correctness, making it
suitable for both interactive development workflows and fully automated CI/CD
pipelines.
@@ -1,310 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Release helper for pkgmgr (public entry point).
|
||||
|
||||
This package provides the high-level `release()` function used by the
|
||||
pkgmgr CLI to perform versioned releases:
|
||||
|
||||
- Determine the next semantic version based on existing Git tags.
|
||||
- Update pyproject.toml with the new version.
|
||||
- Update additional packaging files (flake.nix, PKGBUILD,
|
||||
debian/changelog, RPM spec) where present.
|
||||
- Prepend a basic entry to CHANGELOG.md.
|
||||
- Move the floating 'latest' tag to the newly created release tag so
|
||||
the newest release is always marked as latest.
|
||||
|
||||
Additional behaviour:
|
||||
- If `preview=True` (from --preview), no files are written and no
|
||||
Git commands are executed. Instead, a detailed summary of the
|
||||
planned changes and commands is printed.
|
||||
- If `preview=False` and not forced, the release is executed in two
|
||||
phases:
|
||||
1) Preview-only run (dry-run).
|
||||
2) Interactive confirmation, then real release if confirmed.
|
||||
This confirmation can be skipped with the `force=True` flag.
|
||||
- Before creating and pushing tags, main/master is updated from origin
|
||||
when the release is performed on one of these branches.
|
||||
- If `close=True` is used and the current branch is not main/master,
|
||||
the branch will be closed via branch_commands.close_branch() after
|
||||
a successful release.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
from typing import Optional
|
||||
|
||||
from pkgmgr.core.git import get_current_branch, GitError
|
||||
from pkgmgr.actions.branch import close_branch
|
||||
|
||||
from .versioning import determine_current_version, bump_semver
|
||||
from .git_ops import run_git_command, sync_branch_with_remote, update_latest_tag
|
||||
from .files import (
|
||||
update_pyproject_version,
|
||||
update_flake_version,
|
||||
update_pkgbuild_version,
|
||||
update_spec_version,
|
||||
update_changelog,
|
||||
update_debian_changelog,
|
||||
update_spec_changelog,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal implementation (single-phase, preview or real)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _release_impl(
|
||||
pyproject_path: str = "pyproject.toml",
|
||||
changelog_path: str = "CHANGELOG.md",
|
||||
release_type: str = "patch",
|
||||
message: Optional[str] = None,
|
||||
preview: bool = False,
|
||||
close: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Internal implementation that performs a single-phase release.
|
||||
"""
|
||||
current_ver = determine_current_version()
|
||||
new_ver = bump_semver(current_ver, release_type)
|
||||
new_ver_str = str(new_ver)
|
||||
new_tag = new_ver.to_tag(with_prefix=True)
|
||||
|
||||
mode = "PREVIEW" if preview else "REAL"
|
||||
print(f"Release mode: {mode}")
|
||||
print(f"Current version: {current_ver}")
|
||||
print(f"New version: {new_ver_str} ({release_type})")
|
||||
|
||||
repo_root = os.path.dirname(os.path.abspath(pyproject_path))
|
||||
|
||||
# Update core project metadata and packaging files
|
||||
update_pyproject_version(pyproject_path, new_ver_str, preview=preview)
|
||||
changelog_message = update_changelog(
|
||||
changelog_path,
|
||||
new_ver_str,
|
||||
message=message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
flake_path = os.path.join(repo_root, "flake.nix")
|
||||
update_flake_version(flake_path, new_ver_str, preview=preview)
|
||||
|
||||
pkgbuild_path = os.path.join(repo_root, "PKGBUILD")
|
||||
update_pkgbuild_version(pkgbuild_path, new_ver_str, preview=preview)
|
||||
|
||||
spec_path = os.path.join(repo_root, "package-manager.spec")
|
||||
update_spec_version(spec_path, new_ver_str, preview=preview)
|
||||
|
||||
# Determine a single effective_message to be reused across all
|
||||
# changelog targets (project, Debian, Fedora).
|
||||
effective_message: Optional[str] = message
|
||||
if effective_message is None and isinstance(changelog_message, str):
|
||||
if changelog_message.strip():
|
||||
effective_message = changelog_message.strip()
|
||||
|
||||
debian_changelog_path = os.path.join(repo_root, "debian", "changelog")
|
||||
package_name = os.path.basename(repo_root) or "package-manager"
|
||||
|
||||
# Debian changelog
|
||||
update_debian_changelog(
|
||||
debian_changelog_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
# Fedora / RPM %changelog
|
||||
update_spec_changelog(
|
||||
spec_path=spec_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
commit_msg = f"Release version {new_ver_str}"
|
||||
tag_msg = effective_message or commit_msg
|
||||
|
||||
# Determine branch and ensure it is up to date if main/master
|
||||
try:
|
||||
branch = get_current_branch() or "main"
|
||||
except GitError:
|
||||
branch = "main"
|
||||
print(f"Releasing on branch: {branch}")
|
||||
|
||||
# Ensure main/master are up-to-date from origin before creating and
|
||||
# pushing tags. For other branches we only log the intent.
|
||||
sync_branch_with_remote(branch, preview=preview)
|
||||
|
||||
files_to_add = [
|
||||
pyproject_path,
|
||||
changelog_path,
|
||||
flake_path,
|
||||
pkgbuild_path,
|
||||
spec_path,
|
||||
debian_changelog_path,
|
||||
]
|
||||
existing_files = [p for p in files_to_add if p and os.path.exists(p)]
|
||||
|
||||
if preview:
|
||||
for path in existing_files:
|
||||
print(f"[PREVIEW] Would run: git add {path}")
|
||||
print(f'[PREVIEW] Would run: git commit -am "{commit_msg}"')
|
||||
print(f'[PREVIEW] Would run: git tag -a {new_tag} -m "{tag_msg}"')
|
||||
print(f"[PREVIEW] Would run: git push origin {branch}")
|
||||
print("[PREVIEW] Would run: git push origin --tags")
|
||||
|
||||
# Also update the floating 'latest' tag to the new highest SemVer.
|
||||
update_latest_tag(new_tag, preview=True)
|
||||
|
||||
if close and branch not in ("main", "master"):
|
||||
print(
|
||||
f"[PREVIEW] Would also close branch {branch} after the release "
|
||||
"(close=True and branch is not main/master)."
|
||||
)
|
||||
elif close:
|
||||
print(
|
||||
f"[PREVIEW] close=True but current branch is {branch}; "
|
||||
"no branch would be closed."
|
||||
)
|
||||
|
||||
print("Preview completed. No changes were made.")
|
||||
return
|
||||
|
||||
for path in existing_files:
|
||||
run_git_command(f"git add {path}")
|
||||
|
||||
run_git_command(f'git commit -am "{commit_msg}"')
|
||||
run_git_command(f'git tag -a {new_tag} -m "{tag_msg}"')
|
||||
run_git_command(f"git push origin {branch}")
|
||||
run_git_command("git push origin --tags")
|
||||
|
||||
# Move 'latest' to the new release tag so the newest SemVer is always
|
||||
# marked as latest. This is best-effort and must not break the release.
|
||||
try:
|
||||
update_latest_tag(new_tag, preview=False)
|
||||
except GitError as exc: # pragma: no cover
|
||||
print(
|
||||
f"[WARN] Failed to update floating 'latest' tag for {new_tag}: {exc}\n"
|
||||
"[WARN] The release itself completed successfully; only the "
|
||||
"'latest' tag was not updated."
|
||||
)
|
||||
|
||||
print(f"Release {new_ver_str} completed.")
|
||||
|
||||
if close:
|
||||
if branch in ("main", "master"):
|
||||
print(
|
||||
f"[INFO] close=True but current branch is {branch}; "
|
||||
"nothing to close."
|
||||
)
|
||||
return
|
||||
|
||||
print(
|
||||
f"[INFO] Closing branch {branch} after successful release "
|
||||
"(close=True and branch is not main/master)..."
|
||||
)
|
||||
try:
|
||||
close_branch(name=branch, base_branch="main", cwd=".")
|
||||
except Exception as exc: # pragma: no cover
|
||||
print(f"[WARN] Failed to close branch {branch} automatically: {exc}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public release entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def release(
|
||||
pyproject_path: str = "pyproject.toml",
|
||||
changelog_path: str = "CHANGELOG.md",
|
||||
release_type: str = "patch",
|
||||
message: Optional[str] = None,
|
||||
preview: bool = False,
|
||||
force: bool = False,
|
||||
close: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
High-level release entry point.
|
||||
|
||||
Modes:
|
||||
|
||||
- preview=True:
|
||||
* Single-phase PREVIEW only.
|
||||
|
||||
- preview=False, force=True:
|
||||
* Single-phase REAL release, no interactive preview.
|
||||
|
||||
- preview=False, force=False:
|
||||
* Two-phase flow (intended default for interactive CLI use).
|
||||
"""
|
||||
if preview:
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=True,
|
||||
close=close,
|
||||
)
|
||||
return
|
||||
|
||||
if force:
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
)
|
||||
return
|
||||
|
||||
if not sys.stdin.isatty():
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
)
|
||||
return
|
||||
|
||||
print("[INFO] Running preview before actual release...\n")
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=True,
|
||||
close=close,
|
||||
)
|
||||
|
||||
try:
|
||||
answer = input("Proceed with the actual release? [y/N]: ").strip().lower()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print("\n[INFO] Release aborted (no confirmation).")
|
||||
return
|
||||
|
||||
if answer not in ("y", "yes"):
|
||||
print("Release aborted by user. No changes were made.")
|
||||
return
|
||||
|
||||
print("\n[INFO] Running REAL release...\n")
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
)
|
||||
|
||||
from .workflow import release
|
||||
|
||||
__all__ = ["release"]
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Git-related helpers for the release workflow.
|
||||
|
||||
Responsibilities:
|
||||
- Run Git (or shell) commands with basic error reporting.
|
||||
- Ensure main/master are synchronized with origin before tagging.
|
||||
- Maintain the floating 'latest' tag that always points to the newest
|
||||
release tag.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
@@ -19,77 +6,87 @@ from pkgmgr.core.git import GitError
|
||||
|
||||
|
||||
def run_git_command(cmd: str) -> None:
|
||||
"""
|
||||
Run a Git (or shell) command with basic error reporting.
|
||||
|
||||
The command is executed via the shell, primarily for readability
|
||||
when printed (as in 'git commit -am "msg"').
|
||||
"""
|
||||
print(f"[GIT] {cmd}")
|
||||
try:
|
||||
subprocess.run(cmd, shell=True, check=True)
|
||||
subprocess.run(
|
||||
cmd,
|
||||
shell=True,
|
||||
check=True,
|
||||
text=True,
|
||||
capture_output=True,
|
||||
)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
print(f"[ERROR] Git command failed: {cmd}")
|
||||
print(f" Exit code: {exc.returncode}")
|
||||
if exc.stdout:
|
||||
print("--- stdout ---")
|
||||
print(exc.stdout)
|
||||
print("\n" + exc.stdout)
|
||||
if exc.stderr:
|
||||
print("--- stderr ---")
|
||||
print(exc.stderr)
|
||||
print("\n" + exc.stderr)
|
||||
raise GitError(f"Git command failed: {cmd}") from exc
|
||||
|
||||
|
||||
def sync_branch_with_remote(branch: str, preview: bool = False) -> None:
|
||||
"""
|
||||
Ensure the local main/master branch is up-to-date before tagging.
|
||||
def _capture(cmd: str) -> str:
|
||||
res = subprocess.run(cmd, shell=True, check=False, capture_output=True, text=True)
|
||||
return (res.stdout or "").strip()
|
||||
|
||||
Behaviour:
|
||||
- For main/master: run 'git fetch origin' and 'git pull origin <branch>'.
|
||||
- For all other branches: only log that no automatic sync is performed.
|
||||
|
||||
def ensure_clean_and_synced(preview: bool = False) -> None:
|
||||
"""
|
||||
if branch not in ("main", "master"):
|
||||
print(
|
||||
f"[INFO] Skipping automatic git pull for non-main/master branch "
|
||||
f"{branch}."
|
||||
)
|
||||
Always run a pull BEFORE modifying anything.
|
||||
Uses --ff-only to avoid creating merge commits automatically.
|
||||
If no upstream is configured, we skip.
|
||||
"""
|
||||
upstream = _capture("git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null")
|
||||
if not upstream:
|
||||
print("[INFO] No upstream configured for current branch. Skipping pull.")
|
||||
return
|
||||
|
||||
print(
|
||||
f"[INFO] Updating branch {branch} from origin before creating tags..."
|
||||
)
|
||||
|
||||
if preview:
|
||||
print("[PREVIEW] Would run: git fetch origin")
|
||||
print(f"[PREVIEW] Would run: git pull origin {branch}")
|
||||
print("[PREVIEW] Would run: git fetch origin --prune --tags --force")
|
||||
print("[PREVIEW] Would run: git pull --ff-only")
|
||||
return
|
||||
|
||||
run_git_command("git fetch origin")
|
||||
run_git_command(f"git pull origin {branch}")
|
||||
print("[INFO] Syncing with remote before making any changes...")
|
||||
run_git_command("git fetch origin --prune --tags --force")
|
||||
run_git_command("git pull --ff-only")
|
||||
|
||||
def is_highest_version_tag(tag: str) -> bool:
|
||||
"""
|
||||
Return True if `tag` is the highest version among all tags matching v*.
|
||||
Comparison uses `sort -V` for natural version ordering.
|
||||
"""
|
||||
all_v = _capture("git tag --list 'v*'")
|
||||
if not all_v:
|
||||
return True # No tags yet, so the current tag is the highest
|
||||
|
||||
# Get the latest tag in natural version order
|
||||
latest = _capture("git tag --list 'v*' | sort -V | tail -n1")
|
||||
print(f"[INFO] Latest tag: {latest}, Current tag: {tag}")
|
||||
|
||||
# Ensure that the current tag is always considered the highest if it's the latest one
|
||||
return tag >= latest # Use comparison operator to consider all future tags
|
||||
|
||||
|
||||
def update_latest_tag(new_tag: str, preview: bool = False) -> None:
|
||||
"""
|
||||
Move the floating 'latest' tag to the newly created release tag.
|
||||
|
||||
Implementation details:
|
||||
- We explicitly dereference the tag object via `<tag>^{}` so that
|
||||
'latest' always points at the underlying commit, not at another tag.
|
||||
- We create/update 'latest' as an annotated tag with a short message so
|
||||
Git configurations that enforce annotated/signed tags do not fail
|
||||
with "no tag message".
|
||||
Notes:
|
||||
- We dereference the tag object via `<tag>^{}` so that 'latest' points to the commit.
|
||||
- 'latest' is forced (floating tag), therefore the push uses --force.
|
||||
"""
|
||||
target_ref = f"{new_tag}^{{}}"
|
||||
print(f"[INFO] Updating 'latest' tag to point at {new_tag} (commit {target_ref})...")
|
||||
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run: git tag -f -a latest {target_ref} "
|
||||
f'-m "Floating latest tag for {new_tag}"')
|
||||
print(
|
||||
f'[PREVIEW] Would run: git tag -f -a latest {target_ref} '
|
||||
f'-m "Floating latest tag for {new_tag}"'
|
||||
)
|
||||
print("[PREVIEW] Would run: git push origin latest --force")
|
||||
return
|
||||
|
||||
run_git_command(
|
||||
f'git tag -f -a latest {target_ref} '
|
||||
f'-m "Floating latest tag for {new_tag}"'
|
||||
f'git tag -f -a latest {target_ref} -m "Floating latest tag for {new_tag}"'
|
||||
)
|
||||
run_git_command("git push origin latest --force")
|
||||
|
||||
29  src/pkgmgr/actions/release/prompts.py  (new file)
@@ -0,0 +1,29 @@
from __future__ import annotations

import sys


def should_delete_branch(force: bool) -> bool:
    """
    Ask whether the current branch should be deleted after a successful release.

    - If force=True: skip prompt and return True.
    - If non-interactive stdin: do NOT delete by default.
    """
    if force:
        return True
    if not sys.stdin.isatty():
        return False
    answer = input("Delete the current branch after release? [y/N] ").strip().lower()
    return answer in ("y", "yes")


def confirm_proceed_release() -> bool:
    """
    Ask whether to proceed with the REAL release after the preview phase.
    """
    try:
        answer = input("Proceed with the actual release? [y/N]: ").strip().lower()
    except (EOFError, KeyboardInterrupt):
        return False
    return answer in ("y", "yes")
231  src/pkgmgr/actions/release/workflow.py  (new file)
@@ -0,0 +1,231 @@
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import os
|
||||
import sys
|
||||
from typing import Optional
|
||||
|
||||
from pkgmgr.actions.branch import close_branch
|
||||
from pkgmgr.core.git import get_current_branch, GitError
|
||||
|
||||
from .files import (
|
||||
update_changelog,
|
||||
update_debian_changelog,
|
||||
update_flake_version,
|
||||
update_pkgbuild_version,
|
||||
update_pyproject_version,
|
||||
update_spec_changelog,
|
||||
update_spec_version,
|
||||
)
|
||||
from .git_ops import (
|
||||
ensure_clean_and_synced,
|
||||
is_highest_version_tag,
|
||||
run_git_command,
|
||||
update_latest_tag,
|
||||
)
|
||||
from .prompts import confirm_proceed_release, should_delete_branch
|
||||
from .versioning import bump_semver, determine_current_version
|
||||
|
||||
|
||||
def _release_impl(
|
||||
pyproject_path: str = "pyproject.toml",
|
||||
changelog_path: str = "CHANGELOG.md",
|
||||
release_type: str = "patch",
|
||||
message: Optional[str] = None,
|
||||
preview: bool = False,
|
||||
close: bool = False,
|
||||
force: bool = False,
|
||||
) -> None:
|
||||
# Determine current branch early
|
||||
try:
|
||||
branch = get_current_branch() or "main"
|
||||
except GitError:
|
||||
branch = "main"
|
||||
print(f"Releasing on branch: {branch}")
|
||||
|
||||
# Pull BEFORE making any modifications
|
||||
ensure_clean_and_synced(preview=preview)
|
||||
|
||||
current_ver = determine_current_version()
|
||||
new_ver = bump_semver(current_ver, release_type)
|
||||
new_ver_str = str(new_ver)
|
||||
new_tag = new_ver.to_tag(with_prefix=True)
|
||||
|
||||
mode = "PREVIEW" if preview else "REAL"
|
||||
print(f"Release mode: {mode}")
|
||||
print(f"Current version: {current_ver}")
|
||||
print(f"New version: {new_ver_str} ({release_type})")
|
||||
|
||||
repo_root = os.path.dirname(os.path.abspath(pyproject_path))
|
||||
|
||||
update_pyproject_version(pyproject_path, new_ver_str, preview=preview)
|
||||
changelog_message = update_changelog(
|
||||
changelog_path,
|
||||
new_ver_str,
|
||||
message=message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
flake_path = os.path.join(repo_root, "flake.nix")
|
||||
update_flake_version(flake_path, new_ver_str, preview=preview)
|
||||
|
||||
pkgbuild_path = os.path.join(repo_root, "PKGBUILD")
|
||||
update_pkgbuild_version(pkgbuild_path, new_ver_str, preview=preview)
|
||||
|
||||
spec_path = os.path.join(repo_root, "package-manager.spec")
|
||||
update_spec_version(spec_path, new_ver_str, preview=preview)
|
||||
|
||||
effective_message: Optional[str] = message
|
||||
if effective_message is None and isinstance(changelog_message, str):
|
||||
if changelog_message.strip():
|
||||
effective_message = changelog_message.strip()
|
||||
|
||||
debian_changelog_path = os.path.join(repo_root, "debian", "changelog")
|
||||
package_name = os.path.basename(repo_root) or "package-manager"
|
||||
|
||||
update_debian_changelog(
|
||||
debian_changelog_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
update_spec_changelog(
|
||||
spec_path=spec_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
commit_msg = f"Release version {new_ver_str}"
|
||||
tag_msg = effective_message or commit_msg
|
||||
|
||||
files_to_add = [
|
||||
pyproject_path,
|
||||
changelog_path,
|
||||
flake_path,
|
||||
pkgbuild_path,
|
||||
spec_path,
|
||||
debian_changelog_path,
|
||||
]
|
||||
existing_files = [p for p in files_to_add if p and os.path.exists(p)]
|
||||
|
||||
if preview:
|
||||
for path in existing_files:
|
||||
print(f"[PREVIEW] Would run: git add {path}")
|
||||
print(f'[PREVIEW] Would run: git commit -am "{commit_msg}"')
|
||||
print(f'[PREVIEW] Would run: git tag -a {new_tag} -m "{tag_msg}"')
|
||||
print(f"[PREVIEW] Would run: git push origin {branch}")
|
||||
print(f"[PREVIEW] Would run: git push origin {new_tag}")
|
||||
|
||||
if is_highest_version_tag(new_tag):
|
||||
update_latest_tag(new_tag, preview=True)
|
||||
else:
|
||||
print(f"[PREVIEW] Skipping 'latest' update (tag {new_tag} is not the highest).")
|
||||
|
||||
if close and branch not in ("main", "master"):
|
||||
if force:
|
||||
print(f"[PREVIEW] Would delete branch {branch} (forced).")
|
||||
else:
|
||||
print(f"[PREVIEW] Would ask whether to delete branch {branch} after release.")
|
||||
return
|
||||
|
||||
for path in existing_files:
|
||||
run_git_command(f"git add {path}")
|
||||
|
||||
run_git_command(f'git commit -am "{commit_msg}"')
|
||||
run_git_command(f'git tag -a {new_tag} -m "{tag_msg}"')
|
||||
|
||||
# Push branch and ONLY the newly created version tag (no --tags)
|
||||
run_git_command(f"git push origin {branch}")
|
||||
run_git_command(f"git push origin {new_tag}")
|
||||
|
||||
# Update 'latest' only if this is the highest version tag
|
||||
try:
|
||||
if is_highest_version_tag(new_tag):
|
||||
update_latest_tag(new_tag, preview=False)
|
||||
else:
|
||||
print(f"[INFO] Skipping 'latest' update (tag {new_tag} is not the highest).")
|
||||
except GitError as exc:
|
||||
print(f"[WARN] Failed to update floating 'latest' tag for {new_tag}: {exc}")
|
||||
print("'latest' tag was not updated.")
|
||||
|
||||
print(f"Release {new_ver_str} completed.")
|
||||
|
||||
if close:
|
||||
if branch in ("main", "master"):
|
||||
print(f"[INFO] close=True but current branch is {branch}; skipping branch deletion.")
|
||||
return
|
||||
|
||||
if not should_delete_branch(force=force):
|
||||
print(f"[INFO] Branch deletion declined. Keeping branch {branch}.")
|
||||
return
|
||||
|
||||
print(f"[INFO] Deleting branch {branch} after successful release...")
|
||||
try:
|
||||
close_branch(name=branch, base_branch="main", cwd=".")
|
||||
except Exception as exc:
|
||||
print(f"[WARN] Failed to close branch {branch} automatically: {exc}")
|
||||
|
||||
|
||||
def release(
|
||||
pyproject_path: str = "pyproject.toml",
|
||||
changelog_path: str = "CHANGELOG.md",
|
||||
release_type: str = "patch",
|
||||
message: Optional[str] = None,
|
||||
preview: bool = False,
|
||||
force: bool = False,
|
||||
close: bool = False,
|
||||
) -> None:
|
||||
if preview:
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=True,
|
||||
close=close,
|
||||
force=force,
|
||||
)
|
||||
return
|
||||
|
||||
# If force or non-interactive: no preview+confirmation step
|
||||
if force or (not sys.stdin.isatty()):
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
force=force,
|
||||
)
|
||||
return
|
||||
|
||||
print("[INFO] Running preview before actual release...\n")
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=True,
|
||||
close=close,
|
||||
force=force,
|
||||
)
|
||||
|
||||
if not confirm_proceed_release():
|
||||
print()
|
||||
return
|
||||
|
||||
print("\n[INFO] Running REAL release...\n")
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
force=force,
|
||||
)
|
||||
@@ -18,52 +18,17 @@ USER_CONFIG_PATH = os.path.expanduser("~/.config/pkgmgr/config.yaml")
|
||||
|
||||
DESCRIPTION_TEXT = """\
|
||||
\033[1;32mPackage Manager 🤖📦\033[0m
|
||||
\033[3mKevin's Package Manager is a multi-repository, multi-package, and multi-format
|
||||
development tool crafted by and designed for:\033[0m
|
||||
\033[1;34mKevin Veen-Birkenbach\033[0m
|
||||
\033[4mhttps://www.veen.world/\033[0m
|
||||
\033[3mKevin's multi-distro package and workflow manager.\033[0m
|
||||
\033[1;34mKevin Veen-Birkenbach\033[0m – \033[4mhttps://s.veen.world/pkgmgr\033[0m
|
||||
|
||||
\033[1mOverview:\033[0m
|
||||
A powerful toolchain that unifies and automates workflows across heterogeneous
|
||||
project ecosystems. pkgmgr is not only a package manager — it is a full
|
||||
developer-oriented orchestration tool.
|
||||
Built in \033[1;33mPython\033[0m on top of \033[1;33mNix flakes\033[0m to manage many
|
||||
repositories and packaging formats (pyproject.toml, flake.nix,
|
||||
PKGBUILD, debian, Ansible, …) with one CLI.
|
||||
|
||||
It automatically detects, merges, and processes metadata from multiple
|
||||
dependency formats, including:
|
||||
• \033[1;33mPython:\033[0m pyproject.toml, requirements.txt
|
||||
• \033[1;33mNix:\033[0m flake.nix
|
||||
• \033[1;33mArch Linux:\033[0m PKGBUILD
|
||||
• \033[1;33mAnsible:\033[0m requirements.yml
|
||||
|
||||
This allows pkgmgr to perform installation, updates, verification, dependency
|
||||
resolution, and synchronization across complex multi-repo environments — with a
|
||||
single unified command-line interface.
|
||||
|
||||
\033[1mDeveloper Tools:\033[0m
|
||||
pkgmgr includes an integrated toolbox to enhance daily development workflows:
|
||||
|
||||
• \033[1;33mVS Code integration:\033[0m Auto-generate and open multi-repo workspaces
|
||||
• \033[1;33mTerminal integration:\033[0m Open repositories in new GNOME Terminal tabs
|
||||
• \033[1;33mExplorer integration:\033[0m Open repositories in your file manager
|
||||
• \033[1;33mRelease automation:\033[0m Version bumping, changelog updates, and tagging
|
||||
• \033[1;33mBatch operations:\033[0m Execute shell commands across multiple repositories
|
||||
• \033[1;33mGit/Docker/Make wrappers:\033[0m Unified command proxying for many tools
|
||||
|
||||
\033[1mCapabilities:\033[0m
|
||||
• Clone, pull, verify, update, and manage many repositories at once
|
||||
• Resolve dependencies across languages and ecosystems
|
||||
• Standardize install/update workflows
|
||||
• Create symbolic executable wrappers for any project
|
||||
• Merge configuration from default + user config layers
|
||||
|
||||
Use pkgmgr as both a robust package management framework and a versatile
|
||||
development orchestration tool.
|
||||
|
||||
For detailed help on each command, use:
|
||||
\033[1mpkgmgr <command> --help\033[0m
|
||||
For details on any command, run:
|
||||
\033[1mpkgmgr <command> --help\033[0m
|
||||
"""
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""
|
||||
Entry point for the pkgmgr CLI.
|
||||
|
||||
@@ -6,6 +6,7 @@ from .version import handle_version
|
||||
from .make import handle_make
|
||||
from .changelog import handle_changelog
|
||||
from .branch import handle_branch
|
||||
from .mirror import handle_mirror_command
|
||||
|
||||
__all__ = [
|
||||
"handle_repos_command",
|
||||
@@ -16,4 +17,5 @@ __all__ = [
|
||||
"handle_make",
|
||||
"handle_changelog",
|
||||
"handle_branch",
|
||||
"handle_mirror_command",
|
||||
]
|
||||
|
||||
@@ -3,7 +3,7 @@ from __future__ import annotations
|
||||
import sys
|
||||
|
||||
from pkgmgr.cli.context import CLIContext
|
||||
from pkgmgr.actions.branch import open_branch, close_branch
|
||||
from pkgmgr.actions.branch import open_branch, close_branch, drop_branch
|
||||
|
||||
|
||||
def handle_branch(args, ctx: CLIContext) -> None:
|
||||
@@ -12,7 +12,8 @@ def handle_branch(args, ctx: CLIContext) -> None:
|
||||
|
||||
Currently supported:
|
||||
- pkgmgr branch open [<name>] [--base <branch>]
|
||||
- pkgmgr branch close [<name>] [--base <branch>]
|
||||
- pkgmgr branch close [<name>] [--base <branch>] [--force|-f]
|
||||
- pkgmgr branch drop [<name>] [--base <branch>] [--force|-f]
|
||||
"""
|
||||
if args.subcommand == "open":
|
||||
open_branch(
|
||||
@@ -27,6 +28,16 @@ def handle_branch(args, ctx: CLIContext) -> None:
|
||||
name=getattr(args, "name", None),
|
||||
base_branch=getattr(args, "base", "main"),
|
||||
cwd=".",
|
||||
force=getattr(args, "force", False),
|
||||
)
|
||||
return
|
||||
|
||||
if args.subcommand == "drop":
|
||||
drop_branch(
|
||||
name=getattr(args, "name", None),
|
||||
base_branch=getattr(args, "base", "main"),
|
||||
cwd=".",
|
||||
force=getattr(args, "force", False),
|
||||
)
|
||||
return
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
@@ -7,7 +7,7 @@ import os
|
||||
import sys
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
@@ -36,7 +36,7 @@ def _load_user_config(user_config_path: str) -> Dict[str, Any]:
|
||||
return {"repositories": []}
|
||||
|
||||
|
||||
def _find_defaults_source_dir() -> str | None:
|
||||
def _find_defaults_source_dir() -> Optional[str]:
|
||||
"""
|
||||
Find the directory inside the installed pkgmgr package OR the
|
||||
project root that contains default config files.
|
||||
|
||||
118
src/pkgmgr/cli/commands/mirror.py
Normal file
118
src/pkgmgr/cli/commands/mirror.py
Normal file
@@ -0,0 +1,118 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pkgmgr.actions.mirror import (
|
||||
diff_mirrors,
|
||||
list_mirrors,
|
||||
merge_mirrors,
|
||||
setup_mirrors,
|
||||
)
|
||||
from pkgmgr.cli.context import CLIContext
|
||||
|
||||
Repository = Dict[str, Any]
|
||||
|
||||
|
||||
def handle_mirror_command(
|
||||
args,
|
||||
ctx: CLIContext,
|
||||
selected: List[Repository],
|
||||
) -> None:
|
||||
"""
|
||||
Entry point for 'pkgmgr mirror' subcommands.
|
||||
|
||||
Subcommands:
|
||||
- mirror list → list configured mirrors
|
||||
- mirror diff → compare config vs MIRRORS file
|
||||
- mirror merge → merge mirrors between config and MIRRORS file
|
||||
- mirror setup → configure local Git + remote placeholders
|
||||
"""
|
||||
if not selected:
|
||||
print("[INFO] No repositories selected for 'mirror' command.")
|
||||
sys.exit(1)
|
||||
|
||||
subcommand = getattr(args, "subcommand", None)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# mirror list
|
||||
# ------------------------------------------------------------
|
||||
if subcommand == "list":
|
||||
source = getattr(args, "source", "all")
|
||||
list_mirrors(
|
||||
selected_repos=selected,
|
||||
repositories_base_dir=ctx.repositories_base_dir,
|
||||
all_repos=ctx.all_repositories,
|
||||
source=source,
|
||||
)
|
||||
return
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# mirror diff
|
||||
# ------------------------------------------------------------
|
||||
if subcommand == "diff":
|
||||
diff_mirrors(
|
||||
selected_repos=selected,
|
||||
repositories_base_dir=ctx.repositories_base_dir,
|
||||
all_repos=ctx.all_repositories,
|
||||
)
|
||||
return
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# mirror merge
|
||||
# ------------------------------------------------------------
|
||||
if subcommand == "merge":
|
||||
source = getattr(args, "source", None)
|
||||
target = getattr(args, "target", None)
|
||||
preview = getattr(args, "preview", False)
|
||||
|
||||
if source == target:
|
||||
print(
|
||||
"[ERROR] For 'mirror merge', source and target "
|
||||
"must differ (one of: config, file)."
|
||||
)
|
||||
sys.exit(2)
|
||||
|
||||
# Config file path can be passed explicitly via --config-path.
|
||||
# If not given, fall back to the global context (if available).
|
||||
explicit_config_path = getattr(args, "config_path", None)
|
||||
user_config_path = explicit_config_path or getattr(
|
||||
ctx, "user_config_path", None
|
||||
)
|
||||
|
||||
merge_mirrors(
|
||||
selected_repos=selected,
|
||||
repositories_base_dir=ctx.repositories_base_dir,
|
||||
all_repos=ctx.all_repositories,
|
||||
source=source,
|
||||
target=target,
|
||||
preview=preview,
|
||||
user_config_path=user_config_path,
|
||||
)
|
||||
return
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# mirror setup
|
||||
# ------------------------------------------------------------
|
||||
if subcommand == "setup":
|
||||
local = getattr(args, "local", False)
|
||||
remote = getattr(args, "remote", False)
|
||||
preview = getattr(args, "preview", False)
|
||||
|
||||
# If neither flag is set → default to both.
|
||||
if not local and not remote:
|
||||
local = True
|
||||
remote = True
|
||||
|
||||
setup_mirrors(
|
||||
selected_repos=selected,
|
||||
repositories_base_dir=ctx.repositories_base_dir,
|
||||
all_repos=ctx.all_repositories,
|
||||
preview=preview,
|
||||
local=local,
|
||||
remote=remote,
|
||||
)
|
||||
return
|
||||
|
||||
print(f"[ERROR] Unknown mirror subcommand: {subcommand}")
|
||||
sys.exit(2)
|
||||
@@ -1,4 +1,5 @@
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
@@ -21,9 +21,9 @@ from pkgmgr.cli.commands import (
|
||||
handle_make,
|
||||
handle_changelog,
|
||||
handle_branch,
|
||||
handle_mirror_command,
|
||||
)
|
||||
|
||||
|
||||
def _has_explicit_selection(args) -> bool:
|
||||
"""
|
||||
Return True if the user explicitly selected repositories via
|
||||
@@ -108,6 +108,7 @@ def dispatch_command(args, ctx: CLIContext) -> None:
|
||||
"explore",
|
||||
"terminal",
|
||||
"code",
|
||||
"mirror",
|
||||
]
|
||||
|
||||
if getattr(args, "command", None) in commands_with_selection:
|
||||
@@ -174,5 +175,9 @@ def dispatch_command(args, ctx: CLIContext) -> None:
|
||||
handle_branch(args, ctx)
|
||||
return
|
||||
|
||||
if args.command == "mirror":
|
||||
handle_mirror_command(args, ctx, selected)
|
||||
return
|
||||
|
||||
print(f"Unknown command: {args.command}")
|
||||
sys.exit(2)
|
||||
|
||||
@@ -1,505 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
|
||||
from pkgmgr.cli.proxy import register_proxy_commands
|
||||
|
||||
|
||||
class SortedSubParsersAction(argparse._SubParsersAction):
|
||||
"""
|
||||
Subparsers action that keeps choices sorted alphabetically.
|
||||
"""
|
||||
|
||||
def add_parser(self, name, **kwargs):
|
||||
parser = super().add_parser(name, **kwargs)
|
||||
# Sort choices alphabetically by dest (subcommand name)
|
||||
self._choices_actions.sort(key=lambda a: a.dest)
|
||||
return parser
|
||||
|
||||
|
||||
def add_identifier_arguments(subparser: argparse.ArgumentParser) -> None:
|
||||
"""
|
||||
Common identifier / selection arguments for many subcommands.
|
||||
|
||||
Selection modes (mutual intent, not hard-enforced):
|
||||
- identifiers (positional): select by alias / provider/account/repo
|
||||
- --all: select all repositories
|
||||
- --category / --string / --tag: filter-based selection on top
|
||||
of the full repository set
|
||||
"""
|
||||
subparser.add_argument(
|
||||
"identifiers",
|
||||
nargs="*",
|
||||
help=(
|
||||
"Identifier(s) for repositories. "
|
||||
"Default: Repository of current folder."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--all",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"Apply the subcommand to all repositories in the config. "
|
||||
"Some subcommands ask for confirmation. If you want to give this "
|
||||
"confirmation for all repositories, pipe 'yes'. E.g: "
|
||||
"yes | pkgmgr {subcommand} --all"
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--category",
|
||||
nargs="+",
|
||||
default=[],
|
||||
help=(
|
||||
"Filter repositories by category patterns derived from config "
|
||||
"filenames or repo metadata (use filename without .yml/.yaml, "
|
||||
"or /regex/ to use a regular expression)."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--string",
|
||||
default="",
|
||||
help=(
|
||||
"Filter repositories whose identifier / name / path contains this "
|
||||
"substring (case-insensitive). Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--tag",
|
||||
action="append",
|
||||
default=[],
|
||||
help=(
|
||||
"Filter repositories by tag. Matches tags from the repository "
|
||||
"collector and category tags. Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--preview",
|
||||
action="store_true",
|
||||
help="Preview changes without executing commands",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--list",
|
||||
action="store_true",
|
||||
help="List affected repositories (with preview or status)",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"-a",
|
||||
"--args",
|
||||
nargs=argparse.REMAINDER,
|
||||
dest="extra_args",
|
||||
help="Additional parameters to be attached.",
|
||||
default=[],
|
||||
)
|
||||
|
||||
|
||||
def add_install_update_arguments(subparser: argparse.ArgumentParser) -> None:
|
||||
"""
|
||||
Common arguments for install/update commands.
|
||||
"""
|
||||
add_identifier_arguments(subparser)
|
||||
subparser.add_argument(
|
||||
"-q",
|
||||
"--quiet",
|
||||
action="store_true",
|
||||
help="Suppress warnings and info messages",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--no-verification",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Disable verification via commit/gpg",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--dependencies",
|
||||
action="store_true",
|
||||
help="Also pull and update dependencies",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--clone-mode",
|
||||
choices=["ssh", "https", "shallow"],
|
||||
default="ssh",
|
||||
help=(
|
||||
"Specify the clone mode: ssh, https, or shallow "
|
||||
"(HTTPS shallow clone; default: ssh)"
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def create_parser(description_text: str) -> argparse.ArgumentParser:
|
||||
"""
|
||||
Create the top-level argument parser for pkgmgr.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description=description_text,
|
||||
formatter_class=argparse.RawTextHelpFormatter,
|
||||
)
|
||||
subparsers = parser.add_subparsers(
|
||||
dest="command",
|
||||
help="Subcommands",
|
||||
action=SortedSubParsersAction,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# install / update / deinstall / delete
|
||||
# ------------------------------------------------------------
|
||||
install_parser = subparsers.add_parser(
|
||||
"install",
|
||||
help="Setup repository/repositories alias links to executables",
|
||||
)
|
||||
add_install_update_arguments(install_parser)
|
||||
|
||||
update_parser = subparsers.add_parser(
|
||||
"update",
|
||||
help="Update (pull + install) repository/repositories",
|
||||
)
|
||||
add_install_update_arguments(update_parser)
|
||||
update_parser.add_argument(
|
||||
"--system",
|
||||
action="store_true",
|
||||
help="Include system update commands",
|
||||
)
|
||||
|
||||
deinstall_parser = subparsers.add_parser(
|
||||
"deinstall",
|
||||
help="Remove alias links to repository/repositories",
|
||||
)
|
||||
add_identifier_arguments(deinstall_parser)
|
||||
|
||||
delete_parser = subparsers.add_parser(
|
||||
"delete",
|
||||
help="Delete repository/repositories alias links to executables",
|
||||
)
|
||||
add_identifier_arguments(delete_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# create
|
||||
# ------------------------------------------------------------
|
||||
create_cmd_parser = subparsers.add_parser(
|
||||
"create",
|
||||
help=(
|
||||
"Create new repository entries: add them to the config if not "
|
||||
"already present, initialize the local repository, and push "
|
||||
"remotely if --remote is set."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(create_cmd_parser)
|
||||
create_cmd_parser.add_argument(
|
||||
"--remote",
|
||||
action="store_true",
|
||||
help="If set, add the remote and push the initial commit.",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# status
|
||||
# ------------------------------------------------------------
|
||||
status_parser = subparsers.add_parser(
|
||||
"status",
|
||||
help="Show status for repository/repositories or system",
|
||||
)
|
||||
add_identifier_arguments(status_parser)
|
||||
status_parser.add_argument(
|
||||
"--system",
|
||||
action="store_true",
|
||||
help="Show system status",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# config
|
||||
# ------------------------------------------------------------
|
||||
config_parser = subparsers.add_parser(
|
||||
"config",
|
||||
help="Manage configuration",
|
||||
)
|
||||
config_subparsers = config_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Config subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
config_show = config_subparsers.add_parser(
|
||||
"show",
|
||||
help="Show configuration",
|
||||
)
|
||||
add_identifier_arguments(config_show)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"add",
|
||||
help="Interactively add a new repository entry",
|
||||
)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"edit",
|
||||
help="Edit configuration file with nano",
|
||||
)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"init",
|
||||
help="Initialize user configuration by scanning the base directory",
|
||||
)
|
||||
|
||||
config_delete = config_subparsers.add_parser(
|
||||
"delete",
|
||||
help="Delete repository entry from user config",
|
||||
)
|
||||
add_identifier_arguments(config_delete)
|
||||
|
||||
config_ignore = config_subparsers.add_parser(
|
||||
"ignore",
|
||||
help="Set ignore flag for repository entries in user config",
|
||||
)
|
||||
add_identifier_arguments(config_ignore)
|
||||
config_ignore.add_argument(
|
||||
"--set",
|
||||
choices=["true", "false"],
|
||||
required=True,
|
||||
help="Set ignore to true or false",
|
||||
)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"update",
|
||||
help=(
|
||||
"Update default config files in ~/.config/pkgmgr/ from the "
|
||||
"installed pkgmgr package (does not touch config.yaml)."
|
||||
),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# path / explore / terminal / code / shell
|
||||
# ------------------------------------------------------------
|
||||
path_parser = subparsers.add_parser(
|
||||
"path",
|
||||
help="Print the path(s) of repository/repositories",
|
||||
)
|
||||
add_identifier_arguments(path_parser)
|
||||
|
||||
explore_parser = subparsers.add_parser(
|
||||
"explore",
|
||||
help="Open repository in Nautilus file manager",
|
||||
)
|
||||
add_identifier_arguments(explore_parser)
|
||||
|
||||
terminal_parser = subparsers.add_parser(
|
||||
"terminal",
|
||||
help="Open repository in a new GNOME Terminal tab",
|
||||
)
|
||||
add_identifier_arguments(terminal_parser)
|
||||
|
||||
code_parser = subparsers.add_parser(
|
||||
"code",
|
||||
help="Open repository workspace with VS Code",
|
||||
)
|
||||
add_identifier_arguments(code_parser)
|
||||
|
||||
shell_parser = subparsers.add_parser(
|
||||
"shell",
|
||||
help="Execute a shell command in each repository",
|
||||
)
|
||||
add_identifier_arguments(shell_parser)
|
||||
shell_parser.add_argument(
|
||||
"-c",
|
||||
"--command",
|
||||
nargs=argparse.REMAINDER,
|
||||
dest="shell_command",
|
||||
help=(
|
||||
"The shell command (and its arguments) to execute in each "
|
||||
"repository"
|
||||
),
|
||||
default=[],
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# branch
|
||||
# ------------------------------------------------------------
|
||||
branch_parser = subparsers.add_parser(
|
||||
"branch",
|
||||
help="Branch-related utilities (e.g. open/close feature branches)",
|
||||
)
|
||||
branch_subparsers = branch_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Branch subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
branch_open = branch_subparsers.add_parser(
|
||||
"open",
|
||||
help="Create and push a new branch on top of a base branch",
|
||||
)
|
||||
branch_open.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the new branch (optional; will be asked interactively "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_open.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help="Base branch to create the new branch from (default: main)",
|
||||
)
|
||||
|
||||
branch_close = branch_subparsers.add_parser(
|
||||
"close",
|
||||
help="Merge a feature branch into base and delete it",
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the branch to close (optional; current branch is used "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help=(
|
||||
"Base branch to merge into (default: main; falls back to master "
|
||||
"internally if main does not exist)"
|
||||
),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# release
|
||||
# ------------------------------------------------------------
|
||||
release_parser = subparsers.add_parser(
|
||||
"release",
|
||||
help=(
|
||||
"Create a release for repository/ies by incrementing version "
|
||||
"and updating the changelog."
|
||||
),
|
||||
)
|
||||
release_parser.add_argument(
|
||||
"release_type",
|
||||
choices=["major", "minor", "patch"],
|
||||
help="Type of version increment for the release (major, minor, patch).",
|
||||
)
|
||||
release_parser.add_argument(
|
||||
"-m",
|
||||
"--message",
|
||||
default=None,
|
||||
help=(
|
||||
"Optional release message to add to the changelog and tag."
|
||||
),
|
||||
)
|
||||
# Generic selection / preview / list / extra_args
|
||||
add_identifier_arguments(release_parser)
|
||||
# Close current branch after successful release
|
||||
release_parser.add_argument(
|
||||
"--close",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Close the current branch after a successful release in each "
|
||||
"repository, if it is not main/master."
|
||||
),
|
||||
)
|
||||
# Force: skip preview+confirmation and run release directly
|
||||
release_parser.add_argument(
|
||||
"-f",
|
||||
"--force",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Skip the interactive preview+confirmation step and run the "
|
||||
"release directly."
|
||||
),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# version
|
||||
# ------------------------------------------------------------
|
||||
version_parser = subparsers.add_parser(
|
||||
"version",
|
||||
help=(
|
||||
"Show version information for repository/ies "
|
||||
"(git tags, pyproject.toml, flake.nix, PKGBUILD, debian, spec, "
|
||||
"Ansible Galaxy)."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(version_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# changelog
|
||||
# ------------------------------------------------------------
|
||||
changelog_parser = subparsers.add_parser(
|
||||
"changelog",
|
||||
help=(
|
||||
"Show changelog derived from Git history. "
|
||||
"By default, shows the changes between the last two SemVer tags."
|
||||
),
|
||||
)
|
||||
changelog_parser.add_argument(
|
||||
"range",
|
||||
nargs="?",
|
||||
default="",
|
||||
help=(
|
||||
"Optional tag or range (e.g. v1.2.3 or v1.2.0..v1.2.3). "
|
||||
"If omitted, the changelog between the last two SemVer "
|
||||
"tags is shown."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(changelog_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# list
|
||||
# ------------------------------------------------------------
|
||||
list_parser = subparsers.add_parser(
|
||||
"list",
|
||||
help="List all repositories with details and status",
|
||||
)
|
||||
# dieselbe Selektionslogik wie bei install/update/etc.:
|
||||
add_identifier_arguments(list_parser)
|
||||
list_parser.add_argument(
|
||||
"--status",
|
||||
type=str,
|
||||
default="",
|
||||
help=(
|
||||
"Filter repositories by status (case insensitive). "
|
||||
"Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
list_parser.add_argument(
|
||||
"--description",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Show an additional detailed section per repository "
|
||||
"(description, homepage, tags, categories, paths)."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# make
|
||||
# ------------------------------------------------------------
|
||||
make_parser = subparsers.add_parser(
|
||||
"make",
|
||||
help="Executes make commands",
|
||||
)
|
||||
add_identifier_arguments(make_parser)
|
||||
make_subparsers = make_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Make subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
make_install = make_subparsers.add_parser(
|
||||
"install",
|
||||
help="Executes the make install command",
|
||||
)
|
||||
add_identifier_arguments(make_install)
|
||||
|
||||
make_deinstall = make_subparsers.add_parser(
|
||||
"deinstall",
|
||||
help="Executes the make deinstall command",
|
||||
)
|
||||
add_identifier_arguments(make_deinstall)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Proxy commands (git, docker, docker compose, ...)
|
||||
# ------------------------------------------------------------
|
||||
register_proxy_commands(subparsers)
|
||||
|
||||
return parser
|
||||
68
src/pkgmgr/cli/parser/__init__.py
Normal file
68
src/pkgmgr/cli/parser/__init__.py
Normal file
@@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
|
||||
from pkgmgr.cli.proxy import register_proxy_commands
|
||||
|
||||
from .common import SortedSubParsersAction
|
||||
from .install_update import add_install_update_subparsers
|
||||
from .config_cmd import add_config_subparsers
|
||||
from .navigation_cmd import add_navigation_subparsers
|
||||
from .branch_cmd import add_branch_subparsers
|
||||
from .release_cmd import add_release_subparser
|
||||
from .version_cmd import add_version_subparser
|
||||
from .changelog_cmd import add_changelog_subparser
|
||||
from .list_cmd import add_list_subparser
|
||||
from .make_cmd import add_make_subparsers
|
||||
from .mirror_cmd import add_mirror_subparsers
|
||||
|
||||
|
||||
def create_parser(description_text: str) -> argparse.ArgumentParser:
|
||||
"""
|
||||
Create the top-level argument parser for pkgmgr.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description=description_text,
|
||||
formatter_class=argparse.RawTextHelpFormatter,
|
||||
)
|
||||
subparsers = parser.add_subparsers(
|
||||
dest="command",
|
||||
help="Subcommands",
|
||||
action=SortedSubParsersAction,
|
||||
)
|
||||
|
||||
# Core repo operations
|
||||
add_install_update_subparsers(subparsers)
|
||||
add_config_subparsers(subparsers)
|
||||
|
||||
# Navigation / tooling around repos
|
||||
add_navigation_subparsers(subparsers)
|
||||
|
||||
# Branch & release workflow
|
||||
add_branch_subparsers(subparsers)
|
||||
add_release_subparser(subparsers)
|
||||
|
||||
# Info commands
|
||||
add_version_subparser(subparsers)
|
||||
add_changelog_subparser(subparsers)
|
||||
add_list_subparser(subparsers)
|
||||
|
||||
# Make wrapper
|
||||
add_make_subparsers(subparsers)
|
||||
|
||||
# Mirror management
|
||||
add_mirror_subparsers(subparsers)
|
||||
|
||||
# Proxy commands (git, docker, docker compose, ...)
|
||||
register_proxy_commands(subparsers)
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
__all__ = [
|
||||
"create_parser",
|
||||
"SortedSubParsersAction",
|
||||
]
|
||||
104
src/pkgmgr/cli/parser/branch_cmd.py
Normal file
104
src/pkgmgr/cli/parser/branch_cmd.py
Normal file
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
|
||||
|
||||
def add_branch_subparsers(
|
||||
subparsers: argparse._SubParsersAction,
|
||||
) -> None:
|
||||
"""
|
||||
Register branch command and its subcommands.
|
||||
"""
|
||||
branch_parser = subparsers.add_parser(
|
||||
"branch",
|
||||
help="Branch-related utilities (e.g. open/close/drop feature branches)",
|
||||
)
|
||||
branch_subparsers = branch_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Branch subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# branch open
|
||||
# -----------------------------------------------------------------------
|
||||
branch_open = branch_subparsers.add_parser(
|
||||
"open",
|
||||
help="Create and push a new branch on top of a base branch",
|
||||
)
|
||||
branch_open.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the new branch (optional; will be asked interactively "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_open.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help="Base branch to create the new branch from (default: main)",
|
||||
)
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# branch close
|
||||
# -----------------------------------------------------------------------
|
||||
branch_close = branch_subparsers.add_parser(
|
||||
"close",
|
||||
help="Merge a feature branch into base and delete it",
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the branch to close (optional; current branch is used "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help=(
|
||||
"Base branch to merge into (default: main; falls back to master "
|
||||
"internally if main does not exist)"
|
||||
),
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"-f",
|
||||
"--force",
|
||||
action="store_true",
|
||||
help="Skip confirmation prompt and close the branch directly",
|
||||
)
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# branch drop
|
||||
# -----------------------------------------------------------------------
|
||||
branch_drop = branch_subparsers.add_parser(
|
||||
"drop",
|
||||
help="Delete a branch locally and on origin (without merging)",
|
||||
)
|
||||
branch_drop.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the branch to drop (optional; current branch is used "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_drop.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help=(
|
||||
"Base branch used to protect main/master from deletion "
|
||||
"(default: main; falls back to master internally)"
|
||||
),
|
||||
)
|
||||
branch_drop.add_argument(
|
||||
"-f",
|
||||
"--force",
|
||||
action="store_true",
|
||||
help="Skip confirmation prompt and drop the branch directly",
|
||||
)
|
||||
34
src/pkgmgr/cli/parser/changelog_cmd.py
Normal file
34
src/pkgmgr/cli/parser/changelog_cmd.py
Normal file
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
|
||||
from .common import add_identifier_arguments
|
||||
|
||||
|
||||
def add_changelog_subparser(
|
||||
subparsers: argparse._SubParsersAction,
|
||||
) -> None:
|
||||
"""
|
||||
Register the changelog command.
|
||||
"""
|
||||
changelog_parser = subparsers.add_parser(
|
||||
"changelog",
|
||||
help=(
|
||||
"Show changelog derived from Git history. "
|
||||
"By default, shows the changes between the last two SemVer tags."
|
||||
),
|
||||
)
|
||||
changelog_parser.add_argument(
|
||||
"range",
|
||||
nargs="?",
|
||||
default="",
|
||||
help=(
|
||||
"Optional tag or range (e.g. v1.2.3 or v1.2.0..v1.2.3). "
|
||||
"If omitted, the changelog between the last two SemVer "
|
||||
"tags is shown."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(changelog_parser)
|
||||
Some files were not shown because too many files have changed in this diff.