Compare commits

83 commits: v0.10.2...d1e5a71f77

d1e5a71f77, d59dc8ad53, 55f4a1e941, 2a4ec18532, 2debdbee09, 4cb62e90f8, 923519497a, 5fa18cb449,
f513196911, 7f06447bbd, 1e5d6d3eee, f2970adbb2, 7f262c6557, 0bc7a3ecc0, 55a0ae4337, bcf284c5d6,
db23b1a445, 506f69d8a7, 097e64408f, a3913d9489, c92fd44dd3, 2c3efa7a27, f388bc51bc, 4e28eba883,
b8acd634f8, fb68b325d6, 650a22d425, 6a590d8780, 5601ea442a, 5ff15013d7, 6ccc1c1490, 8ead3472dd,
422ac8b837, ea84c1b14e, 71a4e7e725, fb737ef290, 2963a43754, 103f49c8f6, f5d428950e, b40787ffc5,
0482a7f88d, 8c127cc45a, 2761e829cb, d0c01b6955, b2421c9b84, f950bb493c, fb0b81954d, b9b4c3fa59,
3642f92776, 8f38edde67, 5875441b23, 9190f0d901, f227734185, c7ef77559c, 2385601ed5, ac5ae95369,
31f7f47fe2, c8bf1c91ad, f2caa68e3d, 03c232c308, e882e17737, b9edcf7101, 8b8ebf329f, 9598c17ea0,
67bd358e12, 340c1700dc, 0dfbaa0f6b, 08ab9fb142, 804245325d, c05e77658a, 324f6db1f3, 2a69a83d71,
0ec4ccbe40, 0d864867cd, 3ff0afe828, bd74ad41f9, fa2a92481d, 6a1e001fc2, 60afa92e09, 212f3ce5eb,
0d79537033, 72fc69c2f8, 6d8c6deae8
.github/workflows/ci.yml (13 lines changed, vendored)

@@ -13,8 +13,11 @@ jobs:
  test-integration:
    uses: ./.github/workflows/test-integration.yml
  test-container:
    uses: ./.github/workflows/test-container.yml
  test-env-virtual:
    uses: ./.github/workflows/test-env-virtual.yml
  test-env-nix:
    uses: ./.github/workflows/test-env-nix.yml
  test-e2e:
    uses: ./.github/workflows/test-e2e.yml
@@ -24,3 +27,9 @@ jobs:
  test-virgin-root:
    uses: ./.github/workflows/test-virgin-root.yml
  codesniffer-shellcheck:
    uses: ./.github/workflows/codesniffer-shellcheck.yml
  codesniffer-ruff:
    uses: ./.github/workflows/codesniffer-ruff.yml
.github/workflows/codesniffer-ruff.yml (new file, 23 lines, vendored)

@@ -0,0 +1,23 @@
name: Ruff (Python code sniffer)

on:
  workflow_call:

jobs:
  codesniffer-ruff:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install ruff
        run: pip install ruff

      - name: Run ruff
        run: |
          ruff check src tests
.github/workflows/codesniffer-shellcheck.yml (new file, 14 lines, vendored)

@@ -0,0 +1,14 @@
name: ShellCheck

on:
  workflow_call:

jobs:
  codesniffer-shellcheck:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install ShellCheck
        run: sudo apt-get update && sudo apt-get install -y shellcheck
      - name: Run ShellCheck
        run: shellcheck -x $(find scripts -type f -name '*.sh' -print)
.github/workflows/mark-stable.yml (18 lines changed, vendored)

@@ -14,8 +14,11 @@ jobs:
  test-integration:
    uses: ./.github/workflows/test-integration.yml
  test-container:
    uses: ./.github/workflows/test-container.yml
  test-env-virtual:
    uses: ./.github/workflows/test-env-virtual.yml
  test-env-nix:
    uses: ./.github/workflows/test-env-nix.yml
  test-e2e:
    uses: ./.github/workflows/test-e2e.yml
@@ -26,11 +29,20 @@ jobs:
  test-virgin-root:
    uses: ./.github/workflows/test-virgin-root.yml
  codesniffer-shellcheck:
    uses: ./.github/workflows/codesniffer-shellcheck.yml
  codesniffer-ruff:
    uses: ./.github/workflows/codesniffer-ruff.yml
  mark-stable:
    needs:
      - codesniffer-shellcheck
      - codesniffer-ruff
      - test-unit
      - test-integration
      - test-container
      - test-env-nix
      - test-env-virtual
      - test-e2e
      - test-virgin-user
      - test-virgin-root
.github/workflows/publish-containers.yml (new file, 66 lines, vendored)

@@ -0,0 +1,66 @@
name: Publish container images (GHCR)

on:
  workflow_run:
    workflows: ["Mark stable commit"]
    types: [completed]

jobs:
  publish:
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    runs-on: ubuntu-latest

    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository (with tags)
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Checkout workflow_run commit and refresh tags
        run: |
          set -euo pipefail
          git checkout -f "${{ github.event.workflow_run.head_sha }}"
          git fetch --tags --force
          git tag --list 'stable' 'v*' --sort=version:refname | tail -n 20

      - name: Compute version and stable flag
        id: info
        run: |
          set -euo pipefail
          SHA="$(git rev-parse HEAD)"

          V_TAG="$(git tag --points-at "${SHA}" --list 'v*' | sort -V | tail -n1)"
          [[ -n "$V_TAG" ]] || { echo "No version tag found"; exit 1; }
          VERSION="${V_TAG#v}"

          STABLE_SHA="$(git rev-parse -q --verify refs/tags/stable^{commit} 2>/dev/null || true)"
          IS_STABLE=false
          [[ -n "${STABLE_SHA}" && "${STABLE_SHA}" == "${SHA}" ]] && IS_STABLE=true

          echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
          echo "is_stable=${IS_STABLE}" >> "$GITHUB_OUTPUT"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          use: true

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Publish all images
        run: |
          set -euo pipefail
          OWNER="${{ github.repository_owner }}" \
          VERSION="${{ steps.info.outputs.version }}" \
          IS_STABLE="${{ steps.info.outputs.is_stable }}" \
          bash scripts/build/publish.sh
.github/workflows/test-e2e.yml (2 lines changed, vendored)

@@ -22,4 +22,4 @@ jobs:
      - name: Run E2E tests via make (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-e2e
          PKGMGR_DISTRO="${{ matrix.distro }}" make test-e2e
.github/workflows/test-env-nix.yml (new file, 26 lines, vendored)

@@ -0,0 +1,26 @@
name: Test Virgin Nix (flake only)

on:
  workflow_call:

jobs:
  test-env-nix:
    runs-on: ubuntu-latest
    timeout-minutes: 45

    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show Docker version
        run: docker version

      - name: Nix flake-only test (${{ matrix.distro }})
        run: |
          set -euo pipefail
          PKGMGR_DISTRO="${{ matrix.distro }}" make test-env-nix
@@ -4,7 +4,7 @@ on:
  workflow_call:

jobs:
  test-container:
  test-env-virtual:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
@@ -25,4 +25,4 @@ jobs:
      - name: Run container tests (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-container
          PKGMGR_DISTRO="${{ matrix.distro }}" make test-env-virtual
.github/workflows/test-integration.yml (2 lines changed, vendored)

@@ -16,4 +16,4 @@ jobs:
        run: docker version

      - name: Run integration tests via make (Arch container)
        run: make test-integration distro="arch"
        run: make test-integration PKGMGR_DISTRO="arch"
.github/workflows/test-unit.yml (2 lines changed, vendored)

@@ -16,4 +16,4 @@ jobs:
        run: docker version

      - name: Run unit tests via make (Arch container)
        run: make test-unit distro="arch"
        run: make test-unit PKGMGR_DISTRO="arch"
.github/workflows/test-virgin-root.yml (40 lines changed, vendored)

@@ -7,6 +7,10 @@ jobs:
  test-virgin-root:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -15,44 +19,36 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Virgin Arch pkgmgr flake test (root)
      # 🔹 BUILD virgin image if missing
      - name: Build virgin container (${{ matrix.distro }})
        run: |
          set -euo pipefail
          PKGMGR_DISTRO="${{ matrix.distro }}" make build-missing-virgin

          echo ">>> Starting virgin ArchLinux container test (root, with shared caches)..."
      # 🔹 RUN test inside virgin image
      - name: Virgin ${{ matrix.distro }} pkgmgr test (root)
        run: |
          set -euo pipefail

          docker run --rm \
            -v "$PWD":/src \
            -v pkgmgr_repos:/root/Repositories \
            -v pkgmgr_pip_cache:/root/.cache/pip \
            -w /src \
            archlinux:latest \
            "pkgmgr-${{ matrix.distro }}-virgin" \
            bash -lc '
              set -euo pipefail

              echo ">>> Updating and upgrading Arch system..."
              pacman -Syu --noconfirm git python python-pip nix >/dev/null
              git config --global --add safe.directory /src

              echo ">>> Creating isolated virtual environment for pkgmgr..."
              python -m venv /tmp/pkgmgr-venv
              make install
              make setup

              echo ">>> Activating virtual environment..."
              source /tmp/pkgmgr-venv/bin/activate
              . "$HOME/.venvs/pkgmgr/bin/activate"

              echo ">>> Upgrading pip (cached)..."
              python -m pip install --upgrade pip >/dev/null

              echo ">>> Installing pkgmgr from current source tree (cached pip)..."
              python -m pip install /src >/dev/null

              echo ">>> Enabling Nix experimental features..."
              export NIX_CONFIG="experimental-features = nix-command flakes"

              echo ">>> Running: pkgmgr update pkgmgr --clone-mode shallow --no-verification"
              pkgmgr update pkgmgr --clone-mode shallow --no-verification

              echo ">>> Running: pkgmgr version pkgmgr"
              pkgmgr version pkgmgr

              echo ">>> Virgin Arch (root) test completed successfully."
              echo ">>> Running Nix-based: nix run .#pkgmgr -- version pkgmgr"
              nix run /src#pkgmgr -- version pkgmgr
            '
.github/workflows/test-virgin-user.yml (59 lines changed, vendored)

@@ -7,6 +7,10 @@ jobs:
  test-virgin-user:
    runs-on: ubuntu-latest
    timeout-minutes: 45
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -15,59 +19,46 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Virgin Arch pkgmgr user test (non-root with sudo)
      # 🔹 BUILD virgin image if missing
      - name: Build virgin container (${{ matrix.distro }})
        run: |
          set -euo pipefail
          PKGMGR_DISTRO="${{ matrix.distro }}" make build-missing-virgin

      # 🔹 RUN test inside virgin image as non-root
      - name: Virgin ${{ matrix.distro }} pkgmgr test (user)
        run: |
          set -euo pipefail

          echo ">>> Starting virgin ArchLinux container test (non-root user with sudo)..."

          docker run --rm \
            -v "$PWD":/src \
            archlinux:latest \
            -w /src \
            "pkgmgr-${{ matrix.distro }}-virgin" \
            bash -lc '
              set -euo pipefail

              echo ">>> [root] Updating and upgrading Arch system..."
              pacman -Syu --noconfirm git python python-pip sudo base-devel debugedit
              make install

              echo ">>> [root] Creating non-root user dev..."
              useradd -m dev

              echo ">>> [root] Allowing passwordless sudo for dev..."
              echo "dev ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/dev
              chmod 0440 /etc/sudoers.d/dev

              echo ">>> [root] Adjusting ownership of /src for dev..."
              chown -R dev:dev /src

              echo ">>> [root] Running pkgmgr flow as non-root user dev..."
              sudo -u dev env PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 bash -lc "
              mkdir -p /nix/store /nix/var/nix /nix/var/log/nix /nix/var/nix/profiles
              chown -R dev:dev /nix
              chmod 0755 /nix
              chmod 1777 /nix/store

              sudo -H -u dev env HOME=/home/dev PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 bash -lc "
                set -euo pipefail
                cd /src

                echo \">>> [dev] Using user: \$(whoami)\"
                echo \">>> [dev] Running scripts/installation/main.sh...\"
                bash scripts/installation/main.sh

                echo \">>> [dev] Activating venv...\"
                make setup-venv
                . \"\$HOME/.venvs/pkgmgr/bin/activate\"

                echo \">>> [dev] Installing pkgmgr into venv via pip...\"
                python -m pip install /src >/dev/null

                echo \">>> [dev] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=\$PKGMGR_DISABLE_NIX_FLAKE_INSTALLER\"
                echo \">>> [dev] Updating managed repo package-manager via pkgmgr...\"
                pkgmgr update pkgmgr --clone-mode shallow --no-verification

                echo \">>> [dev] PATH:\"
                echo \"\$PATH\"

                echo \">>> [dev] which pkgmgr:\"
                which pkgmgr || echo \">>> [dev] pkgmgr not found in PATH\"

                echo \">>> [dev] Running: pkgmgr version pkgmgr\"
                pkgmgr version pkgmgr
              "

              echo ">>> [root] Container flow finished."
              export NIX_REMOTE=local
              nix run /src#pkgmgr -- version pkgmgr
              "
            '
.gitignore (3 lines changed, vendored)

@@ -27,8 +27,9 @@ Thumbs.db
# Nix Cache to speed up tests
.nix/
.nix-dev-installed
flake.lock

# Ignore logs
*.log

result
result
CHANGELOG.md (156 lines changed)

@@ -1,3 +1,121 @@
## [1.5.0] - 2025-12-13

* - Commands now show live output while running, making long operations easier to follow
- Error messages include full command output, making failures easier to understand and debug
- Deinstallation is more complete and predictable, removing CLI links and properly cleaning up repositories
- Preview mode is more trustworthy, clearly showing what would happen without making changes
- Repository configuration problems are detected earlier with clear, user-friendly explanations
- More consistent behavior across different Linux distributions
- More reliable execution in Docker containers and CI environments
- Nix-based execution works more smoothly, especially when running as root or inside containers
- Existing commands, scripts, and workflows continue to work without any breaking changes


## [1.4.1] - 2025-12-12

* Fixed stable release container publishing


## [1.4.0] - 2025-12-12

**Docker Container Building**

* New official container images are automatically published on each release.
* Images are available per distribution and as a default Arch-based image.
* Stable releases now provide an additional `stable` container tag.


## [1.3.1] - 2025-12-12

* Updated documentation with better run and installation instructions


## [1.3.0] - 2025-12-12

**Stability & CI hardening**

* Stabilized Nix resolution and global symlink handling across Arch, CentOS, Debian, and Ubuntu
* Ensured Nix works reliably in CI, sudo, login, and non-login shells without overriding distro-managed paths
* Improved error handling and deterministic behavior for non-root environments
* Refactored Docker and CI workflows for reproducible multi-distro virgin tests
* Made E2E tests more realistic by executing real CLI commands
* Fixed Python compatibility and missing dependencies on affected distros


## [1.2.1] - 2025-12-12

**Changed**

* Split container tests into *virtualenv* and *Nix flake* environments to clearly separate Python and Nix responsibilities.

**Fixed**

* Fixed Nix installer permission issues when running under a different user in containers.
* Improved reliability of post-install Nix initialization across all distro packages.

**CI**

* Replaced generic container tests with explicit environment checks.
* Validate Nix availability via *nix flake* tests instead of Docker build-time side effects.


## [1.2.0] - 2025-12-12

**Release workflow overhaul**

* Introduced a fully structured release workflow with clear phases and safeguards
* Added preview-first releases with explicit confirmation before execution
* Automatic handling of *latest* tag when a release is the newest version
* Optional branch closing after successful releases with interactive confirmation
* Improved safety by syncing with remote before any changes
* Clear separation of concerns (workflow, git handling, prompts, versioning)


## [1.1.0] - 2025-12-12

* Added *branch drop* for destructive branch deletion and introduced *--force/-f* flags for branch close and branch drop to skip confirmation prompts.


## [1.0.0] - 2025-12-11

**Official Stable Release 🎉**

*First stable release of PKGMGR, the multi-distro development and package workflow manager.*

---

**Key Features**

**Core Functionality**

* Manage many repositories with one CLI: `clone`, `update`, `install`, `list`, `path`, `config`
* Proxy wrappers for Git, Docker/Compose and Make
* Multi-repo execution with safe *preview mode*
* Mirror management: `mirror list/diff/merge/setup`

**Releases & Versioning**

* Automated SemVer bumps, tagging and changelog generation
* Supports PKGBUILD, Debian, RPM, pyproject.toml, flake.nix

**Developer Tools**

* Open repositories in VS Code, file manager or terminal
* Unified workflows across all major Linux distros

**Nix Integration**

* Cross-distro reproducible builds via Nix flakes
* CI-tested across all supported environments

---

**Summary**
PKGMGR 1.0.0 unifies repository management, build tooling, release automation and reproducible multi-distro workflows into one cohesive CLI tool.

*This is the first official stable release.*


## [0.10.2] - 2025-12-11

* * Stable tag now updates only when a new highest version is released.

@@ -54,7 +172,7 @@
## [0.9.1] - 2025-12-10

* * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
* Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
* Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
* Fixed repository directory resolution; improved `pkgmgr path` and `pkgmgr shell`; added full unit/E2E coverage.
* Removed deprecated files and updated `.gitignore`.

@@ -149,47 +267,45 @@
## [0.7.1] - 2025-12-09

* Fix floating 'latest' tag logic: dereference annotated target (vX.Y.Z^{}), add tag message to avoid Git errors, ensure best-effort update without blocking releases, and update unit tests (see ChatGPT conversation: https://chatgpt.com/share/69383024-efa4-800f-a875-129b81fa40ff).

* Fix floating 'latest' tag logic
  * dereference annotated target (vX.Y.Z^{})
  * add tag message to avoid Git errors
  * ensure best-effort update without blocking releases

## [0.7.0] - 2025-12-09

* Add Git helpers for branch sync and floating 'latest' tag in the release workflow, ensure main/master are updated from origin before tagging, and extend unit/e2e tests including 'pkgmgr release --help' coverage (see ChatGPT conversation: https://chatgpt.com/share/69383024-efa4-800f-a875-129b81fa40ff)

* Add Git helpers for branch sync and floating 'latest' tag in the release workflow
  * ensure main/master are updated from origin before tagging

## [0.6.0] - 2025-12-09

* Expose DISTROS and BASE_IMAGE_* variables as exported Makefile environment variables so all build and test commands can consume them dynamically. By exporting these values, every Make target (e.g., build, build-no-cache, build-missing, test-container, test-unit, test-e2e) and every delegated script in scripts/build/ and scripts/test/ now receives a consistent view of the supported distributions and their base container images. This change removes duplicated definitions across scripts, ensures reproducible builds, and allows build tooling to react automatically when new distros or base images are added to the Makefile.

* Consistent view of the supported distributions and their base container images.

## [0.5.1] - 2025-12-09

* Refine pkgmgr release CLI close wiring and integration tests for --close flag (ChatGPT: https://chatgpt.com/share/69376b4e-8440-800f-9d06-535ec1d7a40e)
* Refine pkgmgr release CLI close wiring and integration tests for --close flag


## [0.5.0] - 2025-12-09

* Add pkgmgr branch close subcommand, extend CLI parser wiring, and add unit tests for branch handling and version version-selection logic (see ChatGPT conversation: https://chatgpt.com/share/693762a3-9ea8-800f-a640-bc78170953d1)

* Add pkgmgr branch close subcommand, extend CLI parser wiring

## [0.4.3] - 2025-12-09

* Implement current-directory repository selection for release and proxy commands, unify selection semantics across CLI layers, extend release workflow with --close, integrate branch closing logic, fix wiring for get_repo_identifier/get_repo_dir, update packaging files (PKGBUILD, spec, flake.nix, pyproject), and add comprehensive unit/e2e tests for release and branch commands (see ChatGPT conversation: https://chatgpt.com/share/69375cfe-9e00-800f-bd65-1bd5937e1696)

* Implement current-directory repository selection for release and proxy commands, unify selection semantics across CLI layers, extend release workflow with --close, integrate branch closing logic, fix wiring for get_repo_identifier/get_repo_dir, update packaging files (PKGBUILD, spec, flake.nix, pyproject)

## [0.4.2] - 2025-12-09

* Wire pkgmgr release CLI to new helper and add unit tests (see ChatGPT conversation: https://chatgpt.com/share/69374f09-c760-800f-92e4-5b44a4510b62)
* Wire pkgmgr release CLI to new helpe


## [0.4.1] - 2025-12-08

* Add branch close subcommand and integrate release close/editor flow (ChatGPT: https://chatgpt.com/share/69374f09-c760-800f-92e4-5b44a4510b62)

* Add branch close subcommand and integrate release close/editor flow

## [0.4.0] - 2025-12-08

* Add branch closing helper and --close flag to release command, including CLI wiring and tests (see https://chatgpt.com/share/69374aec-74ec-800f-bde3-5d91dfdb9b91)
* Add branch closing helper and --close flag to release command

## [0.3.0] - 2025-12-08

@@ -200,13 +316,10 @@
- New config update logic + default YAML sync
- Improved proxy command handling
- Full CLI routing refactor
- Expanded E2E tests for list, proxy, and selection logic
Konversation: https://chatgpt.com/share/693745c3-b8d8-800f-aa29-c8481a2ffae1

## [0.2.0] - 2025-12-08

* Add preview-first release workflow and extended packaging support (see ChatGPT conversation: https://chatgpt.com/share/693722b4-af9c-800f-bccc-8a4036e99630)

* Add preview-first release workflow and extended packaging support

## [0.1.0] - 2025-12-08

@@ -215,5 +328,4 @@ Konversation: https://chatgpt.com/share/693745c3-b8d8-800f-aa29-c8481a2ffae1

## [0.1.0] - 2025-12-08

* Implement unified release helper with preview mode, multi-packaging version bumps, and new integration/unit tests (see ChatGPT conversation 2025-12-08: https://chatgpt.com/share/693722b4-af9c-800f-bccc-8a4036e99630)

* Implement unified release helper with preview mode, multi-packaging version bumps
Dockerfile (82 lines changed)

@@ -1,61 +1,55 @@
# syntax=docker/dockerfile:1

# ------------------------------------------------------------
# Base image selector — overridden by Makefile
# Base image selector — overridden by build args / Makefile
# ------------------------------------------------------------
ARG BASE_IMAGE
FROM ${BASE_IMAGE}

RUN echo "BASE_IMAGE=${BASE_IMAGE}" && \
    cat /etc/os-release || true
# ============================================================
# Target: virgin
#   - installs distro deps (incl. make)
#   - no pkgmgr build
#   - no entrypoint
# ============================================================
FROM ${BASE_IMAGE} AS virgin
SHELL ["/bin/bash", "-lc"]

# ------------------------------------------------------------
# Nix environment defaults
#
# Nix itself is installed by your system packages (via init-nix.sh).
# Here we only define default configuration options.
# ------------------------------------------------------------
ENV NIX_CONFIG="experimental-features = nix-command flakes"
RUN echo "BASE_IMAGE=${BASE_IMAGE}" && cat /etc/os-release || true

# ------------------------------------------------------------
# Unprivileged user for Arch package build (makepkg)
# ------------------------------------------------------------
RUN useradd -m aur_builder || true

# ------------------------------------------------------------
# Copy scripts and install distro dependencies
# ------------------------------------------------------------
WORKDIR /build

# Copy only scripts first so dependency installation can run early
COPY scripts/ scripts/
RUN find scripts -type f -name '*.sh' -exec chmod +x {} \;
# Copy scripts first so dependency installation can be cached
COPY scripts/installation/ scripts/installation/

# Install distro-specific build dependencies (and AUR builder on Arch)
RUN scripts/installation/run-dependencies.sh
# Install distro-specific build dependencies (including make)
RUN bash scripts/installation/dependencies.sh

# ------------------------------------------------------------
# Select distro-specific Docker entrypoint
# ------------------------------------------------------------
# Docker entrypoint (distro-agnostic, uses run-package.sh)
# ------------------------------------------------------------
COPY scripts/docker/entry.sh /usr/local/bin/docker-entry.sh
RUN chmod +x /usr/local/bin/docker-entry.sh
# Virgin default
CMD ["bash"]

# ------------------------------------------------------------
# Build and install distro-native package-manager package
# via Makefile `install` target (calls scripts/installation/run-package.sh)
# ------------------------------------------------------------

# ============================================================
# Target: full
#   - inherits from virgin
#   - builds + installs pkgmgr
#   - sets entrypoint + default cmd
# ============================================================
FROM virgin AS full

WORKDIR /build

# Copy full repository for build
COPY . .
RUN find scripts -type f -name '*.sh' -exec chmod +x {} \;

RUN set -e; \
    echo "Building and installing package-manager via make install..."; \
    make install; \
    rm -rf /build
# Build and install distro-native package-manager package
RUN set -euo pipefail; \
    echo "Building and installing package-manager via make install..."; \
    make install; \
    cd /; rm -rf /build

# Entry point
COPY scripts/docker/entry.sh /usr/local/bin/docker-entry.sh

# ------------------------------------------------------------
# Runtime working directory and dev entrypoint
# ------------------------------------------------------------
WORKDIR /src

ENTRYPOINT ["/usr/local/bin/docker-entry.sh"]
CMD ["pkgmgr", "--help"]
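In practice these two targets are built by scripts/build/image.sh (shown later in this diff). As a rough, hedged illustration of what that amounts to, a manual build of both targets for the Arch base image could look like the following; the tag names mirror the defaults that image.sh derives (`pkgmgr-<distro>` and `pkgmgr-<distro>-virgin`), and `archlinux:latest` is the default base image from scripts/build/base.sh:

```bash
# Build only the dependency layer (base OS + system deps, no pkgmgr), as used for the virgin test images
docker build \
  --build-arg BASE_IMAGE=archlinux:latest \
  --target virgin \
  -t pkgmgr-arch-virgin \
  .

# Build the full image (pkgmgr installed, entrypoint and default command set)
docker build \
  --build-arg BASE_IMAGE=archlinux:latest \
  -t pkgmgr-arch \
  .
```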
Makefile (81 lines changed)

@@ -1,11 +1,14 @@
.PHONY: install setup uninstall \
    test build build-no-cache test-unit test-e2e test-integration \
    test-container
.PHONY: install uninstall \
    build build-no-cache build-no-cache-all build-missing \
    delete-volumes purge \
    test test-unit test-e2e test-integration test-env-virtual test-env-nix \
    setup setup-venv setup-nix

# Distro
# Options: arch debian ubuntu fedora centos
distro ?= arch
export distro
DISTROS ?= arch debian ubuntu fedora centos
PKGMGR_DISTRO ?= arch
export PKGMGR_DISTRO

# ------------------------------------------------------------
# Base images
@@ -27,21 +30,53 @@ export BASE_IMAGE_CENTOS
# PYthon Unittest Pattern
TEST_PATTERN := test_*.py
export TEST_PATTERN
export PYTHONPATH := src

# ------------------------------------------------------------
# PKGMGR setup (developer wrapper -> scripts/installation/main.sh)
# System install
# ------------------------------------------------------------
setup:
    @bash scripts/installation/main.sh
install:
    @echo "Building and installing distro-native package-manager for this system..."
    @bash scripts/installation/init.sh

# ------------------------------------------------------------
# PKGMGR setup
# ------------------------------------------------------------

# Default: keep current auto-detection behavior
setup: setup-nix setup-venv

# Explicit: developer setup (Python venv + shell RC + install)
setup-venv: setup-nix
    @bash scripts/setup/venv.sh

# Explicit: Nix shell mode (no venv, no RC changes)
setup-nix:
    @bash scripts/setup/nix.sh

# ------------------------------------------------------------
# Docker build targets (delegated to scripts/build)
# ------------------------------------------------------------
build-no-cache:
    @bash scripts/build/build-image-no-cache.sh

build:
    @bash scripts/build/build-image.sh
    @bash scripts/build/image.sh --target virgin
    @bash scripts/build/image.sh

build-missing-virgin:
    @bash scripts/build/image.sh --target virgin --missing

build-missing: build-missing-virgin
    @bash scripts/build/image.sh --missing

build-no-cache:
    @bash scripts/build/image.sh --target virgin --no-cache
    @bash scripts/build/image.sh --no-cache

build-no-cache-all:
    @set -e; \
    for d in $(DISTROS); do \
        echo "=== build-no-cache: $$d ==="; \
        PKGMGR_DISTRO="$$d" $(MAKE) build-no-cache; \
    done

# ------------------------------------------------------------
# Test targets (delegated to scripts/test)
@@ -56,30 +91,20 @@ test-integration: build-missing
test-e2e: build-missing
    @bash scripts/test/test-e2e.sh

test-container: build-missing
    @bash scripts/test/test-container.sh
test-env-virtual: build-missing
    @bash scripts/test/test-env-virtual.sh

# ------------------------------------------------------------
# Build only missing container images
# ------------------------------------------------------------
build-missing:
    @bash scripts/build/build-image-missing.sh
test-env-nix: build-missing
    @bash scripts/test/test-env-nix.sh

# Combined test target for local + CI (unit + integration + e2e)
test: test-container test-unit test-integration test-e2e
test: test-env-virtual test-unit test-integration test-e2e

delete-volumes:
    @docker volume rm pkgmgr_nix_store_${distro} pkgmgr_nix_cache_${distro} || true
    @docker volume rm "pkgmgr_nix_store_${PKGMGR_DISTRO}" "pkgmgr_nix_cache_${PKGMGR_DISTRO}" || echo "No volumes to delete."

purge: delete-volumes build-no-cache

# ------------------------------------------------------------
# System install (native packages, calls scripts/installation/run-package.sh)
# ------------------------------------------------------------
install:
    @echo "Building and installing distro-native package-manager for this system..."
    @bash scripts/installation/run-package.sh

# ------------------------------------------------------------
# Uninstall target
# ------------------------------------------------------------
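Because `PKGMGR_DISTRO` is an exported Makefile variable with `arch` as the default, every build and test target can be pointed at a different distribution per invocation. The CI workflows earlier in this diff use exactly this pattern; run locally it looks like:

```bash
# Select a distribution per invocation (default is arch)
PKGMGR_DISTRO=debian make build-missing-virgin   # build the virgin Debian image only if it does not exist yet
PKGMGR_DISTRO=debian make test-env-nix           # run the Nix flake-only tests against that distro

# Remove the per-distro Nix volumes and rebuild from scratch
PKGMGR_DISTRO=debian make purge
```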
README.md (226 lines changed)

@@ -1,70 +1,232 @@
# Package Manager🤖📦
# Package Manager 🤖📦



[](https://github.com/sponsors/kevinveenbirkenbach)
[](https://www.patreon.com/c/kevinveenbirkenbach)
[](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
[](https://www.patreon.com/c/kevinveenbirkenbach)
[](https://buymeacoffee.com/kevinveenbirkenbach)
[](https://s.veen.world/paypaldonate)
[](LICENSE)
[](https://github.com/kevinveenbirkenbach/package-manager)
[](https://github.com/kevinveenbirkenbach/package-manager/actions/workflows/mark-stable.yml)

*Kevins's* Package Manager is a configurable Python tool designed to manage multiple repositories via Bash. It automates common Git operations such as clone, pull, push, status, and more. Additionally, it handles the creation of executable wrappers and alias links for your repositories.
[**Kevin's Package Manager (PKGMGR)**](https://s.veen.world/pkgmgr) is a *multi-distro* package manager and workflow orchestrator.
It helps you **develop, package, release and manage projects across multiple Linux-based operating systems** (Arch, Debian, Ubuntu, Fedora, CentOS, …).

PKGMGR is implemented in **Python** and uses **Nix (flakes)** as a foundation for distribution-independent builds and tooling. On top of that it provides a rich CLI that proxies common developer tools (Git, Docker, Make, …) and glues them together into repeatable development workflows.

---

## Why PKGMGR? 🧠

Traditional distro package managers like `apt`, `pacman` or `dnf` focus on a single operating system. PKGMGR instead focuses on **your repositories and development lifecycle**. It provides one configuration for all repositories, one unified CLI to interact with them, and a Nix-based foundation that keeps tooling reproducible across distributions.

Native package managers are still used where they make sense. PKGMGR coordinates the surrounding development, build and release workflows in a consistent way.

In addition, PKGMGR provides Docker images that can serve as a **reproducible system baseline**. These images bundle the complete PKGMGR toolchain and are designed to be reused as a stable execution environment across machines, pipelines and teams. This approach is specifically used within [**Infinito.Nexus**](https://s.infinito.nexus/code) to make complex systems distribution-independent while remaining fully reproducible.

---

## Features 🚀

- **Installation & Setup:**
  Create executable wrappers with auto-detected commands (e.g. `main.sh` or `main.py`).

- **Git Operations:**
  Easily perform `git pull`, `push`, `status`, `commit`, `diff`, `add`, `show`, and `checkout` with extra parameters passed through.

- **Configuration Management:**
  Manage repository configurations via a default file (`config/defaults.yaml`) and a user-specific file (`config/config.yaml`). Initialize, add, delete, or ignore entries using subcommands.

- **Path & Listing:**
  Display repository paths or list all configured packages with their details.

- **Custom Aliases:**
  Generate and manage custom aliases for easy command invocation.
PKGMGR enables multi-distro development and packaging by managing multiple repositories from a single configuration file. It drives complete release pipelines across Linux distributions using Nix flakes, Python build metadata, native OS packages such as Arch, Debian and RPM formats, and additional ecosystem integrations like Ansible.

All functionality is exposed through a unified `pkgmgr` command-line interface that works identically on every supported distribution. It combines repository management, Git operations, Docker and Compose orchestration, as well as versioning, release and changelog workflows. Many commands support a preview mode, allowing you to inspect the underlying actions before they are executed.
---

### Full development workflows

PKGMGR is not just a helper around Git commands. Combined with its release and versioning features it can drive **end-to-end workflows** (see the sketch after this list):

1. Clone and mirror repositories.
2. Run tests and builds through `make` or Nix.
3. Bump versions, update changelogs and tags.
4. Build distro-specific packages.
5. Keep all mirrors and working copies in sync.
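As a rough sketch of that flow applied to the `pkgmgr` repository itself, the following invocations appear verbatim in the CI jobs and changelog entries elsewhere in this diff:

```bash
# Refresh the managed pkgmgr repository (shallow clone, verification skipped)
pkgmgr update pkgmgr --clone-mode shallow --no-verification

# Inspect the release workflow options (version bump, changelog, tagging, --close)
pkgmgr release --help

# Confirm the resulting version of the managed repository
pkgmgr version pkgmgr
```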
---

## Architecture & Setup Map 🗺️

The following diagram provides a full overview of PKGMGR’s package structure, installation layers, and setup controller flow:
The following diagram gives a full overview of:

* PKGMGR’s package structure,
* the layered installers (OS, foundation, Python, Makefile),
* and the setup controller that decides which layer to use on a given system.



**Diagram status:** *Stand: 11. Dezember 2025*
**Always-up-to-date version:** https://s.veen.world/pkgmgrmp

**Diagram status:** 12 December 2025

**Always-up-to-date version:** [https://s.veen.world/pkgmgrmp](https://s.veen.world/pkgmgrmp)

---

## Installation ⚙️

Clone the repository and ensure your `~/.local/bin` is in your system PATH:
PKGMGR can be installed using `make`.
The setup mode defines **which runtime layers are prepared**.
---

### Download

```bash
git clone https://github.com/kevinveenbirkenbach/package-manager.git
cd package-manager
```

Install make and pip if not installed yet:
### Dependency installation (optional)

```bash
pacman -S make python-pip
System dependencies required **before running any *make* commands** are installed via:

```
scripts/installation/dependencies.sh
```

Then, run the following command to set up the project:
The script detects and normalizes the OS and installs the required **system-level dependencies** accordingly.

### Install

```bash
git clone https://github.com/kevinveenbirkenbach/package-manager.git
cd package-manager
make install
```

### Setup modes

| Command | Prepares | Use case |
| ------------------- | ----------------------- | --------------------- |
| **make setup** | Python venv **and** Nix | Full development & CI |
| **make setup-venv** | Python venv only | Local user setup |


##### Full setup (venv + Nix)

```bash
make setup
```

The `make setup` command will:
- Make `main.py` executable.
- Install required packages from `requirements.txt`.
- Execute `python main.py install` to complete the installation.
Use this for CI, servers, containers and full development workflows.

##### Venv-only setup

```bash
make setup-venv
source ~/.venvs/pkgmgr/bin/activate
```

Use this if you want PKGMGR isolated without Nix integration.

---

## Run PKGMGR 🧰

PKGMGR can be executed in different environments.
All modes expose the same CLI and commands.

---

### Run via Nix (no installation)

```bash
nix run github:kevinveenbirkenbach/package-manager#pkgmgr -- --help
```
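Any subcommand can be passed after `--`. For example, the CI jobs later in this diff use the same pattern to query the version of a managed repository, both against the GitHub flake and against a local checkout:

```bash
# Run a specific subcommand through the flake without installing anything
nix run github:kevinveenbirkenbach/package-manager#pkgmgr -- version pkgmgr

# The same against a local checkout (the virgin CI tests do this with /src)
nix run .#pkgmgr -- version pkgmgr
```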
---

### Run via Docker 🐳

PKGMGR can be executed **inside Docker containers** for CI, testing and isolated workflows.

---

#### Container types

Two container types are available.

| Image type | Contains | Typical use |
| ---------- | ----------------------------- | ----------------------- |
| **Virgin** | Base OS + system dependencies | Clean test environments |
| **Stable** | PKGMGR + Nix (flakes enabled) | Ready-to-use workflows |

Example images:

* Virgin: `pkgmgr-arch-virgin`
* Stable: `ghcr.io/kevinveenbirkenbach/pkgmgr:stable`

Use **virgin images** for isolated test runs,
use the **stable image** for fast, reproducible execution.

---

#### Run examples

```bash
docker run --rm -it \
  -v "$PWD":/src \
  -w /src \
  ghcr.io/kevinveenbirkenbach/pkgmgr:stable \
  pkgmgr --help
```
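A virgin image contains only the base OS and system dependencies, so it is normally combined with a mounted source tree, as in the virgin CI jobs above. A minimal interactive session along those lines might look like this (the `pkgmgr-arch-virgin` tag is the local name produced by `make build-missing-virgin`):

```bash
# Start a clean-room shell with the repository mounted at /src
docker run --rm -it \
  -v "$PWD":/src \
  -w /src \
  pkgmgr-arch-virgin \
  bash
```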
---

### Run via virtual environment (venv)

After activating the venv:

```bash
pkgmgr --help
```

---

This allows you to choose between zero install execution using Nix, fully prebuilt Docker environments or local isolated venv setups with identical command behavior.

---

## License 📄

This project is licensed under the MIT License.
See the [LICENSE](LICENSE) file for details.

---

## Author 👤

Kevin Veen-Birkenbach
Kevin Veen-Birkenbach
[https://www.veen.world](https://www.veen.world)

@@ -1,4 +0,0 @@
# Legacy file used only if pip still installs from requirements.txt.
# You may delete this file once you switch entirely to pyproject.toml.

PyYAML
assets/banner.jpg (new binary file, 63 KiB; not shown)
assets/map.png (binary file changed, 1.9 MiB before and after; not shown)
flake.lock (generated, 27 lines deleted)

@@ -1,27 +0,0 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1765186076,
        "narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
@@ -26,17 +26,13 @@
packages = forAllSystems (system:
  let
    pkgs = nixpkgs.legacyPackages.${system};

    # Single source of truth for pkgmgr: Python 3.11
    # - Matches pyproject.toml: requires-python = ">=3.11"
    # - Uses python311Packages so that PyYAML etc. are available
    python = pkgs.python311;
    pyPkgs = pkgs.python311Packages;
  in
  rec {
    pkgmgr = pyPkgs.buildPythonApplication {
      pname = "package-manager";
      version = "0.10.2";
      version = "1.5.0";

      # Use the git repo as source
      src = ./.;
main.py (14 lines deleted)

@@ -1,14 +0,0 @@
#!/usr/bin/env python3
import sys
from pathlib import Path

# Ensure local src/ overrides installed package
ROOT = Path(__file__).resolve().parent
SRC = ROOT / "src"
if SRC.is_dir():
    sys.path.insert(0, str(SRC))

from pkgmgr.cli import main

if __name__ == "__main__":
    main()
@@ -47,12 +47,13 @@ package() {
  cd "$srcdir/$_srcdir_name"

  # Install the wrapper into /usr/bin
  install -Dm0755 "scripts/pkgmgr-wrapper.sh" \
  install -Dm0755 "scripts/launcher.sh" \
    "$pkgdir/usr/bin/pkgmgr"

  # Install Nix init helper
  install -Dm0755 "scripts/init-nix.sh" \
    "$pkgdir/usr/lib/package-manager/init-nix.sh"
  # Install Nix bootstrap (init + lib)
  install -d "$pkgdir/usr/lib/package-manager/nix"
  cp -a scripts/nix/* "$pkgdir/usr/lib/package-manager/nix/"
  chmod 0755 "$pkgdir/usr/lib/package-manager/nix/init.sh"

  # Install the full repository into /usr/lib/package-manager
  mkdir -p "$pkgdir/usr/lib/package-manager"

@@ -1,9 +1,9 @@
post_install() {
  /usr/lib/package-manager/init-nix.sh || true
  /usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
}

post_upgrade() {
  /usr/lib/package-manager/init-nix.sh || true
  /usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
}

post_remove() {
@@ -3,11 +3,7 @@ set -e

case "$1" in
  configure)
    if [ -x /usr/lib/package-manager/init-nix.sh ]; then
      /usr/lib/package-manager/init-nix.sh || true
    else
      echo ">>> Warning: /usr/lib/package-manager/init-nix.sh not found or not executable."
    fi
    /usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
    ;;
esac
@@ -20,7 +20,7 @@ override_dh_auto_test:
    :

# ---------------------------------------------------------------------------
# Install phase: copy wrapper + init script + full project source
# Install phase: copy wrapper + Nix bootstrap (init + lib) + full project source
# ---------------------------------------------------------------------------
override_dh_auto_install:
    # Create target directories
@@ -28,12 +28,14 @@ override_dh_auto_install:
    install -d debian/package-manager/usr/lib/package-manager

    # Install wrapper
    install -m0755 scripts/pkgmgr-wrapper.sh \
    install -m0755 scripts/launcher.sh \
        debian/package-manager/usr/bin/pkgmgr

    # Install shared Nix init script
    install -m0755 scripts/init-nix.sh \
        debian/package-manager/usr/lib/package-manager/init-nix.sh
    # Install Nix bootstrap (init + lib)
    install -d debian/package-manager/usr/lib/package-manager/nix
    cp -a scripts/nix/* \
        debian/package-manager/usr/lib/package-manager/nix/
    chmod 0755 debian/package-manager/usr/lib/package-manager/nix/init.sh

    # Copy full project source into /usr/lib/package-manager,
    # but do not include the debian/ directory itself.
@@ -12,7 +12,7 @@ BuildArch: noarch
# NOTE:
# Nix is a runtime requirement, but it is *not* declared here as a hard
# RPM dependency, because many distributions do not ship a "nix" RPM.
# Instead, Nix is installed and initialized by init-nix.sh, which is
# Instead, Nix is installed and initialized by nix/init.sh, which is
# called in the %post scriptlet below.

%description
@@ -22,7 +22,7 @@ manager via a local Nix flake:
  nix run /usr/lib/package-manager#pkgmgr -- ...

Nix is a runtime requirement and is installed/initialized by the
init-nix.sh helper during package installation if it is not yet
nix/init.sh helper during package installation if it is not yet
available on the system.

%prep
@@ -34,18 +34,20 @@ available on the system.

%install
rm -rf %{buildroot}

install -d %{buildroot}%{_bindir}
# Install project tree into a fixed, architecture-independent location.
install -d %{buildroot}/usr/lib/package-manager

# Copy full project source into /usr/lib/package-manager
cp -a . %{buildroot}/usr/lib/package-manager/

# Wrapper
install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr
install -m0755 scripts/launcher.sh %{buildroot}%{_bindir}/pkgmgr

# Shared Nix init script (ensure it is executable in the installed tree)
install -m0755 scripts/init-nix.sh %{buildroot}/usr/lib/package-manager/init-nix.sh
# Nix bootstrap (init + lib)
install -d %{buildroot}/usr/lib/package-manager/nix
cp -a scripts/nix/* %{buildroot}/usr/lib/package-manager/nix/
chmod 0755 %{buildroot}/usr/lib/package-manager/nix/init.sh

# Remove packaging-only and development artefacts from the installed tree
rm -rf \
@@ -60,12 +62,7 @@ rm -rf \
  %{buildroot}/usr/lib/package-manager/.gitkeep || true

%post
# Initialize Nix (if needed) after installing the package-manager files.
if [ -x /usr/lib/package-manager/init-nix.sh ]; then
  /usr/lib/package-manager/init-nix.sh || true
else
  echo ">>> Warning: /usr/lib/package-manager/init-nix.sh not found or not executable."
fi
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."

%postun
echo ">>> package-manager removed. Nix itself was not removed."
@@ -7,10 +7,10 @@ build-backend = "setuptools.build_meta"

[project]
name = "package-manager"
version = "0.10.2"
version = "1.5.0"
description = "Kevin's package-manager tool (pkgmgr)"
readme = "README.md"
requires-python = ">=3.11"
requires-python = ">=3.9"
license = { text = "MIT" }

authors = [
@@ -23,12 +23,12 @@ dependencies = [
]

[project.urls]
Homepage = "https://github.com/kevinveenbirkenbach/package-manager"
Homepage = "https://s.veen.world/pkgmgr"
Source = "https://github.com/kevinveenbirkenbach/package-manager"

[project.optional-dependencies]
keyring = ["keyring>=24.0.0"]
dev = [
    "pytest",
    "mypy"
]
scripts/build/base.sh (new executable file, 20 lines)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -euo pipefail

: "${BASE_IMAGE_ARCH:=archlinux:latest}"
: "${BASE_IMAGE_DEBIAN:=debian:stable-slim}"
: "${BASE_IMAGE_UBUNTU:=ubuntu:latest}"
: "${BASE_IMAGE_FEDORA:=fedora:latest}"
: "${BASE_IMAGE_CENTOS:=quay.io/centos/centos:stream9}"

resolve_base_image() {
  local PKGMGR_DISTRO="$1"
  case "$PKGMGR_DISTRO" in
    arch)   echo "$BASE_IMAGE_ARCH" ;;
    debian) echo "$BASE_IMAGE_DEBIAN" ;;
    ubuntu) echo "$BASE_IMAGE_UBUNTU" ;;
    fedora) echo "$BASE_IMAGE_FEDORA" ;;
    centos) echo "$BASE_IMAGE_CENTOS" ;;
    *) echo "ERROR: Unknown distro '$PKGMGR_DISTRO'" >&2; exit 1 ;;
  esac
}
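The helper is meant to be sourced by the other build scripts (image.sh below does exactly this). A hedged usage sketch, sourcing it in a throwaway shell to map a distro name to its default base image:

```bash
# Source the helper and resolve the default base image for a distro
source scripts/build/base.sh
resolve_base_image debian    # prints: debian:stable-slim
resolve_base_image centos    # prints: quay.io/centos/centos:stream9
```

The defaults can be overridden by exporting the corresponding BASE_IMAGE_* variables (the Makefile exports them) before the script is sourced.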
@@ -1,24 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "${SCRIPT_DIR}/resolve-base-image.sh"

IMAGE="package-manager-test-$distro"
BASE_IMAGE="$(resolve_base_image "$distro")"

if docker image inspect "$IMAGE" >/dev/null 2>&1; then
  echo "[build-missing] Image already exists: $IMAGE (skipping)"
  exit 0
fi

echo
echo "------------------------------------------------------------"
echo "[build-missing] Building missing image: $IMAGE"
echo "BASE_IMAGE = $BASE_IMAGE"
echo "------------------------------------------------------------"

docker build \
  --build-arg BASE_IMAGE="$BASE_IMAGE" \
  -t "$IMAGE" \
  .
@@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "${SCRIPT_DIR}/resolve-base-image.sh"

base_image="$(resolve_base_image "$distro")"

echo ">>> Building test image for distro '$distro' with NO CACHE (BASE_IMAGE=$base_image)..."

docker build \
  --no-cache \
  --build-arg BASE_IMAGE="$base_image" \
  -t "package-manager-test-$distro" \
  .
@@ -1,14 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
source "${SCRIPT_DIR}/resolve-base-image.sh"

base_image="$(resolve_base_image "$distro")"

echo ">>> Building test image for distro '$distro' (BASE_IMAGE=$base_image)..."

docker build \
  --build-arg BASE_IMAGE="$base_image" \
  -t "package-manager-test-$distro" \
  .
scripts/build/image.sh (new executable file, 227 lines)

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# shellcheck source=./scripts/build/base.sh
source "${SCRIPT_DIR}/base.sh"

: "${PKGMGR_DISTRO:?Environment variable 'PKGMGR_DISTRO' must be set (arch|debian|ubuntu|fedora|centos)}"

NO_CACHE=0
MISSING_ONLY=0
TARGET=""
IMAGE_TAG=""          # local image name or base tag (without registry)
PUSH=0                # if 1 -> use buildx and push (requires docker buildx)
PUBLISH=0             # if 1 -> push with semantic tags (latest/version/stable + arch aliases)
REGISTRY=""           # e.g. ghcr.io
OWNER=""              # e.g. github org/user
REPO_PREFIX="pkgmgr"  # image base name (pkgmgr)
VERSION=""            # X.Y.Z (required for --publish)
IS_STABLE="false"     # "true" -> publish stable tags
DEFAULT_DISTRO="arch"

usage() {
  local default_tag="pkgmgr-${PKGMGR_DISTRO}"
  if [[ -n "${TARGET:-}" ]]; then
    default_tag="${default_tag}-${TARGET}"
  fi

  cat <<EOF
Usage: PKGMGR_DISTRO=<distro> $0 [options]

Build options:
  --missing              Build only if the image does not already exist (local build only)
  --no-cache             Build with --no-cache
  --target <name>        Build a specific Dockerfile target (e.g. virgin)
  --tag <image>          Override the output image tag (default: ${default_tag})

Publish options:
  --push                 Push the built image (uses docker buildx build --push)
  --publish              Publish semantic tags (latest, <version>, optional stable) + arch aliases
  --registry <reg>       Registry (e.g. ghcr.io)
  --owner <owner>        Registry namespace (e.g. \${GITHUB_REPOSITORY_OWNER})
  --repo-prefix <name>   Image base name (default: pkgmgr)
  --version <X.Y.Z>      Version for --publish
  --stable <true|false>  Whether to publish :stable tags (default: false)

Notes:
  - --publish implies --push and requires --registry, --owner, and --version.
  - Local build (no --push) uses "docker build" and creates local images like "pkgmgr-arch" / "pkgmgr-arch-virgin".
EOF
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --no-cache) NO_CACHE=1; shift ;;
    --missing) MISSING_ONLY=1; shift ;;
    --target)
      TARGET="${2:-}"
      [[ -n "${TARGET}" ]] || { echo "ERROR: --target requires a value (e.g. virgin)"; exit 2; }
      shift 2
      ;;
    --tag)
      IMAGE_TAG="${2:-}"
      [[ -n "${IMAGE_TAG}" ]] || { echo "ERROR: --tag requires a value"; exit 2; }
      shift 2
      ;;
    --push) PUSH=1; shift ;;
    --publish) PUBLISH=1; PUSH=1; shift ;;
    --registry)
      REGISTRY="${2:-}"
      [[ -n "${REGISTRY}" ]] || { echo "ERROR: --registry requires a value"; exit 2; }
      shift 2
      ;;
    --owner)
      OWNER="${2:-}"
      [[ -n "${OWNER}" ]] || { echo "ERROR: --owner requires a value"; exit 2; }
      shift 2
      ;;
    --repo-prefix)
      REPO_PREFIX="${2:-}"
      [[ -n "${REPO_PREFIX}" ]] || { echo "ERROR: --repo-prefix requires a value"; exit 2; }
      shift 2
      ;;
    --version)
      VERSION="${2:-}"
      [[ -n "${VERSION}" ]] || { echo "ERROR: --version requires a value"; exit 2; }
      shift 2
      ;;
    --stable)
      IS_STABLE="${2:-}"
      [[ -n "${IS_STABLE}" ]] || { echo "ERROR: --stable requires a value (true|false)"; exit 2; }
      shift 2
      ;;
    -h|--help) usage; exit 0 ;;
    *)
      echo "ERROR: Unknown argument: $1" >&2
      usage
      exit 2
      ;;
  esac
done

# Derive default local tag if not provided
if [[ -z "${IMAGE_TAG}" ]]; then
  IMAGE_TAG="${REPO_PREFIX}-${PKGMGR_DISTRO}"
  if [[ -n "${TARGET}" ]]; then
    IMAGE_TAG="${IMAGE_TAG}-${TARGET}"
||||
fi
|
||||
fi
|
||||
|
||||
BASE_IMAGE="$(resolve_base_image "$PKGMGR_DISTRO")"
|
||||
|
||||
# Local-only "missing" shortcut
|
||||
if [[ "${MISSING_ONLY}" == "1" ]]; then
|
||||
if [[ "${PUSH}" == "1" ]]; then
|
||||
echo "ERROR: --missing is only supported for local builds (without --push/--publish)" >&2
|
||||
exit 2
|
||||
fi
|
||||
if docker image inspect "${IMAGE_TAG}" >/dev/null 2>&1; then
|
||||
echo "[build] Image already exists: ${IMAGE_TAG} (skipping due to --missing)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Validate publish parameters
|
||||
if [[ "${PUBLISH}" == "1" ]]; then
|
||||
[[ -n "${REGISTRY}" ]] || { echo "ERROR: --publish requires --registry"; exit 2; }
|
||||
[[ -n "${OWNER}" ]] || { echo "ERROR: --publish requires --owner"; exit 2; }
|
||||
[[ -n "${VERSION}" ]] || { echo "ERROR: --publish requires --version"; exit 2; }
|
||||
fi
|
||||
|
||||
# Guard: --push without --publish requires fully-qualified --tag
|
||||
if [[ "${PUSH}" == "1" && "${PUBLISH}" != "1" ]]; then
|
||||
if [[ "${IMAGE_TAG}" != */* ]]; then
|
||||
echo "ERROR: --push requires --tag with a fully-qualified name (e.g. ghcr.io/<owner>/<image>:tag), or use --publish" >&2
|
||||
exit 2
|
||||
fi
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[build] Building image"
|
||||
echo "distro = ${PKGMGR_DISTRO}"
|
||||
echo "BASE_IMAGE = ${BASE_IMAGE}"
|
||||
if [[ -n "${TARGET}" ]]; then echo "target = ${TARGET}"; fi
|
||||
if [[ "${NO_CACHE}" == "1" ]]; then echo "cache = disabled"; fi
|
||||
if [[ "${PUSH}" == "1" ]]; then echo "push = enabled"; fi
|
||||
if [[ "${PUBLISH}" == "1" ]]; then
|
||||
echo "publish = enabled"
|
||||
echo "registry = ${REGISTRY}"
|
||||
echo "owner = ${OWNER}"
|
||||
echo "version = ${VERSION}"
|
||||
echo "stable = ${IS_STABLE}"
|
||||
fi
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
# Common build args
|
||||
build_args=(--build-arg "BASE_IMAGE=${BASE_IMAGE}")
|
||||
|
||||
if [[ "${NO_CACHE}" == "1" ]]; then
|
||||
build_args+=(--no-cache)
|
||||
fi
|
||||
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
build_args+=(--target "${TARGET}")
|
||||
fi
|
||||
|
||||
compute_publish_tags() {
|
||||
local distro_tag_base="${REGISTRY}/${OWNER}/${REPO_PREFIX}-${PKGMGR_DISTRO}"
|
||||
local alias_tag_base=""
|
||||
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
distro_tag_base="${distro_tag_base}-${TARGET}"
|
||||
fi
|
||||
|
||||
if [[ "${PKGMGR_DISTRO}" == "${DEFAULT_DISTRO}" ]]; then
|
||||
alias_tag_base="${REGISTRY}/${OWNER}/${REPO_PREFIX}"
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
alias_tag_base="${alias_tag_base}-${TARGET}"
|
||||
fi
|
||||
fi
|
||||
|
||||
local tags=()
|
||||
tags+=("${distro_tag_base}:latest")
|
||||
tags+=("${distro_tag_base}:${VERSION}")
|
||||
|
||||
if [[ "${IS_STABLE}" == "true" ]]; then
|
||||
tags+=("${distro_tag_base}:stable")
|
||||
fi
|
||||
|
||||
if [[ -n "${alias_tag_base}" ]]; then
|
||||
tags+=("${alias_tag_base}:latest")
|
||||
tags+=("${alias_tag_base}:${VERSION}")
|
||||
if [[ "${IS_STABLE}" == "true" ]]; then
|
||||
tags+=("${alias_tag_base}:stable")
|
||||
fi
|
||||
fi
|
||||
|
||||
printf '%s\n' "${tags[@]}"
|
||||
}
|
||||
|
||||
if [[ "${PUSH}" == "1" ]]; then
|
||||
bx_args=(docker buildx build --push)
|
||||
|
||||
if [[ "${PUBLISH}" == "1" ]]; then
|
||||
while IFS= read -r t; do
|
||||
bx_args+=(-t "$t")
|
||||
done < <(compute_publish_tags)
|
||||
else
|
||||
bx_args+=(-t "${IMAGE_TAG}")
|
||||
fi
|
||||
|
||||
bx_args+=("${build_args[@]}")
|
||||
bx_args+=(.)
|
||||
|
||||
echo "[build] Running: ${bx_args[*]}"
|
||||
"${bx_args[@]}"
|
||||
else
|
||||
local_args=(docker build)
|
||||
local_args+=("${build_args[@]}")
|
||||
local_args+=(-t "${IMAGE_TAG}")
|
||||
local_args+=(.)
|
||||
|
||||
echo "[build] Running: ${local_args[*]}"
|
||||
"${local_args[@]}"
|
||||
fi
|
||||
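Example invocations, as a hedged sketch (registry, owner, and version values are illustrative; the flags are the ones documented in the usage text above):

# Local build of the Arch test image (produces the local tag "pkgmgr-arch"):
PKGMGR_DISTRO=arch bash scripts/build/image.sh --no-cache

# Publish semantic tags for the Debian "virgin" target (implies --push):
PKGMGR_DISTRO=debian bash scripts/build/image.sh \
  --publish \
  --registry ghcr.io \
  --owner example-org \
  --version 1.2.3 \
  --stable true \
  --target virgin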
55
scripts/build/publish.sh
Executable file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Publish all distro images (full + virgin) to a registry via image.sh --publish
|
||||
#
|
||||
# Required env:
|
||||
# OWNER (e.g. GITHUB_REPOSITORY_OWNER)
|
||||
# VERSION (e.g. 1.2.3)
|
||||
#
|
||||
# Optional env:
|
||||
# REGISTRY (default: ghcr.io)
|
||||
# IS_STABLE (default: false)
|
||||
# DISTROS (default: "arch debian ubuntu fedora centos")
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
REGISTRY="${REGISTRY:-ghcr.io}"
|
||||
IS_STABLE="${IS_STABLE:-false}"
|
||||
DISTROS="${DISTROS:-arch debian ubuntu fedora centos}"
|
||||
|
||||
: "${OWNER:?Environment variable OWNER must be set (e.g. github.repository_owner)}"
|
||||
: "${VERSION:?Environment variable VERSION must be set (e.g. 1.2.3)}"
|
||||
|
||||
echo "[publish] REGISTRY=${REGISTRY}"
|
||||
echo "[publish] OWNER=${OWNER}"
|
||||
echo "[publish] VERSION=${VERSION}"
|
||||
echo "[publish] IS_STABLE=${IS_STABLE}"
|
||||
echo "[publish] DISTROS=${DISTROS}"
|
||||
|
||||
for d in ${DISTROS}; do
|
||||
echo
|
||||
echo "============================================================"
|
||||
echo "[publish] PKGMGR_DISTRO=${d}"
|
||||
echo "============================================================"
|
||||
|
||||
# virgin
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
--owner "${OWNER}" \
|
||||
--version "${VERSION}" \
|
||||
--stable "${IS_STABLE}" \
|
||||
--target virgin
|
||||
|
||||
# full (default target)
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
--owner "${OWNER}" \
|
||||
--version "${VERSION}" \
|
||||
--stable "${IS_STABLE}"
|
||||
done
|
||||
|
||||
echo
|
||||
echo "[publish] Done."
|
||||
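A hedged usage sketch (owner and version are illustrative; the environment variables are the ones documented in the header comment above):

# Publish full + virgin images for all default distros:
OWNER=example-org VERSION=1.2.3 IS_STABLE=true bash scripts/build/publish.sh

# Restrict to a subset of distros and set the registry explicitly:
DISTROS="arch debian" REGISTRY=ghcr.io OWNER=example-org VERSION=1.2.3 \
  bash scripts/build/publish.sh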
@@ -1,18 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
resolve_base_image() {
|
||||
local distro="$1"
|
||||
|
||||
case "$distro" in
|
||||
arch) echo "$BASE_IMAGE_ARCH" ;;
|
||||
debian) echo "$BASE_IMAGE_DEBIAN" ;;
|
||||
ubuntu) echo "$BASE_IMAGE_UBUNTU" ;;
|
||||
fedora) echo "$BASE_IMAGE_FEDORA" ;;
|
||||
centos) echo "$BASE_IMAGE_CENTOS" ;;
|
||||
*)
|
||||
echo "ERROR: Unknown distro '$distro'" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
@@ -1,55 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Detect and export a valid CA bundle so Nix, Git, curl and Python tooling
|
||||
# can successfully perform HTTPS requests on all distros (Debian, Ubuntu,
|
||||
# Fedora, RHEL, CentOS, etc.)
|
||||
# ---------------------------------------------------------------------------
|
||||
detect_ca_bundle() {
|
||||
# Common CA bundle locations across major Linux distributions
|
||||
local candidates=(
|
||||
/etc/ssl/certs/ca-certificates.crt # Debian/Ubuntu
|
||||
/etc/ssl/cert.pem # Some distros
|
||||
/etc/pki/tls/certs/ca-bundle.crt # Fedora/RHEL/CentOS
|
||||
/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem # CentOS/RHEL extracted bundle
|
||||
/etc/ssl/ca-bundle.pem # Generic fallback
|
||||
)
|
||||
|
||||
for path in "${candidates[@]}"; do
|
||||
if [[ -f "$path" ]]; then
|
||||
echo "$path"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Use existing NIX_SSL_CERT_FILE if provided, otherwise auto-detect
|
||||
CA_BUNDLE="${NIX_SSL_CERT_FILE:-}"
|
||||
|
||||
if [[ -z "${CA_BUNDLE}" ]]; then
|
||||
CA_BUNDLE="$(detect_ca_bundle || true)"
|
||||
fi
|
||||
|
||||
if [[ -n "${CA_BUNDLE}" ]]; then
|
||||
# Export for Nix (critical)
|
||||
export NIX_SSL_CERT_FILE="${CA_BUNDLE}"
|
||||
|
||||
# Export for Git, Python requests, curl, etc.
|
||||
export SSL_CERT_FILE="${CA_BUNDLE}"
|
||||
export REQUESTS_CA_BUNDLE="${CA_BUNDLE}"
|
||||
export GIT_SSL_CAINFO="${CA_BUNDLE}"
|
||||
|
||||
echo "[docker] Using CA bundle: ${CA_BUNDLE}"
|
||||
else
|
||||
echo "[docker] WARNING: No CA certificate bundle found."
|
||||
echo "[docker] HTTPS access for Nix flakes and other tools may fail."
|
||||
fi
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
echo "[docker] Starting package-manager container"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -68,16 +19,10 @@ cd /src
|
||||
# ---------------------------------------------------------------------------
|
||||
# DEV mode: rebuild package-manager from the mounted /src tree
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ "${PKGMGR_DEV:-0}" == "1" ]]; then
|
||||
echo "[docker] DEV mode enabled (PKGMGR_DEV=1)"
|
||||
echo "[docker] Rebuilding package-manager from /src via scripts/installation/run-package.sh..."
|
||||
|
||||
if [[ -x scripts/installation/run-package.sh ]]; then
|
||||
bash scripts/installation/run-package.sh
|
||||
else
|
||||
echo "[docker] ERROR: scripts/installation/run-package.sh not found or not executable"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "${REINSTALL_PKGMGR:-0}" == "1" ]]; then
|
||||
echo "[docker] DEV mode enabled (REINSTALL_PKGMGR=1)"
|
||||
echo "[docker] Rebuilding package-manager from /src via scripts/installation/package.sh..."
|
||||
bash scripts/installation/package.sh || exit 1
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@@ -1,246 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
echo "[init-nix] Starting Nix initialization..."
|
||||
|
||||
NIX_INSTALL_URL="${NIX_INSTALL_URL:-https://nixos.org/nix/install}"
|
||||
NIX_DOWNLOAD_MAX_TIME=300 # 5 minutes
|
||||
NIX_DOWNLOAD_SLEEP_INTERVAL=20 # 20 seconds
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Detect whether we are inside a container (Docker/Podman/etc.)
|
||||
# ---------------------------------------------------------------------------
|
||||
is_container() {
|
||||
if [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if grep -qiE 'docker|container|podman|lxc' /proc/1/cgroup 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ -n "${container:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Ensure Nix binaries are on PATH (multi-user or single-user)
|
||||
# ---------------------------------------------------------------------------
|
||||
ensure_nix_on_path() {
|
||||
if [[ -x /nix/var/nix/profiles/default/bin/nix ]]; then
|
||||
export PATH="/nix/var/nix/profiles/default/bin:${PATH}"
|
||||
fi
|
||||
|
||||
if [[ -x "${HOME}/.nix-profile/bin/nix" ]]; then
|
||||
export PATH="${HOME}/.nix-profile/bin:${PATH}"
|
||||
fi
|
||||
|
||||
if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
|
||||
export PATH="/home/nix/.nix-profile/bin:${PATH}"
|
||||
fi
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Ensure Nix build group and users exist (build-users-group = nixbld)
|
||||
# ---------------------------------------------------------------------------
|
||||
ensure_nix_build_group() {
|
||||
if ! getent group nixbld >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating group 'nixbld'..."
|
||||
groupadd -r nixbld
|
||||
fi
|
||||
|
||||
for i in $(seq 1 10); do
|
||||
if ! id "nixbld$i" >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating build user nixbld$i..."
|
||||
useradd -r -g nixbld -G nixbld -s /usr/sbin/nologin "nixbld$i"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Download and run Nix installer with retry
|
||||
# Usage: install_nix_with_retry daemon|no-daemon [run_as_user]
|
||||
# ---------------------------------------------------------------------------
|
||||
install_nix_with_retry() {
|
||||
local mode="$1"
|
||||
local run_as="${2:-}"
|
||||
local installer elapsed=0 mode_flag
|
||||
|
||||
case "${mode}" in
|
||||
daemon) mode_flag="--daemon" ;;
|
||||
no-daemon) mode_flag="--no-daemon" ;;
|
||||
*)
|
||||
echo "[init-nix] ERROR: Invalid mode '${mode}', expected 'daemon' or 'no-daemon'."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
installer="$(mktemp -t nix-installer.XXXXXX)"
|
||||
|
||||
echo "[init-nix] Downloading Nix installer from ${NIX_INSTALL_URL} with retry (max ${NIX_DOWNLOAD_MAX_TIME}s)..."
|
||||
|
||||
while true; do
|
||||
if curl -fL "${NIX_INSTALL_URL}" -o "${installer}"; then
|
||||
echo "[init-nix] Successfully downloaded Nix installer to ${installer}"
|
||||
break
|
||||
fi
|
||||
|
||||
local curl_exit=$?
|
||||
echo "[init-nix] WARNING: Failed to download Nix installer (curl exit code ${curl_exit})."
|
||||
|
||||
elapsed=$((elapsed + NIX_DOWNLOAD_SLEEP_INTERVAL))
|
||||
if (( elapsed >= NIX_DOWNLOAD_MAX_TIME )); then
|
||||
echo "[init-nix] ERROR: Giving up after ${elapsed}s trying to download Nix installer."
|
||||
rm -f "${installer}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[init-nix] Retrying in ${NIX_DOWNLOAD_SLEEP_INTERVAL}s (elapsed: ${elapsed}s/${NIX_DOWNLOAD_MAX_TIME}s)..."
|
||||
sleep "${NIX_DOWNLOAD_SLEEP_INTERVAL}"
|
||||
done
|
||||
|
||||
if [[ -n "${run_as}" ]]; then
|
||||
echo "[init-nix] Running installer as user '${run_as}' with mode '${mode}'..."
|
||||
if command -v sudo >/dev/null 2>&1; then
|
||||
sudo -u "${run_as}" bash -lc "sh '${installer}' ${mode_flag}"
|
||||
else
|
||||
su - "${run_as}" -c "sh '${installer}' ${mode_flag}"
|
||||
fi
|
||||
else
|
||||
echo "[init-nix] Running installer as current user with mode '${mode}'..."
|
||||
sh "${installer}" "${mode_flag}"
|
||||
fi
|
||||
|
||||
rm -f "${installer}"
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main
|
||||
# ---------------------------------------------------------------------------
|
||||
main() {
|
||||
# Fast path: Nix already available
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix found after adjusting PATH: $(command -v nix)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix not found, starting installation logic..."
|
||||
|
||||
local IN_CONTAINER=0
|
||||
if is_container; then
|
||||
IN_CONTAINER=1
|
||||
echo "[init-nix] Detected container environment."
|
||||
else
|
||||
echo "[init-nix] No container detected."
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Container + root: dedicated "nix" user, single-user install
|
||||
# -------------------------------------------------------------------------
|
||||
if [[ "${IN_CONTAINER}" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] Container + root – installing as 'nix' user (single-user)."
|
||||
|
||||
ensure_nix_build_group
|
||||
|
||||
if ! id nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating user 'nix'..."
|
||||
local BASH_SHELL
|
||||
BASH_SHELL="$(command -v bash || true)"
|
||||
[[ -z "${BASH_SHELL}" ]] && BASH_SHELL="/bin/sh"
|
||||
useradd -m -r -g nixbld -s "${BASH_SHELL}" nix
|
||||
fi
|
||||
|
||||
if [[ ! -d /nix ]]; then
|
||||
echo "[init-nix] Creating /nix with owner nix:nixbld..."
|
||||
mkdir -m 0755 /nix
|
||||
chown nix:nixbld /nix
|
||||
else
|
||||
local current_owner current_group
|
||||
current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
|
||||
current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
|
||||
if [[ "${current_owner}" != "nix" || "${current_group}" != "nixbld" ]]; then
|
||||
echo "[init-nix] Fixing /nix ownership from ${current_owner}:${current_group} to nix:nixbld..."
|
||||
chown -R nix:nixbld /nix
|
||||
fi
|
||||
if [[ ! -w /nix ]]; then
|
||||
echo "[init-nix] WARNING: /nix is not writable after chown; Nix installer may fail."
|
||||
fi
|
||||
fi
|
||||
|
||||
install_nix_with_retry "no-daemon" "nix"
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
if [[ -x /home/nix/.nix-profile/bin/nix && ! -e /usr/local/bin/nix ]]; then
|
||||
echo "[init-nix] Creating /usr/local/bin/nix symlink -> /home/nix/.nix-profile/bin/nix"
|
||||
ln -s /home/nix/.nix-profile/bin/nix /usr/local/bin/nix
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Host (no container)
|
||||
# -------------------------------------------------------------------------
|
||||
elif [[ "${IN_CONTAINER}" -eq 0 ]]; then
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
echo "[init-nix] Host with systemd – using multi-user install (--daemon)."
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
ensure_nix_build_group
|
||||
fi
|
||||
install_nix_with_retry "daemon"
|
||||
else
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] Host without systemd as root – using single-user install (--no-daemon)."
|
||||
ensure_nix_build_group
|
||||
else
|
||||
echo "[init-nix] Host without systemd as non-root – using single-user install (--no-daemon)."
|
||||
fi
|
||||
install_nix_with_retry "no-daemon"
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Container, but not root (rare)
|
||||
# -------------------------------------------------------------------------
|
||||
else
|
||||
echo "[init-nix] Container as non-root – using single-user install (--no-daemon)."
|
||||
install_nix_with_retry "no-daemon"
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# After installation: PATH + /etc/profile
|
||||
# -------------------------------------------------------------------------
|
||||
ensure_nix_on_path
|
||||
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] WARNING: Nix installation finished, but 'nix' is still not on PATH."
|
||||
echo "[init-nix] You may need to source your shell profile manually."
|
||||
else
|
||||
echo "[init-nix] Nix successfully installed at: $(command -v nix)"
|
||||
fi
|
||||
|
||||
if [[ -w /etc/profile ]] && ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
|
||||
cat <<'EOF' >> /etc/profile
|
||||
|
||||
# Nix profiles (added by package-manager init-nix.sh)
|
||||
if [ -d /nix/var/nix/profiles/default/bin ]; then
|
||||
PATH="/nix/var/nix/profiles/default/bin:$PATH"
|
||||
fi
|
||||
if [ -d "$HOME/.nix-profile/bin" ]; then
|
||||
PATH="$HOME/.nix-profile/bin:$PATH"
|
||||
fi
|
||||
EOF
|
||||
echo "[init-nix] Appended Nix PATH setup to /etc/profile"
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix initialization complete."
|
||||
}
|
||||
|
||||
main "$@"
|
||||
@@ -12,6 +12,7 @@ pacman -S --noconfirm --needed \
|
||||
rsync \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python \
|
||||
xz
|
||||
|
||||
pacman -Scc --noconfirm
|
||||
|
||||
@@ -1,30 +1,64 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
echo "[arch/package] Building Arch package (makepkg --nodeps)..."
|
||||
echo "[arch/package] Building Arch package (makepkg --nodeps) in an isolated build dir..."
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
PKG_DIR="${PROJECT_ROOT}/packaging/arch"
|
||||
|
||||
if [[ ! -f "${PKG_DIR}/PKGBUILD" ]]; then
|
||||
echo "[arch/package] ERROR: PKGBUILD not found in ${PKG_DIR}"
|
||||
# We must not build inside /src (mounted repo). Build in /tmp to avoid permission issues.
|
||||
BUILD_ROOT="/tmp/package-manager-arch-build"
|
||||
PKG_SRC_DIR="${PROJECT_ROOT}/packaging/arch"
|
||||
PKG_BUILD_DIR="${BUILD_ROOT}/packaging/arch"
|
||||
|
||||
if [[ ! -f "${PKG_SRC_DIR}/PKGBUILD" ]]; then
|
||||
echo "[arch/package] ERROR: PKGBUILD not found in ${PKG_SRC_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${PKG_DIR}"
|
||||
echo "[arch/package] Preparing build directory: ${BUILD_ROOT}"
|
||||
rm -rf "${BUILD_ROOT}"
|
||||
mkdir -p "${BUILD_ROOT}"
|
||||
|
||||
if id aur_builder >/dev/null 2>&1; then
|
||||
echo "[arch/package] Using 'aur_builder' user for makepkg..."
|
||||
chown -R aur_builder:aur_builder "${PKG_DIR}"
|
||||
su aur_builder -c "cd '${PKG_DIR}' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
|
||||
else
|
||||
echo "[arch/package] WARNING: user 'aur_builder' not found, running makepkg as current user..."
|
||||
rm -f package-manager-*.pkg.tar.*
|
||||
makepkg --noconfirm --clean --nodeps
|
||||
echo "[arch/package] Syncing project sources to ${BUILD_ROOT}..."
|
||||
# Keep it simple: copy everything; adjust excludes if needed later.
|
||||
rsync -a --delete \
|
||||
--exclude '.git' \
|
||||
--exclude '.venv' \
|
||||
--exclude '.venvs' \
|
||||
--exclude '__pycache__' \
|
||||
--exclude '*.pyc' \
|
||||
"${PROJECT_ROOT}/" "${BUILD_ROOT}/"
|
||||
|
||||
if [[ ! -d "${PKG_BUILD_DIR}" ]]; then
|
||||
echo "[arch/package] ERROR: Build PKG dir missing: ${PKG_BUILD_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Unprivileged user for Arch package build (makepkg)
|
||||
# ------------------------------------------------------------
|
||||
if ! id aur_builder >/dev/null 2>&1; then
|
||||
echo "[arch/package] ERROR: user 'aur_builder' not found. Run scripts/installation/arch/aur-builder-setup.sh first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[arch/package] Using 'aur_builder' user for makepkg..."
|
||||
chown -R aur_builder:aur_builder "${BUILD_ROOT}"
|
||||
|
||||
echo "[arch/package] Running makepkg in: ${PKG_BUILD_DIR}"
|
||||
su aur_builder -c "cd '${PKG_BUILD_DIR}' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
|
||||
|
||||
echo "[arch/package] Installing generated Arch package..."
|
||||
pacman -U --noconfirm package-manager-*.pkg.tar.*
|
||||
pkg_path="$(find "${PKG_BUILD_DIR}" -maxdepth 1 -type f -name 'package-manager-*.pkg.tar.*' | head -n1)"
|
||||
if [[ -z "${pkg_path}" ]]; then
|
||||
echo "[arch/package] ERROR: Built package not found in ${PKG_BUILD_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pacman -U --noconfirm "${pkg_path}"
|
||||
|
||||
echo "[arch/package] Cleanup build directory..."
|
||||
rm -rf "${BUILD_ROOT}"
|
||||
|
||||
echo "[arch/package] Done."
|
||||
|
||||
@@ -13,9 +13,64 @@ dnf -y install \
|
||||
bash \
|
||||
curl-minimal \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
sudo \
|
||||
xz
|
||||
|
||||
dnf clean all
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Persist CA bundle configuration system-wide (virgin-compatible)
|
||||
# -----------------------------------------------------------------------------
|
||||
detect_ca_bundle() {
|
||||
local candidates=(
|
||||
/etc/pki/tls/certs/ca-bundle.crt
|
||||
/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
|
||||
/etc/ssl/certs/ca-certificates.crt
|
||||
/etc/ssl/cert.pem
|
||||
/etc/ssl/ca-bundle.pem
|
||||
)
|
||||
|
||||
for path in "${candidates[@]}"; do
|
||||
if [[ -f "$path" ]]; then
|
||||
echo "$path"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
CA_BUNDLE="$(detect_ca_bundle || true)"
|
||||
|
||||
if [[ -n "${CA_BUNDLE}" ]]; then
|
||||
echo "[centos/dependencies] Persisting CA bundle: ${CA_BUNDLE}"
|
||||
|
||||
# 1) Make it available for login shells
|
||||
cat >/etc/profile.d/pkgmgr-ca.sh <<EOF
|
||||
# Generated by package-manager
|
||||
export NIX_SSL_CERT_FILE="${CA_BUNDLE}"
|
||||
export SSL_CERT_FILE="${CA_BUNDLE}"
|
||||
export REQUESTS_CA_BUNDLE="${CA_BUNDLE}"
|
||||
export GIT_SSL_CAINFO="${CA_BUNDLE}"
|
||||
EOF
|
||||
chmod 0644 /etc/profile.d/pkgmgr-ca.sh
|
||||
|
||||
# 2) Ensure Nix uses it even without environment variables
|
||||
mkdir -p /etc/nix
|
||||
if [[ -f /etc/nix/nix.conf ]]; then
|
||||
# Replace existing ssl-cert-file or append it
|
||||
if grep -qE '^\s*ssl-cert-file\s*=' /etc/nix/nix.conf; then
|
||||
sed -i "s|^\s*ssl-cert-file\s*=.*|ssl-cert-file = ${CA_BUNDLE}|" /etc/nix/nix.conf
|
||||
else
|
||||
echo "ssl-cert-file = ${CA_BUNDLE}" >>/etc/nix/nix.conf
|
||||
fi
|
||||
else
|
||||
echo "ssl-cert-file = ${CA_BUNDLE}" >/etc/nix/nix.conf
|
||||
fi
|
||||
|
||||
else
|
||||
echo "[centos/dependencies] WARNING: No CA bundle found after installing ca-certificates."
|
||||
fi
|
||||
|
||||
echo "[centos/dependencies] Done."
|
||||
|
||||
@@ -13,6 +13,8 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
bash \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-venv \
|
||||
xz-utils
|
||||
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
@@ -3,22 +3,19 @@ set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
source "${SCRIPT_DIR}/lib.sh"
|
||||
# shellcheck disable=SC1091
|
||||
source "${SCRIPT_DIR}/os_resolver.sh"
|
||||
|
||||
OS_ID="$(detect_os_id)"
|
||||
OS_ID="$(osr_get_os_id)"
|
||||
|
||||
echo "[run-dependencies] Detected OS: ${OS_ID}"
|
||||
|
||||
case "${OS_ID}" in
|
||||
arch|debian|ubuntu|fedora|centos)
|
||||
DEP_SCRIPT="${SCRIPT_DIR}/${OS_ID}/dependencies.sh"
|
||||
;;
|
||||
*)
|
||||
echo "[run-dependencies] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
if ! osr_is_supported "${OS_ID}"; then
|
||||
echo "[run-dependencies] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEP_SCRIPT="$(osr_script_path_for "${SCRIPT_DIR}" "${OS_ID}" "dependencies")"
|
||||
|
||||
if [[ ! -f "${DEP_SCRIPT}" ]]; then
|
||||
echo "[run-dependencies] Dependency script not found: ${DEP_SCRIPT}"
|
||||
15
scripts/installation/init.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
if [[ "${EUID:-$(id -u)}" -ne 0 ]]; then
|
||||
echo "[installation/install] Warning: Installation is just possible via root."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "[installation] Running as root (EUID=0)."
|
||||
echo "[installation] Install Package Dependencies..."
|
||||
bash scripts/installation/dependencies.sh
|
||||
echo "[installation] Install Distribution Package..."
|
||||
bash scripts/installation/package.sh
|
||||
echo "[installation] Root/system setup complete."
|
||||
exit 0
|
||||
@@ -1,12 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
detect_os_id() {
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "${ID:-unknown}"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# main.sh
|
||||
#
|
||||
# Developer / system setup entrypoint.
|
||||
#
|
||||
# Responsibilities:
|
||||
# - If inside a Nix shell (IN_NIX_SHELL=1):
|
||||
# * Skip venv creation and dependency installation
|
||||
# * Run `python3 main.py install`
|
||||
# - If running as root (EUID=0):
|
||||
# * Run system-level installer (run-package.sh)
|
||||
# - Otherwise (normal user):
|
||||
# * Create ~/.venvs/pkgmgr virtual environment if missing
|
||||
# * Install Python dependencies into that venv
|
||||
# * Append auto-activation to ~/.bashrc and ~/.zshrc
|
||||
# * Run `main.py install` using the venv Python
|
||||
# ------------------------------------------------------------
|
||||
|
||||
echo "[installation/main] Starting setup..."
|
||||
|
||||
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
cd "${PROJECT_ROOT}"
|
||||
|
||||
VENV_DIR="${HOME}/.venvs/pkgmgr"
|
||||
RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/activate"; if [ -n "${PS1:-}" ]; then echo "Global Python virtual environment '\''~/.venvs/pkgmgr'\'' activated."; fi; fi'
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# 1) Nix shell mode: do not touch venv, only run main.py install
|
||||
# ------------------------------------------------------------
|
||||
if [[ -n "${IN_NIX_SHELL:-}" ]]; then
|
||||
echo "[installation/main] Nix shell detected (IN_NIX_SHELL=1)."
|
||||
echo "[installation/main] Skipping virtualenv creation and dependency installation."
|
||||
echo "[installation/main] Running main.py install via system python3..."
|
||||
python3 main.py install
|
||||
echo "[installation/main] Setup finished (Nix mode)."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# 2) Root mode: system / distro-level installation
|
||||
# ------------------------------------------------------------
|
||||
if [[ "${EUID:-$(id -u)}" -eq 0 ]]; then
|
||||
echo "[installation/main] Running as root (EUID=0)."
|
||||
echo "[installation/main] Skipping user virtualenv and shell RC modifications."
|
||||
echo "[installation/main] Delegating to scripts/installation/run-package.sh..."
|
||||
bash scripts/installation/run-package.sh
|
||||
echo "[installation/main] Root/system setup complete."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# 3) Normal user mode: dev setup with venv
|
||||
# ------------------------------------------------------------
|
||||
|
||||
echo "[installation/main] Running in normal user mode (developer setup)."
|
||||
|
||||
echo "[installation/main] Ensuring main.py is executable..."
|
||||
chmod +x main.py || true
|
||||
|
||||
echo "[installation/main] Ensuring global virtualenv root: ${HOME}/.venvs"
|
||||
mkdir -p "${HOME}/.venvs"
|
||||
|
||||
echo "[installation/main] Creating/updating virtualenv via helper..."
|
||||
PKGMGR_VENV_DIR="${VENV_DIR}" bash scripts/installation/venv-create.sh
|
||||
|
||||
echo "[installation/main] Ensuring ~/.bashrc and ~/.zshrc exist..."
|
||||
touch "${HOME}/.bashrc" "${HOME}/.zshrc"
|
||||
|
||||
echo "[installation/main] Ensuring venv auto-activation is present in shell rc files..."
|
||||
for rc in "${HOME}/.bashrc" "${HOME}/.zshrc"; do
|
||||
if ! grep -qxF "${RC_LINE}" "$rc"; then
|
||||
echo "${RC_LINE}" >> "$rc"
|
||||
echo "[installation/main] Appended auto-activation to $rc"
|
||||
else
|
||||
echo "[installation/main] Auto-activation already present in $rc"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "[installation/main] Running main.py install via venv Python..."
|
||||
"${VENV_DIR}/bin/python" main.py install
|
||||
|
||||
echo
|
||||
echo "[installation/main] Developer setup complete."
|
||||
echo "Restart your shell (or run 'exec bash' or 'exec zsh') to activate the environment."
|
||||
82
scripts/installation/os_resolver.sh
Executable file
@@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# OsResolver (bash "class-style" module)
|
||||
# Centralizes OS detection + normalization + supported checks + script paths.
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
osr_detect_raw_id() {
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "${ID:-unknown}"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
}
|
||||
|
||||
osr_detect_id_like() {
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "${ID_LIKE:-}"
|
||||
else
|
||||
echo ""
|
||||
fi
|
||||
}
|
||||
|
||||
osr_normalize_id() {
|
||||
local raw="${1:-unknown}"
|
||||
local like="${2:-}"
|
||||
|
||||
# Explicit mapping first (bugfix: map manjaro -> arch everywhere)
|
||||
case "${raw}" in
|
||||
manjaro) echo "arch"; return 0 ;;
|
||||
esac
|
||||
|
||||
# Keep direct IDs when they are already supported
|
||||
case "${raw}" in
|
||||
arch|debian|ubuntu|fedora|centos) echo "${raw}"; return 0 ;;
|
||||
esac
|
||||
|
||||
# Fallback mapping via ID_LIKE for better portability
|
||||
# Example: many Arch derivatives expose ID_LIKE="arch"
|
||||
if [[ " ${like} " == *" arch "* ]]; then
|
||||
echo "arch"; return 0
|
||||
fi
|
||||
if [[ " ${like} " == *" debian "* ]]; then
|
||||
echo "debian"; return 0
|
||||
fi
|
||||
if [[ " ${like} " == *" fedora "* ]]; then
|
||||
echo "fedora"; return 0
|
||||
fi
|
||||
if [[ " ${like} " == *" rhel "* || " ${like} " == *" centos "* ]]; then
|
||||
echo "centos"; return 0
|
||||
fi
|
||||
|
||||
echo "${raw}"
|
||||
}
|
||||
|
||||
osr_get_os_id() {
|
||||
local raw like
|
||||
raw="$(osr_detect_raw_id)"
|
||||
like="$(osr_detect_id_like)"
|
||||
osr_normalize_id "${raw}" "${like}"
|
||||
}
|
||||
|
||||
osr_is_supported() {
|
||||
local id="${1:-unknown}"
|
||||
case "${id}" in
|
||||
arch|debian|ubuntu|fedora|centos) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
osr_script_path_for() {
|
||||
local script_dir="${1:?script_dir required}"
|
||||
local os_id="${2:?os_id required}"
|
||||
local kind="${3:?kind required}" # "dependencies" or "package"
|
||||
|
||||
echo "${script_dir}/${os_id}/${kind}.sh"
|
||||
}
|
||||
26
scripts/installation/package.sh
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
source "${SCRIPT_DIR}/os_resolver.sh"
|
||||
|
||||
OS_ID="$(osr_get_os_id)"
|
||||
|
||||
echo "[package] Detected OS: ${OS_ID}"
|
||||
|
||||
if ! osr_is_supported "${OS_ID}"; then
|
||||
echo "[package] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PKG_SCRIPT="$(osr_script_path_for "${SCRIPT_DIR}" "${OS_ID}" "package")"
|
||||
|
||||
if [[ ! -f "${PKG_SCRIPT}" ]]; then
|
||||
echo "[package] Package script not found: ${PKG_SCRIPT}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[package] Executing: ${PKG_SCRIPT}"
|
||||
exec bash "${PKG_SCRIPT}"
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
source "${SCRIPT_DIR}/lib.sh"
|
||||
|
||||
OS_ID="$(detect_os_id)"
|
||||
|
||||
# Map Manjaro to Arch
|
||||
if [[ "${OS_ID}" == "manjaro" ]]; then
|
||||
echo "[run-package] Mapping OS 'manjaro' → 'arch'"
|
||||
OS_ID="arch"
|
||||
fi
|
||||
|
||||
echo "[run-package] Detected OS: ${OS_ID}"
|
||||
|
||||
case "${OS_ID}" in
|
||||
arch|debian|ubuntu|fedora|centos)
|
||||
PKG_SCRIPT="${SCRIPT_DIR}/${OS_ID}/package.sh"
|
||||
;;
|
||||
*)
|
||||
echo "[run-package] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ ! -f "${PKG_SCRIPT}" ]]; then
|
||||
echo "[run-package] Package script not found: ${PKG_SCRIPT}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[run-package] Executing: ${PKG_SCRIPT}"
|
||||
exec bash "${PKG_SCRIPT}"
|
||||
@@ -14,6 +14,9 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
rsync \
|
||||
bash \
|
||||
curl \
|
||||
make \
|
||||
python3 \
|
||||
python3-venv \
|
||||
ca-certificates \
|
||||
xz-utils
|
||||
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# venv-create.sh
|
||||
#
|
||||
# Small helper to create/update a Python virtual environment for pkgmgr.
|
||||
#
|
||||
# Usage:
|
||||
# PKGMGR_VENV_DIR=/home/dev/.venvs/pkgmgr bash scripts/installation/venv-create.sh
|
||||
# or
|
||||
# bash scripts/installation/venv-create.sh /home/dev/.venvs/pkgmgr
|
||||
|
||||
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
cd "${PROJECT_ROOT}"
|
||||
|
||||
VENV_DIR="${PKGMGR_VENV_DIR:-${1:-${HOME}/.venvs/pkgmgr}}"
|
||||
|
||||
echo "[venv-create] Using VENV_DIR=${VENV_DIR}"
|
||||
|
||||
echo "[venv-create] Ensuring virtualenv parent directory exists..."
|
||||
mkdir -p "$(dirname "${VENV_DIR}")"
|
||||
|
||||
if [[ ! -d "${VENV_DIR}" ]]; then
|
||||
echo "[venv-create] Creating virtual environment at: ${VENV_DIR}"
|
||||
python3 -m venv "${VENV_DIR}"
|
||||
else
|
||||
echo "[venv-create] Virtual environment already exists at: ${VENV_DIR}"
|
||||
fi
|
||||
|
||||
echo "[venv-create] Installing Python tooling into venv..."
|
||||
"${VENV_DIR}/bin/python" -m ensurepip --upgrade
|
||||
"${VENV_DIR}/bin/pip" install --upgrade pip setuptools wheel
|
||||
|
||||
if [[ -f "requirements.txt" ]]; then
|
||||
echo "[venv-create] Installing dependencies from requirements.txt..."
|
||||
"${VENV_DIR}/bin/pip" install -r requirements.txt
|
||||
elif [[ -f "_requirements.txt" ]]; then
|
||||
echo "[venv-create] Installing dependencies from _requirements.txt..."
|
||||
"${VENV_DIR}/bin/pip" install -r _requirements.txt
|
||||
else
|
||||
echo "[venv-create] No requirements.txt or _requirements.txt found. Skipping dependency installation."
|
||||
fi
|
||||
|
||||
echo "[venv-create] Done."
|
||||
@@ -1,11 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Ensure NIX_CONFIG has our defaults if not already set
|
||||
if [[ -z "${NIX_CONFIG:-}" ]]; then
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
fi
|
||||
|
||||
FLAKE_DIR="/usr/lib/package-manager"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -28,11 +23,11 @@ if ! command -v nix >/dev/null 2>&1; then
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# If nix is still missing, try to run init-nix.sh once
|
||||
# If nix is still missing, try to run nix/init.sh once
|
||||
# ---------------------------------------------------------------------------
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
if [[ -x "${FLAKE_DIR}/init-nix.sh" ]]; then
|
||||
"${FLAKE_DIR}/init-nix.sh" || true
|
||||
if [[ -x "${FLAKE_DIR}/nix/init.sh" ]]; then
|
||||
"${FLAKE_DIR}/nix/init.sh" || true
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -43,6 +38,6 @@ if command -v nix >/dev/null 2>&1; then
|
||||
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
fi
|
||||
|
||||
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
|
||||
echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
|
||||
echo "[launcher] ERROR: 'nix' binary not found on PATH after init."
|
||||
echo "[launcher] Nix is required to run pkgmgr (no Python fallback)."
|
||||
exit 1
|
||||
53
scripts/nix/README.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# Nix Bootstrap (package-manager)
|
||||
|
||||
This directory contains the **Nix initialization and bootstrap logic** used by *package-manager* to ensure the `nix` command is available on supported systems (host machines and CI containers).
|
||||
|
||||
It is invoked during package installation (Arch/Debian/Fedora scriptlets) and can also be called manually.
|
||||
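For a manual run, a minimal sketch looks like this (root is only needed when a system-wide multi-user install is desired; the environment overrides are the ones defined in *lib/bootstrap_config.sh*):

```bash
# Bootstrap Nix on the current machine
sudo bash scripts/nix/init.sh

# Same, but with an explicit installer URL and a longer download timeout
NIX_INSTALL_URL="https://nixos.org/nix/install" \
NIX_DOWNLOAD_MAX_TIME=600 \
bash scripts/nix/init.sh
```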
|
||||
---
|
||||
|
||||
## Entry Point
|
||||
|
||||
- *scripts/nix/init.sh*
|
||||
Main bootstrap script. It:
|
||||
- checks whether `nix` is already available
|
||||
- adjusts `PATH` for common Nix locations
|
||||
- installs Nix when missing (daemon install on systemd hosts, single-user in containers)
|
||||
- ensures predictable `nix` availability via symlinks (without overwriting distro-managed paths)
|
||||
- validates that `nix` is usable at the end (CI-safe)
|
||||
|
||||
---
|
||||
|
||||
## Library Layout
|
||||
|
||||
The entry point sources small, focused modules from *scripts/nix/lib/*:
|
||||
|
||||
- *bootstrap_config.sh* — configuration defaults (installer URL, retry timing)
|
||||
- *detect.sh* — container detection helpers
|
||||
- *path.sh* — PATH adjustments and `nix` binary resolution helpers
|
||||
- *symlinks.sh* — user/global symlink helpers for stable `nix` discovery
|
||||
- *users.sh* — build group/users and container ownership/perms helpers
|
||||
- *install.sh* — installer download + retry logic and execution helpers
|
||||
|
||||
Each library file includes a simple guard to prevent double-sourcing.
|
||||
|
||||
---
|
||||
|
||||
## When It Runs
|
||||
|
||||
This bootstrap is typically executed automatically:
|
||||
|
||||
- Arch: post-install / post-upgrade hook
|
||||
- Debian: `postinst`
|
||||
- Fedora/RPM: `%post`
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Notes / Design Goals
|
||||
|
||||
- **Cross-distro compatibility:** supports common Linux layouts (including Arch placing `nix` in */usr/sbin*).
|
||||
- **Non-destructive behavior:** avoids overwriting distro-managed `nix` binaries.
|
||||
- **CI robustness:** retry logic for downloads and a final `nix` availability check.
|
||||
- **Container-safe defaults:** single-user install as a dedicated `nix` user when running as root in containers.
|
||||
|
||||
142
scripts/nix/init.sh
Executable file
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/bootstrap_config.sh
|
||||
source "${SCRIPT_DIR}/lib/bootstrap_config.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/detect.sh
|
||||
source "${SCRIPT_DIR}/lib/detect.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/path.sh
|
||||
source "${SCRIPT_DIR}/lib/path.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/symlinks.sh
|
||||
source "${SCRIPT_DIR}/lib/symlinks.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/users.sh
|
||||
source "${SCRIPT_DIR}/lib/users.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/install.sh
|
||||
source "${SCRIPT_DIR}/lib/install.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/nix_conf_file.sh
|
||||
source "${SCRIPT_DIR}/lib/nix_conf_file.sh"
|
||||
|
||||
echo "[init-nix] Starting Nix initialization..."
|
||||
|
||||
main() {
|
||||
# Fast path: already available
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
|
||||
ensure_nix_on_path
|
||||
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
nixconf_ensure_experimental_features
|
||||
ensure_global_nix_symlinks "$(resolve_nix_bin 2>/dev/null || true)"
|
||||
else
|
||||
ensure_user_nix_symlink "$(resolve_nix_bin 2>/dev/null || true)"
|
||||
fi
|
||||
|
||||
return 0
|
||||
fi
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix found after PATH adjustment: $(command -v nix)"
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
ensure_global_nix_symlinks "$(resolve_nix_bin 2>/dev/null || true)"
|
||||
else
|
||||
ensure_user_nix_symlink "$(resolve_nix_bin 2>/dev/null || true)"
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
|
||||
local IN_CONTAINER=0
|
||||
if is_container; then
|
||||
IN_CONTAINER=1
|
||||
echo "[init-nix] Detected container environment."
|
||||
else
|
||||
echo "[init-nix] No container detected."
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Container + root: dedicated "nix" user, single-user install
|
||||
# -------------------------------------------------------------------------
|
||||
if [[ "$IN_CONTAINER" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] Container + root: installing as 'nix' user (single-user)."
|
||||
|
||||
ensure_nix_build_group
|
||||
|
||||
if ! id nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating user 'nix'..."
|
||||
local BASH_SHELL
|
||||
BASH_SHELL="$(command -v bash || true)"
|
||||
[[ -z "$BASH_SHELL" ]] && BASH_SHELL="/bin/sh"
|
||||
useradd -m -r -g nixbld -s "$BASH_SHELL" nix
|
||||
fi
|
||||
|
||||
ensure_nix_store_dir_for_container_user
|
||||
|
||||
install_nix_with_retry "no-daemon" "nix"
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
# Ensure stable global symlink(s) (sudo secure_path friendly)
|
||||
ensure_global_nix_symlinks "/home/nix/.nix-profile/bin/nix"
|
||||
|
||||
# Ensure non-root users can traverse and execute nix user profile
|
||||
ensure_container_profile_perms
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Host (no container)
|
||||
# -------------------------------------------------------------------------
|
||||
else
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
echo "[init-nix] Host with systemd: using multi-user install (--daemon)."
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
ensure_nix_build_group
|
||||
fi
|
||||
install_nix_with_retry "daemon"
|
||||
else
|
||||
echo "[init-nix] No systemd detected: using single-user install (--no-daemon)."
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
ensure_nix_build_group
|
||||
fi
|
||||
install_nix_with_retry "no-daemon"
|
||||
fi
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# After install: PATH + symlink(s)
|
||||
# -------------------------------------------------------------------------
|
||||
ensure_nix_on_path
|
||||
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
nixconf_ensure_experimental_features
|
||||
fi
|
||||
|
||||
local nix_bin_post
|
||||
nix_bin_post="$(resolve_nix_bin 2>/dev/null || true)"
|
||||
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
ensure_global_nix_symlinks "$nix_bin_post"
|
||||
else
|
||||
ensure_user_nix_symlink "$nix_bin_post"
|
||||
fi
|
||||
|
||||
# Final verification (must succeed for CI)
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] ERROR: nix not found after installation."
|
||||
echo "[init-nix] DEBUG: resolved nix path = ${nix_bin_post:-<empty>}"
|
||||
echo "[init-nix] DEBUG: PATH = $PATH"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix successfully available at: $(command -v nix)"
|
||||
echo "[init-nix] Nix initialization complete."
|
||||
}
|
||||
|
||||
main "$@"
|
||||
11
scripts/nix/lib/bootstrap_config.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Prevent double-sourcing
|
||||
if [[ -n "${PKGMGR_NIX_CONFIG_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_CONFIG_SH=1
|
||||
|
||||
NIX_INSTALL_URL="${NIX_INSTALL_URL:-https://nixos.org/nix/install}"
|
||||
NIX_DOWNLOAD_MAX_TIME="${NIX_DOWNLOAD_MAX_TIME:-300}"
|
||||
NIX_DOWNLOAD_SLEEP_INTERVAL="${NIX_DOWNLOAD_SLEEP_INTERVAL:-20}"
|
||||
14
scripts/nix/lib/detect.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
if [[ -n "${PKGMGR_NIX_DETECT_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_DETECT_SH=1
|
||||
|
||||
# Detect whether we are inside a container (Docker/Podman/etc.)
|
||||
is_container() {
|
||||
[[ -f /.dockerenv || -f /run/.containerenv ]] && return 0
|
||||
grep -qiE 'docker|container|podman|lxc' /proc/1/cgroup 2>/dev/null && return 0
|
||||
[[ -n "${container:-}" ]] && return 0
|
||||
return 1
|
||||
}
|
||||
63
scripts/nix/lib/install.sh
Executable file
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
if [[ -n "${PKGMGR_NIX_INSTALL_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_INSTALL_SH=1
|
||||
|
||||
# Requires: NIX_INSTALL_URL, NIX_DOWNLOAD_MAX_TIME, NIX_DOWNLOAD_SLEEP_INTERVAL
|
||||
|
||||
# Download and run Nix installer with retry
|
||||
# Usage: install_nix_with_retry daemon|no-daemon [run_as_user]
|
||||
install_nix_with_retry() {
|
||||
local mode="$1"
|
||||
local run_as="${2:-}"
|
||||
local installer elapsed=0 mode_flag
|
||||
|
||||
case "$mode" in
|
||||
daemon) mode_flag="--daemon" ;;
|
||||
no-daemon) mode_flag="--no-daemon" ;;
|
||||
*)
|
||||
echo "[init-nix] ERROR: Invalid mode '$mode' (expected 'daemon' or 'no-daemon')."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
installer="$(mktemp -t nix-installer.XXXXXX)"
|
||||
chmod 0644 "$installer"
|
||||
|
||||
echo "[init-nix] Downloading Nix installer from $NIX_INSTALL_URL (max ${NIX_DOWNLOAD_MAX_TIME}s)..."
|
||||
|
||||
while true; do
|
||||
if curl -fL "$NIX_INSTALL_URL" -o "$installer"; then
|
||||
echo "[init-nix] Successfully downloaded installer to $installer"
|
||||
break
|
||||
fi
|
||||
|
||||
elapsed=$((elapsed + NIX_DOWNLOAD_SLEEP_INTERVAL))
|
||||
echo "[init-nix] WARNING: Download failed. Retrying in ${NIX_DOWNLOAD_SLEEP_INTERVAL}s (elapsed ${elapsed}s)..."
|
||||
|
||||
if (( elapsed >= NIX_DOWNLOAD_MAX_TIME )); then
|
||||
echo "[init-nix] ERROR: Giving up after ${elapsed}s trying to download Nix installer."
|
||||
rm -f "$installer"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep "$NIX_DOWNLOAD_SLEEP_INTERVAL"
|
||||
done
|
||||
|
||||
if [[ -n "$run_as" ]]; then
|
||||
chown "$run_as:$run_as" "$installer" 2>/dev/null || true
|
||||
echo "[init-nix] Running installer as user '$run_as' ($mode_flag)..."
|
||||
if command -v sudo >/dev/null 2>&1; then
|
||||
sudo -u "$run_as" bash -lc "sh '$installer' $mode_flag"
|
||||
else
|
||||
su - "$run_as" -c "sh '$installer' $mode_flag"
|
||||
fi
|
||||
else
|
||||
echo "[init-nix] Running installer as current user ($mode_flag)..."
|
||||
sh "$installer" "$mode_flag"
|
||||
fi
|
||||
|
||||
rm -f "$installer"
|
||||
}
|
||||
89
scripts/nix/lib/nix_conf_file.sh
Normal file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Prevent double-sourcing
|
||||
if [[ -n "${PKGMGR_NIX_CONF_FILE_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_CONF_FILE_SH=1
|
||||
|
||||
nixconf_file_path() {
|
||||
echo "/etc/nix/nix.conf"
|
||||
}
|
||||
|
||||
# Ensure a given nix.conf key contains required tokens (merged, no duplicates)
|
||||
nixconf_ensure_features_key() {
|
||||
local nix_conf="$1"
|
||||
local key="$2"
|
||||
shift 2
|
||||
local required=("$@")
|
||||
|
||||
mkdir -p /etc/nix
|
||||
|
||||
# Create file if missing (with just the required tokens)
|
||||
if [[ ! -f "${nix_conf}" ]]; then
|
||||
local want="${key} = ${required[*]}"
|
||||
echo "[nix-conf] Creating ${nix_conf} with: ${want}"
|
||||
printf "%s\n" "${want}" >"${nix_conf}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Key exists -> merge tokens
|
||||
if grep -qE "^\s*${key}\s*=" "${nix_conf}"; then
|
||||
local ok=1
|
||||
local t
|
||||
for t in "${required[@]}"; do
|
||||
if ! grep -qE "^\s*${key}\s*=.*\b${t}\b" "${nix_conf}"; then
|
||||
ok=0
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$ok" -eq 1 ]]; then
|
||||
echo "[nix-conf] ${key} already correct"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "[nix-conf] Extending ${key} in ${nix_conf}"
|
||||
|
||||
local current
|
||||
current="$(grep -E "^\s*${key}\s*=" "${nix_conf}" | head -n1 | cut -d= -f2-)"
|
||||
current="$(echo "${current}" | xargs)" # trim
|
||||
|
||||
local merged=""
|
||||
local token
|
||||
|
||||
# Start with existing tokens
|
||||
for token in ${current}; do
|
||||
if [[ " ${merged} " != *" ${token} "* ]]; then
|
||||
merged="${merged} ${token}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Add required tokens
|
||||
for token in "${required[@]}"; do
|
||||
if [[ " ${merged} " != *" ${token} "* ]]; then
|
||||
merged="${merged} ${token}"
|
||||
fi
|
||||
done
|
||||
|
||||
merged="$(echo "${merged}" | xargs)" # trim
|
||||
|
||||
sed -i "s|^\s*${key}\s*=.*|${key} = ${merged}|" "${nix_conf}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Key missing -> append
|
||||
local want="${key} = ${required[*]}"
|
||||
echo "[nix-conf] Appending to ${nix_conf}: ${want}"
|
||||
printf "\n%s\n" "${want}" >>"${nix_conf}"
|
||||
}
|
||||
|
||||
nixconf_ensure_experimental_features() {
|
||||
local nix_conf
|
||||
nix_conf="$(nixconf_file_path)"
|
||||
|
||||
# Ensure both keys to avoid prompts and cover older/alternate expectations
|
||||
nixconf_ensure_features_key "${nix_conf}" "experimental-features" "nix-command" "flakes"
|
||||
nixconf_ensure_features_key "${nix_conf}" "extra-experimental-features" "nix-command" "flakes"
|
||||
}
|
||||
68
scripts/nix/lib/path.sh
Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_PATH_SH:-}" ]]; then
    return 0
fi
PKGMGR_NIX_PATH_SH=1

# Ensure Nix binaries are on PATH (additive, never destructive)
ensure_nix_on_path() {
    if [[ -x /nix/var/nix/profiles/default/bin/nix ]]; then
        PATH="/nix/var/nix/profiles/default/bin:$PATH"
    fi
    if [[ -x "$HOME/.nix-profile/bin/nix" ]]; then
        PATH="$HOME/.nix-profile/bin:$PATH"
    fi
    if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
        PATH="/home/nix/.nix-profile/bin:$PATH"
    fi
    if [[ -d "$HOME/.local/bin" ]]; then
        PATH="$HOME/.local/bin:$PATH"
    fi
    export PATH
}

# Resolve a path to a real executable (follows symlinks)
real_exe() {
    local p="${1:-}"
    [[ -z "$p" ]] && return 1

    local r
    r="$(readlink -f "$p" 2>/dev/null || echo "$p")"

    [[ -x "$r" ]] && { echo "$r"; return 0; }
    return 1
}

# Resolve nix binary path robustly (works across distros + Arch /usr/sbin)
resolve_nix_bin() {
    local nix_cmd=""
    nix_cmd="$(command -v nix 2>/dev/null || true)"
    [[ -n "$nix_cmd" ]] && real_exe "$nix_cmd" && return 0

    # IMPORTANT: prefer system locations before /usr/local to avoid self-symlink traps
    [[ -x /usr/sbin/nix ]] && { echo "/usr/sbin/nix"; return 0; } # Arch package can land here
    [[ -x /usr/bin/nix ]] && { echo "/usr/bin/nix"; return 0; }
    [[ -x /bin/nix ]] && { echo "/bin/nix"; return 0; }

    # /usr/local last, and only if it resolves to a real executable
    [[ -e /usr/local/bin/nix ]] && real_exe "/usr/local/bin/nix" && return 0

    [[ -x /nix/var/nix/profiles/default/bin/nix ]] && {
        echo "/nix/var/nix/profiles/default/bin/nix"; return 0;
    }

    [[ -x "$HOME/.nix-profile/bin/nix" ]] && {
        echo "$HOME/.nix-profile/bin/nix"; return 0;
    }

    [[ -x "$HOME/.local/bin/nix" ]] && {
        echo "$HOME/.local/bin/nix"; return 0;
    }

    [[ -x /home/nix/.nix-profile/bin/nix ]] && {
        echo "/home/nix/.nix-profile/bin/nix"; return 0;
    }

    return 1
}
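A quick sketch of how the resolver is meant to be called; the sourcing path is an assumption:

# Hypothetical usage sketch:
source scripts/nix/lib/path.sh
ensure_nix_on_path
if nix_bin="$(resolve_nix_bin)"; then
    echo "nix resolved to: $nix_bin"
else
    echo "nix not found" >&2
fi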
52
scripts/nix/lib/retry_403.sh
Executable file
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -euo pipefail

if [[ -n "${PKGMGR_NIX_RETRY_403_SH:-}" ]]; then
    return 0
fi
PKGMGR_NIX_RETRY_403_SH=1

# Retry only when we see the GitHub API rate limit 403 error during nix flake evaluation.
# Retries 7 times with delays: 10, 30, 50, 80, 130, 210, 420 seconds.
run_with_github_403_retry() {
    local -a delays=(10 30 50 80 130 210 420)
    local attempt=0
    local max_retries="${#delays[@]}"

    while true; do
        local err tmp
        tmp="$(mktemp -t nix-err.XXXXXX)"
        err=0

        # Run the command; capture stderr for inspection while preserving stdout.
        if "$@" 2>"$tmp"; then
            rm -f "$tmp"
            return 0
        else
            err=$?
        fi

        # Only retry on the specific GitHub API rate limit 403 case.
        if grep -qE 'HTTP error 403' "$tmp" && grep -qiE 'API rate limit exceeded|api\.github\.com' "$tmp"; then
            if (( attempt >= max_retries )); then
                cat "$tmp" >&2
                rm -f "$tmp"
                return "$err"
            fi

            local sleep_s="${delays[$attempt]}"
            attempt=$((attempt + 1))

            echo "[nix-retry] GitHub API rate-limit (403). Retry ${attempt}/${max_retries} in ${sleep_s}s: $*" >&2
            cat "$tmp" >&2
            rm -f "$tmp"
            sleep "$sleep_s"
            continue
        fi

        # Not our retry case -> fail fast with original stderr.
        cat "$tmp" >&2
        rm -f "$tmp"
        return "$err"
    done
}
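For illustration, any nix invocation can be wrapped the same way the test scripts below do; the flake reference here is only a placeholder:

# Hypothetical usage sketch:
source scripts/nix/lib/retry_403.sh
run_with_github_403_retry nix flake show github:NixOS/nixpkgs --no-write-lock-file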
95
scripts/nix/lib/symlinks.sh
Executable file
@@ -0,0 +1,95 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_SYMLINKS_SH:-}" ]]; then
    return 0
fi
PKGMGR_NIX_SYMLINKS_SH=1

# Requires: real_exe, resolve_nix_bin
# shellcheck disable=SC2034

# Ensure globally reachable nix symlink(s) (CI / non-login shells) - root only
ensure_global_nix_symlinks() {
    local nix_bin="${1:-}"

    [[ -z "$nix_bin" ]] && nix_bin="$(resolve_nix_bin 2>/dev/null || true)"

    if [[ -z "$nix_bin" || ! -x "$nix_bin" ]]; then
        echo "[init-nix] WARNING: nix binary not found, cannot create global symlink(s)."
        return 0
    fi

    # Always link to the real executable to avoid /usr/local/bin/nix -> /usr/local/bin/nix
    nix_bin="$(real_exe "$nix_bin" 2>/dev/null || echo "$nix_bin")"

    local targets=()

    # Always provide /usr/local/bin/nix for CI shells
    mkdir -p /usr/local/bin 2>/dev/null || true
    targets+=("/usr/local/bin/nix")

    # Provide sudo-friendly locations only if they are NOT present (do not override distro paths)
    if [[ ! -e /usr/bin/nix ]]; then
        targets+=("/usr/bin/nix")
    fi
    if [[ ! -e /usr/sbin/nix ]]; then
        targets+=("/usr/sbin/nix")
    fi

    local target current_real
    for target in "${targets[@]}"; do
        current_real=""
        if [[ -e "$target" ]]; then
            current_real="$(real_exe "$target" 2>/dev/null || true)"
        fi

        if [[ -n "$current_real" && "$current_real" == "$nix_bin" ]]; then
            echo "[init-nix] $target already points to: $nix_bin"
            continue
        fi

        # If something exists but is not the same (and we promised not to override), skip.
        if [[ -e "$target" && "$target" != "/usr/local/bin/nix" ]]; then
            echo "[init-nix] WARNING: $target exists; not overwriting."
            continue
        fi

        if ln -sf "$nix_bin" "$target" 2>/dev/null; then
            echo "[init-nix] Ensured $target -> $nix_bin"
        else
            echo "[init-nix] WARNING: Failed to ensure $target symlink."
        fi
    done
}

# Ensure user-level nix symlink (works without root; CI-safe)
ensure_user_nix_symlink() {
    local nix_bin="${1:-}"

    [[ -z "$nix_bin" ]] && nix_bin="$(resolve_nix_bin 2>/dev/null || true)"

    if [[ -z "$nix_bin" || ! -x "$nix_bin" ]]; then
        echo "[init-nix] WARNING: nix binary not found, cannot create user symlink."
        return 0
    fi

    nix_bin="$(real_exe "$nix_bin" 2>/dev/null || echo "$nix_bin")"

    mkdir -p "$HOME/.local/bin" 2>/dev/null || true
    ln -sf "$nix_bin" "$HOME/.local/bin/nix"

    echo "[init-nix] Ensured $HOME/.local/bin/nix -> $nix_bin"

    PATH="$HOME/.local/bin:$PATH"
    export PATH

    if [[ -w "$HOME/.profile" ]] && ! grep -q 'nix/init.sh' "$HOME/.profile" 2>/dev/null; then
        cat >>"$HOME/.profile" <<'EOF'

# PATH for nix (added by package-manager nix/init.sh)
if [ -d "$HOME/.local/bin" ]; then
    PATH="$HOME/.local/bin:$PATH"
fi
EOF
    fi
}
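A sketch of the non-root path; note that this file depends on real_exe and resolve_nix_bin from path.sh, so the sourcing order below is an assumption:

# Hypothetical usage sketch:
source scripts/nix/lib/path.sh
source scripts/nix/lib/symlinks.sh
ensure_user_nix_symlink        # no root required
command -v nix                 # should now resolve via ~/.local/bin/nix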
49
scripts/nix/lib/users.sh
Executable file
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

if [[ -n "${PKGMGR_NIX_USERS_SH:-}" ]]; then
    return 0
fi
PKGMGR_NIX_USERS_SH=1

# Ensure Nix build group and users exist (build-users-group = nixbld) - root only
ensure_nix_build_group() {
    if ! getent group nixbld >/dev/null 2>&1; then
        echo "[init-nix] Creating group 'nixbld'..."
        groupadd -r nixbld
    fi

    for i in $(seq 1 10); do
        if ! id "nixbld$i" >/dev/null 2>&1; then
            echo "[init-nix] Creating build user nixbld$i..."
            useradd -r -g nixbld -G nixbld -s /usr/sbin/nologin "nixbld$i"
        fi
    done
}

# Container-only helper: /nix ownership + perms for single-user install as 'nix'
ensure_nix_store_dir_for_container_user() {
    if [[ ! -d /nix ]]; then
        echo "[init-nix] Creating /nix with owner nix:nixbld..."
        mkdir -m 0755 /nix
        chown nix:nixbld /nix
        return 0
    fi

    local current_owner current_group
    current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
    current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
    if [[ "$current_owner" != "nix" || "$current_group" != "nixbld" ]]; then
        echo "[init-nix] Fixing /nix ownership from $current_owner:$current_group to nix:nixbld..."
        chown -R nix:nixbld /nix
    fi
}

# Container-only helper: make nix profile executable/traversable for non-root
ensure_container_profile_perms() {
    if [[ -d /home/nix ]]; then
        chmod o+rx /home/nix 2>/dev/null || true
    fi
    if [[ -d /home/nix/.nix-profile ]]; then
        chmod -R o+rx /home/nix/.nix-profile 2>/dev/null || true
    fi
}
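A small verification sketch for the build group and users created above; running as root and the sourcing path are assumptions:

# Hypothetical verification sketch (run as root):
source scripts/nix/lib/users.sh
ensure_nix_build_group
getent group nixbld    # group exists
id nixbld1             # first build user exists
id nixbld10            # last build user exists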
11
scripts/setup/nix.sh
Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

# ------------------------------------------------------------
# Nix shell mode: do not touch venv, only run install
# ------------------------------------------------------------

echo "[setup] Nix mode enabled (NIX_ENABLED=1)."
echo "[setup] Skipping virtualenv creation and dependency installation."
echo "[setup] Running install via system python3..."
python3 -m pkgmgr install
echo "[setup] Setup finished (Nix mode)."
96
scripts/setup/venv.sh
Executable file
@@ -0,0 +1,96 @@
#!/usr/bin/env bash
set -euo pipefail

echo "[setup] Starting setup..."

PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

VENV_DIR="${HOME}/.venvs/pkgmgr"
# shellcheck disable=SC2016
RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/activate"; if [ -n "${PS1:-}" ]; then echo "Global Python virtual environment '\''~/.venvs/pkgmgr'\'' activated."; fi; fi'

# ------------------------------------------------------------
# Normal user mode: dev setup with venv
# ------------------------------------------------------------

echo "[setup] Running in normal user mode (developer setup)."

echo "[setup] Ensuring global virtualenv root: ${HOME}/.venvs"
mkdir -p "${HOME}/.venvs"

echo "[setup] Creating/updating virtualenv via helper..."
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${PROJECT_ROOT}"

PIP_EDITABLE="${PKGMGR_PIP_EDITABLE:-1}"
PIP_EXTRAS="${PKGMGR_PIP_EXTRAS:-}"
PREFER_NIX="${PKGMGR_PREFER_NIX:-0}"

echo "[venv] Using VENV_DIR=${VENV_DIR}"

if [[ "${PREFER_NIX}" == "1" ]]; then
    echo "[venv] PKGMGR_PREFER_NIX=1 set."
    echo "[venv] Hint: Use Nix instead of a venv for reproducible installs:"
    echo "[venv] nix develop"
    echo "[venv] nix run .#pkgmgr -- --help"
    exit 2
fi

echo "[venv] Ensuring virtualenv parent directory exists..."
mkdir -p "$(dirname "${VENV_DIR}")"

if [[ ! -d "${VENV_DIR}" ]]; then
    echo "[venv] Creating virtual environment at: ${VENV_DIR}"
    python3 -m venv "${VENV_DIR}"
else
    echo "[venv] Virtual environment already exists at: ${VENV_DIR}"
fi

echo "[venv] Installing Python tooling into venv..."
"${VENV_DIR}/bin/python" -m ensurepip --upgrade
"${VENV_DIR}/bin/pip" install --upgrade pip setuptools wheel

# ---------------------------------------------------------------------------
# Install dependencies
# ---------------------------------------------------------------------------
if [[ -f "pyproject.toml" ]]; then
    echo "[venv] Detected pyproject.toml. Installing project via pip..."

    target="."
    if [[ -n "${PIP_EXTRAS}" ]]; then
        target=".[${PIP_EXTRAS}]"
    fi

    if [[ "${PIP_EDITABLE}" == "1" ]]; then
        echo "[venv] pip install -e ${target}"
        "${VENV_DIR}/bin/pip" install -e "${target}"
    else
        echo "[venv] pip install ${target}"
        "${VENV_DIR}/bin/pip" install "${target}"
    fi
else
    echo "[venv] No pyproject.toml found. Skipping dependency installation."
fi

echo "[venv] Done."

echo "[setup] Ensuring ~/.bashrc and ~/.zshrc exist..."
touch "${HOME}/.bashrc" "${HOME}/.zshrc"

echo "[setup] Ensuring venv auto-activation is present in shell rc files..."
for rc in "${HOME}/.bashrc" "${HOME}/.zshrc"; do
    if ! grep -qxF "${RC_LINE}" "$rc"; then
        echo "${RC_LINE}" >> "$rc"
        echo "[setup] Appended auto-activation to $rc"
    else
        echo "[setup] Auto-activation already present in $rc"
    fi
done

echo "[setup] Running install via venv Python..."
"${VENV_DIR}/bin/python" -m pkgmgr install

echo
echo "[setup] Developer setup complete."
echo "Restart your shell (or run 'exec bash' or 'exec zsh') to activate the environment."
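The PKGMGR_* variables above act as knobs; a minimal invocation sketch, where the "dev" extra name is an assumed example rather than a documented extra:

# Hypothetical invocation sketch:
PKGMGR_PIP_EDITABLE=0 PKGMGR_PIP_EXTRAS=dev bash scripts/setup/venv.sh
# Or defer to Nix entirely:
PKGMGR_PREFER_NIX=1 bash scripts/setup/venv.sh   # exits 2 with a hint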
@@ -1,32 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

IMAGE="package-manager-test-$distro"

echo
echo "------------------------------------------------------------"
echo ">>> Testing container: $IMAGE"
echo "------------------------------------------------------------"
echo "[test-container] Inspect image metadata:"
docker image inspect "$IMAGE" | sed -n '1,40p'

echo "[test-container] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
echo

# Run the command and capture the output
if OUTPUT=$(docker run --rm \
    -e PKGMGR_DEV=1 \
    -v pkgmgr_nix_store_${distro}:/nix \
    -v "$(pwd):/src" \
    -v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
    "$IMAGE" 2>&1); then
    echo "$OUTPUT"
    echo
    echo "[test-container] SUCCESS: $IMAGE responded to 'pkgmgr --help'"

else
    echo "$OUTPUT"
    echo
    echo "[test-container] ERROR: $IMAGE failed to run 'pkgmgr --help'"
    exit 1
fi
@@ -2,17 +2,17 @@
set -euo pipefail

echo "============================================================"
echo ">>> Running E2E tests: $distro"
echo ">>> Running E2E tests: $PKGMGR_DISTRO"
echo "============================================================"

docker run --rm \
    -v "$(pwd):/src" \
    -v "pkgmgr_nix_store_${distro}:/nix" \
    -v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
    -e PKGMGR_DEV=1 \
    -v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
    -v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
    -e REINSTALL_PKGMGR=1 \
    -e TEST_PATTERN="${TEST_PATTERN}" \
    --workdir /src \
    "package-manager-test-${distro}" \
    "pkgmgr-${PKGMGR_DISTRO}" \
    bash -lc '
    set -euo pipefail

@@ -49,7 +49,7 @@ docker run --rm \
    # Gitdir path shown in the "dubious ownership" error
    git config --global --add safe.directory /src/.git || true
    # Ephemeral CI containers: allow all paths as a last resort
    git config --global --add safe.directory '*' || true
    git config --global --add safe.directory "*" || true
    fi

    # Run the E2E tests inside the Nix development shell

62
scripts/test/test-env-nix.sh
Executable file
@@ -0,0 +1,62 @@
#!/usr/bin/env bash
set -euo pipefail

IMAGE="pkgmgr-${PKGMGR_DISTRO}"

echo "============================================================"
echo ">>> Running Nix flake-only test in ${PKGMGR_DISTRO} container"
echo ">>> Image: ${IMAGE}"
echo "============================================================"

docker run --rm \
    -v "$(pwd):/src" \
    -v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
    -v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
    --workdir /src \
    -e REINSTALL_PKGMGR=1 \
    "${IMAGE}" \
    bash -lc '
    set -euo pipefail

    if command -v git >/dev/null 2>&1; then
        git config --global --add safe.directory /src || true
        git config --global --add safe.directory /src/.git || true
        git config --global --add safe.directory "*" || true
    fi

    echo ">>> preflight: nix must exist in image"
    if ! command -v nix >/dev/null 2>&1; then
        echo "NO_NIX"
        echo "ERROR: nix not found in image '"${IMAGE}"' (PKGMGR_DISTRO='"${PKGMGR_DISTRO}"')"
        echo "HINT: Ensure Nix is installed during image build for this distro."
        exit 1
    fi

    echo ">>> nix version"
    nix --version

    # ------------------------------------------------------------
    # Retry helper for GitHub API rate-limit (HTTP 403)
    # ------------------------------------------------------------
    if [[ -f /src/scripts/nix/lib/retry_403.sh ]]; then
        # shellcheck source=./scripts/nix/lib/retry_403.sh
        source /src/scripts/nix/lib/retry_403.sh
    elif [[ -f ./scripts/nix/lib/retry_403.sh ]]; then
        # shellcheck source=./scripts/nix/lib/retry_403.sh
        source ./scripts/nix/lib/retry_403.sh
    else
        echo "ERROR: retry helper not found: scripts/nix/lib/retry_403.sh"
        exit 1
    fi

    echo ">>> nix flake show"
    run_with_github_403_retry nix flake show . --no-write-lock-file >/dev/null

    echo ">>> nix build .#default"
    run_with_github_403_retry nix build .#default --no-link --no-write-lock-file

    echo ">>> nix run .#pkgmgr -- --help"
    run_with_github_403_retry nix run .#pkgmgr -- --help --no-write-lock-file

    echo ">>> OK: Nix flake-only test succeeded."
    '
32
scripts/test/test-env-virtual.sh
Executable file
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -euo pipefail

IMAGE="pkgmgr-$PKGMGR_DISTRO"

echo
echo "------------------------------------------------------------"
echo ">>> Testing VENV: $IMAGE"
echo "------------------------------------------------------------"
echo "[test-env-virtual] Inspect image metadata:"
docker image inspect "$IMAGE" | sed -n '1,40p'

echo "[test-env-virtual] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
echo

# Run the command and capture the output
if OUTPUT=$(docker run --rm \
    -e REINSTALL_PKGMGR=1 \
    -v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
    -v "$(pwd):/src" \
    -v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
    "$IMAGE" 2>&1); then
    echo "$OUTPUT"
    echo
    echo "[test-env-virtual] SUCCESS: $IMAGE responded to 'pkgmgr --help'"

else
    echo "$OUTPUT"
    echo
    echo "[test-env-virtual] ERROR: $IMAGE failed to run 'pkgmgr --help'"
    exit 1
fi
@@ -2,17 +2,17 @@
set -euo pipefail

echo "============================================================"
echo ">>> Running INTEGRATION tests in ${distro} container"
echo ">>> Running INTEGRATION tests in ${PKGMGR_DISTRO} container"
echo "============================================================"

docker run --rm \
    -v "$(pwd):/src" \
    -v pkgmgr_nix_store_${distro}:/nix \
    -v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
    -v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
    -v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
    --workdir /src \
    -e PKGMGR_DEV=1 \
    -e REINSTALL_PKGMGR=1 \
    -e TEST_PATTERN="${TEST_PATTERN}" \
    "package-manager-test-${distro}" \
    "pkgmgr-${PKGMGR_DISTRO}" \
    bash -lc '
    set -e;
    git config --global --add safe.directory /src || true;

@@ -2,17 +2,17 @@
set -euo pipefail

echo "============================================================"
echo ">>> Running UNIT tests in ${distro} container"
echo ">>> Running UNIT tests in ${PKGMGR_DISTRO} container"
echo "============================================================"

docker run --rm \
    -v "$(pwd):/src" \
    -v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
    -v pkgmgr_nix_store_${distro}:/nix \
    -v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
    -v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
    --workdir /src \
    -e PKGMGR_DEV=1 \
    -e REINSTALL_PKGMGR=1 \
    -e TEST_PATTERN="${TEST_PATTERN}" \
    "package-manager-test-${distro}" \
    "pkgmgr-${PKGMGR_DISTRO}" \
    bash -lc '
    set -e;
    git config --global --add safe.directory /src || true;

@@ -19,12 +19,20 @@ fi
# ------------------------------------------------------------
# Remove auto-activation lines from shell RC files
# ------------------------------------------------------------
RC_PATTERN='\.venvs\/pkgmgr\/bin\/activate"; if \[ -n "\$${PS1:-}" \]; then echo "Global Python virtual environment '\''~\/\.venvs\/pkgmgr'\'' activated."; fi; fi'
# Matches:
#   ~/.venvs/pkgmgr/bin/activate
#   ./.venvs/pkgmgr/bin/activate
RC_PATTERN='(\./)?\.venvs/pkgmgr/bin/activate'

echo "[uninstall] Cleaning up ~/.bashrc and ~/.zshrc entries..."
for rc in "$HOME/.bashrc" "$HOME/.zshrc"; do
    if [[ -f "$rc" ]]; then
        sed -i "/$RC_PATTERN/d" "$rc"
        # Remove activation lines (functional)
        sed -E -i "/$RC_PATTERN/d" "$rc"

        # Remove leftover echo / cosmetic lines referencing pkgmgr venv
        sed -i '/\.venvs\/pkgmgr/d' "$rc"

        echo "[uninstall] Cleaned $rc"
    else
        echo "[uninstall] File not found: $rc (skipped)"

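To see what the relaxed pattern matches, a small sketch with illustrative sample lines:

# Hypothetical check of the extended regex used above:
printf '%s\n' \
  '. "${HOME}/.venvs/pkgmgr/bin/activate"' \
  '. "./.venvs/pkgmgr/bin/activate"' \
  | grep -E '(\./)?\.venvs/pkgmgr/bin/activate'
# both lines match and would be deleted by the sed -E call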
5
src/pkgmgr/__main__.py
Executable file
@@ -0,0 +1,5 @@
#!/usr/bin/env python3
from pkgmgr.cli import main

if __name__ == "__main__":
    main()
@@ -1,235 +1,14 @@
# pkgmgr/actions/branch/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
High-level helpers for branch-related operations.

This module encapsulates the actual Git logic so the CLI layer
(pkgmgr.cli.commands.branch) stays thin and testable.
Public API for branch actions.
"""

from __future__ import annotations
from .open_branch import open_branch
from .close_branch import close_branch
from .drop_branch import drop_branch

from typing import Optional

from pkgmgr.core.git import run_git, GitError, get_current_branch


# ---------------------------------------------------------------------------
# Branch creation (open)
# ---------------------------------------------------------------------------

def open_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
) -> None:
    """
    Create and push a new feature branch on top of a base branch.

    The base branch is resolved by:
      1. Trying 'base_branch' (default: 'main')
      2. Falling back to 'fallback_base' (default: 'master')

    Steps:
      1) git fetch origin
      2) git checkout <resolved_base>
      3) git pull origin <resolved_base>
      4) git checkout -b <name>
      5) git push -u origin <name>

    If `name` is None or empty, the user is prompted to enter one.
    """

    # Request name interactively if not provided
    if not name:
        name = input("Enter new branch name: ").strip()

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    # Resolve which base branch to use (main or master)
    resolved_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    # 1) Fetch from origin
    try:
        run_git(["fetch", "origin"], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to fetch from origin before creating branch {name!r}: {exc}"
        ) from exc

    # 2) Checkout base branch
    try:
        run_git(["checkout", resolved_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to checkout base branch {resolved_base!r}: {exc}"
        ) from exc

    # 3) Pull latest changes for base branch
    try:
        run_git(["pull", "origin", resolved_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to pull latest changes for base branch {resolved_base!r}: {exc}"
        ) from exc

    # 4) Create new branch
    try:
        run_git(["checkout", "-b", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to create new branch {name!r} from base {resolved_base!r}: {exc}"
        ) from exc

    # 5) Push new branch to origin
    try:
        run_git(["push", "-u", "origin", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to push new branch {name!r} to origin: {exc}"
        ) from exc


# ---------------------------------------------------------------------------
# Base branch resolver (shared by open/close)
# ---------------------------------------------------------------------------

def _resolve_base_branch(
    preferred: str,
    fallback: str,
    cwd: str,
) -> str:
    """
    Resolve the base branch to use.

    Try `preferred` first (default: main),
    fall back to `fallback` (default: master).

    Raise RuntimeError if neither exists.
    """
    for candidate in (preferred, fallback):
        try:
            run_git(["rev-parse", "--verify", candidate], cwd=cwd)
            return candidate
        except GitError:
            continue

    raise RuntimeError(
        f"Neither {preferred!r} nor {fallback!r} exist in this repository."
    )


# ---------------------------------------------------------------------------
# Branch closing (merge + deletion)
# ---------------------------------------------------------------------------

def close_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
) -> None:
    """
    Merge a feature branch into the base branch and delete it afterwards.

    Steps:
      1) Determine the branch name (argument or current branch)
      2) Resolve base branch (main/master)
      3) Ask for confirmation
      4) git fetch origin
      5) git checkout <base>
      6) git pull origin <base>
      7) git merge --no-ff <name>
      8) git push origin <base>
      9) Delete branch locally
      10) Delete branch on origin (best effort)
    """

    # 1) Determine which branch should be closed
    if not name:
        try:
            name = get_current_branch(cwd=cwd)
        except GitError as exc:
            raise RuntimeError(f"Failed to detect current branch: {exc}") from exc

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    # 2) Resolve base branch
    target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    if name == target_base:
        raise RuntimeError(
            f"Refusing to close base branch {target_base!r}. "
            "Please specify a feature branch."
        )

    # 3) Ask user for confirmation
    prompt = (
        f"Merge branch '{name}' into '{target_base}' and delete it afterwards? "
        "(y/N): "
    )
    answer = input(prompt).strip().lower()
    if answer != "y":
        print("Aborted closing branch.")
        return

    # 4) Fetch from origin
    try:
        run_git(["fetch", "origin"], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to fetch from origin before closing branch {name!r}: {exc}"
        ) from exc

    # 5) Checkout base
    try:
        run_git(["checkout", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to checkout base branch {target_base!r}: {exc}"
        ) from exc

    # 6) Pull latest base state
    try:
        run_git(["pull", "origin", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
        ) from exc

    # 7) Merge the feature branch
    try:
        run_git(["merge", "--no-ff", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
        ) from exc

    # 8) Push updated base
    try:
        run_git(["push", "origin", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to push base branch {target_base!r} after merge: {exc}"
        ) from exc

    # 9) Delete branch locally
    try:
        run_git(["branch", "-d", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to delete local branch {name!r}: {exc}"
        ) from exc

    # 10) Delete branch on origin (best effort)
    try:
        run_git(["push", "origin", "--delete", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
        ) from exc
__all__ = [
    "open_branch",
    "close_branch",
    "drop_branch",
]

99
src/pkgmgr/actions/branch/close_branch.py
Normal file
@@ -0,0 +1,99 @@
from __future__ import annotations
from typing import Optional
from pkgmgr.core.git import run_git, GitError, get_current_branch
from .utils import _resolve_base_branch


def close_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
    force: bool = False,
) -> None:
    """
    Merge a feature branch into the base branch and delete it afterwards.
    """

    # Determine branch name
    if not name:
        try:
            name = get_current_branch(cwd=cwd)
        except GitError as exc:
            raise RuntimeError(f"Failed to detect current branch: {exc}") from exc

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    if name == target_base:
        raise RuntimeError(
            f"Refusing to close base branch {target_base!r}. "
            "Please specify a feature branch."
        )

    # Confirmation
    if not force:
        answer = input(
            f"Merge branch '{name}' into '{target_base}' and delete it afterwards? (y/N): "
        ).strip().lower()
        if answer != "y":
            print("Aborted closing branch.")
            return

    # Fetch
    try:
        run_git(["fetch", "origin"], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to fetch from origin before closing branch {name!r}: {exc}"
        ) from exc

    # Checkout base
    try:
        run_git(["checkout", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to checkout base branch {target_base!r}: {exc}"
        ) from exc

    # Pull latest
    try:
        run_git(["pull", "origin", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
        ) from exc

    # Merge
    try:
        run_git(["merge", "--no-ff", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
        ) from exc

    # Push result
    try:
        run_git(["push", "origin", target_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to push base branch {target_base!r} after merge: {exc}"
        ) from exc

    # Delete local
    try:
        run_git(["branch", "-d", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to delete local branch {name!r}: {exc}"
        ) from exc

    # Delete remote
    try:
        run_git(["push", "origin", "--delete", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Branch {name!r} deleted locally, but remote deletion failed: {exc}"
        ) from exc
55
src/pkgmgr/actions/branch/drop_branch.py
Normal file
@@ -0,0 +1,55 @@
from __future__ import annotations
from typing import Optional
from pkgmgr.core.git import run_git, GitError, get_current_branch
from .utils import _resolve_base_branch


def drop_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
    force: bool = False,
) -> None:
    """
    Delete a branch locally and remotely without merging.
    """

    if not name:
        try:
            name = get_current_branch(cwd=cwd)
        except GitError as exc:
            raise RuntimeError(f"Failed to detect current branch: {exc}") from exc

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    if name == target_base:
        raise RuntimeError(
            f"Refusing to drop base branch {target_base!r}. It cannot be deleted."
        )

    # Confirmation
    if not force:
        answer = input(
            f"Delete branch '{name}' locally and on origin? This is destructive! (y/N): "
        ).strip().lower()
        if answer != "y":
            print("Aborted dropping branch.")
            return

    # Local delete
    try:
        run_git(["branch", "-d", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(f"Failed to delete local branch {name!r}: {exc}") from exc

    # Remote delete
    try:
        run_git(["push", "origin", "--delete", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
        ) from exc
64
src/pkgmgr/actions/branch/open_branch.py
Normal file
@@ -0,0 +1,64 @@
from __future__ import annotations
from typing import Optional
from pkgmgr.core.git import run_git, GitError
from .utils import _resolve_base_branch


def open_branch(
    name: Optional[str],
    base_branch: str = "main",
    fallback_base: str = "master",
    cwd: str = ".",
) -> None:
    """
    Create and push a new feature branch on top of a base branch.
    """

    # Request name interactively if not provided
    if not name:
        name = input("Enter new branch name: ").strip()

    if not name:
        raise RuntimeError("Branch name must not be empty.")

    resolved_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)

    # 1) Fetch from origin
    try:
        run_git(["fetch", "origin"], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to fetch from origin before creating branch {name!r}: {exc}"
        ) from exc

    # 2) Checkout base branch
    try:
        run_git(["checkout", resolved_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to checkout base branch {resolved_base!r}: {exc}"
        ) from exc

    # 3) Pull latest changes
    try:
        run_git(["pull", "origin", resolved_base], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to pull latest changes for base branch {resolved_base!r}: {exc}"
        ) from exc

    # 4) Create new branch
    try:
        run_git(["checkout", "-b", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to create new branch {name!r} from base {resolved_base!r}: {exc}"
        ) from exc

    # 5) Push new branch
    try:
        run_git(["push", "-u", "origin", name], cwd=cwd)
    except GitError as exc:
        raise RuntimeError(
            f"Failed to push new branch {name!r} to origin: {exc}"
        ) from exc
27
src/pkgmgr/actions/branch/utils.py
Normal file
@@ -0,0 +1,27 @@
from __future__ import annotations
from pkgmgr.core.git import run_git, GitError


def _resolve_base_branch(
    preferred: str,
    fallback: str,
    cwd: str,
) -> str:
    """
    Resolve the base branch to use.

    Try `preferred` first (default: main),
    fall back to `fallback` (default: master).

    Raise RuntimeError if neither exists.
    """
    for candidate in (preferred, fallback):
        try:
            run_git(["rev-parse", "--verify", candidate], cwd=cwd)
            return candidate
        except GitError:
            continue

    raise RuntimeError(
        f"Neither {preferred!r} nor {fallback!r} exist in this repository."
    )
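_resolve_base_branch leans on `git rev-parse --verify`; for illustration, the equivalent shell probe of the main/master fallback looks like this (a sketch, not part of the repository):

# Hypothetical shell equivalent of the main/master fallback:
if git rev-parse --verify main >/dev/null 2>&1; then
    base=main
elif git rev-parse --verify master >/dev/null 2>&1; then
    base=master
else
    echo "Neither main nor master exists" >&2; exit 1
fi
echo "base branch: $base"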
@@ -1,5 +1,6 @@
import yaml
import os
from pkgmgr.core.config.save import save_user_config

def interactive_add(config,USER_CONFIG_PATH:str):
    """Interactively prompt the user to add a new repository entry to the user config."""

@@ -45,7 +45,7 @@ def config_init(
    # Announce where we will write the result
    # ------------------------------------------------------------
    print("============================================================")
    print(f"[INIT] Writing user configuration to:")
    print("[INIT] Writing user configuration to:")
    print(f" {user_config_path}")
    print("============================================================")

@@ -53,7 +53,7 @@ def config_init(
        defaults_config["directories"]["repositories"]
    )

    print(f"[INIT] Scanning repository base directory:")
    print("[INIT] Scanning repository base directory:")
    print(f" {repositories_base_dir}")
    print("")

@@ -173,7 +173,7 @@ def config_init(
    if new_entries:
        user_config.setdefault("repositories", []).extend(new_entries)
        save_user_config(user_config, user_config_path)
        print(f"[SAVE] Wrote user configuration to:")
        print("[SAVE] Wrote user configuration to:")
        print(f" {user_config_path}")
    else:
        print("[INFO] No new repositories were added.")

@@ -1,3 +1,4 @@
# src/pkgmgr/actions/install/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

@@ -15,7 +16,7 @@ Responsibilities:
from __future__ import annotations

import os
from typing import Any, Dict, List
from typing import Any, Dict, List, Optional

from pkgmgr.core.repository.identifier import get_repo_identifier
from pkgmgr.core.repository.dir import get_repo_dir
@@ -36,10 +37,8 @@ from pkgmgr.actions.install.installers.makefile import (
)
from pkgmgr.actions.install.pipeline import InstallationPipeline


Repository = Dict[str, Any]

# All available installers, in the order they should be considered.
INSTALLERS = [
    ArchPkgbuildInstaller(),
    DebianControlInstaller(),
@@ -50,11 +49,6 @@ INSTALLERS = [
]


# ---------------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------------


def _ensure_repo_dir(
    repo: Repository,
    repositories_base_dir: str,
@@ -63,7 +57,7 @@
    no_verification: bool,
    clone_mode: str,
    identifier: str,
) -> str | None:
) -> Optional[str]:
    """
    Compute and, if necessary, clone the repository directory.

@@ -74,7 +68,7 @@
    if not os.path.exists(repo_dir):
        print(
            f"Repository directory '{repo_dir}' does not exist. "
            f"Cloning it now..."
            "Cloning it now..."
        )
        clone_repos(
            [repo],
@@ -87,7 +81,7 @@
    if not os.path.exists(repo_dir):
        print(
            f"Cloning failed for repository {identifier}. "
            f"Skipping installation."
            "Skipping installation."
        )
        return None

@@ -137,6 +131,7 @@ def _create_context(
    quiet: bool,
    clone_mode: str,
    update_dependencies: bool,
    force_update: bool,
) -> RepoContext:
    """
    Build a RepoContext instance for the given repository.
@@ -153,14 +148,10 @@
        quiet=quiet,
        clone_mode=clone_mode,
        update_dependencies=update_dependencies,
        force_update=force_update,
    )


# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------


def install_repos(
    selected_repos: List[Repository],
    repositories_base_dir: str,
@@ -171,10 +162,14 @@ def install_repos(
    quiet: bool,
    clone_mode: str,
    update_dependencies: bool,
    force_update: bool = False,
) -> None:
    """
    Install one or more repositories according to the configured installers
    and the CLI layer precedence rules.

    If force_update=True, installers of the currently active layer are allowed
    to run again (upgrade/refresh), even if that layer is already loaded.
    """
    pipeline = InstallationPipeline(INSTALLERS)

@@ -213,6 +208,7 @@
            quiet=quiet,
            clone_mode=clone_mode,
            update_dependencies=update_dependencies,
            force_update=force_update,
        )

        pipeline.run(ctx)

@@ -35,7 +35,7 @@ from __future__ import annotations
import glob
import os
from abc import ABC, abstractmethod
from typing import Iterable, TYPE_CHECKING
from typing import Iterable, TYPE_CHECKING, Optional

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
@@ -46,7 +46,7 @@ if TYPE_CHECKING:
# ---------------------------------------------------------------------------


def _read_text_if_exists(path: str) -> str | None:
def _read_text_if_exists(path: str) -> Optional[str]:
    """Read a file as UTF-8 text, returning None if it does not exist or fails."""
    if not os.path.exists(path):
        return None
@@ -75,7 +75,7 @@ def _scan_files_for_patterns(files: Iterable[str], patterns: Iterable[str]) -> b
    return False


def _first_spec_file(repo_dir: str) -> str | None:
def _first_spec_file(repo_dir: str) -> Optional[str]:
    """Return the first *.spec file in repo_dir, if any."""
    matches = glob.glob(os.path.join(repo_dir, "*.spec"))
    if not matches:
@@ -360,7 +360,7 @@ def detect_capabilities(

def resolve_effective_capabilities(
    ctx: "RepoContext",
    layers: Iterable[str] | None = None,
    layers: Optional[Iterable[str]] = None,
) -> dict[str, set[str]]:
    """
    Resolve *effective* capabilities for each layer using a bottom-up strategy.

@@ -1,3 +1,4 @@
# src/pkgmgr/actions/install/context.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

@@ -28,3 +29,6 @@ class RepoContext:
    quiet: bool
    clone_mode: str
    update_dependencies: bool

    # If True, allow re-running installers of the currently active layer.
    force_update: bool = False

@@ -6,7 +6,7 @@ Base interface for all installer components in the pkgmgr installation pipeline.
"""

from abc import ABC, abstractmethod
from typing import Set
from typing import Set, Optional

from pkgmgr.actions.install.context import RepoContext
from pkgmgr.actions.install.capabilities import CAPABILITY_MATCHERS
@@ -24,7 +24,7 @@ class BaseInstaller(ABC):
    # Examples: "nix", "python", "makefile".
    # This is used by capability matchers to decide which patterns to
    # search for in the repository.
    layer: str | None = None
    layer: Optional[str] = None

    def discover_capabilities(self, ctx: RepoContext) -> Set[str]:
        """

@@ -1,3 +1,4 @@
# src/pkgmgr/actions/install/installers/makefile.py
from __future__ import annotations

import os
@@ -9,89 +10,45 @@ from pkgmgr.core.command.run import run_command


class MakefileInstaller(BaseInstaller):
    """
    Generic installer that runs `make install` if a Makefile with an
    install target is present.

    Safety rules:
      - If PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 is set, this installer
        is globally disabled.
      - The higher-level InstallationPipeline ensures that Makefile
        installation does not run if a stronger CLI layer already owns
        the command (e.g. Nix or OS packages).
    """

    layer = "makefile"
    MAKEFILE_NAME = "Makefile"

    def supports(self, ctx: RepoContext) -> bool:
        """
        Return True if this repository has a Makefile and the installer
        is not globally disabled.
        """
        # Optional global kill switch.
        if os.environ.get("PKGMGR_DISABLE_MAKEFILE_INSTALLER") == "1":
            if not ctx.quiet:
                print(
                    "[INFO] MakefileInstaller is disabled via "
                    "PKGMGR_DISABLE_MAKEFILE_INSTALLER."
                )
                print("[INFO] PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 – skipping MakefileInstaller.")
            return False

        makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
        return os.path.exists(makefile_path)

    def _has_install_target(self, makefile_path: str) -> bool:
        """
        Heuristically check whether the Makefile defines an install target.

        We look for:

          - a plain 'install:' target, or
          - any 'install-*:' style target.
        """
        try:
            with open(makefile_path, "r", encoding="utf-8", errors="ignore") as f:
                content = f.read()
        except OSError:
            return False

        # Simple heuristics: look for "install:" or targets starting with "install-"
        if re.search(r"^install\s*:", content, flags=re.MULTILINE):
            return True

        if re.search(r"^install-[a-zA-Z0-9_-]*\s*:", content, flags=re.MULTILINE):
            return True

        return False

    def run(self, ctx: RepoContext) -> None:
        """
        Execute `make install` in the repository directory if an install
        target exists.
        """
        makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)

        if not os.path.exists(makefile_path):
            if not ctx.quiet:
                print(
                    f"[pkgmgr] Makefile '{makefile_path}' not found, "
                    "skipping MakefileInstaller."
                )
            return

        if not self._has_install_target(makefile_path):
            if not ctx.quiet:
                print(
                    f"[pkgmgr] No 'install' target found in {makefile_path}."
                )
                print(f"[pkgmgr] No 'install' target found in {makefile_path}.")
            return

        if not ctx.quiet:
            print(
                f"[pkgmgr] Running 'make install' in {ctx.repo_dir} "
                f"(MakefileInstaller)"
            )
            print(f"[pkgmgr] Running make install for {ctx.identifier} (MakefileInstaller)")

        cmd = "make install"
        run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
        run_command("make install", cwd=ctx.repo_dir, preview=ctx.preview)

        if ctx.force_update and not ctx.quiet:
            print(f"[makefile] repo '{ctx.identifier}' successfully upgraded.")

@@ -1,32 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installer for Nix flakes.

If a repository contains flake.nix and the 'nix' command is available, this
installer will try to install profile outputs from the flake.

Behavior:
  - If flake.nix is present and `nix` exists on PATH:
      * First remove any existing `package-manager` profile entry (best-effort).
      * Then install one or more flake outputs via `nix profile install`.
  - For the package-manager repo:
      * `pkgmgr` is mandatory (CLI), `default` is optional.
  - For all other repos:
      * `default` is mandatory.

Special handling:
  - If PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 is set, the installer is
    globally disabled (useful for CI or debugging).

The higher-level InstallationPipeline and CLI-layer model decide when this
installer is allowed to run, based on where the current CLI comes from
(e.g. Nix, OS packages, Python, Makefile).
"""
from __future__ import annotations

import json
import os
import shutil
import subprocess
from typing import TYPE_CHECKING, List, Tuple

from pkgmgr.actions.install.installers.base import BaseInstaller
@@ -34,132 +14,225 @@ from pkgmgr.core.command.run import run_command

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
    from pkgmgr.actions.install import InstallContext


class NixFlakeInstaller(BaseInstaller):
    """Install Nix flake profiles for repositories that define flake.nix."""

    # Logical layer name, used by capability matchers.
    layer = "nix"

    FLAKE_FILE = "flake.nix"
    PROFILE_NAME = "package-manager"

    def supports(self, ctx: "RepoContext") -> bool:
        """
        Only support repositories that:
          - Are NOT explicitly disabled via PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1,
          - Have a flake.nix,
          - And have the `nix` command available.
        """
        # Optional global kill-switch for CI or debugging.
        if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
            print(
                "[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
                "NixFlakeInstaller is disabled."
            )
            if not ctx.quiet:
                print("[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – skipping NixFlakeInstaller.")
            return False

        # Nix must be available.
        if shutil.which("nix") is None:
            return False

        # flake.nix must exist in the repository.
        flake_path = os.path.join(ctx.repo_dir, self.FLAKE_FILE)
        return os.path.exists(flake_path)

    def _ensure_old_profile_removed(self, ctx: "RepoContext") -> None:
        """
        Best-effort removal of an existing profile entry.

        This handles the "already provides the following file" conflict by
        removing previous `package-manager` installations before we install
        the new one.

        Any error in `nix profile remove` is intentionally ignored, because
        a missing profile entry is not a fatal condition.
        """
        if shutil.which("nix") is None:
            return

        cmd = f"nix profile remove {self.PROFILE_NAME} || true"
        try:
            # NOTE: no allow_failure here → matches the existing unit tests
            run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
        except SystemExit:
            # Unit tests explicitly assert this is swallowed
            pass
        return os.path.exists(os.path.join(ctx.repo_dir, self.FLAKE_FILE))

    def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
        """
        Decide which flake outputs to install and whether failures are fatal.

        Returns a list of (output_name, allow_failure) tuples.

        Rules:
          - For the package-manager repo (identifier 'pkgmgr' or 'package-manager'):
              [("pkgmgr", False), ("default", True)]
          - For all other repos:
              [("default", False)]
        """
        ident = ctx.identifier

        if ident in {"pkgmgr", "package-manager"}:
            # pkgmgr: main CLI output is "pkgmgr" (mandatory),
            # "default" is nice-to-have (non-fatal).
        # (output_name, allow_failure)
        if ctx.identifier in {"pkgmgr", "package-manager"}:
            return [("pkgmgr", False), ("default", True)]

        # Generic repos: we expect a sensible "default" package/app.
        # Failure to install it is considered fatal.
        return [("default", False)]

    def run(self, ctx: "InstallContext") -> None:
        """
        Install Nix flake profile outputs.
    def _installable(self, ctx: "RepoContext", output: str) -> str:
        return f"{ctx.repo_dir}#{output}"

        For the package-manager repo, failure installing 'pkgmgr' is fatal,
        failure installing 'default' is non-fatal.
        For other repos, failure installing 'default' is fatal.
        """
        # Reuse supports() to keep logic in one place.
        if not self.supports(ctx):  # type: ignore[arg-type]
            return

        outputs = self._profile_outputs(ctx)  # list of (name, allow_failure)

        print(
            "Nix flake detected in "
            f"{ctx.identifier}, attempting to install profile outputs: "
            + ", ".join(name for name, _ in outputs)
    def _run(self, ctx: "RepoContext", cmd: str, allow_failure: bool = True):
        return run_command(
            cmd,
            cwd=ctx.repo_dir,
            preview=ctx.preview,
            allow_failure=allow_failure,
        )

        # Handle the "already installed" case up-front for the shared profile.
        self._ensure_old_profile_removed(ctx)  # type: ignore[arg-type]
    def _profile_list_json(self, ctx: "RepoContext") -> dict:
        """
        Read current Nix profile entries as JSON (best-effort).

        for output, allow_failure in outputs:
            cmd = f"nix profile install {ctx.repo_dir}#{output}"
            print(f"[INFO] Running: {cmd}")
            ret = os.system(cmd)
        NOTE: Nix versions differ:
          - Newer: {"elements": [ { "index": 0, "attrPath": "...", ... }, ... ]}
          - Older: {"elements": [ "nixpkgs#hello", ... ]} (strings)

            # Extract real exit code from os.system() result
            if os.WIFEXITED(ret):
                exit_code = os.WEXITSTATUS(ret)
            else:
                # abnormal termination (signal etc.) – keep raw value
                exit_code = ret
        We return {} on failure or in preview mode.
        """
        if ctx.preview:
            return {}

            if exit_code == 0:
                print(f"Nix flake output '{output}' successfully installed.")
        proc = subprocess.run(
            ["nix", "profile", "list", "--json"],
            check=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            env=os.environ.copy(),
        )
        if proc.returncode != 0:
            return {}

        try:
            return json.loads(proc.stdout or "{}")
        except json.JSONDecodeError:
            return {}

    def _find_installed_indices_for_output(self, ctx: "RepoContext", output: str) -> List[int]:
        """
        Find installed profile indices for a given output.

        Works across Nix JSON variants:
          - If elements are dicts: we can extract indices.
          - If elements are strings: we cannot extract indices -> return [].
        """
        data = self._profile_list_json(ctx)
        elements = data.get("elements", []) or []

        matches: List[int] = []

        for el in elements:
            # Legacy JSON format: plain strings -> no index information
            if not isinstance(el, dict):
                continue

            print(f"[Error] Failed to install Nix flake output '{output}'")
            print(f"[Error] Command exited with code {exit_code}")
            idx = el.get("index")
            if idx is None:
                continue

            if not allow_failure:
                raise SystemExit(exit_code)
            attr_path = el.get("attrPath") or el.get("attr_path") or ""
            pname = el.get("pname") or ""
            name = el.get("name") or ""

            if attr_path == output:
                matches.append(int(idx))
                continue

            if pname == output or name == output:
                matches.append(int(idx))
                continue

            if isinstance(attr_path, str) and attr_path.endswith(f".{output}"):
                matches.append(int(idx))
                continue

        return matches

    def _upgrade_index(self, ctx: "RepoContext", index: int) -> bool:
        cmd = f"nix profile upgrade --refresh {index}"
        if not ctx.quiet:
            print(f"[nix] upgrade: {cmd}")
        res = self._run(ctx, cmd, allow_failure=True)
        return res.returncode == 0

    def _remove_index(self, ctx: "RepoContext", index: int) -> None:
        cmd = f"nix profile remove {index}"
        if not ctx.quiet:
            print(f"[nix] remove: {cmd}")
        self._run(ctx, cmd, allow_failure=True)

    def _install_only(self, ctx: "RepoContext", output: str, allow_failure: bool) -> None:
        """
        Install output; on failure, try index-based upgrade/remove+install if possible.
        """
        installable = self._installable(ctx, output)
        install_cmd = f"nix profile install {installable}"

        if not ctx.quiet:
            print(f"[nix] install: {install_cmd}")

        res = self._run(ctx, install_cmd, allow_failure=True)
        if res.returncode == 0:
            if not ctx.quiet:
                print(f"[nix] output '{output}' successfully installed.")
            return

        if not ctx.quiet:
            print(
                "[Warning] Continuing despite failure to install "
                f"optional output '{output}'."
                f"[nix] install failed for '{output}' (exit {res.returncode}), "
                "trying index-based upgrade/remove+install..."
            )

        indices = self._find_installed_indices_for_output(ctx, output)

        # 1) Try upgrading existing indices (only possible on newer JSON format)
        upgraded = False
        for idx in indices:
            if self._upgrade_index(ctx, idx):
                upgraded = True
                if not ctx.quiet:
                    print(f"[nix] output '{output}' successfully upgraded (index {idx}).")

        if upgraded:
            return

        # 2) Remove matching indices and retry install
        if indices and not ctx.quiet:
            print(f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'.")

        for idx in indices:
            self._remove_index(ctx, idx)

        final = self._run(ctx, install_cmd, allow_failure=True)
        if final.returncode == 0:
            if not ctx.quiet:
                print(f"[nix] output '{output}' successfully re-installed.")
            return

        msg = f"[ERROR] Failed to install Nix flake output '{output}' (exit {final.returncode})"
        print(msg)

        if not allow_failure:
            raise SystemExit(final.returncode)

        print(f"[WARNING] Continuing despite failure of optional output '{output}'.")

    def _force_upgrade_output(self, ctx: "RepoContext", output: str, allow_failure: bool) -> None:
        """
        force_update path:
          - Prefer upgrading existing entries via indices (if we can discover them).
          - If no indices (legacy JSON) or upgrade fails, fall back to install-only logic.
        """
        indices = self._find_installed_indices_for_output(ctx, output)
|
||||
|
||||
upgraded_any = False
|
||||
for idx in indices:
|
||||
if self._upgrade_index(ctx, idx):
|
||||
upgraded_any = True
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] output '{output}' successfully upgraded (index {idx}).")
|
||||
|
||||
if upgraded_any:
|
||||
# Make upgrades visible to tests
|
||||
print(f"[nix] output '{output}' successfully upgraded.")
|
||||
return
|
||||
|
||||
if indices and not ctx.quiet:
|
||||
print(f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'.")
|
||||
|
||||
for idx in indices:
|
||||
self._remove_index(ctx, idx)
|
||||
|
||||
# Ensure installed (includes its own fallback logic)
|
||||
self._install_only(ctx, output, allow_failure)
|
||||
|
||||
# Make upgrades visible to tests (semantic: update requested)
|
||||
print(f"[nix] output '{output}' successfully upgraded.")
|
||||
|
||||
def run(self, ctx: "RepoContext") -> None:
|
||||
if not self.supports(ctx):
|
||||
return
|
||||
|
||||
outputs = self._profile_outputs(ctx)
|
||||
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
"[nix] flake detected in "
|
||||
f"{ctx.identifier}, ensuring outputs: "
|
||||
+ ", ".join(name for name, _ in outputs)
|
||||
)
|
||||
|
||||
for output, allow_failure in outputs:
|
||||
if ctx.force_update:
|
||||
self._force_upgrade_output(ctx, output, allow_failure)
|
||||
else:
|
||||
self._install_only(ctx, output, allow_failure)
|
||||
|
||||
@@ -17,7 +17,7 @@ apt/dpkg tooling are available.
|
||||
import glob
|
||||
import os
|
||||
import shutil
|
||||
from typing import List
|
||||
from typing import List, Optional
|
||||
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
@@ -67,7 +67,7 @@ class DebianControlInstaller(BaseInstaller):
|
||||
pattern = os.path.join(parent, "*.deb")
|
||||
return sorted(glob.glob(pattern))
|
||||
|
||||
def _privileged_prefix(self) -> str | None:
|
||||
def _privileged_prefix(self) -> Optional[str]:
|
||||
"""
|
||||
Determine how to run privileged commands:
|
||||
|
||||
|
||||
@@ -1,104 +1,40 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
PythonInstaller — install Python projects defined via pyproject.toml.
|
||||
|
||||
Installation rules:
|
||||
|
||||
1. pip command resolution:
|
||||
a) If PKGMGR_PIP is set → use it exactly as provided.
|
||||
b) Else if running inside a virtualenv → use `sys.executable -m pip`.
|
||||
c) Else → create/use a per-repository virtualenv under ~/.venvs/<repo>/.
|
||||
|
||||
2. Installation target:
|
||||
- Always install into the resolved pip environment.
|
||||
- Never modify system Python, never rely on --user.
|
||||
- Nix-immutable systems (PEP 668) are automatically avoided because we
|
||||
never touch system Python.
|
||||
|
||||
3. The installer is skipped when:
|
||||
- PKGMGR_DISABLE_PYTHON_INSTALLER=1 is set.
|
||||
- The repository has no pyproject.toml.
|
||||
|
||||
All pip failures are treated as fatal.
|
||||
"""
|
||||
|
||||
# src/pkgmgr/actions/install/installers/python.py
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install import InstallContext
|
||||
|
||||
|
||||
class PythonInstaller(BaseInstaller):
|
||||
"""Install Python projects and dependencies via pip using isolated environments."""
|
||||
|
||||
layer = "python"
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Installer activation logic
|
||||
# ----------------------------------------------------------------------
|
||||
def supports(self, ctx: "RepoContext") -> bool:
|
||||
"""
|
||||
Return True if this installer should handle this repository.
|
||||
|
||||
The installer is active only when:
|
||||
- A pyproject.toml exists in the repo, and
|
||||
- PKGMGR_DISABLE_PYTHON_INSTALLER is not set.
|
||||
"""
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
if os.environ.get("PKGMGR_DISABLE_PYTHON_INSTALLER") == "1":
|
||||
print("[INFO] PythonInstaller disabled via PKGMGR_DISABLE_PYTHON_INSTALLER.")
|
||||
return False
|
||||
|
||||
return os.path.exists(os.path.join(ctx.repo_dir, "pyproject.toml"))
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Virtualenv handling
|
||||
# ----------------------------------------------------------------------
|
||||
def _in_virtualenv(self) -> bool:
|
||||
"""Detect whether the current interpreter is inside a venv."""
|
||||
if os.environ.get("VIRTUAL_ENV"):
|
||||
return True
|
||||
|
||||
base = getattr(sys, "base_prefix", sys.prefix)
|
||||
return sys.prefix != base
|
||||
|
||||
def _ensure_repo_venv(self, ctx: "InstallContext") -> str:
|
||||
"""
|
||||
Ensure that ~/.venvs/<identifier>/ exists and contains a minimal venv.
|
||||
|
||||
Returns the venv directory path.
|
||||
"""
|
||||
def _ensure_repo_venv(self, ctx: RepoContext) -> str:
|
||||
venv_dir = os.path.expanduser(f"~/.venvs/{ctx.identifier}")
|
||||
python = sys.executable
|
||||
|
||||
if not os.path.isdir(venv_dir):
|
||||
print(f"[python-installer] Creating virtualenv: {venv_dir}")
|
||||
subprocess.check_call([python, "-m", "venv", venv_dir])
|
||||
if not os.path.exists(venv_dir):
|
||||
run_command(f"{python} -m venv {venv_dir}", preview=ctx.preview)
|
||||
|
||||
return venv_dir
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# pip command resolution
|
||||
# ----------------------------------------------------------------------
|
||||
def _pip_cmd(self, ctx: "InstallContext") -> str:
|
||||
"""
|
||||
Determine which pip command to use.
|
||||
|
||||
Priority:
|
||||
1. PKGMGR_PIP override given by user or automation.
|
||||
2. Active virtualenv → use sys.executable -m pip.
|
||||
3. Per-repository venv → ~/.venvs/<repo>/bin/pip
|
||||
"""
|
||||
def _pip_cmd(self, ctx: RepoContext) -> str:
|
||||
explicit = os.environ.get("PKGMGR_PIP", "").strip()
|
||||
if explicit:
|
||||
return explicit
|
||||
@@ -107,33 +43,19 @@ class PythonInstaller(BaseInstaller):
|
||||
return f"{sys.executable} -m pip"
|
||||
|
||||
venv_dir = self._ensure_repo_venv(ctx)
|
||||
pip_path = os.path.join(venv_dir, "bin", "pip")
|
||||
return pip_path
|
||||
return os.path.join(venv_dir, "bin", "pip")
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Execution
|
||||
# ----------------------------------------------------------------------
|
||||
def run(self, ctx: "InstallContext") -> None:
|
||||
"""
|
||||
Install the project defined by pyproject.toml.
|
||||
|
||||
Uses the resolved pip environment. Installation is isolated and never
|
||||
touches system Python.
|
||||
"""
|
||||
if not self.supports(ctx): # type: ignore[arg-type]
|
||||
return
|
||||
|
||||
pyproject = os.path.join(ctx.repo_dir, "pyproject.toml")
|
||||
if not os.path.exists(pyproject):
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
if not self.supports(ctx):
|
||||
return
|
||||
|
||||
print(f"[python-installer] Installing Python project for {ctx.identifier}...")
|
||||
|
||||
pip_cmd = self._pip_cmd(ctx)
|
||||
run_command(f"{pip_cmd} install .", cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
# Final install command: ALWAYS isolated, never system-wide.
|
||||
install_cmd = f"{pip_cmd} install ."
|
||||
|
||||
run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
if ctx.force_update:
|
||||
# test-visible marker
|
||||
print(f"[python-installer] repo '{ctx.identifier}' successfully upgraded.")
|
||||
|
||||
print(f"[python-installer] Installation finished for {ctx.identifier}.")
|
||||
|
||||
@@ -1,21 +1,9 @@
|
||||
# src/pkgmgr/actions/install/pipeline.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installation pipeline orchestration for repositories.
|
||||
|
||||
This module implements the "Setup Controller" logic:
|
||||
|
||||
1. Detect current CLI command for the repo (if any).
|
||||
2. Classify it into a layer (os-packages, nix, python, makefile).
|
||||
3. Iterate over installers in layer order:
|
||||
- Skip installers whose layer is weaker than an already-loaded one.
|
||||
- Run only installers that support() the repo and add new capabilities.
|
||||
- After each installer, re-resolve the command and update the layer.
|
||||
4. Maintain the repo["command"] field and create/update symlinks via create_ink().
|
||||
|
||||
The goal is to prevent conflicting installations and make the layering
|
||||
behaviour explicit and testable.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -36,34 +24,15 @@ from pkgmgr.core.command.resolve import resolve_command_for_repo
|
||||
|
||||
@dataclass
|
||||
class CommandState:
|
||||
"""
|
||||
Represents the current CLI state for a repository:
|
||||
|
||||
- command: absolute or relative path to the CLI entry point
|
||||
- layer: which conceptual layer this command belongs to
|
||||
"""
|
||||
|
||||
command: Optional[str]
|
||||
layer: Optional[CliLayer]
|
||||
|
||||
|
||||
class CommandResolver:
|
||||
"""
|
||||
Small helper responsible for resolving the current command for a repo
|
||||
and mapping it into a CommandState.
|
||||
"""
|
||||
|
||||
def __init__(self, ctx: RepoContext) -> None:
|
||||
self._ctx = ctx
|
||||
|
||||
def resolve(self) -> CommandState:
|
||||
"""
|
||||
Resolve the current command for this repository.
|
||||
|
||||
If resolve_command_for_repo raises SystemExit (e.g. Python package
|
||||
without installed entry point), we treat this as "no command yet"
|
||||
from the point of view of the installers.
|
||||
"""
|
||||
repo = self._ctx.repo
|
||||
identifier = self._ctx.identifier
|
||||
repo_dir = self._ctx.repo_dir
|
||||
@@ -85,28 +54,10 @@ class CommandResolver:
|
||||
|
||||
|
||||
class InstallationPipeline:
|
||||
"""
|
||||
High-level orchestrator that applies a sequence of installers
|
||||
to a repository based on CLI layer precedence.
|
||||
"""
|
||||
|
||||
def __init__(self, installers: Sequence[BaseInstaller]) -> None:
|
||||
self._installers = list(installers)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Execute the installation pipeline for a single repository.
|
||||
|
||||
- Detect initial command & layer.
|
||||
- Optionally create a symlink.
|
||||
- Run installers in order, skipping those whose layer is weaker
|
||||
than an already-loaded CLI.
|
||||
- After each installer, re-resolve the command and refresh the
|
||||
symlink if needed.
|
||||
"""
|
||||
repo = ctx.repo
|
||||
repo_dir = ctx.repo_dir
|
||||
identifier = ctx.identifier
|
||||
@@ -119,7 +70,6 @@ class InstallationPipeline:
|
||||
resolver = CommandResolver(ctx)
|
||||
state = resolver.resolve()
|
||||
|
||||
# Persist initial command (if any) and create a symlink.
|
||||
if state.command:
|
||||
repo["command"] = state.command
|
||||
create_ink(
|
||||
@@ -135,11 +85,9 @@ class InstallationPipeline:
|
||||
|
||||
provided_capabilities: Set[str] = set()
|
||||
|
||||
# Main installer loop
|
||||
for installer in self._installers:
|
||||
layer_name = getattr(installer, "layer", None)
|
||||
|
||||
# Installers without a layer participate without precedence logic.
|
||||
if layer_name is None:
|
||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||
continue
|
||||
@@ -147,42 +95,33 @@ class InstallationPipeline:
|
||||
try:
|
||||
installer_layer = CliLayer(layer_name)
|
||||
except ValueError:
|
||||
# Unknown layer string → treat as lowest priority.
|
||||
installer_layer = None
|
||||
|
||||
# "Previous/Current layer already loaded?"
|
||||
if state.layer is not None and installer_layer is not None:
|
||||
current_prio = layer_priority(state.layer)
|
||||
installer_prio = layer_priority(installer_layer)
|
||||
|
||||
if current_prio < installer_prio:
|
||||
# Current CLI comes from a higher-priority layer,
|
||||
# so we skip this installer entirely.
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Skipping installer "
|
||||
"[pkgmgr] Skipping installer "
|
||||
f"{installer.__class__.__name__} for {identifier} – "
|
||||
f"CLI already provided by layer {state.layer.value!r}."
|
||||
)
|
||||
continue
|
||||
|
||||
if current_prio == installer_prio:
|
||||
# Same layer already provides a CLI; usually there is no
|
||||
# need to run another installer on top of it.
|
||||
if current_prio == installer_prio and not ctx.force_update:
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Skipping installer "
|
||||
"[pkgmgr] Skipping installer "
|
||||
f"{installer.__class__.__name__} for {identifier} – "
|
||||
f"layer {installer_layer.value!r} is already loaded."
|
||||
)
|
||||
continue
|
||||
|
||||
# Check if this installer is applicable at all.
|
||||
if not installer.supports(ctx):
|
||||
continue
|
||||
|
||||
# Capabilities: if everything this installer would provide is already
|
||||
# covered, we can safely skip it.
|
||||
caps = installer.discover_capabilities(ctx)
|
||||
if caps and caps.issubset(provided_capabilities):
|
||||
if not quiet:
|
||||
@@ -193,18 +132,22 @@ class InstallationPipeline:
|
||||
continue
|
||||
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or set()})..."
|
||||
)
|
||||
if ctx.force_update and state.layer is not None and installer_layer == state.layer:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' (upgrade requested)..."
|
||||
)
|
||||
else:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or set()})..."
|
||||
)
|
||||
|
||||
# Run the installer with error reporting.
|
||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||
|
||||
provided_capabilities.update(caps)
|
||||
|
||||
# After running an installer, re-resolve the command and layer.
|
||||
new_state = resolver.resolve()
|
||||
if new_state.command:
|
||||
repo["command"] = new_state.command
|
||||
@@ -221,9 +164,6 @@ class InstallationPipeline:
|
||||
|
||||
state = new_state
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ------------------------------------------------------------------
|
||||
@staticmethod
|
||||
def _run_installer(
|
||||
installer: BaseInstaller,
|
||||
@@ -232,9 +172,6 @@ class InstallationPipeline:
|
||||
repo_dir: str,
|
||||
quiet: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Execute a single installer with unified error handling.
|
||||
"""
|
||||
try:
|
||||
installer.run(ctx)
|
||||
except SystemExit as exc:
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""
|
||||
High-level mirror actions.
|
||||
|
||||
@@ -10,6 +8,7 @@ Public API:
|
||||
- setup_mirrors
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from .types import Repository, MirrorMap
|
||||
from .list_cmd import list_mirrors
|
||||
from .diff_cmd import diff_mirrors
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import List, Optional, Set
|
||||
|
||||
from pkgmgr.core.command.run import run_command
|
||||
from pkgmgr.core.git import GitError, run_git
|
||||
from typing import List, Optional, Set
|
||||
|
||||
from .types import MirrorMap, RepoMirrorContext, Repository
|
||||
|
||||
@@ -150,7 +150,7 @@ def ensure_origin_remote(
|
||||
current = current_origin_url(repo_dir)
|
||||
if current == url or not url:
|
||||
print(
|
||||
f"[INFO] 'origin' already points to "
|
||||
"[INFO] 'origin' already points to "
|
||||
f"{current or '<unknown>'} (no change needed)."
|
||||
)
|
||||
else:
|
||||
|
||||
@@ -2,7 +2,7 @@ from __future__ import annotations
|
||||
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
from typing import List, Mapping
|
||||
from typing import Mapping
|
||||
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
|
||||
@@ -1,14 +1,121 @@
|
||||
# src/pkgmgr/actions/mirror/setup_cmd.py
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
from pkgmgr.core.git import GitError, run_git
|
||||
from pkgmgr.core.remote_provisioning import ProviderHint, RepoSpec, ensure_remote_repo
|
||||
from pkgmgr.core.remote_provisioning.ensure import EnsureOptions
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url, ensure_origin_remote
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
|
||||
"""
|
||||
Probe a remote mirror URL using `git ls-remote`.
|
||||
|
||||
Returns:
|
||||
(True, "") on success,
|
||||
(False, error_message) on failure.
|
||||
"""
|
||||
try:
|
||||
run_git(["ls-remote", url], cwd=repo_dir)
|
||||
return True, ""
|
||||
except GitError as exc:
|
||||
return False, str(exc)
|
||||
|
||||
|
||||
def _host_from_git_url(url: str) -> str:
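    # Examples (illustrative inputs, not part of the original module):
    #   "https://github.com/owner/repo.git" -> "github.com"
    #   "git@github.com:owner/repo.git"     -> "github.com"
    #   "ssh://git@host.example:2222/o/r"   -> "host.example:2222"  (port kept)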
|
||||
url = (url or "").strip()
|
||||
if not url:
|
||||
return ""
|
||||
|
||||
if "://" in url:
|
||||
parsed = urlparse(url)
|
||||
netloc = (parsed.netloc or "").strip()
|
||||
if "@" in netloc:
|
||||
netloc = netloc.split("@", 1)[1]
|
||||
# keep optional :port
|
||||
return netloc
|
||||
|
||||
# scp-like: git@host:owner/repo.git
|
||||
if "@" in url and ":" in url:
|
||||
after_at = url.split("@", 1)[1]
|
||||
host = after_at.split(":", 1)[0]
|
||||
return host.strip()
|
||||
|
||||
return url.split("/", 1)[0].strip()
|
||||
|
||||
def _ensure_remote_repository(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that the remote repository exists using provider APIs.
|
||||
|
||||
This is ONLY called when ensure_remote=True.
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_mirrors = ctx.resolved_mirrors
|
||||
|
||||
primary_url = determine_primary_remote_url(repo, resolved_mirrors)
|
||||
if not primary_url:
|
||||
print("[INFO] No remote URL could be derived; skipping remote provisioning.")
|
||||
return
|
||||
|
||||
# IMPORTANT:
|
||||
# - repo["provider"] is typically a provider *kind* (e.g. "github" / "gitea"),
|
||||
# NOT a hostname. We derive the actual host from the remote URL.
|
||||
host = _host_from_git_url(primary_url)
|
||||
owner = repo.get("account")
|
||||
name = repo.get("repository")
|
||||
|
||||
if not host or not owner or not name:
|
||||
print("[WARN] Missing host/account/repository; cannot ensure remote repo.")
|
||||
print(f" host={host!r}, account={owner!r}, repository={name!r}")
|
||||
return
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
print(f"[REMOTE ENSURE] {ctx.identifier}")
|
||||
print(f"[REMOTE ENSURE] host: {host}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
spec = RepoSpec(
|
||||
host=str(host),
|
||||
owner=str(owner),
|
||||
name=str(name),
|
||||
private=bool(repo.get("private", True)),
|
||||
description=str(repo.get("description", "")),
|
||||
)
|
||||
|
||||
provider_kind = str(repo.get("provider", "")).strip().lower() or None
|
||||
|
||||
try:
|
||||
result = ensure_remote_repo(
|
||||
spec,
|
||||
provider_hint=ProviderHint(kind=provider_kind),
|
||||
options=EnsureOptions(
|
||||
preview=preview,
|
||||
interactive=True,
|
||||
allow_prompt=True,
|
||||
save_prompt_token_to_keyring=True,
|
||||
),
|
||||
)
|
||||
print(f"[REMOTE ENSURE] {result.status.upper()}: {result.message}")
|
||||
if result.url:
|
||||
print(f"[REMOTE ENSURE] URL: {result.url}")
|
||||
except Exception as exc: # noqa: BLE001
|
||||
# Keep action layer resilient
|
||||
print(f"[ERROR] Remote provisioning failed: {exc}")
|
||||
|
||||
print()
|
||||
|
||||
|
||||
def _setup_local_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
@@ -16,7 +123,8 @@ def _setup_local_mirrors_for_repo(
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure local Git state is sane (currently: 'origin' remote).
|
||||
Local setup:
|
||||
- Ensure 'origin' remote exists and is sane
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
@@ -29,103 +137,68 @@ def _setup_local_mirrors_for_repo(
|
||||
print()
|
||||
|
||||
|
||||
def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
    """
    Probe a remote mirror by running `git ls-remote <url>`.

    Returns:
        (True, "") on success,
        (False, error_message) on failure.

    Important:
    - Only the exit code is evaluated.
    - STDERR may contain hints/warnings and is NOT automatically an error.
    """
    try:
        # We ignore stdout entirely; all that matters is that the command
        # completes without GitError (i.e. exit code 0).
        run_git(["ls-remote", url], cwd=repo_dir)
        return True, ""
    except GitError as exc:
        return False, str(exc)
|
||||
|
||||
|
||||
def _setup_remote_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
ensure_remote: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Remote-side setup / validation.
|
||||
|
||||
    Currently only **non-destructive checks** are performed:
    Default behavior:
    - Non-destructive checks using `git ls-remote`.

    - For each mirror (from config + MIRRORS file, the file wins):
      * `git ls-remote <url>` is executed.
      * Exit code 0 → [OK]
      * On error → [WARN] + details from the GitError exception

    No provider APIs are called and no repositories are created.
|
||||
Optional behavior:
|
||||
- If ensure_remote=True:
|
||||
* Attempt to create missing repositories via provider API
|
||||
* Uses TokenResolver (ENV -> keyring -> prompt)
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_m = ctx.resolved_mirrors
|
||||
resolved_mirrors = ctx.resolved_mirrors
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
print(f"[MIRROR SETUP:REMOTE] {ctx.identifier}")
|
||||
print(f"[MIRROR SETUP:REMOTE] dir: {ctx.repo_dir}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
if not resolved_m:
|
||||
        # Optional: fall back to a heuristically determined URL in case we
        # ever want to implement "create automatically".
|
||||
primary_url = determine_primary_remote_url(repo, resolved_m)
|
||||
if ensure_remote:
|
||||
_ensure_remote_repository(
|
||||
repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
if not resolved_mirrors:
|
||||
primary_url = determine_primary_remote_url(repo, resolved_mirrors)
|
||||
if not primary_url:
|
||||
print(
|
||||
"[INFO] No mirrors configured (config or MIRRORS file), and no "
|
||||
"primary URL could be derived from provider/account/repository."
|
||||
)
|
||||
print("[INFO] No mirrors configured and no primary URL available.")
|
||||
print()
|
||||
return
|
||||
|
||||
ok, error_message = _probe_mirror(primary_url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror (primary) is reachable: {primary_url}")
|
||||
print(f"[OK] primary: {primary_url}")
|
||||
else:
|
||||
print("[WARN] Primary remote URL is NOT reachable:")
|
||||
print(f" {primary_url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
print(f"[WARN] primary: {primary_url}")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
return
|
||||
|
||||
    # Normal case: we have named mirrors from config/MIRRORS
|
||||
for name, url in sorted(resolved_m.items()):
|
||||
for name, url in sorted(resolved_mirrors.items()):
|
||||
ok, error_message = _probe_mirror(url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror '{name}' is reachable: {url}")
|
||||
print(f"[OK] {name}: {url}")
|
||||
else:
|
||||
print(f"[WARN] Remote mirror '{name}' is NOT reachable:")
|
||||
print(f" {url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
print(f"[WARN] {name}: {url}")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
|
||||
|
||||
def setup_mirrors(
|
||||
@@ -135,22 +208,25 @@ def setup_mirrors(
|
||||
preview: bool = False,
|
||||
local: bool = True,
|
||||
remote: bool = True,
|
||||
ensure_remote: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Setup mirrors for the selected repositories.
|
||||
|
||||
local:
|
||||
- Configure local Git remotes (currently: ensure 'origin' is present and
|
||||
points to a reasonable URL).
|
||||
- Configure local Git remotes (ensure 'origin' exists).
|
||||
|
||||
remote:
|
||||
- Non-destructive remote checks using `git ls-remote` for each mirror URL.
|
||||
      No repositories are created on the provider.
|
||||
- Non-destructive remote checks using `git ls-remote`.
|
||||
|
||||
ensure_remote:
|
||||
- If True, attempt to create missing remote repositories via provider APIs.
|
||||
- This is explicit and NEVER enabled implicitly.
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
if local:
|
||||
_setup_local_mirrors_for_repo(
|
||||
repo,
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
@@ -158,8 +234,9 @@ def setup_mirrors(
|
||||
|
||||
if remote:
|
||||
_setup_remote_mirrors_for_repo(
|
||||
repo,
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
ensure_remote=ensure_remote,
|
||||
)
|
||||
|
||||
218
src/pkgmgr/actions/release/README.md
Normal file
@@ -0,0 +1,218 @@
|
||||
# Release Action
|
||||
|
||||
This module implements the `pkgmgr release` workflow.
|
||||
|
||||
It provides a controlled, reproducible release process that:
|
||||
- bumps the project version
|
||||
- updates all supported packaging formats
|
||||
- creates and pushes Git tags
|
||||
- optionally maintains a floating `latest` tag
|
||||
- optionally closes the current branch
|
||||
|
||||
The implementation is intentionally explicit and conservative to avoid
|
||||
accidental releases or broken Git states.
|
||||
|
||||
---
|
||||
|
||||
## What the Release Command Does
|
||||
|
||||
A release performs the following high-level steps:
|
||||
|
||||
1. Synchronize the current branch with its upstream (fast-forward only)
|
||||
2. Determine the next semantic version
|
||||
3. Update all versioned files
|
||||
4. Commit the release
|
||||
5. Create and push a version tag
|
||||
6. Optionally update and push the floating `latest` tag
|
||||
7. Optionally close the current branch
|
||||
|
||||
All steps support **preview (dry-run)** mode.
|
||||
|
||||
---
|
||||
|
||||
## Supported Files Updated During a Release
|
||||
|
||||
If present, the following files are updated automatically:
|
||||
|
||||
- `pyproject.toml`
|
||||
- `CHANGELOG.md`
|
||||
- `flake.nix`
|
||||
- `PKGBUILD`
|
||||
- `package-manager.spec`
|
||||
- `debian/changelog`
|
||||
|
||||
Missing files are skipped gracefully.
|
||||
|
||||
---
|
||||
|
||||
## Git Safety Rules
|
||||
|
||||
The release workflow enforces strict Git safety guarantees:
|
||||
|
||||
- A `git pull --ff-only` is executed **before any file modifications**
|
||||
- No merge commits are ever created automatically
|
||||
- Only the current branch and the newly created version tag are pushed
|
||||
- `git push --tags` is intentionally **not** used
|
||||
- The floating `latest` tag is force-pushed only when required
|
||||
|
||||
---
|
||||
|
||||
## Semantic Versioning

The next version is calculated from existing Git tags:

- Tags must follow the format `vX.Y.Z`
- The release type controls the version bump:
  - `patch`
  - `minor`
  - `major`

The new tag is always created as an **annotated tag**.
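
For illustration, the bump rule amounts to the following sketch (the names here are illustrative, not the project's actual `versioning` helpers):

```python
# Illustrative bump rule for tags of the form vX.Y.Z.
def bump(tag: str, release_type: str) -> str:
    major, minor, patch = (int(p) for p in tag.lstrip("v").split("."))
    if release_type == "major":
        major, minor, patch = major + 1, 0, 0
    elif release_type == "minor":
        minor, patch = minor + 1, 0
    else:  # "patch"
        patch += 1
    return f"v{major}.{minor}.{patch}"

# bump("v1.2.3", "patch") -> "v1.2.4"
# bump("v1.2.3", "minor") -> "v1.3.0"
# bump("v1.2.3", "major") -> "v2.0.0"
```
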
|
||||
|
||||
---
|
||||
|
||||
## Floating `latest` Tag

The floating `latest` tag is handled explicitly:

- `latest` is updated **only if** the new version is the highest existing version
- Version comparison uses natural version sorting (`sort -V`)
- `latest` always points to the commit behind the version tag
- Updating `latest` uses a forced push by design

This guarantees that `latest` always represents the highest released version,
never an older release.
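
As a sketch, the "highest version" decision is a natural version comparison; the actual implementation shells out to `sort -V`, but the effect is roughly:

```python
# Rough Python equivalent of comparing against `git tag --list 'v*' | sort -V`.
def is_highest(new_tag: str, existing_tags: list[str]) -> bool:
    def key(tag: str) -> tuple[int, ...]:
        return tuple(int(p) for p in tag.lstrip("v").split("."))
    return all(key(new_tag) >= key(t) for t in existing_tags)

# is_highest("v0.10.0", ["v0.9.0", "v0.9.1"]) -> True   (moves `latest`)
# is_highest("v0.9.2",  ["v0.10.0"])          -> False  (keeps `latest` where it is)
```
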
|
||||
|
||||
---
|
||||
|
||||
## Preview Mode

Preview mode (`--preview`) performs a full dry-run:

- No files are modified
- No Git commands are executed
- All intended actions are printed

Example preview output includes:

- version bump
- file updates
- commit message
- tag creation
- branch and tag pushes
- `latest` update (if applicable)
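
For a hypothetical patch release on `main`, a preview run prints output along these lines (version numbers and the file list depend on the repository):

```
Release mode: PREVIEW
Current version: 1.2.3
New version: 1.2.4 (patch)
[PREVIEW] Would run: git add pyproject.toml
[PREVIEW] Would run: git add CHANGELOG.md
[PREVIEW] Would run: git commit -am "Release version 1.2.4"
[PREVIEW] Would run: git tag -a v1.2.4 -m "Release version 1.2.4"
[PREVIEW] Would run: git push origin main
[PREVIEW] Would run: git push origin v1.2.4
[PREVIEW] Would run: git tag -f -a latest v1.2.4^{} -m "Floating latest tag for v1.2.4"
[PREVIEW] Would run: git push origin latest --force
```
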
|
||||
|
||||
---
|
||||
|
||||
## Interactive vs Forced Mode
|
||||
|
||||
### Interactive (default)
|
||||
|
||||
1. Run a preview
|
||||
2. Ask for confirmation
|
||||
3. Execute the real release
|
||||
|
||||
### Forced (`--force`)
|
||||
|
||||
- Skips preview and confirmation
|
||||
- Skips branch deletion prompts
|
||||
- Executes the release immediately
|
||||
|
||||
---
|
||||
|
||||
## Branch Closing (`--close`)
|
||||
|
||||
When `--close` is enabled:
|
||||
|
||||
- `main` and `master` are **never** deleted
|
||||
- Other branches:
|
||||
- prompt for confirmation (`y/N`)
|
||||
- can be skipped using `--force`
|
||||
- Branch deletion happens **only after** a successful release
|
||||
|
||||
---
|
||||
|
||||
## Execution Flow (ASCII Diagram)
|
||||
|
||||
```
|
||||
|
||||
+---------------------+
|
||||
| pkgmgr release |
|
||||
+----------+----------+
|
||||
|
|
||||
v
|
||||
+---------------------+
|
||||
| Detect branch |
|
||||
+----------+----------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| git fetch / pull --ff-only |
|
||||
+----------+-------------------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| Determine next version |
|
||||
+----------+-------------------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| Update versioned files |
|
||||
+----------+-------------------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| Commit release |
|
||||
+----------+-------------------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| Create version tag (vX.Y.Z) |
|
||||
+----------+-------------------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| Push branch + version tag |
|
||||
+----------+-------------------+
|
||||
|
|
||||
v
|
||||
+---------------------------------------+
|
||||
| Is this the highest version? |
|
||||
+----------+----------------------------+
|
||||
|
|
||||
yes | no
|
||||
|
|
||||
v
|
||||
+------------------------------+ +----------------------+
|
||||
| Update & push `latest` tag | | Skip `latest` update |
|
||||
+----------+-------------------+ +----------------------+
|
||||
|
|
||||
v
|
||||
+------------------------------+
|
||||
| Close branch (optional) |
|
||||
+------------------------------+
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Design Goals
|
||||
|
||||
- Deterministic and reproducible releases
|
||||
- No implicit Git side effects
|
||||
- Explicit tag handling
|
||||
- Safe defaults for interactive usage
|
||||
- Automation-friendly forced mode
|
||||
- Clear separation of concerns:
|
||||
- `workflow.py` – orchestration
|
||||
- `git_ops.py` – Git operations
|
||||
- `prompts.py` – user interaction
|
||||
- `versioning.py` – SemVer logic
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
`pkgmgr release` is a **deliberately strict** release mechanism.
|
||||
|
||||
It trades convenience for safety, traceability, and correctness — making it
|
||||
suitable for both interactive development workflows and fully automated CI/CD
|
||||
@@ -1,310 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Release helper for pkgmgr (public entry point).
|
||||
|
||||
This package provides the high-level `release()` function used by the
|
||||
pkgmgr CLI to perform versioned releases:
|
||||
|
||||
- Determine the next semantic version based on existing Git tags.
|
||||
- Update pyproject.toml with the new version.
|
||||
- Update additional packaging files (flake.nix, PKGBUILD,
|
||||
debian/changelog, RPM spec) where present.
|
||||
- Prepend a basic entry to CHANGELOG.md.
|
||||
- Move the floating 'latest' tag to the newly created release tag so
|
||||
the newest release is always marked as latest.
|
||||
|
||||
Additional behaviour:
|
||||
- If `preview=True` (from --preview), no files are written and no
|
||||
Git commands are executed. Instead, a detailed summary of the
|
||||
planned changes and commands is printed.
|
||||
- If `preview=False` and not forced, the release is executed in two
|
||||
phases:
|
||||
1) Preview-only run (dry-run).
|
||||
2) Interactive confirmation, then real release if confirmed.
|
||||
This confirmation can be skipped with the `force=True` flag.
|
||||
- Before creating and pushing tags, main/master is updated from origin
|
||||
when the release is performed on one of these branches.
|
||||
- If `close=True` is used and the current branch is not main/master,
|
||||
the branch will be closed via branch_commands.close_branch() after
|
||||
a successful release.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
from typing import Optional
|
||||
|
||||
from pkgmgr.core.git import get_current_branch, GitError
|
||||
from pkgmgr.actions.branch import close_branch
|
||||
|
||||
from .versioning import determine_current_version, bump_semver
|
||||
from .git_ops import run_git_command, sync_branch_with_remote, update_latest_tag
|
||||
from .files import (
|
||||
update_pyproject_version,
|
||||
update_flake_version,
|
||||
update_pkgbuild_version,
|
||||
update_spec_version,
|
||||
update_changelog,
|
||||
update_debian_changelog,
|
||||
update_spec_changelog,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal implementation (single-phase, preview or real)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _release_impl(
|
||||
pyproject_path: str = "pyproject.toml",
|
||||
changelog_path: str = "CHANGELOG.md",
|
||||
release_type: str = "patch",
|
||||
message: Optional[str] = None,
|
||||
preview: bool = False,
|
||||
close: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Internal implementation that performs a single-phase release.
|
||||
"""
|
||||
current_ver = determine_current_version()
|
||||
new_ver = bump_semver(current_ver, release_type)
|
||||
new_ver_str = str(new_ver)
|
||||
new_tag = new_ver.to_tag(with_prefix=True)
|
||||
|
||||
mode = "PREVIEW" if preview else "REAL"
|
||||
print(f"Release mode: {mode}")
|
||||
print(f"Current version: {current_ver}")
|
||||
print(f"New version: {new_ver_str} ({release_type})")
|
||||
|
||||
repo_root = os.path.dirname(os.path.abspath(pyproject_path))
|
||||
|
||||
# Update core project metadata and packaging files
|
||||
update_pyproject_version(pyproject_path, new_ver_str, preview=preview)
|
||||
changelog_message = update_changelog(
|
||||
changelog_path,
|
||||
new_ver_str,
|
||||
message=message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
flake_path = os.path.join(repo_root, "flake.nix")
|
||||
update_flake_version(flake_path, new_ver_str, preview=preview)
|
||||
|
||||
pkgbuild_path = os.path.join(repo_root, "PKGBUILD")
|
||||
update_pkgbuild_version(pkgbuild_path, new_ver_str, preview=preview)
|
||||
|
||||
spec_path = os.path.join(repo_root, "package-manager.spec")
|
||||
update_spec_version(spec_path, new_ver_str, preview=preview)
|
||||
|
||||
# Determine a single effective_message to be reused across all
|
||||
# changelog targets (project, Debian, Fedora).
|
||||
effective_message: Optional[str] = message
|
||||
if effective_message is None and isinstance(changelog_message, str):
|
||||
if changelog_message.strip():
|
||||
effective_message = changelog_message.strip()
|
||||
|
||||
debian_changelog_path = os.path.join(repo_root, "debian", "changelog")
|
||||
package_name = os.path.basename(repo_root) or "package-manager"
|
||||
|
||||
# Debian changelog
|
||||
update_debian_changelog(
|
||||
debian_changelog_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
# Fedora / RPM %changelog
|
||||
update_spec_changelog(
|
||||
spec_path=spec_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
commit_msg = f"Release version {new_ver_str}"
|
||||
tag_msg = effective_message or commit_msg
|
||||
|
||||
# Determine branch and ensure it is up to date if main/master
|
||||
try:
|
||||
branch = get_current_branch() or "main"
|
||||
except GitError:
|
||||
branch = "main"
|
||||
print(f"Releasing on branch: {branch}")
|
||||
|
||||
# Ensure main/master are up-to-date from origin before creating and
|
||||
# pushing tags. For other branches we only log the intent.
|
||||
sync_branch_with_remote(branch, preview=preview)
|
||||
|
||||
files_to_add = [
|
||||
pyproject_path,
|
||||
changelog_path,
|
||||
flake_path,
|
||||
pkgbuild_path,
|
||||
spec_path,
|
||||
debian_changelog_path,
|
||||
]
|
||||
existing_files = [p for p in files_to_add if p and os.path.exists(p)]
|
||||
|
||||
if preview:
|
||||
for path in existing_files:
|
||||
print(f"[PREVIEW] Would run: git add {path}")
|
||||
print(f'[PREVIEW] Would run: git commit -am "{commit_msg}"')
|
||||
print(f'[PREVIEW] Would run: git tag -a {new_tag} -m "{tag_msg}"')
|
||||
print(f"[PREVIEW] Would run: git push origin {branch}")
|
||||
print("[PREVIEW] Would run: git push origin --tags")
|
||||
|
||||
# Also update the floating 'latest' tag to the new highest SemVer.
|
||||
update_latest_tag(new_tag, preview=True)
|
||||
|
||||
if close and branch not in ("main", "master"):
|
||||
print(
|
||||
f"[PREVIEW] Would also close branch {branch} after the release "
|
||||
"(close=True and branch is not main/master)."
|
||||
)
|
||||
elif close:
|
||||
print(
|
||||
f"[PREVIEW] close=True but current branch is {branch}; "
|
||||
"no branch would be closed."
|
||||
)
|
||||
|
||||
print("Preview completed. No changes were made.")
|
||||
return
|
||||
|
||||
for path in existing_files:
|
||||
run_git_command(f"git add {path}")
|
||||
|
||||
run_git_command(f'git commit -am "{commit_msg}"')
|
||||
run_git_command(f'git tag -a {new_tag} -m "{tag_msg}"')
|
||||
run_git_command(f"git push origin {branch}")
|
||||
run_git_command("git push origin --tags")
|
||||
|
||||
# Move 'latest' to the new release tag so the newest SemVer is always
|
||||
# marked as latest. This is best-effort and must not break the release.
|
||||
try:
|
||||
update_latest_tag(new_tag, preview=False)
|
||||
except GitError as exc: # pragma: no cover
|
||||
print(
|
||||
f"[WARN] Failed to update floating 'latest' tag for {new_tag}: {exc}\n"
|
||||
"[WARN] The release itself completed successfully; only the "
|
||||
"'latest' tag was not updated."
|
||||
)
|
||||
|
||||
print(f"Release {new_ver_str} completed.")
|
||||
|
||||
if close:
|
||||
if branch in ("main", "master"):
|
||||
print(
|
||||
f"[INFO] close=True but current branch is {branch}; "
|
||||
"nothing to close."
|
||||
)
|
||||
return
|
||||
|
||||
print(
|
||||
f"[INFO] Closing branch {branch} after successful release "
|
||||
"(close=True and branch is not main/master)..."
|
||||
)
|
||||
try:
|
||||
close_branch(name=branch, base_branch="main", cwd=".")
|
||||
except Exception as exc: # pragma: no cover
|
||||
print(f"[WARN] Failed to close branch {branch} automatically: {exc}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public release entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def release(
|
||||
pyproject_path: str = "pyproject.toml",
|
||||
changelog_path: str = "CHANGELOG.md",
|
||||
release_type: str = "patch",
|
||||
message: Optional[str] = None,
|
||||
preview: bool = False,
|
||||
force: bool = False,
|
||||
close: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
High-level release entry point.
|
||||
|
||||
Modes:
|
||||
|
||||
- preview=True:
|
||||
* Single-phase PREVIEW only.
|
||||
|
||||
- preview=False, force=True:
|
||||
* Single-phase REAL release, no interactive preview.
|
||||
|
||||
- preview=False, force=False:
|
||||
* Two-phase flow (intended default for interactive CLI use).
|
||||
"""
|
||||
if preview:
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=True,
|
||||
close=close,
|
||||
)
|
||||
return
|
||||
|
||||
if force:
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
)
|
||||
return
|
||||
|
||||
if not sys.stdin.isatty():
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
)
|
||||
return
|
||||
|
||||
print("[INFO] Running preview before actual release...\n")
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=True,
|
||||
close=close,
|
||||
)
|
||||
|
||||
try:
|
||||
answer = input("Proceed with the actual release? [y/N]: ").strip().lower()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
print("\n[INFO] Release aborted (no confirmation).")
|
||||
return
|
||||
|
||||
if answer not in ("y", "yes"):
|
||||
print("Release aborted by user. No changes were made.")
|
||||
return
|
||||
|
||||
print("\n[INFO] Running REAL release...\n")
|
||||
_release_impl(
|
||||
pyproject_path=pyproject_path,
|
||||
changelog_path=changelog_path,
|
||||
release_type=release_type,
|
||||
message=message,
|
||||
preview=False,
|
||||
close=close,
|
||||
)
|
||||
|
||||
from .workflow import release
|
||||
|
||||
__all__ = ["release"]
|
||||
|
||||
@@ -289,7 +289,7 @@ def update_spec_version(
|
||||
|
||||
if preview:
|
||||
print(
|
||||
f"[PREVIEW] Would update spec file "
|
||||
"[PREVIEW] Would update spec file "
|
||||
f"{os.path.basename(spec_path)} to Version: {new_version}, Release: 1..."
|
||||
)
|
||||
return
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Git-related helpers for the release workflow.
|
||||
|
||||
Responsibilities:
|
||||
- Run Git (or shell) commands with basic error reporting.
|
||||
- Ensure main/master are synchronized with origin before tagging.
|
||||
- Maintain the floating 'latest' tag that always points to the newest
|
||||
release tag.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
@@ -19,77 +6,87 @@ from pkgmgr.core.git import GitError
|
||||
|
||||
|
||||
def run_git_command(cmd: str) -> None:
|
||||
"""
|
||||
Run a Git (or shell) command with basic error reporting.
|
||||
|
||||
The command is executed via the shell, primarily for readability
|
||||
when printed (as in 'git commit -am "msg"').
|
||||
"""
|
||||
print(f"[GIT] {cmd}")
|
||||
try:
|
||||
subprocess.run(cmd, shell=True, check=True)
|
||||
subprocess.run(
|
||||
cmd,
|
||||
shell=True,
|
||||
check=True,
|
||||
text=True,
|
||||
capture_output=True,
|
||||
)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
print(f"[ERROR] Git command failed: {cmd}")
|
||||
print(f" Exit code: {exc.returncode}")
|
||||
if exc.stdout:
|
||||
print("--- stdout ---")
|
||||
print(exc.stdout)
|
||||
print("\n" + exc.stdout)
|
||||
if exc.stderr:
|
||||
print("--- stderr ---")
|
||||
print(exc.stderr)
|
||||
print("\n" + exc.stderr)
|
||||
raise GitError(f"Git command failed: {cmd}") from exc
|
||||
|
||||
|
||||
def sync_branch_with_remote(branch: str, preview: bool = False) -> None:
|
||||
"""
|
||||
Ensure the local main/master branch is up-to-date before tagging.
|
||||
def _capture(cmd: str) -> str:
|
||||
res = subprocess.run(cmd, shell=True, check=False, capture_output=True, text=True)
|
||||
return (res.stdout or "").strip()
|
||||
|
||||
Behaviour:
|
||||
- For main/master: run 'git fetch origin' and 'git pull origin <branch>'.
|
||||
- For all other branches: only log that no automatic sync is performed.
|
||||
|
||||
def ensure_clean_and_synced(preview: bool = False) -> None:
|
||||
"""
|
||||
if branch not in ("main", "master"):
|
||||
print(
|
||||
f"[INFO] Skipping automatic git pull for non-main/master branch "
|
||||
f"{branch}."
|
||||
)
|
||||
Always run a pull BEFORE modifying anything.
|
||||
Uses --ff-only to avoid creating merge commits automatically.
|
||||
If no upstream is configured, we skip.
|
||||
"""
|
||||
upstream = _capture("git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null")
|
||||
if not upstream:
|
||||
print("[INFO] No upstream configured for current branch. Skipping pull.")
|
||||
return
|
||||
|
||||
print(
|
||||
f"[INFO] Updating branch {branch} from origin before creating tags..."
|
||||
)
|
||||
|
||||
if preview:
|
||||
print("[PREVIEW] Would run: git fetch origin")
|
||||
print(f"[PREVIEW] Would run: git pull origin {branch}")
|
||||
print("[PREVIEW] Would run: git fetch origin --prune --tags --force")
|
||||
print("[PREVIEW] Would run: git pull --ff-only")
|
||||
return
|
||||
|
||||
run_git_command("git fetch origin")
|
||||
run_git_command(f"git pull origin {branch}")
|
||||
print("[INFO] Syncing with remote before making any changes...")
|
||||
run_git_command("git fetch origin --prune --tags --force")
|
||||
run_git_command("git pull --ff-only")
|
||||
|
||||
def is_highest_version_tag(tag: str) -> bool:
|
||||
"""
|
||||
Return True if `tag` is the highest version among all tags matching v*.
|
||||
Comparison uses `sort -V` for natural version ordering.
|
||||
"""
|
||||
all_v = _capture("git tag --list 'v*'")
|
||||
if not all_v:
|
||||
return True # No tags yet, so the current tag is the highest
|
||||
|
||||
    # Get the highest tag in natural version order. The candidate tag is
    # included explicitly so the check also works in preview mode, before
    # the tag has actually been created.
    latest = _capture(f"(git tag --list 'v*'; echo {tag}) | sort -V | tail -n1")
    print(f"[INFO] Latest tag: {latest}, Current tag: {tag}")

    # `sort -V` already provides natural version ordering; a plain string
    # comparison would misorder e.g. v0.9.0 vs v0.10.0.
    return latest == tag
|
||||
|
||||
|
||||
def update_latest_tag(new_tag: str, preview: bool = False) -> None:
|
||||
"""
|
||||
Move the floating 'latest' tag to the newly created release tag.
|
||||
|
||||
Implementation details:
|
||||
- We explicitly dereference the tag object via `<tag>^{}` so that
|
||||
'latest' always points at the underlying commit, not at another tag.
|
||||
- We create/update 'latest' as an annotated tag with a short message so
|
||||
Git configurations that enforce annotated/signed tags do not fail
|
||||
with "no tag message".
|
||||
Notes:
|
||||
- We dereference the tag object via `<tag>^{}` so that 'latest' points to the commit.
|
||||
- 'latest' is forced (floating tag), therefore the push uses --force.
|
||||
"""
|
||||
target_ref = f"{new_tag}^{{}}"
|
||||
print(f"[INFO] Updating 'latest' tag to point at {new_tag} (commit {target_ref})...")
|
||||
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run: git tag -f -a latest {target_ref} "
|
||||
f'-m "Floating latest tag for {new_tag}"')
|
||||
print(
|
||||
f'[PREVIEW] Would run: git tag -f -a latest {target_ref} '
|
||||
f'-m "Floating latest tag for {new_tag}"'
|
||||
)
|
||||
print("[PREVIEW] Would run: git push origin latest --force")
|
||||
return
|
||||
|
||||
run_git_command(
|
||||
f'git tag -f -a latest {target_ref} '
|
||||
f'-m "Floating latest tag for {new_tag}"'
|
||||
f'git tag -f -a latest {target_ref} -m "Floating latest tag for {new_tag}"'
|
||||
)
|
||||
run_git_command("git push origin latest --force")
|
||||
|
||||
29
src/pkgmgr/actions/release/prompts.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
|
||||
def should_delete_branch(force: bool) -> bool:
|
||||
"""
|
||||
Ask whether the current branch should be deleted after a successful release.
|
||||
|
||||
- If force=True: skip prompt and return True.
|
||||
- If non-interactive stdin: do NOT delete by default.
|
||||
"""
|
||||
if force:
|
||||
return True
|
||||
if not sys.stdin.isatty():
|
||||
return False
|
||||
answer = input("Delete the current branch after release? [y/N] ").strip().lower()
|
||||
return answer in ("y", "yes")
|
||||
|
||||
|
||||
def confirm_proceed_release() -> bool:
|
||||
"""
|
||||
Ask whether to proceed with the REAL release after the preview phase.
|
||||
"""
|
||||
try:
|
||||
answer = input("Proceed with the actual release? [y/N]: ").strip().lower()
|
||||
except (EOFError, KeyboardInterrupt):
|
||||
return False
|
||||
return answer in ("y", "yes")
|
||||
229
src/pkgmgr/actions/release/workflow.py
Normal file
@@ -0,0 +1,229 @@
from __future__ import annotations
from typing import Optional
import os
import sys

from pkgmgr.actions.branch import close_branch
from pkgmgr.core.git import get_current_branch, GitError

from .files import (
    update_changelog,
    update_debian_changelog,
    update_flake_version,
    update_pkgbuild_version,
    update_pyproject_version,
    update_spec_changelog,
    update_spec_version,
)
from .git_ops import (
    ensure_clean_and_synced,
    is_highest_version_tag,
    run_git_command,
    update_latest_tag,
)
from .prompts import confirm_proceed_release, should_delete_branch
from .versioning import bump_semver, determine_current_version


def _release_impl(
    pyproject_path: str = "pyproject.toml",
    changelog_path: str = "CHANGELOG.md",
    release_type: str = "patch",
    message: Optional[str] = None,
    preview: bool = False,
    close: bool = False,
    force: bool = False,
) -> None:
    # Determine current branch early
    try:
        branch = get_current_branch() or "main"
    except GitError:
        branch = "main"
    print(f"Releasing on branch: {branch}")

    # Pull BEFORE making any modifications
    ensure_clean_and_synced(preview=preview)

    current_ver = determine_current_version()
    new_ver = bump_semver(current_ver, release_type)
    new_ver_str = str(new_ver)
    new_tag = new_ver.to_tag(with_prefix=True)

    mode = "PREVIEW" if preview else "REAL"
    print(f"Release mode: {mode}")
    print(f"Current version: {current_ver}")
    print(f"New version: {new_ver_str} ({release_type})")

    repo_root = os.path.dirname(os.path.abspath(pyproject_path))

    update_pyproject_version(pyproject_path, new_ver_str, preview=preview)
    changelog_message = update_changelog(
        changelog_path,
        new_ver_str,
        message=message,
        preview=preview,
    )

    flake_path = os.path.join(repo_root, "flake.nix")
    update_flake_version(flake_path, new_ver_str, preview=preview)

    pkgbuild_path = os.path.join(repo_root, "PKGBUILD")
    update_pkgbuild_version(pkgbuild_path, new_ver_str, preview=preview)

    spec_path = os.path.join(repo_root, "package-manager.spec")
    update_spec_version(spec_path, new_ver_str, preview=preview)

    effective_message: Optional[str] = message
    if effective_message is None and isinstance(changelog_message, str):
        if changelog_message.strip():
            effective_message = changelog_message.strip()

    debian_changelog_path = os.path.join(repo_root, "debian", "changelog")
    package_name = os.path.basename(repo_root) or "package-manager"

    update_debian_changelog(
        debian_changelog_path,
        package_name=package_name,
        new_version=new_ver_str,
        message=effective_message,
        preview=preview,
    )

    update_spec_changelog(
        spec_path=spec_path,
        package_name=package_name,
        new_version=new_ver_str,
        message=effective_message,
        preview=preview,
    )

    commit_msg = f"Release version {new_ver_str}"
    tag_msg = effective_message or commit_msg

    files_to_add = [
        pyproject_path,
        changelog_path,
        flake_path,
        pkgbuild_path,
        spec_path,
        debian_changelog_path,
    ]
    existing_files = [p for p in files_to_add if p and os.path.exists(p)]

    if preview:
        for path in existing_files:
            print(f"[PREVIEW] Would run: git add {path}")
        print(f'[PREVIEW] Would run: git commit -am "{commit_msg}"')
        print(f'[PREVIEW] Would run: git tag -a {new_tag} -m "{tag_msg}"')
        print(f"[PREVIEW] Would run: git push origin {branch}")
        print(f"[PREVIEW] Would run: git push origin {new_tag}")

        if is_highest_version_tag(new_tag):
            update_latest_tag(new_tag, preview=True)
        else:
            print(f"[PREVIEW] Skipping 'latest' update (tag {new_tag} is not the highest).")

        if close and branch not in ("main", "master"):
            if force:
                print(f"[PREVIEW] Would delete branch {branch} (forced).")
            else:
                print(f"[PREVIEW] Would ask whether to delete branch {branch} after release.")
        return

    for path in existing_files:
        run_git_command(f"git add {path}")

    run_git_command(f'git commit -am "{commit_msg}"')
    run_git_command(f'git tag -a {new_tag} -m "{tag_msg}"')

    # Push branch and ONLY the newly created version tag (no --tags)
    run_git_command(f"git push origin {branch}")
    run_git_command(f"git push origin {new_tag}")

    # Update 'latest' only if this is the highest version tag
    try:
        if is_highest_version_tag(new_tag):
            update_latest_tag(new_tag, preview=False)
        else:
            print(f"[INFO] Skipping 'latest' update (tag {new_tag} is not the highest).")
    except GitError as exc:
        print(f"[WARN] Failed to update floating 'latest' tag for {new_tag}: {exc}")
        print("'latest' tag was not updated.")

    print(f"Release {new_ver_str} completed.")

    if close:
        if branch in ("main", "master"):
            print(f"[INFO] close=True but current branch is {branch}; skipping branch deletion.")
            return

        if not should_delete_branch(force=force):
            print(f"[INFO] Branch deletion declined. Keeping branch {branch}.")
            return

        print(f"[INFO] Deleting branch {branch} after successful release...")
        try:
            close_branch(name=branch, base_branch="main", cwd=".")
        except Exception as exc:
            print(f"[WARN] Failed to close branch {branch} automatically: {exc}")


def release(
    pyproject_path: str = "pyproject.toml",
    changelog_path: str = "CHANGELOG.md",
    release_type: str = "patch",
    message: Optional[str] = None,
    preview: bool = False,
    force: bool = False,
    close: bool = False,
) -> None:
    if preview:
        _release_impl(
            pyproject_path=pyproject_path,
            changelog_path=changelog_path,
            release_type=release_type,
            message=message,
            preview=True,
            close=close,
            force=force,
        )
        return

    # If force or non-interactive: no preview+confirmation step
    if force or (not sys.stdin.isatty()):
        _release_impl(
            pyproject_path=pyproject_path,
            changelog_path=changelog_path,
            release_type=release_type,
            message=message,
            preview=False,
            close=close,
            force=force,
        )
        return

    print("[INFO] Running preview before actual release...\n")
    _release_impl(
        pyproject_path=pyproject_path,
        changelog_path=changelog_path,
        release_type=release_type,
        message=message,
        preview=True,
        close=close,
        force=force,
    )

    if not confirm_proceed_release():
        print()
        return

    print("\n[INFO] Running REAL release...\n")
    _release_impl(
        pyproject_path=pyproject_path,
        changelog_path=changelog_path,
        release_type=release_type,
        message=message,
        preview=False,
        close=close,
        force=force,
    )
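Taken together, release() is the public entry point and _release_impl() does the work: a plain interactive call runs a preview first and only proceeds after confirm_proceed_release(). A hedged invocation sketch (parameter names and defaults as in the diff; the surrounding CLI wiring is not shown here):

from pkgmgr.actions.release.workflow import release

# Dry run: preview=True only prints the git commands that would be executed.
release(release_type="minor", preview=True)

# Non-interactive release (e.g. in CI): force=True skips the preview-plus-confirmation
# step; branch deletion is only attempted when close=True.
release(release_type="patch", force=True, close=False)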
@@ -1,6 +1,5 @@
import os
import subprocess
import sys
import yaml
from pkgmgr.core.command.alias import generate_alias
from pkgmgr.core.config.save import save_user_config
@@ -1,15 +1,32 @@
import os
import sys
from pkgmgr.core.repository.identifier import get_repo_identifier
from pkgmgr.core.repository.dir import get_repo_dir

def deinstall_repos(selected_repos, repositories_base_dir, bin_dir, all_repos, preview=False):
from pkgmgr.core.command.run import run_command
from pkgmgr.core.repository.dir import get_repo_dir
from pkgmgr.core.repository.identifier import get_repo_identifier


def deinstall_repos(
    selected_repos,
    repositories_base_dir,
    bin_dir,
    all_repos,
    preview: bool = False,
) -> None:
    for repo in selected_repos:
        repo_identifier = get_repo_identifier(repo, all_repos)
        alias_path = os.path.join(bin_dir, repo_identifier)

        # Resolve repository directory
        repo_dir = get_repo_dir(repositories_base_dir, repo)

        # Prefer alias if available; fall back to identifier
        alias_name = str(repo.get("alias") or repo_identifier)
        alias_path = os.path.join(os.path.expanduser(bin_dir), alias_name)

        # Remove alias link/file (interactive)
        if os.path.exists(alias_path):
            confirm = input(f"Are you sure you want to delete link '{alias_path}' for {repo_identifier}? [y/N]: ").strip().lower()
            confirm = input(
                f"Are you sure you want to delete link '{alias_path}' for {repo_identifier}? [y/N]: "
            ).strip().lower()
            if confirm == "y":
                if preview:
                    print(f"[Preview] Would remove link '{alias_path}'.")
@@ -19,10 +36,13 @@ def deinstall_repos(selected_repos, repositories_base_dir, bin_dir, all_repos, p
            else:
                print(f"No link found for {repo_identifier} in {bin_dir}.")

        # Run make deinstall if repository exists and has a Makefile
        makefile_path = os.path.join(repo_dir, "Makefile")
        if os.path.exists(makefile_path):
            print(f"Makefile found in {repo_identifier}, running 'make deinstall'...")
            try:
                run_command("make deinstall", cwd=repo_dir, preview=preview)
            except SystemExit as e:
                print(f"[Warning] Failed to run 'make deinstall' for {repo_identifier}: {e}")
                print(
                    f"[Warning] Failed to run 'make deinstall' for {repo_identifier}: {e}"
                )
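For orientation, a hypothetical call of the reworked deinstall_repos(). The import path and the repository-dict fields other than "alias" are assumptions; this hunk does not show the file header or the config schema:

# Assumed import path and repo schema -- illustrative only.
from pkgmgr.actions.deinstall import deinstall_repos

repos = [{"alias": "demo"}]  # real entries come from the pkgmgr configuration
deinstall_repos(
    selected_repos=repos,
    repositories_base_dir="~/Repositories",
    bin_dir="~/.local/bin",
    all_repos=repos,
    preview=True,  # only print what would be removed
)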
Some files were not shown because too many files have changed in this diff.