Compare commits
78 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6effacefef | ||
|
|
65903e740b | ||
|
|
aa80a2ddb4 | ||
|
|
9456ad4475 | ||
|
|
3d7d7e9c09 | ||
|
|
328203ccd7 | ||
|
|
ac16378807 | ||
|
|
f7a86bc353 | ||
|
|
06a6a77a48 | ||
|
|
4883e40812 | ||
|
|
031ae5ac69 | ||
|
|
1c4fc531fa | ||
|
|
33dfbf3a4d | ||
|
|
a3aa7b6394 | ||
|
|
724c262a4a | ||
|
|
dcbe16c5f0 | ||
|
|
f63b0a9f08 | ||
|
|
822c418503 | ||
|
|
562a6da291 | ||
|
|
e61b30d9af | ||
|
|
27c0c7c01f | ||
|
|
0d652d995e | ||
|
|
0e03fbbee2 | ||
|
|
7cfd7e8d5c | ||
|
|
84b6c71748 | ||
|
|
db9aaf920e | ||
|
|
69d28a461d | ||
|
|
03e414cc9f | ||
|
|
7674762c9a | ||
|
|
a47de15e42 | ||
|
|
37f3057d31 | ||
|
|
d55c8d3726 | ||
|
|
3990560cd7 | ||
|
|
d1e5a71f77 | ||
|
|
d59dc8ad53 | ||
|
|
55f4a1e941 | ||
|
|
2a4ec18532 | ||
|
|
2debdbee09 | ||
|
|
4cb62e90f8 | ||
|
|
923519497a | ||
|
|
5fa18cb449 | ||
|
|
f513196911 | ||
|
|
7f06447bbd | ||
|
|
1e5d6d3eee | ||
|
|
f2970adbb2 | ||
|
|
7f262c6557 | ||
|
|
0bc7a3ecc0 | ||
|
|
55a0ae4337 | ||
|
|
bcf284c5d6 | ||
|
|
db23b1a445 | ||
|
|
506f69d8a7 | ||
|
|
097e64408f | ||
|
|
a3913d9489 | ||
|
|
c92fd44dd3 | ||
|
|
2c3efa7a27 | ||
|
|
f388bc51bc | ||
|
|
4e28eba883 | ||
|
|
b8acd634f8 | ||
|
|
fb68b325d6 | ||
|
|
650a22d425 | ||
|
|
6a590d8780 | ||
|
|
5601ea442a | ||
|
|
5ff15013d7 | ||
|
|
6ccc1c1490 | ||
|
|
8ead3472dd | ||
|
|
422ac8b837 | ||
|
|
ea84c1b14e | ||
|
|
71a4e7e725 | ||
|
|
fb737ef290 | ||
|
|
2963a43754 | ||
|
|
103f49c8f6 | ||
|
|
f5d428950e | ||
|
|
b40787ffc5 | ||
|
|
0482a7f88d | ||
|
|
8c127cc45a | ||
|
|
2761e829cb | ||
|
|
d0c01b6955 | ||
|
|
b2421c9b84 |
6
.github/workflows/ci.yml
vendored
6
.github/workflows/ci.yml
vendored
@@ -27,3 +27,9 @@ jobs:
|
||||
|
||||
test-virgin-root:
|
||||
uses: ./.github/workflows/test-virgin-root.yml
|
||||
|
||||
linter-shell:
|
||||
uses: ./.github/workflows/linter-shell.yml
|
||||
|
||||
linter-python:
|
||||
uses: ./.github/workflows/linter-python.yml
|
||||
|
||||
23
.github/workflows/linter-python.yml
vendored
Normal file
23
.github/workflows/linter-python.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: Ruff (Python code sniffer)
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
linter-python:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install ruff
|
||||
run: pip install ruff
|
||||
|
||||
- name: Run ruff
|
||||
run: |
|
||||
ruff check src tests
|
||||
14
.github/workflows/linter-shell.yml
vendored
Normal file
14
.github/workflows/linter-shell.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
name: ShellCheck
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
linter-shell:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install ShellCheck
|
||||
run: sudo apt-get update && sudo apt-get install -y shellcheck
|
||||
- name: Run ShellCheck
|
||||
run: shellcheck -x $(find scripts -type f -name '*.sh' -print)
|
||||
8
.github/workflows/mark-stable.yml
vendored
8
.github/workflows/mark-stable.yml
vendored
@@ -29,8 +29,16 @@ jobs:
|
||||
test-virgin-root:
|
||||
uses: ./.github/workflows/test-virgin-root.yml
|
||||
|
||||
linter-shell:
|
||||
uses: ./.github/workflows/linter-shell.yml
|
||||
|
||||
linter-python:
|
||||
uses: ./.github/workflows/linter-python.yml
|
||||
|
||||
mark-stable:
|
||||
needs:
|
||||
- linter-shell
|
||||
- linter-python
|
||||
- test-unit
|
||||
- test-integration
|
||||
- test-env-nix
|
||||
|
||||
74
.github/workflows/publish-containers.yml
vendored
Normal file
74
.github/workflows/publish-containers.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
name: Publish container images (GHCR)
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Mark stable commit"]
|
||||
types: [completed]
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository (with tags)
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout workflow_run commit and refresh tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git checkout -f "${{ github.event.workflow_run.head_sha }}"
|
||||
git fetch --tags --force
|
||||
git tag --list 'stable' 'v*' --sort=version:refname | tail -n 20
|
||||
|
||||
- name: Compute version and stable flag
|
||||
id: info
|
||||
run: |
|
||||
set -euo pipefail
|
||||
SHA="$(git rev-parse HEAD)"
|
||||
|
||||
V_TAG="$(git tag --points-at "${SHA}" --list 'v*' | sort -V | tail -n1)"
|
||||
if [[ -z "${V_TAG}" ]]; then
|
||||
echo "No version tag found for ${SHA}. Skipping publish."
|
||||
echo "should_publish=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
VERSION="${V_TAG#v}"
|
||||
|
||||
STABLE_SHA="$(git rev-parse -q --verify refs/tags/stable^{commit} 2>/dev/null || true)"
|
||||
IS_STABLE=false
|
||||
[[ -n "${STABLE_SHA}" && "${STABLE_SHA}" == "${SHA}" ]] && IS_STABLE=true
|
||||
|
||||
echo "should_publish=true" >> "$GITHUB_OUTPUT"
|
||||
echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "is_stable=${IS_STABLE}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
if: ${{ steps.info.outputs.should_publish == 'true' }}
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
use: true
|
||||
|
||||
- name: Login to GHCR
|
||||
if: ${{ steps.info.outputs.should_publish == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Publish all images
|
||||
if: ${{ steps.info.outputs.should_publish == 'true' }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
OWNER="${{ github.repository_owner }}" \
|
||||
VERSION="${{ steps.info.outputs.version }}" \
|
||||
IS_STABLE="${{ steps.info.outputs.is_stable }}" \
|
||||
bash scripts/build/publish.sh
|
||||
2
.github/workflows/test-e2e.yml
vendored
2
.github/workflows/test-e2e.yml
vendored
@@ -22,4 +22,4 @@ jobs:
|
||||
- name: Run E2E tests via make (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
distro="${{ matrix.distro }}" make test-e2e
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make test-e2e
|
||||
|
||||
2
.github/workflows/test-env-nix.yml
vendored
2
.github/workflows/test-env-nix.yml
vendored
@@ -23,4 +23,4 @@ jobs:
|
||||
- name: Nix flake-only test (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
distro="${{ matrix.distro }}" make test-env-nix
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make test-env-nix
|
||||
|
||||
2
.github/workflows/test-env-virtual.yml
vendored
2
.github/workflows/test-env-virtual.yml
vendored
@@ -25,4 +25,4 @@ jobs:
|
||||
- name: Run container tests (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
distro="${{ matrix.distro }}" make test-env-virtual
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make test-env-virtual
|
||||
|
||||
2
.github/workflows/test-integration.yml
vendored
2
.github/workflows/test-integration.yml
vendored
@@ -16,4 +16,4 @@ jobs:
|
||||
run: docker version
|
||||
|
||||
- name: Run integration tests via make (Arch container)
|
||||
run: make test-integration distro="arch"
|
||||
run: make test-integration PKGMGR_DISTRO="arch"
|
||||
|
||||
2
.github/workflows/test-unit.yml
vendored
2
.github/workflows/test-unit.yml
vendored
@@ -16,4 +16,4 @@ jobs:
|
||||
run: docker version
|
||||
|
||||
- name: Run unit tests via make (Arch container)
|
||||
run: make test-unit distro="arch"
|
||||
run: make test-unit PKGMGR_DISTRO="arch"
|
||||
|
||||
4
.github/workflows/test-virgin-root.yml
vendored
4
.github/workflows/test-virgin-root.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
- name: Build virgin container (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
distro="${{ matrix.distro }}" make build-missing-virgin
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make build-missing-virgin
|
||||
|
||||
# 🔹 RUN test inside virgin image
|
||||
- name: Virgin ${{ matrix.distro }} pkgmgr test (root)
|
||||
@@ -46,8 +46,6 @@ jobs:
|
||||
|
||||
. "$HOME/.venvs/pkgmgr/bin/activate"
|
||||
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
|
||||
pkgmgr update pkgmgr --clone-mode shallow --no-verification
|
||||
pkgmgr version pkgmgr
|
||||
|
||||
|
||||
3
.github/workflows/test-virgin-user.yml
vendored
3
.github/workflows/test-virgin-user.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
- name: Build virgin container (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
distro="${{ matrix.distro }}" make build-missing-virgin
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make build-missing-virgin
|
||||
|
||||
# 🔹 RUN test inside virgin image as non-root
|
||||
- name: Virgin ${{ matrix.distro }} pkgmgr test (user)
|
||||
@@ -59,7 +59,6 @@ jobs:
|
||||
pkgmgr version pkgmgr
|
||||
|
||||
export NIX_REMOTE=local
|
||||
export NIX_CONFIG=\"experimental-features = nix-command flakes\"
|
||||
nix run /src#pkgmgr -- version pkgmgr
|
||||
"
|
||||
'
|
||||
|
||||
130
CHANGELOG.md
130
CHANGELOG.md
@@ -1,3 +1,86 @@
|
||||
## [1.7.1] - 2025-12-14
|
||||
|
||||
* Patched package-manager to kpmx to publish on pypi
|
||||
|
||||
|
||||
## [1.7.0] - 2025-12-14
|
||||
|
||||
* * New *pkgmgr publish* command to publish repository artifacts to PyPI based on the *MIRRORS* file.
|
||||
* Automatically selects the current repository when no explicit selection is given.
|
||||
* Publishes only when a semantic version tag is present on *HEAD*; otherwise skips with a clear info message.
|
||||
* Supports non-interactive mode for CI environments via *--non-interactive*.
|
||||
|
||||
|
||||
## [1.6.4] - 2025-12-14
|
||||
|
||||
* * Improved reliability of Nix installs and updates, including automatic resolution of profile conflicts and better handling of GitHub 403 rate limits.
|
||||
* More stable launcher behavior in packaged and virtual-env setups.
|
||||
* Enhanced mirror and remote handling: repository owner/name are derived from URLs, with smoother provisioning and clearer credential handling.
|
||||
* More reliable releases and artifacts due to safer CI behavior when no version tag is present.
|
||||
|
||||
|
||||
## [1.6.3] - 2025-12-14
|
||||
|
||||
* ***Fixed:*** Corrected repository path resolution so release and version logic consistently use the canonical packaging/* layout, preventing changelog and packaging files from being read or updated from incorrect locations.
|
||||
|
||||
|
||||
## [1.6.2] - 2025-12-14
|
||||
|
||||
* **pkgmgr version** now also shows the installed pkgmgr version when run outside a repository.
|
||||
|
||||
|
||||
## [1.6.1] - 2025-12-14
|
||||
|
||||
* * Added automatic retry handling for GitHub 403 / rate-limit errors during Nix flake installs (Fibonacci backoff with jitter).
|
||||
|
||||
|
||||
## [1.6.0] - 2025-12-14
|
||||
|
||||
* *** Changed ***
|
||||
- Unified update handling via a single top-level `pkgmgr update` command, removing ambiguous update paths.
|
||||
- Improved update reliability by routing all update logic through a central UpdateManager.
|
||||
- Renamed system update flag from `--system-update` to `--system` for clarity and consistency.
|
||||
- Made mirror handling explicit and safer by separating setup, check, and provision responsibilities.
|
||||
- Improved credential resolution for remote providers (environment → keyring → interactive).
|
||||
|
||||
*** Added ***
|
||||
- Optional system updates via `pkgmgr update --system` (Arch, Debian/Ubuntu, Fedora/RHEL).
|
||||
- `pkgmgr install --update` to force re-running installers and refresh existing installations.
|
||||
- Remote repository provisioning for mirrors on supported providers.
|
||||
- Extended end-to-end test coverage for update and mirror workflows.
|
||||
|
||||
*** Fixed ***
|
||||
- Resolved “Unknown repos command: update” errors after CLI refactoring.
|
||||
- Improved Nix update stability and reduced CI failures caused by transient rate limits.
|
||||
|
||||
|
||||
## [1.5.0] - 2025-12-13
|
||||
|
||||
* - Commands now show live output while running, making long operations easier to follow
|
||||
- Error messages include full command output, making failures easier to understand and debug
|
||||
- Deinstallation is more complete and predictable, removing CLI links and properly cleaning up repositories
|
||||
- Preview mode is more trustworthy, clearly showing what would happen without making changes
|
||||
- Repository configuration problems are detected earlier with clear, user-friendly explanations
|
||||
- More consistent behavior across different Linux distributions
|
||||
- More reliable execution in Docker containers and CI environments
|
||||
- Nix-based execution works more smoothly, especially when running as root or inside containers
|
||||
- Existing commands, scripts, and workflows continue to work without any breaking changes
|
||||
|
||||
|
||||
## [1.4.1] - 2025-12-12
|
||||
|
||||
* Fixed stable release container publishing
|
||||
|
||||
|
||||
## [1.4.0] - 2025-12-12
|
||||
|
||||
**Docker Container Building**
|
||||
|
||||
* New official container images are automatically published on each release.
|
||||
* Images are available per distribution and as a default Arch-based image.
|
||||
* Stable releases now provide an additional `stable` container tag.
|
||||
|
||||
|
||||
## [1.3.1] - 2025-12-12
|
||||
|
||||
* Updated documentation with better run and installation instructions
|
||||
@@ -5,7 +88,7 @@
|
||||
|
||||
## [1.3.0] - 2025-12-12
|
||||
|
||||
* **Minor release – Stability & CI hardening**
|
||||
**Stability & CI hardening**
|
||||
|
||||
* Stabilized Nix resolution and global symlink handling across Arch, CentOS, Debian, and Ubuntu
|
||||
* Ensured Nix works reliably in CI, sudo, login, and non-login shells without overriding distro-managed paths
|
||||
@@ -17,7 +100,7 @@
|
||||
|
||||
## [1.2.1] - 2025-12-12
|
||||
|
||||
* **Changed**
|
||||
**Changed**
|
||||
|
||||
* Split container tests into *virtualenv* and *Nix flake* environments to clearly separate Python and Nix responsibilities.
|
||||
|
||||
@@ -34,7 +117,7 @@
|
||||
|
||||
## [1.2.0] - 2025-12-12
|
||||
|
||||
* **Release workflow overhaul**
|
||||
**Release workflow overhaul**
|
||||
|
||||
* Introduced a fully structured release workflow with clear phases and safeguards
|
||||
* Added preview-first releases with explicit confirmation before execution
|
||||
@@ -51,7 +134,8 @@
|
||||
|
||||
## [1.0.0] - 2025-12-11
|
||||
|
||||
* **1.0.0 – Official Stable Release 🎉**
|
||||
**Official Stable Release 🎉**
|
||||
|
||||
*First stable release of PKGMGR, the multi-distro development and package workflow manager.*
|
||||
|
||||
---
|
||||
@@ -144,7 +228,7 @@ PKGMGR 1.0.0 unifies repository management, build tooling, release automation an
|
||||
|
||||
## [0.9.1] - 2025-12-10
|
||||
|
||||
* * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
|
||||
* Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
|
||||
* Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
|
||||
* Fixed repository directory resolution; improved `pkgmgr path` and `pkgmgr shell`; added full unit/E2E coverage.
|
||||
* Removed deprecated files and updated `.gitignore`.
|
||||
@@ -239,47 +323,45 @@ PKGMGR 1.0.0 unifies repository management, build tooling, release automation an
|
||||
|
||||
## [0.7.1] - 2025-12-09
|
||||
|
||||
* Fix floating 'latest' tag logic: dereference annotated target (vX.Y.Z^{}), add tag message to avoid Git errors, ensure best-effort update without blocking releases, and update unit tests (see ChatGPT conversation: https://chatgpt.com/share/69383024-efa4-800f-a875-129b81fa40ff).
|
||||
|
||||
* Fix floating 'latest' tag logic
|
||||
* dereference annotated target (vX.Y.Z^{})
|
||||
* add tag message to avoid Git errors
|
||||
* ensure best-effort update without blocking releases
|
||||
|
||||
## [0.7.0] - 2025-12-09
|
||||
|
||||
* Add Git helpers for branch sync and floating 'latest' tag in the release workflow, ensure main/master are updated from origin before tagging, and extend unit/e2e tests including 'pkgmgr release --help' coverage (see ChatGPT conversation: https://chatgpt.com/share/69383024-efa4-800f-a875-129b81fa40ff)
|
||||
|
||||
* Add Git helpers for branch sync and floating 'latest' tag in the release workflow
|
||||
* ensure main/master are updated from origin before tagging
|
||||
|
||||
## [0.6.0] - 2025-12-09
|
||||
|
||||
* Expose DISTROS and BASE_IMAGE_* variables as exported Makefile environment variables so all build and test commands can consume them dynamically. By exporting these values, every Make target (e.g., build, build-no-cache, build-missing, test-container, test-unit, test-e2e) and every delegated script in scripts/build/ and scripts/test/ now receives a consistent view of the supported distributions and their base container images. This change removes duplicated definitions across scripts, ensures reproducible builds, and allows build tooling to react automatically when new distros or base images are added to the Makefile.
|
||||
|
||||
* Consistent view of the supported distributions and their base container images.
|
||||
|
||||
## [0.5.1] - 2025-12-09
|
||||
|
||||
* Refine pkgmgr release CLI close wiring and integration tests for --close flag (ChatGPT: https://chatgpt.com/share/69376b4e-8440-800f-9d06-535ec1d7a40e)
|
||||
* Refine pkgmgr release CLI close wiring and integration tests for --close flag
|
||||
|
||||
|
||||
## [0.5.0] - 2025-12-09
|
||||
|
||||
* Add pkgmgr branch close subcommand, extend CLI parser wiring, and add unit tests for branch handling and version version-selection logic (see ChatGPT conversation: https://chatgpt.com/share/693762a3-9ea8-800f-a640-bc78170953d1)
|
||||
|
||||
* Add pkgmgr branch close subcommand, extend CLI parser wiring
|
||||
|
||||
## [0.4.3] - 2025-12-09
|
||||
|
||||
* Implement current-directory repository selection for release and proxy commands, unify selection semantics across CLI layers, extend release workflow with --close, integrate branch closing logic, fix wiring for get_repo_identifier/get_repo_dir, update packaging files (PKGBUILD, spec, flake.nix, pyproject), and add comprehensive unit/e2e tests for release and branch commands (see ChatGPT conversation: https://chatgpt.com/share/69375cfe-9e00-800f-bd65-1bd5937e1696)
|
||||
|
||||
* Implement current-directory repository selection for release and proxy commands, unify selection semantics across CLI layers, extend release workflow with --close, integrate branch closing logic, fix wiring for get_repo_identifier/get_repo_dir, update packaging files (PKGBUILD, spec, flake.nix, pyproject)
|
||||
|
||||
## [0.4.2] - 2025-12-09
|
||||
|
||||
* Wire pkgmgr release CLI to new helper and add unit tests (see ChatGPT conversation: https://chatgpt.com/share/69374f09-c760-800f-92e4-5b44a4510b62)
|
||||
* Wire pkgmgr release CLI to new helpe
|
||||
|
||||
|
||||
## [0.4.1] - 2025-12-08
|
||||
|
||||
* Add branch close subcommand and integrate release close/editor flow (ChatGPT: https://chatgpt.com/share/69374f09-c760-800f-92e4-5b44a4510b62)
|
||||
|
||||
* Add branch close subcommand and integrate release close/editor flow
|
||||
|
||||
## [0.4.0] - 2025-12-08
|
||||
|
||||
* Add branch closing helper and --close flag to release command, including CLI wiring and tests (see https://chatgpt.com/share/69374aec-74ec-800f-bde3-5d91dfdb9b91)
|
||||
* Add branch closing helper and --close flag to release command
|
||||
|
||||
## [0.3.0] - 2025-12-08
|
||||
|
||||
@@ -290,13 +372,10 @@ PKGMGR 1.0.0 unifies repository management, build tooling, release automation an
|
||||
- New config update logic + default YAML sync
|
||||
- Improved proxy command handling
|
||||
- Full CLI routing refactor
|
||||
- Expanded E2E tests for list, proxy, and selection logic
|
||||
Konversation: https://chatgpt.com/share/693745c3-b8d8-800f-aa29-c8481a2ffae1
|
||||
|
||||
## [0.2.0] - 2025-12-08
|
||||
|
||||
* Add preview-first release workflow and extended packaging support (see ChatGPT conversation: https://chatgpt.com/share/693722b4-af9c-800f-bccc-8a4036e99630)
|
||||
|
||||
* Add preview-first release workflow and extended packaging support
|
||||
|
||||
## [0.1.0] - 2025-12-08
|
||||
|
||||
@@ -305,5 +384,4 @@ Konversation: https://chatgpt.com/share/693745c3-b8d8-800f-aa29-c8481a2ffae1
|
||||
|
||||
## [0.1.0] - 2025-12-08
|
||||
|
||||
* Implement unified release helper with preview mode, multi-packaging version bumps, and new integration/unit tests (see ChatGPT conversation 2025-12-08: https://chatgpt.com/share/693722b4-af9c-800f-bccc-8a4036e99630)
|
||||
|
||||
* Implement unified release helper with preview mode, multi-packaging version bumps
|
||||
@@ -36,9 +36,6 @@ CMD ["bash"]
|
||||
# ============================================================
|
||||
FROM virgin AS full
|
||||
|
||||
# Nix environment defaults (only config; nix itself comes from deps/install flow)
|
||||
ENV NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Copy full repository for build
|
||||
|
||||
3
MIRRORS
3
MIRRORS
@@ -1,3 +1,4 @@
|
||||
git@github.com:kevinveenbirkenbach/package-manager.git
|
||||
ssh://git@git.veen.world:2201/kevinveenbirkenbach/pkgmgr.git
|
||||
ssh://git@code.cymais.cloud:2201/kevinveenbirkenbach/pkgmgr.git
|
||||
ssh://git@code.infinito.nexus:2201/kevinveenbirkenbach/pkgmgr.git
|
||||
https://pypi.org/project/kpmx/
|
||||
|
||||
15
Makefile
15
Makefile
@@ -7,8 +7,8 @@
|
||||
# Distro
|
||||
# Options: arch debian ubuntu fedora centos
|
||||
DISTROS ?= arch debian ubuntu fedora centos
|
||||
distro ?= arch
|
||||
export distro
|
||||
PKGMGR_DISTRO ?= arch
|
||||
export PKGMGR_DISTRO
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Base images
|
||||
@@ -30,22 +30,23 @@ export BASE_IMAGE_CENTOS
|
||||
# PYthon Unittest Pattern
|
||||
TEST_PATTERN := test_*.py
|
||||
export TEST_PATTERN
|
||||
export PYTHONPATH := src
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# System install
|
||||
# ------------------------------------------------------------
|
||||
install:
|
||||
@echo "Building and installing distro-native package-manager for this system..."
|
||||
@bash scripts/installation/main.sh
|
||||
@bash scripts/installation/init.sh
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# PKGMGR setup
|
||||
# ------------------------------------------------------------
|
||||
|
||||
# Default: keep current auto-detection behavior
|
||||
setup: setup-nix setup-venv
|
||||
setup: setup-venv
|
||||
|
||||
# Explicit: developer setup (Python venv + shell RC + main.py install)
|
||||
# Explicit: developer setup (Python venv + shell RC + install)
|
||||
setup-venv: setup-nix
|
||||
@bash scripts/setup/venv.sh
|
||||
|
||||
@@ -74,7 +75,7 @@ build-no-cache-all:
|
||||
@set -e; \
|
||||
for d in $(DISTROS); do \
|
||||
echo "=== build-no-cache: $$d ==="; \
|
||||
distro="$$d" $(MAKE) build-no-cache; \
|
||||
PKGMGR_DISTRO="$$d" $(MAKE) build-no-cache; \
|
||||
done
|
||||
|
||||
# ------------------------------------------------------------
|
||||
@@ -100,7 +101,7 @@ test-env-nix: build-missing
|
||||
test: test-env-virtual test-unit test-integration test-e2e
|
||||
|
||||
delete-volumes:
|
||||
@docker volume rm pkgmgr_nix_store_${distro} pkgmgr_nix_cache_${distro} || true
|
||||
@docker volume rm "pkgmgr_nix_store_${PKGMGR_DISTRO}" "pkgmgr_nix_cache_${PKGMGR_DISTRO}" || echo "No volumes to delete."
|
||||
|
||||
purge: delete-volumes build-no-cache
|
||||
|
||||
|
||||
178
README.md
178
README.md
@@ -25,52 +25,37 @@ together into repeatable development workflows.
|
||||
|
||||
Traditional distro package managers like `apt`, `pacman` or `dnf` focus on a
|
||||
single operating system. PKGMGR instead focuses on **your repositories and
|
||||
development lifecycle**:
|
||||
development lifecycle**. It provides one configuration for all repositories,
|
||||
one unified CLI to interact with them, and a Nix-based foundation that keeps
|
||||
tooling reproducible across distributions.
|
||||
|
||||
* one configuration for all your repos,
|
||||
* one CLI to interact with them,
|
||||
* one Nix-based layer to keep tooling reproducible across distros.
|
||||
Native package managers are still used where they make sense. PKGMGR coordinates
|
||||
the surrounding development, build and release workflows in a consistent way.
|
||||
|
||||
You keep using your native package manager where it makes sense – PKGMGR
|
||||
coordinates the *development and release flow* around it.
|
||||
In addition, PKGMGR provides Docker images that can serve as a **reproducible
|
||||
system baseline**. These images bundle the complete PKGMGR toolchain and are
|
||||
designed to be reused as a stable execution environment across machines,
|
||||
pipelines and teams. This approach is specifically used within
|
||||
[**Infinito.Nexus**](https://s.infinito.nexus/code) to make complex systems
|
||||
distribution-independent while remaining fully reproducible.
|
||||
|
||||
---
|
||||
|
||||
## Features 🚀
|
||||
|
||||
### Multi-distro development & packaging
|
||||
PKGMGR enables multi-distro development and packaging by managing multiple
|
||||
repositories from a single configuration file. It drives complete release
|
||||
pipelines across Linux distributions using Nix flakes, Python build metadata,
|
||||
native OS packages such as Arch, Debian and RPM formats, and additional ecosystem
|
||||
integrations like Ansible.
|
||||
|
||||
* Manage **many repositories at once** from a single `config/config.yaml`.
|
||||
* Drive full **release pipelines** across Linux distributions using:
|
||||
All functionality is exposed through a unified `pkgmgr` command-line interface
|
||||
that works identically on every supported distribution. It combines repository
|
||||
management, Git operations, Docker and Compose orchestration, as well as
|
||||
versioning, release and changelog workflows. Many commands support a preview
|
||||
mode, allowing you to inspect the underlying actions before they are executed.
|
||||
|
||||
* Nix flakes (`flake.nix`)
|
||||
* PyPI style builds (`pyproject.toml`)
|
||||
* OS packages (PKGBUILD, Debian control/changelog, RPM spec)
|
||||
* Ansible Galaxy metadata and more.
|
||||
|
||||
### Rich CLI for daily work
|
||||
|
||||
All commands are exposed via the `pkgmgr` CLI and are available on every distro:
|
||||
|
||||
* **Repository management**
|
||||
|
||||
* `clone`, `update`, `install`, `delete`, `deinstall`, `path`, `list`, `config`
|
||||
* **Git proxies**
|
||||
|
||||
* `pull`, `push`, `status`, `diff`, `add`, `show`, `checkout`,
|
||||
`reset`, `revert`, `rebase`, `commit`, `branch`
|
||||
* **Docker & Compose orchestration**
|
||||
|
||||
* `build`, `up`, `down`, `exec`, `ps`, `start`, `stop`, `restart`
|
||||
* **Release toolchain**
|
||||
|
||||
* `version`, `release`, `changelog`, `make`
|
||||
* **Mirror & workflow helpers**
|
||||
|
||||
* `mirror` (list/diff/merge/setup), `shell`, `terminal`, `code`, `explore`
|
||||
|
||||
Many of these commands support `--preview` mode so you can inspect the
|
||||
underlying Git or Docker calls without executing them.
|
||||
---
|
||||
|
||||
### Full development workflows
|
||||
|
||||
@@ -83,10 +68,6 @@ versioning features it can drive **end-to-end workflows**:
|
||||
4. Build distro-specific packages.
|
||||
5. Keep all mirrors and working copies in sync.
|
||||
|
||||
The extensive E2E tests (`tests/e2e/`) and GitHub Actions workflows (including
|
||||
“virgin user” and “virgin root” Arch tests) validate these flows across
|
||||
different Linux environments.
|
||||
|
||||
---
|
||||
|
||||
## Architecture & Setup Map 🗺️
|
||||
@@ -99,25 +80,44 @@ The following diagram gives a full overview of:
|
||||
|
||||

|
||||
|
||||
|
||||
**Diagram status:** 12 December 2025
|
||||
|
||||
**Always-up-to-date version:** [https://s.veen.world/pkgmgrmp](https://s.veen.world/pkgmgrmp)
|
||||
|
||||
---
|
||||
|
||||
Perfekt, dann hier die **noch kompaktere und korrekt differenzierte Version**, die **nur** zwischen
|
||||
**`make setup`** und **`make setup-venv`** unterscheidet und exakt deinem Verhalten entspricht.
|
||||
|
||||
README-ready, ohne Over-Engineering.
|
||||
|
||||
---
|
||||
|
||||
## Installation ⚙️
|
||||
|
||||
PKGMGR can be installed using `make`.
|
||||
The setup mode defines **which runtime layers are prepared**.
|
||||
|
||||
---
|
||||
|
||||
### Download
|
||||
|
||||
```bash
|
||||
git clone https://github.com/kevinveenbirkenbach/package-manager.git
|
||||
cd package-manager
|
||||
```
|
||||
|
||||
### Dependency installation (optional)
|
||||
|
||||
System dependencies required **before running any *make* commands** are installed via:
|
||||
|
||||
```
|
||||
scripts/installation/dependencies.sh
|
||||
```
|
||||
|
||||
The script detects and normalizes the OS and installs the required **system-level dependencies** accordingly.
|
||||
|
||||
### Install
|
||||
|
||||
```bash
|
||||
git clone https://github.com/kevinveenbirkenbach/package-manager.git
|
||||
cd package-manager
|
||||
make install
|
||||
```
|
||||
|
||||
### Setup modes
|
||||
|
||||
| Command | Prepares | Use case |
|
||||
@@ -125,17 +125,8 @@ The setup mode defines **which runtime layers are prepared**.
|
||||
| **make setup** | Python venv **and** Nix | Full development & CI |
|
||||
| **make setup-venv** | Python venv only | Local user setup |
|
||||
|
||||
---
|
||||
|
||||
### Install & setup
|
||||
|
||||
```bash
|
||||
git clone https://github.com/kevinveenbirkenbach/package-manager.git
|
||||
cd package-manager
|
||||
make install
|
||||
```
|
||||
|
||||
#### Full setup (venv + Nix)
|
||||
##### Full setup (venv + Nix)
|
||||
|
||||
```bash
|
||||
make setup
|
||||
@@ -143,7 +134,7 @@ make setup
|
||||
|
||||
Use this for CI, servers, containers and full development workflows.
|
||||
|
||||
#### Venv-only setup
|
||||
##### Venv-only setup
|
||||
|
||||
```bash
|
||||
make setup-venv
|
||||
@@ -154,38 +145,77 @@ Use this if you want PKGMGR isolated without Nix integration.
|
||||
|
||||
---
|
||||
|
||||
## Run without installation (Nix)
|
||||
Alles klar 🙂
|
||||
Hier ist der **RUN-Abschnitt ohne Gedankenstriche**, klar nach **Nix, Docker und venv** getrennt:
|
||||
|
||||
Run PKGMGR directly via Nix Flakes.
|
||||
---
|
||||
|
||||
## Run PKGMGR 🧰
|
||||
|
||||
PKGMGR can be executed in different environments.
|
||||
All modes expose the same CLI and commands.
|
||||
|
||||
---
|
||||
|
||||
### Run via Nix (no installation)
|
||||
|
||||
```bash
|
||||
nix run github:kevinveenbirkenbach/package-manager#pkgmgr -- --help
|
||||
```
|
||||
|
||||
Example:
|
||||
---
|
||||
|
||||
```bash
|
||||
nix run github:kevinveenbirkenbach/package-manager#pkgmgr -- version pkgmgr
|
||||
```
|
||||
### Run via Docker 🐳
|
||||
|
||||
Notes:
|
||||
PKGMGR can be executed **inside Docker containers** for CI, testing and isolated
|
||||
workflows.
|
||||
---
|
||||
|
||||
* full flake URL required
|
||||
* `--` separates Nix and PKGMGR arguments
|
||||
* can be used alongside any setup mode
|
||||
#### Container types
|
||||
|
||||
Two container types are available.
|
||||
|
||||
|
||||
| Image type | Contains | Typical use |
|
||||
| ---------- | ----------------------------- | ----------------------- |
|
||||
| **Virgin** | Base OS + system dependencies | Clean test environments |
|
||||
| **Stable** | PKGMGR + Nix (flakes enabled) | Ready-to-use workflows |
|
||||
|
||||
Example images:
|
||||
|
||||
* Virgin: `pkgmgr-arch-virgin`
|
||||
* Stable: `ghcr.io/kevinveenbirkenbach/pkgmgr:stable`
|
||||
|
||||
|
||||
Use **virgin images** for isolated test runs,
|
||||
use the **stable image** for fast, reproducible execution.
|
||||
|
||||
---
|
||||
|
||||
## Usage 🧰
|
||||
#### Run examples
|
||||
|
||||
After installation, the main entry point is:
|
||||
```bash
|
||||
docker run --rm -it \
|
||||
-v "$PWD":/src \
|
||||
-w /src \
|
||||
ghcr.io/kevinveenbirkenbach/pkgmgr:stable \
|
||||
pkgmgr --help
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Run via virtual environment (venv)
|
||||
|
||||
After activating the venv:
|
||||
|
||||
```bash
|
||||
pkgmgr --help
|
||||
```
|
||||
|
||||
This prints a list of all available subcommands.
|
||||
The help for each command is available via:
|
||||
---
|
||||
|
||||
This allows you to choose between zero install execution using Nix, fully prebuilt
|
||||
Docker environments or local isolated venv setups with identical command behavior.
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -26,17 +26,13 @@
|
||||
packages = forAllSystems (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
# Single source of truth for pkgmgr: Python 3.11
|
||||
# - Matches pyproject.toml: requires-python = ">=3.11"
|
||||
# - Uses python311Packages so that PyYAML etc. are available
|
||||
python = pkgs.python311;
|
||||
pyPkgs = pkgs.python311Packages;
|
||||
in
|
||||
rec {
|
||||
pkgmgr = pyPkgs.buildPythonApplication {
|
||||
pname = "package-manager";
|
||||
version = "1.3.1";
|
||||
version = "1.7.1";
|
||||
|
||||
# Use the git repo as source
|
||||
src = ./.;
|
||||
|
||||
14
main.py
14
main.py
@@ -1,14 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Ensure local src/ overrides installed package
|
||||
ROOT = Path(__file__).resolve().parent
|
||||
SRC = ROOT / "src"
|
||||
if SRC.is_dir():
|
||||
sys.path.insert(0, str(SRC))
|
||||
|
||||
from pkgmgr.cli import main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,7 +1,7 @@
|
||||
# Maintainer: Kevin Veen-Birkenbach <info@veen.world>
|
||||
|
||||
pkgname=package-manager
|
||||
pkgver=0.9.1
|
||||
pkgver=1.7.1
|
||||
pkgrel=1
|
||||
pkgdesc="Local-flake wrapper for Kevin's package-manager (Nix-based)."
|
||||
arch=('any')
|
||||
@@ -47,7 +47,7 @@ package() {
|
||||
cd "$srcdir/$_srcdir_name"
|
||||
|
||||
# Install the wrapper into /usr/bin
|
||||
install -Dm0755 "scripts/pkgmgr-wrapper.sh" \
|
||||
install -Dm0755 "scripts/launcher.sh" \
|
||||
"$pkgdir/usr/bin/pkgmgr"
|
||||
|
||||
# Install Nix bootstrap (init + lib)
|
||||
|
||||
@@ -1,3 +1,33 @@
|
||||
package-manager (1.7.1-1) unstable; urgency=medium
|
||||
|
||||
* Patched package-manager to kpmx to publish on pypi
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 21:19:11 +0100
|
||||
|
||||
package-manager (1.7.0-1) unstable; urgency=medium
|
||||
|
||||
* * New *pkgmgr publish* command to publish repository artifacts to PyPI based on the *MIRRORS* file.
|
||||
* Automatically selects the current repository when no explicit selection is given.
|
||||
* Publishes only when a semantic version tag is present on *HEAD*; otherwise skips with a clear info message.
|
||||
* Supports non-interactive mode for CI environments via *--non-interactive*.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 21:10:06 +0100
|
||||
|
||||
package-manager (1.6.4-1) unstable; urgency=medium
|
||||
|
||||
* * Improved reliability of Nix installs and updates, including automatic resolution of profile conflicts and better handling of GitHub 403 rate limits.
|
||||
* More stable launcher behavior in packaged and virtual-env setups.
|
||||
* Enhanced mirror and remote handling: repository owner/name are derived from URLs, with smoother provisioning and clearer credential handling.
|
||||
* More reliable releases and artifacts due to safer CI behavior when no version tag is present.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 19:33:07 +0100
|
||||
|
||||
package-manager (1.6.3-1) unstable; urgency=medium
|
||||
|
||||
* ***Fixed:*** Corrected repository path resolution so release and version logic consistently use the canonical packaging/* layout, preventing changelog and packaging files from being read or updated from incorrect locations.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 13:39:52 +0100
|
||||
|
||||
package-manager (0.9.1-1) unstable; urgency=medium
|
||||
|
||||
* * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
|
||||
|
||||
@@ -28,7 +28,7 @@ override_dh_auto_install:
|
||||
install -d debian/package-manager/usr/lib/package-manager
|
||||
|
||||
# Install wrapper
|
||||
install -m0755 scripts/pkgmgr-wrapper.sh \
|
||||
install -m0755 scripts/launcher.sh \
|
||||
debian/package-manager/usr/bin/pkgmgr
|
||||
|
||||
# Install Nix bootstrap (init + lib)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
Name: package-manager
|
||||
Version: 0.9.1
|
||||
Version: 1.7.1
|
||||
Release: 1%{?dist}
|
||||
Summary: Wrapper that runs Kevin's package-manager via Nix flake
|
||||
|
||||
@@ -42,7 +42,7 @@ install -d %{buildroot}/usr/lib/package-manager
|
||||
cp -a . %{buildroot}/usr/lib/package-manager/
|
||||
|
||||
# Wrapper
|
||||
install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr
|
||||
install -m0755 scripts/launcher.sh %{buildroot}%{_bindir}/pkgmgr
|
||||
|
||||
# Nix bootstrap (init + lib)
|
||||
install -d %{buildroot}/usr/lib/package-manager/nix
|
||||
@@ -74,6 +74,24 @@ echo ">>> package-manager removed. Nix itself was not removed."
|
||||
/usr/lib/package-manager/
|
||||
|
||||
%changelog
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.7.1-1
|
||||
- Patched package-manager to kpmx to publish on pypi
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.7.0-1
|
||||
- * New *pkgmgr publish* command to publish repository artifacts to PyPI based on the *MIRRORS* file.
|
||||
* Automatically selects the current repository when no explicit selection is given.
|
||||
* Publishes only when a semantic version tag is present on *HEAD*; otherwise skips with a clear info message.
|
||||
* Supports non-interactive mode for CI environments via *--non-interactive*.
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.6.4-1
|
||||
- * Improved reliability of Nix installs and updates, including automatic resolution of profile conflicts and better handling of GitHub 403 rate limits.
|
||||
* More stable launcher behavior in packaged and virtual-env setups.
|
||||
* Enhanced mirror and remote handling: repository owner/name are derived from URLs, with smoother provisioning and clearer credential handling.
|
||||
* More reliable releases and artifacts due to safer CI behavior when no version tag is present.
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.6.3-1
|
||||
- ***Fixed:*** Corrected repository path resolution so release and version logic consistently use the canonical packaging/* layout, preventing changelog and packaging files from being read or updated from incorrect locations.
|
||||
|
||||
* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.9.1-1
|
||||
- * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
|
||||
* Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
|
||||
|
||||
@@ -6,8 +6,8 @@ requires = [
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "package-manager"
|
||||
version = "1.3.1"
|
||||
name = "kpmx"
|
||||
version = "1.7.1"
|
||||
description = "Kevin's package-manager tool (pkgmgr)"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.9"
|
||||
@@ -19,16 +19,17 @@ authors = [
|
||||
|
||||
# Base runtime dependencies
|
||||
dependencies = [
|
||||
"PyYAML>=6.0"
|
||||
"PyYAML>=6.0",
|
||||
"tomli; python_version < \"3.11\"",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/kevinveenbirkenbach/package-manager"
|
||||
Homepage = "https://s.veen.world/pkgmgr"
|
||||
Source = "https://github.com/kevinveenbirkenbach/package-manager"
|
||||
|
||||
[project.optional-dependencies]
|
||||
keyring = ["keyring>=24.0.0"]
|
||||
dev = [
|
||||
"pytest",
|
||||
"mypy"
|
||||
]
|
||||
|
||||
|
||||
@@ -1,18 +1,20 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
resolve_base_image() {
|
||||
local distro="$1"
|
||||
: "${BASE_IMAGE_ARCH:=archlinux:latest}"
|
||||
: "${BASE_IMAGE_DEBIAN:=debian:stable-slim}"
|
||||
: "${BASE_IMAGE_UBUNTU:=ubuntu:latest}"
|
||||
: "${BASE_IMAGE_FEDORA:=fedora:latest}"
|
||||
: "${BASE_IMAGE_CENTOS:=quay.io/centos/centos:stream9}"
|
||||
|
||||
case "$distro" in
|
||||
resolve_base_image() {
|
||||
local PKGMGR_DISTRO="$1"
|
||||
case "$PKGMGR_DISTRO" in
|
||||
arch) echo "$BASE_IMAGE_ARCH" ;;
|
||||
debian) echo "$BASE_IMAGE_DEBIAN" ;;
|
||||
ubuntu) echo "$BASE_IMAGE_UBUNTU" ;;
|
||||
fedora) echo "$BASE_IMAGE_FEDORA" ;;
|
||||
centos) echo "$BASE_IMAGE_CENTOS" ;;
|
||||
*)
|
||||
echo "ERROR: Unknown distro '$distro'" >&2
|
||||
exit 1
|
||||
;;
|
||||
*) echo "ERROR: Unknown distro '$PKGMGR_DISTRO'" >&2; exit 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
@@ -1,52 +1,53 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Unified docker image builder for all distros.
|
||||
#
|
||||
# Supports:
|
||||
# --missing Build only if image does not exist
|
||||
# --no-cache Disable docker layer cache
|
||||
# --target Dockerfile target (e.g. virgin|full)
|
||||
# --tag Override image tag (default: pkgmgr-$distro[-$target])
|
||||
#
|
||||
# Requires:
|
||||
# - env var: distro (arch|debian|ubuntu|fedora|centos)
|
||||
# - base.sh in same dir
|
||||
#
|
||||
# Examples:
|
||||
# distro=arch bash scripts/build/image.sh
|
||||
# distro=arch bash scripts/build/image.sh --no-cache
|
||||
# distro=arch bash scripts/build/image.sh --missing
|
||||
# distro=arch bash scripts/build/image.sh --target virgin
|
||||
# distro=arch bash scripts/build/image.sh --target virgin --missing
|
||||
# distro=arch bash scripts/build/image.sh --tag myimg:arch
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
# shellcheck source=/dev/null
|
||||
|
||||
# shellcheck source=./scripts/build/base.sh
|
||||
source "${SCRIPT_DIR}/base.sh"
|
||||
|
||||
: "${distro:?Environment variable 'distro' must be set (arch|debian|ubuntu|fedora|centos)}"
|
||||
: "${PKGMGR_DISTRO:?Environment variable 'PKGMGR_DISTRO' must be set (arch|debian|ubuntu|fedora|centos)}"
|
||||
|
||||
NO_CACHE=0
|
||||
MISSING_ONLY=0
|
||||
TARGET=""
|
||||
IMAGE_TAG="" # derive later unless --tag is provided
|
||||
IMAGE_TAG="" # local image name or base tag (without registry)
|
||||
PUSH=0 # if 1 -> use buildx and push (requires docker buildx)
|
||||
PUBLISH=0 # if 1 -> push with semantic tags (latest/version/stable + arch aliases)
|
||||
REGISTRY="" # e.g. ghcr.io
|
||||
OWNER="" # e.g. github org/user
|
||||
REPO_PREFIX="pkgmgr" # image base name (pkgmgr)
|
||||
VERSION="" # X.Y.Z (required for --publish)
|
||||
IS_STABLE="false" # "true" -> publish stable tags
|
||||
DEFAULT_DISTRO="arch"
|
||||
|
||||
usage() {
|
||||
local default_tag="pkgmgr-${distro}"
|
||||
local default_tag="pkgmgr-${PKGMGR_DISTRO}"
|
||||
if [[ -n "${TARGET:-}" ]]; then
|
||||
default_tag="${default_tag}-${TARGET}"
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
Usage: distro=<distro> $0 [--missing] [--no-cache] [--target <name>] [--tag <image>]
|
||||
Usage: PKGMGR_DISTRO=<distro> $0 [options]
|
||||
|
||||
Options:
|
||||
--missing Build only if the image does not already exist
|
||||
--no-cache Build with --no-cache
|
||||
--target <name> Build a specific Dockerfile target (e.g. virgin|full)
|
||||
--tag <image> Override the output image tag (default: ${default_tag})
|
||||
-h, --help Show help
|
||||
Build options:
|
||||
--missing Build only if the image does not already exist (local build only)
|
||||
--no-cache Build with --no-cache
|
||||
--target <name> Build a specific Dockerfile target (e.g. virgin)
|
||||
--tag <image> Override the output image tag (default: ${default_tag})
|
||||
|
||||
Publish options:
|
||||
--push Push the built image (uses docker buildx build --push)
|
||||
--publish Publish semantic tags (latest, <version>, optional stable) + arch aliases
|
||||
--registry <reg> Registry (e.g. ghcr.io)
|
||||
--owner <owner> Registry namespace (e.g. \${GITHUB_REPOSITORY_OWNER})
|
||||
--repo-prefix <name> Image base name (default: pkgmgr)
|
||||
--version <X.Y.Z> Version for --publish
|
||||
--stable <true|false> Whether to publish :stable tags (default: false)
|
||||
|
||||
Notes:
|
||||
- --publish implies --push and requires --registry, --owner, and --version.
|
||||
- Local build (no --push) uses "docker build" and creates local images like "pkgmgr-arch" / "pkgmgr-arch-virgin".
|
||||
EOF
|
||||
}
|
||||
|
||||
@@ -56,18 +57,39 @@ while [[ $# -gt 0 ]]; do
|
||||
--missing) MISSING_ONLY=1; shift ;;
|
||||
--target)
|
||||
TARGET="${2:-}"
|
||||
if [[ -z "${TARGET}" ]]; then
|
||||
echo "ERROR: --target requires a value (e.g. virgin|full)" >&2
|
||||
exit 2
|
||||
fi
|
||||
[[ -n "${TARGET}" ]] || { echo "ERROR: --target requires a value (e.g. virgin)"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--tag)
|
||||
IMAGE_TAG="${2:-}"
|
||||
if [[ -z "${IMAGE_TAG}" ]]; then
|
||||
echo "ERROR: --tag requires a value" >&2
|
||||
exit 2
|
||||
fi
|
||||
[[ -n "${IMAGE_TAG}" ]] || { echo "ERROR: --tag requires a value"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--push) PUSH=1; shift ;;
|
||||
--publish) PUBLISH=1; PUSH=1; shift ;;
|
||||
--registry)
|
||||
REGISTRY="${2:-}"
|
||||
[[ -n "${REGISTRY}" ]] || { echo "ERROR: --registry requires a value"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--owner)
|
||||
OWNER="${2:-}"
|
||||
[[ -n "${OWNER}" ]] || { echo "ERROR: --owner requires a value"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--repo-prefix)
|
||||
REPO_PREFIX="${2:-}"
|
||||
[[ -n "${REPO_PREFIX}" ]] || { echo "ERROR: --repo-prefix requires a value"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--version)
|
||||
VERSION="${2:-}"
|
||||
[[ -n "${VERSION}" ]] || { echo "ERROR: --version requires a value"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--stable)
|
||||
IS_STABLE="${2:-}"
|
||||
[[ -n "${IS_STABLE}" ]] || { echo "ERROR: --stable requires a value (true|false)"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
-h|--help) usage; exit 0 ;;
|
||||
@@ -79,32 +101,61 @@ while [[ $# -gt 0 ]]; do
|
||||
esac
|
||||
done
|
||||
|
||||
# Auto-tag: if --tag not provided, derive from distro (+ target suffix)
|
||||
# Derive default local tag if not provided
|
||||
if [[ -z "${IMAGE_TAG}" ]]; then
|
||||
IMAGE_TAG="pkgmgr-${distro}"
|
||||
IMAGE_TAG="${REPO_PREFIX}-${PKGMGR_DISTRO}"
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
IMAGE_TAG="${IMAGE_TAG}-${TARGET}"
|
||||
fi
|
||||
fi
|
||||
|
||||
BASE_IMAGE="$(resolve_base_image "$distro")"
|
||||
BASE_IMAGE="$(resolve_base_image "$PKGMGR_DISTRO")"
|
||||
|
||||
# Local-only "missing" shortcut
|
||||
if [[ "${MISSING_ONLY}" == "1" ]]; then
|
||||
if [[ "${PUSH}" == "1" ]]; then
|
||||
echo "ERROR: --missing is only supported for local builds (without --push/--publish)" >&2
|
||||
exit 2
|
||||
fi
|
||||
if docker image inspect "${IMAGE_TAG}" >/dev/null 2>&1; then
|
||||
echo "[build] Image already exists: ${IMAGE_TAG} (skipping due to --missing)"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# Validate publish parameters
|
||||
if [[ "${PUBLISH}" == "1" ]]; then
|
||||
[[ -n "${REGISTRY}" ]] || { echo "ERROR: --publish requires --registry"; exit 2; }
|
||||
[[ -n "${OWNER}" ]] || { echo "ERROR: --publish requires --owner"; exit 2; }
|
||||
[[ -n "${VERSION}" ]] || { echo "ERROR: --publish requires --version"; exit 2; }
|
||||
fi
|
||||
|
||||
# Guard: --push without --publish requires fully-qualified --tag
|
||||
if [[ "${PUSH}" == "1" && "${PUBLISH}" != "1" ]]; then
|
||||
if [[ "${IMAGE_TAG}" != */* ]]; then
|
||||
echo "ERROR: --push requires --tag with a fully-qualified name (e.g. ghcr.io/<owner>/<image>:tag), or use --publish" >&2
|
||||
exit 2
|
||||
fi
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[build] Building image: ${IMAGE_TAG}"
|
||||
echo "distro = ${distro}"
|
||||
echo "[build] Building image"
|
||||
echo "distro = ${PKGMGR_DISTRO}"
|
||||
echo "BASE_IMAGE = ${BASE_IMAGE}"
|
||||
if [[ -n "${TARGET}" ]]; then echo "target = ${TARGET}"; fi
|
||||
if [[ "${NO_CACHE}" == "1" ]]; then echo "cache = disabled"; fi
|
||||
if [[ "${PUSH}" == "1" ]]; then echo "push = enabled"; fi
|
||||
if [[ "${PUBLISH}" == "1" ]]; then
|
||||
echo "publish = enabled"
|
||||
echo "registry = ${REGISTRY}"
|
||||
echo "owner = ${OWNER}"
|
||||
echo "version = ${VERSION}"
|
||||
echo "stable = ${IS_STABLE}"
|
||||
fi
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
# Common build args
|
||||
build_args=(--build-arg "BASE_IMAGE=${BASE_IMAGE}")
|
||||
|
||||
if [[ "${NO_CACHE}" == "1" ]]; then
|
||||
@@ -115,6 +166,62 @@ if [[ -n "${TARGET}" ]]; then
|
||||
build_args+=(--target "${TARGET}")
|
||||
fi
|
||||
|
||||
build_args+=(-t "${IMAGE_TAG}" .)
|
||||
compute_publish_tags() {
|
||||
local distro_tag_base="${REGISTRY}/${OWNER}/${REPO_PREFIX}-${PKGMGR_DISTRO}"
|
||||
local alias_tag_base=""
|
||||
|
||||
docker build "${build_args[@]}"
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
distro_tag_base="${distro_tag_base}-${TARGET}"
|
||||
fi
|
||||
|
||||
if [[ "${PKGMGR_DISTRO}" == "${DEFAULT_DISTRO}" ]]; then
|
||||
alias_tag_base="${REGISTRY}/${OWNER}/${REPO_PREFIX}"
|
||||
if [[ -n "${TARGET}" ]]; then
|
||||
alias_tag_base="${alias_tag_base}-${TARGET}"
|
||||
fi
|
||||
fi
|
||||
|
||||
local tags=()
|
||||
tags+=("${distro_tag_base}:latest")
|
||||
tags+=("${distro_tag_base}:${VERSION}")
|
||||
|
||||
if [[ "${IS_STABLE}" == "true" ]]; then
|
||||
tags+=("${distro_tag_base}:stable")
|
||||
fi
|
||||
|
||||
if [[ -n "${alias_tag_base}" ]]; then
|
||||
tags+=("${alias_tag_base}:latest")
|
||||
tags+=("${alias_tag_base}:${VERSION}")
|
||||
if [[ "${IS_STABLE}" == "true" ]]; then
|
||||
tags+=("${alias_tag_base}:stable")
|
||||
fi
|
||||
fi
|
||||
|
||||
printf '%s\n' "${tags[@]}"
|
||||
}
|
||||
|
||||
if [[ "${PUSH}" == "1" ]]; then
|
||||
bx_args=(docker buildx build --push)
|
||||
|
||||
if [[ "${PUBLISH}" == "1" ]]; then
|
||||
while IFS= read -r t; do
|
||||
bx_args+=(-t "$t")
|
||||
done < <(compute_publish_tags)
|
||||
else
|
||||
bx_args+=(-t "${IMAGE_TAG}")
|
||||
fi
|
||||
|
||||
bx_args+=("${build_args[@]}")
|
||||
bx_args+=(.)
|
||||
|
||||
echo "[build] Running: ${bx_args[*]}"
|
||||
"${bx_args[@]}"
|
||||
else
|
||||
local_args=(docker build)
|
||||
local_args+=("${build_args[@]}")
|
||||
local_args+=(-t "${IMAGE_TAG}")
|
||||
local_args+=(.)
|
||||
|
||||
echo "[build] Running: ${local_args[*]}"
|
||||
"${local_args[@]}"
|
||||
fi
|
||||
|
||||
55
scripts/build/publish.sh
Executable file
55
scripts/build/publish.sh
Executable file
@@ -0,0 +1,55 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Publish all distro images (full + virgin) to a registry via image.sh --publish
|
||||
#
|
||||
# Required env:
|
||||
# OWNER (e.g. GITHUB_REPOSITORY_OWNER)
|
||||
# VERSION (e.g. 1.2.3)
|
||||
#
|
||||
# Optional env:
|
||||
# REGISTRY (default: ghcr.io)
|
||||
# IS_STABLE (default: false)
|
||||
# DISTROS (default: "arch debian ubuntu fedora centos")
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
REGISTRY="${REGISTRY:-ghcr.io}"
|
||||
IS_STABLE="${IS_STABLE:-false}"
|
||||
DISTROS="${DISTROS:-arch debian ubuntu fedora centos}"
|
||||
|
||||
: "${OWNER:?Environment variable OWNER must be set (e.g. github.repository_owner)}"
|
||||
: "${VERSION:?Environment variable VERSION must be set (e.g. 1.2.3)}"
|
||||
|
||||
echo "[publish] REGISTRY=${REGISTRY}"
|
||||
echo "[publish] OWNER=${OWNER}"
|
||||
echo "[publish] VERSION=${VERSION}"
|
||||
echo "[publish] IS_STABLE=${IS_STABLE}"
|
||||
echo "[publish] DISTROS=${DISTROS}"
|
||||
|
||||
for d in ${DISTROS}; do
|
||||
echo
|
||||
echo "============================================================"
|
||||
echo "[publish] PKGMGR_DISTRO=${d}"
|
||||
echo "============================================================"
|
||||
|
||||
# virgin
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
--owner "${OWNER}" \
|
||||
--version "${VERSION}" \
|
||||
--stable "${IS_STABLE}" \
|
||||
--target virgin
|
||||
|
||||
# full (default target)
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
--owner "${OWNER}" \
|
||||
--version "${VERSION}" \
|
||||
--stable "${IS_STABLE}"
|
||||
done
|
||||
|
||||
echo
|
||||
echo "[publish] Done."
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
echo "[docker] Starting package-manager container"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@@ -3,22 +3,19 @@ set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
source "${SCRIPT_DIR}/lib.sh"
|
||||
# shellcheck disable=SC1091
|
||||
source "${SCRIPT_DIR}/os_resolver.sh"
|
||||
|
||||
OS_ID="$(detect_os_id)"
|
||||
OS_ID="$(osr_get_os_id)"
|
||||
|
||||
echo "[run-dependencies] Detected OS: ${OS_ID}"
|
||||
|
||||
case "${OS_ID}" in
|
||||
arch|debian|ubuntu|fedora|centos)
|
||||
DEP_SCRIPT="${SCRIPT_DIR}/${OS_ID}/dependencies.sh"
|
||||
;;
|
||||
*)
|
||||
echo "[run-dependencies] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
if ! osr_is_supported "${OS_ID}"; then
|
||||
echo "[run-dependencies] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DEP_SCRIPT="$(osr_script_path_for "${SCRIPT_DIR}" "${OS_ID}" "dependencies")"
|
||||
|
||||
if [[ ! -f "${DEP_SCRIPT}" ]]; then
|
||||
echo "[run-dependencies] Dependency script not found: ${DEP_SCRIPT}"
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
detect_os_id() {
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "${ID:-unknown}"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
}
|
||||
82
scripts/installation/os_resolver.sh
Executable file
82
scripts/installation/os_resolver.sh
Executable file
@@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# OsResolver (bash "class-style" module)
|
||||
# Centralizes OS detection + normalization + supported checks + script paths.
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
osr_detect_raw_id() {
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "${ID:-unknown}"
|
||||
else
|
||||
echo "unknown"
|
||||
fi
|
||||
}
|
||||
|
||||
osr_detect_id_like() {
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "${ID_LIKE:-}"
|
||||
else
|
||||
echo ""
|
||||
fi
|
||||
}
|
||||
|
||||
osr_normalize_id() {
|
||||
local raw="${1:-unknown}"
|
||||
local like="${2:-}"
|
||||
|
||||
# Explicit mapping first (your bugfix: manjaro -> arch everywhere)
|
||||
case "${raw}" in
|
||||
manjaro) echo "arch"; return 0 ;;
|
||||
esac
|
||||
|
||||
# Keep direct IDs when they are already supported
|
||||
case "${raw}" in
|
||||
arch|debian|ubuntu|fedora|centos) echo "${raw}"; return 0 ;;
|
||||
esac
|
||||
|
||||
# Fallback mapping via ID_LIKE for better portability
|
||||
# Example: many Arch derivatives expose ID_LIKE="arch"
|
||||
if [[ " ${like} " == *" arch "* ]]; then
|
||||
echo "arch"; return 0
|
||||
fi
|
||||
if [[ " ${like} " == *" debian "* ]]; then
|
||||
echo "debian"; return 0
|
||||
fi
|
||||
if [[ " ${like} " == *" fedora "* ]]; then
|
||||
echo "fedora"; return 0
|
||||
fi
|
||||
if [[ " ${like} " == *" rhel "* || " ${like} " == *" centos "* ]]; then
|
||||
echo "centos"; return 0
|
||||
fi
|
||||
|
||||
echo "${raw}"
|
||||
}
|
||||
|
||||
osr_get_os_id() {
|
||||
local raw like
|
||||
raw="$(osr_detect_raw_id)"
|
||||
like="$(osr_detect_id_like)"
|
||||
osr_normalize_id "${raw}" "${like}"
|
||||
}
|
||||
|
||||
osr_is_supported() {
|
||||
local id="${1:-unknown}"
|
||||
case "${id}" in
|
||||
arch|debian|ubuntu|fedora|centos) return 0 ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
}
|
||||
|
||||
osr_script_path_for() {
|
||||
local script_dir="${1:?script_dir required}"
|
||||
local os_id="${2:?os_id required}"
|
||||
local kind="${3:?kind required}" # "dependencies" or "package"
|
||||
|
||||
echo "${script_dir}/${os_id}/${kind}.sh"
|
||||
}
|
||||
@@ -3,28 +3,19 @@ set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
source "${SCRIPT_DIR}/lib.sh"
|
||||
# shellcheck disable=SC1091
|
||||
source "${SCRIPT_DIR}/os_resolver.sh"
|
||||
|
||||
OS_ID="$(detect_os_id)"
|
||||
|
||||
# Map Manjaro to Arch
|
||||
if [[ "${OS_ID}" == "manjaro" ]]; then
|
||||
echo "[package] Mapping OS 'manjaro' → 'arch'"
|
||||
OS_ID="arch"
|
||||
fi
|
||||
OS_ID="$(osr_get_os_id)"
|
||||
|
||||
echo "[package] Detected OS: ${OS_ID}"
|
||||
|
||||
case "${OS_ID}" in
|
||||
arch|debian|ubuntu|fedora|centos)
|
||||
PKG_SCRIPT="${SCRIPT_DIR}/${OS_ID}/package.sh"
|
||||
;;
|
||||
*)
|
||||
echo "[package] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
if ! osr_is_supported "${OS_ID}"; then
|
||||
echo "[package] Unsupported OS: ${OS_ID}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PKG_SCRIPT="$(osr_script_path_for "${SCRIPT_DIR}" "${OS_ID}" "package")"
|
||||
|
||||
if [[ ! -f "${PKG_SCRIPT}" ]]; then
|
||||
echo "[package] Package script not found: ${PKG_SCRIPT}"
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Ensure NIX_CONFIG has our defaults if not already set
|
||||
if [[ -z "${NIX_CONFIG:-}" ]]; then
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
fi
|
||||
|
||||
FLAKE_DIR="/usr/lib/package-manager"
|
||||
NIX_LIB_DIR="${FLAKE_DIR}/nix/lib"
|
||||
RETRY_LIB="${NIX_LIB_DIR}/retry_403.sh"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hard requirement: retry helper must exist (fail if missing)
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ ! -f "${RETRY_LIB}" ]]; then
|
||||
echo "[launcher] ERROR: Required retry helper not found: ${RETRY_LIB}" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Try to ensure that "nix" is on PATH (common locations + container user)
|
||||
@@ -37,12 +42,16 @@ if ! command -v nix >/dev/null 2>&1; then
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Primary path: use Nix flake if available
|
||||
# Primary path: use Nix flake if available (with GitHub 403 retry)
|
||||
# ---------------------------------------------------------------------------
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
if declare -F run_with_github_403_retry >/dev/null; then
|
||||
# shellcheck source=./scripts/nix/lib/retry_403.sh
|
||||
source "${RETRY_LIB}"
|
||||
exec run_with_github_403_retry nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
else
|
||||
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
fi
|
||||
|
||||
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
|
||||
echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
|
||||
echo "[launcher] ERROR: 'nix' binary not found on PATH after init."
|
||||
echo "[launcher] Nix is required to run pkgmgr (no Python fallback)."
|
||||
exit 1
|
||||
@@ -22,7 +22,7 @@ It is invoked during package installation (Arch/Debian/Fedora scriptlets) and ca
|
||||
|
||||
The entry point sources small, focused modules from *scripts/nix/lib/*:
|
||||
|
||||
- *config.sh* — configuration defaults (installer URL, retry timing)
|
||||
- *bootstrap_config.sh* — configuration defaults (installer URL, retry timing)
|
||||
- *detect.sh* — container detection helpers
|
||||
- *path.sh* — PATH adjustments and `nix` binary resolution helpers
|
||||
- *symlinks.sh* — user/global symlink helpers for stable `nix` discovery
|
||||
|
||||
@@ -1,22 +1,29 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# shellcheck source=lib/config.sh
|
||||
# shellcheck source=lib/detect.sh
|
||||
# shellcheck source=lib/path.sh
|
||||
# shellcheck source=lib/symlinks.sh
|
||||
# shellcheck source=lib/users.sh
|
||||
# shellcheck source=lib/install.sh
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
source "${SCRIPT_DIR}/lib/config.sh"
|
||||
# shellcheck source=./scripts/nix/lib/bootstrap_config.sh
|
||||
source "${SCRIPT_DIR}/lib/bootstrap_config.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/detect.sh
|
||||
source "${SCRIPT_DIR}/lib/detect.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/path.sh
|
||||
source "${SCRIPT_DIR}/lib/path.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/symlinks.sh
|
||||
source "${SCRIPT_DIR}/lib/symlinks.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/users.sh
|
||||
source "${SCRIPT_DIR}/lib/users.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/install.sh
|
||||
source "${SCRIPT_DIR}/lib/install.sh"
|
||||
|
||||
# shellcheck source=./scripts/nix/lib/nix_conf_file.sh
|
||||
source "${SCRIPT_DIR}/lib/nix_conf_file.sh"
|
||||
|
||||
echo "[init-nix] Starting Nix initialization..."
|
||||
|
||||
main() {
|
||||
@@ -26,6 +33,7 @@ main() {
|
||||
ensure_nix_on_path
|
||||
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
nixconf_ensure_experimental_features
|
||||
ensure_global_nix_symlinks "$(resolve_nix_bin 2>/dev/null || true)"
|
||||
else
|
||||
ensure_user_nix_symlink "$(resolve_nix_bin 2>/dev/null || true)"
|
||||
@@ -106,6 +114,10 @@ main() {
|
||||
# -------------------------------------------------------------------------
|
||||
ensure_nix_on_path
|
||||
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
nixconf_ensure_experimental_features
|
||||
fi
|
||||
|
||||
local nix_bin_post
|
||||
nix_bin_post="$(resolve_nix_bin 2>/dev/null || true)"
|
||||
|
||||
|
||||
0
scripts/nix/lib/config.sh → scripts/nix/lib/bootstrap_config.sh
Normal file → Executable file
0
scripts/nix/lib/config.sh → scripts/nix/lib/bootstrap_config.sh
Normal file → Executable file
0
scripts/nix/lib/detect.sh
Normal file → Executable file
0
scripts/nix/lib/detect.sh
Normal file → Executable file
0
scripts/nix/lib/install.sh
Normal file → Executable file
0
scripts/nix/lib/install.sh
Normal file → Executable file
89
scripts/nix/lib/nix_conf_file.sh
Normal file
89
scripts/nix/lib/nix_conf_file.sh
Normal file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Prevent double-sourcing
|
||||
if [[ -n "${PKGMGR_NIX_CONF_FILE_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_CONF_FILE_SH=1
|
||||
|
||||
nixconf_file_path() {
|
||||
echo "/etc/nix/nix.conf"
|
||||
}
|
||||
|
||||
# Ensure a given nix.conf key contains required tokens (merged, no duplicates)
|
||||
nixconf_ensure_features_key() {
|
||||
local nix_conf="$1"
|
||||
local key="$2"
|
||||
shift 2
|
||||
local required=("$@")
|
||||
|
||||
mkdir -p /etc/nix
|
||||
|
||||
# Create file if missing (with just the required tokens)
|
||||
if [[ ! -f "${nix_conf}" ]]; then
|
||||
local want="${key} = ${required[*]}"
|
||||
echo "[nix-conf] Creating ${nix_conf} with: ${want}"
|
||||
printf "%s\n" "${want}" >"${nix_conf}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Key exists -> merge tokens
|
||||
if grep -qE "^\s*${key}\s*=" "${nix_conf}"; then
|
||||
local ok=1
|
||||
local t
|
||||
for t in "${required[@]}"; do
|
||||
if ! grep -qE "^\s*${key}\s*=.*\b${t}\b" "${nix_conf}"; then
|
||||
ok=0
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$ok" -eq 1 ]]; then
|
||||
echo "[nix-conf] ${key} already correct"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "[nix-conf] Extending ${key} in ${nix_conf}"
|
||||
|
||||
local current
|
||||
current="$(grep -E "^\s*${key}\s*=" "${nix_conf}" | head -n1 | cut -d= -f2-)"
|
||||
current="$(echo "${current}" | xargs)" # trim
|
||||
|
||||
local merged=""
|
||||
local token
|
||||
|
||||
# Start with existing tokens
|
||||
for token in ${current}; do
|
||||
if [[ " ${merged} " != *" ${token} "* ]]; then
|
||||
merged="${merged} ${token}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Add required tokens
|
||||
for token in "${required[@]}"; do
|
||||
if [[ " ${merged} " != *" ${token} "* ]]; then
|
||||
merged="${merged} ${token}"
|
||||
fi
|
||||
done
|
||||
|
||||
merged="$(echo "${merged}" | xargs)" # trim
|
||||
|
||||
sed -i "s|^\s*${key}\s*=.*|${key} = ${merged}|" "${nix_conf}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Key missing -> append
|
||||
local want="${key} = ${required[*]}"
|
||||
echo "[nix-conf] Appending to ${nix_conf}: ${want}"
|
||||
printf "\n%s\n" "${want}" >>"${nix_conf}"
|
||||
}
|
||||
|
||||
nixconf_ensure_experimental_features() {
|
||||
local nix_conf
|
||||
nix_conf="$(nixconf_file_path)"
|
||||
|
||||
# Ensure both keys to avoid prompts and cover older/alternate expectations
|
||||
nixconf_ensure_features_key "${nix_conf}" "experimental-features" "nix-command" "flakes"
|
||||
nixconf_ensure_features_key "${nix_conf}" "extra-experimental-features" "nix-command" "flakes"
|
||||
}
|
||||
0
scripts/nix/lib/path.sh
Normal file → Executable file
0
scripts/nix/lib/path.sh
Normal file → Executable file
52
scripts/nix/lib/retry_403.sh
Executable file
52
scripts/nix/lib/retry_403.sh
Executable file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -n "${PKGMGR_NIX_RETRY_403_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_RETRY_403_SH=1
|
||||
|
||||
# Retry only when we see the GitHub API rate limit 403 error during nix flake evaluation.
|
||||
# Retries 7 times with delays: 10, 30, 50, 80, 130, 210, 420 seconds.
|
||||
run_with_github_403_retry() {
|
||||
local -a delays=(10 30 50 80 130 210 420)
|
||||
local attempt=0
|
||||
local max_retries="${#delays[@]}"
|
||||
|
||||
while true; do
|
||||
local err tmp
|
||||
tmp="$(mktemp -t nix-err.XXXXXX)"
|
||||
err=0
|
||||
|
||||
# Run the command; capture stderr for inspection while preserving stdout.
|
||||
if "$@" 2>"$tmp"; then
|
||||
rm -f "$tmp"
|
||||
return 0
|
||||
else
|
||||
err=$?
|
||||
fi
|
||||
|
||||
# Only retry on the specific GitHub API rate limit 403 case.
|
||||
if grep -qE 'HTTP error 403' "$tmp" && grep -qiE 'API rate limit exceeded|api\.github\.com' "$tmp"; then
|
||||
if (( attempt >= max_retries )); then
|
||||
cat "$tmp" >&2
|
||||
rm -f "$tmp"
|
||||
return "$err"
|
||||
fi
|
||||
|
||||
local sleep_s="${delays[$attempt]}"
|
||||
attempt=$((attempt + 1))
|
||||
|
||||
echo "[nix-retry] GitHub API rate-limit (403). Retry ${attempt}/${max_retries} in ${sleep_s}s: $*" >&2
|
||||
cat "$tmp" >&2
|
||||
rm -f "$tmp"
|
||||
sleep "$sleep_s"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Not our retry case -> fail fast with original stderr.
|
||||
cat "$tmp" >&2
|
||||
rm -f "$tmp"
|
||||
return "$err"
|
||||
done
|
||||
}
|
||||
0
scripts/nix/lib/symlinks.sh
Normal file → Executable file
0
scripts/nix/lib/symlinks.sh
Normal file → Executable file
0
scripts/nix/lib/users.sh
Normal file → Executable file
0
scripts/nix/lib/users.sh
Normal file → Executable file
@@ -1,9 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Nix shell mode: do not touch venv, only run main.py install
|
||||
# Nix shell mode: do not touch venv, only run install
|
||||
# ------------------------------------------------------------
|
||||
|
||||
echo "[setup] Nix mode enabled (NIX_ENABLED=1)."
|
||||
echo "[setup] Skipping virtualenv creation and dependency installation."
|
||||
echo "[setup] Running main.py install via system python3..."
|
||||
python3 main.py install
|
||||
echo "[setup] Running install via system python3..."
|
||||
python3 -m pkgmgr install
|
||||
echo "[setup] Setup finished (Nix mode)."
|
||||
|
||||
@@ -7,6 +7,7 @@ PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
cd "${PROJECT_ROOT}"
|
||||
|
||||
VENV_DIR="${HOME}/.venvs/pkgmgr"
|
||||
# shellcheck disable=SC2016
|
||||
RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/activate"; if [ -n "${PS1:-}" ]; then echo "Global Python virtual environment '\''~/.venvs/pkgmgr'\'' activated."; fi; fi'
|
||||
|
||||
# ------------------------------------------------------------
|
||||
@@ -15,9 +16,6 @@ RC_LINE='if [ -d "${HOME}/.venvs/pkgmgr" ]; then . "${HOME}/.venvs/pkgmgr/bin/ac
|
||||
|
||||
echo "[setup] Running in normal user mode (developer setup)."
|
||||
|
||||
echo "[setup] Ensuring main.py is executable..."
|
||||
chmod +x main.py || true
|
||||
|
||||
echo "[setup] Ensuring global virtualenv root: ${HOME}/.venvs"
|
||||
mkdir -p "${HOME}/.venvs"
|
||||
|
||||
@@ -90,8 +88,8 @@ for rc in "${HOME}/.bashrc" "${HOME}/.zshrc"; do
|
||||
fi
|
||||
done
|
||||
|
||||
echo "[setup] Running main.py install via venv Python..."
|
||||
"${VENV_DIR}/bin/python" main.py install
|
||||
echo "[setup] Running install via venv Python..."
|
||||
"${VENV_DIR}/bin/python" -m pkgmgr install
|
||||
|
||||
echo
|
||||
echo "[setup] Developer setup complete."
|
||||
|
||||
@@ -2,17 +2,17 @@
|
||||
set -euo pipefail
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Running E2E tests: $distro"
|
||||
echo ">>> Running E2E tests: $PKGMGR_DISTRO"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_store_${distro}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
--workdir /src \
|
||||
"pkgmgr-${distro}" \
|
||||
"pkgmgr-${PKGMGR_DISTRO}" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
@@ -49,7 +49,7 @@ docker run --rm \
|
||||
# Gitdir path shown in the "dubious ownership" error
|
||||
git config --global --add safe.directory /src/.git || true
|
||||
# Ephemeral CI containers: allow all paths as a last resort
|
||||
git config --global --add safe.directory '*' || true
|
||||
git config --global --add safe.directory "*" || true
|
||||
fi
|
||||
|
||||
# Run the E2E tests inside the Nix development shell
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
IMAGE="pkgmgr-${distro}"
|
||||
IMAGE="pkgmgr-${PKGMGR_DISTRO}"
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Running Nix flake-only test in ${distro} container"
|
||||
echo ">>> Running Nix flake-only test in ${PKGMGR_DISTRO} container"
|
||||
echo ">>> Image: ${IMAGE}"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_store_${distro}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
--workdir /src \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
"${IMAGE}" \
|
||||
@@ -27,7 +27,7 @@ docker run --rm \
|
||||
echo ">>> preflight: nix must exist in image"
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "NO_NIX"
|
||||
echo "ERROR: nix not found in image '\'''"${IMAGE}"''\'' (distro='"${distro}"')"
|
||||
echo "ERROR: nix not found in image '"${IMAGE}"' (PKGMGR_DISTRO='"${PKGMGR_DISTRO}"')"
|
||||
echo "HINT: Ensure Nix is installed during image build for this distro."
|
||||
exit 1
|
||||
fi
|
||||
@@ -35,14 +35,28 @@ docker run --rm \
|
||||
echo ">>> nix version"
|
||||
nix --version
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Retry helper for GitHub API rate-limit (HTTP 403)
|
||||
# ------------------------------------------------------------
|
||||
if [[ -f /src/scripts/nix/lib/retry_403.sh ]]; then
|
||||
# shellcheck source=./scripts/nix/lib/retry_403.sh
|
||||
source /src/scripts/nix/lib/retry_403.sh
|
||||
elif [[ -f ./scripts/nix/lib/retry_403.sh ]]; then
|
||||
# shellcheck source=./scripts/nix/lib/retry_403.sh
|
||||
source ./scripts/nix/lib/retry_403.sh
|
||||
else
|
||||
echo "ERROR: retry helper not found: scripts/nix/lib/retry_403.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ">>> nix flake show"
|
||||
nix flake show . --no-write-lock-file >/dev/null
|
||||
run_with_github_403_retry nix flake show . --no-write-lock-file >/dev/null
|
||||
|
||||
echo ">>> nix build .#default"
|
||||
nix build .#default --no-link --no-write-lock-file
|
||||
run_with_github_403_retry nix build .#default --no-link --no-write-lock-file
|
||||
|
||||
echo ">>> nix run .#pkgmgr -- --help"
|
||||
nix run .#pkgmgr -- --help --no-write-lock-file
|
||||
run_with_github_403_retry nix run .#pkgmgr -- --help --no-write-lock-file
|
||||
|
||||
echo ">>> OK: Nix flake-only test succeeded."
|
||||
'
|
||||
|
||||
@@ -1,32 +1,49 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
IMAGE="pkgmgr-$distro"
|
||||
IMAGE="pkgmgr-${PKGMGR_DISTRO}"
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo ">>> Testing VENV: $IMAGE"
|
||||
echo ">>> Testing VENV: ${IMAGE}"
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
echo "[test-env-virtual] Inspect image metadata:"
|
||||
docker image inspect "$IMAGE" | sed -n '1,40p'
|
||||
|
||||
echo "[test-env-virtual] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
|
||||
docker image inspect "${IMAGE}" | sed -n '1,40p'
|
||||
echo
|
||||
|
||||
# Run the command and capture the output
|
||||
# ------------------------------------------------------------
|
||||
# Run VENV-based pkgmgr test inside container
|
||||
# ------------------------------------------------------------
|
||||
if OUTPUT=$(docker run --rm \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-v pkgmgr_nix_store_${distro}:/nix \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
"$IMAGE" 2>&1); then
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-v "$(pwd):/src" \
|
||||
-w /src \
|
||||
"${IMAGE}" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
echo "[test-env-virtual] Installing pkgmgr (distro package)..."
|
||||
make install
|
||||
|
||||
echo "[test-env-virtual] Setting up Python venv..."
|
||||
make setup-venv
|
||||
|
||||
echo "[test-env-virtual] Activating venv..."
|
||||
. "$HOME/.venvs/pkgmgr/bin/activate"
|
||||
|
||||
echo "[test-env-virtual] Using pkgmgr from:"
|
||||
command -v pkgmgr
|
||||
pkgmgr --help
|
||||
' 2>&1); then
|
||||
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-env-virtual] SUCCESS: $IMAGE responded to 'pkgmgr --help'"
|
||||
echo "[test-env-virtual] SUCCESS: venv-based pkgmgr works in ${IMAGE}"
|
||||
|
||||
else
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-env-virtual] ERROR: $IMAGE failed to run 'pkgmgr --help'"
|
||||
echo "[test-env-virtual] ERROR: venv-based pkgmgr failed in ${IMAGE}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -2,17 +2,17 @@
|
||||
set -euo pipefail
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Running INTEGRATION tests in ${distro} container"
|
||||
echo ">>> Running INTEGRATION tests in ${PKGMGR_DISTRO} container"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v pkgmgr_nix_store_${distro}:/nix \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
--workdir /src \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
"pkgmgr-${distro}" \
|
||||
"pkgmgr-${PKGMGR_DISTRO}" \
|
||||
bash -lc '
|
||||
set -e;
|
||||
git config --global --add safe.directory /src || true;
|
||||
|
||||
@@ -2,17 +2,17 @@
|
||||
set -euo pipefail
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Running UNIT tests in ${distro} container"
|
||||
echo ">>> Running UNIT tests in ${PKGMGR_DISTRO} container"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
-v pkgmgr_nix_store_${distro}:/nix \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
--workdir /src \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
"pkgmgr-${distro}" \
|
||||
"pkgmgr-${PKGMGR_DISTRO}" \
|
||||
bash -lc '
|
||||
set -e;
|
||||
git config --global --add safe.directory /src || true;
|
||||
|
||||
@@ -19,12 +19,20 @@ fi
|
||||
# ------------------------------------------------------------
|
||||
# Remove auto-activation lines from shell RC files
|
||||
# ------------------------------------------------------------
|
||||
RC_PATTERN='\.venvs\/pkgmgr\/bin\/activate"; if \[ -n "\$${PS1:-}" \]; then echo "Global Python virtual environment '\''~\/\.venvs\/pkgmgr'\'' activated."; fi; fi'
|
||||
# Matches:
|
||||
# ~/.venvs/pkgmgr/bin/activate
|
||||
# ./.venvs/pkgmgr/bin/activate
|
||||
RC_PATTERN='(\./)?\.venvs/pkgmgr/bin/activate'
|
||||
|
||||
echo "[uninstall] Cleaning up ~/.bashrc and ~/.zshrc entries..."
|
||||
for rc in "$HOME/.bashrc" "$HOME/.zshrc"; do
|
||||
if [[ -f "$rc" ]]; then
|
||||
sed -i "/$RC_PATTERN/d" "$rc"
|
||||
# Remove activation lines (functional)
|
||||
sed -E -i "/$RC_PATTERN/d" "$rc"
|
||||
|
||||
# Remove leftover echo / cosmetic lines referencing pkgmgr venv
|
||||
sed -i '/\.venvs\/pkgmgr/d' "$rc"
|
||||
|
||||
echo "[uninstall] Cleaned $rc"
|
||||
else
|
||||
echo "[uninstall] File not found: $rc (skipped)"
|
||||
|
||||
5
src/pkgmgr/__main__.py
Executable file
5
src/pkgmgr/__main__.py
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
from pkgmgr.cli import main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,5 +1,6 @@
|
||||
import yaml
|
||||
import os
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
|
||||
def interactive_add(config,USER_CONFIG_PATH:str):
|
||||
"""Interactively prompt the user to add a new repository entry to the user config."""
|
||||
|
||||
@@ -45,7 +45,7 @@ def config_init(
|
||||
# Announce where we will write the result
|
||||
# ------------------------------------------------------------
|
||||
print("============================================================")
|
||||
print(f"[INIT] Writing user configuration to:")
|
||||
print("[INIT] Writing user configuration to:")
|
||||
print(f" {user_config_path}")
|
||||
print("============================================================")
|
||||
|
||||
@@ -53,7 +53,7 @@ def config_init(
|
||||
defaults_config["directories"]["repositories"]
|
||||
)
|
||||
|
||||
print(f"[INIT] Scanning repository base directory:")
|
||||
print("[INIT] Scanning repository base directory:")
|
||||
print(f" {repositories_base_dir}")
|
||||
print("")
|
||||
|
||||
@@ -173,7 +173,7 @@ def config_init(
|
||||
if new_entries:
|
||||
user_config.setdefault("repositories", []).extend(new_entries)
|
||||
save_user_config(user_config, user_config_path)
|
||||
print(f"[SAVE] Wrote user configuration to:")
|
||||
print("[SAVE] Wrote user configuration to:")
|
||||
print(f" {user_config_path}")
|
||||
else:
|
||||
print("[INFO] No new repositories were added.")
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# src/pkgmgr/actions/install/__init__.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
@@ -27,7 +28,7 @@ from pkgmgr.actions.install.installers.os_packages import (
|
||||
DebianControlInstaller,
|
||||
RpmSpecInstaller,
|
||||
)
|
||||
from pkgmgr.actions.install.installers.nix_flake import (
|
||||
from pkgmgr.actions.install.installers.nix import (
|
||||
NixFlakeInstaller,
|
||||
)
|
||||
from pkgmgr.actions.install.installers.python import PythonInstaller
|
||||
@@ -36,10 +37,8 @@ from pkgmgr.actions.install.installers.makefile import (
|
||||
)
|
||||
from pkgmgr.actions.install.pipeline import InstallationPipeline
|
||||
|
||||
|
||||
Repository = Dict[str, Any]
|
||||
|
||||
# All available installers, in the order they should be considered.
|
||||
INSTALLERS = [
|
||||
ArchPkgbuildInstaller(),
|
||||
DebianControlInstaller(),
|
||||
@@ -50,11 +49,6 @@ INSTALLERS = [
|
||||
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _ensure_repo_dir(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
@@ -74,7 +68,7 @@ def _ensure_repo_dir(
|
||||
if not os.path.exists(repo_dir):
|
||||
print(
|
||||
f"Repository directory '{repo_dir}' does not exist. "
|
||||
f"Cloning it now..."
|
||||
"Cloning it now..."
|
||||
)
|
||||
clone_repos(
|
||||
[repo],
|
||||
@@ -87,7 +81,7 @@ def _ensure_repo_dir(
|
||||
if not os.path.exists(repo_dir):
|
||||
print(
|
||||
f"Cloning failed for repository {identifier}. "
|
||||
f"Skipping installation."
|
||||
"Skipping installation."
|
||||
)
|
||||
return None
|
||||
|
||||
@@ -137,6 +131,7 @@ def _create_context(
|
||||
quiet: bool,
|
||||
clone_mode: str,
|
||||
update_dependencies: bool,
|
||||
force_update: bool,
|
||||
) -> RepoContext:
|
||||
"""
|
||||
Build a RepoContext instance for the given repository.
|
||||
@@ -153,14 +148,10 @@ def _create_context(
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
force_update=force_update,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def install_repos(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
@@ -171,10 +162,14 @@ def install_repos(
|
||||
quiet: bool,
|
||||
clone_mode: str,
|
||||
update_dependencies: bool,
|
||||
force_update: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Install one or more repositories according to the configured installers
|
||||
and the CLI layer precedence rules.
|
||||
|
||||
If force_update=True, installers of the currently active layer are allowed
|
||||
to run again (upgrade/refresh), even if that layer is already loaded.
|
||||
"""
|
||||
pipeline = InstallationPipeline(INSTALLERS)
|
||||
|
||||
@@ -213,6 +208,7 @@ def install_repos(
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
force_update=force_update,
|
||||
)
|
||||
|
||||
pipeline.run(ctx)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# src/pkgmgr/actions/install/context.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
@@ -28,3 +29,6 @@ class RepoContext:
|
||||
quiet: bool
|
||||
clone_mode: str
|
||||
update_dependencies: bool
|
||||
|
||||
# If True, allow re-running installers of the currently active layer.
|
||||
force_update: bool = False
|
||||
|
||||
@@ -9,7 +9,7 @@ pkgmgr.actions.install.installers.
|
||||
"""
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.nix import NixFlakeInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.python import PythonInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.makefile import MakefileInstaller # noqa: F401
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# src/pkgmgr/actions/install/installers/makefile.py
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
@@ -9,89 +10,45 @@ from pkgmgr.core.command.run import run_command
|
||||
|
||||
|
||||
class MakefileInstaller(BaseInstaller):
|
||||
"""
|
||||
Generic installer that runs `make install` if a Makefile with an
|
||||
install target is present.
|
||||
|
||||
Safety rules:
|
||||
- If PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 is set, this installer
|
||||
is globally disabled.
|
||||
- The higher-level InstallationPipeline ensures that Makefile
|
||||
installation does not run if a stronger CLI layer already owns
|
||||
the command (e.g. Nix or OS packages).
|
||||
"""
|
||||
|
||||
layer = "makefile"
|
||||
MAKEFILE_NAME = "Makefile"
|
||||
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
"""
|
||||
Return True if this repository has a Makefile and the installer
|
||||
is not globally disabled.
|
||||
"""
|
||||
# Optional global kill switch.
|
||||
if os.environ.get("PKGMGR_DISABLE_MAKEFILE_INSTALLER") == "1":
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
"[INFO] MakefileInstaller is disabled via "
|
||||
"PKGMGR_DISABLE_MAKEFILE_INSTALLER."
|
||||
)
|
||||
print("[INFO] PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 – skipping MakefileInstaller.")
|
||||
return False
|
||||
|
||||
makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
|
||||
return os.path.exists(makefile_path)
|
||||
|
||||
def _has_install_target(self, makefile_path: str) -> bool:
|
||||
"""
|
||||
Heuristically check whether the Makefile defines an install target.
|
||||
|
||||
We look for:
|
||||
|
||||
- a plain 'install:' target, or
|
||||
- any 'install-*:' style target.
|
||||
"""
|
||||
try:
|
||||
with open(makefile_path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
content = f.read()
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
# Simple heuristics: look for "install:" or targets starting with "install-"
|
||||
if re.search(r"^install\s*:", content, flags=re.MULTILINE):
|
||||
return True
|
||||
|
||||
if re.search(r"^install-[a-zA-Z0-9_-]*\s*:", content, flags=re.MULTILINE):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Execute `make install` in the repository directory if an install
|
||||
target exists.
|
||||
"""
|
||||
makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
|
||||
|
||||
if not os.path.exists(makefile_path):
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] Makefile '{makefile_path}' not found, "
|
||||
"skipping MakefileInstaller."
|
||||
)
|
||||
return
|
||||
|
||||
if not self._has_install_target(makefile_path):
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] No 'install' target found in {makefile_path}."
|
||||
)
|
||||
print(f"[pkgmgr] No 'install' target found in {makefile_path}.")
|
||||
return
|
||||
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running 'make install' in {ctx.repo_dir} "
|
||||
f"(MakefileInstaller)"
|
||||
)
|
||||
print(f"[pkgmgr] Running make install for {ctx.identifier} (MakefileInstaller)")
|
||||
|
||||
cmd = "make install"
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
run_command("make install", cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
if ctx.force_update and not ctx.quiet:
|
||||
print(f"[makefile] repo '{ctx.identifier}' successfully upgraded.")
|
||||
|
||||
4
src/pkgmgr/actions/install/installers/nix/__init__.py
Normal file
4
src/pkgmgr/actions/install/installers/nix/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from .installer import NixFlakeInstaller
|
||||
from .retry import RetryPolicy
|
||||
|
||||
__all__ = ["NixFlakeInstaller", "RetryPolicy"]
|
||||
100
src/pkgmgr/actions/install/installers/nix/conflicts.py
Normal file
100
src/pkgmgr/actions/install/installers/nix/conflicts.py
Normal file
@@ -0,0 +1,100 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, List
|
||||
|
||||
from .profile import NixProfileInspector
|
||||
from .retry import GitHubRateLimitRetry
|
||||
from .runner import CommandRunner
|
||||
from .textparse import NixConflictTextParser
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class NixConflictResolver:
|
||||
"""
|
||||
Resolves nix profile file conflicts by:
|
||||
1. Parsing conflicting store paths from stderr
|
||||
2. Mapping them to profile remove tokens via `nix profile list --json`
|
||||
3. Removing those tokens deterministically
|
||||
4. Retrying install
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
runner: CommandRunner,
|
||||
retry: GitHubRateLimitRetry,
|
||||
profile: NixProfileInspector,
|
||||
) -> None:
|
||||
self._runner = runner
|
||||
self._retry = retry
|
||||
self._profile = profile
|
||||
self._parser = NixConflictTextParser()
|
||||
|
||||
def resolve(
|
||||
self,
|
||||
ctx: "RepoContext",
|
||||
install_cmd: str,
|
||||
stdout: str,
|
||||
stderr: str,
|
||||
*,
|
||||
output: str,
|
||||
max_rounds: int = 10,
|
||||
) -> bool:
|
||||
quiet = bool(getattr(ctx, "quiet", False))
|
||||
combined = f"{stdout}\n{stderr}"
|
||||
|
||||
for _ in range(max_rounds):
|
||||
# 1) Extract conflicting store prefixes from nix error output
|
||||
store_prefixes = self._parser.existing_store_prefixes(combined)
|
||||
|
||||
# 2) Resolve them to concrete remove tokens
|
||||
tokens: List[str] = self._profile.find_remove_tokens_for_store_prefixes(
|
||||
ctx,
|
||||
self._runner,
|
||||
store_prefixes,
|
||||
)
|
||||
|
||||
# 3) Fallback: output-name based lookup (also covers nix suggesting: `nix profile remove pkgmgr`)
|
||||
if not tokens:
|
||||
tokens = self._profile.find_remove_tokens_for_output(ctx, self._runner, output)
|
||||
|
||||
if tokens:
|
||||
if not quiet:
|
||||
print(
|
||||
"[nix] conflict detected; removing existing profile entries: "
|
||||
+ ", ".join(tokens)
|
||||
)
|
||||
|
||||
for t in tokens:
|
||||
# tokens may contain things like "pkgmgr" or "pkgmgr-1" or quoted tokens (we keep raw)
|
||||
self._runner.run(ctx, f"nix profile remove {t}", allow_failure=True)
|
||||
|
||||
res = self._retry.run_with_retry(ctx, self._runner, install_cmd)
|
||||
if res.returncode == 0:
|
||||
return True
|
||||
|
||||
combined = f"{res.stdout}\n{res.stderr}"
|
||||
continue
|
||||
|
||||
# 4) Last-resort fallback: use textual remove tokens from stderr (“nix profile remove X”)
|
||||
tokens = self._parser.remove_tokens(combined)
|
||||
if tokens:
|
||||
if not quiet:
|
||||
print("[nix] fallback remove tokens: " + ", ".join(tokens))
|
||||
|
||||
for t in tokens:
|
||||
self._runner.run(ctx, f"nix profile remove {t}", allow_failure=True)
|
||||
|
||||
res = self._retry.run_with_retry(ctx, self._runner, install_cmd)
|
||||
if res.returncode == 0:
|
||||
return True
|
||||
|
||||
combined = f"{res.stdout}\n{res.stderr}"
|
||||
continue
|
||||
|
||||
if not quiet:
|
||||
print("[nix] conflict detected but could not resolve profile entries to remove.")
|
||||
return False
|
||||
|
||||
return False
|
||||
229
src/pkgmgr/actions/install/installers/nix/installer.py
Normal file
229
src/pkgmgr/actions/install/installers/nix/installer.py
Normal file
@@ -0,0 +1,229 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from typing import TYPE_CHECKING, List, Tuple
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
|
||||
from .conflicts import NixConflictResolver
|
||||
from .profile import NixProfileInspector
|
||||
from .retry import GitHubRateLimitRetry, RetryPolicy
|
||||
from .runner import CommandRunner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class NixFlakeInstaller(BaseInstaller):
|
||||
layer = "nix"
|
||||
FLAKE_FILE = "flake.nix"
|
||||
|
||||
def __init__(self, policy: RetryPolicy | None = None) -> None:
|
||||
self._runner = CommandRunner()
|
||||
self._retry = GitHubRateLimitRetry(policy=policy)
|
||||
self._profile = NixProfileInspector()
|
||||
self._conflicts = NixConflictResolver(self._runner, self._retry, self._profile)
|
||||
|
||||
# Newer nix rejects numeric indices; we learn this at runtime and cache the decision.
|
||||
self._indices_supported: bool | None = None
|
||||
|
||||
def supports(self, ctx: "RepoContext") -> bool:
|
||||
if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
"[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
|
||||
"skipping NixFlakeInstaller."
|
||||
)
|
||||
return False
|
||||
|
||||
if shutil.which("nix") is None:
|
||||
return False
|
||||
|
||||
return os.path.exists(os.path.join(ctx.repo_dir, self.FLAKE_FILE))
|
||||
|
||||
def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
|
||||
# (output_name, allow_failure)
|
||||
if ctx.identifier in {"pkgmgr", "package-manager"}:
|
||||
return [("pkgmgr", False), ("default", True)]
|
||||
return [("default", False)]
|
||||
|
||||
def run(self, ctx: "RepoContext") -> None:
|
||||
if not self.supports(ctx):
|
||||
return
|
||||
|
||||
outputs = self._profile_outputs(ctx)
|
||||
|
||||
if not ctx.quiet:
|
||||
msg = (
|
||||
"[nix] flake detected in "
|
||||
f"{ctx.identifier}, ensuring outputs: "
|
||||
+ ", ".join(name for name, _ in outputs)
|
||||
)
|
||||
print(msg)
|
||||
|
||||
for output, allow_failure in outputs:
|
||||
if ctx.force_update:
|
||||
self._force_upgrade_output(ctx, output, allow_failure)
|
||||
else:
|
||||
self._install_only(ctx, output, allow_failure)
|
||||
|
||||
def _installable(self, ctx: "RepoContext", output: str) -> str:
|
||||
return f"{ctx.repo_dir}#{output}"
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Core install path
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _install_only(self, ctx: "RepoContext", output: str, allow_failure: bool) -> None:
    """
    Install one flake output, escalating through fallbacks on failure.

    Order of attempts:
      1. Plain `nix profile install` (with GitHub rate-limit retry).
      2. Conflict resolver (handles file-collision conflicts).
      3. Legacy index upgrade, then index removal + reinstall.
      4. Token-based removal + reinstall (newer nix without indices).

    Raises SystemExit with the final exit code when everything fails and
    `allow_failure` is False; otherwise only warns.
    """
    install_cmd = f"nix profile install {self._installable(ctx, output)}"

    if not ctx.quiet:
        print(f"[nix] install: {install_cmd}")

    # First attempt goes through the retry wrapper so transient GitHub
    # 403 rate limits do not immediately fail the install.
    res = self._retry.run_with_retry(ctx, self._runner, install_cmd)
    if res.returncode == 0:
        if not ctx.quiet:
            print(f"[nix] output '{output}' successfully installed.")
        return

    # Conflict resolver first (handles the common “existing package already provides file” case)
    if self._conflicts.resolve(
        ctx,
        install_cmd,
        res.stdout,
        res.stderr,
        output=output,
    ):
        if not ctx.quiet:
            print(f"[nix] output '{output}' successfully installed after conflict cleanup.")
        return

    if not ctx.quiet:
        print(
            f"[nix] install failed for '{output}' (exit {res.returncode}), "
            "trying upgrade/remove+install..."
        )

    # If indices are supported, try legacy index-upgrade path.
    # (self._indices_supported is None means "unknown" — still worth trying.)
    if self._indices_supported is not False:
        indices = self._profile.find_installed_indices_for_output(ctx, self._runner, output)

        upgraded = False
        for idx in indices:
            if self._upgrade_index(ctx, idx):
                upgraded = True
                if not ctx.quiet:
                    print(f"[nix] output '{output}' successfully upgraded (index {idx}).")

        if upgraded:
            return

        if indices and not ctx.quiet:
            print(f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'.")

        for idx in indices:
            self._remove_index(ctx, idx)

        # If we learned indices are unsupported, immediately fall back below
        # (the index removals above were no-ops on newer nix).
        if self._indices_supported is False:
            self._remove_tokens_for_output(ctx, output)

    else:
        # indices explicitly unsupported
        self._remove_tokens_for_output(ctx, output)

    # Final reinstall attempt after whatever cleanup succeeded above.
    final = self._runner.run(ctx, install_cmd, allow_failure=True)
    if final.returncode == 0:
        if not ctx.quiet:
            print(f"[nix] output '{output}' successfully re-installed.")
        return

    print(f"[ERROR] Failed to install Nix flake output '{output}' (exit {final.returncode})")
    if not allow_failure:
        raise SystemExit(final.returncode)

    print(f"[WARNING] Continuing despite failure of optional output '{output}'.")
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# force_update path
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _force_upgrade_output(self, ctx: "RepoContext", output: str, allow_failure: bool) -> None:
    """
    Aggressively upgrade one flake output (force_update path).

    On newer nix (indices known to be unsupported) this removes the
    output's profile entries by token and reinstalls. Otherwise it tries
    the legacy per-index upgrade first, falling back to index removal
    plus reinstall. Fatality of failures is delegated to
    `_install_only` via `allow_failure`.
    """
    # Prefer token path if indices unsupported (new nix)
    if self._indices_supported is False:
        self._remove_tokens_for_output(ctx, output)
        self._install_only(ctx, output, allow_failure)
        if not ctx.quiet:
            print(f"[nix] output '{output}' successfully upgraded.")
        return

    indices = self._profile.find_installed_indices_for_output(ctx, self._runner, output)

    upgraded_any = False
    for idx in indices:
        if self._upgrade_index(ctx, idx):
            upgraded_any = True
            if not ctx.quiet:
                print(f"[nix] output '{output}' successfully upgraded (index {idx}).")

    if upgraded_any:
        if not ctx.quiet:
            print(f"[nix] output '{output}' successfully upgraded.")
        return

    if indices and not ctx.quiet:
        print(f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'.")

    for idx in indices:
        self._remove_index(ctx, idx)

    # If we learned indices are unsupported, also remove by token to actually clear conflicts
    if self._indices_supported is False:
        self._remove_tokens_for_output(ctx, output)

    self._install_only(ctx, output, allow_failure)

    if not ctx.quiet:
        print(f"[nix] output '{output}' successfully upgraded.")
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _stderr_says_indices_unsupported(self, stderr: str) -> bool:
|
||||
s = (stderr or "").lower()
|
||||
return "no longer supports indices" in s or "does not support indices" in s
|
||||
|
||||
def _upgrade_index(self, ctx: "RepoContext", idx: int) -> bool:
|
||||
cmd = f"nix profile upgrade --refresh {idx}"
|
||||
res = self._runner.run(ctx, cmd, allow_failure=True)
|
||||
|
||||
if self._stderr_says_indices_unsupported(getattr(res, "stderr", "")):
|
||||
self._indices_supported = False
|
||||
return False
|
||||
|
||||
if self._indices_supported is None:
|
||||
self._indices_supported = True
|
||||
|
||||
return res.returncode == 0
|
||||
|
||||
def _remove_index(self, ctx: "RepoContext", idx: int) -> None:
|
||||
res = self._runner.run(ctx, f"nix profile remove {idx}", allow_failure=True)
|
||||
|
||||
if self._stderr_says_indices_unsupported(getattr(res, "stderr", "")):
|
||||
self._indices_supported = False
|
||||
|
||||
if self._indices_supported is None:
|
||||
self._indices_supported = True
|
||||
|
||||
def _remove_tokens_for_output(self, ctx: "RepoContext", output: str) -> None:
|
||||
tokens = self._profile.find_remove_tokens_for_output(ctx, self._runner, output)
|
||||
if not tokens:
|
||||
return
|
||||
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] indices unsupported; removing by token(s): {', '.join(tokens)}")
|
||||
|
||||
for t in tokens:
|
||||
self._runner.run(ctx, f"nix profile remove {t}", allow_failure=True)
|
||||
@@ -0,0 +1,4 @@
|
||||
from .inspector import NixProfileInspector
|
||||
from .models import NixProfileEntry
|
||||
|
||||
__all__ = ["NixProfileInspector", "NixProfileEntry"]
|
||||
162
src/pkgmgr/actions/install/installers/nix/profile/inspector.py
Normal file
162
src/pkgmgr/actions/install/installers/nix/profile/inspector.py
Normal file
@@ -0,0 +1,162 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, List, TYPE_CHECKING
|
||||
|
||||
from .matcher import (
|
||||
entry_matches_output,
|
||||
entry_matches_store_path,
|
||||
stable_unique_ints,
|
||||
)
|
||||
from .normalizer import normalize_elements
|
||||
from .parser import parse_profile_list_json
|
||||
from .result import extract_stdout_text
|
||||
|
||||
if TYPE_CHECKING:
|
||||
# Keep these as TYPE_CHECKING-only to avoid runtime import cycles.
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.core.command.runner import CommandRunner
|
||||
|
||||
|
||||
class NixProfileInspector:
    """
    Reads and inspects the user's Nix profile list (JSON).

    Public API:
      - list_json()
      - find_installed_indices_for_output()  (legacy; may not work on newer nix)
      - find_indices_by_store_path()         (legacy; may not work on newer nix)
      - find_remove_tokens_for_output()
      - find_remove_tokens_for_store_prefixes()
    """

    def list_json(self, ctx: "RepoContext", runner: "CommandRunner") -> dict[str, Any]:
        """Run `nix profile list --json` and return the parsed document."""
        res = runner.run(ctx, "nix profile list --json", allow_failure=False)
        raw = extract_stdout_text(res)
        return parse_profile_list_json(raw)

    # ---------------------------------------------------------------------
    # Internal helpers (shared by the public lookups below)
    # ---------------------------------------------------------------------

    @staticmethod
    def _entry_token(e: "NixProfileEntry") -> str:
        """
        Pick a removable token for an entry: prefer a non-numeric key, then
        a non-numeric name. Returns "" when neither exists — newer nix
        rejects numeric indices, so purely numeric tokens are unusable.
        """
        k = (e.key or "").strip()
        if k and not k.isdigit():
            return k
        n = (e.name or "").strip()
        if n and not n.isdigit():
            return n
        return ""

    @staticmethod
    def _unique_preserving_order(tokens: List[str]) -> List[str]:
        """Drop empty/duplicate tokens while keeping first-occurrence order."""
        seen: set[str] = set()
        uniq: List[str] = []
        for t in tokens:
            if t and t not in seen:
                uniq.append(t)
                seen.add(t)
        return uniq

    # ---------------------------------------------------------------------
    # Legacy index helpers (still useful on older nix; newer nix may reject indices)
    # ---------------------------------------------------------------------

    def find_installed_indices_for_output(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        output: str,
    ) -> List[int]:
        """Return unique profile indices whose entry matches *output*."""
        entries = normalize_elements(self.list_json(ctx, runner))
        hits = [
            e.index
            for e in entries
            if e.index is not None and entry_matches_output(e, output)
        ]
        return stable_unique_ints(hits)

    def find_indices_by_store_path(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        store_path: str,
    ) -> List[int]:
        """Return unique profile indices whose entry contains *store_path*."""
        needle = (store_path or "").strip()
        if not needle:
            return []

        entries = normalize_elements(self.list_json(ctx, runner))
        hits = [
            e.index
            for e in entries
            if e.index is not None and entry_matches_store_path(e, needle)
        ]
        return stable_unique_ints(hits)

    # ---------------------------------------------------------------------
    # New token-based helpers (works with newer nix where indices are rejected)
    # ---------------------------------------------------------------------

    def find_remove_tokens_for_output(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        output: str,
    ) -> List[str]:
        """
        Returns profile remove tokens to remove entries matching a given output.

        We always include the raw output token first because nix itself suggests:
            nix profile remove pkgmgr
        """
        out = (output or "").strip()
        if not out:
            return []

        entries = normalize_elements(self.list_json(ctx, runner))

        tokens: List[str] = [out]  # critical: matches nix's own suggestion for conflicts
        for e in entries:
            if entry_matches_output(e, out):
                tokens.append(self._entry_token(e))

        return self._unique_preserving_order(tokens)

    def find_remove_tokens_for_store_prefixes(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        prefixes: List[str],
    ) -> List[str]:
        """
        Returns remove tokens for entries whose store path matches any prefix.
        """
        cleaned = [(p or "").strip() for p in (prefixes or []) if p]
        cleaned = [p for p in cleaned if p]
        if not cleaned:
            return []

        entries = normalize_elements(self.list_json(ctx, runner))

        tokens: List[str] = []
        for e in entries:
            if not e.store_paths:
                continue
            # Exact match against any requested prefix (paths are normalized).
            if any(sp == p for sp in e.store_paths for p in cleaned):
                tokens.append(self._entry_token(e))

        return self._unique_preserving_order(tokens)
|
||||
62
src/pkgmgr/actions/install/installers/nix/profile/matcher.py
Normal file
62
src/pkgmgr/actions/install/installers/nix/profile/matcher.py
Normal file
@@ -0,0 +1,62 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from .models import NixProfileEntry
|
||||
|
||||
|
||||
def entry_matches_output(entry: NixProfileEntry, output: str) -> bool:
    """
    Heuristic matcher: *output* is typically a flake output name (e.g.
    "pkgmgr"), compared against the entry's name and attrPath in the
    shapes nix commonly produces.
    """
    out = (output or "").strip()
    if not out:
        return False

    for raw in (entry.name, entry.attr_path):
        candidate = (raw or "").strip()
        if not candidate:
            continue

        matched = (
            candidate == out                    # direct match
            or f"#{out}" in candidate           # attrPath contains "#<output>"
            or candidate.endswith(f".{out}")    # attrPath ends with ".<output>"
            or candidate.startswith(f"{out}-")  # name pattern "<output>-<n>"
        )
        if matched:
            return True

        # Historical special case: repo is "package-manager" but output is "pkgmgr"
        if out == "pkgmgr" and candidate.startswith("package-manager-"):
            return True

    return False
|
||||
|
||||
|
||||
def entry_matches_store_path(entry: NixProfileEntry, store_path: str) -> bool:
    """True when *store_path* exactly equals one of the entry's store paths."""
    wanted = (store_path or "").strip()
    if not wanted:
        return False
    for p in entry.store_paths:
        if (p or "") == wanted:
            return True
    return False
|
||||
|
||||
|
||||
def stable_unique_ints(values: List[int]) -> List[int]:
    """Deduplicate *values* while preserving first-occurrence order."""
    # dict preserves insertion order (Python 3.7+), so fromkeys gives a
    # stable, O(n) dedupe.
    return list(dict.fromkeys(values))
|
||||
17
src/pkgmgr/actions/install/installers/nix/profile/models.py
Normal file
17
src/pkgmgr/actions/install/installers/nix/profile/models.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class NixProfileEntry:
    """
    Minimal normalized representation of one nix profile element entry.
    """

    # Raw key of the element in the `nix profile list --json` "elements" map.
    key: str
    # Numeric profile index when one could be derived (older nix); else None.
    index: Optional[int]
    # Package name as reported by nix ("" when absent).
    name: str
    # Flake attribute path ("" when absent).
    attr_path: str
    # All store paths associated with this entry (normalized, non-empty).
    store_paths: List[str]
||||
128
src/pkgmgr/actions/install/installers/nix/profile/normalizer.py
Normal file
128
src/pkgmgr/actions/install/installers/nix/profile/normalizer.py
Normal file
@@ -0,0 +1,128 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import Any, Dict, Iterable, List, Optional
|
||||
|
||||
from .models import NixProfileEntry
|
||||
|
||||
|
||||
def coerce_index(key: str, entry: Dict[str, Any]) -> Optional[int]:
    """
    Derive a numeric profile index from a JSON element.

    Nix's JSON schema varies: element keys may be "0", "1", ... or names
    like "pkgmgr-1", and some versions carry an explicit index field.
    Safe options are tried in order; None when nothing numeric is found.
    """
    k = (key or "").strip()

    # 1) Classic numeric keys.
    if k.isdigit():
        try:
            return int(k)
        except Exception:
            return None

    # 2) Explicit index-like fields (schema-dependent).
    for field in ("index", "id", "position"):
        value = entry.get(field)
        if isinstance(value, int):
            return value
        if isinstance(value, str) and value.strip().isdigit():
            try:
                return int(value.strip())
            except Exception:
                pass

    # 3) Last resort: trailing number in keys shaped like "<name>-<n>".
    match = re.match(r"^.+-(\d+)$", k)
    if match:
        try:
            return int(match.group(1))
        except Exception:
            return None

    return None
|
||||
|
||||
|
||||
def iter_store_paths(entry: Dict[str, Any]) -> Iterable[str]:
    """
    Yield every store path found in a nix profile JSON entry.

    Supports the observed schema variants:
      - "storePaths": ["/nix/store/..", ...]
      - "storePaths": "/nix/store/.."   (rare)
      - "storePath":  "/nix/store/.."   (some variants)
      - nested "outputs" dicts carrying "storePath" (best-effort)
    """
    if not isinstance(entry, dict):
        return

    paths = entry.get("storePaths")
    if isinstance(paths, list):
        yield from (p for p in paths if isinstance(p, str))
    elif isinstance(paths, str):
        yield paths

    single = entry.get("storePath")
    if isinstance(single, str):
        yield single

    outputs = entry.get("outputs")
    if isinstance(outputs, dict):
        for value in outputs.values():
            if isinstance(value, dict):
                candidate = value.get("storePath")
                if isinstance(candidate, str):
                    yield candidate
|
||||
|
||||
|
||||
def normalize_store_path(store_path: str) -> str:
    """
    Normalize a store path for matching.

    Currently just strips whitespace; kept as a hook for future rules.
    """
    if not store_path:
        return ""
    return store_path.strip()
|
||||
|
||||
|
||||
def normalize_elements(data: Dict[str, Any]) -> List[NixProfileEntry]:
    """
    Convert `nix profile list --json` data into normalized entries.

    Observed JSON shapes:
      - {"elements": {"0": {...}, "1": {...}}}
      - {"elements": {"pkgmgr-1": {...}, "pkgmgr-2": {...}}}
    """
    elements = data.get("elements")
    if not isinstance(elements, dict):
        return []

    result: List[NixProfileEntry] = []
    for raw_key, raw_entry in elements.items():
        if not isinstance(raw_entry, dict):
            continue

        key = str(raw_key)
        # Collect and normalize store paths, dropping empties.
        paths = [
            cleaned
            for cleaned in (normalize_store_path(p) for p in iter_store_paths(raw_entry))
            if cleaned
        ]

        result.append(
            NixProfileEntry(
                key=key,
                index=coerce_index(key, raw_entry),
                name=str(raw_entry.get("name", "") or ""),
                attr_path=str(raw_entry.get("attrPath", "") or ""),
                store_paths=paths,
            )
        )

    return result
|
||||
19
src/pkgmgr/actions/install/installers/nix/profile/parser.py
Normal file
19
src/pkgmgr/actions/install/installers/nix/profile/parser.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any, Dict
|
||||
|
||||
|
||||
def parse_profile_list_json(raw: str) -> Dict[str, Any]:
    """
    Parse the output of `nix profile list --json`.

    Raises SystemExit with a bounded excerpt of the raw text on parse failure.
    """
    try:
        return json.loads(raw)
    except json.JSONDecodeError as err:
        excerpt = (raw or "")[:5000]
        raise SystemExit(
            f"[nix] Failed to parse `nix profile list --json`: {err}\n{excerpt}"
        ) from err
|
||||
28
src/pkgmgr/actions/install/installers/nix/profile/result.py
Normal file
28
src/pkgmgr/actions/install/installers/nix/profile/result.py
Normal file
@@ -0,0 +1,28 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def extract_stdout_text(result: Any) -> str:
    """
    Normalize different runner return types to a stdout string.

    Supported patterns:
      - str                    -> returned as-is
      - bytes/bytearray        -> decoded UTF-8 (errors replaced)
      - object with `.stdout`  -> that value (str or bytes)
      - anything else          -> str(result)
    """
    def _decode(blob: Any) -> str:
        return bytes(blob).decode("utf-8", errors="replace")

    if isinstance(result, str):
        return result
    if isinstance(result, (bytes, bytearray)):
        return _decode(result)

    stdout = getattr(result, "stdout", None)
    if isinstance(stdout, str):
        return stdout
    if isinstance(stdout, (bytes, bytearray)):
        return _decode(stdout)

    return str(result)
|
||||
69
src/pkgmgr/actions/install/installers/nix/profile_list.py
Normal file
69
src/pkgmgr/actions/install/installers/nix/profile_list.py
Normal file
@@ -0,0 +1,69 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import TYPE_CHECKING, List, Tuple
|
||||
|
||||
from .runner import CommandRunner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class NixProfileListReader:
    """Parses plain-text `nix profile list` output into (index, store-prefix) pairs."""

    def __init__(self, runner: CommandRunner) -> None:
        self._runner = runner

    @staticmethod
    def _store_prefix(path: str) -> str:
        """Trim a store path down to its /nix/store/<hash>-<name> prefix."""
        text = (path or "").strip()
        match = re.match(r"^(/nix/store/[0-9a-z]{32}-[^/ \t]+)", text)
        return match.group(1) if match else text

    def entries(self, ctx: "RepoContext") -> List[Tuple[int, str]]:
        """Return unique (index, store_prefix) pairs from `nix profile list`."""
        res = self._runner.run(ctx, "nix profile list", allow_failure=True)
        if res.returncode != 0:
            return []

        # Matches "<index> ... /nix/store/<32-char-hash>-<name>" lines.
        line_pat = re.compile(
            r"^\s*(\d+)\s+.*?(/nix/store/[0-9a-z]{32}-[^/ \t]+)",
            re.MULTILINE,
        )

        found: List[Tuple[int, str]] = []
        for match in line_pat.finditer(res.stdout or ""):
            try:
                index = int(match.group(1))
            except Exception:
                continue
            found.append((index, self._store_prefix(match.group(2))))

        # First occurrence wins per index.
        seen: set[int] = set()
        unique: List[Tuple[int, str]] = []
        for index, prefix in found:
            if index in seen:
                continue
            seen.add(index)
            unique.append((index, prefix))
        return unique

    def indices_matching_store_prefixes(self, ctx: "RepoContext", prefixes: List[str]) -> List[int]:
        """Return indices whose store prefix exactly equals any given prefix."""
        wanted = [self._store_prefix(p) for p in prefixes if p]
        wanted = [p for p in wanted if p]
        if not wanted:
            return []

        matched = [idx for idx, sp in self.entries(ctx) if sp in wanted]

        seen: set[int] = set()
        unique: List[int] = []
        for idx in matched:
            if idx not in seen:
                seen.add(idx)
                unique.append(idx)
        return unique
|
||||
87
src/pkgmgr/actions/install/installers/nix/retry.py
Normal file
87
src/pkgmgr/actions/install/installers/nix/retry.py
Normal file
@@ -0,0 +1,87 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import random
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Iterable, TYPE_CHECKING
|
||||
|
||||
from .types import RunResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from .runner import CommandRunner
|
||||
|
||||
@dataclass(frozen=True)
class RetryPolicy:
    """Tuning knobs for GitHub rate-limit retry behavior."""

    # Maximum number of install attempts before giving up.
    max_attempts: int = 7
    # Base delay (seconds) seeding the Fibonacci backoff sequence.
    base_delay_seconds: int = 30
    # Random jitter added to each delay, drawn uniformly from [min, max].
    jitter_seconds_min: int = 0
    jitter_seconds_max: int = 60
|
||||
|
||||
|
||||
class GitHubRateLimitRetry:
    """
    Retries nix install commands only when the error looks like a GitHub
    API rate limit (HTTP 403). Backoff: Fibonacci(base, base, ...) plus
    random jitter.
    """

    def __init__(self, policy: RetryPolicy | None = None) -> None:
        self._policy = policy if policy is not None else RetryPolicy()

    def run_with_retry(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        install_cmd: str,
    ) -> RunResult:
        """Run *install_cmd*, retrying with backoff on GitHub rate-limit errors."""
        quiet = bool(getattr(ctx, "quiet", False))
        policy = self._policy
        delays = list(self._fibonacci_backoff(policy.base_delay_seconds, policy.max_attempts))

        last: RunResult | None = None
        for attempt, base_delay in enumerate(delays, start=1):
            if not quiet:
                print(f"[nix] attempt {attempt}/{policy.max_attempts}: {install_cmd}")

            last = runner.run(ctx, install_cmd, allow_failure=True)
            if last.returncode == 0:
                return last

            # Only rate-limit failures are worth retrying; anything else is final.
            if not self._is_github_rate_limit_error(f"{last.stdout}\n{last.stderr}"):
                return last

            if attempt >= policy.max_attempts:
                break

            jitter = random.randint(policy.jitter_seconds_min, policy.jitter_seconds_max)
            wait_time = base_delay + jitter
            if not quiet:
                print(
                    "[nix] GitHub rate limit detected (403). "
                    f"Retrying in {wait_time}s (base={base_delay}s, jitter={jitter}s)..."
                )
            time.sleep(wait_time)

        if last is not None:
            return last
        return RunResult(returncode=1, stdout="", stderr="nix install retry failed")

    @staticmethod
    def _is_github_rate_limit_error(text: str) -> bool:
        """True when the combined output matches known GitHub 403 / rate-limit phrases."""
        lowered = (text or "").lower()
        needles = (
            "http error 403",
            "rate limit exceeded",
            "github api rate limit",
            "api rate limit exceeded",
        )
        return any(needle in lowered for needle in needles)

    @staticmethod
    def _fibonacci_backoff(base: int, attempts: int) -> Iterable[int]:
        """Yield `attempts` Fibonacci-style delays starting at (base, base)."""
        current, nxt = base, base
        for _ in range(max(1, attempts)):
            yield current
            current, nxt = nxt, current + nxt
|
||||
64
src/pkgmgr/actions/install/installers/nix/runner.py
Normal file
64
src/pkgmgr/actions/install/installers/nix/runner.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .types import RunResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
class CommandRunner:
    """
    Executes commands (shell=True) inside a repository directory (if provided).
    Supports preview mode and compact failure output logging.
    """

    def run(self, ctx: "RepoContext", cmd: str, allow_failure: bool) -> RunResult:
        """Run *cmd*; in preview mode only print it. Non-zero exit raises unless allowed."""
        workdir = getattr(ctx, "repo_dir", None) or getattr(ctx, "repo_path", None)
        quiet = bool(getattr(ctx, "quiet", False))

        if bool(getattr(ctx, "preview", False)):
            if not quiet:
                print(f"[preview] {cmd}")
            return RunResult(returncode=0, stdout="", stderr="")

        try:
            proc = subprocess.run(
                cmd,
                shell=True,
                cwd=workdir,
                check=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
        except Exception as exc:
            # Launch failures are only tolerated when the caller allows them.
            if not allow_failure:
                raise
            return RunResult(returncode=1, stdout="", stderr=str(exc))

        outcome = RunResult(returncode=proc.returncode, stdout=proc.stdout or "", stderr=proc.stderr or "")

        if outcome.returncode != 0:
            if not quiet:
                self._print_compact_failure(outcome)
            if not allow_failure:
                raise SystemExit(outcome.returncode)

        return outcome

    @staticmethod
    def _print_compact_failure(res: RunResult) -> None:
        """Print the tail of stdout/stderr so failures stay readable in logs."""
        out = (res.stdout or "").strip()
        if out:
            print("[nix] stdout (last lines):")
            print("\n".join(out.splitlines()[-20:]))

        err = (res.stderr or "").strip()
        if err:
            print("[nix] stderr (last lines):")
            print("\n".join(err.splitlines()[-40:]))
|
||||
76
src/pkgmgr/actions/install/installers/nix/textparse.py
Normal file
76
src/pkgmgr/actions/install/installers/nix/textparse.py
Normal file
@@ -0,0 +1,76 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import List
|
||||
|
||||
|
||||
class NixConflictTextParser:
    """Extracts actionable hints from nix profile conflict error text."""

    @staticmethod
    def _store_prefix(path: str) -> str:
        """Trim a store path down to its /nix/store/<hash>-<name> prefix."""
        text = (path or "").strip()
        match = re.match(r"^(/nix/store/[0-9a-z]{32}-[^/ \t]+)", text)
        return match.group(1) if match else text

    def remove_tokens(self, text: str) -> List[str]:
        """Collect tokens from suggested `nix profile remove <token>` lines."""
        suggestion = re.compile(
            r"^\s*nix profile remove\s+([^\s'\"`]+|'[^']+'|\"[^\"]+\")\s*$",
            re.MULTILINE,
        )

        collected: List[str] = []
        for m in suggestion.finditer(text or ""):
            token = (m.group(1) or "").strip()
            # Strip one level of matching quotes.
            if (token.startswith("'") and token.endswith("'")) or (
                token.startswith('"') and token.endswith('"')
            ):
                token = token[1:-1]
            if token:
                collected.append(token)

        seen: set[str] = set()
        unique: List[str] = []
        for token in collected:
            if token not in seen:
                seen.add(token)
                unique.append(token)
        return unique

    def existing_store_prefixes(self, text: str) -> List[str]:
        """Collect store paths listed under the 'existing package' conflict section."""
        store_line = re.compile(r"^\s*(/nix/store/[0-9a-z]{32}-[^ \t]+)")

        collecting = False
        raw_paths: List[str] = []
        for raw in (text or "").splitlines():
            stripped = raw.strip()

            if "An existing package already provides the following file" in stripped:
                collecting = True
                continue
            if "This is the conflicting file from the new package" in stripped:
                # The "new package" section is irrelevant; stop collecting.
                collecting = False
                continue

            if collecting:
                m = store_line.match(raw)
                if m:
                    raw_paths.append(m.group(1))

        seen: set[str] = set()
        unique: List[str] = []
        for prefix in (self._store_prefix(p) for p in raw_paths if p):
            if prefix and prefix not in seen:
                seen.add(prefix)
                unique.append(prefix)
        return unique
|
||||
10
src/pkgmgr/actions/install/installers/nix/types.py
Normal file
10
src/pkgmgr/actions/install/installers/nix/types.py
Normal file
@@ -0,0 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class RunResult:
    """Immutable result of one shell command execution."""

    # Process exit status (0 = success).
    returncode: int
    # Captured standard output ("" when none was produced).
    stdout: str
    # Captured standard error ("" when none was produced).
    stderr: str
|
||||
@@ -1,165 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installer for Nix flakes.
|
||||
|
||||
If a repository contains flake.nix and the 'nix' command is available, this
|
||||
installer will try to install profile outputs from the flake.
|
||||
|
||||
Behavior:
|
||||
- If flake.nix is present and `nix` exists on PATH:
|
||||
* First remove any existing `package-manager` profile entry (best-effort).
|
||||
* Then install one or more flake outputs via `nix profile install`.
|
||||
- For the package-manager repo:
|
||||
* `pkgmgr` is mandatory (CLI), `default` is optional.
|
||||
- For all other repos:
|
||||
* `default` is mandatory.
|
||||
|
||||
Special handling:
|
||||
- If PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 is set, the installer is
|
||||
globally disabled (useful for CI or debugging).
|
||||
|
||||
The higher-level InstallationPipeline and CLI-layer model decide when this
|
||||
installer is allowed to run, based on where the current CLI comes from
|
||||
(e.g. Nix, OS packages, Python, Makefile).
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from typing import TYPE_CHECKING, List, Tuple
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install import InstallContext
|
||||
|
||||
|
||||
class NixFlakeInstaller(BaseInstaller):
|
||||
"""Install Nix flake profiles for repositories that define flake.nix."""
|
||||
|
||||
# Logical layer name, used by capability matchers.
|
||||
layer = "nix"
|
||||
|
||||
FLAKE_FILE = "flake.nix"
|
||||
PROFILE_NAME = "package-manager"
|
||||
|
||||
def supports(self, ctx: "RepoContext") -> bool:
    """
    Only support repositories that:
      - are NOT disabled via PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1,
      - have the `nix` command available,
      - and contain a flake.nix.
    """
    # Optional global kill-switch for CI or debugging.
    if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
        print(
            "[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
            "NixFlakeInstaller is disabled."
        )
        return False

    # Nix must be available.
    if shutil.which("nix") is None:
        return False

    # flake.nix must exist in the repository.
    return os.path.exists(os.path.join(ctx.repo_dir, self.FLAKE_FILE))
|
||||
|
||||
def _ensure_old_profile_removed(self, ctx: "RepoContext") -> None:
|
||||
"""
|
||||
Best-effort removal of an existing profile entry.
|
||||
|
||||
This handles the "already provides the following file" conflict by
|
||||
removing previous `package-manager` installations before we install
|
||||
the new one.
|
||||
|
||||
Any error in `nix profile remove` is intentionally ignored, because
|
||||
a missing profile entry is not a fatal condition.
|
||||
"""
|
||||
if shutil.which("nix") is None:
|
||||
return
|
||||
|
||||
cmd = f"nix profile remove {self.PROFILE_NAME} || true"
|
||||
try:
|
||||
# NOTE: no allow_failure here → matches the existing unit tests
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
except SystemExit:
|
||||
# Unit tests explicitly assert this is swallowed
|
||||
pass
|
||||
|
||||
def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
|
||||
"""
|
||||
Decide which flake outputs to install and whether failures are fatal.
|
||||
|
||||
Returns a list of (output_name, allow_failure) tuples.
|
||||
|
||||
Rules:
|
||||
- For the package-manager repo (identifier 'pkgmgr' or 'package-manager'):
|
||||
[("pkgmgr", False), ("default", True)]
|
||||
- For all other repos:
|
||||
[("default", False)]
|
||||
"""
|
||||
ident = ctx.identifier
|
||||
|
||||
if ident in {"pkgmgr", "package-manager"}:
|
||||
# pkgmgr: main CLI output is "pkgmgr" (mandatory),
|
||||
# "default" is nice-to-have (non-fatal).
|
||||
return [("pkgmgr", False), ("default", True)]
|
||||
|
||||
# Generic repos: we expect a sensible "default" package/app.
|
||||
# Failure to install it is considered fatal.
|
||||
return [("default", False)]
|
||||
|
||||
def run(self, ctx: "InstallContext") -> None:
|
||||
"""
|
||||
Install Nix flake profile outputs.
|
||||
|
||||
For the package-manager repo, failure installing 'pkgmgr' is fatal,
|
||||
failure installing 'default' is non-fatal.
|
||||
For other repos, failure installing 'default' is fatal.
|
||||
"""
|
||||
# Reuse supports() to keep logic in one place.
|
||||
if not self.supports(ctx): # type: ignore[arg-type]
|
||||
return
|
||||
|
||||
outputs = self._profile_outputs(ctx) # list of (name, allow_failure)
|
||||
|
||||
print(
|
||||
"Nix flake detected in "
|
||||
f"{ctx.identifier}, attempting to install profile outputs: "
|
||||
+ ", ".join(name for name, _ in outputs)
|
||||
)
|
||||
|
||||
# Handle the "already installed" case up-front for the shared profile.
|
||||
self._ensure_old_profile_removed(ctx) # type: ignore[arg-type]
|
||||
|
||||
for output, allow_failure in outputs:
|
||||
cmd = f"nix profile install {ctx.repo_dir}#{output}"
|
||||
print(f"[INFO] Running: {cmd}")
|
||||
ret = os.system(cmd)
|
||||
|
||||
# Extract real exit code from os.system() result
|
||||
if os.WIFEXITED(ret):
|
||||
exit_code = os.WEXITSTATUS(ret)
|
||||
else:
|
||||
# abnormal termination (signal etc.) – keep raw value
|
||||
exit_code = ret
|
||||
|
||||
if exit_code == 0:
|
||||
print(f"Nix flake output '{output}' successfully installed.")
|
||||
continue
|
||||
|
||||
print(f"[Error] Failed to install Nix flake output '{output}'")
|
||||
print(f"[Error] Command exited with code {exit_code}")
|
||||
|
||||
if not allow_failure:
|
||||
raise SystemExit(exit_code)
|
||||
|
||||
print(
|
||||
"[Warning] Continuing despite failure to install "
|
||||
f"optional output '{output}'."
|
||||
)
|
||||
@@ -1,104 +1,40 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
PythonInstaller — install Python projects defined via pyproject.toml.
|
||||
|
||||
Installation rules:
|
||||
|
||||
1. pip command resolution:
|
||||
a) If PKGMGR_PIP is set → use it exactly as provided.
|
||||
b) Else if running inside a virtualenv → use `sys.executable -m pip`.
|
||||
c) Else → create/use a per-repository virtualenv under ~/.venvs/<repo>/.
|
||||
|
||||
2. Installation target:
|
||||
- Always install into the resolved pip environment.
|
||||
- Never modify system Python, never rely on --user.
|
||||
- Nix-immutable systems (PEP 668) are automatically avoided because we
|
||||
never touch system Python.
|
||||
|
||||
3. The installer is skipped when:
|
||||
- PKGMGR_DISABLE_PYTHON_INSTALLER=1 is set.
|
||||
- The repository has no pyproject.toml.
|
||||
|
||||
All pip failures are treated as fatal.
|
||||
"""
|
||||
|
||||
# src/pkgmgr/actions/install/installers/python.py
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install import InstallContext
|
||||
|
||||
|
||||
class PythonInstaller(BaseInstaller):
|
||||
"""Install Python projects and dependencies via pip using isolated environments."""
|
||||
|
||||
layer = "python"
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Installer activation logic
|
||||
# ----------------------------------------------------------------------
|
||||
def supports(self, ctx: "RepoContext") -> bool:
|
||||
"""
|
||||
Return True if this installer should handle this repository.
|
||||
|
||||
The installer is active only when:
|
||||
- A pyproject.toml exists in the repo, and
|
||||
- PKGMGR_DISABLE_PYTHON_INSTALLER is not set.
|
||||
"""
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
if os.environ.get("PKGMGR_DISABLE_PYTHON_INSTALLER") == "1":
|
||||
print("[INFO] PythonInstaller disabled via PKGMGR_DISABLE_PYTHON_INSTALLER.")
|
||||
return False
|
||||
|
||||
return os.path.exists(os.path.join(ctx.repo_dir, "pyproject.toml"))
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Virtualenv handling
|
||||
# ----------------------------------------------------------------------
|
||||
def _in_virtualenv(self) -> bool:
|
||||
"""Detect whether the current interpreter is inside a venv."""
|
||||
if os.environ.get("VIRTUAL_ENV"):
|
||||
return True
|
||||
|
||||
base = getattr(sys, "base_prefix", sys.prefix)
|
||||
return sys.prefix != base
|
||||
|
||||
def _ensure_repo_venv(self, ctx: "InstallContext") -> str:
|
||||
"""
|
||||
Ensure that ~/.venvs/<identifier>/ exists and contains a minimal venv.
|
||||
|
||||
Returns the venv directory path.
|
||||
"""
|
||||
def _ensure_repo_venv(self, ctx: RepoContext) -> str:
|
||||
venv_dir = os.path.expanduser(f"~/.venvs/{ctx.identifier}")
|
||||
python = sys.executable
|
||||
|
||||
if not os.path.isdir(venv_dir):
|
||||
print(f"[python-installer] Creating virtualenv: {venv_dir}")
|
||||
subprocess.check_call([python, "-m", "venv", venv_dir])
|
||||
if not os.path.exists(venv_dir):
|
||||
run_command(f"{python} -m venv {venv_dir}", preview=ctx.preview)
|
||||
|
||||
return venv_dir
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# pip command resolution
|
||||
# ----------------------------------------------------------------------
|
||||
def _pip_cmd(self, ctx: "InstallContext") -> str:
|
||||
"""
|
||||
Determine which pip command to use.
|
||||
|
||||
Priority:
|
||||
1. PKGMGR_PIP override given by user or automation.
|
||||
2. Active virtualenv → use sys.executable -m pip.
|
||||
3. Per-repository venv → ~/.venvs/<repo>/bin/pip
|
||||
"""
|
||||
def _pip_cmd(self, ctx: RepoContext) -> str:
|
||||
explicit = os.environ.get("PKGMGR_PIP", "").strip()
|
||||
if explicit:
|
||||
return explicit
|
||||
@@ -107,33 +43,19 @@ class PythonInstaller(BaseInstaller):
|
||||
return f"{sys.executable} -m pip"
|
||||
|
||||
venv_dir = self._ensure_repo_venv(ctx)
|
||||
pip_path = os.path.join(venv_dir, "bin", "pip")
|
||||
return pip_path
|
||||
return os.path.join(venv_dir, "bin", "pip")
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Execution
|
||||
# ----------------------------------------------------------------------
|
||||
def run(self, ctx: "InstallContext") -> None:
|
||||
"""
|
||||
Install the project defined by pyproject.toml.
|
||||
|
||||
Uses the resolved pip environment. Installation is isolated and never
|
||||
touches system Python.
|
||||
"""
|
||||
if not self.supports(ctx): # type: ignore[arg-type]
|
||||
return
|
||||
|
||||
pyproject = os.path.join(ctx.repo_dir, "pyproject.toml")
|
||||
if not os.path.exists(pyproject):
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
if not self.supports(ctx):
|
||||
return
|
||||
|
||||
print(f"[python-installer] Installing Python project for {ctx.identifier}...")
|
||||
|
||||
pip_cmd = self._pip_cmd(ctx)
|
||||
run_command(f"{pip_cmd} install .", cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
# Final install command: ALWAYS isolated, never system-wide.
|
||||
install_cmd = f"{pip_cmd} install ."
|
||||
|
||||
run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
if ctx.force_update:
|
||||
# test-visible marker
|
||||
print(f"[python-installer] repo '{ctx.identifier}' successfully upgraded.")
|
||||
|
||||
print(f"[python-installer] Installation finished for {ctx.identifier}.")
|
||||
|
||||
@@ -1,21 +1,9 @@
|
||||
# src/pkgmgr/actions/install/pipeline.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installation pipeline orchestration for repositories.
|
||||
|
||||
This module implements the "Setup Controller" logic:
|
||||
|
||||
1. Detect current CLI command for the repo (if any).
|
||||
2. Classify it into a layer (os-packages, nix, python, makefile).
|
||||
3. Iterate over installers in layer order:
|
||||
- Skip installers whose layer is weaker than an already-loaded one.
|
||||
- Run only installers that support() the repo and add new capabilities.
|
||||
- After each installer, re-resolve the command and update the layer.
|
||||
4. Maintain the repo["command"] field and create/update symlinks via create_ink().
|
||||
|
||||
The goal is to prevent conflicting installations and make the layering
|
||||
behaviour explicit and testable.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -36,34 +24,15 @@ from pkgmgr.core.command.resolve import resolve_command_for_repo
|
||||
|
||||
@dataclass
|
||||
class CommandState:
|
||||
"""
|
||||
Represents the current CLI state for a repository:
|
||||
|
||||
- command: absolute or relative path to the CLI entry point
|
||||
- layer: which conceptual layer this command belongs to
|
||||
"""
|
||||
|
||||
command: Optional[str]
|
||||
layer: Optional[CliLayer]
|
||||
|
||||
|
||||
class CommandResolver:
|
||||
"""
|
||||
Small helper responsible for resolving the current command for a repo
|
||||
and mapping it into a CommandState.
|
||||
"""
|
||||
|
||||
def __init__(self, ctx: RepoContext) -> None:
|
||||
self._ctx = ctx
|
||||
|
||||
def resolve(self) -> CommandState:
|
||||
"""
|
||||
Resolve the current command for this repository.
|
||||
|
||||
If resolve_command_for_repo raises SystemExit (e.g. Python package
|
||||
without installed entry point), we treat this as "no command yet"
|
||||
from the point of view of the installers.
|
||||
"""
|
||||
repo = self._ctx.repo
|
||||
identifier = self._ctx.identifier
|
||||
repo_dir = self._ctx.repo_dir
|
||||
@@ -85,28 +54,10 @@ class CommandResolver:
|
||||
|
||||
|
||||
class InstallationPipeline:
|
||||
"""
|
||||
High-level orchestrator that applies a sequence of installers
|
||||
to a repository based on CLI layer precedence.
|
||||
"""
|
||||
|
||||
def __init__(self, installers: Sequence[BaseInstaller]) -> None:
|
||||
self._installers = list(installers)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Execute the installation pipeline for a single repository.
|
||||
|
||||
- Detect initial command & layer.
|
||||
- Optionally create a symlink.
|
||||
- Run installers in order, skipping those whose layer is weaker
|
||||
than an already-loaded CLI.
|
||||
- After each installer, re-resolve the command and refresh the
|
||||
symlink if needed.
|
||||
"""
|
||||
repo = ctx.repo
|
||||
repo_dir = ctx.repo_dir
|
||||
identifier = ctx.identifier
|
||||
@@ -119,7 +70,6 @@ class InstallationPipeline:
|
||||
resolver = CommandResolver(ctx)
|
||||
state = resolver.resolve()
|
||||
|
||||
# Persist initial command (if any) and create a symlink.
|
||||
if state.command:
|
||||
repo["command"] = state.command
|
||||
create_ink(
|
||||
@@ -135,11 +85,9 @@ class InstallationPipeline:
|
||||
|
||||
provided_capabilities: Set[str] = set()
|
||||
|
||||
# Main installer loop
|
||||
for installer in self._installers:
|
||||
layer_name = getattr(installer, "layer", None)
|
||||
|
||||
# Installers without a layer participate without precedence logic.
|
||||
if layer_name is None:
|
||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||
continue
|
||||
@@ -147,42 +95,33 @@ class InstallationPipeline:
|
||||
try:
|
||||
installer_layer = CliLayer(layer_name)
|
||||
except ValueError:
|
||||
# Unknown layer string → treat as lowest priority.
|
||||
installer_layer = None
|
||||
|
||||
# "Previous/Current layer already loaded?"
|
||||
if state.layer is not None and installer_layer is not None:
|
||||
current_prio = layer_priority(state.layer)
|
||||
installer_prio = layer_priority(installer_layer)
|
||||
|
||||
if current_prio < installer_prio:
|
||||
# Current CLI comes from a higher-priority layer,
|
||||
# so we skip this installer entirely.
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Skipping installer "
|
||||
"[pkgmgr] Skipping installer "
|
||||
f"{installer.__class__.__name__} for {identifier} – "
|
||||
f"CLI already provided by layer {state.layer.value!r}."
|
||||
)
|
||||
continue
|
||||
|
||||
if current_prio == installer_prio:
|
||||
# Same layer already provides a CLI; usually there is no
|
||||
# need to run another installer on top of it.
|
||||
if current_prio == installer_prio and not ctx.force_update:
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Skipping installer "
|
||||
"[pkgmgr] Skipping installer "
|
||||
f"{installer.__class__.__name__} for {identifier} – "
|
||||
f"layer {installer_layer.value!r} is already loaded."
|
||||
)
|
||||
continue
|
||||
|
||||
# Check if this installer is applicable at all.
|
||||
if not installer.supports(ctx):
|
||||
continue
|
||||
|
||||
# Capabilities: if everything this installer would provide is already
|
||||
# covered, we can safely skip it.
|
||||
caps = installer.discover_capabilities(ctx)
|
||||
if caps and caps.issubset(provided_capabilities):
|
||||
if not quiet:
|
||||
@@ -193,18 +132,22 @@ class InstallationPipeline:
|
||||
continue
|
||||
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or set()})..."
|
||||
)
|
||||
if ctx.force_update and state.layer is not None and installer_layer == state.layer:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' (upgrade requested)..."
|
||||
)
|
||||
else:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or set()})..."
|
||||
)
|
||||
|
||||
# Run the installer with error reporting.
|
||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||
|
||||
provided_capabilities.update(caps)
|
||||
|
||||
# After running an installer, re-resolve the command and layer.
|
||||
new_state = resolver.resolve()
|
||||
if new_state.command:
|
||||
repo["command"] = new_state.command
|
||||
@@ -221,9 +164,6 @@ class InstallationPipeline:
|
||||
|
||||
state = new_state
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ------------------------------------------------------------------
|
||||
@staticmethod
|
||||
def _run_installer(
|
||||
installer: BaseInstaller,
|
||||
@@ -232,9 +172,6 @@ class InstallationPipeline:
|
||||
repo_dir: str,
|
||||
quiet: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Execute a single installer with unified error handling.
|
||||
"""
|
||||
try:
|
||||
installer.run(ctx)
|
||||
except SystemExit as exc:
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""
|
||||
High-level mirror actions.
|
||||
|
||||
@@ -10,6 +8,7 @@ Public API:
|
||||
- setup_mirrors
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from .types import Repository, MirrorMap
|
||||
from .list_cmd import list_mirrors
|
||||
from .diff_cmd import diff_mirrors
|
||||
|
||||
@@ -150,7 +150,7 @@ def ensure_origin_remote(
|
||||
current = current_origin_url(repo_dir)
|
||||
if current == url or not url:
|
||||
print(
|
||||
f"[INFO] 'origin' already points to "
|
||||
"[INFO] 'origin' already points to "
|
||||
f"{current or '<unknown>'} (no change needed)."
|
||||
)
|
||||
else:
|
||||
|
||||
@@ -2,7 +2,7 @@ from __future__ import annotations
|
||||
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
from typing import List, Mapping
|
||||
from typing import Mapping
|
||||
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
|
||||
21
src/pkgmgr/actions/mirror/remote_check.py
Normal file
21
src/pkgmgr/actions/mirror/remote_check.py
Normal file
@@ -0,0 +1,21 @@
|
||||
# src/pkgmgr/actions/mirror/remote_check.py
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Tuple
|
||||
|
||||
from pkgmgr.core.git import GitError, run_git
|
||||
|
||||
|
||||
def probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
    """
    Probe a remote mirror URL using `git ls-remote`.

    Returns:
        (True, "") on success,
        (False, error_message) on failure.
    """
    reachable, message = True, ""
    try:
        run_git(["ls-remote", url], cwd=repo_dir)
    except GitError as exc:
        reachable, message = False, str(exc)
    return reachable, message
|
||||
70
src/pkgmgr/actions/mirror/remote_provision.py
Normal file
70
src/pkgmgr/actions/mirror/remote_provision.py
Normal file
@@ -0,0 +1,70 @@
|
||||
# src/pkgmgr/actions/mirror/remote_provision.py
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from pkgmgr.core.remote_provisioning import ProviderHint, RepoSpec, ensure_remote_repo
|
||||
from pkgmgr.core.remote_provisioning.ensure import EnsureOptions
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url
|
||||
from .types import Repository
|
||||
from .url_utils import normalize_provider_host, parse_repo_from_git_url
|
||||
|
||||
|
||||
def ensure_remote_repository(
    repo: Repository,
    repositories_base_dir: str,
    all_repos: List[Repository],
    preview: bool,
) -> None:
    """
    Ensure that the primary remote repository for *repo* exists on its provider.

    Flow:
      1. Build the mirror context and derive a primary remote URL.
      2. Parse host/owner/name from that URL; warn and return if any part
         cannot be derived.
      3. Call ensure_remote_repo() with interactive options, allowing the
         user to be prompted for a token (optionally saved to the keyring).

    Provisioning errors are caught and printed; this function never raises.
    """
    ctx = build_context(repo, repositories_base_dir, all_repos)
    resolved_mirrors = ctx.resolved_mirrors

    # Without a primary URL there is nothing to provision.
    primary_url = determine_primary_remote_url(repo, resolved_mirrors)
    if not primary_url:
        print("[INFO] No remote URL could be derived; skipping remote provisioning.")
        return

    # Split the URL into (host, owner, name); host is normalized
    # (brackets/port removed, lowercased) before provider lookup.
    host_raw, owner_from_url, name_from_url = parse_repo_from_git_url(primary_url)
    host = normalize_provider_host(host_raw)

    if not host or not owner_from_url or not name_from_url:
        print("[WARN] Could not derive host/owner/repository from URL; cannot ensure remote repo.")
        print(f"  url={primary_url!r}")
        print(f"  host={host!r}, owner={owner_from_url!r}, repository={name_from_url!r}")
        return

    print("------------------------------------------------------------")
    print(f"[REMOTE ENSURE] {ctx.identifier}")
    print(f"[REMOTE ENSURE] host: {host}")
    print("------------------------------------------------------------")

    # New repos default to private unless the config says otherwise.
    spec = RepoSpec(
        host=str(host),
        owner=str(owner_from_url),
        name=str(name_from_url),
        private=bool(repo.get("private", True)),
        description=str(repo.get("description", "")),
    )

    # Empty/whitespace provider config collapses to None (auto-detect).
    provider_kind = str(repo.get("provider", "")).strip().lower() or None

    try:
        result = ensure_remote_repo(
            spec,
            provider_hint=ProviderHint(kind=provider_kind),
            options=EnsureOptions(
                preview=preview,
                interactive=True,
                allow_prompt=True,
                save_prompt_token_to_keyring=True,
            ),
        )
        print(f"[REMOTE ENSURE] {result.status.upper()}: {result.message}")
        if result.url:
            print(f"[REMOTE ENSURE] URL: {result.url}")
    except Exception as exc:  # noqa: BLE001
        # Deliberately broad: provisioning is best-effort and must not
        # abort the surrounding mirror-setup loop.
        print(f"[ERROR] Remote provisioning failed: {exc}")

    print()
|
||||
@@ -1,23 +1,20 @@
|
||||
# src/pkgmgr/actions/mirror/setup_cmd.py
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Tuple
|
||||
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
from typing import List
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url, ensure_origin_remote
|
||||
from .git_remote import ensure_origin_remote, determine_primary_remote_url
|
||||
from .remote_check import probe_mirror
|
||||
from .remote_provision import ensure_remote_repository
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def _setup_local_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure local Git state is sane (currently: 'origin' remote).
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
@@ -29,103 +26,57 @@ def _setup_local_mirrors_for_repo(
|
||||
print()
|
||||
|
||||
|
||||
def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
|
||||
"""
|
||||
Probe a remote mirror by running `git ls-remote <url>`.
|
||||
|
||||
Returns:
|
||||
(True, "") on success,
|
||||
(False, error_message) on failure.
|
||||
|
||||
Wichtig:
|
||||
- Wir werten ausschließlich den Exit-Code aus.
|
||||
- STDERR kann Hinweise/Warnings enthalten und ist NICHT automatisch ein Fehler.
|
||||
"""
|
||||
try:
|
||||
# Wir ignorieren stdout komplett; wichtig ist nur, dass der Befehl ohne
|
||||
# GitError (also Exit-Code 0) durchläuft.
|
||||
run_git(["ls-remote", url], cwd=repo_dir)
|
||||
return True, ""
|
||||
except GitError as exc:
|
||||
return False, str(exc)
|
||||
|
||||
|
||||
def _setup_remote_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
ensure_remote: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Remote-side setup / validation.
|
||||
|
||||
Aktuell werden nur **nicht-destruktive Checks** gemacht:
|
||||
|
||||
- Für jeden Mirror (aus config + MIRRORS-Datei, file gewinnt):
|
||||
* `git ls-remote <url>` wird ausgeführt.
|
||||
* Bei Exit-Code 0 → [OK]
|
||||
* Bei Fehler → [WARN] + Details aus der GitError-Exception
|
||||
|
||||
Es werden **keine** Provider-APIs aufgerufen und keine Repos angelegt.
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_m = ctx.resolved_mirrors
|
||||
resolved_mirrors = ctx.resolved_mirrors
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
print(f"[MIRROR SETUP:REMOTE] {ctx.identifier}")
|
||||
print(f"[MIRROR SETUP:REMOTE] dir: {ctx.repo_dir}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
if not resolved_m:
|
||||
# Optional: Fallback auf eine heuristisch bestimmte URL, falls wir
|
||||
# irgendwann "automatisch anlegen" implementieren wollen.
|
||||
primary_url = determine_primary_remote_url(repo, resolved_m)
|
||||
if ensure_remote:
|
||||
ensure_remote_repository(
|
||||
repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
if not resolved_mirrors:
|
||||
primary_url = determine_primary_remote_url(repo, resolved_mirrors)
|
||||
if not primary_url:
|
||||
print(
|
||||
"[INFO] No mirrors configured (config or MIRRORS file), and no "
|
||||
"primary URL could be derived from provider/account/repository."
|
||||
)
|
||||
print("[INFO] No mirrors configured and no primary URL available.")
|
||||
print()
|
||||
return
|
||||
|
||||
ok, error_message = _probe_mirror(primary_url, ctx.repo_dir)
|
||||
ok, error_message = probe_mirror(primary_url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror (primary) is reachable: {primary_url}")
|
||||
print(f"[OK] primary: {primary_url}")
|
||||
else:
|
||||
print("[WARN] Primary remote URL is NOT reachable:")
|
||||
print(f" {primary_url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
print(f"[WARN] primary: {primary_url}")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
return
|
||||
|
||||
# Normaler Fall: wir haben benannte Mirrors aus config/MIRRORS
|
||||
for name, url in sorted(resolved_m.items()):
|
||||
ok, error_message = _probe_mirror(url, ctx.repo_dir)
|
||||
for name, url in sorted(resolved_mirrors.items()):
|
||||
ok, error_message = probe_mirror(url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror '{name}' is reachable: {url}")
|
||||
print(f"[OK] {name}: {url}")
|
||||
else:
|
||||
print(f"[WARN] Remote mirror '{name}' is NOT reachable:")
|
||||
print(f" {url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
print(f"[WARN] {name}: {url}")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
|
||||
|
||||
def setup_mirrors(
|
||||
@@ -135,22 +86,12 @@ def setup_mirrors(
|
||||
preview: bool = False,
|
||||
local: bool = True,
|
||||
remote: bool = True,
|
||||
ensure_remote: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Setup mirrors for the selected repositories.
|
||||
|
||||
local:
|
||||
- Configure local Git remotes (currently: ensure 'origin' is present and
|
||||
points to a reasonable URL).
|
||||
|
||||
remote:
|
||||
- Non-destructive remote checks using `git ls-remote` for each mirror URL.
|
||||
Es werden keine Repositories auf dem Provider angelegt.
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
if local:
|
||||
_setup_local_mirrors_for_repo(
|
||||
repo,
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
@@ -158,8 +99,9 @@ def setup_mirrors(
|
||||
|
||||
if remote:
|
||||
_setup_remote_mirrors_for_repo(
|
||||
repo,
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
ensure_remote=ensure_remote,
|
||||
)
|
||||
|
||||
111
src/pkgmgr/actions/mirror/url_utils.py
Normal file
111
src/pkgmgr/actions/mirror/url_utils.py
Normal file
@@ -0,0 +1,111 @@
|
||||
# src/pkgmgr/actions/mirror/url_utils.py
|
||||
from __future__ import annotations
|
||||
|
||||
from urllib.parse import urlparse
|
||||
from typing import Optional, Tuple
|
||||
|
||||
|
||||
def hostport_from_git_url(url: str) -> Tuple[str, Optional[str]]:
    """
    Extract (host, port) from a Git remote URL.

    Handles URL-style remotes (ssh://, https://), SCP-like remotes
    (git@host:path) and bare "host/path" strings. Port is None when
    the URL does not carry one.
    """
    cleaned = (url or "").strip()
    if not cleaned:
        return "", None

    if "://" in cleaned:
        netloc = (urlparse(cleaned).netloc or "").strip()
        # Drop any user(:password) prefix before the host.
        if "@" in netloc:
            netloc = netloc.partition("@")[2]

        # IPv6 literal such as "[::1]:22".
        if netloc.startswith("[") and "]" in netloc:
            close = netloc.index("]")
            host = netloc[1:close]
            trailer = netloc[close + 1 :]
            port = trailer[1:] if trailer.startswith(":") else None
            return host.strip(), (port.strip() if port else None)

        # Plain "host:port".
        if ":" in netloc:
            host, _, port = netloc.rpartition(":")
            return host.strip(), (port.strip() or None)

        return netloc.strip(), None

    # SCP-like form: git@host:path — no explicit port syntax.
    if "@" in cleaned and ":" in cleaned:
        tail = cleaned.partition("@")[2]
        return tail.partition(":")[0].strip(), None

    # Bare fallback: everything before the first slash is the host.
    return cleaned.partition("/")[0].strip(), None
|
||||
|
||||
|
||||
def normalize_provider_host(host: str) -> str:
    """
    Normalize a provider host string for comparison.

    Strips whitespace, unwraps IPv6 brackets, removes a single trailing
    ":port" suffix (IPv6 colons are left alone) and lowercases the result.
    """
    candidate = (host or "").strip()
    if not candidate:
        return ""

    # Unwrap an IPv6 literal like "[::1]:22" down to "::1".
    if candidate.startswith("[") and "]" in candidate:
        candidate = candidate[1 : candidate.index("]")]

    # Exactly one colon means "host:port"; more colons means raw IPv6.
    if candidate.count(":") == 1:
        candidate = candidate.split(":", 1)[0]

    return candidate.strip().lower()
|
||||
|
||||
|
||||
def _strip_dot_git(name: str) -> str:
|
||||
n = (name or "").strip()
|
||||
if n.lower().endswith(".git"):
|
||||
return n[:-4]
|
||||
return n
|
||||
|
||||
|
||||
def parse_repo_from_git_url(url: str) -> Tuple[str, Optional[str], Optional[str]]:
    """
    Parse (host, owner, repo_name) from common Git remote URLs.

    Supports:
      - ssh://git@host:2201/owner/repo.git
      - https://host/owner/repo.git
      - git@host:owner/repo.git
      - host/owner/repo(.git)   (best-effort)

    Returns:
        (host, owner, repo_name) with owner/repo possibly None if not derivable.
    """
    text = (url or "").strip()
    if not text:
        return "", None, None

    def _clean(name: str) -> str:
        # Inlined equivalent of _strip_dot_git: drop a trailing ".git".
        trimmed = (name or "").strip()
        return trimmed[:-4] if trimmed.lower().endswith(".git") else trimmed

    def _from_path(host: str, path: str) -> Tuple[str, Optional[str], Optional[str]]:
        # First two path segments are owner and repo; fewer → not derivable.
        segments = [seg for seg in path.strip("/").split("/") if seg]
        if len(segments) >= 2:
            return host, segments[0], _clean(segments[1])
        return host, None, None

    # URL-style (ssh://, https://, http://)
    if "://" in text:
        parsed = urlparse(text)
        return _from_path((parsed.hostname or "").strip(), parsed.path or "")

    # SCP-like: git@host:owner/repo.git
    if "@" in text and ":" in text:
        tail = text.partition("@")[2]
        host, _, path = tail.partition(":")
        return _from_path(host.strip(), path)

    # Fallback: host/owner/repo.git
    host, _, rest = text.partition("/")
    return _from_path(host.strip(), rest)
|
||||
5
src/pkgmgr/actions/publish/__init__.py
Normal file
5
src/pkgmgr/actions/publish/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .workflow import publish
|
||||
|
||||
__all__ = ["publish"]
|
||||
17
src/pkgmgr/actions/publish/git_tags.py
Normal file
17
src/pkgmgr/actions/publish/git_tags.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pkgmgr.core.git import run_git
|
||||
from pkgmgr.core.version.semver import SemVer, is_semver_tag
|
||||
|
||||
|
||||
def head_semver_tags(cwd: str = ".") -> list[str]:
    """Return all SemVer 'v*' tags pointing at HEAD, sorted ascending by version.

    Args:
        cwd: Working directory in which to run git (defaults to ".").

    Returns:
        Sorted list of tag names (e.g. ["v1.2.0", "v1.3.0"]); empty when
        HEAD carries no matching tag.
    """
    raw = run_git(["tag", "--points-at", "HEAD"], cwd=cwd)
    if not raw:
        return []

    stripped = (line.strip() for line in raw.splitlines())
    version_tags = [
        tag
        for tag in stripped
        if tag and tag.startswith("v") and is_semver_tag(tag)
    ]
    # sorted() on an empty list already yields [], so no extra guard is needed.
    return sorted(version_tags, key=SemVer.parse)
|
||||
24
src/pkgmgr/actions/publish/pypi_url.py
Normal file
24
src/pkgmgr/actions/publish/pypi_url.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from .types import PyPITarget
|
||||
|
||||
|
||||
def parse_pypi_project_url(url: str) -> PyPITarget | None:
|
||||
u = (url or "").strip()
|
||||
if not u:
|
||||
return None
|
||||
|
||||
parsed = urlparse(u)
|
||||
host = (parsed.netloc or "").lower()
|
||||
path = (parsed.path or "").strip("/")
|
||||
|
||||
if host not in ("pypi.org", "test.pypi.org"):
|
||||
return None
|
||||
|
||||
parts = [p for p in path.split("/") if p]
|
||||
if len(parts) >= 2 and parts[0] == "project":
|
||||
return PyPITarget(host=host, project=parts[1])
|
||||
|
||||
return None
|
||||
9
src/pkgmgr/actions/publish/types.py
Normal file
9
src/pkgmgr/actions/publish/types.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class PyPITarget:
    """Immutable identifier for a PyPI publish destination.

    Produced by parse_pypi_project_url() from a mirror URL and consumed by
    the publish workflow to select the upload target and credential lookup.
    """

    # Registry hostname, e.g. "pypi.org" or "test.pypi.org".
    host: str
    # Project (distribution) name as it appears in the /project/<name>/ URL.
    project: str
|
||||
109
src/pkgmgr/actions/publish/workflow.py
Normal file
109
src/pkgmgr/actions/publish/workflow.py
Normal file
@@ -0,0 +1,109 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import glob
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
from pkgmgr.actions.mirror.io import read_mirrors_file
|
||||
from pkgmgr.actions.mirror.types import Repository
|
||||
from pkgmgr.core.credentials.resolver import ResolutionOptions, TokenResolver
|
||||
from pkgmgr.core.version.semver import SemVer
|
||||
|
||||
from .git_tags import head_semver_tags
|
||||
from .pypi_url import parse_pypi_project_url
|
||||
|
||||
|
||||
def _require_tool(module: str) -> None:
|
||||
try:
|
||||
subprocess.run(
|
||||
["python", "-m", module, "--help"],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
check=True,
|
||||
)
|
||||
except Exception as exc:
|
||||
raise RuntimeError(
|
||||
f"Required Python module '{module}' is not available. "
|
||||
f"Install it via: pip install {module}"
|
||||
) from exc
|
||||
|
||||
|
||||
def publish(
    repo: Repository,
    repo_dir: str,
    *,
    preview: bool = False,
    interactive: bool = True,
    allow_prompt: bool = True,
) -> None:
    """Build and upload the repository's distribution to its PyPI mirror.

    Workflow:
        1. Read the mirrors file and extract PyPI project targets.
        2. Skip (with an informational message) when no PyPI mirror exists;
           refuse when more than one is configured (ambiguous destination).
        3. Skip when HEAD carries no SemVer 'v*' tag.
        4. Build sdist/wheel via `-m build` and upload via twine, using an
           API token obtained from the credential resolver.

    Args:
        repo: Repository configuration object (currently not inspected here).
        repo_dir: Path to the repository working tree.
        preview: When True, only describe the action; no build, no upload.
        interactive: Forwarded to the token resolver.
        allow_prompt: Whether the resolver may prompt the user for a token.

    Raises:
        RuntimeError: On multiple PyPI mirrors, missing build/twine modules,
            or an empty dist/ after a successful build.
        subprocess.CalledProcessError: If the build or upload exits non-zero.
    """
    import sys  # local import keeps the module's top-level imports unchanged

    mirrors = read_mirrors_file(repo_dir)

    # Collect every mirror URL that points at a PyPI project page.
    targets = [
        target
        for target in (parse_pypi_project_url(url) for url in mirrors.values())
        if target is not None
    ]

    if not targets:
        print("[INFO] No PyPI mirror found. Skipping publish.")
        return

    if len(targets) > 1:
        raise RuntimeError("Multiple PyPI mirrors found; refusing to publish.")

    tags = head_semver_tags(cwd=repo_dir)
    if not tags:
        print("[INFO] No version tag on HEAD. Skipping publish.")
        return

    tag = max(tags, key=SemVer.parse)
    target = targets[0]

    print(f"[INFO] Publishing {target.project} for tag {tag}")

    if preview:
        print("[PREVIEW] Would build and upload to PyPI.")
        return

    _require_tool("build")
    _require_tool("twine")

    # Start from a clean dist/ so stale artifacts are never uploaded.
    dist_dir = os.path.join(repo_dir, "dist")
    if os.path.isdir(dist_dir):
        shutil.rmtree(dist_dir, ignore_errors=True)

    # Use the running interpreter explicitly; a bare "python" may not exist
    # on PATH or may point at a different environment.
    subprocess.run(
        [sys.executable, "-m", "build"],
        cwd=repo_dir,
        check=True,
    )

    artifacts = sorted(glob.glob(os.path.join(dist_dir, "*")))
    if not artifacts:
        raise RuntimeError("No build artifacts found in dist/.")

    resolver = TokenResolver()
    token = resolver.get_token(
        provider_kind="pypi",
        host=target.host,
        owner=target.project,
        options=ResolutionOptions(
            interactive=interactive,
            allow_prompt=allow_prompt,
            save_prompt_token_to_keyring=True,
        ),
    ).token

    # Twine reads credentials from the environment; the "__token__" username
    # selects API-token authentication on PyPI.
    env = dict(os.environ)
    env["TWINE_USERNAME"] = "__token__"
    env["TWINE_PASSWORD"] = token

    subprocess.run(
        [sys.executable, "-m", "twine", "upload", *artifacts],
        cwd=repo_dir,
        env=env,
        check=True,
    )

    print("[INFO] Publish completed.")
|
||||
@@ -289,7 +289,7 @@ def update_spec_version(
|
||||
|
||||
if preview:
|
||||
print(
|
||||
f"[PREVIEW] Would update spec file "
|
||||
"[PREVIEW] Would update spec file "
|
||||
f"{os.path.basename(spec_path)} to Version: {new_version}, Release: 1..."
|
||||
)
|
||||
return
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# src/pkgmgr/actions/release/workflow.py
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import os
|
||||
import sys
|
||||
@@ -7,6 +7,7 @@ from typing import Optional
|
||||
|
||||
from pkgmgr.actions.branch import close_branch
|
||||
from pkgmgr.core.git import get_current_branch, GitError
|
||||
from pkgmgr.core.repository.paths import resolve_repo_paths
|
||||
|
||||
from .files import (
|
||||
update_changelog,
|
||||
@@ -57,8 +58,12 @@ def _release_impl(
|
||||
print(f"New version: {new_ver_str} ({release_type})")
|
||||
|
||||
repo_root = os.path.dirname(os.path.abspath(pyproject_path))
|
||||
paths = resolve_repo_paths(repo_root)
|
||||
|
||||
# --- Update versioned files ------------------------------------------------
|
||||
|
||||
update_pyproject_version(pyproject_path, new_ver_str, preview=preview)
|
||||
|
||||
changelog_message = update_changelog(
|
||||
changelog_path,
|
||||
new_ver_str,
|
||||
@@ -66,38 +71,46 @@ def _release_impl(
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
flake_path = os.path.join(repo_root, "flake.nix")
|
||||
update_flake_version(flake_path, new_ver_str, preview=preview)
|
||||
update_flake_version(paths.flake_nix, new_ver_str, preview=preview)
|
||||
|
||||
pkgbuild_path = os.path.join(repo_root, "PKGBUILD")
|
||||
update_pkgbuild_version(pkgbuild_path, new_ver_str, preview=preview)
|
||||
if paths.arch_pkgbuild:
|
||||
update_pkgbuild_version(paths.arch_pkgbuild, new_ver_str, preview=preview)
|
||||
else:
|
||||
print("[INFO] No PKGBUILD found (packaging/arch/PKGBUILD or PKGBUILD). Skipping.")
|
||||
|
||||
spec_path = os.path.join(repo_root, "package-manager.spec")
|
||||
update_spec_version(spec_path, new_ver_str, preview=preview)
|
||||
if paths.rpm_spec:
|
||||
update_spec_version(paths.rpm_spec, new_ver_str, preview=preview)
|
||||
else:
|
||||
print("[INFO] No RPM spec file found. Skipping spec version update.")
|
||||
|
||||
effective_message: Optional[str] = message
|
||||
if effective_message is None and isinstance(changelog_message, str):
|
||||
if changelog_message.strip():
|
||||
effective_message = changelog_message.strip()
|
||||
|
||||
debian_changelog_path = os.path.join(repo_root, "debian", "changelog")
|
||||
package_name = os.path.basename(repo_root) or "package-manager"
|
||||
|
||||
update_debian_changelog(
|
||||
debian_changelog_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
if paths.debian_changelog:
|
||||
update_debian_changelog(
|
||||
paths.debian_changelog,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
else:
|
||||
print("[INFO] No debian changelog found. Skipping debian/changelog update.")
|
||||
|
||||
update_spec_changelog(
|
||||
spec_path=spec_path,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
if paths.rpm_spec:
|
||||
update_spec_changelog(
|
||||
spec_path=paths.rpm_spec,
|
||||
package_name=package_name,
|
||||
new_version=new_ver_str,
|
||||
message=effective_message,
|
||||
preview=preview,
|
||||
)
|
||||
|
||||
# --- Git commit / tag / push ----------------------------------------------
|
||||
|
||||
commit_msg = f"Release version {new_ver_str}"
|
||||
tag_msg = effective_message or commit_msg
|
||||
@@ -105,12 +118,12 @@ def _release_impl(
|
||||
files_to_add = [
|
||||
pyproject_path,
|
||||
changelog_path,
|
||||
flake_path,
|
||||
pkgbuild_path,
|
||||
spec_path,
|
||||
debian_changelog_path,
|
||||
paths.flake_nix,
|
||||
paths.arch_pkgbuild,
|
||||
paths.rpm_spec,
|
||||
paths.debian_changelog,
|
||||
]
|
||||
existing_files = [p for p in files_to_add if p and os.path.exists(p)]
|
||||
existing_files = [p for p in files_to_add if isinstance(p, str) and p and os.path.exists(p)]
|
||||
|
||||
if preview:
|
||||
for path in existing_files:
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import yaml
|
||||
from pkgmgr.core.command.alias import generate_alias
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
|
||||
@@ -1,15 +1,32 @@
|
||||
import os
|
||||
import sys
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
|
||||
def deinstall_repos(selected_repos, repositories_base_dir, bin_dir, all_repos, preview=False):
|
||||
from pkgmgr.core.command.run import run_command
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
|
||||
|
||||
def deinstall_repos(
|
||||
selected_repos,
|
||||
repositories_base_dir,
|
||||
bin_dir,
|
||||
all_repos,
|
||||
preview: bool = False,
|
||||
) -> None:
|
||||
for repo in selected_repos:
|
||||
repo_identifier = get_repo_identifier(repo, all_repos)
|
||||
alias_path = os.path.join(bin_dir, repo_identifier)
|
||||
|
||||
# Resolve repository directory
|
||||
repo_dir = get_repo_dir(repositories_base_dir, repo)
|
||||
|
||||
# Prefer alias if available; fall back to identifier
|
||||
alias_name = str(repo.get("alias") or repo_identifier)
|
||||
alias_path = os.path.join(os.path.expanduser(bin_dir), alias_name)
|
||||
|
||||
# Remove alias link/file (interactive)
|
||||
if os.path.exists(alias_path):
|
||||
confirm = input(f"Are you sure you want to delete link '{alias_path}' for {repo_identifier}? [y/N]: ").strip().lower()
|
||||
confirm = input(
|
||||
f"Are you sure you want to delete link '{alias_path}' for {repo_identifier}? [y/N]: "
|
||||
).strip().lower()
|
||||
if confirm == "y":
|
||||
if preview:
|
||||
print(f"[Preview] Would remove link '{alias_path}'.")
|
||||
@@ -19,10 +36,13 @@ def deinstall_repos(selected_repos, repositories_base_dir, bin_dir, all_repos, p
|
||||
else:
|
||||
print(f"No link found for {repo_identifier} in {bin_dir}.")
|
||||
|
||||
# Run make deinstall if repository exists and has a Makefile
|
||||
makefile_path = os.path.join(repo_dir, "Makefile")
|
||||
if os.path.exists(makefile_path):
|
||||
print(f"Makefile found in {repo_identifier}, running 'make deinstall'...")
|
||||
try:
|
||||
run_command("make deinstall", cwd=repo_dir, preview=preview)
|
||||
except SystemExit as e:
|
||||
print(f"[Warning] Failed to run 'make deinstall' for {repo_identifier}: {e}")
|
||||
print(
|
||||
f"[Warning] Failed to run 'make deinstall' for {repo_identifier}: {e}"
|
||||
)
|
||||
|
||||
@@ -272,7 +272,7 @@ def list_repositories(
|
||||
f"{'STATUS'.ljust(status_width)} "
|
||||
f"{'CATEGORIES'.ljust(cat_width)} "
|
||||
f"{'TAGS'.ljust(tag_width)} "
|
||||
f"DIR"
|
||||
"DIR"
|
||||
f"{RESET}"
|
||||
)
|
||||
print(header)
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
from pkgmgr.core.repository.verify import verify_repository
|
||||
|
||||
|
||||
@@ -17,13 +20,6 @@ def pull_with_verification(
|
||||
) -> None:
|
||||
"""
|
||||
Execute `git pull` for each repository with verification.
|
||||
|
||||
- Uses verify_repository() in "pull" mode.
|
||||
- If verification fails (and verification info is set) and
|
||||
--no-verification is not enabled, the user is prompted to confirm
|
||||
the pull.
|
||||
- In preview mode, no interactive prompts are performed and no
|
||||
Git commands are executed; only the would-be command is printed.
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
repo_identifier = get_repo_identifier(repo, all_repos)
|
||||
@@ -34,18 +30,13 @@ def pull_with_verification(
|
||||
continue
|
||||
|
||||
verified_info = repo.get("verified")
|
||||
verified_ok, errors, commit_hash, signing_key = verify_repository(
|
||||
verified_ok, errors, _commit_hash, _signing_key = verify_repository(
|
||||
repo,
|
||||
repo_dir,
|
||||
mode="pull",
|
||||
no_verification=no_verification,
|
||||
)
|
||||
|
||||
# Only prompt the user if:
|
||||
# - we are NOT in preview mode
|
||||
# - verification is enabled
|
||||
# - the repo has verification info configured
|
||||
# - verification failed
|
||||
if (
|
||||
not preview
|
||||
and not no_verification
|
||||
@@ -59,16 +50,14 @@ def pull_with_verification(
|
||||
if choice != "y":
|
||||
continue
|
||||
|
||||
# Build the git pull command (include extra args if present)
|
||||
args_part = " ".join(extra_args) if extra_args else ""
|
||||
full_cmd = f"git pull{(' ' + args_part) if args_part else ''}"
|
||||
|
||||
if preview:
|
||||
# Preview mode: only show the command, do not execute or prompt.
|
||||
print(f"[Preview] In '{repo_dir}': {full_cmd}")
|
||||
else:
|
||||
print(f"Running in '{repo_dir}': {full_cmd}")
|
||||
result = subprocess.run(full_cmd, cwd=repo_dir, shell=True)
|
||||
result = subprocess.run(full_cmd, cwd=repo_dir, shell=True, check=False)
|
||||
if result.returncode != 0:
|
||||
print(
|
||||
f"'git pull' for {repo_identifier} failed "
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import sys
|
||||
import shutil
|
||||
|
||||
from pkgmgr.actions.proxy import exec_proxy_command
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
import sys
|
||||
import shutil
|
||||
|
||||
from pkgmgr.actions.repository.pull import pull_with_verification
|
||||
from pkgmgr.actions.install import install_repos
|
||||
|
||||
|
||||
def update_repos(
    selected_repos,
    repositories_base_dir,
    bin_dir,
    all_repos,
    no_verification,
    system_update,
    preview: bool,
    quiet: bool,
    update_dependencies: bool,
    clone_mode: str,
):
    """Pull, reinstall, and optionally system-update the selected repositories.

    First pulls the latest changes (with verification unless disabled), then
    reinstalls the repositories so aliases and dependencies stay current.
    When *system_update* is set, a Nix profile upgrade (if Nix is installed)
    and an Arch/AUR full upgrade are run afterwards.

    Args:
        selected_repos: Repositories chosen for the update.
        repositories_base_dir: Base directory containing all repositories.
        bin_dir: Directory holding the alias symlinks.
        all_repos: Full repository configuration list.
        no_verification: Skip verification when True.
        system_update: Also upgrade the host system when True.
        preview: Show commands without executing them.
        quiet: Suppress informational messages during installation.
        update_dependencies: Also update dependent repositories.
        clone_mode: Clone method for repositories ("ssh" or "https").
    """
    pull_with_verification(
        selected_repos,
        repositories_base_dir,
        all_repos,
        [],
        no_verification,
        preview,
    )

    install_repos(
        selected_repos,
        repositories_base_dir,
        bin_dir,
        all_repos,
        no_verification,
        preview,
        quiet,
        clone_mode,
        update_dependencies,
    )

    if not system_update:
        return

    from pkgmgr.core.command.run import run_command

    # Nix: upgrade every profile entry, but only when Nix is installed.
    if shutil.which("nix") is not None:
        try:
            run_command("nix profile upgrade '.*'", preview=preview)
        except SystemExit as e:
            print(f"[Warning] 'nix profile upgrade' failed: {e}")

    # Arch / AUR full system upgrade.
    run_command("sudo -u aur_builder yay -Syu --noconfirm", preview=preview)
    run_command("sudo pacman -Syyu --noconfirm", preview=preview)
|
||||
10
src/pkgmgr/actions/update/__init__.py
Normal file
10
src/pkgmgr/actions/update/__init__.py
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pkgmgr.actions.update.manager import UpdateManager
|
||||
|
||||
__all__ = [
|
||||
"UpdateManager",
|
||||
]
|
||||
61
src/pkgmgr/actions/update/manager.py
Normal file
61
src/pkgmgr/actions/update/manager.py
Normal file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Iterable
|
||||
|
||||
from pkgmgr.actions.update.system_updater import SystemUpdater
|
||||
|
||||
|
||||
class UpdateManager:
    """Coordinates a full update run.

    Responsibilities:
        - pull the selected repositories (with verification) and reinstall them
        - optionally trigger a host system update afterwards
    """

    def __init__(self) -> None:
        # SystemUpdater carries out the host-level upgrade step.
        self._system_updater = SystemUpdater()

    def run(
        self,
        selected_repos: Iterable[Any],
        repositories_base_dir: str,
        bin_dir: str,
        all_repos: Any,
        no_verification: bool,
        system_update: bool,
        preview: bool,
        quiet: bool,
        update_dependencies: bool,
        clone_mode: str,
        force_update: bool = True,
    ) -> None:
        """Pull and reinstall *selected_repos*, then optionally update the system.

        Args mirror the legacy update_repos() helper; *force_update* is
        forwarded to the installer.
        """
        # NOTE(review): imports are deferred to call time — presumably to
        # avoid import cycles between action modules; confirm before hoisting.
        from pkgmgr.actions.install import install_repos
        from pkgmgr.actions.repository.pull import pull_with_verification

        pull_with_verification(
            selected_repos,
            repositories_base_dir,
            all_repos,
            [],
            no_verification,
            preview,
        )

        install_repos(
            selected_repos,
            repositories_base_dir,
            bin_dir,
            all_repos,
            no_verification,
            preview,
            quiet,
            clone_mode,
            update_dependencies,
            force_update=force_update,
        )

        if not system_update:
            return
        self._system_updater.run(preview=preview)
|
||||
66
src/pkgmgr/actions/update/os_release.py
Normal file
66
src/pkgmgr/actions/update/os_release.py
Normal file
@@ -0,0 +1,66 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict
|
||||
|
||||
|
||||
def read_os_release(path: str = "/etc/os-release") -> Dict[str, str]:
    """
    Parse an os-release file into a dict. Returns empty dict if missing.

    Lines are KEY=VALUE; blank lines, '#' comments, and lines without '='
    are ignored. Values may be wrapped in double OR single quotes (both are
    allowed by the freedesktop os-release spec); exactly one surrounding
    matched quote pair is removed.

    Args:
        path: File to parse (defaults to /etc/os-release).

    Returns:
        Mapping of keys to unquoted values; {} when the file does not exist.
    """
    if not os.path.exists(path):
        return {}

    result: Dict[str, str] = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, value = line.split("=", 1)
            value = value.strip()
            # Remove one matching pair of surrounding quotes (" or '), not
            # arbitrary runs of quote characters as str.strip would.
            if len(value) >= 2 and value[0] == value[-1] and value[0] in "\"'":
                value = value[1:-1]
            result[key.strip()] = value
    return result
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class OSReleaseInfo:
    """
    Minimal /etc/os-release representation for distro detection.
    """

    # Lowercased ID, lowercased ID_LIKE, and verbatim PRETTY_NAME fields.
    id: str = ""
    id_like: str = ""
    pretty_name: str = ""

    @staticmethod
    def load() -> "OSReleaseInfo":
        """Build an OSReleaseInfo from the host's os-release data."""
        data = read_os_release()
        return OSReleaseInfo(
            id=(data.get("ID") or "").lower(),
            id_like=(data.get("ID_LIKE") or "").lower(),
            pretty_name=(data.get("PRETTY_NAME") or ""),
        )

    def ids(self) -> set[str]:
        """All distro identifiers: ID plus each whitespace-separated ID_LIKE entry."""
        collected: set[str] = {part.strip() for part in self.id_like.split()}
        if self.id:
            collected.add(self.id)
        return collected

    def is_arch_family(self) -> bool:
        """True when the system identifies as Arch Linux (or reports 'archlinux')."""
        return not self.ids().isdisjoint({"arch", "archlinux"})

    def is_debian_family(self) -> bool:
        """True for Debian- or Ubuntu-based systems."""
        return not self.ids().isdisjoint({"debian", "ubuntu"})

    def is_fedora_family(self) -> bool:
        """True for Fedora/RHEL-family systems (incl. CentOS, Rocky, Alma)."""
        return not self.ids().isdisjoint(
            {"fedora", "rhel", "centos", "rocky", "almalinux"}
        )
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user