Compare commits

21 Commits

| SHA1 |
|---|
| 69d28a461d |
| 03e414cc9f |
| 7674762c9a |
| a47de15e42 |
| 37f3057d31 |
| d55c8d3726 |
| 3990560cd7 |
| d1e5a71f77 |
| d59dc8ad53 |
| 55f4a1e941 |
| 2a4ec18532 |
| 2debdbee09 |
| 4cb62e90f8 |
| 923519497a |
| 5fa18cb449 |
| f513196911 |
| 7f06447bbd |
| 1e5d6d3eee |
| f2970adbb2 |
| 7f262c6557 |
| 0bc7a3ecc0 |
.github/workflows/test-virgin-root.yml (vendored, 2 changes)

@@ -46,8 +46,6 @@ jobs:
     . "$HOME/.venvs/pkgmgr/bin/activate"

-    export NIX_CONFIG="experimental-features = nix-command flakes"
-
     pkgmgr update pkgmgr --clone-mode shallow --no-verification
     pkgmgr version pkgmgr

.github/workflows/test-virgin-user.yml (vendored, 1 change)

@@ -59,7 +59,6 @@ jobs:
     pkgmgr version pkgmgr

     export NIX_REMOTE=local
-    export NIX_CONFIG=\"experimental-features = nix-command flakes\"
     nix run /src#pkgmgr -- version pkgmgr
     "
     '
CHANGELOG.md (30 changes)

@@ -1,3 +1,33 @@
+## [1.6.2] - 2025-12-14
+
+* **pkgmgr version** now also shows the installed pkgmgr version when run outside a repository.
+
+
+## [1.6.1] - 2025-12-14
+
+* Added automatic retry handling for GitHub 403 / rate-limit errors during Nix flake installs (Fibonacci backoff with jitter).
+
+
+## [1.6.0] - 2025-12-14
+
+**Changed**
+- Unified update handling via a single top-level `pkgmgr update` command, removing ambiguous update paths.
+- Improved update reliability by routing all update logic through a central UpdateManager.
+- Renamed the system update flag from `--system-update` to `--system` for clarity and consistency.
+- Made mirror handling explicit and safer by separating setup, check, and provision responsibilities.
+- Improved credential resolution for remote providers (environment → keyring → interactive).
+
+**Added**
+- Optional system updates via `pkgmgr update --system` (Arch, Debian/Ubuntu, Fedora/RHEL).
+- `pkgmgr install --update` to force re-running installers and refresh existing installations.
+- Remote repository provisioning for mirrors on supported providers.
+- Extended end-to-end test coverage for update and mirror workflows.
+
+**Fixed**
+- Resolved “Unknown repos command: update” errors after CLI refactoring.
+- Improved Nix update stability and reduced CI failures caused by transient rate limits.
+
+
 ## [1.5.0] - 2025-12-13

 * Commands now show live output while running, making long operations easier to follow
@@ -36,9 +36,6 @@ CMD ["bash"]
 # ============================================================
 FROM virgin AS full

-# Nix environment defaults (only config; nix itself comes from deps/install flow)
-ENV NIX_CONFIG="experimental-features = nix-command flakes"
-
 WORKDIR /build

 # Copy full repository for build
Makefile (2 changes)

@@ -44,7 +44,7 @@ install:
 # ------------------------------------------------------------

 # Default: keep current auto-detection behavior
-setup: setup-nix setup-venv
+setup: setup-venv

 # Explicit: developer setup (Python venv + shell RC + install)
 setup-venv: setup-nix
@@ -32,7 +32,7 @@
 rec {
   pkgmgr = pyPkgs.buildPythonApplication {
     pname = "package-manager";
-    version = "1.5.0";
+    version = "1.6.2";

     # Use the git repo as source
     src = ./.;
@@ -47,7 +47,7 @@ package() {
   cd "$srcdir/$_srcdir_name"

   # Install the wrapper into /usr/bin
-  install -Dm0755 "scripts/pkgmgr-wrapper.sh" \
+  install -Dm0755 "scripts/launcher.sh" \
     "$pkgdir/usr/bin/pkgmgr"

   # Install Nix bootstrap (init + lib)
@@ -28,7 +28,7 @@ override_dh_auto_install:
 	install -d debian/package-manager/usr/lib/package-manager

 	# Install wrapper
-	install -m0755 scripts/pkgmgr-wrapper.sh \
+	install -m0755 scripts/launcher.sh \
 		debian/package-manager/usr/bin/pkgmgr

 	# Install Nix bootstrap (init + lib)
@@ -42,7 +42,7 @@ install -d %{buildroot}/usr/lib/package-manager
 cp -a . %{buildroot}/usr/lib/package-manager/

 # Wrapper
-install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr
+install -m0755 scripts/launcher.sh %{buildroot}%{_bindir}/pkgmgr

 # Nix bootstrap (init + lib)
 install -d %{buildroot}/usr/lib/package-manager/nix
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "package-manager"
-version = "1.5.0"
+version = "1.6.2"
 description = "Kevin's package-manager tool (pkgmgr)"
 readme = "README.md"
 requires-python = ">=3.9"
@@ -19,16 +19,17 @@ authors = [

 # Base runtime dependencies
 dependencies = [
-    "PyYAML>=6.0"
+    "PyYAML>=6.0",
+    "tomli; python_version < \"3.11\"",
 ]

 [project.urls]
-Homepage = "https://github.com/kevinveenbirkenbach/package-manager"
+Homepage = "https://s.veen.world/pkgmgr"
 Source = "https://github.com/kevinveenbirkenbach/package-manager"

 [project.optional-dependencies]
+keyring = ["keyring>=24.0.0"]
 dev = [
-    "pytest",
     "mypy"
 ]

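The new environment marker only pulls in the TOML backport on interpreters older than 3.11. As context, a typical consumer pattern that such a dependency enables looks like the following sketch; this is hypothetical illustration, not code from this repository:

```python
# Illustration only: read pyproject.toml with the stdlib parser on 3.11+,
# falling back to the tomli backport installed via 'tomli; python_version < "3.11"'.
import sys

if sys.version_info >= (3, 11):
    import tomllib
else:
    import tomli as tomllib

with open("pyproject.toml", "rb") as f:
    project = tomllib.load(f)

print(project["project"]["version"])
```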
@@ -1,11 +1,6 @@
 #!/usr/bin/env bash
 set -euo pipefail

-# Ensure NIX_CONFIG has our defaults if not already set
-if [[ -z "${NIX_CONFIG:-}" ]]; then
-  export NIX_CONFIG="experimental-features = nix-command flakes"
-fi
-
 FLAKE_DIR="/usr/lib/package-manager"

 # ---------------------------------------------------------------------------

@@ -43,6 +38,6 @@ if command -v nix >/dev/null 2>&1; then
   exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
 fi

-echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
-echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
+echo "[launcher] ERROR: 'nix' binary not found on PATH after init."
+echo "[launcher] Nix is required to run pkgmgr (no Python fallback)."
 exit 1
@@ -11,45 +11,79 @@ nixconf_file_path() {
   echo "/etc/nix/nix.conf"
 }

-nixconf_ensure_experimental_features() {
-  local nix_conf want
-  nix_conf="$(nixconf_file_path)"
-  want="experimental-features = nix-command flakes"
+# Ensure a given nix.conf key contains required tokens (merged, no duplicates)
+nixconf_ensure_features_key() {
+  local nix_conf="$1"
+  local key="$2"
+  shift 2
+  local required=("$@")

   mkdir -p /etc/nix

+  # Create file if missing (with just the required tokens)
   if [[ ! -f "${nix_conf}" ]]; then
+    local want="${key} = ${required[*]}"
     echo "[nix-conf] Creating ${nix_conf} with: ${want}"
     printf "%s\n" "${want}" >"${nix_conf}"
     return 0
   fi

-  if grep -qE '^\s*experimental-features\s*=' "${nix_conf}"; then
-    if grep -qE '^\s*experimental-features\s*=.*\bnix-command\b' "${nix_conf}" \
-      && grep -qE '^\s*experimental-features\s*=.*\bflakes\b' "${nix_conf}"; then
-      echo "[nix-conf] experimental-features already correct"
+  # Key exists -> merge tokens
+  if grep -qE "^\s*${key}\s*=" "${nix_conf}"; then
+    local ok=1
+    local t
+    for t in "${required[@]}"; do
+      if ! grep -qE "^\s*${key}\s*=.*\b${t}\b" "${nix_conf}"; then
+        ok=0
+        break
+      fi
+    done
+
+    if [[ "$ok" -eq 1 ]]; then
+      echo "[nix-conf] ${key} already correct"
       return 0
     fi

-    echo "[nix-conf] Extending experimental-features in ${nix_conf}"
+    echo "[nix-conf] Extending ${key} in ${nix_conf}"

     local current
-    current="$(grep -E '^\s*experimental-features\s*=' "${nix_conf}" | head -n1 | cut -d= -f2-)"
+    current="$(grep -E "^\s*${key}\s*=" "${nix_conf}" | head -n1 | cut -d= -f2-)"
     current="$(echo "${current}" | xargs)" # trim

-    # Build a merged feature string without duplicates (simple token set)
-    local merged="nix-command flakes"
+    local merged=""
     local token
+
+    # Start with existing tokens
     for token in ${current}; do
       if [[ " ${merged} " != *" ${token} "* ]]; then
         merged="${merged} ${token}"
       fi
     done

-    sed -i "s|^\s*experimental-features\s*=.*|experimental-features = ${merged}|" "${nix_conf}"
+    # Add required tokens
+    for token in "${required[@]}"; do
+      if [[ " ${merged} " != *" ${token} "* ]]; then
+        merged="${merged} ${token}"
+      fi
+    done
+
+    merged="$(echo "${merged}" | xargs)" # trim
+
+    sed -i "s|^\s*${key}\s*=.*|${key} = ${merged}|" "${nix_conf}"
     return 0
   fi

+  # Key missing -> append
+  local want="${key} = ${required[*]}"
   echo "[nix-conf] Appending to ${nix_conf}: ${want}"
   printf "\n%s\n" "${want}" >>"${nix_conf}"
 }
+
+nixconf_ensure_experimental_features() {
+  local nix_conf
+  nix_conf="$(nixconf_file_path)"
+
+  # Ensure both keys to avoid prompts and cover older/alternate expectations
+  nixconf_ensure_features_key "${nix_conf}" "experimental-features" "nix-command" "flakes"
+  nixconf_ensure_features_key "${nix_conf}" "extra-experimental-features" "nix-command" "flakes"
+}
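The token-merge behaviour of `nixconf_ensure_features_key` is easier to see without the shell quoting. The following Python sketch mirrors only the merge logic (keep existing tokens, append missing required ones once); it is an illustration and not part of the repository:

```python
def merge_tokens(existing: str, required: list[str]) -> str:
    """Mirror of the shell merge: existing tokens first, then any missing required tokens."""
    merged: list[str] = []
    for token in existing.split() + required:
        if token not in merged:
            merged.append(token)
    return " ".join(merged)

# An existing line "experimental-features = ca-derivations nix-command"
# becomes "ca-derivations nix-command flakes" after ensuring nix-command and flakes.
print(merge_tokens("ca-derivations nix-command", ["nix-command", "flakes"]))
```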
scripts/nix/lib/retry_403.sh (new executable file, 52 lines)

#!/usr/bin/env bash
set -euo pipefail

if [[ -n "${PKGMGR_NIX_RETRY_403_SH:-}" ]]; then
  return 0
fi
PKGMGR_NIX_RETRY_403_SH=1

# Retry only when we see the GitHub API rate limit 403 error during nix flake evaluation.
# Retries 7 times with delays: 10, 30, 50, 80, 130, 210, 420 seconds.
run_with_github_403_retry() {
  local -a delays=(10 30 50 80 130 210 420)
  local attempt=0
  local max_retries="${#delays[@]}"

  while true; do
    local err tmp
    tmp="$(mktemp -t nix-err.XXXXXX)"
    err=0

    # Run the command; capture stderr for inspection while preserving stdout.
    if "$@" 2>"$tmp"; then
      rm -f "$tmp"
      return 0
    else
      err=$?
    fi

    # Only retry on the specific GitHub API rate limit 403 case.
    if grep -qE 'HTTP error 403' "$tmp" && grep -qiE 'API rate limit exceeded|api\.github\.com' "$tmp"; then
      if (( attempt >= max_retries )); then
        cat "$tmp" >&2
        rm -f "$tmp"
        return "$err"
      fi

      local sleep_s="${delays[$attempt]}"
      attempt=$((attempt + 1))

      echo "[nix-retry] GitHub API rate-limit (403). Retry ${attempt}/${max_retries} in ${sleep_s}s: $*" >&2
      cat "$tmp" >&2
      rm -f "$tmp"
      sleep "$sleep_s"
      continue
    fi

    # Not our retry case -> fail fast with original stderr.
    cat "$tmp" >&2
    rm -f "$tmp"
    return "$err"
  done
}
@@ -49,7 +49,7 @@ docker run --rm \
   # Gitdir path shown in the "dubious ownership" error
   git config --global --add safe.directory /src/.git || true
   # Ephemeral CI containers: allow all paths as a last resort
-  git config --global --add safe.directory '*' || true
+  git config --global --add safe.directory "*" || true
 fi

 # Run the E2E tests inside the Nix development shell
@@ -27,7 +27,7 @@ docker run --rm \
 echo ">>> preflight: nix must exist in image"
 if ! command -v nix >/dev/null 2>&1; then
   echo "NO_NIX"
-  echo "ERROR: nix not found in image '\'''"${IMAGE}"''\'' (PKGMGR_DISTRO='"${PKGMGR_DISTRO}"')"
+  echo "ERROR: nix not found in image '"${IMAGE}"' (PKGMGR_DISTRO='"${PKGMGR_DISTRO}"')"
   echo "HINT: Ensure Nix is installed during image build for this distro."
   exit 1
 fi

@@ -35,14 +35,28 @@ docker run --rm \
 echo ">>> nix version"
 nix --version

+# ------------------------------------------------------------
+# Retry helper for GitHub API rate-limit (HTTP 403)
+# ------------------------------------------------------------
+if [[ -f /src/scripts/nix/lib/retry_403.sh ]]; then
+  # shellcheck source=./scripts/nix/lib/retry_403.sh
+  source /src/scripts/nix/lib/retry_403.sh
+elif [[ -f ./scripts/nix/lib/retry_403.sh ]]; then
+  # shellcheck source=./scripts/nix/lib/retry_403.sh
+  source ./scripts/nix/lib/retry_403.sh
+else
+  echo "ERROR: retry helper not found: scripts/nix/lib/retry_403.sh"
+  exit 1
+fi
+
 echo ">>> nix flake show"
-nix flake show . --no-write-lock-file >/dev/null
+run_with_github_403_retry nix flake show . --no-write-lock-file >/dev/null

 echo ">>> nix build .#default"
-nix build .#default --no-link --no-write-lock-file
+run_with_github_403_retry nix build .#default --no-link --no-write-lock-file

 echo ">>> nix run .#pkgmgr -- --help"
-nix run .#pkgmgr -- --help --no-write-lock-file
+run_with_github_403_retry nix run .#pkgmgr -- --help --no-write-lock-file

 echo ">>> OK: Nix flake-only test succeeded."
 '
src/pkgmgr/actions/install/__init__.py

@@ -1,3 +1,4 @@
+# src/pkgmgr/actions/install/__init__.py
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

@@ -27,7 +28,7 @@ from pkgmgr.actions.install.installers.os_packages import (
     DebianControlInstaller,
     RpmSpecInstaller,
 )
-from pkgmgr.actions.install.installers.nix_flake import (
+from pkgmgr.actions.install.installers.nix import (
     NixFlakeInstaller,
 )
 from pkgmgr.actions.install.installers.python import PythonInstaller

@@ -36,10 +37,8 @@ from pkgmgr.actions.install.installers.makefile import (
 )
 from pkgmgr.actions.install.pipeline import InstallationPipeline

-
 Repository = Dict[str, Any]

-# All available installers, in the order they should be considered.
 INSTALLERS = [
     ArchPkgbuildInstaller(),
     DebianControlInstaller(),

@@ -50,11 +49,6 @@ INSTALLERS = [
 ]


-# ---------------------------------------------------------------------------
-# Internal helpers
-# ---------------------------------------------------------------------------
-
-
 def _ensure_repo_dir(
     repo: Repository,
     repositories_base_dir: str,

@@ -137,6 +131,7 @@ def _create_context(
     quiet: bool,
     clone_mode: str,
     update_dependencies: bool,
+    force_update: bool,
 ) -> RepoContext:
     """
     Build a RepoContext instance for the given repository.

@@ -153,14 +148,10 @@ def _create_context(
         quiet=quiet,
         clone_mode=clone_mode,
         update_dependencies=update_dependencies,
+        force_update=force_update,
     )


-# ---------------------------------------------------------------------------
-# Public API
-# ---------------------------------------------------------------------------
-
-
 def install_repos(
     selected_repos: List[Repository],
     repositories_base_dir: str,

@@ -171,10 +162,14 @@ def install_repos(
     quiet: bool,
     clone_mode: str,
     update_dependencies: bool,
+    force_update: bool = False,
 ) -> None:
     """
     Install one or more repositories according to the configured installers
     and the CLI layer precedence rules.
+
+    If force_update=True, installers of the currently active layer are allowed
+    to run again (upgrade/refresh), even if that layer is already loaded.
     """
     pipeline = InstallationPipeline(INSTALLERS)

@@ -213,6 +208,7 @@ def install_repos(
             quiet=quiet,
             clone_mode=clone_mode,
             update_dependencies=update_dependencies,
+            force_update=force_update,
         )

         pipeline.run(ctx)
src/pkgmgr/actions/install/context.py

@@ -1,3 +1,4 @@
+# src/pkgmgr/actions/install/context.py
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

@@ -28,3 +29,6 @@ class RepoContext:
     quiet: bool
     clone_mode: str
     update_dependencies: bool
+
+    # If True, allow re-running installers of the currently active layer.
+    force_update: bool = False
@@ -9,7 +9,7 @@ pkgmgr.actions.install.installers.
 """

 from pkgmgr.actions.install.installers.base import BaseInstaller  # noqa: F401
-from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller  # noqa: F401
+from pkgmgr.actions.install.installers.nix import NixFlakeInstaller  # noqa: F401
 from pkgmgr.actions.install.installers.python import PythonInstaller  # noqa: F401
 from pkgmgr.actions.install.installers.makefile import MakefileInstaller  # noqa: F401

src/pkgmgr/actions/install/installers/makefile.py

@@ -1,3 +1,4 @@
+# src/pkgmgr/actions/install/installers/makefile.py
 from __future__ import annotations

 import os

@@ -9,89 +10,45 @@ from pkgmgr.core.command.run import run_command


 class MakefileInstaller(BaseInstaller):
-    """
-    Generic installer that runs `make install` if a Makefile with an
-    install target is present.
-
-    Safety rules:
-    - If PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 is set, this installer
-      is globally disabled.
-    - The higher-level InstallationPipeline ensures that Makefile
-      installation does not run if a stronger CLI layer already owns
-      the command (e.g. Nix or OS packages).
-    """
-
     layer = "makefile"
     MAKEFILE_NAME = "Makefile"

     def supports(self, ctx: RepoContext) -> bool:
-        """
-        Return True if this repository has a Makefile and the installer
-        is not globally disabled.
-        """
-        # Optional global kill switch.
         if os.environ.get("PKGMGR_DISABLE_MAKEFILE_INSTALLER") == "1":
             if not ctx.quiet:
-                print(
-                    "[INFO] MakefileInstaller is disabled via "
-                    "PKGMGR_DISABLE_MAKEFILE_INSTALLER."
-                )
+                print("[INFO] PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 – skipping MakefileInstaller.")
             return False

         makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
         return os.path.exists(makefile_path)

     def _has_install_target(self, makefile_path: str) -> bool:
-        """
-        Heuristically check whether the Makefile defines an install target.
-
-        We look for:
-
-        - a plain 'install:' target, or
-        - any 'install-*:' style target.
-        """
         try:
             with open(makefile_path, "r", encoding="utf-8", errors="ignore") as f:
                 content = f.read()
         except OSError:
             return False

-        # Simple heuristics: look for "install:" or targets starting with "install-"
         if re.search(r"^install\s*:", content, flags=re.MULTILINE):
             return True

         if re.search(r"^install-[a-zA-Z0-9_-]*\s*:", content, flags=re.MULTILINE):
             return True

         return False

     def run(self, ctx: RepoContext) -> None:
-        """
-        Execute `make install` in the repository directory if an install
-        target exists.
-        """
         makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)

         if not os.path.exists(makefile_path):
-            if not ctx.quiet:
-                print(
-                    f"[pkgmgr] Makefile '{makefile_path}' not found, "
-                    "skipping MakefileInstaller."
-                )
             return

         if not self._has_install_target(makefile_path):
             if not ctx.quiet:
-                print(
-                    f"[pkgmgr] No 'install' target found in {makefile_path}."
-                )
+                print(f"[pkgmgr] No 'install' target found in {makefile_path}.")
             return

         if not ctx.quiet:
-            print(
-                f"[pkgmgr] Running 'make install' in {ctx.repo_dir} "
-                "(MakefileInstaller)"
-            )
+            print(f"[pkgmgr] Running make install for {ctx.identifier} (MakefileInstaller)")

-        cmd = "make install"
-        run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
+        run_command("make install", cwd=ctx.repo_dir, preview=ctx.preview)
+
+        if ctx.force_update and not ctx.quiet:
+            print(f"[makefile] repo '{ctx.identifier}' successfully upgraded.")
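The two regular expressions kept in `_has_install_target` can be exercised directly. The Makefile content below is a made-up example used only to show which targets the heuristic accepts:

```python
import re

# Same heuristics the installer uses to decide whether `make install` is worthwhile.
content = """\
build:
\tpython -m build

install-dev:
\tpip install -e .
"""

print(bool(re.search(r"^install\s*:", content, flags=re.MULTILINE)))                 # False: no plain 'install:' target
print(bool(re.search(r"^install-[a-zA-Z0-9_-]*\s*:", content, flags=re.MULTILINE)))  # True: 'install-dev:' matches
```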
src/pkgmgr/actions/install/installers/nix/__init__.py (new file, 4 lines)

from .installer import NixFlakeInstaller
from .retry import RetryPolicy

__all__ = ["NixFlakeInstaller", "RetryPolicy"]
src/pkgmgr/actions/install/installers/nix/installer.py (new file, 168 lines)

# src/pkgmgr/actions/install/installers/nix/installer.py
from __future__ import annotations

import os
import shutil
from typing import List, Tuple, TYPE_CHECKING

from pkgmgr.actions.install.installers.base import BaseInstaller

from .profile import NixProfileInspector
from .retry import GitHubRateLimitRetry, RetryPolicy
from .runner import CommandRunner

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext


class NixFlakeInstaller(BaseInstaller):
    layer = "nix"
    FLAKE_FILE = "flake.nix"

    def __init__(self, policy: RetryPolicy | None = None) -> None:
        self._runner = CommandRunner()
        self._retry = GitHubRateLimitRetry(policy=policy)
        self._profile = NixProfileInspector()

    # ------------------------------------------------------------------ #
    # Compatibility: supports()
    # ------------------------------------------------------------------ #

    def supports(self, ctx: "RepoContext") -> bool:
        if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
            if not ctx.quiet:
                print("[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – skipping NixFlakeInstaller.")
            return False

        if shutil.which("nix") is None:
            return False

        return os.path.exists(os.path.join(ctx.repo_dir, self.FLAKE_FILE))

    # ------------------------------------------------------------------ #
    # Compatibility: output selection
    # ------------------------------------------------------------------ #

    def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
        # (output_name, allow_failure)
        if ctx.identifier in {"pkgmgr", "package-manager"}:
            return [("pkgmgr", False), ("default", True)]
        return [("default", False)]

    # ------------------------------------------------------------------ #
    # Compatibility: run()
    # ------------------------------------------------------------------ #

    def run(self, ctx: "RepoContext") -> None:
        if not self.supports(ctx):
            return

        outputs = self._profile_outputs(ctx)

        if not ctx.quiet:
            print(
                "[nix] flake detected in "
                f"{ctx.identifier}, ensuring outputs: "
                + ", ".join(name for name, _ in outputs)
            )

        for output, allow_failure in outputs:
            if ctx.force_update:
                self._force_upgrade_output(ctx, output, allow_failure)
            else:
                self._install_only(ctx, output, allow_failure)

    # ------------------------------------------------------------------ #
    # Core logic (unchanged semantics)
    # ------------------------------------------------------------------ #

    def _installable(self, ctx: "RepoContext", output: str) -> str:
        return f"{ctx.repo_dir}#{output}"

    def _install_only(self, ctx: "RepoContext", output: str, allow_failure: bool) -> None:
        install_cmd = f"nix profile install {self._installable(ctx, output)}"

        if not ctx.quiet:
            print(f"[nix] install: {install_cmd}")

        res = self._retry.run_with_retry(ctx, self._runner, install_cmd)

        if res.returncode == 0:
            if not ctx.quiet:
                print(f"[nix] output '{output}' successfully installed.")
            return

        if not ctx.quiet:
            print(
                f"[nix] install failed for '{output}' (exit {res.returncode}), "
                "trying index-based upgrade/remove+install..."
            )

        indices = self._profile.find_installed_indices_for_output(ctx, self._runner, output)

        upgraded = False
        for idx in indices:
            if self._upgrade_index(ctx, idx):
                upgraded = True
                if not ctx.quiet:
                    print(f"[nix] output '{output}' successfully upgraded (index {idx}).")

        if upgraded:
            return

        if indices and not ctx.quiet:
            print(f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'.")

        for idx in indices:
            self._remove_index(ctx, idx)

        final = self._runner.run(ctx, install_cmd, allow_failure=True)
        if final.returncode == 0:
            if not ctx.quiet:
                print(f"[nix] output '{output}' successfully re-installed.")
            return

        print(f"[ERROR] Failed to install Nix flake output '{output}' (exit {final.returncode})")

        if not allow_failure:
            raise SystemExit(final.returncode)

        print(f"[WARNING] Continuing despite failure of optional output '{output}'.")

    # ------------------------------------------------------------------ #
    # force_update path (unchanged semantics)
    # ------------------------------------------------------------------ #

    def _force_upgrade_output(self, ctx: "RepoContext", output: str, allow_failure: bool) -> None:
        indices = self._profile.find_installed_indices_for_output(ctx, self._runner, output)

        upgraded_any = False
        for idx in indices:
            if self._upgrade_index(ctx, idx):
                upgraded_any = True
                if not ctx.quiet:
                    print(f"[nix] output '{output}' successfully upgraded (index {idx}).")

        if upgraded_any:
            print(f"[nix] output '{output}' successfully upgraded.")
            return

        if indices and not ctx.quiet:
            print(f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'.")

        for idx in indices:
            self._remove_index(ctx, idx)

        self._install_only(ctx, output, allow_failure)

        print(f"[nix] output '{output}' successfully upgraded.")

    # ------------------------------------------------------------------ #
    # Helpers
    # ------------------------------------------------------------------ #

    def _upgrade_index(self, ctx: "RepoContext", idx: int) -> bool:
        res = self._runner.run(ctx, f"nix profile upgrade --refresh {idx}", allow_failure=True)
        return res.returncode == 0

    def _remove_index(self, ctx: "RepoContext", idx: int) -> None:
        self._runner.run(ctx, f"nix profile remove {idx}", allow_failure=True)
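Because the constructor now takes an optional `RetryPolicy`, callers can tighten the backoff when wiring up the installer (for example in CI). A minimal construction sketch; the policy values are arbitrary examples, not defaults taken from the repository:

```python
from pkgmgr.actions.install.installers.nix import NixFlakeInstaller, RetryPolicy

# Fewer, shorter retries than the default policy; useful where CI time is limited.
installer = NixFlakeInstaller(policy=RetryPolicy(max_attempts=3, base_delay_seconds=10))
```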
src/pkgmgr/actions/install/installers/nix/profile.py (new file, 71 lines)

from __future__ import annotations

import json
from typing import Any, List, TYPE_CHECKING


if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
    from .runner import CommandRunner


class NixProfileInspector:
    """
    Reads and interprets `nix profile list --json` and provides helpers for
    finding indices matching a given output name.
    """

    def find_installed_indices_for_output(self, ctx: "RepoContext", runner: "CommandRunner", output: str) -> List[int]:
        res = runner.run(ctx, "nix profile list --json", allow_failure=True)
        if res.returncode != 0:
            return []

        try:
            data = json.loads(res.stdout or "{}")
        except json.JSONDecodeError:
            return []

        indices: List[int] = []

        elements = data.get("elements")
        if isinstance(elements, dict):
            for idx_str, elem in elements.items():
                try:
                    idx = int(idx_str)
                except (TypeError, ValueError):
                    continue
                if self._element_matches_output(elem, output):
                    indices.append(idx)
            return sorted(indices)

        if isinstance(elements, list):
            for elem in elements:
                idx = elem.get("index") if isinstance(elem, dict) else None
                if isinstance(idx, int) and self._element_matches_output(elem, output):
                    indices.append(idx)
            return sorted(indices)

        return []

    @staticmethod
    def element_matches_output(elem: Any, output: str) -> bool:
        return NixProfileInspector._element_matches_output(elem, output)

    @staticmethod
    def _element_matches_output(elem: Any, output: str) -> bool:
        out = (output or "").strip()
        if not out or not isinstance(elem, dict):
            return False

        candidates: List[str] = []
        for k in ("attrPath", "originalUrl", "url", "storePath", "name"):
            v = elem.get(k)
            if isinstance(v, str) and v:
                candidates.append(v)

        for c in candidates:
            if c == out:
                return True
            if f"#{out}" in c:
                return True

        return False
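For intuition, `element_matches_output` takes a profile element dict and an output name. The element below is an assumed shape used only for illustration, since the exact JSON emitted by `nix profile list --json` varies across Nix versions:

```python
from pkgmgr.actions.install.installers.nix.profile import NixProfileInspector

# Hypothetical element dict with a flake URL containing "#pkgmgr".
elem = {"attrPath": "packages.x86_64-linux.pkgmgr", "url": "path:/repo#pkgmgr"}

print(NixProfileInspector.element_matches_output(elem, "pkgmgr"))   # True: '#pkgmgr' appears in a candidate value
print(NixProfileInspector.element_matches_output(elem, "default"))  # False: no candidate equals or contains '#default'
```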
src/pkgmgr/actions/install/installers/nix/retry.py (new file, 87 lines)

from __future__ import annotations

import random
import time
from dataclasses import dataclass
from typing import Iterable, TYPE_CHECKING

from .types import RunResult

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
    from .runner import CommandRunner


@dataclass(frozen=True)
class RetryPolicy:
    max_attempts: int = 7
    base_delay_seconds: int = 30
    jitter_seconds_min: int = 0
    jitter_seconds_max: int = 60


class GitHubRateLimitRetry:
    """
    Retries nix install commands only when the error looks like a GitHub API rate limit (HTTP 403).
    Backoff: Fibonacci(base, base, ...) + random jitter.
    """

    def __init__(self, policy: RetryPolicy | None = None) -> None:
        self._policy = policy or RetryPolicy()

    def run_with_retry(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        install_cmd: str,
    ) -> RunResult:
        quiet = bool(getattr(ctx, "quiet", False))
        delays = list(self._fibonacci_backoff(self._policy.base_delay_seconds, self._policy.max_attempts))

        last: RunResult | None = None

        for attempt, base_delay in enumerate(delays, start=1):
            if not quiet:
                print(f"[nix] attempt {attempt}/{self._policy.max_attempts}: {install_cmd}")

            res = runner.run(ctx, install_cmd, allow_failure=True)
            last = res

            if res.returncode == 0:
                return res

            combined = f"{res.stdout}\n{res.stderr}"
            if not self._is_github_rate_limit_error(combined):
                return res

            if attempt >= self._policy.max_attempts:
                break

            jitter = random.randint(self._policy.jitter_seconds_min, self._policy.jitter_seconds_max)
            wait_time = base_delay + jitter

            if not quiet:
                print(
                    "[nix] GitHub rate limit detected (403). "
                    f"Retrying in {wait_time}s (base={base_delay}s, jitter={jitter}s)..."
                )

            time.sleep(wait_time)

        return last if last is not None else RunResult(returncode=1, stdout="", stderr="nix install retry failed")

    @staticmethod
    def _is_github_rate_limit_error(text: str) -> bool:
        t = (text or "").lower()
        return (
            "http error 403" in t
            or "rate limit exceeded" in t
            or "github api rate limit" in t
            or "api rate limit exceeded" in t
        )

    @staticmethod
    def _fibonacci_backoff(base: int, attempts: int) -> Iterable[int]:
        a, b = base, base
        for _ in range(max(1, attempts)):
            yield a
            a, b = b, a + b
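With the defaults above (base_delay_seconds=30, max_attempts=7), the base delays form a Fibonacci sequence starting at 30 seconds, before the per-attempt jitter is added. A quick way to see the schedule, using the same generator logic:

```python
# Reproduces the base delay schedule of the default RetryPolicy (jitter excluded).
def fibonacci_backoff(base: int, attempts: int):
    a, b = base, base
    for _ in range(max(1, attempts)):
        yield a
        a, b = b, a + b

print(list(fibonacci_backoff(30, 7)))  # [30, 30, 60, 90, 150, 240, 390]
```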
src/pkgmgr/actions/install/installers/nix/runner.py (new file, 64 lines)

from __future__ import annotations

import subprocess

from typing import TYPE_CHECKING

from .types import RunResult

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext


class CommandRunner:
    """
    Executes commands (shell=True) inside a repository directory (if provided).
    Supports preview mode and compact failure output logging.
    """

    def run(self, ctx: "RepoContext", cmd: str, allow_failure: bool) -> RunResult:
        repo_dir = getattr(ctx, "repo_dir", None) or getattr(ctx, "repo_path", None)
        preview = bool(getattr(ctx, "preview", False))
        quiet = bool(getattr(ctx, "quiet", False))

        if preview:
            if not quiet:
                print(f"[preview] {cmd}")
            return RunResult(returncode=0, stdout="", stderr="")

        try:
            p = subprocess.run(
                cmd,
                shell=True,
                cwd=repo_dir,
                check=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
        except Exception as e:
            if not allow_failure:
                raise
            return RunResult(returncode=1, stdout="", stderr=str(e))

        res = RunResult(returncode=p.returncode, stdout=p.stdout or "", stderr=p.stderr or "")

        if res.returncode != 0 and not quiet:
            self._print_compact_failure(res)

        if res.returncode != 0 and not allow_failure:
            raise SystemExit(res.returncode)

        return res

    @staticmethod
    def _print_compact_failure(res: RunResult) -> None:
        out = (res.stdout or "").strip()
        err = (res.stderr or "").strip()

        if out:
            print("[nix] stdout (last lines):")
            print("\n".join(out.splitlines()[-20:]))

        if err:
            print("[nix] stderr (last lines):")
            print("\n".join(err.splitlines()[-40:]))
src/pkgmgr/actions/install/installers/nix/types.py (new file, 10 lines)

from __future__ import annotations

from dataclasses import dataclass


@dataclass(frozen=True)
class RunResult:
    returncode: int
    stdout: str
    stderr: str
Deleted file (165 lines removed): the previous monolithic installer, imported as pkgmgr.actions.install.installers.nix_flake and replaced by the nix/ package above.

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Installer for Nix flakes.

If a repository contains flake.nix and the 'nix' command is available, this
installer will try to install profile outputs from the flake.

Behavior:
- If flake.nix is present and `nix` exists on PATH:
  * First remove any existing `package-manager` profile entry (best-effort).
  * Then install one or more flake outputs via `nix profile install`.
- For the package-manager repo:
  * `pkgmgr` is mandatory (CLI), `default` is optional.
- For all other repos:
  * `default` is mandatory.

Special handling:
- If PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 is set, the installer is
  globally disabled (useful for CI or debugging).

The higher-level InstallationPipeline and CLI-layer model decide when this
installer is allowed to run, based on where the current CLI comes from
(e.g. Nix, OS packages, Python, Makefile).
"""

import os
import shutil
from typing import TYPE_CHECKING, List, Tuple

from pkgmgr.actions.install.installers.base import BaseInstaller
from pkgmgr.core.command.run import run_command

if TYPE_CHECKING:
    from pkgmgr.actions.install.context import RepoContext
    from pkgmgr.actions.install import InstallContext


class NixFlakeInstaller(BaseInstaller):
    """Install Nix flake profiles for repositories that define flake.nix."""

    # Logical layer name, used by capability matchers.
    layer = "nix"

    FLAKE_FILE = "flake.nix"
    PROFILE_NAME = "package-manager"

    def supports(self, ctx: "RepoContext") -> bool:
        """
        Only support repositories that:
        - Are NOT explicitly disabled via PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1,
        - Have a flake.nix,
        - And have the `nix` command available.
        """
        # Optional global kill-switch for CI or debugging.
        if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
            print(
                "[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
                "NixFlakeInstaller is disabled."
            )
            return False

        # Nix must be available.
        if shutil.which("nix") is None:
            return False

        # flake.nix must exist in the repository.
        flake_path = os.path.join(ctx.repo_dir, self.FLAKE_FILE)
        return os.path.exists(flake_path)

    def _ensure_old_profile_removed(self, ctx: "RepoContext") -> None:
        """
        Best-effort removal of an existing profile entry.

        This handles the "already provides the following file" conflict by
        removing previous `package-manager` installations before we install
        the new one.

        Any error in `nix profile remove` is intentionally ignored, because
        a missing profile entry is not a fatal condition.
        """
        if shutil.which("nix") is None:
            return

        cmd = f"nix profile remove {self.PROFILE_NAME} || true"
        try:
            # NOTE: no allow_failure here → matches the existing unit tests
            run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
        except SystemExit:
            # Unit tests explicitly assert this is swallowed
            pass

    def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
        """
        Decide which flake outputs to install and whether failures are fatal.

        Returns a list of (output_name, allow_failure) tuples.

        Rules:
        - For the package-manager repo (identifier 'pkgmgr' or 'package-manager'):
            [("pkgmgr", False), ("default", True)]
        - For all other repos:
            [("default", False)]
        """
        ident = ctx.identifier

        if ident in {"pkgmgr", "package-manager"}:
            # pkgmgr: main CLI output is "pkgmgr" (mandatory),
            # "default" is nice-to-have (non-fatal).
            return [("pkgmgr", False), ("default", True)]

        # Generic repos: we expect a sensible "default" package/app.
        # Failure to install it is considered fatal.
        return [("default", False)]

    def run(self, ctx: "InstallContext") -> None:
        """
        Install Nix flake profile outputs.

        For the package-manager repo, failure installing 'pkgmgr' is fatal,
        failure installing 'default' is non-fatal.
        For other repos, failure installing 'default' is fatal.
        """
        # Reuse supports() to keep logic in one place.
        if not self.supports(ctx):  # type: ignore[arg-type]
            return

        outputs = self._profile_outputs(ctx)  # list of (name, allow_failure)

        print(
            "Nix flake detected in "
            f"{ctx.identifier}, attempting to install profile outputs: "
            + ", ".join(name for name, _ in outputs)
        )

        # Handle the "already installed" case up-front for the shared profile.
        self._ensure_old_profile_removed(ctx)  # type: ignore[arg-type]

        for output, allow_failure in outputs:
            cmd = f"nix profile install {ctx.repo_dir}#{output}"
            print(f"[INFO] Running: {cmd}")
            ret = os.system(cmd)

            # Extract real exit code from os.system() result
            if os.WIFEXITED(ret):
                exit_code = os.WEXITSTATUS(ret)
            else:
                # abnormal termination (signal etc.) – keep raw value
                exit_code = ret

            if exit_code == 0:
                print(f"Nix flake output '{output}' successfully installed.")
                continue

            print(f"[Error] Failed to install Nix flake output '{output}'")
            print(f"[Error] Command exited with code {exit_code}")

            if not allow_failure:
                raise SystemExit(exit_code)

            print(
                "[Warning] Continuing despite failure to install "
                f"optional output '{output}'."
            )
@@ -1,104 +1,40 @@
|
|||||||
#!/usr/bin/env python3
|
# src/pkgmgr/actions/install/installers/python.py
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
"""
|
|
||||||
PythonInstaller — install Python projects defined via pyproject.toml.
|
|
||||||
|
|
||||||
Installation rules:
|
|
||||||
|
|
||||||
1. pip command resolution:
|
|
||||||
a) If PKGMGR_PIP is set → use it exactly as provided.
|
|
||||||
b) Else if running inside a virtualenv → use `sys.executable -m pip`.
|
|
||||||
c) Else → create/use a per-repository virtualenv under ~/.venvs/<repo>/.
|
|
||||||
|
|
||||||
2. Installation target:
|
|
||||||
- Always install into the resolved pip environment.
|
|
||||||
- Never modify system Python, never rely on --user.
|
|
||||||
- Nix-immutable systems (PEP 668) are automatically avoided because we
|
|
||||||
never touch system Python.
|
|
||||||
|
|
||||||
3. The installer is skipped when:
|
|
||||||
- PKGMGR_DISABLE_PYTHON_INSTALLER=1 is set.
|
|
||||||
- The repository has no pyproject.toml.
|
|
||||||
|
|
||||||
All pip failures are treated as fatal.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import subprocess
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
|
|
||||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||||
|
from pkgmgr.actions.install.context import RepoContext
|
||||||
from pkgmgr.core.command.run import run_command
|
from pkgmgr.core.command.run import run_command
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from pkgmgr.actions.install.context import RepoContext
|
|
||||||
from pkgmgr.actions.install import InstallContext
|
|
||||||
|
|
||||||
|
|
||||||
class PythonInstaller(BaseInstaller):
|
class PythonInstaller(BaseInstaller):
|
||||||
"""Install Python projects and dependencies via pip using isolated environments."""
|
|
||||||
|
|
||||||
layer = "python"
|
layer = "python"
|
||||||
|
|
||||||
# ----------------------------------------------------------------------
|
def supports(self, ctx: RepoContext) -> bool:
|
-    # Installer activation logic
-    # ----------------------------------------------------------------------
     def supports(self, ctx: "RepoContext") -> bool:
-        """
-        Return True if this installer should handle this repository.
-
-        The installer is active only when:
-        - A pyproject.toml exists in the repo, and
-        - PKGMGR_DISABLE_PYTHON_INSTALLER is not set.
-        """
         if os.environ.get("PKGMGR_DISABLE_PYTHON_INSTALLER") == "1":
             print("[INFO] PythonInstaller disabled via PKGMGR_DISABLE_PYTHON_INSTALLER.")
             return False

         return os.path.exists(os.path.join(ctx.repo_dir, "pyproject.toml"))

-    # ----------------------------------------------------------------------
-    # Virtualenv handling
-    # ----------------------------------------------------------------------
     def _in_virtualenv(self) -> bool:
-        """Detect whether the current interpreter is inside a venv."""
         if os.environ.get("VIRTUAL_ENV"):
             return True

         base = getattr(sys, "base_prefix", sys.prefix)
         return sys.prefix != base

-    def _ensure_repo_venv(self, ctx: "InstallContext") -> str:
-        """
-        Ensure that ~/.venvs/<identifier>/ exists and contains a minimal venv.
-
-        Returns the venv directory path.
-        """
+    def _ensure_repo_venv(self, ctx: RepoContext) -> str:
         venv_dir = os.path.expanduser(f"~/.venvs/{ctx.identifier}")
         python = sys.executable

-        if not os.path.isdir(venv_dir):
+        if not os.path.exists(venv_dir):
             print(f"[python-installer] Creating virtualenv: {venv_dir}")
-            subprocess.check_call([python, "-m", "venv", venv_dir])
+            run_command(f"{python} -m venv {venv_dir}", preview=ctx.preview)

         return venv_dir

-    # ----------------------------------------------------------------------
-    # pip command resolution
-    # ----------------------------------------------------------------------
-    def _pip_cmd(self, ctx: "InstallContext") -> str:
-        """
-        Determine which pip command to use.
-
-        Priority:
-        1. PKGMGR_PIP override given by user or automation.
-        2. Active virtualenv → use sys.executable -m pip.
-        3. Per-repository venv → ~/.venvs/<repo>/bin/pip
-        """
+    def _pip_cmd(self, ctx: RepoContext) -> str:
         explicit = os.environ.get("PKGMGR_PIP", "").strip()
         if explicit:
             return explicit
@@ -107,33 +43,19 @@ class PythonInstaller(BaseInstaller):
             return f"{sys.executable} -m pip"

         venv_dir = self._ensure_repo_venv(ctx)
-        pip_path = os.path.join(venv_dir, "bin", "pip")
-        return pip_path
+        return os.path.join(venv_dir, "bin", "pip")

-    # ----------------------------------------------------------------------
-    # Execution
-    # ----------------------------------------------------------------------
-    def run(self, ctx: "InstallContext") -> None:
-        """
-        Install the project defined by pyproject.toml.
-
-        Uses the resolved pip environment. Installation is isolated and never
-        touches system Python.
-        """
-        if not self.supports(ctx):  # type: ignore[arg-type]
-            return
-
-        pyproject = os.path.join(ctx.repo_dir, "pyproject.toml")
-        if not os.path.exists(pyproject):
+    def run(self, ctx: RepoContext) -> None:
+        if not self.supports(ctx):
             return

         print(f"[python-installer] Installing Python project for {ctx.identifier}...")

         pip_cmd = self._pip_cmd(ctx)

-        # Final install command: ALWAYS isolated, never system-wide.
-        install_cmd = f"{pip_cmd} install ."
-        run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
+        run_command(f"{pip_cmd} install .", cwd=ctx.repo_dir, preview=ctx.preview)
+
+        if ctx.force_update:
+            # test-visible marker
+            print(f"[python-installer] repo '{ctx.identifier}' successfully upgraded.")

         print(f"[python-installer] Installation finished for {ctx.identifier}.")
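For reference, the pip resolution order that the removed docstring described is still what the code implements: an explicit PKGMGR_PIP override wins, then an active virtualenv, then the per-repository venv. A minimal standalone sketch of that precedence, assuming only the environment variables shown in the diff (the resolve_pip helper is illustrative and not part of the repository):

import os
import sys


def resolve_pip(identifier: str) -> str:
    # 1. Explicit override provided by the user or CI.
    explicit = os.environ.get("PKGMGR_PIP", "").strip()
    if explicit:
        return explicit

    # 2. An already-activated virtualenv: reuse the running interpreter.
    in_venv = bool(os.environ.get("VIRTUAL_ENV")) or sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    if in_venv:
        return f"{sys.executable} -m pip"

    # 3. Fall back to the per-repository venv under ~/.venvs/<identifier>/.
    return os.path.expanduser(f"~/.venvs/{identifier}/bin/pip")


print(resolve_pip("pkgmgr"))  # e.g. /home/user/.venvs/pkgmgr/bin/pip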
@@ -1,21 +1,9 @@
+# src/pkgmgr/actions/install/pipeline.py
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

 """
 Installation pipeline orchestration for repositories.

-This module implements the "Setup Controller" logic:
-
-1. Detect current CLI command for the repo (if any).
-2. Classify it into a layer (os-packages, nix, python, makefile).
-3. Iterate over installers in layer order:
-   - Skip installers whose layer is weaker than an already-loaded one.
-   - Run only installers that support() the repo and add new capabilities.
-   - After each installer, re-resolve the command and update the layer.
-4. Maintain the repo["command"] field and create/update symlinks via create_ink().
-
-The goal is to prevent conflicting installations and make the layering
-behaviour explicit and testable.
 """

 from __future__ import annotations
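The removed module docstring above is still the best summary of the layer rule the pipeline enforces: an installer is skipped when the resolved CLI already comes from a stronger layer, or from the same layer unless an upgrade is forced. A rough sketch of that decision, assuming layer_priority returns smaller numbers for stronger layers (consistent with the `current_prio < installer_prio` comparison later in this diff); the enum values and priority table here are assumptions for illustration:

from enum import Enum
from typing import Optional


class CliLayer(str, Enum):
    OS_PACKAGES = "os-packages"
    NIX = "nix"
    PYTHON = "python"
    MAKEFILE = "makefile"


# Assumption: lower number = stronger layer.
_PRIORITY = {CliLayer.OS_PACKAGES: 0, CliLayer.NIX: 1, CliLayer.PYTHON: 2, CliLayer.MAKEFILE: 3}


def should_skip(current: Optional[CliLayer], installer: CliLayer, force_update: bool) -> bool:
    if current is None:
        return False
    if _PRIORITY[current] < _PRIORITY[installer]:
        return True   # a stronger layer already provides the CLI
    if _PRIORITY[current] == _PRIORITY[installer] and not force_update:
        return True   # same layer already loaded and no upgrade requested
    return False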
@@ -36,34 +24,15 @@ from pkgmgr.core.command.resolve import resolve_command_for_repo
|
|||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class CommandState:
|
class CommandState:
|
||||||
"""
|
|
||||||
Represents the current CLI state for a repository:
|
|
||||||
|
|
||||||
- command: absolute or relative path to the CLI entry point
|
|
||||||
- layer: which conceptual layer this command belongs to
|
|
||||||
"""
|
|
||||||
|
|
||||||
command: Optional[str]
|
command: Optional[str]
|
||||||
layer: Optional[CliLayer]
|
layer: Optional[CliLayer]
|
||||||
|
|
||||||
|
|
||||||
class CommandResolver:
|
class CommandResolver:
|
||||||
"""
|
|
||||||
Small helper responsible for resolving the current command for a repo
|
|
||||||
and mapping it into a CommandState.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, ctx: RepoContext) -> None:
|
def __init__(self, ctx: RepoContext) -> None:
|
||||||
self._ctx = ctx
|
self._ctx = ctx
|
||||||
|
|
||||||
def resolve(self) -> CommandState:
|
def resolve(self) -> CommandState:
|
||||||
"""
|
|
||||||
Resolve the current command for this repository.
|
|
||||||
|
|
||||||
If resolve_command_for_repo raises SystemExit (e.g. Python package
|
|
||||||
without installed entry point), we treat this as "no command yet"
|
|
||||||
from the point of view of the installers.
|
|
||||||
"""
|
|
||||||
repo = self._ctx.repo
|
repo = self._ctx.repo
|
||||||
identifier = self._ctx.identifier
|
identifier = self._ctx.identifier
|
||||||
repo_dir = self._ctx.repo_dir
|
repo_dir = self._ctx.repo_dir
|
||||||
@@ -85,28 +54,10 @@ class CommandResolver:
|
|||||||
|
|
||||||
|
|
||||||
class InstallationPipeline:
|
class InstallationPipeline:
|
||||||
"""
|
|
||||||
High-level orchestrator that applies a sequence of installers
|
|
||||||
to a repository based on CLI layer precedence.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, installers: Sequence[BaseInstaller]) -> None:
|
def __init__(self, installers: Sequence[BaseInstaller]) -> None:
|
||||||
self._installers = list(installers)
|
self._installers = list(installers)
|
||||||
|
|
||||||
# ------------------------------------------------------------------
|
|
||||||
# Public API
|
|
||||||
# ------------------------------------------------------------------
|
|
||||||
def run(self, ctx: RepoContext) -> None:
|
def run(self, ctx: RepoContext) -> None:
|
||||||
"""
|
|
||||||
Execute the installation pipeline for a single repository.
|
|
||||||
|
|
||||||
- Detect initial command & layer.
|
|
||||||
- Optionally create a symlink.
|
|
||||||
- Run installers in order, skipping those whose layer is weaker
|
|
||||||
than an already-loaded CLI.
|
|
||||||
- After each installer, re-resolve the command and refresh the
|
|
||||||
symlink if needed.
|
|
||||||
"""
|
|
||||||
repo = ctx.repo
|
repo = ctx.repo
|
||||||
repo_dir = ctx.repo_dir
|
repo_dir = ctx.repo_dir
|
||||||
identifier = ctx.identifier
|
identifier = ctx.identifier
|
||||||
@@ -119,7 +70,6 @@ class InstallationPipeline:
|
|||||||
resolver = CommandResolver(ctx)
|
resolver = CommandResolver(ctx)
|
||||||
state = resolver.resolve()
|
state = resolver.resolve()
|
||||||
|
|
||||||
# Persist initial command (if any) and create a symlink.
|
|
||||||
if state.command:
|
if state.command:
|
||||||
repo["command"] = state.command
|
repo["command"] = state.command
|
||||||
create_ink(
|
create_ink(
|
||||||
@@ -135,11 +85,9 @@ class InstallationPipeline:
|
|||||||
|
|
||||||
provided_capabilities: Set[str] = set()
|
provided_capabilities: Set[str] = set()
|
||||||
|
|
||||||
# Main installer loop
|
|
||||||
for installer in self._installers:
|
for installer in self._installers:
|
||||||
layer_name = getattr(installer, "layer", None)
|
layer_name = getattr(installer, "layer", None)
|
||||||
|
|
||||||
# Installers without a layer participate without precedence logic.
|
|
||||||
if layer_name is None:
|
if layer_name is None:
|
||||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||||
continue
|
continue
|
||||||
@@ -147,17 +95,13 @@ class InstallationPipeline:
|
|||||||
try:
|
try:
|
||||||
installer_layer = CliLayer(layer_name)
|
installer_layer = CliLayer(layer_name)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
# Unknown layer string → treat as lowest priority.
|
|
||||||
installer_layer = None
|
installer_layer = None
|
||||||
|
|
||||||
# "Previous/Current layer already loaded?"
|
|
||||||
if state.layer is not None and installer_layer is not None:
|
if state.layer is not None and installer_layer is not None:
|
||||||
current_prio = layer_priority(state.layer)
|
current_prio = layer_priority(state.layer)
|
||||||
installer_prio = layer_priority(installer_layer)
|
installer_prio = layer_priority(installer_layer)
|
||||||
|
|
||||||
if current_prio < installer_prio:
|
if current_prio < installer_prio:
|
||||||
# Current CLI comes from a higher-priority layer,
|
|
||||||
# so we skip this installer entirely.
|
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(
|
print(
|
||||||
"[pkgmgr] Skipping installer "
|
"[pkgmgr] Skipping installer "
|
||||||
@@ -166,9 +110,7 @@ class InstallationPipeline:
|
|||||||
)
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if current_prio == installer_prio:
|
if current_prio == installer_prio and not ctx.force_update:
|
||||||
# Same layer already provides a CLI; usually there is no
|
|
||||||
# need to run another installer on top of it.
|
|
||||||
if not quiet:
|
if not quiet:
|
||||||
print(
|
print(
|
||||||
"[pkgmgr] Skipping installer "
|
"[pkgmgr] Skipping installer "
|
||||||
@@ -177,12 +119,9 @@ class InstallationPipeline:
|
|||||||
)
|
)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Check if this installer is applicable at all.
|
|
||||||
if not installer.supports(ctx):
|
if not installer.supports(ctx):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Capabilities: if everything this installer would provide is already
|
|
||||||
# covered, we can safely skip it.
|
|
||||||
caps = installer.discover_capabilities(ctx)
|
caps = installer.discover_capabilities(ctx)
|
||||||
if caps and caps.issubset(provided_capabilities):
|
if caps and caps.issubset(provided_capabilities):
|
||||||
if not quiet:
|
if not quiet:
|
||||||
@@ -193,18 +132,22 @@ class InstallationPipeline:
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
if not quiet:
|
if not quiet:
|
||||||
|
if ctx.force_update and state.layer is not None and installer_layer == state.layer:
|
||||||
|
print(
|
||||||
|
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||||
|
f"for {identifier} in '{repo_dir}' (upgrade requested)..."
|
||||||
|
)
|
||||||
|
else:
|
||||||
print(
|
print(
|
||||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||||
f"for {identifier} in '{repo_dir}' "
|
f"for {identifier} in '{repo_dir}' "
|
||||||
f"(new capabilities: {caps or set()})..."
|
f"(new capabilities: {caps or set()})..."
|
||||||
)
|
)
|
||||||
|
|
||||||
# Run the installer with error reporting.
|
|
||||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||||
|
|
||||||
provided_capabilities.update(caps)
|
provided_capabilities.update(caps)
|
||||||
|
|
||||||
# After running an installer, re-resolve the command and layer.
|
|
||||||
new_state = resolver.resolve()
|
new_state = resolver.resolve()
|
||||||
if new_state.command:
|
if new_state.command:
|
||||||
repo["command"] = new_state.command
|
repo["command"] = new_state.command
|
||||||
@@ -221,9 +164,6 @@ class InstallationPipeline:
|
|||||||
|
|
||||||
state = new_state
|
state = new_state
|
||||||
|
|
||||||
# ------------------------------------------------------------------
|
|
||||||
# Internal helpers
|
|
||||||
# ------------------------------------------------------------------
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _run_installer(
|
def _run_installer(
|
||||||
installer: BaseInstaller,
|
installer: BaseInstaller,
|
||||||
@@ -232,9 +172,6 @@ class InstallationPipeline:
|
|||||||
repo_dir: str,
|
repo_dir: str,
|
||||||
quiet: bool,
|
quiet: bool,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
|
||||||
Execute a single installer with unified error handling.
|
|
||||||
"""
|
|
||||||
try:
|
try:
|
||||||
installer.run(ctx)
|
installer.run(ctx)
|
||||||
except SystemExit as exc:
|
except SystemExit as exc:
|
||||||
|
|||||||
@@ -1,14 +1,121 @@
+# src/pkgmgr/actions/mirror/setup_cmd.py
 from __future__ import annotations

 from typing import List, Tuple
+from urllib.parse import urlparse

-from pkgmgr.core.git import run_git, GitError
+from pkgmgr.core.git import GitError, run_git
+from pkgmgr.core.remote_provisioning import ProviderHint, RepoSpec, ensure_remote_repo
+from pkgmgr.core.remote_provisioning.ensure import EnsureOptions

 from .context import build_context
 from .git_remote import determine_primary_remote_url, ensure_origin_remote
 from .types import Repository


+def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
+    """
+    Probe a remote mirror URL using `git ls-remote`.
+
+    Returns:
+        (True, "") on success,
+        (False, error_message) on failure.
+    """
+    try:
+        run_git(["ls-remote", url], cwd=repo_dir)
+        return True, ""
+    except GitError as exc:
+        return False, str(exc)
+
+
+def _host_from_git_url(url: str) -> str:
+    url = (url or "").strip()
+    if not url:
+        return ""
+
+    if "://" in url:
+        parsed = urlparse(url)
+        netloc = (parsed.netloc or "").strip()
+        if "@" in netloc:
+            netloc = netloc.split("@", 1)[1]
+        # keep optional :port
+        return netloc
+
+    # scp-like: git@host:owner/repo.git
+    if "@" in url and ":" in url:
+        after_at = url.split("@", 1)[1]
+        host = after_at.split(":", 1)[0]
+        return host.strip()
+
+    return url.split("/", 1)[0].strip()
+
+
+def _ensure_remote_repository(
+    repo: Repository,
+    repositories_base_dir: str,
+    all_repos: List[Repository],
+    preview: bool,
+) -> None:
+    """
+    Ensure that the remote repository exists using provider APIs.
+
+    This is ONLY called when ensure_remote=True.
+    """
+    ctx = build_context(repo, repositories_base_dir, all_repos)
+    resolved_mirrors = ctx.resolved_mirrors
+
+    primary_url = determine_primary_remote_url(repo, resolved_mirrors)
+    if not primary_url:
+        print("[INFO] No remote URL could be derived; skipping remote provisioning.")
+        return
+
+    # IMPORTANT:
+    # - repo["provider"] is typically a provider *kind* (e.g. "github" / "gitea"),
+    #   NOT a hostname. We derive the actual host from the remote URL.
+    host = _host_from_git_url(primary_url)
+    owner = repo.get("account")
+    name = repo.get("repository")
+
+    if not host or not owner or not name:
+        print("[WARN] Missing host/account/repository; cannot ensure remote repo.")
+        print(f"       host={host!r}, account={owner!r}, repository={name!r}")
+        return
+
+    print("------------------------------------------------------------")
+    print(f"[REMOTE ENSURE] {ctx.identifier}")
+    print(f"[REMOTE ENSURE] host: {host}")
+    print("------------------------------------------------------------")
+
+    spec = RepoSpec(
+        host=str(host),
+        owner=str(owner),
+        name=str(name),
+        private=bool(repo.get("private", True)),
+        description=str(repo.get("description", "")),
+    )
+
+    provider_kind = str(repo.get("provider", "")).strip().lower() or None
+
+    try:
+        result = ensure_remote_repo(
+            spec,
+            provider_hint=ProviderHint(kind=provider_kind),
+            options=EnsureOptions(
+                preview=preview,
+                interactive=True,
+                allow_prompt=True,
+                save_prompt_token_to_keyring=True,
+            ),
+        )
+        print(f"[REMOTE ENSURE] {result.status.upper()}: {result.message}")
+        if result.url:
+            print(f"[REMOTE ENSURE] URL: {result.url}")
+    except Exception as exc:  # noqa: BLE001
+        # Keep action layer resilient
+        print(f"[ERROR] Remote provisioning failed: {exc}")
+
+    print()
+
+
 def _setup_local_mirrors_for_repo(
     repo: Repository,
     repositories_base_dir: str,
@@ -16,7 +123,8 @@ def _setup_local_mirrors_for_repo(
     preview: bool,
 ) -> None:
     """
-    Ensure local Git state is sane (currently: 'origin' remote).
+    Local setup:
+    - Ensure 'origin' remote exists and is sane
     """
     ctx = build_context(repo, repositories_base_dir, all_repos)

@@ -29,103 +137,68 @@ def _setup_local_mirrors_for_repo(
     print()


-def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
-    """
-    Probe a remote mirror by running `git ls-remote <url>`.
-
-    Returns:
-        (True, "") on success,
-        (False, error_message) on failure.
-
-    Important:
-    - Only the exit code is evaluated.
-    - STDERR may contain hints/warnings and is NOT automatically an error.
-    """
-    try:
-        # stdout is ignored entirely; all that matters is that the command
-        # completes without GitError (i.e. exit code 0).
-        run_git(["ls-remote", url], cwd=repo_dir)
-        return True, ""
-    except GitError as exc:
-        return False, str(exc)
-
-
 def _setup_remote_mirrors_for_repo(
     repo: Repository,
     repositories_base_dir: str,
     all_repos: List[Repository],
     preview: bool,
+    ensure_remote: bool,
 ) -> None:
     """
     Remote-side setup / validation.

-    Currently only **non-destructive checks** are performed:
-
-    - For every mirror (from config + MIRRORS file, the file wins):
-      * `git ls-remote <url>` is executed.
-      * Exit code 0 → [OK]
-      * On failure → [WARN] + details from the GitError exception
-
-    **No** provider APIs are called and no repositories are created.
+    Default behavior:
+    - Non-destructive checks using `git ls-remote`.
+
+    Optional behavior:
+    - If ensure_remote=True:
+      * Attempt to create missing repositories via provider API
+      * Uses TokenResolver (ENV -> keyring -> prompt)
     """
     ctx = build_context(repo, repositories_base_dir, all_repos)
-    resolved_m = ctx.resolved_mirrors
+    resolved_mirrors = ctx.resolved_mirrors

     print("------------------------------------------------------------")
     print(f"[MIRROR SETUP:REMOTE] {ctx.identifier}")
     print(f"[MIRROR SETUP:REMOTE] dir: {ctx.repo_dir}")
     print("------------------------------------------------------------")

-    if not resolved_m:
-        # Optional: fall back to a heuristically determined URL, in case we
-        # ever want to implement automatic creation.
-        primary_url = determine_primary_remote_url(repo, resolved_m)
-        if not primary_url:
-            print(
-                "[INFO] No mirrors configured (config or MIRRORS file), and no "
-                "primary URL could be derived from provider/account/repository."
-            )
+    if ensure_remote:
+        _ensure_remote_repository(
+            repo,
+            repositories_base_dir=repositories_base_dir,
+            all_repos=all_repos,
+            preview=preview,
+        )
+
+    if not resolved_mirrors:
+        primary_url = determine_primary_remote_url(repo, resolved_mirrors)
+        if not primary_url:
+            print("[INFO] No mirrors configured and no primary URL available.")
             print()
             return

         ok, error_message = _probe_mirror(primary_url, ctx.repo_dir)
         if ok:
-            print(f"[OK] Remote mirror (primary) is reachable: {primary_url}")
+            print(f"[OK]   primary: {primary_url}")
         else:
-            print("[WARN] Primary remote URL is NOT reachable:")
-            print(f"       {primary_url}")
-            if error_message:
-                print("  Details:")
+            print(f"[WARN] primary: {primary_url}")
             for line in error_message.splitlines():
                 print(f"    {line}")

-        print()
-        print(
-            "[INFO] Remote checks are non-destructive and only use `git ls-remote` "
-            "to probe mirror URLs."
-        )
         print()
         return

-    # Normal case: we have named mirrors from config/MIRRORS
-    for name, url in sorted(resolved_m.items()):
+    for name, url in sorted(resolved_mirrors.items()):
         ok, error_message = _probe_mirror(url, ctx.repo_dir)
         if ok:
-            print(f"[OK] Remote mirror '{name}' is reachable: {url}")
+            print(f"[OK]   {name}: {url}")
         else:
-            print(f"[WARN] Remote mirror '{name}' is NOT reachable:")
-            print(f"       {url}")
-            if error_message:
-                print("  Details:")
+            print(f"[WARN] {name}: {url}")
             for line in error_message.splitlines():
                 print(f"    {line}")

     print()
-    print(
-        "[INFO] Remote checks are non-destructive and only use `git ls-remote` "
-        "to probe mirror URLs."
-    )
-    print()


 def setup_mirrors(
@@ -135,22 +208,25 @@ def setup_mirrors(
     preview: bool = False,
     local: bool = True,
     remote: bool = True,
+    ensure_remote: bool = False,
 ) -> None:
     """
     Setup mirrors for the selected repositories.

     local:
-    - Configure local Git remotes (currently: ensure 'origin' is present and
-      points to a reasonable URL).
+    - Configure local Git remotes (ensure 'origin' exists).

     remote:
-    - Non-destructive remote checks using `git ls-remote` for each mirror URL.
-      No repositories are created on the provider.
+    - Non-destructive remote checks using `git ls-remote`.
+
+    ensure_remote:
+    - If True, attempt to create missing remote repositories via provider APIs.
+    - This is explicit and NEVER enabled implicitly.
     """
     for repo in selected_repos:
         if local:
             _setup_local_mirrors_for_repo(
-                repo,
+                repo=repo,
                 repositories_base_dir=repositories_base_dir,
                 all_repos=all_repos,
                 preview=preview,
@@ -158,8 +234,9 @@ def setup_mirrors(

         if remote:
             _setup_remote_mirrors_for_repo(
-                repo,
+                repo=repo,
                 repositories_base_dir=repositories_base_dir,
                 all_repos=all_repos,
                 preview=preview,
+                ensure_remote=ensure_remote,
             )
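A short usage sketch of the new _host_from_git_url helper, covering the three URL shapes it handles; the example URLs are made up for illustration:

# URL with a scheme: the host comes from the netloc, user-info is stripped.
assert _host_from_git_url("https://user@github.com/acme/tool.git") == "github.com"

# scp-like SSH form: the host is the part between '@' and ':'.
assert _host_from_git_url("git@git.example.org:acme/tool.git") == "git.example.org"

# Anything else: the first path segment is treated as the host.
assert _host_from_git_url("git.example.org/acme/tool.git") == "git.example.org"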
@@ -1,9 +1,12 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
 import os
 import subprocess
 import sys

-from pkgmgr.core.repository.identifier import get_repo_identifier
 from pkgmgr.core.repository.dir import get_repo_dir
+from pkgmgr.core.repository.identifier import get_repo_identifier
 from pkgmgr.core.repository.verify import verify_repository


@@ -17,13 +20,6 @@ def pull_with_verification(
 ) -> None:
     """
     Execute `git pull` for each repository with verification.
-
-    - Uses verify_repository() in "pull" mode.
-    - If verification fails (and verification info is set) and
-      --no-verification is not enabled, the user is prompted to confirm
-      the pull.
-    - In preview mode, no interactive prompts are performed and no
-      Git commands are executed; only the would-be command is printed.
     """
     for repo in selected_repos:
         repo_identifier = get_repo_identifier(repo, all_repos)
@@ -34,18 +30,13 @@ def pull_with_verification(
             continue

         verified_info = repo.get("verified")
-        verified_ok, errors, commit_hash, signing_key = verify_repository(
+        verified_ok, errors, _commit_hash, _signing_key = verify_repository(
             repo,
             repo_dir,
             mode="pull",
             no_verification=no_verification,
         )

-        # Only prompt the user if:
-        # - we are NOT in preview mode
-        # - verification is enabled
-        # - the repo has verification info configured
-        # - verification failed
         if (
             not preview
             and not no_verification
@@ -59,16 +50,14 @@ def pull_with_verification(
             if choice != "y":
                 continue

-        # Build the git pull command (include extra args if present)
         args_part = " ".join(extra_args) if extra_args else ""
         full_cmd = f"git pull{(' ' + args_part) if args_part else ''}"

         if preview:
-            # Preview mode: only show the command, do not execute or prompt.
             print(f"[Preview] In '{repo_dir}': {full_cmd}")
         else:
             print(f"Running in '{repo_dir}': {full_cmd}")
-            result = subprocess.run(full_cmd, cwd=repo_dir, shell=True)
+            result = subprocess.run(full_cmd, cwd=repo_dir, shell=True, check=False)
             if result.returncode != 0:
                 print(
                     f"'git pull' for {repo_identifier} failed "
@@ -1,67 +0,0 @@
-import shutil
-
-from pkgmgr.actions.repository.pull import pull_with_verification
-from pkgmgr.actions.install import install_repos
-
-
-def update_repos(
-    selected_repos,
-    repositories_base_dir,
-    bin_dir,
-    all_repos,
-    no_verification,
-    system_update,
-    preview: bool,
-    quiet: bool,
-    update_dependencies: bool,
-    clone_mode: str,
-):
-    """
-    Update repositories by pulling latest changes and installing them.
-
-    Parameters:
-    - selected_repos: List of selected repositories.
-    - repositories_base_dir: Base directory for repositories.
-    - bin_dir: Directory for symbolic links.
-    - all_repos: All repository configurations.
-    - no_verification: Whether to skip verification.
-    - system_update: Whether to run system update.
-    - preview: If True, only show commands without executing.
-    - quiet: If True, suppress messages.
-    - update_dependencies: Whether to update dependent repositories.
-    - clone_mode: Method to clone repositories (ssh or https).
-    """
-    pull_with_verification(
-        selected_repos,
-        repositories_base_dir,
-        all_repos,
-        [],
-        no_verification,
-        preview,
-    )
-
-    install_repos(
-        selected_repos,
-        repositories_base_dir,
-        bin_dir,
-        all_repos,
-        no_verification,
-        preview,
-        quiet,
-        clone_mode,
-        update_dependencies,
-    )
-
-    if system_update:
-        from pkgmgr.core.command.run import run_command
-
-        # Nix: upgrade all profile entries (if Nix is available)
-        if shutil.which("nix") is not None:
-            try:
-                run_command("nix profile upgrade '.*'", preview=preview)
-            except SystemExit as e:
-                print(f"[Warning] 'nix profile upgrade' failed: {e}")
-
-        # Arch / AUR system update
-        run_command("sudo -u aur_builder yay -Syu --noconfirm", preview=preview)
-        run_command("sudo pacman -Syyu --noconfirm", preview=preview)
10 src/pkgmgr/actions/update/__init__.py Normal file
@@ -0,0 +1,10 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+from pkgmgr.actions.update.manager import UpdateManager
+
+__all__ = [
+    "UpdateManager",
+]
61 src/pkgmgr/actions/update/manager.py Normal file
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+from typing import Any, Iterable
+
+from pkgmgr.actions.update.system_updater import SystemUpdater
+
+
+class UpdateManager:
+    """
+    Orchestrates:
+    - repository pull + installation
+    - optional system update
+    """
+
+    def __init__(self) -> None:
+        self._system_updater = SystemUpdater()
+
+    def run(
+        self,
+        selected_repos: Iterable[Any],
+        repositories_base_dir: str,
+        bin_dir: str,
+        all_repos: Any,
+        no_verification: bool,
+        system_update: bool,
+        preview: bool,
+        quiet: bool,
+        update_dependencies: bool,
+        clone_mode: str,
+        force_update: bool = True,
+    ) -> None:
+        from pkgmgr.actions.install import install_repos
+        from pkgmgr.actions.repository.pull import pull_with_verification
+
+        pull_with_verification(
+            selected_repos,
+            repositories_base_dir,
+            all_repos,
+            [],
+            no_verification,
+            preview,
+        )
+
+        install_repos(
+            selected_repos,
+            repositories_base_dir,
+            bin_dir,
+            all_repos,
+            no_verification,
+            preview,
+            quiet,
+            clone_mode,
+            update_dependencies,
+            force_update=force_update,
+        )
+
+        if system_update:
+            self._system_updater.run(preview=preview)
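For orientation, the call below mirrors how the CLI dispatcher (further down in this diff) invokes the new manager; the selected, base_dir, bin_dir and all_repos names are placeholders, not values from the repository:

from pkgmgr.actions.update import UpdateManager

UpdateManager().run(
    selected_repos=selected,
    repositories_base_dir=base_dir,
    bin_dir=bin_dir,
    all_repos=all_repos,
    no_verification=False,
    system_update=True,    # also runs SystemUpdater afterwards
    preview=True,          # print commands instead of executing them
    quiet=False,
    update_dependencies=False,
    clone_mode="https",
)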
66 src/pkgmgr/actions/update/os_release.py Normal file
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+import os
+from dataclasses import dataclass
+from typing import Dict
+
+
+def read_os_release(path: str = "/etc/os-release") -> Dict[str, str]:
+    """
+    Parse /etc/os-release into a dict. Returns empty dict if missing.
+    """
+    if not os.path.exists(path):
+        return {}
+
+    result: Dict[str, str] = {}
+    with open(path, "r", encoding="utf-8") as f:
+        for line in f:
+            line = line.strip()
+            if not line or line.startswith("#") or "=" not in line:
+                continue
+            key, value = line.split("=", 1)
+            result[key.strip()] = value.strip().strip('"')
+    return result
+
+
+@dataclass(frozen=True)
+class OSReleaseInfo:
+    """
+    Minimal /etc/os-release representation for distro detection.
+    """
+    id: str = ""
+    id_like: str = ""
+    pretty_name: str = ""
+
+    @staticmethod
+    def load() -> "OSReleaseInfo":
+        data = read_os_release()
+        return OSReleaseInfo(
+            id=(data.get("ID") or "").lower(),
+            id_like=(data.get("ID_LIKE") or "").lower(),
+            pretty_name=(data.get("PRETTY_NAME") or ""),
+        )
+
+    def ids(self) -> set[str]:
+        ids: set[str] = set()
+        if self.id:
+            ids.add(self.id)
+        if self.id_like:
+            for part in self.id_like.split():
+                ids.add(part.strip())
+        return ids
+
+    def is_arch_family(self) -> bool:
+        ids = self.ids()
+        return ("arch" in ids) or ("archlinux" in ids)
+
+    def is_debian_family(self) -> bool:
+        ids = self.ids()
+        return bool(ids.intersection({"debian", "ubuntu"}))
+
+    def is_fedora_family(self) -> bool:
+        ids = self.ids()
+        return bool(ids.intersection({"fedora", "rhel", "centos", "rocky", "almalinux"}))
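A small illustration of how the parser and family checks behave on a typical Ubuntu os-release snippet; the file contents are an example, not taken from the diff, and OSReleaseInfo is built by hand here because load() always reads /etc/os-release:

import tempfile

sample = 'ID=ubuntu\nID_LIKE=debian\nPRETTY_NAME="Ubuntu 24.04 LTS"\n'

with tempfile.NamedTemporaryFile("w", suffix="os-release", delete=False) as f:
    f.write(sample)
    path = f.name

data = read_os_release(path)
info = OSReleaseInfo(
    id=data.get("ID", "").lower(),
    id_like=data.get("ID_LIKE", "").lower(),
    pretty_name=data.get("PRETTY_NAME", ""),
)

print(info.ids())               # {'ubuntu', 'debian'}
print(info.is_debian_family())  # True
print(info.is_arch_family())    # False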
96 src/pkgmgr/actions/update/system_updater.py Normal file
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from __future__ import annotations
+
+import platform
+import shutil
+
+from pkgmgr.actions.update.os_release import OSReleaseInfo
+
+
+class SystemUpdater:
+    """
+    Executes distro-specific system update commands, plus Nix profile upgrades if available.
+    """
+
+    def run(self, *, preview: bool) -> None:
+        from pkgmgr.core.command.run import run_command
+
+        # Distro-agnostic: Nix profile upgrades (if Nix is present).
+        if shutil.which("nix") is not None:
+            try:
+                run_command("nix profile upgrade '.*'", preview=preview)
+            except SystemExit as e:
+                print(f"[Warning] 'nix profile upgrade' failed: {e}")
+
+        osr = OSReleaseInfo.load()
+
+        if osr.is_arch_family():
+            self._update_arch(preview=preview)
+            return
+
+        if osr.is_debian_family():
+            self._update_debian(preview=preview)
+            return
+
+        if osr.is_fedora_family():
+            self._update_fedora(preview=preview)
+            return
+
+        distro = osr.pretty_name or platform.platform()
+        print(f"[Warning] Unsupported distribution for system update: {distro}")
+
+    def _update_arch(self, *, preview: bool) -> None:
+        from pkgmgr.core.command.run import run_command
+
+        yay = shutil.which("yay")
+        pacman = shutil.which("pacman")
+        sudo = shutil.which("sudo")
+
+        # Prefer yay if available (repo + AUR in one pass).
+        # Avoid running pacman afterwards to prevent double update passes.
+        if yay and sudo:
+            run_command("sudo -u aur_builder yay -Syu --noconfirm", preview=preview)
+            return
+
+        if pacman and sudo:
+            run_command("sudo pacman -Syu --noconfirm", preview=preview)
+            return
+
+        print("[Warning] Cannot update Arch system: missing required tools (sudo/yay/pacman).")
+
+    def _update_debian(self, *, preview: bool) -> None:
+        from pkgmgr.core.command.run import run_command
+
+        sudo = shutil.which("sudo")
+        apt_get = shutil.which("apt-get")
+
+        if not (sudo and apt_get):
+            print("[Warning] Cannot update Debian/Ubuntu system: missing required tools (sudo/apt-get).")
+            return
+
+        env = "DEBIAN_FRONTEND=noninteractive"
+        run_command(f"sudo {env} apt-get update -y", preview=preview)
+        run_command(f"sudo {env} apt-get -y dist-upgrade", preview=preview)
+
+    def _update_fedora(self, *, preview: bool) -> None:
+        from pkgmgr.core.command.run import run_command
+
+        sudo = shutil.which("sudo")
+        dnf = shutil.which("dnf")
+        microdnf = shutil.which("microdnf")
+
+        if not sudo:
+            print("[Warning] Cannot update Fedora/RHEL-like system: missing sudo.")
+            return
+
+        if dnf:
+            run_command("sudo dnf -y upgrade", preview=preview)
+            return
+
+        if microdnf:
+            run_command("sudo microdnf -y upgrade", preview=preview)
+            return
+
+        print("[Warning] Cannot update Fedora/RHEL-like system: missing dnf/microdnf.")
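Because every command goes through run_command(..., preview=...), the whole system-update path can be dry-run; a one-line usage sketch:

SystemUpdater().run(preview=True)  # prints the distro-specific commands instead of executing them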
@@ -1,32 +1,30 @@
+# src/pkgmgr/cli/commands/mirror.py
 from __future__ import annotations

 import sys
 from typing import Any, Dict, List

-from pkgmgr.actions.mirror import (
-    diff_mirrors,
-    list_mirrors,
-    merge_mirrors,
-    setup_mirrors,
-)
+from pkgmgr.actions.mirror import diff_mirrors, list_mirrors, merge_mirrors, setup_mirrors
 from pkgmgr.cli.context import CLIContext

 Repository = Dict[str, Any]


 def handle_mirror_command(
-    args,
     ctx: CLIContext,
+    args: Any,
     selected: List[Repository],
 ) -> None:
     """
     Entry point for 'pkgmgr mirror' subcommands.

     Subcommands:
-    - mirror list   → list configured mirrors
-    - mirror diff   → compare config vs MIRRORS file
-    - mirror merge  → merge mirrors between config and MIRRORS file
-    - mirror setup  → configure local Git + remote placeholders
+    - mirror list
+    - mirror diff
+    - mirror merge
+    - mirror setup
+    - mirror check
+    - mirror provision
     """
     if not selected:
         print("[INFO] No repositories selected for 'mirror' command.")
@@ -34,9 +32,6 @@ def handle_mirror_command(

     subcommand = getattr(args, "subcommand", None)

-    # ------------------------------------------------------------
-    # mirror list
-    # ------------------------------------------------------------
     if subcommand == "list":
         source = getattr(args, "source", "all")
         list_mirrors(
@@ -47,9 +42,6 @@ def handle_mirror_command(
         )
         return

-    # ------------------------------------------------------------
-    # mirror diff
-    # ------------------------------------------------------------
     if subcommand == "diff":
         diff_mirrors(
             selected_repos=selected,
@@ -58,27 +50,17 @@ def handle_mirror_command(
         )
         return

-    # ------------------------------------------------------------
-    # mirror merge
-    # ------------------------------------------------------------
     if subcommand == "merge":
         source = getattr(args, "source", None)
         target = getattr(args, "target", None)
         preview = getattr(args, "preview", False)

         if source == target:
-            print(
-                "[ERROR] For 'mirror merge', source and target "
-                "must differ (one of: config, file)."
-            )
+            print("[ERROR] For 'mirror merge', source and target must differ (config vs file).")
             sys.exit(2)

-        # Config file path can be passed explicitly via --config-path.
-        # If not given, fall back to the global context (if available).
         explicit_config_path = getattr(args, "config_path", None)
-        user_config_path = explicit_config_path or getattr(
-            ctx, "user_config_path", None
-        )
+        user_config_path = explicit_config_path or getattr(ctx, "user_config_path", None)

         merge_mirrors(
             selected_repos=selected,
@@ -91,26 +73,42 @@ def handle_mirror_command(
         )
         return

-    # ------------------------------------------------------------
-    # mirror setup
-    # ------------------------------------------------------------
     if subcommand == "setup":
-        local = getattr(args, "local", False)
-        remote = getattr(args, "remote", False)
         preview = getattr(args, "preview", False)

-        # If neither flag is set → default to both.
-        if not local and not remote:
-            local = True
-            remote = True
-
         setup_mirrors(
             selected_repos=selected,
             repositories_base_dir=ctx.repositories_base_dir,
             all_repos=ctx.all_repositories,
             preview=preview,
-            local=local,
-            remote=remote,
+            local=True,
+            remote=False,
+            ensure_remote=False,
+        )
+        return
+
+    if subcommand == "check":
+        preview = getattr(args, "preview", False)
+        setup_mirrors(
+            selected_repos=selected,
+            repositories_base_dir=ctx.repositories_base_dir,
+            all_repos=ctx.all_repositories,
+            preview=preview,
+            local=False,
+            remote=True,
+            ensure_remote=False,
+        )
+        return
+
+    if subcommand == "provision":
+        preview = getattr(args, "preview", False)
+        setup_mirrors(
+            selected_repos=selected,
+            repositories_base_dir=ctx.repositories_base_dir,
+            all_repos=ctx.all_repositories,
+            preview=preview,
+            local=False,
+            remote=True,
+            ensure_remote=True,
         )
         return
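The three setup-style subcommands above differ only in the flags they forward to setup_mirrors; summarised as a small mapping (this dict is illustrative, not code from the repository):

SUBCOMMAND_FLAGS = {
    #             local, remote, ensure_remote
    "setup":     (True,  False, False),  # local 'origin' handling only
    "check":     (False, True,  False),  # non-destructive ls-remote probes
    "provision": (False, True,  True),   # additionally create missing remote repos
}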
@@ -10,11 +10,10 @@ from pkgmgr.cli.context import CLIContext
 from pkgmgr.actions.install import install_repos
 from pkgmgr.actions.repository.deinstall import deinstall_repos
 from pkgmgr.actions.repository.delete import delete_repos
-from pkgmgr.actions.repository.update import update_repos
 from pkgmgr.actions.repository.status import status_repos
 from pkgmgr.actions.repository.list import list_repositories
-from pkgmgr.core.command.run import run_command
 from pkgmgr.actions.repository.create import create_repo
+from pkgmgr.core.command.run import run_command
 from pkgmgr.core.repository.dir import get_repo_dir

 Repository = Dict[str, Any]
@@ -51,7 +50,7 @@ def handle_repos_command(
     selected: List[Repository],
 ) -> None:
     """
-    Handle core repository commands (install/update/deinstall/delete/.../list).
+    Handle core repository commands (install/update/deinstall/delete/status/list/path/shell/create).
     """

     # ------------------------------------------------------------
@@ -68,24 +67,7 @@ def handle_repos_command(
             args.quiet,
             args.clone_mode,
             args.dependencies,
-        )
-        return
-
-    # ------------------------------------------------------------
-    # update
-    # ------------------------------------------------------------
-    if args.command == "update":
-        update_repos(
-            selected,
-            ctx.repositories_base_dir,
-            ctx.binaries_dir,
-            ctx.all_repositories,
-            args.no_verification,
-            args.system,
-            args.preview,
-            args.quiet,
-            args.dependencies,
-            args.clone_mode,
+            force_update=getattr(args, "update", False),
         )
         return

@@ -146,9 +128,7 @@ def handle_repos_command(
                 f"{repository.get('account', '?')}/"
                 f"{repository.get('repository', '?')}"
             )
-            print(
-                f"[WARN] Could not resolve directory for {ident}: {exc}"
-            )
+            print(f"[WARN] Could not resolve directory for {ident}: {exc}")
             continue

         print(repo_dir)
|
|||||||
@@ -9,8 +9,13 @@ from pkgmgr.core.repository.dir import get_repo_dir
|
|||||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||||
from pkgmgr.core.git import get_tags
|
from pkgmgr.core.git import get_tags
|
||||||
from pkgmgr.core.version.semver import SemVer, find_latest_version
|
from pkgmgr.core.version.semver import SemVer, find_latest_version
|
||||||
|
from pkgmgr.core.version.installed import (
|
||||||
|
get_installed_python_version,
|
||||||
|
get_installed_nix_profile_version,
|
||||||
|
)
|
||||||
from pkgmgr.core.version.source import (
|
from pkgmgr.core.version.source import (
|
||||||
read_pyproject_version,
|
read_pyproject_version,
|
||||||
|
read_pyproject_project_name,
|
||||||
read_flake_version,
|
read_flake_version,
|
||||||
read_pkgbuild_version,
|
read_pkgbuild_version,
|
||||||
read_debian_changelog_version,
|
read_debian_changelog_version,
|
||||||
@@ -18,10 +23,54 @@ from pkgmgr.core.version.source import (
|
|||||||
read_ansible_galaxy_version,
|
read_ansible_galaxy_version,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
Repository = Dict[str, Any]
|
Repository = Dict[str, Any]
|
||||||
|
|
||||||
|
|
||||||
|
def _print_pkgmgr_self_version() -> None:
|
||||||
|
"""
|
||||||
|
Print version information for pkgmgr itself (installed env + nix profile),
|
||||||
|
used when no repository is selected (e.g. user is not inside a repo).
|
||||||
|
"""
|
||||||
|
print("pkgmgr version info")
|
||||||
|
print("====================")
|
||||||
|
print("\nRepository: <pkgmgr self>")
|
||||||
|
print("----------------------------------------")
|
||||||
|
|
||||||
|
# Common distribution/module naming variants.
|
||||||
|
python_candidates = [
|
||||||
|
"package-manager", # PyPI dist name in your project
|
||||||
|
"package_manager", # module-ish variant
|
||||||
|
"pkgmgr", # console/alias-ish
|
||||||
|
]
|
||||||
|
nix_candidates = [
|
||||||
|
"pkgmgr",
|
||||||
|
"package-manager",
|
||||||
|
]
|
||||||
|
|
||||||
|
installed_python = get_installed_python_version(*python_candidates)
|
||||||
|
installed_nix = get_installed_nix_profile_version(*nix_candidates)
|
||||||
|
|
||||||
|
if installed_python:
|
||||||
|
print(
|
||||||
|
f"Installed (Python env): {installed_python.version} "
|
||||||
|
f"(dist: {installed_python.name})"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
print("Installed (Python env): <not installed>")
|
||||||
|
|
||||||
|
if installed_nix:
|
||||||
|
print(
|
||||||
|
f"Installed (Nix profile): {installed_nix.version} "
|
||||||
|
f"(match: {installed_nix.name})"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
print("Installed (Nix profile): <not installed>")
|
||||||
|
|
||||||
|
# Helpful context for debugging "why do versions differ?"
|
||||||
|
print(f"Python executable: {sys.executable}")
|
||||||
|
print(f"Python prefix: {sys.prefix}")
|
||||||
|
|
||||||
|
|
||||||
def handle_version(
|
def handle_version(
|
||||||
args,
|
args,
|
||||||
ctx: CLIContext,
|
ctx: CLIContext,
|
||||||
@@ -30,20 +79,39 @@ def handle_version(
|
|||||||
"""
|
"""
|
||||||
Handle the 'version' command.
|
Handle the 'version' command.
|
||||||
|
|
||||||
Shows version information from various sources (git tags, pyproject,
|
Shows version information from:
|
||||||
flake.nix, PKGBUILD, debian, spec, Ansible Galaxy).
|
- Git tags
|
||||||
"""
|
- packaging metadata
|
||||||
|
- installed Python environment
|
||||||
|
- installed Nix profile
|
||||||
|
|
||||||
repo_list = selected
|
Special case:
|
||||||
if not repo_list:
|
- If no repositories are selected (e.g. not in a repo and no identifiers),
|
||||||
print("No repositories selected for version.")
|
print pkgmgr's own installed versions instead of exiting with an error.
|
||||||
sys.exit(1)
|
"""
|
||||||
|
if not selected:
|
||||||
|
_print_pkgmgr_self_version()
|
||||||
|
return
|
||||||
|
|
||||||
print("pkgmgr version info")
|
print("pkgmgr version info")
|
||||||
print("====================")
|
print("====================")
|
||||||
|
|
||||||
for repo in repo_list:
|
for repo in selected:
|
||||||
# Resolve repository directory
|
identifier = get_repo_identifier(repo, ctx.all_repositories)
|
||||||
|
|
||||||
|
python_candidates: list[str] = []
|
||||||
|
nix_candidates: list[str] = [identifier]
|
||||||
|
|
||||||
|
for key in ("pypi", "pip", "python_package", "distribution", "package"):
|
||||||
|
val = repo.get(key)
|
||||||
|
if isinstance(val, str) and val.strip():
|
||||||
|
python_candidates.append(val.strip())
|
||||||
|
|
||||||
|
python_candidates.append(identifier)
|
||||||
|
|
||||||
|
installed_python = get_installed_python_version(*python_candidates)
|
||||||
|
installed_nix = get_installed_nix_profile_version(*nix_candidates)
|
||||||
|
|
||||||
repo_dir = repo.get("directory")
|
repo_dir = repo.get("directory")
|
||||||
if not repo_dir:
|
if not repo_dir:
|
||||||
try:
|
try:
|
||||||
@@ -51,51 +119,79 @@ def handle_version(
|
|||||||
except Exception:
|
except Exception:
|
||||||
repo_dir = None
|
repo_dir = None
|
||||||
|
|
||||||
# If no local clone exists, skip gracefully with info message
|
|
||||||
if not repo_dir or not os.path.isdir(repo_dir):
|
if not repo_dir or not os.path.isdir(repo_dir):
|
||||||
identifier = get_repo_identifier(repo, ctx.all_repositories)
|
|
||||||
print(f"\nRepository: {identifier}")
|
print(f"\nRepository: {identifier}")
|
||||||
print("----------------------------------------")
|
print("----------------------------------------")
|
||||||
print(
|
print(
|
||||||
"[INFO] Skipped: repository directory does not exist "
|
"[INFO] Skipped: repository directory does not exist locally, "
|
||||||
"locally, version detection is not possible."
|
"version detection is not possible."
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if installed_python:
|
||||||
|
print(
|
||||||
|
f"Installed (Python env): {installed_python.version} "
|
||||||
|
f"(dist: {installed_python.name})"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
print("Installed (Python env): <not installed>")
|
||||||
|
|
||||||
|
if installed_nix:
|
||||||
|
print(
|
||||||
|
f"Installed (Nix profile): {installed_nix.version} "
|
||||||
|
f"(match: {installed_nix.name})"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
print("Installed (Nix profile): <not installed>")
|
||||||
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
print(f"\nRepository: {repo_dir}")
|
print(f"\nRepository: {repo_dir}")
|
||||||
print("----------------------------------------")
|
print("----------------------------------------")
|
||||||
|
|
||||||
# 1) Git tags (SemVer)
|
|
||||||
try:
|
try:
|
||||||
tags = get_tags(cwd=repo_dir)
|
tags = get_tags(cwd=repo_dir)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
print(f"[ERROR] Could not read git tags: {exc}")
|
print(f"[ERROR] Could not read git tags: {exc}")
|
||||||
tags = []
|
tags = []
|
||||||
|
|
||||||
latest_tag_info: Optional[Tuple[str, SemVer]]
|
latest_tag_info: Optional[Tuple[str, SemVer]] = (
|
||||||
latest_tag_info = find_latest_version(tags) if tags else None
|
find_latest_version(tags) if tags else None
|
||||||
|
)
|
||||||
|
|
||||||
if latest_tag_info is None:
|
if latest_tag_info:
|
||||||
latest_tag_str = None
|
tag, ver = latest_tag_info
|
||||||
latest_ver = None
|
print(f"Git (latest SemVer tag): {tag} (parsed: {ver})")
|
||||||
else:
|
else:
|
||||||
latest_tag_str, latest_ver = latest_tag_info
|
print("Git (latest SemVer tag): <none found>")
|
||||||
|
|
||||||
# 2) Packaging / metadata sources
|
|
||||||
pyproject_version = read_pyproject_version(repo_dir)
|
pyproject_version = read_pyproject_version(repo_dir)
|
||||||
|
pyproject_name = read_pyproject_project_name(repo_dir)
|
||||||
flake_version = read_flake_version(repo_dir)
|
flake_version = read_flake_version(repo_dir)
|
||||||
pkgbuild_version = read_pkgbuild_version(repo_dir)
|
pkgbuild_version = read_pkgbuild_version(repo_dir)
|
||||||
debian_version = read_debian_changelog_version(repo_dir)
|
debian_version = read_debian_changelog_version(repo_dir)
|
||||||
spec_version = read_spec_version(repo_dir)
|
spec_version = read_spec_version(repo_dir)
|
||||||
ansible_version = read_ansible_galaxy_version(repo_dir)
|
ansible_version = read_ansible_galaxy_version(repo_dir)
|
||||||
|
|
||||||
# 3) Print version summary
|
if pyproject_name:
|
||||||
if latest_ver is not None:
|
installed_python = get_installed_python_version(
|
||||||
|
pyproject_name, *python_candidates
|
||||||
|
)
|
||||||
|
|
||||||
|
if installed_python:
|
||||||
print(
|
print(
|
||||||
f"Git (latest SemVer tag): {latest_tag_str} (parsed: {latest_ver})"
|
f"Installed (Python env): {installed_python.version} "
|
||||||
|
f"(dist: {installed_python.name})"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
print("Git (latest SemVer tag): <none found>")
|
print("Installed (Python env): <not installed>")
|
||||||
|
|
||||||
|
if installed_nix:
|
||||||
|
print(
|
||||||
|
f"Installed (Nix profile): {installed_nix.version} "
|
||||||
|
f"(match: {installed_nix.name})"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
print("Installed (Nix profile): <not installed>")
|
||||||
|
|
||||||
print(f"pyproject.toml: {pyproject_version or '<not found>'}")
|
print(f"pyproject.toml: {pyproject_version or '<not found>'}")
|
||||||
print(f"flake.nix: {flake_version or '<not found>'}")
|
print(f"flake.nix: {flake_version or '<not found>'}")
|
||||||
@@ -104,15 +200,16 @@ def handle_version(
|
|||||||
print(f"package-manager.spec: {spec_version or '<not found>'}")
|
print(f"package-manager.spec: {spec_version or '<not found>'}")
|
||||||
print(f"Ansible Galaxy meta: {ansible_version or '<not found>'}")
|
print(f"Ansible Galaxy meta: {ansible_version or '<not found>'}")
|
||||||
|
|
||||||
# 4) Consistency hint (Git tag vs. pyproject)
|
if latest_tag_info and pyproject_version:
|
||||||
if latest_ver is not None and pyproject_version is not None:
|
|
||||||
try:
|
try:
|
||||||
file_ver = SemVer.parse(pyproject_version)
|
file_ver = SemVer.parse(pyproject_version)
|
||||||
if file_ver != latest_ver:
|
if file_ver != latest_tag_info[1]:
|
||||||
print(
|
print(
|
||||||
f"[WARN] Version mismatch: Git={latest_ver}, pyproject={file_ver}"
|
f"[WARN] Version mismatch: "
|
||||||
|
f"Git={latest_tag_info[1]}, pyproject={file_ver}"
|
||||||
)
|
)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
print(
|
print(
|
||||||
f"[WARN] pyproject version {pyproject_version!r} is not valid SemVer."
|
f"[WARN] pyproject version {pyproject_version!r} "
|
||||||
|
f"is not valid SemVer."
|
||||||
)
|
)
|
||||||
@@ -129,7 +129,6 @@ def dispatch_command(args, ctx: CLIContext) -> None:
     # ------------------------------------------------------------------ #
     if args.command in (
         "install",
-        "update",
         "deinstall",
         "delete",
         "status",
@@ -141,6 +140,27 @@ def dispatch_command(args, ctx: CLIContext) -> None:
         handle_repos_command(args, ctx, selected)
         return

+    # ------------------------------------------------------------
+    # update
+    # ------------------------------------------------------------
+    if args.command == "update":
+        from pkgmgr.actions.update import UpdateManager
+        UpdateManager().run(
+            selected_repos=selected,
+            repositories_base_dir=ctx.repositories_base_dir,
+            bin_dir=ctx.binaries_dir,
+            all_repos=ctx.all_repositories,
+            no_verification=args.no_verification,
+            system_update=args.system,
+            preview=args.preview,
+            quiet=args.quiet,
+            update_dependencies=args.dependencies,
+            clone_mode=args.clone_mode,
+            force_update=True,
+        )
+        return
+
     # ------------------------------------------------------------------ #
     # Tools (explore / terminal / code)
     # ------------------------------------------------------------------ #
@@ -176,7 +196,7 @@ def dispatch_command(args, ctx: CLIContext) -> None:
         return

     if args.command == "mirror":
-        handle_mirror_command(args, ctx, selected)
+        handle_mirror_command(ctx, args, selected)
         return

     print(f"Unknown command: {args.command}")
@@ -1,96 +1,134 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
+# src/pkgmgr/cli/parser/common.py

 from __future__ import annotations

 import argparse
+from typing import Optional, Tuple


 class SortedSubParsersAction(argparse._SubParsersAction):
     """
-    Subparsers action that keeps choices sorted alphabetically.
+    Subparsers action that keeps subcommands sorted alphabetically.
     """

     def add_parser(self, name, **kwargs):
         parser = super().add_parser(name, **kwargs)
-        # Sort choices alphabetically by dest (subcommand name)
         self._choices_actions.sort(key=lambda a: a.dest)
         return parser


+def _has_action(
+    parser: argparse.ArgumentParser,
+    *,
+    positional: Optional[str] = None,
+    options: Tuple[str, ...] = (),
+) -> bool:
+    """
+    Check whether the parser already has an action.
+
+    - positional: name of a positional argument (e.g. "identifiers")
+    - options: option strings (e.g. "--preview", "-q")
+    """
+    for action in parser._actions:
+        if positional and action.dest == positional:
+            return True
+        if options and any(opt in action.option_strings for opt in options):
+            return True
+    return False
+
+
+def _add_positional_if_missing(
+    parser: argparse.ArgumentParser,
+    name: str,
+    **kwargs,
+) -> None:
+    """Safely add a positional argument."""
+    if _has_action(parser, positional=name):
+        return
+    parser.add_argument(name, **kwargs)
+
+
+def _add_option_if_missing(
+    parser: argparse.ArgumentParser,
+    *option_strings: str,
+    **kwargs,
+) -> None:
+    """Safely add an optional argument."""
+    if _has_action(parser, options=tuple(option_strings)):
+        return
+    parser.add_argument(*option_strings, **kwargs)
+
+
 def add_identifier_arguments(subparser: argparse.ArgumentParser) -> None:
     """
     Common identifier / selection arguments for many subcommands.

-    Selection modes (mutual intent, not hard-enforced):
-    - identifiers (positional): select by alias / provider/account/repo
-    - --all: select all repositories
-    - --category / --string / --tag: filter-based selection on top
-      of the full repository set
     """
-    subparser.add_argument(
+    _add_positional_if_missing(
+        subparser,
         "identifiers",
         nargs="*",
         help=(
             "Identifier(s) for repositories. "
-            "Default: Repository of current folder."
+            "Default: repository of the current working directory."
         ),
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--all",
         action="store_true",
         default=False,
         help=(
             "Apply the subcommand to all repositories in the config. "
-            "Some subcommands ask for confirmation. If you want to give this "
-            "confirmation for all repositories, pipe 'yes'. E.g: "
-            "yes | pkgmgr {subcommand} --all"
+            "Pipe 'yes' to auto-confirm. Example:\n"
+            "  yes | pkgmgr <command> --all"
         ),
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--category",
         nargs="+",
         default=[],
-        help=(
-            "Filter repositories by category patterns derived from config "
-            "filenames or repo metadata (use filename without .yml/.yaml, "
-            "or /regex/ to use a regular expression)."
-        ),
+        help="Filter repositories by category (supports /regex/).",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--string",
         default="",
-        help=(
-            "Filter repositories whose identifier / name / path contains this "
-            "substring (case-insensitive). Use /regex/ for regular expressions."
-        ),
+        help="Filter repositories by substring or /regex/.",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--tag",
         action="append",
         default=[],
-        help=(
-            "Filter repositories by tag. Matches tags from the repository "
-            "collector and category tags. Use /regex/ for regular expressions."
-        ),
+        help="Filter repositories by tag (supports /regex/).",
    )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--preview",
         action="store_true",
-        help="Preview changes without executing commands",
+        help="Preview changes without executing commands.",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--list",
         action="store_true",
-        help="List affected repositories (with preview or status)",
+        help="List affected repositories.",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "-a",
         "--args",
-        nargs=argparse.REMAINDER,
         dest="extra_args",
-        help="Additional parameters to be attached.",
+        nargs=argparse.REMAINDER,
         default=[],
+        help="Additional parameters to be attached.",
     )


@@ -99,29 +137,34 @@ def add_install_update_arguments(subparser: argparse.ArgumentParser) -> None:
     Common arguments for install/update commands.
     """
     add_identifier_arguments(subparser)
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "-q",
         "--quiet",
         action="store_true",
-        help="Suppress warnings and info messages",
+        help="Suppress warnings and info messages.",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--no-verification",
         action="store_true",
         default=False,
-        help="Disable verification via commit/gpg",
+        help="Disable verification via commit / GPG.",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--dependencies",
         action="store_true",
-        help="Also pull and update dependencies",
+        help="Also pull and update dependencies.",
     )
-    subparser.add_argument(
+    _add_option_if_missing(
+        subparser,
         "--clone-mode",
         choices=["ssh", "https", "shallow"],
         default="ssh",
-        help=(
-            "Specify the clone mode: ssh, https, or shallow "
-            "(HTTPS shallow clone; default: ssh)"
-        ),
+        help="Specify clone mode (default: ssh).",
     )
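Illustration only (not part of the changeset): a minimal sketch of why the new `_add_*_if_missing` helpers matter. The `demo` subcommand name here is hypothetical; the point is that registering the shared identifier arguments twice no longer raises `argparse.ArgumentError`.

    import argparse
    from pkgmgr.cli.parser.common import add_identifier_arguments

    parser = argparse.ArgumentParser(prog="pkgmgr")
    sub = parser.add_subparsers(dest="command")
    demo = sub.add_parser("demo")          # hypothetical subcommand, for illustration
    add_identifier_arguments(demo)
    add_identifier_arguments(demo)         # second call is a silent no-op instead of a conflict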
@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

-from __future__ import annotations

 import argparse

-from .common import add_install_update_arguments, add_identifier_arguments
+from pkgmgr.cli.parser.common import (
+    add_install_update_arguments,
+    add_identifier_arguments,
+)


 def add_install_update_subparsers(
@@ -14,11 +15,17 @@ def add_install_update_subparsers(
     """
     Register install / update / deinstall / delete commands.
     """

     install_parser = subparsers.add_parser(
         "install",
         help="Setup repository/repositories alias links to executables",
     )
     add_install_update_arguments(install_parser)
+    install_parser.add_argument(
+        "--update",
+        action="store_true",
+        help="Force re-run installers (upgrade/refresh) even if the CLI layer is already loaded",
+    )

     update_parser = subparsers.add_parser(
         "update",
@@ -27,9 +34,11 @@ def add_install_update_subparsers(
     add_install_update_arguments(update_parser)
     update_parser.add_argument(
         "--system",
+        dest="system",
         action="store_true",
         help="Include system update commands",
     )
+    # No --update here: update implies force_update=True

     deinstall_parser = subparsers.add_parser(
         "deinstall",
@@ -1,3 +1,4 @@
+# src/pkgmgr/cli/parser/mirror_cmd.py
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

@@ -8,103 +9,55 @@ import argparse
 from .common import add_identifier_arguments


-def add_mirror_subparsers(
-    subparsers: argparse._SubParsersAction,
-) -> None:
-    """
-    Register mirror command and its subcommands (list, diff, merge, setup).
-    """
+def add_mirror_subparsers(subparsers: argparse._SubParsersAction) -> None:
     mirror_parser = subparsers.add_parser(
         "mirror",
-        help="Mirror-related utilities (list, diff, merge, setup)",
+        help="Mirror-related utilities (list, diff, merge, setup, check, provision)",
     )
     mirror_subparsers = mirror_parser.add_subparsers(
         dest="subcommand",
-        help="Mirror subcommands",
+        metavar="SUBCOMMAND",
         required=True,
     )

-    # ------------------------------------------------------------------
-    # mirror list
-    # ------------------------------------------------------------------
-    mirror_list = mirror_subparsers.add_parser(
-        "list",
-        help="List configured mirrors for repositories",
-    )
+    mirror_list = mirror_subparsers.add_parser("list", help="List configured mirrors for repositories")
     add_identifier_arguments(mirror_list)
     mirror_list.add_argument(
         "--source",
-        choices=["all", "config", "file", "resolved"],
+        choices=["config", "file", "all"],
         default="all",
         help="Which mirror source to show.",
     )

-    # ------------------------------------------------------------------
-    # mirror diff
-    # ------------------------------------------------------------------
-    mirror_diff = mirror_subparsers.add_parser(
-        "diff",
-        help="Show differences between config mirrors and MIRRORS file",
-    )
+    mirror_diff = mirror_subparsers.add_parser("diff", help="Show differences between config mirrors and MIRRORS file")
     add_identifier_arguments(mirror_diff)

-    # ------------------------------------------------------------------
-    # mirror merge {config,file} {config,file}
-    # ------------------------------------------------------------------
     mirror_merge = mirror_subparsers.add_parser(
         "merge",
-        help=(
-            "Merge mirrors between config and MIRRORS file "
-            "(example: pkgmgr mirror merge config file --all)"
-        ),
+        help="Merge mirrors between config and MIRRORS file (example: pkgmgr mirror merge config file --all)",
     )
-    # First define merge direction positionals, then selection args.
-    mirror_merge.add_argument(
-        "source",
-        choices=["config", "file"],
-        help="Source of mirrors.",
-    )
-    mirror_merge.add_argument(
-        "target",
-        choices=["config", "file"],
-        help="Target of mirrors.",
-    )
-
-    # Selection / filter / preview arguments
+    mirror_merge.add_argument("source", choices=["config", "file"], help="Source of mirrors.")
+    mirror_merge.add_argument("target", choices=["config", "file"], help="Target of mirrors.")

     add_identifier_arguments(mirror_merge)

     mirror_merge.add_argument(
         "--config-path",
-        help=(
-            "Path to the user config file to update. "
-            "If omitted, the global config path is used."
-        ),
+        help="Path to the user config file to update. If omitted, the global config path is used.",
     )
-    # Note: --preview, --all, --category, --tag, --list, etc. are provided
-    # by add_identifier_arguments().

-    # ------------------------------------------------------------------
-    # mirror setup
-    # ------------------------------------------------------------------
     mirror_setup = mirror_subparsers.add_parser(
         "setup",
-        help=(
-            "Setup mirror configuration for repositories.\n"
-            " --local → configure local Git (remotes, pushurls)\n"
-            " --remote → create remote repositories if missing\n"
-            "Default: both local and remote."
-        ),
+        help="Configure local Git remotes and push URLs (origin, pushurl list).",
     )
     add_identifier_arguments(mirror_setup)
-    mirror_setup.add_argument(
-        "--local",
-        action="store_true",
-        help="Only configure the local Git repository.",
-    )
-    mirror_setup.add_argument(
-        "--remote",
-        action="store_true",
-        help="Only operate on remote repositories.",
-    )
-    # Note: --preview also comes from add_identifier_arguments().
+
+    mirror_check = mirror_subparsers.add_parser(
+        "check",
+        help="Check remote mirror reachability (git ls-remote). Read-only.",
+    )
+    add_identifier_arguments(mirror_check)
+
+    mirror_provision = mirror_subparsers.add_parser(
+        "provision",
+        help="Provision remote repositories via provider APIs (create missing repos).",
+    )
+    add_identifier_arguments(mirror_provision)
30 src/pkgmgr/core/command/layer.py Normal file
@@ -0,0 +1,30 @@
# src/pkgmgr/core/command/layer.py
from __future__ import annotations

from enum import Enum


class CliLayer(str, Enum):
    """
    CLI layer precedence (lower number = stronger layer).
    """
    OS_PACKAGES = "os-packages"
    NIX = "nix"
    PYTHON = "python"
    MAKEFILE = "makefile"


_LAYER_PRIORITY: dict[CliLayer, int] = {
    CliLayer.OS_PACKAGES: 0,
    CliLayer.NIX: 1,
    CliLayer.PYTHON: 2,
    CliLayer.MAKEFILE: 3,
}


def layer_priority(layer: CliLayer) -> int:
    """
    Return precedence priority for the given layer.
    Lower value means higher priority (stronger layer).
    """
    return _LAYER_PRIORITY.get(layer, 999)
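Illustration only (not part of the changeset): a minimal sketch, assuming pkgmgr is importable, of how layer_priority() orders CLI layers.

    from pkgmgr.core.command.layer import CliLayer, layer_priority

    layers = [CliLayer.MAKEFILE, CliLayer.PYTHON, CliLayer.OS_PACKAGES, CliLayer.NIX]
    # Sorted strongest-first: os-packages, nix, python, makefile
    print(sorted(layers, key=layer_priority))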
21 src/pkgmgr/core/credentials/__init__.py Normal file
@@ -0,0 +1,21 @@
# src/pkgmgr/core/credentials/__init__.py
"""Credential resolution for provider APIs."""

from .resolver import ResolutionOptions, TokenResolver
from .types import (
    CredentialError,
    KeyringUnavailableError,
    NoCredentialsError,
    TokenRequest,
    TokenResult,
)

__all__ = [
    "TokenResolver",
    "ResolutionOptions",
    "CredentialError",
    "NoCredentialsError",
    "KeyringUnavailableError",
    "TokenRequest",
    "TokenResult",
]
11 src/pkgmgr/core/credentials/providers/__init__.py Normal file
@@ -0,0 +1,11 @@
"""Credential providers used by TokenResolver."""

from .env import EnvTokenProvider
from .keyring import KeyringTokenProvider
from .prompt import PromptTokenProvider

__all__ = [
    "EnvTokenProvider",
    "KeyringTokenProvider",
    "PromptTokenProvider",
]
23 src/pkgmgr/core/credentials/providers/env.py Normal file
@@ -0,0 +1,23 @@
# src/pkgmgr/core/credentials/providers/env.py
from __future__ import annotations

import os
from dataclasses import dataclass
from typing import Optional

from ..store_keys import env_var_candidates
from ..types import TokenRequest, TokenResult


@dataclass(frozen=True)
class EnvTokenProvider:
    """Resolve tokens from environment variables."""

    source_name: str = "env"

    def get(self, request: TokenRequest) -> Optional[TokenResult]:
        for key in env_var_candidates(request.provider_kind, request.host, request.owner):
            val = os.environ.get(key)
            if val:
                return TokenResult(token=val.strip(), source=self.source_name)
        return None
39 src/pkgmgr/core/credentials/providers/keyring.py Normal file
@@ -0,0 +1,39 @@
# src/pkgmgr/core/credentials/providers/keyring.py
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional

from ..store_keys import build_keyring_key
from ..types import KeyringUnavailableError, TokenRequest, TokenResult


def _import_keyring():
    try:
        import keyring  # type: ignore

        return keyring
    except Exception as exc:  # noqa: BLE001
        raise KeyringUnavailableError(
            "python-keyring is not available or no backend is configured."
        ) from exc


@dataclass(frozen=True)
class KeyringTokenProvider:
    """Resolve/store tokens from/to OS keyring via python-keyring."""

    source_name: str = "keyring"

    def get(self, request: TokenRequest) -> Optional[TokenResult]:
        keyring = _import_keyring()
        key = build_keyring_key(request.provider_kind, request.host, request.owner)
        token = keyring.get_password(key.service, key.username)
        if token:
            return TokenResult(token=token.strip(), source=self.source_name)
        return None

    def set(self, request: TokenRequest, token: str) -> None:
        keyring = _import_keyring()
        key = build_keyring_key(request.provider_kind, request.host, request.owner)
        keyring.set_password(key.service, key.username, token)
32 src/pkgmgr/core/credentials/providers/prompt.py Normal file
@@ -0,0 +1,32 @@
# src/pkgmgr/core/credentials/providers/prompt.py
from __future__ import annotations

import sys
from dataclasses import dataclass
from getpass import getpass
from typing import Optional

from ..types import TokenRequest, TokenResult


@dataclass(frozen=True)
class PromptTokenProvider:
    """Interactively prompt for a token.

    Only used when:
    - interactive mode is enabled
    - stdin is a TTY
    """

    source_name: str = "prompt"

    def get(self, request: TokenRequest) -> Optional[TokenResult]:
        if not sys.stdin.isatty():
            return None

        owner_info = f" (owner: {request.owner})" if request.owner else ""
        prompt = f"Enter API token for {request.provider_kind} on {request.host}{owner_info}: "
        token = (getpass(prompt) or "").strip()
        if not token:
            return None
        return TokenResult(token=token, source=self.source_name)
71 src/pkgmgr/core/credentials/resolver.py Normal file
@@ -0,0 +1,71 @@
# src/pkgmgr/core/credentials/resolver.py
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional

from .providers.env import EnvTokenProvider
from .providers.keyring import KeyringTokenProvider
from .providers.prompt import PromptTokenProvider
from .types import NoCredentialsError, TokenRequest, TokenResult


@dataclass(frozen=True)
class ResolutionOptions:
    """Controls token resolution behavior."""

    interactive: bool = True
    allow_prompt: bool = True
    save_prompt_token_to_keyring: bool = True


class TokenResolver:
    """Resolve tokens from multiple sources (ENV -> Keyring -> Prompt)."""

    def __init__(self) -> None:
        self._env = EnvTokenProvider()
        self._keyring = KeyringTokenProvider()
        self._prompt = PromptTokenProvider()

    def get_token(
        self,
        provider_kind: str,
        host: str,
        owner: Optional[str] = None,
        options: Optional[ResolutionOptions] = None,
    ) -> TokenResult:
        opts = options or ResolutionOptions()
        request = TokenRequest(provider_kind=provider_kind, host=host, owner=owner)

        # 1) ENV
        env_res = self._env.get(request)
        if env_res:
            return env_res

        # 2) Keyring
        try:
            kr_res = self._keyring.get(request)
            if kr_res:
                return kr_res
        except Exception:
            # Keyring missing/unavailable: ignore to allow prompt (workstations)
            # or to fail cleanly below (headless CI without prompt).
            pass

        # 3) Prompt (optional)
        if opts.interactive and opts.allow_prompt:
            prompt_res = self._prompt.get(request)
            if prompt_res:
                if opts.save_prompt_token_to_keyring:
                    try:
                        self._keyring.set(request, prompt_res.token)
                    except Exception:
                        # If keyring cannot store, still use token for this run.
                        pass
                return prompt_res

        raise NoCredentialsError(
            f"No token available for {provider_kind}@{host}"
            + (f" (owner: {owner})" if owner else "")
            + ". Provide it via environment variable or keyring."
        )
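Illustration only (not part of the changeset): a hedged sketch of non-interactive token resolution (ENV, then keyring, never prompting), which raises NoCredentialsError when nothing is configured.

    from pkgmgr.core.credentials import ResolutionOptions, TokenResolver

    token = TokenResolver().get_token(
        provider_kind="github",
        host="github.com",
        options=ResolutionOptions(interactive=False, allow_prompt=False),
    )
    print(token.source)  # "env" or "keyring"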
54 src/pkgmgr/core/credentials/store_keys.py Normal file
@@ -0,0 +1,54 @@
# src/pkgmgr/core/credentials/store_keys.py
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class KeyringKey:
    """Keyring address for a token."""

    service: str
    username: str


def build_keyring_key(provider_kind: str, host: str, owner: Optional[str]) -> KeyringKey:
    """Build a stable keyring key.

    - service: "pkgmgr:<provider>"
    - username: "<host>|<owner>" or "<host>|-"
    """
    provider_kind = str(provider_kind).strip().lower()
    host = str(host).strip()
    owner_part = (str(owner).strip() if owner else "-")
    return KeyringKey(service=f"pkgmgr:{provider_kind}", username=f"{host}|{owner_part}")


def env_var_candidates(provider_kind: str, host: str, owner: Optional[str]) -> list[str]:
    """Return a list of environment variable names to try.

    Order is from most specific to most generic.
    """
    kind = re_sub_non_alnum(str(provider_kind).strip().upper())
    host_norm = re_sub_non_alnum(str(host).strip().upper())
    candidates: list[str] = []

    if owner:
        owner_norm = re_sub_non_alnum(str(owner).strip().upper())
        candidates.append(f"PKGMGR_{kind}_TOKEN_{host_norm}_{owner_norm}")
        candidates.append(f"PKGMGR_TOKEN_{kind}_{host_norm}_{owner_norm}")

    candidates.append(f"PKGMGR_{kind}_TOKEN_{host_norm}")
    candidates.append(f"PKGMGR_TOKEN_{kind}_{host_norm}")
    candidates.append(f"PKGMGR_{kind}_TOKEN")
    candidates.append(f"PKGMGR_TOKEN_{kind}")
    candidates.append("PKGMGR_TOKEN")
    return candidates


def re_sub_non_alnum(value: str) -> str:
    """Normalize to an uppercase env-var friendly token (A-Z0-9_)."""
    import re

    return re.sub(r"[^A-Z0-9]+", "_", value).strip("_")
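Illustration only (not part of the changeset): the lookup order env_var_candidates() produces for a hypothetical self-hosted Gitea at git.example.org with owner "alice".

    from pkgmgr.core.credentials.store_keys import env_var_candidates

    print(env_var_candidates("gitea", "git.example.org", "alice"))
    # ['PKGMGR_GITEA_TOKEN_GIT_EXAMPLE_ORG_ALICE',
    #  'PKGMGR_TOKEN_GITEA_GIT_EXAMPLE_ORG_ALICE',
    #  'PKGMGR_GITEA_TOKEN_GIT_EXAMPLE_ORG',
    #  'PKGMGR_TOKEN_GITEA_GIT_EXAMPLE_ORG',
    #  'PKGMGR_GITEA_TOKEN',
    #  'PKGMGR_TOKEN_GITEA',
    #  'PKGMGR_TOKEN']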
34 src/pkgmgr/core/credentials/types.py Normal file
@@ -0,0 +1,34 @@
# src/pkgmgr/core/credentials/types.py
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional


class CredentialError(RuntimeError):
    """Base class for credential resolution errors."""


class NoCredentialsError(CredentialError):
    """Raised when no usable credential could be resolved."""


class KeyringUnavailableError(CredentialError):
    """Raised when keyring is requested but no backend is available."""


@dataclass(frozen=True)
class TokenRequest:
    """Parameters describing which token we need."""

    provider_kind: str           # e.g. "gitea", "github"
    host: str                    # e.g. "git.example.org" or "github.com"
    owner: Optional[str] = None  # optional org/user


@dataclass(frozen=True)
class TokenResult:
    """A resolved token plus metadata about its source."""

    token: str
    source: str  # "env" | "keyring" | "prompt"
14 src/pkgmgr/core/remote_provisioning/__init__.py Normal file
@@ -0,0 +1,14 @@
# src/pkgmgr/core/remote_provisioning/__init__.py
"""Remote repository provisioning (ensure remote repo exists)."""

from .ensure import ensure_remote_repo
from .registry import ProviderRegistry
from .types import EnsureResult, ProviderHint, RepoSpec

__all__ = [
    "ensure_remote_repo",
    "RepoSpec",
    "EnsureResult",
    "ProviderHint",
    "ProviderRegistry",
]
97 src/pkgmgr/core/remote_provisioning/ensure.py Normal file
@@ -0,0 +1,97 @@
# src/pkgmgr/core/remote_provisioning/ensure.py
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional

from pkgmgr.core.credentials.resolver import ResolutionOptions, TokenResolver

from .http.errors import HttpError
from .registry import ProviderRegistry
from .types import (
    AuthError,
    EnsureResult,
    NetworkError,
    PermissionError,
    ProviderHint,
    RepoSpec,
    UnsupportedProviderError,
)


@dataclass(frozen=True)
class EnsureOptions:
    """Options controlling remote provisioning."""

    preview: bool = False
    interactive: bool = True
    allow_prompt: bool = True
    save_prompt_token_to_keyring: bool = True


def _raise_mapped_http_error(exc: HttpError, host: str) -> None:
    """Map HttpError into domain-specific error types."""
    if exc.status == 0:
        raise NetworkError(f"Network error while talking to {host}: {exc}") from exc
    if exc.status == 401:
        raise AuthError(f"Authentication failed for {host} (401).") from exc
    if exc.status == 403:
        raise PermissionError(f"Permission denied for {host} (403).") from exc

    raise NetworkError(
        f"HTTP error from {host}: status={exc.status}, message={exc}, body={exc.body}"
    ) from exc


def ensure_remote_repo(
    spec: RepoSpec,
    provider_hint: Optional[ProviderHint] = None,
    options: Optional[EnsureOptions] = None,
    registry: Optional[ProviderRegistry] = None,
    token_resolver: Optional[TokenResolver] = None,
) -> EnsureResult:
    """Ensure that the remote repository exists (create if missing).

    - Uses TokenResolver (ENV -> keyring -> prompt)
    - Selects provider via ProviderRegistry (or provider_hint override)
    - Respects preview mode (no remote changes)
    - Maps HTTP errors to domain-specific errors
    """
    opts = options or EnsureOptions()
    reg = registry or ProviderRegistry.default()
    resolver = token_resolver or TokenResolver()

    provider = reg.resolve(spec.host)
    if provider_hint and provider_hint.kind:
        forced = provider_hint.kind.strip().lower()
        provider = next(
            (p for p in reg.providers if getattr(p, "kind", "").lower() == forced),
            None,
        )

    if provider is None:
        raise UnsupportedProviderError(f"No provider matched host: {spec.host}")

    token_opts = ResolutionOptions(
        interactive=opts.interactive,
        allow_prompt=opts.allow_prompt,
        save_prompt_token_to_keyring=opts.save_prompt_token_to_keyring,
    )
    token = resolver.get_token(
        provider_kind=getattr(provider, "kind", "unknown"),
        host=spec.host,
        owner=spec.owner,
        options=token_opts,
    )

    if opts.preview:
        return EnsureResult(
            status="skipped",
            message="Preview mode: no remote changes performed.",
        )

    try:
        return provider.ensure_repo(token.token, spec)
    except HttpError as exc:
        _raise_mapped_http_error(exc, host=spec.host)
        return EnsureResult(status="failed", message="Unreachable error mapping.")
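Illustration only (not part of the changeset): a hedged sketch of calling ensure_remote_repo() in preview mode with a hypothetical RepoSpec. A token is still resolved, but no remote changes are made.

    from pkgmgr.core.remote_provisioning import RepoSpec, ensure_remote_repo
    from pkgmgr.core.remote_provisioning.ensure import EnsureOptions

    # Hypothetical owner/name, for illustration only.
    spec = RepoSpec(host="github.com", owner="example-org", name="example-repo", private=True)
    result = ensure_remote_repo(spec, options=EnsureOptions(preview=True))
    print(result.status, result.message)  # "skipped" in preview mode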
5 src/pkgmgr/core/remote_provisioning/http/__init__.py Normal file
@@ -0,0 +1,5 @@
# src/pkgmgr/core/remote_provisioning/http/__init__.py
from .client import HttpClient, HttpResponse
from .errors import HttpError

__all__ = ["HttpClient", "HttpResponse", "HttpError"]
69 src/pkgmgr/core/remote_provisioning/http/client.py Normal file
@@ -0,0 +1,69 @@
# src/pkgmgr/core/remote_provisioning/http/client.py
from __future__ import annotations

import json
import ssl
import urllib.error
import urllib.request
from dataclasses import dataclass
from typing import Any, Dict, Optional

from .errors import HttpError


@dataclass(frozen=True)
class HttpResponse:
    status: int
    text: str
    json: Optional[Dict[str, Any]] = None


class HttpClient:
    """Tiny HTTP client (stdlib) with JSON support."""

    def __init__(self, timeout_s: int = 15) -> None:
        self._timeout_s = int(timeout_s)

    def request_json(
        self,
        method: str,
        url: str,
        headers: Optional[Dict[str, str]] = None,
        payload: Optional[Dict[str, Any]] = None,
    ) -> HttpResponse:
        data: Optional[bytes] = None
        final_headers: Dict[str, str] = dict(headers or {})

        if payload is not None:
            data = json.dumps(payload).encode("utf-8")
            final_headers.setdefault("Content-Type", "application/json")

        req = urllib.request.Request(url=url, data=data, method=method.upper())
        for k, v in final_headers.items():
            req.add_header(k, v)

        try:
            with urllib.request.urlopen(
                req,
                timeout=self._timeout_s,
                context=ssl.create_default_context(),
            ) as resp:
                raw = resp.read().decode("utf-8", errors="replace")

                parsed: Optional[Dict[str, Any]] = None
                if raw:
                    try:
                        loaded = json.loads(raw)
                        parsed = loaded if isinstance(loaded, dict) else None
                    except Exception:
                        parsed = None

                return HttpResponse(status=int(resp.status), text=raw, json=parsed)
        except urllib.error.HTTPError as exc:
            try:
                body = exc.read().decode("utf-8", errors="replace")
            except Exception:
                body = ""
            raise HttpError(status=int(exc.code), message=str(exc), body=body) from exc
        except urllib.error.URLError as exc:
            raise HttpError(status=0, message=str(exc), body="") from exc
9 src/pkgmgr/core/remote_provisioning/http/errors.py Normal file
@@ -0,0 +1,9 @@
# src/pkgmgr/core/remote_provisioning/http/errors.py
from __future__ import annotations


class HttpError(RuntimeError):
    def __init__(self, status: int, message: str, body: str = "") -> None:
        super().__init__(message)
        self.status = status
        self.body = body
@@ -0,0 +1,6 @@
# src/pkgmgr/core/remote_provisioning/providers/__init__.py
from .base import RemoteProvider
from .gitea import GiteaProvider
from .github import GitHubProvider

__all__ = ["RemoteProvider", "GiteaProvider", "GitHubProvider"]
36 src/pkgmgr/core/remote_provisioning/providers/base.py Normal file
@@ -0,0 +1,36 @@
# src/pkgmgr/core/remote_provisioning/providers/base.py
from __future__ import annotations

from abc import ABC, abstractmethod

from ..types import EnsureResult, RepoSpec


class RemoteProvider(ABC):
    """Provider interface for remote repo provisioning."""

    kind: str

    @abstractmethod
    def can_handle(self, host: str) -> bool:
        """Return True if this provider implementation matches the host."""

    @abstractmethod
    def repo_exists(self, token: str, spec: RepoSpec) -> bool:
        """Return True if repo exists and is accessible."""

    @abstractmethod
    def create_repo(self, token: str, spec: RepoSpec) -> EnsureResult:
        """Create a repository (owner may be user or org)."""

    def ensure_repo(self, token: str, spec: RepoSpec) -> EnsureResult:
        if self.repo_exists(token, spec):
            return EnsureResult(status="exists", message="Repository exists.")
        return self.create_repo(token, spec)

    @staticmethod
    def _api_base(host: str) -> str:
        # Default to https. If you need http for local dev, store host as "http://..."
        if host.startswith("http://") or host.startswith("https://"):
            return host.rstrip("/")
        return f"https://{host}".rstrip("/")
106 src/pkgmgr/core/remote_provisioning/providers/gitea.py Normal file
@@ -0,0 +1,106 @@
# src/pkgmgr/core/remote_provisioning/providers/gitea.py
from __future__ import annotations

from typing import Any, Dict

from ..http.client import HttpClient
from ..http.errors import HttpError
from ..types import EnsureResult, RepoSpec
from .base import RemoteProvider


class GiteaProvider(RemoteProvider):
    """Gitea provider using Gitea REST API v1."""

    kind = "gitea"

    def __init__(self, timeout_s: int = 15) -> None:
        self._http = HttpClient(timeout_s=timeout_s)

    def can_handle(self, host: str) -> bool:
        """
        Heuristic host match:
        - Acts as a fallback provider for self-hosted setups.
        - Must NOT claim GitHub hosts.
        - If you add more providers later, tighten this heuristic or use provider hints.
        """
        h = host.lower()
        if h in ("github.com", "api.github.com") or h.endswith(".github.com"):
            return False
        return True

    def _headers(self, token: str) -> Dict[str, str]:
        """
        Gitea commonly supports:
            Authorization: token <TOKEN>
        Newer versions may also accept Bearer tokens, but "token" is broadly compatible.
        """
        return {
            "Authorization": f"token {token}",
            "Accept": "application/json",
            "User-Agent": "pkgmgr",
        }

    def repo_exists(self, token: str, spec: RepoSpec) -> bool:
        base = self._api_base(spec.host)
        url = f"{base}/api/v1/repos/{spec.owner}/{spec.name}"
        try:
            resp = self._http.request_json("GET", url, headers=self._headers(token))
            return 200 <= resp.status < 300
        except HttpError as exc:
            if exc.status == 404:
                return False
            raise

    def create_repo(self, token: str, spec: RepoSpec) -> EnsureResult:
        base = self._api_base(spec.host)

        payload: Dict[str, Any] = {
            "name": spec.name,
            "private": bool(spec.private),
        }
        if spec.description:
            payload["description"] = spec.description
        if spec.default_branch:
            payload["default_branch"] = spec.default_branch

        org_url = f"{base}/api/v1/orgs/{spec.owner}/repos"
        user_url = f"{base}/api/v1/user/repos"

        # Try org first, then fall back to user creation.
        try:
            resp = self._http.request_json(
                "POST",
                org_url,
                headers=self._headers(token),
                payload=payload,
            )
            if 200 <= resp.status < 300:
                html_url = (resp.json or {}).get("html_url") if resp.json else None
                return EnsureResult(
                    status="created",
                    message="Repository created (org).",
                    url=str(html_url) if html_url else None,
                )
        except HttpError:
            # Typical org failures: 404 (not an org), 403 (no rights), 401 (bad token).
            pass

        resp = self._http.request_json(
            "POST",
            user_url,
            headers=self._headers(token),
            payload=payload,
        )
        if 200 <= resp.status < 300:
            html_url = (resp.json or {}).get("html_url") if resp.json else None
            return EnsureResult(
                status="created",
                message="Repository created (user).",
                url=str(html_url) if html_url else None,
            )

        return EnsureResult(
            status="failed",
            message=f"Failed to create repository (status {resp.status}).",
        )
101 src/pkgmgr/core/remote_provisioning/providers/github.py Normal file
@@ -0,0 +1,101 @@
# src/pkgmgr/core/remote_provisioning/providers/github.py
from __future__ import annotations

from typing import Any, Dict

from ..http.client import HttpClient
from ..http.errors import HttpError
from ..types import EnsureResult, RepoSpec
from .base import RemoteProvider


class GitHubProvider(RemoteProvider):
    """GitHub provider using GitHub REST API."""

    kind = "github"

    def __init__(self, timeout_s: int = 15) -> None:
        self._http = HttpClient(timeout_s=timeout_s)

    def can_handle(self, host: str) -> bool:
        h = host.lower()
        return h in ("github.com", "api.github.com") or h.endswith(".github.com")

    def _api_base(self, host: str) -> str:
        """
        GitHub API base:
        - Public GitHub: https://api.github.com
        - GitHub Enterprise Server: https://<host>/api/v3
        """
        h = host.lower()
        if h in ("github.com", "api.github.com"):
            return "https://api.github.com"

        # Enterprise instance:
        if host.startswith("http://") or host.startswith("https://"):
            return host.rstrip("/") + "/api/v3"
        return f"https://{host}/api/v3"

    def _headers(self, token: str) -> Dict[str, str]:
        return {
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
            "User-Agent": "pkgmgr",
        }

    def repo_exists(self, token: str, spec: RepoSpec) -> bool:
        api = self._api_base(spec.host)
        url = f"{api}/repos/{spec.owner}/{spec.name}"
        try:
            resp = self._http.request_json("GET", url, headers=self._headers(token))
            return 200 <= resp.status < 300
        except HttpError as exc:
            if exc.status == 404:
                return False
            raise

    def create_repo(self, token: str, spec: RepoSpec) -> EnsureResult:
        api = self._api_base(spec.host)

        payload: Dict[str, Any] = {
            "name": spec.name,
            "private": bool(spec.private),
        }
        if spec.description:
            payload["description"] = spec.description
        if spec.default_branch:
            payload["default_branch"] = spec.default_branch

        org_url = f"{api}/orgs/{spec.owner}/repos"
        user_url = f"{api}/user/repos"

        # Try org first, then fall back to user creation.
        try:
            resp = self._http.request_json(
                "POST", org_url, headers=self._headers(token), payload=payload
            )
            if 200 <= resp.status < 300:
                html_url = (resp.json or {}).get("html_url") if resp.json else None
                return EnsureResult(
                    status="created",
                    message="Repository created (org).",
                    url=str(html_url) if html_url else None,
                )
        except HttpError:
            pass

        resp = self._http.request_json(
            "POST", user_url, headers=self._headers(token), payload=payload
        )
        if 200 <= resp.status < 300:
            html_url = (resp.json or {}).get("html_url") if resp.json else None
            return EnsureResult(
                status="created",
                message="Repository created (user).",
                url=str(html_url) if html_url else None,
            )

        return EnsureResult(
            status="failed",
            message=f"Failed to create repository (status {resp.status}).",
        )
30 src/pkgmgr/core/remote_provisioning/registry.py Normal file
@@ -0,0 +1,30 @@
# src/pkgmgr/core/remote_provisioning/registry.py
from __future__ import annotations

from dataclasses import dataclass
from typing import List, Optional

from .providers.base import RemoteProvider
from .providers.gitea import GiteaProvider
from .providers.github import GitHubProvider


@dataclass
class ProviderRegistry:
    """Resolve the correct provider implementation for a host."""

    providers: List[RemoteProvider]

    @classmethod
    def default(cls) -> "ProviderRegistry":
        # Order matters: more specific providers first; fallback providers last.
        return cls(providers=[GitHubProvider(), GiteaProvider()])

    def resolve(self, host: str) -> Optional[RemoteProvider]:
        for p in self.providers:
            try:
                if p.can_handle(host):
                    return p
            except Exception:
                continue
        return None
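Illustration only (not part of the changeset): how the default registry picks a provider. The self-hosted host name is hypothetical; anything that is not a GitHub host falls through to the Gitea provider.

    from pkgmgr.core.remote_provisioning.registry import ProviderRegistry

    reg = ProviderRegistry.default()
    print(reg.resolve("github.com").kind)       # "github"
    print(reg.resolve("git.example.org").kind)  # "gitea" (self-hosted fallback)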
61 src/pkgmgr/core/remote_provisioning/types.py Normal file
@@ -0,0 +1,61 @@
# src/pkgmgr/core/remote_provisioning/types.py
from __future__ import annotations

from dataclasses import dataclass
from typing import Literal, Optional

EnsureStatus = Literal["exists", "created", "skipped", "failed"]


@dataclass(frozen=True)
class ProviderHint:
    """Optional hint to force a provider kind."""

    kind: Optional[str] = None  # e.g. "gitea" or "github"


@dataclass(frozen=True)
class RepoSpec:
    """Desired remote repository."""

    host: str
    owner: str
    name: str
    private: bool = True
    description: str = ""
    default_branch: Optional[str] = None


@dataclass(frozen=True)
class EnsureResult:
    status: EnsureStatus
    message: str
    url: Optional[str] = None


class RemoteProvisioningError(RuntimeError):
    """Base class for remote provisioning errors."""


class AuthError(RemoteProvisioningError):
    """Authentication failed (401)."""


class PermissionError(RemoteProvisioningError):
    """Permission denied (403)."""


class NotFoundError(RemoteProvisioningError):
    """Resource not found (404)."""


class PolicyError(RemoteProvisioningError):
    """Provider/org policy prevents the operation."""


class NetworkError(RemoteProvisioningError):
    """Network/transport errors."""


class UnsupportedProviderError(RemoteProvisioningError):
    """No provider matched for the given host."""
168
src/pkgmgr/core/version/installed.py
Normal file
168
src/pkgmgr/core/version/installed.py
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
from __future__ import annotations

import json
import re
import shutil
import subprocess
from dataclasses import dataclass
from typing import Iterable, Optional, Tuple


@dataclass(frozen=True)
class InstalledVersion:
    """
    Represents a resolved installed version and the matched name.
    """
    name: str
    version: str


def _normalize(name: str) -> str:
    return re.sub(r"[-_.]+", "-", (name or "").strip()).lower()


def _unique_candidates(names: Iterable[str]) -> list[str]:
    seen: set[str] = set()
    out: list[str] = []
    for n in names:
        if not n:
            continue
        key = _normalize(n)
        if key in seen:
            continue
        seen.add(key)
        out.append(n)
    return out


def get_installed_python_version(*candidates: str) -> Optional[InstalledVersion]:
    """
    Detect installed Python package version in the CURRENT Python environment.

    Strategy:
    1) Exact normalized match using importlib.metadata.version()
    2) Substring fallback by scanning installed distributions
    """
    try:
        from importlib import metadata as importlib_metadata
    except Exception:
        return None

    candidates = _unique_candidates(candidates)

    expanded: list[str] = []
    for c in candidates:
        n = _normalize(c)
        expanded.extend([c, n, n.replace("-", "_"), n.replace("-", ".")])
    expanded = _unique_candidates(expanded)

    # 1) Direct queries first (fast path)
    for name in expanded:
        try:
            version = importlib_metadata.version(name)
            return InstalledVersion(name=name, version=version)
        except Exception:
            continue

    # 2) Fallback: scan distributions (last resort)
    try:
        dists = importlib_metadata.distributions()
    except Exception:
        return None

    norm_candidates = {_normalize(c) for c in candidates}

    for dist in dists:
        dist_name = dist.metadata.get("Name", "") or ""
        norm_dist = _normalize(dist_name)
        for c in norm_candidates:
            if c and (c in norm_dist or norm_dist in c):
                ver = getattr(dist, "version", None)
                if ver:
                    return InstalledVersion(name=dist_name, version=ver)

    return None


def _run_nix(args: list[str]) -> Tuple[int, str, str]:
    p = subprocess.run(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
        check=False,
    )
    return p.returncode, p.stdout or "", p.stderr or ""


def _extract_version_from_store_path(path: str) -> Optional[str]:
    if not path:
        return None
    base = path.rstrip("/").split("/")[-1]
    if "-" not in base:
        return None
    tail = base.split("-")[-1]
    if re.match(r"\d+(\.\d+){0,3}([a-z0-9+._-]*)?$", tail, re.I):
        return tail
    return None


def get_installed_nix_profile_version(*candidates: str) -> Optional[InstalledVersion]:
    """
    Detect installed version from the current Nix profile.

    Strategy:
    1) JSON output (exact normalized match)
    2) Text fallback (substring)
    """
    if shutil.which("nix") is None:
        return None

    candidates = _unique_candidates(candidates)
    if not candidates:
        return None

    norm_candidates = {_normalize(c) for c in candidates}

    # Preferred: JSON output
    rc, out, _ = _run_nix(["nix", "profile", "list", "--json"])
    if rc == 0 and out.strip():
        try:
            data = json.loads(out)
            elements = data.get("elements") or data.get("items") or {}
            if isinstance(elements, dict):
                for elem in elements.values():
                    if not isinstance(elem, dict):
                        continue
                    name = (elem.get("name") or elem.get("pname") or "").strip()
                    version = (elem.get("version") or "").strip()
                    norm_name = _normalize(name)

                    if norm_name in norm_candidates:
                        if version:
                            return InstalledVersion(name=name, version=version)
                        for sp in elem.get("storePaths", []) or []:
                            guess = _extract_version_from_store_path(sp)
                            if guess:
                                return InstalledVersion(name=name, version=guess)
        except Exception:
            pass

    # Fallback: text mode
    rc, out, _ = _run_nix(["nix", "profile", "list"])
    if rc != 0:
        return None

    for line in out.splitlines():
        norm_line = _normalize(line)
        for c in norm_candidates:
            if c in norm_line:
                m = re.search(r"\b\d+(\.\d+){0,3}[a-z0-9+._-]*\b", line, re.I)
                if m:
                    return InstalledVersion(name=c, version=m.group(0))
                if "/nix/store/" in line:
                    guess = _extract_version_from_store_path(line.split()[-1])
                    if guess:
                        return InstalledVersion(name=c, version=guess)

    return None
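The two helpers above are meant to be chained: look in the current Python environment first, then fall back to the Nix profile. A short usage sketch, assuming the src/ layout maps to the pkgmgr.core.version.installed import path; the candidate names are illustrative.

from pkgmgr.core.version.installed import (
    get_installed_python_version,
    get_installed_nix_profile_version,
)

# Try the Python environment first, then the Nix profile.
found = (
    get_installed_python_version("pkgmgr", "package-manager")
    or get_installed_nix_profile_version("pkgmgr", "package-manager")
)
if found:
    print(f"{found.name} {found.version}")
else:
    print("pkgmgr does not appear to be installed in this environment")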
@@ -1,21 +1,3 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
"""
|
|
||||||
Helpers to extract version information from various packaging files.
|
|
||||||
|
|
||||||
All functions take a repository directory and return either a version
|
|
||||||
string or None if the corresponding file or version field is missing.
|
|
||||||
|
|
||||||
Supported sources:
|
|
||||||
- pyproject.toml (PEP 621, [project].version)
|
|
||||||
- flake.nix (version = "X.Y.Z";)
|
|
||||||
- PKGBUILD (pkgver / pkgrel)
|
|
||||||
- debian/changelog (first entry line: package (version) ...)
|
|
||||||
- RPM spec file (package-manager.spec: Version / Release)
|
|
||||||
- Ansible Galaxy (galaxy.yml or meta/main.yml)
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -30,33 +12,49 @@ def read_pyproject_version(repo_dir: str) -> Optional[str]:
|
|||||||
Read the version from pyproject.toml in repo_dir, if present.
|
Read the version from pyproject.toml in repo_dir, if present.
|
||||||
|
|
||||||
Expects a PEP 621-style [project] table with a 'version' field.
|
Expects a PEP 621-style [project] table with a 'version' field.
|
||||||
Returns the version string or None.
|
|
||||||
"""
|
"""
|
||||||
path = os.path.join(repo_dir, "pyproject.toml")
|
path = os.path.join(repo_dir, "pyproject.toml")
|
||||||
if not os.path.exists(path):
|
if not os.path.isfile(path):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
try:
|
|
||||||
try:
|
try:
|
||||||
import tomllib # Python 3.11+
|
import tomllib # Python 3.11+
|
||||||
except ModuleNotFoundError: # pragma: no cover
|
except Exception:
|
||||||
tomllib = None
|
import tomli as tomllib # type: ignore
|
||||||
|
|
||||||
if tomllib is None:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
try:
|
||||||
with open(path, "rb") as f:
|
with open(path, "rb") as f:
|
||||||
data = tomllib.load(f)
|
data = tomllib.load(f)
|
||||||
|
project = data.get("project") or {}
|
||||||
project = data.get("project", {})
|
|
||||||
if isinstance(project, dict):
|
|
||||||
version = project.get("version")
|
version = project.get("version")
|
||||||
if isinstance(version, str):
|
return str(version).strip() if version else None
|
||||||
return version.strip() or None
|
|
||||||
except Exception:
|
except Exception:
|
||||||
# Intentionally swallow errors and fall back to None.
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def read_pyproject_project_name(repo_dir: str) -> Optional[str]:
|
||||||
|
"""
|
||||||
|
Read distribution name from pyproject.toml ([project].name).
|
||||||
|
|
||||||
|
This is required to correctly resolve installed Python package
|
||||||
|
versions via importlib.metadata.
|
||||||
|
"""
|
||||||
|
path = os.path.join(repo_dir, "pyproject.toml")
|
||||||
|
if not os.path.isfile(path):
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
import tomllib # Python 3.11+
|
||||||
|
except Exception:
|
||||||
|
import tomli as tomllib # type: ignore
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(path, "rb") as f:
|
||||||
|
data = tomllib.load(f)
|
||||||
|
project = data.get("project") or {}
|
||||||
|
name = project.get("name")
|
||||||
|
return str(name).strip() if name else None
|
||||||
|
except Exception:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
@@ -64,12 +62,11 @@ def read_flake_version(repo_dir: str) -> Optional[str]:
|
|||||||
"""
|
"""
|
||||||
Read the version from flake.nix in repo_dir, if present.
|
Read the version from flake.nix in repo_dir, if present.
|
||||||
|
|
||||||
Looks for a line like:
|
Looks for:
|
||||||
version = "1.2.3";
|
version = "X.Y.Z";
|
||||||
and returns the string inside the quotes.
|
|
||||||
"""
|
"""
|
||||||
path = os.path.join(repo_dir, "flake.nix")
|
path = os.path.join(repo_dir, "flake.nix")
|
||||||
if not os.path.exists(path):
|
if not os.path.isfile(path):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -81,22 +78,21 @@ def read_flake_version(repo_dir: str) -> Optional[str]:
|
|||||||
match = re.search(r'version\s*=\s*"([^"]+)"', text)
|
match = re.search(r'version\s*=\s*"([^"]+)"', text)
|
||||||
if not match:
|
if not match:
|
||||||
return None
|
return None
|
||||||
version = match.group(1).strip()
|
|
||||||
return version or None
|
return match.group(1).strip() or None
|
||||||
|
|
||||||
|
|
||||||
def read_pkgbuild_version(repo_dir: str) -> Optional[str]:
|
def read_pkgbuild_version(repo_dir: str) -> Optional[str]:
|
||||||
"""
|
"""
|
||||||
Read the version from PKGBUILD in repo_dir, if present.
|
Read the version from PKGBUILD in repo_dir.
|
||||||
|
|
||||||
Expects:
|
Combines pkgver and pkgrel if both exist:
|
||||||
pkgver=1.2.3
|
pkgver=1.2.3
|
||||||
pkgrel=1
|
pkgrel=1
|
||||||
|
-> 1.2.3-1
|
||||||
Returns either "1.2.3-1" (if both are present) or just "1.2.3".
|
|
||||||
"""
|
"""
|
||||||
path = os.path.join(repo_dir, "PKGBUILD")
|
path = os.path.join(repo_dir, "PKGBUILD")
|
||||||
if not os.path.exists(path):
|
if not os.path.isfile(path):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -121,15 +117,13 @@ def read_pkgbuild_version(repo_dir: str) -> Optional[str]:
|
|||||||
|
|
||||||
def read_debian_changelog_version(repo_dir: str) -> Optional[str]:
|
def read_debian_changelog_version(repo_dir: str) -> Optional[str]:
|
||||||
"""
|
"""
|
||||||
Read the latest Debian version from debian/changelog in repo_dir, if present.
|
Read the latest version from debian/changelog.
|
||||||
|
|
||||||
The first non-empty line typically looks like:
|
Expected format:
|
||||||
package-name (1.2.3-1) unstable; urgency=medium
|
package (1.2.3-1) unstable; urgency=medium
|
||||||
|
|
||||||
We extract the text inside the first parentheses.
|
|
||||||
"""
|
"""
|
||||||
path = os.path.join(repo_dir, "debian", "changelog")
|
path = os.path.join(repo_dir, "debian", "changelog")
|
||||||
if not os.path.exists(path):
|
if not os.path.isfile(path):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -140,8 +134,7 @@ def read_debian_changelog_version(repo_dir: str) -> Optional[str]:
|
|||||||
continue
|
continue
|
||||||
match = re.search(r"\(([^)]+)\)", line)
|
match = re.search(r"\(([^)]+)\)", line)
|
||||||
if match:
|
if match:
|
||||||
version = match.group(1).strip()
|
return match.group(1).strip() or None
|
||||||
return version or None
|
|
||||||
break
|
break
|
||||||
except Exception:
|
except Exception:
|
||||||
return None
|
return None
|
||||||
@@ -151,21 +144,18 @@ def read_debian_changelog_version(repo_dir: str) -> Optional[str]:
|
|||||||
|
|
||||||
def read_spec_version(repo_dir: str) -> Optional[str]:
|
def read_spec_version(repo_dir: str) -> Optional[str]:
|
||||||
"""
|
"""
|
||||||
Read the version from a RPM spec file.
|
Read the version from an RPM spec file.
|
||||||
|
|
||||||
For now, we assume a fixed file name 'package-manager.spec'
|
|
||||||
in repo_dir with lines like:
|
|
||||||
|
|
||||||
|
Combines:
|
||||||
Version: 1.2.3
|
Version: 1.2.3
|
||||||
Release: 1%{?dist}
|
Release: 1%{?dist}
|
||||||
|
-> 1.2.3-1
|
||||||
Returns either "1.2.3-1" (if Release is present) or "1.2.3".
|
|
||||||
Any RPM macro suffix like '%{?dist}' is stripped from the release.
|
|
||||||
"""
|
"""
|
||||||
path = os.path.join(repo_dir, "package-manager.spec")
|
for fn in os.listdir(repo_dir):
|
||||||
if not os.path.exists(path):
|
if not fn.endswith(".spec"):
|
||||||
return None
|
continue
|
||||||
|
|
||||||
|
path = os.path.join(repo_dir, fn)
|
||||||
try:
|
try:
|
||||||
with open(path, "r", encoding="utf-8") as f:
|
with open(path, "r", encoding="utf-8") as f:
|
||||||
text = f.read()
|
text = f.read()
|
||||||
@@ -180,52 +170,46 @@ def read_spec_version(repo_dir: str) -> Optional[str]:
|
|||||||
rel_match = re.search(r"^Release:\s*(.+)$", text, re.MULTILINE)
|
rel_match = re.search(r"^Release:\s*(.+)$", text, re.MULTILINE)
|
||||||
if rel_match:
|
if rel_match:
|
||||||
release_raw = rel_match.group(1).strip()
|
release_raw = rel_match.group(1).strip()
|
||||||
# Strip common RPM macro suffix like %... (e.g. 1%{?dist})
|
release = release_raw.split("%", 1)[0].split(" ", 1)[0].strip()
|
||||||
release = release_raw.split("%", 1)[0].strip()
|
|
||||||
# Also strip anything after first whitespace, just in case
|
|
||||||
release = release.split(" ", 1)[0].strip()
|
|
||||||
if release:
|
if release:
|
||||||
return f"{version}-{release}"
|
return f"{version}-{release}"
|
||||||
|
|
||||||
return version or None
|
return version or None
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def read_ansible_galaxy_version(repo_dir: str) -> Optional[str]:
|
def read_ansible_galaxy_version(repo_dir: str) -> Optional[str]:
|
||||||
"""
|
"""
|
||||||
Read the version from Ansible Galaxy metadata, if present.
|
Read the version from Ansible Galaxy metadata.
|
||||||
|
|
||||||
Supported locations:
|
Supported:
|
||||||
- galaxy.yml (preferred for modern roles/collections)
|
- galaxy.yml
|
||||||
- meta/main.yml (legacy style roles; uses galaxy_info.version or version)
|
- meta/main.yml (galaxy_info.version or version)
|
||||||
"""
|
"""
|
||||||
# 1) galaxy.yml in repo root
|
galaxy_yml = os.path.join(repo_dir, "galaxy.yml")
|
||||||
galaxy_path = os.path.join(repo_dir, "galaxy.yml")
|
if os.path.isfile(galaxy_yml):
|
||||||
if os.path.exists(galaxy_path):
|
|
||||||
try:
|
try:
|
||||||
with open(galaxy_path, "r", encoding="utf-8") as f:
|
with open(galaxy_yml, "r", encoding="utf-8") as f:
|
||||||
data = yaml.safe_load(f) or {}
|
data = yaml.safe_load(f) or {}
|
||||||
version = data.get("version")
|
version = data.get("version")
|
||||||
if isinstance(version, str) and version.strip():
|
if isinstance(version, str) and version.strip():
|
||||||
return version.strip()
|
return version.strip()
|
||||||
except Exception:
|
except Exception:
|
||||||
# Ignore parse errors and fall through to meta/main.yml
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# 2) meta/main.yml (classic Ansible role)
|
meta_yml = os.path.join(repo_dir, "meta", "main.yml")
|
||||||
meta_path = os.path.join(repo_dir, "meta", "main.yml")
|
if os.path.isfile(meta_yml):
|
||||||
if os.path.exists(meta_path):
|
|
||||||
try:
|
try:
|
||||||
with open(meta_path, "r", encoding="utf-8") as f:
|
with open(meta_yml, "r", encoding="utf-8") as f:
|
||||||
data = yaml.safe_load(f) or {}
|
data = yaml.safe_load(f) or {}
|
||||||
|
|
||||||
# Preferred: galaxy_info.version
|
|
||||||
galaxy_info = data.get("galaxy_info") or {}
|
galaxy_info = data.get("galaxy_info") or {}
|
||||||
if isinstance(galaxy_info, dict):
|
if isinstance(galaxy_info, dict):
|
||||||
version = galaxy_info.get("version")
|
version = galaxy_info.get("version")
|
||||||
if isinstance(version, str) and version.strip():
|
if isinstance(version, str) and version.strip():
|
||||||
return version.strip()
|
return version.strip()
|
||||||
|
|
||||||
# Fallback: top-level 'version'
|
|
||||||
version = data.get("version")
|
version = data.get("version")
|
||||||
if isinstance(version, str) and version.strip():
|
if isinstance(version, str) and version.strip():
|
||||||
return version.strip()
|
return version.strip()
|
||||||
|
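The hunks above rewrite the per-file version readers (pyproject.toml, flake.nix, PKGBUILD, debian/changelog, *.spec, Ansible Galaxy metadata) so that each returns a stripped version string or None. A fallback chain over those readers might look like the sketch below; the module path is an assumption, while the read_*_version functions are the ones shown in the diff.

# Hypothetical module path for the readers edited above.
from pkgmgr.core.version import readers


def detect_repo_version(repo_dir: str):
    """Try each packaging source in turn and return the first version found, else None."""
    for reader in (
        readers.read_pyproject_version,
        readers.read_flake_version,
        readers.read_pkgbuild_version,
        readers.read_debian_changelog_version,
        readers.read_spec_version,
        readers.read_ansible_galaxy_version,
    ):
        version = reader(repo_dir)
        if version:
            return version
    return None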
24 tests/e2e/_util.py Normal file
@@ -0,0 +1,24 @@
import subprocess


def run(cmd, *, cwd=None, env=None, shell=False) -> str:
    proc = subprocess.run(
        cmd,
        cwd=cwd,
        env=env,
        shell=shell,
        text=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    print("----- BEGIN COMMAND -----")
    print(cmd if isinstance(cmd, str) else " ".join(cmd))
    print("----- OUTPUT -----")
    print(proc.stdout.rstrip())
    print("----- END COMMAND -----")

    if proc.returncode != 0:
        raise AssertionError(proc.stdout)

    return proc.stdout
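Both call styles of this helper appear in the e2e tests that follow: a list command without a shell, and a string command with shell=True. A quick illustrative sketch (the cwd value is made up):

from tests.e2e._util import run

# List form: arguments are passed verbatim, no shell involved.
run(["make", "install"], cwd="/tmp/example-repo")

# String form: executed through the shell, handy for `nix run ...` one-liners.
run("pkgmgr --help", shell=True)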
25 tests/e2e/test_install_makefile_three_times.py Normal file
@@ -0,0 +1,25 @@
from tests.e2e._util import run
import tempfile
import unittest
from pathlib import Path


class TestMakefileThreeTimes(unittest.TestCase):
    def test_make_install_three_times(self):
        with tempfile.TemporaryDirectory(prefix="makefile-3x-") as tmp:
            repo = Path(tmp)

            # Minimal Makefile with install target
            (repo / "Makefile").write_text(
                "install:\n\t@echo install >> install.log\n"
            )

            for i in range(1, 4):
                print(f"\n=== RUN {i}/3 ===")
                run(["make", "install"], cwd=repo)

            log = (repo / "install.log").read_text().splitlines()
            self.assertEqual(
                len(log),
                3,
                "make install should have been executed exactly three times",
            )
37 tests/e2e/test_install_pkgmgr_three_times_nix.py Normal file
@@ -0,0 +1,37 @@
import os
from tests.e2e._util import run
import tempfile
import unittest
from pathlib import Path


class TestPkgmgrInstallThreeTimesNix(unittest.TestCase):
    def test_three_times_install_nix(self):
        with tempfile.TemporaryDirectory(prefix="pkgmgr-nix-") as tmp:
            tmp_path = Path(tmp)

            env = os.environ.copy()
            env["HOME"] = tmp

            # Ensure nix is found
            env["PATH"] = "/nix/var/nix/profiles/default/bin:" + os.environ.get("PATH", "")

            # IMPORTANT:
            # nix run uses git+file:///src internally -> Git will reject /src if it's not a safe.directory.
            # Our test sets HOME to a temp dir, so we must provide a temp global gitconfig.
            gitconfig = tmp_path / ".gitconfig"
            gitconfig.write_text(
                "[safe]\n"
                "\tdirectory = /src\n"
                "\tdirectory = /src/.git\n"
                "\tdirectory = *\n"
            )
            env["GIT_CONFIG_GLOBAL"] = str(gitconfig)

            for i in range(1, 4):
                print(f"\n=== RUN {i}/3 ===")
                run(
                    "nix run .#pkgmgr -- install pkgmgr --update --clone-mode shallow --no-verification",
                    env=env,
                    shell=True,
                )
34 tests/e2e/test_install_pkgmgr_three_times_venv.py Normal file
@@ -0,0 +1,34 @@
from tests.e2e._util import run
import tempfile
import unittest
from pathlib import Path
import os


class TestPkgmgrInstallThreeTimesVenv(unittest.TestCase):
    def test_three_times_install_venv(self):
        with tempfile.TemporaryDirectory(prefix="pkgmgr-venv-") as tmp:
            home = Path(tmp)
            bin_dir = home / ".local" / "bin"
            bin_dir.mkdir(parents=True)

            env = os.environ.copy()
            env["HOME"] = tmp

            # pkgmgr comes from the project venv
            env["PATH"] = (
                f"{Path.cwd() / '.venv' / 'bin'}:"
                f"{bin_dir}:"
                + os.environ.get("PATH", "")
            )

            # Explicitly disable Nix so the Python/venv install path is exercised
            env["PKGMGR_DISABLE_NIX_FLAKE_INSTALLER"] = "1"

            for i in range(1, 4):
                print(f"\n=== RUN {i}/3 ===")
                run(
                    "pkgmgr install pkgmgr --update --clone-mode shallow --no-verification",
                    env=env,
                    shell=True,
                )
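The PKGMGR_DISABLE_NIX_FLAKE_INSTALLER switch used above is also exercised by the installer unit tests further down: with the variable set to "1", supports() must return False even when nix and flake.nix are present. A minimal sketch of that kind of guard, as implied by the tests; the real supports() implementation is not part of this diff.

import os
import shutil


def nix_flake_supported(repo_dir: str) -> bool:
    """Rough shape of the check the tests describe: env switch, nix binary, flake.nix file."""
    if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
        return False
    if shutil.which("nix") is None:
        return False
    return os.path.exists(os.path.join(repo_dir, "flake.nix"))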
@@ -4,21 +4,21 @@
|
|||||||
"""
|
"""
|
||||||
E2E integration tests for the `pkgmgr mirror` command family.
|
E2E integration tests for the `pkgmgr mirror` command family.
|
||||||
|
|
||||||
This test class covers:
|
Covered commands:
|
||||||
|
|
||||||
- pkgmgr mirror --help
|
- pkgmgr mirror --help
|
||||||
- pkgmgr mirror list --preview --all
|
- pkgmgr mirror list --preview --all
|
||||||
- pkgmgr mirror diff --preview --all
|
- pkgmgr mirror diff --preview --all
|
||||||
- pkgmgr mirror merge config file --preview --all
|
- pkgmgr mirror merge config file --preview --all
|
||||||
- pkgmgr mirror setup --preview --all
|
- pkgmgr mirror setup --preview --all
|
||||||
|
- pkgmgr mirror check --preview --all
|
||||||
|
- pkgmgr mirror provision --preview --all
|
||||||
|
|
||||||
All of these subcommands are fully wired at CLI level and do not
|
All commands are executed via the real CLI entry point (main module).
|
||||||
require mocks. With --preview, merge and setup do not perform
|
With --preview enabled, all operations are non-destructive and safe
|
||||||
destructive actions, making them safe for CI execution.
|
to run inside CI containers.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import io
|
import io
|
||||||
import runpy
|
import runpy
|
||||||
import sys
|
import sys
|
||||||
@@ -28,25 +28,25 @@ from contextlib import redirect_stdout, redirect_stderr
|
|||||||
|
|
||||||
class TestIntegrationMirrorCommands(unittest.TestCase):
|
class TestIntegrationMirrorCommands(unittest.TestCase):
|
||||||
"""
|
"""
|
||||||
E2E tests for `pkgmgr mirror` commands.
|
End-to-end tests for `pkgmgr mirror` commands.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# ------------------------------------------------------------
|
# ------------------------------------------------------------
|
||||||
# Helper
|
# Helper
|
||||||
# ------------------------------------------------------------
|
# ------------------------------------------------------------
|
||||||
def _run_pkgmgr(self, args: list[str]) -> str:
|
def _run_pkgmgr(self, args):
|
||||||
"""
|
"""
|
||||||
Execute pkgmgr with the given arguments and return captured stdout+stderr.
|
Execute pkgmgr with the given arguments and return captured output.
|
||||||
|
|
||||||
- Treat SystemExit(0) or SystemExit(None) as success.
|
- Treat SystemExit(0) or SystemExit(None) as success.
|
||||||
- Convert non-zero exit codes into AssertionError.
|
- Any other exit code is considered a test failure.
|
||||||
"""
|
"""
|
||||||
original_argv = list(sys.argv)
|
original_argv = list(sys.argv)
|
||||||
buffer = io.StringIO()
|
buffer = io.StringIO()
|
||||||
cmd_repr = "pkgmgr " + " ".join(args)
|
cmd_repr = "pkgmgr " + " ".join(args)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
sys.argv = ["pkgmgr"] + args
|
sys.argv = ["pkgmgr"] + list(args)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
with redirect_stdout(buffer), redirect_stderr(buffer):
|
with redirect_stdout(buffer), redirect_stderr(buffer):
|
||||||
@@ -55,9 +55,9 @@ class TestIntegrationMirrorCommands(unittest.TestCase):
|
|||||||
code = exc.code if isinstance(exc.code, int) else None
|
code = exc.code if isinstance(exc.code, int) else None
|
||||||
if code not in (0, None):
|
if code not in (0, None):
|
||||||
raise AssertionError(
|
raise AssertionError(
|
||||||
f"{cmd_repr!r} failed with exit code {exc.code}. "
|
"%r failed with exit code %r.\n\nOutput:\n%s"
|
||||||
"Scroll up to inspect the pkgmgr output."
|
% (cmd_repr, exc.code, buffer.getvalue())
|
||||||
) from exc
|
)
|
||||||
|
|
||||||
return buffer.getvalue()
|
return buffer.getvalue()
|
||||||
|
|
||||||
@@ -68,44 +68,41 @@ class TestIntegrationMirrorCommands(unittest.TestCase):
|
|||||||
# Tests
|
# Tests
|
||||||
# ------------------------------------------------------------
|
# ------------------------------------------------------------
|
||||||
|
|
||||||
def test_mirror_help(self) -> None:
|
def test_mirror_help(self):
|
||||||
"""
|
"""
|
||||||
Ensure `pkgmgr mirror --help` runs successfully
|
`pkgmgr mirror --help` should run without error and print usage info.
|
||||||
and prints a usage message for the mirror command.
|
|
||||||
"""
|
"""
|
||||||
output = self._run_pkgmgr(["mirror", "--help"])
|
output = self._run_pkgmgr(["mirror", "--help"])
|
||||||
self.assertIn("usage:", output)
|
self.assertIn("usage:", output)
|
||||||
self.assertIn("pkgmgr mirror", output)
|
self.assertIn("pkgmgr mirror", output)
|
||||||
|
|
||||||
def test_mirror_list_preview_all(self) -> None:
|
def test_mirror_list_preview_all(self):
|
||||||
"""
|
"""
|
||||||
`pkgmgr mirror list --preview --all` should run without error
|
`pkgmgr mirror list --preview --all`
|
||||||
and produce some output for the selected repositories.
|
|
||||||
"""
|
"""
|
||||||
output = self._run_pkgmgr(["mirror", "list", "--preview", "--all"])
|
output = self._run_pkgmgr(
|
||||||
# Do not assert specific wording; just ensure something was printed.
|
["mirror", "list", "--preview", "--all"]
|
||||||
|
)
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
output.strip(),
|
output.strip(),
|
||||||
msg="Expected `pkgmgr mirror list --preview --all` to produce output.",
|
"Expected output from mirror list",
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_mirror_diff_preview_all(self) -> None:
|
def test_mirror_diff_preview_all(self):
|
||||||
"""
|
"""
|
||||||
`pkgmgr mirror diff --preview --all` should run without error
|
`pkgmgr mirror diff --preview --all`
|
||||||
and produce some diagnostic output (diff header, etc.).
|
|
||||||
"""
|
"""
|
||||||
output = self._run_pkgmgr(["mirror", "diff", "--preview", "--all"])
|
output = self._run_pkgmgr(
|
||||||
|
["mirror", "diff", "--preview", "--all"]
|
||||||
|
)
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
output.strip(),
|
output.strip(),
|
||||||
msg="Expected `pkgmgr mirror diff --preview --all` to produce output.",
|
"Expected output from mirror diff",
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_mirror_merge_config_to_file_preview_all(self) -> None:
|
def test_mirror_merge_config_to_file_preview_all(self):
|
||||||
"""
|
"""
|
||||||
`pkgmgr mirror merge config file --preview --all` should run without error.
|
`pkgmgr mirror merge config file --preview --all`
|
||||||
|
|
||||||
In preview mode this does not change either config or MIRRORS files;
|
|
||||||
it only prints what would be merged.
|
|
||||||
"""
|
"""
|
||||||
output = self._run_pkgmgr(
|
output = self._run_pkgmgr(
|
||||||
[
|
[
|
||||||
@@ -119,23 +116,47 @@ class TestIntegrationMirrorCommands(unittest.TestCase):
|
|||||||
)
|
)
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
output.strip(),
|
output.strip(),
|
||||||
msg=(
|
"Expected output from mirror merge (config -> file)",
|
||||||
"Expected `pkgmgr mirror merge config file --preview --all` "
|
|
||||||
"to produce output."
|
|
||||||
),
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_mirror_setup_preview_all(self) -> None:
|
def test_mirror_setup_preview_all(self):
|
||||||
"""
|
"""
|
||||||
`pkgmgr mirror setup --preview --all` should run without error.
|
`pkgmgr mirror setup --preview --all`
|
||||||
|
|
||||||
In preview mode only the intended Git operations and remote
|
|
||||||
suggestions are printed; no real changes are made.
|
|
||||||
"""
|
"""
|
||||||
output = self._run_pkgmgr(["mirror", "setup", "--preview", "--all"])
|
output = self._run_pkgmgr(
|
||||||
|
["mirror", "setup", "--preview", "--all"]
|
||||||
|
)
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
output.strip(),
|
output.strip(),
|
||||||
msg="Expected `pkgmgr mirror setup --preview --all` to produce output.",
|
"Expected output from mirror setup",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_mirror_check_preview_all(self):
|
||||||
|
"""
|
||||||
|
`pkgmgr mirror check --preview --all`
|
||||||
|
|
||||||
|
Performs non-destructive remote checks (git ls-remote).
|
||||||
|
"""
|
||||||
|
output = self._run_pkgmgr(
|
||||||
|
["mirror", "check", "--preview", "--all"]
|
||||||
|
)
|
||||||
|
self.assertTrue(
|
||||||
|
output.strip(),
|
||||||
|
"Expected output from mirror check",
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_mirror_provision_preview_all(self):
|
||||||
|
"""
|
||||||
|
`pkgmgr mirror provision --preview --all`
|
||||||
|
|
||||||
|
In preview mode this MUST NOT create remote repositories.
|
||||||
|
"""
|
||||||
|
output = self._run_pkgmgr(
|
||||||
|
["mirror", "provision", "--preview", "--all"]
|
||||||
|
)
|
||||||
|
self.assertTrue(
|
||||||
|
output.strip(),
|
||||||
|
"Expected output from mirror provision (preview)",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,92 +0,0 @@
|
|||||||
"""
|
|
||||||
Integration test: update all configured repositories using
|
|
||||||
--clone-mode https and --no-verification.
|
|
||||||
|
|
||||||
This test is intended to be run inside the Docker container where:
|
|
||||||
- network access is available,
|
|
||||||
- the config/config.yaml is present,
|
|
||||||
- and it is safe to perform real git operations.
|
|
||||||
|
|
||||||
It passes if BOTH commands complete successfully (in separate tests):
|
|
||||||
1) pkgmgr update --all --clone-mode https --no-verification
|
|
||||||
2) nix run .#pkgmgr -- update --all --clone-mode https --no-verification
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import unittest
|
|
||||||
|
|
||||||
from test_install_pkgmgr_shallow import (
|
|
||||||
nix_profile_list_debug,
|
|
||||||
remove_pkgmgr_from_nix_profile,
|
|
||||||
pkgmgr_help_debug,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class TestIntegrationUpdateAllHttps(unittest.TestCase):
|
|
||||||
def _run_cmd(self, cmd: list[str], label: str) -> None:
|
|
||||||
"""
|
|
||||||
Run a real CLI command and raise a helpful assertion on failure.
|
|
||||||
"""
|
|
||||||
cmd_repr = " ".join(cmd)
|
|
||||||
env = os.environ.copy()
|
|
||||||
|
|
||||||
try:
|
|
||||||
print(f"\n[TEST] Running ({label}): {cmd_repr}")
|
|
||||||
subprocess.run(
|
|
||||||
cmd,
|
|
||||||
check=True,
|
|
||||||
cwd=os.getcwd(),
|
|
||||||
env=env,
|
|
||||||
text=True,
|
|
||||||
)
|
|
||||||
except subprocess.CalledProcessError as exc:
|
|
||||||
print(f"\n[TEST] Command failed ({label})")
|
|
||||||
print(f"[TEST] Command : {cmd_repr}")
|
|
||||||
print(f"[TEST] Exit code: {exc.returncode}")
|
|
||||||
|
|
||||||
nix_profile_list_debug(f"ON FAILURE ({label})")
|
|
||||||
|
|
||||||
raise AssertionError(
|
|
||||||
f"({label}) {cmd_repr!r} failed with exit code {exc.returncode}. "
|
|
||||||
"Scroll up to see the full pkgmgr/nix output inside the container."
|
|
||||||
) from exc
|
|
||||||
|
|
||||||
def _common_setup(self) -> None:
|
|
||||||
# Debug before cleanup
|
|
||||||
nix_profile_list_debug("BEFORE CLEANUP")
|
|
||||||
|
|
||||||
# Cleanup: aggressively try to drop any pkgmgr/profile entries
|
|
||||||
# (keeps the environment comparable to other integration tests).
|
|
||||||
remove_pkgmgr_from_nix_profile()
|
|
||||||
|
|
||||||
# Debug after cleanup
|
|
||||||
nix_profile_list_debug("AFTER CLEANUP")
|
|
||||||
|
|
||||||
def test_update_all_repositories_https_pkgmgr(self) -> None:
|
|
||||||
"""
|
|
||||||
Run: pkgmgr update --all --clone-mode https --no-verification
|
|
||||||
"""
|
|
||||||
self._common_setup()
|
|
||||||
|
|
||||||
args = ["update", "--all", "--clone-mode", "https", "--no-verification"]
|
|
||||||
self._run_cmd(["pkgmgr", *args], label="pkgmgr")
|
|
||||||
|
|
||||||
# After successful update: show `pkgmgr --help` via interactive bash
|
|
||||||
pkgmgr_help_debug()
|
|
||||||
|
|
||||||
def test_update_all_repositories_https_nix_pkgmgr(self) -> None:
|
|
||||||
"""
|
|
||||||
Run: nix run .#pkgmgr -- update --all --clone-mode https --no-verification
|
|
||||||
"""
|
|
||||||
self._common_setup()
|
|
||||||
|
|
||||||
args = ["update", "--all", "--clone-mode", "https", "--no-verification"]
|
|
||||||
self._run_cmd(["nix", "run", ".#pkgmgr", "--", *args], label="nix run .#pkgmgr")
|
|
||||||
|
|
||||||
# After successful update: show `pkgmgr --help` via interactive bash
|
|
||||||
pkgmgr_help_debug()
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
unittest.main()
|
|
||||||
123 tests/e2e/test_update_all_no_system.py Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
"""
|
||||||
|
Integration test: update all configured repositories using
|
||||||
|
--clone-mode shallow and --no-verification, WITHOUT system updates.
|
||||||
|
|
||||||
|
This test is intended to be run inside the Docker container where:
|
||||||
|
- network access is available,
|
||||||
|
- the config/config.yaml is present,
|
||||||
|
- and it is safe to perform real git operations.
|
||||||
|
|
||||||
|
It passes if BOTH commands complete successfully (in separate tests):
|
||||||
|
1) pkgmgr update --all --clone-mode shallow --no-verification
|
||||||
|
2) nix run .#pkgmgr -- update --all --clone-mode shallow --no-verification
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from test_install_pkgmgr_shallow import (
|
||||||
|
nix_profile_list_debug,
|
||||||
|
remove_pkgmgr_from_nix_profile,
|
||||||
|
pkgmgr_help_debug,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _make_temp_gitconfig_with_safe_dirs(home: Path) -> Path:
|
||||||
|
gitconfig = home / ".gitconfig"
|
||||||
|
gitconfig.write_text(
|
||||||
|
"[safe]\n"
|
||||||
|
"\tdirectory = /src\n"
|
||||||
|
"\tdirectory = /src/.git\n"
|
||||||
|
"\tdirectory = *\n"
|
||||||
|
)
|
||||||
|
return gitconfig
|
||||||
|
|
||||||
|
|
||||||
|
class TestIntegrationUpdateAllshallowNoSystem(unittest.TestCase):
|
||||||
|
def _common_env(self, home_dir: str) -> dict[str, str]:
|
||||||
|
env = os.environ.copy()
|
||||||
|
env["HOME"] = home_dir
|
||||||
|
|
||||||
|
home = Path(home_dir)
|
||||||
|
home.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
env["GIT_CONFIG_GLOBAL"] = str(_make_temp_gitconfig_with_safe_dirs(home))
|
||||||
|
|
||||||
|
# Ensure nix is discoverable if the container has it
|
||||||
|
env["PATH"] = "/nix/var/nix/profiles/default/bin:" + env.get("PATH", "")
|
||||||
|
|
||||||
|
return env
|
||||||
|
|
||||||
|
def _run_cmd(self, cmd: list[str], label: str, env: dict[str, str]) -> None:
|
||||||
|
cmd_repr = " ".join(cmd)
|
||||||
|
print(f"\n[TEST] Running ({label}): {cmd_repr}")
|
||||||
|
|
||||||
|
proc = subprocess.run(
|
||||||
|
cmd,
|
||||||
|
check=False,
|
||||||
|
cwd=os.getcwd(),
|
||||||
|
env=env,
|
||||||
|
text=True,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.STDOUT,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(proc.stdout.rstrip())
|
||||||
|
|
||||||
|
if proc.returncode != 0:
|
||||||
|
print(f"\n[TEST] Command failed ({label})")
|
||||||
|
print(f"[TEST] Command : {cmd_repr}")
|
||||||
|
print(f"[TEST] Exit code: {proc.returncode}")
|
||||||
|
|
||||||
|
nix_profile_list_debug(f"ON FAILURE ({label})")
|
||||||
|
|
||||||
|
raise AssertionError(
|
||||||
|
f"({label}) {cmd_repr!r} failed with exit code {proc.returncode}.\n\n"
|
||||||
|
f"--- output ---\n{proc.stdout}\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _common_setup(self) -> None:
|
||||||
|
nix_profile_list_debug("BEFORE CLEANUP")
|
||||||
|
remove_pkgmgr_from_nix_profile()
|
||||||
|
nix_profile_list_debug("AFTER CLEANUP")
|
||||||
|
|
||||||
|
def test_update_all_repositories_shallow_pkgmgr_no_system(self) -> None:
|
||||||
|
self._common_setup()
|
||||||
|
with tempfile.TemporaryDirectory(prefix="pkgmgr-updateall-nosys-") as tmp:
|
||||||
|
env = self._common_env(tmp)
|
||||||
|
args = [
|
||||||
|
"update",
|
||||||
|
"--all",
|
||||||
|
"--clone-mode",
|
||||||
|
"shallow",
|
||||||
|
"--no-verification",
|
||||||
|
]
|
||||||
|
self._run_cmd(["pkgmgr", *args], label="pkgmgr", env=env)
|
||||||
|
pkgmgr_help_debug()
|
||||||
|
|
||||||
|
def test_update_all_repositories_shallow_nix_pkgmgr_no_system(self) -> None:
|
||||||
|
self._common_setup()
|
||||||
|
with tempfile.TemporaryDirectory(prefix="pkgmgr-updateall-nosys-nix-") as tmp:
|
||||||
|
env = self._common_env(tmp)
|
||||||
|
args = [
|
||||||
|
"update",
|
||||||
|
"--all",
|
||||||
|
"--clone-mode",
|
||||||
|
"shallow",
|
||||||
|
"--no-verification",
|
||||||
|
]
|
||||||
|
self._run_cmd(
|
||||||
|
["nix", "run", ".#pkgmgr", "--", *args],
|
||||||
|
label="nix run .#pkgmgr",
|
||||||
|
env=env,
|
||||||
|
)
|
||||||
|
pkgmgr_help_debug()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
124 tests/e2e/test_update_pkgmgr_system.py Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
"""
|
||||||
|
Integration test: update ONLY the 'pkgmgr' repository with system updates enabled.
|
||||||
|
|
||||||
|
This test is intended to be run inside the Docker container where:
|
||||||
|
- network access is available,
|
||||||
|
- the config/config.yaml is present,
|
||||||
|
- and it is safe to perform real git operations.
|
||||||
|
|
||||||
|
It passes if BOTH commands complete successfully (in separate tests):
|
||||||
|
1) pkgmgr update pkgmgr --clone-mode shallow --no-verification --system
|
||||||
|
2) nix run .#pkgmgr -- update pkgmgr --clone-mode shallow --no-verification --system
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from test_install_pkgmgr_shallow import (
|
||||||
|
nix_profile_list_debug,
|
||||||
|
remove_pkgmgr_from_nix_profile,
|
||||||
|
pkgmgr_help_debug,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _make_temp_gitconfig_with_safe_dirs(home: Path) -> Path:
|
||||||
|
gitconfig = home / ".gitconfig"
|
||||||
|
gitconfig.write_text(
|
||||||
|
"[safe]\n"
|
||||||
|
"\tdirectory = /src\n"
|
||||||
|
"\tdirectory = /src/.git\n"
|
||||||
|
"\tdirectory = *\n"
|
||||||
|
)
|
||||||
|
return gitconfig
|
||||||
|
|
||||||
|
|
||||||
|
class TestIntegrationUpdatePkgmgrWithSystem(unittest.TestCase):
|
||||||
|
def _common_env(self, home_dir: str) -> dict[str, str]:
|
||||||
|
env = os.environ.copy()
|
||||||
|
env["HOME"] = home_dir
|
||||||
|
|
||||||
|
home = Path(home_dir)
|
||||||
|
home.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
env["GIT_CONFIG_GLOBAL"] = str(_make_temp_gitconfig_with_safe_dirs(home))
|
||||||
|
|
||||||
|
# Ensure nix is discoverable if the container has it
|
||||||
|
env["PATH"] = "/nix/var/nix/profiles/default/bin:" + env.get("PATH", "")
|
||||||
|
|
||||||
|
return env
|
||||||
|
|
||||||
|
def _run_cmd(self, cmd: list[str], label: str, env: dict[str, str]) -> None:
|
||||||
|
cmd_repr = " ".join(cmd)
|
||||||
|
print(f"\n[TEST] Running ({label}): {cmd_repr}")
|
||||||
|
|
||||||
|
proc = subprocess.run(
|
||||||
|
cmd,
|
||||||
|
check=False,
|
||||||
|
cwd=os.getcwd(),
|
||||||
|
env=env,
|
||||||
|
text=True,
|
||||||
|
stdout=subprocess.PIPE,
|
||||||
|
stderr=subprocess.STDOUT,
|
||||||
|
)
|
||||||
|
|
||||||
|
print(proc.stdout.rstrip())
|
||||||
|
|
||||||
|
if proc.returncode != 0:
|
||||||
|
print(f"\n[TEST] Command failed ({label})")
|
||||||
|
print(f"[TEST] Command : {cmd_repr}")
|
||||||
|
print(f"[TEST] Exit code: {proc.returncode}")
|
||||||
|
|
||||||
|
nix_profile_list_debug(f"ON FAILURE ({label})")
|
||||||
|
|
||||||
|
raise AssertionError(
|
||||||
|
f"({label}) {cmd_repr!r} failed with exit code {proc.returncode}.\n\n"
|
||||||
|
f"--- output ---\n{proc.stdout}\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _common_setup(self) -> None:
|
||||||
|
nix_profile_list_debug("BEFORE CLEANUP")
|
||||||
|
remove_pkgmgr_from_nix_profile()
|
||||||
|
nix_profile_list_debug("AFTER CLEANUP")
|
||||||
|
|
||||||
|
def test_update_pkgmgr_shallow_pkgmgr_with_system(self) -> None:
|
||||||
|
self._common_setup()
|
||||||
|
with tempfile.TemporaryDirectory(prefix="pkgmgr-update-pkgmgr-sys-") as tmp:
|
||||||
|
env = self._common_env(tmp)
|
||||||
|
args = [
|
||||||
|
"update",
|
||||||
|
"pkgmgr",
|
||||||
|
"--clone-mode",
|
||||||
|
"shallow",
|
||||||
|
"--no-verification",
|
||||||
|
"--system",
|
||||||
|
]
|
||||||
|
self._run_cmd(["pkgmgr", *args], label="pkgmgr", env=env)
|
||||||
|
pkgmgr_help_debug()
|
||||||
|
|
||||||
|
def test_update_pkgmgr_shallow_nix_pkgmgr_with_system(self) -> None:
|
||||||
|
self._common_setup()
|
||||||
|
with tempfile.TemporaryDirectory(prefix="pkgmgr-update-pkgmgr-sys-nix-") as tmp:
|
||||||
|
env = self._common_env(tmp)
|
||||||
|
args = [
|
||||||
|
"update",
|
||||||
|
"pkgmgr",
|
||||||
|
"--clone-mode",
|
||||||
|
"shallow",
|
||||||
|
"--no-verification",
|
||||||
|
"--system",
|
||||||
|
]
|
||||||
|
self._run_cmd(
|
||||||
|
["nix", "run", ".#pkgmgr", "--", *args],
|
||||||
|
label="nix run .#pkgmgr",
|
||||||
|
env=env,
|
||||||
|
)
|
||||||
|
pkgmgr_help_debug()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
@@ -23,7 +23,7 @@ from unittest.mock import patch
 import pkgmgr.actions.install as install_mod
 from pkgmgr.actions.install import install_repos
 from pkgmgr.actions.install.installers.makefile import MakefileInstaller
-from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller
+from pkgmgr.actions.install.installers.nix import NixFlakeInstaller
 from pkgmgr.actions.install.installers.os_packages.arch_pkgbuild import (
     ArchPkgbuildInstaller,
 )
234 tests/unit/pkgmgr/actions/install/installers/nix/test_legacy.py Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
"""
|
||||||
|
Unit tests for NixFlakeInstaller using unittest (no pytest).
|
||||||
|
|
||||||
|
Covers:
|
||||||
|
- Successful installation (returncode == 0)
|
||||||
|
- Mandatory failure → SystemExit with correct code
|
||||||
|
- Optional failure (pkgmgr default) → no raise, but warning
|
||||||
|
- supports() behavior incl. PKGMGR_DISABLE_NIX_FLAKE_INSTALLER
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import io
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
from contextlib import redirect_stdout
|
||||||
|
from typing import List
|
||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
from pkgmgr.actions.install.installers.nix import NixFlakeInstaller
|
||||||
|
|
||||||
|
|
||||||
|
class DummyCtx:
|
||||||
|
"""Minimal context object to satisfy NixFlakeInstaller.run() / supports()."""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
identifier: str,
|
||||||
|
repo_dir: str,
|
||||||
|
preview: bool = False,
|
||||||
|
quiet: bool = False,
|
||||||
|
force_update: bool = False,
|
||||||
|
):
|
||||||
|
self.identifier = identifier
|
||||||
|
self.repo_dir = repo_dir
|
||||||
|
self.preview = preview
|
||||||
|
self.quiet = quiet
|
||||||
|
self.force_update = force_update
|
||||||
|
|
||||||
|
|
||||||
|
class TestNixFlakeInstaller(unittest.TestCase):
|
||||||
|
def setUp(self) -> None:
|
||||||
|
# Create a temporary repository directory with a flake.nix file
|
||||||
|
self._tmpdir = tempfile.mkdtemp(prefix="nix_flake_test_")
|
||||||
|
self.repo_dir = self._tmpdir
|
||||||
|
flake_path = os.path.join(self.repo_dir, "flake.nix")
|
||||||
|
with open(flake_path, "w", encoding="utf-8") as f:
|
||||||
|
f.write("{}\n")
|
||||||
|
|
||||||
|
# Ensure the disable env var is not set by default
|
||||||
|
os.environ.pop("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER", None)
|
||||||
|
|
||||||
|
def tearDown(self) -> None:
|
||||||
|
if os.path.isdir(self._tmpdir):
|
||||||
|
shutil.rmtree(self._tmpdir, ignore_errors=True)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _cp(code: int, stdout: str = "", stderr: str = "") -> subprocess.CompletedProcess:
|
||||||
|
return subprocess.CompletedProcess(args=["nix"], returncode=code, stdout=stdout, stderr=stderr)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _enable_nix_in_module(which_patch) -> None:
|
||||||
|
"""Ensure shutil.which('nix') in nix installer module returns a path."""
|
||||||
|
which_patch.return_value = "/usr/bin/nix"
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _install_cmds_from_calls(call_args_list) -> List[str]:
|
||||||
|
cmds: List[str] = []
|
||||||
|
for c in call_args_list:
|
||||||
|
if not c.args:
|
||||||
|
continue
|
||||||
|
cmd = c.args[0]
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile install "):
|
||||||
|
cmds.append(cmd)
|
||||||
|
return cmds
|
||||||
|
|
||||||
|
def test_nix_flake_run_success(self) -> None:
|
||||||
|
"""
|
||||||
|
When install returns success (returncode 0), installer
|
||||||
|
should report success and not raise.
|
||||||
|
"""
|
||||||
|
ctx = DummyCtx(identifier="some-lib", repo_dir=self.repo_dir)
|
||||||
|
installer = NixFlakeInstaller()
|
||||||
|
|
||||||
|
install_results = [self._cp(0)] # first install succeeds
|
||||||
|
|
||||||
|
def fake_subprocess_run(cmd, *args, **kwargs):
|
||||||
|
# cmd is a string because CommandRunner uses shell=True
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile list --json"):
|
||||||
|
return self._cp(0, stdout='{"elements": []}', stderr="")
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile install "):
|
||||||
|
return install_results.pop(0)
|
||||||
|
return self._cp(0)
|
||||||
|
|
||||||
|
buf = io.StringIO()
|
||||||
|
with patch("pkgmgr.actions.install.installers.nix.installer.shutil.which") as which_mock, patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.installer.os.path.exists", return_value=True
|
||||||
|
), patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.runner.subprocess.run", side_effect=fake_subprocess_run
|
||||||
|
) as subproc_mock, redirect_stdout(buf):
|
||||||
|
self._enable_nix_in_module(which_mock)
|
||||||
|
|
||||||
|
self.assertTrue(installer.supports(ctx))
|
||||||
|
installer.run(ctx)
|
||||||
|
|
||||||
|
out = buf.getvalue()
|
||||||
|
self.assertIn("[nix] install: nix profile install", out)
|
||||||
|
self.assertIn("[nix] output 'default' successfully installed.", out)
|
||||||
|
|
||||||
|
install_cmds = self._install_cmds_from_calls(subproc_mock.call_args_list)
|
||||||
|
self.assertEqual(install_cmds, [f"nix profile install {self.repo_dir}#default"])
|
||||||
|
|
||||||
|
def test_nix_flake_run_mandatory_failure_raises(self) -> None:
|
||||||
|
"""
|
||||||
|
For a generic repository, 'default' is mandatory.
|
||||||
|
A non-zero return code must raise SystemExit with that code.
|
||||||
|
"""
|
||||||
|
ctx = DummyCtx(identifier="some-lib", repo_dir=self.repo_dir)
|
||||||
|
installer = NixFlakeInstaller()
|
||||||
|
|
||||||
|
# retry layer does one attempt (non-403), then fallback does final attempt => 2 installs
|
||||||
|
install_results = [self._cp(1), self._cp(1)]
|
||||||
|
|
||||||
|
def fake_subprocess_run(cmd, *args, **kwargs):
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile list --json"):
|
||||||
|
return self._cp(0, stdout='{"elements": []}', stderr="")
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile install "):
|
||||||
|
return install_results.pop(0)
|
||||||
|
return self._cp(0)
|
||||||
|
|
||||||
|
buf = io.StringIO()
|
||||||
|
with patch("pkgmgr.actions.install.installers.nix.installer.shutil.which") as which_mock, patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.installer.os.path.exists", return_value=True
|
||||||
|
), patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.runner.subprocess.run", side_effect=fake_subprocess_run
|
||||||
|
) as subproc_mock, redirect_stdout(buf):
|
||||||
|
self._enable_nix_in_module(which_mock)
|
||||||
|
|
||||||
|
self.assertTrue(installer.supports(ctx))
|
||||||
|
with self.assertRaises(SystemExit) as cm:
|
||||||
|
installer.run(ctx)
|
||||||
|
|
||||||
|
self.assertEqual(cm.exception.code, 1)
|
||||||
|
|
||||||
|
out = buf.getvalue()
|
||||||
|
self.assertIn("[nix] install: nix profile install", out)
|
||||||
|
self.assertIn("[ERROR] Failed to install Nix flake output 'default' (exit 1)", out)
|
||||||
|
|
||||||
|
install_cmds = self._install_cmds_from_calls(subproc_mock.call_args_list)
|
||||||
|
self.assertEqual(
|
||||||
|
install_cmds,
|
||||||
|
[
|
||||||
|
f"nix profile install {self.repo_dir}#default",
|
||||||
|
f"nix profile install {self.repo_dir}#default",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_nix_flake_run_optional_failure_does_not_raise(self) -> None:
|
||||||
|
"""
|
||||||
|
For pkgmgr/package-manager repositories:
|
||||||
|
- 'pkgmgr' output is mandatory
|
||||||
|
- 'default' output is optional
|
||||||
|
Failure of optional output must not raise.
|
||||||
|
"""
|
||||||
|
ctx = DummyCtx(identifier="pkgmgr", repo_dir=self.repo_dir)
|
||||||
|
installer = NixFlakeInstaller()
|
||||||
|
|
||||||
|
# pkgmgr success (1 call), default fails (2 calls: attempt + final)
|
||||||
|
install_results = [self._cp(0), self._cp(1), self._cp(1)]
|
||||||
|
|
||||||
|
def fake_subprocess_run(cmd, *args, **kwargs):
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile list --json"):
|
||||||
|
return self._cp(0, stdout='{"elements": []}', stderr="")
|
||||||
|
if isinstance(cmd, str) and cmd.startswith("nix profile install "):
|
||||||
|
return install_results.pop(0)
|
||||||
|
return self._cp(0)
|
||||||
|
|
||||||
|
buf = io.StringIO()
|
||||||
|
with patch("pkgmgr.actions.install.installers.nix.installer.shutil.which") as which_mock, patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.installer.os.path.exists", return_value=True
|
||||||
|
), patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.runner.subprocess.run", side_effect=fake_subprocess_run
|
||||||
|
) as subproc_mock, redirect_stdout(buf):
|
||||||
|
self._enable_nix_in_module(which_mock)
|
||||||
|
|
||||||
|
self.assertTrue(installer.supports(ctx))
|
||||||
|
installer.run(ctx) # must NOT raise
|
||||||
|
|
||||||
|
out = buf.getvalue()
|
||||||
|
|
||||||
|
# Should announce both outputs
|
||||||
|
self.assertIn("ensuring outputs: pkgmgr, default", out)
|
||||||
|
|
||||||
|
# First output ok
|
||||||
|
self.assertIn("[nix] output 'pkgmgr' successfully installed.", out)
|
||||||
|
|
||||||
|
# Second output failed but no raise
|
||||||
|
self.assertIn("[ERROR] Failed to install Nix flake output 'default' (exit 1)", out)
|
||||||
|
self.assertIn("[WARNING] Continuing despite failure of optional output 'default'.", out)
|
||||||
|
|
||||||
|
install_cmds = self._install_cmds_from_calls(subproc_mock.call_args_list)
|
||||||
|
self.assertEqual(
|
||||||
|
install_cmds,
|
||||||
|
[
|
||||||
|
f"nix profile install {self.repo_dir}#pkgmgr",
|
||||||
|
f"nix profile install {self.repo_dir}#default",
|
||||||
|
f"nix profile install {self.repo_dir}#default",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_nix_flake_supports_respects_disable_env(self) -> None:
|
||||||
|
"""
|
||||||
|
PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 must disable the installer,
|
||||||
|
even if flake.nix exists and nix is available.
|
||||||
|
"""
|
||||||
|
ctx = DummyCtx(identifier="pkgmgr", repo_dir=self.repo_dir, quiet=False)
|
||||||
|
installer = NixFlakeInstaller()
|
||||||
|
|
||||||
|
with patch("pkgmgr.actions.install.installers.nix.installer.shutil.which") as which_mock, patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.installer.os.path.exists", return_value=True
|
||||||
|
):
|
||||||
|
self._enable_nix_in_module(which_mock)
|
||||||
|
os.environ["PKGMGR_DISABLE_NIX_FLAKE_INSTALLER"] = "1"
|
||||||
|
self.assertFalse(installer.supports(ctx))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
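The installer unit tests above encode the output-selection policy for flake installs: for the pkgmgr/package-manager repository the pkgmgr output is mandatory and default is optional, while for any other repository default is mandatory. A hedged sketch of that policy follows; the names come from the tests, the real implementation is not shown here.

def planned_outputs(identifier: str) -> list[tuple[str, bool]]:
    """Return (output_name, mandatory) pairs as described by the installer tests."""
    if identifier in ("pkgmgr", "package-manager"):
        return [("pkgmgr", True), ("default", False)]
    return [("default", True)]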
@@ -0,0 +1,106 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
from pkgmgr.actions.install.installers.nix.retry import GitHubRateLimitRetry, RetryPolicy
|
||||||
|
from pkgmgr.actions.install.installers.nix.types import RunResult
|
||||||
|
|
||||||
|
|
||||||
|
class DummyCtx:
|
||||||
|
def __init__(self, quiet: bool = True) -> None:
|
||||||
|
self.quiet = quiet
|
||||||
|
|
||||||
|
|
||||||
|
class FakeRunner:
|
||||||
|
"""
|
||||||
|
Simulates a runner that returns:
|
||||||
|
- HTTP 403 for the first N calls
|
||||||
|
- success afterwards
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, fail_count: int) -> None:
|
||||||
|
self.fail_count = fail_count
|
||||||
|
self.calls = 0
|
||||||
|
|
||||||
|
def run(self, ctx: DummyCtx, cmd: str, allow_failure: bool) -> RunResult:
|
||||||
|
self.calls += 1
|
||||||
|
|
||||||
|
if self.calls <= self.fail_count:
|
||||||
|
return RunResult(
|
||||||
|
returncode=1,
|
||||||
|
stdout="",
|
||||||
|
stderr="error: HTTP error 403: rate limit exceeded (simulated)",
|
||||||
|
)
|
||||||
|
|
||||||
|
return RunResult(returncode=0, stdout="ok", stderr="")
|
||||||
|
|
||||||
|
|
||||||
|
class TestGitHub403Retry(unittest.TestCase):
|
||||||
|
def test_retries_on_403_without_realtime_waiting(self) -> None:
|
||||||
|
"""
|
||||||
|
Ensure:
|
||||||
|
- It retries only on GitHub 403-like errors
|
||||||
|
- It does not actually sleep in realtime (time.sleep patched)
|
||||||
|
- It stops once a success occurs
|
||||||
|
- Wait times follow Fibonacci(base=30) + jitter
|
||||||
|
"""
|
||||||
|
policy = RetryPolicy(
|
||||||
|
max_attempts=3, # attempts: 1,2,3
|
||||||
|
base_delay_seconds=30, # fibonacci delays: 30, 30, 60
|
||||||
|
jitter_seconds_min=0,
|
||||||
|
jitter_seconds_max=60,
|
||||||
|
)
|
||||||
|
|
||||||
|
retry = GitHubRateLimitRetry(policy=policy)
|
||||||
|
ctx = DummyCtx(quiet=True)
|
||||||
|
runner = FakeRunner(fail_count=2) # fail twice (403), then succeed
|
||||||
|
|
||||||
|
# Make jitter deterministic and prevent real sleeping.
|
||||||
|
with patch("pkgmgr.actions.install.installers.nix.retry.random.randint", return_value=5) as jitter_mock, patch(
|
||||||
|
"pkgmgr.actions.install.installers.nix.retry.time.sleep"
|
||||||
|
) as sleep_mock:
|
||||||
|
res = retry.run_with_retry(ctx, runner, "nix profile install /tmp#default")
|
||||||
|
|
||||||
|
# Result should be success on 3rd attempt.
|
||||||
|
self.assertEqual(res.returncode, 0)
|
||||||
|
self.assertEqual(runner.calls, 3)
|
||||||
|
|
||||||
|
# jitter should be used for each retry sleep (attempt 1->2, attempt 2->3) => 2 sleeps
|
||||||
|
self.assertEqual(jitter_mock.call_count, 2)
|
||||||
|
self.assertEqual(sleep_mock.call_count, 2)
|
||||||
|
|
||||||
|
# Fibonacci delays for attempts=3: [30, 30, 60]
|
||||||
|
# sleep occurs after failed attempt 1 and 2, so base delays are 30 and 30
|
||||||
|
# wait_time = base_delay + jitter(5) => 35, 35
|
||||||
|
sleep_args = [c.args[0] for c in sleep_mock.call_args_list]
|
||||||
|
self.assertEqual(sleep_args, [35, 35])
|
||||||
|
|
||||||
|
def test_does_not_retry_on_non_403_errors(self) -> None:
|
||||||
|
"""
|
||||||
|
Ensure it does not retry when the error is not recognized as GitHub 403/rate limit.
|
||||||
|
"""
|
||||||
|
policy = RetryPolicy(max_attempts=7, base_delay_seconds=30)
|
||||||
|
retry = GitHubRateLimitRetry(policy=policy)
|
||||||
|
ctx = DummyCtx(quiet=True)
|
||||||
|
|
||||||
|
class Non403Runner:
|
||||||
|
def __init__(self) -> None:
|
||||||
|
self.calls = 0
|
||||||
|
|
||||||
|
def run(self, ctx: DummyCtx, cmd: str, allow_failure: bool) -> RunResult:
|
||||||
|
self.calls += 1
|
||||||
|
return RunResult(returncode=1, stdout="", stderr="some other error (simulated)")
|
||||||
|
|
||||||
|
runner = Non403Runner()
|
||||||
|
|
||||||
|
with patch("pkgmgr.actions.install.installers.nix.retry.time.sleep") as sleep_mock:
|
||||||
|
res = retry.run_with_retry(ctx, runner, "nix profile install /tmp#default")
|
||||||
|
|
||||||
|
self.assertEqual(res.returncode, 1)
|
||||||
|
self.assertEqual(runner.calls, 1) # no retries
|
||||||
|
self.assertEqual(sleep_mock.call_count, 0)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
unittest.main()
|
||||||
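The retry tests above pin down the backoff behaviour for GitHub 403/rate-limit errors: Fibonacci-style base delays with a 30-second base (30, 30, 60, ...), a random jitter added before each retry, and no retries for other errors. A small sketch of that delay schedule; the real RetryPolicy and GitHubRateLimitRetry classes live in pkgmgr.actions.install.installers.nix.retry and are not reproduced here.

import random


def fibonacci_delays(attempts: int, base: int = 30) -> list[int]:
    """Base delays per attempt: 30, 30, 60, 90, ... for base=30."""
    delays = []
    a, b = 1, 1
    for _ in range(attempts):
        delays.append(a * base)
        a, b = b, a + b
    return delays


def wait_before_retry(attempt_index: int, jitter_min: int = 0, jitter_max: int = 60) -> int:
    """Delay after a failed attempt: Fibonacci base delay plus jitter.

    With the test's policy (3 attempts, base 30, jitter patched to 5) the two
    sleeps come out as 35 and 35, matching the asserted values.
    """
    return fibonacci_delays(attempt_index + 1)[attempt_index] + random.randint(jitter_min, jitter_max)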
@@ -1,206 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Unit tests for NixFlakeInstaller using unittest (no pytest).

Covers:
- Successful installation (exit_code == 0)
- Mandatory failure → SystemExit with correct code
- Optional failure (pkgmgr default) → no raise, but warning
- supports() behavior incl. PKGMGR_DISABLE_NIX_FLAKE_INSTALLER
"""

import io
import os
import shutil
import tempfile
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch

from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller


class DummyCtx:
    """Minimal context object to satisfy NixFlakeInstaller.run() / supports()."""

    def __init__(self, identifier: str, repo_dir: str, preview: bool = False):
        self.identifier = identifier
        self.repo_dir = repo_dir
        self.preview = preview


class TestNixFlakeInstaller(unittest.TestCase):
    def setUp(self) -> None:
        # Create a temporary repository directory with a flake.nix file
        self._tmpdir = tempfile.mkdtemp(prefix="nix_flake_test_")
        self.repo_dir = self._tmpdir
        flake_path = os.path.join(self.repo_dir, "flake.nix")
        with open(flake_path, "w", encoding="utf-8") as f:
            f.write("{}\n")

        # Ensure the disable env var is not set by default
        os.environ.pop("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER", None)

    def tearDown(self) -> None:
        # Clean up the temporary directory
        if os.path.isdir(self._tmpdir):
            shutil.rmtree(self._tmpdir, ignore_errors=True)

    def _enable_nix_in_module(self, which_patch):
        """Ensure shutil.which('nix') in the nix_flake module returns a path."""
        which_patch.return_value = "/usr/bin/nix"

    def test_nix_flake_run_success(self):
        """
        When os.system returns a successful exit code, the installer
        should report success and not raise.
        """
        ctx = DummyCtx(identifier="some-lib", repo_dir=self.repo_dir)

        installer = NixFlakeInstaller()

        buf = io.StringIO()
        with patch(
            "pkgmgr.actions.install.installers.nix_flake.shutil.which"
        ) as which_mock, patch(
            "pkgmgr.actions.install.installers.nix_flake.os.system"
        ) as system_mock, redirect_stdout(buf):
            self._enable_nix_in_module(which_mock)

            # Simulate os.system returning success (exit code 0)
            system_mock.return_value = 0

            # Sanity: supports() must be True
            self.assertTrue(installer.supports(ctx))

            installer.run(ctx)

        out = buf.getvalue()
        self.assertIn("[INFO] Running: nix profile install", out)
        self.assertIn("Nix flake output 'default' successfully installed.", out)

        # Ensure the nix command was actually invoked
        system_mock.assert_called_with(
            f"nix profile install {self.repo_dir}#default"
        )

    def test_nix_flake_run_mandatory_failure_raises(self):
        """
        For a generic repository (identifier not pkgmgr/package-manager),
        `default` is mandatory and a non-zero exit code should raise SystemExit
        with the real exit code (e.g. 1, not 256).
        """
        ctx = DummyCtx(identifier="some-lib", repo_dir=self.repo_dir)
        installer = NixFlakeInstaller()

        buf = io.StringIO()
        with patch(
            "pkgmgr.actions.install.installers.nix_flake.shutil.which"
        ) as which_mock, patch(
            "pkgmgr.actions.install.installers.nix_flake.os.system"
        ) as system_mock, redirect_stdout(buf):
            self._enable_nix_in_module(which_mock)

            # Simulate os.system returning the encoded status for exit code 1
            # (os.system encodes the exit code as exit_code << 8)
            system_mock.return_value = 1 << 8

            self.assertTrue(installer.supports(ctx))

            with self.assertRaises(SystemExit) as cm:
                installer.run(ctx)

            # The real exit code should be 1 (not 256)
            self.assertEqual(cm.exception.code, 1)

        out = buf.getvalue()
        self.assertIn("[INFO] Running: nix profile install", out)
        self.assertIn("[Error] Failed to install Nix flake output 'default'", out)
        self.assertIn("[Error] Command exited with code 1", out)

    def test_nix_flake_run_optional_failure_does_not_raise(self):
        """
        For the package-manager repository, the 'default' output is optional.
        Failure to install it must not raise, but should log a warning instead.
        """
        ctx = DummyCtx(identifier="pkgmgr", repo_dir=self.repo_dir)
        installer = NixFlakeInstaller()

        calls = []

        def fake_system(cmd: str) -> int:
            calls.append(cmd)
            # First call (pkgmgr) → success
            if len(calls) == 1:
                return 0
            # Second call (default) → failure (exit code 1 encoded)
            return 1 << 8

        buf = io.StringIO()
        with patch(
            "pkgmgr.actions.install.installers.nix_flake.shutil.which"
        ) as which_mock, patch(
            "pkgmgr.actions.install.installers.nix_flake.os.system",
            side_effect=fake_system,
        ), redirect_stdout(buf):
            self._enable_nix_in_module(which_mock)

            self.assertTrue(installer.supports(ctx))

            # Optional failure must NOT raise
            installer.run(ctx)

        out = buf.getvalue()

        # Both outputs should have been mentioned
        self.assertIn(
            "attempting to install profile outputs: pkgmgr, default", out
        )

        # First output ("pkgmgr") succeeded
        self.assertIn(
            "Nix flake output 'pkgmgr' successfully installed.", out
        )

        # Second output ("default") failed but did not raise
        self.assertIn(
            "[Error] Failed to install Nix flake output 'default'", out
        )
        self.assertIn("[Error] Command exited with code 1", out)
        self.assertIn(
            "Continuing despite failure to install optional output 'default'.",
            out,
        )

        # Ensure we actually called os.system twice (pkgmgr and default)
        self.assertEqual(len(calls), 2)
        self.assertIn(
            f"nix profile install {self.repo_dir}#pkgmgr",
            calls[0],
        )
        self.assertIn(
            f"nix profile install {self.repo_dir}#default",
            calls[1],
        )

    def test_nix_flake_supports_respects_disable_env(self):
        """
        PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 must disable the installer,
        even if flake.nix exists and nix is available.
        """
        ctx = DummyCtx(identifier="pkgmgr", repo_dir=self.repo_dir)
        installer = NixFlakeInstaller()

        with patch(
            "pkgmgr.actions.install.installers.nix_flake.shutil.which"
        ) as which_mock:
            self._enable_nix_in_module(which_mock)
            os.environ["PKGMGR_DISABLE_NIX_FLAKE_INSTALLER"] = "1"

            self.assertFalse(installer.supports(ctx))


if __name__ == "__main__":
    unittest.main()
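Aside: the removed tests above rely on the fact that on POSIX systems `os.system()` returns an encoded wait status (the exit code shifted left by 8 bits), which is why the installer is expected to report 1 rather than 256. A minimal sketch of that decoding, assuming Python 3.9+ and not taken from the installer itself:

```python
import os

def run_and_get_exit_code(cmd: str) -> int:
    """Run a shell command and return its real exit code."""
    status = os.system(cmd)
    # On POSIX, os.system() returns a wait status; for a normal exit this is
    # (exit_code << 8). waitstatus_to_exitcode() decodes it (Python 3.9+).
    return os.waitstatus_to_exitcode(status)

# Example: a command exiting with code 1 yields status 256 from os.system(),
# which decodes back to 1 here (matching the `1 << 8` stub in the removed test).
```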