Compare commits

43 Commits

| SHA1 |
|---|
| 72fc69c2f8 |
| 6d8c6deae8 |
| 6c116a029e |
| 3eb7c81fa1 |
| 0334f477fd |
| 8bb99c99b7 |
| 587cb2e516 |
| fcf9d4b59b |
| b483dbfaad |
| 9630917570 |
| 6a4432dd04 |
| cfb91d825a |
| a3b21f23fc |
| e49dd85200 |
| c9dec5ecd6 |
| f3c5460e48 |
| 39b16b87a8 |
| 26c9d79814 |
| 2776d18a42 |
| 7057ccfb95 |
| 1807949c6f |
| d611720b8f |
| bf871650a8 |
| 5ca1adda7b |
| acb18adf76 |
| c18490f5d3 |
| eeda944b73 |
| 52cfbebba4 |
| f4385807f1 |
| e9e083c9dd |
| 3218b2b39f |
| ba296a79c9 |
| 62e05e2f5b |
| 77d8b68ba5 |
| bb0a801396 |
| ee968efc4b |
| 644b2b8fa0 |
| 0f74907f82 |
| 5a8b1b11de |
| 389ec40163 |
| 1d03055491 |
| 7775c6d974 |
| a24a819511 |
@@ -25,7 +25,5 @@ venv/
.DS_Store
Thumbs.db

# Arch pkg artifacts
*.pkg.tar.*
*.log
package-manager-*
# Logs
*.log
26  .github/workflows/ci.yml  vendored  Normal file

@@ -0,0 +1,26 @@
name: CI

on:
  push:
    branches-ignore:
      - main
  pull_request:

jobs:
  test-unit:
    uses: ./.github/workflows/test-unit.yml

  test-integration:
    uses: ./.github/workflows/test-integration.yml

  test-container:
    uses: ./.github/workflows/test-container.yml

  test-e2e:
    uses: ./.github/workflows/test-e2e.yml

  test-virgin-user:
    uses: ./.github/workflows/test-virgin-user.yml

  test-virgin-root:
    uses: ./.github/workflows/test-virgin-root.yml
98  .github/workflows/mark-stable.yml  vendored  Normal file

@@ -0,0 +1,98 @@
name: Mark stable commit

on:
  push:
    branches:
      - main # still run tests for main
    tags:
      - 'v*' # run tests for version tags (e.g. v0.9.1)

jobs:
  test-unit:
    uses: ./.github/workflows/test-unit.yml

  test-integration:
    uses: ./.github/workflows/test-integration.yml

  test-container:
    uses: ./.github/workflows/test-container.yml

  test-e2e:
    uses: ./.github/workflows/test-e2e.yml

  test-virgin-user:
    uses: ./.github/workflows/test-virgin-user.yml

  test-virgin-root:
    uses: ./.github/workflows/test-virgin-root.yml

  mark-stable:
    needs:
      - test-unit
      - test-integration
      - test-container
      - test-e2e
      - test-virgin-user
      - test-virgin-root
    runs-on: ubuntu-latest

    # Only run this job if the push is for a version tag (v*)
    if: startsWith(github.ref, 'refs/tags/v')

    permissions:
      contents: write # Required to move/update the tag

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true # We need all tags for version comparison

      - name: Move 'stable' tag only if this version is the highest
        run: |
          set -euo pipefail

          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          echo "Ref: $GITHUB_REF"
          echo "SHA: $GITHUB_SHA"

          VERSION="${GITHUB_REF#refs/tags/}"
          echo "Current version tag: ${VERSION}"

          echo "Collecting all version tags..."
          ALL_V_TAGS="$(git tag --list 'v*' || true)"

          if [[ -z "${ALL_V_TAGS}" ]]; then
            echo "No version tags found. Skipping stable update."
            exit 0
          fi

          echo "All version tags:"
          echo "${ALL_V_TAGS}"

          # Determine highest version using natural version sorting
          LATEST_TAG="$(printf '%s\n' ${ALL_V_TAGS} | sort -V | tail -n1)"

          echo "Highest version tag: ${LATEST_TAG}"

          if [[ "${VERSION}" != "${LATEST_TAG}" ]]; then
            echo "Current version ${VERSION} is NOT the highest version."
            echo "Stable tag will NOT be updated."
            exit 0
          fi

          echo "Current version ${VERSION} IS the highest version."
          echo "Updating 'stable' tag..."

          # Delete existing stable tag (local + remote)
          git tag -d stable 2>/dev/null || true
          git push origin :refs/tags/stable || true

          # Create new stable tag
          git tag stable "$GITHUB_SHA"
          git push origin stable

          echo "✅ Stable tag updated to ${VERSION}."
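The tag-promotion step above hinges on GNU `sort -V` (natural version sort) to decide whether the pushed tag is the highest one. A minimal sketch of that check, runnable outside CI; the tag names below are made up for illustration, whereas in the workflow they come from `git tag --list 'v*'`:

```bash
# Illustrative only: mimics the LATEST_TAG computation from the workflow above.
ALL_V_TAGS="v0.9.1
v0.10.2
v1.0.0"

LATEST_TAG="$(printf '%s\n' ${ALL_V_TAGS} | sort -V | tail -n1)"
echo "Highest version tag: ${LATEST_TAG}"
# -> v1.0.0; note that a plain lexical `sort` would order v0.9.1 after v0.10.2,
#    which is exactly the mistake the natural version sort avoids.
```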
21  .github/workflows/test-container.yml  vendored

@@ -1,25 +1,28 @@
name: Test OS Containers

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-container:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Show commit SHA
        run: git rev-parse HEAD

      - name: Show Docker version
        run: docker version

      - name: Run container tests
        run: make test-container
      - name: Run container tests (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-container
20  .github/workflows/test-e2e.yml  vendored

@@ -1,18 +1,16 @@
name: Test End-To-End

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-e2e:
    runs-on: ubuntu-latest
    timeout-minutes: 60 # E2E + all distros can be heavier
    timeout-minutes: 60 # E2E can be heavier
    strategy:
      fail-fast: false
      matrix:
        distro: [arch, debian, ubuntu, fedora, centos]

    steps:
      - name: Checkout repository
@@ -21,5 +19,7 @@ jobs:
      - name: Show Docker version
        run: docker version

      - name: Run E2E tests via make (all distros)
        run: make test-e2e
      - name: Run E2E tests via make (${{ matrix.distro }})
        run: |
          set -euo pipefail
          distro="${{ matrix.distro }}" make test-e2e
10  .github/workflows/test-integration.yml  vendored

@@ -1,13 +1,7 @@
name: Test Code Integration

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-integration:
@@ -22,4 +16,4 @@ jobs:
        run: docker version

      - name: Run integration tests via make (Arch container)
        run: make test-integration DISTROS="arch"
        run: make test-integration distro="arch"
10  .github/workflows/test-unit.yml  vendored

@@ -1,13 +1,7 @@
name: Test Units

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-unit:
@@ -22,4 +16,4 @@ jobs:
        run: docker version

      - name: Run unit tests via make (Arch container)
        run: make test-unit DISTROS="arch"
        run: make test-unit distro="arch"
8  .github/workflows/test-virgin-root.yml  vendored

@@ -1,13 +1,7 @@
name: Test Virgin Root

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-virgin-root:
8  .github/workflows/test-virgin-user.yml  vendored

@@ -1,13 +1,7 @@
name: Test Virgin User

on:
  push:
    branches:
      - main
      - master
      - develop
      - "*"
  pull_request:
  workflow_call:

jobs:
  test-virgin-user:
12  .gitignore  vendored

@@ -14,17 +14,8 @@ venv/
dist/
build/*
*.egg-info/
pkg
src/source
package-manager-*

# debian
debian/package-manager/
debian/debhelper-build-stamp
debian/files
debian/.debhelper/
debian/package-manager.substvars

# Editor files
.vscode/
.idea/
@@ -35,6 +26,9 @@ Thumbs.db

# Nix Cache to speed up tests
.nix/
.nix-dev-installed

# Ignore logs
*.log

result
93  CHANGELOG.md

@@ -1,3 +1,96 @@
## [1.0.0] - 2025-12-11

* **1.0.0 – Official Stable Release 🎉**
  *First stable release of PKGMGR, the multi-distro development and package workflow manager.*

---

**Key Features**

**Core Functionality**

* Manage many repositories with one CLI: `clone`, `update`, `install`, `list`, `path`, `config`
* Proxy wrappers for Git, Docker/Compose and Make
* Multi-repo execution with safe *preview mode*
* Mirror management: `mirror list/diff/merge/setup`

**Releases & Versioning**

* Automated SemVer bumps, tagging and changelog generation
* Supports PKGBUILD, Debian, RPM, pyproject.toml, flake.nix

**Developer Tools**

* Open repositories in VS Code, file manager or terminal
* Unified workflows across all major Linux distros

**Nix Integration**

* Cross-distro reproducible builds via Nix flakes
* CI-tested across all supported environments

---

**Summary**
PKGMGR 1.0.0 unifies repository management, build tooling, release automation and reproducible multi-distro workflows into one cohesive CLI tool.

*This is the first official stable release.*


## [0.10.2] - 2025-12-11

* Stable tag now updates only when a new highest version is released.
* Debian package now includes sudo to ensure privilege escalation works reliably.
* Nix setup is significantly more resilient with retries, correct permissions, and better environment handling.
* AUR builder setup uses retries so yay installs succeed even under network instability.
* Nix flake installation now fails only on mandatory parts; optional outputs no longer block installation.


## [0.10.1] - 2025-12-11

* Fixed Debian/Ubuntu to pass container e2e tests


## [0.10.0] - 2025-12-11

**Mirror System**

* Added SSH mirror support including multi-push and remote probing
* Introduced mirror management commands and refactored the CLI parser into modules

**CI/CD**

* Migrated to reusable workflows with improved debugging instrumentation
* Made stable-tag automation reliable for workflow_run events and permissions
* Ensured deterministic test results by rebuilding all test containers with no-cache

**E2E and Container Tests**

* Fixed Git safe.directory handling across all containers
* Restored Dockerfile ENTRYPOINT to resolve Nix TLS issues
* Fixed missing volume errors and hardened the E2E runner
* Added full Nix flake E2E test matrix across all distro containers
* Disabled Nix sandboxing for cross-distro builds where required

**Nix and Python Environment**

* Unified Nix Python environment and introduced lazy CLI imports
* Ensured PyYAML availability and improved Python 3.13 compatibility
* Refactored flake.nix to remove side effects and rely on generic python3

**Packaging**

* Removed Debian’s hard dependency on Nix
* Restructured packaging layout and refined build paths
* Excluded assets from Arch PKGBUILD rsync
* Cleaned up obsolete ignore files

**Repository Layout**

* Restructured repository to align local, Nix-based, and distro-based build workflows
* Added Arch support and refined build/purge scripts


## [0.9.1] - 2025-12-10

* Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
@@ -1,9 +1,12 @@
# ------------------------------------------------------------
# Base image selector — overridden by Makefile
# ------------------------------------------------------------
ARG BASE_IMAGE=archlinux:latest
ARG BASE_IMAGE
FROM ${BASE_IMAGE}

RUN echo "BASE_IMAGE=${BASE_IMAGE}" && \
    cat /etc/os-release || true

# ------------------------------------------------------------
# Nix environment defaults
#
3  MIRRORS  Normal file

@@ -0,0 +1,3 @@
git@github.com:kevinveenbirkenbach/package-manager.git
ssh://git@git.veen.world:2201/kevinveenbirkenbach/pkgmgr.git
ssh://git@code.cymais.cloud:2201/kevinveenbirkenbach/pkgmgr.git
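The `pkgmgr mirror` subcommands mentioned in the README operate on exactly this kind of mirror list. A hedged sketch of typical invocations; the flags are taken from the README's usage examples, and the behavior described in the comments is assumed rather than confirmed here:

```bash
pkgmgr mirror list                    # show the mirrors configured for the selected repositories
pkgmgr mirror setup --preview --all   # print the remote changes setup would make, without applying them
```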
14  Makefile

@@ -2,11 +2,15 @@
	test build build-no-cache test-unit test-e2e test-integration \
	test-container

# Distro
# Options: arch debian ubuntu fedora centos
distro ?= arch
export distro

# ------------------------------------------------------------
# Distro list and base images
# Base images
# (kept for documentation/reference; actual build logic is in scripts/build)
# ------------------------------------------------------------
DISTROS := arch debian ubuntu fedora centos
BASE_IMAGE_ARCH := archlinux:latest
BASE_IMAGE_DEBIAN := debian:stable-slim
BASE_IMAGE_UBUNTU := ubuntu:latest
@@ -14,7 +18,6 @@ BASE_IMAGE_FEDORA := fedora:latest
BASE_IMAGE_CENTOS := quay.io/centos/centos:stream9

# Make them available in scripts
export DISTROS
export BASE_IMAGE_ARCH
export BASE_IMAGE_DEBIAN
export BASE_IMAGE_UBUNTU
@@ -65,6 +68,11 @@ build-missing:
# Combined test target for local + CI (unit + integration + e2e)
test: test-container test-unit test-integration test-e2e

delete-volumes:
	@docker volume rm pkgmgr_nix_store_${distro} pkgmgr_nix_cache_${distro} || true

purge: delete-volumes build-no-cache

# ------------------------------------------------------------
# System install (native packages, calls scripts/installation/run-package.sh)
# ------------------------------------------------------------
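Because the Makefile defaults `distro` to `arch` and exports it, the same targets can be pointed at any supported base image, which is how the CI matrix invokes them. A few illustrative invocations (the per-target behavior follows the Makefile and workflow changes above):

```bash
make test-container                 # uses the default distro (arch)
make test-e2e distro=debian         # same target against the Debian test image
distro=fedora make test-container   # environment-variable form used by the CI workflows
make purge distro=arch              # drop the per-distro Nix volumes, then rebuild with --no-cache
```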
178
README.md
178
README.md
@@ -1,70 +1,186 @@
|
||||
# Package Manager🤖📦
|
||||
# Package Manager 🤖📦
|
||||
|
||||
[](https://github.com/sponsors/kevinveenbirkenbach)
|
||||
[](https://www.patreon.com/c/kevinveenbirkenbach)
|
||||
[](https://buymeacoffee.com/kevinveenbirkenbach) [](https://s.veen.world/paypaldonate)
|
||||
[](https://www.patreon.com/c/kevinveenbirkenbach)
|
||||
[](https://buymeacoffee.com/kevinveenbirkenbach)
|
||||
[](https://s.veen.world/paypaldonate)
|
||||
[](LICENSE)
|
||||
[](https://github.com/kevinveenbirkenbach/package-manager)
|
||||
|
||||
*Kevins's* Package Manager is a configurable Python tool designed to manage multiple repositories via Bash. It automates common Git operations such as clone, pull, push, status, and more. Additionally, it handles the creation of executable wrappers and alias links for your repositories.
|
||||
**Kevin's Package Manager (PKGMGR)** is a *multi-distro* package manager and workflow orchestrator.
|
||||
It helps you **develop, package, release and manage projects across multiple Linux-based
|
||||
operating systems** (Arch, Debian, Ubuntu, Fedora, CentOS, …).
|
||||
|
||||
PKGMGR is implemented in **Python** and uses **Nix (flakes)** as a foundation for
|
||||
distribution-independent builds and tooling. On top of that it provides a rich
|
||||
CLI that proxies common developer tools (Git, Docker, Make, …) and glues them
|
||||
together into repeatable development workflows.
|
||||
|
||||
---
|
||||
|
||||
## Why PKGMGR? 🧠
|
||||
|
||||
Traditional distro package managers like `apt`, `pacman` or `dnf` focus on a
|
||||
single operating system. PKGMGR instead focuses on **your repositories and
|
||||
development lifecycle**:
|
||||
|
||||
* one configuration for all your repos,
|
||||
* one CLI to interact with them,
|
||||
* one Nix-based layer to keep tooling reproducible across distros.
|
||||
|
||||
You keep using your native package manager where it makes sense – PKGMGR
|
||||
coordinates the *development and release flow* around it.
|
||||
|
||||
---
|
||||
|
||||
## Features 🚀
|
||||
|
||||
- **Installation & Setup:**
|
||||
Create executable wrappers with auto-detected commands (e.g. `main.sh` or `main.py`).
|
||||
|
||||
- **Git Operations:**
|
||||
Easily perform `git pull`, `push`, `status`, `commit`, `diff`, `add`, `show`, and `checkout` with extra parameters passed through.
|
||||
|
||||
- **Configuration Management:**
|
||||
Manage repository configurations via a default file (`config/defaults.yaml`) and a user-specific file (`config/config.yaml`). Initialize, add, delete, or ignore entries using subcommands.
|
||||
|
||||
- **Path & Listing:**
|
||||
Display repository paths or list all configured packages with their details.
|
||||
|
||||
- **Custom Aliases:**
|
||||
Generate and manage custom aliases for easy command invocation.
|
||||
### Multi-distro development & packaging
|
||||
|
||||
* Manage **many repositories at once** from a single `config/config.yaml`.
|
||||
* Drive full **release pipelines** across Linux distributions using:
|
||||
|
||||
* Nix flakes (`flake.nix`)
|
||||
* PyPI style builds (`pyproject.toml`)
|
||||
* OS packages (PKGBUILD, Debian control/changelog, RPM spec)
|
||||
* Ansible Galaxy metadata and more.
|
||||
|
||||
### Rich CLI for daily work
|
||||
|
||||
All commands are exposed via the `pkgmgr` CLI and are available on every distro:
|
||||
|
||||
* **Repository management**
|
||||
|
||||
* `clone`, `update`, `install`, `delete`, `deinstall`, `path`, `list`, `config`
|
||||
* **Git proxies**
|
||||
|
||||
* `pull`, `push`, `status`, `diff`, `add`, `show`, `checkout`,
|
||||
`reset`, `revert`, `rebase`, `commit`, `branch`
|
||||
* **Docker & Compose orchestration**
|
||||
|
||||
* `build`, `up`, `down`, `exec`, `ps`, `start`, `stop`, `restart`
|
||||
* **Release toolchain**
|
||||
|
||||
* `version`, `release`, `changelog`, `make`
|
||||
* **Mirror & workflow helpers**
|
||||
|
||||
* `mirror` (list/diff/merge/setup), `shell`, `terminal`, `code`, `explore`
|
||||
|
||||
Many of these commands support `--preview` mode so you can inspect the
|
||||
underlying Git or Docker calls without executing them.
|
||||
|
||||
### Full development workflows
|
||||
|
||||
PKGMGR is not just a helper around Git commands. Combined with its release and
|
||||
versioning features it can drive **end-to-end workflows**:
|
||||
|
||||
1. Clone and mirror repositories.
|
||||
2. Run tests and builds through `make` or Nix.
|
||||
3. Bump versions, update changelogs and tags.
|
||||
4. Build distro-specific packages.
|
||||
5. Keep all mirrors and working copies in sync.
|
||||
|
||||
The extensive E2E tests (`tests/e2e/`) and GitHub Actions workflows (including
|
||||
“virgin user” and “virgin root” Arch tests) validate these flows across
|
||||
different Linux environments.
|
||||
|
||||
---
|
||||
|
||||
## Architecture & Setup Map 🗺️
|
||||
|
||||
The following diagram provides a full overview of PKGMGR’s package structure,
|
||||
installation layers, and setup controller flow:
|
||||
The following diagram gives a full overview of:
|
||||
|
||||
* PKGMGR’s package structure,
|
||||
* the layered installers (OS, foundation, Python, Makefile),
|
||||
* and the setup controller that decides which layer to use on a given system.
|
||||
|
||||

|
||||
|
||||
**Diagram status:** *Stand: 10. Dezember 2025*
|
||||
**Always-up-to-date version:** https://s.veen.world/pkgmgrmp
|
||||
**Diagram status:** 11 December 2025
|
||||
**Always-up-to-date version:** [https://s.veen.world/pkgmgrmp](https://s.veen.world/pkgmgrmp)
|
||||
|
||||
---
|
||||
|
||||
## Installation ⚙️
|
||||
|
||||
Clone the repository and ensure your `~/.local/bin` is in your system PATH:
|
||||
### 1. Get the latest stable version
|
||||
|
||||
For a stable setup, use the **latest tagged release** (the tag pointed to by
|
||||
`latest`):
|
||||
|
||||
```bash
|
||||
git clone https://github.com/kevinveenbirkenbach/package-manager.git
|
||||
cd package-manager
|
||||
|
||||
# Optional but recommended: checkout the latest stable tag
|
||||
git fetch --tags
|
||||
git checkout "$(git describe --tags --abbrev=0)"
|
||||
```
|
||||
|
||||
Install make and pip if not installed yet:
|
||||
### 2. Install via Make
|
||||
|
||||
The project ships with a Makefile that encapsulates the typical installation
|
||||
flow. On most systems you only need:
|
||||
|
||||
```bash
|
||||
pacman -S make python-pip
|
||||
# Ensure make, Python and pip are installed via your distro package manager
|
||||
# (e.g. pacman -S make python python-pip, apt install make python3-pip, ...)
|
||||
|
||||
make install
|
||||
```
|
||||
|
||||
Then, run the following command to set up the project:
|
||||
This will:
|
||||
|
||||
* create or reuse a Python virtual environment,
|
||||
* install PKGMGR (and its Python dependencies) into that environment,
|
||||
* expose the `pkgmgr` executable on your PATH (usually via `~/.local/bin`),
|
||||
* prepare Nix-based integration where available so PKGMGR can build and manage
|
||||
packages distribution-independently.
|
||||
|
||||
For development use, you can also run:
|
||||
|
||||
```bash
|
||||
make setup
|
||||
```
|
||||
|
||||
The `make setup` command will:
|
||||
- Make `main.py` executable.
|
||||
- Install required packages from `requirements.txt`.
|
||||
- Execute `python main.py install` to complete the installation.
|
||||
which prepares the environment and leaves you with a fully wired development
|
||||
workspace (including Nix, tests and scripts).
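Either way, a quick sanity check after installation — assuming `~/.local/bin` is on your PATH, as noted above — is simply:

```bash
command -v pkgmgr     # should resolve to the wrapper created by the installer
pkgmgr --help         # prints the list of available subcommands
```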
|
||||
|
||||
---
|
||||
|
||||
## Usage 🧰
|
||||
|
||||
After installation, the main entry point is:
|
||||
|
||||
```bash
|
||||
pkgmgr --help
|
||||
```
|
||||
|
||||
This prints a list of all available subcommands, for example:
|
||||
|
||||
* `pkgmgr list --all` – show all repositories in the config
|
||||
* `pkgmgr update --all --clone-mode https` – update every repository
|
||||
* `pkgmgr release patch --preview` – simulate a patch release
|
||||
* `pkgmgr version --all` – show version information for all repositories
|
||||
* `pkgmgr mirror setup --preview --all` – prepare Git mirrors (no changes in preview)
|
||||
* `pkgmgr make install --preview pkgmgr` – preview make install for the pkgmgr repo
|
||||
|
||||
The help for each command is available via:
|
||||
|
||||
```bash
|
||||
pkgmgr <command> --help
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## License 📄
|
||||
|
||||
This project is licensed under the MIT License.
|
||||
See the [LICENSE](LICENSE) file for details.
|
||||
|
||||
---
|
||||
|
||||
## Author 👤
|
||||
|
||||
Kevin Veen-Birkenbach
|
||||
Kevin Veen-Birkenbach
|
||||
[https://www.veen.world](https://www.veen.world)
|
||||
|
||||
6  TODO.md  Normal file

@@ -0,0 +1,6 @@
# to-dos

For the following, check out the implementation map:

- Implement TAGS
- Implement SIGNING_KEY

BIN  assets/map.png  (binary file not shown; before: 1.9 MiB, after: 1.9 MiB)
27  flake.lock  generated  Normal file

@@ -0,0 +1,27 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1765186076,
        "narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
35
flake.nix
35
flake.nix
@@ -26,12 +26,17 @@
|
||||
packages = forAllSystems (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
# Single source of truth for pkgmgr: Python 3.11
|
||||
# - Matches pyproject.toml: requires-python = ">=3.11"
|
||||
# - Uses python311Packages so that PyYAML etc. are available
|
||||
python = pkgs.python311;
|
||||
pyPkgs = pkgs.python311Packages;
|
||||
in
|
||||
rec {
|
||||
pkgmgr = pyPkgs.buildPythonApplication {
|
||||
pname = "package-manager";
|
||||
version = "0.9.1";
|
||||
version = "1.0.0";
|
||||
|
||||
# Use the git repo as source
|
||||
src = ./.;
|
||||
@@ -45,7 +50,7 @@
|
||||
pyPkgs.wheel
|
||||
];
|
||||
|
||||
# Runtime dependencies (matches [project.dependencies])
|
||||
# Runtime dependencies (matches [project.dependencies] in pyproject.toml)
|
||||
propagatedBuildInputs = [
|
||||
pyPkgs.pyyaml
|
||||
pyPkgs.pip
|
||||
@@ -55,6 +60,7 @@
|
||||
|
||||
pythonImportsCheck = [ "pkgmgr" ];
|
||||
};
|
||||
|
||||
default = pkgmgr;
|
||||
}
|
||||
);
|
||||
@@ -65,29 +71,42 @@
|
||||
devShells = forAllSystems (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
pkgmgrPkg = self.packages.${system}.pkgmgr;
|
||||
|
||||
ansiblePkg =
|
||||
if pkgs ? ansible-core then pkgs.ansible-core
|
||||
else pkgs.ansible;
|
||||
|
||||
# Python 3 + pip for everything that runs "python3 -m pip"
|
||||
pythonWithPip = pkgs.python3.withPackages (ps: [
|
||||
# Use the same Python version as the package (3.11)
|
||||
python = pkgs.python311;
|
||||
|
||||
pythonWithDeps = python.withPackages (ps: [
|
||||
ps.pip
|
||||
ps.pyyaml
|
||||
]);
|
||||
in
|
||||
{
|
||||
default = pkgs.mkShell {
|
||||
buildInputs = [
|
||||
pythonWithPip
|
||||
pkgmgrPkg
|
||||
pythonWithDeps
|
||||
pkgs.git
|
||||
ansiblePkg
|
||||
];
|
||||
|
||||
shellHook = ''
|
||||
# Ensure our Python with dependencies is preferred on PATH
|
||||
export PATH=${pythonWithDeps}/bin:$PATH
|
||||
|
||||
# Ensure src/ layout is importable:
|
||||
# pkgmgr lives in ./src/pkgmgr
|
||||
export PYTHONPATH="$PWD/src:${PYTHONPATH:-}"
|
||||
# Also add repo root in case tools/tests rely on it
|
||||
export PYTHONPATH="$PWD:$PYTHONPATH"
|
||||
|
||||
echo "Entered pkgmgr development shell for ${system}"
|
||||
echo "pkgmgr CLI is available via the flake build"
|
||||
echo "Python used in this shell:"
|
||||
python --version
|
||||
echo "pkgmgr CLI (from source) is available via:"
|
||||
echo " python -m pkgmgr.cli --help"
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
||||
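Since the flake exposes both a `pkgmgr` package and a dev shell, typical local usage looks roughly like this. This is a sketch assuming flakes are enabled on the host; the `result/bin/pkgmgr` name is inferred from the `[project.scripts]` entry point rather than stated in the diff:

```bash
nix develop                    # enter the dev shell (Python 3.11 + PyYAML, git, ansible)
python -m pkgmgr.cli --help    # run the CLI from the source tree, as the shellHook suggests
nix build .#pkgmgr             # build the packaged application
./result/bin/pkgmgr --help     # entry point declared in pyproject's [project.scripts]
```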
10  main.py

@@ -1,6 +1,14 @@
#!/usr/bin/env python3
import sys
from pathlib import Path

# Ensure local src/ overrides installed package
ROOT = Path(__file__).resolve().parent
SRC = ROOT / "src"
if SRC.is_dir():
    sys.path.insert(0, str(SRC))

from pkgmgr.cli import main

if __name__ == "__main__":
    main()
    main()
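The `sys.path` shim above means a checkout can be exercised without installing anything. A sketch of both entry points, assuming `main.py` is executable and `list --all` behaves as documented in the README:

```bash
./main.py list --all        # runs against src/pkgmgr from the working copy
pkgmgr list --all           # runs the installed package instead
```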
6
packaging/arch/.gitignore
vendored
Normal file
6
packaging/arch/.gitignore
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
# Arch pkg artifacts
|
||||
*.pkg.tar.*
|
||||
*.log
|
||||
package-manager-*
|
||||
src/
|
||||
pkg/
|
||||
@@ -15,7 +15,7 @@ makedepends=('rsync')
|
||||
install=${pkgname}.install
|
||||
|
||||
# Local source checkout — avoids the tarball requirement.
|
||||
# This assumes you build the package from inside the main project repository.
|
||||
# We build from the project root (two levels above packaging/arch/).
|
||||
source=()
|
||||
sha256sums=()
|
||||
|
||||
@@ -24,12 +24,18 @@ _srcdir_name="source"
|
||||
|
||||
prepare() {
|
||||
mkdir -p "$srcdir/$_srcdir_name"
|
||||
|
||||
local project_root
|
||||
project_root="$(cd "$startdir/../.." && pwd)"
|
||||
|
||||
rsync -a \
|
||||
--exclude=".git" \
|
||||
--exclude=".github" \
|
||||
--exclude="pkg" \
|
||||
--exclude="srcpkg" \
|
||||
"$startdir/" "$srcdir/$_srcdir_name/"
|
||||
--exclude="packaging" \
|
||||
--exclude="assets" \
|
||||
"$project_root/" "$srcdir/$_srcdir_name/"
|
||||
}
|
||||
|
||||
build() {
|
||||
@@ -62,7 +68,8 @@ package() {
|
||||
"$pkgdir/usr/lib/package-manager/PKGBUILD" \
|
||||
"$pkgdir/usr/lib/package-manager/Dockerfile" \
|
||||
"$pkgdir/usr/lib/package-manager/debian" \
|
||||
"$pkgdir/usr/lib/package-manager/packaging" \
|
||||
"$pkgdir/usr/lib/package-manager/.gitignore" \
|
||||
"$pkgdir/usr/lib/package-manager/__pycache__" \
|
||||
"$pkgdir/usr/lib/package-manager/.gitkeep"
|
||||
"$pkgdir/usr/lib/package-manager/.gitkeep" || true
|
||||
}
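The `prepare()` function above rsyncs the project root (two levels up from `packaging/arch/`) into `$srcdir`, so the Arch package is meant to be built from inside the checkout rather than from a release tarball. A hedged sketch of that flow, using standard `makepkg` behavior:

```bash
cd packaging/arch
makepkg -si    # build from the local working copy and install; build artifacts are covered by the new .gitignore
```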
|
||||
6  packaging/debian/.gitignore  vendored  Normal file

@@ -0,0 +1,6 @@
# debian
package-manager/
debhelper-build-stamp
files
.debhelper/
package-manager.substvars

@@ -9,7 +9,7 @@ Homepage: https://github.com/kevinveenbirkenbach/package-manager

Package: package-manager
Architecture: any
Depends: nix, ${misc:Depends}
Depends: sudo, ${misc:Depends}
Description: Wrapper that runs Kevin's package-manager via Nix flake
 This package provides the `pkgmgr` command, which runs Kevin's package
 manager via a local Nix flake
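With the control file now depending on `sudo` rather than `nix`, the package builds like any other debhelper-based package. A rough sketch under the assumption that `dpkg-dev`/`debhelper` are installed and that the build is run from the directory containing `debian/` in the restructured layout:

```bash
dpkg-buildpackage -us -uc -b                    # build an unsigned binary package
sudo apt install ../package-manager_*.deb       # install the resulting .deb
```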
@@ -1,109 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from pkgmgr.core.config.load import load_config
|
||||
|
||||
from .context import CLIContext
|
||||
from .parser import create_parser
|
||||
from .dispatch import dispatch_command
|
||||
|
||||
__all__ = ["CLIContext", "create_parser", "dispatch_command", "main"]
|
||||
|
||||
|
||||
# User config lives in the home directory:
|
||||
# ~/.config/pkgmgr/config.yaml
|
||||
USER_CONFIG_PATH = os.path.expanduser("~/.config/pkgmgr/config.yaml")
|
||||
|
||||
DESCRIPTION_TEXT = """\
|
||||
\033[1;32mPackage Manager 🤖📦\033[0m
|
||||
\033[3mKevin's Package Manager is a multi-repository, multi-package, and multi-format
|
||||
development tool crafted by and designed for:\033[0m
|
||||
\033[1;34mKevin Veen-Birkenbach\033[0m
|
||||
\033[4mhttps://www.veen.world/\033[0m
|
||||
|
||||
\033[1mOverview:\033[0m
|
||||
A powerful toolchain that unifies and automates workflows across heterogeneous
|
||||
project ecosystems. pkgmgr is not only a package manager — it is a full
|
||||
developer-oriented orchestration tool.
|
||||
|
||||
It automatically detects, merges, and processes metadata from multiple
|
||||
dependency formats, including:
|
||||
• \033[1;33mPython:\033[0m pyproject.toml, requirements.txt
|
||||
• \033[1;33mNix:\033[0m flake.nix
|
||||
• \033[1;33mArch Linux:\033[0m PKGBUILD
|
||||
• \033[1;33mAnsible:\033[0m requirements.yml
|
||||
|
||||
This allows pkgmgr to perform installation, updates, verification, dependency
|
||||
resolution, and synchronization across complex multi-repo environments — with a
|
||||
single unified command-line interface.
|
||||
|
||||
\033[1mDeveloper Tools:\033[0m
|
||||
pkgmgr includes an integrated toolbox to enhance daily development workflows:
|
||||
|
||||
• \033[1;33mVS Code integration:\033[0m Auto-generate and open multi-repo workspaces
|
||||
• \033[1;33mTerminal integration:\033[0m Open repositories in new GNOME Terminal tabs
|
||||
• \033[1;33mExplorer integration:\033[0m Open repositories in your file manager
|
||||
• \033[1;33mRelease automation:\033[0m Version bumping, changelog updates, and tagging
|
||||
• \033[1;33mBatch operations:\033[0m Execute shell commands across multiple repositories
|
||||
• \033[1;33mGit/Docker/Make wrappers:\033[0m Unified command proxying for many tools
|
||||
|
||||
\033[1mCapabilities:\033[0m
|
||||
• Clone, pull, verify, update, and manage many repositories at once
|
||||
• Resolve dependencies across languages and ecosystems
|
||||
• Standardize install/update workflows
|
||||
• Create symbolic executable wrappers for any project
|
||||
• Merge configuration from default + user config layers
|
||||
|
||||
Use pkgmgr as both a robust package management framework and a versatile
|
||||
development orchestration tool.
|
||||
|
||||
For detailed help on each command, use:
|
||||
\033[1mpkgmgr <command> --help\033[0m
|
||||
"""
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""
|
||||
Entry point for the pkgmgr CLI.
|
||||
"""
|
||||
|
||||
config_merged = load_config(USER_CONFIG_PATH)
|
||||
|
||||
# Directories: be robust and provide sane defaults if missing
|
||||
directories = config_merged.get("directories") or {}
|
||||
repositories_dir = os.path.expanduser(
|
||||
directories.get("repositories", "~/Repositories")
|
||||
)
|
||||
binaries_dir = os.path.expanduser(
|
||||
directories.get("binaries", "~/.local/bin")
|
||||
)
|
||||
|
||||
# Ensure the merged config actually contains the resolved directories
|
||||
config_merged.setdefault("directories", {})
|
||||
config_merged["directories"]["repositories"] = repositories_dir
|
||||
config_merged["directories"]["binaries"] = binaries_dir
|
||||
|
||||
all_repositories = config_merged.get("repositories", [])
|
||||
|
||||
ctx = CLIContext(
|
||||
config_merged=config_merged,
|
||||
repositories_base_dir=repositories_dir,
|
||||
all_repositories=all_repositories,
|
||||
binaries_dir=binaries_dir,
|
||||
user_config_path=USER_CONFIG_PATH,
|
||||
)
|
||||
|
||||
parser = create_parser(DESCRIPTION_TEXT)
|
||||
args = parser.parse_args()
|
||||
|
||||
if not getattr(args, "command", None):
|
||||
parser.print_help()
|
||||
return
|
||||
|
||||
dispatch_command(args, ctx)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,505 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
|
||||
from pkgmgr.cli.proxy import register_proxy_commands
|
||||
|
||||
|
||||
class SortedSubParsersAction(argparse._SubParsersAction):
|
||||
"""
|
||||
Subparsers action that keeps choices sorted alphabetically.
|
||||
"""
|
||||
|
||||
def add_parser(self, name, **kwargs):
|
||||
parser = super().add_parser(name, **kwargs)
|
||||
# Sort choices alphabetically by dest (subcommand name)
|
||||
self._choices_actions.sort(key=lambda a: a.dest)
|
||||
return parser
|
||||
|
||||
|
||||
def add_identifier_arguments(subparser: argparse.ArgumentParser) -> None:
|
||||
"""
|
||||
Common identifier / selection arguments for many subcommands.
|
||||
|
||||
Selection modes (mutual intent, not hard-enforced):
|
||||
- identifiers (positional): select by alias / provider/account/repo
|
||||
- --all: select all repositories
|
||||
- --category / --string / --tag: filter-based selection on top
|
||||
of the full repository set
|
||||
"""
|
||||
subparser.add_argument(
|
||||
"identifiers",
|
||||
nargs="*",
|
||||
help=(
|
||||
"Identifier(s) for repositories. "
|
||||
"Default: Repository of current folder."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--all",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help=(
|
||||
"Apply the subcommand to all repositories in the config. "
|
||||
"Some subcommands ask for confirmation. If you want to give this "
|
||||
"confirmation for all repositories, pipe 'yes'. E.g: "
|
||||
"yes | pkgmgr {subcommand} --all"
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--category",
|
||||
nargs="+",
|
||||
default=[],
|
||||
help=(
|
||||
"Filter repositories by category patterns derived from config "
|
||||
"filenames or repo metadata (use filename without .yml/.yaml, "
|
||||
"or /regex/ to use a regular expression)."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--string",
|
||||
default="",
|
||||
help=(
|
||||
"Filter repositories whose identifier / name / path contains this "
|
||||
"substring (case-insensitive). Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--tag",
|
||||
action="append",
|
||||
default=[],
|
||||
help=(
|
||||
"Filter repositories by tag. Matches tags from the repository "
|
||||
"collector and category tags. Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--preview",
|
||||
action="store_true",
|
||||
help="Preview changes without executing commands",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--list",
|
||||
action="store_true",
|
||||
help="List affected repositories (with preview or status)",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"-a",
|
||||
"--args",
|
||||
nargs=argparse.REMAINDER,
|
||||
dest="extra_args",
|
||||
help="Additional parameters to be attached.",
|
||||
default=[],
|
||||
)
|
||||
|
||||
|
||||
def add_install_update_arguments(subparser: argparse.ArgumentParser) -> None:
|
||||
"""
|
||||
Common arguments for install/update commands.
|
||||
"""
|
||||
add_identifier_arguments(subparser)
|
||||
subparser.add_argument(
|
||||
"-q",
|
||||
"--quiet",
|
||||
action="store_true",
|
||||
help="Suppress warnings and info messages",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--no-verification",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Disable verification via commit/gpg",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--dependencies",
|
||||
action="store_true",
|
||||
help="Also pull and update dependencies",
|
||||
)
|
||||
subparser.add_argument(
|
||||
"--clone-mode",
|
||||
choices=["ssh", "https", "shallow"],
|
||||
default="ssh",
|
||||
help=(
|
||||
"Specify the clone mode: ssh, https, or shallow "
|
||||
"(HTTPS shallow clone; default: ssh)"
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def create_parser(description_text: str) -> argparse.ArgumentParser:
|
||||
"""
|
||||
Create the top-level argument parser for pkgmgr.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description=description_text,
|
||||
formatter_class=argparse.RawTextHelpFormatter,
|
||||
)
|
||||
subparsers = parser.add_subparsers(
|
||||
dest="command",
|
||||
help="Subcommands",
|
||||
action=SortedSubParsersAction,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# install / update / deinstall / delete
|
||||
# ------------------------------------------------------------
|
||||
install_parser = subparsers.add_parser(
|
||||
"install",
|
||||
help="Setup repository/repositories alias links to executables",
|
||||
)
|
||||
add_install_update_arguments(install_parser)
|
||||
|
||||
update_parser = subparsers.add_parser(
|
||||
"update",
|
||||
help="Update (pull + install) repository/repositories",
|
||||
)
|
||||
add_install_update_arguments(update_parser)
|
||||
update_parser.add_argument(
|
||||
"--system",
|
||||
action="store_true",
|
||||
help="Include system update commands",
|
||||
)
|
||||
|
||||
deinstall_parser = subparsers.add_parser(
|
||||
"deinstall",
|
||||
help="Remove alias links to repository/repositories",
|
||||
)
|
||||
add_identifier_arguments(deinstall_parser)
|
||||
|
||||
delete_parser = subparsers.add_parser(
|
||||
"delete",
|
||||
help="Delete repository/repositories alias links to executables",
|
||||
)
|
||||
add_identifier_arguments(delete_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# create
|
||||
# ------------------------------------------------------------
|
||||
create_cmd_parser = subparsers.add_parser(
|
||||
"create",
|
||||
help=(
|
||||
"Create new repository entries: add them to the config if not "
|
||||
"already present, initialize the local repository, and push "
|
||||
"remotely if --remote is set."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(create_cmd_parser)
|
||||
create_cmd_parser.add_argument(
|
||||
"--remote",
|
||||
action="store_true",
|
||||
help="If set, add the remote and push the initial commit.",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# status
|
||||
# ------------------------------------------------------------
|
||||
status_parser = subparsers.add_parser(
|
||||
"status",
|
||||
help="Show status for repository/repositories or system",
|
||||
)
|
||||
add_identifier_arguments(status_parser)
|
||||
status_parser.add_argument(
|
||||
"--system",
|
||||
action="store_true",
|
||||
help="Show system status",
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# config
|
||||
# ------------------------------------------------------------
|
||||
config_parser = subparsers.add_parser(
|
||||
"config",
|
||||
help="Manage configuration",
|
||||
)
|
||||
config_subparsers = config_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Config subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
config_show = config_subparsers.add_parser(
|
||||
"show",
|
||||
help="Show configuration",
|
||||
)
|
||||
add_identifier_arguments(config_show)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"add",
|
||||
help="Interactively add a new repository entry",
|
||||
)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"edit",
|
||||
help="Edit configuration file with nano",
|
||||
)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"init",
|
||||
help="Initialize user configuration by scanning the base directory",
|
||||
)
|
||||
|
||||
config_delete = config_subparsers.add_parser(
|
||||
"delete",
|
||||
help="Delete repository entry from user config",
|
||||
)
|
||||
add_identifier_arguments(config_delete)
|
||||
|
||||
config_ignore = config_subparsers.add_parser(
|
||||
"ignore",
|
||||
help="Set ignore flag for repository entries in user config",
|
||||
)
|
||||
add_identifier_arguments(config_ignore)
|
||||
config_ignore.add_argument(
|
||||
"--set",
|
||||
choices=["true", "false"],
|
||||
required=True,
|
||||
help="Set ignore to true or false",
|
||||
)
|
||||
|
||||
config_subparsers.add_parser(
|
||||
"update",
|
||||
help=(
|
||||
"Update default config files in ~/.config/pkgmgr/ from the "
|
||||
"installed pkgmgr package (does not touch config.yaml)."
|
||||
),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# path / explore / terminal / code / shell
|
||||
# ------------------------------------------------------------
|
||||
path_parser = subparsers.add_parser(
|
||||
"path",
|
||||
help="Print the path(s) of repository/repositories",
|
||||
)
|
||||
add_identifier_arguments(path_parser)
|
||||
|
||||
explore_parser = subparsers.add_parser(
|
||||
"explore",
|
||||
help="Open repository in Nautilus file manager",
|
||||
)
|
||||
add_identifier_arguments(explore_parser)
|
||||
|
||||
terminal_parser = subparsers.add_parser(
|
||||
"terminal",
|
||||
help="Open repository in a new GNOME Terminal tab",
|
||||
)
|
||||
add_identifier_arguments(terminal_parser)
|
||||
|
||||
code_parser = subparsers.add_parser(
|
||||
"code",
|
||||
help="Open repository workspace with VS Code",
|
||||
)
|
||||
add_identifier_arguments(code_parser)
|
||||
|
||||
shell_parser = subparsers.add_parser(
|
||||
"shell",
|
||||
help="Execute a shell command in each repository",
|
||||
)
|
||||
add_identifier_arguments(shell_parser)
|
||||
shell_parser.add_argument(
|
||||
"-c",
|
||||
"--command",
|
||||
nargs=argparse.REMAINDER,
|
||||
dest="shell_command",
|
||||
help=(
|
||||
"The shell command (and its arguments) to execute in each "
|
||||
"repository"
|
||||
),
|
||||
default=[],
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# branch
|
||||
# ------------------------------------------------------------
|
||||
branch_parser = subparsers.add_parser(
|
||||
"branch",
|
||||
help="Branch-related utilities (e.g. open/close feature branches)",
|
||||
)
|
||||
branch_subparsers = branch_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Branch subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
branch_open = branch_subparsers.add_parser(
|
||||
"open",
|
||||
help="Create and push a new branch on top of a base branch",
|
||||
)
|
||||
branch_open.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the new branch (optional; will be asked interactively "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_open.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help="Base branch to create the new branch from (default: main)",
|
||||
)
|
||||
|
||||
branch_close = branch_subparsers.add_parser(
|
||||
"close",
|
||||
help="Merge a feature branch into base and delete it",
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"name",
|
||||
nargs="?",
|
||||
help=(
|
||||
"Name of the branch to close (optional; current branch is used "
|
||||
"if omitted)"
|
||||
),
|
||||
)
|
||||
branch_close.add_argument(
|
||||
"--base",
|
||||
default="main",
|
||||
help=(
|
||||
"Base branch to merge into (default: main; falls back to master "
|
||||
"internally if main does not exist)"
|
||||
),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# release
|
||||
# ------------------------------------------------------------
|
||||
release_parser = subparsers.add_parser(
|
||||
"release",
|
||||
help=(
|
||||
"Create a release for repository/ies by incrementing version "
|
||||
"and updating the changelog."
|
||||
),
|
||||
)
|
||||
release_parser.add_argument(
|
||||
"release_type",
|
||||
choices=["major", "minor", "patch"],
|
||||
help="Type of version increment for the release (major, minor, patch).",
|
||||
)
|
||||
release_parser.add_argument(
|
||||
"-m",
|
||||
"--message",
|
||||
default=None,
|
||||
help=(
|
||||
"Optional release message to add to the changelog and tag."
|
||||
),
|
||||
)
|
||||
# Generic selection / preview / list / extra_args
|
||||
add_identifier_arguments(release_parser)
|
||||
# Close current branch after successful release
|
||||
release_parser.add_argument(
|
||||
"--close",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Close the current branch after a successful release in each "
|
||||
"repository, if it is not main/master."
|
||||
),
|
||||
)
|
||||
# Force: skip preview+confirmation and run release directly
|
||||
release_parser.add_argument(
|
||||
"-f",
|
||||
"--force",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Skip the interactive preview+confirmation step and run the "
|
||||
"release directly."
|
||||
),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# version
|
||||
# ------------------------------------------------------------
|
||||
version_parser = subparsers.add_parser(
|
||||
"version",
|
||||
help=(
|
||||
"Show version information for repository/ies "
|
||||
"(git tags, pyproject.toml, flake.nix, PKGBUILD, debian, spec, "
|
||||
"Ansible Galaxy)."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(version_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# changelog
|
||||
# ------------------------------------------------------------
|
||||
changelog_parser = subparsers.add_parser(
|
||||
"changelog",
|
||||
help=(
|
||||
"Show changelog derived from Git history. "
|
||||
"By default, shows the changes between the last two SemVer tags."
|
||||
),
|
||||
)
|
||||
changelog_parser.add_argument(
|
||||
"range",
|
||||
nargs="?",
|
||||
default="",
|
||||
help=(
|
||||
"Optional tag or range (e.g. v1.2.3 or v1.2.0..v1.2.3). "
|
||||
"If omitted, the changelog between the last two SemVer "
|
||||
"tags is shown."
|
||||
),
|
||||
)
|
||||
add_identifier_arguments(changelog_parser)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# list
|
||||
# ------------------------------------------------------------
|
||||
list_parser = subparsers.add_parser(
|
||||
"list",
|
||||
help="List all repositories with details and status",
|
||||
)
|
||||
# same selection logic as for install/update/etc.:
|
||||
add_identifier_arguments(list_parser)
|
||||
list_parser.add_argument(
|
||||
"--status",
|
||||
type=str,
|
||||
default="",
|
||||
help=(
|
||||
"Filter repositories by status (case insensitive). "
|
||||
"Use /regex/ for regular expressions."
|
||||
),
|
||||
)
|
||||
list_parser.add_argument(
|
||||
"--description",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Show an additional detailed section per repository "
|
||||
"(description, homepage, tags, categories, paths)."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# make
|
||||
# ------------------------------------------------------------
|
||||
make_parser = subparsers.add_parser(
|
||||
"make",
|
||||
help="Executes make commands",
|
||||
)
|
||||
add_identifier_arguments(make_parser)
|
||||
make_subparsers = make_parser.add_subparsers(
|
||||
dest="subcommand",
|
||||
help="Make subcommands",
|
||||
required=True,
|
||||
)
|
||||
|
||||
make_install = make_subparsers.add_parser(
|
||||
"install",
|
||||
help="Executes the make install command",
|
||||
)
|
||||
add_identifier_arguments(make_install)
|
||||
|
||||
make_deinstall = make_subparsers.add_parser(
|
||||
"deinstall",
|
||||
help="Executes the make deinstall command",
|
||||
)
|
||||
add_identifier_arguments(make_deinstall)
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Proxy commands (git, docker, docker compose, ...)
|
||||
# ------------------------------------------------------------
|
||||
register_proxy_commands(subparsers)
|
||||
|
||||
return parser
|
||||
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "package-manager"
version = "0.9.1"
version = "1.0.0"
description = "Kevin's package-manager tool (pkgmgr)"
readme = "README.md"
requires-python = ">=3.11"
@@ -39,13 +39,13 @@ pkgmgr = "pkgmgr.cli:main"
# -----------------------------
# setuptools configuration
# -----------------------------
# We use find_packages(), not a fixed list,
# and explicitly include pkgmgr* and config*
# Source layout: all packages live under "src/"
[tool.setuptools]
package-dir = { "" = "src", "config" = "config" }

[tool.setuptools.packages.find]
where = ["."]
where = ["src", "."]
include = ["pkgmgr*", "config*"]

# Ensure defaults.yaml is shipped inside wheels & nix builds
[tool.setuptools.package-data]
"config" = ["defaults.yaml"]
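The switch to `where = ["src", "."]` together with the `package-dir` mapping reflects a src layout. A sketch of the tree this configuration implies and of a quick local check; the layout comments are illustrative, inferred from the configuration above rather than from a directory listing:

```bash
# Illustrative layout implied by the setuptools configuration above:
#   src/pkgmgr/...         -> imported as "pkgmgr" (also checked by pythonImportsCheck in flake.nix)
#   config/defaults.yaml   -> shipped as package data for the "config" package
python -m pip install -e .      # editable install from the repository root
python -c "import pkgmgr"       # should succeed once installed
```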
@@ -4,32 +4,21 @@ set -euo pipefail
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
source "${SCRIPT_DIR}/resolve-base-image.sh"
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Building ONLY missing container images"
|
||||
echo "============================================================"
|
||||
IMAGE="package-manager-test-$distro"
|
||||
BASE_IMAGE="$(resolve_base_image "$distro")"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
IMAGE="package-manager-test-$distro"
|
||||
BASE_IMAGE="$(resolve_base_image "$distro")"
|
||||
|
||||
if docker image inspect "$IMAGE" >/dev/null 2>&1; then
|
||||
echo "[build-missing] Image already exists: $IMAGE (skipping)"
|
||||
continue
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[build-missing] Building missing image: $IMAGE"
|
||||
echo "BASE_IMAGE = $BASE_IMAGE"
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE="$BASE_IMAGE" \
|
||||
-t "$IMAGE" \
|
||||
.
|
||||
done
|
||||
if docker image inspect "$IMAGE" >/dev/null 2>&1; then
|
||||
echo "[build-missing] Image already exists: $IMAGE (skipping)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "============================================================"
|
||||
echo ">>> build-missing: Done"
|
||||
echo "============================================================"
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[build-missing] Building missing image: $IMAGE"
|
||||
echo "BASE_IMAGE = $BASE_IMAGE"
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE="$BASE_IMAGE" \
|
||||
-t "$IMAGE" \
|
||||
.
|
||||
|
||||
@@ -4,14 +4,12 @@ set -euo pipefail
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
source "${SCRIPT_DIR}/resolve-base-image.sh"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
base_image="$(resolve_base_image "$distro")"
|
||||
base_image="$(resolve_base_image "$distro")"
|
||||
|
||||
echo ">>> Building test image for distro '$distro' with NO CACHE (BASE_IMAGE=$base_image)..."
|
||||
echo ">>> Building test image for distro '$distro' with NO CACHE (BASE_IMAGE=$base_image)..."
|
||||
|
||||
docker build \
|
||||
--no-cache \
|
||||
--build-arg BASE_IMAGE="$base_image" \
|
||||
-t "package-manager-test-$distro" \
|
||||
.
|
||||
done
|
||||
docker build \
|
||||
--no-cache \
|
||||
--build-arg BASE_IMAGE="$base_image" \
|
||||
-t "package-manager-test-$distro" \
|
||||
.
|
||||
|
||||
@@ -4,13 +4,11 @@ set -euo pipefail
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
source "${SCRIPT_DIR}/resolve-base-image.sh"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
base_image="$(resolve_base_image "$distro")"
|
||||
base_image="$(resolve_base_image "$distro")"
|
||||
|
||||
echo ">>> Building test image for distro '$distro' (BASE_IMAGE=$base_image)..."
|
||||
echo ">>> Building test image for distro '$distro' (BASE_IMAGE=$base_image)..."
|
||||
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE="$base_image" \
|
||||
-t "package-manager-test-$distro" \
|
||||
.
|
||||
done
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE="$base_image" \
|
||||
-t "package-manager-test-$distro" \
|
||||
.
|
||||
|
||||
@@ -3,21 +3,22 @@ set -euo pipefail
|
||||
|
||||
echo "[init-nix] Starting Nix initialization..."
|
||||
|
||||
NIX_INSTALL_URL="${NIX_INSTALL_URL:-https://nixos.org/nix/install}"
|
||||
NIX_DOWNLOAD_MAX_TIME=300 # 5 minutes
|
||||
NIX_DOWNLOAD_SLEEP_INTERVAL=20 # 20 seconds
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helper: detect whether we are inside a container (Docker/Podman/etc.)
|
||||
# Detect whether we are inside a container (Docker/Podman/etc.)
|
||||
# ---------------------------------------------------------------------------
|
||||
is_container() {
|
||||
# Docker / Podman markers
|
||||
if [[ -f /.dockerenv ]] || [[ -f /run/.containerenv ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# cgroup hints
|
||||
if grep -qiE 'docker|container|podman|lxc' /proc/1/cgroup 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Environment variable used by some runtimes
|
||||
if [[ -n "${container:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
@@ -26,200 +27,206 @@ is_container() {
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helper: ensure Nix binaries are on PATH (multi-user or single-user)
|
||||
# Ensure Nix binaries are on PATH (multi-user or single-user)
|
||||
# ---------------------------------------------------------------------------
|
||||
ensure_nix_on_path() {
|
||||
# Multi-user profile (daemon install)
|
||||
if [[ -x /nix/var/nix/profiles/default/bin/nix ]]; then
|
||||
export PATH="/nix/var/nix/profiles/default/bin:${PATH}"
|
||||
fi
|
||||
|
||||
# Single-user profile (current user)
|
||||
if [[ -x "${HOME}/.nix-profile/bin/nix" ]]; then
|
||||
export PATH="${HOME}/.nix-profile/bin:${PATH}"
|
||||
fi
|
||||
|
||||
# Single-user profile for dedicated "nix" user (container case)
|
||||
if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
|
||||
export PATH="/home/nix/.nix-profile/bin:${PATH}"
|
||||
fi
|
||||
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fast path: Nix already available
|
||||
# Ensure Nix build group and users exist (build-users-group = nixbld)
|
||||
# ---------------------------------------------------------------------------
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix found after adjusting PATH: $(command -v nix)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix not found, starting installation logic..."
|
||||
|
||||
IN_CONTAINER=0
|
||||
if is_container; then
|
||||
IN_CONTAINER=1
|
||||
echo "[init-nix] Detected container environment."
|
||||
else
|
||||
echo "[init-nix] No container detected."
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Container + root: install Nix as dedicated "nix" user (single-user)
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ "${IN_CONTAINER}" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] Running as root inside a container – using dedicated 'nix' user."
|
||||
|
||||
# Ensure nixbld group (required by Nix)
|
||||
ensure_nix_build_group() {
|
||||
if ! getent group nixbld >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating group 'nixbld'..."
|
||||
groupadd -r nixbld
|
||||
fi
|
||||
|
||||
# Ensure Nix build users (nixbld1..nixbld10) as members of nixbld
|
||||
for i in $(seq 1 10); do
|
||||
if ! id "nixbld$i" >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating build user nixbld$i..."
|
||||
# -r: system account, -g: primary group, -G: supplementary (ensures membership is listed)
|
||||
useradd -r -g nixbld -G nixbld -s /usr/sbin/nologin "nixbld$i"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Ensure "nix" user (home at /home/nix)
|
||||
if ! id nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating user 'nix'..."
|
||||
# Resolve a valid shell path across distros:
|
||||
# - Debian/Ubuntu: /bin/bash
|
||||
# - Arch: /usr/bin/bash (often symlinked)
|
||||
# Fall back to /bin/sh on ultra-minimal systems.
|
||||
BASH_SHELL="$(command -v bash || true)"
|
||||
if [[ -z "${BASH_SHELL}" ]]; then
|
||||
BASH_SHELL="/bin/sh"
|
||||
# ---------------------------------------------------------------------------
|
||||
# Download and run Nix installer with retry
|
||||
# Usage: install_nix_with_retry daemon|no-daemon [run_as_user]
|
||||
# ---------------------------------------------------------------------------
|
||||
install_nix_with_retry() {
|
||||
local mode="$1"
|
||||
local run_as="${2:-}"
|
||||
local installer elapsed=0 mode_flag
|
||||
|
||||
case "${mode}" in
|
||||
daemon) mode_flag="--daemon" ;;
|
||||
no-daemon) mode_flag="--no-daemon" ;;
|
||||
*)
|
||||
echo "[init-nix] ERROR: Invalid mode '${mode}', expected 'daemon' or 'no-daemon'."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
installer="$(mktemp -t nix-installer.XXXXXX)"
|
||||
|
||||
echo "[init-nix] Downloading Nix installer from ${NIX_INSTALL_URL} with retry (max ${NIX_DOWNLOAD_MAX_TIME}s)..."
|
||||
|
||||
while true; do
|
||||
if curl -fL "${NIX_INSTALL_URL}" -o "${installer}"; then
|
||||
echo "[init-nix] Successfully downloaded Nix installer to ${installer}"
|
||||
break
|
||||
fi
|
||||
useradd -m -r -g nixbld -s "${BASH_SHELL}" nix
|
||||
fi
|
||||
|
||||
# Ensure /nix exists and is writable by the "nix" user.
|
||||
#
|
||||
# In some base images (or previous runs), /nix may already exist and be
|
||||
# owned by root. In that case the Nix single-user installer will abort with:
|
||||
#
|
||||
# "directory /nix exists, but is not writable by you"
|
||||
#
|
||||
# To keep container runs idempotent and robust, we always enforce
|
||||
# ownership nix:nixbld here.
|
||||
if [[ ! -d /nix ]]; then
|
||||
echo "[init-nix] Creating /nix with owner nix:nixbld..."
|
||||
mkdir -m 0755 /nix
|
||||
chown nix:nixbld /nix
|
||||
else
|
||||
current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
|
||||
current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
|
||||
if [[ "${current_owner}" != "nix" || "${current_group}" != "nixbld" ]]; then
|
||||
echo "[init-nix] /nix already exists with owner ${current_owner}:${current_group} – fixing to nix:nixbld..."
|
||||
chown -R nix:nixbld /nix
|
||||
local curl_exit=$?
|
||||
echo "[init-nix] WARNING: Failed to download Nix installer (curl exit code ${curl_exit})."
|
||||
|
||||
elapsed=$((elapsed + NIX_DOWNLOAD_SLEEP_INTERVAL))
|
||||
if (( elapsed >= NIX_DOWNLOAD_MAX_TIME )); then
|
||||
echo "[init-nix] ERROR: Giving up after ${elapsed}s trying to download Nix installer."
|
||||
rm -f "${installer}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[init-nix] Retrying in ${NIX_DOWNLOAD_SLEEP_INTERVAL}s (elapsed: ${elapsed}s/${NIX_DOWNLOAD_MAX_TIME}s)..."
|
||||
sleep "${NIX_DOWNLOAD_SLEEP_INTERVAL}"
|
||||
done
|
||||
|
||||
if [[ -n "${run_as}" ]]; then
|
||||
echo "[init-nix] Running installer as user '${run_as}' with mode '${mode}'..."
|
||||
if command -v sudo >/dev/null 2>&1; then
|
||||
sudo -u "${run_as}" bash -lc "sh '${installer}' ${mode_flag}"
|
||||
else
|
||||
echo "[init-nix] /nix already exists with correct owner nix:nixbld."
|
||||
su - "${run_as}" -c "sh '${installer}' ${mode_flag}"
|
||||
fi
|
||||
|
||||
if [[ ! -w /nix ]]; then
|
||||
echo "[init-nix] WARNING: /nix is still not writable after chown; Nix installer may fail."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Run Nix single-user installer as "nix"
|
||||
echo "[init-nix] Installing Nix as user 'nix' (single-user, --no-daemon)..."
|
||||
if command -v sudo >/dev/null 2>&1; then
|
||||
sudo -u nix bash -lc 'sh <(curl -L https://nixos.org/nix/install) --no-daemon'
|
||||
else
|
||||
su - nix -c 'sh <(curl -L https://nixos.org/nix/install) --no-daemon'
|
||||
echo "[init-nix] Running installer as current user with mode '${mode}'..."
|
||||
sh "${installer}" "${mode_flag}"
|
||||
fi
|
||||
|
||||
# After installation, expose nix to root via PATH and symlink
|
||||
ensure_nix_on_path
|
||||
rm -f "${installer}"
|
||||
}
|
||||
|
||||
if [[ -x /home/nix/.nix-profile/bin/nix ]]; then
|
||||
if [[ ! -e /usr/local/bin/nix ]]; then
|
||||
echo "[init-nix] Creating /usr/local/bin/nix symlink -> /home/nix/.nix-profile/bin/nix"
|
||||
ln -s /home/nix/.nix-profile/bin/nix /usr/local/bin/nix
|
||||
fi
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main
|
||||
# ---------------------------------------------------------------------------
|
||||
main() {
|
||||
# Fast path: Nix already available
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix already available on PATH: $(command -v nix)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Nix successfully installed (container mode) at: $(command -v nix)"
|
||||
else
|
||||
echo "[init-nix] WARNING: Nix installation finished in container, but 'nix' is still not on PATH."
|
||||
echo "[init-nix] Nix found after adjusting PATH: $(command -v nix)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Optionally add PATH hints to /etc/profile (best effort)
|
||||
if [[ -w /etc/profile ]]; then
|
||||
if ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
|
||||
cat <<'EOF' >> /etc/profile
|
||||
echo "[init-nix] Nix not found, starting installation logic..."
|
||||
|
||||
# Nix profiles (added by package-manager init-nix.sh)
|
||||
if [ -d /nix/var/nix/profiles/default/bin ]; then
|
||||
PATH="/nix/var/nix/profiles/default/bin:$PATH"
|
||||
fi
|
||||
if [ -d "$HOME/.nix-profile/bin" ]; then
|
||||
PATH="$HOME/.nix-profile/bin:$PATH"
|
||||
fi
|
||||
EOF
|
||||
echo "[init-nix] Appended Nix PATH setup to /etc/profile (container mode)."
|
||||
local IN_CONTAINER=0
|
||||
if is_container; then
|
||||
IN_CONTAINER=1
|
||||
echo "[init-nix] Detected container environment."
|
||||
else
|
||||
echo "[init-nix] No container detected."
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Container + root: dedicated "nix" user, single-user install
|
||||
# -------------------------------------------------------------------------
|
||||
if [[ "${IN_CONTAINER}" -eq 1 && "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] Container + root – installing as 'nix' user (single-user)."
|
||||
|
||||
ensure_nix_build_group
|
||||
|
||||
if ! id nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] Creating user 'nix'..."
|
||||
local BASH_SHELL
|
||||
BASH_SHELL="$(command -v bash || true)"
|
||||
[[ -z "${BASH_SHELL}" ]] && BASH_SHELL="/bin/sh"
|
||||
useradd -m -r -g nixbld -s "${BASH_SHELL}" nix
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix initialization complete (container root mode)."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Non-container or non-root container: normal installer paths
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ "${IN_CONTAINER}" -eq 0 ]]; then
|
||||
# Real host
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
echo "[init-nix] Host with systemd – using multi-user install (--daemon)."
|
||||
sh <(curl -L https://nixos.org/nix/install) --daemon
|
||||
else
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] WARNING: Running as root without systemd on host."
|
||||
echo "[init-nix] Falling back to single-user install (--no-daemon), but this is not recommended."
|
||||
sh <(curl -L https://nixos.org/nix/install) --no-daemon
|
||||
if [[ ! -d /nix ]]; then
|
||||
echo "[init-nix] Creating /nix with owner nix:nixbld..."
|
||||
mkdir -m 0755 /nix
|
||||
chown nix:nixbld /nix
|
||||
else
|
||||
echo "[init-nix] Non-root host without systemd – using single-user install (--no-daemon)."
|
||||
sh <(curl -L https://nixos.org/nix/install) --no-daemon
|
||||
local current_owner current_group
|
||||
current_owner="$(stat -c '%U' /nix 2>/dev/null || echo '?')"
|
||||
current_group="$(stat -c '%G' /nix 2>/dev/null || echo '?')"
|
||||
if [[ "${current_owner}" != "nix" || "${current_group}" != "nixbld" ]]; then
|
||||
echo "[init-nix] Fixing /nix ownership from ${current_owner}:${current_group} to nix:nixbld..."
|
||||
chown -R nix:nixbld /nix
|
||||
fi
|
||||
if [[ ! -w /nix ]]; then
|
||||
echo "[init-nix] WARNING: /nix is not writable after chown; Nix installer may fail."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
else
|
||||
|
||||
install_nix_with_retry "no-daemon" "nix"
|
||||
|
||||
ensure_nix_on_path
|
||||
|
||||
if [[ -x /home/nix/.nix-profile/bin/nix && ! -e /usr/local/bin/nix ]]; then
|
||||
echo "[init-nix] Creating /usr/local/bin/nix symlink -> /home/nix/.nix-profile/bin/nix"
|
||||
ln -s /home/nix/.nix-profile/bin/nix /usr/local/bin/nix
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Host (no container)
|
||||
# -------------------------------------------------------------------------
|
||||
elif [[ "${IN_CONTAINER}" -eq 0 ]]; then
|
||||
if command -v systemctl >/dev/null 2>&1; then
|
||||
echo "[init-nix] Host with systemd – using multi-user install (--daemon)."
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
ensure_nix_build_group
|
||||
fi
|
||||
install_nix_with_retry "daemon"
|
||||
else
|
||||
if [[ "${EUID:-0}" -eq 0 ]]; then
|
||||
echo "[init-nix] Host without systemd as root – using single-user install (--no-daemon)."
|
||||
ensure_nix_build_group
|
||||
else
|
||||
echo "[init-nix] Host without systemd as non-root – using single-user install (--no-daemon)."
|
||||
fi
|
||||
install_nix_with_retry "no-daemon"
|
||||
fi
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Container, but not root (rare)
|
||||
echo "[init-nix] Container as non-root user – using single-user install (--no-daemon)."
|
||||
sh <(curl -L https://nixos.org/nix/install) --no-daemon
|
||||
fi
|
||||
# -------------------------------------------------------------------------
|
||||
else
|
||||
echo "[init-nix] Container as non-root – using single-user install (--no-daemon)."
|
||||
install_nix_with_retry "no-daemon"
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# After installation: fix PATH (runtime + shell profiles)
|
||||
# ---------------------------------------------------------------------------
|
||||
ensure_nix_on_path
|
||||
# -------------------------------------------------------------------------
|
||||
# After installation: PATH + /etc/profile
|
||||
# -------------------------------------------------------------------------
|
||||
ensure_nix_on_path
|
||||
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] WARNING: Nix installation finished, but 'nix' is still not on PATH."
|
||||
echo "[init-nix] You may need to source your shell profile manually."
|
||||
exit 0
|
||||
fi
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "[init-nix] WARNING: Nix installation finished, but 'nix' is still not on PATH."
|
||||
echo "[init-nix] You may need to source your shell profile manually."
|
||||
else
|
||||
echo "[init-nix] Nix successfully installed at: $(command -v nix)"
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix successfully installed at: $(command -v nix)"
|
||||
|
||||
# Update global /etc/profile if writable (helps especially on minimal systems)
|
||||
if [[ -w /etc/profile ]]; then
|
||||
if ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
|
||||
if [[ -w /etc/profile ]] && ! grep -q 'Nix profiles' /etc/profile 2>/dev/null; then
|
||||
cat <<'EOF' >> /etc/profile
|
||||
|
||||
# Nix profiles (added by package-manager init-nix.sh)
|
||||
@@ -232,6 +239,8 @@ fi
|
||||
EOF
|
||||
echo "[init-nix] Appended Nix PATH setup to /etc/profile"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "[init-nix] Nix initialization complete."
|
||||
echo "[init-nix] Nix initialization complete."
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
||||
@@ -45,8 +45,42 @@ else
fi

echo "[aur-builder-setup] Ensuring yay is installed for aur_builder..."

if ! "${RUN_AS_AUR[@]}" 'command -v yay >/dev/null 2>&1'; then
  "${RUN_AS_AUR[@]}" 'cd ~ && rm -rf yay && git clone https://aur.archlinux.org/yay.git && cd yay && makepkg -si --noconfirm'
  echo "[aur-builder-setup] yay not found – starting retry sequence for download..."

  MAX_TIME=300
  SLEEP_INTERVAL=20
  ELAPSED=0

  while true; do
    if "${RUN_AS_AUR[@]}" '
      set -euo pipefail
      cd ~
      rm -rf yay || true
      git clone https://aur.archlinux.org/yay.git yay
    '; then
      echo "[aur-builder-setup] yay repository cloned successfully."
      break
    fi

    echo "[aur-builder-setup] git clone failed (likely 504). Retrying in ${SLEEP_INTERVAL}s..."
    sleep "${SLEEP_INTERVAL}"
    ELAPSED=$((ELAPSED + SLEEP_INTERVAL))

    if (( ELAPSED >= MAX_TIME )); then
      echo "[aur-builder-setup] ERROR: Aborted after 5 minutes of retry attempts."
      exit 1
    fi
  done

  # Now build yay after successful clone
  "${RUN_AS_AUR[@]}" '
    set -euo pipefail
    cd ~/yay
    makepkg -si --noconfirm
  '

else
  echo "[aur-builder-setup] yay already installed."
fi
@@ -3,10 +3,21 @@ set -euo pipefail

echo "[arch/package] Building Arch package (makepkg --nodeps)..."

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
PKG_DIR="${PROJECT_ROOT}/packaging/arch"

if [[ ! -f "${PKG_DIR}/PKGBUILD" ]]; then
  echo "[arch/package] ERROR: PKGBUILD not found in ${PKG_DIR}"
  exit 1
fi

cd "${PKG_DIR}"

if id aur_builder >/dev/null 2>&1; then
  echo "[arch/package] Using 'aur_builder' user for makepkg..."
  chown -R aur_builder:aur_builder "$(pwd)"
  su aur_builder -c "cd '$(pwd)' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
  chown -R aur_builder:aur_builder "${PKG_DIR}"
  su aur_builder -c "cd '${PKG_DIR}' && rm -f package-manager-*.pkg.tar.* && makepkg --noconfirm --clean --nodeps"
else
  echo "[arch/package] WARNING: user 'aur_builder' not found, running makepkg as current user..."
  rm -f package-manager-*.pkg.tar.*
@@ -4,8 +4,17 @@ set -euo pipefail
|
||||
echo "[centos/package] Setting up rpmbuild directories..."
|
||||
mkdir -p /root/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
SPEC_PATH="${PROJECT_ROOT}/packaging/fedora/package-manager.spec"
|
||||
|
||||
if [[ ! -f "${SPEC_PATH}" ]]; then
|
||||
echo "[centos/package] ERROR: SPEC file not found: ${SPEC_PATH}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[centos/package] Extracting version from package-manager.spec..."
|
||||
version="$(grep -E '^Version:' package-manager.spec | awk '{print $2}')"
|
||||
version="$(grep -E '^Version:' "${SPEC_PATH}" | awk '{print $2}')"
|
||||
if [[ -z "${version}" ]]; then
|
||||
echo "ERROR: Version missing!"
|
||||
exit 1
|
||||
@@ -15,13 +24,13 @@ srcdir="package-manager-${version}"
|
||||
echo "[centos/package] Preparing source tree: ${srcdir}"
|
||||
rm -rf "/tmp/${srcdir}"
|
||||
mkdir -p "/tmp/${srcdir}"
|
||||
cp -a . "/tmp/${srcdir}/"
|
||||
cp -a "${PROJECT_ROOT}/." "/tmp/${srcdir}/"
|
||||
|
||||
echo "[centos/package] Creating source tarball..."
|
||||
tar czf "/root/rpmbuild/SOURCES/${srcdir}.tar.gz" -C /tmp "${srcdir}"
|
||||
|
||||
echo "[centos/package] Copying SPEC..."
|
||||
cp package-manager.spec /root/rpmbuild/SPECS/
|
||||
cp "${SPEC_PATH}" /root/rpmbuild/SPECS/
|
||||
|
||||
echo "[centos/package] Running rpmbuild..."
|
||||
cd /root/rpmbuild/SPECS
|
||||
|
||||
@@ -3,6 +3,25 @@ set -euo pipefail
|
||||
|
||||
echo "[debian/package] Building Debian package..."
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
|
||||
BUILD_ROOT="/tmp/package-manager-debian-build"
|
||||
rm -rf "${BUILD_ROOT}"
|
||||
mkdir -p "${BUILD_ROOT}"
|
||||
|
||||
echo "[debian/package] Syncing project sources to ${BUILD_ROOT}..."
|
||||
rsync -a \
|
||||
--exclude 'packaging/debian' \
|
||||
"${PROJECT_ROOT}/" "${BUILD_ROOT}/"
|
||||
|
||||
echo "[debian/package] Overlaying debian/ metadata from packaging/debian..."
|
||||
mkdir -p "${BUILD_ROOT}/debian"
|
||||
cp -a "${PROJECT_ROOT}/packaging/debian/." "${BUILD_ROOT}/debian/"
|
||||
|
||||
cd "${BUILD_ROOT}"
|
||||
|
||||
echo "[debian/package] Running dpkg-buildpackage..."
|
||||
dpkg-buildpackage -us -uc -b
|
||||
|
||||
echo "[debian/package] Installing generated DEB package..."
|
||||
|
||||
@@ -4,8 +4,17 @@ set -euo pipefail
|
||||
echo "[fedora/package] Setting up rpmbuild directories..."
|
||||
mkdir -p /root/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
SPEC_PATH="${PROJECT_ROOT}/packaging/fedora/package-manager.spec"
|
||||
|
||||
if [[ ! -f "${SPEC_PATH}" ]]; then
|
||||
echo "[fedora/package] ERROR: SPEC file not found: ${SPEC_PATH}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[fedora/package] Extracting version from package-manager.spec..."
|
||||
version="$(grep -E '^Version:' package-manager.spec | awk '{print $2}')"
|
||||
version="$(grep -E '^Version:' "${SPEC_PATH}" | awk '{print $2}')"
|
||||
if [[ -z "${version}" ]]; then
|
||||
echo "ERROR: Version missing!"
|
||||
exit 1
|
||||
@@ -15,13 +24,13 @@ srcdir="package-manager-${version}"
|
||||
echo "[fedora/package] Preparing source tree: ${srcdir}"
|
||||
rm -rf "/tmp/${srcdir}"
|
||||
mkdir -p "/tmp/${srcdir}"
|
||||
cp -a . "/tmp/${srcdir}/"
|
||||
cp -a "${PROJECT_ROOT}/." "/tmp/${srcdir}/"
|
||||
|
||||
echo "[fedora/package] Creating source tarball..."
|
||||
tar czf "/root/rpmbuild/SOURCES/${srcdir}.tar.gz" -C /tmp "${srcdir}"
|
||||
|
||||
echo "[fedora/package] Copying SPEC..."
|
||||
cp package-manager.spec /root/rpmbuild/SPECS/
|
||||
cp "${SPEC_PATH}" /root/rpmbuild/SPECS/
|
||||
|
||||
echo "[fedora/package] Running rpmbuild..."
|
||||
cd /root/rpmbuild/SPECS
|
||||
|
||||
@@ -8,6 +8,12 @@ source "${SCRIPT_DIR}/lib.sh"

OS_ID="$(detect_os_id)"

# Map Manjaro to Arch
if [[ "${OS_ID}" == "manjaro" ]]; then
  echo "[run-package] Mapping OS 'manjaro' → 'arch'"
  OS_ID="arch"
fi

echo "[run-package] Detected OS: ${OS_ID}"

case "${OS_ID}" in
@@ -3,6 +3,25 @@ set -euo pipefail
|
||||
|
||||
echo "[ubuntu/package] Building Ubuntu (Debian-style) package..."
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
|
||||
BUILD_ROOT="/tmp/package-manager-ubuntu-build"
|
||||
rm -rf "${BUILD_ROOT}"
|
||||
mkdir -p "${BUILD_ROOT}"
|
||||
|
||||
echo "[ubuntu/package] Syncing project sources to ${BUILD_ROOT}..."
|
||||
rsync -a \
|
||||
--exclude 'packaging/debian' \
|
||||
"${PROJECT_ROOT}/" "${BUILD_ROOT}/"
|
||||
|
||||
echo "[ubuntu/package] Overlaying debian/ metadata from packaging/debian..."
|
||||
mkdir -p "${BUILD_ROOT}/debian"
|
||||
cp -a "${PROJECT_ROOT}/packaging/debian/." "${BUILD_ROOT}/debian/"
|
||||
|
||||
cd "${BUILD_ROOT}"
|
||||
|
||||
echo "[ubuntu/package] Running dpkg-buildpackage..."
|
||||
dpkg-buildpackage -us -uc -b
|
||||
|
||||
echo "[ubuntu/package] Installing generated DEB package..."
|
||||
|
||||
@@ -8,19 +8,18 @@ fi
|
||||
|
||||
FLAKE_DIR="/usr/lib/package-manager"
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Try to ensure that "nix" is on PATH
|
||||
# ------------------------------------------------------------
|
||||
# ---------------------------------------------------------------------------
|
||||
# Try to ensure that "nix" is on PATH (common locations + container user)
|
||||
# ---------------------------------------------------------------------------
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
# Common locations for Nix installations
|
||||
CANDIDATES=(
|
||||
"/nix/var/nix/profiles/default/bin/nix"
|
||||
"${HOME:-/root}/.nix-profile/bin/nix"
|
||||
"/home/nix/.nix-profile/bin/nix"
|
||||
)
|
||||
|
||||
for candidate in "${CANDIDATES[@]}"; do
|
||||
if [[ -x "$candidate" ]]; then
|
||||
# Prepend the directory of the candidate to PATH
|
||||
PATH="$(dirname "$candidate"):${PATH}"
|
||||
export PATH
|
||||
break
|
||||
@@ -28,13 +27,22 @@ if ! command -v nix >/dev/null 2>&1; then
|
||||
done
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Primary (and only) path: use Nix flake if available
|
||||
# ------------------------------------------------------------
|
||||
# ---------------------------------------------------------------------------
|
||||
# If nix is still missing, try to run init-nix.sh once
|
||||
# ---------------------------------------------------------------------------
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
if [[ -x "${FLAKE_DIR}/init-nix.sh" ]]; then
|
||||
"${FLAKE_DIR}/init-nix.sh" || true
|
||||
fi
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Primary path: use Nix flake if available
|
||||
# ---------------------------------------------------------------------------
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
fi
|
||||
|
||||
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH."
|
||||
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
|
||||
echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
|
||||
exit 1
|
||||
|
||||
@@ -1,41 +1,32 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
echo "============================================================"
|
||||
echo ">>> Running sanity test: verifying test containers start"
|
||||
echo "============================================================"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
IMAGE="package-manager-test-$distro"
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo ">>> Testing container: $IMAGE"
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
echo "[test-container] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
|
||||
echo
|
||||
|
||||
# Run the command and capture the output
|
||||
if OUTPUT=$(docker run --rm \
|
||||
-e PKGMGR_DEV=1 \
|
||||
-v pkgmgr_nix_store_${distro}:/nix \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
"$IMAGE" 2>&1); then
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-container] SUCCESS: $IMAGE responded to 'pkgmgr --help'"
|
||||
|
||||
else
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-container] ERROR: $IMAGE failed to run 'pkgmgr --help'"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
IMAGE="package-manager-test-$distro"
|
||||
|
||||
echo
|
||||
echo "============================================================"
|
||||
echo ">>> All containers passed the sanity check"
|
||||
echo "============================================================"
|
||||
echo "------------------------------------------------------------"
|
||||
echo ">>> Testing container: $IMAGE"
|
||||
echo "------------------------------------------------------------"
|
||||
echo "[test-container] Inspect image metadata:"
|
||||
docker image inspect "$IMAGE" | sed -n '1,40p'
|
||||
|
||||
echo "[test-container] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
|
||||
echo
|
||||
|
||||
# Run the command and capture the output
|
||||
if OUTPUT=$(docker run --rm \
|
||||
-e PKGMGR_DEV=1 \
|
||||
-v pkgmgr_nix_store_${distro}:/nix \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
"$IMAGE" 2>&1); then
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-container] SUCCESS: $IMAGE responded to 'pkgmgr --help'"
|
||||
|
||||
else
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-container] ERROR: $IMAGE failed to run 'pkgmgr --help'"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,55 +1,60 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
echo ">>> Running E2E tests in all distros: $DISTROS"
|
||||
echo "============================================================"
|
||||
echo ">>> Running E2E tests: $distro"
|
||||
echo "============================================================"
|
||||
|
||||
for distro in $DISTROS; do
|
||||
echo "============================================================"
|
||||
echo ">>> Running E2E tests: $distro"
|
||||
echo "============================================================"
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_store_${distro}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
-e PKGMGR_DEV=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
--workdir /src \
|
||||
"package-manager-test-${distro}" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v pkgmgr_nix_store_${distro}:/nix \
|
||||
-v "pkgmgr_nix_cache_${distro}:/root/.cache/nix" \
|
||||
-e PKGMGR_DEV=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
--workdir /src \
|
||||
--entrypoint bash \
|
||||
"package-manager-test-$distro" \
|
||||
-c '
|
||||
set -e
|
||||
# Load distro info
|
||||
if [ -f /etc/os-release ]; then
|
||||
. /etc/os-release
|
||||
fi
|
||||
|
||||
# Load distro info
|
||||
if [ -f /etc/os-release ]; then
|
||||
. /etc/os-release
|
||||
fi
|
||||
echo "Running tests inside distro: ${ID:-unknown}"
|
||||
|
||||
echo "Running tests inside distro: $ID"
|
||||
# Load Nix environment if available
|
||||
if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then
|
||||
. "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
|
||||
fi
|
||||
|
||||
# Load nix environment if available
|
||||
if [ -f "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" ]; then
|
||||
. "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
|
||||
fi
|
||||
if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
|
||||
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
|
||||
fi
|
||||
|
||||
if [ -f "$HOME/.nix-profile/etc/profile.d/nix.sh" ]; then
|
||||
. "$HOME/.nix-profile/etc/profile.d/nix.sh"
|
||||
fi
|
||||
PATH="/nix/var/nix/profiles/default/bin:$HOME/.nix-profile/bin:$PATH"
|
||||
|
||||
PATH="/nix/var/nix/profiles/default/bin:$HOME/.nix-profile/bin:$PATH"
|
||||
command -v nix >/dev/null || {
|
||||
echo "ERROR: nix not found."
|
||||
exit 1
|
||||
}
|
||||
|
||||
command -v nix >/dev/null || {
|
||||
echo "ERROR: nix not found."
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Mark the mounted repository as safe to avoid Git ownership errors
|
||||
# Mark the mounted repository as safe to avoid Git ownership errors.
|
||||
# Newer Git (e.g. on Ubuntu) complains about the gitdir (/src/.git),
|
||||
# older versions about the worktree (/src). Nix turns "." into the
|
||||
# flake input "git+file:///src", which then uses Git under the hood.
|
||||
if command -v git >/dev/null 2>&1; then
|
||||
# Worktree path
|
||||
git config --global --add safe.directory /src || true
|
||||
# Gitdir path shown in the "dubious ownership" error
|
||||
git config --global --add safe.directory /src/.git || true
|
||||
# Ephemeral CI containers: allow all paths as a last resort
|
||||
git config --global --add safe.directory '*' || true
|
||||
fi
|
||||
|
||||
# Run the E2E tests inside the Nix development shell
|
||||
nix develop .#default --no-write-lock-file -c \
|
||||
python3 -m unittest discover \
|
||||
-s /src/tests/e2e \
|
||||
-p "$TEST_PATTERN"
|
||||
'
|
||||
done
|
||||
# Run the E2E tests inside the Nix development shell
|
||||
nix develop .#default --no-write-lock-file -c \
|
||||
python3 -m unittest discover \
|
||||
-s /src/tests/e2e \
|
||||
-p "$TEST_PATTERN"
|
||||
'
|
||||
|
||||
@@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail

: "${distro:=arch}"

echo "============================================================"
echo ">>> Running INTEGRATION tests in ${distro} container"
echo "============================================================"
@@ -14,13 +12,12 @@ docker run --rm \
  --workdir /src \
  -e PKGMGR_DEV=1 \
  -e TEST_PATTERN="${TEST_PATTERN}" \
  --entrypoint bash \
  "package-manager-test-${distro}" \
  -c '
  bash -lc '
    set -e;
    git config --global --add safe.directory /src || true;
    nix develop .#default --no-write-lock-file -c \
      python -m unittest discover \
      python3 -m unittest discover \
        -s tests/integration \
        -t /src \
        -p "$TEST_PATTERN";
@@ -1,8 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail

: "${distro:=arch}"

echo "============================================================"
echo ">>> Running UNIT tests in ${distro} container"
echo "============================================================"
@@ -14,13 +12,12 @@ docker run --rm \
  --workdir /src \
  -e PKGMGR_DEV=1 \
  -e TEST_PATTERN="${TEST_PATTERN}" \
  --entrypoint bash \
  "package-manager-test-${distro}" \
  -c '
  bash -lc '
    set -e;
    git config --global --add safe.directory /src || true;
    nix develop .#default --no-write-lock-file -c \
      python -m unittest discover \
      python3 -m unittest discover \
        -s tests/unit \
        -t /src \
        -p "$TEST_PATTERN";
src/pkgmgr/__init__.py (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Top-level pkgmgr package.
|
||||
|
||||
We deliberately avoid importing heavy submodules (like the CLI)
|
||||
on import to keep unit tests fast and to not require optional
|
||||
dependencies (like PyYAML) unless they are actually used.
|
||||
|
||||
Accessing ``pkgmgr.cli`` will load the CLI module lazily via
|
||||
``__getattr__``. This keeps patterns like
|
||||
|
||||
from pkgmgr import cli
|
||||
|
||||
working as expected in tests and entry points.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from importlib import import_module
|
||||
from typing import Any
|
||||
|
||||
__all__ = ["cli"]
|
||||
|
||||
|
||||
def __getattr__(name: str) -> Any:
|
||||
"""
|
||||
Lazily expose ``pkgmgr.cli`` as attribute on the top-level package.
|
||||
|
||||
This keeps ``import pkgmgr`` lightweight while still allowing
|
||||
``from pkgmgr import cli`` in tests and entry points.
|
||||
"""
|
||||
if name == "cli":
|
||||
return import_module("pkgmgr.cli")
|
||||
raise AttributeError(f"module 'pkgmgr' has no attribute {name!r}")
|
||||
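As a quick illustration of the lazy-loading behaviour described in the docstrings above (a sketch only, not part of the module; it assumes the package is importable in the current environment):

import sys
import pkgmgr

# Importing the top-level package must not pull in the CLI module yet.
assert "pkgmgr.cli" not in sys.modules

from pkgmgr import cli  # resolved via pkgmgr.__getattr__("cli")
assert "pkgmgr.cli" in sys.modules
print(cli.__name__)     # -> pkgmgr.cli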
@@ -139,22 +139,27 @@ class NixFlakeInstaller(BaseInstaller):
|
||||
|
||||
for output, allow_failure in outputs:
|
||||
cmd = f"nix profile install {ctx.repo_dir}#{output}"
|
||||
print(f"[INFO] Running: {cmd}")
|
||||
ret = os.system(cmd)
|
||||
|
||||
try:
|
||||
run_command(
|
||||
cmd,
|
||||
cwd=ctx.repo_dir,
|
||||
preview=ctx.preview,
|
||||
allow_failure=allow_failure,
|
||||
)
|
||||
# Extract real exit code from os.system() result
|
||||
if os.WIFEXITED(ret):
|
||||
exit_code = os.WEXITSTATUS(ret)
|
||||
else:
|
||||
# abnormal termination (signal etc.) – keep raw value
|
||||
exit_code = ret
|
||||
|
||||
if exit_code == 0:
|
||||
print(f"Nix flake output '{output}' successfully installed.")
|
||||
except SystemExit as e:
|
||||
print(f"[Error] Failed to install Nix flake output '{output}': {e}")
|
||||
if not allow_failure:
|
||||
# Mandatory output failed → fatal for the pipeline.
|
||||
raise
|
||||
# Optional output failed → log and continue.
|
||||
print(
|
||||
"[Warning] Continuing despite failure to install "
|
||||
f"optional output '{output}'."
|
||||
)
|
||||
continue
|
||||
|
||||
print(f"[Error] Failed to install Nix flake output '{output}'")
|
||||
print(f"[Error] Command exited with code {exit_code}")
|
||||
|
||||
if not allow_failure:
|
||||
raise SystemExit(exit_code)
|
||||
|
||||
print(
|
||||
"[Warning] Continuing despite failure to install "
|
||||
f"optional output '{output}'."
|
||||
)
|
||||
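The hunk above replaces the direct os.system() call with run_command(); for reference, the removed branch decoded the raw status word as sketched below (POSIX semantics, illustrative only, not part of the changeset):

import os

status = os.system("true")  # raw 16-bit wait status on POSIX
if os.WIFEXITED(status):
    exit_code = os.WEXITSTATUS(status)
else:
    # abnormal termination (signal etc.) – keep the raw value
    exit_code = status
print(exit_code)  # -> 0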
src/pkgmgr/actions/mirror/__init__.py (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""
|
||||
High-level mirror actions.
|
||||
|
||||
Public API:
|
||||
- list_mirrors
|
||||
- diff_mirrors
|
||||
- merge_mirrors
|
||||
- setup_mirrors
|
||||
"""
|
||||
|
||||
from .types import Repository, MirrorMap
|
||||
from .list_cmd import list_mirrors
|
||||
from .diff_cmd import diff_mirrors
|
||||
from .merge_cmd import merge_mirrors
|
||||
from .setup_cmd import setup_mirrors
|
||||
|
||||
__all__ = [
|
||||
"Repository",
|
||||
"MirrorMap",
|
||||
"list_mirrors",
|
||||
"diff_mirrors",
|
||||
"merge_mirrors",
|
||||
"setup_mirrors",
|
||||
]
|
||||
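A hedged usage sketch of the public API listed above (not part of the module); the repository dict and base directory are made-up example values, not taken from this changeset:

from pkgmgr.actions.mirror import list_mirrors, diff_mirrors

# Hypothetical repository entry; real entries come from the user config.
repo = {
    "provider": "github.com",
    "account": "example",
    "repository": "package-manager",
    "mirrors": {"origin": "git@github.com:example/package-manager.git"},
}

list_mirrors([repo], "/tmp/Repositories", [repo], source="all")
diff_mirrors([repo], "/tmp/Repositories", [repo])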
src/pkgmgr/actions/mirror/context.py (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
|
||||
from .io import load_config_mirrors, read_mirrors_file
|
||||
from .types import MirrorMap, RepoMirrorContext, Repository
|
||||
|
||||
|
||||
def build_context(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
) -> RepoMirrorContext:
|
||||
"""
|
||||
Build a RepoMirrorContext for a single repository.
|
||||
"""
|
||||
identifier = get_repo_identifier(repo, all_repos)
|
||||
repo_dir = get_repo_dir(repositories_base_dir, repo)
|
||||
|
||||
config_mirrors: MirrorMap = load_config_mirrors(repo)
|
||||
file_mirrors: MirrorMap = read_mirrors_file(repo_dir)
|
||||
|
||||
return RepoMirrorContext(
|
||||
identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
config_mirrors=config_mirrors,
|
||||
file_mirrors=file_mirrors,
|
||||
)
|
||||
src/pkgmgr/actions/mirror/diff_cmd.py (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from .context import build_context
|
||||
from .printing import print_header
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def diff_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
) -> None:
|
||||
"""
|
||||
Show differences between config mirrors and MIRRORS file.
|
||||
|
||||
- Mirrors present only in config are reported as "ONLY IN CONFIG".
|
||||
- Mirrors present only in MIRRORS file are reported as "ONLY IN FILE".
|
||||
- Mirrors with same name but different URLs are reported as "URL MISMATCH".
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print_header("[MIRROR DIFF]", ctx)
|
||||
|
||||
config_m = ctx.config_mirrors
|
||||
file_m = ctx.file_mirrors
|
||||
|
||||
if not config_m and not file_m:
|
||||
print(" No mirrors configured in config or MIRRORS file.")
|
||||
print()
|
||||
continue
|
||||
|
||||
# Mirrors only in config
|
||||
for name, url in sorted(config_m.items()):
|
||||
if name not in file_m:
|
||||
print(f" [ONLY IN CONFIG] {name}: {url}")
|
||||
|
||||
# Mirrors only in MIRRORS file
|
||||
for name, url in sorted(file_m.items()):
|
||||
if name not in config_m:
|
||||
print(f" [ONLY IN FILE] {name}: {url}")
|
||||
|
||||
# Mirrors with same name but different URLs
|
||||
shared = set(config_m) & set(file_m)
|
||||
for name in sorted(shared):
|
||||
url_cfg = config_m.get(name)
|
||||
url_file = file_m.get(name)
|
||||
if url_cfg != url_file:
|
||||
print(
|
||||
f" [URL MISMATCH] {name}:\n"
|
||||
f" config: {url_cfg}\n"
|
||||
f" file: {url_file}"
|
||||
)
|
||||
|
||||
if config_m and file_m and config_m == file_m:
|
||||
print(" [OK] Mirrors in config and MIRRORS file are in sync.")
|
||||
|
||||
print()
|
||||
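A small sketch (plain dicts, invented URLs, not part of the module) of the three categories the diff above reports:

config_m = {"origin": "git@github.com:example/repo.git",
            "gitlab": "git@gitlab.com:example/repo.git"}
file_m   = {"origin": "git@github.com:example/repo.git",
            "codeberg": "https://codeberg.org/example/repo.git"}

only_in_config = sorted(set(config_m) - set(file_m))    # ['gitlab']   -> ONLY IN CONFIG
only_in_file   = sorted(set(file_m) - set(config_m))    # ['codeberg'] -> ONLY IN FILE
mismatches     = sorted(n for n in set(config_m) & set(file_m)
                        if config_m[n] != file_m[n])     # []          -> URL MISMATCH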
src/pkgmgr/actions/mirror/git_remote.py (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import List, Optional, Set
|
||||
|
||||
from pkgmgr.core.command.run import run_command
|
||||
from pkgmgr.core.git import GitError, run_git
|
||||
|
||||
from .types import MirrorMap, RepoMirrorContext, Repository
|
||||
|
||||
|
||||
def build_default_ssh_url(repo: Repository) -> Optional[str]:
|
||||
"""
|
||||
Build a simple SSH URL from repo config if no explicit mirror is defined.
|
||||
|
||||
Example: git@github.com:account/repository.git
|
||||
"""
|
||||
provider = repo.get("provider")
|
||||
account = repo.get("account")
|
||||
name = repo.get("repository")
|
||||
port = repo.get("port")
|
||||
|
||||
if not provider or not account or not name:
|
||||
return None
|
||||
|
||||
provider = str(provider)
|
||||
account = str(account)
|
||||
name = str(name)
|
||||
|
||||
if port:
|
||||
return f"ssh://git@{provider}:{port}/{account}/{name}.git"
|
||||
|
||||
# GitHub-style shorthand
|
||||
return f"git@{provider}:{account}/{name}.git"
|
||||
|
||||
|
||||
def determine_primary_remote_url(
|
||||
repo: Repository,
|
||||
resolved_mirrors: MirrorMap,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Determine the primary remote URL in a consistent way:
|
||||
|
||||
1. resolved_mirrors["origin"]
|
||||
2. any resolved mirror (first by name)
|
||||
3. default SSH URL from provider/account/repository
|
||||
"""
|
||||
if "origin" in resolved_mirrors:
|
||||
return resolved_mirrors["origin"]
|
||||
|
||||
if resolved_mirrors:
|
||||
first_name = sorted(resolved_mirrors.keys())[0]
|
||||
return resolved_mirrors[first_name]
|
||||
|
||||
return build_default_ssh_url(repo)
|
||||
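To make the resolution order above concrete, a hedged sketch (not part of the module) with a made-up repository entry:

from pkgmgr.actions.mirror.git_remote import determine_primary_remote_url

repo = {"provider": "github.com", "account": "example", "repository": "package-manager"}

# 1. An explicit "origin" mirror always wins.
determine_primary_remote_url(repo, {"origin": "git@github.com:example/package-manager.git"})

# 2. Otherwise the first mirror (sorted by name) is used.
determine_primary_remote_url(repo, {"codeberg": "https://codeberg.org/example/package-manager.git"})

# 3. With no mirrors, the default SSH URL is derived from provider/account/repository:
determine_primary_remote_url(repo, {})  # -> "git@github.com:example/package-manager.git"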
|
||||
|
||||
def _safe_git_output(args: List[str], cwd: str) -> Optional[str]:
|
||||
"""
|
||||
Run a Git command via run_git and return its stdout, or None on failure.
|
||||
"""
|
||||
try:
|
||||
return run_git(args, cwd=cwd)
|
||||
except GitError:
|
||||
return None
|
||||
|
||||
|
||||
def current_origin_url(repo_dir: str) -> Optional[str]:
|
||||
"""
|
||||
Return the current URL for remote 'origin', or None if not present.
|
||||
"""
|
||||
output = _safe_git_output(["remote", "get-url", "origin"], cwd=repo_dir)
|
||||
if not output:
|
||||
return None
|
||||
url = output.strip()
|
||||
return url or None
|
||||
|
||||
|
||||
def has_origin_remote(repo_dir: str) -> bool:
|
||||
"""
|
||||
Check whether a remote called 'origin' exists in the repository.
|
||||
"""
|
||||
output = _safe_git_output(["remote"], cwd=repo_dir)
|
||||
if not output:
|
||||
return False
|
||||
names = output.split()
|
||||
return "origin" in names
|
||||
|
||||
|
||||
def _ensure_push_urls_for_origin(
|
||||
repo_dir: str,
|
||||
mirrors: MirrorMap,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that all mirror URLs are present as push URLs on 'origin'.
|
||||
"""
|
||||
desired: Set[str] = {url for url in mirrors.values() if url}
|
||||
if not desired:
|
||||
return
|
||||
|
||||
existing_output = _safe_git_output(
|
||||
["remote", "get-url", "--push", "--all", "origin"],
|
||||
cwd=repo_dir,
|
||||
)
|
||||
existing = set(existing_output.splitlines()) if existing_output else set()
|
||||
|
||||
missing = sorted(desired - existing)
|
||||
for url in missing:
|
||||
cmd = f"git remote set-url --add --push origin {url}"
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
|
||||
else:
|
||||
print(f"[INFO] Adding push URL to 'origin': {url}")
|
||||
run_command(cmd, cwd=repo_dir, preview=False)
|
||||
|
||||
|
||||
def ensure_origin_remote(
|
||||
repo: Repository,
|
||||
ctx: RepoMirrorContext,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that a usable 'origin' remote exists and has all push URLs.
|
||||
"""
|
||||
repo_dir = ctx.repo_dir
|
||||
resolved_mirrors = ctx.resolved_mirrors
|
||||
|
||||
if not os.path.isdir(os.path.join(repo_dir, ".git")):
|
||||
print(f"[WARN] {repo_dir} is not a Git repository (no .git directory).")
|
||||
return
|
||||
|
||||
url = determine_primary_remote_url(repo, resolved_mirrors)
|
||||
|
||||
if not has_origin_remote(repo_dir):
|
||||
if not url:
|
||||
print(
|
||||
"[WARN] Could not determine URL for 'origin' remote. "
|
||||
"Please configure mirrors or provider/account/repository."
|
||||
)
|
||||
return
|
||||
|
||||
cmd = f"git remote add origin {url}"
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
|
||||
else:
|
||||
print(f"[INFO] Adding 'origin' remote in {repo_dir}: {url}")
|
||||
run_command(cmd, cwd=repo_dir, preview=False)
|
||||
else:
|
||||
current = current_origin_url(repo_dir)
|
||||
if current == url or not url:
|
||||
print(
|
||||
f"[INFO] 'origin' already points to "
|
||||
f"{current or '<unknown>'} (no change needed)."
|
||||
)
|
||||
else:
|
||||
# We do not auto-change origin here, only log the mismatch.
|
||||
print(
|
||||
"[INFO] 'origin' exists with URL "
|
||||
f"{current or '<unknown>'}; not changing to {url}."
|
||||
)
|
||||
|
||||
# Ensure all mirrors are present as push URLs
|
||||
_ensure_push_urls_for_origin(repo_dir, resolved_mirrors, preview)
|
||||
|
||||
|
||||
def is_remote_reachable(url: str, cwd: Optional[str] = None) -> bool:
|
||||
"""
|
||||
Check whether a remote repository is reachable via `git ls-remote`.
|
||||
|
||||
This does NOT modify anything; it only probes the remote.
|
||||
"""
|
||||
workdir = cwd or os.getcwd()
|
||||
try:
|
||||
# --exit-code → non-zero exit code if the remote does not exist
|
||||
run_git(["ls-remote", "--exit-code", url], cwd=workdir)
|
||||
return True
|
||||
except GitError:
|
||||
return False
|
||||
src/pkgmgr/actions/mirror/io.py (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
from typing import List, Mapping
|
||||
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
|
||||
def load_config_mirrors(repo: Repository) -> MirrorMap:
|
||||
mirrors = repo.get("mirrors") or {}
|
||||
result: MirrorMap = {}
|
||||
|
||||
if isinstance(mirrors, dict):
|
||||
for name, url in mirrors.items():
|
||||
if url:
|
||||
result[str(name)] = str(url)
|
||||
return result
|
||||
|
||||
if isinstance(mirrors, list):
|
||||
for entry in mirrors:
|
||||
if isinstance(entry, dict):
|
||||
name = entry.get("name")
|
||||
url = entry.get("url")
|
||||
if name and url:
|
||||
result[str(name)] = str(url)
|
||||
|
||||
return result
|
||||
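Both accepted config shapes normalise to the same mapping; a small sketch with invented values (not part of the module):

from pkgmgr.actions.mirror.io import load_config_mirrors

as_dict = {"mirrors": {"origin": "git@github.com:example/repo.git"}}
as_list = {"mirrors": [{"name": "origin", "url": "git@github.com:example/repo.git"}]}

assert load_config_mirrors(as_dict) == load_config_mirrors(as_list) == {
    "origin": "git@github.com:example/repo.git"
}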
|
||||
|
||||
def read_mirrors_file(repo_dir: str, filename: str = "MIRRORS") -> MirrorMap:
|
||||
"""
|
||||
Supports:
|
||||
NAME URL
|
||||
URL → auto name = hostname
|
||||
"""
|
||||
path = os.path.join(repo_dir, filename)
|
||||
mirrors: MirrorMap = {}
|
||||
|
||||
if not os.path.exists(path):
|
||||
return mirrors
|
||||
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as fh:
|
||||
for line in fh:
|
||||
stripped = line.strip()
|
||||
if not stripped or stripped.startswith("#"):
|
||||
continue
|
||||
|
||||
parts = stripped.split(None, 1)
|
||||
|
||||
# Case 1: "name url"
|
||||
if len(parts) == 2:
|
||||
name, url = parts
|
||||
# Case 2: "url" → auto-generate name
|
||||
elif len(parts) == 1:
|
||||
url = parts[0]
|
||||
parsed = urlparse(url)
|
||||
host = (parsed.netloc or "").split(":")[0]
|
||||
base = host or "mirror"
|
||||
name = base
|
||||
i = 2
|
||||
while name in mirrors:
|
||||
name = f"{base}{i}"
|
||||
i += 1
|
||||
else:
|
||||
continue
|
||||
|
||||
mirrors[name] = url
|
||||
except OSError as exc:
|
||||
print(f"[WARN] Could not read MIRRORS file at {path}: {exc}")
|
||||
|
||||
return mirrors
|
||||
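A sketch of the two accepted MIRRORS line formats, using a throwaway directory and invented URLs (illustrative only, not part of the module):

import os, tempfile
from pkgmgr.actions.mirror.io import read_mirrors_file

with tempfile.TemporaryDirectory() as repo_dir:
    with open(os.path.join(repo_dir, "MIRRORS"), "w", encoding="utf-8") as fh:
        fh.write("origin git@github.com:example/repo.git\n")   # NAME URL
        fh.write("https://codeberg.org/example/repo.git\n")    # bare URL -> name from hostname
    print(read_mirrors_file(repo_dir))
    # {'origin': 'git@github.com:example/repo.git',
    #  'codeberg.org': 'https://codeberg.org/example/repo.git'}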
|
||||
|
||||
def write_mirrors_file(
|
||||
repo_dir: str,
|
||||
mirrors: Mapping[str, str],
|
||||
filename: str = "MIRRORS",
|
||||
preview: bool = False,
|
||||
) -> None:
|
||||
|
||||
path = os.path.join(repo_dir, filename)
|
||||
lines = [f"{name} {url}" for name, url in sorted(mirrors.items())]
|
||||
content = "\n".join(lines) + ("\n" if lines else "")
|
||||
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would write MIRRORS file at {path}:")
|
||||
print(content or "(empty)")
|
||||
return
|
||||
|
||||
try:
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
with open(path, "w", encoding="utf-8") as fh:
|
||||
fh.write(content)
|
||||
print(f"[INFO] Wrote MIRRORS file at {path}")
|
||||
except OSError as exc:
|
||||
print(f"[ERROR] Failed to write MIRRORS file at {path}: {exc}")
|
||||
src/pkgmgr/actions/mirror/list_cmd.py (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from .context import build_context
|
||||
from .printing import print_header, print_named_mirrors
|
||||
from .types import Repository
|
||||
|
||||
|
||||
def list_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
source: str = "all",
|
||||
) -> None:
|
||||
"""
|
||||
List mirrors for the selected repositories.
|
||||
|
||||
source:
|
||||
- "config" → only mirrors from configuration
|
||||
- "file" → only mirrors from MIRRORS file
|
||||
- "resolved" → merged view (config + file, file wins)
|
||||
- "all" → show config + file + resolved
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_m = ctx.resolved_mirrors
|
||||
|
||||
print_header("[MIRROR]", ctx)
|
||||
|
||||
if source in ("config", "all"):
|
||||
print_named_mirrors("config mirrors", ctx.config_mirrors)
|
||||
if source == "config":
|
||||
print()
|
||||
continue # next repo
|
||||
|
||||
if source in ("file", "all"):
|
||||
print_named_mirrors("MIRRORS file", ctx.file_mirrors)
|
||||
if source == "file":
|
||||
print()
|
||||
continue # next repo
|
||||
|
||||
if source in ("resolved", "all"):
|
||||
print_named_mirrors("resolved mirrors", resolved_m)
|
||||
|
||||
print()
|
||||
src/pkgmgr/actions/mirror/merge_cmd.py (new file, 162 lines)
@@ -0,0 +1,162 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
|
||||
from .context import build_context
|
||||
from .io import write_mirrors_file
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
def _repo_key(repo: Repository) -> Tuple[str, str, str]:
|
||||
"""
|
||||
Normalised key for identifying a repository in config files.
|
||||
"""
|
||||
return (
|
||||
str(repo.get("provider", "")),
|
||||
str(repo.get("account", "")),
|
||||
str(repo.get("repository", "")),
|
||||
)
|
||||
|
||||
|
||||
def _load_user_config(path: str) -> Dict[str, object]:
|
||||
"""
|
||||
Load a user config YAML file as dict.
|
||||
Non-dicts yield {}.
|
||||
"""
|
||||
if not os.path.exists(path):
|
||||
return {}
|
||||
|
||||
try:
|
||||
with open(path, "r", encoding="utf-8") as f:
|
||||
data = yaml.safe_load(f) or {}
|
||||
return data if isinstance(data, dict) else {}
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Main merge command
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
def merge_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
source: str,
|
||||
target: str,
|
||||
preview: bool = False,
|
||||
user_config_path: Optional[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Merge mirrors between config and MIRRORS file.
|
||||
|
||||
Rules:
|
||||
- source, target ∈ {"config", "file"}.
|
||||
- merged = (target_mirrors overridden by source_mirrors)
|
||||
- If target == "file" → write MIRRORS file.
|
||||
- If target == "config":
|
||||
* update the user config YAML directly
|
||||
* write it using save_user_config()
|
||||
|
||||
The merge strategy is:
|
||||
dst + src (src wins on same name)
|
||||
"""
|
||||
|
||||
# Load user config once if we intend to write to it.
|
||||
user_cfg: Optional[Dict[str, object]] = None
|
||||
user_cfg_path_expanded: Optional[str] = None
|
||||
|
||||
if target == "config" and user_config_path and not preview:
|
||||
user_cfg_path_expanded = os.path.expanduser(user_config_path)
|
||||
user_cfg = _load_user_config(user_cfg_path_expanded)
|
||||
if not isinstance(user_cfg.get("repositories"), list):
|
||||
user_cfg["repositories"] = []
|
||||
|
||||
for repo in selected_repos:
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print("============================================================")
|
||||
print(f"[MIRROR MERGE] Repository: {ctx.identifier}")
|
||||
print(f"[MIRROR MERGE] Directory: {ctx.repo_dir}")
|
||||
print(f"[MIRROR MERGE] {source} → {target}")
|
||||
print("============================================================")
|
||||
|
||||
# Pick the correct source/target maps
|
||||
if source == "config":
|
||||
src = ctx.config_mirrors
|
||||
dst = ctx.file_mirrors
|
||||
else: # source == "file"
|
||||
src = ctx.file_mirrors
|
||||
dst = ctx.config_mirrors
|
||||
|
||||
# Merge (src overrides dst)
|
||||
merged: MirrorMap = dict(dst)
|
||||
merged.update(src)
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# WRITE TO FILE
|
||||
# ---------------------------------------------------------
|
||||
if target == "file":
|
||||
write_mirrors_file(ctx.repo_dir, merged, preview=preview)
|
||||
print()
|
||||
continue
|
||||
|
||||
# ---------------------------------------------------------
|
||||
# WRITE TO CONFIG
|
||||
# ---------------------------------------------------------
|
||||
if target == "config":
|
||||
# If preview or no config path → show intended output
|
||||
if preview or not user_cfg:
|
||||
print("[INFO] The following mirrors would be written to config:")
|
||||
if not merged:
|
||||
print(" (no mirrors)")
|
||||
else:
|
||||
for name, url in sorted(merged.items()):
|
||||
print(f" - {name}: {url}")
|
||||
print(" (Config not modified due to preview or missing path.)")
|
||||
print()
|
||||
continue
|
||||
|
||||
repos = user_cfg.get("repositories")
|
||||
target_key = _repo_key(repo)
|
||||
existing_repo: Optional[Repository] = None
|
||||
|
||||
# Find existing repo entry
|
||||
for entry in repos:
|
||||
if isinstance(entry, dict) and _repo_key(entry) == target_key:
|
||||
existing_repo = entry
|
||||
break
|
||||
|
||||
# Create entry if missing
|
||||
if existing_repo is None:
|
||||
existing_repo = {
|
||||
"provider": repo.get("provider"),
|
||||
"account": repo.get("account"),
|
||||
"repository": repo.get("repository"),
|
||||
}
|
||||
repos.append(existing_repo)
|
||||
|
||||
# Write or delete mirrors
|
||||
if merged:
|
||||
existing_repo["mirrors"] = dict(merged)
|
||||
else:
|
||||
existing_repo.pop("mirrors", None)
|
||||
|
||||
print(" [OK] Updated repo['mirrors'] in user config.")
|
||||
print()
|
||||
|
||||
# -------------------------------------------------------------
|
||||
# SAVE CONFIG (once at the end)
|
||||
# -------------------------------------------------------------
|
||||
if user_cfg is not None and user_cfg_path_expanded is not None and not preview:
|
||||
save_user_config(user_cfg, user_cfg_path_expanded)
|
||||
print(f"[OK] Saved updated config: {user_cfg_path_expanded}")
|
||||
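The merge rule stated in the docstring above ("dst + src, src wins on same name") amounts to a plain dict update; illustrative values only, not part of the module:

dst = {"origin": "git@old.example.com:me/repo.git",
       "backup": "git@backup.example.com:me/repo.git"}   # target side
src = {"origin": "git@github.com:me/repo.git"}           # source side

merged = dict(dst)
merged.update(src)  # src wins on "origin"
# merged == {"origin": "git@github.com:me/repo.git",
#            "backup": "git@backup.example.com:me/repo.git"}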
src/pkgmgr/actions/mirror/printing.py (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .types import MirrorMap, RepoMirrorContext
|
||||
|
||||
|
||||
def print_header(
|
||||
title_prefix: str,
|
||||
ctx: RepoMirrorContext,
|
||||
) -> None:
|
||||
"""
|
||||
Print a standard header for mirror-related output.
|
||||
|
||||
title_prefix examples:
|
||||
- "[MIRROR]"
|
||||
- "[MIRROR DIFF]"
|
||||
- "[MIRROR MERGE]"
|
||||
- "[MIRROR SETUP:LOCAL]"
|
||||
- "[MIRROR SETUP:REMOTE]"
|
||||
"""
|
||||
print("============================================================")
|
||||
print(f"{title_prefix} Repository: {ctx.identifier}")
|
||||
print(f"{title_prefix} Directory: {ctx.repo_dir}")
|
||||
print("============================================================")
|
||||
|
||||
|
||||
def print_named_mirrors(label: str, mirrors: MirrorMap) -> None:
|
||||
"""
|
||||
Print a labeled mirror block (e.g. '[config mirrors]').
|
||||
"""
|
||||
print(f" [{label}]")
|
||||
if mirrors:
|
||||
for name, url in sorted(mirrors.items()):
|
||||
print(f" - {name}: {url}")
|
||||
else:
|
||||
print(" (none)")
|
||||
src/pkgmgr/actions/mirror/setup_cmd.py (new file, 165 lines)
@@ -0,0 +1,165 @@
from __future__ import annotations

from typing import List, Tuple

from pkgmgr.core.git import run_git, GitError

from .context import build_context
from .git_remote import determine_primary_remote_url, ensure_origin_remote
from .types import Repository


def _setup_local_mirrors_for_repo(
    repo: Repository,
    repositories_base_dir: str,
    all_repos: List[Repository],
    preview: bool,
) -> None:
    """
    Ensure local Git state is sane (currently: 'origin' remote).
    """
    ctx = build_context(repo, repositories_base_dir, all_repos)

    print("------------------------------------------------------------")
    print(f"[MIRROR SETUP:LOCAL] {ctx.identifier}")
    print(f"[MIRROR SETUP:LOCAL] dir: {ctx.repo_dir}")
    print("------------------------------------------------------------")

    ensure_origin_remote(repo, ctx, preview=preview)
    print()


def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
    """
    Probe a remote mirror by running `git ls-remote <url>`.

    Returns:
        (True, "") on success,
        (False, error_message) on failure.

    Important:
    - Only the exit code is evaluated.
    - STDERR may contain hints/warnings and is NOT automatically an error.
    """
    try:
        # stdout is ignored entirely; all that matters is that the command
        # completes without raising GitError (i.e. exit code 0).
        run_git(["ls-remote", url], cwd=repo_dir)
        return True, ""
    except GitError as exc:
        return False, str(exc)


def _setup_remote_mirrors_for_repo(
    repo: Repository,
    repositories_base_dir: str,
    all_repos: List[Repository],
    preview: bool,
) -> None:
    """
    Remote-side setup / validation.

    Currently only **non-destructive checks** are performed:

    - For each mirror (from config + MIRRORS file, file wins):
      * `git ls-remote <url>` is executed.
      * Exit code 0 → [OK]
      * On failure → [WARN] + details from the GitError exception

    **No** provider APIs are called and no repositories are created.
    """
    ctx = build_context(repo, repositories_base_dir, all_repos)
    resolved_m = ctx.resolved_mirrors

    print("------------------------------------------------------------")
    print(f"[MIRROR SETUP:REMOTE] {ctx.identifier}")
    print(f"[MIRROR SETUP:REMOTE] dir: {ctx.repo_dir}")
    print("------------------------------------------------------------")

    if not resolved_m:
        # Optional: fall back to a heuristically determined URL, in case we
        # ever want to implement automatic repository creation.
        primary_url = determine_primary_remote_url(repo, resolved_m)
        if not primary_url:
            print(
                "[INFO] No mirrors configured (config or MIRRORS file), and no "
                "primary URL could be derived from provider/account/repository."
            )
            print()
            return

        ok, error_message = _probe_mirror(primary_url, ctx.repo_dir)
        if ok:
            print(f"[OK] Remote mirror (primary) is reachable: {primary_url}")
        else:
            print("[WARN] Primary remote URL is NOT reachable:")
            print(f" {primary_url}")
            if error_message:
                print(" Details:")
                for line in error_message.splitlines():
                    print(f" {line}")

        print()
        print(
            "[INFO] Remote checks are non-destructive and only use `git ls-remote` "
            "to probe mirror URLs."
        )
        print()
        return

    # Normal case: we have named mirrors from config/MIRRORS
    for name, url in sorted(resolved_m.items()):
        ok, error_message = _probe_mirror(url, ctx.repo_dir)
        if ok:
            print(f"[OK] Remote mirror '{name}' is reachable: {url}")
        else:
            print(f"[WARN] Remote mirror '{name}' is NOT reachable:")
            print(f" {url}")
            if error_message:
                print(" Details:")
                for line in error_message.splitlines():
                    print(f" {line}")

    print()
    print(
        "[INFO] Remote checks are non-destructive and only use `git ls-remote` "
        "to probe mirror URLs."
    )
    print()


def setup_mirrors(
    selected_repos: List[Repository],
    repositories_base_dir: str,
    all_repos: List[Repository],
    preview: bool = False,
    local: bool = True,
    remote: bool = True,
) -> None:
    """
    Set up mirrors for the selected repositories.

    local:
    - Configure local Git remotes (currently: ensure 'origin' is present and
      points to a reasonable URL).

    remote:
    - Non-destructive remote checks using `git ls-remote` for each mirror URL.
      No repositories are created on the provider.
    """
    for repo in selected_repos:
        if local:
            _setup_local_mirrors_for_repo(
                repo,
                repositories_base_dir=repositories_base_dir,
                all_repos=all_repos,
                preview=preview,
            )

        if remote:
            _setup_remote_mirrors_for_repo(
                repo,
                repositories_base_dir=repositories_base_dir,
                all_repos=all_repos,
                preview=preview,
            )
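A hedged usage sketch for setup_mirrors (the repository dict and base directory are invented; in pkgmgr the real values come from the merged configuration):

from pkgmgr.actions.mirror.setup_cmd import setup_mirrors

# Illustrative repository entry; real entries come from the user config.
repo = {"provider": "github.com", "account": "alice", "repository": "demo"}

setup_mirrors(
    selected_repos=[repo],
    repositories_base_dir="/home/alice/Repositories",
    all_repos=[repo],
    preview=True,   # forwarded to ensure_origin_remote(); remote probing is read-only anyway
    local=True,     # ensure 'origin' exists locally
    remote=True,    # probe each mirror URL with `git ls-remote`
)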
32 src/pkgmgr/actions/mirror/types.py Normal file
@@ -0,0 +1,32 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Any, Dict

Repository = Dict[str, Any]
MirrorMap = Dict[str, str]


@dataclass(frozen=True)
class RepoMirrorContext:
    """
    Bundle mirror-related information for a single repository.
    """

    identifier: str
    repo_dir: str
    config_mirrors: MirrorMap
    file_mirrors: MirrorMap

    @property
    def resolved_mirrors(self) -> MirrorMap:
        """
        Combined mirrors from config and MIRRORS file.

        Strategy:
        - Start from config mirrors
        - Overlay MIRRORS file (file wins on same name)
        """
        merged: MirrorMap = dict(self.config_mirrors)
        merged.update(self.file_mirrors)
        return merged
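A quick sketch of the merge precedence (URLs invented): the MIRRORS file overrides the config on name collisions.

from pkgmgr.actions.mirror.types import RepoMirrorContext

ctx = RepoMirrorContext(
    identifier="github.com/alice/demo",        # hypothetical
    repo_dir="/home/alice/Repositories/demo",  # hypothetical
    config_mirrors={"backup": "https://example.org/old.git"},
    file_mirrors={"backup": "https://example.org/new.git"},
)

# The file entry wins for the shared name 'backup'.
assert ctx.resolved_mirrors == {"backup": "https://example.org/new.git"}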
74 src/pkgmgr/cli/__init__.py Normal file
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
from __future__ import annotations

import os

from pkgmgr.core.config.load import load_config

from .context import CLIContext
from .parser import create_parser
from .dispatch import dispatch_command

__all__ = ["CLIContext", "create_parser", "dispatch_command", "main"]


# User config lives in the home directory:
# ~/.config/pkgmgr/config.yaml
USER_CONFIG_PATH = os.path.expanduser("~/.config/pkgmgr/config.yaml")

DESCRIPTION_TEXT = """\
\033[1;32mPackage Manager 🤖📦\033[0m
\033[3mKevin's multi-distro package and workflow manager.\033[0m
\033[1;34mKevin Veen-Birkenbach\033[0m – \033[4mhttps://www.veen.world/\033[0m

Built in \033[1;33mPython\033[0m on top of \033[1;33mNix flakes\033[0m to manage many
repositories and packaging formats (pyproject.toml, flake.nix,
PKGBUILD, debian, Ansible, …) with one CLI.

For details on any command, run:
  \033[1mpkgmgr <command> --help\033[0m
"""


def main() -> None:
    """
    Entry point for the pkgmgr CLI.
    """
    config_merged = load_config(USER_CONFIG_PATH)

    # Directories: be robust and provide sane defaults if missing
    directories = config_merged.get("directories") or {}
    repositories_dir = os.path.expanduser(
        directories.get("repositories", "~/Repositories")
    )
    binaries_dir = os.path.expanduser(
        directories.get("binaries", "~/.local/bin")
    )

    # Ensure the merged config actually contains the resolved directories
    config_merged.setdefault("directories", {})
    config_merged["directories"]["repositories"] = repositories_dir
    config_merged["directories"]["binaries"] = binaries_dir

    all_repositories = config_merged.get("repositories", [])

    ctx = CLIContext(
        config_merged=config_merged,
        repositories_base_dir=repositories_dir,
        all_repositories=all_repositories,
        binaries_dir=binaries_dir,
        user_config_path=USER_CONFIG_PATH,
    )

    parser = create_parser(DESCRIPTION_TEXT)
    args = parser.parse_args()

    if not getattr(args, "command", None):
        parser.print_help()
        return

    dispatch_command(args, ctx)


if __name__ == "__main__":
    main()
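The directory fallback logic in main() can be shown in isolation (a minimal sketch assuming an empty or partial user config, not the actual load_config output):

import os

directories = {}  # e.g. config_merged.get("directories") or {}

repositories_dir = os.path.expanduser(directories.get("repositories", "~/Repositories"))
binaries_dir = os.path.expanduser(directories.get("binaries", "~/.local/bin"))

print(repositories_dir)  # -> /home/<user>/Repositories
print(binaries_dir)      # -> /home/<user>/.local/bin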
@@ -6,6 +6,7 @@ from .version import handle_version
from .make import handle_make
from .changelog import handle_changelog
from .branch import handle_branch
from .mirror import handle_mirror_command

__all__ = [
    "handle_repos_command",
@@ -16,4 +17,5 @@ __all__ = [
    "handle_make",
    "handle_changelog",
    "handle_branch",
    "handle_mirror_command",
]
Some files were not shown because too many files have changed in this diff.