feat: initial rigging — multi-repo NixOS + Nomad infrastructure management
Flake-parts module that repos import to declare hosts, jobs, and secrets. Nushell CLI (rigging) aggregates multiple repos and provides unified management: host deploy/build, job run/plan/stop, secret list/rekey. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
commit
892161eae2
9 changed files with 1851 additions and 0 deletions
587
cli/default.nix
Normal file
587
cli/default.nix
Normal file
|
|
@ -0,0 +1,587 @@
|
|||
# Build the `bosun` CLI: a bash script that compiles and deploys the Nomad
# jobs declared by this repo's flake. All job metadata is baked in at build
# time; runtime behavior is controlled via $NOMAD_ADDR / $BOSUN_FLAKE.
{
  pkgs,
  lib ? pkgs.lib,
  # Job definitions, keyed by job name; only the names are used here.
  jobs ? {},
  # Raw job modules (Nix functions); inspected to see which accept `vars`.
  jobModules ? {},
  # name -> pre-compiled JSON store path, for jobs without runtime vars.
  compiledJobs ? {},
  nomadLib ? {},
  # Default Nomad API endpoint; $NOMAD_ADDR overrides it at runtime.
  nomadAddress ? "http://127.0.0.1:4646",
  # Optional pinned flake reference; when null it is detected at runtime.
  flakeRef ? null,
  defaultVars ? {},
}:
pkgs.writeShellScriptBin "bosun" ''
  set -euo pipefail

  # Colors
  RED='\033[0;31m'
  GREEN='\033[0;32m'
  YELLOW='\033[1;33m'
  BLUE='\033[0;34m'
  CYAN='\033[0;36m'
  NC='\033[0m' # No Color

  # Runtime $NOMAD_ADDR wins over the build-time default.
  NOMAD_ADDR="''${NOMAD_ADDR:-${nomadAddress}}"
  export NOMAD_ADDR

  # Detect flake reference - walk up to find flake.nix
  detect_flake_ref() {
    local dir="$PWD"
    while [[ "$dir" != "/" ]]; do
      if [[ -f "$dir/flake.nix" ]]; then
        echo "$dir"
        return 0
      fi
      dir="$(dirname "$dir")"
    done
    # Nothing found: emit empty string; the caller treats that as an error.
    echo ""
  }

  # Set flake reference. A build-time flakeRef seeds the default and
  # $BOSUN_FLAKE always wins; otherwise fall back to runtime detection.
  ${if flakeRef != null then ''
    FLAKE_REF="''${BOSUN_FLAKE:-${flakeRef}}"
  '' else ''
    if [[ -n "''${BOSUN_FLAKE:-}" ]]; then
      FLAKE_REF="$BOSUN_FLAKE"
    else
      FLAKE_REF="$(detect_flake_ref)"
      if [[ -z "$FLAKE_REF" ]]; then
        echo "Error: Could not find flake.nix. Set BOSUN_FLAKE or run from within a flake." >&2
        exit 1
      fi
    fi
  ''}

  # Available jobs (names baked in at build time).
  JOBS=(${lib.concatStringsSep " " (lib.attrNames jobs)})

  # Check if a job accepts variables
  job_accepts_vars() {
    local job="$1"
    # Jobs that accept vars will have 'vars' in their function args
    # We check this at build time and encode it here
    case "$job" in
      ${lib.concatStringsSep "\n " (lib.mapAttrsToList (name: mod:
        let
          args = builtins.functionArgs mod;
          hasVars = args ? vars;
        in "${name}) echo ${if hasVars then "true" else "false"} ;;"
      ) jobModules)}
      *) echo "false" ;;
    esac
  }
|
||||
|
||||
usage() {
|
||||
echo "bosun - Nomad job deployment tool"
|
||||
echo ""
|
||||
echo "Usage: bosun <command> [options] [job-name]"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " list List all available jobs"
|
||||
echo " compile <job> Compile a job to JSON (stdout)"
|
||||
echo " inspect <job> Show compiled job with syntax highlighting"
|
||||
echo " run <job> Compile and deploy a job to Nomad"
|
||||
echo " dispatch [job] Dispatch a parameterized job (list if no job given)"
|
||||
echo " plan <job> Plan a job deployment (dry-run)"
|
||||
echo " stop <job> Stop a running job"
|
||||
echo " status [job] Show job status"
|
||||
echo " logs <job> [task] Show job logs"
|
||||
echo " generate [dir] Generate all job files to directory"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " -v, --var KEY=VALUE Set a variable (can be repeated)"
|
||||
echo " -m, --meta KEY=VALUE Set dispatch metadata (for dispatch command)"
|
||||
echo " --dry-run Show what would be done without executing"
|
||||
echo " --verbose Verbose output"
|
||||
echo " --help, -h Show this help message"
|
||||
echo ""
|
||||
echo "Environment:"
|
||||
echo " NOMAD_ADDR Nomad server address (default: $NOMAD_ADDR)"
|
||||
echo " BOSUN_FLAKE Flake reference (default: $FLAKE_REF)"
|
||||
echo ""
|
||||
echo "Available jobs:"
|
||||
for job in "''${JOBS[@]}"; do
|
||||
if [[ "$(job_accepts_vars "$job")" == "true" ]]; then
|
||||
echo " - $job ''${CYAN}(parameterized)''${NC}"
|
||||
else
|
||||
echo " - $job"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
log_info() {
|
||||
echo -e "''${BLUE}→''${NC} $1"
|
||||
}
|
||||
|
||||
log_success() {
|
||||
echo -e "''${GREEN}✓''${NC} $1"
|
||||
}
|
||||
|
||||
log_warn() {
|
||||
echo -e "''${YELLOW}⚠''${NC} $1"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo -e "''${RED}✗''${NC} $1" >&2
|
||||
}
|
||||
|
||||
# Check if job exists
|
||||
job_exists() {
|
||||
local job="$1"
|
||||
for j in "''${JOBS[@]}"; do
|
||||
if [[ "$j" == "$job" ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Get pre-compiled job file path (for jobs without runtime vars)
|
||||
get_static_job_file() {
|
||||
local job="$1"
|
||||
case "$job" in
|
||||
${lib.concatStringsSep "\n " (lib.mapAttrsToList (name: file: ''${name}) echo "${file}" ;;'') compiledJobs)}
|
||||
*) echo "" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
  # Compile job with variables using nix eval
  # Re-evaluates the flake impurely so runtime-provided vars can be passed
  # through; prints the job's JSON on stdout.
  compile_job_with_vars() {
    local job="$1"
    shift
    local vars_json="$1"

    # Progress goes to stderr so stdout stays pure JSON.
    log_info "Evaluating job with variables..." >&2

    # Use nix eval to get the job JSON with variables
    # NOTE(review): $vars_json is spliced into a Nix indented string; a
    # value containing two consecutive single quotes would break the
    # expression — assumed not to occur in practice, confirm upstream.
    ${pkgs.nix}/bin/nix eval \
      --json \
      --impure \
      --expr "
        let
          flake = builtins.getFlake \"$FLAKE_REF\";
          system = builtins.currentSystem;
          vars = builtins.fromJSON '''$vars_json''';
        in
          flake.legacyPackages.\''${system}.bosun.evalJobWithVars \"$job\" vars
      "
  }

  # Get job JSON - either static or dynamic with vars
  # Prefers the pre-compiled store file when no vars were given; otherwise
  # (or when no static file exists) evaluates the job dynamically.
  get_job_json() {
    local job="$1"
    local vars_json="$2"

    if [[ "$vars_json" == "{}" ]]; then
      # No vars, use static file
      local static_file
      static_file="$(get_static_job_file "$job")"
      if [[ -n "$static_file" ]]; then
        cat "$static_file"
        return 0
      fi
    fi

    # Has vars or no static file - evaluate dynamically
    compile_job_with_vars "$job" "$vars_json"
  }
|
||||
|
||||
  # List all jobs, marking those that accept runtime variables.
  cmd_list() {
    echo "Available jobs:"
    for job in "''${JOBS[@]}"; do
      if [[ "$(job_accepts_vars "$job")" == "true" ]]; then
        echo -e "  $job ''${CYAN}(parameterized)''${NC}"
      else
        echo "  $job"
      fi
    done
  }

  # Print the compiled job JSON for $1 (with vars JSON $2) to stdout.
  cmd_compile() {
    local job="$1"
    local vars_json="$2"

    if ! job_exists "$job"; then
      log_error "Unknown job: $job"
      echo "Available jobs: ''${JOBS[*]}"
      exit 1
    fi

    get_job_json "$job" "$vars_json"
  }

  # Pretty-print the compiled job, via jq when it is on PATH.
  cmd_inspect() {
    local job="$1"
    local vars_json="$2"

    if ! job_exists "$job"; then
      log_error "Unknown job: $job"
      exit 1
    fi

    local job_json
    job_json="$(get_job_json "$job" "$vars_json")"

    if command -v jq &> /dev/null; then
      echo "$job_json" | jq '.'
    else
      echo "$job_json"
    fi
  }

  # Dry-run a deployment against the Nomad scheduler ("nomad job plan").
  # $3 is the bosun-level --dry-run flag, which skips even the plan call.
  cmd_plan() {
    local job="$1"
    local vars_json="$2"
    local dry_run="$3"

    if ! job_exists "$job"; then
      log_error "Unknown job: $job"
      exit 1
    fi

    log_info "Planning job: $job"

    if [[ "$dry_run" == "true" ]]; then
      log_warn "DRY RUN - would execute: nomad job plan <job.json>"
      if [[ "$vars_json" != "{}" ]]; then
        log_info "Variables: $vars_json"
      fi
      return 0
    fi

    local job_json
    job_json="$(get_job_json "$job" "$vars_json")"

    # Nomad reads the job definition from stdin ("-").
    echo "$job_json" | ${pkgs.nomad}/bin/nomad job plan -
  }

  # Compile and submit a job to Nomad ("nomad job run").
  cmd_run() {
    local job="$1"
    local vars_json="$2"
    local dry_run="$3"

    if ! job_exists "$job"; then
      log_error "Unknown job: $job"
      exit 1
    fi

    log_info "Deploying job: $job"
    log_info "Nomad address: $NOMAD_ADDR"
    if [[ "$vars_json" != "{}" ]]; then
      log_info "Variables: $vars_json"
    fi

    if [[ "$dry_run" == "true" ]]; then
      log_warn "DRY RUN - would execute: nomad job run <job.json>"
      echo ""
      log_info "Job definition:"
      cmd_inspect "$job" "$vars_json"
      return 0
    fi

    local job_json
    job_json="$(get_job_json "$job" "$vars_json")"

    echo "$job_json" | ${pkgs.nomad}/bin/nomad job run -
    log_success "Job $job deployed successfully"
  }
|
||||
|
||||
  # Stop a running job by name; honors --dry-run.
  cmd_stop() {
    local job="$1"
    local dry_run="$2"

    log_info "Stopping job: $job"

    if [[ "$dry_run" == "true" ]]; then
      log_warn "DRY RUN - would execute: nomad job stop $job"
      return 0
    fi

    ${pkgs.nomad}/bin/nomad job stop "$job"
    log_success "Job $job stopped"
  }

  # Show status for one job, or the cluster-wide job list when no name given.
  cmd_status() {
    local job="''${1:-}"

    if [[ -z "$job" ]]; then
      ${pkgs.nomad}/bin/nomad job status
    else
      ${pkgs.nomad}/bin/nomad job status "$job"
    fi
  }

  # Show allocation logs for a job, optionally scoped to one task.
  cmd_logs() {
    local job="$1"
    local task="''${2:-}"

    if [[ -z "$task" ]]; then
      ${pkgs.nomad}/bin/nomad alloc logs -job "$job"
    else
      ${pkgs.nomad}/bin/nomad alloc logs -job "$job" "$task"
    fi
  }
|
||||
|
||||
  # Get parameterized job info
  # Prints "not_parameterized", or "required:<csv>|optional:<csv>" built
  # from the job's parameterized.meta_required / meta_optional lists.
  get_parameterized_info() {
    local job="$1"
    local job_json
    job_json="$(get_job_json "$job" "{}")"

    # Extract parameterized block if it exists
    echo "$job_json" | ${pkgs.jq}/bin/jq -r '
      .job | to_entries[0].value.parameterized // {} |
      if . == {} then
        "not_parameterized"
      else
        "required:" + (.meta_required // [] | join(",")) +
        "|optional:" + (.meta_optional // [] | join(","))
      end
    '
  }

  # Dispatch a parameterized job. With no job name, lists all dispatchable
  # jobs and their required/optional metadata keys. Reads the global META
  # associative array populated by -m/--meta during argument parsing.
  cmd_dispatch() {
    local job="''${1:-}"
    local dry_run="$2"

    # If no job specified, list parameterized jobs
    if [[ -z "$job" ]]; then
      echo "Parameterized jobs available for dispatch:"
      echo ""
      local found_any=false
      for j in "''${JOBS[@]}"; do
        local info
        info="$(get_parameterized_info "$j")"
        if [[ "$info" != "not_parameterized" ]]; then
          found_any=true
          # info is "required:<csv>|optional:<csv>"; split on the pipe.
          local required=$(echo "$info" | cut -d'|' -f1 | cut -d':' -f2)
          local optional=$(echo "$info" | cut -d'|' -f2 | cut -d':' -f2)

          echo -e "  ''${GREEN}$j''${NC}"
          if [[ -n "$required" ]]; then
            echo -e "    Required: ''${YELLOW}$required''${NC}"
          fi
          if [[ -n "$optional" ]]; then
            echo -e "    Optional: ''${CYAN}$optional''${NC}"
          fi
          echo ""
        fi
      done

      if [[ "$found_any" == "false" ]]; then
        echo "  No parameterized jobs found."
        echo ""
        echo "  To create a parameterized job, add a 'parameterized' block:"
        echo "    job.myjob.parameterized = {"
        echo "      meta_required = [\"username\"];"
        echo "      meta_optional = [\"password\"];"
        echo "    };"
      fi
      echo ""
      echo "Usage: bosun dispatch <job> -m key=value [-m key2=value2 ...]"
      return 0
    fi

    if ! job_exists "$job"; then
      log_error "Unknown job: $job"
      exit 1
    fi

    # Check if job is parameterized
    local info
    info="$(get_parameterized_info "$job")"
    if [[ "$info" == "not_parameterized" ]]; then
      log_error "Job '$job' is not parameterized. Use 'bosun run $job' instead."
      exit 1
    fi

    log_info "Dispatching job: $job"

    if [[ "$dry_run" == "true" ]]; then
      log_warn "DRY RUN - would execute: nomad job dispatch $job"
      if [[ ''${#META[@]} -gt 0 ]]; then
        for key in "''${!META[@]}"; do
          echo "  -meta $key=''${META[$key]}"
        done
      fi
      return 0
    fi

    # Build nomad dispatch command
    local dispatch_args=()
    for key in "''${!META[@]}"; do
      dispatch_args+=("-meta" "$key=''${META[$key]}")
    done

    ${pkgs.nomad}/bin/nomad job dispatch "''${dispatch_args[@]}" "$job"
    log_success "Job $job dispatched successfully"
  }

  # Write every job's compiled JSON into $1 (default ./generated),
  # applying the same vars JSON to each job.
  cmd_generate() {
    local outdir="''${1:-./generated}"
    local vars_json="$2"

    mkdir -p "$outdir"
    log_info "Generating jobs to $outdir"

    for job in "''${JOBS[@]}"; do
      local job_json
      job_json="$(get_job_json "$job" "$vars_json")"
      echo "$job_json" > "$outdir/$job.nomad.json"
      chmod 644 "$outdir/$job.nomad.json"
      log_success "$job.nomad.json"
    done

    echo ""
    log_success "Generated ''${#JOBS[@]} jobs in $outdir"
  }
|
||||
|
||||
  # Parse arguments
  # First non-option word is the command; remaining words go to ARGS.
  # -v/--var collects VARS (job variables); -m/--meta collects META
  # (dispatch metadata). Both are KEY=VALUE associative arrays.
  DRY_RUN=false
  VERBOSE=false
  COMMAND=""
  ARGS=()
  declare -A VARS
  declare -A META

  while [[ $# -gt 0 ]]; do
    case "$1" in
      -v|--var)
        if [[ $# -lt 2 ]]; then
          log_error "Missing value for $1"
          exit 1
        fi
        # Parse KEY=VALUE
        if [[ "$2" =~ ^([^=]+)=(.*)$ ]]; then
          VARS["''${BASH_REMATCH[1]}"]="''${BASH_REMATCH[2]}"
        else
          log_error "Invalid variable format: $2 (expected KEY=VALUE)"
          exit 1
        fi
        shift 2
        ;;
      -m|--meta)
        if [[ $# -lt 2 ]]; then
          log_error "Missing value for $1"
          exit 1
        fi
        # Parse KEY=VALUE
        if [[ "$2" =~ ^([^=]+)=(.*)$ ]]; then
          META["''${BASH_REMATCH[1]}"]="''${BASH_REMATCH[2]}"
        else
          log_error "Invalid metadata format: $2 (expected KEY=VALUE)"
          exit 1
        fi
        shift 2
        ;;
      --dry-run)
        DRY_RUN=true
        shift
        ;;
      --verbose)
        VERBOSE=true
        shift
        ;;
      --help|-h)
        usage
        exit 0
        ;;
      -*)
        log_error "Unknown option: $1"
        usage
        exit 1
        ;;
      *)
        # First bare word is the command; the rest are its arguments.
        if [[ -z "$COMMAND" ]]; then
          COMMAND="$1"
        else
          ARGS+=("$1")
        fi
        shift
        ;;
    esac
  done
|
||||
|
||||
# Convert VARS to JSON
|
||||
VARS_JSON="{"
|
||||
first=true
|
||||
for key in "''${!VARS[@]}"; do
|
||||
if [[ "$first" == "true" ]]; then
|
||||
first=false
|
||||
else
|
||||
VARS_JSON+=","
|
||||
fi
|
||||
# Escape the value for JSON
|
||||
value="''${VARS[$key]}"
|
||||
value="''${value//\\/\\\\}"
|
||||
value="''${value//\"/\\\"}"
|
||||
VARS_JSON+="\"$key\":\"$value\""
|
||||
done
|
||||
VARS_JSON+="}"
|
||||
|
||||
  # No command given: show help and exit non-zero.
  if [[ -z "$COMMAND" ]]; then
    usage
    exit 1
  fi

  # Dispatch to the command implementations. Commands that need a job name
  # validate ARGS[0] is present before delegating.
  case "$COMMAND" in
    list)
      cmd_list
      ;;
    compile)
      if [[ ''${#ARGS[@]} -lt 1 ]]; then
        log_error "Missing job name"
        exit 1
      fi
      cmd_compile "''${ARGS[0]}" "$VARS_JSON"
      ;;
    inspect)
      if [[ ''${#ARGS[@]} -lt 1 ]]; then
        log_error "Missing job name"
        exit 1
      fi
      cmd_inspect "''${ARGS[0]}" "$VARS_JSON"
      ;;
    plan)
      if [[ ''${#ARGS[@]} -lt 1 ]]; then
        log_error "Missing job name"
        exit 1
      fi
      cmd_plan "''${ARGS[0]}" "$VARS_JSON" "$DRY_RUN"
      ;;
    run)
      if [[ ''${#ARGS[@]} -lt 1 ]]; then
        log_error "Missing job name"
        exit 1
      fi
      cmd_run "''${ARGS[0]}" "$VARS_JSON" "$DRY_RUN"
      ;;
    stop)
      if [[ ''${#ARGS[@]} -lt 1 ]]; then
        log_error "Missing job name"
        exit 1
      fi
      cmd_stop "''${ARGS[0]}" "$DRY_RUN"
      ;;
    status)
      cmd_status "''${ARGS[0]:-}"
      ;;
    logs)
      if [[ ''${#ARGS[@]} -lt 1 ]]; then
        log_error "Missing job name"
        exit 1
      fi
      cmd_logs "''${ARGS[0]}" "''${ARGS[1]:-}"
      ;;
    generate)
      cmd_generate "''${ARGS[0]:-./generated}" "$VARS_JSON"
      ;;
    dispatch)
      cmd_dispatch "''${ARGS[0]:-}" "$DRY_RUN"
      ;;
    *)
      log_error "Unknown command: $COMMAND"
      usage
      exit 1
      ;;
  esac
''
|
||||
12
cli/rigging.nix
Normal file
12
cli/rigging.nix
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
# Package the `rigging` multi-repo CLI as a runnable Nushell application.
{
  pkgs,
  lib ? pkgs.lib,
  ...
}: let
  # Helper that wraps a .nu script into an executable package.
  writeNushellApplication = import ../lib/nushell.nix {inherit lib pkgs;};
in
  writeNushellApplication {
    name = "rigging";
    # External tools the script shells out to at runtime.
    runtimeInputs = with pkgs; [nix nomad jq];
    text = builtins.readFile ./rigging.nu;
  }
|
||||
510
cli/rigging.nu
Normal file
510
cli/rigging.nu
Normal file
|
|
@ -0,0 +1,510 @@
|
|||
# rigging — multi-repo infrastructure management CLI
|
||||
# Aggregates bosun-enabled repos and provides unified host/job/secret management.
|
||||
|
||||
# Absolute path of the shared rigging/bosun config file.
def config-path []: nothing -> string {
  [$env.HOME ".config" "bosun" "config.toml"] | path join
}

# Read the config from disk, defaulting to an empty repo table when the
# file does not exist yet.
def load-config []: nothing -> record {
  let cfg_file = (config-path)
  if not ($cfg_file | path exists) {
    return { repos: {} }
  }
  open $cfg_file
}

# Persist the config as TOML, creating the parent directory on first use.
def save-config [cfg: record]: nothing -> nothing {
  let cfg_file = (config-path)
  let parent = ($cfg_file | path dirname)
  if not ($parent | path exists) {
    mkdir $parent
  }
  $cfg | to toml | save -f $cfg_file
}
|
||||
|
||||
# Build manifest derivation and read the JSON
# Runs `nix build` on the repo's bosun-manifest flake output and opens the
# resulting store path (a JSON file).
def read-manifest [repo_path: string]: nothing -> record {
  let out = (^nix build $"($repo_path)#bosun-manifest" --no-link --print-out-paths | str trim)
  open $out
}

# Load all repos with their manifests
# Returns an empty list (after printing a hint) when nothing is registered.
# NOTE(review): read-manifest is not wrapped in try here, so one broken
# repo aborts every aggregated command — confirm whether that is intended
# (repo list does guard with try/catch).
def load-all []: nothing -> list<record> {
  let cfg = (load-config)
  if ($cfg.repos | is-empty) {
    print $"(ansi yellow)No repos registered. Use `rigging repo add <path>` first.(ansi reset)"
    return []
  }
  $cfg.repos | transpose name meta | each { |r|
    let manifest = (read-manifest $r.meta.path)
    { name: $r.name, path: $r.meta.path, manifest: $manifest }
  }
}
|
||||
|
||||
# Shared lookup: return the first registered repo whose manifest declares
# `name` under the given manifest attribute ("jobs" or "hosts"); `kind` is
# only used for the error message. Exits with an error when nothing matches.
def find-owning-repo [name: string, attr: string, kind: string]: nothing -> record {
  let repos = (load-all)
  let matches = ($repos | where { |r| $name in ($r.manifest | get $attr | columns) })
  if ($matches | is-empty) {
    print $"(ansi red)Error: ($kind) '($name)' not found in any registered repo(ansi reset)"
    exit 1
  }
  $matches | first
}

# Find which repo owns a given job
def find-job-repo [name: string]: nothing -> record {
  find-owning-repo $name "jobs" "job"
}

# Find which repo owns a given host
def find-host-repo [name: string]: nothing -> record {
  find-owning-repo $name "hosts" "host"
}
|
||||
|
||||
# --- Top-level ---
|
||||
|
||||
# Rigging — multi-repo infrastructure management
# Bare invocation prints the top-level help.
def main []: nothing -> nothing {
  print "rigging — multi-repo infrastructure management"
  print ""
  print "Usage: rigging <command>"
  print ""
  print "Commands:"
  print "  repo      Manage registered repos"
  print "  status    Aggregated overview of all repos"
  print "  host      Host management (list, deploy, build)"
  print "  job       Nomad job management (list, run, plan, stop, ...)"
  print "  secret    Secret management (list, rekey)"
  print ""
  print $"Config: (config-path)"
}

# --- Repo management ---

# Manage registered repos
def "main repo" []: nothing -> nothing {
  main repo list
}

# Register a new repo
# Builds the repo's manifest once to validate it and to learn its name.
def "main repo add" [
  path: string # Path to the repo (must contain a flake with bosun-manifest)
]: nothing -> nothing {
  let abs_path = ($path | path expand)
  if not ($"($abs_path)/flake.nix" | path exists) {
    print $"(ansi red)Error: no flake.nix found at ($abs_path)(ansi reset)"
    exit 1
  }

  print $"(ansi blue)→(ansi reset) Building manifest for ($abs_path)..."
  let manifest = (read-manifest $abs_path)
  let name = $manifest.name

  # Re-registering the same name overwrites the stored path (upsert).
  # NOTE(review): saving { repos: ... } drops any other top-level config
  # keys; currently only `repos` exists — confirm before adding more.
  let cfg = (load-config)
  let repos = ($cfg.repos | upsert $name { path: $abs_path })
  save-config { repos: $repos }

  let n_hosts = ($manifest.hosts | columns | length)
  let n_jobs = ($manifest.jobs | columns | length)
  print $"(ansi green)✓(ansi reset) Registered '($name)' — ($n_hosts) hosts, ($n_jobs) jobs"
}

# Remove a registered repo
def "main repo remove" [
  name: string # Name of the repo to remove
]: nothing -> nothing {
  let cfg = (load-config)
  if $name not-in ($cfg.repos | columns) {
    print $"(ansi red)Error: repo '($name)' not registered(ansi reset)"
    exit 1
  }
  let repos = ($cfg.repos | reject $name)
  save-config { repos: $repos }
  print $"(ansi green)✓(ansi reset) Removed '($name)'"
}
|
||||
|
||||
# List registered repos
# Each row shows host/job counts; a repo whose manifest fails to build is
# still listed with "?" counts instead of aborting the listing.
def "main repo list" []: nothing -> nothing {
  let cfg = (load-config)
  if ($cfg.repos | is-empty) {
    print "No repos registered. Use `rigging repo add <path>` to add one."
    return
  }
  $cfg.repos | transpose name meta | each { |r|
    let manifest = (try { read-manifest $r.meta.path } catch { null })
    if $manifest != null {
      let n_hosts = ($manifest.hosts | columns | length)
      let n_jobs = ($manifest.jobs | columns | length)
      { name: $r.name, path: $r.meta.path, hosts: $n_hosts, jobs: $n_jobs }
    } else {
      { name: $r.name, path: $r.meta.path, hosts: "?", jobs: "?" }
    }
  } | table
  | print
}

# --- Status ---

# Aggregated overview of all repos
# Prints three sections: repo summary, all hosts, all Nomad jobs.
def "main status" []: nothing -> nothing {
  let repos = (load-all)
  if ($repos | is-empty) { return }

  print $"(ansi blue_bold)Repos(ansi reset)"
  $repos | each { |r|
    let n_hosts = ($r.manifest.hosts | columns | length)
    let n_jobs = ($r.manifest.jobs | columns | length)
    { repo: $r.name, path: $r.path, hosts: $n_hosts, jobs: $n_jobs }
  } | table | print

  print ""
  print $"(ansi blue_bold)Hosts(ansi reset)"
  # Flatten hosts from every repo into one table.
  let hosts = ($repos | each { |r|
    $r.manifest.hosts | transpose name cfg | each { |h|
      {
        host: $h.name
        repo: $r.name
        system: $h.cfg.system
        class: $h.cfg.class
        target: ($h.cfg.targetHost? | default "local")
        tags: ($h.cfg.tags | str join ", ")
      }
    }
  } | flatten)
  if ($hosts | is-empty) {
    print "  (none)"
  } else {
    $hosts | table | print
  }

  print ""
  print $"(ansi blue_bold)Jobs(ansi reset)"
  # Flatten jobs from every repo into one table.
  let jobs = ($repos | each { |r|
    $r.manifest.jobs | transpose name meta | each { |j|
      {
        job: $j.name
        repo: $r.name
        type: $j.meta.type
        datacenters: ($j.meta.datacenters | str join ", ")
        parameterized: $j.meta.parameterized
      }
    }
  } | flatten)
  if ($jobs | is-empty) {
    print "  (none)"
  } else {
    $jobs | table | print
  }
}
|
||||
|
||||
# --- Host management ---

# Host management
def "main host" []: nothing -> nothing {
  main host list
}

# List all hosts across repos
def "main host list" [
  --tag: string # Filter by tag
]: nothing -> nothing {
  let repos = (load-all)
  if ($repos | is-empty) { return }

  # Collect hosts from every repo; tags stay a list until after filtering.
  let hosts = ($repos | each { |r|
    $r.manifest.hosts | transpose name cfg | each { |h|
      {
        host: $h.name
        repo: $r.name
        system: $h.cfg.system
        class: $h.cfg.class
        target: ($h.cfg.targetHost? | default "local")
        tags: ($h.cfg.tags | default [])
      }
    }
  } | flatten)

  let filtered = if $tag != null {
    $hosts | where { |h| $tag in $h.tags }
  } else {
    $hosts
  }

  # Render tags as a comma-separated string for display.
  $filtered | update tags { |r| $r.tags | str join ", " } | table | print
}
|
||||
|
||||
# Deploy a host (nixos-rebuild or darwin-rebuild)
def "main host deploy" [
  name: string # Host name
  --dry-run # Print command without executing
]: nothing -> nothing {
  let repo = (find-host-repo $name)
  let host_cfg = ($repo.manifest.hosts | get $name)
  let cmd = (build-deploy-cmd $repo.path $name $host_cfg false)

  if $dry_run {
    print $"(ansi yellow)DRY RUN(ansi reset) — would execute:"
    print $"  ($cmd | str join ' ')"
    return
  }

  print $"(ansi blue)→(ansi reset) Deploying ($name) from ($repo.name)..."
  # `^...$cmd` is not valid Nushell (the external command name cannot be a
  # spread); run-external takes command + args as spread arguments.
  run-external ...$cmd
}

# Build a host (without activating)
def "main host build" [
  name: string # Host name
  --dry-run # Print command without executing
]: nothing -> nothing {
  let repo = (find-host-repo $name)
  let host_cfg = ($repo.manifest.hosts | get $name)
  let cmd = (build-deploy-cmd $repo.path $name $host_cfg true)

  if $dry_run {
    print $"(ansi yellow)DRY RUN(ansi reset) — would execute:"
    print $"  ($cmd | str join ' ')"
    return
  }

  print $"(ansi blue)→(ansi reset) Building ($name) from ($repo.name)..."
  # Same fix as deploy: spread the argv list through run-external.
  run-external ...$cmd
}
|
||||
|
||||
# Construct the rebuild command for a host
# Returns the argv (as a list) for nixos-rebuild / darwin-rebuild; remote
# target/build hosts are appended when declared in the host config.
# NOTE(review): --use-remote-sudo is deprecated on newer nixos-rebuild in
# favor of --sudo — confirm the target version.
def build-deploy-cmd [
  repo_path: string
  name: string
  host_cfg: record
  build_only: bool
]: nothing -> list<string> {
  let rebuilder = if $host_cfg.class == "darwin" { "darwin-rebuild" } else { "nixos-rebuild" }
  let action = if $build_only { "build" } else { "switch" }

  mut cmd = [$rebuilder $action "--flake" $"($repo_path)#($name)"]

  # `append` with a list appends each element individually.
  if $host_cfg.targetHost? != null {
    $cmd = ($cmd | append ["--target-host" $host_cfg.targetHost "--use-remote-sudo"])
  }

  if $host_cfg.buildHost? != null {
    $cmd = ($cmd | append ["--build-host" $host_cfg.buildHost])
  }

  $cmd
}
|
||||
|
||||
# --- Job management ---

# Nomad job management
def "main job" []: nothing -> nothing {
  main job list
}

# List all Nomad jobs across repos
def "main job list" []: nothing -> nothing {
  let repos = (load-all)
  if ($repos | is-empty) { return }

  let jobs = ($repos | each { |r|
    $r.manifest.jobs | transpose name meta | each { |j|
      {
        job: $j.name
        repo: $r.name
        type: $j.meta.type
        datacenters: ($j.meta.datacenters | str join ", ")
        parameterized: $j.meta.parameterized
      }
    }
  } | flatten)

  if ($jobs | is-empty) {
    print "No jobs found."
  } else {
    $jobs | table | print
  }
}

# Compile and deploy a job to Nomad
# Delegates to the owning repo's `bosun` CLI via `nix run`.
def "main job run" [
  name: string # Job name
  --dry-run # Print command without executing
  ...rest: string # Extra args passed to the repo-local bosun CLI (e.g. -v KEY=VALUE)
]: nothing -> nothing {
  let repo = (find-job-repo $name)
  let args = if $dry_run {
    ["run" $name "--dry-run" ...$rest]
  } else {
    ["run" $name ...$rest]
  }

  print $"(ansi blue)→(ansi reset) Running job ($name) from ($repo.name)..."
  ^nix run $"($repo.path)#bosun" -- ...$args
}

# Plan a job deployment (dry-run)
def "main job plan" [
  name: string # Job name
  ...rest: string # Extra args
]: nothing -> nothing {
  let repo = (find-job-repo $name)
  print $"(ansi blue)→(ansi reset) Planning job ($name) from ($repo.name)..."
  ^nix run $"($repo.path)#bosun" -- "plan" $name ...$rest
}
|
||||
|
||||
# Stop a running job
def "main job stop" [
  name: string # Job name
  --dry-run # Print command without executing
]: nothing -> nothing {
  let repo = (find-job-repo $name)
  let args = if $dry_run {
    ["stop" $name "--dry-run"]
  } else {
    ["stop" $name]
  }

  print $"(ansi blue)→(ansi reset) Stopping job ($name)..."
  ^nix run $"($repo.path)#bosun" -- ...$args
}

# Show job status
# With no name, queries via the first repo that has Nomad enabled (all
# repos are assumed to point at the same cluster).
def "main job status" [
  name?: string # Job name (omit for all)
]: nothing -> nothing {
  if $name == null {
    # Find any repo with nomad enabled and query status
    let repos = (load-all)
    let nomad_repos = ($repos | where { |r| not ($r.manifest.nomad | is-empty) })
    if ($nomad_repos | is-empty) {
      print "No repos with Nomad enabled."
      return
    }
    let repo = ($nomad_repos | first)
    ^nix run $"($repo.path)#bosun" -- "status"
  } else {
    let repo = (find-job-repo $name)
    ^nix run $"($repo.path)#bosun" -- "status" $name
  }
}

# Show job logs
def "main job logs" [
  name: string # Job name
  task?: string # Task name (optional)
]: nothing -> nothing {
  let repo = (find-job-repo $name)
  if $task != null {
    ^nix run $"($repo.path)#bosun" -- "logs" $name $task
  } else {
    ^nix run $"($repo.path)#bosun" -- "logs" $name
  }
}

# Pretty-print compiled job JSON
def "main job inspect" [
  name: string # Job name
  ...rest: string # Extra args (e.g. -v KEY=VALUE)
]: nothing -> nothing {
  let repo = (find-job-repo $name)
  ^nix run $"($repo.path)#bosun" -- "inspect" $name ...$rest
}
|
||||
|
||||
# Generate all job JSON files
# Runs `bosun generate` in every repo that declares jobs.
def "main job generate" [
  dir?: string # Output directory (default: ./generated)
  ...rest: string # Extra args
]: nothing -> nothing {
  let repos = (load-all)
  let nomad_repos = ($repos | where { |r| not ($r.manifest.jobs | is-empty) })

  for repo in $nomad_repos {
    print $"(ansi blue)→(ansi reset) Generating jobs for ($repo.name)..."
    let args = if $dir != null {
      ["generate" $dir ...$rest]
    } else {
      ["generate" ...$rest]
    }
    ^nix run $"($repo.path)#bosun" -- ...$args
  }
}

# Dispatch a parameterized job
def "main job dispatch" [
  name?: string # Job name (omit to list parameterized jobs)
  ...rest: string # Extra args (e.g. -m KEY=VALUE)
]: nothing -> nothing {
  if $name == null {
    # List parameterized jobs
    let repos = (load-all)
    let param_jobs = ($repos | each { |r|
      $r.manifest.jobs | transpose jname meta
      | where { |j| $j.meta.parameterized }
      | each { |j| { job: $j.jname, repo: $r.name } }
    } | flatten)

    if ($param_jobs | is-empty) {
      print "No parameterized jobs found."
    } else {
      print "Parameterized jobs:"
      $param_jobs | table | print
    }
    return
  }

  let repo = (find-job-repo $name)
  ^nix run $"($repo.path)#bosun" -- "dispatch" $name ...$rest
}
|
||||
|
||||
# --- Secret management ---
|
||||
|
||||
# Secret management
|
||||
def "main secret" []: nothing -> nothing {
|
||||
main secret list
|
||||
}
|
||||
|
||||
# List secrets across repos
def "main secret list" []: nothing -> nothing {
  let config = (load-config)
  if ($config.repos | is-empty) {
    print "No repos registered."
    return
  }

  # One row per .age file found under each repo's secrets/ directory,
  # with the path shown relative to the repo root.
  let rows = (
    $config.repos
    | transpose name meta
    | each { |repo|
        let secrets_dir = $"($repo.meta.path)/secrets"
        if not ($secrets_dir | path exists) {
          []
        } else {
          glob $"($secrets_dir)/**/*.age"
          | each { |file|
              {
                repo: $repo.name
                secret: ($file | str replace $"($repo.meta.path)/" "")
              }
            }
        }
      }
    | flatten
  )

  $rows | table | print
}
|
||||
|
||||
# Rekey secrets for a repo
def "main secret rekey" [
  --repo: string # Repo name (omit to rekey all)
]: nothing -> nothing {
  let config = (load-config)

  # Resolve the target repo list: either the single requested repo
  # (validated against the registry) or every registered repo.
  let targets = if $repo == null {
    $config.repos | transpose name meta | each { |r| { name: $r.name, path: $r.meta.path } }
  } else {
    if $repo not-in ($config.repos | columns) {
      print $"(ansi red)Error: repo '($repo)' not registered(ansi reset)"
      exit 1
    }
    [{ name: $repo, path: ($config.repos | get $repo | get path) }]
  }

  # Delegate the actual rekeying to each repo's own agenix package.
  for target in $targets {
    print $"(ansi blue)→(ansi reset) Rekeying secrets for ($target.name)..."
    ^nix run $"($target.path)#agenix" -- rekey -a
    print $"(ansi green)✓(ansi reset) ($target.name) rekeyed"
  }
}
|
||||
202
flake-module.nix
Normal file
202
flake-module.nix
Normal file
|
|
@ -0,0 +1,202 @@
|
|||
# Rigging flake-parts module
# Import this in your flake to get Nomad job management + multi-repo infrastructure CLI
#
# Structure:
#   - top-level `bosun` options: repo identity (meta.name) + host declarations
#   - perSystem `bosun` options: Nomad job discovery/compilation + CLI packages
# Outputs:
#   - packages.bosun-manifest : JSON describing this repo (always)
#   - packages.rigging        : multi-repo orchestrator CLI (always)
#   - packages.bosun, apps.bosun, legacyPackages.bosun : only when enabled
topArgs @ {
  lib,
  config,
  flake-parts-lib,
  ...
}: let
  inherit (lib) mkOption mkIf mkMerge types;
  inherit (flake-parts-lib) mkPerSystemOption;

  # Top-level bosun config — accessed lazily via closure from within
  # perSystem below (the module system resolves this without recursion
  # because it is only forced when building the manifest).
  topCfg = topArgs.config.bosun;
in {
  options = {
    bosun = {
      # Repo identity used as the key when aggregating multiple repos.
      meta.name = mkOption {
        type = types.str;
        description = "Short name identifying this repo (e.g. 'infra', 'dotfiles')";
      };

      # Declarative host inventory; consumed by the rigging CLI for
      # deploy/build targeting (serialized into the manifest below).
      hosts = mkOption {
        type = types.attrsOf (types.submodule {
          options = {
            system = mkOption {
              type = types.str;
              default = "x86_64-linux";
              description = "System architecture";
            };
            class = mkOption {
              type = types.enum ["nixos" "darwin"];
              default = "nixos";
              description = "Host class (nixos or darwin)";
            };
            targetHost = mkOption {
              type = types.nullOr types.str;
              default = null;
              description = "SSH target for remote deployment (null = local)";
            };
            buildHost = mkOption {
              type = types.nullOr types.str;
              default = null;
              description = "SSH host for remote building (null = local)";
            };
            tags = mkOption {
              type = types.listOf types.str;
              default = [];
              description = "Freeform tags for filtering (e.g. 'server', 'desktop', 'contabo')";
            };
          };
        });
        default = {};
        description = "Host declarations for this repo";
      };
    };

    perSystem = mkPerSystemOption ({
      config,
      pkgs,
      system,
      ...
    }: {
      options.bosun = {
        enable = mkOption {
          type = types.bool;
          default = false;
          description = "Enable bosun Nomad job management";
        };

        jobsDir = mkOption {
          type = types.nullOr types.path;
          default = null;
          description = "Directory containing job definitions (auto-discovered)";
        };

        jobs = mkOption {
          type = types.lazyAttrsOf types.raw;
          default = {};
          description = "Explicit job definitions (merged with discovered jobs)";
        };

        nomadLib = mkOption {
          type = types.lazyAttrsOf types.raw;
          default = {};
          description = "Additional nomadLib helpers to merge";
        };

        defaultVars = mkOption {
          type = types.attrsOf types.str;
          default = {};
          description = "Default variables passed to all jobs";
        };

        nomadAddress = mkOption {
          type = types.str;
          default = "http://127.0.0.1:4646";
          description = "Nomad server address";
        };

        flakeRef = mkOption {
          type = types.nullOr types.str;
          default = null;
          description = "Flake reference for runtime evaluation (auto-detected if null)";
        };
      };

      config = let
        cfg = config.bosun;
        bosunLib = import ./lib;
        # Base Nomad helpers, with user-supplied extras layered on top
        # (user entries win on name collisions).
        nomadLib =
          (bosunLib.nomad {inherit lib;})
          // cfg.nomadLib;

        # Discover jobs from directory if specified
        discoveredJobs =
          if cfg.jobsDir != null
          then bosunLib.discoverJobs {
            path = cfg.jobsDir;
            inherit lib nomadLib;
          }
          else {};

        # Merge discovered with explicit jobs (explicit takes precedence)
        allJobModules = discoveredJobs // cfg.jobs;

        # Evaluate all jobs with default vars (for static compilation)
        evaluatedJobs = lib.mapAttrs (name: jobModule:
          bosunLib.evalJob {
            job = jobModule;
            inherit lib nomadLib;
            vars = cfg.defaultVars;
          }
        ) allJobModules;

        # Compile jobs to JSON files in nix store (static, no runtime vars)
        compiledJobs = bosunLib.compileJobs {
          inherit pkgs lib;
          jobs = evaluatedJobs;
        };

        # Job metadata for the manifest.
        # NOTE(review): `builtins.head (builtins.attrValues …)` assumes every
        # evaluated job has exactly one entry under `job`; an empty job set
        # would throw here — confirm jobs are always `{ job.<name> = …; }`.
        jobManifest = lib.mapAttrs (name: evaluated:
          let jobSpec = builtins.head (builtins.attrValues evaluated.job);
          in {
            type = jobSpec.type or "service";
            datacenters = jobSpec.datacenters or [];
            # A job module is "parameterized" iff its function signature
            # declares a `vars` argument (same check as lib/evalJob).
            parameterized = (builtins.functionArgs allJobModules.${name}) ? vars;
          }
        ) evaluatedJobs;

        # The main bosun CLI with runtime variable support
        bosunCli = import ./cli {
          inherit pkgs lib nomadLib;
          jobs = evaluatedJobs;
          jobModules = allJobModules;
          inherit compiledJobs;
          nomadAddress = cfg.nomadAddress;
          flakeRef = cfg.flakeRef;
          defaultVars = cfg.defaultVars;
        };

        # Rigging CLI (multi-repo orchestrator)
        riggingCli = import ./cli/rigging.nix {inherit pkgs lib;};
      in
        mkMerge [
          # Always available: manifest + rigging
          {
            # Machine-readable description of this repo, consumed by the
            # rigging CLI when aggregating multiple repos.
            packages.bosun-manifest = pkgs.writeText "bosun-manifest.json" (builtins.toJSON {
              name = topCfg.meta.name;
              hosts = topCfg.hosts;
              jobs = if cfg.enable then jobManifest else {};
              nomad = lib.optionalAttrs cfg.enable {address = cfg.nomadAddress;};
            });

            packages.rigging = riggingCli;
          }

          # Only when Nomad jobs are enabled
          (mkIf cfg.enable {
            packages.bosun = bosunCli;

            apps.bosun = {
              type = "app";
              program = "${bosunCli}/bin/bosun";
            };

            # Expose job modules for runtime evaluation with variables
            # (reachable via `nix eval .#legacyPackages.<system>.bosun…`).
            legacyPackages.bosun = {
              jobModules = allJobModules;
              inherit evaluatedJobs compiledJobs nomadLib;

              # Re-evaluate a job with caller-supplied vars at runtime.
              evalJobWithVars = name: vars:
                bosunLib.evalJob {
                  job = allJobModules.${name};
                  inherit lib nomadLib vars;
                };
            };
          })
        ];
    });
  };
}
|
||||
61
flake.lock
generated
Normal file
61
flake.lock
generated
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
{
|
||||
"nodes": {
|
||||
"flake-parts": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1769996383,
|
||||
"narHash": "sha256-AnYjnFWgS49RlqX7LrC4uA+sCCDBj0Ry/WOJ5XWAsa0=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "57928607ea566b5db3ad13af0e57e921e6b12381",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1771008912,
|
||||
"narHash": "sha256-gf2AmWVTs8lEq7z/3ZAsgnZDhWIckkb+ZnAo5RzSxJg=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "a82ccc39b39b621151d6732718e3e250109076fa",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1769909678,
|
||||
"narHash": "sha256-cBEymOf4/o3FD5AZnzC3J9hLbiZ+QDT/KDuyHXVJOpM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "72716169fe93074c333e8d0173151350670b824c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-parts": "flake-parts",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
20
flake.nix
Normal file
20
flake.nix
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
{
  description = "Rigging — multi-repo NixOS + Nomad infrastructure management";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-parts.url = "github:hercules-ci/flake-parts";
  };

  outputs = {
    self,
    nixpkgs,
    flake-parts,
    ...
  }: {
    # Both attribute names expose the same flake-parts module; `rigging`
    # is the descriptive alias, `default` the conventional entry point.
    flakeModules = {
      default = ./flake-module.nix;
      rigging = ./flake-module.nix;
    };

    # Helper function library (importable without instantiating nixpkgs).
    lib = import ./lib;
  };
}
|
||||
123
lib/default.nix
Normal file
123
lib/default.nix
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
# Bosun library - Nix-based Nomad job definitions
{
  # Nomad-specific helpers (constraints, Traefik tags, task builders, …).
  # Exposed as a function `{lib}: {…}` — see ./nomad.nix.
  nomad = import ./nomad.nix;

  # Discover all jobs in a directory
  # Each subdirectory with a default.nix is treated as a job
  # Jobs can be:
  # - { lib, nomadLib }: { job.name = {...}; } # simple job
  # - { lib, nomadLib, vars }: { job.name = {...}; } # parameterized job
  #
  # Returns: attrset of raw (unevaluated) job modules keyed by dir name.
  # NOTE(review): `extraArgs` is accepted but not used by discovery itself —
  # confirm whether it was meant to be threaded through to evaluation.
  discoverJobs = {
    path,
    lib,
    nomadLib ? (import ./nomad.nix {inherit lib;}),
    extraArgs ? {},
  }: let
    # List all entries in the directory
    entries = builtins.readDir path;

    # Filter to directories that have a default.nix
    jobDirs = lib.filterAttrs (name: type:
      type == "directory" && builtins.pathExists (path + "/${name}/default.nix")
    ) entries;

    # Import each job
    importJob = name: let
      jobPath = path + "/${name}";
      jobModule = import jobPath;
    in
      # Return the raw module - evaluation happens later with vars
      jobModule;
  in
    lib.mapAttrs (name: _: importJob name) jobDirs;

  # Evaluate a discovered job with given arguments.
  # Only passes `vars` when the job's function signature declares it,
  # so simple (non-parameterized) jobs don't fail on an unexpected arg.
  evalJob = {
    job, # The imported job module (function)
    lib,
    nomadLib ? (import ./nomad.nix {inherit lib;}),
    vars ? {}, # Variables to pass to parameterized jobs
    extraArgs ? {},
  }: let
    # Check function arity to see if it accepts vars
    funcArgs = builtins.functionArgs job;
    hasVars = funcArgs ? vars;

    baseArgs = {inherit lib nomadLib;} // extraArgs;
    fullArgs = if hasVars then baseArgs // {inherit vars;} else baseArgs;
  in
    job fullArgs;

  # Discover and evaluate all jobs (convenience wrapper)
  loadJobs = {
    path,
    lib,
    nomadLib ? (import ./nomad.nix {inherit lib;}),
    vars ? {}, # Global vars passed to all jobs
    extraArgs ? {},
  }: let
    # Self-import so the sibling functions can be referenced from inside
    # this flat attrset (Nix caches the import, so this is cheap).
    discovered = import ./default.nix;
    jobs = discovered.discoverJobs {inherit path lib nomadLib extraArgs;};
  in
    lib.mapAttrs (name: job:
      discovered.evalJob {inherit job lib nomadLib vars extraArgs;}
    ) jobs;

  # Compile a set of jobs to JSON files
  # jobs: attrset of job definitions { name = { job.name = {...}; }; }
  # pkgs: nixpkgs
  # Returns: attrset name -> store path of <name>.nomad.json.
  compileJobs = {
    pkgs,
    jobs,
    lib ? pkgs.lib,
  }:
    lib.mapAttrs (
      name: job:
        pkgs.writeText "${name}.nomad.json" (builtins.toJSON job)
    ) jobs;

  # Build a derivation containing all compiled jobs
  # (one directory with every <name>.nomad.json inside).
  compileJobsDir = {
    pkgs,
    jobs,
    lib ? pkgs.lib,
  }: let
    jobFiles = lib.mapAttrs (
      name: job:
        pkgs.writeText "${name}.nomad.json" (builtins.toJSON job)
    ) jobs;
  in
    pkgs.runCommand "nomad-jobs" {} ''
      mkdir -p $out
      ${lib.concatStrings (lib.mapAttrsToList (name: file: ''
        cp ${file} $out/${name}.nomad.json
      '')
      jobFiles)}
    '';

  # Create a script to generate all job files to a directory
  # (default output dir: ./generated, overridable via first argument).
  mkGenerateScript = {
    pkgs,
    jobs,
    lib ? pkgs.lib,
  }: let
    jobFiles = lib.mapAttrs (
      name: job:
        pkgs.writeText "${name}.nomad.json" (builtins.toJSON job)
    ) jobs;
  in
    pkgs.writeShellScriptBin "bosun-generate" ''
      set -euo pipefail
      OUTDIR="''${1:-$(pwd)/generated}"
      mkdir -p "$OUTDIR"
      ${lib.concatStrings (lib.mapAttrsToList (name: file: ''
        cp ${file} "$OUTDIR/${name}.nomad.json"
        chmod 644 "$OUTDIR/${name}.nomad.json"
        echo "✓ ${name}.nomad.json"
      '')
      jobFiles)}
      echo ""
      echo "Generated ${toString (lib.length (lib.attrNames jobs))} jobs in $OUTDIR"
    '';
}
|
||||
223
lib/nomad.nix
Normal file
223
lib/nomad.nix
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
# Nomad-specific helper functions
{lib ? (import <nixpkgs> {}).lib}: let
  # Build a single-element constraint list binding `attribute` to `value`.
  # Nomad node attributes use the literal `${…}` interpolation syntax, so
  # callers pass pre-escaped attribute strings.
  constrainTo = attribute: value: [{inherit attribute value;}];
in {
  # Pin a job/group to a specific hostname
  pinToHost = hostname: constrainTo "\${attr.unique.hostname}" hostname;

  # Pin to a node class
  pinToClass = class: constrainTo "\${node.class}" class;

  # Pin to a datacenter
  pinToDatacenter = dc: constrainTo "\${node.datacenter}" dc;

  # Require specific metadata
  requireMeta = name: value: constrainTo "\${meta.${name}}" value;

  # Generate Traefik service tags for reverse proxy routing.
  # The middlewares tag is only emitted when middlewares are given.
  traefikTags = {
    name,
    domain,
    entrypoint ? "websecure",
    certResolver ? "letsencrypt",
    middlewares ? [],
  }:
    [
      "traefik.enable=true"
      "traefik.http.routers.${name}.rule=Host(`${domain}`)"
      "traefik.http.routers.${name}.entrypoints=${entrypoint}"
      "traefik.http.routers.${name}.tls=true"
      "traefik.http.routers.${name}.tls.certresolver=${certResolver}"
    ]
    ++ lib.optional (middlewares != [])
    "traefik.http.routers.${name}.middlewares=${lib.concatStringsSep "," middlewares}";

  # Generate Consul service tags; version/env tags appear only when set.
  consulTags = {
    name,
    version ? null,
    env ? null,
  }:
    ["service=${name}"]
    ++ lib.optional (version != null) "version=${version}"
    ++ lib.optional (env != null) "env=${env}";

  # Common resource presets (cpu in MHz, memory in MB — Nomad conventions).
  resources = {
    tiny = {
      cpu = 100;
      memory = 128;
    };
    small = {
      cpu = 256;
      memory = 256;
    };
    medium = {
      cpu = 512;
      memory = 512;
    };
    large = {
      cpu = 1024;
      memory = 1024;
    };
    xlarge = {
      cpu = 2048;
      memory = 2048;
    };
  };

  # Docker task helper: builds `{ <name> = { driver = "docker"; … }; }`.
  # Optional config keys (ports/volumes/args/command) are omitted entirely
  # when left at their defaults, keeping the rendered job spec minimal.
  mkDockerTask = {
    name,
    image,
    ports ? [],
    env ? {},
    volumes ? [],
    args ? [],
    command ? null,
    resources ? {
      cpu = 256;
      memory = 256;
    },
    templates ? [],
  }: {
    ${name} = {
      driver = "docker";
      config =
        {inherit image;}
        // lib.optionalAttrs (ports != []) {inherit ports;}
        // lib.optionalAttrs (volumes != []) {inherit volumes;}
        // lib.optionalAttrs (args != []) {inherit args;}
        // lib.optionalAttrs (command != null) {inherit command;};
      inherit env resources;
      template = templates;
    };
  };

  # Template helper for Nomad variables.
  # NOTE(review): `path` is accepted but currently unused — confirm whether
  # it was meant to feed the template source; kept for interface stability.
  mkNomadVarTemplate = {
    path,
    destPath ? "secrets/env",
    envvars ? true,
    content,
  }: {
    data = content;
    destination = destPath;
    env = envvars;
  };

  # Template helper for file configs; change_signal only emitted when set.
  mkConfigTemplate = {
    destPath,
    content,
    changeMode ? "restart",
    changeSignal ? null,
  }:
    {
      data = content;
      destination = destPath;
      change_mode = changeMode;
    }
    // lib.optionalAttrs (changeSignal != null) {change_signal = changeSignal;};

  # Network helper.
  # Port specs may be a bare int (static port), an attrset (passed through),
  # or anything else (mapped to {} — i.e. a dynamic port).
  mkNetwork = {
    mode ? "bridge",
    ports ? {},
  }: {
    inherit mode;
    port = lib.mapAttrs (
      _: spec:
        if builtins.isInt spec
        then {static = spec;}
        else if builtins.isAttrs spec
        then spec
        else {}
    )
    ports;
  };

  # Service definition helper (provider defaults to Nomad-native services).
  mkService = {
    name,
    port,
    provider ? "nomad",
    tags ? [],
    checks ? [],
  }: {
    inherit name port provider tags;
    check = checks;
  };

  # HTTP health check
  httpCheck = {
    path ? "/health",
    interval ? "10s",
    timeout ? "2s",
  }: {
    type = "http";
    inherit path interval timeout;
  };

  # TCP health check
  tcpCheck = {
    interval ? "10s",
    timeout ? "2s",
  }: {
    type = "tcp";
    inherit interval timeout;
  };
}
|
||||
113
lib/nushell.nix
Normal file
113
lib/nushell.nix
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
# Build an executable Nushell script package (analogous to
# pkgs.writeShellApplication, but targeting Nushell).
#
# Fixes vs. previous version:
#  - The generated env-setup emitted `export ${name}`, which is not a valid
#    Nushell statement (`export` is a module keyword there); in Nushell every
#    entry of $env is inherited by child processes, so a plain assignment
#    suffices. The bogus line is removed.
#  - Values were quoted with `lib.toShellVar`, which produces POSIX-shell
#    quoting (NAME='…' with '\''' escapes) that Nushell does not parse.
#    Values are now serialized with builtins.toJSON, yielding a double-quoted
#    literal Nushell can read.
#  - The checkPhase doc comment wrongly claimed a shellcheck/`bash -n`
#    default; the actual default is `nu-check`.
{
  lib,
  pkgs,
  ...
}: {
  /*
  The name of the script to write.

  Type: String
  */
  name,
  /*
  The shell script's text, not including a shebang.

  Type: String
  */
  text,
  /*
  Inputs to add to the shell script's `$PATH` at runtime.

  Type: [String|Derivation]
  */
  runtimeInputs ? [],
  /*
  Extra environment variables to set at runtime. Values are serialized
  with toJSON; plain string values are expected.

  Type: AttrSet
  */
  runtimeEnv ? null,
  /*
  `stdenv.mkDerivation`'s `meta` argument.

  Type: AttrSet
  */
  meta ? {},
  /*
  `stdenv.mkDerivation`'s `passthru` argument.

  Type: AttrSet
  */
  passthru ? {},
  /*
  The `checkPhase` to run. Defaults to validating the script with
  Nushell's own `nu-check`.

  The script path will be given as `$target` in the `checkPhase`.

  Type: String
  */
  checkPhase ? null,
  /*
  Extra arguments to pass to `stdenv.mkDerivation`.

  :::{.caution}
  Certain derivation attributes are used internally,
  overriding those could cause problems.
  :::

  Type: AttrSet
  */
  derivationArgs ? {},
  /*
  Whether to inherit the current `$PATH` in the script.

  Type: Bool
  */
  inheritPath ? true,
}: let
  nu = pkgs.nushell;
in
  pkgs.writeTextFile {
    inherit
      name
      meta
      passthru
      derivationArgs
      ;
    executable = true;
    destination = "/bin/${name}";
    allowSubstitutes = true;
    preferLocalBuild = false;

    text =
      ''
        #!${nu}${nu.shellPath}

        use std/util "path add"
      ''
      # Environment setup: `$env.NAME = "…"` is both the assignment and the
      # export in Nushell (children inherit all of $env). toJSON's escapes
      # are accepted by Nushell's double-quoted strings for ASCII content;
      # NOTE(review): exotic values relying on JSON \uXXXX escapes should be
      # verified against the target Nushell version.
      + lib.optionalString (runtimeEnv != null) (
        lib.concatMapAttrsStringSep "" (varName: value: ''
          $env.${varName} = ${builtins.toJSON value}
        '')
        runtimeEnv
      )
      # PATH setup: optionally start from an empty PATH, then prepend each
      # runtime input's bin/ via the std-lib `path add` helper.
      + lib.optionalString (runtimeInputs != []) (''
        ${lib.optionalString (! inheritPath) "$env.PATH = []"}
      ''
        + lib.concatMapStringsSep "" (input: ''
          path add '${input}/bin'
        '')
        runtimeInputs)
      + text;

    # Parse-check the generated script with Nushell itself unless the caller
    # supplied a custom checkPhase. `$target` is expanded by the stdenv
    # (bash) before nu receives the command string.
    checkPhase =
      if checkPhase == null
      then ''
        runHook preCheck
        ${nu}${nu.shellPath} -c "nu-check --debug $target"
        runHook postCheck
      ''
      else checkPhase;
  }
|
||||
Loading…
Add table
Reference in a new issue