Implement prestart fetch with S3 cache for dependencies

- Prestart task fetches from S3 binary cache into shared volume
- Server task serves from shared volume (read-only)
- Build uses S3 cache as substituter (ultra-fast builds for shared deps)
- Push entire closure to cache (store path + all runtime dependencies)
- No host involvement, pure container isolation

Architecture:
- Site A builds nodejs_20 + vite → pushed to cache
- Site B builds → pulls nodejs_20 + vite from cache (instant)
- Only builds site-specific code

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Christopher Mühl 2026-02-16 15:02:21 +01:00
parent 01d6a3e779
commit 297001e5fd
No known key found for this signature in database
GPG key ID: 925AC7D69955293F

View file

@@ -42,7 +42,15 @@ runs:
- name: Build site with Nix
shell: bash
run: |
nix build ${{ inputs.flake-output }} --print-build-logs
# Configure S3 as substituter to pull cached dependencies
export AWS_ACCESS_KEY_ID="${{ env.S3_ACCESS_KEY }}"
export AWS_SECRET_ACCESS_KEY="${{ env.S3_SECRET_KEY }}"
# Build with S3 cache as substituter (fetches cached deps)
nix build ${{ inputs.flake-output }} \
--print-build-logs \
--option substituters "https://cache.nixos.org s3://nix-cache?endpoint=${{ inputs.s3-endpoint }}&scheme=https" \
--option trusted-public-keys "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= $(cat /tmp/cache-pub-key.pem 2>/dev/null || echo '')"
# Get the store path
STORE_PATH=$(readlink -f result)
@@ -59,20 +67,21 @@ runs:
export AWS_ACCESS_KEY_ID="${{ env.S3_ACCESS_KEY }}"
export AWS_SECRET_ACCESS_KEY="${{ env.S3_SECRET_KEY }}"
# Push to S3 binary cache
# Push entire closure (derivation + all dependencies) to cache
nix copy \
--to "s3://nix-cache?endpoint=${{ inputs.s3-endpoint }}&scheme=https&secret-key=${{ env.NIX_SIGNING_KEY }}" \
--derivation \
"$STORE_PATH"
echo "✅ Pushed to binary cache: $STORE_HASH"
echo "✅ Pushed to binary cache: $STORE_HASH (with all dependencies)"
- name: Pull to host and deploy via Nomad
- name: Deploy via Nomad
shell: bash
run: |
# First, pull the store path to the host's /nix/store
ssh alvin "nix copy --from 's3://nix-cache?endpoint=${{ inputs.s3-endpoint }}&scheme=https' '$STORE_PATH'"
# Get S3 credentials for the fetch task
S3_ACCESS_KEY="${{ env.S3_ACCESS_KEY }}"
S3_SECRET_KEY="${{ env.S3_SECRET_KEY }}"
# Now deploy Nomad job that mounts this specific store path
cat > /tmp/deploy-${{ inputs.site-name }}.nomad.json <<NOMAD_EOF
{
"Job": {
@@ -106,25 +115,62 @@ runs:
"traefik.http.routers.${{ inputs.site-name }}.tls.certresolver=letsencrypt"
]
}],
"Tasks": [{
"Name": "server",
"Driver": "docker",
"Config": {
"image": "joseluisq/static-web-server:2",
"ports": ["http"],
"volumes": [
"$STORE_PATH:/var/www:ro"
]
"Volumes": [{
"Name": "site-data",
"Type": "host",
"Source": "site-data"
}],
"Tasks": [
{
"Name": "fetch",
"Driver": "docker",
"Lifecycle": {
"Hook": "prestart",
"Sidecar": false
},
"Config": {
"image": "nixos/nix:latest",
"command": "/bin/sh",
"args": [
"-c",
"nix copy --from 's3://nix-cache?endpoint=${{ inputs.s3-endpoint }}&scheme=https' '$STORE_PATH' && cp -r $STORE_PATH/* /alloc/data/"
]
},
"Env": {
"AWS_ACCESS_KEY_ID": "$S3_ACCESS_KEY",
"AWS_SECRET_ACCESS_KEY": "$S3_SECRET_KEY"
},
"VolumeMounts": [{
"Volume": "site-data",
"Destination": "/alloc/data"
}],
"Resources": {
"CPU": 200,
"MemoryMB": 256
}
},
"Env": {
"SERVER_ROOT": "/var/www",
"SERVER_LOG_LEVEL": "info"
},
"Resources": {
"CPU": 100,
"MemoryMB": 64
{
"Name": "server",
"Driver": "docker",
"Config": {
"image": "joseluisq/static-web-server:2",
"ports": ["http"]
},
"Env": {
"SERVER_ROOT": "/var/www",
"SERVER_LOG_LEVEL": "info"
},
"VolumeMounts": [{
"Volume": "site-data",
"Destination": "/var/www",
"ReadOnly": true
}],
"Resources": {
"CPU": 100,
"MemoryMB": 64
}
}
}]
]
}]
}
}