Switch to S3 binary cache with isolated store paths

- Build in isolated Nix container
- Push to S3 binary cache (no host /nix/store access)
- Pull specific store paths to alvin
- Mount only the specific /nix/store/<hash> path into /var/www (read-only)
- Generate signing keys for cache authentication
- Update documentation with binary cache setup

Security improvements:
- Build container has no access to host /nix/store
- Web server only mounts its specific store path
- Proper isolation at every layer
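
In outline, the pipeline this commit sets up looks like the following sketch (hedged; `.#site` is a placeholder flake output, cache URL and host name are taken from the docs below):

```bash
# 1. Runner builds inside the isolated container, then signs and pushes the closure
nix build .#site
nix copy \
  --to "s3://nix-cache?endpoint=s3.toph.so&scheme=https&secret-key=/tmp/cache-priv-key.pem" \
  "$(readlink -f result)"

# 2. alvin pulls only that closure from the cache (never from the runner's store)
ssh alvin "nix copy --from 's3://nix-cache?endpoint=s3.toph.so&scheme=https' '$(readlink -f result)'"

# 3. Nomad mounts exactly that path read-only into the web server container,
#    e.g. volumes = ["/nix/store/<hash>-site:/var/www:ro"]
```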

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Christopher Mühl 2026-02-16 14:53:11 +01:00
parent 33c8946041
commit 01d6a3e779
2 changed files with 72 additions and 44 deletions


@@ -33,6 +33,7 @@ jobs:
env:
S3_ACCESS_KEY: ${{ secrets.S3_ACCESS_KEY }}
S3_SECRET_KEY: ${{ secrets.S3_SECRET_KEY }}
NIX_SIGNING_KEY: ${{ secrets.NIX_SIGNING_KEY }}
```
**Inputs:**
@@ -171,7 +172,21 @@ cd /srv/infra
nix run .#bosun -- run s3
```
### 2. Create artifacts bucket
### 2. Generate binary cache signing keys
```bash
nix-store --generate-binary-cache-key cache.toph.so cache-priv-key.pem cache-pub-key.pem
# Public key (add to nix.conf trusted-public-keys on hosts that will fetch):
cat cache-pub-key.pem
# Example: cache.toph.so:9zFo64TPnxaQeyFM6NS9ou2Fd8OQv4Ia+MuLMjLBYjY=
# Private key (store in Forgejo secrets):
cat cache-priv-key.pem
# Keep this secret!
```
### 3. Create S3 buckets
```bash
# Configure AWS CLI
@@ -180,20 +195,43 @@ export AWS_SECRET_ACCESS_KEY=<your-secret-key>
export AWS_ENDPOINT_URL=https://s3.toph.so
export AWS_EC2_METADATA_DISABLED=true
# Create bucket
aws s3 mb s3://artifacts
# Create binary cache bucket
aws s3 mb s3://nix-cache
# Set public-read policy for the bucket
# (Optional) Create artifacts bucket for non-Nix deployments
aws s3 mb s3://artifacts
aws s3api put-bucket-acl --bucket artifacts --acl public-read
```
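Note that step 4 below has alvin substitute from `s3://nix-cache` without credentials, so the cache bucket must be world-readable as well. A sketch, assuming the same ACL mechanism used for the artifacts bucket above:

```bash
# Allow anonymous reads from the binary cache bucket
aws s3api put-bucket-acl --bucket nix-cache --acl public-read
```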
### 3. Add Forgejo secrets
### 4. Configure alvin to trust the binary cache
Add to `/srv/infra/hosts/alvin/default.nix`:
```nix
nix.settings = {
substituters = [
"https://cache.nixos.org"
"s3://nix-cache?endpoint=s3.toph.so&scheme=https"
];
trusted-public-keys = [
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
"cache.toph.so:9zFo64TPnxaQeyFM6NS9ou2Fd8OQv4Ia+MuLMjLBYjY=" # Your public key
];
};
```
Then deploy: `sudo nixos-rebuild switch --flake .#alvin`
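As a sanity check (assumes Nix ≥ 2.4 with the `nix` command enabled), confirm the host can reach the cache as a store:

```bash
nix store ping --store "s3://nix-cache?endpoint=s3.toph.so&scheme=https"
```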
### 5. Add Forgejo secrets
In your repository settings (or organization settings for global secrets):
- `S3_ACCESS_KEY`: The access key from S3 credentials
- `S3_SECRET_KEY`: The secret key from S3 credentials
- `S3_ACCESS_KEY`: S3 access key
- `S3_SECRET_KEY`: S3 secret key
- `NIX_SIGNING_KEY`: Contents of `cache-priv-key.pem`
The action then automatically creates individual Nomad service jobs for each site.
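After the first pipeline run you can confirm the generated job is running (shown with a hypothetical site name `example-site`; use whatever you passed as `site-name`):

```bash
nomad job status example-site
```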
### 6. Configure SSH access from runner to alvin
The runner needs to pull store paths to alvin's `/nix/store`. Add the runner's SSH key to alvin or use an agent socket mount.
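A minimal sketch, assuming a dedicated deploy key (paths and user are placeholders; if alvin is managed declaratively, prefer `users.users.<user>.openssh.authorizedKeys.keys` in the host config):

```bash
# On the runner: generate a deploy key
ssh-keygen -t ed25519 -N "" -f ~/.ssh/deploy_alvin -C "forgejo-runner"

# Authorize it on alvin (one-off, imperative variant)
cat ~/.ssh/deploy_alvin.pub | ssh alvin 'tee -a ~/.ssh/authorized_keys'

# Verify the runner can run nix on the host
ssh -i ~/.ssh/deploy_alvin alvin 'nix --version'
```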
## Examples


@@ -44,45 +44,36 @@ runs:
run: |
nix build ${{ inputs.flake-output }} --print-build-logs
# Find the result
if [ -L result ]; then
BUILD_OUTPUT="result"
else
echo "Error: No result symlink found after nix build"
exit 1
fi
# Get the store path
STORE_PATH=$(readlink -f result)
STORE_HASH=$(basename "$STORE_PATH")
echo "BUILD_OUTPUT=$BUILD_OUTPUT" >> $GITHUB_ENV
echo "STORE_PATH=$STORE_PATH" >> $GITHUB_ENV
echo "STORE_HASH=$STORE_HASH" >> $GITHUB_ENV
echo "📦 Built: $STORE_PATH"
- name: Package and upload to S3
- name: Push to binary cache
shell: bash
run: |
ARTIFACT_NAME="${{ github.sha }}.tar.gz"
# Package the built output
tar czf "/tmp/${ARTIFACT_NAME}" -C "$BUILD_OUTPUT" .
# Configure AWS CLI for S3
# Configure S3 binary cache
export AWS_ACCESS_KEY_ID="${{ env.S3_ACCESS_KEY }}"
export AWS_SECRET_ACCESS_KEY="${{ env.S3_SECRET_KEY }}"
export AWS_ENDPOINT_URL="${{ inputs.s3-endpoint }}"
export AWS_EC2_METADATA_DISABLED=true
# Upload to S3
aws s3 cp "/tmp/${ARTIFACT_NAME}" "s3://artifacts/${ARTIFACT_NAME}"
# Write the signing key to a file first: the secret-key store parameter expects a path, not key contents
echo "${{ env.NIX_SIGNING_KEY }}" > /tmp/cache-priv-key.pem
# Push to S3 binary cache
nix copy \
--to "s3://nix-cache?endpoint=${{ inputs.s3-endpoint }}&scheme=https&secret-key=/tmp/cache-priv-key.pem" \
"$STORE_PATH"
# Make publicly readable
aws s3api put-object-acl \
--bucket artifacts \
--key "${ARTIFACT_NAME}" \
--acl public-read
echo "✅ Pushed to binary cache: $STORE_HASH"
echo "📦 Artifact uploaded: ${{ inputs.s3-endpoint }}/artifacts/${ARTIFACT_NAME}"
- name: Deploy via Nomad
- name: Pull to host and deploy via Nomad
shell: bash
run: |
cat > /tmp/deploy-${{ inputs.site-name }}.nomad.json <<'NOMAD_EOF'
# First, pull the store path to the host's /nix/store
ssh alvin "nix copy --from 's3://nix-cache?endpoint=${{ inputs.s3-endpoint }}&scheme=https' '$STORE_PATH'"
# Now deploy Nomad job that mounts this specific store path
cat > /tmp/deploy-${{ inputs.site-name }}.nomad.json <<NOMAD_EOF
{
"Job": {
"ID": "${{ inputs.site-name }}",
@@ -90,7 +81,7 @@ runs:
"Type": "service",
"Datacenters": ["contabo"],
"Constraints": [{
"LTarget": "${node.unique.name}",
"LTarget": "\${node.unique.name}",
"RTarget": "alvin",
"Operand": "="
}],
@@ -120,17 +111,15 @@ runs:
"Driver": "docker",
"Config": {
"image": "joseluisq/static-web-server:2",
"ports": ["http"]
"ports": ["http"],
"volumes": [
"$STORE_PATH:/var/www:ro"
]
},
"Env": {
"SERVER_ROOT": "/local/public",
"SERVER_ROOT": "/var/www",
"SERVER_LOG_LEVEL": "info"
},
"Artifacts": [{
"GetterSource": "${{ inputs.s3-endpoint }}/artifacts/${{ github.sha }}.tar.gz",
"RelativeDest": "local/public",
"GetterMode": "dir"
}],
"Resources": {
"CPU": 100,
"MemoryMB": 64
@@ -148,3 +137,4 @@ runs:
run: |
echo "✅ Deployed ${{ inputs.site-name }}"
echo "📋 Traefik rule: ${{ inputs.traefik-rule }}"
echo "📦 Store path: $STORE_PATH"