This keeps uploads from blocking builds, which could otherwise be painfully slow at times.
```nix
# Upload queue processor script.
uploadProcessor = pkgs.writeShellScriptBin "nix-upload-processor" ''
  #!/usr/bin/env bash
  set -eu

  QUEUE_DIR=/var/cache/nix/upload-queue
  QUEUE_FILE="$QUEUE_DIR/pending"
  PROCESSING="$QUEUE_DIR/processing"
  DONE="$QUEUE_DIR/done"
  PIDFILE="$QUEUE_DIR/processor.pid"

  mkdir -p "$QUEUE_DIR"
  touch "$QUEUE_FILE" "$PROCESSING" "$DONE"
  echo $$ > "$PIDFILE"
  echo "Started (PID: $$)"

  # Use env set by systemd.
  export AWS_ACCESS_KEY_ID=$(cat "$AWS_ACCESS_KEY_ID_PATH")
  export AWS_SECRET_ACCESS_KEY=$(cat "$AWS_SECRET_ACCESS_KEY_PATH")

  while true; do
    # Move pending to processing.
    if [ -s "$QUEUE_FILE" ]; then
      count=$(wc -l < "$QUEUE_FILE")
      echo "Processing $count new path(s)"
      cat "$QUEUE_FILE" >> "$PROCESSING"
      > "$QUEUE_FILE"
    fi

    # Process each path.
    while IFS= read -r path || [ -n "$path" ]; do
      [ -z "$path" ] && continue
      [ -d "$path" ] || continue

      size=$(du -sb "$path" 2>/dev/null | ${getExe pkgs.gawk} '{print $1}' || echo "0")

      # Check if done previously (avoid duplicates).
      if grep -qx "$path" "$DONE" 2>/dev/null; then
        echo "Skipping $path (already uploaded)"
        continue
      fi

      echo "Uploading $path ($((size / 1024)) KiB)"
      if /run/current-system/sw/bin/nix copy --to "${s3Cache}" "$path" 2>&1; then
        echo "Uploaded $path successfully"
        echo "$path" >> "$DONE"
      else
        echo "Failed to upload $path"
      fi
    done < "$PROCESSING"
    > "$PROCESSING"

    # Trim done file.
    if [ -s "$DONE" ]; then
      tail -n 1000 "$DONE" > "$DONE.tmp" || true
      mv "$DONE.tmp" "$DONE" || true
    fi

    sleep 5
  done
'';
```
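Once the systemd unit below is running, one way to sanity-check the processor is to tail its journal and enqueue a small store path by hand. This is a hedged example; the pending file path simply matches the `QUEUE_FILE` hardcoded in the script above:

```bash
# Follow the processor's log output.
journalctl -u nix-upload-processor.service -f

# In another shell: build something small and enqueue it manually.
# The processor should pick it up within its 5-second polling interval.
path=$(nix-build '<nixpkgs>' -A hello --no-out-link)
echo "$path" | sudo tee -a /var/cache/nix/upload-queue/pending
```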
```bash
# Upload to S3 cache (only paths larger than MIN_SIZE)
for output in $OUT_PATHS; do
  # Get the size of the store path using du
  size=$(du -sb "$output" 2>/dev/null | ${getExe pkgs.gawk} '{print $1}' || echo "0")
```
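The hook fragment above stops at the size check. A minimal sketch of how it could continue, assuming a `MIN_SIZE` threshold (in bytes) is set earlier in the hook and that it feeds the same pending file the processor polls:

```bash
  # Hypothetical continuation: only enqueue paths at or above the threshold.
  if [ "$size" -ge "$MIN_SIZE" ]; then
    # Hand the path to the background processor instead of uploading inline.
    echo "$output" >> /var/cache/nix/upload-queue/pending
  fi
done
```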
```nix
# Systemd service for continuous upload queue processing
systemd.services.nix-upload-processor = {
  description = "Nix binary cache upload queue processor";
  after = [
    "network.target"
    "nix-daemon.socket"
  ];
  wantedBy = [ "multi-user.target" ];
  serviceConfig = {
    ExecStart = "${getExe uploadProcessor}";
    LoadCredential = [
      "s3-access-key:${secrets.s3AccessKey.path}"
      "s3-secret-key:${secrets.s3SecretKey.path}"
    ];
    Restart = "on-failure";
    RestartSec = "10s";
    StateDirectory = "nix-upload-queue";
    StateDirectoryMode = "0755";
    CPUQuota = "25%";
  };
  environment = {
    AWS_ACCESS_KEY_ID_PATH = "${secrets.s3AccessKey.path}";
    AWS_SECRET_ACCESS_KEY_PATH = "${secrets.s3SecretKey.path}";
  };
};
```
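For the queue to ever receive anything, the enqueue snippet also has to be registered as the Nix daemon's post-build hook. A minimal sketch, assuming the hook above is wrapped as a hypothetical `uploadHook` via `pkgs.writeShellScript` (which yields the script's store path directly):

```nix
# Register the (hypothetical) enqueue script as the Nix daemon's post-build hook.
nix.settings.post-build-hook = toString uploadHook;
```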