Skip to content

Instantly share code, notes, and snippets.

@finesse-fingers
Last active February 23, 2026 02:29
Show Gist options
  • Select an option

  • Save finesse-fingers/5d371e6a56daba303480bd4bada2bebd to your computer and use it in GitHub Desktop.

Select an option

Save finesse-fingers/5d371e6a56daba303480bd4bada2bebd to your computer and use it in GitHub Desktop.
End-to-end agentic github security and code quality scans with copilot CLI
#!/usr/bin/env bash
set -euo pipefail
###############################################################################
# alert-planner.sh
#
# Fetches GitHub security alerts (Dependabot + Code Scanning) and code quality
# alerts (Code Scanning), picks the highest-priority unprocessed alerts,
# generates remediation plans using Copilot CLI, and creates GitHub issues.
# Uses "security" and "code-quality" labels for deduplication — no local
# state files needed.
#
# Usage: ./scripts/alert-planner.sh [--dry-run] [--count N]
#
# Options:
# --dry-run Preview what would happen without making changes
# --count N Process up to N alerts in a single run (default: 1)
#
# Prerequisites: gh (authenticated), copilot CLI, git
###############################################################################
# Labels applied to auto-generated issues. They double as the deduplication
# namespace: an alert is considered "planned" once an issue carrying one of
# these labels references it (see build_planned_set).
SECURITY_LABEL="security"
SECURITY_LABEL_COLOR="d73a4a"
SECURITY_LABEL_DESCRIPTION="Auto-generated security remediation plan"
CODE_QUALITY_LABEL="code-quality"
CODE_QUALITY_LABEL_COLOR="0e8a16"
CODE_QUALITY_LABEL_DESCRIPTION="Auto-generated code quality remediation plan"
# Runtime flags (overwritten by CLI parsing below) and temp-file bookkeeping
# consumed by the cleanup trap.
DRY_RUN=false
MAX_COUNT=1
ALERT_LIST="" # path of the temp file holding the fetched-alerts JSON array
TEMP_FILES=() # every mktemp result is registered here for cleanup
# Delete every temp file registered in TEMP_FILES, plus the alert-list file
# when one was created. Installed as a trap so it runs on any exit path.
cleanup() {
  local tmp
  for tmp in "${TEMP_FILES[@]}"; do
    rm -f "$tmp" 2>/dev/null
  done
  if [[ -n "$ALERT_LIST" && -f "$ALERT_LIST" ]]; then
    rm -f "$ALERT_LIST"
  fi
}
trap cleanup EXIT INT TERM
# ── CLI parsing ──────────────────────────────────────────────────────────────
# --dry-run : preview only, no issues created.
# --count N : process up to N alerts this run (N must be a positive integer).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=true; shift ;;
    --count)
      # Require a strictly positive decimal integer. The previous check only
      # rejected empty/option-like values, so e.g. "--count abc" slipped
      # through and later crashed the [[ $MAX_COUNT -gt 1 ]] arithmetic test.
      if [[ -z "${2:-}" || ! "$2" =~ ^[1-9][0-9]*$ ]]; then
        echo "❌ --count requires a positive integer argument" >&2; exit 1
      fi
      MAX_COUNT="$2"; shift 2 ;;
    *) echo "❌ Unknown argument: $1" >&2; echo "Usage: ./scripts/alert-planner.sh [--dry-run] [--count N]" >&2; exit 1 ;;
  esac
done
if [[ "$DRY_RUN" == true ]]; then
  echo "🔍 Dry-run mode — no issues will be created"
fi
if [[ "$MAX_COUNT" -gt 1 ]]; then
  echo "🔄 Batch mode — processing up to $MAX_COUNT alerts"
fi
# ── Prerequisites ────────────────────────────────────────────────────────────
# Verify the required CLIs are installed, gh is authenticated, and we are
# inside a git repository. Exits 1 with a diagnostic on the first failure.
check_prerequisites() {
  local tool missing=()
  for tool in gh copilot git jq; do
    command -v "$tool" >/dev/null 2>&1 || missing+=("$tool")
  done
  if [[ ${#missing[@]} -gt 0 ]]; then
    echo "❌ Missing required tools: ${missing[*]}" >&2
    exit 1
  fi
  if ! gh auth status >/dev/null 2>&1; then
    echo "❌ gh CLI is not authenticated. Run 'gh auth login' first." >&2
    exit 1
  fi
  if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    echo "❌ Not inside a git repository." >&2
    exit 1
  fi
}
# ── Repo detection ───────────────────────────────────────────────────────────
# Derive REPO_FULL ("owner/name"), REPO_OWNER, and REPO_NAME from the origin
# remote URL. Handles both SSH (git@github.com:owner/repo.git) and HTTPS
# (https://github.com/owner/repo.git) forms.
detect_repo() {
  local origin_url
  origin_url=$(git remote get-url origin 2>/dev/null)
  # Drop everything through "github.com:" or "github.com/", then ".git".
  REPO_FULL=$(printf '%s\n' "$origin_url" | sed -E 's#.*github\.com[:/]##; s/\.git$//')
  REPO_OWNER=$(printf '%s\n' "$REPO_FULL" | cut -d/ -f1)
  REPO_NAME=$(printf '%s\n' "$REPO_FULL" | cut -d/ -f2)
  if [[ -z "$REPO_OWNER" || -z "$REPO_NAME" ]]; then
    echo "❌ Could not detect repo owner/name from git remote." >&2
    exit 1
  fi
  echo "📦 Repository: $REPO_OWNER/$REPO_NAME"
}
# ── Label management ────────────────────────────────────────────────────────
# Ensure both tracking labels exist in the repo, creating any that are
# missing. Creation errors are swallowed: a concurrent run may have created
# the label between our check and our create.
ensure_labels() {
  # Helper: create label $1 (color $2, description $3) if it doesn't exist.
  _ensure_one_label() {
    local name="$1" color="$2" description="$3"
    if ! gh label list --repo "$REPO_OWNER/$REPO_NAME" --search "$name" --json name --jq '.[].name' 2>/dev/null | grep -qx "$name"; then
      echo "🏷️ Creating label '$name'..."
      gh label create "$name" \
        --repo "$REPO_OWNER/$REPO_NAME" \
        --color "$color" \
        --description "$description" 2>/dev/null || true
    fi
  }
  _ensure_one_label "$SECURITY_LABEL" "$SECURITY_LABEL_COLOR" "$SECURITY_LABEL_DESCRIPTION"
  _ensure_one_label "$CODE_QUALITY_LABEL" "$CODE_QUALITY_LABEL_COLOR" "$CODE_QUALITY_LABEL_DESCRIPTION"
}
# ── Fetch alerts (server-side filtered & sorted) ────────────────────────────
#
# Instead of fetching all 2500+ alerts and sorting client-side, we query each
# API with severity filters in priority order (critical → low) and stop as
# soon as we find an unprocessed alert. This is dramatically faster.
# Page size for each per-severity request; passed as per_page to both the
# Dependabot and Code Scanning endpoints.
BATCH_SIZE=20 # alerts to fetch per severity tier
# Emit one JSON object per open Dependabot alert at the given severity
# ($1), oldest first, normalized to the shared alert schema consumed by
# build_alert_list (source/category/number/severity/title/package/
# description/html_url/manifest_path/patched_version/cve). API failures are
# reported to stderr but do not abort the run — the tier simply yields
# nothing.
fetch_dependabot_alerts() {
local severity="$1"
local stderr_file
stderr_file=$(mktemp)
TEMP_FILES+=("$stderr_file")
# --jq flattens each alert into the normalized schema; absent fields fall
# back to "none"/"N/A" via the // alternative operator.
gh api "/repos/$REPO_OWNER/$REPO_NAME/dependabot/alerts?state=open&severity=${severity}&sort=created&direction=asc&per_page=${BATCH_SIZE}" \
--jq '.[] | {
source: "dependabot",
category: "security",
number: .number,
severity: .security_vulnerability.severity,
title: (.security_vulnerability.package.name + " - " + .security_advisory.summary),
package: .security_vulnerability.package.name,
description: .security_advisory.description,
html_url: .html_url,
manifest_path: .dependency.manifest_path,
patched_version: (.security_vulnerability.first_patched_version.identifier // "none"),
cve: (.security_advisory.cve_id // "N/A")
}' 2>"$stderr_file" || {
echo " ⚠️ Dependabot API failed (severity=$severity): $(cat "$stderr_file")" >&2
}
# Early removal is just tidiness; the file is also registered in TEMP_FILES.
rm -f "$stderr_file"
}
# Emit one JSON object per open Code Scanning alert at the given severity
# ($1), oldest first, normalized to the same schema as
# fetch_dependabot_alerts. Alerts with a security_severity_level are
# classified "security"; all others are "code-quality". API failures are
# reported to stderr without aborting the run.
fetch_code_scanning_alerts() {
local severity="$1"
local stderr_file
stderr_file=$(mktemp)
TEMP_FILES+=("$stderr_file")
gh api "/repos/$REPO_OWNER/$REPO_NAME/code-scanning/alerts?state=open&severity=${severity}&sort=created&direction=asc&per_page=${BATCH_SIZE}" \
--jq '.[] | {
source: "codescan",
category: (if .rule.security_severity_level != null then "security" else "code-quality" end),
number: .number,
severity: (if .rule.security_severity_level != null then .rule.security_severity_level else .rule.severity end),
title: (.rule.description + " in " + (.most_recent_instance.location.path // "unknown")),
package: .rule.id,
description: .rule.full_description,
html_url: .html_url,
manifest_path: (.most_recent_instance.location.path // "unknown"),
patched_version: "N/A",
cve: "N/A"
}' 2>"$stderr_file" || {
echo " ⚠️ Code Scanning API failed (severity=$severity): $(cat "$stderr_file")" >&2
}
# Early removal is just tidiness; the file is also registered in TEMP_FILES.
rm -f "$stderr_file"
}
# Severity tiers in priority order
# Dependabot: critical, high, medium, low
# Code Scanning: error, warning, note
# NOTE(review): these two arrays are informational only — nothing in this
# script reads them; SEVERITY_TIERS below is what actually drives fetching.
DEPENDABOT_SEVERITIES=("critical" "high" "medium" "low")
CODE_SCANNING_SEVERITIES=("error" "warning" "note")
# Unified priority order: each entry is "source:severity". The two sources
# are interleaved so the most urgent class from either API is handled first.
SEVERITY_TIERS=(
"dependabot:critical"
"codescan:error"
"dependabot:high"
"codescan:warning"
"dependabot:medium"
"codescan:note"
"dependabot:low"
)
# ── Build the prioritized alert list ────────────────────────────────────────
# Fetch every severity tier in priority order, concatenate the per-tier JSON
# objects, and slurp them into a single JSON array (already priority-sorted
# because tiers are visited highest-first). Sets ALERT_LIST to the temp file
# holding that array. Exits 0 early when no open alerts exist at all.
build_alert_list() {
  local tmpfile
  tmpfile=$(mktemp)
  TEMP_FILES+=("$tmpfile")
  echo "📡 Fetching alerts by severity (highest first)..."
  local total_fetched=0
  local tier
  for tier in "${SEVERITY_TIERS[@]}"; do
    local source="${tier%%:*}"
    local severity="${tier##*:}"
    local tier_file
    tier_file=$(mktemp)
    TEMP_FILES+=("$tier_file")
    if [[ "$source" == "dependabot" ]]; then
      fetch_dependabot_alerts "$severity" > "$tier_file"
    else
      fetch_code_scanning_alerts "$severity" > "$tier_file"
    fi
    # Count fetched alerts. NB: `grep -c` prints "0" AND exits non-zero when
    # nothing matches, so the old `$(grep -c ... || echo 0)` captured the
    # two-line string "0\n0" and broke the numeric test below. Capture first,
    # then default on failure.
    local tier_count
    tier_count=$(grep -c '"source"' "$tier_file" 2>/dev/null) || tier_count=0
    if [[ "$tier_count" -gt 0 ]]; then
      echo " 📊 $source/$severity: $tier_count alerts"
      cat "$tier_file" >> "$tmpfile"
      total_fetched=$((total_fetched + tier_count))
    fi
  done
  if [[ "$total_fetched" -eq 0 ]]; then
    echo "✅ No open alerts found!"
    exit 0
  fi
  echo " 📦 Total: $total_fetched alerts fetched (top $BATCH_SIZE per tier)"
  # Slurp into sorted JSON array (already in priority order from tier iteration)
  jq -s '.' "$tmpfile" > "${tmpfile}.sorted"
  mv "${tmpfile}.sorted" "$tmpfile"
  ALERT_LIST="$tmpfile"
}
# ── Issue title for an alert ────────────────────────────────────────────────
# Compose the GitHub issue title: a category prefix, the "<source>-<number>"
# dedup key, and the alert title capped at 80 characters.
make_issue_title() {
  local source="$1" number="$2" title="$3" category="$4"
  local short_title="${title:0:80}"
  case "$category" in
    code-quality) echo "[Code Quality] ${source}-${number}: ${short_title}" ;;
    *)            echo "[Security] ${source}-${number}: ${short_title}" ;;
  esac
}
# ── Build lookup set of already-planned alerts ─────────────────────────────
#
# Fetches all existing issues with security/code-quality labels (one API call
# per label), extracts machine-readable <!-- alert-ref:source-number -->
# markers from issue bodies plus title-convention fallbacks, and stores the
# result as a newline-separated set in PLANNED_ALERTS. This replaces
# per-alert gh search calls (N API calls → 2).
PLANNED_ALERTS="" # newline-separated list of "source-number" keys
build_planned_set() {
  echo "🔎 Loading existing planned issues..."
  # Pull body+title text for every labeled issue. Failures (e.g. missing gh
  # auth) degrade to an empty set rather than aborting the run.
  local all_issues
  all_issues=$(gh issue list \
    --repo "$REPO_OWNER/$REPO_NAME" \
    --label "$SECURITY_LABEL" \
    --state all \
    --json title,body \
    --jq '.[] | (.body // "") + "\n" + (.title // "")' \
    --limit 500 2>/dev/null || echo "")
  local cq_issues
  cq_issues=$(gh issue list \
    --repo "$REPO_OWNER/$REPO_NAME" \
    --label "$CODE_QUALITY_LABEL" \
    --state all \
    --json title,body \
    --jq '.[] | (.body // "") + "\n" + (.title // "")' \
    --limit 500 2>/dev/null || echo "")
  # Extract alert-ref markers from issue bodies (preferred), falling back to
  # the [Security]/[Code Quality] title convention for older issues.
  PLANNED_ALERTS=$(printf '%s\n%s' "$all_issues" "$cq_issues" \
    | sed -n \
      -e 's/.*<!-- alert-ref:\([^ ]*\) -->.*/\1/p' \
      -e 's/^\[Security\] \([a-z]*-[0-9]*\):.*/\1/p' \
      -e 's/^\[Code Quality\] \([a-z]*-[0-9]*\):.*/\1/p' \
    | sort -u)
  # Count non-empty lines. NB: `grep -c` prints "0" AND exits 1 on zero
  # matches, so the previous `|| echo 0` produced the two-line string "0\n0"
  # and garbled the summary line. Capture first, then default on failure.
  local planned_count
  planned_count=$(printf '%s\n' "$PLANNED_ALERTS" | grep -c .) || planned_count=0
  echo " 📋 Found $planned_count existing plans"
}
# Succeed iff the "<source>-<number>" key ($1) is already present in the
# PLANNED_ALERTS set built by build_planned_set.
alert_already_planned() {
  local key="$1"
  grep -qx "$key" 2>/dev/null <<<"$PLANNED_ALERTS"
}
# ── Find next unprocessed alert ─────────────────────────────────────────────
# Scan ALERT_LIST (a priority-ordered JSON array) starting from
# ALERT_SCAN_INDEX and select the first alert with no existing plan.
# On success: sets SELECTED_ALERT (the alert JSON) and SELECTED_TITLE, bumps
# ALERT_SCAN_INDEX past the selection so batch iterations resume after it,
# and returns 0. Returns 1 when every remaining alert is already planned.
ALERT_SCAN_INDEX=0 # tracks position across calls in batch mode
find_unprocessed_alert() {
local total
total=$(jq 'length' "$ALERT_LIST")
if [[ "$ALERT_SCAN_INDEX" -eq 0 ]]; then
echo "🔎 Checking $total alerts against existing plans..."
fi
for i in $(seq "$ALERT_SCAN_INDEX" $((total - 1))); do
local alert
alert=$(jq ".[$i]" "$ALERT_LIST")
# Unpack the fields needed for the dedup key and the log line.
local source number title category
source=$(echo "$alert" | jq -r '.source')
number=$(echo "$alert" | jq -r '.number')
title=$(echo "$alert" | jq -r '.title')
category=$(echo "$alert" | jq -r '.category')
local severity
severity=$(echo "$alert" | jq -r '.severity')
# "<source>-<number>" is the key matched against issue alert-ref markers.
local alert_key="${source}-${number}"
if alert_already_planned "$alert_key"; then
echo " ⏭️ Already planned: [$severity] $alert_key — $title"
continue
fi
local issue_title
issue_title=$(make_issue_title "$source" "$number" "$title" "$category")
echo " 🎯 Selected: [$category] [$severity] $alert_key — $title"
SELECTED_ALERT="$alert"
SELECTED_TITLE="$issue_title"
ALERT_SCAN_INDEX=$((i + 1))
return 0
done
echo "✅ All alerts already have remediation plans!"
return 1
}
# ── Generate plan with Copilot CLI ──────────────────────────────────────────
# Build a remediation-plan prompt from SELECTED_ALERT and run it through
# Copilot CLI's /plan mode. Sets PLAN_OUTPUT to the generated plan, a dry-run
# placeholder, or a "review manually" fallback when copilot fails.
generate_plan() {
local source number severity title description manifest_path patched_version cve html_url package category
# Unpack every field of the normalized alert object.
source=$(echo "$SELECTED_ALERT" | jq -r '.source')
number=$(echo "$SELECTED_ALERT" | jq -r '.number')
severity=$(echo "$SELECTED_ALERT" | jq -r '.severity')
title=$(echo "$SELECTED_ALERT" | jq -r '.title')
description=$(echo "$SELECTED_ALERT" | jq -r '.description')
manifest_path=$(echo "$SELECTED_ALERT" | jq -r '.manifest_path')
patched_version=$(echo "$SELECTED_ALERT" | jq -r '.patched_version')
cve=$(echo "$SELECTED_ALERT" | jq -r '.cve')
html_url=$(echo "$SELECTED_ALERT" | jq -r '.html_url')
package=$(echo "$SELECTED_ALERT" | jq -r '.package')
category=$(echo "$SELECTED_ALERT" | jq -r '.category')
# Human-readable alert classification for the prompt.
local alert_type
if [[ "$source" == "dependabot" ]]; then
alert_type="Dependabot (dependency vulnerability)"
elif [[ "$category" == "security" ]]; then
alert_type="Code Scanning (security — static analysis)"
else
alert_type="Code Scanning (code quality — static analysis)"
fi
local role_description
if [[ "$category" == "code-quality" ]]; then
role_description="a code quality engineer reviewing a code quality issue"
else
role_description="a security engineer reviewing a vulnerability"
fi
# Truncate description to avoid overly long prompts
local short_desc="${description:0:2000}"
# NOTE(review): the prompt text below always says "security alert" even for
# code-quality alerts — wording only; confirm this is acceptable.
local prompt
prompt="You are $role_description in the repository $REPO_OWNER/$REPO_NAME.
Analyze this security alert and create a detailed remediation plan.
**Alert Type**: $alert_type
**Alert Number**: #$number
**Severity**: $severity
**Package/Rule**: $package
**Summary**: $title
**CVE**: $cve
**Affected File(s)**: $manifest_path
**Fix Available**: $patched_version
**Description**:
$short_desc
Create a step-by-step remediation plan including:
1. **Impact Assessment** — What is the real-world risk of this vulnerability in this project?
2. **Root Cause** — Why does this vulnerability exist?
3. **Remediation Steps** — Specific commands or code changes needed to fix it
4. **Testing Approach** — How to verify the fix works
5. **Risk Assessment** — Potential breaking changes from the fix
Be specific to this project's technology stack (Java 17/Spring Boot, Angular 18, Gradle, npm)."
echo "🤖 Generating remediation plan with Copilot CLI (claude-opus-4.6)..."
echo " This may take a minute..."
if [[ "$DRY_RUN" == true ]]; then
PLAN_OUTPUT="[DRY RUN] Plan would be generated by: copilot -sp \"/plan ...\" --model claude-opus-4.6"
return 0
fi
# /plan activates Copilot's dedicated planning mode for structured output
PLAN_OUTPUT=$(copilot -sp "/plan $prompt" --model claude-opus-4.6 2>/dev/null) || {
echo "⚠️ Copilot CLI failed. Falling back to a placeholder plan." >&2
PLAN_OUTPUT="⚠️ Copilot CLI could not generate a plan. Please review this alert manually."
}
}
# ── Create GitHub issue ─────────────────────────────────────────────────────
# Render SELECTED_ALERT + PLAN_OUTPUT into a markdown issue body and create
# the issue with the appropriate label. The trailing
# <!-- alert-ref:source-number --> HTML comment is the machine-readable
# dedup marker that build_planned_set extracts on later runs.
create_issue() {
local source number severity html_url title description patched_version cve package category
source=$(echo "$SELECTED_ALERT" | jq -r '.source')
number=$(echo "$SELECTED_ALERT" | jq -r '.number')
severity=$(echo "$SELECTED_ALERT" | jq -r '.severity')
html_url=$(echo "$SELECTED_ALERT" | jq -r '.html_url')
title=$(echo "$SELECTED_ALERT" | jq -r '.title')
description=$(echo "$SELECTED_ALERT" | jq -r '.description // "N/A"')
patched_version=$(echo "$SELECTED_ALERT" | jq -r '.patched_version')
cve=$(echo "$SELECTED_ALERT" | jq -r '.cve')
package=$(echo "$SELECTED_ALERT" | jq -r '.package')
category=$(echo "$SELECTED_ALERT" | jq -r '.category')
# Display name, label, and heading all branch on source/category.
local alert_type_display
if [[ "$source" == "dependabot" ]]; then
alert_type_display="Dependabot"
elif [[ "$category" == "security" ]]; then
alert_type_display="Code Scanning (Security)"
else
alert_type_display="Code Scanning (Quality)"
fi
local issue_label
if [[ "$category" == "code-quality" ]]; then
issue_label="$CODE_QUALITY_LABEL"
else
issue_label="$SECURITY_LABEL"
fi
local heading
if [[ "$category" == "code-quality" ]]; then
heading="## Code Quality Remediation Plan"
else
heading="## Security Alert Remediation Plan"
fi
# Truncate description for the issue body
local short_desc="${description:0:3000}"
local body
body="$heading
| Field | Value |
|-------|-------|
| **Source** | $alert_type_display Alert #$number |
| **Severity** | \`$severity\` |
| **Package/Rule** | \`$package\` |
| **CVE** | $cve |
| **Fix Available** | \`$patched_version\` |
| **Alert Link** | $html_url |
### Alert Details
$short_desc
### Remediation Plan (AI-Generated)
$PLAN_OUTPUT
---
*This issue was auto-generated by \`scripts/alert-planner.sh\` using Copilot CLI (claude-opus-4.6).*
<!-- alert-ref:${source}-${number} -->"
if [[ "$DRY_RUN" == true ]]; then
echo ""
echo "═══════════════════════════════════════════════════════════"
echo "🏷️ DRY RUN — Would create issue:"
echo " Title: $SELECTED_TITLE"
echo " Label: $issue_label"
echo "═══════════════════════════════════════════════════════════"
return 0
fi
echo "📝 Creating GitHub issue..."
# 2>&1 folds gh's error text into issue_url; under set -e a failed create
# aborts the script at this assignment.
local issue_url
issue_url=$(gh issue create \
--repo "$REPO_OWNER/$REPO_NAME" \
--title "$SELECTED_TITLE" \
--body "$body" \
--label "$issue_label" 2>&1)
echo ""
echo "═══════════════════════════════════════════════════════════"
echo "✅ Issue created successfully!"
echo " $issue_url"
echo "═══════════════════════════════════════════════════════════"
}
# ── Main ─────────────────────────────────────────────────────────────────────
# One-time setup (prereqs, repo detection, labels, alert fetch, dedup set),
# then up to MAX_COUNT iterations of select → plan → create-issue. Each
# processed alert key is appended to PLANNED_ALERTS so the next iteration
# skips it without re-querying GitHub.
main() {
echo ""
echo "🔐 Alert Planner — Automated Remediation Planning"
echo "═══════════════════════════════════════════════════════════"
echo ""
check_prerequisites
detect_repo
ensure_labels
build_alert_list
build_planned_set
local processed=0
while [[ "$processed" -lt "$MAX_COUNT" ]]; do
# Reset per-alert state each iteration.
SELECTED_ALERT=""
SELECTED_TITLE=""
PLAN_OUTPUT=""
if ! find_unprocessed_alert; then
break
fi
generate_plan
create_issue
processed=$((processed + 1))
# Mark as planned so subsequent iterations skip it
local alert_key
alert_key="$(echo "$SELECTED_ALERT" | jq -r '.source')-$(echo "$SELECTED_ALERT" | jq -r '.number')"
PLANNED_ALERTS=$(printf '%s\n%s' "$PLANNED_ALERTS" "$alert_key")
if [[ "$MAX_COUNT" -gt 1 ]]; then
echo ""
echo " 📊 Progress: $processed / $MAX_COUNT"
fi
done
echo ""
echo "═══════════════════════════════════════════════════════════"
echo "✅ Processed $processed alert(s)"
echo "═══════════════════════════════════════════════════════════"
}
main

Alert Remediation Pipeline

Two scripts that automate the lifecycle of security and code-quality alerts: from GitHub alert to remediation plan to reviewed PR.

End-to-End Process Visual

flowchart TD
  A["Open Dependabot and Code Scanning alerts"] --> P1

  subgraph Planner["alert-planner.sh"]
    P1["Fetch and prioritize alerts<br>Model: none"]
    P2["Deduplicate with existing issues (alert ref marker)<br>Model: none"]
    P3["Generate remediation plan<br>Model: claude-opus-4.6 (/plan)"]
    P4["Create labeled issue (security or code-quality)<br>Model: none"]
    P1 --> P2 --> P3 --> P4
  end

  P4 --> O1

  subgraph Orchestrator["issue-orchestrator.sh"]
    O1["Claim issue and create fix worktree<br>Model: none"]
    O2["Implement fix<br>Model: gpt-5.3-codex (ralph-loop)"]
    O3["Commit, push, and create PR<br>Model: claude-opus-4.6 (commit-push-pr)"]
    O4["Validate CI and patch related failures<br>Model: gpt-5.3-codex"]
    O5["Review PR<br>Model: claude-opus-4.6 (review-pr)"]
    O6["Post review comments<br>Model: claude-sonnet-4.6"]
    O7["Address feedback and update PR summary<br>Model: gpt-5.3-codex (resume)"]
    O8["AI judge verdict comment<br>Model: claude-sonnet-4.6"]
    O9["Finalize issue comment and cleanup<br>Model: none"]
    O1 --> O2 --> O3 --> O4 --> O5 --> O6 --> O7 --> O8 --> O9
  end

  O4 -. "If CI fails due to changes" .-> O2
  O9 --> Z["Ready for human review and merge"]
Loading

Prerequisites

  • gh — GitHub CLI, authenticated (gh auth login)
  • copilot — Copilot CLI
  • git — inside a GitHub-hosted repository
  • jq — JSON processor

Quick Start

# Plan a single alert (default)
./scripts/alert-planner.sh

# Plan up to 10 alerts in one run
./scripts/alert-planner.sh --count 10

# Fix a single issue (default)
./scripts/issue-orchestrator.sh

# Fix up to 3 issues in one run
./scripts/issue-orchestrator.sh --count 3

# Preview without making changes
./scripts/alert-planner.sh --dry-run --count 5
./scripts/issue-orchestrator.sh --dry-run --count 3

alert-planner.sh

Fetches open alerts from Dependabot and Code Scanning APIs, selects the highest-priority unprocessed alert, generates a remediation plan via Copilot CLI (claude-opus-4.6), and creates a GitHub issue.

Usage

./scripts/alert-planner.sh [--dry-run] [--count N]
| Option | Description |
|--------|-------------|
| `--dry-run` | Preview what would happen without creating issues |
| `--count N` | Process up to N alerts per run (default: 1) |

How It Works

  1. Fetches alerts by severity tier (critical/error first, low/note last)
  2. Builds a lookup set of already-planned alerts from existing GitHub issues
  3. Selects the first unprocessed alert
  4. Generates a remediation plan with Copilot CLI
  5. Creates a GitHub issue with the plan, labeled security or code-quality
  6. In batch mode (--count N), repeats steps 3-5 for up to N alerts

Deduplication

Issues are tagged with <!-- alert-ref:source-number --> markers. The script reads all existing issues with security or code-quality labels and skips alerts that already have a corresponding issue. No local state files are needed.

issue-orchestrator.sh

Picks the first unassigned issue created by the planner, implements the fix, creates a PR, reviews it, addresses feedback, and leaves the PR ready for human review.

Usage

./scripts/issue-orchestrator.sh [--dry-run] [--count N]
| Option | Description |
|--------|-------------|
| `--dry-run` | Preview what would happen without making changes |
| `--count N` | Process up to N issues per run (default: 1) |

Pipeline Steps (per issue)

| Step | Model | Description |
|------|-------|-------------|
| 1 | — | Assign issue to current user (with race-condition check) |
| 2 | — | Create git worktree on a `fix/security-<N>` branch |
| 3 | gpt-5.3-codex | Implement the fix described in the issue |
| 4 | claude-opus-4.6 | Commit, push, and create a PR |
| 5 | — | Capture PR number and link to issue via `Closes #N` |
| 6 | gpt-5.3-codex | Validate CI checks — poll for results, fix failures if caused by the change |
| 7 | claude-opus-4.6 | Review the PR |
| 8 | claude-sonnet-4.6 | Post review comments on the PR |
| 9 | gpt-5.3-codex | Address review feedback (resumes original session) |
| 10 | claude-sonnet-4.6 | AI judge — verdict on whether the fix addresses the issue |
| 11 | — | Comment on issue, clean up worktree |

Failure Handling

If any step fails, the cleanup trap:

  • Unassigns the issue
  • Removes the worktree and branch
  • Cleans up session files

In batch mode, each iteration resets all per-issue state, so a failure in one issue does not affect previous successful iterations.

Batch Mode

Both scripts support --count N to process multiple items in a single run. One-time setup (authentication, repo detection, alert fetching) runs once; only the per-item steps loop.

# Cron-friendly: plan 20 alerts, then fix 5 issues
./scripts/alert-planner.sh --count 20
./scripts/issue-orchestrator.sh --count 5

The loop exits early if there are no more items to process, so --count 100 is safe even if only 3 alerts remain.

Labels

The pipeline uses two GitHub labels for tracking:

| Label | Color | Purpose |
|-------|-------|---------|
| `security` | red | Security vulnerability remediation plans |
| `code-quality` | green | Code quality remediation plans |

The planner auto-creates these labels if they don't exist.

File Structure

scripts/
├── alert-planner.sh              # Alert → Issue pipeline
├── issue-orchestrator.sh         # Issue → PR pipeline
└── README-alert-remediation.md   # This file
#!/usr/bin/env bash
set -euo pipefail
###############################################################################
# issue-orchestrator.sh
#
# Orchestrates end-to-end remediation of security and code quality issues
# created by alert-planner.sh. Picks unassigned issues labeled "security" or
# "code-quality", implements the fix, creates a PR, reviews it, addresses
# feedback, and leaves the PR ready for human review.
#
# Usage: ./scripts/issue-orchestrator.sh [--dry-run] [--count N]
#
# Options:
# --dry-run Preview what would happen without making changes
# --count N Process up to N issues in a single run (default: 1)
#
# Prerequisites: gh (authenticated), copilot CLI, git
###############################################################################
# Labels to pick issues from — must match the labels alert-planner.sh
# applies when it creates issues.
SECURITY_LABEL="security"
CODE_QUALITY_LABEL="code-quality"
DRY_RUN=false
MAX_COUNT=1
# Flags passed to every copilot invocation so agents can run tools and fetch
# URLs without interactive confirmation prompts.
COPILOT_FLAGS=(--allow-all-tools --allow-all-urls)
# Shared instruction appended to all prompts that involve GitHub operations.
# Ensures copilot CLI agents use gh instead of raw REST API calls.
GH_INSTRUCTION="IMPORTANT: Use the gh CLI for ALL GitHub operations — reading issues, PRs, diffs, review comments, CI check status, and posting reviews. Never call the GitHub REST API directly. The gh CLI is already authenticated."
# ── CLI parsing ──────────────────────────────────────────────────────────────
# --dry-run : preview only; --count N : process up to N issues this run.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run) DRY_RUN=true; shift ;;
    --count)
      # Require a strictly positive decimal integer. The previous check only
      # rejected empty/option-like values, so "--count abc" slipped through
      # and later broke the [[ $MAX_COUNT -gt 1 ]] arithmetic test.
      if [[ -z "${2:-}" || ! "$2" =~ ^[1-9][0-9]*$ ]]; then
        echo "❌ --count requires a positive integer argument" >&2; exit 1
      fi
      MAX_COUNT="$2"; shift 2 ;;
    *) echo "❌ Unknown argument: $1" >&2; echo "Usage: ./scripts/issue-orchestrator.sh [--dry-run] [--count N]" >&2; exit 1 ;;
  esac
done
if [[ "$DRY_RUN" == true ]]; then
  echo "🔍 Dry-run mode — no changes will be made"
fi
if [[ "$MAX_COUNT" -gt 1 ]]; then
  echo "🔄 Batch mode — processing up to $MAX_COUNT issues"
fi
# Track state for cleanup on failure (reset per iteration in batch mode)
ISSUE_NUMBER=""
BRANCH_NAME=""
CURRENT_USER=""
ORIGINAL_BRANCH=""
CODEX_SESSION_ID=""
CODEX_SESSION_FILE=""
WORKTREE_DIR=""
REPO_ROOT=""
# ── Cleanup on failure ──────────────────────────────────────────────────────
# Best-effort rollback when a pipeline step fails: unassign the claimed
# issue, remove the worktree and branch, and delete the codex session file.
# Every per-issue variable is optional — unset pieces are skipped. Always
# exits 1.
cleanup_on_failure() {
  echo ""
  echo "⚠️ Orchestration failed — cleaning up..." >&2
  if [[ -n "$ISSUE_NUMBER" && -n "$CURRENT_USER" ]]; then
    echo " Unassigning issue #$ISSUE_NUMBER..." >&2
    gh issue edit "$ISSUE_NUMBER" --repo "$REPO_OWNER/$REPO_NAME" \
      --remove-assignee "$CURRENT_USER" 2>/dev/null || true
  fi
  # Remove worktree and branch (cd out of the worktree first so removal works)
  if [[ -n "$WORKTREE_DIR" && -d "$WORKTREE_DIR" ]]; then
    echo " Removing worktree $WORKTREE_DIR..." >&2
    cd "$REPO_ROOT" 2>/dev/null || true
    git worktree remove --force "$WORKTREE_DIR" 2>/dev/null || true
  fi
  if [[ -n "$BRANCH_NAME" ]]; then
    git branch -D "$BRANCH_NAME" 2>/dev/null || true
  fi
  if [[ -n "$CODEX_SESSION_FILE" && -f "$CODEX_SESSION_FILE" ]]; then
    rm -f "$CODEX_SESSION_FILE"
  fi
  echo " Cleanup complete." >&2
  exit 1
}
# ERR traps are NOT inherited by shell functions unless errtrace is enabled,
# so without `set -E` a command failing inside any function below would exit
# (via set -e) WITHOUT running this cleanup. Enable inheritance explicitly.
set -E
trap cleanup_on_failure ERR INT TERM
# ── Prerequisites ────────────────────────────────────────────────────────────
# Abort unless all required CLIs are installed, gh is logged in, and the
# current directory is inside a git work tree.
check_prerequisites() {
  local tool missing=()
  for tool in gh copilot git jq; do
    command -v "$tool" >/dev/null 2>&1 || missing+=("$tool")
  done
  if (( ${#missing[@]} > 0 )); then
    echo "❌ Missing required tools: ${missing[*]}" >&2
    exit 1
  fi
  if ! gh auth status >/dev/null 2>&1; then
    echo "❌ gh CLI is not authenticated. Run 'gh auth login' first." >&2
    exit 1
  fi
  if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    echo "❌ Not inside a git repository." >&2
    exit 1
  fi
}
# ── Repo detection ───────────────────────────────────────────────────────────
# Derive REPO_FULL/REPO_OWNER/REPO_NAME from the origin remote (SSH or HTTPS
# form), plus CURRENT_USER (the authenticated gh login, used to claim
# issues), REPO_ROOT (for worktree paths), and DEFAULT_BRANCH (falls back to
# "main" if the gh lookup fails).
detect_repo() {
local remote_url
remote_url=$(git remote get-url origin 2>/dev/null)
REPO_FULL=$(echo "$remote_url" | sed -E 's#.*github\.com[:/]##; s/\.git$//')
REPO_OWNER=$(echo "$REPO_FULL" | cut -d/ -f1)
REPO_NAME=$(echo "$REPO_FULL" | cut -d/ -f2)
if [[ -z "$REPO_OWNER" || -z "$REPO_NAME" ]]; then
echo "❌ Could not detect repo owner/name from git remote." >&2
exit 1
fi
CURRENT_USER=$(gh api /user --jq '.login' 2>/dev/null)
REPO_ROOT=$(git rev-parse --show-toplevel)
DEFAULT_BRANCH=$(gh repo view "$REPO_OWNER/$REPO_NAME" --json defaultBranchRef --jq '.defaultBranchRef.name' 2>/dev/null || echo "main")
echo "📦 Repository: $REPO_OWNER/$REPO_NAME"
echo "👤 User: $CURRENT_USER"
echo "🌿 Default branch: $DEFAULT_BRANCH"
}
# ── Select unassigned security issue ────────────────────────────────────────
# Find open, unassigned issues with either tracking label and pick the
# lowest-numbered one. The title-prefix filter keeps manually labeled issues
# (not created by the planner) out of the pipeline. Sets ISSUE_NUMBER,
# ISSUE_TITLE, ISSUE_BODY, ISSUE_URL; returns 1 when nothing is available.
select_issue() {
echo "🔎 Looking for unassigned security & code quality issues..."
# Fetch issues from both labels and merge
local security_issues code_quality_issues
security_issues=$(gh issue list \
--repo "$REPO_OWNER/$REPO_NAME" \
--label "$SECURITY_LABEL" \
--state open \
--search "no:assignee" \
--json number,title,body,url \
--jq '[.[] | select(.title | startswith("[Security]") or startswith("[Code Quality]"))]' 2>/dev/null || echo '[]')
code_quality_issues=$(gh issue list \
--repo "$REPO_OWNER/$REPO_NAME" \
--label "$CODE_QUALITY_LABEL" \
--state open \
--search "no:assignee" \
--json number,title,body,url \
--jq '[.[] | select(.title | startswith("[Security]") or startswith("[Code Quality]"))]' 2>/dev/null || echo '[]')
# Merge, deduplicate by number, sort
local issues
issues=$(echo "$security_issues $code_quality_issues" | jq -s 'add | unique_by(.number) | sort_by(.number)' 2>/dev/null || echo '[]')
local count
count=$(echo "$issues" | jq 'length' 2>/dev/null || echo 0)
if [[ "$count" -eq 0 ]]; then
echo "✅ No unassigned issues found. Nothing to do!"
return 1
fi
# Pick the first unassigned issue
local candidate
candidate=$(echo "$issues" | jq '.[0]')
ISSUE_NUMBER=$(echo "$candidate" | jq -r '.number')
ISSUE_TITLE=$(echo "$candidate" | jq -r '.title')
ISSUE_BODY=$(echo "$candidate" | jq -r '.body')
ISSUE_URL=$(echo "$candidate" | jq -r '.url')
echo " 🎯 Selected issue #$ISSUE_NUMBER: $ISSUE_TITLE"
}
# ── Step 1: Assign issue ────────────────────────────────────────────────────
# Claim the selected issue for CURRENT_USER, then re-read the assignee to
# detect a lost race against a concurrent orchestrator run.
assign_issue() {
echo ""
echo "── Step 1: Assign issue ───────────────────────────────────"
if [[ "$DRY_RUN" == true ]]; then
echo " [DRY RUN] Would assign #$ISSUE_NUMBER to $CURRENT_USER"
return 0
fi
gh issue edit "$ISSUE_NUMBER" \
--repo "$REPO_OWNER/$REPO_NAME" \
--add-assignee "$CURRENT_USER"
# Verify we won the race — check the issue is assigned to us
local actual_assignee
actual_assignee=$(gh issue view "$ISSUE_NUMBER" \
--repo "$REPO_OWNER/$REPO_NAME" \
--json assignees --jq '.assignees[0].login' 2>/dev/null || echo "")
if [[ "$actual_assignee" != "$CURRENT_USER" ]]; then
echo " ⚠️ Issue #$ISSUE_NUMBER was claimed by $actual_assignee — skipping" >&2
# NOTE(review): despite the "skipping" wording this exits the whole run,
# and a plain `exit` does not fire the ERR-trap cleanup. ISSUE_NUMBER is
# blanked first so cleanup (if invoked) won't unassign the winner. Confirm
# that aborting (rather than moving to the next issue) is intended.
ISSUE_NUMBER=""
exit 1
fi
echo " ✅ Assigned #$ISSUE_NUMBER to $CURRENT_USER"
}
# ── Step 2: Create worktree with fix branch ─────────────────────────────────
# Derive the fix branch and worktree path from the issue number, clear any
# leftovers from a previous failed run, then create a fresh worktree off the
# up-to-date default branch and cd into it. Sets BRANCH_NAME and
# WORKTREE_DIR (both consumed by cleanup_on_failure).
create_worktree() {
  echo ""
  echo "── Step 2: Create worktree ────────────────────────────────"
  BRANCH_NAME="fix/security-${ISSUE_NUMBER}"
  WORKTREE_DIR="${REPO_ROOT}/.worktrees/${BRANCH_NAME}"
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would create worktree at $WORKTREE_DIR on branch $BRANCH_NAME"
    return 0
  fi
  git fetch origin "$DEFAULT_BRANCH" --quiet
  mkdir -p "$(dirname "$WORKTREE_DIR")"
  # Clean up stale branch/worktree from a previous failed run.
  # -F matches the path literally — it is full of "/" and "." which grep
  # would otherwise treat as regex metacharacters.
  if git worktree list --porcelain | grep -qF "$WORKTREE_DIR"; then
    echo " 🧹 Removing stale worktree from previous run..."
    git worktree remove --force "$WORKTREE_DIR" 2>/dev/null || true
  fi
  if git show-ref --verify --quiet "refs/heads/$BRANCH_NAME"; then
    echo " 🧹 Removing stale branch from previous run..."
    git branch -D "$BRANCH_NAME" 2>/dev/null || true
  fi
  git worktree add -b "$BRANCH_NAME" "$WORKTREE_DIR" "origin/$DEFAULT_BRANCH"
  cd "$WORKTREE_DIR"
  echo " ✅ Created worktree at $WORKTREE_DIR"
  echo " 🌿 Branch: $BRANCH_NAME"
}
# ── Step 3: Implement fix (gpt-5.3-codex) ───────────────────────────────────
# Ask copilot (via the ralph-loop plugin command, bounded at 10 iterations)
# to read the issue and implement the remediation plan, exporting the session
# to CODEX_SESSION_FILE. The session ID parsed from that export lets later
# steps resume the same session. Returns 1 if copilot fails or left the
# worktree untouched.
implement_fix() {
echo ""
echo "── Step 3: Implement fix (gpt-5.3-codex) ─────────────────"
local task="Read GitHub issue #${ISSUE_NUMBER} (use: gh issue view ${ISSUE_NUMBER} --repo ${REPO_OWNER}/${REPO_NAME}) and implement the fix described in the remediation plan. Make the minimal code changes needed. Do not modify unrelated files.
${GH_INSTRUCTION}"
local prompt="/ralph-wiggum:ralph-loop \"${task}\" --completion-promise \"DONE\" --max-iterations 10"
if [[ "$DRY_RUN" == true ]]; then
echo " [DRY RUN] Would run: copilot -sp \"$prompt\" --model gpt-5.3-codex ${COPILOT_FLAGS[*]} --share copilot-session-fix-${ISSUE_NUMBER}.md"
return 0
fi
echo " 🤖 Invoking gpt-5.3-codex to implement the fix..."
echo " This may take several minutes..."
# Relative path: created in the worktree, removed by cleanup_on_failure.
CODEX_SESSION_FILE="copilot-session-fix-${ISSUE_NUMBER}.md"
copilot -sp "$prompt" --model gpt-5.3-codex "${COPILOT_FLAGS[@]}" --share "$CODEX_SESSION_FILE" || {
echo " ❌ Fix implementation failed" >&2
return 1
}
# Extract session ID from the exported markdown (format: > **Session ID:** `<uuid>`)
if [[ -f "$CODEX_SESSION_FILE" ]]; then
CODEX_SESSION_ID=$(sed -n 's/.*Session ID:\*\* `\([^`]*\)`.*/\1/p' "$CODEX_SESSION_FILE" 2>/dev/null || true)
if [[ -n "$CODEX_SESSION_ID" ]]; then
echo " 📎 Captured session ID: $CODEX_SESSION_ID"
else
echo " ⚠️ Could not extract session ID from $CODEX_SESSION_FILE" >&2
fi
fi
# NOTE(review): "complete" is printed before the did-it-change-anything
# check below, so a no-op run logs success and then fails — confirm ordering.
echo " ✅ Fix implementation complete"
# Verify that codex actually changed something
if [[ -z "$(git status --porcelain)" ]]; then
echo " ❌ No files were modified by the fix implementation" >&2
return 1
fi
}
# ── Append validation summary to PR description ────────────────────────────
# Asks codex (resuming its session when available) to summarize the work just
# performed, then appends that summary to the PR body under a phase heading.
# Arguments: $1 - phase label, e.g. "Fix Implementation" or "Review Feedback"
# Globals read: DRY_RUN, PR_NUMBER, CODEX_SESSION_ID, COPILOT_FLAGS,
#               REPO_OWNER, REPO_NAME
# Returns: always 0 — a failed or empty summary is non-fatal.
append_pr_summary() {
  local phase="$1" # e.g. "Fix Implementation" or "Review Feedback"
  echo ""
  echo "── Updating PR description ($phase) ────────────────────────"
  local prompt
  prompt="Summarize what you just did in this repository for the PR description. Include:
1. What files were changed and why
2. What validation or testing steps you performed (e.g. build, lint, tests)
3. Any risks or caveats the reviewer should know about
Be concise — use bullet points. Output only the summary, no preamble."
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would ask codex to summarize and append to PR #$PR_NUMBER"
    return 0
  fi
  local summary
  local resume_flag=()
  if [[ -n "$CODEX_SESSION_ID" ]]; then
    resume_flag=(--resume "$CODEX_SESSION_ID")
  fi
  # ${arr[@]+...} guard: expanding an empty array with "${arr[@]}" trips
  # `set -u` on bash <= 4.3 (e.g. macOS's stock /bin/bash 3.2).
  summary=$(copilot -sp "$prompt" --model gpt-5.3-codex "${COPILOT_FLAGS[@]}" ${resume_flag[@]+"${resume_flag[@]}"} 2>/dev/null) || {
    echo " ⚠️ Could not generate summary — skipping" >&2
    return 0
  }
  # Don't append an empty heading if the model produced no output.
  if [[ -z "$summary" ]]; then
    echo " ⚠️ Empty summary returned — skipping" >&2
    return 0
  fi
  local current_body
  current_body=$(gh pr view "$PR_NUMBER" --repo "$REPO_OWNER/$REPO_NAME" --json body --jq '.body' 2>/dev/null || echo "")
  # Only report success if the edit actually went through.
  if gh pr edit "$PR_NUMBER" \
    --repo "$REPO_OWNER/$REPO_NAME" \
    --body "${current_body}
## 🤖 ${phase} — AI Summary
${summary}" 2>/dev/null; then
    echo " ✅ PR description updated with $phase summary"
  else
    echo " ⚠️ Could not update PR description — skipping" >&2
  fi
}
# ── Step 4: Commit, push, create PR (claude-opus-4.6) ───────────────────────
# Hands off to the Copilot CLI commit-push-pr command, which stages the fix,
# pushes the branch, and opens the pull request.
# Globals read: DRY_RUN, COPILOT_FLAGS
# Returns: 0 on success (or dry run); 1 if the copilot invocation fails.
commit_push_pr() {
  echo ""
  echo "── Step 4: Commit, push, create PR (claude-opus-4.6) ─────"
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would run: copilot -sp \"/commit-commands:commit-push-pr\" --model claude-opus-4.6 ${COPILOT_FLAGS[*]}"
    return 0
  fi
  echo " 🤖 Invoking claude-opus-4.6 to commit, push, and create PR..."
  if ! copilot -sp "/commit-commands:commit-push-pr" --model claude-opus-4.6 "${COPILOT_FLAGS[@]}"; then
    echo " ❌ Commit/push/PR creation failed" >&2
    return 1
  fi
  echo " ✅ PR created"
}
# ── Step 5: Capture PR number ────────────────────────────────────────────────
# Looks up the open PR for the branch just pushed, records its number and URL,
# and appends "Closes #<issue>" to the PR body so GitHub creates the native
# "Development" link and auto-closes the issue on merge.
# Globals read: DRY_RUN, REPO_OWNER, REPO_NAME, BRANCH_NAME, ISSUE_NUMBER
# Globals set:  PR_NUMBER, PR_URL
# Returns: 0 on success; 1 if no open PR is found for the branch.
capture_pr_number() {
  echo ""
  echo "── Step 5: Capture PR number ──────────────────────────────"
  if [[ "$DRY_RUN" == true ]]; then
    PR_NUMBER="(dry-run)"
    PR_URL="(dry-run)"
    echo " [DRY RUN] Would capture PR number from branch $BRANCH_NAME"
    return 0
  fi
  PR_NUMBER=$(gh pr list --repo "$REPO_OWNER/$REPO_NAME" --head "$BRANCH_NAME" --state open --json number --jq '.[0].number' 2>/dev/null) || true
  if [[ -z "$PR_NUMBER" || "$PR_NUMBER" == "null" ]]; then
    echo " ❌ Could not find PR for branch $BRANCH_NAME" >&2
    return 1
  fi
  # Guard the assignment: without `|| PR_URL=""` a transient `gh` failure
  # here would abort the entire script under `set -e` with no diagnostic;
  # an empty URL only degrades the log line below.
  PR_URL=$(gh pr view "$PR_NUMBER" --repo "$REPO_OWNER/$REPO_NAME" --json url --jq '.url' 2>/dev/null) || PR_URL=""
  # Link PR to issue via "Closes #N" in PR body — this creates the native
  # "Development" link so the issue auto-closes when the PR is merged
  local current_body
  current_body=$(gh pr view "$PR_NUMBER" --repo "$REPO_OWNER/$REPO_NAME" --json body --jq '.body' 2>/dev/null || echo "")
  # Idempotent on retries: don't append the marker a second time.
  if [[ "$current_body" != *"Closes #${ISSUE_NUMBER}"* ]]; then
    gh pr edit "$PR_NUMBER" \
      --repo "$REPO_OWNER/$REPO_NAME" \
      --body "${current_body}
---
Closes #${ISSUE_NUMBER}" 2>/dev/null || true
  fi
  echo " ✅ PR #$PR_NUMBER: $PR_URL"
  echo " 🔗 Linked to issue #$ISSUE_NUMBER (will auto-close on merge)"
}
# ── Step 6: Validate CI checks (gpt-5.3-codex) ──────────────────────────────
# Asks codex to watch the PR's CI checks, fix failures caused by our change,
# and report the outcome. Resumes the original fix session when available so
# the model retains full context of what it changed.
# Globals read: PR_NUMBER, REPO_OWNER, REPO_NAME, GH_INSTRUCTION, DRY_RUN,
#               CODEX_SESSION_ID, COPILOT_FLAGS
# Returns: always 0 — a failed validation leaves the PR for manual review.
validate_ci_checks() {
  echo ""
  echo "── Step 6: Validate CI checks (gpt-5.3-codex) ─────────────"
  local prompt
  prompt="You are validating a security fix on pull request #${PR_NUMBER} in repo ${REPO_OWNER}/${REPO_NAME}.
1. Check CI status: gh pr checks ${PR_NUMBER} --repo ${REPO_OWNER}/${REPO_NAME}
2. Wait for CI checks to complete — poll every 30 seconds for up to 10 minutes
3. If any check fails:
a. Read the failed check's logs (use: gh run view <run-id> --repo ${REPO_OWNER}/${REPO_NAME} --log-failed)
b. If the failure is caused by your changes, fix the code, commit, and push
c. If the failure is a flaky/unrelated test, note it but do not modify unrelated code
4. After fixing, wait for the new CI run to pass
5. Summarize the CI validation result: which checks passed, which failed, and what you fixed
Do not modify files unrelated to the fix. If CI passes on the first try, just confirm it.
${GH_INSTRUCTION}"
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would validate CI checks on PR #$PR_NUMBER via gpt-5.3-codex"
    return 0
  fi
  echo " 🤖 Invoking gpt-5.3-codex to validate CI checks..."
  local resume_flag=()
  if [[ -n "$CODEX_SESSION_ID" ]]; then
    resume_flag=(--resume "$CODEX_SESSION_ID")
    echo " 📎 Resuming session: $CODEX_SESSION_ID"
  fi
  # ${arr[@]+...} guard: expanding an empty array with "${arr[@]}" trips
  # `set -u` on bash <= 4.3 (e.g. macOS's stock /bin/bash 3.2).
  copilot -sp "$prompt" --model gpt-5.3-codex "${COPILOT_FLAGS[@]}" ${resume_flag[@]+"${resume_flag[@]}"} || {
    echo " ⚠️ CI validation failed — PR still available for manual review" >&2
    return 0
  }
  echo " ✅ CI checks validated"
}
# ── Step 7: PR review (claude-opus-4.6) ──────────────────────────────────────
# Runs the Copilot CLI review-pr command and captures its output into the
# REVIEW_OUTPUT global for Step 8 to post.
# Globals read: DRY_RUN, PR_NUMBER, COPILOT_FLAGS
# Globals set:  REVIEW_OUTPUT (empty string when the review fails)
# Returns: always 0 — a failed review is non-fatal.
run_pr_review() {
  echo ""
  echo "── Step 7: PR review (claude-opus-4.6) ────────────────────"
  if [[ "$DRY_RUN" == true ]]; then
    REVIEW_OUTPUT="[DRY RUN] Review would be generated"
    echo " [DRY RUN] Would run: copilot -sp \"/pr-review-toolkit:review-pr\" --model claude-opus-4.6 ${COPILOT_FLAGS[*]}"
    return 0
  fi
  echo " 🤖 Invoking claude-opus-4.6 to review PR #$PR_NUMBER..."
  if ! REVIEW_OUTPUT=$(copilot -sp "/pr-review-toolkit:review-pr" --model claude-opus-4.6 "${COPILOT_FLAGS[@]}" 2>/dev/null); then
    echo " ⚠️ PR review failed — continuing without review" >&2
    REVIEW_OUTPUT=""
    return 0
  fi
  echo " ✅ PR review complete"
}
# ── Step 8: Post review to PR (claude-sonnet-4.6) ───────────────────────────
# Hands the review text captured in Step 7 to claude-sonnet-4.6, which posts
# it as review comments (inline on specific files/lines where possible).
# Globals read: REVIEW_OUTPUT, PR_NUMBER, REPO_OWNER, REPO_NAME,
#               GH_INSTRUCTION, DRY_RUN, COPILOT_FLAGS
# Returns: always 0 — a failed post is non-fatal.
post_review() {
  echo ""
  echo "── Step 8: Post review to PR (claude-sonnet-4.6) ──────────"
  if [[ -z "$REVIEW_OUTPUT" ]]; then
    echo " ⏭️ No review output to post — skipping"
    return 0
  fi
  local prompt
  prompt="Add the following code review as review comments on pull request #${PR_NUMBER} in repo ${REPO_OWNER}/${REPO_NAME}. Use inline comments on specific files/lines where applicable.
Review:
${REVIEW_OUTPUT}
${GH_INSTRUCTION}"
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would post review to PR #$PR_NUMBER via claude-sonnet-4.6"
    return 0
  fi
  echo " 🤖 Invoking claude-sonnet-4.6 to post review comments..."
  if ! copilot -sp "$prompt" --model claude-sonnet-4.6 "${COPILOT_FLAGS[@]}"; then
    echo " ⚠️ Posting review failed — continuing" >&2
    return 0
  fi
  echo " ✅ Review posted to PR #$PR_NUMBER"
}
# ── Step 9: Address review feedback (gpt-5.3-codex) ─────────────────────────
# Asks codex (resuming the fix session when available) to evaluate each PR
# review comment, implement valid suggestions, reply, resolve, and push.
# Globals read: PR_NUMBER, REPO_OWNER, REPO_NAME, GH_INSTRUCTION, DRY_RUN,
#               CODEX_SESSION_ID, COPILOT_FLAGS
# Returns: always 0 — failures leave the PR for manual review.
address_feedback() {
  echo ""
  echo "── Step 9: Address review feedback (gpt-5.3-codex) ────────"
  local prompt
  prompt="View the review comments on pull request #${PR_NUMBER} in repo ${REPO_OWNER}/${REPO_NAME} (use: gh api repos/${REPO_OWNER}/${REPO_NAME}/pulls/${PR_NUMBER}/comments). For each review comment:
1. Evaluate if the feedback is valid and actionable
2. If valid, implement the suggested change
3. Reply to the comment explaining what you did (use: gh api repos/${REPO_OWNER}/${REPO_NAME}/pulls/comments/{comment_id}/replies -f body='...')
4. Resolve the comment
When done with all comments, commit and push your changes.
${GH_INSTRUCTION}"
  if [[ "$DRY_RUN" == true ]]; then
    # Avoid printing a dangling "--resume" when there is no session ID.
    if [[ -n "$CODEX_SESSION_ID" ]]; then
      echo " [DRY RUN] Would address PR #$PR_NUMBER feedback via gpt-5.3-codex --resume $CODEX_SESSION_ID"
    else
      echo " [DRY RUN] Would address PR #$PR_NUMBER feedback via gpt-5.3-codex (no session to resume)"
    fi
    return 0
  fi
  echo " 🤖 Invoking gpt-5.3-codex to address review feedback..."
  # Resume the original codex session so the model has full context of the fix
  local resume_flag=()
  if [[ -n "$CODEX_SESSION_ID" ]]; then
    resume_flag=(--resume "$CODEX_SESSION_ID")
    echo " 📎 Resuming session: $CODEX_SESSION_ID"
  fi
  # ${arr[@]+...} guard: expanding an empty array with "${arr[@]}" trips
  # `set -u` on bash <= 4.3 (e.g. macOS's stock /bin/bash 3.2).
  copilot -sp "$prompt" --model gpt-5.3-codex "${COPILOT_FLAGS[@]}" ${resume_flag[@]+"${resume_flag[@]}"} || {
    echo " ⚠️ Addressing feedback failed — PR still available for manual review" >&2
    return 0
  }
  echo " ✅ Review feedback addressed"
}
# ── Step 10: AI Judge (claude-sonnet-4.6) ────────────────────────────────────
# Has claude-sonnet-4.6 act as an impartial judge: it reads the issue and the
# PR diff, then posts a single verdict comment (✅/❌ plus rationale and a
# confidence level) so a human can make a quick approve/reject call.
# Globals read: DRY_RUN, ISSUE_NUMBER, ISSUE_TITLE, ISSUE_URL, PR_NUMBER,
#               PR_URL, REPO_OWNER, REPO_NAME, GH_INSTRUCTION, COPILOT_FLAGS
# Returns: always 0 — a failed judge review is non-fatal.
ai_judge() {
  echo ""
  echo "── Step 10: AI Judge (claude-sonnet-4.6) ───────────────────"
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would invoke claude-sonnet-4.6 to judge the fix"
    return 0
  fi
  local prompt
  prompt="You are acting as an impartial judge reviewing a security fix.
Here is the original security issue:
- Issue #${ISSUE_NUMBER}: ${ISSUE_TITLE}
- Issue URL: ${ISSUE_URL}
Here is the PR that addresses it:
- PR #${PR_NUMBER}: ${PR_URL}
Please do the following:
1. Read the original issue body (use: gh issue view ${ISSUE_NUMBER} --repo ${REPO_OWNER}/${REPO_NAME})
2. Read the PR diff (use: gh pr diff ${PR_NUMBER} --repo ${REPO_OWNER}/${REPO_NAME})
3. Evaluate whether the fix correctly and completely addresses the security issue
4. Leave a single PR review comment (use: gh pr review ${PR_NUMBER} --repo ${REPO_OWNER}/${REPO_NAME} --comment --body '...')
Your review comment must include:
- ✅ or ❌ verdict (does the fix address the issue?)
- A brief rationale (2-3 sentences max)
- Any remaining concerns or risks
- A confidence level (High/Medium/Low)
Keep it concise — this is for a human to make a quick approve/reject decision.
${GH_INSTRUCTION}"
  echo " ⚖️ Invoking claude-sonnet-4.6 as judge..."
  if ! copilot -sp "$prompt" --model claude-sonnet-4.6 "${COPILOT_FLAGS[@]}"; then
    echo " ⚠️ Judge review failed — PR still available for manual review" >&2
    return 0
  fi
  echo " ✅ AI judge review posted"
}
# ── Step 11: Finalize ───────────────────────────────────────────────────────
# Comments the PR link on the issue, cleans up the session export, and removes
# the worktree.
# Globals read: DRY_RUN, ISSUE_NUMBER, ISSUE_TITLE, REPO_OWNER, REPO_NAME,
#               PR_NUMBER, PR_URL, CODEX_SESSION_FILE, REPO_ROOT, WORKTREE_DIR
# Returns: always 0 — cleanup failures are suppressed.
finalize() {
  echo ""
  echo "── Step 11: Finalize ──────────────────────────────────────"
  if [[ "$DRY_RUN" == true ]]; then
    echo " [DRY RUN] Would add PR link to issue"
    echo " 🏷️ Issue: #$ISSUE_NUMBER — $ISSUE_TITLE"
    return 0
  fi
  # Comment on issue with PR link
  gh issue comment "$ISSUE_NUMBER" \
    --repo "$REPO_OWNER/$REPO_NAME" \
    --body "🔧 Automated fix submitted in PR #${PR_NUMBER}: ${PR_URL}
This PR has been reviewed by AI and feedback has been addressed. Ready for human review and merge." 2>/dev/null || true
  # Remove the session export BEFORE leaving the worktree: it was created
  # relative to the worktree cwd, so deleting it after `cd "$REPO_ROOT"`
  # misses it, and the leftover untracked file makes a non-forced
  # `git worktree remove` refuse to run (leaving the worktree behind).
  if [[ -n "$CODEX_SESSION_FILE" && -f "$CODEX_SESSION_FILE" ]]; then
    rm -f "$CODEX_SESSION_FILE"
  fi
  # Return to repo root and remove worktree
  cd "$REPO_ROOT"
  git worktree remove "$WORKTREE_DIR" 2>/dev/null || true
  echo " ✅ Issue #$ISSUE_NUMBER — PR #$PR_NUMBER ready for human review"
}
# ── Main ─────────────────────────────────────────────────────────────────────
# Entry point: verifies tooling and repo context, then runs the full
# remediation pipeline for up to MAX_COUNT issues, stopping early when no
# unprocessed issue remains.
# Globals read: MAX_COUNT
# Globals reset per iteration: all per-issue pipeline state.
main() {
  echo ""
  echo "🔧 Issue Orchestrator — Automated Remediation Pipeline"
  echo "═══════════════════════════════════════════════════════════"
  echo ""
  check_prerequisites
  detect_repo
  local processed=0
  local state_var
  while (( processed < MAX_COUNT )); do
    # Reset per-issue state for each iteration
    for state_var in ISSUE_NUMBER ISSUE_TITLE ISSUE_BODY ISSUE_URL \
                     BRANCH_NAME WORKTREE_DIR CODEX_SESSION_ID \
                     CODEX_SESSION_FILE PR_NUMBER PR_URL REVIEW_OUTPUT; do
      printf -v "$state_var" '%s' ''
    done
    select_issue || break
    assign_issue
    create_worktree
    implement_fix
    commit_push_pr
    capture_pr_number
    validate_ci_checks
    append_pr_summary "Fix Implementation"
    run_pr_review
    post_review
    address_feedback
    append_pr_summary "Review Feedback Addressed"
    ai_judge
    finalize
    processed=$((processed + 1))
    if (( MAX_COUNT > 1 )); then
      echo ""
      echo " 📊 Progress: $processed / $MAX_COUNT"
    fi
  done
  echo ""
  echo "═══════════════════════════════════════════════════════════"
  echo "✅ Orchestration complete — processed $processed issue(s)"
  echo "═══════════════════════════════════════════════════════════"
}
main
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment