an MCP server to develop and debug Norns/SuperCollider audio synthesis/plugins/filters
#!/usr/bin/env python3
"""
norns_helper.py - MCP server for Norns development workflow automation

This Model Context Protocol (MCP) server provides tools for deploying and debugging
Norns audio scripts remotely over SSH. It was designed to eliminate manual SSH
workflows and Makefile dependencies, enabling rapid iteration during development.

Target System:
Monome Norns (https://monome.org/docs/norns/) is a sound computer for exploring new
approaches to music creation. It's a standalone hardware device running Linux with
SuperCollider for audio synthesis, a Lua-based scripting environment (matron), and
jack for audio routing. Norns scripts combine Lua UI/logic with SuperCollider engines
for real-time audio processing.

This MCP server provides:
- File synchronization (rsync) for Lua and SuperCollider files
- Service management (systemd) for jack, matron, and sclang
- Log retrieval (journalctl) for debugging
- Cache management for SuperCollider engine updates
- Workflow prompts for common debugging scenarios

All tools are synchronous and complete before returning (typical: 1-10s).

Based on: umcp framework (https://github.com/rcarmo/umcp)
Configure via environment: NORNS_HOST (default: norns.local), NORNS_USER (default: we)
or override per-call with host/user parameters.
"""

import base64
import os
import socket
import subprocess
import sys
import time
from typing import Optional, Dict, List

from umcp import MCPServer

WORKDIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
DEFAULT_HOST = os.environ.get("NORNS_HOST", "norns.local")
DEFAULT_USER = os.environ.get("NORNS_USER", "we")
NORNS_SCRIPT_DIR = "/home/we/dust/code/nanocosm"
NORNS_DATA_DIR = "/home/we/dust/data/nanocosm"
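# dust/code/<script> is where norns looks for user scripts; dust/data/<script>
# holds that script's saved state (parameter sets, exported files, etc.).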

# Standard SSH options for all connections
SSH_OPTS = [
    "-o", "BatchMode=yes",
    "-o", "ConnectTimeout=3",
    "-o", "ServerAliveInterval=2",
    "-o", "ServerAliveCountMax=2",
]
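# BatchMode=yes disables interactive password prompts, so key-based auth to the
# norns (e.g. via ssh-copy-id we@norns.local) must already be configured.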


def _run(cmd: list, cwd: Optional[str] = None, env: Optional[Dict[str, str]] = None, timeout: int = 30) -> str:
    """
    Run a command and return stdout+stderr. Raises RuntimeError on non-zero exit.
    Universal command executor with consistent buffering and timeout handling.
    """
    # Build environment with unbuffered I/O
    run_env = os.environ.copy()
    run_env["PYTHONUNBUFFERED"] = "1"
    if env:
        run_env.update(env)
    try:
        proc = subprocess.Popen(
            cmd,
            cwd=cwd or WORKDIR,
            env=run_env,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            bufsize=1,  # Line buffered
        )
        try:
            stdout, _ = proc.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            proc.kill()
            stdout, _ = proc.communicate()
            raise RuntimeError(f"Command timed out after {timeout}s: {' '.join(cmd)}")
        if proc.returncode != 0:
            raise RuntimeError(f"Command failed ({proc.returncode}): {' '.join(cmd)}\n{stdout}")
        return stdout.strip()
    except RuntimeError:
        raise  # keep the specific timeout/exit-code messages raised above
    except FileNotFoundError:
        raise RuntimeError(f"Command not found: {cmd[0]}")
    except Exception as e:
        raise RuntimeError(f"Command execution error: {e}")


def _scp(remote_path: str, local_path: str, host: str, user: str, timeout: int = 20) -> str:
    """Copy a remote file to local via scp using the same SSH opts."""
    cmd = ["scp"] + SSH_OPTS + [f"{user}@{host}:{remote_path}", local_path]
    return _run(cmd, timeout=timeout)


def _ssh_raw(host: str, user: str, command: str, timeout: int = 30) -> str:
    """Execute SSH command. Low-level - use specific helpers instead."""
    cmd = ["ssh"] + SSH_OPTS + [f"{user}@{host}", command]
    return _run(cmd, timeout=timeout)


def _ssh(host: str, user: str, command: str, use_sudo: bool = False, timeout: int = 30) -> str:
    """
    Execute SSH command with optional sudo (rarely needed on norns).

    Args:
        host: Remote host
        user: Remote user
        command: Command to execute
        use_sudo: Prepend 'stdbuf -o0 sudo' to command (rarely needed)
        timeout: Command timeout in seconds
    """
    if use_sudo:
        command = f"stdbuf -o0 sudo {command}"
    return _ssh_raw(host, user, command, timeout)


def _ssh_journalctl(host: str, user: str, service: str, lines: int = 100, timeout: int = 8) -> str:
    """
    Fetch journalctl logs for a service. Uses --no-pager to prevent blocking.
    No sudo required - norns 'we' user has log read access.
    """
    cmd = f"journalctl --no-pager -u {service} -n {lines} -o cat"
    return _ssh_raw(host, user, cmd, timeout)


def _ssh_systemctl_status(host: str, user: str, services: List[str], timeout: int = 8) -> str:
    """
    Check systemctl status for services.
    Returns one line per service: 'active', 'inactive', etc.
    No sudo required for status checks.
    """
    services_str = " ".join(services)
    # 'is-active' exits non-zero when any unit is inactive; swallow that so we
    # still return the per-service status lines instead of raising.
    cmd = f"systemctl is-active {services_str} || true"
    return _ssh_raw(host, user, cmd, timeout)


def _ssh_systemctl_restart(host: str, user: str, services: List[str], timeout: int = 20) -> str:
    """
    Restart systemctl services.
    No sudo required - norns 'we' user has service restart permissions.
    """
    services_str = " ".join(services)
    cmd = f"systemctl restart {services_str}"
    return _ssh_raw(host, user, cmd, timeout)


def _rsync(local_path: str, remote_path: str, host: str, user: str, excludes: Optional[List[str]] = None) -> str:
    """
    Rsync files to remote host.
    Uses archive mode with compression and verbose output (-avz).
    """
    cmd = [
        "rsync",
        "-avz",
        "--timeout=30",  # Rsync-level timeout (applies to IO)
    ]
    for exclude in (excludes or []):
        cmd.extend(["--exclude", exclude])
    cmd.append(local_path)
    cmd.append(f"{user}@{host}:{remote_path}")
    return _run(cmd, timeout=60)


class NornsMCP(MCPServer):
    """MCP server exposing Norns dev helpers - all tools complete synchronously."""

    def get_instructions(self) -> str:
        return (
            "Norns development MCP server for nanocosm. All tools are synchronous (complete in 1-10s).\n\n"
            "Core Tools:\n"
            "• sync - Upload code files to Norns (~2-4s)\n"
            "• deploy - Full deploy: sync + restart services + verify (~10-15s)\n"
            "• restart - Restart matron/sclang services (~3-5s)\n"
            "• logs - Fetch matron logs (last N lines, ~1-2s)\n"
            "• sc_logs - Fetch SuperCollider logs (last N lines, ~1-2s)\n"
            "• clear_sc_cache - Clear SC cache when engine updates don't load (~4-6s)\n"
            "• restart_services - Restart specific services by name (~2-5s)\n"
            "• check_services - Verify all services are active (~1s)\n"
            "• info - Get Norns system information (~1s)\n"
            "• screen - Capture the current Norns screen to PNG (optionally base64-embedded)\n\n"
            "Workflow Prompts:\n"
            "• full_deploy_workflow - Complete deployment and verification sequence\n"
            "• debug_sc_error - SuperCollider compilation error debugging\n"
            "• debug_stuck_loading - Debug stuck loading screen issues\n"
            "• engine_not_loading - Engine updates not appearing workflow\n"
            "• check_deployment_status - Quick health check of the current deployment"
        )

    def _get_host_user(self, host: Optional[str], user: Optional[str]) -> tuple[str, str]:
        """Get effective host and user."""
        return (host or DEFAULT_HOST, user or DEFAULT_USER)

    def _matron_eval(self, host: str, user: str, code: str) -> str:
        """
        Send a Lua snippet to the matron WebSocket (port 5555) via a minimal stdlib client.
        Uses HTTP Upgrade + masked text frame; returns first frame payload if any.
        """
        port = 5555
        key = base64.b64encode(os.urandom(16)).decode()
        request = (
            f"GET / HTTP/1.1\r\n"
            f"Host: {host}:{port}\r\n"
            "Upgrade: websocket\r\n"
            "Connection: Upgrade\r\n"
            f"Sec-WebSocket-Key: {key}\r\n"
            "Sec-WebSocket-Version: 13\r\n"
            "Sec-WebSocket-Protocol: bus.sp.nanomsg.org\r\n"
            "\r\n"
        ).encode()
        with socket.create_connection((host, port), timeout=3) as sock:
            sock.sendall(request)
            handshake = sock.recv(1024)
            if b"101" not in handshake:
                raise RuntimeError(f"WebSocket upgrade failed: {handshake!r}")
            # Build a single masked text frame
            payload = (code + "\n").encode()
            fin_opcode = 0x81  # FIN + text
            mask_bit = 0x80
            length = len(payload)
            header = bytearray([fin_opcode])
            if length <= 125:
                header.append(mask_bit | length)
            elif length < 65536:
                header.append(mask_bit | 126)
                header.extend(length.to_bytes(2, "big"))
            else:
                header.append(mask_bit | 127)
                header.extend(length.to_bytes(8, "big"))
            mask = os.urandom(4)
            masked = bytes(b ^ mask[i % 4] for i, b in enumerate(payload))
            frame = bytes(header) + mask + masked
            sock.sendall(frame)
            # Try to read one response frame (non-masked from server)
            sock.settimeout(1)
            try:
                first = sock.recv(2)
                if len(first) < 2:
                    return ""
                opcode = first[0] & 0x0F
                length_byte = first[1] & 0x7F
                if length_byte == 126:
                    ext = sock.recv(2)
                    length = int.from_bytes(ext, "big")
                elif length_byte == 127:
                    ext = sock.recv(8)
                    length = int.from_bytes(ext, "big")
                else:
                    length = length_byte
                data = b""
                while len(data) < length:
                    chunk = sock.recv(length - len(data))
                    if not chunk:
                        break
                    data += chunk
                if opcode == 1:  # text frame
                    return data.decode(errors="ignore")
                return ""
            except Exception:
                return ""

    # --- Core Tools ---

    def tool_sync(self, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Upload all script files to Norns (~2-4s). Syncs .lua and .sc files in main dir and lib/ subdirectory."""
        h, u = self._get_host_user(host, user)
        # Ensure directories exist
        _ssh(h, u, f"mkdir -p {NORNS_SCRIPT_DIR}/lib {NORNS_DATA_DIR}", use_sudo=False, timeout=10)
        # Sync files
        output = []
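        # A trailing slash on the rsync source (lib/ below) copies the directory's
        # contents into the destination rather than nesting a lib/ inside lib/.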
| output.append(_rsync(f"{WORKDIR}/nanocosm.lua", f"{NORNS_SCRIPT_DIR}/", h, u)) | |
| output.append(_rsync(f"{WORKDIR}/lib/", f"{NORNS_SCRIPT_DIR}/lib/", h, u, excludes=["__pycache__", ".DS_Store"])) | |
| return "Sync complete!\n" + "\n".join(output) | |

    def tool_deploy(self, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Full deployment (~10-15s): sync files, restart services, verify status."""
        h, u = self._get_host_user(host, user)
        output = []
        output.append("=== Syncing files ===")
        output.append(self.tool_sync(h, u))
        output.append("\n=== Restarting services ===")
        _ssh_systemctl_restart(h, u, ["norns-jack.service", "norns-matron.service", "norns-sclang.service"], timeout=20)
        output.append("Services restart initiated")
        output.append("\n=== Waiting 5s for services to stabilize ===")
        time.sleep(5)
        output.append("\n=== Service Status ===")
        status = _ssh_systemctl_status(h, u, ["norns-jack.service", "norns-matron.service", "norns-sclang.service"])
        output.append(status)
        output.append("\n=== Deployment Complete ===")
        return "\n".join(output)

    def tool_restart(self, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Restart matron and sclang services (~3-5s). Audio system restart without jack."""
        h, u = self._get_host_user(host, user)
        _ssh_systemctl_restart(h, u, ["norns-matron.service", "norns-sclang.service"])
        return "Matron and SuperCollider services restarted"

    def tool_logs(self, lines: int = 400, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Fetch recent matron logs (~1-2s). Returns last N lines from systemd journal."""
        h, u = self._get_host_user(host, user)
        return _ssh_journalctl(h, u, "norns-matron.service", lines, timeout=8)

    def tool_sc_logs(self, lines: int = 200, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Fetch recent SuperCollider logs (~1-2s). Returns last N lines from systemd journal."""
        h, u = self._get_host_user(host, user)
        return _ssh_journalctl(h, u, "norns-sclang.service", lines, timeout=8)

    def tool_clear_sc_cache(self, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Clear SuperCollider cache and restart sclang (~4-6s). Use when engine updates don't load."""
        h, u = self._get_host_user(host, user)
        _ssh(h, u, "rm -rf /home/we/.cache/sclang/", use_sudo=False, timeout=10)
        _ssh_systemctl_restart(h, u, ["norns-sclang.service"])
        return "SuperCollider cache cleared and sclang restarted"

    def tool_restart_services(self, service: str = "all", host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Restart specific Norns service(s) (~2-5s). Options: 'matron', 'sclang', 'jack', or 'all'."""
        h, u = self._get_host_user(host, user)
        services_map = {
            "matron": ["norns-matron.service"],
            "sclang": ["norns-sclang.service"],
            "jack": ["norns-jack.service"],
            "all": ["norns-jack.service", "norns-matron.service", "norns-sclang.service"],
        }
        services = services_map.get(service, services_map["all"])
        _ssh_systemctl_restart(h, u, services)
        return f"Restarted: {', '.join(services)}"

    def tool_check_services(self, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Check if all core Norns services are active (~1s). Checks jack, matron, sclang."""
        h, u = self._get_host_user(host, user)
        return _ssh_systemctl_status(h, u, ["norns-jack.service", "norns-matron.service", "norns-sclang.service"])

    def tool_info(self, host: Optional[str] = None, user: Optional[str] = None) -> str:
        """Get Norns system information (~1s). Returns kernel, version, disk usage."""
        h, u = self._get_host_user(host, user)
        output = []
        output.append("=== System Info ===")
        output.append(_ssh(h, u, "uname -a", use_sudo=False, timeout=5))
        output.append("\n=== Norns Version ===")
        output.append(_ssh(h, u, "cat /etc/norns-image.version 2>/dev/null || echo 'Version file not found'", use_sudo=False, timeout=5))
        output.append("\n=== Disk Usage ===")
        output.append(_ssh(h, u, "df -h | grep -E '(Filesystem|/home)'", use_sudo=False, timeout=5))
        return "\n".join(output)

    def tool_screen(self, filename: Optional[str] = None, host: Optional[str] = None, user: Optional[str] = None, embed_base64: bool = True) -> str:
        """Capture current Norns screen to PNG, pull it locally, and optionally return base64 PNG data."""
        h, u = self._get_host_user(host, user)
        # Prepare paths
        ts = int(time.time())
        base = filename or "mcp_screen"
        remote_dir = "/home/we/dust/data/nanocosm"
        remote_png = f"{remote_dir}/{base}_{ts}.png"
        local_dir = os.path.join(WORKDIR, "logs", "screens")
        os.makedirs(local_dir, exist_ok=True)
        local_png = os.path.join(local_dir, os.path.basename(remote_png))
        # Ensure remote directory exists
        _ssh(h, u, f"mkdir -p {remote_dir}", use_sudo=False, timeout=5)
        # Ask matron to export the screen
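        # (_norns.screen_export_png is matron's screen-export binding; it writes
        # the current 128x64 screen contents to a PNG at the given path.)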
        resp = self._matron_eval(h, u, f'_norns.screen_export_png("{remote_png}")')
        time.sleep(0.4)  # allow write to flush
        # Verify the file exists before pulling
        exists = _ssh(h, u, f"test -f {remote_png} && echo ok || echo missing", use_sudo=False, timeout=5)
        if "missing" in exists:
            raise RuntimeError(f"Screen export did not produce a file. Matron reply: {resp!r}")
        # Pull the file locally
        _scp(remote_png, local_png, h, u, timeout=10)
        if not embed_base64:
            return f"Saved screenshot to {local_png}"
        with open(local_png, "rb") as f:
            b64 = base64.b64encode(f.read()).decode()
        return f"Saved screenshot to {local_png}\ndata:image/png;base64,{b64}"

    # --- Workflow Prompts ---

    def prompt_full_deploy_workflow(self) -> str:
        """
        Complete deployment and verification workflow.
        Category: deployment, workflow
        Use when deploying code changes to ensure everything works correctly.
        """
        return """Complete Norns Deployment Workflow:

1. Deploy the code:
   - Run: deploy tool
   - This syncs files, restarts services, and checks status
   - Wait for completion (~10-15s)

2. Verify SuperCollider engine loaded:
   - Run: sc_logs tool (last 200 lines)
   - Look for: "Nanocosm engine loaded" or "Engine_Nanocosm"
   - If errors present, see debug_sc_error prompt

3. Check matron startup:
   - Run: logs tool (last 100 lines)
   - Look for: "nanocosm: ready!" at the end
   - Look for: any ERROR or !!!!! messages

4. Verify services are running:
   - Run: check_services tool
   - All three should show "active"

If any step fails, use the appropriate debugging prompt (debug_sc_error, debug_stuck_loading, engine_not_loading)."""

    def prompt_debug_sc_error(self) -> str:
        """
        Debug SuperCollider compilation or engine loading errors.
        Category: debugging, supercollider
        Use when SuperCollider engine fails to compile or load.
        """
        return """SuperCollider Error Debugging:

1. Get the full error:
   - Run: sc_logs tool (200+ lines)
   - Find the first "ERROR:" line in the output
   - Note the error type (e.g., "Non Boolean in test", "Parse error", etc.)

2. Common SC errors and fixes:
   a) "Non Boolean in test" / "mustBeBoolean":
      - Cause: Using .if() on UGen expressions
      - Fix: Replace with Select.ar() or SelectX.ar()
      - Example: (x <= 0).if({a}, {b}) → Select.ar((x > 0).clip(0,1), [a, b])
   b) "Parse error" / "Syntax error":
      - Check for: missing semicolons, unmatched braces, typos
      - Check line/char in error message
   c) "Class not defined":
      - Missing SynthDef.add call
      - Typo in class name

3. After fixing the code:
   - Run: deploy tool
   - Verify: sc_logs should show no errors and engine loaded message

4. If engine still won't load after fix:
   - Run: clear_sc_cache tool
   - Then: restart_services with service='all'"""

    def prompt_debug_stuck_loading(self) -> str:
        """
        Debug app stuck on "loading..." screen.
        Category: debugging, troubleshooting
        Use when nanocosm gets stuck on loading screen.
        """
        return """Stuck Loading Screen Troubleshooting:

Common causes and solutions:

1. Parameter ID collision:
   - Run: logs tool
   - Look for: "parameter ID collision" errors
   - Fix: Rename conflicting parameter (system params include: input_level, output_level, etc.)
   - System parameters are reserved and cannot be used

2. Engine fails to load:
   - Run: sc_logs tool
   - Look for: ERROR messages during engine load
   - See: debug_sc_error prompt for SC-specific fixes

3. Init function error:
   - Run: logs tool
   - Look for: Lua error in init() function
   - Check: Missing files, typos, nil values

4. Services not running:
   - Run: check_services tool
   - If any show "inactive": restart_services with service='all'

5. Cached engine preventing updates:
   - Run: clear_sc_cache tool
   - Then: restart_services with service='all'

After each fix:
- Run: deploy tool
- Wait 15 seconds
- Check: logs and sc_logs for success messages"""

    def prompt_engine_not_loading(self) -> str:
        """
        Debug when engine code changes don't appear after deployment.
        Category: debugging, engine, cache
        Use when you've updated engine code but changes don't take effect.
        """
        return """Engine Updates Not Loading:

This usually indicates cached SuperCollider class files.

Steps to fix:

1. Clear the cache:
   - Run: clear_sc_cache tool
   - This removes cached .sc files and restarts sclang

2. Verify the fix:
   - Run: sc_logs tool
   - Look for: Fresh compilation messages
   - Look for: "compiled XXX files" (indicates recompilation)
   - Look for: Your engine name loading

3. If still not working:
   - Run: deploy tool (full deploy)
   - This ensures files are synced and services restarted

4. Double-check the sync worked:
   - Run: info tool
   - Verify connection is working

5. Alternative: Full restart
   - Run: restart_services with service='all'
   - Includes jack audio restart for complete refresh

Note: SC caches compiled class files in ~/.cache/sclang/ for performance.
When engine .sc files change, this cache must be cleared."""

    def prompt_check_deployment_status(self) -> str:
        """
        Quick health check of current deployment.
        Category: monitoring, status
        Use to verify everything is running correctly.
        """
        return """Quick Deployment Health Check:

Run these tools in sequence:

1. check_services
   - Expected: All three show "active"
   - If not: Run restart_services with service='all'

2. sc_logs (last 50 lines)
   - Expected: No ERROR messages
   - Expected: Engine name appears in "engines:" list
   - If errors: See debug_sc_error prompt

3. logs (last 100 lines)
   - Expected: "nanocosm: ready!" or similar success message
   - Expected: No ERROR or !!!!! messages
   - If errors: See debug_stuck_loading prompt

All green? Your deployment is healthy!"""


if __name__ == "__main__":
    server = NornsMCP()
    server.run()
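# Typical standalone invocation (assuming umcp serves MCP over stdio, as most
# MCP servers do):
#   NORNS_HOST=norns.local NORNS_USER=we python3 norns_helper.py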