Skip to content

Instantly share code, notes, and snippets.

@benabraham
Last active December 28, 2025 17:59
Show Gist options
  • Select an option

  • Save benabraham/3728d8efd4dd62ad82b22e0e286e85d0 to your computer and use it in GitHub Desktop.

Select an option

Save benabraham/3728d8efd4dd62ad82b22e0e286e85d0 to your computer and use it in GitHub Desktop.
Simple Claude Code status line with context usage progress bar (dark/light themes, truecolor/256 fallback)
#!/usr/bin/env python3
"""
Simple Claude Code StatusLine Script
Shows context usage/progress with colored bar
Uses current_usage field for accurate context window calculations
BUG WORKAROUND (remove when fixed):
https://github.com/anthropics/claude-code/issues/13783
The statusline API's context_window data is inaccurate. This script parses
the transcript JSONL as a workaround. When bug #13783 is fixed, delete:
1. The TRANSCRIPT PARSING section (~45 lines, search for "Bug #13783")
2. The 4 lines in main() that call it (search for "Bug #13783 workaround")
INSTALLATION:
1. Save this file to ~/.claude/simple-statusline.py
2. Make it executable:
chmod +x ~/.claude/simple-statusline.py
3. Add to ~/.claude/settings.json:
{
"statusLine": {
"type": "command",
"command": "~/.claude/simple-statusline.py"
}
}
4. Set THEME below to 'dark' or 'light'
5. Restart Claude Code
Note: After initial setup, edits to this script take effect immediately (no restart needed).
Latest version: https://gist.github.com/3728d8efd4dd62ad82b22e0e286e85d0
"""
import json
import os
import subprocess
import sys
# =============================================================================
# CONFIGURATION
# =============================================================================
# Progress bar width in characters
BAR_WIDTH = 33
# Active theme: 'dark' or 'light' (must be set)
THEME = "dark"
# Color format: ("#RRGGBB", fallback_256)
# Set hex to None to always use 256 fallback
THEMES = {
    "dark": {
        # Model badge colors: (bg, fg)
        "model_sonnet": (("#A3BE8C", 108), ("#2E3440", 236)),  # nord14 bg, nord0 fg
        "model_opus": (("#88C0D0", 110), ("#2E3440", 236)),  # nord8 bg, nord0 fg
        "model_haiku": (("#4C566A", 60), ("#ECEFF4", 255)),  # nord3 bg, nord6 fg
        "model_default": (("#D8DEE9", 253), ("#2E3440", 236)),  # nord4 bg, nord0 fg
        # Unused portion of progress bar
        "bar_empty": ("#292c33", 234),  # darker than nord0
        # Text colors (Nord). Inner fallback None means: use the outer value.
        "text_percent": (("#5E81AC", None), 67),  # nord10
        "text_numbers": (("#5E81AC", None), 67),  # nord10
        "text_cwd": (("#81A1C1", None), 110),  # nord9
        "text_git": (("#A3BE8C", None), 108),  # nord14 green
        "text_na": (("#D08770", None), 173),  # nord12 orange
        # Progress bar gradient: (threshold, (hex, fallback_256))
        # Threshold means "use this color if pct < threshold"
        "gradient": [
            (10, ("#183522", 22)),  # 0-9% dark green
            (20, ("#153E21", 22)),  # 10-19%
            (30, ("#104620", 28)),  # 20-29%
            (40, ("#0B4E1C", 28)),  # 30-39%
            (50, ("#065716", 34)),  # 40-49% bright green
            (60, ("#2E5900", 106)),  # 50-59% yellow-green
            (70, ("#5D4F00", 136)),  # 60-69% olive
            (80, ("#833A00", 166)),  # 70-79% orange
            (90, ("#A10700", 160)),  # 80-89% red-orange
            (101, ("#B30000", 196)),  # 90-100% red (101 so pct == 100 matches)
        ],
    },
    "light": {
        # Model badge colors: (bg, fg)
        "model_sonnet": (
            ("#8FAA78", 107),
            ("#FFFFFF", 231),
        ),  # muted green bg, white fg
        "model_opus": (("#6AA2B2", 73), ("#FFFFFF", 231)),  # muted aqua bg, white fg
        "model_haiku": (("#8C96AA", 103), ("#FFFFFF", 231)),  # muted grey bg, white fg
        "model_default": (("#646E82", 66), ("#FFFFFF", 231)),  # slate bg, white fg
        # Unused portion of progress bar
        "bar_empty": ("#D8DEE9", 253),  # nord4
        # Text colors
        "text_percent": (("#505050", None), 240),  # dark grey
        "text_numbers": (("#505050", None), 240),  # dark grey
        "text_cwd": (("#3C465A", None), 238),  # dark slate
        "text_git": (("#508C50", None), 65),  # muted green
        "text_na": (("#D08770", None), 173),  # nord12 orange
        # Progress bar gradient
        "gradient": [
            (10, ("#22783C", 29)),  # 0-9% green
            (20, ("#228237", 29)),  # 10-19%
            (30, ("#228C32", 35)),  # 20-29%
            (40, ("#329628", 35)),  # 30-39%
            (50, ("#46A01E", 70)),  # 40-49%
            (60, ("#828C00", 142)),  # 50-59% yellow-green
            (70, ("#A08200", 178)),  # 60-69% olive/yellow
            (80, ("#B46400", 172)),  # 70-79% orange
            (90, ("#C83C00", 166)),  # 80-89% red-orange
            (101, ("#D21E1E", 160)),  # 90-100% red (101 so pct == 100 matches)
        ],
    },
}
# =============================================================================
# COLOR SUPPORT DETECTION & CONVERSION
# =============================================================================
def hex_to_rgb(hex_color):
    """Convert a '#RRGGBB' string to an (R, G, B) int tuple; None passes through."""
    if hex_color is None:
        return None
    digits = hex_color.lstrip("#")
    red = int(digits[0:2], 16)
    green = int(digits[2:4], 16)
    blue = int(digits[4:6], 16)
    return (red, green, blue)
def supports_truecolor():
    """True when the terminal advertises 24-bit color via $COLORTERM."""
    return os.environ.get("COLORTERM", "").lower() in ("truecolor", "24bit")


# Detect once at import; every escape builder consults this flag.
TRUECOLOR = supports_truecolor()
# =============================================================================
# ANSI ESCAPE HELPERS
# =============================================================================
RESET = "\033[0m"
BOLD = "\033[1m"


def _color(rgb, fallback_256, is_bg=False):
    """Build an ANSI SGR color sequence with truecolor/256 fallback.

    ``rgb`` may be a '#RRGGBB' string, an (r, g, b) tuple, or None; when it is
    None or the terminal lacks truecolor support, the 256-palette index is used.
    """
    layer = 48 if is_bg else 38  # 48 = background, 38 = foreground
    if not (TRUECOLOR and rgb is not None):
        return f"\033[{layer};5;{fallback_256}m"
    r, g, b = hex_to_rgb(rgb) if isinstance(rgb, str) else rgb
    return f"\033[{layer};2;{r};{g};{b}m"
def fg_themed(color_tuple):
    """Resolve a theme text-color entry to a foreground ANSI code.

    Accepts either ((rgb, inner_fallback), outer_fallback) — where a None inner
    fallback defers to the outer value — or a plain (rgb, fallback) pair.
    """
    first = color_tuple[0]
    if isinstance(first, tuple):
        rgb, fallback = first
        if fallback is None:
            fallback = color_tuple[1]
    else:
        rgb, fallback = color_tuple
    return _color(rgb, fallback, is_bg=False)
def bg_themed(color_tuple):
    """Background ANSI code from a theme ((rgb, fallback), _) entry."""
    pair = color_tuple[0]
    return _color(pair[0], pair[1], is_bg=True)
def fg_gradient(rgb, fallback_256):
    """Foreground ANSI code for a gradient (rgb, fallback_256) pair."""
    return _color(rgb, fallback_256, is_bg=False)
def fg_empty():
    """Foreground ANSI code for the unused tail of the progress bar."""
    rgb, fallback = THEMES[THEME]["bar_empty"]
    return _color(rgb, fallback, is_bg=False)
# =============================================================================
# THEME-AWARE COLOR FUNCTIONS
# =============================================================================
def get_colors_for_percentage(pct):
    """Return the (rgb, fallback_256) bar-fill color for *pct* percent full."""
    gradient = THEMES[THEME]["gradient"]
    for threshold, color in gradient:
        if pct < threshold:
            return color
    # Unreachable while the final threshold exceeds 100; kept as a safety net.
    return gradient[-1][1]
def get_model_colors(model):
    """Return the ANSI prefix (background + bold + foreground) for the model badge.

    Note: despite the plural name, this returns a single concatenated escape
    string ready to prepend to the badge text — not a (bg, fg) tuple.
    """
    theme = THEMES[THEME]
    # Match on the display name; anything unrecognized gets the neutral badge.
    if "Sonnet" in model:
        key = "model_sonnet"
    elif "Opus" in model:
        key = "model_opus"
    elif "Haiku" in model:
        key = "model_haiku"
    else:
        key = "model_default"
    bg_tuple, fg_tuple = theme[key]
    bg_code = _color(bg_tuple[0], bg_tuple[1], is_bg=True)
    fg_code = _color(fg_tuple[0], fg_tuple[1], is_bg=False)
    return bg_code + BOLD + fg_code
def text_color(key):
    """Foreground ANSI code for a named text role: 'percent', 'numbers', 'cwd', 'git', 'na'."""
    return fg_themed(THEMES[THEME][f"text_{key}"])
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def center_text(text, min_width=12):
    """Center *text* in a field of max(min_width, len(text) + 2) characters.

    When the total padding is odd, the extra space goes on the right.
    """
    width = max(min_width, len(text) + 2)
    left = (width - len(text)) // 2
    right = width - len(text) - left
    return f"{' ' * left}{text}{' ' * right}"
def get_git_branch(cwd):
    """Return the current git branch name for *cwd*, or None on any failure.

    Failures include: git missing, timeout, non-repo directory, or detached
    HEAD (empty output from --show-current).
    """
    try:
        proc = subprocess.run(
            ["git", "-C", cwd, "branch", "--show-current"],
            capture_output=True,
            text=True,
            timeout=1,
        )
    except Exception:
        return None
    if proc.returncode != 0:
        return None
    return proc.stdout.strip() or None
def get_cwd_suffix(cwd):
    """Render ' <cwd>' (home shortened to ~) plus an optional bold '[branch]' tag."""
    if not cwd:
        return ""
    home = os.path.expanduser("~")
    shown = "~" + cwd[len(home):] if cwd.startswith(home) else cwd
    pieces = [f" {text_color('cwd')}{shown}"]
    branch = get_git_branch(cwd)
    if branch:
        pieces.append(f" {BOLD}{text_color('git')}[{branch}]")
    return "".join(pieces)
# =============================================================================
# MAIN STATUS LINE BUILDER
# =============================================================================
def build_progress_bar(pct, model, cwd, total_tokens, context_limit):
    """Assemble the full status line: badge, bar, percent, token counts, cwd."""
    filled = round(pct * BAR_WIDTH / 100)
    fill_rgb, fill_256 = get_colors_for_percentage(pct)
    segments = [
        get_model_colors(model) + center_text(model) + RESET,
        fg_gradient(fill_rgb, fill_256) + "█" * filled,
        fg_empty() + "█" * (BAR_WIDTH - filled),
        RESET + text_color("percent"),
        f" {pct}%",
        text_color("numbers"),
        f" ({total_tokens // 1000}k/{context_limit // 1000}k)",
        get_cwd_suffix(cwd),
        RESET,
    ]
    return "".join(segments)
def build_na_line(model, cwd):
    """Status line shown while no usage data is available yet."""
    badge = get_model_colors(model) + center_text(model) + RESET
    suffix = get_cwd_suffix(cwd)
    return f"{badge} {text_color('na')} context size N/A{suffix}{RESET}"
# =============================================================================
# DEMO MODE
# =============================================================================
def show_scale_demo(mode="animate"):
    """Preview the gradient.

    'animate' loops an in-place 0-100% fill forever (Ctrl-C to stop);
    'min'/'max'/'mid' print one static bar per 10% band at that band's
    low/high/middle percentage. Any other mode prints an error and exits 1.
    """
    import time

    def render(pct):
        filled = round(pct * BAR_WIDTH / 100)
        rgb, idx = get_colors_for_percentage(pct)
        return (
            fg_gradient(rgb, idx)
            + "█" * filled
            + fg_empty()
            + "█" * (BAR_WIDTH - filled)
            + RESET
        )

    if mode == "animate":
        try:
            while True:
                for pct in range(101):
                    print(f"\r{pct:3d}%: {render(pct)}", end="", flush=True)
                    time.sleep(0.1)
                time.sleep(0.5)  # hold the full bar briefly before restarting
        except KeyboardInterrupt:
            print()
    elif mode in ("min", "max", "mid"):
        print(f"Color Scale Demo ({mode} value):")
        print()
        for lo in range(0, 100, 10):
            hi = 100 if lo == 90 else lo + 9  # last band covers 90-100
            pct = lo if mode == "min" else hi if mode == "max" else (lo + hi) // 2
            print(f"{lo:3d}-{hi:3d}%: {render(pct)}")
    else:
        print(f"Error: Invalid mode '{mode}'. Use: min, max, mid, or animate")
        sys.exit(1)
# =============================================================================
# TRANSCRIPT PARSING (Bug #13783 workaround)
# DELETE THIS ENTIRE SECTION when bug is fixed (~45 lines until next ===)
# =============================================================================
def get_tokens_from_transcript(transcript_path):
    """
    Parse the JSONL transcript for an accurate context-token count.

    Workaround for https://github.com/anthropics/claude-code/issues/13783 —
    remove this function when the bug is fixed.

    Returns the input-side tokens of the newest non-sidechain entry (input
    counts are cumulative per API call) plus output tokens summed across all
    entries (each entry reports only its own), or None if nothing usable is
    found or the file cannot be read.
    """
    if not transcript_path or not os.path.exists(transcript_path):
        return None
    newest_usage = None
    newest_ts = None
    output_total = 0  # output tokens are per-entry, so accumulate them all
    try:
        with open(transcript_path, 'r') as handle:
            for raw_line in handle:
                try:
                    entry = json.loads(raw_line)
                except json.JSONDecodeError:
                    continue  # tolerate malformed lines
                # Skip sidechains, API errors, and entries lacking usage data
                if entry.get('isSidechain') or entry.get('isApiErrorMessage'):
                    continue
                usage = entry.get('message', {}).get('usage')
                stamp = entry.get('timestamp')
                if not (usage and stamp):
                    continue
                output_total += usage.get('output_tokens', 0)
                if newest_ts is None or stamp > newest_ts:
                    newest_ts = stamp
                    newest_usage = usage
    except (IOError, OSError):
        return None
    if newest_usage is None:
        return None
    return (
        newest_usage.get('input_tokens', 0)
        + newest_usage.get('cache_read_input_tokens', 0)
        + newest_usage.get('cache_creation_input_tokens', 0)
        + output_total
    )
# =============================================================================
# MAIN
# =============================================================================
def main():
    """Read statusline JSON from stdin and print one formatted status line."""
    # Refuse to run until THEME names a real theme
    if THEME not in THEMES:
        # Yellow text on red bg, then red text on yellow bg — visible anywhere
        warning = " PLEASE SET THEME to 'dark' or 'light' in simple-statusline.py "
        print(f"\033[48;5;196m\033[38;5;220m\033[1m{warning}\033[0m")
        print(f"\033[48;5;220m\033[38;5;196m\033[1m{warning}\033[0m")
        return
    # Handle --show-scale demo mode
    if len(sys.argv) > 1 and sys.argv[1] == "--show-scale":
        show_scale_demo(sys.argv[2] if len(sys.argv) > 2 else "animate")
        return
    # Read and parse JSON input from Claude Code
    try:
        data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print("statusline: invalid JSON input", file=sys.stderr)
        return
    model = data.get("model", {}).get("display_name", "Claude")
    cwd = data.get("cwd", "")
    # Context window info supplied by the statusline API
    context_window = data.get("context_window", {})
    context_limit = context_window.get("context_window_size", 200000)
    current_usage = context_window.get("current_usage")
    # Intended calculation from the API's current_usage field
    if current_usage:
        total_tokens = (
            current_usage.get("input_tokens", 0)
            + current_usage.get("cache_creation_input_tokens", 0)
            + current_usage.get("cache_read_input_tokens", 0)
            + current_usage.get("output_tokens", 0)  # assistant responses in context
        )
    else:
        total_tokens = None
    # Bug #13783 workaround: correct with transcript parsing
    # DELETE THESE 4 LINES when bug is fixed (see docstring at top)
    transcript_path = data.get("transcript_path")
    transcript_tokens = get_tokens_from_transcript(transcript_path)
    if transcript_tokens is not None:
        total_tokens = transcript_tokens
    # No usage data yet: show the N/A line instead of a bar
    if total_tokens is None:
        print(build_na_line(model, cwd))
        return
    # Percentage of the context window used, capped at 100
    pct = min(100, int(total_tokens * 100 / context_limit)) if total_tokens > 0 else 0
    print(build_progress_bar(pct, model, cwd, total_tokens, context_limit))


if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment