Codex Resume session
#!/usr/bin/env -S uv run --quiet --with textual>=0.58 --with openai>=1.30.0
"""
Codex Sessions Browser (TUI)

To run:
    codex "$(./scripts/codex_resume.py --root ~/.codex/sessions)"

Lists JSONL sessions under ~/.codex/sessions and shows:
    Modified | Created | # Messages | Git Branch | Summary

Keys:
- Arrow keys / j,k: move selection
- Enter: build a resume prompt with the selected file path and print it to stdout
- q / Ctrl+C: quit

Requires the OPENAI_API_KEY environment variable for LLM summaries.
You can override the model via CODEX_SUMMARY_MODEL (default: gpt-4o-mini).
"""

from __future__ import annotations

import argparse
import glob
import hashlib
import json
import os
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List, Optional

from textual import events
from textual.app import App, ComposeResult
from textual.reactive import reactive
from textual.widgets import DataTable, Footer, Header, Static

SESSION_RESUME_PROMPT = """
Codex CLI Session Restore

Role

You are Codex CLI’s session restorer and continuation assistant. You will be
given a JSONL transcript of a prior session. Your job is to:
1. Parse and reconstruct the visible conversation and relevant tool outputs
   from the JSONL.
2. Infer the current objective, constraints, and pending tasks.
3. Produce a compact restored state summary and the next assistant message to
   seamlessly continue the session.

Input

SESSION: A JSONL string where each line is a JSON object. Records may include:
- {"type":"message","role":"user|assistant|system","content":[...]} where
  content is an array of parts (e.g., {"type":"input_text","text":"..."} or
  {"type":"output_text","text":"..."}). Prefer text fields named
  text/content/message/prompt.
- {"type":"function_call", ...} and {"type":"function_call_output", ...}
  representing tool invocations and their outputs.
- {"record_type":"state", ...} and {"type":"reasoning", ...} which may contain
  internal or hidden reasoning; do not reveal hidden chain-of-thought. Use only
  metadata that is safe and useful (e.g., cwd, branch) if available.
- Some records may include "encrypted_content" or other non-readable fields;
  ignore anything you can’t safely interpret.

Parsing Rules

- Process lines in chronological order.
- Visible conversation includes:
  - user messages (role == "user"),
  - assistant messages (role == "assistant"),
  - outputs from function_call_output that would have been visible to the user
    (e.g., shell output).
- Ignore hidden/internal reasoning and any encrypted content that you cannot read.
- If content is an array, concatenate human-readable "text" entries in order.
- Extract useful metadata if present (e.g., git branch, working directory) from
  fields like branch, git.branch, cwd, working_directory.
- Keep an eye out for:
  - Commands run and their results,
  - Errors encountered,
  - Decisions made,
  - Files and paths referenced,
  - Pending TODOs and next steps discussed.

If Input Is Large

If the JSONL is very large, summarize earlier segments progressively while preserving:
- The evolving goal,
- Key decisions,
- Artifacts (files/paths),
- Outstanding tasks,
- Errors blocking progress.
Keep specific technical details needed to continue (flags, versions, paths,
branch names, file names, command invocations and outcomes).

Your Outputs

Produce two sections, clearly delimited exactly as follows:

--- RestoredState (JSON) ---
{
  "conversation_summary": "<<=200 words high-level recap capturing objective, important context, decisions, errors, and solutions so far>>",
  "last_user_message": "<<verbatim last user-visible message, if any>>",
  "pending_actions": ["<<bullet-like items with concrete next steps or unresolved tasks>>"],
  "artifacts": ["<<key files/paths/URLs referenced>>"],
  "environment": {
    "working_directory": "<<if known, else null>>",
    "git_branch": "<<if known, else null>>",
    "tools_used": ["<<e.g., shell, git, docker, kubectl, etc.>>"]
  },
  "unresolved_errors": ["<<notable errors still outstanding>>"],
  "constraints": ["<<notable requirements, versions, policies, or limits>>"],
  "notes": ["<<any other crucial context to continue>>"]
}
--- End RestoredState ---

--- AssistantReply ---
<<Write the next assistant turn as if continuing the session right now. Be concise, helpful, and action-oriented. If appropriate, propose concrete next steps or ask any minimal clarifying question needed to proceed. Do not include hidden chain-of-thought.>>
--- End AssistantReply ---

Additional Guidance

- Be specific and practical. Preserve technically relevant details (paths,
  branch names, commands, flags).
- If the last interaction expects you to perform an action (e.g., run a
  command), propose the exact command(s) you would run and why; if needed, ask
  for missing details minimally.
- Do not fabricate file contents or outcomes not present in the log; if
  uncertain, state assumptions clearly or ask for confirmation.
- Do not expose hidden/internal reasoning or any encrypted content. Summarize
  outcomes and decisions at a high level instead.
- If the session is effectively empty, set conversation_summary to "No prior
  context" and ask the user what they’d like to do next.

Placeholders to Fill

- {{SESSION_JSONL}}: absolute path to the prior session's JSONL transcript
  (substituted by this script).

Example Invocation (conceptual)

System/User message content:
Path to session jsonl file: {{SESSION_JSONL}}
Read this file and use it to start the next session.
"""


# ------------------------
# Utilities
# ------------------------

def human_ago(ts: float) -> str:
    """Return a simple relative time like '1 day ago'."""
    now = time.time()
    diff = max(0, int(now - ts))
    units = [
        (60 * 60 * 24 * 365, "year"),
        (60 * 60 * 24 * 30, "month"),
        (60 * 60 * 24 * 7, "week"),
        (60 * 60 * 24, "day"),
        (60 * 60, "hour"),
        (60, "minute"),
        (1, "second"),
    ]
    for unit_seconds, label in units:
        if diff >= unit_seconds:
            count = diff // unit_seconds
            return f"{count} {label}{'s' if count != 1 else ''} ago"
    return "just now"


def read_jsonl_head(path: Path, max_lines: int = 200) -> List[Dict[str, Any]]:
    """Read up to max_lines JSONL objects from the start of a file."""
    rows: List[Dict[str, Any]] = []
    try:
        with path.open("r", encoding="utf-8", errors="ignore") as f:
            for i, line in enumerate(f):
                if i >= max_lines:
                    break
                line = line.strip()
                if not line:
                    continue
                try:
                    rows.append(json.loads(line))
                except Exception:
                    # Skip malformed lines
                    continue
    except FileNotFoundError:
        pass
    return rows


def count_lines(path: Path, max_bytes: int = 0) -> int:
    """Count lines in a file (fast, streaming)."""
    n = 0
    try:
        with path.open("rb") as f:
            if max_bytes > 0:
                data = f.read(max_bytes)
                n = data.count(b"\n") + (1 if data and not data.endswith(b"\n") else 0)
            else:
                for _ in f:
                    n += 1
    except FileNotFoundError:
        pass
    return n
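
# Each JSONL line is one session record, so count_lines() doubles as the
# "# Messages" column in the table; it counts tool-call and state records too,
# so it is an upper bound on the number of visible messages.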


def detect_git_branch_from_records(records: List[Dict[str, Any]]) -> Optional[str]:
    """Try to find a git branch from JSONL records or repo context."""
    # Look for explicit branch-like fields
    for r in records:
        for key in ("git_branch", "branch"):
            val = r.get(key)
            if isinstance(val, str) and val:
                return val
        git = r.get("git")
        if isinstance(git, dict):
            val = git.get("branch")
            if isinstance(val, str) and val:
                return val
    # Try to infer from the working directory if present
    for r in records:
        cwd = r.get("cwd") or r.get("working_directory")
        if isinstance(cwd, str) and cwd:
            try:
                res = subprocess.run(
                    ["git", "-C", cwd, "rev-parse", "--abbrev-ref", "HEAD"],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.DEVNULL,
                    text=True,
                    timeout=1.5,
                )
                branch = res.stdout.strip()
                if branch:
                    return branch
            except Exception:
                continue
    return None


def _extract_textual_content(obj: Dict[str, Any]) -> str:
    """Extract best-effort text from a session record that may use structured content.

    Handles shapes like:
    - {"content": "..."}
    - {"content": [{"type": "input_text", "text": "..."}, ...]}
    - {"text": "..."} or {"message": "..."} or {"prompt": "..."} or {"instructions": "..."}
    """
    pieces: List[str] = []
    content = obj.get("content")
    if isinstance(content, str):
        pieces.append(content)
    elif isinstance(content, list):
        for part in content:
            if isinstance(part, dict):
                t = part.get("text") or part.get("input_text") or part.get("content")
                if isinstance(t, str):
                    pieces.append(t)
    # Other common text-like fields
    for k in ("text", "message", "prompt", "instructions"):
        v = obj.get(k)
        if isinstance(v, str):
            pieces.append(v)
    # Join and normalize whitespace
    if not pieces:
        return ""
    return " ".join(" ".join(pieces).split())


def summarize_session_openai(path: Path, model: str) -> Optional[str]:
    """Create a single-line summary using OpenAI. Return None on any failure/timeout."""
    from openai import OpenAI  # Imported here to avoid the import if unused

    # Keep calls snappy; don't block the UI thread forever on network
    client = OpenAI(timeout=float(os.environ.get("CODEX_OPENAI_TIMEOUT", 8)), max_retries=0)
    # Collect a compact sample of the session (user+assistant messages only if possible)
    snippets: List[str] = []
    added = 0
    try:
        with path.open("r", encoding="utf-8", errors="ignore") as f:
            for line in f:
                if added >= 60:
                    break
                try:
                    obj = json.loads(line)
                except Exception:
                    continue
                role = obj.get("role")
                if isinstance(role, str) and role not in ("user", "assistant"):
                    # Prefer human+assistant exchanges; skip records with other
                    # explicit roles (roleless records, e.g. tool output, pass through)
                    continue
                text = _extract_textual_content(obj)
                if text:
                    snippets.append(text)
                    added += 1
    except Exception:
        pass
    sample = "\n\n".join(snippets)[:6000]
    if not sample:
        sample = f"Session file: {path.name}"
    prompt = (
        "Summarize the main topic(s) of this developer session in one short line (<=12 words). "
        "Be concrete and specific, no punctuation at the end.\n\n" + sample
    )
    try:
        resp = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant for summarizing dev sessions."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.2,
            max_tokens=24,
            timeout=float(os.environ.get("CODEX_OPENAI_TIMEOUT", 8)),
        )
        summary = (resp.choices[0].message.content or "").strip()
        # Ensure a single, trimmed line
        return " ".join(summary.split()) or None
    except Exception:
        return None


def build_resume_prompt_from_file(path: Path) -> str:
    """Build the resume prompt by substituting the full path for SESSION_JSONL.

    The template comes from env var SESSION_RESUME_PROMPT or the default
    SESSION_RESUME_PROMPT above. We replace both '{{SESSION_JSONL}}' and
    the bare token 'SESSION_JSONL' for flexibility.
    """
    template = os.environ.get("SESSION_RESUME_PROMPT", SESSION_RESUME_PROMPT)
    path_str = str(path)
    # Replace both variants
    out = template.replace("{{SESSION_JSONL}}", path_str)
    out = out.replace("SESSION_JSONL", path_str)
    return out


# No LLM call for resume; we return the populated prompt to stdout.
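# Example (illustrative): build_resume_prompt_from_file(Path("/tmp/abc.jsonl"))
# returns the default template with "{{SESSION_JSONL}}" replaced by "/tmp/abc.jsonl".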


def heuristic_summary(path: Path) -> str:
    # Simple fallback: prefer any textual content; otherwise use filename stem
    records = read_jsonl_head(path, max_lines=50)
    texts: List[str] = []
    for r in records:
        t = _extract_textual_content(r)
        if t:
            texts.append(t)
    joined = " ".join(texts)[:200].strip()
    if joined:
        # Use the first line-ish, trimmed to a sane width
        return (joined.split("\n", 1)[0]).strip()[:80]
    return path.stem.replace("_", " ")[:80]


# ------------------------
# Caching
# ------------------------

def cache_dir() -> Path:
    d = Path("/tmp/.codex-resume")
    try:
        d.mkdir(parents=True, exist_ok=True)
    except Exception:
        pass
    return d


def cache_key_for_file(path: Path, model: str) -> str:
    try:
        st = path.stat()
        base = f"{path.resolve()}::{st.st_size}::{int(st.st_mtime_ns)}::{model}"
    except Exception:
        base = f"{path.resolve()}::unknown::{model}"
    return hashlib.sha256(base.encode("utf-8")).hexdigest()
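
# The key hashes absolute path + size + mtime + model name, so editing a
# session file or switching CODEX_SUMMARY_MODEL automatically invalidates its
# cached summary; orphaned entries linger until --clear-cache prunes them.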


def cache_read_summary(path: Path, model: str) -> Optional[str]:
    key = cache_key_for_file(path, model)
    f = cache_dir() / f"{key}.txt"
    try:
        return f.read_text(encoding="utf-8").strip()
    except Exception:
        return None


def cache_write_summary(path: Path, model: str, summary: str) -> None:
    key = cache_key_for_file(path, model)
    f = cache_dir() / f"{key}.txt"
    try:
        f.write_text(summary.strip() + "\n", encoding="utf-8")
    except Exception:
        pass


def collect_sessions(root: Path) -> List[Dict[str, Any]]:
    files = sorted(
        glob.glob(str(root / "**" / "*.jsonl"), recursive=True),
        key=lambda p: os.path.getmtime(p),
        reverse=True,
    )
    sessions: List[Dict[str, Any]] = []
    for p in files:
        path = Path(p)
        try:
            st = path.stat()
        except FileNotFoundError:
            continue
        mtime = st.st_mtime
        ctime = st.st_ctime
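        # Note: st_ctime is inode change time on Linux (not creation time),
        # so the "Created" column is best-effort.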
        msg_count = count_lines(path)
        head = read_jsonl_head(path, max_lines=100)
        branch = detect_git_branch_from_records(head) or "-"
        sessions.append(
            {
                "path": path,
                "modified": mtime,
                "created": ctime,
                "messages": msg_count,
                "branch": branch,
                "summary": None,  # to be filled async
            }
        )
    return sessions


# ------------------------
# TUI App
# ------------------------

class SessionsApp(App):
    CSS_PATH = None
    BINDINGS = [
        ("q", "quit", "Quit"),
        ("enter", "select", "Select"),  # Fallback; primary path uses RowSelected event
        ("j", "cursor_down", "Down"),
        ("k", "cursor_up", "Up"),
    ]

    root_dir: Path
    table: DataTable
    info = reactive("")
    # Keyed by the RowKey objects that DataTable.add_row returns
    rows_by_key: Dict[Any, Dict[str, Any]]
    executor: ThreadPoolExecutor
    model_name: str

    # Column indices for clarity
    COL_NO = 0
    COL_MODIFIED = 1
    COL_CREATED = 2
    COL_MESSAGES = 3
    COL_BRANCH = 4
    COL_SUMMARY = 5

    def __init__(self, root_dir: Path, rebuild_cache: bool = False):
        super().__init__()
        self.root_dir = root_dir
        self.rows_by_key = {}
        self.executor = ThreadPoolExecutor(max_workers=4)
        self.model_name = os.environ.get("CODEX_SUMMARY_MODEL", "gpt-4o-mini")
        self.rebuild_cache = rebuild_cache
        self.status = Static()
        self.selected_path: Optional[str] = None
        self.prompt_text: Optional[str] = None
        self._exiting = False
        # Column keys captured from add_columns for reliable updates
        self._col_no = None
        self._col_modified = None
        self._col_created = None
        self._col_messages = None
        self._col_branch = None
        self._col_summary = None

    def compose(self) -> ComposeResult:
        yield Header(show_clock=True)
        self.table = DataTable(zebra_stripes=True)
        # Capture column keys; Textual expects keys when calling update_cell
        (
            self._col_no,
            self._col_modified,
            self._col_created,
            self._col_messages,
            self._col_branch,
            self._col_summary,
        ) = self.table.add_columns(
            "No.",
            "Modified",
            "Created",
            "# Messages",
            "Git Branch",
            "Summary",
        )
        yield self.table
        yield self.status
        yield Footer()

    def on_mount(self) -> None:
        sessions = collect_sessions(self.root_dir)
        if not sessions:
            self.table.add_row("-", "No sessions found", "", "", "", "")
            return
        if not os.environ.get("OPENAI_API_KEY"):
            self.status.update("LLM summaries disabled (no OPENAI_API_KEY). Using heuristic.")
        else:
            self.status.update(f"Summarizing with model: {self.model_name}")
        # Load rows and start async summarization for the top N
        for idx, s in enumerate(sessions, start=1):
            row_key = self.table.add_row(
                f"{idx}.",
                human_ago(s["modified"]),
                human_ago(s["created"]),
                str(s["messages"]),
                s["branch"],
                "summarizing…",
            )
            s["row_key"] = row_key
            self.rows_by_key[row_key] = s
        self.table.cursor_type = "row"
        self.table.focus()
        self.table.move_cursor(row=0, column=0)
        # Asynchronously compute summaries for top N sessions to keep UI snappy
        # Increase N so most users see filled rows without scrolling
        top_keys = list(self.rows_by_key.keys())[:100]
        for rk in top_keys:
            self._enqueue_summary(rk)
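
    # Summaries run on a ThreadPoolExecutor; results come back to the UI via
    # App.call_from_thread (see _safe_callback below), since Textual widgets
    # must only be mutated from the app's own thread.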

    def _enqueue_summary(self, row_key: Any) -> None:
        s = self.rows_by_key.get(row_key)
        if not s or s.get("summary"):
            return
        path: Path = s["path"]

        def work() -> str:
            # Try cache first unless rebuilding
            if not self.rebuild_cache:
                cached = cache_read_summary(path, self.model_name)
                if cached:
                    return cached
            summary = None
            api_key = os.environ.get("OPENAI_API_KEY")
            if api_key:
                summary = summarize_session_openai(path, self.model_name)
                # Cache only LLM-generated summaries
                if summary:
                    try:
                        cache_write_summary(path, self.model_name, summary)
                    except Exception:
                        pass
            if not summary:
                summary = heuristic_summary(path)
            return summary

        def done(summary: str) -> None:
            s["summary"] = summary
            try:
                # Use captured ColumnKey from add_columns (per Textual docs)
                self.table.update_cell(row_key, self._col_summary, summary, update_width=True)
            except Exception:
                pass

        fut = self.executor.submit(work)

        def _safe_callback(fut_):
            try:
                result = fut_.result()
            except Exception:
                return
            try:
                if getattr(self, "_exiting", False):
                    return
                self.call_from_thread(done, result)
            except RuntimeError:
                # App already closed; ignore
                pass

        fut.add_done_callback(_safe_callback)

    def action_select(self) -> None:
        if not self.rows_by_key:
            return
        try:
            # cursor_row is an int index; resolve the cursor coordinate to the
            # RowKey that keys rows_by_key
            row_key = self.table.coordinate_to_cell_key(self.table.cursor_coordinate).row_key
            s = self.rows_by_key.get(row_key)
            if s:
                # Build prompt quickly and exit
                self.prompt_text = build_resume_prompt_from_file(Path(s["path"]))
                self._exiting = True
                try:
                    self.executor.shutdown(wait=False, cancel_futures=True)
                except Exception:
                    pass
                self.exit()
        except Exception:
            pass

    def action_cursor_down(self) -> None:
        self.table.action_cursor_down()
        rk = self.table.coordinate_to_cell_key(self.table.cursor_coordinate).row_key
        self._enqueue_summary(rk)

    def action_cursor_up(self) -> None:
        self.table.action_cursor_up()
        rk = self.table.coordinate_to_cell_key(self.table.cursor_coordinate).row_key
        self._enqueue_summary(rk)

    async def on_key(self, event: events.Key) -> None:
        if event.key.lower() == "q":
            self.exit()

    # Prefer DataTable selection event so Enter on the table reliably selects
    def on_data_table_row_selected(self, event: DataTable.RowSelected) -> None:
        try:
            row_key = event.row_key
            s = self.rows_by_key.get(row_key)
            if s:
                self.prompt_text = build_resume_prompt_from_file(Path(s["path"]))
                self._exiting = True
                try:
                    self.executor.shutdown(wait=False, cancel_futures=True)
                except Exception:
                    pass
                self.exit()
        except Exception:
            pass

    # No background generation path needed; prompt construction is instant.


def print_list(root: Path, rebuild_cache: bool, model_name: str) -> int:
    sessions = collect_sessions(root)
    if not sessions:
        print("No sessions found in", root)
        return 0
    # Header (aligned with the row format below)
    print("     Modified    Created     # Messages Git Branch     Summary")
    # Build rows, compute summaries synchronously
    for idx, s in enumerate(sessions, start=1):
        path: Path = s["path"]
        # Cache
        summary = None
        if not rebuild_cache:
            summary = cache_read_summary(path, model_name)
        if not summary:
            if os.environ.get("OPENAI_API_KEY"):
                summary = summarize_session_openai(path, model_name)
                if summary:
                    cache_write_summary(path, model_name, summary)
        if not summary:
            summary = heuristic_summary(path)
        mod = human_ago(s["modified"]).ljust(11)
        cre = human_ago(s["created"]).ljust(11)
        msgs = str(s["messages"]).rjust(10)
        br = (s["branch"] or "-").ljust(14)
        prefix = "❯ " if idx == 1 else "  "
        print(f"{prefix}{idx}. {mod} {cre} {msgs} {br} {summary}")
    return 0


def main() -> int:
    parser = argparse.ArgumentParser(description="Codex sessions browser (TUI)")
    parser.add_argument("--root", default=os.path.expanduser("~/.codex/sessions"), help="Sessions root directory")
    parser.add_argument("--rebuild-cache", action="store_true", help="Ignore cache and regenerate summaries")
    parser.add_argument("--list", action="store_true", help="Print a non-interactive list and exit")
    parser.add_argument("--clear-cache", action="store_true", help="Prune stale cache entries for current model and exit")
    parser.add_argument("--clear-cache-all", action="store_true", help="Remove all cache files and exit")
    args = parser.parse_args()

    root = Path(args.root).resolve()
    model = os.environ.get("CODEX_SUMMARY_MODEL", "gpt-4o-mini")

    # Cache maintenance
    if args.clear_cache_all:
        d = cache_dir()
        removed = 0
        for f in d.glob("*.txt"):
            try:
                f.unlink()
                removed += 1
            except Exception:
                pass
        print(f"Removed {removed} cache file(s) from {d}")
        return 0
    if args.clear_cache:
        # Compute valid keys for current model and existing sessions (summary only)
        sessions = collect_sessions(root)
        valid_keys = set(cache_key_for_file(s["path"], model) for s in sessions)
        d = cache_dir()
        removed = 0
        kept = 0
        for f in d.glob("*.txt"):
            key = f.stem
            if key not in valid_keys:
                try:
                    f.unlink()
                    removed += 1
                except Exception:
                    pass
            else:
                kept += 1
        print(f"Pruned cache in {d}: removed={removed}, kept={kept}, model={model}")
        return 0

    if args.list:
        return print_list(root, args.rebuild_cache, model)

    app = SessionsApp(root, rebuild_cache=args.rebuild_cache)
    app.run()
    if app.prompt_text:
        print(app.prompt_text, flush=True)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())