@Ylarod
Last active December 26, 2025 09:23
codex-cli transformer for claude-code-router
class CodexCLITransformer {
  name = "codex-cli";

  constructor(options) {
    this.options = options || {};
    this.debug = this.options.debug || false;
    // Reasoning configuration with validation
    this.reasoning = this.validateReasoningConfig(this.options.reasoning || {});
  }

  validateReasoningConfig(config) {
    const validEfforts = ["low", "medium", "high", "none", "minimal"];
    const validSummaries = ["auto", "concise", "detailed", "none"];
    const defaults = {
      enable: false,
      effort: "minimal",
      summary: "auto"
    };
    const result = { ...defaults };
    // Validate enable
    if (config.enable !== undefined) {
      if (typeof config.enable === "boolean") {
        result.enable = config.enable;
      } else {
        console.warn(`[CodexCLI] Invalid reasoning.enable value: ${config.enable}. Expected boolean. Using default: ${defaults.enable}`);
      }
    }
    // Validate effort
    if (config.effort !== undefined) {
      if (validEfforts.includes(config.effort)) {
        result.effort = config.effort;
      } else {
        console.warn(`[CodexCLI] Invalid reasoning.effort value: ${config.effort}. Expected one of: ${validEfforts.join(", ")}. Using default: ${defaults.effort}`);
      }
    }
    // Validate summary
    if (config.summary !== undefined) {
      if (validSummaries.includes(config.summary)) {
        result.summary = config.summary;
      } else {
        console.warn(`[CodexCLI] Invalid reasoning.summary value: ${config.summary}. Expected one of: ${validSummaries.join(", ")}. Using default: ${defaults.summary}`);
      }
    }
    return result;
  }
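
  // For illustration, a (hypothetical) options object this constructor accepts;
  // any invalid reasoning field falls back to its default with a console warning:
  //   new CodexCLITransformer({
  //     debug: true,
  //     reasoning: { enable: true, effort: "high", summary: "detailed" }
  //   });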
  async transformRequestIn(request, provider) {
    const body = typeof request === "string" ? JSON.parse(request) : { ...request };
    if (this.debug) {
      console.log("\n[DEBUG] CodexCLI Transform Request In");
      console.log("Original Request:", JSON.stringify(body, null, 2));
    }
    // Track whether this is a completion request, for response conversion
    this.isCompletionRequest = body.prompt !== undefined;
    // Track whether the original request asked for streaming
    this.isStreamRequest = body.stream || false;
    // Convert OpenAI Chat Completions format to OpenAI Response API format
    const inputItems = this.convertToResponseInput(body);
    const tools = this.convertToolsToResponseFormat(body.tools);
    // Handle system messages separately
    let instructions = "";
    if (body.messages) {
      const systemMessage = body.messages.find(m => m.role === "system");
      if (systemMessage) {
        instructions = typeof systemMessage.content === "string"
          ? systemMessage.content
          : "";
      }
    }
    // Handle the reasoning parameter
    const include = [];
    let reasoningParam = null;
    // Enable reasoning if configured in options or explicitly requested
    const shouldEnableReasoning = this.reasoning.enable || body.reasoning;
    if (shouldEnableReasoning) {
      include.push("reasoning.encrypted_content");
      // Use options as defaults, allow the request body to override
      const reasoningOverrides = typeof body.reasoning === "object" ? body.reasoning : {};
      reasoningParam = this.buildReasoningParam(
        this.reasoning.effort,
        this.reasoning.summary,
        reasoningOverrides
      );
    }
    // Build the OpenAI Response API request body
    const responseBody = {
      model: this.normalizeModelName(body.model),
      instructions: instructions || undefined,
      input: inputItems,
      tools: tools || [],
      tool_choice: this.convertToolChoice(body.tool_choice),
      parallel_tool_calls: body.parallel_tool_calls || false,
      store: false,
      stream: true, // The Response API endpoint used here requires stream to be true
      include: include,
      ...(reasoningParam && { reasoning: reasoningParam })
    };
    const result = {
      body: responseBody,
      config: {
        headers: {
          "Content-Type": "application/json",
          "Accept": "text/event-stream" // Always expect a streaming response from the Response API
        },
      },
    };
    if (this.debug) {
      console.log("Transformed Request:", JSON.stringify(result, null, 2));
      console.log("Request transformation complete\n");
    }
    return result;
  }
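
  // For illustration, a Chat Completions request such as
  //   { "model": "gpt5", "messages": [{ "role": "user", "content": "Hi" }] }
  // is rewritten into a Response API body along the lines of
  //   { "model": "gpt-5",
  //     "input": [{ "type": "message", "role": "user",
  //                 "content": [{ "type": "input_text", "text": "Hi" }] }],
  //     "tools": [], "tool_choice": "auto", "store": false, "stream": true, ... }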
  normalizeModelName(name) {
    if (typeof name !== "string" || !name.trim()) {
      return "gpt-5";
    }
    const base = name.split(":", 1)[0].trim();
    const mapping = {
      gpt5: "gpt-5",
      "gpt-5-latest": "gpt-5",
      "gpt-5": "gpt-5",
      codex: "codex-mini-latest",
      "codex-mini": "codex-mini-latest",
      "codex-mini-latest": "codex-mini-latest"
    };
    return mapping[base] || base;
  }
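
  // e.g. "gpt5" and "gpt-5-latest" both normalize to "gpt-5", "codex" to
  // "codex-mini-latest", a tagged name such as "gpt5:dev" drops the ":dev"
  // suffix, and an unrecognized name like "gpt-5-2025-08-07" passes through unchanged.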
  convertToResponseInput(body) {
    const inputItems = [];
    // Handle the different endpoint types
    if (body.prompt !== undefined) {
      // /v1/completions endpoint - convert the prompt to a single user message
      const messages = [{ role: "user", content: body.prompt }];
      return this.convertChatMessagesToResponsesInput(messages);
    } else if (body.messages) {
      // /v1/chat/completions endpoint
      return this.convertChatMessagesToResponsesInput(body.messages);
    }
    return inputItems;
  }
  convertChatMessagesToResponsesInput(messages) {
    const inputItems = [];
    for (const message of messages) {
      const role = message.role;
      if (role === "system") {
        continue; // System messages are handled separately as instructions
      }
      if (role === "tool") {
        const callId = message.tool_call_id || message.id;
        if (typeof callId === "string" && callId) {
          let content = message.content || "";
          if (Array.isArray(content)) {
            const texts = [];
            for (const part of content) {
              if (typeof part === "object" && part !== null) {
                const t = part.text || part.content;
                if (typeof t === "string" && t) {
                  texts.push(t);
                }
              }
            }
            content = texts.join("\n");
          }
          if (typeof content === "string") {
            inputItems.push({
              type: "function_call_output",
              call_id: callId,
              output: content
            });
          }
        }
        continue;
      }
      if (role === "assistant" && Array.isArray(message.tool_calls)) {
        for (const tc of message.tool_calls) {
          if (typeof tc !== "object" || tc === null) {
            continue;
          }
          const tcType = tc.type || "function";
          if (tcType !== "function") {
            continue;
          }
          const callId = tc.id || tc.call_id;
          const fn = tc.function;
          const name = typeof fn === "object" && fn !== null ? fn.name : null;
          const args = typeof fn === "object" && fn !== null ? fn.arguments : null;
          if (typeof callId === "string" && typeof name === "string" && typeof args === "string") {
            inputItems.push({
              type: "function_call",
              name: name,
              arguments: args,
              call_id: callId
            });
          }
        }
      }
      const content = message.content || "";
      const contentItems = [];
      if (Array.isArray(content)) {
        for (const part of content) {
          if (typeof part !== "object" || part === null) {
            continue;
          }
          const ptype = part.type;
          if (ptype === "text") {
            const text = part.text || part.content || "";
            if (typeof text === "string" && text) {
              const kind = role === "assistant" ? "output_text" : "input_text";
              contentItems.push({ type: kind, text: text });
            }
          } else if (ptype === "image_url") {
            const image = part.image_url;
            const url = typeof image === "object" && image !== null ? image.url : image;
            if (typeof url === "string" && url) {
              contentItems.push({ type: "input_image", image_url: url });
            }
          }
        }
      } else if (typeof content === "string" && content) {
        const kind = role === "assistant" ? "output_text" : "input_text";
        contentItems.push({ type: kind, text: content });
      }
      if (!contentItems.length) {
        continue;
      }
      const roleOut = role === "assistant" ? "assistant" : "user";
      inputItems.push({ type: "message", role: roleOut, content: contentItems });
    }
    return inputItems;
  }
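
  // For example, a tool-result message
  //   { "role": "tool", "tool_call_id": "call_1", "content": "42" }
  // becomes { "type": "function_call_output", "call_id": "call_1", "output": "42" },
  // while the assistant turn that issued the call is re-emitted as a
  // "function_call" input item, so the Response API sees the full call/result pair.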
  convertToolsToResponseFormat(tools) {
    if (!Array.isArray(tools)) {
      return [];
    }
    const out = [];
    for (const t of tools) {
      if (typeof t !== "object" || t === null) {
        continue;
      }
      if (t.type !== "function") {
        continue;
      }
      const fn = t.function;
      if (typeof fn !== "object" || fn === null) {
        continue;
      }
      const name = fn.name;
      if (typeof name !== "string" || !name) {
        continue;
      }
      const desc = fn.description;
      const params = fn.parameters;
      // The OpenAI Response API expects tools in a flattened format
      out.push({
        type: "function",
        name: name,
        description: desc || "",
        parameters: params || { type: "object", properties: {} }
      });
    }
    return out;
  }
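
  // i.e. the nested Chat Completions shape
  //   { "type": "function", "function": { "name": "get_time", "parameters": { ... } } }
  // is flattened to
  //   { "type": "function", "name": "get_time", "description": "", "parameters": { ... } }
  // ("get_time" is just a placeholder name for illustration).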
  convertToolChoice(toolChoice) {
    if (!toolChoice || toolChoice === "auto" || toolChoice === "none") {
      return toolChoice || "auto";
    }
    if (typeof toolChoice === "object" && toolChoice.type === "function") {
      return toolChoice;
    }
    return "auto";
  }
  buildReasoningParam(baseEffort = "minimal", baseSummary = "auto", overrides) {
    let effort = (baseEffort || "").trim().toLowerCase();
    let summary = (baseSummary || "").trim().toLowerCase();
    const validEfforts = new Set(["low", "medium", "high", "none", "minimal"]);
    const validSummaries = new Set(["auto", "concise", "detailed", "none"]);
    if (overrides) {
      const oEff = (overrides.effort || "").trim().toLowerCase();
      const oSum = (overrides.summary || "").trim().toLowerCase();
      if (oEff && validEfforts.has(oEff)) {
        effort = oEff;
      } else if (oEff && !validEfforts.has(oEff)) {
        console.warn(`[CodexCLI] Invalid reasoning.effort override: ${oEff}. Expected one of: ${Array.from(validEfforts).join(", ")}. Using: ${effort}`);
      }
      if (oSum && validSummaries.has(oSum)) {
        summary = oSum;
      } else if (oSum && !validSummaries.has(oSum)) {
        console.warn(`[CodexCLI] Invalid reasoning.summary override: ${oSum}. Expected one of: ${Array.from(validSummaries).join(", ")}. Using: ${summary}`);
      }
    }
    if (!validEfforts.has(effort)) {
      effort = "minimal";
    }
    if (!validSummaries.has(summary)) {
      summary = "auto";
    }
    const reasoning = { effort: effort };
    if (summary !== "none") {
      reasoning.summary = summary;
    }
    return reasoning;
  }
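
  // e.g. buildReasoningParam("high", "detailed", { effort: "low" })
  // yields { effort: "low", summary: "detailed" }; a summary of "none"
  // produces { effort: "low" } with the summary field omitted entirely.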
  applyReasoningToMessage(message, reasoningSummaryText, reasoningFullText) {
    // Apply reasoning content to the message for non-streaming responses
    const rtxtParts = [];
    if (typeof reasoningSummaryText === "string" && reasoningSummaryText.trim()) {
      rtxtParts.push(reasoningSummaryText);
    }
    if (typeof reasoningFullText === "string" && reasoningFullText.trim()) {
      rtxtParts.push(reasoningFullText);
    }
    const rtxt = rtxtParts.filter((p) => p).join("\n\n");
    if (rtxt) {
      const thinkBlock = `<think>\n${rtxt}\n</think>\n`;
      const contentText = message.content || "";
      message.content = thinkBlock + (typeof contentText === "string" ? contentText : "");
    }
    return message;
  }
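
  // The reasoning text is surfaced inline, e.g. a message whose content was
  // "Paris" becomes "<think>\nThe user asks for a capital city...\n</think>\nParis"
  // (the reasoning text here is invented for illustration).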
  async transformResponseOut(response) {
    if (this.debug) {
      console.log("\n[DEBUG] CodexCLI Transform Response Out");
      console.log("Original Response Status:", response.status);
      console.log("Original Response Headers:", Object.fromEntries(response.headers.entries()));
      console.log("Response URL:", response.url || "N/A");
      console.log("Response Type:", response.type || "N/A");
      console.log("Response OK:", response.ok);
      console.log("Response Redirected:", response.redirected);
    }
    // Since the Response API always returns a stream, handling depends on
    // whether the original request asked for streaming or not
    const isOriginalStreamRequest = this.isStreamRequest;
    if (response.headers.get("Content-Type")?.includes("stream")) {
      // Handle a streaming response
      if (!response.body) {
        return response;
      }
      // If the original request was not streaming, collect all chunks and return a non-streaming response
      if (!isOriginalStreamRequest) {
        return this.convertStreamToNonStream(response);
      }
      const decoder = new TextDecoder();
      const encoder = new TextEncoder();
      const self = this;
      const stream = new ReadableStream({
        async start(controller) {
          const reader = response.body.getReader();
          let buffer = "";
          // State shared across chunks; thinkingContent is initialized up front
          // so reasoning deltas can safely append before a summary part is announced
          const streamContext = {
            thinkingContent: "",
            thinkingStarted: false,
            currentPartIndex: -1
          };
          try {
            while (true) {
              const { done, value } = await reader.read();
              if (done) {
                // Process any remaining buffer
                if (buffer.trim()) {
                  self.processStreamLine(buffer, controller, encoder, streamContext);
                }
                break;
              }
              buffer += decoder.decode(value, { stream: true });
              const lines = buffer.split("\n");
              buffer = lines.pop() || ""; // Keep the incomplete line in the buffer
              for (const line of lines) {
                if (line.trim()) {
                  self.processStreamLine(line, controller, encoder, streamContext);
                }
              }
            }
          } catch (error) {
            console.error("Stream processing error:", error);
            controller.error(error);
          } finally {
            try {
              reader.releaseLock();
            } catch (e) {
              console.error("Error releasing reader lock:", e);
            }
            controller.close();
          }
        },
      });
      const streamResponse = new Response(stream, {
        status: response.status,
        statusText: response.statusText,
        headers: {
          "Content-Type": "text/event-stream",
          "Cache-Control": "no-cache",
          "Connection": "keep-alive",
          ...this.getCorsHeaders()
        },
      });
      if (this.debug) {
        console.log("Streaming Response Created");
        console.log("Response transformation complete (streaming)\n");
      }
      return streamResponse;
    }
    // The Response API only returns streaming responses; pass anything else through
    return response;
  }
  processStreamLine(line, controller, encoder, context) {
    if (this.debug) {
      console.log("[DEBUG] Stream Line:", line);
    }
    if (line.startsWith("data: ")) {
      const data = line.slice(6).trim();
      if (data === "[DONE]") {
        if (this.debug) {
          console.log("[DEBUG] Stream End: [DONE]");
        }
        controller.enqueue(encoder.encode("data: [DONE]\n\n"));
        return;
      }
      if (!data) return;
      try {
        const chunk = JSON.parse(data);
        if (this.debug) {
          console.log("[DEBUG] Parsed SSE Chunk:", JSON.stringify(chunk, null, 2));
        }
        const convertedChunks = this.convertStreamingChunkWithState(chunk, context);
        // convertedChunks can be a single chunk, an array of chunks, or null
        if (convertedChunks) {
          const chunksArray = Array.isArray(convertedChunks) ? convertedChunks : [convertedChunks];
          for (const convertedChunk of chunksArray) {
            if (convertedChunk) {
              if (this.debug) {
                console.log("[DEBUG] Converted Chunk:", JSON.stringify(convertedChunk, null, 2));
              }
              controller.enqueue(encoder.encode(`data: ${JSON.stringify(convertedChunk)}\n\n`));
            }
          }
        } else if (this.debug) {
          console.log("[DEBUG] Chunk not converted (filtered out)");
        }
      } catch (error) {
        console.error("Error parsing stream chunk:", error);
        if (this.debug) {
          console.log("[DEBUG] Failed to parse chunk, passing through:", line);
        }
        // Pass through the original line if parsing fails
        controller.enqueue(encoder.encode(line + "\n"));
      }
    } else {
      // Pass through non-data lines (SSE comments, event names, keep-alives)
      controller.enqueue(encoder.encode(line + "\n"));
    }
  }
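
  // Each incoming SSE line looks like
  //   data: {"type":"response.output_text.delta","delta":"Hel", ...}
  // and is re-emitted as
  //   data: {"object":"chat.completion.chunk","choices":[{"delta":{"content":"Hel"}, ...}]}
  // (payloads abbreviated for illustration).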
  convertNonStreamingResponse(response) {
    // Convert OpenAI Response API format to OpenAI Chat Completions format
    // Extract text content from the response
    let content = "";
    let toolCalls = [];
    let responseId = response.id || `chatcmpl-${Date.now()}`;
    // Handle the different response structures
    if (response.output && Array.isArray(response.output)) {
      for (const item of response.output) {
        if (item.type === "text") {
          content += item.text || "";
        } else if (item.type === "function_call") {
          toolCalls.push({
            id: item.call_id || item.id || `tool_${Math.random().toString(36).substring(2, 15)}`,
            type: "function",
            function: {
              name: item.name || "",
              arguments: item.arguments || "{}"
            }
          });
        }
      }
    }
    // Check whether this is a completion request (the original had a prompt)
    const isCompletion = this.isCompletionRequest;
    if (isCompletion) {
      // Text completion format
      return {
        id: responseId.replace("chatcmpl-", "cmpl-"),
        object: "text_completion",
        created: Math.floor(Date.now() / 1000),
        model: response.model || "gpt-5",
        choices: [{
          index: 0,
          text: content,
          finish_reason: "stop",
          logprobs: null
        }],
        usage: response.usage || {
          prompt_tokens: 0,
          completion_tokens: 0,
          total_tokens: 0
        }
      };
    } else {
      // Chat completion format
      return {
        id: responseId,
        object: "chat.completion",
        created: Math.floor(Date.now() / 1000),
        model: response.model || "gpt-5",
        choices: [{
          index: 0,
          message: {
            role: "assistant",
            content: content,
            ...(toolCalls.length > 0 && { tool_calls: toolCalls })
          },
          finish_reason: "stop"
        }],
        usage: response.usage || {
          prompt_tokens: 0,
          completion_tokens: 0,
          total_tokens: 0
        }
      };
    }
  }
  convertStreamingChunkWithState(chunk, context) {
    // Handle streaming conversion to OpenAI format
    const responseId = chunk.response?.id || `chatcmpl-${Date.now()}`;
    const kind = chunk.type;
    if (this.debug) {
      console.log(`[DEBUG] Converting chunk type: ${kind}`);
    }
    const created = Math.floor(Date.now() / 1000);
    const model = chunk.response?.model || "gpt-5";
    // Handle reasoning summary part added, used for state tracking
    if (kind === "response.reasoning_summary_part.added") {
      // Initialize reasoning state if the context was created without it
      // (checking for undefined rather than falsiness, so an empty string
      // doesn't reset the part index mid-stream)
      if (context.thinkingContent === undefined) {
        context.thinkingContent = "";
        context.thinkingStarted = false;
        context.currentPartIndex = -1;
      }
      // Track the current part index
      const partIndex = chunk.summary_index || 0;
      context.currentPartIndex = partIndex;
      // If this is not the first part and we already have content, add a newline separator
      if (partIndex > 0 && context.thinkingContent) {
        context.thinkingContent += "\n\n";
        // Emit the newline separator as a thinking delta
        return {
          id: responseId,
          object: "chat.completion.chunk",
          created: created,
          model: model,
          choices: [{
            index: 0,
            delta: {
              thinking: {
                content: "\n\n"
              }
            },
            finish_reason: null
          }]
        };
      }
      return null;
    }
    // Handle reasoning delta events for streaming
    if (kind === "response.reasoning_summary_text.delta" || kind === "response.reasoning_text.delta") {
      const deltaTxt = chunk.delta || "";
      // Stream thinking deltas
      if (kind === "response.reasoning_summary_text.delta") {
        // Accumulate thinking content, guarding against a missing field so we
        // never concatenate onto undefined
        context.thinkingContent = (context.thinkingContent || "") + deltaTxt;
        // Stream the thinking delta
        return {
          id: responseId,
          object: "chat.completion.chunk",
          created: created,
          model: model,
          choices: [{
            index: 0,
            delta: {
              thinking: {
                content: deltaTxt
              }
            },
            finish_reason: null
          }]
        };
      }
      // For other reasoning delta types, return null
      return null;
    }
    // Handle output text deltas
    if (kind === "response.output_text.delta") {
      const content = chunk.delta || "";
      const isCompletion = this.isCompletionRequest;
      const chunks = [];
      // Add the actual content
      if (isCompletion) {
        chunks.push({
          id: responseId.replace("chatcmpl-", "cmpl-"),
          object: "text_completion.chunk",
          created: created,
          model: model,
          choices: [{ index: 0, text: content, finish_reason: null, logprobs: null }]
        });
      } else {
        chunks.push({
          id: responseId,
          object: "chat.completion.chunk",
          created: created,
          model: model,
          choices: [{ index: 0, delta: { content: content }, finish_reason: null }]
        });
      }
      return chunks;
    }
    // Handle response.completed and response.failed
    if (kind === "response.completed" || kind === "response.failed") {
      const finishReason = kind === "response.failed" ? "error" : "stop";
      const isCompletion = this.isCompletionRequest;
      const chunks = [];
      // Add the final chunk carrying the finish reason
      if (isCompletion) {
        chunks.push({
          id: responseId.replace("chatcmpl-", "cmpl-"),
          object: "text_completion.chunk",
          created: created,
          model: model,
          choices: [{ index: 0, text: "", finish_reason: finishReason, logprobs: null }]
        });
      } else {
        chunks.push({
          id: responseId,
          object: "chat.completion.chunk",
          created: created,
          model: model,
          choices: [{ index: 0, delta: {}, finish_reason: finishReason }]
        });
      }
      return chunks;
    }
    // For all other chunk types, delegate to the stateless method
    return this.convertStreamingChunk(chunk);
  }
  convertStreamingChunk(chunk) {
    // Convert an OpenAI Response API streaming chunk to OpenAI Chat Completions format
    const responseId = chunk.response?.id || `chatcmpl-${Date.now()}`;
    const kind = chunk.type;
    if (kind === "response.output_text.delta") {
      const content = chunk.delta || "";
      const isCompletion = this.isCompletionRequest;
      if (isCompletion) {
        return {
          id: responseId.replace("chatcmpl-", "cmpl-"),
          object: "text_completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: chunk.response?.model || "gpt-5",
          choices: [{
            index: 0,
            text: content,
            finish_reason: null,
            logprobs: null
          }]
        };
      } else {
        return {
          id: responseId,
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: chunk.response?.model || "gpt-5",
          choices: [{
            index: 0,
            delta: {
              content: content
            },
            finish_reason: null
          }]
        };
      }
    } else if (kind === "response.reasoning_summary_text.delta" || kind === "response.reasoning_text.delta") {
      // Reasoning deltas are handled by convertStreamingChunkWithState;
      // return null here so they are never emitted twice
      return null;
    } else if (kind === "response.output_item.done") {
      const item = chunk.item || {};
      if (item.type === "function_call") {
        return {
          id: responseId,
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: chunk.response?.model || "gpt-5",
          choices: [{
            index: 0,
            delta: {
              tool_calls: [{
                index: 0,
                id: item.call_id || item.id || "",
                type: "function",
                function: {
                  name: item.name || "",
                  arguments: item.arguments || ""
                }
              }]
            },
            finish_reason: null
          }]
        };
      } else if (item.type === "reasoning") {
        // A completed reasoning item signals that the reasoning content is ready.
        // The actual content arrives via reasoning_summary_text.delta and
        // reasoning_text.delta events, so no delta chunk is emitted here.
        if (this.debug) {
          console.log(`[DEBUG] Reasoning item completed: ${item.id}`);
        }
        return null;
      }
    } else if (kind === "response.output_item.added") {
      const item = chunk.item || {};
      if (this.debug) {
        console.log(`[DEBUG] Output item added: type=${item.type}, id=${item.id}`);
      }
      // Output item added events are informational - no delta chunk needed;
      // the actual content will arrive through delta events
      return null;
    } else if (kind === "response.reasoning_summary_part.added") {
      // Reasoning summary part started - used only for state tracking
      if (this.debug) {
        const partInfo = chunk.part || {};
        console.log(`[DEBUG] Reasoning summary part added: type=${partInfo.type}`);
      }
      return null;
    } else if (kind === "response.reasoning_summary_part.done") {
      // Reasoning summary part completed
      if (this.debug) {
        console.log(`[DEBUG] Reasoning summary part completed`);
      }
      return null;
    } else if (kind === "response.reasoning_summary_text.done") {
      // Reasoning summary text completed
      if (this.debug) {
        console.log(`[DEBUG] Reasoning summary text completed`);
      }
      return null;
    } else if (kind === "response.content_part.added") {
      // Content part started
      if (this.debug) {
        const partInfo = chunk.part || {};
        console.log(`[DEBUG] Content part added: type=${partInfo.type}`);
      }
      return null;
    } else if (kind === "response.content_part.done") {
      // Content part completed
      if (this.debug) {
        console.log(`[DEBUG] Content part completed`);
      }
      return null;
    } else if (kind === "response.output_text.done") {
      // Output text completed - the finish chunk is emitted on response.completed
      if (this.debug) {
        console.log(`[DEBUG] Output text completed`);
      }
      return null;
    } else if (kind === "response.created") {
      // Response created - lifecycle event
      if (this.debug) {
        console.log(`[DEBUG] Response created: ${responseId}`);
      }
      return null;
    } else if (kind === "response.in_progress") {
      // Response in progress - lifecycle event
      if (this.debug) {
        console.log(`[DEBUG] Response in progress: ${responseId}`);
      }
      return null;
    } else if (kind === "response.completed" || kind === "response.failed") {
      const finishReason = kind === "response.failed" ? "error" : "stop";
      const isCompletion = this.isCompletionRequest;
      if (isCompletion) {
        return {
          id: responseId.replace("chatcmpl-", "cmpl-"),
          object: "text_completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: chunk.response?.model || "gpt-5",
          choices: [{
            index: 0,
            text: "",
            finish_reason: finishReason,
            logprobs: null
          }]
        };
      } else {
        return {
          id: responseId,
          object: "chat.completion.chunk",
          created: Math.floor(Date.now() / 1000),
          model: chunk.response?.model || "gpt-5",
          choices: [{
            index: 0,
            delta: {},
            finish_reason: finishReason
          }]
        };
      }
    }
    if (this.debug) {
      console.log(`[DEBUG] Unrecognized chunk type: ${kind}, skipping`);
    }
    return null; // Don't emit chunks we don't recognize
  }
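
  // In short: output_text.delta carries content, output_item.done carries
  // completed tool calls, response.completed/failed carry the finish_reason,
  // and the remaining lifecycle events are logged (in debug mode) and dropped.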
  async convertStreamToNonStream(response) {
    if (this.debug) {
      console.log("\n[DEBUG] Converting streaming response to non-streaming...");
      console.log("[DEBUG] Original stream request:", this.isStreamRequest);
      console.log("[DEBUG] Is completion request:", this.isCompletionRequest);
    }
    // Collect all stream chunks and build a non-streaming response
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    let fullText = "";
    let reasoningSummaryText = "";
    let reasoningFullText = "";
    let toolCalls = [];
    let responseId = `chatcmpl-${Date.now()}`;
    let errorMessage = null;
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (line.startsWith("data: ")) {
            const data = line.slice(6).trim();
            if (data === "[DONE]") break;
            if (!data) continue;
            try {
              const chunk = JSON.parse(data);
              const kind = chunk.type;
              if (chunk.response && typeof chunk.response.id === "string") {
                responseId = chunk.response.id || responseId;
              }
              if (kind === "response.output_text.delta") {
                const delta = chunk.delta || "";
                fullText += delta;
                if (this.debug && delta) {
                  console.log(`[DEBUG] Collected text delta: "${delta}"`);
                }
              } else if (kind === "response.reasoning_summary_text.delta") {
                const delta = chunk.delta || "";
                reasoningSummaryText += delta;
                if (this.debug && delta) {
                  console.log(`[DEBUG] Collected reasoning summary: "${delta}"`);
                }
              } else if (kind === "response.reasoning_text.delta") {
                const delta = chunk.delta || "";
                reasoningFullText += delta;
                if (this.debug && delta) {
                  console.log(`[DEBUG] Collected reasoning text: "${delta}"`);
                }
              } else if (kind === "response.output_item.done") {
                const item = chunk.item || {};
                if (item.type === "function_call") {
                  const callId = item.call_id || item.id || "";
                  const name = item.name || "";
                  const args = item.arguments || "";
                  if (typeof callId === "string" && typeof name === "string" && typeof args === "string") {
                    const toolCall = {
                      id: callId,
                      type: "function",
                      function: { name: name, arguments: args }
                    };
                    toolCalls.push(toolCall);
                    if (this.debug) {
                      console.log(`[DEBUG] Collected tool call:`, JSON.stringify(toolCall, null, 2));
                    }
                  }
                }
              } else if (kind === "response.failed") {
                errorMessage = (chunk.response && chunk.response.error && chunk.response.error.message) || "response.failed";
              }
            } catch (parseError) {
              console.error("Error parsing stream chunk:", parseError);
            }
          }
        }
      }
    } catch (streamError) {
      console.error("Error reading stream:", streamError);
      errorMessage = `Error reading stream: ${streamError}`;
    }
    if (errorMessage) {
      if (this.debug) {
        console.log(`[DEBUG] Stream processing failed with error: ${errorMessage}`);
      }
      return new Response(JSON.stringify({ error: { message: errorMessage } }), {
        status: 502,
        headers: { "Content-Type": "application/json", ...this.getCorsHeaders() }
      });
    }
    if (this.debug) {
      console.log(`[DEBUG] Stream processing completed. Summary:`);
      console.log(`[DEBUG] - Full text length: ${fullText.length}`);
      console.log(`[DEBUG] - Reasoning summary length: ${reasoningSummaryText.length}`);
      console.log(`[DEBUG] - Reasoning full text length: ${reasoningFullText.length}`);
      console.log(`[DEBUG] - Tool calls count: ${toolCalls.length}`);
      console.log(`[DEBUG] - Response ID: ${responseId}`);
    }
    // Build the non-streaming response
    const isCompletion = this.isCompletionRequest;
    const created = Math.floor(Date.now() / 1000);
    if (isCompletion) {
      const completion = {
        id: responseId.replace("chatcmpl-", "cmpl-"),
        object: "text_completion",
        created: created,
        model: "gpt-5",
        choices: [{
          index: 0,
          text: fullText,
          finish_reason: "stop",
          logprobs: null
        }],
        usage: {
          prompt_tokens: 0,
          completion_tokens: 0,
          total_tokens: 0
        }
      };
      const completionResponse = new Response(JSON.stringify(completion), {
        status: response.status,
        headers: { "Content-Type": "application/json", ...this.getCorsHeaders() }
      });
      if (this.debug) {
        console.log("Non-streaming Response Created (Text Completion)");
        console.log("Final completion object:", JSON.stringify(completion, null, 2));
        console.log("Response transformation complete (non-streaming)\n");
      }
      return completionResponse;
    } else {
      let message = { role: "assistant", content: fullText };
      if (toolCalls.length > 0) {
        message.tool_calls = toolCalls;
      }
      // Apply reasoning to the message for non-streaming responses
      message = this.applyReasoningToMessage(message, reasoningSummaryText, reasoningFullText);
      const completion = {
        id: responseId,
        object: "chat.completion",
        created: created,
        model: "gpt-5",
        choices: [{
          index: 0,
          message: message,
          finish_reason: "stop"
        }],
        usage: {
          prompt_tokens: 0,
          completion_tokens: 0,
          total_tokens: 0
        }
      };
      const chatCompletionResponse = new Response(JSON.stringify(completion), {
        status: response.status,
        headers: { "Content-Type": "application/json", ...this.getCorsHeaders() }
      });
      if (this.debug) {
        console.log("Non-streaming Response Created (Chat Completion)");
        console.log("Final completion object:", JSON.stringify(completion, null, 2));
        console.log("Final message after reasoning:", JSON.stringify(message, null, 2));
        console.log("Response transformation complete (non-streaming)\n");
      }
      return chatCompletionResponse;
    }
  }
  getCorsHeaders() {
    return {
      "Access-Control-Allow-Origin": "*",
      "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
      "Access-Control-Allow-Headers": "Content-Type, Authorization",
    };
  }
}

module.exports = CodexCLITransformer;
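
A sample claude-code-router config (typically ~/.claude-code-router/config.json) that registers the transformer and routes every Claude Code role to a Response API provider: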
{
  "LOG": false,
  "LOG_LEVEL": "debug",
  "CLAUDE_PATH": "",
  "HOST": "127.0.0.1",
  "PORT": 3456,
  "APIKEY": "",
  "API_TIMEOUT_MS": "600000",
  "PROXY_URL": "",
  "transformers": [
    {
      "name": "codex-cli",
      "path": "/Users/user/.claude-code-router/plugins/codex-cli.js",
      "options": {
        "debug": false,
        "reasoning": {
          "enable": false,
          "effort": "high",
          "summary": "detailed"
        }
      }
    }
  ],
  "Providers": [
    {
      "name": "ChatGPT",
      "api_base_url": "https://foo.com/openai/responses",
      "api_key": "cr_xxx",
      "models": [
        "gpt-5-2025-08-07"
      ],
      "transformer": {
        "use": [
          "codex-cli"
        ]
      }
    }
  ],
  "StatusLine": {
    "enabled": false,
    "currentStyle": "default",
    "default": {
      "modules": []
    },
    "powerline": {
      "modules": []
    }
  },
  "Router": {
    "default": "ChatGPT,gpt-5-2025-08-07",
    "background": "ChatGPT,gpt-5-2025-08-07",
    "think": "ChatGPT,gpt-5-2025-08-07",
    "longContext": "ChatGPT,gpt-5-2025-08-07",
    "longContextThreshold": 60000,
    "webSearch": "ChatGPT,gpt-5-2025-08-07"
  }
}
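
To sanity-check the transformer outside the router, a minimal sketch; the script name and the request body below are assumptions for illustration:

// check.js - exercise transformRequestIn directly
const CodexCLITransformer = require("/Users/user/.claude-code-router/plugins/codex-cli.js");

async function main() {
  const transformer = new CodexCLITransformer({
    debug: true,
    reasoning: { enable: true, effort: "high", summary: "detailed" }
  });
  // Transform a Chat Completions request into a Response API request
  const { body } = await transformer.transformRequestIn({
    model: "gpt5",
    stream: false,
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Say hello." }
    ]
  });
  // Expect: model "gpt-5", instructions taken from the system message,
  // stream forced to true, and reasoning { effort: "high", summary: "detailed" }
  console.log(JSON.stringify(body, null, 2));
}

main().catch(console.error);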