Created
February 7, 2026 14:07
-
-
Save dark-faze/c7cfb3b52c92e865ea69a0530d3f0e88 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from helpers.utils import execute_with_retry
from generators.llm import LLMClient
import importlib
# Module-level LLM client shared by the generator functions below.
# NOTE(review): complete_abrupt_segment also takes an llmClient parameter
# that shadows this global when the function is called — confirm which
# client callers intend to use.
llmClient = LLMClient()
def tail_words(text: str, max_words: int = 100) -> str:
    """Return the last *max_words* words of *text*.

    If the text has *max_words* words or fewer, it is returned unchanged
    (original whitespace preserved); otherwise the trailing words are
    re-joined with single spaces.
    """
    tokens = text.split()
    if len(tokens) <= max_words:
        return text
    return " ".join(tokens[-max_words:])
def is_abrupt_ending(text: str) -> bool:
    """
    Detect if a script segment ends abruptly (was cut off mid-thought).

    A segment is considered complete when, after stripping surrounding
    whitespace and any trailing closing quotes/brackets, it ends with
    terminal sentence punctuation: '.', '!' or '?'.

    The previous rule accepted only a bare period, which falsely flagged
    sentences ending in '!', '?' or a quoted '."' as abrupt and triggered
    unnecessary completion calls.  Anything a period previously accepted
    is still accepted, so the change is backward compatible.

    Args:
        text: The segment to inspect.  None, empty, or whitespace-only
            input is treated as NOT abrupt (there is nothing to complete).

    Returns:
        True if the segment appears to be cut off, False otherwise.
    """
    if not text or not text.strip():
        return False
    # Closing quotes/brackets (straight and curly) may legitimately follow
    # the final punctuation mark, e.g. a segment ending with: ...the end."
    closers = "\"'\u201d\u2019)]}"
    stripped = text.strip().rstrip(closers)
    return not stripped.endswith(('.', '!', '?'))
def complete_abrupt_segment(
    segment: str,
    title: str,
    chapter_title: str,
    event_title: str,
    outline_output: str,
    niche: str,
    llmClient,
    llmSettings: dict,
    max_tokens: int = 200,
    context_word_count: int = 150,
) -> str:
    """
    Generate a proper ending for an abruptly cut-off segment.

    Args:
        segment: The incomplete segment.
        title: The video title for context.
        chapter_title: Title of the current chapter.
        event_title: Title of the current event/beat.
        outline_output: The full story outline for context.
        niche: The niche config name (e.g., "w40kv2"); resolved to the
            module ``nicheConfigs.<niche>``.
        llmClient: The LLM client instance (intentionally shadows the
            module-level client so callers can inject their own).
        llmSettings: LLM configuration; must contain "llmServer" and "llmModel".
        max_tokens: Maximum tokens for the completion.
        context_word_count: How many trailing words of *segment* to embed in
            the prompt as context.  Default 150 — the previously hard-coded
            value, now kept in sync between the tail extraction and the
            prompt text.

    Returns:
        The original segment (stripped) with the generated completion appended.

    Raises:
        RuntimeError: If the LLM returns None (e.g. bad credentials).
        ModuleNotFoundError: If the niche config module cannot be imported.
    """
    # Niche configs live in the nicheConfigs package, one module per niche.
    module = importlib.import_module(f"nicheConfigs.{niche}")

    # Niche-specific instructions describing how a completion should read.
    completion_prompt = module.getSegmentCompletionPrompt(
        title, chapter_title, event_title, outline_output
    )

    # Only the tail of the segment is sent, to keep the prompt small.
    tail = tail_words(segment, context_word_count)

    completion_messages = [
        {
            "role": "user",
            "content": (
                f"{completion_prompt}\n\n"
                f"INCOMPLETE SEGMENT (last {context_word_count} words):\n...{tail}\n\n"
                "Write ONLY the completion (1-2 sentences ending with a period):"
            ),
        }
    ]

    # Use the niche's content generation system prompt for consistency
    # with the segments generated elsewhere in this file.
    system_prompt = module.getPromptForContentGeneration("")

    completion = llmClient.generate_content_script(
        system_prompt,
        completion_messages,
        llmSettings["llmServer"],
        llmSettings["llmModel"],
        max_tokens,
    )

    # A None result usually means an authentication/credential failure
    # upstream; fail loudly rather than appending "None" to the script.
    if completion is None:
        raise RuntimeError("LLM returned None - check API credentials and authentication")

    completion = completion.strip()

    # Splice the completion onto the original text with a single space.
    return segment.strip() + " " + completion
def build_chapter_outline_context(outline_list, chapter_index: int) -> str:
    """Summarize one chapter's outline as a single context line.

    Produces "Context - Chapter outline: <title>: <event>; <event>; ..."
    in the exact format expected by the prompt builders.  Chapters with no
    "events" key yield an empty event list.
    """
    chapter = outline_list[chapter_index]
    event_summary = "; ".join(chapter.get("events", []))
    return f"Context - Chapter outline: {chapter['title']}: " + event_summary
def _generate_script_segments_attempt(niche, chapterConfig, outline_list, outline_output, outline_prompt, title, logger, llmSettings):
    """
    Generate the script one segment (chapter event) at a time.

    Walks every event of every chapter in ``outline_list``, prompting the
    module-level ``llmClient`` once per event.  Each prompt carries the
    full outline, up to two previously generated segments as rolling
    context, and extra wrap-up instructions near the end of the story.
    Segments that end abruptly (per ``is_abrupt_ending``) trigger a
    follow-up completion call via ``complete_abrupt_segment``.

    Args:
        niche: Niche config name; resolved to module ``nicheConfigs.<niche>``.
        chapterConfig: Dict with at least "tokensPerChapter" (max tokens per segment).
        outline_list: List of chapter dicts, each with "title" and "events".
        outline_output: Full outline text embedded in every prompt.
        outline_prompt: Unused here; kept for signature parity with the
            one-shot variant so both can be dispatched interchangeably.
        title: Video title, fed to the niche's content-generation prompt.
        logger: Object providing saveLogTxt/saveDebugLog for debug artifacts.
        llmSettings: Dict with "llmServer" and "llmModel".

    Returns:
        List of lists of segment strings, one inner list per chapter.
    """
    # Prompt payloads are accumulated here; persisted per-call via logger below.
    debugLLMMessages = []
    storySegmentsList = [[] for _ in range(len(outline_list))]
    module_path = f"nicheConfigs.{niche}"
    module = importlib.import_module(module_path)
    local_outline_prompt = module.getPromptForContentGeneration(title)
    # Calculate total segments to detect final segments
    total_segments = sum(len(chapter["events"]) for chapter in outline_list)
    current_segment_idx = 0
    for chapterIdx in range(len(outline_list)):
        for eventIdx in range(len(outline_list[chapterIdx]["events"])):
            print(f"Generating script for {chapterIdx} chapter and {eventIdx} event")
            # Determine if this is the final or penultimate segment
            is_final_segment = (current_segment_idx == total_segments - 1)
            is_penultimate_segment = (current_segment_idx == total_segments - 2)
            is_last_chapter = (chapterIdx == len(outline_list) - 1)
            # Build context from last 2 full segments
            prev_segments_context = ""
            prev_segment_1 = ""
            prev_segment_2 = ""
            if eventIdx == 0 and chapterIdx != 0:
                # We're at the start of a new chapter
                # Get last 2 segments from previous chapter
                totalEvents = len(outline_list[chapterIdx-1]["events"])
                prev_segment_1 = storySegmentsList[chapterIdx-1][totalEvents-1]
                if totalEvents >= 2:
                    prev_segment_2 = storySegmentsList[chapterIdx-1][totalEvents-2]
            elif eventIdx == 1:
                # Second event in chapter - get previous segment from current chapter
                prev_segment_1 = storySegmentsList[chapterIdx][0]
                # If not first chapter, also get last segment from previous chapter as prev_segment_2
                if chapterIdx > 0:
                    totalEvents = len(outline_list[chapterIdx-1]["events"])
                    prev_segment_2 = storySegmentsList[chapterIdx-1][totalEvents-1]
            elif eventIdx > 1:
                # Third event or later - 2 previous segments available
                prev_segment_1 = storySegmentsList[chapterIdx][eventIdx-1]
                prev_segment_2 = storySegmentsList[chapterIdx][eventIdx-2]
            # Build context section with clear separation.
            # prev_segment_2 is older; prev_segment_1 is the most recent.
            if prev_segment_2:
                prev_segments_context = (
                    f"\n\nPREVIOUS SEGMENT 2:\n{prev_segment_2}\n\n"
                    f"PREVIOUS SEGMENT 1 (most recent):\n{prev_segment_1}"
                )
                context_length = len(prev_segment_1) + len(prev_segment_2)
                context_words = len(prev_segment_1.split()) + len(prev_segment_2.split())
                print(f" → Including 2 previous segments for context (combined: {context_length} chars, ~{context_words} words)")
                logger.saveLogTxt(prev_segments_context, f"context_{chapterIdx}_{eventIdx}.txt", "debug/context")
            elif prev_segment_1:
                prev_segments_context = f"\n\nPREVIOUS SEGMENT 1:\n{prev_segment_1}"
                context_length = len(prev_segment_1)
                context_words = len(prev_segment_1.split())
                print(f" → Including 1 previous segment for context (length: {context_length} chars, ~{context_words} words)")
                logger.saveLogTxt(prev_segments_context, f"context_{chapterIdx}_{eventIdx}.txt", "debug/context")
            # Add special instructions for ending segments
            ending_instruction = ""
            if is_final_segment:
                ending_instruction = (
                    "\n\n**IMPORTANT: This is the FINAL segment of the entire narrative.** "
                    "Bring the story to a satisfying and complete conclusion. Resolve key storylines, "
                    "provide emotional closure, and end with a strong final moment that gives the narrative finality. "
                    "The last few sentences should feel like a definitive ending, not a cliffhanger or transition."
                )
            elif is_penultimate_segment:
                ending_instruction = (
                    "\n\n**IMPORTANT: This is the second-to-last segment.** "
                    "Begin to wrap up major storylines and build toward the conclusion. "
                    "Create a sense of approaching finality while leaving room for the final segment to provide closure."
                )
            elif is_last_chapter and eventIdx >= len(outline_list[chapterIdx]["events"]) - 3:
                # Also signal for the last 3 events of the final chapter
                ending_instruction = (
                    "\n\n**NOTE: You are approaching the end of the story.** "
                    "Start building toward narrative resolution and avoid introducing new major plot threads."
                )
            llmMessages = [
                {
                    "role": "user",
                    "content": (
                        f"Full outline:\n{outline_output}" +
                        prev_segments_context +
                        "\n\n" +
                        f"Write the script segment for {outline_list[chapterIdx]['title']} : \"{outline_list[chapterIdx]['events'][eventIdx]}\". "
                        "Only output the segment. DO NOT REPEAT THE EVENT TITLE IN THE SEGMENT. I REPEAT, DO NOT REPEAT THE EVENT TITLE IN THE SEGMENT. DIRECTLY OUTPUT THE SEGMENT, DO NOT ADD ANYTHING ELSE OTHER THAN THE SEGMENT.\n\n"
                        "**CRITICAL: You MUST end your segment with a complete sentence ending in a period (.).** "
                        "Monitor your output length and ensure you bring the segment to a complete, natural ending. "
                        "If you're approaching the token limit, conclude the current thought gracefully rather than cutting off mid-sentence. "
                        "DO NOT end with commas, dashes, ellipses, or mid-sentence. End with a period. "
                        "The segment should feel complete and transition smoothly to the next section." +
                        ending_instruction
                    ),
                }
            ]
            debugLLMMessages.append(llmMessages)
            logger.saveDebugLog(llmMessages, f"llmMessages_{chapterIdx}_{eventIdx}.json", "debug/llmMessages")
            systemPrompt = local_outline_prompt
            # Uses the module-level llmClient (this function takes no client parameter).
            storySegment = llmClient.generate_content_script(systemPrompt, llmMessages, llmSettings["llmServer"], llmSettings["llmModel"], chapterConfig["tokensPerChapter"])
            # Check for abrupt ending and complete if necessary.
            # NOTE(review): if the LLM call returns None, is_abrupt_ending
            # treats it as not abrupt and None is appended below — confirm
            # whether a failed call should raise here instead.
            if is_abrupt_ending(storySegment):
                print(f" ⚠️ Detected abrupt ending in segment {chapterIdx}-{eventIdx}, generating completion...")
                storySegment = complete_abrupt_segment(
                    segment=storySegment,
                    title=title,
                    chapter_title=outline_list[chapterIdx]['title'],
                    event_title=outline_list[chapterIdx]['events'][eventIdx],
                    outline_output=outline_output,
                    niche=niche,
                    llmClient=llmClient,
                    llmSettings=llmSettings,
                    max_tokens=200
                )
                logger.saveLogTxt(storySegment, f"storySegment_{chapterIdx}_{eventIdx}_completed.txt", "debug/storySegments")
                print(f" ✓ Segment completed successfully")
            storySegmentsList[chapterIdx].append(storySegment)
            logger.saveLogTxt(storySegment, f"storySegment_{chapterIdx}_{eventIdx}.txt", "debug/storySegments")
            current_segment_idx += 1
    return storySegmentsList
def _generate_script_one_shot(niche, chapterConfig, outline_list, outline_output, outline_prompt, title, logger, llmSettings):
    """Generate the entire script in a single LLM call.

    Compresses the outline to one line per chapter to cut input tokens,
    then asks the module-level llmClient for a ~1000-word script.  Returns
    the same list-of-lists shape as the segmented generator: one chapter
    holding one segment.
    """
    niche_module = importlib.import_module(f"nicheConfigs.{niche}")
    system_prompt = niche_module.getPromptForContentGeneration(title)

    # One line per chapter: "<title>: event; event; ..."
    outline_lines = []
    for chapter in outline_list:
        outline_lines.append(f"{chapter['title']}: " + "; ".join(chapter["events"]))
    compressed_outline = "\n".join(outline_lines)

    user_messages = [
        {
            "role": "user",
            "content": (
                f"Outline:\n{compressed_outline}\n\n"
                "Write a ~1000-word script following the outline. Only output the script."
            ),
        }
    ]

    script_text = llmClient.generate_content_script(
        system_prompt,
        user_messages,
        llmSettings["llmServer"],
        llmSettings["llmModel"],
        chapterConfig["tokensPerChapter"],
    )
    return [[script_text]]
def generate_script(niche, chapterConfig, oneShotScript, outline_list, outline_output, outline_prompt, title, logger, llmSettings):
    """Entry point: generate a script with retries.

    Dispatches to the one-shot generator when *oneShotScript* is truthy,
    otherwise to the per-segment generator; each path is attempted up to
    3 times via execute_with_retry.  Returns None if every retry fails.
    """
    generator = _generate_script_one_shot if oneShotScript else _generate_script_segments_attempt
    try:
        return execute_with_retry(generator, 3, niche, chapterConfig, outline_list, outline_output, outline_prompt, title, logger, llmSettings)
    except Exception as e:
        print(f"Failed to generate script after multiple retries: {e}")
        return None
if __name__ == "__main__":
    # Minimal smoke-test invocation.  The original call passed only two of
    # the nine required arguments (generate_script(title, chapterConfig))
    # and raised TypeError immediately; supply the full signature instead.
    title = ""
    chapterConfig = {
        "chapterCount": 4,
        "tokensPerChapter": 8000,
    }
    # NOTE(review): a real run needs a valid niche name, outline data, a
    # logger object and llmSettings — these placeholders only exercise the
    # call path (failures are caught and None is returned). TODO confirm
    # the intended demo values.
    result = generate_script(
        niche="",
        chapterConfig=chapterConfig,
        oneShotScript=False,
        outline_list=[],
        outline_output="",
        outline_prompt="",
        title=title,
        logger=None,
        llmSettings={},
    )
    print(result)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment