Pattern 7 — Comprehensive Context Engineering demo
from dataclasses import dataclass
from typing import List

# Import the Strands agent SDK; fall back to inert stubs so the demo still runs
# in environments where the package is not installed.
try:
    from strands import Agent
    from strands.tools import calculator, mem0_memory, tavily_search, tavily_extract, use_llm
    from strands.managers import SlidingWindowConversationManager
except Exception:
    class Agent:
        def __init__(self, **kwargs): pass

    def calculator(*a, **k): return None
    def mem0_memory(*a, **k): return None
    def tavily_search(*a, **k): return None
    def tavily_extract(*a, **k): return None
    def use_llm(*a, **k): return None

    class SlidingWindowConversationManager:
        def __init__(self, window_size: int): self.window_size = window_size


@dataclass
class ContextState:
    """Everything the prompt packer needs: the task, its constraints,
    retrieved memory, citations, and a rough token budget."""
    task: str
    constraints: List[str]
    memory_bullets: List[str]
    citations: List[str]
    budget_tokens: int = 500


def build_prompt(state: ContextState) -> str:
    """Pack the context slots into one prompt, dropping the lowest-priority
    slot (citations) if the budget is exceeded."""
    slots = [
        "# Task\n" + state.task,
        "# Constraints\n" + "\n".join(f"- {c}" for c in state.constraints),
        "# Memory\n" + "\n".join(f"- {b}" for b in state.memory_bullets[:8]),
        "# Citations (ref only)\n" + "\n".join(state.citations[:5]),
    ]
    prompt = "\n\n".join(slots)
    # Word count serves as a cheap proxy for tokens.
    if len(prompt.split()) > state.budget_tokens:
        prompt = "\n\n".join(slots[:-1])
    return prompt


def run_comprehensive_demo():
    # Configure an agent with tools and a sliding-window conversation manager
    # that bounds how much prior history is retained.
    agent = Agent(
        system_prompt=(
            "You are an advanced AI assistant that uses context engineering best practices. "
            "Always structure outputs, use tools, and include citations."
        ),
        tools=[calculator, mem0_memory, use_llm, tavily_search, tavily_extract],
        conversation_manager=SlidingWindowConversationManager(window_size=30),
    )
    task = "Research the latest trends in AI context management and calculate the efficiency gains"
    state = ContextState(
        task=task,
        constraints=[
            "Use recent sources (2024-2025)",
            "Provide calculations where applicable",
            "Include proper citations",
            "Keep responses under 200 tokens",
        ],
        memory_bullets=["Previous research showed 30% improvement in context efficiency"],
        citations=[],
    )
    prompt = build_prompt(state)
    print("Packed Context Prompt:\n" + (prompt[:300] + "..." if len(prompt) > 300 else prompt))
    # In a real run, you would now call the agent with the packed prompt.
    return {"prompt": prompt, "note": "Invoke agent(prompt) in real environment."}


if __name__ == "__main__":
    print(run_comprehensive_demo())
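
A minimal usage sketch for a real run, assuming the strands package (and any required API keys) is actually installed: a Strands Agent instance is callable with a prompt string, which is what the demo's note "Invoke agent(prompt) in real environment" points to. The names reused below (Agent, tavily_search, tavily_extract, ContextState, build_prompt) come from the file above; treat the snippet as an illustration, not part of the gist.

state = ContextState(
    task="Summarize recent work on AI context management",
    constraints=["Cite sources", "Stay under 200 tokens"],
    memory_bullets=[],
    citations=[],
)
agent = Agent(
    system_prompt="Use context engineering best practices and cite sources.",
    tools=[tavily_search, tavily_extract],
)
response = agent(build_prompt(state))  # real SDK only; the stub Agent above is not callable
print(response)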