Last active
July 23, 2025 07:07
-
-
Save 0bx/7d51cb7b48d279c380120174c3643dae to your computer and use it in GitHub Desktop.
Strands Agents Ollama example
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import os | |
| import sys | |
| from strands import Agent | |
| from strands.models.ollama import OllamaModel | |
| from strands_tools import file_write, file_read, shell, editor, handoff_to_user | |
| from strands.handlers.callback_handler import null_callback_handler | |
| from rich.markdown import Markdown | |
| from rich.console import Console | |
# Default Ollama model tag the agent runs with.
default_model = "qwen2.5-coder:latest"
# Filesystem sandbox root for the agent: the directory the script is launched from.
work_dir = os.getcwd()

# Local Ollama backend; low temperature/top_p keep the writing assistant focused.
ollama_model = OllamaModel(
    host="http://localhost:11434",
    model_id=default_model,
    temperature=0.1,
    top_p=0.5,
    # Disables "thinking" output. NOTE(review): Ollama's `think` option is a
    # boolean — the string 'false' may be treated as truthy upstream; confirm
    # against the Ollama API before relying on this.
    additional_args={'think': 'false'}
)
# System prompt: sets the assistant persona and confines filesystem access to
# work_dir. Spelling fixed ("execptional" -> "exceptional", "acess" -> "access")
# so the model receives clean instructions.
sys_prompt = f'''
You are an exceptional technical writing assistant. When improving text, don't try to use too formal language, instead make the content friendly for newbie and seasoned engineers.
You have access to filesystem root at "{work_dir}", but your access is limited to this directory and subdirectories only - don't try to go upper as this activity will be recorded and will immediately result in error.
When you are given markdown file to check, analyze it, and let me know your thoughts. Explain what's not clear, what needs improvement, and finally suggest changes to make it better.
If user accepts your version, you may make filesystem changes.
'''
# The Strands agent: wired to the local Ollama model and limited to file
# read/write plus a handoff-to-user tool. The null callback handler suppresses
# streamed output; responses are printed explicitly after each call instead.
agent = Agent(
    model=ollama_model,
    tools=[file_read, file_write, handoff_to_user],
    callback_handler=null_callback_handler,
    system_prompt=sys_prompt
)

# Inputs that terminate the interactive loop in main().
quit_messages = ['/q', 'q']
# Shared rich console used by all the print_* helpers below.
console = Console()
def print_model_response(text):
    """Render *text* as Markdown on the shared console, styled green."""
    console.print(Markdown(text), style="green")
def print_system_message(text):
    """Render *text* as Markdown on the shared console, styled cyan."""
    rendered = Markdown(text)
    console.print(rendered, style="cyan")
def print_agent_response(response):
    """Pretty-print an agent result, degrading gracefully on shape mismatches.

    Expects ``response.message['content']`` to be a list whose last entry is a
    dict with a ``'text'`` key; that text is rendered as Markdown. If the
    response doesn't match that shape, fall back to the best plain-text
    representation available instead of crashing.
    """
    try:
        print_model_response(response.message['content'][-1]['text'])
    except (AttributeError, KeyError, IndexError, TypeError):
        # The original bare ``except:`` swallowed even KeyboardInterrupt and
        # SystemExit; catch only the failures a malformed response can cause.
        console.print("Failed to print response", style="red")
        if isinstance(response, str):
            print(response)
        elif hasattr(response, 'message'):
            print(response.message)
        else:
            print("Not sure how print agent response")
def print_greeting_message():
    """Announce the active model and working directory at startup."""
    banner = f"Using **{default_model}**; Running in **{work_dir}**"
    print_system_message(banner)
def main():
    """Run the agent REPL.

    Any command-line arguments are joined into a one-shot initial prompt,
    then the loop reads lines from stdin until a quit message ('/q' or 'q')
    or end-of-input (Ctrl-D) is seen.
    """
    print_greeting_message()
    user_args = sys.argv[1:]
    if user_args:
        agent_response = agent(" ".join(user_args))
        print_agent_response(agent_response)
    while True:
        try:
            prompt = input()
        except EOFError:
            # Ctrl-D / closed stdin: exit cleanly instead of a traceback.
            sys.exit(0)
        prompt = prompt.strip()
        if prompt in quit_messages:
            sys.exit(0)
        if not prompt:
            # Don't spend a model call on an empty line.
            continue
        try:
            agent_response = agent(prompt)
            print_agent_response(agent_response)
        except KeyboardInterrupt:
            # TODO: Cancel any open futures of agent
            print('\rInterrupted agent task')
# Script entry point: run the REPL and treat Ctrl-C as a quiet exit.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Overwrite the '^C' echoed at the prompt, then exit without a traceback.
        print('\r ')
        sys.exit(0)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment