Gist by @anilmuppalla — last active January 4, 2025.

Instantly share code, notes, and snippets.

Save anilmuppalla/61c5d9171688c959760b885b3cab6492 to your computer and use it in GitHub Desktop.
# Example: a pydantic-ai Agent that tells jokes by delegating to a local
# Ollama model through a tool function.
import asyncio
from dataclasses import dataclass, field
import ollama
from dotenv import load_dotenv
from ollama import ChatResponse
from pydantic import BaseModel
from pydantic_ai import Agent, RunContext
from pydantic_ai.models.ollama import OllamaModel

# Load environment variables (e.g. Ollama host settings) from a .env file.
load_dotenv()

# Small local model served by Ollama; used both as the agent model and by the tool.
model_name = "qwen2.5:0.5b"
class OllamaClient:
    """Thin synchronous wrapper around the ``ollama`` chat API.

    Holds a model name and exposes a single-turn ``chat`` call that returns
    the assistant's reply text.
    """

    def __init__(self, model: str):
        # Name of the Ollama model to chat with, e.g. "qwen2.5:0.5b".
        self.model = model

    def chat(self, user_prompt: str) -> str:
        """Send one user prompt to the model and return the reply content.

        Args:
            user_prompt (str): The prompt to send as a single user message.

        Returns:
            str: The assistant message content from the chat response.
        """
        # user_prompt is already a str, so pass it through directly
        # (the original wrapped it in a redundant f-string).
        messages = [{"role": "user", "content": user_prompt}]
        response: ChatResponse = ollama.chat(model=self.model, messages=messages)
        return response["message"]["content"]
@dataclass
class JokeClient:
    """Dependency container injected into agent tools via ``RunContext``."""

    # Client the tool uses to ask the local Ollama model for a joke.
    ollama_client: OllamaClient
class Joke(BaseModel):
    """Structured result type returned by the joke agent."""

    # Plain default: dataclasses.field() has no meaning on a pydantic
    # BaseModel — the original `field(default="unknown")` made the
    # dataclasses Field object itself the default value, not "unknown".
    joke: str = "unknown"
# Agent that produces a structured Joke, using JokeClient as its tool deps.
joker = Agent(
    # Renamed from "identify_characters_agent" — a copy-paste leftover that
    # did not describe this joke-telling agent.
    name="joke_agent",
    model=OllamaModel(model_name),
    deps_type=JokeClient,
    result_type=Joke,
    system_prompt="""
You are an excellent Joke teller. Run the tool to generate a joke.
""",
)
@joker.tool
def generate_joke_for_type(ctx: RunContext[JokeClient], joke_type: str) -> Joke:
    """Generate a joke of the requested type via the Ollama client.

    Args:
        ctx (RunContext[JokeClient]): Run context carrying the JokeClient deps.
        joke_type (str): Type of joke to generate (e.g. "dad", "knock-knock").

    Returns:
        Joke: The model's reply wrapped in the structured Joke result.
    """
    # Delegate the actual text generation to the injected Ollama client.
    response_text = ctx.deps.ollama_client.chat(
        f"generate a {joke_type} joke",
    )
    return Joke(joke=response_text)
# Drive the agent once. Agent.run is a coroutine, so execute it to
# completion on a fresh event loop with asyncio.run.
agent_deps = JokeClient(ollama_client=OllamaClient(model_name))
joke = asyncio.run(
    joker.run(user_prompt="generate a joke", deps=agent_deps)
)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment