Created
May 7, 2025 22:25
-
-
Save Aayush9029/057d9c25045581223f2cb3dbf2bcf4b9 to your computer and use it in GitHub Desktop.
OpenAI LLM.py
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| from openai import AsyncOpenAI | |
| from typing import Type, TypeVar, Optional | |
| import logging | |
| import os | |
| from pydantic import BaseModel | |
# Module-level logger; `logging....` in the pasted source was extraction
# garbling of the standard getLogger idiom.
logger = logging.getLogger(__name__)

# Generic type bound to pydantic models: generate_response parses the API
# reply into whatever BaseModel subclass the caller requests.
T = TypeVar('T', bound=BaseModel)
class ResponseGenerator:
    """Generates structured responses from OpenAI's chat API.

    Responses are parsed into a caller-supplied pydantic model via the
    beta ``chat.completions.parse`` endpoint.
    """

    def __init__(self):
        """Create the async OpenAI client from the environment.

        Raises:
            ValueError: if the OPENAI_API_KEY environment variable is
                missing or empty.
        """
        logger.info("🔧 Initializing ResponseGenerator")
        # Validate before constructing the client: the original indexed
        # os.environ directly, so a missing key raised KeyError and the
        # ValueError guard below it could never fire for that case.
        api_key = os.environ.get("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY must be set")
        self.client = AsyncOpenAI(api_key=api_key)
        logger.info("✅ ResponseGenerator initialized successfully")

    async def generate_response(
        self,
        prompt: str,
        user_input: Optional[str] = None,
        response_type: Optional[Type[T]] = None,
        image_data: Optional[str] = None,
        temperature: float = 0.7
    ) -> Optional[T]:
        """Generate a structured response from the model.

        Args:
            prompt: System prompt that frames the request.
            user_input: Optional user message appended after the prompt.
            response_type: Pydantic model class the reply is parsed into.
            image_data: Optional base64 image payload, sent as a
                high-detail image_url part (assumed JPEG — confirm with
                callers if other formats are possible).
            temperature: Sampling temperature passed to the API.

        Returns:
            The parsed ``response_type`` instance, or ``None`` when the
            model refused or parsing failed.

        Raises:
            Exception: any error raised by the underlying API call is
                logged and re-raised unchanged.
        """
        model = "gpt-4.1-mini"
        logger.info("🤖 Generating response using model: %s", model)
        logger.debug("📝 Prompt: %s", prompt)
        messages = [{"role": "system", "content": prompt}]
        # Build user message content
        if user_input:
            logger.debug("🗣️ Adding user input to message")
            messages.append({"role": "user", "content": user_input})
        if image_data:
            logger.debug("🖼️ Adding base64 image to message")
            messages.append({
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "data:image/jpeg;base64,%s" % image_data,
                            "detail": "high"
                        }
                    }
                ]
            })
        try:
            logger.info("📡 Making API call to OpenAI")
            completion = await self.client.beta.chat.completions.parse(
                model=model,
                messages=messages,
                temperature=temperature,
                response_format=response_type
            )
            message = completion.choices[0].message
            if message.parsed:
                logger.info("✅ Successfully generated response")
                return message.parsed
            logger.error(
                "❌ Failed to generate response: %s",
                message.refusal)
            logger.error("⚠️ Error details: %s", message)
            return None
        except Exception:
            # logger.exception records the traceback; bare `raise`
            # (instead of the original `raise e`) preserves it for
            # callers as well.
            logger.exception("❌ Error generating response")
            raise
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment