Created
April 16, 2025 14:28
-
-
Save cirolini/309c13d3e19d4deca80d4370d933338d to your computer and use it in GitHub Desktop.
A simple, interactive example demonstrating how to integrate OpenAI's GPT model with function calling to create a cooking assistant that suggests recipes based on ingredients provided by the user.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import os | |
| import json | |
| from openai import OpenAI | |
# -------------------------------
# Conversation history (memory)
# Module-level list of chat messages ({"role": ..., "content": ...} dicts)
# shared by main() and handle_response(); grows for the whole session.
# -------------------------------
conversation: list[dict] = []
# -------------------------------
# Choose your OpenAI Model
# -------------------------------
MODEL: str = "gpt-4-1106-preview"
# -------------------------------
# Register functions (tools)
# -------------------------------
# JSON-Schema descriptions of the functions the model is allowed to call.
# invoke_model() wraps each entry in the {"type": "function", ...} tool format.
FUNCTIONS = [
    {
        "name": "find_recipes",
        "description": "Suggests recipes based on available ingredients provided by the user.",
        "parameters": {
            "type": "object",
            "properties": {
                "ingredients": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of ingredients the user has.",
                },
            },
            "required": ["ingredients"],
        },
    },
]
# -------------------------------
# Mock function implementation
# (Simulates a real API call or database query)
# -------------------------------
def mock_find_recipes(ingredients):
    """Pretend to look up recipes for *ingredients* and return canned results.

    Stands in for a real recipe API or database query; the return value
    mimics the JSON payload such a service would produce. The input list
    is only echoed in the log line — the same two recipes are always
    returned.
    """
    print(f"[TOOL] Searching recipes with ingredients: {', '.join(ingredients)}...")
    tomato_pasta = {
        "name": "Quick Tomato Pasta",
        "ingredients_used": ["tomato", "pasta", "olive oil"],
        "missing_ingredients": ["basil", "garlic"],
        "steps": [
            "Boil pasta until al dente.",
            "Heat olive oil and sauté tomatoes.",
            "Mix pasta with sautéed tomatoes and serve.",
        ],
    }
    omelette = {
        "name": "Simple Omelette",
        "ingredients_used": ["eggs", "cheese"],
        "missing_ingredients": ["salt", "pepper"],
        "steps": [
            "Beat eggs with cheese.",
            "Cook eggs in a pan until set.",
            "Fold omelette and serve warm.",
        ],
    }
    return {"recipes": [tomato_pasta, omelette]}
# -------------------------------
# Function to invoke the OpenAI model
# -------------------------------
def invoke_model(client, messages):
    """Send *messages* to the chat-completions endpoint and return the response.

    Exposes every schema in the module-level FUNCTIONS list as a callable
    tool and lets the model decide whether to use one (tool_choice="auto").
    """
    print("\n[LOG] Sending request to OpenAI...")
    tools = [{"type": "function", "function": schema} for schema in FUNCTIONS]
    completion = client.chat.completions.create(
        model=MODEL,
        messages=messages,
        tools=tools,
        tool_choice="auto",
    )
    print("[LOG] Response received.")
    return completion
# -------------------------------
# Handle responses from the assistant
# -------------------------------
def handle_response(client, response):
    """Process one model response: print text, run tool calls, recurse.

    Appends the assistant's message(s) and any tool results to the
    module-level ``conversation`` history. When the assistant requested
    tool calls, all of them are executed, their results are appended,
    and the model is invoked once more so it can turn the tool output
    into a natural-language answer.
    """
    # Extract the assistant's message
    message = response.choices[0].message

    # If assistant replies with a normal message
    if message.content:
        print("Cooking Assistant:", message.content)
        conversation.append({"role": message.role, "content": message.content})

    # If assistant decides to call a registered function/tool
    if message.tool_calls:
        # The API expects ONE assistant message carrying *all* tool calls,
        # followed by one "tool" message per call. (The original code
        # appended a separate assistant message per call and re-invoked
        # the model inside the loop, breaking multi-tool-call responses.)
        conversation.append({
            "role": "assistant",
            "tool_calls": [tc.model_dump() for tc in message.tool_calls],
        })
        for tool_call in message.tool_calls:
            func_name = tool_call.function.name
            args = json.loads(tool_call.function.arguments)
            print(f"[LOG] Calling function: {func_name}")
            # Call the mock implementation of the function
            if func_name == "find_recipes":
                result = mock_find_recipes(**args)
            else:
                # Original code left `result` unbound here (NameError at
                # json.dumps below); report the problem back to the model.
                result = {"error": f"Unknown function: {func_name}"}
            # Add the function result to the conversation history
            conversation.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "name": func_name,
                "content": json.dumps(result, ensure_ascii=False),
            })
        # Call the model again (once, after ALL tool results are recorded)
        # so it can generate a new response based on the tool output.
        new_response = invoke_model(client, conversation)
        handle_response(client, new_response)
# -------------------------------
# Main loop to interact with the user
# -------------------------------
def main():
    """Run the interactive cooking-assistant loop until the user quits."""
    # Set your OpenAI API key
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

    # Welcome message
    print("[INFO] Welcome to your Cooking Assistant! Type 'exit' to quit.\n")

    # Seed the conversation with the assistant's role definition (system prompt).
    conversation.append({
        "role": "system",
        "content": (
            "You are a Cooking Assistant AI, helping users create delicious recipes "
            "based on ingredients they already have. Suggest recipes, clearly list missing "
            "ingredients, and provide easy-to-follow cooking steps."
        ),
    })

    # Interactive conversation loop
    while True:
        user_input = input("You (list your ingredients): ")
        # Exit condition
        if user_input.strip().lower() in {"exit", "quit"}:
            print("[INFO] Session ended. Happy cooking!")
            break
        # Record the user's message, query the model, and handle the reply.
        conversation.append({"role": "user", "content": user_input})
        handle_response(client, invoke_model(client, conversation))
# -------------------------------
# Run the app
# -------------------------------
# Start the interactive loop only when executed as a script, not on import.
if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment