diff --git a/backend/llm.py b/backend/llm.py
index 5c38fc5..a5e8c9a 100644
--- a/backend/llm.py
+++ b/backend/llm.py
@@ -22,7 +22,10 @@ async def stream_openai_response(messages, callback: Callable[[str], Awaitable[N
         params["temperature"] = 0
 
     completion = await client.chat.completions.create(**params)
-
+    full_response = ""
     async for chunk in completion:
         content = chunk.choices[0].delta.content or ""
+        full_response += content
         await callback(content)
+
+    return full_response
diff --git a/backend/main.py b/backend/main.py
index 4971d00..ad8d02c 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -1,4 +1,5 @@
 # Load environment variables first
+import json
 from dotenv import load_dotenv
 import os
 from datetime import datetime
@@ -14,6 +15,19 @@ from llm import stream_openai_response
 
 app = FastAPI()
 
+def write_logs(prompt_messages, completion):
+    # Create run_logs directory if it doesn't exist
+    if not os.path.exists("run_logs"):
+        os.makedirs("run_logs")
+
+    # Generate a unique filename using the current timestamp
+    filename = datetime.now().strftime("run_logs/messages_%Y%m%d_%H%M%S.json")
+
+    # Write the messages dict into a new file for each run
+    with open(filename, "w") as f:
+        f.write(json.dumps({"prompt": prompt_messages, "completion": completion}))
+
+
 @app.websocket("/generate-code")
 async def stream_code_test(websocket: WebSocket):
     await websocket.accept()
@@ -23,21 +37,12 @@ async def stream_code_test(websocket: WebSocket):
     async def process_chunk(content):
         await websocket.send_json({"type": "chunk", "value": content})
 
-    messages = assemble_prompt("")
-    print(messages)
+    prompt_messages = assemble_prompt("")
 
-    # Create run_logs directory if it doesn't exist
-    if not os.path.exists('run_logs'):
-        os.makedirs('run_logs')
-
-    # Generate a unique filename using the current timestamp
-    filename = datetime.now().strftime('run_logs/messages_%Y%m%d_%H%M%S.json')
-
-    # Write the messages dict into a new file for each run
-    with open(filename, "w") as f:
-        f.write(str(messages))
-
-    await stream_openai_response(
-        messages,
+    completion = await stream_openai_response(
+        prompt_messages,
         lambda x: process_chunk(x),
     )
+
+    # Write the messages dict into a log so that we can debug later
+    write_logs(prompt_messages, completion)