diff --git a/backend/llm.py b/backend/llm.py
index a0f49e4..e4250d3 100644
--- a/backend/llm.py
+++ b/backend/llm.py
@@ -36,7 +36,12 @@ async def stream_openai_response(
         model = Llm.GPT_4_VISION
 
     # Base parameters
-    params = {"model": model, "messages": messages, "stream": True, "timeout": 600}
+    params = {
+        "model": model.value,
+        "messages": messages,
+        "stream": True,
+        "timeout": 600,
+    }
 
     # Add 'max_tokens' only if the model is a GPT4 vision model
     if model == Llm.GPT_4_VISION: