diff --git a/backend/llm.py b/backend/llm.py
index e541046..44f97e1 100644
--- a/backend/llm.py
+++ b/backend/llm.py
@@ -59,9 +59,10 @@ async def stream_openai_response(
     full_response = ""
     async for chunk in stream:  # type: ignore
         assert isinstance(chunk, ChatCompletionChunk)
-        content = chunk.choices[0].delta.content or ""
-        full_response += content
-        await callback(content)
+        if chunk.choices and chunk.choices[0].delta.content:
+            content = chunk.choices[0].delta.content or ""
+            full_response += content
+            await callback(content)
     await client.close()
 
     return full_response