Handle Gemini chat response completion chunk when streaming

This commit is contained in:
Debanjum
2025-06-13 14:49:54 -07:00
parent 963ebc8875
commit e635b8e3b9
2 changed files with 2 additions and 2 deletions

View File

@@ -238,7 +238,7 @@ async def gemini_chat_completion_with_backoff(
break
# emit thought vs response parts
-            for part in chunk.candidates[0].content.parts:
+            for part in chunk.candidates[0].content.parts or []:
if part.thought:
yield ResponseWithThought(thought=part.text)
elif part.text:

View File

@@ -1057,7 +1057,7 @@ async def chat(
# researched_results = await extract_relevant_info(q, researched_results, agent)
if state.verbose > 1:
-        logger.debug(f'Researched Results: {"".join(r.summarizedResult for r in research_results)}')
+        logger.debug(f'Researched Results: {"".join(r.summarizedResult or "" for r in research_results)}')
# Gather Context
## Extract Document References