From 0c257c044e199a4782a466a356b88bd8caf67ae9 Mon Sep 17 00:00:00 2001
From: Debanjum
Date: Sat, 5 Apr 2025 23:11:32 +0530
Subject: [PATCH] Handle unset response_schema being passed to gemini models

---
 src/khoj/processor/conversation/google/gemini_chat.py | 3 ++-
 src/khoj/processor/conversation/google/utils.py       | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/khoj/processor/conversation/google/gemini_chat.py b/src/khoj/processor/conversation/google/gemini_chat.py
index 4dc62d6c..619ab5b6 100644
--- a/src/khoj/processor/conversation/google/gemini_chat.py
+++ b/src/khoj/processor/conversation/google/gemini_chat.py
@@ -143,7 +143,8 @@ def gemini_send_message_to_model(
     # This caused unwanted behavior and terminates response early for gemini 1.5 series. Monitor for flakiness with 2.0 series.
     if response_type == "json_object" and model in ["gemini-2.0-flash"]:
         model_kwargs["response_mime_type"] = "application/json"
-        model_kwargs["response_schema"] = response_schema
+        if response_schema:
+            model_kwargs["response_schema"] = response_schema
 
     # Get Response from Gemini
     return gemini_completion_with_backoff(
diff --git a/src/khoj/processor/conversation/google/utils.py b/src/khoj/processor/conversation/google/utils.py
index 19823d5e..b4f613d5 100644
--- a/src/khoj/processor/conversation/google/utils.py
+++ b/src/khoj/processor/conversation/google/utils.py
@@ -89,7 +89,7 @@ def gemini_completion_with_backoff(
 
     # format model response schema
     response_schema = None
-    if model_kwargs and "response_schema" in model_kwargs:
+    if model_kwargs and not is_none_or_empty(model_kwargs.get("response_schema")):
         response_schema = clean_response_schema(model_kwargs["response_schema"])
 
     seed = int(os.getenv("KHOJ_LLM_SEED")) if os.getenv("KHOJ_LLM_SEED") else None