diff --git a/src/khoj/processor/conversation/google/gemini_chat.py b/src/khoj/processor/conversation/google/gemini_chat.py
index 4dc62d6c..619ab5b6 100644
--- a/src/khoj/processor/conversation/google/gemini_chat.py
+++ b/src/khoj/processor/conversation/google/gemini_chat.py
@@ -143,7 +143,8 @@ def gemini_send_message_to_model(
     # This caused unwanted behavior and terminates response early for gemini 1.5 series. Monitor for flakiness with 2.0 series.
     if response_type == "json_object" and model in ["gemini-2.0-flash"]:
         model_kwargs["response_mime_type"] = "application/json"
-        model_kwargs["response_schema"] = response_schema
+        if response_schema:
+            model_kwargs["response_schema"] = response_schema
 
     # Get Response from Gemini
     return gemini_completion_with_backoff(
diff --git a/src/khoj/processor/conversation/google/utils.py b/src/khoj/processor/conversation/google/utils.py
index 19823d5e..b4f613d5 100644
--- a/src/khoj/processor/conversation/google/utils.py
+++ b/src/khoj/processor/conversation/google/utils.py
@@ -89,7 +89,7 @@ def gemini_completion_with_backoff(
 
     # format model response schema
     response_schema = None
-    if model_kwargs and "response_schema" in model_kwargs:
+    if model_kwargs and not is_none_or_empty(model_kwargs.get("response_schema")):
         response_schema = clean_response_schema(model_kwargs["response_schema"])
 
     seed = int(os.getenv("KHOJ_LLM_SEED")) if os.getenv("KHOJ_LLM_SEED") else None