From 8305fddb1492b1f1d03dc30034dd67edaa67a311 Mon Sep 17 00:00:00 2001 From: Debanjum Date: Sat, 15 Feb 2025 13:55:08 +0530 Subject: [PATCH] Default to non-zero temperature for all queries to Gemini models. It may mitigate the intermittent invalid JSON output issues. The model may be going into repetition loops; a non-zero temperature may avoid that. --- src/khoj/processor/conversation/google/gemini_chat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/khoj/processor/conversation/google/gemini_chat.py b/src/khoj/processor/conversation/google/gemini_chat.py index cb25258c..f4e52914 100644 --- a/src/khoj/processor/conversation/google/gemini_chat.py +++ b/src/khoj/processor/conversation/google/gemini_chat.py @@ -34,7 +34,7 @@ def extract_questions_gemini( model: Optional[str] = "gemini-2.0-flash", conversation_log={}, api_key=None, - temperature=0, + temperature=0.2, max_tokens=None, location_data: LocationData = None, user: KhojUser = None, @@ -121,7 +121,7 @@ def gemini_send_message_to_model( api_key, model, response_type="text", - temperature=0, + temperature=0.2, model_kwargs=None, tracer={}, ):