Update default temperature for calls to Gemini models to 0.6 from 0.2

This aligns with the default temperature used by Google AI Studio and
may reduce loops and repetitions.
This commit is contained in:
Debanjum
2025-03-11 06:07:13 +05:30
parent 50f71be03d
commit 2790ba3121

View File

@@ -34,7 +34,7 @@ def extract_questions_gemini(
model: Optional[str] = "gemini-2.0-flash",
conversation_log={},
api_key=None,
temperature=0.2,
temperature=0.6,
max_tokens=None,
location_data: LocationData = None,
user: KhojUser = None,
@@ -121,7 +121,7 @@ def gemini_send_message_to_model(
api_key,
model,
response_type="text",
temperature=0.2,
temperature=0.6,
model_kwargs=None,
tracer={},
):
@@ -156,7 +156,7 @@ def converse_gemini(
conversation_log={},
model: Optional[str] = "gemini-2.0-flash",
api_key: Optional[str] = None,
temperature: float = 0.2,
temperature: float = 0.6,
completion_func=None,
conversation_commands=[ConversationCommand.Default],
max_prompt_size=None,