From 34dca8e11401f3a22dd81d46166e0b71d7bbdcad Mon Sep 17 00:00:00 2001 From: Debanjum Date: Tue, 19 Aug 2025 15:10:19 -0700 Subject: [PATCH] Fix passing temp kwarg to non-streaming openai completion endpoint The temperature is already being passed via model_kwargs, so it is not required to be passed explicitly as well. This code path isn't being used currently, but it is better to fix it for if/when it is used. --- src/khoj/processor/conversation/openai/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/khoj/processor/conversation/openai/utils.py b/src/khoj/processor/conversation/openai/utils.py index 2b5889c2..7db03f52 100644 --- a/src/khoj/processor/conversation/openai/utils.py +++ b/src/khoj/processor/conversation/openai/utils.py @@ -195,7 +195,6 @@ def completion_with_backoff( chunk = client.beta.chat.completions.parse( messages=formatted_messages, # type: ignore model=model_name, - temperature=temperature, timeout=httpx.Timeout(30, read=read_timeout), **model_kwargs, )