From 14b4d4b66340b2df891a7061f215d3ca166a1753 Mon Sep 17 00:00:00 2001 From: Debanjum Date: Mon, 18 Aug 2025 21:34:04 -0700 Subject: [PATCH] Fix using non-reasoning openai model via responses API Pass arg to include encrypted reasoning only for reasoning openai models. Non reasoning openai models do not accept this arg --- src/khoj/processor/conversation/openai/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/khoj/processor/conversation/openai/utils.py b/src/khoj/processor/conversation/openai/utils.py index 51d71cc0..d29796b3 100644 --- a/src/khoj/processor/conversation/openai/utils.py +++ b/src/khoj/processor/conversation/openai/utils.py @@ -458,6 +458,7 @@ def responses_completion_with_backoff( temperature = 1 reasoning_effort = "medium" if deepthought else "low" model_kwargs["reasoning"] = {"effort": reasoning_effort, "summary": "auto"} + model_kwargs["include"] = ["reasoning.encrypted_content"] # Remove unsupported params for reasoning models model_kwargs.pop("top_p", None) model_kwargs.pop("stop", None) @@ -472,7 +473,6 @@ def responses_completion_with_backoff( temperature=temperature, timeout=httpx.Timeout(30, read=read_timeout), # type: ignore store=False, - include=["reasoning.encrypted_content"], **model_kwargs, ) if not model_response or not isinstance(model_response, OpenAIResponse) or not model_response.output: