From 973aded6c51f12384ed86b4c9092f19ab7736e74 Mon Sep 17 00:00:00 2001 From: Debanjum Date: Sun, 20 Apr 2025 20:09:24 +0530 Subject: [PATCH] Fix system prompt so OpenAI reasoning models format responses as Markdown --- src/khoj/processor/conversation/openai/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/khoj/processor/conversation/openai/utils.py b/src/khoj/processor/conversation/openai/utils.py index 845a620b..b73903ae 100644 --- a/src/khoj/processor/conversation/openai/utils.py +++ b/src/khoj/processor/conversation/openai/utils.py @@ -181,9 +181,10 @@ def llm_thread( ] if len(system_messages) > 0: first_system_message_index, first_system_message = system_messages[0] + first_system_message_content = first_system_message["content"] formatted_messages[first_system_message_index][ "content" - ] = f"{first_system_message} Formatting re-enabled" + ] = f"{first_system_message_content}\nFormatting re-enabled" elif is_twitter_reasoning_model(model_name, api_base_url): reasoning_effort = "high" if deepthought else "low" model_kwargs["reasoning_effort"] = reasoning_effort