From 67b2e9c194823461b968fb014fb6947007c0b7d8 Mon Sep 17 00:00:00 2001 From: sabaimran Date: Wed, 29 Jan 2025 09:06:35 -0800 Subject: [PATCH 1/4] Increase subscribed total entries size to 500 MB --- src/khoj/routers/api_content.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/khoj/routers/api_content.py b/src/khoj/routers/api_content.py index fe0a0d50..d9412629 100644 --- a/src/khoj/routers/api_content.py +++ b/src/khoj/routers/api_content.py @@ -104,7 +104,7 @@ async def put_content( incoming_entries_size_limit=10, subscribed_incoming_entries_size_limit=75, total_entries_size_limit=10, - subscribed_total_entries_size_limit=200, + subscribed_total_entries_size_limit=500, ) ), ): @@ -126,7 +126,7 @@ async def patch_content( incoming_entries_size_limit=10, subscribed_incoming_entries_size_limit=75, total_entries_size_limit=10, - subscribed_total_entries_size_limit=200, + subscribed_total_entries_size_limit=500, ) ), ): From d640299edccf92b7a588036a870df5b4b8ff971f Mon Sep 17 00:00:00 2001 From: sabaimran Date: Wed, 29 Jan 2025 14:10:59 -0800 Subject: [PATCH 2/4] use `is_active` property to determine user subscription status --- src/interface/web/app/agents/page.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/interface/web/app/agents/page.tsx b/src/interface/web/app/agents/page.tsx index 8ea08041..1eeeec9d 100644 --- a/src/interface/web/app/agents/page.tsx +++ b/src/interface/web/app/agents/page.tsx @@ -281,7 +281,7 @@ export default function Agents() { const modelOptions: ModelOptions[] = userConfig?.chat_model_options || []; const selectedChatModelOption: number = userConfig?.selected_chat_model_config || 0; - const isSubscribed: boolean = isUserSubscribed(userConfig); + const isSubscribed: boolean = userConfig?.is_active || false; // The default model option should map to the item in the modelOptions array that has the same id as the selectedChatModelOption const defaultModelOption = modelOptions.find( From 
5ea056f03e63d551072f66e38e71d27458f9d4b0 Mon Sep 17 00:00:00 2001 From: sabaimran Date: Wed, 29 Jan 2025 14:11:27 -0800 Subject: [PATCH 3/4] Add custom handling logic when speaking with deepseek reasoner --- src/khoj/processor/conversation/openai/utils.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/khoj/processor/conversation/openai/utils.py b/src/khoj/processor/conversation/openai/utils.py index e98daf97..977414d1 100644 --- a/src/khoj/processor/conversation/openai/utils.py +++ b/src/khoj/processor/conversation/openai/utils.py @@ -181,6 +181,19 @@ def llm_thread( elif model_name.startswith("o1-"): temperature = 1 model_kwargs.pop("response_format", None) + elif model_name.startswith("deepseek-reasoner"): + # Two successive messages cannot be from the same role. Should merge any back-to-back messages from the same role. + # The first message should always be a user message (except system message). + updated_messages = [] + for i, message in enumerate(formatted_messages): + if i > 0 and message["role"] == formatted_messages[i - 1]["role"]: + updated_messages[-1]["content"] += " " + message["content"] + elif i == 1 and formatted_messages[i - 1]["role"] == "system" and message["role"] == "assistant": + updated_messages[-1]["content"] += " " + message["content"] + else: + updated_messages.append(message) + + formatted_messages = updated_messages if os.getenv("KHOJ_LLM_SEED"): model_kwargs["seed"] = int(os.getenv("KHOJ_LLM_SEED")) From c3cb6086e05f08c0dc286533d4cd3bb6bbda949e Mon Sep 17 00:00:00 2001 From: sabaimran Date: Wed, 29 Jan 2025 14:19:57 -0800 Subject: [PATCH 4/4] Add list typing to the updated_messages temporary variable --- src/khoj/processor/conversation/openai/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/khoj/processor/conversation/openai/utils.py b/src/khoj/processor/conversation/openai/utils.py index 977414d1..76b175b5 100644 --- a/src/khoj/processor/conversation/openai/utils.py +++ 
b/src/khoj/processor/conversation/openai/utils.py @@ -1,7 +1,7 @@ import logging import os from threading import Thread -from typing import Dict +from typing import Dict, List import openai from openai.types.chat.chat_completion import ChatCompletion @@ -184,7 +184,7 @@ def llm_thread( elif model_name.startswith("deepseek-reasoner"): # Two successive messages cannot be from the same role. Should merge any back-to-back messages from the same role. # The first message should always be a user message (except system message). - updated_messages = [] + updated_messages: List[dict] = [] for i, message in enumerate(formatted_messages): if i > 0 and message["role"] == formatted_messages[i - 1]["role"]: updated_messages[-1]["content"] += " " + message["content"]