diff --git a/src/khoj/database/models/__init__.py b/src/khoj/database/models/__init__.py
index f377c9f7..8e76d3ec 100644
--- a/src/khoj/database/models/__init__.py
+++ b/src/khoj/database/models/__init__.py
@@ -60,7 +60,7 @@ class PeopleAlsoAsk(PydanticBaseModel):
     link: Optional[str] = None
     question: Optional[str] = None
     snippet: Optional[str] = None
-    title: str
+    title: Optional[str] = None
 
 
 class KnowledgeGraph(PydanticBaseModel):
diff --git a/src/khoj/processor/conversation/utils.py b/src/khoj/processor/conversation/utils.py
index 75a76918..1ab06fae 100644
--- a/src/khoj/processor/conversation/utils.py
+++ b/src/khoj/processor/conversation/utils.py
@@ -534,9 +534,10 @@ def truncate_messages(
         encoder = download_model(model_name).tokenizer()
     except:
         encoder = tiktoken.encoding_for_model(default_tokenizer)
-        logger.debug(
-            f"Fallback to default chat model tokenizer: {default_tokenizer}.\nConfigure tokenizer for model: {model_name} in Khoj settings to improve context stuffing."
-        )
+        if state.verbose > 2:
+            logger.debug(
+                f"Fallback to default chat model tokenizer: {default_tokenizer}.\nConfigure tokenizer for model: {model_name} in Khoj settings to improve context stuffing."
+            )
 
     # Extract system message from messages
     system_message = None