diff --git a/src/khoj/processor/conversation/gpt4all/chat_model.py b/src/khoj/processor/conversation/gpt4all/chat_model.py index 04a004f0..d3eaa01a 100644 --- a/src/khoj/processor/conversation/gpt4all/chat_model.py +++ b/src/khoj/processor/conversation/gpt4all/chat_model.py @@ -55,10 +55,10 @@ def extract_questions_offline( last_year = datetime.now().year - 1 last_christmas_date = f"{last_year}-12-25" next_christmas_date = f"{datetime.now().year}-12-25" - system_prompt = prompts.extract_questions_system_prompt_llamav2.format( - message=(prompts.system_prompt_message_extract_questions_llamav2) + system_prompt = prompts.system_prompt_extract_questions_gpt4all.format( + message=(prompts.system_prompt_message_extract_questions_gpt4all) ) - example_questions = prompts.extract_questions_llamav2_sample.format( + example_questions = prompts.extract_questions_gpt4all_sample.format( query=text, chat_history=chat_history, current_date=current_date, @@ -150,14 +150,14 @@ def converse_offline( elif conversation_command == ConversationCommand.General or is_none_or_empty(compiled_references_message): conversation_primer = user_query else: - conversation_primer = prompts.notes_conversation_llamav2.format( + conversation_primer = prompts.notes_conversation_gpt4all.format( query=user_query, references=compiled_references_message ) # Setup Prompt with Primer or Conversation History messages = generate_chatml_messages_with_context( conversation_primer, - prompts.system_prompt_message_llamav2, + prompts.system_prompt_message_gpt4all, conversation_log, model_name=model, max_prompt_size=max_prompt_size, @@ -183,16 +183,16 @@ def llm_thread(g, messages: List[ChatMessage], model: Any): conversation_history = messages[1:-1] formatted_messages = [ - prompts.chat_history_llamav2_from_assistant.format(message=message.content) + prompts.khoj_message_gpt4all.format(message=message.content) if message.role == "assistant" - else 
prompts.chat_history_llamav2_from_user.format(message=message.content) + else prompts.user_message_gpt4all.format(message=message.content) for message in conversation_history ] stop_words = [""] chat_history = "".join(formatted_messages) - templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content) - templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content) + templated_system_message = prompts.system_prompt_gpt4all.format(message=system_message.content) + templated_user_message = prompts.user_message_gpt4all.format(message=user_message.content) prompted_message = templated_system_message + chat_history + templated_user_message state.chat_lock.acquire() diff --git a/src/khoj/processor/conversation/prompts.py b/src/khoj/processor/conversation/prompts.py index c11c38ba..fa9f9d91 100644 --- a/src/khoj/processor/conversation/prompts.py +++ b/src/khoj/processor/conversation/prompts.py @@ -35,11 +35,13 @@ no_notes_found = PromptTemplate.from_template( """.strip() ) -system_prompt_message_llamav2 = f"""You are Khoj, a smart, inquisitive and helpful personal assistant. +## Conversation Prompts for GPT4All Models +## -- +system_prompt_message_gpt4all = f"""You are Khoj, a smart, inquisitive and helpful personal assistant. Using your general knowledge and our past conversations as context, answer the following question. If you do not know the answer, say 'I don't know.'""" -system_prompt_message_extract_questions_llamav2 = f"""You are Khoj, a kind and intelligent personal assistant. When the user asks you a question, you ask follow-up questions to clarify the necessary information you need in order to answer from the user's perspective. +system_prompt_message_extract_questions_gpt4all = f"""You are Khoj, a kind and intelligent personal assistant. When the user asks you a question, you ask follow-up questions to clarify the necessary information you need in order to answer from the user's perspective. 
- Write the question as if you can search for the answer on the user's personal notes. - Try to be as specific as possible. Instead of saying "they" or "it" or "he", use the name of the person or thing you are referring to. For example, instead of saying "Which store did they go to?", say "Which store did Alice and Bob go to?". - Add as much context from the previous questions and notes as required into your search queries. @@ -47,44 +49,32 @@ system_prompt_message_extract_questions_llamav2 = f"""You are Khoj, a kind and i What follow-up questions, if any, will you need to ask to answer the user's question? """ -system_prompt_llamav2 = PromptTemplate.from_template( +system_prompt_gpt4all = PromptTemplate.from_template( """ [INST] <<SYS>> {message} <</SYS>>Hi there! [/INST] Hello! How can I help you today? """ ) -extract_questions_system_prompt_llamav2 = PromptTemplate.from_template( +system_prompt_extract_questions_gpt4all = PromptTemplate.from_template( """ [INST] <<SYS>> {message} <</SYS>>[/INST]""" ) -general_conversation_llamav2 = PromptTemplate.from_template( - """ -[INST] {query} [/INST] -""".strip() -) - -chat_history_llamav2_from_user = PromptTemplate.from_template( +user_message_gpt4all = PromptTemplate.from_template( """ [INST] {message} [/INST] """.strip() ) -chat_history_llamav2_from_assistant = PromptTemplate.from_template( +khoj_message_gpt4all = PromptTemplate.from_template( """ {message} """.strip() ) -conversation_llamav2 = PromptTemplate.from_template( - """ -[INST] {query} [/INST] -""".strip() -) - ## Notes Conversation ## -- notes_conversation = PromptTemplate.from_template( @@ -99,7 +89,7 @@ Query: {query} """.strip() ) -notes_conversation_llamav2 = PromptTemplate.from_template( +notes_conversation_gpt4all = PromptTemplate.from_template( """ User's Notes: {references} @@ -135,7 +125,10 @@ Question: {user_query} Answer (in second person):""" ) -extract_questions_llamav2_sample = PromptTemplate.from_template( + +## Extract Questions +## --
+extract_questions_gpt4all_sample = PromptTemplate.from_template( """ [INST] <<SYS>>Current Date: {current_date}<</SYS>> [/INST] [INST] How was my trip to Cambodia? [/INST] @@ -160,8 +153,6 @@ Use these notes from the user's previous conversations to provide a response: ) -## Extract Questions -## -- extract_questions = PromptTemplate.from_template( """ You are Khoj, an extremely smart and helpful search assistant with the ability to retrieve information from the user's notes. diff --git a/src/khoj/routers/api.py b/src/khoj/routers/api.py index 2bf7fd6f..190fc260 100644 --- a/src/khoj/routers/api.py +++ b/src/khoj/routers/api.py @@ -418,7 +418,6 @@ async def search( user_query, t, question_embedding=encoded_asymmetric_query, - rank_results=r or False, max_distance=max_distance, ) ] diff --git a/src/khoj/search_type/text_search.py b/src/khoj/search_type/text_search.py index d6f27cea..2b99ed66 100644 --- a/src/khoj/search_type/text_search.py +++ b/src/khoj/search_type/text_search.py @@ -104,7 +104,6 @@ async def query( raw_query: str, type: SearchType = SearchType.All, question_embedding: Union[torch.Tensor, None] = None, - rank_results: bool = False, max_distance: float = math.inf, ) -> Tuple[List[dict], List[Entry]]: "Search for entries that answer the query"