From 6e4050fa816480e6b023b90fd2693a93611cc83f Mon Sep 17 00:00:00 2001 From: Debanjum Singh Solanky Date: Tue, 1 Aug 2023 18:53:46 -0700 Subject: [PATCH] Make Llama 2 stop generating response on hitting specified stop words It would previously sometimes start generating fake dialogue with its internal prompt patterns of [INST] in responses. This is a jarring experience. Stop generating the response when a stop word is hit. Resolves #398 --- src/khoj/processor/conversation/gpt4all/chat_model.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/khoj/processor/conversation/gpt4all/chat_model.py b/src/khoj/processor/conversation/gpt4all/chat_model.py index fa07e59f..9ca5a1b8 100644 --- a/src/khoj/processor/conversation/gpt4all/chat_model.py +++ b/src/khoj/processor/conversation/gpt4all/chat_model.py @@ -160,6 +160,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All): for message in conversation_history ] + stop_words = [""] chat_history = "".join(formatted_messages) templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content) templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content) @@ -168,6 +169,9 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All): state.chat_lock.acquire() try: for response in response_iterator: + if any(stop_word in response.strip() for stop_word in stop_words): + logger.debug(f"Stop response as hit stop word in {response}") + break g.send(response) finally: state.chat_lock.release()