Put offline model response generation behind the chat lock as well

Not just the chat response streaming: acquiring the lock before model.generate() ensures only one thread drives the offline model at a time.
@@ -165,8 +165,9 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
     templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
     prompted_message = templated_system_message + chat_history + templated_user_message
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     state.chat_lock.acquire()
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):
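For context, here is a minimal, self-contained sketch of the locking pattern this commit adopts: a single lock serializes both starting offline model generation and streaming its output, so concurrent chat requests can never drive the local model at the same time. DummyModel, its hard-coded chunks, and the module-level chat_lock are illustrative stand-ins for GPT4All and state.chat_lock, not Khoj's actual code.

import threading
from typing import Iterator

chat_lock = threading.Lock()  # stands in for state.chat_lock

class DummyModel:
    """Placeholder for GPT4All; generate() lazily yields response chunks."""
    def generate(self, prompt: str, streaming: bool = True) -> Iterator[str]:
        for chunk in ["Hello", " ", "world", "</s>"]:
            yield chunk

def llm_thread(prompt: str, model: DummyModel, stop_words=("</s>",)) -> str:
    # Acquire the lock *before* model.generate(), not just before streaming:
    # generation itself touches shared model state in the offline backend.
    chat_lock.acquire()
    response_iterator = model.generate(prompt, streaming=True)
    chunks = []
    try:
        for response in response_iterator:
            # Stop streaming once a stop token appears in the output.
            if any(stop_word in response.strip() for stop_word in stop_words):
                break
            chunks.append(response)
    finally:
        chat_lock.release()  # always release, even if streaming raises
    return "".join(chunks)

if __name__ == "__main__":
    print(llm_thread("Say hello", DummyModel()))  # -> "Hello world"

The try/finally mirrors the structure in the diff above: whatever happens while iterating the response stream, the lock is released so the next chat request can proceed.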