Mirror of https://github.com/khoaliber/khoj.git, synced 2026-03-07 05:40:17 +00:00
Fix context, response size for Llama 2 to stay within max token limits
Create regression test to ensure it does not throw the "prompt size exceeded context window" error
@@ -165,7 +165,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
     templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
     prompted_message = templated_system_message + chat_history + templated_user_message
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=1000, n_batch=256)
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     state.chat_lock.acquire()
     try:
         for response in response_iterator:
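Halving max_tokens from 1000 to 500 reserves less of the context window for the response; combined with the smaller prompt budget in the next hunk, prompt and response together fit in Llama 2's 2048-token window. A minimal, self-contained sketch of the regression check the commit message describes (illustrative names and structure, not Khoj's actual test):

# Illustrative regression check, not Khoj's actual test suite.
MAX_PROMPT_SIZE = 1548        # prompt budget set in the hunk below
MAX_RESPONSE_TOKENS = 500     # max_tokens passed to model.generate() above
LLAMA2_CONTEXT_WINDOW = 2048  # context window of llama-2-7b-chat

def test_prompt_plus_response_fits_context_window():
    # The old values (2048 + 1000) overflowed the window, triggering the
    # "prompt size exceeded context window" error this commit fixes.
    assert MAX_PROMPT_SIZE + MAX_RESPONSE_TOKENS <= LLAMA2_CONTEXT_WINDOW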
@@ -14,7 +14,7 @@ import queue
 from khoj.utils.helpers import merge_dicts
 
 logger = logging.getLogger(__name__)
-max_prompt_size = {"gpt-3.5-turbo": 4096, "gpt-4": 8192, "llama-2-7b-chat.ggmlv3.q4_K_S.bin": 2048}
+max_prompt_size = {"gpt-3.5-turbo": 4096, "gpt-4": 8192, "llama-2-7b-chat.ggmlv3.q4_K_S.bin": 1548}
 tokenizer = {"llama-2-7b-chat.ggmlv3.q4_K_S.bin": "hf-internal-testing/llama-tokenizer"}
 
 
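With the new values, the prompt budget (1548 tokens) plus the response budget (500 tokens) comes to exactly the 2048-token context window of llama-2-7b-chat, so generation can no longer overrun it. A sketch of how the tokenizer named in the mapping above could enforce that budget (the truncation helper is an assumption for illustration, not Khoj's actual code):

from transformers import AutoTokenizer

# Tokenizer from the mapping above; used here only to count and trim tokens.
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")

def truncate_to_budget(prompt: str, max_prompt_size: int = 1548) -> str:
    # Hypothetical helper: keep only the most recent tokens (the tail of the
    # chat history plus the current query) so the prompt fits the budget.
    token_ids = tokenizer.encode(prompt)
    if len(token_ids) <= max_prompt_size:
        return prompt
    return tokenizer.decode(token_ids[-max_prompt_size:])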