Fix context, response size for Llama 2 to stay within max token limits

Create regression test to ensure Llama 2 chat does not throw the
prompt size exceeded context window error
Debanjum Singh Solanky
2023-08-01 19:29:03 -07:00
parent 6e4050fa81
commit c2b7a14ed5
3 changed files with 24 additions and 2 deletions


@@ -165,7 +165,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
     templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
     prompted_message = templated_system_message + chat_history + templated_user_message
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=1000, n_batch=256)
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     state.chat_lock.acquire()
     try:
         for response in response_iterator:
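For context on the call being changed, here is a minimal standalone sketch of how the gpt4all Python bindings' streaming generate is typically used, with the same generate() parameters as the line above. It is not Khoj code: the prompt string is a placeholder and it assumes the model file has already been downloaded locally.

# Standalone sketch (not Khoj code): stream tokens from a local Llama 2 model
# via the gpt4all Python bindings, mirroring the parameters in the diff above.
from gpt4all import GPT4All

model = GPT4All("llama-2-7b-chat.ggmlv3.q4_K_S.bin")  # assumes the model file is available locally
prompt = "You are a helpful assistant.\nUser: What is the capital of France?\nAssistant:"

# streaming=True returns a generator that yields the response token by token;
# max_tokens=500 caps the response so prompt + response stay within the context window.
for token in model.generate(prompt, streaming=True, max_tokens=500, n_batch=256):
    print(token, end="", flush=True)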


@@ -14,7 +14,7 @@ import queue
 from khoj.utils.helpers import merge_dicts
 logger = logging.getLogger(__name__)
-max_prompt_size = {"gpt-3.5-turbo": 4096, "gpt-4": 8192, "llama-2-7b-chat.ggmlv3.q4_K_S.bin": 2048}
+max_prompt_size = {"gpt-3.5-turbo": 4096, "gpt-4": 8192, "llama-2-7b-chat.ggmlv3.q4_K_S.bin": 1548}
 tokenizer = {"llama-2-7b-chat.ggmlv3.q4_K_S.bin": "hf-internal-testing/llama-tokenizer"}
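Together, the two changes keep a full Llama 2 request inside the model's 2048-token context window: the assembled prompt is now capped at 1548 tokens and the generated response at 500 tokens, so their sum no longer overflows the window. Below is a minimal budget check in the spirit of the regression test mentioned in the commit message; the constants mirror this diff, but the test name and structure are illustrative, not the actual test added in this commit.

# Illustrative budget check (not the actual regression test added in this commit).
# The constants mirror the values in the diff above.
LLAMA_2_CONTEXT_WINDOW = 2048  # context size of llama-2-7b-chat.ggmlv3.q4_K_S.bin
MAX_PROMPT_SIZE = 1548         # new cap on the assembled chat prompt
MAX_RESPONSE_TOKENS = 500      # new max_tokens passed to model.generate()


def test_prompt_plus_response_fits_in_context_window():
    # If prompt budget plus generation budget exceeded the context window,
    # chat would hit the "prompt size exceeded context window" error this commit guards against.
    assert MAX_PROMPT_SIZE + MAX_RESPONSE_TOKENS <= LLAMA_2_CONTEXT_WINDOW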