Bump GPT4All response generation batch size to 512 from 256
A batch size of 512 performs ~20% better on an XPS with no GPU and 16GB RAM. Seems worth the tradeoff for now.
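For context, n_batch roughly controls how many prompt tokens the llama.cpp-backed model evaluates per step, so a larger batch trades some memory for fewer evaluation passes on CPU. A minimal timing sketch along these lines could reproduce the comparison; the model file name and prompt below are placeholders, not from this commit:

# Hypothetical micro-benchmark, not part of this commit: times generation
# at both batch sizes to sanity-check the ~20% claim on a given machine.
import time

from gpt4all import GPT4All

model = GPT4All("llama-2-7b-chat.ggmlv3.q4_0.bin")  # placeholder model file
prompt = "Extract search questions from this note: ..."  # placeholder prompt

for n_batch in (256, 512):
    start = time.perf_counter()
    # Mirrors the generate() call in the diff below, varying only n_batch
    model.generate(prompt, max_tokens=200, top_k=2, temp=0, n_batch=n_batch)
    print(f"n_batch={n_batch}: {time.perf_counter() - start:.1f}s")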
@@ -61,7 +61,7 @@ def extract_questions_offline(
     message = system_prompt + example_questions
     state.chat_lock.acquire()
     try:
-        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=256)
+        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=512)
     finally:
         state.chat_lock.release()
 
@@ -167,7 +167,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     prompted_message = templated_system_message + chat_history + templated_user_message
 
     state.chat_lock.acquire()
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=512)
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):