mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-07 13:23:15 +00:00
Run GPT4All Chat Model on GPU, when available
GPT4All now supports running models on GPU via Vulkan
This commit is contained in:
@@ -11,4 +11,12 @@ def download_model(model_name: str):
         logger.info("There was an error importing GPT4All. Please run pip install gpt4all in order to install it.")
         raise e

-    return GPT4All(model_name=model_name)
+    # Use GPU for Chat Model, if available
+    try:
+        model = GPT4All(model_name=model_name, device="gpu")
+        logger.debug("Loaded chat model to GPU.")
+    except ValueError:
+        model = GPT4All(model_name=model_name)
+        logger.debug("Loaded chat model to CPU.")
+
+    return model
Reference in New Issue
Block a user