Run GPT4All Chat Model on GPU, when available

GPT4All now supports running models on GPU via Vulkan
This commit is contained in:
Debanjum Singh Solanky
2023-10-04 18:42:12 -07:00
parent 13b16a4364
commit d1ff812021

View File

@@ -11,4 +11,12 @@ def download_model(model_name: str):
logger.info("There was an error importing GPT4All. Please run pip install gpt4all in order to install it.")
raise e
-    return GPT4All(model_name=model_name)
+    # Use GPU for Chat Model, if available
try:
model = GPT4All(model_name=model_name, device="gpu")
logger.debug("Loaded chat model to GPU.")
except ValueError:
model = GPT4All(model_name=model_name)
logger.debug("Loaded chat model to CPU.")
return model