def download_model(model_name: str):
    """Download (if needed) and load a GPT4All chat model.

    Tries to load the model onto the GPU first (GPT4All supports GPU
    inference via Vulkan); if no usable GPU device is found, falls back
    to loading the model on CPU.

    Args:
        model_name: Name of the GPT4All model file to download/load.

    Returns:
        A loaded ``GPT4All`` model instance.

    Raises:
        ModuleNotFoundError: If the ``gpt4all`` package is not installed.
    """
    # NOTE(review): the except header was reconstructed from the patch's
    # context lines (`logger.info(...)` / `raise e`) — confirm the original
    # catches ModuleNotFoundError and not a broader ImportError.
    try:
        from gpt4all import GPT4All
    except ModuleNotFoundError:
        logger.info("There was an error importing GPT4All. Please run pip install gpt4all in order to install it.")
        # Bare `raise` re-raises the active exception with its original
        # traceback intact; `raise e` was redundant.
        raise

    # Use GPU for Chat Model, if available. GPT4All raises ValueError when
    # device="gpu" is requested but no usable Vulkan device exists, which is
    # the signal to fall back to CPU.
    try:
        model = GPT4All(model_name=model_name, device="gpu")
        logger.debug("Loaded chat model to GPU.")
    except ValueError:
        model = GPT4All(model_name=model_name)
        logger.debug("Loaded chat model to CPU.")

    return model