mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-09 21:29:11 +00:00
Use MPS on Apple Mac M1 to GPU accelerate Encode, Query Performance
- Note: MPS support in PyTorch is currently available only in the v1.13.0 nightly builds. Users will have to wait for PyTorch MPS support to land in stable builds. Until then, the code can be tweaked and tested to make use of GPU acceleration on newer Macs.
This commit is contained in:
@@ -1,3 +1,5 @@
|
|||||||
|
# Standard Packages
|
||||||
|
from packaging import version
|
||||||
# External Packages
|
# External Packages
|
||||||
import torch
|
import torch
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
@@ -12,7 +14,15 @@ model = SearchModels()
|
|||||||
processor_config = ProcessorConfigModel()
|
processor_config = ProcessorConfigModel()
|
||||||
config_file: Path = ""
|
config_file: Path = ""
|
||||||
verbose: int = 0
|
verbose: int = 0
|
||||||
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") # Set device to GPU if available
|
|
||||||
host: str = None
|
host: str = None
|
||||||
port: int = None
|
port: int = None
|
||||||
cli_args = None
|
cli_args = None
|
||||||
|
|
||||||
|
# Select the best available compute device for encode/query workloads.
# Preference order: CUDA GPU > Apple Silicon MPS > CPU.
if torch.cuda.is_available():
    # Use CUDA GPU acceleration when an NVIDIA GPU is present
    device = torch.device("cuda:0")
elif version.parse(torch.__version__) >= version.parse("1.13.0.dev") and torch.backends.mps.is_available():
    # Use Apple M1 Metal acceleration. The version gate matters: the
    # torch.backends.mps module only exists in newer PyTorch builds, and
    # `and` short-circuits so it is never touched on older versions.
    device = torch.device("mps")
else:
    # Fall back to CPU when no supported accelerator is available
    device = torch.device("cpu")
|
||||||
|
|||||||
Reference in New Issue
Block a user