Improve query speed: normalize embeddings and move them to CUDA GPU

- Move embeddings to CUDA GPU for compute, when available
- Normalize embeddings and use dot product instead of cosine similarity
This commit is contained in:
Debanjum Singh Solanky
2022-06-30 00:59:57 +04:00
parent 2f7ef08b11
commit eda4b65ddb
3 changed files with 21 additions and 13 deletions

View File

@@ -1,5 +1,6 @@
# Standard Packages
import pytest
import torch
# Internal Packages
from src.search_type import asymmetric, image_search
@@ -35,6 +36,7 @@ def search_config(tmp_path_factory):
@pytest.fixture(scope='session')
def model_dir(search_config):
model_dir = search_config.asymmetric.model_directory
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# Generate Image Embeddings from Test Images
content_config = ContentConfig()
@@ -53,7 +55,7 @@ def model_dir(search_config):
compressed_jsonl = model_dir.joinpath('notes.jsonl.gz'),
embeddings_file = model_dir.joinpath('note_embeddings.pt'))
asymmetric.setup(content_config.org, search_config.asymmetric, regenerate=False, verbose=True)
asymmetric.setup(content_config.org, search_config.asymmetric, regenerate=False, device=device, verbose=True)
return model_dir