Use multiple threads to generate model embeddings. Other minor formatting

This commit is contained in:
Debanjum Singh Solanky
2021-09-29 20:47:58 -07:00
parent e22e0b41e3
commit 352d2930ee
3 changed files with 5 additions and 0 deletions

View File

@@ -22,6 +22,7 @@ from utils.config import AsymmetricSearchModel
def initialize_model():
"Initialize model for asymmetric semantic search. That is, where query smaller than results"
torch.set_num_threads(4)
bi_encoder = SentenceTransformer('sentence-transformers/msmarco-MiniLM-L-6-v3') # The bi-encoder encodes all entries to use for semantic search
top_k = 100 # Number of entries we want to retrieve with the bi-encoder
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2') # The cross-encoder re-ranks the results to improve quality

View File

@@ -19,6 +19,7 @@ from processor.ledger.beancount_to_jsonl import beancount_to_jsonl
def initialize_model():
"Initialize model for symmetric semantic search. That is, where query of similar size to results"
torch.set_num_threads(4)
bi_encoder = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L6-v2') # The encoder encodes all entries to use for semantic search
top_k = 100 # Number of entries we want to retrieve with the bi-encoder
cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2') # The cross-encoder re-ranks the results to improve quality