Use app LRU, not functools LRU decorator, to cache search results in router

- Provides more control to invalidate the cache on updates to entries or embeddings
- Allows logging when results are returned from the cache, etc.
- FastAPI, Swagger API docs look better as the `search` controller is not
  wrapped in a generically named function when using the functools LRU decorator
This commit is contained in:
Debanjum Singh Solanky
2022-09-12 09:28:49 +03:00
parent c6fa09d8fc
commit 940c8fac8c
3 changed files with 16 additions and 3 deletions

View File

@@ -88,6 +88,9 @@ def configure_search(model: SearchModels, config: FullConfig, regenerate: bool,
search_config=config.search_type.image,
regenerate=regenerate)
# Invalidate Query Cache
state.query_cache = {}
return model

View File

@@ -4,7 +4,6 @@ import json
import time
import logging
from typing import Optional
from functools import lru_cache
# External Packages
from fastapi import APIRouter
@@ -25,6 +24,7 @@ from src.utils import state, constants
router = APIRouter()
templates = Jinja2Templates(directory=constants.web_directory)
logger = logging.getLogger(__name__)
query_cache = {}
@router.get("/", response_class=FileResponse)
@@ -48,18 +48,23 @@ async def config_data(updated_config: FullConfig):
return state.config
@router.get('/search')
@lru_cache(maxsize=100)
def search(q: str, n: Optional[int] = 5, t: Optional[SearchType] = None, r: Optional[bool] = False):
if q is None or q == '':
logger.info(f'No query param (q) passed in API call to initiate search')
return {}
# initialize variables
user_query = q
user_query = q.strip()
results_count = n
results = {}
query_start, query_end, collate_start, collate_end = None, None, None, None
# return cached results, if available
query_cache_key = f'{user_query}-{n}-{t}-{r}'
if query_cache_key in state.query_cache:
logger.info(f'Return response from query cache')
return state.query_cache[query_cache_key]
if (t == SearchType.Org or t == None) and state.model.orgmode_search:
# query org-mode notes
query_start = time.time()
@@ -121,6 +126,9 @@ def search(q: str, n: Optional[int] = 5, t: Optional[SearchType] = None, r: Opti
count=results_count)
collate_end = time.time()
# Cache results
state.query_cache[query_cache_key] = results
if query_start and query_end:
logger.debug(f"Query took {query_end - query_start:.3f} seconds")
if collate_start and collate_end:

View File

@@ -6,6 +6,7 @@ from pathlib import Path
# Internal Packages
from src.utils.config import SearchModels, ProcessorConfigModel
from src.utils.helpers import LRU
from src.utils.rawconfig import FullConfig
# Application Global State
@@ -17,6 +18,7 @@ verbose: int = 0
host: str = None
port: int = None
cli_args = None
query_cache = LRU()
if torch.cuda.is_available():
# Use CUDA GPU