diff --git a/src/configure.py b/src/configure.py index 89b94cf3..57517883 100644 --- a/src/configure.py +++ b/src/configure.py @@ -88,6 +88,9 @@ def configure_search(model: SearchModels, config: FullConfig, regenerate: bool, search_config=config.search_type.image, regenerate=regenerate) + # Invalidate Query Cache + state.query_cache.clear() + return model diff --git a/src/router.py b/src/router.py index a4bd2f84..9d53a00e 100644 --- a/src/router.py +++ b/src/router.py @@ -4,7 +4,6 @@ import json import time import logging from typing import Optional -from functools import lru_cache # External Packages from fastapi import APIRouter @@ -25,6 +24,7 @@ from src.utils import state, constants router = APIRouter() templates = Jinja2Templates(directory=constants.web_directory) logger = logging.getLogger(__name__) +query_cache = {} @router.get("/", response_class=FileResponse) @@ -48,18 +48,23 @@ async def config_data(updated_config: FullConfig): return state.config @router.get('/search') -@lru_cache(maxsize=100) def search(q: str, n: Optional[int] = 5, t: Optional[SearchType] = None, r: Optional[bool] = False): if q is None or q == '': logger.info(f'No query param (q) passed in API call to initiate search') return {} # initialize variables - user_query = q + user_query = q.strip() results_count = n results = {} query_start, query_end, collate_start, collate_end = None, None, None, None + # return cached results, if available + query_cache_key = f'{user_query}-{n}-{t}-{r}' + if query_cache_key in state.query_cache: + logger.info(f'Return response from query cache') + return state.query_cache[query_cache_key] + if (t == SearchType.Org or t == None) and state.model.orgmode_search: # query org-mode notes query_start = time.time() @@ -121,6 +126,9 @@ def search(q: str, n: Optional[int] = 5, t: Optional[SearchType] = None, r: Opti count=results_count) collate_end = time.time() + # Cache results + state.query_cache[query_cache_key] = results + if query_start and query_end:
logger.debug(f"Query took {query_end - query_start:.3f} seconds") if collate_start and collate_end: diff --git a/src/utils/state.py b/src/utils/state.py index b5c082d6..f3ceda00 100644 --- a/src/utils/state.py +++ b/src/utils/state.py @@ -6,6 +6,7 @@ from pathlib import Path # Internal Packages from src.utils.config import SearchModels, ProcessorConfigModel +from src.utils.helpers import LRU from src.utils.rawconfig import FullConfig # Application Global State @@ -17,6 +18,7 @@ verbose: int = 0 host: str = None port: int = None cli_args = None +query_cache = LRU() if torch.cuda.is_available(): # Use CUDA GPU