mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-07 21:29:13 +00:00
Version Khoj API, Update frontends, tests and docs to reflect it
- Split router.py into v1.0, beta and frontend (no-prefix) api modules under new router package. Version tag in main.py via prefix - Update frontends to use the versioned api endpoints - Update tests to work with versioned api endpoints - Update docs to mention and reference only versioned api endpoints
This commit is contained in:
@@ -226,7 +226,7 @@ Use `which-key` if available, else display simple message in echo area"
|
||||
|
||||
(defun khoj--get-enabled-content-types ()
|
||||
"Get content types enabled for search from API."
|
||||
(let ((config-url (format "%s/config/data" khoj-server-url)))
|
||||
(let ((config-url (format "%s/api/v1.0/config/data" khoj-server-url)))
|
||||
(with-temp-buffer
|
||||
(erase-buffer)
|
||||
(url-insert-file-contents config-url)
|
||||
@@ -243,7 +243,7 @@ Use `which-key` if available, else display simple message in echo area"
|
||||
"Construct API Query from QUERY, SEARCH-TYPE and (optional) RERANK params."
|
||||
(let ((rerank (or rerank "false"))
|
||||
(encoded-query (url-hexify-string query)))
|
||||
(format "%s/search?q=%s&t=%s&r=%s&n=%s" khoj-server-url encoded-query search-type rerank khoj-results-count)))
|
||||
(format "%s/api/v1.0/search?q=%s&t=%s&r=%s&n=%s" khoj-server-url encoded-query search-type rerank khoj-results-count)))
|
||||
|
||||
(defun khoj--query-api-and-render-results (query search-type query-url buffer-name)
|
||||
"Query Khoj API using QUERY, SEARCH-TYPE, QUERY-URL.
|
||||
|
||||
@@ -10,7 +10,7 @@ var emptyValueDefault = "🖊️";
|
||||
/**
|
||||
* Fetch the existing config file.
|
||||
*/
|
||||
fetch("/config/data")
|
||||
fetch("/api/v1.0/config/data")
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
rawConfig = data;
|
||||
@@ -26,7 +26,7 @@ fetch("/config/data")
|
||||
configForm.addEventListener("submit", (event) => {
|
||||
event.preventDefault();
|
||||
console.log(rawConfig);
|
||||
fetch("/config/data", {
|
||||
fetch("/api/v1.0/config/data", {
|
||||
method: "POST",
|
||||
credentials: "same-origin",
|
||||
headers: {
|
||||
@@ -46,7 +46,7 @@ regenerateButton.addEventListener("click", (event) => {
|
||||
event.preventDefault();
|
||||
regenerateButton.style.cursor = "progress";
|
||||
regenerateButton.disabled = true;
|
||||
fetch("/regenerate")
|
||||
fetch("/api/v1.0/update?force=true")
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
regenerateButton.style.cursor = "pointer";
|
||||
|
||||
@@ -77,8 +77,8 @@
|
||||
|
||||
// Generate Backend API URL to execute Search
|
||||
url = type === "image"
|
||||
? `/search?q=${encodeURIComponent(query)}&t=${type}&n=${results_count}`
|
||||
: `/search?q=${encodeURIComponent(query)}&t=${type}&n=${results_count}&r=${rerank}`;
|
||||
? `/api/v1.0/search?q=${encodeURIComponent(query)}&t=${type}&n=${results_count}`
|
||||
: `/api/v1.0/search?q=${encodeURIComponent(query)}&t=${type}&n=${results_count}&r=${rerank}`;
|
||||
|
||||
// Execute Search and Render Results
|
||||
fetch(url)
|
||||
@@ -94,7 +94,7 @@
|
||||
|
||||
function updateIndex() {
|
||||
type = document.getElementById("type").value;
|
||||
fetch(`/reload?t=${type}`)
|
||||
fetch(`/api/v1.0/update?t=${type}`)
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
console.log(data);
|
||||
@@ -118,7 +118,7 @@
|
||||
function populate_type_dropdown() {
|
||||
// Populate type dropdown field with enabled search types only
|
||||
var possible_search_types = ["org", "markdown", "ledger", "music", "image"];
|
||||
fetch("/config/data")
|
||||
fetch("/api/v1.0/config/data")
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
document.getElementById("type").innerHTML =
|
||||
|
||||
@@ -19,7 +19,9 @@ from PyQt6.QtCore import QThread, QTimer
|
||||
|
||||
# Internal Packages
|
||||
from src.configure import configure_server
|
||||
from src.router import router
|
||||
from src.routers.api_v1_0 import api_v1_0
|
||||
from src.routers.api_beta import api_beta
|
||||
from src.routers.frontend import frontend_router
|
||||
from src.utils import constants, state
|
||||
from src.utils.cli import cli
|
||||
from src.interface.desktop.main_window import MainWindow
|
||||
@@ -29,7 +31,9 @@ from src.interface.desktop.system_tray import create_system_tray
|
||||
# Initialize the Application Server
|
||||
app = FastAPI()
|
||||
app.mount("/static", StaticFiles(directory=constants.web_directory), name="static")
|
||||
app.include_router(router)
|
||||
app.include_router(api_v1_0, prefix="/api/v1.0")
|
||||
app.include_router(api_beta, prefix="/api/beta")
|
||||
app.include_router(frontend_router)
|
||||
|
||||
logger = logging.getLogger('src')
|
||||
|
||||
|
||||
89
src/routers/api_beta.py
Normal file
89
src/routers/api_beta.py
Normal file
@@ -0,0 +1,89 @@
|
||||
# Standard Packages
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
# External Packages
|
||||
from fastapi import APIRouter
|
||||
|
||||
# Internal Packages
|
||||
from src.routers.api_v1_0 import search
|
||||
from src.processor.conversation.gpt import converse, extract_search_type, message_to_log, message_to_prompt, understand, summarize
|
||||
from src.utils.config import SearchType
|
||||
from src.utils.helpers import get_absolute_path, get_from_dict
|
||||
from src.utils import state
|
||||
|
||||
|
||||
api_beta = APIRouter()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@api_beta.get('/search')
def search_beta(q: str, n: Optional[int] = 1):
    """Run a search whose content type is inferred from the query via GPT.

    Asks GPT which search type the query Q targets, then delegates to the
    regular search endpoint with that type. Returns the results plus the
    inferred type.
    """
    # Ask GPT to classify the query into one of the supported search types
    metadata = extract_search_type(q, api_key=state.processor_config.conversation.openai_api_key, verbose=state.verbose)
    inferred_type = get_from_dict(metadata, "search-type")

    # Delegate to the standard search with the inferred content type
    results = search(q, n=n, t=SearchType(inferred_type))

    return {'status': 'ok', 'result': results, 'type': inferred_type}
|
||||
|
||||
|
||||
@api_beta.get('/chat')
def chat(q: str):
    """Chat with Khoj: answer from notes when intent targets them, else freeform GPT chat.

    Extracts intent metadata from query `q` via GPT. When the intent's
    memory-type is "notes", semantically similar org-mode notes are retrieved
    and summarized as the response; otherwise the query continues the
    open-ended chat session. The chat session and conversation metadata log in
    application state are updated in place as a side effect.
    """
    # Load Conversation History
    chat_session = state.processor_config.conversation.chat_session
    meta_log = state.processor_config.conversation.meta_log

    # Infer user intent behind the query with OpenAI GPT
    metadata = understand(q, api_key=state.processor_config.conversation.openai_api_key, verbose=state.verbose)
    if state.verbose > 1:
        # Use the module logger rather than print, consistent with the rest of the app
        logger.debug(f'Understood: {get_from_dict(metadata, "intent")}')

    if get_from_dict(metadata, "intent", "memory-type") == "notes":
        # Answer from the user's notes: search, then summarize the hits
        query = get_from_dict(metadata, "intent", "query")
        result_list = search(query, n=1, t=SearchType.Org)
        collated_result = "\n".join([item["entry"] for item in result_list])
        if state.verbose > 1:
            logger.debug(f'Semantically Similar Notes:\n{collated_result}')
        gpt_response = summarize(collated_result, summary_type="notes", user_query=q, api_key=state.processor_config.conversation.openai_api_key)
    else:
        # Freeform conversation, continuing the existing chat session
        gpt_response = converse(q, chat_session, api_key=state.processor_config.conversation.openai_api_key)

    # Update Conversation History
    state.processor_config.conversation.chat_session = message_to_prompt(q, chat_session, gpt_message=gpt_response)
    state.processor_config.conversation.meta_log['chat'] = message_to_log(q, metadata, gpt_response, meta_log.get('chat', []))

    return {'status': 'ok', 'response': gpt_response}
|
||||
|
||||
|
||||
@api_beta.on_event('shutdown')
def shutdown_event():
    """Summarize the current chat session and persist conversation logs to disk.

    Registered to run on server shutdown. Returns early when no conversation
    took place, so an empty log file is never created.
    """
    # No need to create empty log file
    if not (state.processor_config and state.processor_config.conversation and state.processor_config.conversation.meta_log):
        return
    elif state.processor_config.conversation.verbose:
        # Use the module logger rather than print, consistent with the rest of the app
        logger.info('Saving conversation logs to disk...')

    # Summarize Conversation Logs for this Session
    chat_session = state.processor_config.conversation.chat_session
    openai_api_key = state.processor_config.conversation.openai_api_key
    conversation_log = state.processor_config.conversation.meta_log
    session = {
        "summary": summarize(chat_session, summary_type="chat", api_key=openai_api_key),
        # Resume from where the previous session ended; 0 when this is the first session
        "session-start": conversation_log.get("session", [{"session-end": 0}])[-1]["session-end"],
        # .get() guards against a meta log that has sessions but no 'chat' entries yet
        "session-end": len(conversation_log.get("chat", []))
    }
    if 'session' in conversation_log:
        conversation_log['session'].append(session)
    else:
        conversation_log['session'] = [session]

    # Save Conversation Metadata Logs to Disk ("w" suffices; the file is only written)
    conversation_logfile = get_absolute_path(state.processor_config.conversation.conversation_logfile)
    with open(conversation_logfile, "w", encoding='utf-8') as logfile:
        json.dump(conversation_log, logfile)

    logger.info('Conversation logs saved to disk.')
|
||||
@@ -1,45 +1,29 @@
|
||||
# Standard Packages
|
||||
import yaml
|
||||
import json
|
||||
import time
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
# External Packages
|
||||
from fastapi import APIRouter
|
||||
from fastapi import Request
|
||||
from fastapi.responses import HTMLResponse, FileResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
|
||||
# Internal Packages
|
||||
from src.configure import configure_search
|
||||
from src.search_type import image_search, text_search
|
||||
from src.processor.conversation.gpt import converse, extract_search_type, message_to_log, message_to_prompt, understand, summarize
|
||||
from src.utils.rawconfig import FullConfig
|
||||
from src.utils.config import SearchType
|
||||
from src.utils.helpers import LRU, get_absolute_path, get_from_dict
|
||||
from src.utils import state, constants
|
||||
|
||||
|
||||
router = APIRouter()
|
||||
templates = Jinja2Templates(directory=constants.web_directory)
|
||||
api_v1_0 = APIRouter()
|
||||
logger = logging.getLogger(__name__)
|
||||
query_cache = LRU()
|
||||
|
||||
|
||||
@router.get("/", response_class=FileResponse)
|
||||
def index():
|
||||
return FileResponse(constants.web_directory / "index.html")
|
||||
|
||||
@router.get('/config', response_class=HTMLResponse)
|
||||
def config_page(request: Request):
|
||||
return templates.TemplateResponse("config.html", context={'request': request})
|
||||
|
||||
@router.get('/config/data', response_model=FullConfig)
|
||||
@api_v1_0.get('/config/data', response_model=FullConfig)
|
||||
def config_data():
|
||||
return state.config
|
||||
|
||||
@router.post('/config/data')
|
||||
@api_v1_0.post('/config/data')
|
||||
async def config_data(updated_config: FullConfig):
|
||||
state.config = updated_config
|
||||
with open(state.config_file, 'w') as outfile:
|
||||
@@ -47,7 +31,7 @@ async def config_data(updated_config: FullConfig):
|
||||
outfile.close()
|
||||
return state.config
|
||||
|
||||
@router.get('/search')
|
||||
@api_v1_0.get('/search')
|
||||
def search(q: str, n: Optional[int] = 5, t: Optional[SearchType] = None, r: Optional[bool] = False):
|
||||
if q is None or q == '':
|
||||
logger.info(f'No query param (q) passed in API call to initiate search')
|
||||
@@ -137,78 +121,7 @@ def search(q: str, n: Optional[int] = 5, t: Optional[SearchType] = None, r: Opti
|
||||
return results
|
||||
|
||||
|
||||
@api_v1_0.get('/update')
def update(t: Optional[SearchType] = None, force: Optional[bool] = False):
    """Update the search index for content type `t` (all enabled types when None).

    Pass `force=true` to regenerate the index from scratch instead of an
    incremental update.
    """
    state.model = configure_search(state.model, state.config, regenerate=force, t=t)
    # Message was previously the garbled 'index updated completed'
    return {'status': 'ok', 'message': 'index update completed'}
|
||||
|
||||
|
||||
@router.get('/beta/search')
|
||||
def search_beta(q: str, n: Optional[int] = 1):
|
||||
# Extract Search Type using GPT
|
||||
metadata = extract_search_type(q, api_key=state.processor_config.conversation.openai_api_key, verbose=state.verbose)
|
||||
search_type = get_from_dict(metadata, "search-type")
|
||||
|
||||
# Search
|
||||
search_results = search(q, n=n, t=SearchType(search_type))
|
||||
|
||||
# Return response
|
||||
return {'status': 'ok', 'result': search_results, 'type': search_type}
|
||||
|
||||
|
||||
@router.get('/beta/chat')
|
||||
def chat(q: str):
|
||||
# Load Conversation History
|
||||
chat_session = state.processor_config.conversation.chat_session
|
||||
meta_log = state.processor_config.conversation.meta_log
|
||||
|
||||
# Converse with OpenAI GPT
|
||||
metadata = understand(q, api_key=state.processor_config.conversation.openai_api_key, verbose=state.verbose)
|
||||
if state.verbose > 1:
|
||||
print(f'Understood: {get_from_dict(metadata, "intent")}')
|
||||
|
||||
if get_from_dict(metadata, "intent", "memory-type") == "notes":
|
||||
query = get_from_dict(metadata, "intent", "query")
|
||||
result_list = search(query, n=1, t=SearchType.Org)
|
||||
collated_result = "\n".join([item["entry"] for item in result_list])
|
||||
if state.verbose > 1:
|
||||
print(f'Semantically Similar Notes:\n{collated_result}')
|
||||
gpt_response = summarize(collated_result, summary_type="notes", user_query=q, api_key=state.processor_config.conversation.openai_api_key)
|
||||
else:
|
||||
gpt_response = converse(q, chat_session, api_key=state.processor_config.conversation.openai_api_key)
|
||||
|
||||
# Update Conversation History
|
||||
state.processor_config.conversation.chat_session = message_to_prompt(q, chat_session, gpt_message=gpt_response)
|
||||
state.processor_config.conversation.meta_log['chat'] = message_to_log(q, metadata, gpt_response, meta_log.get('chat', []))
|
||||
|
||||
return {'status': 'ok', 'response': gpt_response}
|
||||
|
||||
|
||||
@router.on_event('shutdown')
|
||||
def shutdown_event():
|
||||
# No need to create empty log file
|
||||
if not (state.processor_config and state.processor_config.conversation and state.processor_config.conversation.meta_log):
|
||||
return
|
||||
elif state.processor_config.conversation.verbose:
|
||||
print('INFO:\tSaving conversation logs to disk...')
|
||||
|
||||
# Summarize Conversation Logs for this Session
|
||||
chat_session = state.processor_config.conversation.chat_session
|
||||
openai_api_key = state.processor_config.conversation.openai_api_key
|
||||
conversation_log = state.processor_config.conversation.meta_log
|
||||
session = {
|
||||
"summary": summarize(chat_session, summary_type="chat", api_key=openai_api_key),
|
||||
"session-start": conversation_log.get("session", [{"session-end": 0}])[-1]["session-end"],
|
||||
"session-end": len(conversation_log["chat"])
|
||||
}
|
||||
if 'session' in conversation_log:
|
||||
conversation_log['session'].append(session)
|
||||
else:
|
||||
conversation_log['session'] = [session]
|
||||
|
||||
# Save Conversation Metadata Logs to Disk
|
||||
conversation_logfile = get_absolute_path(state.processor_config.conversation.conversation_logfile)
|
||||
with open(conversation_logfile, "w+", encoding='utf-8') as logfile:
|
||||
json.dump(conversation_log, logfile)
|
||||
|
||||
print('INFO:\tConversation logs saved to disk.')
|
||||
return {'status': 'ok', 'message': 'index updated'}
|
||||
25
src/routers/frontend.py
Normal file
25
src/routers/frontend.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# Standard Packages
|
||||
import logging
|
||||
|
||||
# External Packages
|
||||
from fastapi import APIRouter
|
||||
from fastapi import Request
|
||||
from fastapi.responses import HTMLResponse, FileResponse
|
||||
from fastapi.templating import Jinja2Templates
|
||||
|
||||
# Internal Packages
|
||||
from src.utils import constants
|
||||
|
||||
|
||||
frontend_router = APIRouter()
|
||||
templates = Jinja2Templates(directory=constants.web_directory)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@frontend_router.get("/", response_class=FileResponse)
def index():
    """Serve the web interface's home page."""
    index_file = constants.web_directory / "index.html"
    return FileResponse(index_file)
|
||||
|
||||
@frontend_router.get('/config', response_class=HTMLResponse)
def config_page(request: Request):
    """Render the configuration page from its Jinja2 template."""
    context = {'request': request}
    return templates.TemplateResponse("config.html", context=context)
|
||||
Reference in New Issue
Block a user