diff --git a/.github/workflows/github_pages_deploy.yml b/.github/workflows/github_pages_deploy.yml index 13d15269..9e02c97f 100644 --- a/.github/workflows/github_pages_deploy.yml +++ b/.github/workflows/github_pages_deploy.yml @@ -1,4 +1,4 @@ -name: build and deploy github pages for documentation +name: deploy documentation on: push: branches: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1f5dfc6f..2c6f087c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,7 +26,7 @@ jobs: test: name: Run Tests runs-on: ubuntu-latest - container: ubuntu:jammy + container: ubuntu:latest strategy: fail-fast: false matrix: @@ -67,19 +67,19 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run : | - apt install -y postgresql postgresql-client && apt install -y postgresql-server-dev-14 + apt install -y postgresql postgresql-client && apt install -y postgresql-server-dev-16 - name: โฌ‡๏ธ Install pip run: | apt install -y python3-pip - python -m ensurepip --upgrade - python -m pip install --upgrade pip + python3 -m ensurepip --upgrade + python3 -m pip install --upgrade pip - name: โฌ‡๏ธ Install Application env: PIP_EXTRA_INDEX_URL: "https://download.pytorch.org/whl/cpu https://abetlen.github.io/llama-cpp-python/whl/cpu" CUDA_VISIBLE_DEVICES: "" - run: sed -i 's/dynamic = \["version"\]/version = "0.0.0"/' pyproject.toml && pip install --upgrade .[dev] + run: sed -i 's/dynamic = \["version"\]/version = "0.0.0"/' pyproject.toml && pip install --break-system-packages --upgrade .[dev] - name: ๐Ÿงช Test Application env: diff --git a/Dockerfile b/Dockerfile index fab572cc..f7f67e9e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -58,6 +58,6 @@ RUN cd src && python3 khoj/manage.py collectstatic --noinput # Run the Application # There are more arguments required for the application to run, # but those should be passed in through the docker-compose.yml file. 
-ARG PORT +ARG PORT=42110 EXPOSE ${PORT} ENTRYPOINT ["python3", "src/khoj/main.py"] diff --git a/docker-compose.yml b/docker-compose.yml index da25558e..053dbbbe 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,6 @@ services: database: image: ankane/pgvector - restart: always environment: POSTGRES_USER: postgres POSTGRES_PASSWORD: postgres @@ -15,10 +14,8 @@ services: retries: 5 sandbox: image: ghcr.io/khoj-ai/terrarium:latest - restart: unless-stopped search: image: docker.io/searxng/searxng:latest - restart: unless-stopped volumes: - khoj_search:/etc/searxng environment: @@ -29,7 +26,6 @@ services: condition: service_healthy # Use the following line to use the latest version of khoj. Otherwise, it will build from source. Set this to ghcr.io/khoj-ai/khoj-cloud:latest if you want to use the prod image. image: ghcr.io/khoj-ai/khoj:latest - restart: unless-stopped # Uncomment the following line to build from source. This will take a few minutes. Comment the next two lines out if you want to use the official image. # build: # context: . @@ -63,7 +59,7 @@ services: - KHOJ_SEARXNG_URL=http://search:8080 # Uncomment line below to use with Ollama running on your local machine at localhost:11434. # Change URL to use with other OpenAI API compatible providers like VLLM, LMStudio etc. - # - OPENAI_API_BASE=http://host.docker.internal:11434/v1/ + # - OPENAI_BASE_URL=http://host.docker.internal:11434/v1/ # # Uncomment appropriate lines below to use chat models by OpenAI, Anthropic, Google. # Ensure you set your provider specific API keys. diff --git a/documentation/docs/advanced/lmstudio.md b/documentation/docs/advanced/lmstudio.md index 1ecd7f06..59eb0592 100644 --- a/documentation/docs/advanced/lmstudio.md +++ b/documentation/docs/advanced/lmstudio.md @@ -14,14 +14,14 @@ LM Studio can expose an [OpenAI API compatible server](https://lmstudio.ai/docs/ ## Setup 1. Install [LM Studio](https://lmstudio.ai/) and download your preferred Chat Model 2. 
Go to the Server Tab on LM Studio, Select your preferred Chat Model and Click the green Start Server button -3. Create a new [OpenAI Processor Conversation Config](http://localhost:42110/server/admin/database/openaiprocessorconversationconfig/add) on your Khoj admin panel +3. Create a new [Add ai model api](http://localhost:42110/server/admin/database/aimodelapi/add/) on your Khoj admin panel - Name: `proxy-name` - Api Key: `any string` - Api Base Url: `http://localhost:1234/v1/` (default for LMStudio) 4. Create a new [Chat Model](http://localhost:42110/server/admin/database/chatmodel/add) on your Khoj admin panel. - Name: `llama3.1` (replace with the name of your local model) - Model Type: `Openai` - - Openai Config: `` + - Ai model api: `` - Max prompt size: `20000` (replace with the max prompt size of your model) - Tokenizer: *Do not set for OpenAI, mistral, llama3 based models* 5. Go to [your config](http://localhost:42110/settings) and select the model you just created in the chat model dropdown. diff --git a/documentation/docs/advanced/ollama.mdx b/documentation/docs/advanced/ollama.mdx index 486357e8..1140f33b 100644 --- a/documentation/docs/advanced/ollama.mdx +++ b/documentation/docs/advanced/ollama.mdx @@ -32,7 +32,7 @@ Restart your Khoj server after first run or update to the settings below to ensu ```bash ollama pull llama3.1 ``` - 3. Uncomment `OPENAI_API_BASE` environment variable in your downloaded Khoj [docker-compose.yml](https://github.com/khoj-ai/khoj/blob/master/docker-compose.yml#:~:text=OPENAI_API_BASE) + 3. Uncomment `OPENAI_BASE_URL` environment variable in your downloaded Khoj [docker-compose.yml](https://github.com/khoj-ai/khoj/blob/master/docker-compose.yml#:~:text=OPENAI_BASE_URL) 4. 
Start Khoj docker for the first time to automatically integrate and load models from the Ollama running on your host machine ```bash # run below command in the directory where you downloaded the Khoj docker-compose.yml @@ -46,9 +46,9 @@ Restart your Khoj server after first run or update to the settings below to ensu ```bash ollama pull llama3.1 ``` - 3. Set `OPENAI_API_BASE` environment variable to `http://localhost:11434/v1/` in your shell before starting Khoj for the first time + 3. Set `OPENAI_BASE_URL` environment variable to `http://localhost:11434/v1/` in your shell before starting Khoj for the first time ```bash - export OPENAI_API_BASE="http://localhost:11434/v1/" + export OPENAI_BASE_URL="http://localhost:11434/v1/" khoj --anonymous-mode ``` diff --git a/documentation/docs/features/search.md b/documentation/docs/features/search.md index 810a886b..70410f67 100644 --- a/documentation/docs/features/search.md +++ b/documentation/docs/features/search.md @@ -15,3 +15,37 @@ Take advantage of super fast search to find relevant notes and documents from yo ### Demo ![](/img/search_agents_markdown.png ':size=400px') + + +### Implementation Overview +A bi-encoder model is used to create meaning vectors (aka vector embeddings) of your documents and search queries. +1. When you sync your documents with Khoj, it uses the bi-encoder model to create and store meaning vectors of (chunks of) your documents +2. When you initiate a natural language search the bi-encoder model converts your query into a meaning vector and finds the most relevant document chunks for that query by comparing their meaning vectors. +3. The slower but higher-quality cross-encoder model is then used to re-rank these documents for your given query. + +### Setup (Self-Hosting) +You are **not required** to configure the search model config when self-hosting. Khoj sets up a decent default local search model config for general use.
+ +You may want to configure this if you need better multi-lingual search, want to experiment with different, newer models or if the default models do not work for your use-case. + +You can use bi-encoder models downloaded locally [from Huggingface](https://huggingface.co/models?library=sentence-transformers), served via the [HuggingFace Inference API](https://endpoints.huggingface.co/), OpenAI API, Azure OpenAI API or any OpenAI Compatible API like Ollama, LiteLLM etc. Follow the steps below to configure your search model: + +1. Open the [SearchModelConfig](http://localhost:42110/server/admin/database/searchmodelconfig/) page on your Khoj admin panel. +2. Hit the Plus button to add a new model config or click the id of an existing model config to edit it. +3. Set the `biencoder` field to the name of the bi-encoder model supported [locally](https://huggingface.co/models?library=sentence-transformers) or via the API you configure. +4. Set the `Embeddings inference endpoint api key` to your OpenAI API key and `Embeddings inference endpoint type` to `OpenAI` to use an OpenAI embedding model. +5. Also set the `Embeddings inference endpoint` to your Azure OpenAI or OpenAI compatible API URL to use the model via those APIs. +6. Ensure the search model config you want to use is the **only one** that has the `name` field set to `default`[^1]. +7. Save the search model configs and restart your Khoj server to start using your new, updated search config. + +:::info +You will need to re-index all your documents if you want to use a different bi-encoder model. +::: + +:::info +You may need to tune the `Bi encoder confidence threshold` field for each bi-encoder to get an appropriate number of documents for chat with your Knowledge base. + +Confidence here is a normalized measure of semantic distance between your query and documents. The confidence threshold limits the documents returned to chat that fall within the distance specified in this field.
It can take values between 0.0 (exact overlap) and 1.0 (no meaning overlap). +::: + +[^1]: Khoj uses the first search model config named `default` it finds on startup as the search model config for that session diff --git a/documentation/docs/get-started/setup.mdx b/documentation/docs/get-started/setup.mdx index 17d18aea..7a2cbb16 100644 --- a/documentation/docs/get-started/setup.mdx +++ b/documentation/docs/get-started/setup.mdx @@ -48,7 +48,7 @@ Restart your Khoj server after the first run to ensure all settings are applied 2. Configure the environment variables in the `docker-compose.yml` - Set `KHOJ_ADMIN_PASSWORD`, `KHOJ_DJANGO_SECRET_KEY` (and optionally the `KHOJ_ADMIN_EMAIL`) to something secure. This allows you to customize Khoj later via the admin panel. - Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` to your API key if you want to use OpenAI, Anthropic or Gemini commercial chat models respectively. - - Uncomment `OPENAI_API_BASE` to use [Ollama](/advanced/ollama?type=first-run&server=docker#setup) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio). + - Uncomment `OPENAI_BASE_URL` to use [Ollama](/advanced/ollama?type=first-run&server=docker#setup) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio). 3. Start Khoj by running the following command in the same directory as your docker-compose.yml file. ```shell cd ~/.khoj @@ -74,7 +74,7 @@ Restart your Khoj server after the first run to ensure all settings are applied 2. Configure the environment variables in the `docker-compose.yml` - Set `KHOJ_ADMIN_PASSWORD`, `KHOJ_DJANGO_SECRET_KEY` (and optionally the `KHOJ_ADMIN_EMAIL`) to something secure. This allows you to customize Khoj later via the admin panel. 
- Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` to your API key if you want to use OpenAI, Anthropic or Gemini commercial chat models respectively. - - Uncomment `OPENAI_API_BASE` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio). + - Uncomment `OPENAI_BASE_URL` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio). 3. Start Khoj by running the following command in the same directory as your docker-compose.yml file. ```shell # Windows users should use their WSL2 terminal to run these commands @@ -96,7 +96,7 @@ Restart your Khoj server after the first run to ensure all settings are applied 2. Configure the environment variables in the `docker-compose.yml` - Set `KHOJ_ADMIN_PASSWORD`, `KHOJ_DJANGO_SECRET_KEY` (and optionally the `KHOJ_ADMIN_EMAIL`) to something secure. This allows you to customize Khoj later via the admin panel. - Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` to your API key if you want to use OpenAI, Anthropic or Gemini commercial chat models respectively. - - Uncomment `OPENAI_API_BASE` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio). + - Uncomment `OPENAI_BASE_URL` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio). 3. Start Khoj by running the following command in the same directory as your docker-compose.yml file. 
```shell cd ~/.khoj diff --git a/manifest.json b/manifest.json index 3e5f8331..6c8ad3bf 100644 --- a/manifest.json +++ b/manifest.json @@ -1,7 +1,7 @@ { "id": "khoj", "name": "Khoj", - "version": "1.33.2", + "version": "1.34.0", "minAppVersion": "0.15.0", "description": "Your Second Brain", "author": "Khoj Inc.", diff --git a/src/interface/desktop/chatutils.js b/src/interface/desktop/chatutils.js index 48fb72c3..7fb05e1a 100644 --- a/src/interface/desktop/chatutils.js +++ b/src/interface/desktop/chatutils.js @@ -229,7 +229,7 @@ function generateImageMarkdown(message, intentType, inferredQueries=null) { //sa } else if (intentType === "text-to-image2") { imageMarkdown = `![](${message})`; } else if (intentType === "text-to-image-v3") { - imageMarkdown = `![](data:image/webp;base64,${message})`; + imageMarkdown = `![](${message})`; } const inferredQuery = inferredQueries?.[0]; if (inferredQuery) { @@ -423,7 +423,7 @@ function handleImageResponse(imageJson, rawResponse) { } else if (imageJson.intentType === "text-to-image2") { rawResponse += `![generated_image](${imageJson.image})`; } else if (imageJson.intentType === "text-to-image-v3") { - rawResponse = `![](data:image/webp;base64,${imageJson.image})`; + rawResponse = `![](${imageJson.image})`; } else if (imageJson.intentType === "excalidraw") { const redirectMessage = `Hey, I'm not ready to show you diagrams yet here. But you can view it in the web app`; rawResponse += redirectMessage; diff --git a/src/interface/desktop/package.json b/src/interface/desktop/package.json index ef28b055..e7d0a4c2 100644 --- a/src/interface/desktop/package.json +++ b/src/interface/desktop/package.json @@ -1,6 +1,6 @@ { "name": "Khoj", - "version": "1.33.2", + "version": "1.34.0", "description": "Your Second Brain", "author": "Khoj Inc. 
", "license": "GPL-3.0-or-later", diff --git a/src/interface/emacs/khoj.el b/src/interface/emacs/khoj.el index 828139b0..d832fc51 100644 --- a/src/interface/emacs/khoj.el +++ b/src/interface/emacs/khoj.el @@ -6,7 +6,7 @@ ;; Saba Imran ;; Description: Your Second Brain ;; Keywords: search, chat, ai, org-mode, outlines, markdown, pdf, image -;; Version: 1.33.2 +;; Version: 1.34.0 ;; Package-Requires: ((emacs "27.1") (transient "0.3.0") (dash "2.19.1")) ;; URL: https://github.com/khoj-ai/khoj/tree/master/src/interface/emacs diff --git a/src/interface/obsidian/manifest.json b/src/interface/obsidian/manifest.json index 3e5f8331..6c8ad3bf 100644 --- a/src/interface/obsidian/manifest.json +++ b/src/interface/obsidian/manifest.json @@ -1,7 +1,7 @@ { "id": "khoj", "name": "Khoj", - "version": "1.33.2", + "version": "1.34.0", "minAppVersion": "0.15.0", "description": "Your Second Brain", "author": "Khoj Inc.", diff --git a/src/interface/obsidian/package.json b/src/interface/obsidian/package.json index 76a82635..2ef8b849 100644 --- a/src/interface/obsidian/package.json +++ b/src/interface/obsidian/package.json @@ -1,6 +1,6 @@ { "name": "Khoj", - "version": "1.33.2", + "version": "1.34.0", "description": "Your Second Brain", "author": "Debanjum Singh Solanky, Saba Imran ", "license": "GPL-3.0-or-later", diff --git a/src/interface/obsidian/src/chat_view.ts b/src/interface/obsidian/src/chat_view.ts index ebf2ad8d..8fa09a23 100644 --- a/src/interface/obsidian/src/chat_view.ts +++ b/src/interface/obsidian/src/chat_view.ts @@ -501,6 +501,7 @@ export class KhojChatView extends KhojPaneView { conversationId?: string, images?: string[], excalidrawDiagram?: string, + mermaidjsDiagram?: string ) { if (!message) return; @@ -509,8 +510,9 @@ export class KhojChatView extends KhojPaneView { intentType?.includes("text-to-image") || intentType === "excalidraw" || (images && images.length > 0) || + mermaidjsDiagram || excalidrawDiagram) { - let imageMarkdown = 
this.generateImageMarkdown(message, intentType ?? "", inferredQueries, conversationId, images, excalidrawDiagram); + let imageMarkdown = this.generateImageMarkdown(message, intentType ?? "", inferredQueries, conversationId, images, excalidrawDiagram, mermaidjsDiagram); chatMessageEl = this.renderMessage({ chatBodyEl: chatEl, message: imageMarkdown, @@ -542,28 +544,23 @@ export class KhojChatView extends KhojPaneView { chatMessageBodyEl.appendChild(this.createReferenceSection(references)); } - generateImageMarkdown(message: string, intentType: string, inferredQueries?: string[], conversationId?: string, images?: string[], excalidrawDiagram?: string): string { + generateImageMarkdown(message: string, intentType: string, inferredQueries?: string[], conversationId?: string, images?: string[], excalidrawDiagram?: string, mermaidjsDiagram?: string): string { let imageMarkdown = ""; if (intentType === "text-to-image") { imageMarkdown = `![](data:image/png;base64,${message})`; } else if (intentType === "text-to-image2") { imageMarkdown = `![](${message})`; } else if (intentType === "text-to-image-v3") { - imageMarkdown = `![](data:image/webp;base64,${message})`; + imageMarkdown = `![](${message})`; } else if (intentType === "excalidraw" || excalidrawDiagram) { const domain = this.setting.khojUrl.endsWith("/") ? this.setting.khojUrl : `${this.setting.khojUrl}/`; const redirectMessage = `Hey, I'm not ready to show you diagrams yet here. 
But you can view it in ${domain}chat?conversationId=${conversationId}`; imageMarkdown = redirectMessage; + } else if (mermaidjsDiagram) { + imageMarkdown = "```mermaid\n" + mermaidjsDiagram + "\n```"; } else if (images && images.length > 0) { - for (let image of images) { - if (image.startsWith("https://")) { - imageMarkdown += `![](${image})\n\n`; - } else { - imageMarkdown += `![](data:image/png;base64,${image})\n\n`; - } - } - - imageMarkdown += `${message}`; + imageMarkdown += images.map(image => `![](${image})`).join('\n\n'); + imageMarkdown += message; } if (images?.length === 0 && inferredQueries) { @@ -961,6 +958,7 @@ export class KhojChatView extends KhojPaneView { chatBodyEl.dataset.conversationId ?? "", chatLog.images, chatLog.excalidrawDiagram, + chatLog.mermaidjsDiagram, ); // push the user messages to the chat history if (chatLog.by === "you") { @@ -1077,7 +1075,7 @@ export class KhojChatView extends KhojPaneView { } handleJsonResponse(jsonData: any): void { - if (jsonData.image || jsonData.detail || jsonData.images || jsonData.excalidrawDiagram) { + if (jsonData.image || jsonData.detail || jsonData.images || jsonData.mermaidjsDiagram) { this.chatMessageState.rawResponse = this.handleImageResponse(jsonData, this.chatMessageState.rawResponse); } else if (jsonData.response) { this.chatMessageState.rawResponse = jsonData.response; @@ -1450,7 +1448,7 @@ export class KhojChatView extends KhojPaneView { } else if (imageJson.intentType === "text-to-image2") { rawResponse += `![generated_image](${imageJson.image})`; } else if (imageJson.intentType === "text-to-image-v3") { - rawResponse = `![](data:image/webp;base64,${imageJson.image})`; + rawResponse = `![generated_image](${imageJson.image})`; } else if (imageJson.intentType === "excalidraw") { const domain = this.setting.khojUrl.endsWith("/") ? this.setting.khojUrl : `${this.setting.khojUrl}/`; const redirectMessage = `Hey, I'm not ready to show you diagrams yet here. 
But you can view it in ${domain}`; @@ -1462,17 +1460,14 @@ export class KhojChatView extends KhojPaneView { } else if (imageJson.images) { // If response has images field, response is a list of generated images. imageJson.images.forEach((image: any) => { - - if (image.startsWith("http")) { - rawResponse += `![generated_image](${image})\n\n`; - } else { - rawResponse += `![generated_image](data:image/png;base64,${image})\n\n`; - } + rawResponse += `![generated_image](${image})\n\n`; }); } else if (imageJson.excalidrawDiagram) { const domain = this.setting.khojUrl.endsWith("/") ? this.setting.khojUrl : `${this.setting.khojUrl}/`; const redirectMessage = `Hey, I'm not ready to show you diagrams yet here. But you can view it in ${domain}`; rawResponse += redirectMessage; + } else if (imageJson.mermaidjsDiagram) { + rawResponse += imageJson.mermaidjsDiagram; } // If response has detail field, response is an error message. diff --git a/src/interface/obsidian/versions.json b/src/interface/obsidian/versions.json index e7dced26..47ff6efa 100644 --- a/src/interface/obsidian/versions.json +++ b/src/interface/obsidian/versions.json @@ -108,5 +108,6 @@ "1.32.2": "0.15.0", "1.33.0": "0.15.0", "1.33.1": "0.15.0", - "1.33.2": "0.15.0" + "1.33.2": "0.15.0", + "1.34.0": "0.15.0" } diff --git a/src/interface/web/app/chat/page.tsx b/src/interface/web/app/chat/page.tsx index ff7cb796..824a7e4c 100644 --- a/src/interface/web/app/chat/page.tsx +++ b/src/interface/web/app/chat/page.tsx @@ -354,7 +354,15 @@ export default function Chat() { try { await readChatStream(response); } catch (err) { - const apiError = await response.json(); + let apiError; + try { + apiError = await response.json(); + } catch (err) { + // Error reading API error response + apiError = { + streamError: "Error reading API error response stream. 
Expected JSON response.", + }; + } console.error(apiError); // Retrieve latest message being processed const currentMessage = messages.find((message) => !message.completed); @@ -365,7 +373,9 @@ export default function Chat() { const errorName = (err as Error).name; if (errorMessage.includes("Error in input stream")) currentMessage.rawResponse = `Woops! The connection broke while I was writing my thoughts down. Maybe try again in a bit or dislike this message if the issue persists?`; - else if (response.status === 429) { + else if (apiError.streamError) { + currentMessage.rawResponse = `Umm, not sure what just happened but I lost my train of thought. Could you try again or ask my developers to look into this if the issue persists? They can be contacted at the Khoj Github, Discord or team@khoj.dev.`; + } else if (response.status === 429) { "detail" in apiError ? (currentMessage.rawResponse = `${apiError.detail}`) : (currentMessage.rawResponse = `I'm a bit overwhelmed at the moment. Could you try again in a bit or dislike this message if the issue persists?`); diff --git a/src/interface/web/app/common/chatFunctions.ts b/src/interface/web/app/common/chatFunctions.ts index c64e81ba..be64acf1 100644 --- a/src/interface/web/app/common/chatFunctions.ts +++ b/src/interface/web/app/common/chatFunctions.ts @@ -19,7 +19,7 @@ export interface MessageMetadata { export interface GeneratedAssetsData { images: string[]; - excalidrawDiagram: string; + mermaidjsDiagram: string; files: AttachedFileText[]; } @@ -114,8 +114,8 @@ export function processMessageChunk( currentMessage.generatedImages = generatedAssets.images; } - if (generatedAssets.excalidrawDiagram) { - currentMessage.generatedExcalidrawDiagram = generatedAssets.excalidrawDiagram; + if (generatedAssets.mermaidjsDiagram) { + currentMessage.generatedMermaidjsDiagram = generatedAssets.mermaidjsDiagram; } if (generatedAssets.files) { diff --git a/src/interface/web/app/components/chatHistory/chatHistory.tsx 
b/src/interface/web/app/components/chatHistory/chatHistory.tsx index 5a750e8b..829211be 100644 --- a/src/interface/web/app/components/chatHistory/chatHistory.tsx +++ b/src/interface/web/app/components/chatHistory/chatHistory.tsx @@ -418,7 +418,7 @@ export default function ChatHistory(props: ChatHistoryProps) { conversationId: props.conversationId, images: message.generatedImages, queryFiles: message.generatedFiles, - excalidrawDiagram: message.generatedExcalidrawDiagram, + mermaidjsDiagram: message.generatedMermaidjsDiagram, turnId: messageTurnId, }} conversationId={props.conversationId} diff --git a/src/interface/web/app/components/chatMessage/chatMessage.tsx b/src/interface/web/app/components/chatMessage/chatMessage.tsx index 89c3038a..c3227f59 100644 --- a/src/interface/web/app/components/chatMessage/chatMessage.tsx +++ b/src/interface/web/app/components/chatMessage/chatMessage.tsx @@ -53,6 +53,7 @@ import { DialogTitle } from "@radix-ui/react-dialog"; import { convertBytesToText } from "@/app/common/utils"; import { ScrollArea } from "@/components/ui/scroll-area"; import { getIconFromFilename } from "@/app/common/iconUtils"; +import Mermaid from "../mermaid/mermaid"; const md = new markdownIt({ html: true, @@ -164,6 +165,7 @@ export interface SingleChatMessage { turnId?: string; queryFiles?: AttachedFileText[]; excalidrawDiagram?: string; + mermaidjsDiagram?: string; } export interface StreamMessage { @@ -182,9 +184,11 @@ export interface StreamMessage { turnId?: string; queryFiles?: AttachedFileText[]; excalidrawDiagram?: string; + mermaidjsDiagram?: string; generatedFiles?: AttachedFileText[]; generatedImages?: string[]; generatedExcalidrawDiagram?: string; + generatedMermaidjsDiagram?: string; } export interface ChatHistoryData { @@ -271,6 +275,7 @@ interface ChatMessageProps { turnId?: string; generatedImage?: string; excalidrawDiagram?: string; + mermaidjsDiagram?: string; generatedFiles?: AttachedFileText[]; } @@ -358,6 +363,7 @@ const ChatMessage = 
forwardRef((props, ref) => const [isPlaying, setIsPlaying] = useState(false); const [interrupted, setInterrupted] = useState(false); const [excalidrawData, setExcalidrawData] = useState(""); + const [mermaidjsData, setMermaidjsData] = useState(""); const interruptedRef = useRef(false); const messageRef = useRef(null); @@ -401,6 +407,10 @@ const ChatMessage = forwardRef((props, ref) => setExcalidrawData(props.chatMessage.excalidrawDiagram); } + if (props.chatMessage.mermaidjsDiagram) { + setMermaidjsData(props.chatMessage.mermaidjsDiagram); + } + // Replace LaTeX delimiters with placeholders message = message .replace(/\\\(/g, "LEFTPAREN") @@ -718,6 +728,7 @@ const ChatMessage = forwardRef((props, ref) => dangerouslySetInnerHTML={{ __html: markdownRendered }} /> {excalidrawData && } + {mermaidjsData && }
-
- {props.message || "Loading"}{" "} - - - + + + +
+ + + {isMobileWidth ? ( + + + + ) : ( +

Ask Anything

+ )} +
+
+
+
+ {props.message || "Loading"}{" "} + + + +
-
+ ); } diff --git a/src/interface/web/app/components/loginPrompt/loginPrompt.tsx b/src/interface/web/app/components/loginPrompt/loginPrompt.tsx index 5cda444a..633d7b41 100644 --- a/src/interface/web/app/components/loginPrompt/loginPrompt.tsx +++ b/src/interface/web/app/components/loginPrompt/loginPrompt.tsx @@ -2,7 +2,7 @@ import styles from "./loginPrompt.module.css"; import { Button } from "@/components/ui/button"; -import { Dialog, DialogContent } from "@/components/ui/dialog"; +import { Dialog, DialogContent, DialogTitle } from "@/components/ui/dialog"; import { Input } from "@/components/ui/input"; import Autoplay from "embla-carousel-autoplay"; import { @@ -27,6 +27,7 @@ import { } from "@/components/ui/carousel"; import { Card, CardContent } from "@/components/ui/card"; import { InputOTP, InputOTPGroup, InputOTPSlot } from "@/components/ui/input-otp"; +import * as VisuallyHidden from "@radix-ui/react-visually-hidden"; export interface LoginPromptProps { onOpenChange: (open: boolean) => void; @@ -181,6 +182,9 @@ export default function LoginPrompt(props: LoginPromptProps) { + + Login Dialog +
{useEmailSignIn ? ( = ALLOWED_OTP_ATTEMPTS) { setOTPError("Too many failed attempts. Please try again tomorrow."); diff --git a/src/interface/web/app/components/mermaid/mermaid.tsx b/src/interface/web/app/components/mermaid/mermaid.tsx new file mode 100644 index 00000000..d73a4447 --- /dev/null +++ b/src/interface/web/app/components/mermaid/mermaid.tsx @@ -0,0 +1,173 @@ +import React, { useEffect, useState, useRef } from "react"; +import mermaid from "mermaid"; +import { Download, Info } from "@phosphor-icons/react"; +import { Button } from "@/components/ui/button"; + +interface MermaidProps { + chart: string; +} + +const Mermaid: React.FC = ({ chart }) => { + const [mermaidError, setMermaidError] = useState(null); + const [mermaidId] = useState(`mermaid-chart-${Math.random().toString(12).substring(7)}`); + const elementRef = useRef(null); + + useEffect(() => { + mermaid.initialize({ + startOnLoad: false, + }); + + mermaid.parseError = (error) => { + console.error("Mermaid errors:", error); + // Extract error message from error object + // Parse error message safely + let errorMessage; + try { + errorMessage = typeof error === "string" ? JSON.parse(error) : error; + } catch (e) { + errorMessage = error?.toString() || "Unknown error"; + } + + console.log("Mermaid error message:", errorMessage); + + if (errorMessage.str !== "element is null") { + setMermaidError( + "Something went wrong while rendering the diagram. 
Please try again later or downvote the message if the issue persists.", + ); + } else { + setMermaidError(null); + } + }; + + mermaid.contentLoaded(); + }, []); + + const handleExport = async () => { + if (!elementRef.current) return; + + try { + // Get SVG element + const svgElement = elementRef.current.querySelector("svg"); + if (!svgElement) throw new Error("No SVG found"); + + // Get SVG viewBox dimensions + const viewBox = svgElement.getAttribute("viewBox")?.split(" ").map(Number) || [ + 0, 0, 0, 0, + ]; + const [, , viewBoxWidth, viewBoxHeight] = viewBox; + + // Create canvas with viewBox dimensions + const canvas = document.createElement("canvas"); + const scale = 2; // For better resolution + canvas.width = viewBoxWidth * scale; + canvas.height = viewBoxHeight * scale; + const ctx = canvas.getContext("2d"); + if (!ctx) throw new Error("Failed to get canvas context"); + + // Convert SVG to data URL + const svgData = new XMLSerializer().serializeToString(svgElement); + const svgBlob = new Blob([svgData], { type: "image/svg+xml;charset=utf-8" }); + const svgUrl = URL.createObjectURL(svgBlob); + + // Create and load image + const img = new Image(); + img.src = svgUrl; + + await new Promise((resolve, reject) => { + img.onload = () => { + // Scale context for better resolution + ctx.scale(scale, scale); + ctx.drawImage(img, 0, 0, viewBoxWidth, viewBoxHeight); + + canvas.toBlob((blob) => { + if (!blob) { + reject(new Error("Failed to create blob")); + return; + } + + const url = URL.createObjectURL(blob); + const a = document.createElement("a"); + a.href = url; + a.download = `mermaid-diagram-${Date.now()}.png`; + a.click(); + + // Cleanup + URL.revokeObjectURL(url); + URL.revokeObjectURL(svgUrl); + resolve(true); + }, "image/png"); + }; + + img.onerror = () => reject(new Error("Failed to load SVG")); + }); + } catch (error) { + console.error("Error exporting diagram:", error); + setMermaidError("Failed to export diagram"); + } + }; + + useEffect(() => { + if 
(elementRef.current) { + elementRef.current.removeAttribute("data-processed"); + + mermaid + .run({ + nodes: [elementRef.current], + }) + .then(() => { + setMermaidError(null); + }) + .catch((error) => { + let errorMessage; + try { + errorMessage = typeof error === "string" ? JSON.parse(error) : error; + } catch (e) { + errorMessage = error?.toString() || "Unknown error"; + } + + console.log("Mermaid error message:", errorMessage); + + if (errorMessage.str !== "element is null") { + setMermaidError( + "Something went wrong while rendering the diagram. Please try again later or downvote the message if the issue persists.", + ); + } else { + setMermaidError(null); + } + }); + } + }, [chart]); + + return ( +
+ {mermaidError ? ( +
+ + Error rendering diagram: {mermaidError} +
+ ) : ( +
+ {chart} +
+ )} + {!mermaidError && ( + + )} +
+ ); +}; + +export default Mermaid; diff --git a/src/interface/web/app/search/page.tsx b/src/interface/web/app/search/page.tsx index 90432508..c4c47122 100644 --- a/src/interface/web/app/search/page.tsx +++ b/src/interface/web/app/search/page.tsx @@ -173,8 +173,10 @@ export default function Search() { const [searchResultsLoading, setSearchResultsLoading] = useState(false); const [focusSearchResult, setFocusSearchResult] = useState(null); const [exampleQuery, setExampleQuery] = useState(""); + const [fileSuggestions, setFileSuggestions] = useState([]); + const [allFiles, setAllFiles] = useState([]); + const [showSuggestions, setShowSuggestions] = useState(false); const searchTimeoutRef = useRef(null); - const isMobileWidth = useIsMobileWidth(); useEffect(() => { @@ -183,8 +185,68 @@ export default function Search() { Math.floor(Math.random() * naturalLanguageSearchQueryExamples.length) ], ); + + // Load all files once on page load + fetch('/api/content/computer', { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + }, + }) + .then(response => response.json()) + .then(data => { + setAllFiles(data); + }) + .catch(error => { + console.error('Error loading files:', error); + }); }, []); + function getFileSuggestions(query: string) { + const fileFilterMatch = query.match(/file:([^"\s]*|"[^"]*")?/); + if (!fileFilterMatch) { + setFileSuggestions([]); + setShowSuggestions(false); + return; + } + + const filePrefix = fileFilterMatch[1]?.replace(/^"|"$/g, '').trim() || ''; + const filteredSuggestions = allFiles + .filter(file => file.toLowerCase().includes(filePrefix.toLowerCase())) + .sort() + .slice(0, 10); + + setFileSuggestions(filteredSuggestions); + setShowSuggestions(true); + } + + function handleSearchInputChange(value: string) { + setSearchQuery(value); + + // Clear previous search timeout + if (searchTimeoutRef.current) { + clearTimeout(searchTimeoutRef.current); + } + + // Get file suggestions immediately + getFileSuggestions(value); + + // 
Debounce search + if (value.trim()) { + searchTimeoutRef.current = setTimeout(() => { + search(); + }, 750); + } + } + + function applySuggestion(suggestion: string) { + // Replace the file: filter with the selected suggestion + const newQuery = searchQuery.replace(/file:([^"\s]*|"[^"]*")?/, `file:"${suggestion}"`); + setSearchQuery(newQuery); + setShowSuggestions(false); + search(); + } + function search() { if (searchResultsLoading || !searchQuery.trim()) return; @@ -205,30 +267,6 @@ export default function Search() { }); } - useEffect(() => { - if (!searchQuery.trim()) { - return; - } - - setFocusSearchResult(null); - - if (searchTimeoutRef.current) { - clearTimeout(searchTimeoutRef.current); - } - - if (searchQuery.trim()) { - searchTimeoutRef.current = setTimeout(() => { - search(); - }, 750); // 1000 milliseconds = 1 second - } - - return () => { - if (searchTimeoutRef.current) { - clearTimeout(searchTimeoutRef.current); - } - }; - }, [searchQuery]); - return ( @@ -249,14 +287,38 @@ export default function Search() {
- setSearchQuery(e.currentTarget.value)} - onKeyDown={(e) => e.key === "Enter" && search()} - type="search" - placeholder="Search Documents" - /> +
+ handleSearchInputChange(e.currentTarget.value)} + onKeyDown={(e) => { + if (e.key === "Enter") { + if (showSuggestions && fileSuggestions.length > 0) { + applySuggestion(fileSuggestions[0]); + } else { + search(); + } + } + }} + type="search" + placeholder="Search Documents (type 'file:' for file suggestions)" + value={searchQuery} + /> + {showSuggestions && fileSuggestions.length > 0 && ( +
+ {fileSuggestions.map((suggestion, index) => ( +
applySuggestion(suggestion)} + > + {suggestion} +
+ ))} +
+ )} +