From c133d11556683cc67227e3330d29304e86518c52 Mon Sep 17 00:00:00 2001 From: Debanjum Date: Fri, 28 Feb 2025 14:37:54 +0530 Subject: [PATCH] Improvements based on code feedback --- .github/workflows/run_evals.yml | 2 +- src/khoj/processor/conversation/prompts.py | 2 +- src/khoj/processor/tools/run_code.py | 7 ++++--- src/khoj/utils/initialization.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/run_evals.yml b/.github/workflows/run_evals.yml index 21870c04..2c8e9688 100644 --- a/.github/workflows/run_evals.yml +++ b/.github/workflows/run_evals.yml @@ -108,7 +108,7 @@ jobs: BATCH_SIZE: "20" RANDOMIZE: "True" KHOJ_URL: "http://localhost:42110" - KHOJ_CHAT_MODEL: ${{ github.event_name == 'workflow_dispatch' && inputs.chat_model || 'gemini-2.0-flash' }} + KHOJ_DEFAULT_CHAT_MODEL: ${{ github.event_name == 'workflow_dispatch' && inputs.chat_model || 'gemini-2.0-flash' }} KHOJ_LLM_SEED: "42" GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} SERPER_DEV_API_KEY: ${{ matrix.dataset != 'math500' && secrets.SERPER_DEV_API_KEY }} diff --git a/src/khoj/processor/conversation/prompts.py b/src/khoj/processor/conversation/prompts.py index 890b791c..0c2b3bbe 100644 --- a/src/khoj/processor/conversation/prompts.py +++ b/src/khoj/processor/conversation/prompts.py @@ -1051,7 +1051,7 @@ print(\"Evaluated Expression at x=1:\", evaluated_expression) Example 3: --- -Q: Plot the world ppulation growth over the years, given this year, world population world tuples: [(2000, 6), (2001, 7), (2002, 8), (2003, 9), (2004, 10)]. +Q: Plot the world population growth over the years, given these (year, world population) tuples: [(2000, 6), (2001, 7), (2002, 8), (2003, 9), (2004, 10)]. A: Absolutely! We can utilize the Pandas and Matplotlib libraries (as both are available in the sandbox) to create the world population growth plot. 
```python import pandas as pd diff --git a/src/khoj/processor/tools/run_code.py b/src/khoj/processor/tools/run_code.py index 5c6cb48d..12e65670 100644 --- a/src/khoj/processor/tools/run_code.py +++ b/src/khoj/processor/tools/run_code.py @@ -40,6 +40,7 @@ logger = logging.getLogger(__name__) SANDBOX_URL = os.getenv("KHOJ_TERRARIUM_URL", "http://localhost:8080") +DEFAULT_E2B_TEMPLATE = "pmt2o0ghpang8gbiys57" class GeneratedCode(NamedTuple): @@ -219,7 +220,7 @@ async def execute_e2b(code: str, input_files: list[dict]) -> dict[str, Any]: sandbox = await AsyncSandbox.create( api_key=os.getenv("E2B_API_KEY"), - template=os.getenv("E2B_TEMPLATE", "pmt2o0ghpang8gbiys57"), + template=os.getenv("E2B_TEMPLATE", DEFAULT_E2B_TEMPLATE), timeout=120, request_timeout=30, ) @@ -232,7 +233,7 @@ async def execute_e2b(code: str, input_files: list[dict]) -> dict[str, Any]: ] await asyncio.gather(*upload_tasks) - # Note stored files before execution + # Note stored files before execution to identify new files created during execution E2bFile = NamedTuple("E2bFile", [("name", str), ("path", str)]) original_files = {E2bFile(f.name, f.path) for f in await sandbox.files.list("~")} @@ -261,7 +262,7 @@ async def execute_e2b(code: str, input_files: list[dict]) -> dict[str, Any]: # Collect output files from execution results for idx, result in enumerate(execution.results): - for result_type in ["png", "jpeg", "svg", "text", "markdown", "json"]: + for result_type in ("png", "jpeg", "svg", "text", "markdown", "json"): if b64_data := getattr(result, result_type, None): output_files.append({"filename": f"{idx}.{result_type}", "b64_data": b64_data}) break diff --git a/src/khoj/utils/initialization.py b/src/khoj/utils/initialization.py index 3ea73891..b5c661c4 100644 --- a/src/khoj/utils/initialization.py +++ b/src/khoj/utils/initialization.py @@ -309,7 +309,7 @@ def initialization(interactive: bool = True): # Update the default chat model if it doesn't match chat_config = 
ConversationAdapters.get_default_chat_model() - env_default_chat_model = os.getenv("KHOJ_CHAT_MODEL") + env_default_chat_model = os.getenv("KHOJ_DEFAULT_CHAT_MODEL") if not chat_config or not env_default_chat_model: return if chat_config.name != env_default_chat_model: