mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-02 13:18:18 +00:00
Use same openai base url env var name as the official openai client
This eases re-use of the OpenAI API across all OpenAI clients, including chat, image generation, and speech-to-text. Resolves #1085
This commit is contained in:
@@ -59,7 +59,7 @@ services:
|
||||
- KHOJ_SEARXNG_URL=http://search:8080
|
||||
# Uncomment line below to use with Ollama running on your local machine at localhost:11434.
|
||||
# Change URL to use with other OpenAI API compatible providers like VLLM, LMStudio etc.
|
||||
# - OPENAI_API_BASE=http://host.docker.internal:11434/v1/
|
||||
# - OPENAI_BASE_URL=http://host.docker.internal:11434/v1/
|
||||
#
|
||||
# Uncomment appropriate lines below to use chat models by OpenAI, Anthropic, Google.
|
||||
# Ensure you set your provider specific API keys.
|
||||
|
||||
@@ -32,7 +32,7 @@ Restart your Khoj server after first run or update to the settings below to ensu
|
||||
```bash
|
||||
ollama pull llama3.1
|
||||
```
|
||||
3. Uncomment `OPENAI_API_BASE` environment variable in your downloaded Khoj [docker-compose.yml](https://github.com/khoj-ai/khoj/blob/master/docker-compose.yml#:~:text=OPENAI_API_BASE)
|
||||
3. Uncomment `OPENAI_BASE_URL` environment variable in your downloaded Khoj [docker-compose.yml](https://github.com/khoj-ai/khoj/blob/master/docker-compose.yml#:~:text=OPENAI_BASE_URL)
|
||||
4. Start Khoj docker for the first time to automatically integrate and load models from the Ollama running on your host machine
|
||||
```bash
|
||||
# run below command in the directory where you downloaded the Khoj docker-compose.yml
|
||||
@@ -46,9 +46,9 @@ Restart your Khoj server after first run or update to the settings below to ensu
|
||||
```bash
|
||||
ollama pull llama3.1
|
||||
```
|
||||
3. Set `OPENAI_API_BASE` environment variable to `http://localhost:11434/v1/` in your shell before starting Khoj for the first time
|
||||
3. Set `OPENAI_BASE_URL` environment variable to `http://localhost:11434/v1/` in your shell before starting Khoj for the first time
|
||||
```bash
|
||||
export OPENAI_API_BASE="http://localhost:11434/v1/"
|
||||
export OPENAI_BASE_URL="http://localhost:11434/v1/"
|
||||
khoj --anonymous-mode
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
@@ -48,7 +48,7 @@ Restart your Khoj server after the first run to ensure all settings are applied
|
||||
2. Configure the environment variables in the `docker-compose.yml`
|
||||
- Set `KHOJ_ADMIN_PASSWORD`, `KHOJ_DJANGO_SECRET_KEY` (and optionally the `KHOJ_ADMIN_EMAIL`) to something secure. This allows you to customize Khoj later via the admin panel.
|
||||
- Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` to your API key if you want to use OpenAI, Anthropic or Gemini commercial chat models respectively.
|
||||
- Uncomment `OPENAI_API_BASE` to use [Ollama](/advanced/ollama?type=first-run&server=docker#setup) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio).
|
||||
- Uncomment `OPENAI_BASE_URL` to use [Ollama](/advanced/ollama?type=first-run&server=docker#setup) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio).
|
||||
3. Start Khoj by running the following command in the same directory as your docker-compose.yml file.
|
||||
```shell
|
||||
cd ~/.khoj
|
||||
@@ -74,7 +74,7 @@ Restart your Khoj server after the first run to ensure all settings are applied
|
||||
2. Configure the environment variables in the `docker-compose.yml`
|
||||
- Set `KHOJ_ADMIN_PASSWORD`, `KHOJ_DJANGO_SECRET_KEY` (and optionally the `KHOJ_ADMIN_EMAIL`) to something secure. This allows you to customize Khoj later via the admin panel.
|
||||
- Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` to your API key if you want to use OpenAI, Anthropic or Gemini commercial chat models respectively.
|
||||
- Uncomment `OPENAI_API_BASE` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio).
|
||||
- Uncomment `OPENAI_BASE_URL` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio).
|
||||
3. Start Khoj by running the following command in the same directory as your docker-compose.yml file.
|
||||
```shell
|
||||
# Windows users should use their WSL2 terminal to run these commands
|
||||
@@ -96,7 +96,7 @@ Restart your Khoj server after the first run to ensure all settings are applied
|
||||
2. Configure the environment variables in the `docker-compose.yml`
|
||||
- Set `KHOJ_ADMIN_PASSWORD`, `KHOJ_DJANGO_SECRET_KEY` (and optionally the `KHOJ_ADMIN_EMAIL`) to something secure. This allows you to customize Khoj later via the admin panel.
|
||||
- Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `GEMINI_API_KEY` to your API key if you want to use OpenAI, Anthropic or Gemini commercial chat models respectively.
|
||||
- Uncomment `OPENAI_API_BASE` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio).
|
||||
- Uncomment `OPENAI_BASE_URL` to use [Ollama](/advanced/ollama) running on your host machine. Or set it to the URL of your OpenAI compatible API like vLLM or [LMStudio](/advanced/lmstudio).
|
||||
3. Start Khoj by running the following command in the same directory as your docker-compose.yml file.
|
||||
```shell
|
||||
cd ~/.khoj
|
||||
|
||||
@@ -234,7 +234,7 @@ def configure_server(
|
||||
|
||||
if ConversationAdapters.has_valid_ai_model_api():
|
||||
ai_model_api = ConversationAdapters.get_ai_model_api()
|
||||
state.openai_client = openai.OpenAI(api_key=ai_model_api.api_key)
|
||||
state.openai_client = openai.OpenAI(api_key=ai_model_api.api_key, base_url=ai_model_api.api_base_url)
|
||||
|
||||
# Initialize Search Models from Config and initialize content
|
||||
try:
|
||||
|
||||
@@ -43,14 +43,14 @@ def initialization(interactive: bool = True):
|
||||
"🗣️ Configure chat models available to your server. You can always update these at /server/admin using your admin account"
|
||||
)
|
||||
|
||||
openai_api_base = os.getenv("OPENAI_API_BASE")
|
||||
provider = "Ollama" if openai_api_base and openai_api_base.endswith(":11434/v1/") else "OpenAI"
|
||||
openai_api_key = os.getenv("OPENAI_API_KEY", "placeholder" if openai_api_base else None)
|
||||
openai_base_url = os.getenv("OPENAI_BASE_URL")
|
||||
provider = "Ollama" if openai_base_url and openai_base_url.endswith(":11434/v1/") else "OpenAI"
|
||||
openai_api_key = os.getenv("OPENAI_API_KEY", "placeholder" if openai_base_url else None)
|
||||
default_chat_models = default_openai_chat_models
|
||||
if openai_api_base:
|
||||
if openai_base_url:
|
||||
# Get available chat models from OpenAI compatible API
|
||||
try:
|
||||
openai_client = openai.OpenAI(api_key=openai_api_key, base_url=openai_api_base)
|
||||
openai_client = openai.OpenAI(api_key=openai_api_key, base_url=openai_base_url)
|
||||
default_chat_models = [model.id for model in openai_client.models.list()]
|
||||
# Put the available default OpenAI models at the top
|
||||
valid_default_models = [model for model in default_openai_chat_models if model in default_chat_models]
|
||||
@@ -66,7 +66,7 @@ def initialization(interactive: bool = True):
|
||||
ChatModel.ModelType.OPENAI,
|
||||
default_chat_models,
|
||||
default_api_key=openai_api_key,
|
||||
api_base_url=openai_api_base,
|
||||
api_base_url=openai_base_url,
|
||||
vision_enabled=True,
|
||||
is_offline=False,
|
||||
interactive=interactive,
|
||||
|
||||
Reference in New Issue
Block a user