mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-02 13:18:18 +00:00
Default to gpt-4o-mini instead of gpt-3.5-turbo in tests, func args
GPT-4o-mini is cheaper and smarter, and can hold more context than GPT-3.5-turbo. In production we also default to gpt-4o-mini, so it makes sense to upgrade the defaults and tests to work with it.
This commit is contained in:
@@ -192,7 +192,7 @@ def offline_agent():
|
||||
@pytest.mark.django_db
|
||||
@pytest.fixture
|
||||
def openai_agent():
|
||||
chat_model = ChatModelOptionsFactory(chat_model="gpt-3.5-turbo", model_type="openai")
|
||||
chat_model = ChatModelOptionsFactory(chat_model="gpt-4o-mini", model_type="openai")
|
||||
return Agent.objects.create(
|
||||
name="Accountant",
|
||||
chat_model=chat_model,
|
||||
@@ -301,7 +301,7 @@ def chat_client_builder(search_config, user, index_content=True, require_auth=Fa
|
||||
|
||||
# Initialize Processor from Config
|
||||
if os.getenv("OPENAI_API_KEY"):
|
||||
chat_model = ChatModelOptionsFactory(chat_model="gpt-3.5-turbo", model_type="openai")
|
||||
chat_model = ChatModelOptionsFactory(chat_model="gpt-4o-mini", model_type="openai")
|
||||
chat_model.openai_config = OpenAIProcessorConversationConfigFactory()
|
||||
UserConversationProcessorConfigFactory(user=user, setting=chat_model)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ from khoj.processor.conversation import utils
|
||||
|
||||
class TestTruncateMessage:
|
||||
max_prompt_size = 10
|
||||
model_name = "gpt-3.5-turbo"
|
||||
model_name = "gpt-4o-mini"
|
||||
encoder = tiktoken.encoding_for_model(model_name)
|
||||
|
||||
def test_truncate_message_all_small(self):
|
||||
|
||||
Reference in New Issue
Block a user