mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-02 21:19:12 +00:00
Truncate message logs to below max supported prompt size by model
- Use tiktoken to count tokens for chat models
- Make the number of conversation turns added to the prompt configurable via a method argument to the generate_chatml_messages_with_context method
This commit is contained in:
@@ -41,6 +41,7 @@ dependencies = [
    "fastapi == 0.77.1",
    "jinja2 == 3.1.2",
    "openai >= 0.27.0",
+   "tiktoken >= 0.3.0",
    "pillow == 9.3.0",
    "pydantic == 1.9.1",
    "pyqt6 == 6.3.1",
Reference in New Issue
Block a user