mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-06 21:29:12 +00:00
Make offline chat model user configurable
Only Llama v2 models supported by GPT4All will work, given that the prompt structure is not currently configurable
This commit is contained in:
@@ -84,7 +84,7 @@ class SearchModels:
|
||||
|
||||
@dataclass
|
||||
class GPT4AllProcessorConfig:
|
||||
chat_model: Optional[str] = "llama-2-7b-chat.ggmlv3.q4_0.bin"
|
||||
chat_model: Optional[str] = None
|
||||
loaded_model: Union[Any, None] = None
|
||||
|
||||
|
||||
@@ -95,6 +95,7 @@ class ConversationProcessorConfigModel:
|
||||
):
|
||||
self.openai_model = conversation_config.openai
|
||||
self.gpt4all_model = GPT4AllProcessorConfig()
|
||||
self.gpt4all_model.chat_model = conversation_config.offline_chat_model
|
||||
self.enable_offline_chat = conversation_config.enable_offline_chat
|
||||
self.conversation_logfile = Path(conversation_config.conversation_logfile)
|
||||
self.chat_session: List[str] = []
|
||||
|
||||
@@ -95,6 +95,7 @@ class ConversationProcessorConfig(ConfigBase):
|
||||
conversation_logfile: Path
|
||||
openai: Optional[OpenAIProcessorConfig]
|
||||
enable_offline_chat: Optional[bool] = False
|
||||
offline_chat_model: Optional[str] = "llama-2-7b-chat.ggmlv3.q4_0.bin"
|
||||
|
||||
|
||||
class ProcessorConfig(ConfigBase):
|
||||
|
||||
Reference in New Issue
Block a user