Rename GPT4AllChatProcessor* to OfflineChatProcessor Config, Model

This commit is contained in:
Debanjum Singh Solanky
2024-03-20 20:55:06 +05:30
parent 2a0b943bb4
commit 1ebd5c3648
6 changed files with 21 additions and 25 deletions

View File

@@ -41,7 +41,7 @@ from khoj.search_filter.date_filter import DateFilter
from khoj.search_filter.file_filter import FileFilter
from khoj.search_filter.word_filter import WordFilter
from khoj.utils import state
-from khoj.utils.config import GPT4AllProcessorModel
+from khoj.utils.config import OfflineChatProcessorModel
from khoj.utils.helpers import generate_random_name, is_none_or_empty
@@ -610,8 +610,8 @@ class ConversationAdapters:
conversation_config = ConversationAdapters.get_default_conversation_config()
if offline_chat_config and offline_chat_config.enabled and conversation_config.model_type == "offline":
-if state.gpt4all_processor_config is None or state.gpt4all_processor_config.loaded_model is None:
-state.gpt4all_processor_config = GPT4AllProcessorModel(conversation_config.chat_model)
+if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
+state.offline_chat_processor_config = OfflineChatProcessorModel(conversation_config.chat_model)
return conversation_config

View File

@@ -35,7 +35,7 @@ from khoj.search_filter.file_filter import FileFilter
from khoj.search_filter.word_filter import WordFilter
from khoj.search_type import image_search, text_search
from khoj.utils import constants, state
-from khoj.utils.config import GPT4AllProcessorModel
+from khoj.utils.config import OfflineChatProcessorModel
from khoj.utils.helpers import ConversationCommand, timer
from khoj.utils.rawconfig import LocationData, SearchResponse
from khoj.utils.state import SearchType
@@ -318,10 +318,10 @@ async def extract_references_and_questions(
using_offline_chat = True
default_offline_llm = await ConversationAdapters.get_default_offline_llm()
chat_model = default_offline_llm.chat_model
-if state.gpt4all_processor_config is None:
-state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
+if state.offline_chat_processor_config is None:
+state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model=chat_model)
-loaded_model = state.gpt4all_processor_config.loaded_model
+loaded_model = state.offline_chat_processor_config.loaded_model
inferred_queries = extract_questions_offline(
defiltered_query,

View File

@@ -32,7 +32,7 @@ from khoj.processor.conversation.utils import (
)
from khoj.routers.storage import upload_image
from khoj.utils import state
-from khoj.utils.config import GPT4AllProcessorModel
+from khoj.utils.config import OfflineChatProcessorModel
from khoj.utils.helpers import (
ConversationCommand,
is_none_or_empty,
@@ -68,9 +68,9 @@ async def is_ready_to_chat(user: KhojUser):
if has_offline_config and user_conversation_config and user_conversation_config.model_type == "offline":
chat_model = user_conversation_config.chat_model
-if state.gpt4all_processor_config is None:
+if state.offline_chat_processor_config is None:
logger.info("Loading Offline Chat Model...")
-state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
+state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model=chat_model)
return True
ready = has_openai_config or has_offline_config
@@ -373,10 +373,10 @@ async def send_message_to_model_wrapper(
chat_model = conversation_config.chat_model
if conversation_config.model_type == "offline":
-if state.gpt4all_processor_config is None or state.gpt4all_processor_config.loaded_model is None:
-state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model)
+if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
+state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model)
-loaded_model = state.gpt4all_processor_config.loaded_model
+loaded_model = state.offline_chat_processor_config.loaded_model
truncated_messages = generate_chatml_messages_with_context(
user_message=message, system_message=system_message, model_name=chat_model, loaded_model=loaded_model
)
@@ -438,10 +438,10 @@ def generate_chat_response(
conversation_config = ConversationAdapters.get_valid_conversation_config(user)
if conversation_config.model_type == "offline":
-if state.gpt4all_processor_config is None or state.gpt4all_processor_config.loaded_model is None:
-state.gpt4all_processor_config = GPT4AllProcessorModel(conversation_config.chat_model)
+if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
+state.offline_chat_processor_config = OfflineChatProcessorModel(conversation_config.chat_model)
-loaded_model = state.gpt4all_processor_config.loaded_model
+loaded_model = state.offline_chat_processor_config.loaded_model
chat_response = converse_offline(
references=compiled_references,
online_results=online_results,

View File

@@ -70,11 +70,11 @@ class SearchModels:
@dataclass
-class GPT4AllProcessorConfig:
+class OfflineChatProcessorConfig:
loaded_model: Union[Any, None] = None
-class GPT4AllProcessorModel:
+class OfflineChatProcessorModel:
def __init__(self, chat_model: str = "NousResearch/Hermes-2-Pro-Mistral-7B-GGUF"):
self.chat_model = chat_model
self.loaded_model = None

View File

@@ -32,17 +32,13 @@ def initialization():
)
try:
-# Note: gpt4all package is not available on all devices.
-# So ensure gpt4all package is installed before continuing this step.
-import gpt4all
use_offline_model = input("Use offline chat model? (y/n): ")
if use_offline_model == "y":
logger.info("🗣️ Setting up offline chat model")
OfflineChatProcessorConversationConfig.objects.create(enabled=True)
offline_chat_model = input(
-f"Enter the offline chat model you want to use, See GPT4All for supported models (default: {default_offline_chat_model}): "
+f"Enter the offline chat model you want to use. See HuggingFace for available GGUF models (default: {default_offline_chat_model}): "
)
if offline_chat_model == "":
ChatModelOptions.objects.create(

View File

@@ -9,7 +9,7 @@ from whisper import Whisper
from khoj.processor.embeddings import CrossEncoderModel, EmbeddingsModel
from khoj.utils import config as utils_config
-from khoj.utils.config import ContentIndex, GPT4AllProcessorModel, SearchModels
+from khoj.utils.config import ContentIndex, OfflineChatProcessorModel, SearchModels
from khoj.utils.helpers import LRU, get_device
from khoj.utils.rawconfig import FullConfig
@@ -20,7 +20,7 @@ embeddings_model: Dict[str, EmbeddingsModel] = None
cross_encoder_model: Dict[str, CrossEncoderModel] = None
content_index = ContentIndex()
openai_client: OpenAI = None
-gpt4all_processor_config: GPT4AllProcessorModel = None
+offline_chat_processor_config: OfflineChatProcessorModel = None
whisper_model: Whisper = None
config_file: Path = None
verbose: int = 0