Mirror of https://github.com/khoaliber/khoj.git (synced 2026-03-09 13:25:11 +00:00).
Commit: Add a log line when the offline model migration script runs.
This commit is contained in:
@@ -1,13 +1,18 @@
 import os
+import logging
 
 from khoj.utils.yaml import load_config_from_file, save_config_to_file
 
+logger = logging.getLogger(__name__)
+
 
 def migrate_offline_model(args):
     raw_config = load_config_from_file(args.config_file)
     version = raw_config.get("version")
 
     if version == "0.10.0" or version == None:
+        logger.info(f"Migrating offline model used for version {version} to latest version for {args.version_no}")
+
         # If the user has downloaded the offline model, remove it from the cache.
         offline_model_path = os.path.expanduser("~/.cache/gpt4all/llama-2-7b-chat.ggmlv3.q4_K_S.bin")
         if os.path.exists(offline_model_path):
Reference in New Issue
Block a user