---
# Application configuration: declares which content is indexed (content-type),
# which models power each search mode (search-type), and processor settings.
# Optional stanzas are left commented out; uncomment and adjust paths to enable.
content-type:
  # Org-mode notes: the only content source currently enabled.
  org:
    input-files: null
    input-filter: "/data/notes/*.org"
    compressed-jsonl: "/data/generated/.notes.json.gz"
    embeddings-file: "/data/generated/.note_embeddings.pt"

  # Beancount ledger search (disabled).
  ledger:
    # input-files: null
    # input-filter: "/data/ledger/*.beancount"
    # compressed-jsonl: "/data/generated/.transactions.jsonl.gz"
    # embeddings-file: "/data/generated/.transaction_embeddings.pt"

  # Image search (disabled).
  image:
    # input-directory: "/data/images/"
    # embeddings-file: "/data/generated/.image_embeddings.pt"
    # batch-size: 50
    # use-xmp-metadata: "no"  # quoted on purpose: consumer expects the string "no", not boolean false

  # Music/song notes search (disabled).
  music:
    # input-files: null
    # input-filter: "/data/music/*.org"
    # compressed-jsonl: "/data/generated/.songs.jsonl.gz"
    # embeddings-file: "/data/generated/.song_embeddings.pt"

search-type:
  # Symmetric search: query and documents are similar in length/phrasing.
  symmetric:
    encoder: "sentence-transformers/paraphrase-MiniLM-L6-v2"
    cross-encoder: "cross-encoder/ms-marco-MiniLM-L-6-v2"
    model_directory: "/data/models/.symmetric"

  # Asymmetric search: short queries against longer documents.
  asymmetric:
    encoder: "sentence-transformers/msmarco-MiniLM-L-6-v3"
    cross-encoder: "cross-encoder/ms-marco-MiniLM-L-6-v2"
    model_directory: "/data/models/.asymmetric"

  # Image search encoder (CLIP).
  image:
    encoder: "clip-ViT-B-32"
    model_directory: "/data/models/.image_encoder"

processor:
  conversation:
    # NOTE(review): keep the API key out of version control — inject it at
    # deploy time rather than committing a value here.
    openai-api-key: null
    conversation-logfile: "/data/conversation/.conversation_logs.json"