Create wrapper functions to get entries from org, md, pdf & text files

- Convert extract_org_entries function to actually extract org entries
  Previously it was extracting intermediary org-node objects instead
  Now it extracts the org-node objects from files and converts them
  into entries
- Create a separate, new extract_org_nodes function to extract org-node
  objects from files
- Similarly, create wrapper functions for md, pdf, plaintext to entries

- Update org, md, pdf, plaintext to entries tests to use the new
  simplified wrapper functions to extract entries
This commit is contained in:
Debanjum Singh Solanky
2024-02-09 16:04:41 +05:30
parent f01a12b1d2
commit 28105ee027
8 changed files with 71 additions and 94 deletions

View File

@@ -32,10 +32,8 @@ class MarkdownToEntries(TextToEntries):
deletion_file_names = None
# Extract Entries from specified Markdown files
with timer("Parse entries from Markdown files into dictionaries", logger):
current_entries = MarkdownToEntries.convert_markdown_entries_to_maps(
*MarkdownToEntries.extract_markdown_entries(files)
)
with timer("Extract entries from specified Markdown files", logger):
current_entries = MarkdownToEntries.extract_markdown_entries(files)
# Split entries by max tokens supported by model
with timer("Split entries by max token size supported by model", logger):
@@ -57,13 +55,10 @@ class MarkdownToEntries(TextToEntries):
return num_new_embeddings, num_deleted_embeddings
@staticmethod
def extract_markdown_entries(markdown_files):
def extract_markdown_entries(markdown_files) -> List[Entry]:
"Extract entries by heading from specified Markdown files"
# Regex to extract Markdown Entries by Heading
entries = []
entry_to_file_map = []
entries: List[str] = []
entry_to_file_map: List[Tuple[str, Path]] = []
for markdown_file in markdown_files:
try:
markdown_content = markdown_files[markdown_file]
@@ -71,18 +66,19 @@ class MarkdownToEntries(TextToEntries):
markdown_content, markdown_file, entries, entry_to_file_map
)
except Exception as e:
logger.warning(f"Unable to process file: {markdown_file}. This file will not be indexed.")
logger.warning(e, exc_info=True)
logger.warning(
f"Unable to process file: {markdown_file}. This file will not be indexed.\n{e}", exc_info=True
)
return entries, dict(entry_to_file_map)
return MarkdownToEntries.convert_markdown_entries_to_maps(entries, dict(entry_to_file_map))
@staticmethod
def process_single_markdown_file(
markdown_content: str, markdown_file: Path, entries: List, entry_to_file_map: List
markdown_content: str, markdown_file: Path, entries: List[str], entry_to_file_map: List[Tuple[str, Path]]
):
markdown_heading_regex = r"^#"
markdown_entries_per_file = []
markdown_entries_per_file: List[str] = []
any_headings = re.search(markdown_heading_regex, markdown_content, flags=re.MULTILINE)
for entry in re.split(markdown_heading_regex, markdown_content, flags=re.MULTILINE):
# Add heading level as the regex split removed it from entries with headings
@@ -98,7 +94,7 @@ class MarkdownToEntries(TextToEntries):
@staticmethod
def convert_markdown_entries_to_maps(parsed_entries: List[str], entry_to_file_map) -> List[Entry]:
"Convert each Markdown entries into a dictionary"
entries = []
entries: List[Entry] = []
for parsed_entry in parsed_entries:
raw_filename = entry_to_file_map[parsed_entry]

View File

@@ -21,9 +21,6 @@ class OrgToEntries(TextToEntries):
def process(
self, files: dict[str, str] = None, full_corpus: bool = True, user: KhojUser = None, regenerate: bool = False
) -> Tuple[int, int]:
# Extract required fields from config
index_heading_entries = False
if not full_corpus:
deletion_file_names = set([file for file in files if files[file] == ""])
files_to_process = set(files) - deletion_file_names
@@ -32,11 +29,8 @@ class OrgToEntries(TextToEntries):
deletion_file_names = None
# Extract Entries from specified Org files
with timer("Parse entries from org files into OrgNode objects", logger):
entry_nodes, file_to_entries = self.extract_org_entries(files)
with timer("Convert OrgNodes into list of entries", logger):
current_entries = self.convert_org_nodes_to_entries(entry_nodes, file_to_entries, index_heading_entries)
with timer("Extract entries from specified Org files", logger):
current_entries = self.extract_org_entries(files)
with timer("Split entries by max token size supported by model", logger):
current_entries = self.split_entries_by_max_tokens(current_entries, max_tokens=256)
@@ -57,9 +51,18 @@ class OrgToEntries(TextToEntries):
return num_new_embeddings, num_deleted_embeddings
@staticmethod
def extract_org_entries(org_files: dict[str, str]):
def extract_org_entries(org_files: dict[str, str], index_heading_entries: bool = False):
"Extract entries from specified Org files"
entries = []
with timer("Parse entries from org files into OrgNode objects", logger):
entry_nodes, file_to_entries = OrgToEntries.extract_org_nodes(org_files)
with timer("Convert OrgNodes into list of entries", logger):
return OrgToEntries.convert_org_nodes_to_entries(entry_nodes, file_to_entries, index_heading_entries)
@staticmethod
def extract_org_nodes(org_files: dict[str, str]):
"Extract org nodes from specified org files"
entry_nodes = []
entry_to_file_map: List[Tuple[orgnode.Orgnode, str]] = []
for org_file in org_files:
filename = org_file
@@ -67,16 +70,17 @@ class OrgToEntries(TextToEntries):
try:
org_file_entries = orgnode.makelist(file, filename)
entry_to_file_map += zip(org_file_entries, [org_file] * len(org_file_entries))
entries.extend(org_file_entries)
entry_nodes.extend(org_file_entries)
except Exception as e:
logger.warning(f"Unable to process file: {org_file}. This file will not be indexed.")
logger.warning(e, exc_info=True)
return entries, dict(entry_to_file_map)
return entry_nodes, dict(entry_to_file_map)
@staticmethod
def process_single_org_file(org_content: str, org_file: str, entries: List, entry_to_file_map: List):
# Process single org file. The org parser assumes that the file is a single org file and reads it from a buffer. We'll split the raw conetnt of this file by new line to mimic the same behavior.
# Process single org file. The org parser assumes that the file is a single org file and reads it from a buffer.
# We'll split the raw content of this file by new line to mimic the same behavior.
try:
org_file_entries = orgnode.makelist(org_content, org_file)
entry_to_file_map += zip(org_file_entries, [org_file] * len(org_file_entries))

View File

@@ -32,8 +32,8 @@ class PdfToEntries(TextToEntries):
deletion_file_names = None
# Extract Entries from specified Pdf files
with timer("Parse entries from PDF files into dictionaries", logger):
current_entries = PdfToEntries.convert_pdf_entries_to_maps(*PdfToEntries.extract_pdf_entries(files))
with timer("Extract entries from specified PDF files", logger):
current_entries = PdfToEntries.extract_pdf_entries(files)
# Split entries by max tokens supported by model
with timer("Split entries by max token size supported by model", logger):
@@ -55,11 +55,11 @@ class PdfToEntries(TextToEntries):
return num_new_embeddings, num_deleted_embeddings
@staticmethod
def extract_pdf_entries(pdf_files):
def extract_pdf_entries(pdf_files) -> List[Entry]:
"""Extract entries by page from specified PDF files"""
entries = []
entry_to_location_map = []
entries: List[str] = []
entry_to_location_map: List[Tuple[str, str]] = []
for pdf_file in pdf_files:
try:
# Write the PDF file to a temporary file, as it is stored in byte format in the pdf_file object and the PDF Loader expects a file path
@@ -83,7 +83,7 @@ class PdfToEntries(TextToEntries):
if os.path.exists(f"{tmp_file}"):
os.remove(f"{tmp_file}")
return entries, dict(entry_to_location_map)
return PdfToEntries.convert_pdf_entries_to_maps(entries, dict(entry_to_location_map))
@staticmethod
def convert_pdf_entries_to_maps(parsed_entries: List[str], entry_to_file_map) -> List[Entry]:

View File

@@ -42,8 +42,8 @@ class PlaintextToEntries(TextToEntries):
logger.warning(e, exc_info=True)
# Extract Entries from specified plaintext files
with timer("Parse entries from plaintext files", logger):
current_entries = PlaintextToEntries.convert_plaintext_entries_to_maps(files)
with timer("Parse entries from specified Plaintext files", logger):
current_entries = PlaintextToEntries.extract_plaintext_entries(files)
# Split entries by max tokens supported by model
with timer("Split entries by max token size supported by model", logger):
@@ -74,7 +74,7 @@ class PlaintextToEntries(TextToEntries):
return soup.get_text(strip=True, separator="\n")
@staticmethod
def convert_plaintext_entries_to_maps(entry_to_file_map: dict) -> List[Entry]:
def extract_plaintext_entries(entry_to_file_map: dict[str, str]) -> List[Entry]:
"Convert each plaintext entries into a dictionary"
entries = []
for file, entry in entry_to_file_map.items():