Parse markdown parent entries as a single entry if they fit within max tokens

These changes improve context available to the search model.
Specifically, this should improve entry context from short knowledge trees,
that is, knowledge bases with sparse, short heading/entry trees.

Previously we'd always split markdown files by headings, even if a
parent entry was small enough to fit entirely within the max token
limits of the search model. This used to reduce the context available
to the search model to select appropriate entries for a query,
especially from short entry trees

Revert back to using regex to parse through the markdown file instead of
using MarkdownHeaderTextSplitter. It was easier to implement the
logical split using regexes rather than bend MarkdownHeaderTextSplitter
to implement it.
- DFS traverse the markdown knowledge tree, prefix ancestry to each entry
This commit is contained in:
Debanjum Singh Solanky
2024-02-10 23:03:30 +05:30
parent 982ac1859c
commit db2581459f
2 changed files with 168 additions and 97 deletions

View File

@@ -1,10 +1,9 @@
import logging import logging
import re import re
from pathlib import Path from pathlib import Path
from typing import List, Tuple from typing import Dict, List, Tuple
import urllib3 import urllib3
from langchain.text_splitter import MarkdownHeaderTextSplitter
from khoj.database.models import Entry as DbEntry from khoj.database.models import Entry as DbEntry
from khoj.database.models import KhojUser from khoj.database.models import KhojUser
@@ -80,37 +79,54 @@ class MarkdownToEntries(TextToEntries):
entries: List[str], entries: List[str],
entry_to_file_map: List[Tuple[str, Path]], entry_to_file_map: List[Tuple[str, Path]],
max_tokens=256, max_tokens=256,
ancestry: Dict[int, str] = {},
): ):
if len(TextToEntries.tokenizer(markdown_content)) <= max_tokens: # Prepend the markdown section's heading ancestry
entry_to_file_map += [(markdown_content, markdown_file)] ancestry_string = "\n".join([f"{'#' * key} {ancestry[key]}" for key in sorted(ancestry.keys())])
entries.extend([markdown_content]) markdown_content_with_ancestry = f"{ancestry_string}{markdown_content}"
# If content is small or content has no children headings, save it as a single entry
if len(TextToEntries.tokenizer(markdown_content_with_ancestry)) <= max_tokens or not re.search(
rf"^#{{{len(ancestry)+1},}}\s", markdown_content, re.MULTILINE
):
entry_to_file_map += [(markdown_content_with_ancestry, markdown_file)]
entries.extend([markdown_content_with_ancestry])
return entries, entry_to_file_map return entries, entry_to_file_map
headers_to_split_on = [("#", "1"), ("##", "2"), ("###", "3"), ("####", "4"), ("#####", "5"), ("######", "6")] # Split by next heading level present in the entry
reversed_headers_to_split_on = list(reversed(headers_to_split_on)) next_heading_level = len(ancestry)
markdown_entries_per_file: List[str] = [] sections: List[str] = []
previous_section_metadata, current_section_metadata = None, None while len(sections) < 2:
next_heading_level += 1
sections = re.split(rf"(\n|^)(?=[#]{{{next_heading_level}}} .+\n?)", markdown_content, re.MULTILINE)
splitter = MarkdownHeaderTextSplitter(headers_to_split_on, strip_headers=False, return_each_line=True) for section in sections:
for section in splitter.split_text(markdown_content): # Skip empty sections
current_section_metadata = section.metadata.copy() if section.strip() == "":
# Append the section's content to the last entry if the metadata is the same continue
if previous_section_metadata == current_section_metadata:
markdown_entries_per_file[-1] = f"{markdown_entries_per_file[-1]}\n{section.page_content}" # Extract the section body and (when present) the heading
# Insert new entry with it's heading ancestry, if the section is under a new heading current_ancestry = ancestry.copy()
first_line = [line for line in section.split("\n") if line.strip() != ""][0]
if re.search(rf"^#{{{next_heading_level}}} ", first_line):
# Extract the section body without the heading
current_section_body = "\n".join(section.split(first_line)[1:])
# Parse the section heading into current section ancestry
current_section_title = first_line[next_heading_level:].strip()
current_ancestry[next_heading_level] = current_section_title
else: else:
# Drop the current heading from the metadata. It is already in the section content current_section_body = section
if section.metadata:
section.metadata.pop(max(section.metadata)) # Recurse down children of the current entry
# Prepend the markdown section's heading ancestry MarkdownToEntries.process_single_markdown_file(
for heading in reversed_headers_to_split_on: current_section_body,
if heading[1] in section.metadata: markdown_file,
section.page_content = f"{heading[0]} {section.metadata[heading[1]]}\n{section.page_content}" entries,
previous_section_metadata = current_section_metadata entry_to_file_map,
markdown_entries_per_file += [section.page_content] max_tokens,
current_ancestry,
)
entry_to_file_map += zip(markdown_entries_per_file, [markdown_file] * len(markdown_entries_per_file))
entries.extend(markdown_entries_per_file)
return entries, entry_to_file_map return entries, entry_to_file_map
@staticmethod @staticmethod

View File

@@ -76,6 +76,131 @@ def test_extract_multiple_markdown_entries(tmp_path):
assert all([tmp_path.stem in entry.compiled for entry in entries]) assert all([tmp_path.stem in entry.compiled for entry in entries])
def test_extract_entries_with_different_level_headings(tmp_path):
"Extract markdown entries with different level headings."
# Arrange
entry = f"""
# Heading 1
## Sub-Heading 1.1
# Heading 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)
# Assert
assert len(entries) == 2
assert entries[0].raw == "# Heading 1\n## Sub-Heading 1.1", "Ensure entry includes heading ancestory"
assert entries[1].raw == "# Heading 2\n"
def test_extract_entries_with_non_incremental_heading_levels(tmp_path):
"Extract markdown entries when deeper child level before shallower child level."
# Arrange
entry = f"""
# Heading 1
#### Sub-Heading 1.1
## Sub-Heading 1.2
# Heading 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)
# Assert
assert len(entries) == 3
assert entries[0].raw == "# Heading 1\n#### Sub-Heading 1.1", "Ensure entry includes heading ancestory"
assert entries[1].raw == "# Heading 1\n## Sub-Heading 1.2", "Ensure entry includes heading ancestory"
assert entries[2].raw == "# Heading 2\n"
def test_extract_entries_with_text_before_headings(tmp_path):
"Extract markdown entries with some text before any headings."
# Arrange
entry = f"""
Text before headings
# Heading 1
body line 1
## Heading 2
body line 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)
# Assert
assert len(entries) == 3
assert entries[0].raw == "\nText before headings"
assert entries[1].raw == "# Heading 1\nbody line 1"
assert entries[2].raw == "# Heading 1\n## Heading 2\nbody line 2\n", "Ensure raw entry includes heading ancestory"
def test_parse_markdown_file_into_single_entry_if_small(tmp_path):
"Parse markdown file into single entry if it fits within the token limits."
# Arrange
entry = f"""
# Heading 1
body line 1
## Subheading 1.1
body line 1.1
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=12)
# Assert
assert len(entries) == 1
assert entries[0].raw == entry
def test_parse_markdown_entry_with_children_as_single_entry_if_small(tmp_path):
"Parse markdown entry with child headings as single entry if it fits within the tokens limits."
# Arrange
entry = f"""
# Heading 1
body line 1
## Subheading 1.1
body line 1.1
# Heading 2
body line 2
## Subheading 2.1
longer body line 2.1
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=12)
# Assert
assert len(entries) == 3
assert (
entries[0].raw == "# Heading 1\nbody line 1\n## Subheading 1.1\nbody line 1.1"
), "First entry includes children headings"
assert entries[1].raw == "# Heading 2\nbody line 2", "Second entry does not include children headings"
assert (
entries[2].raw == "# Heading 2\n## Subheading 2.1\nlonger body line 2.1\n"
), "Third entry is second entries child heading"
def test_get_markdown_files(tmp_path): def test_get_markdown_files(tmp_path):
"Ensure Markdown files specified via input-filter, input-files extracted" "Ensure Markdown files specified via input-filter, input-files extracted"
# Arrange # Arrange
@@ -113,76 +238,6 @@ def test_get_markdown_files(tmp_path):
assert set(extracted_org_files.keys()) == expected_files assert set(extracted_org_files.keys()) == expected_files
def test_extract_entries_with_different_level_headings(tmp_path):
"Extract markdown entries with different level headings."
# Arrange
entry = f"""
# Heading 1
## Sub-Heading 1.1
# Heading 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)
# Assert
assert len(entries) == 3
assert entries[0].raw == "# Heading 1"
assert entries[1].raw == "# Heading 1\n## Sub-Heading 1.1", "Ensure entry includes heading ancestory"
assert entries[2].raw == "# Heading 2"
def test_extract_entries_with_text_before_headings(tmp_path):
"Extract markdown entries with some text before any headings."
# Arrange
entry = f"""
Text before headings
# Heading 1
body line 1
## Heading 2
body line 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)
# Assert
assert len(entries) == 3
assert entries[0].raw == "Text before headings"
assert entries[1].raw == "# Heading 1\nbody line 1"
assert entries[2].raw == "# Heading 1\n## Heading 2\nbody line 2", "Ensure raw entry includes heading ancestory"
def test_parse_markdown_file_into_single_entry_if_small(tmp_path):
"Parse markdown file into single entry if it fits within the token limits."
# Arrange
entry = f"""
# Heading 1
body line 1
## Subheading 1.1
body line 1.1
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=12)
# Assert
assert len(entries) == 1
assert entries[0].raw == entry
# Helper Functions # Helper Functions
def create_file(tmp_path: Path, entry=None, filename="test.md"): def create_file(tmp_path: Path, entry=None, filename="test.md"):
markdown_file = tmp_path / filename markdown_file = tmp_path / filename