Setup File Filter for Markdown and Ledger content types

- Pass the file associated with each entry to the Markdown and Beancount to JSONL converters
- Add File, Word, Date Filters to Ledger, Markdown Types
  - Word, Date Filters were accidentally removed from the above types yesterday
  - File Filter is the only filter that newly got added
This commit is contained in:
Debanjum Singh Solanky
2022-09-06 15:27:31 +03:00
parent 94cf3e97f3
commit 490157cafa
5 changed files with 62 additions and 28 deletions

View File

@@ -28,10 +28,10 @@ def beancount_to_jsonl(beancount_files, beancount_file_filter, output_file):
beancount_files = get_beancount_files(beancount_files, beancount_file_filter)
# Extract Entries from specified Beancount files
entries = extract_beancount_entries(beancount_files)
entries, transaction_to_file_map = extract_beancount_entries(beancount_files)
# Process Each Entry from All Notes Files
jsonl_data = convert_beancount_entries_to_jsonl(entries)
jsonl_data = convert_beancount_entries_to_jsonl(entries, transaction_to_file_map)
# Compress JSONL formatted Data
if output_file.suffix == ".gz":
@@ -74,22 +74,24 @@ def extract_beancount_entries(beancount_files):
empty_newline = f'^[{empty_escape_sequences}]*$'
entries = []
transaction_to_file_map = []
for beancount_file in beancount_files:
with open(beancount_file) as f:
ledger_content = f.read()
entries.extend([entry.strip(empty_escape_sequences)
transactions_per_file = [entry.strip(empty_escape_sequences)
for entry
in re.split(empty_newline, ledger_content, flags=re.MULTILINE)
if re.match(transaction_regex, entry)])
return entries
if re.match(transaction_regex, entry)]
transaction_to_file_map += [beancount_file]*len(transactions_per_file)
entries.extend(transactions_per_file)
return entries, transaction_to_file_map
def convert_beancount_entries_to_jsonl(entries):
def convert_beancount_entries_to_jsonl(entries, transaction_to_file_map):
"Convert each Beancount transaction to JSON and collate as JSONL"
jsonl = ''
for entry in entries:
entry_dict = {'compiled': entry, 'raw': entry}
for entry_id, entry in enumerate(entries):
entry_dict = {'compiled': entry, 'raw': entry, 'file': f'{transaction_to_file_map[entry_id]}'}
# Convert Dictionary to JSON and Append to JSONL string
jsonl += f'{json.dumps(entry_dict, ensure_ascii=False)}\n'

View File

@@ -28,10 +28,10 @@ def markdown_to_jsonl(markdown_files, markdown_file_filter, output_file):
markdown_files = get_markdown_files(markdown_files, markdown_file_filter)
# Extract Entries from specified Markdown files
entries = extract_markdown_entries(markdown_files)
entries, entry_to_file_map = extract_markdown_entries(markdown_files)
# Process Each Entry from All Notes Files
jsonl_data = convert_markdown_entries_to_jsonl(entries)
jsonl_data = convert_markdown_entries_to_jsonl(entries, entry_to_file_map)
# Compress JSONL formatted Data
if output_file.suffix == ".gz":
@@ -74,21 +74,24 @@ def extract_markdown_entries(markdown_files):
markdown_heading_regex = r'^#'
entries = []
entry_to_file_map = []
for markdown_file in markdown_files:
with open(markdown_file) as f:
markdown_content = f.read()
entries.extend([f'#{entry.strip(empty_escape_sequences)}'
markdown_entries_per_file = [f'#{entry.strip(empty_escape_sequences)}'
for entry
in re.split(markdown_heading_regex, markdown_content, flags=re.MULTILINE)])
in re.split(markdown_heading_regex, markdown_content, flags=re.MULTILINE)]
entry_to_file_map += [markdown_file]*len(markdown_entries_per_file)
entries.extend(markdown_entries_per_file)
return entries
return entries, entry_to_file_map
def convert_markdown_entries_to_jsonl(entries):
def convert_markdown_entries_to_jsonl(entries, entry_to_file_map):
"Convert each Markdown entries to JSON and collate as JSONL"
jsonl = ''
for entry in entries:
entry_dict = {'compiled': entry, 'raw': entry}
for entry_id, entry in enumerate(entries):
entry_dict = {'compiled': entry, 'raw': entry, 'file': f'{entry_to_file_map[entry_id]}'}
# Convert Dictionary to JSON and Append to JSONL string
jsonl += f'{json.dumps(entry_dict, ensure_ascii=False)}\n'