Drop help, summarize and automation /slash commands from chat api

Clean up unhelpful slash commands to make the chat API more maintainable.
- App version, chat model via /help is visible in other parts of the
  UX. Asking help questions with site:docs.khoj.dev filter isn't used
  or known to folks
- /summarize is esoterically tuned. It should be rewritten if added back.
  It wasn't being used by /research already
- Automations can be configured via UX. It wasn't being shown in UX
  already
This commit is contained in:
Debanjum
2025-06-05 12:36:11 -07:00
parent 7f6db526c3
commit b21706aa45
4 changed files with 2 additions and 147 deletions

View File

@@ -1055,108 +1055,6 @@ async def chat(
if state.verbose > 1:
logger.debug(f'Researched Results: {"".join(r.summarizedResult for r in research_results)}')
used_slash_summarize = conversation_commands == [ConversationCommand.Summarize]
# Skip trying to summarize if
if (
# summarization intent was inferred
ConversationCommand.Summarize in conversation_commands
# and not triggered via slash command
and not used_slash_summarize
# but we can't actually summarize
and len(file_filters) == 0
):
conversation_commands.remove(ConversationCommand.Summarize)
elif ConversationCommand.Summarize in conversation_commands:
response_log = ""
agent_has_entries = await EntryAdapters.aagent_has_entries(agent)
if len(file_filters) == 0 and not agent_has_entries:
response_log = "No files selected for summarization. Please add files using the section on the left."
async for result in send_llm_response(response_log, tracer.get("usage")):
yield result
else:
async for response in generate_summary_from_files(
q=q,
user=user,
file_filters=file_filters,
chat_history=conversation.messages,
query_images=uploaded_images,
agent=agent,
send_status_func=partial(send_event, ChatEvent.STATUS),
query_files=attached_file_context,
tracer=tracer,
):
if isinstance(response, dict) and ChatEvent.STATUS in response:
yield response[ChatEvent.STATUS]
else:
if isinstance(response, str):
response_log = response
async for result in send_llm_response(response, tracer.get("usage")):
yield result
summarized_document = FileAttachment(
name="Summarized Document",
content=response_log,
type="text/plain",
size=len(response_log.encode("utf-8")),
)
async for result in send_event(ChatEvent.GENERATED_ASSETS, {"files": [summarized_document.model_dump()]}):
yield result
generated_files.append(summarized_document)
custom_filters = []
if conversation_commands == [ConversationCommand.Help]:
if not q:
chat_model = await ConversationAdapters.aget_user_chat_model(user)
if chat_model == None:
chat_model = await ConversationAdapters.aget_default_chat_model(user)
model_type = chat_model.model_type
formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
async for result in send_llm_response(formatted_help, tracer.get("usage")):
yield result
return
# Adding specification to search online specifically on khoj.dev pages.
custom_filters.append("site:khoj.dev")
conversation_commands.append(ConversationCommand.Online)
if ConversationCommand.Automation in conversation_commands:
try:
automation, crontime, query_to_run, subject = await create_automation(
q, timezone, user, request.url, chat_history, tracer=tracer
)
except Exception as e:
logger.error(f"Error scheduling task {q} for {user.email}: {e}")
error_message = f"Unable to create automation. Ensure the automation doesn't already exist."
async for result in send_llm_response(error_message, tracer.get("usage")):
yield result
return
llm_response = construct_automation_created_message(automation, crontime, query_to_run, subject)
# Trigger task to save conversation to DB
asyncio.create_task(
save_to_conversation_log(
q,
llm_response,
user,
chat_history,
user_message_time,
intent_type="automation",
client_application=request.user.client_app,
conversation_id=conversation_id,
inferred_queries=[query_to_run],
automation_id=automation.id,
query_images=uploaded_images,
train_of_thought=train_of_thought,
raw_query_files=raw_query_files,
tracer=tracer,
)
)
# Send LLM Response
async for result in send_llm_response(llm_response, tracer.get("usage")):
yield result
return
# Gather Context
## Extract Document References
if not ConversationCommand.Research in conversation_commands:
@@ -1216,7 +1114,7 @@ async def chat(
location,
user,
partial(send_event, ChatEvent.STATUS),
custom_filters,
custom_filters=[],
max_online_searches=3,
query_images=uploaded_images,
query_files=attached_file_context,

View File

@@ -249,8 +249,6 @@ def get_next_url(request: Request) -> str:
def get_conversation_command(query: str) -> ConversationCommand:
if query.startswith("/notes"):
return ConversationCommand.Notes
elif query.startswith("/help"):
return ConversationCommand.Help
elif query.startswith("/general"):
return ConversationCommand.General
elif query.startswith("/online"):
@@ -261,8 +259,6 @@ def get_conversation_command(query: str) -> ConversationCommand:
return ConversationCommand.Image
elif query.startswith("/automated_task"):
return ConversationCommand.AutomatedTask
elif query.startswith("/summarize"):
return ConversationCommand.Summarize
elif query.startswith("/diagram"):
return ConversationCommand.Diagram
elif query.startswith("/code"):
@@ -392,9 +388,6 @@ async def aget_data_sources_and_output_format(
agent_outputs = agent.output_modes if agent else []
for output, description in mode_descriptions_for_llm.items():
# Do not allow tasks to schedule another task
if is_task and output == ConversationCommand.Automation:
continue
output_options[output.value] = description
if len(agent_outputs) == 0 or output.value in agent_outputs:
output_options_str += f'- "{output.value}": "{description}"\n'

View File

@@ -273,7 +273,6 @@ async def research(
code_results: Dict = dict()
document_results: List[Dict[str, str]] = []
operator_results: OperatorRun = None
summarize_files: str = ""
this_iteration = ResearchIteration(tool=None, query=query)
async for result in apick_next_tool(
@@ -473,40 +472,13 @@ async def research(
this_iteration.warning = f"Error operating browser: {e}"
logger.error(this_iteration.warning, exc_info=True)
elif this_iteration.tool == ConversationCommand.Summarize:
try:
async for result in generate_summary_from_files(
this_iteration.query,
user,
file_filters,
construct_tool_chat_history(previous_iterations, ConversationCommand.Summarize),
query_images=query_images,
agent=agent,
send_status_func=send_status_func,
query_files=query_files,
):
if isinstance(result, dict) and ChatEvent.STATUS in result:
yield result[ChatEvent.STATUS]
else:
summarize_files = result # type: ignore
except Exception as e:
this_iteration.warning = f"Error summarizing files: {e}"
logger.error(this_iteration.warning, exc_info=True)
else:
# No valid tools. This is our exit condition.
current_iteration = MAX_ITERATIONS
current_iteration += 1
if (
document_results
or online_results
or code_results
or operator_results
or summarize_files
or this_iteration.warning
):
if document_results or online_results or code_results or operator_results or this_iteration.warning:
results_data = f"\n<iteration>{current_iteration}\n<tool>{this_iteration.tool}</tool>\n<query>{this_iteration.query}</query>\n<results>"
if document_results:
results_data += f"\n<document_references>\n{yaml.dump(document_results, allow_unicode=True, sort_keys=False, default_flow_style=False)}\n</document_references>"
@@ -518,8 +490,6 @@ async def research(
results_data += (
f"\n<browser_operator_results>\n{operator_results.response}\n</browser_operator_results>"
)
if summarize_files:
results_data += f"\n<summarized_files>\n{yaml.dump(summarize_files, allow_unicode=True, sort_keys=False, default_flow_style=False)}\n</summarized_files>"
if this_iteration.warning:
results_data += f"\n<warning>\n{this_iteration.warning}\n</warning>"
results_data += "\n</results>\n</iteration>"

View File

@@ -338,15 +338,12 @@ class ConversationCommand(str, Enum):
Default = "default"
General = "general"
Notes = "notes"
Help = "help"
Online = "online"
Webpage = "webpage"
Code = "code"
Image = "image"
Text = "text"
Automation = "automation"
AutomatedTask = "automated_task"
Summarize = "summarize"
Diagram = "diagram"
Research = "research"
Operator = "operator"
@@ -360,9 +357,6 @@ command_descriptions = {
ConversationCommand.Webpage: "Get information from webpage suggested by you.",
ConversationCommand.Code: "Run Python code to parse information, run complex calculations, create documents and charts.",
ConversationCommand.Image: "Generate illustrative, creative images by describing your imagination in words.",
ConversationCommand.Automation: "Automatically run your query at a specified time or interval.",
ConversationCommand.Help: "Get help with how to use or setup Khoj from the documentation",
ConversationCommand.Summarize: "Get help with a question pertaining to an entire document.",
ConversationCommand.Diagram: "Draw a flowchart, diagram, or any other visual representation best expressed with primitives like lines, rectangles, and text.",
ConversationCommand.Research: "Do deep research on a topic. This will take longer than usual, but give a more detailed, comprehensive answer.",
ConversationCommand.Operator: "Operate and perform tasks using a computer.",