Handle unset ttft in metadata of failed chat response. Fixes evals.

This was causing evals to stop processing the rest of the batch as well.
This commit is contained in:
Debanjum
2025-05-17 02:36:10 -07:00
parent 673a15b6eb
commit e0352cd8e1
2 changed files with 5 additions and 4 deletions

View File

@@ -809,11 +809,11 @@ async def chat(
chat_metadata = chat_metadata or {}
chat_metadata["conversation_command"] = cmd_set
chat_metadata["agent"] = conversation.agent.slug if conversation and conversation.agent else None
chat_metadata["latency"] = f"{latency:.3f}"
chat_metadata["ttft_latency"] = f"{ttft:.3f}"
chat_metadata["cost"] = f"{cost:.5f}"
logger.info(f"Chat response time to first token: {ttft:.3f} seconds")
chat_metadata["latency"] = f"{latency:.3f}"
if ttft:
chat_metadata["ttft_latency"] = f"{ttft:.3f}"
logger.info(f"Chat response time to first token: {ttft:.3f} seconds")
logger.info(f"Chat response total time: {latency:.3f} seconds")
logger.info(f"Chat response cost: ${cost:.5f}")
update_telemetry_state(

View File

@@ -516,6 +516,7 @@ def process_batch(batch, batch_start, results, dataset_length, response_evaluato
if is_none_or_empty(agent_response):
decision = None
explanation = "Agent response is empty. This maybe due to a service error."
eval_cost = 0.0
else:
decision, explanation, eval_cost = response_evaluator(prompt, agent_response, answer, agent_references)