Fix chat logging messages to get context without flooding logs

This commit is contained in:
Debanjum Singh Solanky
2023-07-05 18:27:06 -07:00
parent 0ba838b53a
commit 46269ddfd3
2 changed files with 3 additions and 1 deletion

View File

@@ -175,6 +175,8 @@ def converse(
        conversation_log,
        model,
    )
truncated_messages = "\n".join({f"{message.content[:40]}..." for message in messages})
logger.debug(f"Conversation Context for GPT: {truncated_messages}")
    # Get Response from GPT
    return chat_completion_with_backoff(

View File

@@ -48,7 +48,7 @@ class ThreadedGenerator:
        item = self.queue.get()
        if item is StopIteration:
            time_to_response = perf_counter() - self.start_time
-            logger.info(f"Time to stream full response: {time_to_response:.3f}")
+            logger.info(f"Chat streaming took: {time_to_response:.3f} seconds")
            if self.completion_func:
                # The completion func effective acts as a callback.
                # It adds the aggregated response to the conversation history. It's constructed in api.py.