From 1eba7b1c6f1396601074bf792afe5ed78eb007d0 Mon Sep 17 00:00:00 2001 From: Debanjum Singh Solanky Date: Sun, 27 Feb 2022 19:53:53 -0500 Subject: [PATCH] Use empty_escape_sequences constant to strip response text from gpt --- src/processor/conversation/gpt.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/processor/conversation/gpt.py b/src/processor/conversation/gpt.py index f64efeb8..ebdb12ff 100644 --- a/src/processor/conversation/gpt.py +++ b/src/processor/conversation/gpt.py @@ -6,6 +6,9 @@ from datetime import datetime # External Packages import openai +# Internal Packages +from src.utils.constants import empty_escape_sequences + def summarize(text, summary_type, user_query=None, api_key=None, temperature=0.5, max_tokens=100): """ @@ -61,8 +64,8 @@ def understand(text, api_key=None, temperature=0.5, max_tokens=100, verbose=0): stop=["\n"]) # Extract, Clean Message from GPT's Response - story = response['choices'][0]['text'] - return json.loads(story) + story = str(response['choices'][0]['text']) + return json.loads(story.strip(empty_escape_sequences)) def converse(text, conversation_history=None, api_key=None, temperature=0.9, max_tokens=150): @@ -91,8 +94,8 @@ def converse(text, conversation_history=None, api_key=None, temperature=0.9, max stop=["\n", "Human:", "AI:"]) # Extract, Clean Message from GPT's Response - story = response['choices'][0]['text'] - return str(story).strip() + story = str(response['choices'][0]['text']) + return story.strip(empty_escape_sequences) def message_to_prompt(user_message, conversation_history="", gpt_message=None, start_sequence="\nAI:", restart_sequence="\nHuman:"):