Fix chat tests broken by streaming; pass args correctly to chat methods

- Fix the test for the GPT converse method after it started streaming responses
- Pass stop in the model_kwargs dictionary and the API key in the
  openai_api_key parameter to chat completion methods. This should resolve
  the argument warning thrown by the OpenAI module
This commit is contained in:
Debanjum Singh Solanky
2023-07-07 15:23:44 -07:00
parent 48870d9170
commit 11f0a9f196
3 changed files with 42 additions and 33 deletions

View File

@@ -31,8 +31,8 @@ def answer(text, user_query, model, api_key=None, temperature=0.5, max_tokens=50
model_name=model,
temperature=temperature,
max_tokens=max_tokens,
stop='"""',
api_key=api_key,
model_kwargs={"stop": ['"""']},
openai_api_key=api_key,
)
# Extract, Clean Message from GPT's Response
@@ -59,8 +59,8 @@ def summarize(text, summary_type, model, user_query=None, api_key=None, temperat
temperature=temperature,
max_tokens=max_tokens,
frequency_penalty=0.2,
stop='"""',
api_key=api_key,
model_kwargs={"stop": ['"""']},
openai_api_key=api_key,
)
# Extract, Clean Message from GPT's Response
@@ -104,8 +104,8 @@ def extract_questions(
model_name=model,
temperature=temperature,
max_tokens=max_tokens,
stop=["A: ", "\n"],
api_key=api_key,
model_kwargs={"stop": ["A: ", "\n"]},
openai_api_key=api_key,
)
# Extract, Clean Message from GPT's Response
@@ -143,8 +143,8 @@ def extract_search_type(text, model, api_key=None, temperature=0.5, max_tokens=1
temperature=temperature,
max_tokens=max_tokens,
frequency_penalty=0.2,
stop=["\n"],
api_key=api_key,
model_kwargs={"stop": ["\n"]},
openai_api_key=api_key,
)
# Extract, Clean Message from GPT's Response
@@ -155,9 +155,9 @@ def converse(
references,
user_query,
conversation_log={},
model: Optional[str] = "gpt-3.5-turbo",
api_key=None,
temperature=0.2,
model: str = "gpt-3.5-turbo",
api_key: Optional[str] = None,
temperature: float = 0.2,
completion_func=None,
):
"""