Stop passing max_tokens to the extract questions step, as it was not being used to limit the model's maximum output tokens

This commit is contained in:
sabaimran
2024-05-27 01:23:54 +05:30
parent 9ebf3a4d80
commit b97ca9d19d
2 changed files with 0 additions and 3 deletions

View File

@@ -25,7 +25,6 @@ def extract_questions_anthropic(
conversation_log={},
api_key=None,
temperature=0,
max_tokens=100,
location_data: LocationData = None,
):
"""
@@ -71,7 +70,6 @@ def extract_questions_anthropic(
model_name=model,
temperature=temperature,
api_key=api_key,
max_tokens=max_tokens,
)
# Extract, Clean Message from Claude's Response

View File

@@ -353,7 +353,6 @@ async def extract_references_and_questions(
api_key=api_key,
conversation_log=meta_log,
location_data=location_data,
max_tokens=conversation_config.max_prompt_size,
)
# Collate search results as context for GPT