Stop using the old google-generativeai package to raise and catch exceptions

This commit is contained in:
Debanjum
2025-03-11 23:38:27 +05:30
parent bdfa6400ef
commit bd06fcd9be

View File

@@ -4,8 +4,8 @@ from copy import deepcopy
 from threading import Thread

 from google import genai
+from google.genai import errors as gerrors
 from google.genai import types as gtypes
-from google.generativeai.types.generation_types import StopCandidateException
 from langchain.schema import ChatMessage
 from tenacity import (
     before_sleep_log,
@@ -73,7 +73,7 @@ def gemini_completion_with_backoff(
         # Generate the response
         response = client.models.generate_content(model=model_name, config=config, contents=formatted_messages)
         response_text = response.text
-    except StopCandidateException as e:
+    except gerrors.ClientError as e:
         response = None
         response_text, _ = handle_gemini_response(e.args)
         # Respond with reason for stopping
@@ -147,7 +147,7 @@ def gemini_llm_thread(
                 aggregated_response += message
                 g.send(message)
             if stopped:
-                raise StopCandidateException(message)
+                raise ValueError(message)

         # Calculate cost of chat
         input_tokens = chunk.usage_metadata.prompt_token_count
@@ -159,7 +159,7 @@ def gemini_llm_thread(
             tracer["temperature"] = temperature
             if is_promptrace_enabled():
                 commit_conversation_trace(messages, aggregated_response, tracer)
-    except StopCandidateException as e:
+    except ValueError as e:
         logger.warning(
             f"LLM Response Prevented for {model_name}: {e.args[0]}.\n"
             + f"Last Message by {messages[-1].role}: {messages[-1].content}"