/g, "");
+
return (
str:
return "\n- ".join(compiled_response)
async def render_claude_response(response_content: list[BetaContentBlock], page: Page) -> str:
    """
    Share the response from Anthropic AI model to be rendered by the client.

    Text blocks pass through as-is, thinking blocks are prefixed with
    "**Thought**", and tool-use blocks are serialized as a JSON "**Action**"
    entry (screenshot actions additionally embed the captured image as a
    base64 data URI). Entries are joined as a markdown-style bullet list.
    """
    # Seed with an empty entry so the joined output begins with the separator.
    parts: list[str] = [""]
    # Deep-copy so embedding the screenshot never mutates the caller's blocks.
    for block in deepcopy(response_content):
        if block.type == "thinking":
            parts.append(f"**Thought**: {block.thinking}")
            continue
        if block.type == "text":
            parts.append(block.text)
            continue
        if block.type != "tool_use":
            # Unknown block types are silently ignored.
            continue

        tool_name = getattr(block, "name", None)
        if tool_name == "goto":
            action_payload = {"action": tool_name, "url": block.input.get("url")}
        elif tool_name == "back":
            action_payload = {"action": tool_name}
        else:
            action_payload = block.input

        # Screenshot actions get the captured viewport inlined as a data URI.
        if action_payload.get("action") == "screenshot":
            screenshot_base64 = await get_screenshot(page)
            action_payload["image"] = f"data:image/webp;base64,{screenshot_base64}"

        parts.append(f"**Action**: {json.dumps(action_payload)}")
    return "\n- ".join(parts)
+
+
async def get_screenshot(page: Page):
"""
Take a viewport screenshot using Playwright and return as base64 encoded webp image.