From 2537c04b5f3d5b279fabd31c9a861378fa1ed52a Mon Sep 17 00:00:00 2001
From: Philipp Temminghoff
Date: Mon, 24 Feb 2025 14:45:26 +0100
Subject: [PATCH] chore: adapt to pydantic-ai changes

---
 src/llmling_agent/messaging/messages.py            | 2 +-
 src/llmling_agent_providers/pydanticai/provider.py | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/llmling_agent/messaging/messages.py b/src/llmling_agent/messaging/messages.py
index 91d2d95d..129a07fa 100644
--- a/src/llmling_agent/messaging/messages.py
+++ b/src/llmling_agent/messaging/messages.py
@@ -230,7 +230,7 @@ def _get_content_str(self) -> str:
 
     @property
     def data(self) -> TContent:
-        """Get content as typed data. Provides compat to RunResult."""
+        """Get content as typed data. Provides compat to AgentRunResult."""
         return self.content
 
     def format(
diff --git a/src/llmling_agent_providers/pydanticai/provider.py b/src/llmling_agent_providers/pydanticai/provider.py
index d1944d08..c4c89bf3 100644
--- a/src/llmling_agent_providers/pydanticai/provider.py
+++ b/src/llmling_agent_providers/pydanticai/provider.py
@@ -12,7 +12,7 @@
 import pydantic_ai._pydantic
 from pydantic_ai.messages import ModelResponse
 from pydantic_ai.models import KnownModelName, Model
-from pydantic_ai.result import RunResult, StreamedRunResult
+from pydantic_ai.result import StreamedRunResult
 from pydantic_ai.tools import RunContext
 from pydantic_ai.usage import UsageLimits as PydanticAiUsageLimits
 
@@ -40,6 +40,8 @@ if TYPE_CHECKING:
     from collections.abc import AsyncIterator, Awaitable, Callable
 
+    from pydantic_ai.agent import AgentRunResult
+
     from llmling_agent.common_types import ModelType
     from llmling_agent.tools.base import Tool
     from llmling_agent_config.content import Content
 
@@ -267,7 +269,7 @@ async def generate_response(
         to_use = model or self.model
         to_use = infer_model(to_use) if isinstance(to_use, str) else to_use
         limits = asdict(usage_limits) if usage_limits else {}
-        result: RunResult = await agent.run(
+        result: AgentRunResult = await agent.run(
             prompt,
             deps=self._context,  # type: ignore
             message_history=[to_model_message(m) for m in message_history],
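
Note: for context, below is a minimal standalone sketch of the pattern this patch
adopts, assuming a pydantic-ai release in which RunResult was renamed to
AgentRunResult and moved from pydantic_ai.result to pydantic_ai.agent. The
Agent setup and the ask() helper are hypothetical illustrations, not code from
this repository.

    from __future__ import annotations

    from typing import TYPE_CHECKING

    from pydantic_ai import Agent

    if TYPE_CHECKING:
        # Type-only import: AgentRunResult is used solely in annotations,
        # so it stays out of the runtime import graph.
        from pydantic_ai.agent import AgentRunResult


    async def ask(agent: Agent[None, str], prompt: str) -> str:
        # agent.run() returns an AgentRunResult; with postponed evaluation
        # of annotations, the type-only import above is sufficient.
        result: AgentRunResult[str] = await agent.run(prompt)
        return result.data

Keeping the import under TYPE_CHECKING (as the patch does in provider.py) means
the rename only affects annotations, so no runtime import breaks if the symbol
moves again.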