diff --git a/.github/workflows/run_notebooks.yml b/.github/workflows/run_notebooks.yml
index bcf562e8f1..91084cb13c 100644
--- a/.github/workflows/run_notebooks.yml
+++ b/.github/workflows/run_notebooks.yml
@@ -16,9 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        lib-version:
-          - "development"
-          - "latest"
+        lib-version: ${{ (github.event_name == 'workflow_dispatch' || github.event_name == 'schedule') && fromJSON('["development", "latest"]') || fromJSON('["development"]') }}
     steps:
       - uses: actions/checkout@v4
diff --git a/docs/docs/how-tos/react-agent-from-scratch.ipynb b/docs/docs/how-tos/react-agent-from-scratch.ipynb
index 8b72c41eb5..eb5cee77d2 100644
--- a/docs/docs/how-tos/react-agent-from-scratch.ipynb
+++ b/docs/docs/how-tos/react-agent-from-scratch.ipynb
@@ -6,37 +6,15 @@
    "source": [
     "# How to create a ReAct agent from scratch\n",
     "\n",
-    "Prerequisites\n",
-    "\n",
-    "    This guide assumes familiarity with the following:\n",
__start__]):::first - agent(agent) - tools(tools__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query --> retriever_two; ''' # --- -# name: test_conditional_graph[memory].3 +# name: test_in_one_fan_out_state_graph_waiting_edge[sqlite] ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } + graph TD; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query --> retriever_two; + ''' # --- -# name: test_conditional_graph[memory].4 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory] ''' graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
exit .-> __end__; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[memory].5 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].1 dict({ - 'edges': list([ - dict({ - 'source': '__start__', - 'target': 'agent', - }), - dict({ - 'source': 'tools', - 'target': 'agent', + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', }), - dict({ - 'conditional': True, - 'data': 'continue', - 'source': 'agent', - 'target': 'tools', + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', }), - dict({ - 'conditional': True, - 'data': 'exit', - 'source': 'agent', - 'target': '__end__', + 'query': dict({ + 'title': 'Query', + 'type': 'string', }), + }), + 'required': list([ + 'query', + 'inner', ]), - 'nodes': list([ - dict({ - 'data': '__start__', - 'id': '__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langchain', - 'schema', - 'runnable', - 'RunnableAssign', - ]), - 'name': 'agent', - }), - 'id': 'agent', - 'metadata': dict({ - '__interrupt': 'after', - }), - 'type': 'runnable', + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tools', - }), - 'id': 'tools', - 'metadata': dict({ - 'parents': dict({ - }), - 'variant': 'b', - 'version': 2, + 'docs': dict({ + 'items': dict({ + 'type': 'string', }), - 'type': 'runnable', - }), - dict({ - 'data': '__end__', - 'id': '__end__', - 'type': 'schema', + 'title': 'Docs', + 'type': 'array', }), + }), + 'required': list([ + 'answer', + 'docs', ]), + 'title': 'Output', + 'type': 'object', }) # --- -# name: test_conditional_graph[memory].6 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - agent(agent__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres] - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres].1 + dict({ + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres].1 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe] ''' graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres].2 - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - tools(tools__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe].1 + dict({ + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres].3 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres].4 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool] ''' graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
exit .-> __end__; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres].5 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool].1 dict({ - 'edges': list([ - dict({ - 'source': '__start__', - 'target': 'agent', - }), - dict({ - 'source': 'tools', - 'target': 'agent', + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', }), - dict({ - 'conditional': True, - 'data': 'continue', - 'source': 'agent', - 'target': 'tools', + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', }), - dict({ - 'conditional': True, - 'data': 'exit', - 'source': 'agent', - 'target': '__end__', + 'query': dict({ + 'title': 'Query', + 'type': 'string', }), + }), + 'required': list([ + 'query', + 'inner', ]), - 'nodes': list([ - dict({ - 'data': '__start__', - 'id': '__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langchain', - 'schema', - 'runnable', - 'RunnableAssign', - ]), - 'name': 'agent', - }), - 'id': 'agent', - 'metadata': dict({ - '__interrupt': 'after', - }), - 'type': 'runnable', + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tools', - }), - 'id': 'tools', - 'metadata': dict({ - 'parents': dict({ - }), - 'variant': 'b', - 'version': 2, + 'docs': dict({ + 'items': dict({ + 'type': 'string', }), - 'type': 'runnable', - }), - dict({ - 'data': '__end__', - 'id': '__end__', - 'type': 'schema', + 'title': 'Docs', + 'type': 'array', }), + }), + 'required': list([ + 'answer', + 'docs', ]), + 'title': 'Output', + 'type': 'object', }) # --- -# name: test_conditional_graph[postgres].6 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_shallow] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - agent(agent__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres_pipe] - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_graph[postgres_pipe].1 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_graph[postgres_pipe].2 - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - tools(tools__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_shallow].1 + dict({ + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres_pipe].3 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_shallow].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres_pipe].4 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite] ''' graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
exit .-> __end__; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres_pipe].5 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite].1 dict({ - 'edges': list([ - dict({ - 'source': '__start__', - 'target': 'agent', - }), - dict({ - 'source': 'tools', - 'target': 'agent', + 'definitions': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', }), - dict({ - 'conditional': True, - 'data': 'continue', - 'source': 'agent', - 'target': 'tools', + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/definitions/InnerObject', }), - dict({ - 'conditional': True, - 'data': 'exit', - 'source': 'agent', - 'target': '__end__', + 'query': dict({ + 'title': 'Query', + 'type': 'string', }), + }), + 'required': list([ + 'query', + 'inner', ]), - 'nodes': list([ - dict({ - 'data': '__start__', - 'id': '__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langchain', - 'schema', - 'runnable', - 'RunnableAssign', - ]), - 'name': 'agent', - }), - 'id': 'agent', - 'metadata': dict({ - '__interrupt': 'after', - }), - 'type': 'runnable', + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tools', - }), - 'id': 'tools', - 'metadata': dict({ - 'parents': dict({ - }), - 'variant': 'b', - 'version': 2, + 'docs': dict({ + 'items': dict({ + 'type': 'string', }), - 'type': 'runnable', - }), - dict({ - 'data': '__end__', - 'id': '__end__', - 'type': 'schema', + 'title': 'Docs', + 'type': 'array', }), + }), + 'required': list([ + 'answer', + 'docs', ]), + 'title': 'Output', + 'type': 'object', }) # --- -# name: test_conditional_graph[postgres_pipe].6 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - agent(agent__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres_pool] - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_graph[postgres_pool].1 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_graph[postgres_pool].2 - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - tools(tools__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres_pool].3 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_conditional_graph[postgres_pool].4 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres] ''' graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
exit .-> __end__; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[postgres_pool].5 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres].1 dict({ - 'edges': list([ - dict({ - 'source': '__start__', - 'target': 'agent', - }), - dict({ - 'source': 'tools', - 'target': 'agent', + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', }), - dict({ - 'conditional': True, - 'data': 'continue', - 'source': 'agent', - 'target': 'tools', + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', }), - dict({ - 'conditional': True, - 'data': 'exit', - 'source': 'agent', - 'target': '__end__', + 'query': dict({ + 'title': 'Query', + 'type': 'string', }), + }), + 'required': list([ + 'query', + 'inner', ]), - 'nodes': list([ - dict({ - 'data': '__start__', - 'id': '__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langchain', - 'schema', - 'runnable', - 'RunnableAssign', - ]), - 'name': 'agent', - }), - 'id': 'agent', - 'metadata': dict({ - '__interrupt': 'after', - }), - 'type': 'runnable', + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tools', - }), - 'id': 'tools', - 'metadata': dict({ - 'parents': dict({ - }), - 'variant': 'b', - 'version': 2, + 'docs': dict({ + 'items': dict({ + 'type': 'string', }), - 'type': 'runnable', - }), - dict({ - 'data': '__end__', - 'id': '__end__', - 'type': 'schema', + 'title': 'Docs', + 'type': 'array', }), + }), + 'required': list([ + 'answer', + 'docs', ]), + 'title': 'Output', + 'type': 'object', }) # --- -# name: test_conditional_graph[postgres_pool].6 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - agent(agent__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_conditional_graph[sqlite] - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_graph[sqlite].1 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_graph[sqlite].2 - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - tools(tools__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_conditional_graph[sqlite].3 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableAssign" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - }, - "metadata": { - "parents": {}, - "version": 2, - "variant": "b" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_graph[sqlite].4 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_graph[sqlite].5 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe].1 dict({ - 'edges': list([ - dict({ - 'source': '__start__', - 'target': 'agent', - }), - dict({ - 'source': 'tools', - 'target': 'agent', + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', }), - dict({ - 'conditional': True, - 'data': 'continue', - 'source': 'agent', - 'target': 'tools', + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', }), - dict({ - 'conditional': True, - 'data': 'exit', - 'source': 'agent', - 'target': '__end__', + 'query': dict({ + 'title': 'Query', + 'type': 'string', }), + }), + 'required': list([ + 'query', + 'inner', ]), - 'nodes': list([ - dict({ - 'data': '__start__', - 'id': '__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langchain', - 'schema', - 'runnable', - 'RunnableAssign', - ]), - 'name': 'agent', - }), - 'id': 'agent', - 'metadata': dict({ - '__interrupt': 'after', - }), - 'type': 'runnable', + 'title': 'Input', + 'type': 'object', + }) +# --- +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tools', - }), - 'id': 'tools', - 'metadata': dict({ - 'parents': dict({ - }), - 'variant': 'b', - 'version': 2, + 'docs': dict({ + 'items': dict({ + 'type': 'string', }), - 'type': 'runnable', - }), - dict({ - 'data': '__end__', - 'id': '__end__', - 'type': 'schema', + 'title': 'Docs', + 'type': 'array', }), + }), + 'required': list([ + 'answer', + 'docs', ]), + 'title': 'Output', + 'type': 'object', }) # --- -# name: 
test_conditional_graph[sqlite].6 +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - agent(agent__end__
]):::last - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_conditional_state_graph.1 - '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) # --- -# name: test_conditional_state_graph.2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableSequence" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "RunnableCallable" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_state_graph.3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -.  
continue  .-> tools; - agent -.  exit  .-> __end__; - - ''' -# --- -# name: test_conditional_state_graph[memory] - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_conditional_state_graph[memory].1 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_conditional_state_graph[memory].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableSequence" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_state_graph[memory].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_state_graph[postgres] - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_conditional_state_graph[postgres].1 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_conditional_state_graph[postgres].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableSequence" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - 
"runnable", - "RunnableCallable" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_state_graph[postgres].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_state_graph[postgres_pipe] - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_conditional_state_graph[postgres_pipe].1 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_conditional_state_graph[postgres_pipe].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableSequence" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_state_graph[postgres_pipe].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_state_graph[postgres_pool] - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_conditional_state_graph[postgres_pool].1 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_conditional_state_graph[postgres_pool].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableSequence" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", 
- "runnable", - "RunnableCallable" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_state_graph[postgres_pool].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. exit .-> __end__; - - ''' -# --- -# name: test_conditional_state_graph[sqlite] - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_conditional_state_graph[sqlite].1 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_conditional_state_graph[sqlite].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain", - "schema", - "runnable", - "RunnableSequence" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "exit", - "conditional": true - } - ] - } - ''' -# --- -# name: test_conditional_state_graph[sqlite].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
exit .-> __end__; - - ''' -# --- -# name: test_conditional_state_graph_with_list_edge_inputs - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "A", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "A" - } - }, - { - "id": "B", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "B" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "A", - "target": "__end__" - }, - { - "source": "B", - "target": "__end__" - }, - { - "source": "__start__", - "target": "A" - }, - { - "source": "__start__", - "target": "B" - } - ] - } - ''' -# --- -# name: test_conditional_state_graph_with_list_edge_inputs.1 - ''' - graph TD; - A --> __end__; - B --> __end__; - __start__ --> A; - __start__ --> B; - - ''' -# --- -# name: test_dynamic_interrupt - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__]):::first - tool_two_slow(tool_two_slow) - tool_two_fast(tool_two_fast) - __end__([__end__]):::last - __start__ -.-> tool_two_slow; - tool_two_slow --> __end__; - __start__ -.-> tool_two_fast; - tool_two_fast --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_falsy_return_from_task[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __falsy_task(falsy_task) - __graph -.-> __falsy_task; - __falsy_task --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_falsy_return_from_task[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __falsy_task(falsy_task) - __graph -.-> __falsy_task; - __falsy_task --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_falsy_return_from_task[postgres_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __falsy_task(falsy_task) - __graph -.-> __falsy_task; - __falsy_task --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_falsy_return_from_task[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __falsy_task(falsy_task) - __graph -.-> __falsy_task; - __falsy_task --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_falsy_return_from_task[postgres_shallow] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __falsy_task(falsy_task) - __graph -.-> __falsy_task; - __falsy_task --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_falsy_return_from_task[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __falsy_task(falsy_task) - __graph -.-> __falsy_task; - __falsy_task --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_nested[memory] - ''' - %%{init: {'flowchart': {'curve': 
'linear'}}}%% - graph TD; - __graph(graph) - LangGraph(LangGraph) - __mapper(mapper) - __submapper(submapper) - __graph -.-> LangGraph; - LangGraph --> __graph; - __graph -.-> __mapper; - __mapper --> __graph; - __mapper -.-> __submapper; - __submapper --> __mapper; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_nested[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - LangGraph(LangGraph) - __mapper(mapper) - __submapper(submapper) - __graph -.-> LangGraph; - LangGraph --> __graph; - __graph -.-> __mapper; - __mapper --> __graph; - __mapper -.-> __submapper; - __submapper --> __mapper; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_nested[postgres_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - LangGraph(LangGraph) - __mapper(mapper) - __submapper(submapper) - __graph -.-> LangGraph; - LangGraph --> __graph; - __graph -.-> __mapper; - __mapper --> __graph; - __mapper -.-> __submapper; - __submapper --> __mapper; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_nested[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - LangGraph(LangGraph) - __mapper(mapper) - __submapper(submapper) - __graph -.-> LangGraph; - LangGraph --> __graph; - __graph -.-> __mapper; - __mapper --> __graph; - __mapper -.-> __submapper; - __submapper --> __mapper; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_nested[postgres_shallow] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - LangGraph(LangGraph) - __mapper(mapper) - __submapper(submapper) - __graph -.-> LangGraph; - LangGraph --> __graph; - __graph -.-> __mapper; - __mapper --> __graph; - __mapper -.-> __submapper; - __submapper --> __mapper; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_nested[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - LangGraph(LangGraph) - __mapper(mapper) - __submapper(submapper) - __graph -.-> LangGraph; - LangGraph --> __graph; - __graph -.-> __mapper; - __mapper --> __graph; - __mapper -.-> __submapper; - __submapper --> __mapper; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_stream_order[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __baz(baz) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __baz; - __baz --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_stream_order[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __baz(baz) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __baz; - __baz --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last 
fill:#bfb6fc - - ''' -# --- -# name: test_imp_stream_order[postgres_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __baz(baz) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __baz; - __baz --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_stream_order[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __baz(baz) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __baz; - __baz --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_stream_order[postgres_shallow] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __baz(baz) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __baz; - __baz --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_imp_stream_order[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __baz(baz) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __baz; - __baz --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge[memory] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query --> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge[postgres] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query --> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge[postgres_pipe] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query --> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge[postgres_pool] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query --> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge[postgres_shallow] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query --> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge[sqlite] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query --> retriever_two; - - ''' -# --- -# name: 
test_in_one_fan_out_state_graph_waiting_edge_custom_state_class - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1.1 - dict({ - 'definitions': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1.2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].1 - dict({ - 'definitions': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres].1 - dict({ - 'definitions': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: 
test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe].1 - dict({ - 'definitions': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool].1 - dict({ - 'definitions': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_shallow] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_shallow].1 - dict({ - 'definitions': dict({ - 
'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_shallow].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite].1 - dict({ - 'definitions': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/definitions/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: 
test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres].1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe].1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), 
- 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool].1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_shallow] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_shallow].1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_shallow].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite].1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 
'inner', - ]), - 'title': 'Input', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite].2 - dict({ - 'properties': dict({ - 'answer': dict({ - 'title': 'Answer', - 'type': 'string', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - }), - 'required': list([ - 'answer', - 'docs', - ]), - 'title': 'Output', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[memory] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_pipe] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_pool] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_shallow] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[sqlite] - ''' - graph TD; - __start__ --> rewrite_query; - analyzer_one --> retriever_one; - qa --> __end__; - retriever_one --> qa; - retriever_two --> qa; - rewrite_query --> analyzer_one; - rewrite_query -.-> retriever_two; - - ''' -# --- -# name: test_interrupt_functional[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_functional[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_functional[postgres_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_functional[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; 
- __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_functional[postgres_shallow] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_functional[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_task_functional[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_task_functional[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_task_functional[postgres_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_task_functional[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_task_functional[postgres_shallow] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_interrupt_task_functional[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __bar(bar) - __foo(foo) - __graph -.-> __bar; - __bar --> __graph; - __graph -.-> __foo; - __foo --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_message_graph.1 - '{"title": "LangGraphOutput", "type": "array", "items": {"anyOf": [{"$ref": "#/definitions/AIMessage"}, {"$ref": "#/definitions/HumanMessage"}, {"$ref": "#/definitions/ChatMessage"}, {"$ref": "#/definitions/SystemMessage"}, {"$ref": "#/definitions/FunctionMessage"}, {"$ref": 
"#/definitions/ToolMessage"}]}, "definitions": {"ToolCall": {"title": "ToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"title": "Id", "type": "string"}, "type": {"title": "Type", "enum": ["tool_call"], "type": "string"}}, "required": ["name", "args", "id"]}, "InvalidToolCall": {"title": "InvalidToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "string"}, "id": {"title": "Id", "type": "string"}, "error": {"title": "Error", "type": "string"}, "type": {"title": "Type", "enum": ["invalid_tool_call"], "type": "string"}}, "required": ["name", "args", "id", "error"]}, "UsageMetadata": {"title": "UsageMetadata", "type": "object", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}}, "required": ["input_tokens", "output_tokens", "total_tokens"]}, "AIMessage": {"title": "AIMessage", "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "ai", "enum": ["ai"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}, "tool_calls": {"title": "Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/ToolCall"}}, "invalid_tool_calls": {"title": "Invalid Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/InvalidToolCall"}}, "usage_metadata": {"$ref": "#/definitions/UsageMetadata"}}, "required": ["content"]}, "HumanMessage": {"title": "HumanMessage", "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "human", "enum": ["human"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}}, "required": ["content"]}, "ChatMessage": {"title": "ChatMessage", "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "chat", "enum": ["chat"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"]}, "SystemMessage": {"title": "SystemMessage", "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "system", "enum": ["system"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content"]}, "FunctionMessage": {"title": "FunctionMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "function", "enum": ["function"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "name"]}, "ToolMessage": {"title": "ToolMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "tool", "enum": ["tool"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"title": "Artifact"}, "status": {"title": "Status", "default": "success", "enum": ["success", "error"], "type": "string"}}, "required": ["content", "tool_call_id"]}}}' -# --- -# name: test_message_graph.2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "tests", - "test_pregel", - "FakeFuntionChatModel" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_message_graph.3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -.  continue  .-> tools; - agent -.  
end  .-> __end__; - - ''' -# --- -# name: test_message_graph[memory] - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' -# --- -# name: test_message_graph[memory].1 - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": 
"array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' -# --- -# name: test_message_graph[memory].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "tests", - "test_pregel", - "FakeFuntionChatModel" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_message_graph[memory].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
end .-> __end__; - - ''' -# --- -# name: test_message_graph[postgres] - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' -# --- -# name: test_message_graph[postgres].1 - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": 
"array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' -# --- -# name: test_message_graph[postgres].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "tests", - "test_pregel", - "FakeFuntionChatModel" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_message_graph[postgres].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
end .-> __end__; - - ''' -# --- -# name: test_message_graph[postgres_pipe] - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' -# --- -# name: test_message_graph[postgres_pipe].1 - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": 
"array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' -# --- -# name: test_message_graph[postgres_pipe].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "tests", - "test_pregel", - "FakeFuntionChatModel" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_message_graph[postgres_pipe].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
end .-> __end__; - - ''' -# --- -# name: test_message_graph[postgres_pool] - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' -# --- -# name: test_message_graph[postgres_pool].1 - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": 
"array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' -# --- -# name: test_message_graph[postgres_pool].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "tests", - "test_pregel", - "FakeFuntionChatModel" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_message_graph[postgres_pool].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
end .-> __end__; - - ''' -# --- -# name: test_message_graph[sqlite] - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}' -# --- -# name: test_message_graph[sqlite].1 - '{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": 
"array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", "enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! 
Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": ["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}' -# --- -# name: test_message_graph[sqlite].2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "tests", - "test_pregel", - "FakeFuntionChatModel" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_message_graph[sqlite].3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -. continue .-> tools; - agent -. 
end .-> __end__; - - ''' -# --- -# name: test_multiple_interrupts_imperative[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __double(double) - __graph -.-> __double; - __double --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_multiple_interrupts_imperative[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __double(double) - __graph -.-> __double; - __double --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_multiple_interrupts_imperative[postgres_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __double(double) - __graph -.-> __double; - __double --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_multiple_interrupts_imperative[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __double(double) - __graph -.-> __double; - __double --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_multiple_interrupts_imperative[postgres_shallow] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __double(double) - __graph -.-> __double; - __double --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_multiple_interrupts_imperative[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __graph(graph) - __double(double) - __graph -.-> __double; - __double --> __graph; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_multiple_sinks_subgraphs - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - uno(uno) - dos(dos) - subgraph_one(one) - subgraph_two(two) - subgraph_three(three) - __start__ --> uno; - uno -.-> dos; - uno -.-> subgraph_one; - subgraph subgraph - subgraph_one -.-> subgraph_two; - subgraph_one -.-> subgraph_three; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_nested_graph - ''' - graph TD; - __start__ --> inner; - inner --> side; - side --> __end__; - - ''' -# --- -# name: test_nested_graph.1 - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - inner(inner) - side(side) - __end__([__end__
]):::last - __start__ --> inner; - inner --> side; - side --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_nested_graph_xray - dict({ - 'edges': list([ - dict({ - 'conditional': True, - 'source': 'tool_two:__start__', - 'target': 'tool_two:tool_two_slow', - }), - dict({ - 'source': 'tool_two:tool_two_slow', - 'target': 'tool_two:__end__', - }), - dict({ - 'conditional': True, - 'source': 'tool_two:__start__', - 'target': 'tool_two:tool_two_fast', - }), - dict({ - 'source': 'tool_two:tool_two_fast', - 'target': 'tool_two:__end__', - }), - dict({ - 'conditional': True, - 'source': '__start__', - 'target': 'tool_one', - }), - dict({ - 'source': 'tool_one', - 'target': '__end__', - }), - dict({ - 'conditional': True, - 'source': '__start__', - 'target': 'tool_two:__start__', - }), - dict({ - 'source': 'tool_two:__end__', - 'target': '__end__', - }), - dict({ - 'conditional': True, - 'source': '__start__', - 'target': 'tool_three', - }), - dict({ - 'source': 'tool_three', - 'target': '__end__', - }), - ]), - 'nodes': list([ - dict({ - 'data': '__start__', - 'id': '__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tool_one', - }), - 'id': 'tool_one', - 'type': 'runnable', - }), - dict({ - 'data': 'tool_two:__start__', - 'id': 'tool_two:__start__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tool_two:tool_two_slow', - }), - 'id': 'tool_two:tool_two_slow', - 'type': 'runnable', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tool_two:tool_two_fast', - }), - 'id': 'tool_two:tool_two_fast', - 'type': 'runnable', - }), - dict({ - 'data': 'tool_two:__end__', - 'id': 'tool_two:__end__', - 'type': 'schema', - }), - dict({ - 'data': dict({ - 'id': list([ - 'langgraph', - 'utils', - 'runnable', - 'RunnableCallable', - ]), - 'name': 'tool_three', - }), - 'id': 'tool_three', - 'type': 'runnable', - }), - dict({ - 'data': '__end__', - 'id': '__end__', - 'type': 'schema', - }), - ]), - }) -# --- -# name: test_nested_graph_xray.1 - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - tool_one(tool_one) - tool_two___start__(__start__
) - tool_two_tool_two_slow(tool_two_slow) - tool_two_tool_two_fast(tool_two_fast) - tool_two___end__(__end__
) - tool_three(tool_three) - __end__([__end__
]):::last - __start__ -.-> tool_one; - tool_one --> __end__; - __start__ -.-> tool_two___start__; - tool_two___end__ --> __end__; - __start__ -.-> tool_three; - tool_three --> __end__; - subgraph tool_two - tool_two___start__ -.-> tool_two_tool_two_slow; - tool_two_tool_two_slow --> tool_two___end__; - tool_two___start__ -.-> tool_two_tool_two_fast; - tool_two_tool_two_fast --> tool_two___end__; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_prebuilt_chat - '{"title": "LangGraphInput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' -# --- -# name: test_prebuilt_chat.1 - '{"title": "LangGraphOutput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' -# --- -# name: test_prebuilt_chat.2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langchain_core", - "runnables", - "base", - "RunnableLambda" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langchain_core", - "runnables", - "base", - "RunnableLambda" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "data": "continue", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "data": "end", - "conditional": true - } - ] - } - ''' -# --- -# name: test_prebuilt_chat.3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -.  continue  .-> tools; - agent -.  
end  .-> __end__; - - ''' -# --- -# name: test_prebuilt_tool_chat - '{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_prebuilt_tool_chat.1 - '{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_prebuilt_tool_chat.2 - ''' - { - "nodes": [ - { - "id": "__start__", - "type": "schema", - "data": "__start__" - }, - { - "id": "agent", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "utils", - "runnable", - "RunnableCallable" - ], - "name": "agent" - } - }, - { - "id": "tools", - "type": "runnable", - "data": { - "id": [ - "langgraph", - "prebuilt", - "tool_node", - "ToolNode" - ], - "name": "tools" - } - }, - { - "id": "__end__", - "type": "schema", - "data": "__end__" - } - ], - "edges": [ - { - "source": "__start__", - "target": "agent" - }, - { - "source": "tools", - "target": "agent" - }, - { - "source": "agent", - "target": "tools", - "conditional": true - }, - { - "source": "agent", - "target": "__end__", - "conditional": true - } - ] - } - ''' -# --- -# name: test_prebuilt_tool_chat.3 - ''' - graph TD; - __start__ --> agent; - tools --> agent; - agent -.-> tools; - agent -.-> __end__; - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_repeat_condition +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_shallow] ''' graph TD; - __start__ --> Researcher; - 
Researcher -. continue .-> Chart_Generator; - Researcher -. call_tool .-> Call_Tool; - Researcher -. end .-> __end__; - Chart_Generator -. continue .-> Researcher; - Chart_Generator -. call_tool .-> Call_Tool; - Chart_Generator -. end .-> __end__; - Call_Tool -.-> Researcher; - Call_Tool -.-> Chart_Generator; - Researcher -. redo .-> Researcher; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_send_react_interrupt_control[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - foo([foo]):::last - __start__ --> agent; - agent -.-> foo; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_shallow].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) # --- -# name: test_send_react_interrupt_control[postgres] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - foo([foo]):::last - __start__ --> agent; - agent -.-> foo; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_shallow].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_send_react_interrupt_control[postgres_pipe] +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - agent(agent) - foo([foo]):::last - __start__ --> agent; - agent -.-> foo; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_send_react_interrupt_control[postgres_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - foo([foo]):::last - __start__ --> agent; - agent -.-> foo; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite].1 + dict({ + '$defs': dict({ + 'InnerObject': dict({ + 'properties': dict({ + 'yo': dict({ + 'title': 'Yo', + 'type': 'integer', + }), + }), + 'required': list([ + 'yo', + ]), + 'title': 'InnerObject', + 'type': 'object', + }), + }), + 'properties': dict({ + 'inner': dict({ + '$ref': '#/$defs/InnerObject', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + }), + 'required': list([ + 'query', + 'inner', + ]), + 'title': 'Input', + 'type': 'object', + }) # --- -# name: test_send_react_interrupt_control[sqlite] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - agent(agent) - foo([foo]):::last - __start__ --> agent; - agent -.-> foo; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' +# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite].2 + dict({ + 'properties': dict({ + 'answer': dict({ + 'title': 'Answer', + 'type': 'string', + }), + 'docs': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Docs', + 'type': 'array', + }), + }), + 'required': list([ + 'answer', + 'docs', + ]), + 'title': 'Output', + 'type': 'object', + }) # --- -# name: test_simple_multi_edge +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[memory] ''' graph TD; - __start__ --> up; - down --> __end__; - side --> down; - up --> down; - up --> other; - up --> side; + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_start_branch_then[memory] +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - tool_two_slow(tool_two_slow) - tool_two_fast(tool_two_fast) - __end__([__end__
]):::last - __start__ -.-> tool_two_slow; - tool_two_slow --> __end__; - __start__ -.-> tool_two_fast; - tool_two_fast --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_start_branch_then[postgres] +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_pipe] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - tool_two_slow(tool_two_slow) - tool_two_fast(tool_two_fast) - __end__([__end__
]):::last - __start__ -.-> tool_two_slow; - tool_two_slow --> __end__; - __start__ -.-> tool_two_fast; - tool_two_fast --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_start_branch_then[postgres_pipe] +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_pool] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - tool_two_slow(tool_two_slow) - tool_two_fast(tool_two_fast) - __end__([__end__
]):::last - __start__ -.-> tool_two_slow; - tool_two_slow --> __end__; - __start__ -.-> tool_two_fast; - tool_two_fast --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_start_branch_then[postgres_pool] +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_shallow] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - tool_two_slow(tool_two_slow) - tool_two_fast(tool_two_fast) - __end__([__end__
]):::last - __start__ -.-> tool_two_slow; - tool_two_slow --> __end__; - __start__ -.-> tool_two_fast; - tool_two_fast --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_start_branch_then[sqlite] +# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[sqlite] ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - tool_two_slow(tool_two_slow) - tool_two_fast(tool_two_fast) - __end__([__end__
]):::last - __start__ -.-> tool_two_slow; - tool_two_slow --> __end__; - __start__ -.-> tool_two_fast; - tool_two_fast --> __end__; - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> rewrite_query; + analyzer_one --> retriever_one; + qa --> __end__; + retriever_one --> qa; + retriever_two --> qa; + rewrite_query --> analyzer_one; + rewrite_query -.-> retriever_two; ''' # --- -# name: test_state_graph_w_config - '{"title": "LangGraphConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": {"Configurable": {"title": "Configurable", "type": "object", "properties": {"tools": {"title": "Tools", "type": "array", "items": {"type": "string"}}}}}}' -# --- -# name: test_state_graph_w_config.1 - '{"title": "LangGraphInput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' -# --- -# name: test_state_graph_w_config.2 - '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' -# --- -# name: test_state_graph_w_config_inherited_state - '{"title": "LangGraphConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": {"Configurable": {"title": "Configurable", "type": "object", "properties": {"tools": {"title": "Tools", "type": "array", "items": {"type": "string"}}}}}}' -# --- -# name: test_state_graph_w_config_inherited_state.1 - '{"title": "LangGraphInput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' -# --- -# name: test_state_graph_w_config_inherited_state.2 - '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' -# --- -# name: test_state_graph_w_config_inherited_state_keys - '{"$defs": {"Configurable": {"properties": {"tools": {"default": null, "items": {"type": "string"}, "title": "Tools", "type": "array"}}, "title": "Configurable", "type": "object"}}, "properties": {"configurable": {"$ref": "#/$defs/Configurable", "default": null}}, "title": "LangGraphConfig", "type": "object"}' -# --- -# name: test_state_graph_w_config_inherited_state_keys.1 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphInput", "type": "object"}' -# --- -# name: test_state_graph_w_config_inherited_state_keys.2 - '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphOutput", "type": "object"}' -# --- -# name: test_weather_subgraph[memory] +# name: test_multiple_sinks_subgraphs ''' %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; + uno(uno) + dos(dos) + subgraph_one(one) + subgraph_two(two) + subgraph_three(three) + __start__ --> uno; + uno -.-> dos; + uno -.-> subgraph_one; + subgraph subgraph + subgraph_one -.-> subgraph_two; + subgraph_one -.-> subgraph_three; end classDef default fill:#f2f0ff,line-height:1.2 classDef first fill-opacity:0 @@ -5285,74 +1318,180 @@ ''' # --- -# name: test_weather_subgraph[postgres] +# name: test_nested_graph ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> inner; + inner --> side; + side --> __end__; ''' # --- -# name: test_weather_subgraph[postgres_pipe] +# name: test_nested_graph.1 ''' %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end + __start__ --> inner; + inner --> side; + side --> __end__; classDef default fill:#f2f0ff,line-height:1.2 classDef first fill-opacity:0 classDef last fill:#bfb6fc ''' # --- -# name: test_weather_subgraph[postgres_pool] +# name: test_nested_graph_xray + dict({ + 'edges': list([ + dict({ + 'conditional': True, + 'source': 'tool_two:__start__', + 'target': 'tool_two:tool_two_slow', + }), + dict({ + 'source': 'tool_two:tool_two_slow', + 'target': 'tool_two:__end__', + }), + dict({ + 'conditional': True, + 'source': 'tool_two:__start__', + 'target': 'tool_two:tool_two_fast', + }), + dict({ + 'source': 'tool_two:tool_two_fast', + 'target': 'tool_two:__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_one', + }), + dict({ + 'source': 'tool_one', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_two:__start__', + }), + dict({ + 'source': 'tool_two:__end__', + 'target': '__end__', + }), + dict({ + 'conditional': True, + 'source': '__start__', + 'target': 'tool_three', + }), + dict({ + 'source': 'tool_three', + 'target': '__end__', + }), + ]), + 'nodes': list([ + dict({ + 'data': '__start__', + 'id': '__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_one', + }), + 'id': 'tool_one', + 'type': 'runnable', + }), + dict({ + 'data': 'tool_two:__start__', + 'id': 'tool_two:__start__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_two:tool_two_slow', + }), + 'id': 'tool_two:tool_two_slow', + 'type': 'runnable', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_two:tool_two_fast', + }), + 'id': 'tool_two:tool_two_fast', + 'type': 'runnable', + }), + dict({ + 'data': 'tool_two:__end__', + 'id': 'tool_two:__end__', + 'type': 'schema', + }), + dict({ + 'data': dict({ + 'id': list([ + 'langgraph', + 'utils', + 'runnable', + 'RunnableCallable', + ]), + 'name': 'tool_three', + }), + 'id': 'tool_three', + 'type': 'runnable', + }), + dict({ + 'data': '__end__', + 'id': '__end__', + 'type': 'schema', + }), + ]), + }) +# --- +# name: test_nested_graph_xray.1 ''' %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node__start__
) + tool_two_tool_two_slow(tool_two_slow) + tool_two_tool_two_fast(tool_two_fast) + tool_two___end__(__end__
) + tool_three(tool_three) + __end__([__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; + __start__ -.-> tool_one; + tool_one --> __end__; + __start__ -.-> tool_two___start__; + tool_two___end__ --> __end__; + __start__ -.-> tool_three; + tool_three --> __end__; + subgraph tool_two + tool_two___start__ -.-> tool_two_tool_two_slow; + tool_two_tool_two_slow --> tool_two___end__; + tool_two___start__ -.-> tool_two_tool_two_fast; + tool_two_tool_two_fast --> tool_two___end__; end classDef default fill:#f2f0ff,line-height:1.2 classDef first fill-opacity:0 @@ -5360,31 +1499,43 @@ ''' # --- -# name: test_weather_subgraph[sqlite] +# name: test_repeat_condition ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc + __start__ --> Researcher; + Researcher -. continue .-> Chart_Generator; + Researcher -. call_tool .-> Call_Tool; + Researcher -. end .-> __end__; + Chart_Generator -. continue .-> Researcher; + Chart_Generator -. call_tool .-> Call_Tool; + Chart_Generator -. end .-> __end__; + Call_Tool -.-> Researcher; + Call_Tool -.-> Chart_Generator; + Researcher -. redo .-> Researcher; + + ''' +# --- +# name: test_simple_multi_edge + ''' + graph TD; + __start__ --> up; + down --> __end__; + side --> down; + up --> down; + up --> other; + up --> side; ''' # --- +# name: test_state_graph_w_config_inherited_state_keys + '{"$defs": {"Configurable": {"properties": {"tools": {"default": null, "items": {"type": "string"}, "title": "Tools", "type": "array"}}, "title": "Configurable", "type": "object"}}, "properties": {"configurable": {"$ref": "#/$defs/Configurable", "default": null}}, "title": "LangGraphConfig", "type": "object"}' +# --- +# name: test_state_graph_w_config_inherited_state_keys.1 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphInput", "type": "object"}' +# --- +# name: test_state_graph_w_config_inherited_state_keys.2 + '{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphOutput", "type": "object"}' +# --- # name: test_xray_bool ''' %%{init: {'flowchart': {'curve': 'linear'}}}%% diff --git a/libs/langgraph/tests/__snapshots__/test_pregel_async.ambr b/libs/langgraph/tests/__snapshots__/test_pregel_async.ambr index 69c63b531a..69fdad4948 100644 --- a/libs/langgraph/tests/__snapshots__/test_pregel_async.ambr +++ b/libs/langgraph/tests/__snapshots__/test_pregel_async.ambr @@ -1,334 +1,4 @@ # serializer version: 1 -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[memory] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[postgres_aio] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . 
- +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[postgres_aio_pipe] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[postgres_aio_pool] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[sqlite_aio] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.1 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'answer': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Answer', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - 'docs', - ]), - 'title': 'State', - 'type': 'object', - }) -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.2 - dict({ - '$defs': dict({ - 'InnerObject': dict({ - 'properties': dict({ - 'yo': dict({ - 'title': 'Yo', - 'type': 'integer', - }), - }), - 'required': list([ - 'yo', - ]), - 'title': 'InnerObject', - 'type': 'object', - }), - }), - 'properties': dict({ - 'answer': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Answer', - }), - 'docs': dict({ - 'items': dict({ - 'type': 'string', - }), - 'title': 'Docs', - 'type': 'array', - }), - 'inner': dict({ - '$ref': '#/$defs/InnerObject', - }), - 'query': dict({ - 'title': 'Query', - 'type': 'string', - }), - }), - 'required': list([ - 'query', - 'inner', - 'docs', - ]), - 'title': 'State', - 'type': 
'object', - }) -# --- # name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory] ''' graph TD; @@ -1055,253 +725,6 @@ 'type': 'object', }) # --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[memory] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_aio] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_aio_pipe] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_aio_pool] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . - +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[sqlite_aio] - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +---------------+ - | rewrite_query | - +---------------+ - *** ... - * . - ** ... - +--------------+ . - | analyzer_one | . - +--------------+ . - * . - * . - * . 
- +---------------+ +---------------+ - | retriever_one | | retriever_two | - +---------------+ +---------------+ - *** *** - * * - ** ** - +----+ - | qa | - +----+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- -# name: test_nested_graph - ''' - +-----------+ - | __start__ | - +-----------+ - * - * - * - +-------+ - | inner | - +-------+ - * - * - * - +------+ - | side | - +------+ - * - * - * - +---------+ - | __end__ | - +---------+ - ''' -# --- # name: test_send_react_interrupt_control[memory] ''' %%{init: {'flowchart': {'curve': 'linear'}}}%% @@ -1392,128 +815,3 @@ ''' # --- -# name: test_weather_subgraph[memory] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node) - __end__([__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_weather_subgraph[postgres_aio] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node) - __end__([__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_weather_subgraph[postgres_aio_pipe] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node) - __end__([__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_weather_subgraph[postgres_aio_pool] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node) - __end__([__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- -# name: test_weather_subgraph[sqlite_aio] - ''' - %%{init: {'flowchart': {'curve': 'linear'}}}%% - graph TD; - __start__([__start__
]):::first - router_node(router_node) - normal_llm_node(normal_llm_node) - weather_graph_model_node(model_node) - weather_graph_weather_node(weather_node) - __end__([__end__
]):::last - __start__ --> router_node; - normal_llm_node --> __end__; - weather_graph_weather_node --> __end__; - router_node -.-> normal_llm_node; - router_node -.-> weather_graph_model_node; - router_node -.-> __end__; - subgraph weather_graph - weather_graph_model_node --> weather_graph_weather_node; - end - classDef default fill:#f2f0ff,line-height:1.2 - classDef first fill-opacity:0 - classDef last fill:#bfb6fc - - ''' -# --- diff --git a/libs/langgraph/tests/test_pregel.py b/libs/langgraph/tests/test_pregel.py index eff8ab25e8..e7d2a81642 100644 --- a/libs/langgraph/tests/test_pregel.py +++ b/libs/langgraph/tests/test_pregel.py @@ -1568,8 +1568,6 @@ def graph(input: list[int]) -> list[str]: "title": "LangGraphOutput", } - assert graph.get_graph().draw_mermaid() == snapshot - thread1 = {"configurable": {"thread_id": "1"}} assert [*graph.stream([0, 1], thread1)] == [ {"submapper": "0"}, @@ -1619,8 +1617,6 @@ def graph(state: dict) -> dict: fut_baz = baz(fut_bar.result()) return fut_baz.result() - assert graph.get_graph().draw_mermaid() == snapshot - thread1 = {"configurable": {"thread_id": "1"}} assert [c for c in graph.stream({"a": "0"}, thread1)] == [ { @@ -5048,8 +5044,6 @@ def graph(inputs: dict) -> dict: fut_bar = bar(bar_input) return fut_bar.result() - assert graph.get_graph().draw_mermaid() == snapshot - config = {"configurable": {"thread_id": "1"}} # First run, interrupted at bar graph.invoke({"a": ""}, config) @@ -5081,8 +5075,6 @@ def graph(inputs: dict) -> dict: fut_bar = bar(fut_foo.result()) return fut_bar.result() - assert graph.get_graph().draw_mermaid() == snapshot - config = {"configurable": {"thread_id": "1"}} # First run, interrupted at bar assert not graph.invoke({"a": ""}, config) @@ -5572,8 +5564,6 @@ def graph(state: dict) -> dict: falsy_task().result() interrupt("test") - assert graph.get_graph().draw_mermaid() == snapshot - configurable = {"configurable": {"thread_id": str(uuid.uuid4())}} graph.invoke({"a": 5}, configurable) graph.invoke(Command(resume="123"), configurable) @@ -5606,8 +5596,6 @@ def graph(state: dict) -> dict: return {"values": values} - assert graph.get_graph().draw_mermaid() == snapshot - configurable = {"configurable": {"thread_id": str(uuid.uuid4())}} graph.invoke({}, configurable) graph.invoke(Command(resume="a"), configurable)