Create a run and stream the results.
stream(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = 'values',
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None
) -> Iterator[StreamPart]

client = get_sync_client(url="http://localhost:2024")
for chunk in client.runs.stream(
thread_id=None,
assistant_id="agent",
input={"messages": [{"role": "user", "content": "how are you?"}]},
stream_mode=["values","debug"],
metadata={"name":"my_run"},
context={"model_name": "anthropic"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
feedback_keys=["my_feedback_key_1","my_feedback_key_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
):
print(chunk)
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
StreamPart(event='metadata', data={'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2'})
StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}]})
StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}, {'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.", 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'ai', 'name': None, 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]})
StreamPart(event='end', data=None)

| Name | Type | Description |
|---|---|---|
thread_id* | str | None | The thread ID to assign to the thread. If `None`, a stateless run will be created. |
assistant_id* | str | The assistant ID or graph name to stream from. If using graph name, will default to first assistant created from that graph. |
input | Input | None | Default: NoneThe input to the graph. |
command | Command | None | Default: NoneThe command to execute. |
stream_mode | StreamMode | Sequence[StreamMode] | Default: 'values'The stream mode(s) to use. |
stream_subgraphs | bool | Default: FalseWhether to stream output from subgraphs. |
stream_resumable | bool | Default: FalseWhether the stream is considered resumable. If true, the stream can be resumed and replayed in its entirety even after disconnection. |
metadata | Mapping[str, Any] | None | Default: NoneMetadata to assign to the run. |
config | Config | None | Default: NoneThe configuration for the assistant. |
context | Context | None | Default: NoneStatic context to add to the assistant. |
checkpoint | Checkpoint | None | Default: NoneThe checkpoint to resume from. |
checkpoint_during | bool | None | Default: None(deprecated) Whether to checkpoint during the run (or only at the end/interruption). |
interrupt_before | All | Sequence[str] | None | Default: NoneNodes to interrupt immediately before they get executed. |
interrupt_after | All | Sequence[str] | None | Default: NoneNodes to interrupt immediately after they get executed. |
feedback_keys | Sequence[str] | None | Default: NoneFeedback keys to assign to run. |
on_disconnect | DisconnectMode | None | Default: NoneThe disconnect mode to use. Must be one of 'cancel' or 'continue'. |
on_completion | OnCompletionBehavior | None | Default: NoneWhether to delete or keep the thread created for a stateless run. Must be one of 'delete' or 'keep'. |
webhook | str | None | Default: NoneWebhook to call after LangGraph API call is done. |
multitask_strategy | MultitaskStrategy | None | Default: NoneMultitask strategy to use. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'. |
if_not_exists | IfNotExists | None | Default: NoneHow to handle missing thread. Defaults to 'reject'. Must be either 'reject' (raise error if missing), or 'create' (create new thread). |
after_seconds | int | None | Default: NoneThe number of seconds to wait before starting the run. Use to schedule future runs. |
headers | Mapping[str, str] | None | Default: NoneOptional custom headers to include with the request. |
on_run_created | Callable[[RunCreateMetadata], None] | None | Default: NoneOptional callback to call when a run is created. |
durability | Durability | None | Default: NoneThe durability to use for the run. Values are "sync", "async", or "exit". "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps |