Create a background run.
create(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = 'values',
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
on_completion: OnCompletionBehavior | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None
) -> Run
--------------------------------------------------------------------------------
client = get_sync_client(url="http://localhost:2024")
background_run = client.runs.create(
thread_id="my_thread_id",
assistant_id="my_assistant_id",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
)
print(background_run)
--------------------------------------------------------------------------------
{
'run_id': 'my_run_id',
'thread_id': 'my_thread_id',
'assistant_id': 'my_assistant_id',
'created_at': '2024-07-25T15:35:42.598503+00:00',
'updated_at': '2024-07-25T15:35:42.598503+00:00',
'metadata': {'name': 'my_run'},
'status': 'pending',
'kwargs':
{
'input':
{
'messages': [
{
'role': 'user',
'content': 'hello!'
}
]
},
'config':
{
'metadata':
{
'created_by': 'system'
},
'configurable':
{
'run_id': 'my_run_id',
'user_id': None,
'graph_id': 'agent',
'thread_id': 'my_thread_id',
'checkpoint_id': None,
'assistant_id': 'my_assistant_id'
}
},
'context':
{
'model_name': 'openai'
},
'webhook': "https://my.fake.webhook.com",
'temporary': False,
'stream_mode': ['values'],
'feedback_keys': None,
'interrupt_after': ["node_to_stop_after_1","node_to_stop_after_2"],
'interrupt_before': ["node_to_stop_before_1","node_to_stop_before_2"]
},
'multitask_strategy': 'interrupt'
}
--------------------------------------------------------------------------------
| Name | Type | Description |
|---|---|---|
thread_id* | str | None | The thread ID to assign to the run. If `None`, a stateless run is created. |
assistant_id* | str | The assistant ID or graph name to stream from. If using graph name, will default to first assistant created from that graph. |
input | Input | None | Default: None. The input to the graph. |
command | Command | None | Default: None. The command to execute. |
stream_mode | StreamMode | Sequence[StreamMode] | Default: 'values'. The stream mode(s) to use. |
stream_subgraphs | bool | Default: False. Whether to stream output from subgraphs. |
stream_resumable | bool | Default: False. Whether the stream is considered resumable. If true, the stream can be resumed and replayed in its entirety even after disconnection. |
metadata | Mapping[str, Any] | None | Default: None. Metadata to assign to the run. |
config | Config | None | Default: None. The configuration for the assistant. |
context | Context | None | Default: None. Static context to add to the assistant. |
checkpoint | Checkpoint | None | Default: None. The checkpoint to resume from. |
checkpoint_id | str | None | Default: None. The ID of the checkpoint to resume from. |
checkpoint_during | bool | None | Default: None. (Deprecated) Whether to checkpoint during the run (or only at the end/interruption). |
interrupt_before | All | Sequence[str] | None | Default: None. Nodes to interrupt immediately before they get executed. |
interrupt_after | All | Sequence[str] | None | Default: None. Nodes to interrupt immediately after they get executed. |
webhook | str | None | Default: None. Webhook to call after LangGraph API call is done. |
multitask_strategy | MultitaskStrategy | None | Default: None. Multitask strategy to use. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'. |
on_completion | OnCompletionBehavior | None | Default: None. Whether to delete or keep the thread created for a stateless run. Must be one of 'delete' or 'keep'. |
if_not_exists | IfNotExists | None | Default: None. How to handle a missing thread. Defaults to 'reject'. Must be either 'reject' (raise error if missing) or 'create' (create a new thread). |
after_seconds | int | None | Default: None. The number of seconds to wait before starting the run. Use to schedule future runs. |
headers | Mapping[str, str] | None | Default: None. Optional custom headers to include with the request. |
params | QueryParamTypes | None | Default: None. Optional additional query parameters to include with the request. |
on_run_created | Callable[[RunCreateMetadata], None] | None | Default: None. Optional callback to call when a run is created. |
durability | Durability | None | Default: None. The durability mode to use for the run. Values are "sync", "async", or "exit". "async" means checkpoints are persisted asynchronously while the next graph step executes (replaces checkpoint_during=True); "sync" means checkpoints are persisted synchronously after each graph step executes (replaces checkpoint_during=False); "exit" means checkpoints are only persisted when the run exits, without saving intermediate steps. |