```python
with_structured_output(
    self,
    schema: _DictOrPydanticClass | None = None,
    *,
    method: Literal['function_calling', 'json_mode', 'json_schema'] = 'json_schema',
    include_raw: bool = False,
    strict: bool | None = None,
    tools: list | None = None,
    **kwargs: Any,
) -> Runnable[LanguageModelInput, _DictOrPydantic]
```

Model wrapper that returns outputs formatted to match the given schema.

*Changed in `langchain-openai` 0.3.12:* Support for `tools` added.

*Changed in `langchain-openai` 0.3.21:* `kwargs` are passed through to the model.

| Name | Type | Description |
|---|---|---|
| `schema` | `_DictOrPydanticClass \| None` | Default: `None`. The output schema. Can be passed in as an OpenAI function/tool schema, a JSON Schema, a TypedDict class, or a Pydantic class. If `schema` is a Pydantic class, the model output is an instance of that class and the model-generated fields are validated by Pydantic; otherwise the output is a `dict` and is not validated. |
| `method` | `Literal['function_calling', 'json_mode', 'json_schema']` | Default: `'json_schema'`. The method for steering model generation: `'function_calling'` uses OpenAI's tool-calling API, `'json_schema'` uses OpenAI's Structured Outputs API, and `'json_mode'` uses OpenAI's JSON mode. |
| `include_raw` | `bool` | Default: `False`. If `False`, only the parsed structured output is returned, and any error raised during model output parsing propagates. If `True`, both the raw model response (a `BaseMessage`) and the parsed output are returned, and parsing errors are caught and returned as well; the final output is then always a dict with keys `'raw'`, `'parsed'`, and `'parsing_error'`. |
| `strict` | `bool \| None` | Default: `None`. Whether to enforce strict schema adherence; see below. |
| `tools` | `list \| None` | Default: `None`. Tool-like objects to bind to the chat model; see below. |
| `kwargs` | `Any` | Default: `{}`. Additional keyword arguments passed through to the model. |
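For the common case, only `schema` is needed. A minimal sketch (assuming an OpenAI API key is configured; the `Joke` schema and prompts are illustrative, not part of the API):

```python
from langchain.chat_models import init_chat_model
from pydantic import BaseModel, Field


class Joke(BaseModel):
    """Joke to tell the user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")


model = init_chat_model("openai:gpt-4o-mini")

# Pydantic schema: output is a validated Joke instance.
structured_model = model.with_structured_output(Joke)
joke = structured_model.invoke("Tell me a joke about cats")

# include_raw=True: output becomes a dict with 'raw', 'parsed', and
# 'parsing_error' keys; parsing errors are returned instead of raised.
raw_model = model.with_structured_output(Joke, include_raw=True)
result = raw_model.invoke("Tell me a joke about cats")
assert result["parsing_error"] is None and isinstance(result["parsed"], Joke)
```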
`strict` accepts three values:

- `True`: Model output is guaranteed to exactly match the schema. The input schema will also be validated against the subset of JSON Schema supported by OpenAI's structured outputs.
- `False`: Neither the input schema nor the model output is validated.
- `None`: The `strict` argument is not passed to the model.
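Concretely, the three settings only change what is sent in the API request. A sketch, reusing `model` from the example above (the `Answer` schema is illustrative):

```python
from pydantic import BaseModel


class Answer(BaseModel):
    answer: str


# strict=True: the request enables OpenAI strict mode, so the model output
# is guaranteed to match Answer, and the schema itself must fall within
# OpenAI's supported JSON Schema subset.
model.with_structured_output(Answer, strict=True)

# strict=False: neither the schema nor the output is validated.
model.with_structured_output(Answer, strict=False)

# strict=None (default): strict is simply omitted from the request.
model.with_structured_output(Answer)
```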
`tools` is a list of tool-like objects to bind to the chat model. It requires that:

- `method` is `'json_schema'` (the default),
- `strict=True`, and
- `include_raw=True`.

If the model elects to call a tool, the resulting `AIMessage` in `'raw'` will include tool calls, as in the example below.
```python
from langchain.chat_models import init_chat_model
from pydantic import BaseModel


class ResponseSchema(BaseModel):
    response: str


def get_weather(location: str) -> str:
    """Get weather at a location."""
    pass


model = init_chat_model("openai:gpt-4o-mini")
structured_model = model.with_structured_output(
    ResponseSchema,
    tools=[get_weather],
    strict=True,
    include_raw=True,
)
structured_model.invoke("What's the weather in Boston?")
```
Because the model elects to call the `get_weather` tool here, `parsed` is `None` and the tool call is carried on the raw `AIMessage`:

```python
{
    "raw": AIMessage(content="", tool_calls=[...], ...),
    "parsing_error": None,
    "parsed": None,
}
```
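When `parsed` comes back `None` because of a tool call, a caller typically executes the tool and feeds the result back until the model produces the structured response. A hedged sketch continuing the example above (the loop and `ToolMessage` wiring are illustrative, not part of `with_structured_output`; the stub `get_weather` would need a real implementation):

```python
from langchain_core.messages import HumanMessage, ToolMessage

messages = [HumanMessage("What's the weather in Boston?")]
result = structured_model.invoke(messages)

# Keep satisfying tool calls until the model returns parsed output.
while result["parsed"] is None and result["raw"].tool_calls:
    messages.append(result["raw"])
    for tool_call in result["raw"].tool_calls:
        output = get_weather(**tool_call["args"])
        messages.append(
            ToolMessage(content=str(output), tool_call_id=tool_call["id"])
        )
    result = structured_model.invoke(messages)

print(result["parsed"])  # ResponseSchema(response="...")
```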