Model wrapper that returns outputs formatted to match the given schema.
with_structured_output(
self,
schema: _DictOrPydanticClass | None = None,
*,
method: Literal['function_calling', 'json_mode', 'json_schema'] = 'json_schema',
include_raw: bool = False,
strict: bool | None = None,
tools: list | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, _DictOrPydantic]

Changed in langchain-openai 0.3.0: method default changed from "function_calling" to "json_schema".
Changed in langchain-openai 0.3.12: Support for tools added.
Changed in langchain-openai 0.3.21: kwargs are passed through to the model.
Example: schema=Pydantic class, method='json_schema', include_raw=False, strict=True

Note: OpenAI has a number of restrictions on what types of schemas can be
provided if strict = True. When using Pydantic, our model cannot
specify any Field metadata (like min/max constraints) and fields cannot
have default values.
See all constraints.
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str | None = Field(
default=..., description="A justification for the answer."
)
model = ChatOpenAI(model="...", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
AnswerWithJustification(
answer="They weigh the same",
justification="Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.",
)

Example: schema=Pydantic class, method='function_calling', include_raw=False, strict=False

from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str | None = Field(
default=..., description="A justification for the answer."
)
model = ChatOpenAI(model="...", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, method="function_calling"
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
AnswerWithJustification(
answer="They weigh the same",
justification="Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.",
)

Example: schema=Pydantic class, method='json_schema', include_raw=True

from langchain_openai import ChatOpenAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatOpenAI(model="...", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, include_raw=True
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
{
"raw": AIMessage(
content="",
additional_kwargs={
"tool_calls": [
{
"id": "call_Ao02pnFYXD6GN1yzc0uXPsvF",
"function": {
"arguments": '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}',
"name": "AnswerWithJustification",
},
"type": "function",
}
]
},
),
"parsed": AnswerWithJustification(
answer="They weigh the same.",
justification="Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.",
),
"parsing_error": None,
}

Example: schema=TypedDict class, method='json_schema', include_raw=False, strict=False

from typing_extensions import Annotated, TypedDict
from langchain_openai import ChatOpenAI
class AnswerWithJustification(TypedDict):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: Annotated[
str | None, None, "A justification for the answer."
]
model = ChatOpenAI(model="...", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
{
"answer": "They weigh the same",
"justification": "Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.",
}

Example: schema=OpenAI function schema, method='json_schema', include_raw=False

from langchain_openai import ChatOpenAI
oai_schema = {
"name": "AnswerWithJustification",
"description": "An answer to the user question along with justification for the answer.",
"parameters": {
"type": "object",
"properties": {
"answer": {"type": "string"},
"justification": {
"description": "A justification for the answer.",
"type": "string",
},
},
"required": ["answer"],
},
}
model = ChatOpenAI(model="...", temperature=0)
structured_model = model.with_structured_output(oai_schema)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
{
"answer": "They weigh the same",
"justification": "Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.",
}

Example: schema=Pydantic class, method='json_mode', include_raw=True

from langchain_openai import ChatOpenAI
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
justification: str
model = ChatOpenAI(model="...", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
{
"raw": AIMessage(
content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'
),
"parsed": AnswerWithJustification(
answer="They are both the same weight.",
justification="Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.",
),
"parsing_error": None,
}

Example: schema=None, method='json_mode', include_raw=True

structured_model = model.with_structured_output(
method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
{
"raw": AIMessage(
content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'
),
"parsed": {
"answer": "They are both the same weight.",
"justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.",
},
"parsing_error": None,
}

| Name | Type | Description |
|---|---|---|
schema | _DictOrPydanticClass | None | Default: None. The output schema. Can be passed in as:
If See |
method | Literal['function_calling', 'json_mode', 'json_schema'] | Default: 'json_schema'. The method for steering model generation, one of:
Learn more about the differences between methods. |
include_raw | bool | Default: False. If False, only the parsed structured output is returned. If an error occurs during model output parsing it will be raised. If True, both the raw model response (a BaseMessage) and the parsed model response will be returned. If an error occurs during output parsing it will be caught and returned as well. The final output is always a dict with keys "raw", "parsed", and "parsing_error". |
strict | bool | None | Default: None
If schema is specified via Note
|
tools | list | None | Default: NoneA list of tool-like objects to bind to the chat model. Requires that:
If a model elects to call a
tool, the resulting Example
|
kwargs | Any | Additional keyword arguments are passed through to the model. |