Create a runnable sequence that uses the Google Vertex AI function-calling API.
```python
create_structured_runnable(
    function: type[BaseModel] | Sequence[type[BaseModel]],
    llm: Runnable,
    *,
    prompt: BasePromptTemplate | None = None,
    use_extra_step: bool = False,
) -> Runnable
```

Example:
```python
from typing import Optional

from langchain_google_vertexai import ChatVertexAI, create_structured_runnable
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field


class RecordPerson(BaseModel):
    """Record some identifying information about a person."""

    name: str = Field(..., description="The person's name")
    age: int = Field(..., description="The person's age")
    fav_food: Optional[str] = Field(None, description="The person's favorite food")


class RecordDog(BaseModel):
    """Record some identifying information about a dog."""

    name: str = Field(..., description="The dog's name")
    color: str = Field(..., description="The dog's color")
    fav_food: Optional[str] = Field(None, description="The dog's favorite food")


llm = ChatVertexAI(model_name="gemini-pro")
prompt = ChatPromptTemplate.from_template(
    """
You are a world class algorithm for recording entities.
Make calls to the relevant function to record the entities in the following input: {input}
Tip: Make sure to answer in the correct format"""
)
chain = create_structured_runnable([RecordPerson, RecordDog], llm, prompt=prompt)
chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
```

| Name | Type | Description |
|---|---|---|
| function* | type[BaseModel] \| Sequence[type[BaseModel]] | Either a single Pydantic BaseModel class or a sequence of BaseModel classes. |
| llm* | Runnable | Language model to use, assumed to support the Google Vertex AI function-calling API. |
| prompt | BasePromptTemplate \| None | Prompt template to pass to the model. Default: None |
| use_extra_step | bool | Whether to make an extra step to parse output into a function. Default: False |
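
To illustrate the remaining parameters, here is a minimal sketch of the single-schema form with `use_extra_step=True`. The `RecordCity` schema, the prompt wording, and the expected output are illustrative assumptions, not part of the reference above.

```python
from typing import Optional

from langchain_google_vertexai import ChatVertexAI, create_structured_runnable
from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field


class RecordCity(BaseModel):
    """Record some identifying information about a city."""

    name: str = Field(..., description="The city's name")
    country: str = Field(..., description="The country the city is in")
    population: Optional[int] = Field(None, description="Approximate population")


llm = ChatVertexAI(model_name="gemini-pro")
prompt = ChatPromptTemplate.from_template(
    "Record the city described in the following input: {input}"
)

# A single BaseModel class is accepted in place of a sequence; per the
# parameter description above, use_extra_step adds an extra step that
# parses the raw model output into the schema.
chain = create_structured_runnable(RecordCity, llm, prompt=prompt, use_extra_step=True)
chain.invoke({"input": "Wellington is the capital of New Zealand"})
# Illustrative output:
# -> RecordCity(name="Wellington", country="New Zealand", population=None)
```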