Create feedback for a run.
To enable feedback to be batch uploaded in the background you must
specify trace_id. We highly encourage this for latency-sensitive environments.
def create_feedback(
self,
run_id: Optional[ID_TYPE] = None,
key: str = 'unnamed',
*,
score: Union[float, int, bool, None] = None,
value: Union[float, int, bool, str, dict, None] = None,
trace_id: Optional[ID_TYPE] = None,
correction: Union[dict, None] = None,
comment: Union[str, None] = None,
source_info: Optional[dict[str, Any]] = None,
feedback_source_type: Union[ls_schemas.FeedbackSourceType, str] = ls_schemas.FeedbackSourceType.API,
source_run_id: Optional[ID_TYPE] = None,
feedback_id: Optional[ID_TYPE] = None,
feedback_config: Optional[ls_schemas.FeedbackConfig] = None,
stop_after_attempt: int = 10,
project_id: Optional[ID_TYPE] = None,
comparative_experiment_id: Optional[ID_TYPE] = None,
feedback_group_id: Optional[ID_TYPE] = None,
extra: Optional[dict] = None,
error: Optional[bool] = None,
session_id: Optional[ID_TYPE] = None,
start_time: Optional[datetime.datetime] = None,
**kwargs: Any,
) -> ls_schemas.Feedback:
from langsmith import trace, traceable, Client
@traceable
def foo(x):
return {"y": x * 2}
@traceable
def bar(y):
return {"z": y - 1}
client = Client()
inputs = {"x": 1}
with trace(name="foobar", inputs=inputs) as root_run:
result = foo(**inputs)
result = bar(**result)
root_run.outputs = result
trace_id = root_run.id
child_runs = root_run.child_runs
# Provide feedback for a trace (a.k.a. a root run)
client.create_feedback(
key="user_feedback",
score=1,
trace_id=trace_id,
)
# Provide feedback for a child run
foo_run_id = [run for run in child_runs if run.name == "foo"][0].id
client.create_feedback(
key="correctness",
score=0,
run_id=foo_run_id,
# trace_id= is optional but recommended to enable batched and backgrounded
# feedback ingestion.
trace_id=trace_id,
)

| Name | Type | Description |
|---|---|---|
| key | str | Default: 'unnamed'. The name of the feedback metric. |
| score | Optional[Union[float, int, bool]] | Default: None. The score to rate this run on the metric or aspect. |
| value | Optional[Union[float, int, bool, str, dict]] | Default: None. The display value or non-numeric value for this feedback. |
| run_id | Optional[Union[UUID, str]] | Default: None. The ID of the run to provide feedback for. At least one of run_id, trace_id, or project_id must be specified. |
| trace_id | Optional[Union[UUID, str]] | Default: None. The ID of the trace (i.e. root parent run) of the run to provide feedback for (specified by run_id). If run_id and trace_id are the same, only trace_id needs to be specified. NOTE: trace_id is required for feedback ingestion to be batched and backgrounded. |
| correction | Optional[dict] | Default: None. The proper ground truth for this run. |
| comment | Optional[str] | Default: None. A comment about this feedback, such as a justification for the score or chain-of-thought trajectory for an LLM judge. |
| source_info | Optional[Dict[str, Any]] | Default: None. Information about the source of this feedback. |
| feedback_source_type | Union[FeedbackSourceType, str] | Default: ls_schemas.FeedbackSourceType.API. The type of feedback source, such as model (for model-generated feedback) or API. |
| source_run_id | Optional[Union[UUID, str]] | Default: None. The ID of the run that generated this feedback, if a "model" type. |
| feedback_id | Optional[Union[UUID, str]] | Default: None. The ID of the feedback to create. If not provided, a random UUID will be generated. |
| feedback_config | Optional[FeedbackConfig] | Default: None. The configuration specifying how to interpret feedback with this key. Examples include continuous (with min/max bounds), categorical, or freeform. |
| stop_after_attempt | int | Default: 10. The number of times to retry the request before giving up. |
| project_id | Optional[Union[UUID, str]] | Default: None. The ID of the project (or experiment) to provide feedback on. This is used for creating summary metrics for experiments. Cannot specify run_id or trace_id if project_id is specified, and vice versa. |
| comparative_experiment_id | Optional[Union[UUID, str]] | Default: None. If this feedback was logged as a part of a comparative experiment, this associates the feedback with that experiment. |
| feedback_group_id | Optional[Union[UUID, str]] | Default: None. When logging preferences, ranking runs, or other comparative feedback, this is used to group feedback together. |
| extra | Optional[Dict] | Default: None. Metadata for the feedback. |
| session_id | Optional[Union[UUID, str]] | Default: None. The session (project) ID of the run this feedback is for. Used to optimize feedback ingestion by avoiding server-side lookups. |
| start_time | Optional[datetime] | Default: None. The start time of the run this feedback is for. Used to optimize feedback ingestion by avoiding server-side lookups. |
| **kwargs | Any | Additional keyword arguments. |