Patch the Anthropic client to make it traceable.
wrap_anthropic(
    client: C,
    *,
    tracing_extra: Optional[TracingExtra] = None,
    chat_name: str = 'ChatAnthropic',
    completions_name: str = 'Anthropic'
) -> C

Example:
import anthropic
from langsmith import wrappers
client = wrappers.wrap_anthropic(anthropic.Anthropic())
# Use Anthropic client same as you normally would:
system = "You are a helpful assistant."
messages = [
{
"role": "user",
"content": "What physics breakthroughs do you predict will happen by 2300?",
}
]
completion = client.messages.create(
model="claude-3-5-sonnet-latest",
messages=messages,
max_tokens=1000,
system=system,
)
print(completion.content)
# With raw response to access headers:
raw_response = client.messages.with_raw_response.create(
model="claude-3-5-sonnet-latest",
messages=messages,
max_tokens=1000,
system=system,
)
print(raw_response.headers) # Access HTTP headers
message = raw_response.parse() # Get parsed response
# You can also use the streaming context manager:
with client.messages.stream(
model="claude-3-5-sonnet-latest",
messages=messages,
max_tokens=1000,
system=system,
) as stream:
for text in stream.text_stream:
print(text, end="", flush=True)
    message = stream.get_final_message()

| Name | Type | Description |
|---|---|---|
| client* | C | The client to patch. |
| tracing_extra | Optional[TracingExtra] | Default: None. Extra tracing information. |
| chat_name | str | Default: 'ChatAnthropic'. The run name for the messages endpoint. |
| completions_name | str | Default: 'Anthropic'. The run name for the completions endpoint. |