class ChatDeepSeek extends ChatOpenAICompletions<ChatDeepSeekCallOptions>
DeepSeek chat model integration.
The DeepSeek API is compatible with the OpenAI API, with some limitations.
Setup:
Install @langchain/deepseek and set an environment variable named DEEPSEEK_API_KEY.
npm install @langchain/deepseek
export DEEPSEEK_API_KEY="your-api-key"
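If you prefer not to rely on the environment variable, the key can also be passed directly to the constructor. A minimal sketch, assuming the apiKey constructor field documented in the parameter list below:
import { ChatDeepSeek } from "@langchain/deepseek";
// Pass the API key explicitly instead of relying on DEEPSEEK_API_KEY.
const llmWithExplicitKey = new ChatDeepSeek({
  model: "deepseek-chat",
  apiKey: "your-api-key",
});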
Runtime args can be passed as the second argument to any of the base runnable methods: .invoke, .stream, .batch, etc.
They can also be passed via .withConfig, or as the second argument to .bindTools, as shown in the examples below:
// When calling `.withConfig`, call options should be passed via the first argument
const llmWithArgsBound = llm.withConfig({
stop: ["\n"],
tools: [...],
});
// When calling `.bindTools`, call options should be passed via the second argument
const llmWithTools = llm.bindTools(
[...],
{
tool_choice: "auto",
}
);
import { ChatDeepSeek } from '@langchain/deepseek';
const llm = new ChatDeepSeek({
model: "deepseek-reasoner",
temperature: 0,
// other params...
});
const input = `Translate "I love programming" into French.`;
// Models also accept a list of chat messages or a formatted prompt
const result = await llm.invoke(input);
console.log(result);
AIMessage {
"content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"tokenUsage": {
"completionTokens": 82,
"promptTokens": 20,
"totalTokens": 102
},
"finish_reason": "stop"
},
"tool_calls": [],
"invalid_tool_calls": []
}
for await (const chunk of await llm.stream(input)) {
console.log(chunk);
}
AIMessageChunk {
"content": "",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": "The",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": " French",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": " translation",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": " of",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": " \"",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": "I",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": " love",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
...
AIMessageChunk {
"content": ".",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": null
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
AIMessageChunk {
"content": "",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": "stop"
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
import { AIMessageChunk } from '@langchain/core/messages';
import { concat } from '@langchain/core/utils/stream';
const stream = await llm.stream(input);
let full: AIMessageChunk | undefined;
for await (const chunk of stream) {
full = !full ? chunk : concat(full, chunk);
}
console.log(full);
AIMessageChunk {
"content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.",
"additional_kwargs": {
"reasoning_content": "...",
},
"response_metadata": {
"finishReason": "stop"
},
"tool_calls": [],
"tool_call_chunks": [],
"invalid_tool_calls": []
}
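Since deepseek-reasoner surfaces its chain-of-thought under additional_kwargs.reasoning_content (as in the outputs above), the reasoning trace can be read separately from the final answer. A minimal sketch using the llm and input defined earlier:
const reasoningResult = await llm.invoke(input);
// The reasoning trace and the final answer are separate fields.
console.log(reasoningResult.additional_kwargs.reasoning_content);
console.log(reasoningResult.content);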
import { z } from 'zod';
const llmForToolCalling = new ChatDeepSeek({
model: "deepseek-chat",
temperature: 0,
// other params...
});
const GetWeather = {
name: "GetWeather",
description: "Get the current weather in a given location",
schema: z.object({
location: z.string().describe("The city and state, e.g. San Francisco, CA")
}),
}
const GetPopulation = {
name: "GetPopulation",
description: "Get the current population in a given location",
schema: z.object({
location: z.string().describe("The city and state, e.g. San Francisco, CA")
}),
}
const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);
const aiMsg = await llmWithTools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
);
console.log(aiMsg.tool_calls);
[
{
name: 'GetWeather',
args: { location: 'Los Angeles, CA' },
type: 'tool_call',
id: 'call_cd34'
},
{
name: 'GetWeather',
args: { location: 'New York, NY' },
type: 'tool_call',
id: 'call_68rf'
},
{
name: 'GetPopulation',
args: { location: 'Los Angeles, CA' },
type: 'tool_call',
id: 'call_f81z'
},
{
name: 'GetPopulation',
args: { location: 'New York, NY' },
type: 'tool_call',
id: 'call_8byt'
}
]
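To complete the tool-calling loop, each tool call can be executed and its result passed back as a ToolMessage before invoking the model again. A sketch; the tool handler implementations here are hypothetical stand-ins:
import { BaseMessage, HumanMessage, ToolMessage } from "@langchain/core/messages";
// Hypothetical tool implementations, for illustration only.
const toolHandlers: Record<string, (args: { location: string }) => string> = {
  GetWeather: ({ location }) => `It is 75°F in ${location}.`,
  GetPopulation: ({ location }) => `${location} has roughly 4 million residents.`,
};
const conversation: BaseMessage[] = [
  new HumanMessage("Which city is hotter today and which is bigger: LA or NY?"),
  aiMsg,
];
for (const toolCall of aiMsg.tool_calls ?? []) {
  // Run the matching handler and report the result back to the model.
  const output = toolHandlers[toolCall.name](toolCall.args as { location: string });
  conversation.push(new ToolMessage({ content: output, tool_call_id: toolCall.id! }));
}
const followUp = await llmWithTools.invoke(conversation);
console.log(followUp.content);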
import { z } from 'zod';
const Joke = z.object({
setup: z.string().describe("The setup of the joke"),
punchline: z.string().describe("The punchline to the joke"),
rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
}).describe('Joke to tell user.');
const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: "Joke" });
const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
console.log(jokeResult);
{
setup: "Why don't cats play poker in the wild?",
punchline: 'Because there are too many cheetahs.'
}
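withStructuredOutput also accepts an includeRaw option, in which case the result contains both the raw model message and the parsed object. A minimal sketch based on the standard LangChain option:
const structuredLlmWithRaw = llmForToolCalling.withStructuredOutput(Joke, {
  name: "Joke",
  includeRaw: true,
});
const { raw, parsed } = await structuredLlmWithRaw.invoke("Tell me a joke about cats");
// `raw` is the underlying AIMessage; `parsed` conforms to the Joke schema.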
Whether to include the raw OpenAI response in the output message's "additional_kwargs" field. Currently in experimental beta.
API key to use when making requests to OpenAI. Defaults to the value of
OPENAI_API_KEY environment variable.
Parameters for audio output. Required when audio output is requested with
modalities: ["audio"].
The async caller should be used by subclasses to make any async calls, which will thus benefit from the concurrency and retry logic.
Penalizes repeated tokens according to frequency
Dictionary used to adjust the probability of specific tokens being generated
Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size.
Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default:
["text"]
The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use:
["text", "audio"]
Model name to use
Holds any additional parameters that are valid to pass to openai.createCompletion that are not explicitly specified on this class.
Number of completions to generate for each prompt
Penalizes repeated tokens
Used by OpenAI to cache responses for similar requests to optimize your cache hit rates.
Used by OpenAI to set cache retention time
Options for reasoning models.
Note that some options, like reasoning summaries, are only available when using the responses API. This option is ignored when not using a reasoning model.
Service tier to use for this request. Can be "auto", "default", "flex", or "priority". Specifies the service tier for prioritization and latency optimization.
List of stop words to use when generating
Alias for stopSequences
List of stop words to use when generating
Whether to stream the results or not. Enabling this disables tokenUsage reporting.
Whether or not to include token usage data in streamed chunks.
Whether the model supports the strict argument when passing in tools.
If undefined, the strict argument will not be passed to OpenAI.
Sampling temperature to use
Timeout to use when making requests to OpenAI.
An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
Total probability mass of tokens to consider at each step
Unique string identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
Whether to print out response text.
The verbosity of the model's response.
Must be set to true in tenancies with Zero Data Retention. Setting to true will disable
output storage in the Responses API, but this DOES NOT enable Zero Data Retention in your
OpenAI organization or project. This must be configured directly with OpenAI.
See https://platform.openai.com/docs/guides/your-data and https://platform.openai.com/docs/api-reference/responses/create#responses-create-store
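Many of the parameters above can be combined at construction time. A minimal sketch using documented fields; the values are illustrative, not recommendations:
const configuredLlm = new ChatDeepSeek({
  model: "deepseek-chat",
  temperature: 0.7, // sampling temperature
  maxTokens: 512, // cap on generated tokens
  streamUsage: true, // include token usage data in streamed chunks
  timeout: 30_000, // request timeout in milliseconds
});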
Internal method that handles batching and configuration for a runnable. It takes a function, input values, and optional configuration, and returns a promise that resolves to the output values.
Create a unique cache key for a specific call to a specific language model.
Default streaming implementation. Subclasses should override this method if they support streaming output.
Helper method to transform an Iterator of Input values into an Iterator of
Output values, with callbacks.
Use this to implement stream() or transform() in Runnable subclasses.
Assigns new fields to the dict output of this runnable. Returns a new runnable.
Convert a runnable to a tool. Return a new instance of RunnableToolLike
which contains the runnable, name, description and schema.
Default implementation of batch, which calls invoke N times. Subclasses should override this method if they can batch more efficiently.
Bind tool-like objects to this chat model.
Generates chat based on the input messages.
Generates a prompt based on the input prompt values.
Get the number of tokens in the content.
Get the identifying parameters for the model
Invokes the chat model with a single input.
Moderate content using OpenAI's Moderation API.
This method checks whether content violates OpenAI's content policy by analyzing text for categories such as hate, harassment, self-harm, sexual content, violence, and more.
Pick keys from the dict output of this runnable. Returns a new runnable.
Create a new runnable sequence that runs each individual runnable in series, piping the output of one runnable into another runnable or runnable-like.
Stream output in chunks.
Stream all output from a runnable, as reported to the callback system. This includes all inner runs of LLMs, Retrievers, Tools, etc. Output is streamed as Log objects, which include a list of jsonpatch ops that describe how the state of the run has changed in each step, and the final state of the run. The jsonpatch ops can be applied in order to construct state.
Default implementation of transform, which buffers input and then calls stream. Subclasses should override this method if they can start producing output while input is still being generated.
Bind config to a Runnable, returning a new Runnable.
Create a new runnable from the current one that will try invoking other passed fallback runnables if the initial invocation fails.
Bind lifecycle listeners to a Runnable, returning a new Runnable. The Run object contains information about the run, including its id, type, input, output, error, startTime, endTime, and any tags or metadata added to the run.
Add retry logic to an existing runnable.
Generate a stream of events emitted by the internal steps of the runnable.
Use to create an iterator over StreamEvents that provide real-time information about the progress of the runnable, including StreamEvents from intermediate results.
A StreamEvent is a dictionary with the following schema:
event: string - Event names are of the format: on_[runnable_type]_(start|stream|end).
name: string - The name of the runnable that generated the event.
run_id: string - Randomly generated ID associated with the given execution of the runnable that emitted the event. A child runnable that gets invoked as part of the execution of a parent runnable is assigned its own unique ID.
tags: string[] - The tags of the runnable that generated the event.
metadata: Record<string, any> - The metadata of the runnable that generated the event.
data: Record<string, any>
Below is a table that illustrates some events that might be emitted by various chains. Metadata fields have been omitted from the table for brevity.
ATTENTION: This reference table is for the V2 version of the schema.
+----------------------+-----------------------------+------------------------------------------+
| event | input | output/chunk |
+======================+=============================+==========================================+
| on_chat_model_start | {"messages": BaseMessage[]} | |
+----------------------+-----------------------------+------------------------------------------+
| on_chat_model_stream | | AIMessageChunk("hello") |
+----------------------+-----------------------------+------------------------------------------+
| on_chat_model_end | {"messages": BaseMessage[]} | AIMessageChunk("hello world") |
+----------------------+-----------------------------+------------------------------------------+
| on_llm_start | {'input': 'hello'} | |
+----------------------+-----------------------------+------------------------------------------+
| on_llm_stream | | 'Hello' |
+----------------------+-----------------------------+------------------------------------------+
| on_llm_end | 'Hello human!' | |
+----------------------+-----------------------------+------------------------------------------+
| on_chain_start | | |
+----------------------+-----------------------------+------------------------------------------+
| on_chain_stream | | "hello world!" |
+----------------------+-----------------------------+------------------------------------------+
| on_chain_end | [Document(...)] | "hello world!, goodbye world!" |
+----------------------+-----------------------------+------------------------------------------+
| on_tool_start | {"x": 1, "y": "2"} | |
+----------------------+-----------------------------+------------------------------------------+
| on_tool_end | | {"x": 1, "y": "2"} |
+----------------------+-----------------------------+------------------------------------------+
| on_retriever_start | {"query": "hello"} | |
+----------------------+-----------------------------+------------------------------------------+
| on_retriever_end | {"query": "hello"} | [Document(...), ..] |
+----------------------+-----------------------------+------------------------------------------+
| on_prompt_start | {"question": "hello"} | |
+----------------------+-----------------------------+------------------------------------------+
| on_prompt_end | {"question": "hello"} | ChatPromptValue(messages: BaseMessage[]) |
+----------------------+-----------------------------+------------------------------------------+
The "on_chain_*" events are the default for Runnables that don't fit one of the above categories.
In addition to the standard events above, users can also dispatch custom events.
Custom events will only be surfaced in the v2 version of the API!
A custom event has the following format:
+-----------+------+------------------------------------------------------------+
| Attribute | Type | Description |
+===========+======+============================================================+
| name | str | A user defined name for the event. |
+-----------+------+------------------------------------------------------------+
| data | Any | The data associated with the event. This can be anything. |
+-----------+------+------------------------------------------------------------+
Here's an example:
import { RunnableLambda } from "@langchain/core/runnables";
import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch";
// Use this import for web environments that don't support "async_hooks"
// and manually pass config to child runs.
// import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch/web";
const slowThing = RunnableLambda.from(async (someInput: string) => {
// Placeholder for some slow operation
await new Promise((resolve) => setTimeout(resolve, 100));
await dispatchCustomEvent("progress_event", {
message: "Finished step 1 of 2",
});
await new Promise((resolve) => setTimeout(resolve, 100));
return "Done";
});
const eventStream = await slowThing.streamEvents("hello world", {
version: "v2",
});
for await (const event of eventStream) {
if (event.event === "on_custom_event") {
console.log(event);
}
}