langchain.js
    Preparing search index...

    xAI chat model integration.

    The xAI API is compatible with the OpenAI API, with some limitations.

    Setup: Install @langchain/xai and set an environment variable named XAI_API_KEY.

    npm install @langchain/xai
    export XAI_API_KEY="your-api-key"

    Runtime args can be passed as the second argument to any of the base runnable methods .invoke, .stream, .batch, etc. They can also be passed via .withConfig, or the second arg in .bindTools, as shown in the examples below:

    // When calling `.withConfig`, call options should be passed via the first argument
    const llmWithArgsBound = llm.withConfig({
    stop: ["\n"],
    tools: [...],
    });

    // When calling `.bindTools`, call options should be passed via the second argument
    const llmWithTools = llm.bindTools(
    [...],
    {
    tool_choice: "auto",
    }
    );
    Instantiate
    import { ChatXAI } from '@langchain/xai';

    const llm = new ChatXAI({
    model: "grok-beta",
    temperature: 0,
    // other params...
    });

    Invoking
    const input = `Translate "I love programming" into French.`;

    // Models also accept a list of chat messages or a formatted prompt
    const result = await llm.invoke(input);
    console.log(result);
    AIMessage {
      "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.",
      "additional_kwargs": {},
      "response_metadata": {
        "tokenUsage": {
          "completionTokens": 82,
          "promptTokens": 20,
          "totalTokens": 102
        },
        "finish_reason": "stop"
      },
      "tool_calls": [],
      "invalid_tool_calls": []
    }
    

    Streaming Chunks
    for await (const chunk of await llm.stream(input)) {
    console.log(chunk);
    }
    AIMessageChunk {
      "content": "",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": "The",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": " French",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": " translation",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": " of",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": " \"",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": "I",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": " love",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    ...
    AIMessageChunk {
      "content": ".",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": null
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    AIMessageChunk {
      "content": "",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": "stop"
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    

    Aggregate Streamed Chunks
    import { AIMessageChunk } from '@langchain/core/messages';
    import { concat } from '@langchain/core/utils/stream';

    const stream = await llm.stream(input);
    let full: AIMessageChunk | undefined;
    for await (const chunk of stream) {
    full = !full ? chunk : concat(full, chunk);
    }
    console.log(full);
    AIMessageChunk {
      "content": "The French translation of \"I love programming\" is \"J'aime programmer\". In this sentence, \"J'aime\" is the first person singular conjugation of the French verb \"aimer\" which means \"to love\", and \"programmer\" is the French infinitive for \"to program\". I hope this helps! Let me know if you have any other questions.",
      "additional_kwargs": {},
      "response_metadata": {
        "finishReason": "stop"
      },
      "tool_calls": [],
      "tool_call_chunks": [],
      "invalid_tool_calls": []
    }
    

    Bind tools
    import { z } from 'zod';

    const llmForToolCalling = new ChatXAI({
    model: "grok-beta",
    temperature: 0,
    // other params...
    });

    const GetWeather = {
    name: "GetWeather",
    description: "Get the current weather in a given location",
    schema: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA")
    }),
    }

    const GetPopulation = {
    name: "GetPopulation",
    description: "Get the current population in a given location",
    schema: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA")
    }),
    }

    const llmWithTools = llmForToolCalling.bindTools([GetWeather, GetPopulation]);
    const aiMsg = await llmWithTools.invoke(
    "Which city is hotter today and which is bigger: LA or NY?"
    );
    console.log(aiMsg.tool_calls);
    [
      {
        name: 'GetWeather',
        args: { location: 'Los Angeles, CA' },
        type: 'tool_call',
        id: 'call_cd34'
      },
      {
        name: 'GetWeather',
        args: { location: 'New York, NY' },
        type: 'tool_call',
        id: 'call_68rf'
      },
      {
        name: 'GetPopulation',
        args: { location: 'Los Angeles, CA' },
        type: 'tool_call',
        id: 'call_f81z'
      },
      {
        name: 'GetPopulation',
        args: { location: 'New York, NY' },
        type: 'tool_call',
        id: 'call_8byt'
      }
    ]
    

    Structured Output
    import { z } from 'zod';

    const Joke = z.object({
    setup: z.string().describe("The setup of the joke"),
    punchline: z.string().describe("The punchline to the joke"),
    rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
    }).describe('Joke to tell user.');

    const structuredLlm = llmForToolCalling.withStructuredOutput(Joke, { name: "Joke" });
    const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
    console.log(jokeResult);
    {
      setup: "Why don't cats play poker in the wild?",
      punchline: 'Because there are too many cheetahs.'
    }
    

    Server Tool Calling (Live Search)

    xAI supports server-side tools that are executed by the API rather than requiring client-side execution. The live_search tool enables the model to search the web for real-time information.

    // Method 1: Using the built-in live_search tool
    const llm = new ChatXAI({
    model: "grok-beta",
    temperature: 0,
    });

    const llmWithSearch = llm.bindTools([{ type: "live_search" }]);
    const result = await llmWithSearch.invoke("What happened in tech news today?");
    console.log(result.content);
    // The model will search the web and include real-time information in its response
    // Method 2: Using searchParameters for more control
    const llm = new ChatXAI({
    model: "grok-beta",
    searchParameters: {
    mode: "auto", // "auto" | "on" | "off"
    max_search_results: 5,
    from_date: "2024-01-01", // ISO date string
    return_citations: true,
    }
    });

    const result = await llm.invoke("What are the latest AI developments?");
    // Method 3: Override search parameters per request
    const result = await llm.invoke("Find recent news about SpaceX", {
    searchParameters: {
    mode: "on",
    max_search_results: 10,
    sources: [
    { type: "web", allowed_websites: ["spacex.com", "nasa.gov"] },
    ],
    }
    });

    Hierarchy (View Summary)

    Index

    Constructors

    • Parameters

      Returns ChatXAI

    Properties

    lc_namespace: string[] = ...
    lc_serializable: boolean = true
    searchParameters?: XAISearchParameters

    Default search parameters for the Live Search API.

    Accessors

    • get lc_secrets(): undefined | { [key: string]: string }

      Returns undefined | { [key: string]: string }

    • get profile(): ModelProfile

      Return profiling information for the model.

      Provides information about the model's capabilities and constraints, including token limits, multimodal support, and advanced features like tool calling and structured output.

      Returns ModelProfile

      An object describing the model's capabilities and constraints

      const model = new ChatXAI({ model: "grok-beta" });
      const profile = model.profile;
      console.log(profile.maxInputTokens); // 128000
      console.log(profile.imageInputs); // true

    Methods

    • Parameters

      • delta: Record<string, any>
      • rawResponse: ChatCompletionChunk
      • Optional defaultRole: "function" | "user" | "system" | "developer" | "assistant" | "tool"

      Returns AIMessageChunk

    • Parameters

      • message: any
      • rawResponse: ChatCompletion

      Returns AIMessageChunk

    • Get the effective search parameters, merging defaults with call options.

      Parameters

      • Optional options: unknown

        Call options that may contain search parameters

      Returns undefined | XAISearchParameters

      Merged search parameters or undefined if none are configured

    • Check if any built-in tools (like live_search) are in the tools list.

      Parameters

      • Optional tools: any[]

        List of tools to check

      Returns boolean

      true if any built-in tools are present

    • Returns string

    • Parameters

      • request: ChatCompletionCreateParamsStreaming
      • Optional options: any

      Returns Promise<AsyncIterable<ChatCompletionChunk, any, any>>

    • Parameters

      • request: ChatCompletionCreateParamsNonStreaming
      • Optional options: any

      Returns Promise<ChatCompletion>

    • Formats tools to xAI/OpenAI format, preserving provider-specific definitions.

      Parameters

      • tools: any[]

        The tools to format

      Returns undefined | any[]

      The formatted tools

    • Parameters

      • options: unknown

      Returns LangSmithParams

    • Returns Serialized

    • Type Parameters

      • RunOutput extends Record<string, any> = Record<string, any>

      Parameters

      • outputSchema: any
      • Optional config: any

      Returns Runnable<BaseLanguageModelInput, RunOutput>

    • Type Parameters

      • RunOutput extends Record<string, any> = Record<string, any>

      Parameters

      • outputSchema: any
      • Optional config: any

      Returns Runnable<BaseLanguageModelInput, { parsed: RunOutput; raw: BaseMessage }>

    • Type Parameters

      • RunOutput extends Record<string, any> = Record<string, any>

      Parameters

      • outputSchema: any
      • Optional config: any

      Returns any

    • Returns string