BedrockChat

    AWS Bedrock chat model integration.

    Setup: Install @langchain/community and set the following environment variables:

    npm install @langchain/community
    export AWS_REGION="your-aws-region"
    export AWS_SECRET_ACCESS_KEY="your-aws-secret-access-key"
    export AWS_ACCESS_KEY_ID="your-aws-access-key-id"

    Runtime args can be passed as the second argument to any of the base runnable methods (.invoke, .stream, .batch, etc.). They can also be bound via .withConfig, or passed as the second argument to .bindTools, as shown in the examples below:

    // When calling `.withConfig`, call options should be passed via the first argument
    const llmWithArgsBound = llm.withConfig({
      stop: ["\n"],
      tools: [...],
    });

    // When calling `.bindTools`, call options should be passed via the second argument
    const llmWithTools = llm.bindTools(
      [...],
      {
        stop: ["stop on this token!"],
      }
    );
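
    Call options can also be supplied per call, as the second argument to .invoke, .stream, or .batch. A minimal sketch (assuming the `llm` instance and `messages` array defined in the examples below):

    // Per-call options apply to this invocation only
    const perCallResult = await llm.invoke(messages, {
      stop: ["\n"],
    });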
    Instantiate
    import { BedrockChat } from '@langchain/community/chat_models/bedrock';

    const llm = new BedrockChat({
      region: process.env.BEDROCK_AWS_REGION,
      maxRetries: 0,
      model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
      temperature: 0,
      maxTokens: undefined,
      // other params...
    });

    // You can also pass credentials in explicitly:
    const llmWithCredentials = new BedrockChat({
      region: process.env.BEDROCK_AWS_REGION,
      model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
      credentials: {
        secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
        accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
      },
    });

    Invoking
    const messages = [
      {
        type: "system" as const,
        content: "You are a helpful translator. Translate the user sentence to French.",
      },
      {
        type: "human" as const,
        content: "I love programming.",
      },
    ];
    const result = await llm.invoke(messages);
    console.log(result);
    AIMessage {
      "content": "Here's the translation to French:\n\nJ'adore la programmation.",
      "additional_kwargs": {
        "id": "msg_bdrk_01HCZHa2mKbMZeTeHjLDd286"
      },
      "response_metadata": {
        "type": "message",
        "role": "assistant",
        "model": "claude-3-5-sonnet-20240620",
        "stop_reason": "end_turn",
        "stop_sequence": null,
        "usage": {
          "input_tokens": 25,
          "output_tokens": 19
        }
      },
      "tool_calls": [],
      "invalid_tool_calls": []
    }
    

    Streaming Chunks
    for await (const chunk of await llm.stream(messages)) {
      console.log(chunk);
    }
    AIMessageChunk {
      "content": "",
      "additional_kwargs": {
        "id": "msg_bdrk_01RhFuGR9uJ2bj5GbdAma4y6"
      },
      "response_metadata": {
        "type": "message",
        "role": "assistant",
        "model": "claude-3-5-sonnet-20240620",
        "stop_reason": null,
        "stop_sequence": null
      },
    }
    AIMessageChunk {
      "content": "J",
    }
    AIMessageChunk {
      "content": "'adore la",
    }
    AIMessageChunk {
      "content": " programmation.",
    }
    AIMessageChunk {
      "content": "",
      "additional_kwargs": {
        "stop_reason": "end_turn",
        "stop_sequence": null
      },
    }
    AIMessageChunk {
      "content": "",
      "response_metadata": {
        "amazon-bedrock-invocationMetrics": {
          "inputTokenCount": 25,
          "outputTokenCount": 11,
          "invocationLatency": 659,
          "firstByteLatency": 506
        }
      },
      "usage_metadata": {
        "input_tokens": 25,
        "output_tokens": 11,
        "total_tokens": 36
      }
    }
    

    Aggregate Streamed Chunks
    import { AIMessageChunk } from '@langchain/core/messages';
    import { concat } from '@langchain/core/utils/stream';

    const stream = await llm.stream(messages);
    let full: AIMessageChunk | undefined;
    for await (const chunk of stream) {
      full = !full ? chunk : concat(full, chunk);
    }
    console.log(full);
    AIMessageChunk {
      "content": "J'adore la programmation.",
      "additional_kwargs": {
        "id": "msg_bdrk_017b6PuBybA51P5LZ9K6gZHm",
        "stop_reason": "end_turn",
        "stop_sequence": null
      },
      "response_metadata": {
        "type": "message",
        "role": "assistant",
        "model": "claude-3-5-sonnet-20240620",
        "stop_reason": null,
        "stop_sequence": null,
        "amazon-bedrock-invocationMetrics": {
          "inputTokenCount": 25,
          "outputTokenCount": 11,
          "invocationLatency": 1181,
          "firstByteLatency": 1177
        }
      },
      "usage_metadata": {
        "input_tokens": 25,
        "output_tokens": 11,
        "total_tokens": 36
      }
    }
    

    Bind tools
    import { z } from 'zod';
    import { AIMessage } from '@langchain/core/messages';

    const GetWeather = {
      name: "GetWeather",
      description: "Get the current weather in a given location",
      schema: z.object({
        location: z.string().describe("The city and state, e.g. San Francisco, CA"),
      }),
    };

    const GetPopulation = {
      name: "GetPopulation",
      description: "Get the current population in a given location",
      schema: z.object({
        location: z.string().describe("The city and state, e.g. San Francisco, CA"),
      }),
    };

    const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
    const aiMsg: AIMessage = await llmWithTools.invoke(
      "Which city is hotter today and which is bigger: LA or NY?"
    );
    console.log(aiMsg.tool_calls);
    [
      {
        name: 'GetWeather',
        args: { location: 'Los Angeles, CA' },
        id: 'toolu_bdrk_01R2daqwHR931r4baVNzbe38',
        type: 'tool_call'
      },
      {
        name: 'GetWeather',
        args: { location: 'New York, NY' },
        id: 'toolu_bdrk_01WDadwNc7PGqVZvCN7Dr7eD',
        type: 'tool_call'
      },
      {
        name: 'GetPopulation',
        args: { location: 'Los Angeles, CA' },
        id: 'toolu_bdrk_014b8zLkpAgpxrPfewKinJFc',
        type: 'tool_call'
      },
      {
        name: 'GetPopulation',
        args: { location: 'New York, NY' },
        id: 'toolu_bdrk_01Tt8K2MUP15kNuMDFCLEFKN',
        type: 'tool_call'
      }
    ]
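
    To hand the results back to the model, each tool call can be answered with a tool message and the full history re-sent. A minimal sketch (the tool output is a hypothetical placeholder, not a real weather or population lookup):

    import { ToolMessage } from '@langchain/core/messages';

    // One ToolMessage per tool call, matched by tool_call_id.
    const toolMessages = (aiMsg.tool_calls ?? []).map(
      (toolCall) => new ToolMessage({
        tool_call_id: toolCall.id!,
        // Hypothetical placeholder result; a real app would execute the tool here.
        content: JSON.stringify({ result: "placeholder output" }),
      })
    );

    // Re-invoke with the original question, the AI's tool calls, and the results.
    const followUp = await llmWithTools.invoke([
      "Which city is hotter today and which is bigger: LA or NY?",
      aiMsg,
      ...toolMessages,
    ]);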
    

    Structured Output
    const Joke = z.object({
      setup: z.string().describe("The setup of the joke"),
      punchline: z.string().describe("The punchline to the joke"),
      rating: z.number().optional().describe("How funny the joke is, from 1 to 10"),
    }).describe('Joke to tell user.');

    const structuredLlm = llm.withStructuredOutput(Joke);
    const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
    console.log(jokeResult);
    {
      setup: "Why don't cats play poker in the jungle?",
      punchline: 'Too many cheetahs!'
    }
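
    When the underlying AIMessage is needed alongside the parsed object, withStructuredOutput also accepts an includeRaw option. A minimal sketch:

    const structuredLlmWithRaw = llm.withStructuredOutput(Joke, { includeRaw: true });
    const { raw, parsed } = await structuredLlmWithRaw.invoke("Tell me a joke about cats");
    // `raw` is the unparsed AIMessage; `parsed` conforms to the Joke schema.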
    

    Response Metadata
    const aiMsgForResponseMetadata = await llm.invoke(messages);
    console.log(aiMsgForResponseMetadata.response_metadata);
    "response_metadata": {
      "type": "message",
      "role": "assistant",
      "model": "claude-3-5-sonnet-20240620",
      "stop_reason": "end_turn",
      "stop_sequence": null,
      "usage": {
        "input_tokens": 25,
        "output_tokens": 19
      }
    }
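
    Token usage is also surfaced on the message itself via usage_metadata, with the shape shown in the streaming example above (the values here mirror the usage block):

    console.log(aiMsgForResponseMetadata.usage_metadata);
    { input_tokens: 25, output_tokens: 19, total_tokens: 44 }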
    

    Properties

    codec: EventStreamCodec = ...
    credentials: CredentialType

    AWS Credentials. If no credentials are provided, the default credentials from @aws-sdk/credential-provider-node will be used.

    endpointHost?: string

    Override the default endpoint hostname.

    fetchFn: {
        (input: URL | RequestInfo, init?: RequestInit): Promise<Response>;
        (input: string | URL | Request, init?: RequestInit): Promise<Response>;
    }

    A custom fetch function for low-level access to AWS API. Defaults to fetch().

    Type Declaration

      • (input: URL | RequestInfo, init?: RequestInit): Promise<Response>
      • Parameters

        • input: URL | RequestInfo
        • Optional init: RequestInit

        Returns Promise<Response>

      • (input: string | URL | Request, init?: RequestInit): Promise<Response>
      • Parameters

        • input: string | URL | Request
        • Optional init: RequestInit

        Returns Promise<Response>
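
    A minimal sketch of a custom fetchFn matching the signature above; it logs each outgoing request before delegating to the global fetch (the logging is purely illustrative):

    const llmWithCustomFetch = new BedrockChat({
      region: process.env.BEDROCK_AWS_REGION,
      model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
      fetchFn: async (input: RequestInfo | URL, init?: RequestInit) => {
        // Illustrative: observe outgoing Bedrock requests.
        console.log("Bedrock request:", input instanceof Request ? input.url : String(input));
        return fetch(input, init);
      },
    });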

    guardrailConfig?: {
        streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS";
        tagSuffix: string;
    }

    Required when a Guardrail is in use; see the sketch after this property list.

    guardrailIdentifier: string = ""

    Identifier for the guardrail configuration.

    guardrailVersion: string = ""

    Version for the guardrail configuration.

    lc_serializable: boolean = true
    maxTokens?: number = undefined

    The maximum number of tokens to generate.

    model: string = "amazon.titan-tg1-large"

    Model to use, for example "amazon.titan-tg1-large". This is equivalent to the modelId property in the list-foundation-models API.

    modelKwargs?: Record<string, unknown>

    Additional kwargs to pass to the model.
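
    A minimal sketch (top_k is a hypothetical provider-specific key used for illustration; consult the model provider's documentation for supported parameters):

    const llmWithKwargs = new BedrockChat({
      region: process.env.BEDROCK_AWS_REGION,
      model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
      // Hypothetical provider-specific parameter, passed through unchanged.
      modelKwargs: { top_k: 250 },
    });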

    modelProvider: string
    region: string

    The AWS region, e.g. us-west-2. Falls back to the AWS_DEFAULT_REGION environment variable or the region specified in ~/.aws/config if not provided here.

    stopSequences?: string[]

    Prefer passing stop sequences as a call option via .withConfig() instead.

    streaming: boolean = false

    Whether or not to stream responses.

    temperature?: number = undefined

    Sampling temperature to use.

    trace?: "ENABLED" | "DISABLED"

    Trace settings for the Bedrock Guardrails.

    usesMessagesApi: boolean = false
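
    A minimal sketch tying the guardrail-related properties above together (the identifier, version, and tag suffix values are placeholders):

    const llmWithGuardrails = new BedrockChat({
      region: process.env.BEDROCK_AWS_REGION,
      model: "anthropic.claude-3-5-sonnet-20240620-v1:0",
      guardrailIdentifier: "your-guardrail-id", // placeholder
      guardrailVersion: "1", // placeholder
      trace: "ENABLED",
      guardrailConfig: {
        streamProcessingMode: "SYNCHRONOUS",
        tagSuffix: "example", // placeholder
      },
    });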

    Accessors

    • get lc_aliases(): Record<string, string>

      Returns Record<string, string>

    • get lc_attributes(): undefined | SerializedFields

      Returns undefined | SerializedFields

    • get lc_secrets(): undefined | { [key: string]: string }

      Returns undefined | { [key: string]: string }

    Methods

    • Returns {}

    • Parameters

      • messages: BaseMessage[]
      • options: Partial<this["ParsedCallOptions"]>
      • Optional runManager: any

      Returns Promise<ChatResult>

    • Parameters

      • messages: BaseMessage[]
      • options: Partial<this["ParsedCallOptions"]>
      • Optional _runManager: any

      Returns Promise<ChatResult>

    • Returns Record<string, string>

    • Returns string

    • Parameters

      • reader: any

      Returns { "[asyncIterator]"(): AsyncGenerator<Uint8Array<ArrayBuffer>, void, unknown> }

    • Parameters

      • messages: BaseMessage[]
      • options: unknown
      • fields: {
            bedrockMethod: "invoke" | "invoke-with-response-stream";
            endpointHost: string;
            provider: string;
        }

      Returns Promise<any>

    • Parameters

      • messages: BaseMessage[]
      • options: unknown
      • Optional runManager: any

      Returns AsyncGenerator<ChatGenerationChunk>

    • Parameters

      • tools: any[]
      • Optional _kwargs: Partial<unknown>

      Returns Runnable<BaseLanguageModelInput, BaseMessageChunk, this["ParsedCallOptions"]>

    • Parameters

      • options: unknown

      Returns LangSmithParams

    • Parameters

      • Optional options: unknown

      Returns {
          guardrailConfig: undefined | {
              streamProcessingMode: "SYNCHRONOUS" | "ASYNCHRONOUS";
              tagSuffix: string;
          };
          max_tokens: undefined | number;
          modelKwargs: undefined | Record<string, unknown>;
          stop: any;
          temperature: undefined | number;
          tools: undefined | AnthropicTool[];
      }

    • Returns string