langchain.js
    Preparing search index...

    Type Alias WrapAISDKConfig<T>

    WrapAISDKConfig: Partial<
        Omit<
            RunTreeConfig,
            | "inputs"
            | "outputs"
            | "run_type"
            | "child_runs"
            | "parent_run"
            | "error"
            | "serialized"
        >
    > & {
        processChildLLMRunInputs?: (
            inputs: LanguageModelV2CallOptions,
        ) => Record<string, unknown>;
        processChildLLMRunOutputs?: (
            outputs: "fullStream" extends keyof Awaited<ReturnType<T>>
                ? AggregatedDoStreamOutput
                : Awaited<ReturnType<LanguageModelV2["doGenerate"]>>,
        ) => Record<string, unknown>;
        processInputs?: (inputs: Parameters<T>[0]) => Record<string, unknown>;
        processOutputs?: (
            outputs: Awaited<ReturnType<T>>,
        ) => Record<string, unknown>;
    }

    Type Parameters

    • T extends (...args: any[]) => any = (...args: any[]) => any

    Type Declaration

    • Optional processChildLLMRunInputs?: (inputs: LanguageModelV2CallOptions) => Record<string, unknown>

      Apply transformations to AI SDK child LLM run inputs before logging. This function should NOT mutate the inputs. Receives both "raw" and LangSmith-suggested "formatted" inputs, and should combine them into a single LangSmith-formatted input.

      import {
        wrapAISDK,
        createLangSmithProviderOptions,
      } from "langsmith/experimental/vercel";
      import * as ai from "ai";
      import { openai } from "@ai-sdk/openai";

      const { generateText } = wrapAISDK(ai);

      const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
        processChildLLMRunInputs: (inputs) => {
          const { prompt } = inputs;
          return {
            messages: prompt.map((message) => ({
              ...message,
              content: "REDACTED CHILD INPUTS",
            })),
          };
        },
      });
      const { text } = await generateText({
        model: openai("gpt-5-nano"),
        prompt: "What is the capital of France?",
        providerOptions: {
          langsmith: lsConfig,
        },
      });
    • Optional processChildLLMRunOutputs?: (
          outputs: "fullStream" extends keyof Awaited<ReturnType<T>>
              ? AggregatedDoStreamOutput
              : Awaited<ReturnType<LanguageModelV2["doGenerate"]>>,
      ) => Record<string, unknown>

      Apply transformations to AI SDK child LLM run outputs before logging. This function should NOT mutate the outputs. Receives both "raw" and LangSmith-suggested "formatted" outputs, and should combine them into a single LangSmith-formatted output.

      import {
        wrapAISDK,
        createLangSmithProviderOptions,
      } from "langsmith/experimental/vercel";
      import * as ai from "ai";
      import { openai } from "@ai-sdk/openai";

      const { generateText } = wrapAISDK(ai);

      const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
        processChildLLMRunOutputs: (outputs) => {
          return {
            providerMetadata: outputs.providerMetadata,
            content: "REDACTED CHILD OUTPUTS",
            role: "assistant",
          };
        },
      });
      const { text } = await generateText({
        model: openai("gpt-5-nano"),
        prompt: "What is the capital of France?",
        providerOptions: {
          langsmith: lsConfig,
        },
      });
    • Optional processInputs?: (inputs: Parameters<T>[0]) => Record<string, unknown>

      Apply transformations to AI SDK inputs before logging. This function should NOT mutate the inputs. Receives both "raw" and LangSmith-suggested "formatted" inputs, and should combine them into a single LangSmith-formatted input.

      import {
        wrapAISDK,
        createLangSmithProviderOptions,
      } from "langsmith/experimental/vercel";
      import * as ai from "ai";
      import { openai } from "@ai-sdk/openai";

      const { generateText } = wrapAISDK(ai);

      const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
        processInputs: (inputs) => {
          const { messages } = inputs;
          return {
            messages: messages?.map((message) => ({
              providerMetadata: message.providerOptions,
              role: "assistant",
              content: "REDACTED",
            })),
            prompt: "REDACTED",
          };
        },
      });
      const { text } = await generateText({
        model: openai("gpt-5-nano"),
        prompt: "What is the capital of France?",
        providerOptions: {
          langsmith: lsConfig,
        },
      });

      This function is not inherited by nested LLM runs or tool calls. Pass processChildLLMRunInputs to override child LLM run input processing or wrap your tool's execute method in a separate traceable for tool calls.

    • Optional processOutputs?: (outputs: Awaited<ReturnType<T>>) => Record<string, unknown>

      Apply transformations to AI SDK outputs before logging. This function should NOT mutate the outputs. Receives both "raw" and LangSmith-suggested "formatted" outputs, and should combine them into a single LangSmith-formatted output.

      import {
        wrapAISDK,
        createLangSmithProviderOptions,
      } from "langsmith/experimental/vercel";
      import * as ai from "ai";
      import { openai } from "@ai-sdk/openai";

      const { generateText } = wrapAISDK(ai);

      const lsConfig = createLangSmithProviderOptions<typeof ai.generateText>({
        processOutputs: (outputs) => {
          return {
            providerMetadata: outputs.providerMetadata,
            role: "assistant",
            content: "REDACTED",
          };
        },
      });
      const { text } = await generateText({
        model: openai("gpt-5-nano"),
        prompt: "What is the capital of France?",
        providerOptions: {
          langsmith: lsConfig,
        },
      });

      This function is not inherited by nested LLM runs or tool calls. Pass processChildLLMRunOutputs to override child LLM run output processing or wrap your tool's execute method in a separate traceable for tool calls.