langchain.js
    Preparing search index...

    Interface ClientConfig

    interface ClientConfig {
        anonymizer?: (values: KVMap) => KVMap | Promise<KVMap>;
        apiKey?: string;
        apiUrl?: string;
        autoBatchTracing?: boolean;
        batchSizeBytesLimit?: number;
        batchSizeLimit?: number;
        blockOnRootRunFinalization?: boolean;
        cache?: boolean | PromptCache;
        callerOptions?: AsyncCallerParams;
        debug?: boolean;
        disablePromptCache?: boolean;
        fetchImplementation?: {
            (input: RequestInfo | URL, init?: RequestInit): Promise<Response>;
            (input: string | Request | URL, init?: RequestInit): Promise<Response>;
        };
        fetchOptions?: RequestInit;
        hideInputs?: boolean | ((inputs: KVMap) => KVMap | Promise<KVMap>);
        hideOutputs?: boolean | ((outputs: KVMap) => KVMap | Promise<KVMap>);
        manualFlushMode?: boolean;
        maxIngestMemoryBytes?: number;
        omitTracedRuntimeInfo?: boolean;
        timeout_ms?: number;
        traceBatchConcurrency?: number;
        tracingSamplingRate?: number;
        webUrl?: string;
        workspaceId?: string;
    }
    Index

    Properties

    anonymizer?: (values: KVMap) => KVMap | Promise<KVMap>
    apiKey?: string
    apiUrl?: string
    autoBatchTracing?: boolean
    batchSizeBytesLimit?: number

    Maximum size of a batch of runs in bytes.

    batchSizeLimit?: number

    Maximum number of operations to batch in a single request.

    blockOnRootRunFinalization?: boolean
    cache?: boolean | PromptCache

    This parameter is deprecated. Use configureGlobalPromptCache() to configure caching, or disablePromptCache: true to disable it.

    Configuration for caching. Can be:

    • true: Enable caching with default settings (uses global singleton)
    • Cache/PromptCache instance: Use custom cache configuration
    • false: Disable caching (equivalent to disablePromptCache: true)
    import { Client, Cache, configureGlobalPromptCache } from "langsmith";

    // Enable with defaults
    const client1 = new Client({});

    // Or use custom configuration (configureGlobalPromptCache is imported above)
    configureGlobalPromptCache({
      maxSize: 100,
      ttlSeconds: 3600, // 1 hour, or null for infinite TTL
    });
    const client2 = new Client({});

    // Or disable for a specific client
    const client3 = new Client({ disablePromptCache: true });
    callerOptions?: AsyncCallerParams
    debug?: boolean

    Enable debug mode for the client. If set, all sent HTTP requests will be logged.

    disablePromptCache?: boolean

    Disable prompt caching for this client. By default, prompt caching is enabled globally.

    fetchImplementation?: {
        (input: RequestInfo | URL, init?: RequestInit): Promise<Response>;
        (input: string | Request | URL, init?: RequestInit): Promise<Response>;
    }

    Custom fetch implementation. Useful for testing.

    Type Declaration

      • (input: RequestInfo | URL, init?: RequestInit): Promise<Response>
      • Parameters

        • input: RequestInfo | URL
        • Optional init: RequestInit

        Returns Promise<Response>

      • (input: string | Request | URL, init?: RequestInit): Promise<Response>
      • Parameters

        • input: string | Request | URL
        • Optional init: RequestInit

        Returns Promise<Response>

    fetchOptions?: RequestInit
    hideInputs?: boolean | ((inputs: KVMap) => KVMap | Promise<KVMap>)
    hideOutputs?: boolean | ((outputs: KVMap) => KVMap | Promise<KVMap>)
    manualFlushMode?: boolean

    Whether to require manual .flush() calls before sending traces. Useful if encountering network rate limits at high trace volumes.

    maxIngestMemoryBytes?: number

    Maximum total memory (in bytes) for both the AutoBatchQueue and batchIngestCaller queue. When exceeded, runs/batches are dropped. Defaults to 1GB.

    omitTracedRuntimeInfo?: boolean

    Whether to omit runtime information from traced runs. If true, runtime information (SDK version, platform, etc.) and LangChain environment variable metadata will not be stored in runs. Defaults to false.

    timeout_ms?: number
    traceBatchConcurrency?: number
    tracingSamplingRate?: number
    webUrl?: string
    workspaceId?: string

    The workspace ID. Required for org-scoped API keys.