langchain.js

    Interface ChatOllamaInput

    Input options for the Ollama chat model class.

    interface ChatOllamaInput {
        baseUrl?: string;
        checkOrPullModel?: boolean;
        embeddingOnly?: boolean;
        f16Kv?: boolean;
        fetch?: {
            (input: RequestInfo | URL, init?: RequestInit): Promise<Response>;
            (input: string | Request | URL, init?: RequestInit): Promise<Response>;
        };
        format?: string | Record<string, any>;
        frequencyPenalty?: number;
        headers?: Headers | Record<string, string>;
        keepAlive?: string | number;
        logitsAll?: boolean;
        lowVram?: boolean;
        mainGpu?: number;
        mirostat?: number;
        mirostatEta?: number;
        mirostatTau?: number;
        model?: string;
        numa?: boolean;
        numBatch?: number;
        numCtx?: number;
        numGpu?: number;
        numKeep?: number;
        numPredict?: number;
        numThread?: number;
        penalizeNewline?: boolean;
        presencePenalty?: number;
        repeatLastN?: number;
        repeatPenalty?: number;
        seed?: number;
        stop?: string[];
        streaming?: boolean;
        temperature?: number;
        tfsZ?: number;
        topK?: number;
        topP?: number;
        typicalP?: number;
        useMlock?: boolean;
        useMmap?: boolean;
        vocabOnly?: boolean;
    }
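
    A minimal sketch of how these options are typically passed to the constructor, assuming the ChatOllama class from the @langchain/ollama package (which implements this interface) and a locally running Ollama server:

    import { ChatOllama } from "@langchain/ollama";

    // Construct the chat model with a few ChatOllamaInput options.
    const model = new ChatOllama({
        baseUrl: "http://127.0.0.1:11434", // default Ollama host
        model: "llama3",
        temperature: 0.5,
    });

    const response = await model.invoke("Why is the sky blue?");
    console.log(response.content);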

    Implemented by: ChatOllama


    Properties

    baseUrl?: string

    The host URL of the Ollama server.

    "http://127.0.0.1:11434"
    
    checkOrPullModel?: boolean

    Whether or not to check that the model exists on the local machine before invoking it. If set to true, the model will be pulled if it does not exist.

    Default: false
    
    embeddingOnly?: boolean
    f16Kv?: boolean
    fetch?: {
        (input: RequestInfo | URL, init?: RequestInit): Promise<Response>;
        (input: string | Request | URL, init?: RequestInit): Promise<Response>;
    }

    The fetch function to use.

    Type Declaration

      • (input: RequestInfo | URL, init?: RequestInit): Promise<Response>
      • Parameters

        • input: RequestInfo | URL
        • Optional init: RequestInit

        Returns Promise<Response>

      • (input: string | Request | URL, init?: RequestInit): Promise<Response>
      • Parameters

        • input: string | Request | URL
        • Optional init: RequestInit

        Returns Promise<Response>

    Default: fetch (the global fetch function)
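
    As an illustration, a sketch of supplying a custom fetch implementation, here a hypothetical wrapper that logs each request before delegating to the global fetch (assumed available, e.g. Node 18+):

    import { ChatOllama } from "@langchain/ollama";

    // Hypothetical logging wrapper; the option receives the same
    // (input, init) arguments as the global fetch.
    const loggingFetch: typeof fetch = async (input, init) => {
        console.log("Ollama request:", input);
        return fetch(input, init);
    };

    const model = new ChatOllama({
        model: "llama3",
        fetch: loggingFetch,
    });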
    
    format?: string | Record<string, any>
    frequencyPenalty?: number
    headers?: Headers | Record<string, string>

    Optional HTTP headers to include in the request.
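
    For example, a sketch of sending an authorization header to a proxied Ollama deployment; the URL and token here are placeholders:

    import { ChatOllama } from "@langchain/ollama";

    const model = new ChatOllama({
        model: "llama3",
        baseUrl: "https://ollama.example.com", // hypothetical proxy
        headers: { Authorization: "Bearer <token>" },
    });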

    keepAlive?: string | number
    "5m"
    
    logitsAll?: boolean
    lowVram?: boolean
    mainGpu?: number
    mirostat?: number
    mirostatEta?: number
    mirostatTau?: number
    model?: string

    The model to invoke. If checkOrPullModel is enabled and the model does not exist locally, it will be pulled.

    "llama3"
    
    numa?: boolean
    numBatch?: number
    numCtx?: number
    numGpu?: number
    numKeep?: number
    numPredict?: number
    numThread?: number
    penalizeNewline?: boolean
    presencePenalty?: number
    repeatLastN?: number
    repeatPenalty?: number
    seed?: number
    stop?: string[]
    streaming?: boolean
    temperature?: number
    tfsZ?: number
    topK?: number
    topP?: number
    typicalP?: number
    useMlock?: boolean
    useMmap?: boolean
    vocabOnly?: boolean