Run the LLM on the given prompts and input.
```typescript
_generate(
  prompts: string[],
  options: Omit<BaseLLMCallOptions, "callbacks" | "tags" | "metadata" | "configurable" | "recursionLimit" | "runName" | "runId">,
  runManager: CallbackManagerForLLMRun
): Promise<LLMResult>
```

| Name | Type | Description |
|---|---|---|
| prompts* | string[] | |
| options* | Omit&lt;BaseLLMCallOptions, "callbacks" \| "tags" \| "metadata" \| "configurable" \| "recursionLimit" \| "runName" \| "runId"&gt; | |
| runManager | CallbackManagerForLLMRun | |