Interface LmChatOllamaNodeParameters

interface LmChatOllamaNodeParameters {
    model?: string;
    options?: {
        format?: "json" | "default";
        frequencyPenalty?: number;
        keepAlive?: string;
        lowVram?: boolean;
        mainGpu?: number;
        numBatch?: number;
        numCtx?: number;
        numGpu?: number;
        numPredict?: number;
        numThread?: number;
        penalizeNewline?: boolean;
        presencePenalty?: number;
        repeatPenalty?: number;
        temperature?: number;
        topK?: number;
        topP?: number;
        useMLock?: boolean;
        useMMap?: boolean;
        vocabOnly?: boolean;
    };
}
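
For orientation, a minimal parameters object satisfying this interface might look like the following sketch (the model name and option value are illustrative, not requirements):

const params: LmChatOllamaNodeParameters = {
    model: "llama3.2",
    options: { temperature: 0.7 },
};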

Properties


readonly model?: string

The model which will generate the completion. To download models, visit the Ollama Models Library.

Default: "llama3.2"

Type options:

{
    "loadOptions": {
        "routing": {
            "request": { "method": "GET", "url": "/api/tags" },
            "output": {
                "postReceive": [
                    { "type": "rootProperty", "properties": { "property": "models" } },
                    { "type": "setKeyValue", "properties": { "name": "={{$responseItem.name}}", "value": "={{$responseItem.name}}" } },
                    { "type": "sort", "properties": { "key": "name" } }
                ]
            }
        }
    }
}
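
The routing above populates the model list by calling Ollama's GET /api/tags endpoint, unwrapping the models root property, mapping each entry's name into a name/value pair, and sorting by name. A standalone sketch of the equivalent lookup (the base URL is an assumption; http://localhost:11434 is Ollama's default):

// Sketch of what the loadOptions routing does, assuming a local Ollama
// instance at its default address (http://localhost:11434).
async function listModels(): Promise<Array<{ name: string; value: string }>> {
    const res = await fetch("http://localhost:11434/api/tags"); // GET /api/tags
    const body = (await res.json()) as { models: Array<{ name: string }> };
    return body.models
        .map((m) => ({ name: m.name, value: m.name })) // setKeyValue step
        .sort((a, b) => a.name.localeCompare(b.name)); // sort by "name"
}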


readonly options?: {
    format?: "json" | "default";
    frequencyPenalty?: number;
    keepAlive?: string;
    lowVram?: boolean;
    mainGpu?: number;
    numBatch?: number;
    numCtx?: number;
    numGpu?: number;
    numPredict?: number;
    numThread?: number;
    penalizeNewline?: boolean;
    presencePenalty?: number;
    repeatPenalty?: number;
    temperature?: number;
    topK?: number;
    topP?: number;
    useMLock?: boolean;
    useMMap?: boolean;
    vocabOnly?: boolean;
}

Additional options to add.

Default: {}
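
As a sketch, a populated options object combining output format, sampling, and resource settings might look like this (all values are illustrative; every field remains optional):

const options: LmChatOllamaNodeParameters["options"] = {
    format: "json",     // constrain the model to emit valid JSON
    temperature: 0.2,   // lower values make sampling more deterministic
    topK: 40,           // sample only from the 40 most likely tokens
    topP: 0.9,          // nucleus sampling threshold
    repeatPenalty: 1.1, // discourage repeated tokens
    numCtx: 8192,       // context window size, in tokens
    keepAlive: "5m",    // keep the model loaded for five minutes after use
};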