/**
 * Parameter shape for an Ollama language-model node.
 *
 * NOTE(review): this is a type-only declaration; the descriptions below are
 * taken from the generated documentation text that accompanies it in this
 * file — confirm against the node implementation.
 */
interface LmOllamaNodeParameters {
/**
 * The model which will generate the completion.
 * Default: "llama3.2". Model names are loaded at runtime via
 * `GET /api/tags` (see the load-options routing in the docs text below).
 */
model?: string;
/**
 * Additional options to add. Default: {}
 *
 * NOTE(review): the camelCase keys below appear to mirror Ollama runtime
 * options (e.g. numCtx → num_ctx, topK → top_k, useMMap → use_mmap) —
 * verify against the Ollama API before relying on exact semantics.
 */
options?: {
/** Output format of the completion: "json" or "default". */
format?: "json" | "default";
frequencyPenalty?: number;
// Presumably a duration string (e.g. "5m") controlling how long the model
// stays loaded — inferred from the name only; TODO confirm.
keepAlive?: string;
lowVram?: boolean;
mainGpu?: number;
numBatch?: number;
numCtx?: number;
numGpu?: number;
numPredict?: number;
numThread?: number;
penalizeNewline?: boolean;
presencePenalty?: number;
repeatPenalty?: number;
temperature?: number;
topK?: number;
topP?: number;
useMLock?: boolean;
useMMap?: boolean;
vocabOnly?: boolean;
};
}
Properties
readonly options?: {
format?: "json" | "default";
frequencyPenalty?: number;
keepAlive?: string;
lowVram?: boolean;
mainGpu?: number;
numBatch?: number;
numCtx?: number;
numGpu?: number;
numPredict?: number;
numThread?: number;
penalizeNewline?: boolean;
presencePenalty?: number;
repeatPenalty?: number;
temperature?: number;
topK?: number;
topP?: number;
useMLock?: boolean;
useMMap?: boolean;
vocabOnly?: boolean;
}
readonly options?: {
format?: "json" | "default";
frequencyPenalty?: number;
keepAlive?: string;
lowVram?: boolean;
mainGpu?: number;
numBatch?: number;
numCtx?: number;
numGpu?: number;
numPredict?: number;
numThread?: number;
penalizeNewline?: boolean;
presencePenalty?: number;
repeatPenalty?: number;
temperature?: number;
topK?: number;
topP?: number;
useMLock?: boolean;
useMMap?: boolean;
vocabOnly?: boolean;
}
Additional options to add. Default: `{}`
The model which will generate the completion. To download models, visit the Ollama Models Library. Default: `"llama3.2"`. Type options: {"loadOptions":{"routing":{"request":{"method":"GET","url":"/api/tags"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"models"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.name}}","value":"={{$responseItem.name}}"}},{"type":"sort","properties":{"key":"name"}}]}}}}