Interface LmChatAwsBedrockNodeParameters

Source
/**
 * Parameters for the AWS Bedrock chat language-model node.
 */
interface LmChatAwsBedrockNodeParameters {
    /** The model which will generate the completion (a Bedrock model ID; options are loaded dynamically and depend on `modelSource`). */
    model?: string;
    /** Choose between on-demand foundation models or inference profiles. Default: "onDemand". */
    modelSource?: "onDemand" | "inferenceProfile";
    /** Additional options to add. Default: {}. */
    options?: {
        /** Upper bound on the number of tokens to generate — presumably forwarded to the Bedrock completion request; verify against the node implementation. */
        maxTokensToSample?: number;
        /** Sampling temperature — presumably forwarded to the Bedrock completion request; verify against the node implementation. */
        temperature?: number;
    };
}

Properties

Source

readonly model?: string

The model which will generate the completion. Learn more. Type options: {"loadOptionsDependsOn":["modelSource"],"loadOptions":{"routing":{"request":{"method":"GET","url":"/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND"},"output":{"postReceive":[{"type":"rootProperty","properties":{"property":"modelSummaries"}},{"type":"setKeyValue","properties":{"name":"={{$responseItem.modelName}}","description":"={{$responseItem.modelArn}}","value":"={{$responseItem.modelId}}"}},{"type":"sort","properties":{"key":"name"}}]}}}}

Source

readonly modelSource?: "onDemand" | "inferenceProfile"

Choose between on-demand foundation models or inference profiles. Default: "onDemand"

Source

readonly options?: { maxTokensToSample?: number; temperature?: number }

Additional options to add. Default: {}