createModel method
Future<InferenceModel>
createModel({
- required ModelType modelType,
- int maxTokens = 1024,
- PreferredBackend? preferredBackend,
- List<int>? loraRanks,
- int? maxNumImages,
- bool supportImage = false,
override
Creates and returns a new InferenceModel instance.
modelType
— model type to create.
maxTokens
— maximum context length for the model.
preferredBackend
— backend preference (e.g., CPU, GPU).
loraRanks
— optional supported LoRA ranks.
maxNumImages
— maximum number of images (for multimodal models).
supportImage
— whether the model supports images.
Implementation
@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages, // Image support parameter (stub on web).
  bool supportImage = false, // Image support flag (stub on web).
}) {
  // TODO: Implement multimodal support for web.
  // Image-related options are accepted for interface compatibility but are
  // not functional on this platform yet; warn in debug builds only.
  final imageOptionsRequested = supportImage || maxNumImages != null;
  if (imageOptionsRequested && kDebugMode) {
    print('Warning: Image support is not yet implemented for web platform');
  }

  // NOTE(review): preferredBackend is accepted but not forwarded to
  // WebInferenceModel — presumably the web runtime has no backend choice;
  // confirm this is intentional.
  // If a model was already created, it is reused as-is: the ??= below means
  // later calls with different arguments silently get the first instance
  // until onClose resets the cache.
  final model = _initializedModel ??= WebInferenceModel(
    modelType: modelType,
    maxTokens: maxTokens,
    loraRanks: loraRanks,
    modelManager: modelManager,
    supportImage: supportImage, // Forward the image-support flag.
    maxNumImages: maxNumImages, // Forward the image count limit.
    onClose: () {
      // Drop the cached instance so the next createModel builds a fresh one.
      _initializedModel = null;
    },
  );
  return Future.value(model);
}