createModel method

```dart
@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
})
```

Creates and returns a new InferenceModel instance.

- `modelType` — the model type to create.
- `maxTokens` — maximum context length for the model.
- `preferredBackend` — backend preference (e.g., CPU, GPU).
- `loraRanks` — optional list of supported LoRA ranks.
- `maxNumImages` — maximum number of images (for multimodal models).
- `supportImage` — whether the model supports image input.

Implementation

/// Creates a new [InferenceModel], or returns the already-initialized one.
///
/// The instance is cached in `_initializedModel`; subsequent calls return the
/// cached model (ignoring any newly supplied parameters) until the model's
/// `onClose` callback clears the cache.
///
/// NOTE(review): `preferredBackend` is accepted but not forwarded to
/// [WebInferenceModel] in this web implementation.
@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages, // Image support placeholder (stub on web).
  bool supportImage = false, // Image-support flag placeholder (stub on web).
}) {
  // TODO: Implement multimodal support for web
  // Multimodal parameters are accepted but not functional yet; warn in debug.
  if (kDebugMode && (supportImage || maxNumImages != null)) {
    print('Warning: Image support is not yet implemented for web platform');
  }

  // Reuse the cached model when one is already initialized.
  final existing = _initializedModel;
  if (existing != null) {
    return Future.value(existing);
  }

  final created = WebInferenceModel(
    modelType: modelType,
    maxTokens: maxTokens,
    loraRanks: loraRanks,
    modelManager: modelManager,
    supportImage: supportImage, // Forward the image-support flag.
    maxNumImages: maxNumImages, // Forward the image count.
    onClose: () {
      // Drop the cached instance so the next call builds a fresh model.
      _initializedModel = null;
    },
  );
  _initializedModel = created;
  return Future.value(created);
}