createModel method
Future<InferenceModel> createModel({
  required ModelType modelType,
  ModelFileType fileType = ModelFileType.task,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
})
override
Creates and returns a new InferenceModel instance.
modelType — the model type to create.
fileType — the model file format (defaults to ModelFileType.task).
maxTokens — maximum context length for the model.
preferredBackend — backend preference (e.g., CPU, GPU).
loraRanks — optional list of supported LoRA ranks.
maxNumImages — maximum number of images (for multimodal models).
supportImage — whether the model supports image input.
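A minimal call sketch follows. The singleton accessor FlutterGemmaPlugin.instance, the downloadModelFromNetwork helper, and ModelType.gemmaIt are assumed names for the surrounding package API; the implementation below only confirms that a model must first be installed through the modelManager.

// Sketch only: the accessor, download helper, and enum value are
// assumptions; check your package version for the exact API.
final gemma = FlutterGemmaPlugin.instance;

// A model must already be installed, otherwise createModel
// completes with a "No models installed" error.
await gemma.modelManager
    .downloadModelFromNetwork('https://example.com/gemma.task');

final model = await gemma.createModel(
  modelType: ModelType.gemmaIt,
  maxTokens: 2048,
  preferredBackend: PreferredBackend.gpu,
  supportImage: true, // maxNumImages defaults to 1 when null
);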
Implementation
@override
Future<InferenceModel> createModel({
  required ModelType modelType,
  ModelFileType fileType = ModelFileType.task,
  int maxTokens = 1024,
  PreferredBackend? preferredBackend,
  List<int>? loraRanks,
  int? maxNumImages,
  bool supportImage = false,
}) async {
  // Return the in-flight or completed initialization if one exists.
  if (_initCompleter case Completer<InferenceModel> completer) {
    return completer.future;
  }
  final completer = _initCompleter = Completer<InferenceModel>();

  // Check if the model is ready through the unified system.
  final manager = _unifiedManager;

  // Use the current active model, or fall back to any installed
  // inference model for backward compatibility.
  ModelSpec? activeModel = manager.currentActiveModel;
  if (activeModel == null) {
    final installedFiles =
        await manager.getInstalledModels(ModelManagementType.inference);
    if (installedFiles.isNotEmpty) {
      // Create a spec from the first installed model and set it as active.
      activeModel = InferenceModelSpec(
        name: installedFiles.first,
        modelUrl: 'local://installed', // Dummy URL; the file is already downloaded.
      );
      // Set as the current active model for future use.
      await manager.ensureModelReady(installedFiles.first, 'local://installed');
      debugPrint('Backward compatibility: Set ${installedFiles.first} as active model');
    } else {
      completer.completeError(
        Exception('No models installed. Use the `modelManager` to download a model first'),
      );
      return completer.future;
    }
  }

  // Verify the active model is still installed.
  final isModelInstalled = await manager.isModelInstalled(activeModel);
  if (!isModelInstalled) {
    completer.completeError(
      Exception('Active model is no longer installed. Use the `modelManager` to load the model first'),
    );
    return completer.future;
  }

  // Resolve the actual model file path through the unified system.
  final modelFilePaths = await manager.getModelFilePaths(activeModel);
  if (modelFilePaths == null || modelFilePaths.isEmpty) {
    completer.completeError(
      Exception('Model file paths not found. Use the `modelManager` to load the model first'),
    );
    return completer.future;
  }
  final modelFile = File(modelFilePaths.values.first);
  if (!await modelFile.exists()) {
    completer.completeError(
      Exception('Model file not found at path: ${modelFile.path}'),
    );
    return completer.future;
  }
  debugPrint('Using unified model file: ${modelFile.path}');

  try {
    // Create the native model through the platform service.
    await _platformService.createModel(
      maxTokens: maxTokens,
      modelPath: modelFile.path,
      loraRanks: loraRanks ?? supportedLoraRanks,
      preferredBackend: preferredBackend,
      maxNumImages: supportImage ? (maxNumImages ?? 1) : null,
    );
    final model = _initializedModel = MobileInferenceModel(
      maxTokens: maxTokens,
      modelType: modelType,
      fileType: fileType,
      preferredBackend: preferredBackend,
      supportedLoraRanks: loraRanks ?? supportedLoraRanks,
      supportImage: supportImage,
      maxNumImages: maxNumImages,
      onClose: () {
        // Clear the cache so a later createModel call starts fresh.
        _initializedModel = null;
        _initCompleter = null;
      },
    );
    completer.complete(model);
    return model;
  } catch (e, st) {
    // Fail the cached future, then rethrow with the original stack trace.
    completer.completeError(e, st);
    Error.throwWithStackTrace(e, st);
  }
}
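Note the caching behavior: the method stores its Completer in _initCompleter, so concurrent or repeated calls share a single initialization, and the onClose callback resets the cache so a fresh model can be created later. A sketch of that behavior, assuming InferenceModel exposes a close() method wired to onClose:

final a = gemma.createModel(modelType: ModelType.gemmaIt);
final b = gemma.createModel(modelType: ModelType.gemmaIt);
// The second call hits the cached completer, so both futures
// resolve to the same instance without re-initializing.
assert(identical(await a, await b));

// close() triggers onClose, clearing _initCompleter; a later call
// can now initialize a model with different settings.
await (await a).close();
final fresh = await gemma.createModel(
  modelType: ModelType.gemmaIt,
  maxTokens: 512,
);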