loadVLMModel static method

Future<void> loadVLMModel(
  String modelId
)

Loads a VLM model by ID.

Matches Swift: RunAnywhere.loadVLMModel(_:) (ModelInfo version)

Resolves the main model .gguf file and mmproj .gguf file from the model folder.

await RunAnywhere.loadVLMModel('llava-1.5-7b');
print('VLM model loaded: ${RunAnywhere.currentVLMModelId}');

Implementation

/// Loads a VLM (vision-language) model by [modelId].
///
/// Resolves the main model file and the optional mmproj file from the
/// model's local folder, unloads any previously loaded VLM model, then
/// loads the model through the native bridge.
///
/// Emits [SDKModelEvent.loadStarted] before the attempt and either
/// [SDKModelEvent.loadCompleted] or [SDKModelEvent.loadFailed] after it,
/// and reports success/failure (with load time) to [TelemetryService].
///
/// Throws an [SDKError] if the SDK is not initialized, the model is
/// unknown or not downloaded, the model file cannot be resolved, or the
/// native load fails. Any error is rethrown after being logged and tracked.
static Future<void> loadVLMModel(String modelId) async {
  if (!_isInitialized) {
    throw SDKError.notInitialized();
  }

  final logger = SDKLogger('RunAnywhere.LoadVLMModel');
  logger.info('Loading VLM model: $modelId');
  final startTime = DateTime.now().millisecondsSinceEpoch;

  // Emit load started event
  EventBus.shared.publish(SDKModelEvent.loadStarted(modelId: modelId));

  try {
    // Find the model in available models
    final models = await availableModels();
    final model = models.where((m) => m.id == modelId).firstOrNull;

    if (model == null) {
      throw SDKError.modelNotFound('VLM model not found: $modelId');
    }

    // Check if model has a local path (downloaded)
    if (model.localPath == null) {
      throw SDKError.modelNotDownloaded(
        'VLM model is not downloaded. Call downloadModel() first.',
      );
    }

    // Resolve the model folder path
    final modelFolder = model.localPath!.toFilePath();
    logger.info('VLM model folder: $modelFolder');

    // Resolve the actual model file path within the folder
    final modelPath = await _resolveVLMModelFilePath(modelFolder, model);
    if (modelPath == null) {
      throw SDKError.modelNotFound(
        'Could not find main VLM model file in: $modelFolder',
      );
    }
    logger.info('Resolved VLM model path: $modelPath');

    // Get the model directory for finding mmproj
    final modelDir = Directory(modelPath).parent.path;

    // Try to find the mmproj (multimodal projector) file in the same
    // directory; the bridge accepts null when none is present.
    final mmprojPath = await _findMmprojFile(modelDir);
    logger.info('mmproj path: ${mmprojPath ?? "not found"}');

    // Unload any existing model first so the bridge holds at most one model
    if (DartBridge.vlm.isLoaded) {
      logger.debug('Unloading previous VLM model');
      DartBridge.vlm.unload();
    }

    // Load the VLM model via the C++ bridge
    logger.debug('Loading VLM model via C++ bridge');
    await DartBridge.vlm.loadModel(
      modelPath,
      mmprojPath,
      modelId,
      model.name,
    );

    // Verify the model actually loaded; loadModel may complete without
    // the bridge reporting a loaded model for incompatible files.
    if (!DartBridge.vlm.isLoaded) {
      throw SDKError.vlmModelLoadFailed(
        'VLM model failed to load - model may not be compatible',
      );
    }

    final loadTimeMs = DateTime.now().millisecondsSinceEpoch - startTime;
    logger.info(
      'VLM model loaded successfully: ${model.name} in ${loadTimeMs}ms',
    );

    // Track model load success
    TelemetryService.shared.trackModelLoad(
      modelId: modelId,
      modelType: 'vlm',
      success: true,
      loadTimeMs: loadTimeMs,
    );

    // Emit load completed event
    EventBus.shared.publish(SDKModelEvent.loadCompleted(modelId: modelId));
  } catch (e, st) {
    // Capture the stack trace: the error is rethrown, so this log is the
    // only place the trace from this frame is recorded.
    logger.error('Failed to load VLM model: $e\n$st');

    // Track model load failure, including time-to-failure for parity with
    // the success path.
    final failTimeMs = DateTime.now().millisecondsSinceEpoch - startTime;
    TelemetryService.shared.trackModelLoad(
      modelId: modelId,
      modelType: 'vlm',
      success: false,
      loadTimeMs: failTimeMs,
    );
    TelemetryService.shared.trackError(
      errorCode: 'vlm_model_load_failed',
      errorMessage: e.toString(),
      context: {'model_id': modelId},
    );

    // Emit load failed event
    EventBus.shared.publish(SDKModelEvent.loadFailed(
      modelId: modelId,
      error: e.toString(),
    ));

    rethrow;
  }
}