processImage static method

Future<VLMResult> processImage(
  VLMImage image, {
  required String prompt,
  int maxTokens = 2048,
  double temperature = 0.7,
  double topP = 0.9,
})

Processes an image with the currently loaded VLM.

Matches Swift: RunAnywhere.processImage(_:prompt:maxTokens:temperature:topP:)

final result = await RunAnywhere.processImage(
  VLMImage.filePath('/path/to/image.jpg'),
  prompt: 'Describe this image in detail',
  maxTokens: 512,
  temperature: 0.7,
);
print('Response: ${result.text}');
print('Tokens: ${result.completionTokens}');
print('Speed: ${result.tokensPerSecond} tok/s');

Implementation

/// Processes [image] with the currently loaded VLM using [prompt].
///
/// Returns a [VLMResult] with the generated text and timing metrics, and
/// reports success/failure to [TelemetryService].
///
/// Throws [SDKError.notInitialized] if the SDK has not been initialized, and
/// [SDKError.vlmNotInitialized] if no VLM model is loaded. Any error raised
/// by the bridge is logged, tracked, and rethrown to the caller.
static Future<VLMResult> processImage(
  VLMImage image, {
  required String prompt,
  int maxTokens = 2048,
  double temperature = 0.7,
  double topP = 0.9,
}) async {
  if (!_isInitialized) throw SDKError.notInitialized();
  if (!DartBridge.vlm.isLoaded) throw SDKError.vlmNotInitialized();

  final logger = SDKLogger('RunAnywhere.VLM.ProcessImage');
  // Read the model id exactly once so telemetry's `modelId` and `modelName`
  // cannot disagree if the loaded model changes between two reads.
  final currentModelId = DartBridge.vlm.currentModelId;
  final modelId = currentModelId ?? 'unknown';

  try {
    // Delegate the actual inference to the platform bridge.
    final bridgeResult = await _processImageViaBridge(
      image,
      prompt,
      maxTokens,
      temperature,
      topP,
      useGpu: true,
    );

    logger.info(
      'VLM processing complete: ${bridgeResult.completionTokens} tokens, '
      '${bridgeResult.tokensPerSecond.toStringAsFixed(1)} tok/s',
    );

    // Track VLM generation success
    TelemetryService.shared.trackGeneration(
      modelId: modelId,
      modelName: currentModelId,
      promptTokens: bridgeResult.promptTokens,
      completionTokens: bridgeResult.completionTokens,
      latencyMs: bridgeResult.totalTimeMs.round(),
      temperature: temperature,
      maxTokens: maxTokens,
      tokensPerSecond: bridgeResult.tokensPerSecond,
      isStreaming: false,
    );

    return bridgeResult;
  } catch (e) {
    logger.error('VLM processing failed: $e');

    // Track VLM generation failure before propagating to the caller.
    TelemetryService.shared.trackError(
      errorCode: 'vlm_processing_failed',
      errorMessage: e.toString(),
      context: {'model_id': modelId},
    );

    // `rethrow` (not `throw e`) preserves the original stack trace.
    rethrow;
  }
}