addQueryChunk method

```dart
@override
Future<void> addQueryChunk(
  Message message,
)
```

Implementation

/// Adds [message] to the pending prompt as ordered parts.
///
/// On the first user message of the session, prepends the session's
/// system instruction (sent once; tracked via `_systemInstructionSent`).
/// Parts are queued in a fixed order — images first, then audio, then the
/// text prompt — so multimodal turns keep image/audio context ahead of text.
///
/// Throws [ArgumentError] if the message carries an image while
/// `supportImage` is false, or audio while `supportAudio` is false.
@override
Future<void> addQueryChunk(Message message) async {
  if (kDebugMode) {
    debugPrint(
        '🟢 WebModelSession.addQueryChunk() called - hasImage: ${message.hasImage}, hasAudio: ${message.hasAudio}, supportImage: $supportImage, supportAudio: $supportAudio');
  }

  // Inject the system instruction into the first user message only; later
  // messages go through unchanged.
  var messageToSend = message;
  if (message.isUser &&
      !_systemInstructionSent &&
      systemInstruction != null &&
      systemInstruction!.isNotEmpty) {
    _systemInstructionSent = true;
    messageToSend = message.copyWith(
      text: '[System: ${systemInstruction!}]\n\n${message.text}',
    );
  }

  // Text is derived from the (possibly instruction-prefixed) copy; media
  // below is read from the original message, which copyWith left untouched.
  final finalPrompt = messageToSend.transformToChatPrompt(
      type: modelType, fileType: fileType);

  // Add image parts first, then audio, then text last.
  if (message.hasImage) {
    if (!supportImage) {
      if (kDebugMode) {
        debugPrint('🔴 Model does not support images - throwing exception');
      }
      throw ArgumentError('This model does not support images');
    }

    // Prefer the multi-image list; fall back to the legacy single-image
    // field when the list is empty.
    final images = message.images.isNotEmpty
        ? message.images
        : (message.imageBytes != null
            ? [message.imageBytes!]
            : const <Uint8List>[]);
    for (final imageBytes in images) {
      if (kDebugMode) {
        debugPrint('🟢 Processing image: ${imageBytes.length} bytes');
      }
      final imagePart = ImagePromptPart.fromBytes(imageBytes);
      _promptParts.add(imagePart);
      if (kDebugMode) {
        debugPrint(
            '🟢 Added image part with dataUrl length: ${imagePart.dataUrl.length}');
      }
    }
  }

  // Handle audio processing for web (Gemma 3n E4B).
  if (message.hasAudio && message.audioBytes != null) {
    // Validate capability before doing any work, mirroring the image branch
    // (previously the "Processing audio" log fired even when the model was
    // about to reject the audio).
    if (!supportAudio) {
      if (kDebugMode) {
        debugPrint('🔴 Model does not support audio - throwing exception');
      }
      throw ArgumentError('This model does not support audio');
    }
    if (kDebugMode) {
      debugPrint('🎵 Processing audio: ${message.audioBytes!.length} bytes');
    }
    // Add audio part
    final audioPart = AudioPromptPart(message.audioBytes!);
    _promptParts.add(audioPart);
    if (kDebugMode) {
      debugPrint(
          '🎵 Added audio part with ${message.audioBytes!.length} bytes');
    }
  }

  // Add text part last so multimodal turns keep image/audio context first.
  _promptParts.add(TextPromptPart(finalPrompt));
  if (kDebugMode) {
    debugPrint(
        '🟢 Added text part: ${finalPrompt.substring(0, math.min(100, finalPrompt.length))}...');
    debugPrint('🟢 Total prompt parts: ${_promptParts.length}');
  }
}