addQueryChunk method
Implementation
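/// Adds [message] to the pending prompt as prompt parts: always a text
/// part built from transformToChatPrompt, plus an image or audio part when
/// the message carries the corresponding bytes. Throws [ArgumentError] if
/// the model does not support the attached modality.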
@override
Future<void> addQueryChunk(Message message) async {
  if (kDebugMode) {
    debugPrint(
        '🟢 WebModelSession.addQueryChunk() called - hasImage: ${message.hasImage}, hasAudio: ${message.hasAudio}, supportImage: $supportImage, supportAudio: $supportAudio');
  }

  final finalPrompt =
      message.transformToChatPrompt(type: modelType, fileType: fileType);

  // Add text part
  _promptParts.add(TextPromptPart(finalPrompt));
  if (kDebugMode) {
    debugPrint(
        '🟢 Added text part: ${finalPrompt.substring(0, math.min(100, finalPrompt.length))}...');
  }

  // Handle image processing for web
  if (message.hasImage && message.imageBytes != null) {
    if (kDebugMode) {
      debugPrint('🟢 Processing image: ${message.imageBytes!.length} bytes');
    }
    if (!supportImage) {
      if (kDebugMode) {
        debugPrint('🔴 Model does not support images - throwing exception');
      }
      throw ArgumentError('This model does not support images');
    }

    // Add image part
    final imagePart = ImagePromptPart.fromBytes(message.imageBytes!);
    _promptParts.add(imagePart);
    if (kDebugMode) {
      debugPrint(
          '🟢 Added image part with dataUrl length: ${imagePart.dataUrl.length}');
    }
  }

  // Handle audio processing for web (Gemma 3n E4B)
  if (message.hasAudio && message.audioBytes != null) {
    if (kDebugMode) {
      debugPrint('🎵 Processing audio: ${message.audioBytes!.length} bytes');
    }
    if (!supportAudio) {
      if (kDebugMode) {
        debugPrint('🔴 Model does not support audio - throwing exception');
      }
      throw ArgumentError('This model does not support audio');
    }

    // Add audio part
    final audioPart = AudioPromptPart(message.audioBytes!);
    _promptParts.add(audioPart);
    if (kDebugMode) {
      debugPrint('🎵 Added audio part with ${message.audioBytes!.length} bytes');
    }
  }

  if (kDebugMode) {
    debugPrint('🟢 Total prompt parts: ${_promptParts.length}');
  }
}
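A minimal usage sketch follows. The import path, the Message constructor shape, and the way the session is obtained are assumptions for illustration and are not confirmed by this page; only addQueryChunk and its ArgumentError behavior come from the implementation above.

import 'dart:typed_data';
import 'package:flutter/foundation.dart';
// Assumed import path for Message and WebModelSession:
import 'package:flutter_gemma/flutter_gemma.dart';

Future<void> sendMultimodalQuery(
    WebModelSession session, Uint8List imageBytes) async {
  // Hypothetical Message shape: a text prompt plus attached image bytes.
  final message = Message(
    text: 'Describe this picture.',
    imageBytes: imageBytes,
    isUser: true,
  );

  // addQueryChunk throws ArgumentError when the model lacks support for
  // an attached modality, so callers may want to guard the call.
  try {
    await session.addQueryChunk(message);
  } on ArgumentError catch (e) {
    debugPrint('Unsupported modality: $e');
  }
}

Note one ordering consequence visible in the implementation: the text part is appended to _promptParts before the image and audio support checks run, so a rejected attachment still leaves the text part queued in the session.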