translateAudio method

@override
Future<STTResponse> translateAudio(
  AudioTranslationRequest request
)
override

Translates audio to English text.

Throws an UnsupportedError if the provider does not support audio translation. Check supportedFeatures first, as in the sketch below.
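
A minimal usage sketch, checking the feature set before calling. The provider type AudioCapability, the AudioFeature.translation value, and the named-parameter request constructor are assumptions; check the actual names in your version of the library.

Future<void> translateIfSupported(AudioCapability provider) async {
  // Avoid the UnsupportedError by checking the feature set up front.
  if (!provider.supportedFeatures.contains(AudioFeature.translation)) {
    print('This provider does not support audio translation.');
    return;
  }

  // Assumed named-parameter constructor, mirroring the filePath field
  // read by the implementation below.
  final response = await provider.translateAudio(
    AudioTranslationRequest(filePath: 'meeting_fr.wav'),
  );
  print('English text: ${response.text}');
}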

Implementation

@override
Future<STTResponse> translateAudio(AudioTranslationRequest request) async {
  // Basic validation
  if (request.audioData == null && request.filePath == null) {
    throw const InvalidRequestError(
      'Either audioData or filePath must be provided',
    );
  }

  final formData = FormData();

  if (request.audioData != null) {
    formData.files.add(
      MapEntry(
        'file',
        MultipartFile.fromBytes(
          request.audioData!,
          filename: 'audio.${request.format ?? 'wav'}',
        ),
      ),
    );
  } else if (request.filePath != null) {
    formData.files.add(
      MapEntry('file', await MultipartFile.fromFile(request.filePath!)),
    );
  }

  // Required model field plus optional tuning parameters
  formData.fields.add(MapEntry(
      'model', request.model ?? ProviderDefaults.openaiDefaultSTTModel));
  if (request.prompt != null) {
    formData.fields.add(MapEntry('prompt', request.prompt!));
  }
  if (request.responseFormat != null) {
    formData.fields.add(MapEntry('response_format', request.responseFormat!));
  }
  if (request.temperature != null) {
    formData.fields.add(
      MapEntry('temperature', request.temperature.toString()),
    );
  }

  // POST the multipart form to the audio/translations endpoint
  final responseData = await client.postForm('audio/translations', formData);

  return STTResponse(
    text: responseData['text'] as String,
    language: 'en', // Translations are always to English
    confidence: null,
    words: null, // Translation doesn't provide word timing
    model: request.model,
    duration: responseData['duration'] as double?,
    usage: null,
  );
}
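
A rough usage sketch for in-memory audio. The named constructor parameters are assumptions that mirror the request fields read by the implementation above.

import 'dart:io';

Future<void> translateRecording(AudioCapability provider) async {
  final bytes = await File('interview_de.mp3').readAsBytes();

  final response = await provider.translateAudio(
    AudioTranslationRequest(
      audioData: bytes,
      format: 'mp3', // used only to name the uploaded file (audio.mp3)
      responseFormat: 'json', // forwarded as the response_format form field
      temperature: 0.0, // forwarded as a string form field
    ),
  );

  // The response language is always 'en' for translations.
  print(response.text);
  print('duration: ${response.duration}');
}

Note that if both audioData and filePath are provided, audioData takes precedence, and format only affects the name given to the uploaded file.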