voiceCall method

Future<Map<String, dynamic>> voiceCall({
  required String name,
  required String pathway,
  required String tenant,
  required String username,
  required String sessionId,
  required String token,
  required String baseURL,
  Function? onConnected,
  Function? onMentorSpeaking,
  Function? onMentorStoppedSpeaking,
  Function? onBufferSent,
  Function? onVoiceRecordingStarted,
  Function? onVoiceRecordingStopped,
  Function(String? error)? onProcessError,
})
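
A minimal calling sketch (the service instance voiceService and all argument values below are illustrative, not part of the API; note that baseURL is a bare host, because the implementation prepends wss:// and appends the socket path):

final result = await voiceService.voiceCall(
  name: 'mentor-flow',
  pathway: 'onboarding',
  tenant: 'acme',
  username: 'jdoe',
  sessionId: 'session-123',
  token: '<auth token>',
  baseURL: 'api.example.com',
  onConnected: () => print('voice channel connected'),
  onMentorSpeaking: () => print('mentor started speaking'),
  onMentorStoppedSpeaking: () => print('mentor stopped speaking'),
  onProcessError: (error) => print('server error: $error'),
);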

Implementation

Future<Map<String, dynamic>> voiceCall({
  required String name,
  required String pathway,
  required String tenant,
  required String username,
  required String sessionId,
  required String token,
  required String baseURL,
  Function? onConnected,
  Function? onMentorSpeaking,
  Function? onMentorStoppedSpeaking,
  Function? onBufferSent,
  Function? onVoiceRecordingStarted,
  Function? onVoiceRecordingStopped,
  Function(String? error)? onProcessError,
}) async {
  // Store the optional callbacks so the socket listener can invoke them.
  _onConnected = onConnected;
  _onMentorSpeaking = onMentorSpeaking;
  _onMentorStoppedSpeaking = onMentorStoppedSpeaking;
  _onVoiceRecordingStarted = onVoiceRecordingStarted;
  _onVoiceRecordingStopped = onVoiceRecordingStopped;
  _onBufferSent = onBufferSent;
  _onProcessError = onProcessError;
  // Prepare the remote renderer and pick an audio output device.
  await remoteRTCVideoRenderer.initialize();
  await selectAudioOutput();

  uuid = uuid_.v4();

  // Capture microphone audio only; a voice call needs no video track.
  final Map<String, dynamic> mediaConstraints = {
    'audio': true,
    'video': false,
  };
  _localStream = await MediaDevices.getUserMedia(mediaConstraints);

  // Open the signalling socket; baseURL is a bare host, without a scheme.
  channel = WebSocketChannel.connect(
    Uri.parse('wss://$baseURL/ws/webrtc-audio/'),
  );
  final dataWithoutPrompt = {
    'flow': {
      'name': name,
      'tenant': tenant,
      'username': username,
      'pathway': pathway,
    },
    'session_id': sessionId,
    'token': token,
  };

  // Send the initial handshake payload (no prompt yet).
  channel.sink.add(json.encode(dataWithoutPrompt));
  channel.stream.listen((event) async {
    final decoded = json.decode(event);
    if (decoded.containsKey('error')) {
      // Report server-side errors through the optional callback.
      _onProcessError?.call(decoded['error']);
      handleVoiceRecord();
    } else if (decoded['type'] == 'mentor_started') {
      _onMentorSpeaking?.call();
      setMentorSpeaking();
    } else if (decoded['type'] == 'mentor_stopped') {
      _onMentorStoppedSpeaking?.call();
      if (isMentorSpeaking == true) {
        setStoppedMentorSpeaking();
      }
    } else if (decoded['connected'] == true &&
        connectionEstablished == false) {
      // First successful handshake: notify the caller and start recording.
      connectionEstablished = true;
      _onConnected?.call();
      start_recording();
      handleVoiceRecord();
    } else {
      // Everything else is forwarded to the signalling message handler.
      gotMessageFromServer(event);
    }
  });

  // Return live handles the caller uses to drive and tear down the call.
  return {
    'status': true,
    'channel': channel,
    'audioController': audioWavController,
    'stopMentor': stopMentor,
    'dispose': dispose,
    'sendaudio': sendBufferToMentor,
  };
}
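
The returned map exposes live handles rather than plain data. Continuing the sketch above, where result holds the map returned by voiceCall, a caller might interrupt the mentor and then release resources like this (an illustrative sketch; the map values are untyped, so each lookup is a dynamic invocation):

// Interrupt the mentor mid-utterance, then tear the call down.
if (result['status'] == true) {
  result['stopMentor']();
  result['dispose']();
}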