voiceCall method

Future<Map<String, dynamic>> voiceCall({
  required String name,
  required String pathway,
  required String tenant,
  required String username,
  required String sessionId,
  required String token,
  required String baseURL,
  Function? onConnected,
  Function? onMentorSpeaking,
  Function? onMentorStoppedSpeaking,
  Function? onBufferSent,
  Function? onVoiceRecordingStarted,
  Function? onVoiceRecordingStopped,
  Function(String? error)? onProcessError,
})
Implementation
/// Starts an audio-only call with the mentor service.
///
/// Initializes local audio capture, opens a WebSocket signalling channel at
/// `wss://$baseURL/ws/webrtc-audio/`, sends the initial session payload, and
/// wires server events to the optional callbacks.
///
/// Callbacks:
/// - [onConnected]: first `{'connected': true}` message from the server.
/// - [onMentorSpeaking] / [onMentorStoppedSpeaking]: `mentor_started` /
///   `mentor_stopped` events.
/// - [onProcessError]: any message carrying an `error` key.
/// - [onBufferSent], [onVoiceRecordingStarted], [onVoiceRecordingStopped]:
///   stored for use by other members of this class.
///
/// Returns a map with `status`, the open `channel`, the audio controller,
/// and bound control functions (`stopMentor`, `dispose`, `sendaudio`).
Future<Map<String, dynamic>> voiceCall({
  required String name,
  required String pathway,
  required String tenant,
  required String username,
  required String sessionId,
  required String token,
  required String baseURL,
  Function? onConnected,
  Function? onMentorSpeaking,
  Function? onMentorStoppedSpeaking,
  Function? onBufferSent,
  Function? onVoiceRecordingStarted,
  Function? onVoiceRecordingStopped,
  Function(String? error)? onProcessError,
}) async {
  // Stash the callbacks on the instance so the stream listener and other
  // members of this class can invoke them later.
  _onConnected = onConnected;
  _onMentorSpeaking = onMentorSpeaking;
  _onMentorStoppedSpeaking = onMentorStoppedSpeaking;
  _onVoiceRecordingStarted = onVoiceRecordingStarted;
  _onvoiceRecordingStopped = onVoiceRecordingStopped;
  _onBufferSent = onBufferSent;
  _onProcessError = onProcessError;

  await remoteRTCVideoRenderer.initialize();
  await selectAudioOutput();
  uuid = uuid_.v4();

  // Audio-only capture: no video track is requested.
  final mediaConstraints = <String, dynamic>{
    'audio': true,
    'video': false,
  };
  _localStream = await MediaDevices.getUserMedia(mediaConstraints);
  // NOTE(review): selectAudioOutput() was already awaited above; this second
  // call after getUserMedia may be a deliberate re-route of the output device
  // once the mic is live — confirm before removing.
  await selectAudioOutput();

  channel = WebSocketChannel.connect(
    Uri.parse('wss://$baseURL/ws/webrtc-audio/'),
  );

  // Initial handshake payload (no prompt yet).
  // FIX: previously sent `name` under the 'pathway' key, leaving the required
  // `pathway` parameter unused.
  final dataWithoutPrompt = {
    'flow': {
      'name': name,
      'tenant': tenant,
      'username': username,
      'pathway': pathway,
    },
    'session_id': sessionId,
    'token': token,
  };
  channel.sink.add(json.encode(dataWithoutPrompt));

  channel.stream.listen((event) async {
    final decoded = json.decode(event);
    if (decoded.containsKey('error')) {
      // Surface the server error, then resume voice recording.
      _onProcessError?.call(decoded['error']);
      handleVoiceRecord();
    } else if (decoded.containsKey('type') &&
        decoded['type'] == 'mentor_started') {
      _onMentorSpeaking?.call();
      setMentorSpeaking();
    } else if (decoded.containsKey('type') &&
        decoded['type'] == 'mentor_stopped') {
      _onMentorStoppedSpeaking?.call();
      // Only transition state if we actually believed the mentor was speaking.
      if (isMentorSpeaking == true) {
        setStoppedMentorSpeaking();
      }
    } else if (decoded.containsKey('connected') &&
        decoded['connected'] == true &&
        connectionEstablished == false) {
      // First (and only) connection acknowledgement: begin capturing audio.
      connectionEstablished = true;
      _onConnected?.call();
      start_recording();
      handleVoiceRecord();
    } else {
      // Anything else is signalling payload for the RTC layer.
      gotMessageFromServer(event);
    }
  });

  return {
    'status': true,
    'channel': channel,
    'audioController': audioWavController,
    'stopMentor': stopMentor,
    'dispose': dispose,
    'sendaudio': sendBufferToMentor,
  };
}