initialize method
@override
Future<bool> initialize({
  dynamic debugLogging = false,
  List<SpeechConfigOption>? options,
})
Initialize speech recognition services, returns true if successful, false if failed.
This method must be called before any other speech functions. If this method returns false no further SpeechToText methods should be used. False usually means that the user has denied permission to use speech.
debugLogging controls whether there is detailed logging from the underlying
plugins. It is off by default and usually only useful for troubleshooting
issues with a particular OS version or device; the output is fairly verbose.
Implementation
/// Initializes the web speech recognition backend.
///
/// Returns `true` if the browser supports the Web Speech API and the
/// recognizer was created and wired up, `false` otherwise. This must be
/// called before any other speech methods; a `false` result means no
/// further speech methods should be used.
///
/// [debugLogging] controls detailed logging from the underlying plugin
/// (off by default; kept `dynamic` to match the overridden signature).
/// [options] may enable aggregate results via
/// [BalancedAlternates.isAggregateResultsEnabled].
@override
Future<bool> initialize(
    {debugLogging = false, List<SpeechConfigOption>? options}) async {
  if (!html.SpeechRecognition.supported) {
    // The browser does not expose the Web Speech API at all.
    var error = SpeechRecognitionError('not supported', true);
    onError?.call(jsonEncode(error.toJson()));
    return false;
  }
  var initialized = false;
  try {
    _webSpeech = html.SpeechRecognition();
    _aggregateResults = BalancedAlternates.isAggregateResultsEnabled(options);
    _webSpeech!.onError.listen(_onError);
    _webSpeech!.onStart.listen(_onSpeechStart);
    _webSpeech!.onSpeechStart.listen(_onSpeechStart);
    // onEnd (not onSpeechEnd) is used to detect the end of recognition;
    // the onSpeechEnd listener was deliberately left disabled.
    _webSpeech!.onEnd.listen(_onSpeechEnd);
    // _webSpeech!.onSpeechEnd.listen(_onSpeechEnd);
    _webSpeech!.onNoMatch.listen(_onNoMatch);
    initialized = true;
  } catch (_) {
    // Creating or wiring the recognizer failed. Previously this was a
    // bare try/finally, so the exception escaped to the caller even
    // after reporting the error — violating the documented contract of
    // returning false on failure. Report and return false instead.
    _webSpeech = null;
    var error = SpeechRecognitionError('speech_not_supported', true);
    onError?.call(jsonEncode(error.toJson()));
  }
  return initialized;
}