google_speech 5.3.0

Add google_speech: ^5.3.0 to the dependencies in your pubspec.yaml.

Flutter plugin for the Google Cloud gRPC Speech-to-Text API.
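At its core a transcription request is three steps: load Google Cloud service-account credentials, build a SpeechToText client from them, and send the audio together with a RecognitionConfig. A condensed sketch of that flow, using only the calls from the full example below (the asset path and config values are taken from it):

Future<String> transcribeFile(List<int> audio) async {
  // Credentials are bundled as a Flutter asset in this example.
  final serviceAccount = ServiceAccount.fromString(
      await rootBundle.loadString('assets/test_service_account.json'));
  final speechToText = SpeechToText.viaServiceAccount(serviceAccount);
  final config = RecognitionConfig(
      encoding: AudioEncoding.LINEAR16,
      model: RecognitionModel.basic,
      enableAutomaticPunctuation: true,
      sampleRateHertz: 16000,
      languageCode: 'en-US');
  final response = await speechToText.recognize(config, audio);
  // Each result holds ranked alternatives; take the top transcript.
  return response.results
      .map((e) => e.alternatives.first.transcript)
      .join('\n');
}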

example/README.md

import 'dart:io';

import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:google_speech/google_speech.dart';
import 'package:path_provider/path_provider.dart';

void main() {
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  // This widget is the root of your application.
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Audio File Example',
      theme: ThemeData(
        primarySwatch: Colors.blue,
        visualDensity: VisualDensity.adaptivePlatformDensity,
      ),
      home: AudioRecognize(),
    );
  }
}

class AudioRecognize extends StatefulWidget {
  @override
  State<StatefulWidget> createState() => _AudioRecognizeState();
}

class _AudioRecognizeState extends State<AudioRecognize> {
  bool recognizing = false;
  bool recognizeFinished = false;
  String text = '';

  void recognize() async {
    setState(() {
      recognizing = true;
    });
    final serviceAccount = ServiceAccount.fromString(
        await rootBundle.loadString('assets/test_service_account.json'));
    final speechToText = SpeechToText.viaServiceAccount(serviceAccount);
    final config = _getConfig();
    final audio = await _getAudioContent('test.wav');

    await speechToText.recognize(config, audio).then((value) {
      setState(() {
        text = value.results
            .map((e) => e.alternatives.first.transcript)
            .join('\n');
      });
    }).whenComplete(() => setState(() {
          recognizeFinished = true;
          recognizing = false;
        }));
  }
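
  // Sketch, not part of the original example: the response mirrors the
  // google.cloud.speech.v1 protos, so each result may carry several ranked
  // alternatives. `RecognizeResponse` and the `confidence` field are assumed
  // from the generated proto bindings the package re-exports.
  void _logAlternatives(RecognizeResponse response) {
    for (final result in response.results) {
      for (final alternative in result.alternatives) {
        print('${alternative.confidence}: ${alternative.transcript}');
      }
    }
  }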

  void streamingRecognize() async {
    setState(() {
      recognizing = true;
    });
    final serviceAccount = ServiceAccount.fromString(
        await rootBundle.loadString('assets/test_service_account.json'));
    final speechToText = SpeechToText.viaServiceAccount(serviceAccount);
    final config = _getConfig();

    final responseStream = speechToText.streamingRecognize(
        StreamingRecognitionConfig(config: config, interimResults: true),
        await _getAudioStream('test.wav'));

    responseStream.listen((data) {
      setState(() {
        text =
            data.results.map((e) => e.alternatives.first.transcript).join('\n');
        recognizeFinished = true;
      });
    }, onDone: () {
      setState(() {
        recognizing = false;
      });
    });
  }
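
  // Sketch, not part of the original example: streamingRecognize accepts any
  // Stream<List<int>>, so a live microphone feed works the same way as the
  // file stream above. `micStream` is hypothetical; substitute a recorder
  // plugin that emits 16 kHz LINEAR16 chunks. `isFinal` is assumed from the
  // generated StreamingRecognitionResult bindings.
  void _liveTranscribe(
      SpeechToText speechToText, Stream<List<int>> micStream) {
    final responses = speechToText.streamingRecognize(
        StreamingRecognitionConfig(config: _getConfig(), interimResults: true),
        micStream);
    responses.listen((data) {
      for (final result in data.results) {
        // With interimResults enabled, non-final hypotheses may be revised.
        print('${result.isFinal ? 'final' : 'interim'}: '
            '${result.alternatives.first.transcript}');
      }
    });
  }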

  RecognitionConfig _getConfig() => RecognitionConfig(
      encoding: AudioEncoding.LINEAR16,
      model: RecognitionModel.basic,
      enableAutomaticPunctuation: true,
      sampleRateHertz: 16000,
      languageCode: 'en-US');
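
  // A possible variant of _getConfig for 8 kHz source material: same
  // parameters, only the sample rate changes. sampleRateHertz must always
  // match the audio actually being sent.
  RecognitionConfig _getPhoneConfig() => RecognitionConfig(
      encoding: AudioEncoding.LINEAR16,
      model: RecognitionModel.basic,
      enableAutomaticPunctuation: true,
      sampleRateHertz: 8000,
      languageCode: 'en-US');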

  Future<void> _copyFileFromAssets(String name) async {
    var data = await rootBundle.load('assets/$name');
    final directory = await getApplicationDocumentsDirectory();
    final path = directory.path + '/$name';
    await File(path).writeAsBytes(
        data.buffer.asUint8List(data.offsetInBytes, data.lengthInBytes));
  }

  Future<List<int>> _getAudioContent(String name) async {
    final directory = await getApplicationDocumentsDirectory();
    final path = directory.path + '/$name';
    if (!File(path).existsSync()) {
      await _copyFileFromAssets(name);
    }
    return File(path).readAsBytesSync().toList();
  }

  Future<Stream<List<int>>> _getAudioStream(String name) async {
    final directory = await getApplicationDocumentsDirectory();
    final path = directory.path + '/$name';
    if (!File(path).existsSync()) {
      await _copyFileFromAssets(name);
    }
    return File(path).openRead();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('Audio File Example'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.spaceAround,
          children: <Widget>[
            if (recognizeFinished)
              _RecognizeContent(
                text: text,
              ),
            ElevatedButton(
              onPressed: recognizing ? null : recognize,
              child: recognizing
                  ? CircularProgressIndicator()
                  : Text('Test with recognize'),
            ),
            ElevatedButton(
              onPressed: recognizing ? null : streamingRecognize,
              child: recognizing
                  ? CircularProgressIndicator()
                  : Text('Test with streaming recognize'),
            ),
          ],
        ),
      ),
    );
  }
}

class _RecognizeContent extends StatelessWidget {
  final String text;

  const _RecognizeContent({Key? key, required this.text}) : super(key: key);

  @override
  Widget build(BuildContext context) {
    return Padding(
      padding: const EdgeInsets.all(16.0),
      child: Column(
        children: <Widget>[
          const Text(
            'The text recognized by the Google Speech Api:',
          ),
          const SizedBox(height: 16.0),
          Text(text),
        ],
      ),
    );
  }
}
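
Note that the example, as written, expects test.wav and test_service_account.json to be registered as Flutter assets in pubspec.yaml, since both are loaded through rootBundle; the service-account JSON comes from a Google Cloud project with the Speech-to-Text API enabled.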