`loadModel` method — implementation
/// Loads the MobileFaceNet TFLite model from assets, configured to run on
/// the GPU delegate for low-latency, single-answer inference.
///
/// On failure the error (and stack trace) is logged via [CryLogger] and
/// `_interpreter` is left unassigned — callers must tolerate an
/// uninitialized interpreter.
Future<void> loadModel() async {
  try {
    // NOTE(review): positional argument order follows the
    // GpuDelegateOptionsV2 constructor of the tflite_flutter version this
    // project pins — confirm against pubspec before upgrading, as newer
    // releases use named parameters.
    final delegate = GpuDelegateV2(
      options: GpuDelegateOptionsV2(
        false, // isPrecisionLossAllowed — presumably full precision; verify
        TfLiteGpuInferenceUsage.fastSingleAnswer,
        TfLiteGpuInferencePriority.minLatency,
        TfLiteGpuInferencePriority.auto,
        TfLiteGpuInferencePriority.auto,
      ),
    );
    final interpreterOptions = InterpreterOptions()..addDelegate(delegate);
    _interpreter = await Interpreter.fromAsset(
      'mobilefacenet.tflite',
      options: interpreterOptions,
    );
  } catch (e, stackTrace) {
    // Best-effort load: swallow the error but log it with its stack trace
    // so failures (missing asset, unsupported GPU) remain diagnosable.
    CryLogger.debug('Failed to load model.');
    CryLogger.debug(e);
    CryLogger.debug(stackTrace);
  }
}