rawModerationCheck method
Classifies whether the given text violates OpenAI's Content Policy and returns the raw result as a Map
Implementation
/// Runs OpenAI's moderation check on [input].
///
/// Validates the input via [_checkApi], forwards it to the moderation
/// service, and returns the raw response converted to a
/// `Map<String, dynamic>`. Any error raised along the way is translated
/// through [_exceptionCheck] before being thrown to the caller.
Future<Map<String, dynamic>> rawModerationCheck(
    {required String input}) async {
  try {
    _checkApi(values: [input]);
    final response = await service.checkModeration(
      input: {'input': input},
      apiKey: _apiKey,
    );
    return response.toMap();
  } catch (err) {
    // Convert the raw error into the library's domain exception type.
    throw _exceptionCheck(err);
  }
}