isTextHarmful method
Checks whether the given text is harmful, using the Llama Guard 3 8B model.
customApiKey is the API key to use for the check; it defaults to the Groq instance's API key.
Returns a record containing whether the text is harmful, the harmful category (if any), the token usage, and the rate limit information.
Example:
final (isHarmful, harmfulCategory, usage, rateLimit) = await groq.isTextHarmful(
  text: 'YOUR_TEXT',
);
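A minimal sketch of consuming the returned record, assuming groq is an initialized Groq instance and userInput is a hypothetical string of your own; it only uses the members declared in the method signature below:

final (isHarmful, harmfulCategory, usage, rateLimit) =
    await groq.isTextHarmful(text: userInput);

if (isHarmful) {
  // harmfulCategory is only non-null when the text was flagged as unsafe.
  print('Text rejected, category: $harmfulCategory');
} else {
  print('Text accepted');
}

// usage and rateLimit can be logged or fed into throttling logic.
print('Usage: $usage, rate limit: $rateLimit');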
Implementation
Future<(bool, GroqLlamaGuardCategory?, GroqUsage, GroqRateLimitInformation?)>
    isTextHarmful({
  required String text,
  String? customApiKey,
}) async {
  final specificApiKey = customApiKey ?? apiKey;
  final chat = GroqChat(GroqModels.llama_guard_3_8b, specificApiKey,
      GroqChatSettings.defaults());
  final (response, usage) = await chat.sendMessage(text);
  final answerString = response.choices.first.message;
  bool isHarmful = false;
  GroqLlamaGuardCategory? harmfulCategory;
  if (answerString.contains("unsafe")) {
    isHarmful = true;
    final List<String> answerList = answerString.trim().split('\n');
    if (answerList.length < 2) {
      throw GroqException(
          statusCode: 400,
          error: GroqError(
              message: 'Received invalid response', type: 'InvalidResponse'));
    }
    String harmfulCategoryString = answerList[1];
    harmfulCategory =
        GroqParser.groqLlamaGuardCategoryFromString(harmfulCategoryString);
  }
  return (isHarmful, harmfulCategory, usage, chat.rateLimitInfo);
}
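Because the implementation throws a GroqException when the guard model answers "unsafe" without a category line, callers may want to guard the call. A minimal sketch, again assuming groq is an initialized Groq instance:

try {
  final (isHarmful, category, _, _) =
      await groq.isTextHarmful(text: 'YOUR_TEXT');
  // Act on isHarmful / category here.
} on GroqException catch (e) {
  // Thrown e.g. when the response cannot be parsed into a category.
  print('Moderation check failed: ${e.error.message}');
}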