complete method

@override
Future<CompletionResponse> complete(
  CompletionRequest request
)

Sends a completion request to generate text.

request - The completion request parameters

Returns the generated completion text or throws an LLMError

Implementation

/// Sends a completion request by routing it through the chat endpoint.
///
/// Newer OpenAI APIs have no dedicated completion endpoint, so [request]'s
/// prompt is wrapped as a single user chat message. Returns the generated
/// text (empty when the API returns no choices) along with usage data when
/// the response provides it.
@override
Future<CompletionResponse> complete(CompletionRequest request) async {
  // Wrap the raw prompt as a one-message chat conversation for compatibility.
  final promptAsChat = [ChatMessage.user(request.prompt)];

  final body = <String, dynamic>{
    'model': config.model,
    'messages': client.buildApiMessages(promptAsChat),
    'stream': false,
  };
  // Forward only the optional sampling parameters the caller actually set.
  if (request.maxTokens != null) body['max_tokens'] = request.maxTokens;
  if (request.temperature != null) body['temperature'] = request.temperature;
  if (request.topP != null) body['top_p'] = request.topP;
  if (request.stop != null) body['stop'] = request.stop;

  final json = await client.postJson('chat/completions', body);

  // Pull the generated text out of the chat-style response shape.
  final choiceList = json['choices'] as List?;
  if (choiceList == null || choiceList.isEmpty) {
    // No choices came back: report an empty completion rather than throwing.
    return const CompletionResponse(text: '');
  }

  final firstMessage = choiceList.first['message'] as Map<String, dynamic>?;
  final rawUsage = json['usage'] as Map<String, dynamic>?;

  return CompletionResponse(
    text: firstMessage?['content'] as String? ?? '',
    usage: rawUsage == null ? null : UsageInfo.fromJson(rawUsage),
  );
}