CompletionRequest constructor

const CompletionRequest({
  required String prompt,
  required String model,
  @JsonKey(name: 'max_tokens') int? maxTokens,
  @Default(0) int temperature,
})
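
A minimal usage sketch follows; the import path and model name are illustrative assumptions, not taken from this page. Only prompt and model are required; maxTokens may be omitted (it is nullable) and temperature falls back to its @Default(0) value.

// Hypothetical import path for the package that defines CompletionRequest.
import 'package:openai_client/openai_client.dart';

void main() {
  // Both required fields are supplied; maxTokens is set explicitly and
  // temperature is left at its default of 0.
  const request = CompletionRequest(
    prompt: 'Write a haiku about Dart.',
    model: 'text-davinci-003',
    maxTokens: 64,
  );
  print(request); // Freezed generates a readable toString().
}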

Implementation

const factory CompletionRequest({
  /// The prompt(s) to generate completions for, encoded as a string,
  /// array of strings, array of tokens, or array of
  /// token arrays.
  required final String prompt,

  /// ID of the model to use. You can use the
  /// [List models API](https://beta.openai.com/docs/api-reference/models/list)
  /// to see all of your available models, or see our
  /// [Model overview](https://beta.openai.com/docs/models/overview)
  /// for descriptions of them.
  required final String model,

  /// The maximum number of [tokens](https://beta.openai.com/tokenizer)
  /// to generate in the completion.
  @JsonKey(name: 'max_tokens') final int? maxTokens,

  /// What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277?gi=3d1b289238b6)
  /// to use. Higher values mean the model will take more risks.
  @Default(0) final int temperature,
}) = _CompletionRequest;
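
Because of the @JsonKey(name: 'max_tokens') annotation, the Dart field maxTokens serializes to the snake_case key the API expects. The sketch below shows the resulting request body, assuming the Freezed/json_serializable code generation for this class also provides a toJson() method and that the hypothetical import path above is correct.

// Assumes CompletionRequest is exported by this (hypothetical) package path
// and that a generated toJson() is available.
import 'package:openai_client/openai_client.dart';

void main() {
  final body = const CompletionRequest(
    prompt: 'Say hello',
    model: 'text-davinci-003',
    maxTokens: 16,
  ).toJson();
  // Expected shape (key order may vary):
  // {prompt: Say hello, model: text-davinci-003, max_tokens: 16, temperature: 0}
  print(body);
}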