FineTuneRequest constructor
const FineTuneRequest({
- @JsonKey(name: 'training_file') required String trainingFile,
- @JsonKey(name: 'validation_file') String? validationFile,
- String? model,
- @JsonKey(name: 'n_epochs') int? nEpochs,
- @JsonKey(name: 'batch_size') int? batchSize,
- @JsonKey(name: 'learning_rate_multiplier') double? learningRateMultiplier,
- @JsonKey(name: 'prompt_loss_weight') double? promptLossWeight,
- @JsonKey(name: 'compute_classification_metrics') bool? computeClassificationMetrics,
- @JsonKey(name: 'classification_n_classes') int? classificationNClasses,
- @JsonKey(name: 'classification_positive_class') String? classificationPositiveClass,
- @JsonKey(name: 'classification_betas') List<double>? classificationBetas,
- String? suffix,
Implementation
/// Creates a request payload for the OpenAI fine-tunes API.
///
/// Each parameter maps to a `snake_case` JSON field via its `@JsonKey`
/// annotation; parameters without an annotation serialize under their own
/// name. Only [trainingFile] is required — all other fields are optional
/// and fall back to the API's server-side defaults when omitted.
const factory FineTuneRequest({
/// The ID of an uploaded file that contains training data.
/// See [upload file](https://beta.openai.com/docs/api-reference/files/upload)
/// for how to upload a file.
///
/// Your dataset must be formatted as a JSONL file, where each training example
/// is a JSON object with the keys "prompt" and "completion". Additionally,
/// you must upload your file with the purpose fine-tune.
@JsonKey(name: 'training_file') required final String trainingFile,
/// The ID of an uploaded file that contains validation data.
///
/// Your dataset must be formatted as a JSONL file, where each training example
/// is a JSON object with the keys "prompt" and "completion". Additionally,
/// you must upload your file with the purpose fine-tune.
@JsonKey(name: 'validation_file') final String? validationFile,
/// The name of the base model to fine-tune. You can select one of "ada",
/// "babbage", "curie", "davinci", or a fine-tuned model created after
/// 2022-04-21.
final String? model,
/// The number of epochs to train the model for. An epoch refers to one
/// full cycle through the training dataset.
@JsonKey(name: 'n_epochs') final int? nEpochs,
/// The batch size to use for training. The batch size is the number of
/// training examples used to train a single forward and backward pass.
///
/// By default, the batch size will be dynamically configured to be ~0.2%
/// of the number of examples in the training set, capped at 256 - in general,
/// we've found that larger batch sizes tend to work better for larger
/// datasets.
@JsonKey(name: 'batch_size') final int? batchSize,
/// The learning rate multiplier to use for training. The fine-tuning
/// learning rate is the original learning rate used for pretraining
/// multiplied by this value.
///
/// By default, the learning rate multiplier is 0.05, 0.1, or 0.2
/// depending on the final batch_size (larger learning rates tend to perform
/// better with larger batch sizes). We recommend experimenting with values
/// in the range 0.02 to 0.2 to see what produces the best results.
@JsonKey(name: 'learning_rate_multiplier')
final double? learningRateMultiplier,
/// The weight to use for loss on the prompt tokens. This controls how much
/// the model tries to learn to generate the prompt (as compared to the
/// completion which always has a weight of 1.0), and can add a stabilizing
/// effect to training when completions are short.
///
/// If prompts are extremely long (relative to completions), it may make
/// sense to reduce this weight so as to avoid over-prioritizing learning
/// the prompt.
@JsonKey(name: 'prompt_loss_weight') final double? promptLossWeight,
/// If set, we calculate classification-specific metrics such as accuracy
/// and F-1 score using the validation set at the end of every epoch.
///
/// In order to compute classification metrics, you must provide a
/// `validation_file`. Additionally, you must specify `classification_n_classes`
/// for multiclass classification or `classification_positive_class` for binary
/// classification.
@JsonKey(name: 'compute_classification_metrics')
final bool? computeClassificationMetrics,
/// The number of classes in a classification task.
///
/// Required for multiclass classification.
@JsonKey(name: 'classification_n_classes')
final int? classificationNClasses,
/// The positive class in binary classification.
///
/// Required for binary classification when computing classification metrics.
@JsonKey(name: 'classification_positive_class')
final String? classificationPositiveClass,
/// If this is provided, we calculate F-beta scores at the specified beta
/// values. The F-beta score is a generalization of F-1 score. This is
/// only used for binary classification.
@JsonKey(name: 'classification_betas')
final List<double>? classificationBetas,
/// A string of up to 40 characters that will be added to your fine-tuned
/// model name.
final String? suffix,
}) = _FineTuneRequest;