main function
void main()
Implementation
void main() {
  const lr = 0.01;
  final model = MultiLayerPerceptron(lr); // 784 inputs → 10 outputs
  // imgBytes is assumed to be a flattened 28 × 28 grayscale image loaded
  // elsewhere (see the loading sketch after this listing).
  print("Image bytes length: ${imgBytes.length}"); // 784 bytes
  // Normalize the raw pixel bytes to [0, 1]; int / double already yields a
  // double in Dart, so no explicit cast is needed.
  final normalized = imgBytes.map((e) => e / 255.0).toList();
  // Three copies of the same training sample.
  final inputs = [
    ValueVector.fromDoubleList(normalized),
    ValueVector.fromDoubleList(normalized),
    ValueVector.fromDoubleList(normalized),
  ];
  // One-hot targets: class index 5 is the label for every sample, so each
  // target vector has a 1.0 at position 5 and 0.0 everywhere else.
  final targets = [
    for (var s = 0; s < inputs.length; s++)
      ValueVector(List.generate(10, (i) => Value(i == 5 ? 1.0 : 0.0))),
  ];
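  // For each sample the training loop below computes
  //   sampleLoss = mean((yPred - yTrue)²) over the 10 output components,
  // then sums the per-sample losses into totalLoss before back-propagating.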
  const epochs = 400;
  for (int epoch = 0; epoch < epochs; epoch++) {
    final losses = <Value>[];
    // Forward pass: compute the mean-squared-error loss for every sample.
    for (int i = 0; i < inputs.length; i++) {
      final yPred = model.forward(inputs[i]);
      final yTrue = targets[i];
      final diff = yPred - yTrue;
      final squared = diff.squared();
      final sampleLoss = squared.mean();
      losses.add(sampleLoss);
    }
    final totalLoss = losses.reduce((a, b) => a + b);
    // Alternatively, back-propagate the average loss instead of the sum:
    // final avgLoss = totalLoss * (1.0 / inputs.length);
    // avgLoss.backward();
    totalLoss.backward();
    // Gradient descent step.
    model.updateWeights();
    if (epoch % 4 == 0) {
      print("Epoch $epoch | Loss = ${totalLoss.data.toStringAsFixed(10)}");
    }
    // Reset gradients before the next epoch.
    model.zeroGrad();
  }
  // Inspect the trained model's predictions for each input.
  for (var input in inputs) {
    // Reset gradients (not strictly required for a forward-only pass).
    for (var p in model.parameters()) {
      p.grad = 0;
    }
    // print("Input: ${input}");
    print("Output: ${model.forward(input)}");
    print("");
  }
}
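The listing assumes that imgBytes already holds the 784 raw grayscale pixels of a flattened 28 × 28 image. A minimal sketch of how it might be loaded, assuming the pixels are stored as a raw 784-byte file at a hypothetical path assets/digit5.raw, is a synchronous read with dart:io:

import 'dart:io';

// Hypothetical path: any raw 784-byte grayscale dump of a digit image works.
// readAsBytesSync returns a Uint8List, so imgBytes.map((e) => e / 255.0)
// in main() produces the normalized doubles directly.
final imgBytes = File('assets/digit5.raw').readAsBytesSync();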