step method
Performs a single optimization step according to the AMSGrad update rule (a variant of Adam that divides by the running element-wise maximum of the second-moment estimate, ensuring a non-increasing effective step size).
Implementation
/// Performs a single optimization step using the AMSGrad update rule.
///
/// For every tracked parameter this updates, in place:
///  * `m`    — exponential moving average of the gradient (first moment),
///  * `v`    — exponential moving average of the squared gradient,
///  * `vHat` — element-wise running maximum of `v` (the AMSGrad buffer),
/// and then moves the parameter value by
/// `learningRate * mHat / (sqrt(vHat) + epsilon)`, where `mHat` is the
/// bias-corrected first moment.
///
/// NOTE(review): following the original AMSGrad formulation, no bias
/// correction is applied to `vHat` (only `m` is corrected). Parameters
/// whose `value` is neither a [Vector] nor a [Matrix] are silently
/// skipped, matching the original behavior.
@override
void step() {
  _t++;
  // Bias-correction denominator for the first moment is invariant across
  // all parameters and elements this step; compute it once instead of
  // once per element (the original called pow() in every inner-loop
  // iteration). The arithmetic result is unchanged.
  final num biasCorrection1 = 1 - pow(beta1, _t);
  for (Tensor param in parameters) {
    // State buffers are assumed to have been initialized for every
    // parameter before step() is called (hence the `!` assertions).
    dynamic m = _m[param]!;
    dynamic v = _v[param]!;
    dynamic vHatBuffer = _vHat[param]!;
    if (param.value is Vector) {
      Vector valVec = param.value as Vector;
      Vector gradVec = param.grad as Vector;
      Vector mVec = m as Vector;
      Vector vVec = v as Vector;
      Vector vHatVec = vHatBuffer as Vector;
      for (int i = 0; i < valVec.length; i++) {
        // First/second moment EMAs, then the AMSGrad max buffer.
        mVec[i] = beta1 * mVec[i] + (1 - beta1) * gradVec[i];
        vVec[i] = beta2 * vVec[i] + (1 - beta2) * pow(gradVec[i], 2);
        vHatVec[i] = max(vHatVec[i], vVec[i]);
        double mHat = mVec[i] / biasCorrection1;
        valVec[i] -= learningRate * mHat / (sqrt(vHatVec[i]) + epsilon);
      }
    } else if (param.value is Matrix) {
      Matrix valMat = param.value as Matrix;
      Matrix gradMat = param.grad as Matrix;
      Matrix mMat = m as Matrix;
      Matrix vMat = v as Matrix;
      Matrix vHatMat = vHatBuffer as Matrix;
      for (int r = 0; r < valMat.length; r++) {
        for (int c = 0; c < valMat[0].length; c++) {
          // Same update as the vector path, applied element-wise.
          mMat[r][c] = beta1 * mMat[r][c] + (1 - beta1) * gradMat[r][c];
          vMat[r][c] = beta2 * vMat[r][c] + (1 - beta2) * pow(gradMat[r][c], 2);
          vHatMat[r][c] = max(vHatMat[r][c], vMat[r][c]);
          double mHat = mMat[r][c] / biasCorrection1;
          valMat[r][c] -= learningRate * mHat / (sqrt(vHatMat[r][c]) + epsilon);
        }
      }
    }
  }
}