// render method — implementation follows.
//
// @override
// void render(
//   WebGLRenderer renderer,
//   WebGLRenderTarget writeBuffer,
//   WebGLRenderTarget readBuffer, {
//   num? deltaTime,
//   bool? maskActive,
// })
/// Renders the scene several times with sub-pixel jitter offsets and
/// accumulates the weighted results to produce supersampled anti-aliasing
/// (SSAA).
///
/// [renderer]    — renderer used for every jittered pass; its autoClear and
///                 clear-color state is saved on entry and restored on exit.
/// [writeBuffer] — destination for the accumulated image (skipped in favor
///                 of the screen when `renderToScreen` is set).
/// [readBuffer]  — only its dimensions are read, to size the sample target
///                 and the default view offset.
/// [deltaTime] and [maskActive] are accepted for pass-interface
/// compatibility but are not referenced in this implementation.
@override
void render(WebGLRenderer renderer, WebGLRenderTarget writeBuffer, WebGLRenderTarget readBuffer, {num? deltaTime, bool? maskActive}) {
// Lazily allocate the intermediate target that each jittered frame is
// rendered into before being blended into the output.
if (sampleRenderTarget == null) {
sampleRenderTarget = WebGLRenderTarget(
readBuffer.width,
readBuffer.height,
WebGLRenderTargetOptions({
"minFilter": LinearFilter,
"magFilter": LinearFilter,
"format": RGBAFormat
}));
sampleRenderTarget!.texture.name = 'SSAARenderPass.sample';
}
// Select the jitter pattern for the clamped sample level (0..5); the
// pattern's length is the number of accumulation passes below.
final jitterOffsets = _jitterVectors[math.max(0, math.min(sampleLevel, 5))];
// Save renderer state so it can be restored at the end of this pass.
final autoClear = renderer.autoClear;
renderer.autoClear = false;
renderer.getClearColor(_oldClearColor);
final oldClearAlpha = renderer.getClearAlpha();
// Equal weight per sample; `roundingRange` is the span over which weights
// are perturbed when `unbiased` is set (see the loop below).
final baseSampleWeight = 1.0 / jitterOffsets.length;
const roundingRange = 1 / 32;
// The copy shader reads from the freshly rendered sample target each pass.
copyUniforms['tDiffuse']["value"] = sampleRenderTarget!.texture;
// Default view offset spanning the whole read buffer; replaced below by the
// camera's own view offset when one is already enabled on the camera.
final Map<String,dynamic> viewOffset = {
"fullWidth": readBuffer.width,
"fullHeight": readBuffer.height,
"offsetX": 0,
"offsetY": 0,
"width": readBuffer.width,
"height": readBuffer.height
};
Map<String, dynamic> originalViewOffset = camera.view?.toMap ?? {};//jsonDecode(jsonEncode(camera.view ?? {}));
if (originalViewOffset["enabled"] == true){
viewOffset.addAll(originalViewOffset);
}
// render the scene multiple times, each slightly jitter offset from the last and accumulate the results.
for (int i = 0; i < jitterOffsets.length; i++) {
final jitterOffset = jitterOffsets[i];
// Jitter the projection by a fraction of a pixel via the view offset.
// `*1.0` coerces the int dimensions seeded from readBuffer into doubles.
if (camera.type == "PerspectiveCamera") {
(camera as PerspectiveCamera).setViewOffset(
viewOffset["fullWidth"]*1.0,
viewOffset["fullHeight"]*1.0,
viewOffset["offsetX"] + jitterOffset[0] * 0.0625,
viewOffset["offsetY"] + jitterOffset[1] * 0.0625, // 0.0625 = 1 / 16
viewOffset["width"]*1.0,
viewOffset["height"]*1.0
);
} else if (camera.type == "OrthographicCamera") {
(camera as OrthographicCamera).setViewOffset(
viewOffset["fullWidth"]*1.0,
viewOffset["fullHeight"]*1.0,
viewOffset["offsetX"] + jitterOffset[0] * 0.0625,
viewOffset["offsetY"] + jitterOffset[1] * 0.0625, // 0.0625 = 1 / 16
viewOffset["width"]*1.0,
viewOffset["height"]*1.0
);
}
double sampleWeight = baseSampleWeight;
if (unbiased) {
// the theory is that equal weights for each sample lead to an accumulation of rounding errors.
// The following equation varies the sampleWeight per sample so that it is uniformly distributed
// across a range of values whose rounding errors cancel each other out.
final uniformCenteredDistribution = (-0.5 + (i + 0.5) / jitterOffsets.length);
sampleWeight += roundingRange * uniformCenteredDistribution;
}
// Render this jittered sample into the sample target, then blend it into
// the output via the full-screen quad using `sampleWeight` as opacity.
copyUniforms['opacity']["value"] = sampleWeight;
renderer.setClearColor(clearColor, clearAlpha);
renderer.setRenderTarget(sampleRenderTarget);
renderer.clear(true, true, true);
renderer.render(scene, camera);
renderer.setRenderTarget(renderToScreen ? null : writeBuffer);
// Only the first sample clears the accumulation target; later samples
// blend on top of it.
if (i == 0) {
renderer.setClearColor(Color.fromHex32(0x000000), 0.0);
renderer.clear(true, true, true);
}
fsQuad.render(renderer);
}
// Restore the camera: reinstate its original view offset if one was
// enabled, otherwise clear the jitter offset installed above.
if (camera.type == "OrthographicCamera" &&
originalViewOffset["enabled"] == true) {
(camera as OrthographicCamera).setViewOffset(
originalViewOffset["fullWidth"],
originalViewOffset["fullHeight"],
originalViewOffset["offsetX"],
originalViewOffset["offsetY"],
originalViewOffset["width"],
originalViewOffset["height"]);
} else if (camera.type == "PerspectiveCamera" &&
originalViewOffset["enabled"] == true) {
(camera as PerspectiveCamera).setViewOffset(
originalViewOffset["fullWidth"],
originalViewOffset["fullHeight"],
originalViewOffset["offsetX"],
originalViewOffset["offsetY"],
originalViewOffset["width"],
originalViewOffset["height"]);
} else if (camera.type == "PerspectiveCamera") {
(camera as PerspectiveCamera).clearViewOffset();
} else if (camera.type == "OrthographicCamera") {
(camera as OrthographicCamera).clearViewOffset();
}
// Restore the renderer state saved on entry.
renderer.autoClear = autoClear;
renderer.setClearColor(_oldClearColor, oldClearAlpha);
}