From ef345e0e482a4c71974b0a0d0b385433cac0c863 Mon Sep 17 00:00:00 2001
From: genxium
Date: Tue, 24 Jan 2023 10:08:34 +0800
Subject: [PATCH] Fine tune for rollback params.

---
 battle_srv/models/room.go                | 2 +-
 frontend/assets/scenes/login.fire        | 2 +-
 frontend/assets/scripts/Map.js           | 5 +++++
 frontend/assets/scripts/NetworkDoctor.js | 3 ++-
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/battle_srv/models/room.go b/battle_srv/models/room.go
index 041456a..bedde4f 100644
--- a/battle_srv/models/room.go
+++ b/battle_srv/models/room.go
@@ -808,7 +808,7 @@ func (pR *Room) OnDismissed() {
 	pR.BattleDurationFrames = int32(60 * serverFps)
 	pR.BattleDurationNanos = int64(pR.BattleDurationFrames) * (pR.RollbackEstimatedDtNanos + 1)
 	pR.InputFrameUpsyncDelayTolerance = battle.ConvertToNoDelayInputFrameId(pR.NstDelayFrames) - 1 // this value should be strictly smaller than (NstDelayFrames >> InputScaleFrames), otherwise "type#1 forceConfirmation" might become a lag avalanche
-	pR.MaxChasingRenderFramesPerUpdate = 4 // Don't set this value too high to avoid exhausting frontend CPU within a single frame
+	pR.MaxChasingRenderFramesPerUpdate = 9 // Don't set this value too high, to avoid exhausting frontend CPU within a single frame; 9 roughly matches the "turn-around frames to recover" and is empirically OK

 	pR.BackendDynamicsEnabled = true // [WARNING] When "false", recovery upon reconnection wouldn't work!
 	pR.ForceAllResyncOnAnyActiveSlowTicker = true // See tradeoff discussion in "downsyncToAllPlayers"
diff --git a/frontend/assets/scenes/login.fire b/frontend/assets/scenes/login.fire
index 24899cb..40ebf00 100644
--- a/frontend/assets/scenes/login.fire
+++ b/frontend/assets/scenes/login.fire
@@ -362,7 +362,7 @@
       "array": [
         0,
         0,
-        216.67832656600567,
+        217.36746944238692,
         0,
         0,
         0,
diff --git a/frontend/assets/scripts/Map.js b/frontend/assets/scripts/Map.js
index e4e5cb4..f2504cb 100644
--- a/frontend/assets/scripts/Map.js
+++ b/frontend/assets/scripts/Map.js
@@ -882,9 +882,14 @@ batchInputFrameIdRange=[${batch[0].inputFrameId}, ${batch[batch.length - 1].inpu
     let effCnt = 0;
     //console.log(`Received peer inputFrameUpsync batch w/ inputFrameId in [${batch[0].inputFrameId}, ${batch[batch.length - 1].inputFrameId}] for prediction assistance`);
+    const renderedInputFrameIdUpper = gopkgs.ConvertToDelayedInputFrameId(self.renderFrameId);
     for (let k in batch) {
       const inputFrameDownsync = batch[k];
       const inputFrameDownsyncId = inputFrameDownsync.inputFrameId;
+      if (inputFrameDownsyncId < renderedInputFrameIdUpper) {
+        // Avoid obfuscating already rendered history
+        continue;
+      }
       if (inputFrameDownsyncId <= self.lastAllConfirmedInputFrameId) {
         continue;
       }
diff --git a/frontend/assets/scripts/NetworkDoctor.js b/frontend/assets/scripts/NetworkDoctor.js
index bc8326d..5b1f4e8 100644
--- a/frontend/assets/scripts/NetworkDoctor.js
+++ b/frontend/assets/scripts/NetworkDoctor.js
@@ -80,12 +80,13 @@ NetworkDoctor.prototype.logSkippedRenderFrameCnt = function() {
 }

 NetworkDoctor.prototype.isTooFast = function() {
+  return false;
   const [sendingFps, srvDownsyncFps, peerUpsyncFps, rollbackFrames, skippedRenderFrameCnt] = this.stats();
   if (sendingFps >= this.inputRateThreshold + 2) {
     // Don't send too fast
     return true;
   } else if (sendingFps >= this.inputRateThreshold && srvDownsyncFps >= this.inputRateThreshold) {
-    // At least my network is OK for both TX & RX directions -- PING value might help as a supplement information here to confirm that the "selfPlayer" is not lagged in RX which results in the "rollbackFrames", but not necessary -- a significant lag within the "inputFrameDownsyncQ" will reduce "srvDownsyncFps".
+    // An outstanding lag within the "inputFrameDownsyncQ" will reduce "srvDownsyncFps"; HOWEVER, a constant lag wouldn't impact "srvDownsyncFps"! On native platforms the PING value might help as supplementary information to confirm that the "selfPlayer" is not lagged within the time accounted for by "inputFrameDownsyncQ".
     if (rollbackFrames >= this.rollbackFramesThreshold) {
       // I got many frames rolled back while none of my peers effectively helped my preciction. Deliberately not using "peerUpsyncThreshold" here because when using UDP p2p upsync broadcasting, we expect to receive effective p2p upsyncs from every other player.
       return true;
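
About the new guard in Map.js: by the time the frontend renders self.renderFrameId it has already consumed the input frame that is delayed and downscaled from it, so any peer upsync older than that input frame can only describe history that is already on screen. The Go sketch below illustrates that relationship; the constants and the conversion formula here are assumptions for illustration only, the real ones live behind gopkgs.ConvertToDelayedInputFrameId in the shared battle package.

package main

import "fmt"

// Assumed constants for illustration only; the real values and formulas live in
// the shared battle package exposed to the frontend as "gopkgs" and may differ.
const (
	inputScaleFrames = 2 // 1 inputFrame covers 2^2 = 4 renderFrames
	inputDelayFrames = 4 // a renderFrame is driven by an inputFrame this many renderFrames older
)

// convertToDelayedInputFrameId mirrors the idea behind
// gopkgs.ConvertToDelayedInputFrameId: which inputFrameId has already been
// applied when rendering "renderFrameId".
func convertToDelayedInputFrameId(renderFrameId int32) int32 {
	if renderFrameId < inputDelayFrames {
		return 0
	}
	return (renderFrameId - inputDelayFrames) >> inputScaleFrames
}

func main() {
	renderedInputFrameIdUpper := convertToDelayedInputFrameId(882)

	// Anything below the already-rendered upper bound is history and would only
	// obfuscate frames that are already on screen, hence it is skipped -- the
	// same guard the patch adds to Map.js.
	batch := []int32{renderedInputFrameIdUpper - 1, renderedInputFrameIdUpper, renderedInputFrameIdUpper + 1}
	for _, inputFrameDownsyncId := range batch {
		if inputFrameDownsyncId < renderedInputFrameIdUpper {
			fmt.Printf("skip stale inputFrameId=%d\n", inputFrameDownsyncId)
			continue
		}
		fmt.Printf("use inputFrameId=%d for prediction\n", inputFrameDownsyncId)
	}
}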
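
About raising MaxChasingRenderFramesPerUpdate from 4 to 9: the frontend catches up ("chases") at most this many render frames per Update() after a rollback, so the value trades per-frame CPU against how long the turn-around to recover takes. Below is a hypothetical sketch of that cap with made-up names, assuming the cap is applied once per Update().

package main

import "fmt"

// maxChasingRenderFramesPerUpdate is the value this patch raises from 4 to 9.
const maxChasingRenderFramesPerUpdate = 9

// chaseOnce is a hypothetical sketch of the per-Update() catch-up step: after a
// rollback, the chaser re-simulates toward the current render frame, but never
// more than maxChasingRenderFramesPerUpdate frames per Update, so a single
// frontend frame cannot be stalled by a long catch-up.
func chaseOnce(chaserRenderFrameId, renderFrameId int32) int32 {
	nextChaserId := chaserRenderFrameId + maxChasingRenderFramesPerUpdate
	if nextChaserId > renderFrameId {
		nextChaserId = renderFrameId
	}
	for id := chaserRenderFrameId; id < nextChaserId; id++ {
		// re-apply the (possibly corrected) inputFrameDownsync to step the
		// deterministic simulation from "id" to "id+1"; elided here
	}
	return nextChaserId
}

func main() {
	chaser, current := int32(100), int32(130) // 30 frames behind after a rollback
	for updates := 1; chaser < current; updates++ {
		chaser = chaseOnce(chaser, current)
		fmt.Printf("after Update #%d chaser=%d\n", updates, chaser)
	}
	// With 9 frames per Update the 30-frame gap closes in 4 Updates; with the
	// old value of 4 it would take 8, i.e. a longer turn-around to recover.
}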