Fine-tune rollback params.

genxium 2023-01-24 10:08:34 +08:00
parent 0168e2182e
commit ef345e0e48
4 changed files with 9 additions and 3 deletions

View File

@@ -808,7 +808,7 @@ func (pR *Room) OnDismissed() {
pR.BattleDurationFrames = int32(60 * serverFps)
pR.BattleDurationNanos = int64(pR.BattleDurationFrames) * (pR.RollbackEstimatedDtNanos + 1)
pR.InputFrameUpsyncDelayTolerance = battle.ConvertToNoDelayInputFrameId(pR.NstDelayFrames) - 1 // this value should be strictly smaller than (NstDelayFrames >> InputScaleFrames), otherwise "type#1 forceConfirmation" might become a lag avalanche
-pR.MaxChasingRenderFramesPerUpdate = 4 // Don't set this value too high to avoid exhausting frontend CPU within a single frame
+pR.MaxChasingRenderFramesPerUpdate = 9 // Don't set this value too high, to avoid exhausting frontend CPU within a single frame; as the rough "turn-around frames to recover", 9 is empirically OK
pR.BackendDynamicsEnabled = true // [WARNING] When "false", recovery upon reconnection wouldn't work!
pR.ForceAllResyncOnAnyActiveSlowTicker = true // See tradeoff discussion in "downsyncToAllPlayers"
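For orientation, "MaxChasingRenderFramesPerUpdate" is downsynced to the frontend, where it caps how many history render frames the client re-simulates per update while chasing the confirmed state after a misprediction. Below is a minimal sketch of such a bounded chase step; the names "chaserRenderFrameId", "renderFrameId", "rollbackAndChase" and "maxChasingRenderFramesPerUpdate" are assumptions for illustration, not the project's exact frontend code.

// Hedged sketch: one bounded chasing step per update (assumed names throughout).
function chaseRolledbackRenderFrames(self) {
    const prevChaserRenderFrameId = self.chaserRenderFrameId; // earliest history frame still to be re-simulated
    let nextChaserRenderFrameId = prevChaserRenderFrameId + self.maxChasingRenderFramesPerUpdate;
    if (nextChaserRenderFrameId > self.renderFrameId) {
        nextChaserRenderFrameId = self.renderFrameId; // never chase past the currently rendered frame
    }
    if (prevChaserRenderFrameId < nextChaserRenderFrameId) {
        // Re-simulate at most "maxChasingRenderFramesPerUpdate" frames now, spreading the
        // catch-up cost over several updates instead of spiking one frame's CPU budget.
        self.rollbackAndChase(prevChaserRenderFrameId, nextChaserRenderFrameId);
        self.chaserRenderFrameId = nextChaserRenderFrameId;
    }
}

Raising the cap from 4 to 9 lets the client converge in fewer updates after a rollback, at the cost of more re-simulation work within each of those updates.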

View File

@@ -362,7 +362,7 @@
"array": [
0,
0,
-216.67832656600567,
+217.36746944238692,
0,
0,
0,

View File

@@ -882,9 +882,14 @@ batchInputFrameIdRange=[${batch[0].inputFrameId}, ${batch[batch.length - 1].inpu
let effCnt = 0;
//console.log(`Received peer inputFrameUpsync batch w/ inputFrameId in [${batch[0].inputFrameId}, ${batch[batch.length - 1].inputFrameId}] for prediction assistance`);
+const renderedInputFrameIdUpper = gopkgs.ConvertToDelayedInputFrameId(self.renderFrameId);
for (let k in batch) {
const inputFrameDownsync = batch[k];
const inputFrameDownsyncId = inputFrameDownsync.inputFrameId;
+if (inputFrameDownsyncId < renderedInputFrameIdUpper) {
+// Avoid obfuscating already rendered history
+continue;
+}
if (inputFrameDownsyncId <= self.lastAllConfirmedInputFrameId) {
continue;
}
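The two guards above drop peer inputs that can no longer help: anything below "renderedInputFrameIdUpper" has already been consumed by rendered frames, and anything at or below "lastAllConfirmedInputFrameId" is already authoritative. For an entry that survives both, the usual rollback flow is to adopt it as the local prediction and, if that prediction differs from what was used before, pull the chaser back to the first render frame that consumed it. A hedged sketch follows; "recentInputCache" and "gopkgs.ConvertToFirstUsedRenderFrameId" are assumed helper names (the latter by analogy with "ConvertToDelayedInputFrameId"), not necessarily the exact frontend API.

// Hedged sketch: apply one surviving peer inputFrameDownsync (assumed helper names).
function applyPeerInputFrameDownsync(self, inputFrameDownsync) {
    const inputFrameId = inputFrameDownsync.inputFrameId;
    const prevPrediction = self.recentInputCache.getByFrameId(inputFrameId); // local prediction, if any
    const changed = (null == prevPrediction) || (prevPrediction.inputList.toString() != inputFrameDownsync.inputList.toString());
    self.recentInputCache.setByFrameId(inputFrameDownsync, inputFrameId); // peer input becomes the new prediction
    if (changed) {
        // The first render frame that consumed this input frame must be re-simulated.
        const firstUsedRenderFrameId = gopkgs.ConvertToFirstUsedRenderFrameId(inputFrameId); // assumed helper
        if (firstUsedRenderFrameId < self.chaserRenderFrameId) {
            self.chaserRenderFrameId = firstUsedRenderFrameId; // the bounded chase step will redo the history
        }
    }
}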

View File

@@ -80,12 +80,13 @@ NetworkDoctor.prototype.logSkippedRenderFrameCnt = function() {
}
NetworkDoctor.prototype.isTooFast = function() {
+return false; // NOTE: this early return short-circuits the heuristic below, effectively disabling "isTooFast" for now
const [sendingFps, srvDownsyncFps, peerUpsyncFps, rollbackFrames, skippedRenderFrameCnt] = this.stats();
if (sendingFps >= this.inputRateThreshold + 2) {
// Don't send too fast
return true;
} else if (sendingFps >= this.inputRateThreshold && srvDownsyncFps >= this.inputRateThreshold) {
-// At least my network is OK for both TX & RX directions -- the PING value might help as supplementary information here to confirm that the "selfPlayer" is not lagged in RX (which results in the "rollbackFrames"), but it's not necessary -- a significant lag within the "inputFrameDownsyncQ" will reduce "srvDownsyncFps".
+// An outstanding lag within the "inputFrameDownsyncQ" will reduce "srvDownsyncFps"; HOWEVER, a constant lag wouldn't impact "srvDownsyncFps"! On native platforms the PING value might help as supplementary information to confirm that the "selfPlayer" is not lagged within the time accounted for by the "inputFrameDownsyncQ".
if (rollbackFrames >= this.rollbackFramesThreshold) {
// I got many frames rolled back while none of my peers effectively helped my prediction. Deliberately not using "peerUpsyncThreshold" here, because when using UDP p2p upsync broadcasting we expect to receive effective p2p upsyncs from every other player.
return true;
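Elsewhere in the sending path, this check is typically consulted to decide whether to hold an upsync batch back for one interval. A hedged usage sketch with assumed names ("networkDoctor", "sendInputFrameUpsyncBatch", "logSending"), not the exact caller code:

// Hedged usage sketch: throttle upsync sending when the doctor reports "too fast" (assumed names).
if (self.networkDoctor.isTooFast()) {
    // Hold this batch back; it can be merged into a later upsync.
} else {
    self.sendInputFrameUpsyncBatch(batch);
    self.networkDoctor.logSending(batch[0].inputFrameId, batch[batch.length - 1].inputFrameId);
}

With the unconditional "return false;" added at the top of "isTooFast", the throttling branch is never taken, i.e. the heuristic is switched off in this commit.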