From 3e54670a1bae13afa4f7a97d01dd03d3e20764da Mon Sep 17 00:00:00 2001 From: genxium Date: Thu, 15 Dec 2022 14:44:50 +0800 Subject: [PATCH] Updated ConcerningEdgeCases.md --- ConcerningEdgeCases.md | 8 ++++++-- battle_srv/models/room.go | 16 ++++++++++++++-- frontend/assets/scenes/login.fire | 2 +- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/ConcerningEdgeCases.md b/ConcerningEdgeCases.md index 9534f89..ac51ebd 100644 --- a/ConcerningEdgeCases.md +++ b/ConcerningEdgeCases.md @@ -1,3 +1,7 @@ +# What to be concerned for internet syncing +1. Server received too late (solution: force confirmation) +2. Client received too late (solution: prediction and frame chasing, big impact on user experience because the graphics will be inconsistent if mismatches occur too often) + # Potential avalanche from local lag Under the current "input delay" algorithm, the lag of a single player would cause all the other players to receive outdated commands, e.g. when at a certain moment - player#1: renderFrameId = 100, significantly lagged due to local CPU overheated @@ -7,9 +11,9 @@ Under the current "input delay" algorithm, the lag of a single player would caus players #2, #3 #4 would receive "outdated(in their subjective feelings) but all-confirmed commands" from then on, thus forced to rollback and chase many frames - the lag due to "large range of frame-chasing" would then further deteriorate the situation - like an avalanche. -In a "no-server & p2p" setup, I couldn't think of a proper way to cope with such edge case. Solely on the frontend we could only mitigate the impact to players #2, #3, #4, e.g. a potential lag due to "large range of frame-chasing" is proactively avoided in `/frontend/assets/scripts/Map.js, function update(dt)`. +In a "no-server & p2p" setup, I couldn't think of a proper way to cope with such edge case. Solely on the frontend we could only mitigate the impact to players #2, #3, #4, e.g. 
a potential lag due to "large range of frame-chasing" is proactively avoided in `/frontend/assets/scripts/Map.js, function update(dt)`. -However in a "server as authority" setup, the server could force confirming an inputFrame without player#1's upsync, and notify player#1 to apply a "roomDownsyncFrame" as well as drop all its outdated local inputFrames. +To be fair, **a "p2p" setup can reduce round-trip to single-trip**, but w/o a point of authority in such case player#1 needs a way to recognize the slowness (e.g. check the received peer inputs) and ticks faster for a while to catch up; in contrast in a "server as authority" setup, the server could force confirming an inputFrame without player#1's upsync, and notify player#1 to apply a "roomDownsyncFrame" as well as drop all its outdated local inputFrames. # Start up frames renderFrameId | generatedInputFrameId | toApplyInputFrameId diff --git a/battle_srv/models/room.go b/battle_srv/models/room.go index a3a44b9..5a56440 100644 --- a/battle_srv/models/room.go +++ b/battle_srv/models/room.go @@ -1732,8 +1732,20 @@ func (pR *Room) downsyncToAllPlayers(inputsBufferSnapshot *InputsBufferSnapshot) The use of "downsyncLoop of each player" also waives the need of guarding each "pR.PlayerDownsyncSessionDict[playerId]" from multithread-access (e.g. by a "pR.PlayerDownsyncSessionMutexDict[playerId]"), i.e. Gorilla v1.2.0 "conn.WriteMessage" isn't thread-safe https://github.com/gorilla/websocket/blob/v1.2.0/conn.go#L585. 
 */
-		playerDownsyncChan <- (*inputsBufferSnapshot)
-		Logger.Debug(fmt.Sprintf("Sent inputsBufferSnapshot(refRenderFrameId:%d, unconfirmedMask:%v) to for (roomId: %d, playerId:%d, playerDownsyncChan:%p)#1", inputsBufferSnapshot.RefRenderFrameId, inputsBufferSnapshot.UnconfirmedMask, pR.Id, playerId, playerDownsyncChan))
+		if player, existent := pR.Players[playerId]; existent {
+			playerBattleState := atomic.LoadInt32(&(player.BattleState))
+			switch playerBattleState {
+			case PlayerBattleStateIns.DISCONNECTED,
+				PlayerBattleStateIns.LOST,
+				PlayerBattleStateIns.EXPELLED_DURING_GAME,
+				PlayerBattleStateIns.EXPELLED_IN_DISMISSAL,
+				PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK,
+				PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK:
+				continue
+			}
+			playerDownsyncChan <- (*inputsBufferSnapshot)
+			// Logger.Info(fmt.Sprintf("Sent inputsBufferSnapshot(refRenderFrameId:%d, unconfirmedMask:%v) to for (roomId: %d, playerId:%d, playerDownsyncChan:%p)#1", inputsBufferSnapshot.RefRenderFrameId, inputsBufferSnapshot.UnconfirmedMask, pR.Id, playerId, playerDownsyncChan))
+		}
 	}
 }
 
diff --git a/frontend/assets/scenes/login.fire b/frontend/assets/scenes/login.fire
index f2acb21..e5bc0e5 100644
--- a/frontend/assets/scenes/login.fire
+++ b/frontend/assets/scenes/login.fire
@@ -440,7 +440,7 @@
       "array": [
         0,
         0,
-        216.67520680312998,
+        216.19964242526865,
         0,
         0,
         0,