From 847607f3e6d5295856d0ff0d7b5dcec436dea096 Mon Sep 17 00:00:00 2001
From: genxium
Date: Mon, 19 Dec 2022 19:51:55 +0800
Subject: [PATCH] Further enhanced comments.

---
 battle_srv/models/room.go               | 18 +++++++++---------
 frontend/assets/scripts/Map.js          |  5 +++++
 frontend/assets/scripts/WsSessionMgr.js |  2 +-
 3 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/battle_srv/models/room.go b/battle_srv/models/room.go
index 76942f2..9c1666f 100644
--- a/battle_srv/models/room.go
+++ b/battle_srv/models/room.go
@@ -1175,11 +1175,13 @@ func (pR *Room) markConfirmationIfApplicable(inputFrameUpsyncBatch []*InputFrame
     if 0 < newAllConfirmedCount {
         /*
-            [WARNING]
+            [WARNING]
 
-            If "pR.InputsBufferLock" was previously held by "doBattleMainLoopPerTickBackendDynamicsWithProperLocking", then "snapshotStFrameId" would be just (pR.LastAllConfirmedInputFrameId - newAllConfirmedCount).
+            If "pR.InputsBufferLock" was previously held by "doBattleMainLoopPerTickBackendDynamicsWithProperLocking", then "snapshotStFrameId" would simply be (LastAllConfirmedInputFrameId - newAllConfirmedCount).
 
-            However if "pR.InputsBufferLock" was previously held by another "OnBattleCmdReceived", "snapshotStFrameId" might be smaller than (pR.LastAllConfirmedInputFrameId - newAllConfirmedCount)!
+            However, if "pR.InputsBufferLock" was previously held by another "OnBattleCmdReceived", the proper value for "snapshotStFrameId" might be smaller than (pR.LastAllConfirmedInputFrameId - newAllConfirmedCount) -- but why? Since this whole function is already wrapped in "InputsBufferLock", the order of snapshots generated by "markConfirmationIfApplicable" is preserved for sending, so isn't (LastAllConfirmedInputFrameId - newAllConfirmedCount) good enough here?
+
+            Unfortunately not: for a reconnected player to recover asap (with BackendDynamicsEnabled, of course), we check READDED_BATTLE_COLLIDER_ACKED in "downsyncToSinglePlayer" -- which could be called right after "markConfirmationIfApplicable" without going through "forceConfirmationIfApplicable" -- and if a READDED_BATTLE_COLLIDER_ACKED player is found there, we need a proper "(refRenderFrameId, snapshotStFrameId)" pair for that player!
         */
 
         snapshotStFrameId := (pR.LastAllConfirmedInputFrameId - newAllConfirmedCount)
         refRenderFrameIdIfNeeded := pR.CurDynamicsRenderFrameId - 1
@@ -1767,14 +1769,12 @@ func (pR *Room) downsyncToAllPlayers(inputsBufferSnapshot *InputsBufferSnapshot)
             break
         }
         /*
-            [WARNING]
+            [WARNING] There's a tradeoff here: if the `ACTIVE SLOW TICKER` doesn't resume for a long period of time, the current approach is to kick it out via "connWatchdog" instead of forcing a resync of all players in the same battle all the way along.
 
-            There's a tradeoff here, if the `ACTIVE SLOW TICKER` doesn't resume for a long period of time, the current approach is to kick it out by "connWatchdog" instead of forcing resync of all players in the same battle all the way along.
+            [FIXME]
+            In practice, I tested in an internet environment by toggling player#1 "CPU throttling: 1x -> 4x -> 1x -> 6x -> 1x" and checked the logs of all players, which showed that "all received inputFrameIds are consecutive for all players", yet not forcing a resync of all players here still results in occasional inconsistent graphics for the `ACTIVE NORMAL TICKER`s.
 
-            [FIXME]
-            In practice, I tested in internet environment by toggling player#1 "CPU throttling: 1x -> 4x -> 1x -> 6x -> 1x" and checked the logs of all players which showed that "all received inputFrameIds are consecutive for all players", yet not forcing resync of all players here still result in occasional inconsistent graphics for the `ACTIVE NORMAL TICKER`s.
-
-            More investigation into this issue is needed, it's possible that the inconsistent graphics is just a result of difference of backend/frontend collision calculations, yet before it's totally resolved we'd keep forcing resync here.
+            More investigation into this issue is needed; it's possible that the inconsistent graphics are just a result of differences between the backend and frontend collision calculations, but until the issue is fully resolved we'll keep forcing a resync here.
         */
         thatPlayerJoinMask := uint64(1 << uint32(player.JoinIndex-1))
         isActiveSlowTicker := (0 < (thatPlayerJoinMask & inputsBufferSnapshot.UnconfirmedMask)) && (PlayerBattleStateIns.ACTIVE == playerBattleState)
diff --git a/frontend/assets/scripts/Map.js b/frontend/assets/scripts/Map.js
index d94717f..9d6b0b0 100644
--- a/frontend/assets/scripts/Map.js
+++ b/frontend/assets/scripts/Map.js
@@ -588,6 +588,11 @@ cc.Class({
       const shouldForceDumping1 = (window.MAGIC_ROOM_DOWNSYNC_FRAME_ID.BATTLE_START == rdf.id);
       const shouldForceDumping2 = (rdf.id > self.renderFrameId + self.renderFrameIdLagTolerance);
       const shouldForceResync = rdf.shouldForceResync;
+      /*
+        TODO
+
+        If "BackendUnconfirmedMask" is non-all-1 yet contains the current player, show a label/button to hint at manual reconnection. Note that the continuity of "recentInputCache" is not a good indicator here, because due to network delay a player hit by a [type#1 forceConfirmation] might merely be lagging in upsync networking while still holding all consecutive inputFrameIds locally.
+      */
       const [dumpRenderCacheRet, oldStRenderFrameId, oldEdRenderFrameId] = (shouldForceDumping1 || shouldForceDumping2 || shouldForceResync) ? self.recentRenderCache.setByFrameId(rdf, rdf.id) : [window.RING_BUFF_CONSECUTIVE_SET, null, null];
       if (window.RING_BUFF_FAILED_TO_SET == dumpRenderCacheRet) {
diff --git a/frontend/assets/scripts/WsSessionMgr.js b/frontend/assets/scripts/WsSessionMgr.js
index 6d35914..e797ae9 100644
--- a/frontend/assets/scripts/WsSessionMgr.js
+++ b/frontend/assets/scripts/WsSessionMgr.js
@@ -144,7 +144,7 @@ window.initPersistentSessionClient = function(onopenCb, expectedRoomId) {
     if (null == evt || null == evt.data) {
       return;
     }
-    // FIXME: In practice, it seems like the thread invoking "onmessage" could be different from "Map.update(dt)", which makes it necessary to guard "recentRenderCache & recentInputCache" for "_generateInputFrameUpsync & rollbackAndChase & onRoomDownsyncFrame & onInputFrameDownsyncBatch" to avoid mysterious RAM contamination, but there's no explicit mutex in JavaScript for browsers.
+    // FIXME: In practice, it seems like the thread invoking "onmessage" could be different from the one running "Map.update(dt)", which makes it necessary to guard "recentRenderCache & recentInputCache" for "_generateInputFrameUpsync & rollbackAndChase & onRoomDownsyncFrame & onInputFrameDownsyncBatch" to avoid mysterious RAM contamination, but there's no explicit mutex in JavaScript for browsers -- this issue was observed in Firefox (108.0.1, 64-bit, Windows 11) but not in Chrome (108.0.5359.125, Official Build, 64-bit, Windows 11); just set a breakpoint in "Map.rollbackAndChase", then check whether the logs of "onmessage" are still printed and whether the values of "recentRenderCache & recentInputCache" change in the console.
     try {
       const resp = window.pb.protos.WsResp.decode(new Uint8Array(evt.data));
       //console.log(`Got non-empty onmessage decoded: resp.act=${resp.act}`);
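
For reference, the join-mask checks that both the room.go comment (thatPlayerJoinMask & UnconfirmedMask) and the Map.js TODO (BackendUnconfirmedMask being non-all-1 yet containing the current player) rely on boil down to the arithmetic sketched below. This is only an illustrative, standalone sketch assuming 1-based join indices; the helpers "isSlowTicker" and "shouldHintReconnection" are hypothetical names for this note and are not part of the codebase or this patch.

package main

import "fmt"

// Illustrative sketch only: simplified, standalone versions of the bitmask
// checks discussed in the comments above. The mask plays the role of
// "UnconfirmedMask"/"BackendUnconfirmedMask"; bit (joinIndex-1) being set means
// that player's inputs are still unconfirmed for the snapshot.

// isSlowTicker reports whether the player at 1-based joinIndex is still
// unconfirmed, i.e. its join mask overlaps the unconfirmed mask, mirroring
// "thatPlayerJoinMask & inputsBufferSnapshot.UnconfirmedMask" in the diff.
func isSlowTicker(unconfirmedMask uint64, joinIndex int32) bool {
	joinMask := uint64(1) << uint32(joinIndex-1)
	return 0 < (unconfirmedMask & joinMask)
}

// shouldHintReconnection mirrors the Map.js TODO: hint manual reconnection only
// when the mask is NOT all-1 for the battle (i.e. not every player was force
// confirmed at once) yet still contains the current player.
func shouldHintReconnection(unconfirmedMask uint64, selfJoinIndex int32, playerCnt int) bool {
	allOnesMask := uint64(1)<<uint(playerCnt) - 1 // e.g. 0b11 for a 2-player battle
	return unconfirmedMask != allOnesMask && isSlowTicker(unconfirmedMask, selfJoinIndex)
}

func main() {
	// A 2-player battle where only player#2 is lagging behind.
	var mask uint64 = 0b10
	fmt.Println(isSlowTicker(mask, 1))              // false
	fmt.Println(isSlowTicker(mask, 2))              // true
	fmt.Println(shouldHintReconnection(mask, 2, 2)) // true: non-all-1 and contains player#2
	fmt.Println(shouldHintReconnection(0b11, 2, 2)) // false: all players unconfirmed at once
}

In the patched Map.js such a check would naturally sit next to the shouldForceResync handling, but the above only demonstrates the mask arithmetic and is not a proposed change.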