Mirror of https://github.com/genxium/DelayNoMore
	Updated ConcerningEdgeCases.md
@@ -1,3 +1,7 @@
# What to be concerned about for internet syncing
1. The server receives an input too late (solution: force confirmation)
2. A client receives an input too late (solution: prediction and frame chasing; this has a big impact on user experience, because the graphics become inconsistent if mismatches occur too often)

# Potential avalanche from local lag
Under the current "input delay" algorithm, the lag of a single player causes all the other players to receive outdated commands, e.g. at a certain moment
- player#1: renderFrameId = 100, significantly lagged due to local CPU overheating
@@ -9,7 +13,7 @@ players #2, #3 #4 would receive "outdated(in their subjective feelings) but all-

In a "no-server & p2p" setup, I couldn't think of a proper way to cope with such an edge case. Solely on the frontend we could only mitigate the impact on players #2, #3, #4, e.g. a potential lag due to a "large range of frame-chasing" is proactively avoided in `<proj-root>/frontend/assets/scripts/Map.js, function update(dt)`.

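Below is a minimal sketch of that frontend mitigation, transliterated to Go purely for illustration (the real logic lives in the JavaScript `update(dt)` of `Map.js`; the constant `maxChasingRenderFramesPerUpdate`, the function name and the `step` callback are assumptions, not the project's actual identifiers). The idea is to cap how many frames are re-simulated per render tick, so that one oversized catch-up is spread over several ticks instead of itself causing a visible stall.

```go
package main

import "fmt"

// Assumed tuning constant: at most this many frames are re-simulated per render tick.
const maxChasingRenderFramesPerUpdate = 5

// chaseRolledBackRenderFrames advances chaserRenderFrameId towards renderFrameId,
// re-simulating at most maxChasingRenderFramesPerUpdate frames per call via "step".
// Spreading the catch-up over several ticks avoids a long stall inside one update(dt).
func chaseRolledBackRenderFrames(chaserRenderFrameId, renderFrameId int32, step func(frameId int32)) int32 {
	next := chaserRenderFrameId + maxChasingRenderFramesPerUpdate
	if next > renderFrameId {
		next = renderFrameId
	}
	for f := chaserRenderFrameId; f < next; f++ {
		step(f) // re-apply the confirmed inputs for frame f
	}
	return next
}

func main() {
	chaser, latest := int32(100), int32(112)
	for chaser < latest {
		chaser = chaseRolledBackRenderFrames(chaser, latest, func(frameId int32) {
			fmt.Println("re-simulating renderFrame", frameId)
		})
	}
}
```
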
However, in a "server as authority" setup, the server can force-confirm an inputFrame without player#1's upsync, and notify player#1 to apply a "roomDownsyncFrame" as well as drop all its outdated local inputFrames.
To be fair, **a "p2p" setup can reduce a round-trip to a single trip**, but without a point of authority, player#1 then needs a way to recognize its own slowness (e.g. by checking the received peer inputs) and tick faster for a while to catch up; in contrast, in a "server as authority" setup, the server can force-confirm an inputFrame without player#1's upsync, and notify player#1 to apply a "roomDownsyncFrame" as well as drop all its outdated local inputFrames.
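A hedged sketch of what "force confirmation" could look like on the server side. The type and field names below (`InputFrameDownsync`, `InputList`, `ConfirmedList`) are simplified assumptions for illustration, not the repository's exact definitions: when the server decides it has waited long enough for player#1, it fills the missing slot by reusing that player's last confirmed input and marks the whole inputFrame confirmed, so the downsync to players #2, #3, #4 can proceed without further delay.

```go
package main

import "fmt"

// Simplified, assumed types for illustration only.
type InputFrameDownsync struct {
	InputFrameId  int32
	InputList     []uint64 // one encoded input per player, indexed by joinIndex-1
	ConfirmedList uint64   // bitmask: bit(joinIndex-1) set => that player's input is confirmed
}

// forceConfirmInputFrame fills every unconfirmed slot of "curr" with the same player's
// input from the previous frame (i.e. "hold last input" prediction), then marks the
// whole frame confirmed so it can be broadcast and will never be rolled back.
func forceConfirmInputFrame(curr, prev *InputFrameDownsync, playerCount int) {
	for i := 0; i < playerCount; i++ {
		mask := uint64(1) << uint(i)
		if curr.ConfirmedList&mask == 0 && prev != nil {
			curr.InputList[i] = prev.InputList[i] // reuse the lagging player's last known input
		}
	}
	curr.ConfirmedList = uint64(1)<<uint(playerCount) - 1 // all-confirmed mask
}

func main() {
	prev := &InputFrameDownsync{InputFrameId: 41, InputList: []uint64{3, 2, 0, 1}, ConfirmedList: 0b1111}
	curr := &InputFrameDownsync{InputFrameId: 42, InputList: []uint64{0, 2, 4, 1}, ConfirmedList: 0b1110} // player#1 (bit 0) missing
	forceConfirmInputFrame(curr, prev, 4)
	fmt.Printf("inputFrame#%d confirmedList=%04b inputs=%v\n", curr.InputFrameId, curr.ConfirmedList, curr.InputList)
}
```

Player#1 would then receive a downsync telling it which inputFrames were overridden, so it can drop its conflicting local ones, as described above.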

# Start up frames
renderFrameId      |   generatedInputFrameId    |  toApplyInputFrameId

@@ -1732,8 +1732,20 @@ func (pR *Room) downsyncToAllPlayers(inputsBufferSnapshot *InputsBufferSnapshot)

		   The use of "downsyncLoop of each player" also waives the need of guarding each "pR.PlayerDownsyncSessionDict[playerId]" from multithreaded access (e.g. by a "pR.PlayerDownsyncSessionMutexDict[playerId]"), because Gorilla v1.2.0 "conn.WriteMessage" isn't thread-safe, see https://github.com/gorilla/websocket/blob/v1.2.0/conn.go#L585.
		*/
		if player, existent := pR.Players[playerId]; existent {
			playerBattleState := atomic.LoadInt32(&(player.BattleState))
			switch playerBattleState {
			case PlayerBattleStateIns.DISCONNECTED,
				PlayerBattleStateIns.LOST,
				PlayerBattleStateIns.EXPELLED_DURING_GAME,
				PlayerBattleStateIns.EXPELLED_IN_DISMISSAL,
				PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK,
				PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK:
				// Skip players that can't receive a downsync right now. Note that Go switch
				// cases don't fall through, so these states must share a single case clause
				// for the "continue" to apply to all of them.
				continue
			}
			playerDownsyncChan <- (*inputsBufferSnapshot)
			Logger.Debug(fmt.Sprintf("Sent inputsBufferSnapshot(refRenderFrameId:%d, unconfirmedMask:%v) for (roomId: %d, playerId:%d, playerDownsyncChan:%p)#1", inputsBufferSnapshot.RefRenderFrameId, inputsBufferSnapshot.UnconfirmedMask, pR.Id, playerId, playerDownsyncChan))
		}
	}
}
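The comment above motivates this design: because gorilla/websocket's `conn.WriteMessage` isn't safe for concurrent writers, routing all outbound traffic for one player through that player's dedicated channel and goroutine serializes the writes without any per-player mutex. A minimal sketch of such a per-player writer goroutine follows; the snapshot fields and the assumption that the payload is already serialized are illustrative, not the repository's exact code.

```go
package room

import "github.com/gorilla/websocket"

// InputsBufferSnapshot stands in for the room's real snapshot type (simplified assumption).
type InputsBufferSnapshot struct {
	RefRenderFrameId int32
	UnconfirmedMask  uint64
	Payload          []byte // already-serialized inputFrames for this player
}

// downsyncLoop is a sketch of the per-player writer goroutine: it is the only
// goroutine that ever calls WriteMessage on this player's connection, so the
// non-thread-safe gorilla/websocket write path needs no extra locking.
func downsyncLoop(conn *websocket.Conn, playerDownsyncChan <-chan InputsBufferSnapshot) {
	for snapshot := range playerDownsyncChan {
		if err := conn.WriteMessage(websocket.BinaryMessage, snapshot.Payload); err != nil {
			return // write failed; let the room's disconnection handling mark the player accordingly
		}
	}
}
```

The producer in the diff above then only has to send the snapshot into `playerDownsyncChan`; whether that send blocks depends on how the channel is buffered.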

@@ -440,7 +440,7 @@
      "array": [
        0,
        0,
        216.67520680312998,
        216.19964242526865,
        0,
        0,
        0,