Mirror of https://github.com/genxium/DelayNoMore, synced 2024-12-26 11:48:56 +00:00

Commit 6bf70463fa: "Improved backend implementation."
Parent: e3d844abad
@@ -13,6 +13,7 @@ import (
 type PlayerBattleState struct {
 	ADDED_PENDING_BATTLE_COLLIDER_ACK   int32
 	READDED_PENDING_BATTLE_COLLIDER_ACK int32
+	READDED_BATTLE_COLLIDER_ACKED       int32
 	ACTIVE                              int32
 	DISCONNECTED                        int32
 	LOST                                int32
@@ -26,11 +27,12 @@ func InitPlayerBattleStateIns() {
 	PlayerBattleStateIns = PlayerBattleState{
 		ADDED_PENDING_BATTLE_COLLIDER_ACK:   0,
 		READDED_PENDING_BATTLE_COLLIDER_ACK: 1,
-		ACTIVE:                              2,
-		DISCONNECTED:                        3,
-		LOST:                                4,
-		EXPELLED_DURING_GAME:                5,
-		EXPELLED_IN_DISMISSAL:               6,
+		READDED_BATTLE_COLLIDER_ACKED:       2,
+		ACTIVE:                              3,
+		DISCONNECTED:                        4,
+		LOST:                                5,
+		EXPELLED_DURING_GAME:                6,
+		EXPELLED_IN_DISMISSAL:               7,
 	}
 }
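[Review note] The new `READDED_BATTLE_COLLIDER_ACKED` state slots in between the pending-ack states and `ACTIVE`, shifting every later constant up by one; anything comparing the raw integers must be updated in lockstep. A minimal sketch of the ack-time transitions this diff implies (the helper function is illustrative, not part of the codebase):

```go
// Illustrative only: the state transitions applied in OnPlayerBattleColliderAcked below.
// A freshly added player goes straight to ACTIVE; a re-added (reconnected) player
// parks at READDED_BATTLE_COLLIDER_ACKED until downsyncToSinglePlayer resyncs it.
func nextStateOnColliderAck(cur int32) int32 {
	switch cur {
	case PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK:
		return PlayerBattleStateIns.ACTIVE
	case PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK:
		return PlayerBattleStateIns.READDED_BATTLE_COLLIDER_ACKED
	default:
		return cur // other states are unaffected by a collider ack
	}
}
```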
@@ -147,6 +147,7 @@ type Room struct {
 	 * Moreover, during the invocation of `PlayerSignalToCloseDict`, the `Player` instance is supposed to be deallocated (though not synchronously).
 	 */
 	PlayerDownsyncSessionDict map[int32]*websocket.Conn
+	PlayerDownsyncSessionLock map[int32]*sync.Mutex // Guards [PlayerDownsyncSessionDict, each player.LastSentInputFrameId]
 	PlayerSignalToCloseDict   map[int32]SignalToCloseConnCbType
 	Score                     float32
 	State                     int32
@@ -157,7 +158,7 @@ type Room struct {
 	DismissalWaitGroup sync.WaitGroup
 	Barriers           map[int32]*Barrier
 	InputsBuffer       *RingBuffer // Indices are STRICTLY consecutive
-	InputsBufferLock   sync.Mutex
+	InputsBufferLock   sync.Mutex // Guards [InputsBuffer, LastAllConfirmedInputFrameId, LastAllConfirmedInputList, LastAllConfirmedInputFrameIdWithChange]
 	RenderFrameBuffer  *RingBuffer // Indices are STRICTLY consecutive
 	LastAllConfirmedInputFrameId           int32
 	LastAllConfirmedInputFrameIdWithChange int32
@@ -200,6 +201,7 @@ func (pR *Room) AddPlayerIfPossible(pPlayerFromDbInit *Player, session *websocke
 
 	pR.Players[playerId] = pPlayerFromDbInit
 	pR.PlayerDownsyncSessionDict[playerId] = session
+	pR.PlayerDownsyncSessionLock[playerId] = &sync.Mutex{}
 	pR.PlayerSignalToCloseDict[playerId] = signalToCloseConnOfThisPlayer
 	return true
 }
@@ -230,6 +232,7 @@ func (pR *Room) ReAddPlayerIfPossible(pTmpPlayerInstance *Player, session *webso
 	pEffectiveInRoomPlayerInstance.ColliderRadius = DEFAULT_PLAYER_RADIUS // Hardcoded
 
 	pR.PlayerDownsyncSessionDict[playerId] = session
+	pR.PlayerDownsyncSessionLock[playerId] = &sync.Mutex{}
 	pR.PlayerSignalToCloseDict[playerId] = signalToCloseConnOfThisPlayer
 
 	Logger.Warn("ReAddPlayerIfPossible finished.", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId), zap.Any("joinIndex", pEffectiveInRoomPlayerInstance.JoinIndex), zap.Any("playerBattleState", pEffectiveInRoomPlayerInstance.BattleState), zap.Any("roomState", pR.State), zap.Any("roomEffectivePlayerCount", pR.EffectivePlayerCount), zap.Any("AckingFrameId", pEffectiveInRoomPlayerInstance.AckingFrameId), zap.Any("AckingInputFrameId", pEffectiveInRoomPlayerInstance.AckingInputFrameId), zap.Any("LastSentInputFrameId", pEffectiveInRoomPlayerInstance.LastSentInputFrameId))
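[Review note] Both the add and re-add paths now allocate a per-player `sync.Mutex` alongside the session itself. A minimal sketch of the pattern this enables, assuming the `Room` fields shown above (the wrapper function is hypothetical):

```go
// Hypothetical wrapper: serialize writes to one player's websocket session
// without blocking concurrent sends to other players in the same room.
func (pR *Room) withSessionLock(playerId int32, send func()) {
	l := pR.PlayerDownsyncSessionLock[playerId]
	l.Lock()
	defer l.Unlock()
	send() // e.g. a sendSafely(..., /*needLockExplicitly=*/ false) call
}
```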
@@ -325,7 +328,9 @@ func (pR *Room) ChooseStage() error {
 }
 
 func (pR *Room) ConvertToInputFrameId(renderFrameId int32, inputDelayFrames int32) int32 {
-	// Specifically when "renderFrameId < inputDelayFrames", the result is 0.
+	if renderFrameId < inputDelayFrames {
+		return 0
+	}
 	return ((renderFrameId - inputDelayFrames) >> pR.InputScaleFrames)
 }
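[Review note] The early return replaces the old comment's claim with actual clamping, so small `renderFrameId` values can no longer feed a negative number into the right shift. A standalone replica of the patched mapping, using the defaults set in `OnDismissed` below (`InputDelayFrames = 8`, `InputScaleFrames = 2`):

```go
package main

import "fmt"

// Standalone replica of the patched ConvertToInputFrameId.
func convertToInputFrameId(renderFrameId, inputDelayFrames int32, inputScaleFrames uint32) int32 {
	if renderFrameId < inputDelayFrames {
		return 0 // clamp instead of shifting a negative value
	}
	return (renderFrameId - inputDelayFrames) >> inputScaleFrames
}

func main() {
	for _, r := range []int32{0, 7, 8, 11, 12, 16} {
		fmt.Printf("renderFrameId=%d -> inputFrameId=%d\n", r, convertToInputFrameId(r, 8, 2))
	}
	// 0..11 -> 0, 12..15 -> 1, 16..19 -> 2, i.e. one inputFrame per 4 renderFrames
}
```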
@@ -410,13 +415,13 @@ func (pR *Room) StartBattle() {
 	}()
 
 	pR.LastRenderFrameIdTriggeredAt = utils.UnixtimeNano()
+	battleStartedAt := utils.UnixtimeNano()
 	Logger.Info("The `battleMainLoop` is started for:", zap.Any("roomId", pR.Id))
 	for {
 		stCalculation := utils.UnixtimeNano()
 		elapsedNanosSinceLastFrameIdTriggered := stCalculation - pR.LastRenderFrameIdTriggeredAt
 		if elapsedNanosSinceLastFrameIdTriggered < pR.RollbackEstimatedDtNanos {
-			Logger.Debug(fmt.Sprintf("renderFrameId=%v@roomId=%v: Is backend running too fast? elapsedNanosSinceLastFrameIdTriggered=%v", pR.RenderFrameId, pR.Id, elapsedNanosSinceLastFrameIdTriggered))
+			Logger.Warn(fmt.Sprintf("renderFrameId=%v@roomId=%v: Is backend running too fast? elapsedNanosSinceLastFrameIdTriggered=%v", pR.RenderFrameId, pR.Id, elapsedNanosSinceLastFrameIdTriggered))
 		}
 
 		if pR.RenderFrameId > pR.BattleDurationFrames {
@@ -440,30 +445,44 @@ func (pR *Room) StartBattle() {
 				continue
 			}
 			kickoffFrame := pR.RenderFrameBuffer.GetByFrameId(0).(*RoomDownsyncFrame)
-			pR.sendSafely(kickoffFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_START, playerId)
+			pR.sendSafely(kickoffFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_START, playerId, true)
 		}
 		Logger.Info(fmt.Sprintf("In `battleMainLoop` for roomId=%v sent out kickoffFrame", pR.Id))
 	}
 
+		/*
+			[WARNING]
+			Golang "time.Sleep" is known to be taking longer than specified time to wake up at millisecond granularity, as discussed in https://github.com/golang/go/issues/44343
+			However, we assume that while "time.Sleep(16.67 ms)" might wake up after ~30ms, it still only covers at most 1 inputFrame generation.
+		*/
+		totalElapsedNanos := utils.UnixtimeNano() - battleStartedAt
+		nextRenderFrameId := int32((totalElapsedNanos + pR.dilutedRollbackEstimatedDtNanos - 1) / pR.dilutedRollbackEstimatedDtNanos) // fast ceiling
+		toSleepNanos := int64(0)
+		if nextRenderFrameId > pR.RenderFrameId {
+			prevRenderFrameId := pR.RenderFrameId
+			pR.RenderFrameId = nextRenderFrameId
 
 		dynamicsDuration := int64(0)
 
 		// Prefab and buffer backend inputFrameDownsync
 		if pR.BackendDynamicsEnabled {
-			unconfirmedMask, inputsBufferSnapshot := pR.doBattleMainLoopPerTickBackendDynamicsWithProperLocking(&dynamicsDuration)
+			unconfirmedMask, refRenderFrameId, inputsBufferSnapshot := pR.doBattleMainLoopPerTickBackendDynamicsWithProperLocking(prevRenderFrameId, &dynamicsDuration)
 			if 0 < unconfirmedMask {
-				// Otherwise no need to downsync immediately
-				Logger.Debug(fmt.Sprintf("roomId=%v, room.RenderFrameId=%v, room.CurDynamicsRenderFrameId=%v, room.LastAllConfirmedInputFrameId=%v, unconfirmedMask=%v", pR.Id, pR.RenderFrameId, pR.CurDynamicsRenderFrameId, pR.LastAllConfirmedInputFrameId, unconfirmedMask))
-				pR.downsyncToAllPlayers(pR.LastAllConfirmedInputFrameId, unconfirmedMask, inputsBufferSnapshot)
+				Logger.Warn(fmt.Sprintf("roomId=%v, room.RenderFrameId=%v, room.CurDynamicsRenderFrameId=%v, room.LastAllConfirmedInputFrameId=%v, unconfirmedMask=%v", pR.Id, pR.RenderFrameId, pR.CurDynamicsRenderFrameId, pR.LastAllConfirmedInputFrameId, unconfirmedMask))
+				pR.downsyncToAllPlayers(refRenderFrameId, unconfirmedMask, inputsBufferSnapshot)
 			}
 		}
 
-		pR.RenderFrameId++
 		pR.LastRenderFrameIdTriggeredAt = utils.UnixtimeNano()
-		elapsedInCalculation := (pR.LastRenderFrameIdTriggeredAt - stCalculation)
+			elapsedInCalculation := (utils.UnixtimeNano() - stCalculation)
+			toSleepNanos = pR.dilutedRollbackEstimatedDtNanos - elapsedInCalculation // don't sleep if "nextRenderFrame == pR.RenderFrameId"
 		if elapsedInCalculation > pR.RollbackEstimatedDtNanos {
-			Logger.Warn(fmt.Sprintf("SLOW FRAME! Elapsed time statistics: roomId=%v, room.RenderFrameId=%v, elapsedInCalculation=%v ns, dynamicsDuration=%v ns, RollbackEstimatedDtNanos=%v", pR.Id, pR.RenderFrameId, elapsedInCalculation, dynamicsDuration, pR.RollbackEstimatedDtNanos))
+			Logger.Warn(fmt.Sprintf("SLOW FRAME! Elapsed time statistics: roomId=%v, room.RenderFrameId=%v, elapsedInCalculation=%v ns, dynamicsDuration=%v ns, RollbackEstimatedDtNanos=%v, dilutedRollbackEstimatedDtNanos=%v", pR.Id, pR.RenderFrameId, elapsedInCalculation, dynamicsDuration, pR.RollbackEstimatedDtNanos, pR.dilutedRollbackEstimatedDtNanos))
 		}
-		time.Sleep(time.Duration(pR.dilutedRollbackEstimatedDtNanos - elapsedInCalculation))
+		}
 
+		time.Sleep(time.Duration(toSleepNanos))
 	}
 }
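[Review note] The loop no longer does `pR.RenderFrameId++` per wakeup; it derives the frame id from total elapsed wall-clock time with an integer "fast ceiling", so an oversleeping `time.Sleep` (golang/go#44343) makes the loop jump ahead instead of accumulating drift. A standalone replica of the arithmetic, assuming the diluted dt of 18181817 ns computed in `OnDismissed` below:

```go
package main

import "fmt"

func main() {
	var dilutedDt int64 = 18181817 // int64(16666666.0 * 60 / 55), see OnDismissed below
	for _, elapsedNanos := range []int64{0, 18181817, 40000000} {
		// fast ceiling: ceil(elapsed/dt) without floating point
		nextRenderFrameId := int32((elapsedNanos + dilutedDt - 1) / dilutedDt)
		fmt.Printf("elapsed=%dns -> nextRenderFrameId=%d\n", elapsedNanos, nextRenderFrameId)
	}
	// 0 -> 0; exactly one dt -> 1; a ~40ms oversleep -> 3, covering several frames in one tick
}
```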
@@ -499,11 +518,15 @@ func (pR *Room) OnBattleCmdReceived(pReq *WsReq) {
 	atomic.StoreInt32(&(player.AckingFrameId), ackingFrameId)
 	atomic.StoreInt32(&(player.AckingInputFrameId), ackingInputFrameId)
 
-	newAllConfirmedCount := pR.markConfirmationIfApplicable(inputFrameUpsyncBatch, playerId, player)
+	newAllConfirmedCount, refRenderFrameIdIfNeeded, inputsBufferSnapshot := pR.markConfirmationIfApplicable(inputFrameUpsyncBatch, playerId, player)
 
+	if swapped := atomic.CompareAndSwapInt32(&pR.State, RoomBattleStateIns.IN_BATTLE, RoomBattleStateIns.IN_BATTLE); !swapped {
+		return
+	}
 	// [WARNING] By now "pR.InputsBufferLock" is unlocked
 	if 0 < newAllConfirmedCount {
 		// Downsync new all-confirmed inputFrames asap
-		pR.downsyncToAllPlayers(pR.LastAllConfirmedInputFrameId, uint64(0), pR.createInputsBufferSnapshot())
+		pR.downsyncToAllPlayers(refRenderFrameIdIfNeeded, uint64(0), inputsBufferSnapshot)
 	}
 }
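[Review note] The added `CompareAndSwapInt32(&pR.State, IN_BATTLE, IN_BATTLE)` swaps a value for itself, i.e. it is used purely as an atomic "is the room still IN_BATTLE?" check, dropping late upsyncs that race with settlement before any downsync is attempted. A plain atomic load expresses the same predicate (sketch, assuming `pR.State` is only ever mutated atomically):

```go
// Equivalent predicate to the CAS-with-identical-values idiom above; the CAS
// variant additionally forces a read-modify-write, but the accept/reject
// decision for the incoming upsync batch is identical.
func (pR *Room) isStillInBattle() bool {
	return atomic.LoadInt32(&pR.State) == RoomBattleStateIns.IN_BATTLE
}
```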
@@ -555,7 +578,7 @@ func (pR *Room) StopBattleForSettlement() {
 			Players:        toPbPlayers(pR.Players, false),
 			CountdownNanos: -1, // TODO: Replace this magic constant!
 		}
-		pR.sendSafely(&assembledFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_STOPPED, playerId)
+		pR.sendSafely(&assembledFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_STOPPED, playerId, true)
 	}
 	// Note that `pR.onBattleStoppedForSettlement` will be called by `battleMainLoop`.
 }
@@ -584,7 +607,7 @@ func (pR *Room) onBattlePrepare(cb BattleStartCbType) {
 
 	Logger.Info("Sending out frame for RoomBattleState.PREPARE:", zap.Any("battleReadyToStartFrame", battleReadyToStartFrame))
 	for _, player := range pR.Players {
-		pR.sendSafely(battleReadyToStartFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_READY_TO_START, player.Id)
+		pR.sendSafely(battleReadyToStartFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_READY_TO_START, player.Id, true)
 	}
 
 	battlePreparationNanos := int64(6000000000)
@@ -654,6 +677,7 @@ func (pR *Room) OnDismissed() {
 	pR.PlayersArr = make([]*Player, pR.Capacity)
 	pR.CollisionSysMap = make(map[int32]*resolv.Object)
 	pR.PlayerDownsyncSessionDict = make(map[int32]*websocket.Conn)
+	pR.PlayerDownsyncSessionLock = make(map[int32]*sync.Mutex, pR.Capacity)
 	pR.PlayerSignalToCloseDict = make(map[int32]SignalToCloseConnCbType)
 	pR.JoinIndexBooleanArr = make([]bool, pR.Capacity)
 	pR.Barriers = make(map[int32]*Barrier)
@@ -668,13 +692,13 @@ func (pR *Room) OnDismissed() {
 	pR.RenderFrameId = 0
 	pR.CurDynamicsRenderFrameId = 0
 	pR.InputDelayFrames = 8
-	pR.NstDelayFrames = 4
+	pR.NstDelayFrames = 8
 	pR.InputScaleFrames = uint32(2)
 	pR.ServerFps = 60
 	pR.RollbackEstimatedDtMillis = 16.667 // Use fixed-and-low-precision to mitigate the inconsistent floating-point-number issue between Golang and JavaScript
 	pR.RollbackEstimatedDtNanos = 16666666 // A little smaller than the actual per frame time, just for logging FAST FRAME
-	dilutedServerFps := float64(59.5)
-	pR.dilutedRollbackEstimatedDtNanos = pR.RollbackEstimatedDtNanos * int64(float64(pR.ServerFps)/dilutedServerFps)
+	dilutedServerFps := float64(55.0)
+	pR.dilutedRollbackEstimatedDtNanos = int64(float64(pR.RollbackEstimatedDtNanos) * float64(pR.ServerFps) / dilutedServerFps)
 	pR.BattleDurationFrames = 30 * pR.ServerFps
 	pR.BattleDurationNanos = int64(pR.BattleDurationFrames) * (pR.RollbackEstimatedDtNanos + 1)
 	pR.InputFrameUpsyncDelayTolerance = 2
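[Review note] The rewritten dilution line fixes an integer-truncation bug: previously `int64(float64(pR.ServerFps)/dilutedServerFps)` evaluated `int64(60/59.5) == int64(1.008...) == 1`, so the "diluted" dt silently equaled the plain dt and the dilution was a no-op. The new form multiplies in float first, and `dilutedServerFps` drops to 55. Checking the arithmetic:

```go
package main

import "fmt"

func main() {
	var rollbackDtNanos int64 = 16666666
	serverFps, dilutedServerFps := float64(60), float64(55.0)

	oldDt := rollbackDtNanos * int64(serverFps/dilutedServerFps)            // ratio truncates to 1: no-op
	newDt := int64(float64(rollbackDtNanos) * serverFps / dilutedServerFps) // multiply before truncating

	fmt.Println(oldDt, newDt) // 16666666 18181817: frames are now actually paced at ~55 FPS
}
```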
@@ -806,6 +830,7 @@ func (pR *Room) onPlayerLost(playerId int32) {
 func (pR *Room) clearPlayerNetworkSession(playerId int32) {
 	if _, y := pR.PlayerDownsyncSessionDict[playerId]; y {
 		Logger.Debug("clearPlayerNetworkSession:", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId))
+		// [WARNING] No need to delete "pR.PlayerDownsyncSessionLock[playerId]" here!
 		delete(pR.PlayerDownsyncSessionDict, playerId)
 		delete(pR.PlayerSignalToCloseDict, playerId)
 	}
@@ -869,8 +894,10 @@ func (pR *Room) OnPlayerBattleColliderAcked(playerId int32) bool {
 		return false
 	}
 
+	shouldTryToStartBattle := true
 	Logger.Debug(fmt.Sprintf("OnPlayerBattleColliderAcked-before: roomId=%v, roomState=%v, targetPlayerId=%v, targetPlayerBattleState=%v, capacity=%v, EffectivePlayerCount=%v", pR.Id, pR.State, targetPlayer.Id, targetPlayer.BattleState, pR.Capacity, pR.EffectivePlayerCount))
-	switch targetPlayer.BattleState {
+	targetPlayerBattleState := atomic.LoadInt32(&(targetPlayer.BattleState))
+	switch targetPlayerBattleState {
 	case PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK:
 		playerAckedFrame := &RoomDownsyncFrame{
 			Id: pR.RenderFrameId,
@@ -878,7 +905,7 @@ func (pR *Room) OnPlayerBattleColliderAcked(playerId int32) bool {
 		}
 
 		// Broadcast normally added player info to all players in the same room
-		for thatPlayerId, thatPlayer := range pR.Players {
+		for _, thatPlayer := range pR.Players {
 			/*
 				[WARNING]
 				This `playerAckedFrame` is the first ever "RoomDownsyncFrame" for every "PersistentSessionClient on the frontend", and it goes right after each "BattleColliderInfo".
@@ -887,28 +914,29 @@ func (pR *Room) OnPlayerBattleColliderAcked(playerId int32) bool {
 
 				This function is triggered by an upsync message via WebSocket, thus downsync sending is also available by now.
 			*/
+			thatPlayerId := thatPlayer.Id
 			thatPlayerBattleState := atomic.LoadInt32(&(thatPlayer.BattleState))
 			Logger.Debug(fmt.Sprintf("OnPlayerBattleColliderAcked-middle: roomId=%v, roomState=%v, targetPlayerId=%v, targetPlayerBattleState=%v, thatPlayerId=%v, thatPlayerBattleState=%v", pR.Id, pR.State, targetPlayer.Id, targetPlayer.BattleState, thatPlayer.Id, thatPlayerBattleState))
 			if thatPlayerId == targetPlayer.Id || (PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK == thatPlayerBattleState || PlayerBattleStateIns.ACTIVE == thatPlayerBattleState) {
 				Logger.Debug(fmt.Sprintf("OnPlayerBattleColliderAcked-sending DOWNSYNC_MSG_ACT_PLAYER_ADDED_AND_ACKED: roomId=%v, roomState=%v, targetPlayerId=%v, targetPlayerBattleState=%v, capacity=%v, EffectivePlayerCount=%v", pR.Id, pR.State, targetPlayer.Id, targetPlayer.BattleState, pR.Capacity, pR.EffectivePlayerCount))
-				pR.sendSafely(playerAckedFrame, nil, DOWNSYNC_MSG_ACT_PLAYER_ADDED_AND_ACKED, thatPlayer.Id)
+				pR.sendSafely(playerAckedFrame, nil, DOWNSYNC_MSG_ACT_PLAYER_ADDED_AND_ACKED, thatPlayer.Id, true)
 			}
 		}
+		atomic.StoreInt32(&(targetPlayer.BattleState), PlayerBattleStateIns.ACTIVE)
 	case PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK:
-		// only send resync info to the targetPlayer
-		// i.e. implies that "targetPlayer.LastSentInputFrameId == MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED"
-		pR.downsyncToSinglePlayer(playerId, targetPlayer, pR.LastAllConfirmedInputFrameId, uint64(0), pR.createInputsBufferSnapshot())
+		shouldTryToStartBattle = false
+		atomic.StoreInt32(&(targetPlayer.BattleState), PlayerBattleStateIns.READDED_BATTLE_COLLIDER_ACKED)
 	default:
 	}
 
-	targetPlayer.BattleState = PlayerBattleStateIns.ACTIVE
 	Logger.Debug(fmt.Sprintf("OnPlayerBattleColliderAcked-post-downsync: roomId=%v, roomState=%v, targetPlayerId=%v, targetPlayerBattleState=%v, capacity=%v, EffectivePlayerCount=%v", pR.Id, pR.State, targetPlayer.Id, targetPlayer.BattleState, pR.Capacity, pR.EffectivePlayerCount))
 
+	if shouldTryToStartBattle {
 	if pR.Capacity == int(pR.EffectivePlayerCount) {
 		allAcked := true
 		for _, p := range pR.Players {
 			if PlayerBattleStateIns.ACTIVE != p.BattleState {
-				Logger.Warn("unexpectedly got an inactive player", zap.Any("roomId", pR.Id), zap.Any("playerId", p.Id), zap.Any("battleState", p.BattleState))
+				Logger.Warn("Unexpectedly got an inactive player", zap.Any("roomId", pR.Id), zap.Any("playerId", p.Id), zap.Any("battleState", p.BattleState))
 				allAcked = false
 				break
 			}
@@ -919,16 +947,25 @@ func (pR *Room) OnPlayerBattleColliderAcked(playerId int32) bool {
 		}
 
 		pR.updateScore()
+	}
 	return true
 }
 
-func (pR *Room) sendSafely(roomDownsyncFrame *RoomDownsyncFrame, toSendInputFrameDownsyncs []*InputFrameDownsync, act int32, playerId int32) {
+func (pR *Room) sendSafely(roomDownsyncFrame *RoomDownsyncFrame, toSendInputFrameDownsyncs []*InputFrameDownsync, act int32, playerId int32, needLockExplicitly bool) {
+	// [WARNING] This function MUST BE called while "pR.PlayerDownsyncSessionLock[player.JoinIndex-1]" is locked!
 	defer func() {
 		if r := recover(); r != nil {
 			Logger.Error("sendSafely, recovered from: ", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId), zap.Any("panic", r))
 		}
 	}()
 
+	if needLockExplicitly {
+		pR.PlayerDownsyncSessionLock[playerId].Lock()
+		defer func() {
+			pR.PlayerDownsyncSessionLock[playerId].Unlock()
+		}()
+	}
 
 	if playerDownsyncSession, existent := pR.PlayerDownsyncSessionDict[playerId]; existent {
 		pResp := &WsResp{
 			Ret: int32(Constants.RetCode.Ok),
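[Review note] The `needLockExplicitly` flag exists because `sync.Mutex` is not reentrant: `downsyncToSinglePlayer` (further below) already holds `PlayerDownsyncSessionLock[playerId]` when it calls `sendSafely(..., false)`, while the kickoff, settlement, prepare and player-added call sites pass `true` so that `sendSafely` takes the lock itself. The two call shapes in this diff (schematic, arguments abbreviated):

```go
// Caller holds no lock: sendSafely locks/unlocks the session mutex internally.
pR.sendSafely(frame, nil, act, playerId, true)

// Caller is already inside pR.PlayerDownsyncSessionLock[playerId].Lock():
// passing true here would self-deadlock, since sync.Mutex is not reentrant.
pR.sendSafely(frame, batch, act, playerId, false)
```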
@@ -948,8 +985,13 @@ func (pR *Room) sendSafely(roomDownsyncFrame *RoomDownsyncFrame, toSendInputFram
 	}
 }
 
-func (pR *Room) shouldPrefabInputFrameDownsync(renderFrameId int32) bool {
-	return ((renderFrameId & ((1 << pR.InputScaleFrames) - 1)) == 0)
+func (pR *Room) shouldPrefabInputFrameDownsync(prevRenderFrameId int32, renderFrameId int32) (bool, int32) {
+	for i := prevRenderFrameId + 1; i <= renderFrameId; i++ {
+		if (0 <= i) && (0 == (i & ((1 << pR.InputScaleFrames) - 1))) {
+			return true, i
+		}
+	}
+	return false, -1
 }
 
 func (pR *Room) prefabInputFrameDownsync(inputFrameId int32) *InputFrameDownsync {
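[Review note] Because the main loop can now advance `RenderFrameId` by more than 1 per tick, the boundary test `0 == (i & ((1 << InputScaleFrames) - 1))` has to be applied to every id in `(prevRenderFrameId, renderFrameId]` rather than only the endpoint; otherwise a generation boundary inside a skipped span would be missed and no inputFrame would be prefabbed for it. A standalone replica with `InputScaleFrames = 2` (one generation every 4 renderFrames):

```go
package main

import "fmt"

// Standalone replica of the patched shouldPrefabInputFrameDownsync, InputScaleFrames=2.
func shouldPrefab(prevRenderFrameId, renderFrameId int32) (bool, int32) {
	for i := prevRenderFrameId + 1; i <= renderFrameId; i++ {
		if 0 <= i && 0 == (i&3) { // 3 == (1<<2)-1
			return true, i
		}
	}
	return false, -1
}

func main() {
	fmt.Println(shouldPrefab(5, 9))  // true 8: boundary 8 sits inside the skipped span 6..9
	fmt.Println(shouldPrefab(9, 11)) // false -1: no multiple of 4 in 10..11
}
```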
@@ -988,7 +1030,7 @@ func (pR *Room) prefabInputFrameDownsync(inputFrameId int32) *InputFrameDownsync
 	return currInputFrameDownsync
 }
 
-func (pR *Room) markConfirmationIfApplicable(inputFrameUpsyncBatch []*InputFrameUpsync, playerId int32, player *Player) int {
+func (pR *Room) markConfirmationIfApplicable(inputFrameUpsyncBatch []*InputFrameUpsync, playerId int32, player *Player) (int32, int32, []*InputFrameDownsync) {
 	Logger.Debug(fmt.Sprintf("markConfirmationIfApplicable-InputsBufferLock about to lock: roomId=%v, fromPlayerId=%v", pR.Id, playerId))
 	pR.InputsBufferLock.Lock()
 	Logger.Debug(fmt.Sprintf("markConfirmationIfApplicable-InputsBufferLock locked: roomId=%v, fromPlayerId=%v", pR.Id, playerId))
@@ -1001,15 +1043,15 @@ func (pR *Room) markConfirmationIfApplicable(inputFrameUpsyncBatch []*InputFrame
 		clientInputFrameId := inputFrameUpsync.InputFrameId
 		if clientInputFrameId < pR.InputsBuffer.StFrameId {
 			// The updates to "pR.InputsBuffer.StFrameId" is monotonically increasing, thus if "clientInputFrameId < pR.InputsBuffer.StFrameId" at any moment of time, it is obsolete in the future.
-			Logger.Debug(fmt.Sprintf("Omitting obsolete inputFrameUpsync#1: roomId=%v, playerId=%v, clientInputFrameId=%v, lastAllConfirmedInputFrameId=%v, InputsBuffer=%v", pR.Id, playerId, clientInputFrameId, pR.LastAllConfirmedInputFrameId, pR.InputsBufferString(false)))
+			Logger.Debug(fmt.Sprintf("Omitting obsolete inputFrameUpsync#1: roomId=%v, playerId=%v, clientInputFrameId=%v, InputsBuffer=%v", pR.Id, playerId, clientInputFrameId, pR.InputsBufferString(false)))
 			continue
 		}
 		if clientInputFrameId < pR.LastAllConfirmedInputFrameId {
-			Logger.Info(fmt.Sprintf("Omitting obsolete inputFrameUpsync#2: roomId=%v, playerId=%v, clientInputFrameId=%v, lastAllConfirmedInputFrameId=%v, InputsBuffer=%v", pR.Id, playerId, clientInputFrameId, pR.LastAllConfirmedInputFrameId, pR.InputsBufferString(false)))
+			Logger.Info(fmt.Sprintf("Omitting obsolete inputFrameUpsync#2: roomId=%v, playerId=%v, clientInputFrameId=%v, InputsBuffer=%v", pR.Id, playerId, clientInputFrameId, pR.InputsBufferString(false)))
 			continue
 		}
 		if clientInputFrameId > pR.InputsBuffer.EdFrameId {
-			Logger.Warn(fmt.Sprintf("Dropping too advanced inputFrameUpsync: roomId=%v, playerId=%v, clientInputFrameId=%v, lastAllConfirmedInputFrameId=%v, InputsBuffer=%v; is this player cheating?", pR.Id, playerId, clientInputFrameId, pR.LastAllConfirmedInputFrameId, pR.InputsBufferString(false)))
+			Logger.Warn(fmt.Sprintf("Dropping too advanced inputFrameUpsync: roomId=%v, playerId=%v, clientInputFrameId=%v, InputsBuffer=%v; is this player cheating?", pR.Id, playerId, clientInputFrameId, pR.InputsBufferString(false)))
 			continue
 		}
 		var targetInputFrameDownsync *InputFrameDownsync = nil
@@ -1024,7 +1066,7 @@ func (pR *Room) markConfirmationIfApplicable(inputFrameUpsyncBatch []*InputFrame
 		targetInputFrameDownsync.ConfirmedList |= uint64(1 << uint32(player.JoinIndex-1))
 	}
 
-	newAllConfirmedCount := 0
+	newAllConfirmedCount := int32(0)
 	inputFrameId1 := pR.LastAllConfirmedInputFrameId + 1
 	totPlayerCnt := uint32(pR.Capacity)
 	allConfirmedMask := uint64((1 << totPlayerCnt) - 1)
@@ -1060,28 +1102,57 @@ func (pR *Room) markConfirmationIfApplicable(inputFrameUpsyncBatch []*InputFrame
 		}
 	}
 
-	Logger.Debug(fmt.Sprintf("markConfirmationIfApplicable checking inputFrameId=[%v, %v) for roomId=%v, newAllConfirmedCount=%d: InputsBuffer=%v", inputFrameId1, pR.InputsBuffer.EdFrameId, pR.Id, newAllConfirmedCount, pR.InputsBufferString(false)))
-	return newAllConfirmedCount
+	var inputsBufferSnapshot []*InputFrameDownsync = nil
+	if 0 < newAllConfirmedCount {
+		snapshotStFrameId := pR.LastAllConfirmedInputFrameId - newAllConfirmedCount
+		if snapshotStFrameId < 0 {
+			// This applies to the first all-confirmed inputFrame whose inputFrameId is 0
+			snapshotStFrameId = 0
+		}
+		inputsBufferSnapshot = pR.createInputsBufferSnapshot(snapshotStFrameId, pR.LastAllConfirmedInputFrameId+1)
+	}
+
+	Logger.Debug(fmt.Sprintf("markConfirmationIfApplicable for roomId=%v returning newAllConfirmedCount=%d: InputsBuffer=%v", pR.Id, newAllConfirmedCount, pR.InputsBufferString(false)))
+	refRenderFrameIdIfNeeded := pR.CurDynamicsRenderFrameId - 1
+	return newAllConfirmedCount, refRenderFrameIdIfNeeded, inputsBufferSnapshot
 }
 
-func (pR *Room) forceConfirmationIfApplicable() uint64 {
+func (pR *Room) forceConfirmationIfApplicable(prevRenderFrameId int32) (uint64, int32, []*InputFrameDownsync) {
 	// [WARNING] This function MUST BE called while "pR.InputsBufferLock" is locked!
 	// Force confirmation of non-all-confirmed inputFrame EXACTLY ONE AT A TIME, returns the non-confirmed mask of players, e.g. in a 4-player-battle returning 1001 means that players with JoinIndex=1 and JoinIndex=4 are non-confirmed for inputFrameId2
 
+	/*
+		Upon resynced on frontend, "refRenderFrameId" is now set to as advanced as possible, and it's the frontend's responsibility now to pave way for the "gap inputFrames"
+
+		If "NstDelayFrames" becomes larger, "pR.RenderFrameId - refRenderFrameId" possibly becomes larger because the force confirmation is delayed more.
+
+		Upon resync, it's still possible that "refRenderFrameId < frontend.chaserRenderFrameId" -- and this is allowed.
+	*/
+	refRenderFrameIdIfNeeded := pR.CurDynamicsRenderFrameId - 1
+	if 0 > refRenderFrameIdIfNeeded {
+		// Without a "refRenderFrame", there's no point to force confirmation, i.e. nothing to downsync to the "ACTIVE but slowly ticking frontend(s)"
+		return 0, -1, nil
+	}
 	renderFrameId1 := (pR.RenderFrameId - pR.NstDelayFrames) // the renderFrameId which should've been rendered on frontend
-	if 0 > renderFrameId1 || !pR.shouldPrefabInputFrameDownsync(renderFrameId1) {
+	if 0 > renderFrameId1 {
+		return 0, -1, nil
+	}
+	ok := false
+	renderFrameId2 := int32(-1)
+	if ok, renderFrameId2 = pR.shouldPrefabInputFrameDownsync(prevRenderFrameId-pR.NstDelayFrames, renderFrameId1); !ok {
 		/*
 			The backend "shouldPrefabInputFrameDownsync" shares the same rule as frontend "shouldGenerateInputFrameUpsync".

 			It's also important that "forceConfirmationIfApplicable" is NOT EXECUTED for every renderFrame, such that when a player is forced to resync, it has some time, i.e. (1 << InputScaleFrames) renderFrames, to upsync again.
 		*/
-		return 0
+		return 0, -1, nil
 	}
 
-	inputFrameId2 := pR.ConvertToInputFrameId(renderFrameId1, 0) // The inputFrame to force confirmation (if necessary)
+	inputFrameId2 := pR.ConvertToInputFrameId(renderFrameId2, 0) // The inputFrame to force confirmation (if necessary)
 	if inputFrameId2 < pR.LastAllConfirmedInputFrameId {
 		// No need to force confirmation, the inputFrames already arrived
 		Logger.Debug(fmt.Sprintf("inputFrameId2=%v is already all-confirmed for roomId=%v[type#1], no need to force confirmation of it", inputFrameId2, pR.Id))
-		return 0
+		return 0, -1, nil
 	}
 
 	tmp := pR.InputsBuffer.GetByFrameId(inputFrameId2)
@@ -1099,10 +1170,18 @@ func (pR *Room) forceConfirmationIfApplicable() uint64 {
 	inputFrame2.ConfirmedList = allConfirmedMask
 	pR.onInputFrameDownsyncAllConfirmed(inputFrame2, -1)
 
-	return unconfirmedMask
+	var inputsBufferSnapshot []*InputFrameDownsync = nil
+	if 0 < unconfirmedMask {
+		// This condition should be rarely met!
+		snapshotStFrameId := pR.ConvertToInputFrameId(refRenderFrameIdIfNeeded, pR.InputDelayFrames)
+		inputsBufferSnapshot = pR.createInputsBufferSnapshot(snapshotStFrameId, pR.LastAllConfirmedInputFrameId+1)
+	}
+
+	return unconfirmedMask, refRenderFrameIdIfNeeded, inputsBufferSnapshot
 }
 
 func (pR *Room) applyInputFrameDownsyncDynamics(fromRenderFrameId int32, toRenderFrameId int32, spaceOffsetX, spaceOffsetY float64) {
+	// [WARNING] This function MUST BE called while "pR.InputsBufferLock" is locked!
 	if fromRenderFrameId >= toRenderFrameId {
 		return
 	}
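[Review note] `unconfirmedMask` is a join-index bitmask: bit `JoinIndex-1` set means that player's input had to be force-confirmed because its upsync never arrived in time. A worked example for a 4-player room, matching the "returning 1001" comment above:

```go
package main

import "fmt"

func main() {
	capacity := uint32(4)
	allConfirmedMask := uint64(1<<capacity) - 1 // 0b1111

	confirmedList := uint64(0b0110) // players with JoinIndex 2 and 3 upsynced in time
	unconfirmedMask := allConfirmedMask ^ confirmedList

	fmt.Printf("%04b\n", unconfirmedMask) // 1001: JoinIndex 1 and 4 were force-confirmed
}
```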
@@ -1111,14 +1190,6 @@ func (pR *Room) applyInputFrameDownsyncDynamics(fromRenderFrameId int32, toRende
 	totPlayerCnt := uint32(pR.Capacity)
 	allConfirmedMask := uint64((1 << totPlayerCnt) - 1)
 
-	Logger.Debug(fmt.Sprintf("applyInputFrameDownsyncDynamics-InputsBufferLock about to lock: roomId=%v", pR.Id))
-	pR.InputsBufferLock.Lock()
-	Logger.Debug(fmt.Sprintf("applyInputFrameDownsyncDynamics-InputsBufferLock locked: roomId=%v", pR.Id))
-	defer func() {
-		pR.InputsBufferLock.Unlock()
-		Logger.Debug(fmt.Sprintf("applyInputFrameDownsyncDynamics-InputsBufferLock unlocked: roomId=%v", pR.Id))
-	}()
 	for collisionSysRenderFrameId := fromRenderFrameId; collisionSysRenderFrameId < toRenderFrameId; collisionSysRenderFrameId++ {
 		currRenderFrameTmp := pR.RenderFrameBuffer.GetByFrameId(collisionSysRenderFrameId)
 		if nil == currRenderFrameTmp {
@@ -1148,7 +1219,7 @@ func (pR *Room) applyInputFrameDownsyncDynamics(fromRenderFrameId int32, toRende
 
 // TODO: Write unit-test for this function to compare with its frontend counter part
 func (pR *Room) applyInputFrameDownsyncDynamicsOnSingleRenderFrame(delayedInputFrame *InputFrameDownsync, currRenderFrame *RoomDownsyncFrame, collisionSysMap map[int32]*resolv.Object) *RoomDownsyncFrame {
-	// TODO: Derive "nextRenderFramePlayers[*].CharacterState" as the frontend counter-part!
+	// [WARNING] This function MUST BE called while "pR.InputsBufferLock" is locked!
 	nextRenderFramePlayers := make(map[int32]*PlayerDownsync, pR.Capacity)
 	// Make a copy first
 	for playerId, currPlayerDownsync := range currRenderFrame.Players {
@@ -1423,7 +1494,7 @@ func (pR *Room) printBarrier(barrierCollider *resolv.Object) {
 	Logger.Info(fmt.Sprintf("Barrier in roomId=%v: w=%v, h=%v, shape=%v", pR.Id, barrierCollider.W, barrierCollider.H, barrierCollider.Shape))
 }
 
-func (pR *Room) doBattleMainLoopPerTickBackendDynamicsWithProperLocking(pDynamicsDuration *int64) (uint64, *RingBuffer) {
+func (pR *Room) doBattleMainLoopPerTickBackendDynamicsWithProperLocking(prevRenderFrameId int32, pDynamicsDuration *int64) (uint64, int32, []*InputFrameDownsync) {
 	Logger.Debug(fmt.Sprintf("doBattleMainLoopPerTickBackendDynamicsWithProperLocking-InputsBufferLock to about lock: roomId=%v", pR.Id))
 	pR.InputsBufferLock.Lock()
 	Logger.Debug(fmt.Sprintf("doBattleMainLoopPerTickBackendDynamicsWithProperLocking-InputsBufferLock locked: roomId=%v", pR.Id))
@@ -1434,15 +1505,15 @@ func (pR *Room) doBattleMainLoopPerTickBackendDynamicsWithProperLocking(pDynamic
 		Logger.Debug(fmt.Sprintf("doBattleMainLoopPerTickBackendDynamicsWithProperLocking-InputsBufferLock unlocked: roomId=%v", pR.Id))
 	}()
 
-	if pR.shouldPrefabInputFrameDownsync(pR.RenderFrameId) {
-		noDelayInputFrameId := pR.ConvertToInputFrameId(pR.RenderFrameId, 0)
+	if ok, inputFrameGeneratingRenderFrameId := pR.shouldPrefabInputFrameDownsync(prevRenderFrameId, pR.RenderFrameId); ok {
+		noDelayInputFrameId := pR.ConvertToInputFrameId(inputFrameGeneratingRenderFrameId, 0)
 		if existingInputFrame := pR.InputsBuffer.GetByFrameId(noDelayInputFrameId); nil == existingInputFrame {
 			pR.prefabInputFrameDownsync(noDelayInputFrameId)
 		}
 	}
 
 	// Force setting all-confirmed of buffered inputFrames periodically, kindly note that if "pR.BackendDynamicsEnabled", what we want to achieve is "recovery upon reconnection", which certainly requires "forceConfirmationIfApplicable" to move "pR.LastAllConfirmedInputFrameId" forward as much as possible
-	unconfirmedMask := pR.forceConfirmationIfApplicable()
+	unconfirmedMask, refRenderFrameId, inputsBufferSnapshot := pR.forceConfirmationIfApplicable(prevRenderFrameId)
 
 	if 0 <= pR.LastAllConfirmedInputFrameId {
 		dynamicsStartedAt := utils.UnixtimeNano()
@@ -1453,69 +1524,53 @@ func (pR *Room) doBattleMainLoopPerTickBackendDynamicsWithProperLocking(pDynamic
 		*pDynamicsDuration = utils.UnixtimeNano() - dynamicsStartedAt
 	}
 
-	var inputsBufferSnapshot *RingBuffer = nil
-	if 0 < unconfirmedMask {
-		// [WARNING] This condition should be rarely met!
-		// Otherwise there's no need to create the inputsBufferSnapshot
-		inputsBufferSnapshot = pR.createInputsBufferSnapshot()
-	}
-	return unconfirmedMask, inputsBufferSnapshot
+	return unconfirmedMask, refRenderFrameId, inputsBufferSnapshot
 }
 
-func (pR *Room) downsyncToAllPlayers(upperToSendInputFrameId int32, unconfirmedMask uint64, inputsBufferSnapshot *RingBuffer) {
-	// [WARNING] This function MUST BE called while "pR.InputsBufferLock" is unlocked -- otherwise the blocking "sendSafely" might cause significant lag!
+func (pR *Room) downsyncToAllPlayers(refRenderFrameId int32, unconfirmedMask uint64, inputsBufferSnapshot []*InputFrameDownsync) {
+	/*
+		[WARNING] This function MUST BE called while "pR.InputsBufferLock" is unlocked -- otherwise the blocking "sendSafely" might cause significant lag!
+		Moreover, we're downsyncing a same "inputsBufferSnapshot" for all players in the same battle and this is by design, i.e. not respecting "player.LastSentInputFrameId" because "new all-confirmed inputFrameDownsyncs" are the same for all players and ws is TCP-based (no loss of consecutive packets except for reconnection -- which is already handled by READDED_BATTLE_COLLIDER_ACKED)
+	*/
 	for _, player := range pR.PlayersArr {
-		thatPlayerBattleState := atomic.LoadInt32(&(player.BattleState))
-		switch thatPlayerBattleState {
-		case PlayerBattleStateIns.DISCONNECTED:
-		case PlayerBattleStateIns.LOST:
-		case PlayerBattleStateIns.EXPELLED_DURING_GAME:
-		case PlayerBattleStateIns.EXPELLED_IN_DISMISSAL:
-		case PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK: // This is the reason why battleState filter is put at "downsyncToAllPlayers" instead of "downsyncToSinglePlayer"
-			continue
-		}
-		pR.downsyncToSinglePlayer(player.Id, player, pR.LastAllConfirmedInputFrameId, unconfirmedMask, inputsBufferSnapshot)
+		pR.downsyncToSinglePlayer(player.Id, player, refRenderFrameId, unconfirmedMask, inputsBufferSnapshot)
 	}
 }
 
-func (pR *Room) downsyncToSinglePlayer(playerId int32, player *Player, upperToSendInputFrameId int32, unconfirmedMask uint64, inputsBufferSnapshot *RingBuffer) {
-	// [WARNING] This function MUST BE called while "pR.InputsBufferLock" is unlocked -- otherwise the blocking "sendSafely" might cause significant lag!
-	// [WARNING] Websocket is TCP-based, thus no need to re-send a previously sent inputFrame to a same player!
-	lowerToSentInputFrameId := player.LastSentInputFrameId + 1
-	/*
-		[WARNING]
-		Upon resynced on frontend, "refRenderFrameId" is now set to as advanced as possible, and it's the frontend's responsibility now to pave way for the "gap inputFrames"
-
-		If "NstDelayFrames" becomes larger, "pR.RenderFrameId - refRenderFrameId" possibly becomes larger because the force confirmation is delayed more.
-
-		Upon resync, it's still possible that "refRenderFrameId < frontend.chaserRenderFrameId" -- and this is allowed.
-	*/
-	refRenderFrameId := pR.CurDynamicsRenderFrameId - 1
-
-	shouldResync1 := (MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED == player.LastSentInputFrameId)
-	shouldResync2 := (0 < (unconfirmedMask & uint64(1<<uint32(player.JoinIndex-1)))) // This condition is critical, if we don't send resync upon this condition, the "reconnected or slowly-clocking player" might never get its input synced
+func (pR *Room) downsyncToSinglePlayer(playerId int32, player *Player, refRenderFrameId int32, unconfirmedMask uint64, toSendInputFrameDownsyncsSnapshot []*InputFrameDownsync) {
+	/*
+		[WARNING] This function MUST BE called while "pR.InputsBufferLock" is unlocked -- otherwise the blocking "sendSafely" might cause significant lag!
+		We hereby assume that Golang runtime allocates & frees small amount of RAM quickly enough compared to either network I/O worst cases or the high frequency "per inputFrameDownsync*player" locking (though "OnBattleCmdReceived" locks at the same frequency but it's inevitable).
+	*/
+
+	playerJoinIndex := player.JoinIndex - 1
+	Logger.Debug(fmt.Sprintf("downsyncToSinglePlayer-SessionLock about to lock: roomId=%v, playerId=%v", pR.Id, playerId))
+	pR.PlayerDownsyncSessionLock[playerId].Lock()
+	Logger.Debug(fmt.Sprintf("downsyncToSinglePlayer-SessionLock locked: roomId=%v, playerId=%v", pR.Id, playerId))
+	defer func() {
+		pR.PlayerDownsyncSessionLock[playerId].Unlock()
+		Logger.Debug(fmt.Sprintf("downsyncToSinglePlayer-SessionLock unlocked: roomId=%v, playerId=%v", pR.Id, playerId))
+	}()
+
+	playerBattleState := atomic.LoadInt32(&(player.BattleState))
+	switch playerBattleState {
+	case PlayerBattleStateIns.DISCONNECTED:
+	case PlayerBattleStateIns.LOST:
+	case PlayerBattleStateIns.EXPELLED_DURING_GAME:
+	case PlayerBattleStateIns.EXPELLED_IN_DISMISSAL:
+	case PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK:
+	case PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK:
+		return
+	}
+
+	shouldResync1 := (PlayerBattleStateIns.READDED_BATTLE_COLLIDER_ACKED == playerBattleState) // i.e. implies that "MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED == player.LastSentInputFrameId", kindly note that this check as well as the re-assignment at the bottom of this function are both guarded by "pR.PlayerDownsyncSessionLock[playerId]"
+	shouldResync2 := (0 < (unconfirmedMask & uint64(1<<uint32(playerJoinIndex)))) // This condition is critical, if we don't send resync upon this condition, the "reconnected or slowly-clocking player" might never get its input synced
 	// shouldResync2 := (0 < unconfirmedMask) // An easier version of the above, might keep sending "refRenderFrame"s to still connected players when any player is disconnected
 	shouldResyncOverall := (shouldResync1 || shouldResync2)
 
 	if shouldResyncOverall {
 		// A rejoined player, should guarantee that when it resyncs to "refRenderFrameId" a matching inputFrame to apply exists
-		lowerToSentInputFrameId = pR.ConvertToInputFrameId(refRenderFrameId, pR.InputDelayFrames)
-		Logger.Info(fmt.Sprintf("Resyncing player: roomId=%v, playerId=%v, playerJoinIndex=%v, renderFrameId=%v, curDynamicsRenderFrameId=%v, playerLastSentInputFrameId=%v, refRenderFrameId=%v, lowerToSentInputFrameId=%v, upperToSendInputFrameId=%v", pR.Id, playerId, player.JoinIndex, pR.RenderFrameId, pR.CurDynamicsRenderFrameId, player.LastSentInputFrameId, refRenderFrameId, lowerToSentInputFrameId, upperToSendInputFrameId))
-		// TODO: What if "lowerToSentInputFrameId > pR.InputsBuffer.StFrameId" now?
-	}
-
-	if lowerToSentInputFrameId > upperToSendInputFrameId {
-		Logger.Debug(fmt.Sprintf("Not sending due to potentially empty toSendInputFrameDownsyncs: roomId=%v, playerId=%v, playerLastSentInputFrameId=%v, refRenderFrameId=%v, lowerToSentInputFrameId=%v, upperToSendInputFrameId=%v, lastSentInputFrameId=%v, playerAckingInputFrameId=%v", pR.Id, playerId, player.LastSentInputFrameId, refRenderFrameId, lowerToSentInputFrameId, upperToSendInputFrameId, player.LastSentInputFrameId, player.AckingInputFrameId))
-		return
-	}
-
-	// [WARNING] EDGE CASE HERE: Upon initialization, all of "lastAllConfirmedInputFrameId", "lastAllConfirmedInputFrameIdWithChange" and "anchorInputFrameId" are "-1", thus "j" starts with "0", however "inputFrameId: 0" might not have been all confirmed!
-	// [WARNING] Clone to deliberately avoid holding "pR.InputsBufferLock" while using network I/O
-	j, toSendInputFrameDownsyncs := pR.cloneInputFrameDownsyncsByFrameIdRange(lowerToSentInputFrameId, upperToSendInputFrameId+1, inputsBufferSnapshot)
-
-	if 0 >= len(toSendInputFrameDownsyncs) {
-		Logger.Debug(fmt.Sprintf("Not sending due to actually empty toSendInputFrameDownsyncs: roomId=%v, playerId=%v, playerLastSentInputFrameId=%v, refRenderFrameId=%v, lowerToSentInputFrameId=%v, upperToSendInputFrameId=%v, j=%v, lastSentInputFrameId=%v, playerAckingInputFrameId=%v", pR.Id, playerId, player.LastSentInputFrameId, refRenderFrameId, lowerToSentInputFrameId, upperToSendInputFrameId, j, player.LastSentInputFrameId, player.AckingInputFrameId))
-		return
+		Logger.Debug(fmt.Sprintf("Resyncing player: roomId=%v, playerId=%v, playerJoinIndex=%v, renderFrameId=%v, curDynamicsRenderFrameId=%v, playerLastSentInputFrameId=%v, refRenderFrameId=%v", pR.Id, playerId, player.JoinIndex, pR.RenderFrameId, pR.CurDynamicsRenderFrameId, player.LastSentInputFrameId, refRenderFrameId))
 	}
 
 	/*
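[Review note] The battle-state filter moved from `downsyncToAllPlayers` into `downsyncToSinglePlayer`, where it now sits inside the per-player session lock together with the `shouldResync1` check and the trailing `LastSentInputFrameId` / `BattleState` updates; that is what makes the READDED handshake race-free against concurrent senders. A condensed sketch of the critical section (names from the diff, send logic elided):

```go
// Condensed from downsyncToSinglePlayer above: every read and write of this
// player's send-side state happens under the same per-player mutex.
func (pR *Room) downsyncCriticalSection(playerId int32, player *Player) {
	pR.PlayerDownsyncSessionLock[playerId].Lock()
	defer pR.PlayerDownsyncSessionLock[playerId].Unlock()

	playerBattleState := atomic.LoadInt32(&(player.BattleState))
	shouldResync1 := (PlayerBattleStateIns.READDED_BATTLE_COLLIDER_ACKED == playerBattleState)
	// ... choose DOWNSYNC_MSG_ACT_FORCED_RESYNC vs DOWNSYNC_MSG_ACT_INPUT_BATCH,
	// then call sendSafely(..., needLockExplicitly=false) while still locked ...
	if shouldResync1 {
		atomic.StoreInt32(&(player.BattleState), PlayerBattleStateIns.ACTIVE)
	}
}
```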
@@ -1523,10 +1578,12 @@ func (pR *Room) downsyncToSinglePlayer(playerId int32, player *Player, upperToSe
 		1. when player with a slower frontend clock lags significantly behind and thus wouldn't get its inputUpsync recognized due to faster "forceConfirmation"
 		2. reconnection
 	*/
 
+	toSendInputFrameIdSt, toSendInputFrameIdEd := toSendInputFrameDownsyncsSnapshot[0].InputFrameId, toSendInputFrameDownsyncsSnapshot[len(toSendInputFrameDownsyncsSnapshot)-1].InputFrameId+1
 	if pR.BackendDynamicsEnabled && shouldResyncOverall {
 		tmp := pR.RenderFrameBuffer.GetByFrameId(refRenderFrameId)
 		if nil == tmp {
-			panic(fmt.Sprintf("Required refRenderFrameId=%v for roomId=%v, renderFrameId=%v, playerId=%v, playerLastSentInputFrameId=%v, j=%v doesn't exist! InputsBuffer=%v, RenderFrameBuffer=%v", refRenderFrameId, pR.Id, pR.RenderFrameId, playerId, player.LastSentInputFrameId, j, pR.InputsBufferString(false), pR.RenderFrameBufferString()))
+			panic(fmt.Sprintf("Required refRenderFrameId=%v for (roomId=%v, renderFrameId=%v, playerId=%v, playerLastSentInputFrameId=%v) doesn't exist! InputsBuffer=%v, RenderFrameBuffer=%v", refRenderFrameId, pR.Id, pR.RenderFrameId, playerId, player.LastSentInputFrameId, pR.InputsBufferString(false), pR.RenderFrameBufferString()))
 		}
 
 		refRenderFrame := tmp.(*RoomDownsyncFrame)
@@ -1534,31 +1591,24 @@ func (pR *Room) downsyncToSinglePlayer(playerId int32, player *Player, upperToSe
 			refRenderFrame.Players[player.Id].ColliderRadius = player.ColliderRadius // hardcoded for now
 		}
 		refRenderFrame.BackendUnconfirmedMask = unconfirmedMask
-		Logger.Warn(fmt.Sprintf("Sending refRenderFrameId=%v for roomId=%v, , playerId=%v, playerJoinIndex=%v, renderFrameId=%v, curDynamicsRenderFrameId=%v, playerLastSentInputFrameId=%v, lowerToSentInputFrameId=%v, upperToSendInputFrameId=%v, j=%v: InputsBuffer=%v", refRenderFrameId, pR.Id, playerId, player.JoinIndex, pR.RenderFrameId, pR.CurDynamicsRenderFrameId, player.LastSentInputFrameId, lowerToSentInputFrameId, upperToSendInputFrameId, j, pR.InputsBufferString(false)))
-		pR.sendSafely(refRenderFrame, toSendInputFrameDownsyncs, DOWNSYNC_MSG_ACT_FORCED_RESYNC, playerId)
+		pR.sendSafely(refRenderFrame, toSendInputFrameDownsyncsSnapshot, DOWNSYNC_MSG_ACT_FORCED_RESYNC, playerId, false)
+		Logger.Warn(fmt.Sprintf("Sent refRenderFrameId=%v & inputFrameIds [%d, %d), for roomId=%v, playerId=%d, playerJoinIndex=%d, renderFrameId=%d, curDynamicsRenderFrameId=%d, playerLastSentInputFrameId=%d: InputsBuffer=%v", refRenderFrameId, toSendInputFrameIdSt, toSendInputFrameIdEd, pR.Id, playerId, player.JoinIndex, pR.RenderFrameId, pR.CurDynamicsRenderFrameId, player.LastSentInputFrameId, pR.InputsBufferString(false)))
 	} else {
-		pR.sendSafely(nil, toSendInputFrameDownsyncs, DOWNSYNC_MSG_ACT_INPUT_BATCH, playerId)
+		pR.sendSafely(nil, toSendInputFrameDownsyncsSnapshot, DOWNSYNC_MSG_ACT_INPUT_BATCH, playerId, false)
+	}
+	player.LastSentInputFrameId = toSendInputFrameIdEd - 1
+	if shouldResync1 {
+		atomic.StoreInt32(&(player.BattleState), PlayerBattleStateIns.ACTIVE)
 	}
-	player.LastSentInputFrameId = j - 1
 }
 
-func (pR *Room) createInputsBufferSnapshot() *RingBuffer {
+func (pR *Room) createInputsBufferSnapshot(stFrameId, edFrameId int32) []*InputFrameDownsync {
 	// [WARNING] This function MUST BE called while "pR.InputsBufferLock" is locked!
-	snapshot := NewRingBuffer(pR.InputsBuffer.N)
-	for j := pR.InputsBuffer.StFrameId; j < pR.InputsBuffer.Ed; j++ {
-		tmp := pR.InputsBuffer.GetByFrameId(j)
-		snapshot.Put(tmp)
-	}
-
-	return snapshot
-}
-
-func (pR *Room) cloneInputFrameDownsyncsByFrameIdRange(stFrameId, edFrameId int32, src *RingBuffer) (int32, []*InputFrameDownsync) {
-	dst := make([]*InputFrameDownsync, 0, src.Cnt)
+	snapshot := make([]*InputFrameDownsync, 0, edFrameId-stFrameId)
 	prevFrameFound := true
 	j := stFrameId
 	for j < edFrameId {
-		tmp := src.GetByFrameId(j)
+		tmp := pR.InputsBuffer.GetByFrameId(j)
 		if nil == tmp {
 			if false == prevFrameFound {
 				// The "id"s are always consecutive
@@ -1577,9 +1627,9 @@ func (pR *Room) cloneInputFrameDownsyncsByFrameIdRange(stFrameId, edFrameId int3
 		for i, input := range foo.InputList {
 			bar.InputList[i] = input
 		}
-		dst = append(dst, bar)
+		snapshot = append(snapshot, bar)
 		j++
 	}
 
-	return j, dst
+	return snapshot
 }
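[Review note] `createInputsBufferSnapshot` now clones only `[stFrameId, edFrameId)` into a plain slice (absorbing the old `cloneInputFrameDownsyncsByFrameIdRange`) instead of copying the whole ring buffer, so the copy happens under `InputsBufferLock` while the blocking websocket writes happen after the lock is released. A minimal usage sketch with the signatures from this diff (the wrapper function is hypothetical):

```go
// Hypothetical wrapper: clone under the lock, send outside it. The returned
// slice holds deep-copied InputFrameDownsync entries, so the senders never
// touch pR.InputsBuffer again after the Unlock.
func (pR *Room) snapshotThenDownsync(stFrameId, refRenderFrameId int32, unconfirmedMask uint64) {
	pR.InputsBufferLock.Lock()
	snapshot := pR.createInputsBufferSnapshot(stFrameId, pR.LastAllConfirmedInputFrameId+1)
	pR.InputsBufferLock.Unlock()

	pR.downsyncToAllPlayers(refRenderFrameId, unconfirmedMask, snapshot) // blocking I/O, no lock held
}
```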
The remaining hunk belongs to a separate asset file (path not preserved in this capture):

@@ -440,7 +440,7 @@
       "array": [
         0,
         0,
-        210.4441731196186,
+        210.7506841057913,
         0,
         0,
         0,