DelayNoMore/battle_srv/models/room.go

package models
import (
. "battle_srv/common"
"battle_srv/common/utils"
. "battle_srv/protos"
. "dnmshared"
. "dnmshared/sharedprotos"
"encoding/xml"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/gorilla/websocket"
"github.com/solarlune/resolv"
"go.uber.org/zap"
"io/ioutil"
"math"
"math/rand"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
)
const (
UPSYNC_MSG_ACT_HB_PING = int32(1)
UPSYNC_MSG_ACT_PLAYER_CMD = int32(2)
UPSYNC_MSG_ACT_PLAYER_COLLIDER_ACK = int32(3)
DOWNSYNC_MSG_ACT_HB_REQ = int32(1)
DOWNSYNC_MSG_ACT_INPUT_BATCH = int32(2)
DOWNSYNC_MSG_ACT_BATTLE_STOPPED = int32(3)
DOWNSYNC_MSG_ACT_FORCED_RESYNC = int32(4)
DOWNSYNC_MSG_ACT_BATTLE_READY_TO_START = int32(-1)
DOWNSYNC_MSG_ACT_BATTLE_START = int32(0)
DOWNSYNC_MSG_ACT_PLAYER_ADDED_AND_ACKED = int32(-98)
DOWNSYNC_MSG_ACT_PLAYER_READDED_AND_ACKED = int32(-97)
)
const (
MAGIC_JOIN_INDEX_DEFAULT = 0
MAGIC_JOIN_INDEX_INVALID = -1
)
const (
COLLISION_CATEGORY_CONTROLLED_PLAYER = (1 << 1)
COLLISION_CATEGORY_BARRIER = (1 << 2)
COLLISION_MASK_FOR_CONTROLLED_PLAYER = (COLLISION_CATEGORY_BARRIER)
COLLISION_MASK_FOR_BARRIER = (COLLISION_CATEGORY_CONTROLLED_PLAYER)
COLLISION_PLAYER_INDEX_PREFIX = (1 << 17)
COLLISION_BARRIER_INDEX_PREFIX = (1 << 16)
)
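// A note on the collision constants above (a reading of the code, not an authoritative spec): the category/mask
// pairs are symmetric, so controlled players only ever test against barriers and vice versa, and the large
// power-of-two *_INDEX_PREFIX values are presumably offsets that keep player keys and barrier keys disjoint
// inside "CollisionSysMap".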
const (
MAGIC_LAST_SENT_INPUT_FRAME_ID_NORMAL_ADDED = -1
MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED = -2
)
// These directions are chosen such that when speed is changed to "(speedX+delta, speedY+delta)" for any of them, the direction is unchanged.
var DIRECTION_DECODER = [][]int32{
{0, 0},
{0, +2},
{0, -2},
{+2, 0},
{-2, 0},
{+1, +1},
{-1, -1},
{+1, -1},
{-1, +1},
}
type RoomBattleState struct {
IDLE int32
WAITING int32
PREPARE int32
IN_BATTLE int32
STOPPING_BATTLE_FOR_SETTLEMENT int32
IN_SETTLEMENT int32
IN_DISMISSAL int32
}
type BattleStartCbType func()
type SignalToCloseConnCbType func(customRetCode int, customRetMsg string)
// A single instance containing only "named constant integers" to be shared by all threads.
var RoomBattleStateIns RoomBattleState
func InitRoomBattleStateIns() {
RoomBattleStateIns = RoomBattleState{
IDLE: 0,
WAITING: -1,
PREPARE: 10000000,
IN_BATTLE: 10000001,
STOPPING_BATTLE_FOR_SETTLEMENT: 10000002,
IN_SETTLEMENT: 10000003,
IN_DISMISSAL: 10000004,
}
}
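// calRoomScore ranks rooms for matchmaking: the quadratic term peaks when the room is exactly half full and the
// battle-state constant pushes busy rooms to the bottom. Illustrative numbers (not from the original source):
// with roomPlayerCnt=2 and an IDLE room (state 0), 0 players yields -7.8125*0.25+5.0 = 3.046875 while 1 player
// yields exactly 5.0.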
func calRoomScore(inRoomPlayerCount int32, roomPlayerCnt int, currentRoomBattleState int32) float32 {
x := float32(inRoomPlayerCount) / float32(roomPlayerCnt)
d := (x - 0.5)
d2 := d * d
return -7.8125*d2 + 5.0 - float32(currentRoomBattleState)
}
type Room struct {
Id int32
Capacity int
collisionSpaceOffsetX float64
collisionSpaceOffsetY float64
Players map[int32]*Player
PlayersArr []*Player // ordered by joinIndex
CollisionSysMap map[int32]*resolv.Object
/**
* The following `PlayerDownsyncSessionDict` is NOT individually put
* under `type Player struct` for a reason.
*
* Upon each connection establishment, a new instance `player Player` is created for the given `playerId`.
* To be specific, if
* - that `playerId == 42` accidentally reconnects in just several milliseconds after a passive disconnection, e.g. due to bad wireless signal strength, and
* - that `type Player struct` contains a `DownsyncSession` field
*
* , then we might have to
* - clean up `previousPlayerInstance.DownsyncSession`
* - initialize `currentPlayerInstance.DownsyncSession`
*
* to avoid chaotic flaws.
*
* Moreover, during the invocation of `PlayerSignalToCloseDict`, the `Player` instance is supposed to be deallocated (though not synchronously).
*/
PlayerDownsyncSessionDict map[int32]*websocket.Conn
PlayerSignalToCloseDict map[int32]SignalToCloseConnCbType
Score float32
State int32
Index int
RenderFrameId int32
CurDynamicsRenderFrameId int32 // [WARNING] The backend dynamics ALWAYS MOVE FORWARD BY ALL-CONFIRMED INPUTFRAMES (either by upsync or forced), i.e. no rollback
ServerFps int32
BattleDurationFrames int32
BattleDurationNanos int64
InputFrameUpsyncDelayTolerance int32
MaxChasingRenderFramesPerUpdate int32
EffectivePlayerCount int32
DismissalWaitGroup sync.WaitGroup
Barriers map[int32]*Barrier
InputsBuffer *RingBuffer // Indices are STRICTLY consecutive
DiscreteInputsBuffer sync.Map // Indices are NOT NECESSARILY consecutive
RenderFrameBuffer *RingBuffer
LastAllConfirmedInputFrameId int32
LastAllConfirmedInputFrameIdWithChange int32
LastAllConfirmedInputList []uint64
InputDelayFrames int32 // in the count of render frames
NstDelayFrames int32 // network-single-trip delay in the count of render frames, proposed to be (InputDelayFrames >> 1) because we expect a round-trip delay to be exactly "InputDelayFrames"
InputScaleFrames uint32 // inputDelayedAndScaledFrameId = ((originalFrameId - InputDelayFrames) >> InputScaleFrames)
JoinIndexBooleanArr []bool
BackendDynamicsEnabled bool
LastRenderFrameIdTriggeredAt int64
PlayerDefaultSpeed int32
BattleColliderInfo // Embedded by composition to send centralized magic numbers
}
func (pR *Room) updateScore() {
pR.Score = calRoomScore(pR.EffectivePlayerCount, pR.Capacity, pR.State)
}
func (pR *Room) AddPlayerIfPossible(pPlayerFromDbInit *Player, session *websocket.Conn, signalToCloseConnOfThisPlayer SignalToCloseConnCbType) bool {
playerId := pPlayerFromDbInit.Id
// TODO: Any thread-safety concern for accessing "pR" here?
if RoomBattleStateIns.IDLE != pR.State && RoomBattleStateIns.WAITING != pR.State {
Logger.Warn("AddPlayerIfPossible error, roomState:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("roomState", pR.State), zap.Any("roomEffectivePlayerCount", pR.EffectivePlayerCount))
return false
}
if _, existent := pR.Players[playerId]; existent {
Logger.Warn("AddPlayerIfPossible error, existing in the room.PlayersDict:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("roomState", pR.State), zap.Any("roomEffectivePlayerCount", pR.EffectivePlayerCount))
return false
}
defer pR.onPlayerAdded(playerId)
pPlayerFromDbInit.AckingFrameId = -1
pPlayerFromDbInit.AckingInputFrameId = -1
pPlayerFromDbInit.LastSentInputFrameId = MAGIC_LAST_SENT_INPUT_FRAME_ID_NORMAL_ADDED
pPlayerFromDbInit.BattleState = PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK
pPlayerFromDbInit.Speed = pR.PlayerDefaultSpeed // Hardcoded
pPlayerFromDbInit.ColliderRadius = float64(24) // Hardcoded
pR.Players[playerId] = pPlayerFromDbInit
pR.PlayerDownsyncSessionDict[playerId] = session
pR.PlayerSignalToCloseDict[playerId] = signalToCloseConnOfThisPlayer
return true
}
func (pR *Room) ReAddPlayerIfPossible(pTmpPlayerInstance *Player, session *websocket.Conn, signalToCloseConnOfThisPlayer SignalToCloseConnCbType) bool {
playerId := pTmpPlayerInstance.Id
// TODO: Any thread-safety concern for accessing "pR" and "pEffectiveInRoomPlayerInstance" here?
if RoomBattleStateIns.PREPARE != pR.State && RoomBattleStateIns.WAITING != pR.State && RoomBattleStateIns.IN_BATTLE != pR.State && RoomBattleStateIns.IN_SETTLEMENT != pR.State && RoomBattleStateIns.IN_DISMISSAL != pR.State {
Logger.Warn("ReAddPlayerIfPossible error due to roomState:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("roomState", pR.State), zap.Any("roomEffectivePlayerCount", pR.EffectivePlayerCount))
return false
}
if _, existent := pR.Players[playerId]; !existent {
Logger.Warn("ReAddPlayerIfPossible error due to player nonexistent for room:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("roomState", pR.State), zap.Any("roomEffectivePlayerCount", pR.EffectivePlayerCount))
return false
}
/*
* WARNING: The "pTmpPlayerInstance *Player" used here is a temporarily constructed
* instance from "<proj-root>/battle_srv/ws/serve.go", which is NOT the same as "pR.Players[pTmpPlayerInstance.Id]".
* -- YFLu
*/
defer pR.onPlayerReAdded(playerId)
pR.PlayerDownsyncSessionDict[playerId] = session
pR.PlayerSignalToCloseDict[playerId] = signalToCloseConnOfThisPlayer
pEffectiveInRoomPlayerInstance := pR.Players[playerId]
pEffectiveInRoomPlayerInstance.AckingFrameId = -1
pEffectiveInRoomPlayerInstance.AckingInputFrameId = -1
pEffectiveInRoomPlayerInstance.LastSentInputFrameId = MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED
pEffectiveInRoomPlayerInstance.BattleState = PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK
pEffectiveInRoomPlayerInstance.Speed = pR.PlayerDefaultSpeed // Hardcoded
pEffectiveInRoomPlayerInstance.ColliderRadius = float64(16) // Hardcoded
Logger.Warn("ReAddPlayerIfPossible finished.", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId), zap.Any("joinIndex", pEffectiveInRoomPlayerInstance.JoinIndex), zap.Any("playerBattleState", pEffectiveInRoomPlayerInstance.BattleState), zap.Any("roomState", pR.State), zap.Any("roomEffectivePlayerCount", pR.EffectivePlayerCount), zap.Any("AckingFrameId", pEffectiveInRoomPlayerInstance.AckingFrameId), zap.Any("AckingInputFrameId", pEffectiveInRoomPlayerInstance.AckingInputFrameId), zap.Any("LastSentInputFrameId", pEffectiveInRoomPlayerInstance.LastSentInputFrameId))
return true
}
func (pR *Room) ChooseStage() error {
/*
* We use the verb "refresh" here to imply that upon invocation of this function, all colliders will be recovered if they were destroyed in the previous battle.
*
* -- YFLu, 2019-09-04
*/
pwd, err := os.Getwd()
if nil != err {
panic(err)
}
rand.Seed(time.Now().Unix())
stageNameList := []string{"dungeon" /*"dungeon", "simple", "richsoil" */}
chosenStageIndex := rand.Int() % len(stageNameList) // Hardcoded temporarily. -- YFLu
pR.StageName = stageNameList[chosenStageIndex]
relativePathForAllStages := "../frontend/assets/resources/map"
relativePathForChosenStage := fmt.Sprintf("%s/%s", relativePathForAllStages, pR.StageName)
pTmxMapIns := &TmxMap{}
absDirPathContainingDirectlyTmxFile := filepath.Join(pwd, relativePathForChosenStage)
absTmxFilePath := fmt.Sprintf("%s/map.tmx", absDirPathContainingDirectlyTmxFile)
if !filepath.IsAbs(absTmxFilePath) {
panic("Tmx filepath must be absolute!")
}
byteArr, err := ioutil.ReadFile(absTmxFilePath)
if nil != err {
panic(err)
}
err = xml.Unmarshal(byteArr, pTmxMapIns)
if nil != err {
panic(err)
}
// Obtain the content of `gidBoundariesMapInB2World`.
gidBoundariesMapInB2World := make(map[int]StrToPolygon2DListMap, 0)
for _, tileset := range pTmxMapIns.Tilesets {
relativeTsxFilePath := fmt.Sprintf("%s/%s", filepath.Join(pwd, relativePathForChosenStage), tileset.Source) // Note that "TmxTileset.Source" can be a string of "relative path".
absTsxFilePath, err := filepath.Abs(relativeTsxFilePath)
if nil != err {
panic(err)
}
if !filepath.IsAbs(absTsxFilePath) {
panic("Filepath must be absolute!")
}
byteArrOfTsxFile, err := ioutil.ReadFile(absTsxFilePath)
if nil != err {
panic(err)
}
DeserializeTsxToColliderDict(pTmxMapIns, byteArrOfTsxFile, int(tileset.FirstGid), gidBoundariesMapInB2World)
}
stageDiscreteW, stageDiscreteH, stageTileW, stageTileH, strToVec2DListMap, strToPolygon2DListMap, err := ParseTmxLayersAndGroups(pTmxMapIns, gidBoundariesMapInB2World)
if nil != err {
panic(err)
}
pR.StageDiscreteW = stageDiscreteW
pR.StageDiscreteH = stageDiscreteH
pR.StageTileW = stageTileW
pR.StageTileH = stageTileH
pR.StrToVec2DListMap = strToVec2DListMap
pR.StrToPolygon2DListMap = strToPolygon2DListMap
barrierPolygon2DList := *(strToPolygon2DListMap["Barrier"])
var barrierLocalIdInBattle int32 = 0
for _, polygon2DUnaligned := range barrierPolygon2DList.Eles {
polygon2D := AlignPolygon2DToBoundingBox(polygon2DUnaligned)
/*
// For debug-printing only.
Logger.Info("ChooseStage printing polygon2D for barrierPolygon2DList", zap.Any("barrierLocalIdInBattle", barrierLocalIdInBattle), zap.Any("polygon2D.Anchor", polygon2D.Anchor), zap.Any("polygon2D.Points", polygon2D.Points))
*/
pR.Barriers[barrierLocalIdInBattle] = &Barrier{
Boundary: polygon2D,
}
barrierLocalIdInBattle++
}
return nil
}
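// The ConvertTo* helpers below map between renderFrameIds and inputFrameIds. A worked example (illustrative
// only), assuming the defaults set in OnDismissed, i.e. InputDelayFrames=8 and InputScaleFrames=2:
// renderFrameId=40 maps to inputFrameId=(40-8)>>2=8; inputFrameId=8 is generated at renderFrameId=8<<2=32,
// first used at renderFrameId=32+8=40, and last used at renderFrameId=32+8+4-1=43.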
func (pR *Room) ConvertToInputFrameId(renderFrameId int32, inputDelayFrames int32) int32 {
// Specifically when "renderFrameId < inputDelayFrames", the result is 0.
return ((renderFrameId - inputDelayFrames) >> pR.InputScaleFrames)
}
func (pR *Room) ConvertToGeneratingRenderFrameId(inputFrameId int32) int32 {
return (inputFrameId << pR.InputScaleFrames)
}
func (pR *Room) ConvertToFirstUsedRenderFrameId(inputFrameId int32, inputDelayFrames int32) int32 {
return ((inputFrameId << pR.InputScaleFrames) + inputDelayFrames)
}
func (pR *Room) ConvertToLastUsedRenderFrameId(inputFrameId int32, inputDelayFrames int32) int32 {
return ((inputFrameId << pR.InputScaleFrames) + inputDelayFrames + (1 << pR.InputScaleFrames) - 1)
}
func (pR *Room) RenderFrameBufferString() string {
return fmt.Sprintf("{renderFrameId: %d, stRenderFrameId: %d, edRenderFrameId: %d, lastAllConfirmedRenderFrameId: %d}", pR.RenderFrameId, pR.RenderFrameBuffer.StFrameId, pR.RenderFrameBuffer.EdFrameId, pR.CurDynamicsRenderFrameId)
}
func (pR *Room) InputsBufferString(allDetails bool) string {
if allDetails {
// Appending to the slice of strings can be very SLOW due to on-demand heap allocation! Use this printing with caution.
s := make([]string, 0)
s = append(s, fmt.Sprintf("{renderFrameId: %v, stInputFrameId: %v, edInputFrameId: %v, lastAllConfirmedInputFrameIdWithChange: %v, lastAllConfirmedInputFrameId: %v}", pR.RenderFrameId, pR.InputsBuffer.StFrameId, pR.InputsBuffer.EdFrameId, pR.LastAllConfirmedInputFrameIdWithChange, pR.LastAllConfirmedInputFrameId))
for playerId, player := range pR.Players {
s = append(s, fmt.Sprintf("{playerId: %v, ackingFrameId: %v, ackingInputFrameId: %v, lastSentInputFrameId: %v}", playerId, player.AckingFrameId, player.AckingInputFrameId, player.LastSentInputFrameId))
}
for i := pR.InputsBuffer.StFrameId; i < pR.InputsBuffer.EdFrameId; i++ {
tmp := pR.InputsBuffer.GetByFrameId(i)
if nil == tmp {
break
}
f := tmp.(*InputFrameDownsync)
s = append(s, fmt.Sprintf("{inputFrameId: %v, inputList: %v, confirmedList: %v}", f.InputFrameId, f.InputList, f.ConfirmedList))
}
return strings.Join(s, "; ")
} else {
return fmt.Sprintf("{renderFrameId: %d, stInputFrameId: %d, edInputFrameId: %d, lastAllConfirmedInputFrameIdWithChange: %d, lastAllConfirmedInputFrameId: %d}", pR.RenderFrameId, pR.InputsBuffer.StFrameId, pR.InputsBuffer.EdFrameId, pR.LastAllConfirmedInputFrameIdWithChange, pR.LastAllConfirmedInputFrameId)
}
}
func (pR *Room) StartBattle() {
if RoomBattleStateIns.WAITING != pR.State {
Logger.Warn("[StartBattle] Battle not started due to not being WAITING!", zap.Any("roomId", pR.Id), zap.Any("roomState", pR.State))
return
}
// Always instantiates a new channel and lets the old one die out due to not being retained by any root reference.
nanosPerFrame := 1000000000 / int64(pR.ServerFps)
pR.RenderFrameId = 0
// Initialize the "collisionSys" as well as "RenderFrameBuffer"
pR.CurDynamicsRenderFrameId = 0
kickoffFrame := &RoomDownsyncFrame{
Id: pR.RenderFrameId,
Players: toPbPlayers(pR.Players, false),
CountdownNanos: pR.BattleDurationNanos,
}
pR.RenderFrameBuffer.Put(kickoffFrame)
// Refresh "Colliders"
spaceW := pR.StageDiscreteW * pR.StageTileW
spaceH := pR.StageDiscreteH * pR.StageTileH
pR.collisionSpaceOffsetX, pR.collisionSpaceOffsetY = float64(spaceW)*0.5, float64(spaceH)*0.5
pR.refreshColliders(spaceW, spaceH)
/**
* Will be triggered from a goroutine which executes the critical `Room.AddPlayerIfPossible`, thus the `battleMainLoop` should be detached.
* All of the consecutive stages, e.g. settlement, dismissal, should share the same goroutine with `battleMainLoop`.
*/
battleMainLoop := func() {
defer func() {
if r := recover(); r != nil {
Logger.Error("battleMainLoop, recovery spot#1, recovered from: ", zap.Any("roomId", pR.Id), zap.Any("panic", r))
pR.StopBattleForSettlement()
}
Logger.Info("The `battleMainLoop` is stopped for:", zap.Any("roomId", pR.Id))
pR.onBattleStoppedForSettlement()
}()
pR.LastRenderFrameIdTriggeredAt = utils.UnixtimeNano()
Logger.Info("The `battleMainLoop` is started for:", zap.Any("roomId", pR.Id))
for {
stCalculation := utils.UnixtimeNano()
elapsedNanosSinceLastFrameIdTriggered := stCalculation - pR.LastRenderFrameIdTriggeredAt
if elapsedNanosSinceLastFrameIdTriggered < pR.RollbackEstimatedDtNanos {
Logger.Debug(fmt.Sprintf("Avoiding too fast frame@roomId=%v, renderFrameId=%v: elapsedNanosSinceLastFrameIdTriggered=%v", pR.Id, pR.RenderFrameId, elapsedNanosSinceLastFrameIdTriggered))
continue
}
if pR.RenderFrameId > pR.BattleDurationFrames {
Logger.Info(fmt.Sprintf("The `battleMainLoop` for roomId=%v is stopped@renderFrameId=%v, with battleDurationFrames=%v:\n%v", pR.Id, pR.RenderFrameId, pR.BattleDurationFrames, pR.InputsBufferString(true)))
pR.StopBattleForSettlement()
return
}
if swapped := atomic.CompareAndSwapInt32(&pR.State, RoomBattleStateIns.IN_BATTLE, RoomBattleStateIns.IN_BATTLE); !swapped {
return
}
// Prefab and buffer backend inputFrameDownsync
if pR.shouldPrefabInputFrameDownsync(pR.RenderFrameId) {
noDelayInputFrameId := pR.ConvertToInputFrameId(pR.RenderFrameId, 0)
pR.prefabInputFrameDownsync(noDelayInputFrameId)
}
pR.markConfirmationIfApplicable()
unconfirmedMask := uint64(0)
if pR.BackendDynamicsEnabled {
// Force setting all-confirmed of buffered inputFrames periodically
unconfirmedMask = pR.forceConfirmationIfApplicable()
}
upperToSendInputFrameId := atomic.LoadInt32(&(pR.LastAllConfirmedInputFrameId))
/*
[WARNING]
Upon a resync on the frontend, "refRenderFrameId" MUST BE CAPPED somehow by "upperToSendInputFrameId": if the frontend resyncs itself to a more advanced value than given below, then upon its next renderFrame tick it might generate a non-consecutive "nextInputFrameId > frontend.recentInputCache.edFrameId+1".
If "NstDelayFrames" becomes larger, "pR.RenderFrameId - refRenderFrameId" possibly becomes larger because the force confirmation is delayed more.
Upon resync, it's still possible that "refRenderFrameId < frontend.chaserRenderFrameId" -- and this is allowed.
*/
refRenderFrameId := pR.ConvertToGeneratingRenderFrameId(upperToSendInputFrameId) + (1 << pR.InputScaleFrames) - 1
if refRenderFrameId > pR.RenderFrameId {
refRenderFrameId = pR.RenderFrameId
}
dynamicsDuration := int64(0)
if pR.BackendDynamicsEnabled {
if 0 <= pR.LastAllConfirmedInputFrameId {
dynamicsStartedAt := utils.UnixtimeNano()
// Apply "all-confirmed inputFrames" to move forward "pR.CurDynamicsRenderFrameId"
nextDynamicsRenderFrameId := pR.ConvertToLastUsedRenderFrameId(pR.LastAllConfirmedInputFrameId, pR.InputDelayFrames)
Logger.Debug(fmt.Sprintf("roomId=%v, room.RenderFrameId=%v, LastAllConfirmedInputFrameId=%v, InputDelayFrames=%v, nextDynamicsRenderFrameId=%v", pR.Id, pR.RenderFrameId, pR.LastAllConfirmedInputFrameId, pR.InputDelayFrames, nextDynamicsRenderFrameId))
pR.applyInputFrameDownsyncDynamics(pR.CurDynamicsRenderFrameId, nextDynamicsRenderFrameId, pR.collisionSpaceOffsetX, pR.collisionSpaceOffsetY)
dynamicsDuration = utils.UnixtimeNano() - dynamicsStartedAt
}
// [WARNING] The following inequality is seldom true, but it avoids the case where, in good network conditions, the frontend resyncs itself to a "too advanced frontend.renderFrameId" and then starts upsyncing a "too advanced inputFrameId".
if refRenderFrameId > pR.CurDynamicsRenderFrameId {
refRenderFrameId = pR.CurDynamicsRenderFrameId
}
}
for playerId, player := range pR.Players {
if swapped := atomic.CompareAndSwapInt32(&player.BattleState, PlayerBattleStateIns.ACTIVE, PlayerBattleStateIns.ACTIVE); !swapped {
// [WARNING] DON'T send anything if the player is disconnected, because it could jam the channel and cause significant delay upon "battle recovery for reconnected player".
continue
}
if 0 == pR.RenderFrameId {
kickoffFrame := pR.RenderFrameBuffer.GetByFrameId(0).(*RoomDownsyncFrame)
pR.sendSafely(kickoffFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_START, playerId)
} else {
// [WARNING] Websocket is TCP-based, thus no need to re-send a previously sent inputFrame to the same player!
toSendInputFrames := make([]*InputFrameDownsync, 0, pR.InputsBuffer.Cnt)
candidateToSendInputFrameId := pR.Players[playerId].LastSentInputFrameId + 1
if candidateToSendInputFrameId < pR.InputsBuffer.StFrameId {
// [WARNING] As "player.LastSentInputFrameId <= lastAllConfirmedInputFrameIdWithChange" for each iteration, and "lastAllConfirmedInputFrameIdWithChange <= lastAllConfirmedInputFrameId" where the latter is used to "applyInputFrameDownsyncDynamics" and then evict "pR.InputsBuffer", there's a very high possibility that "player.LastSentInputFrameId" has already been evicted.
Logger.Warn(fmt.Sprintf("LastSentInputFrameId already popped: roomId=%v, playerId=%v, lastSentInputFrameId=%v, playerAckingInputFrameId=%v, InputsBuffer=%v", pR.Id, playerId, candidateToSendInputFrameId-1, player.AckingInputFrameId, pR.InputsBufferString(false)))
candidateToSendInputFrameId = pR.InputsBuffer.StFrameId
}
if MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED == player.LastSentInputFrameId {
// For a rejoined player, guarantee that a matching inputFrame to apply exists when it resyncs to "refRenderFrameId"
candidateToSendInputFrameId = pR.ConvertToInputFrameId(refRenderFrameId, pR.InputDelayFrames)
Logger.Warn(fmt.Sprintf("Resetting refRenderFrame for rejoined player: roomId=%v, playerId=%v, refRenderFrameId=%v, candidateToSendInputFrameId=%v, upperToSendInputFrameId=%v, lastSentInputFrameId=%v, playerAckingInputFrameId=%v", pR.Id, playerId, refRenderFrameId, candidateToSendInputFrameId, upperToSendInputFrameId, player.LastSentInputFrameId, player.AckingInputFrameId))
}
// [WARNING] EDGE CASE HERE: Upon initialization, all of "lastAllConfirmedInputFrameId", "lastAllConfirmedInputFrameIdWithChange" and "anchorInputFrameId" are "-1", thus "candidateToSendInputFrameId" starts with "0", however "inputFrameId: 0" might not have been all confirmed!
for candidateToSendInputFrameId <= upperToSendInputFrameId {
tmp := pR.InputsBuffer.GetByFrameId(candidateToSendInputFrameId)
if nil == tmp {
panic(fmt.Sprintf("Required inputFrameId=%v for roomId=%v, playerId=%v doesn't exist! InputsBuffer=%v", candidateToSendInputFrameId, pR.Id, playerId, pR.InputsBufferString(false)))
}
f := tmp.(*InputFrameDownsync)
if pR.inputFrameIdDebuggable(candidateToSendInputFrameId) {
Logger.Debug("inputFrame lifecycle#3[sending]:", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId), zap.Any("playerAckingInputFrameId", player.AckingInputFrameId), zap.Any("inputFrameId", candidateToSendInputFrameId), zap.Any("inputFrameId-doublecheck", f.InputFrameId), zap.Any("InputsBuffer", pR.InputsBufferString(false)), zap.Any("ConfirmedList", f.ConfirmedList))
}
toSendInputFrames = append(toSendInputFrames, f)
candidateToSendInputFrameId++
}
if 0 >= len(toSendInputFrames) {
// [WARNING] When sending DOWNSYNC_MSG_ACT_FORCED_RESYNC, there MUST BE accompanying "toSendInputFrames" for calculating "refRenderFrameId"!
if MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED == player.LastSentInputFrameId {
Logger.Warn(fmt.Sprintf("Not sending due to empty toSendInputFrames: roomId=%v, playerId=%v, refRenderFrameId=%v, upperToSendInputFrameId=%v, lastSentInputFrameId=%v, playerAckingInputFrameId=%v", pR.Id, playerId, refRenderFrameId, upperToSendInputFrameId, player.LastSentInputFrameId, player.AckingInputFrameId))
}
2022-10-02 16:22:05 +00:00
continue
}
/*
Resync helps
1. when a player with a slower frontend clock lags significantly behind and thus wouldn't get its inputUpsync recognized due to the faster "forceConfirmation"
2. reconnection
*/
shouldResync1 := (MAGIC_LAST_SENT_INPUT_FRAME_ID_READDED == player.LastSentInputFrameId)
shouldResync2 := (0 < (unconfirmedMask & uint64(1<<uint32(player.JoinIndex-1)))) // This condition is critical, if we don't send resync upon this condition, the "reconnected or slowly-clocking player" might never get its input synced
// shouldResync2 := (0 < unconfirmedMask) // An easier version of the above, might keep sending "refRenderFrame"s to still connected players when any player is disconnected
if pR.BackendDynamicsEnabled && (shouldResync1 || shouldResync2) {
tmp := pR.RenderFrameBuffer.GetByFrameId(refRenderFrameId)
if nil == tmp {
panic(fmt.Sprintf("Required refRenderFrameId=%v for roomId=%v, playerId=%v, candidateToSendInputFrameId=%v doesn't exist! InputsBuffer=%v, RenderFrameBuffer=%v", refRenderFrameId, pR.Id, playerId, candidateToSendInputFrameId, pR.InputsBufferString(false), pR.RenderFrameBufferString()))
}
refRenderFrame := tmp.(*RoomDownsyncFrame)
pR.sendSafely(refRenderFrame, toSendInputFrames, DOWNSYNC_MSG_ACT_FORCED_RESYNC, playerId)
} else {
pR.sendSafely(nil, toSendInputFrames, DOWNSYNC_MSG_ACT_INPUT_BATCH, playerId)
}
pR.Players[playerId].LastSentInputFrameId = candidateToSendInputFrameId - 1
}
}
if pR.BackendDynamicsEnabled {
// Evict no longer required "RenderFrameBuffer"
for pR.RenderFrameBuffer.N < pR.RenderFrameBuffer.Cnt || (0 < pR.RenderFrameBuffer.Cnt && pR.RenderFrameBuffer.StFrameId < refRenderFrameId) {
_ = pR.RenderFrameBuffer.Pop()
}
}
minToKeepInputFrameId := pR.ConvertToInputFrameId(refRenderFrameId, pR.InputDelayFrames) - pR.SpAtkLookupFrames
/*
[WARNING]
The following update to "minToKeepInputFrameId" is necessary because when "false == pR.BackendDynamicsEnabled", the variable "refRenderFrameId" is not well defined.
*/
minLastSentInputFrameId := int32(math.MaxInt32)
for _, player := range pR.Players {
if PlayerBattleStateIns.ACTIVE != player.BattleState {
continue
}
if player.LastSentInputFrameId >= minLastSentInputFrameId {
continue
}
minLastSentInputFrameId = player.LastSentInputFrameId
}
if minLastSentInputFrameId < minToKeepInputFrameId {
minToKeepInputFrameId = minLastSentInputFrameId
}
for pR.InputsBuffer.N < pR.InputsBuffer.Cnt || (0 < pR.InputsBuffer.Cnt && pR.InputsBuffer.StFrameId < minToKeepInputFrameId) {
f := pR.InputsBuffer.Pop().(*InputFrameDownsync)
if pR.inputFrameIdDebuggable(f.InputFrameId) {
// Popping of an "inputFrame" happens only AFTER it has been all-confirmed, because eviction requires the "inputFrame" to be fully acked
Logger.Debug("inputFrame lifecycle#4[popped]:", zap.Any("roomId", pR.Id), zap.Any("inputFrameId", f.InputFrameId), zap.Any("minToKeepInputFrameId", minToKeepInputFrameId), zap.Any("InputsBuffer", pR.InputsBufferString(false)))
}
}
pR.RenderFrameId++
elapsedInCalculation := (utils.UnixtimeNano() - stCalculation)
if elapsedInCalculation > nanosPerFrame {
Logger.Warn(fmt.Sprintf("SLOW FRAME! Elapsed time statistics: roomId=%v, room.RenderFrameId=%v, elapsedInCalculation=%v ns, dynamicsDuration=%v ns, expected nanosPerFrame=%v", pR.Id, pR.RenderFrameId, elapsedInCalculation, dynamicsDuration, nanosPerFrame))
}
time.Sleep(time.Duration(nanosPerFrame - elapsedInCalculation))
}
}
pR.onBattlePrepare(func() {
pR.onBattleStarted() // NOTE: Deliberately not using `defer`.
go battleMainLoop()
})
}
func (pR *Room) toDiscreteInputsBufferIndex(inputFrameId int32, joinIndex int32) int32 {
return (inputFrameId << 2) + joinIndex // the 2-bit shift keeps keys unique for joinIndex 1..4 (joinIndex 0 is never used by an active player)
}
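// OnBattleCmdReceived handles one upsync "WsReq" from a player: it ignores anything outside IN_BATTLE, records
// the player's acking render/input frame ids, and stores each still-relevant "inputFrameUpsync" into
// "DiscreteInputsBuffer" keyed by (inputFrameId, joinIndex); "markConfirmationIfApplicable" later merges those
// entries into the authoritative "InputsBuffer".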
func (pR *Room) OnBattleCmdReceived(pReq *WsReq) {
if swapped := atomic.CompareAndSwapInt32(&pR.State, RoomBattleStateIns.IN_BATTLE, RoomBattleStateIns.IN_BATTLE); !swapped {
return
}
playerId := pReq.PlayerId
inputFrameUpsyncBatch := pReq.InputFrameUpsyncBatch
ackingFrameId := pReq.AckingFrameId
ackingInputFrameId := pReq.AckingInputFrameId
if _, existent := pR.Players[playerId]; !existent {
Logger.Warn(fmt.Sprintf("upcmd player doesn't exist: roomId=%v, playerId=%v", pR.Id, playerId))
return
}
if swapped := atomic.CompareAndSwapInt32(&(pR.Players[playerId].AckingFrameId), pR.Players[playerId].AckingFrameId, ackingFrameId); !swapped {
panic(fmt.Sprintf("Failed to update AckingFrameId to %v for roomId=%v, playerId=%v", ackingFrameId, pR.Id, playerId))
}
if swapped := atomic.CompareAndSwapInt32(&(pR.Players[playerId].AckingInputFrameId), pR.Players[playerId].AckingInputFrameId, ackingInputFrameId); !swapped {
panic(fmt.Sprintf("Failed to update AckingInputFrameId to %v for roomId=%v, playerId=%v", ackingInputFrameId, pR.Id, playerId))
}
for _, inputFrameUpsync := range inputFrameUpsyncBatch {
clientInputFrameId := inputFrameUpsync.InputFrameId
if clientInputFrameId < pR.InputsBuffer.StFrameId {
// "pR.InputsBuffer.StFrameId" is monotonically increasing, thus if "clientInputFrameId < pR.InputsBuffer.StFrameId" at any moment, that upsync stays obsolete from then on.
Logger.Debug(fmt.Sprintf("Omitting obsolete inputFrameUpsync: roomId=%v, playerId=%v, clientInputFrameId=%v, InputsBuffer=%v", pR.Id, playerId, clientInputFrameId, pR.InputsBufferString(false)))
continue
}
bufIndex := pR.toDiscreteInputsBufferIndex(clientInputFrameId, pReq.JoinIndex)
pR.DiscreteInputsBuffer.Store(bufIndex, inputFrameUpsync)
// TODO: "pR.DiscreteInputsBuffer" might become too large with outdated "inputFrameUpsync" items, maintain another queue ordered by timestamp to evict them
}
}
func (pR *Room) onInputFrameDownsyncAllConfirmed(inputFrameDownsync *InputFrameDownsync, playerId int32) {
inputFrameId := inputFrameDownsync.InputFrameId
if -1 == pR.LastAllConfirmedInputFrameIdWithChange || false == pR.equalInputLists(inputFrameDownsync.InputList, pR.LastAllConfirmedInputList) {
if -1 == playerId {
Logger.Debug(fmt.Sprintf("Key inputFrame change: roomId=%v, newInputFrameId=%v, lastInputFrameId=%v, newInputList=%v, lastInputList=%v, InputsBuffer=%v", pR.Id, inputFrameId, pR.LastAllConfirmedInputFrameId, inputFrameDownsync.InputList, pR.LastAllConfirmedInputList, pR.InputsBufferString(false)))
} else {
Logger.Debug(fmt.Sprintf("Key inputFrame change: roomId=%v, playerId=%v, newInputFrameId=%v, lastInputFrameId=%v, newInputList=%v, lastInputList=%v, InputsBuffer=%v", pR.Id, playerId, inputFrameId, pR.LastAllConfirmedInputFrameId, inputFrameDownsync.InputList, pR.LastAllConfirmedInputList, pR.InputsBufferString(false)))
}
atomic.StoreInt32(&(pR.LastAllConfirmedInputFrameIdWithChange), inputFrameId)
}
atomic.StoreInt32(&(pR.LastAllConfirmedInputFrameId), inputFrameId) // [WARNING] It's IMPORTANT that "pR.LastAllConfirmedInputFrameId" is NOT NECESSARILY CONSECUTIVE, i.e. if one of the players disconnects and reconnects within a considerable amount of frame delays!
for i, v := range inputFrameDownsync.InputList {
// To avoid potential misuse of pointers
pR.LastAllConfirmedInputList[i] = v
}
if -1 == playerId {
Logger.Debug(fmt.Sprintf("inputFrame lifecycle#2[forced-allconfirmed]: roomId=%v, InputsBuffer=%v", pR.Id, pR.InputsBufferString(false)))
} else {
Logger.Debug(fmt.Sprintf("inputFrame lifecycle#2[allconfirmed]: roomId=%v, playerId=%v, InputsBuffer=%v", pR.Id, playerId, pR.InputsBufferString(false)))
}
}
func (pR *Room) equalInputLists(lhs []uint64, rhs []uint64) bool {
if len(lhs) != len(rhs) {
return false
}
for i := range lhs {
if lhs[i] != rhs[i] {
return false
}
}
return true
}
func (pR *Room) StopBattleForSettlement() {
if RoomBattleStateIns.IN_BATTLE != pR.State {
return
}
pR.State = RoomBattleStateIns.STOPPING_BATTLE_FOR_SETTLEMENT
Logger.Info("Stopping the `battleMainLoop` for:", zap.Any("roomId", pR.Id))
pR.RenderFrameId++
for playerId := range pR.Players {
assembledFrame := RoomDownsyncFrame{
Id: pR.RenderFrameId,
Players: toPbPlayers(pR.Players, false),
CountdownNanos: -1, // TODO: Replace this magic constant!
}
pR.sendSafely(&assembledFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_STOPPED, playerId)
}
// Note that `pR.onBattleStoppedForSettlement` will be called by `battleMainLoop`.
}
func (pR *Room) onBattleStarted() {
if RoomBattleStateIns.PREPARE != pR.State {
return
}
pR.State = RoomBattleStateIns.IN_BATTLE
pR.updateScore()
}
func (pR *Room) onBattlePrepare(cb BattleStartCbType) {
if RoomBattleStateIns.WAITING != pR.State {
Logger.Warn("[onBattlePrepare] Battle not started after all players' battle state checked!", zap.Any("roomId", pR.Id), zap.Any("roomState", pR.State))
return
}
pR.State = RoomBattleStateIns.PREPARE
Logger.Info("Battle state transitted to RoomBattleStateIns.PREPARE for:", zap.Any("roomId", pR.Id))
battleReadyToStartFrame := &RoomDownsyncFrame{
Id: DOWNSYNC_MSG_ACT_BATTLE_READY_TO_START,
Players: toPbPlayers(pR.Players, true),
CountdownNanos: pR.BattleDurationNanos,
}
Logger.Info("Sending out frame for RoomBattleState.PREPARE:", zap.Any("battleReadyToStartFrame", battleReadyToStartFrame))
for _, player := range pR.Players {
pR.sendSafely(battleReadyToStartFrame, nil, DOWNSYNC_MSG_ACT_BATTLE_READY_TO_START, player.Id)
}
battlePreparationNanos := int64(6000000000)
preparationLoop := func() {
defer func() {
Logger.Info("The `preparationLoop` is stopped for:", zap.Any("roomId", pR.Id))
cb()
}()
preparationLoopStartedNanos := utils.UnixtimeNano()
totalElapsedNanos := int64(0)
for {
if totalElapsedNanos > battlePreparationNanos {
break
}
now := utils.UnixtimeNano()
totalElapsedNanos = (now - preparationLoopStartedNanos)
time.Sleep(time.Duration(battlePreparationNanos - totalElapsedNanos))
}
}
go preparationLoop()
}
func (pR *Room) onBattleStoppedForSettlement() {
if RoomBattleStateIns.STOPPING_BATTLE_FOR_SETTLEMENT != pR.State {
return
}
defer func() {
pR.onSettlementCompleted()
}()
pR.State = RoomBattleStateIns.IN_SETTLEMENT
Logger.Info("The room is in settlement:", zap.Any("roomId", pR.Id))
// TODO: Some settlement labor.
}
func (pR *Room) onSettlementCompleted() {
pR.Dismiss()
}
func (pR *Room) Dismiss() {
if RoomBattleStateIns.IN_SETTLEMENT != pR.State {
return
}
pR.State = RoomBattleStateIns.IN_DISMISSAL
if 0 < len(pR.Players) {
Logger.Info("The room is in dismissal:", zap.Any("roomId", pR.Id))
for playerId := range pR.Players {
Logger.Info("Adding 1 to pR.DismissalWaitGroup:", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId))
pR.DismissalWaitGroup.Add(1)
pR.expelPlayerForDismissal(playerId)
pR.DismissalWaitGroup.Done()
Logger.Info("Decremented 1 to pR.DismissalWaitGroup:", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId))
}
pR.DismissalWaitGroup.Wait()
}
pR.OnDismissed()
}
func (pR *Room) OnDismissed() {
// Always instantiates new heap-RAM blocks and lets the old blocks die out due to not being retained by any root reference.
pR.WorldToVirtualGridRatio = float64(1000)
pR.VirtualGridToWorldRatio = float64(1.0) / pR.WorldToVirtualGridRatio // this is a one-off computation, should avoid division in iterations
pR.SpAtkLookupFrames = 5
pR.PlayerDefaultSpeed = int32(float64(2) * pR.WorldToVirtualGridRatio) // in virtual grids per frame
pR.Players = make(map[int32]*Player)
pR.PlayersArr = make([]*Player, pR.Capacity)
pR.CollisionSysMap = make(map[int32]*resolv.Object)
pR.PlayerDownsyncSessionDict = make(map[int32]*websocket.Conn)
pR.PlayerSignalToCloseDict = make(map[int32]SignalToCloseConnCbType)
pR.JoinIndexBooleanArr = make([]bool, pR.Capacity)
pR.Barriers = make(map[int32]*Barrier)
pR.InputsBuffer = NewRingBuffer(1024)
pR.DiscreteInputsBuffer = sync.Map{}
pR.RenderFrameBuffer = NewRingBuffer(1024)
pR.LastAllConfirmedInputFrameId = -1
pR.LastAllConfirmedInputFrameIdWithChange = -1
pR.LastAllConfirmedInputList = make([]uint64, pR.Capacity)
pR.RenderFrameId = 0
pR.CurDynamicsRenderFrameId = 0
pR.InputDelayFrames = 8
pR.NstDelayFrames = 8
pR.InputScaleFrames = uint32(2)
pR.ServerFps = 60
pR.RollbackEstimatedDtMillis = 16.667 // Use fixed-and-low-precision to mitigate the inconsistent floating-point-number issue between Golang and JavaScript
pR.RollbackEstimatedDtNanos = 16666666 // A little smaller than the actual per frame time, just for preventing FAST FRAME
pR.BattleDurationFrames = 30 * pR.ServerFps
pR.BattleDurationNanos = int64(pR.BattleDurationFrames) * (pR.RollbackEstimatedDtNanos + 1)
pR.InputFrameUpsyncDelayTolerance = 2
pR.MaxChasingRenderFramesPerUpdate = 8
pR.BackendDynamicsEnabled = true // [WARNING] When "false", recovery upon reconnection wouldn't work!
pR.ChooseStage()
pR.EffectivePlayerCount = 0
// [WARNING] It's deliberately ordered such that "pR.State = RoomBattleStateIns.IDLE" is put AFTER all the refreshing operations above.
pR.State = RoomBattleStateIns.IDLE
pR.updateScore()
Logger.Info("The room is completely dismissed:", zap.Any("roomId", pR.Id))
}
func (pR *Room) expelPlayerDuringGame(playerId int32) {
defer pR.onPlayerExpelledDuringGame(playerId)
}
func (pR *Room) expelPlayerForDismissal(playerId int32) {
pR.onPlayerExpelledForDismissal(playerId)
}
func (pR *Room) onPlayerExpelledDuringGame(playerId int32) {
pR.onPlayerLost(playerId)
}
func (pR *Room) onPlayerExpelledForDismissal(playerId int32) {
pR.onPlayerLost(playerId)
Logger.Info("onPlayerExpelledForDismissal:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("nowRoomBattleState", pR.State), zap.Any("nowRoomEffectivePlayerCount", pR.EffectivePlayerCount))
}
func (pR *Room) OnPlayerDisconnected(playerId int32) {
defer func() {
if r := recover(); r != nil {
Logger.Error("Room OnPlayerDisconnected, recovery spot#1, recovered from: ", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("panic", r))
}
}()
if _, existent := pR.Players[playerId]; existent {
switch pR.Players[playerId].BattleState {
// Go switch cases don't fall through, so the terminal states must share a single case clause for the early return to cover all of them.
case PlayerBattleStateIns.DISCONNECTED, PlayerBattleStateIns.LOST, PlayerBattleStateIns.EXPELLED_DURING_GAME, PlayerBattleStateIns.EXPELLED_IN_DISMISSAL:
Logger.Info("Room OnPlayerDisconnected[early return #1]:", zap.Any("playerId", playerId), zap.Any("playerBattleState", pR.Players[playerId].BattleState), zap.Any("roomId", pR.Id), zap.Any("nowRoomBattleState", pR.State), zap.Any("nowRoomEffectivePlayerCount", pR.EffectivePlayerCount))
return
}
} else {
// Not even the "pR.Players[playerId]" exists.
Logger.Info("Room OnPlayerDisconnected[early return #2]:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("nowRoomBattleState", pR.State), zap.Any("nowRoomEffectivePlayerCount", pR.EffectivePlayerCount))
return
}
switch pR.State {
case RoomBattleStateIns.WAITING:
pR.onPlayerLost(playerId)
delete(pR.Players, playerId) // Note that this statement MUST be put AFTER `pR.onPlayerLost(...)` to avoid nil pointer exception.
if 0 == pR.EffectivePlayerCount {
pR.State = RoomBattleStateIns.IDLE
}
pR.updateScore()
Logger.Info("Player disconnected while room is at RoomBattleStateIns.WAITING:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("nowRoomBattleState", pR.State), zap.Any("nowRoomEffectivePlayerCount", pR.EffectivePlayerCount))
default:
pR.Players[playerId].BattleState = PlayerBattleStateIns.DISCONNECTED
pR.clearPlayerNetworkSession(playerId) // Still need clear the network session pointers, because "OnPlayerDisconnected" is only triggered from "signalToCloseConnOfThisPlayer" in "ws/serve.go", when the same player reconnects the network session pointers will be re-assigned
Logger.Info("Player disconnected from room:", zap.Any("playerId", playerId), zap.Any("playerBattleState", pR.Players[playerId].BattleState), zap.Any("roomId", pR.Id), zap.Any("nowRoomBattleState", pR.State), zap.Any("nowRoomEffectivePlayerCount", pR.EffectivePlayerCount))
}
}
func (pR *Room) onPlayerLost(playerId int32) {
defer func() {
if r := recover(); r != nil {
Logger.Error("Room OnPlayerLost, recovery spot, recovered from: ", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("panic", r))
}
}()
if player, existent := pR.Players[playerId]; existent {
player.BattleState = PlayerBattleStateIns.LOST
pR.clearPlayerNetworkSession(playerId)
pR.EffectivePlayerCount--
indiceInJoinIndexBooleanArr := int(player.JoinIndex - 1)
if (0 <= indiceInJoinIndexBooleanArr) && (indiceInJoinIndexBooleanArr < len(pR.JoinIndexBooleanArr)) {
pR.JoinIndexBooleanArr[indiceInJoinIndexBooleanArr] = false
} else {
Logger.Warn("Room OnPlayerLost, pR.JoinIndexBooleanArr is out of range: ", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("indiceInJoinIndexBooleanArr", indiceInJoinIndexBooleanArr), zap.Any("len(pR.JoinIndexBooleanArr)", len(pR.JoinIndexBooleanArr)))
}
player.JoinIndex = MAGIC_JOIN_INDEX_INVALID
Logger.Info("Room OnPlayerLost: ", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("resulted pR.JoinIndexBooleanArr", pR.JoinIndexBooleanArr))
}
}
func (pR *Room) clearPlayerNetworkSession(playerId int32) {
if _, y := pR.PlayerDownsyncSessionDict[playerId]; y {
Logger.Info("sending termination symbol for:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id))
delete(pR.PlayerDownsyncSessionDict, playerId)
delete(pR.PlayerSignalToCloseDict, playerId)
}
}
func (pR *Room) onPlayerAdded(playerId int32) {
pR.EffectivePlayerCount++
if 1 == pR.EffectivePlayerCount {
pR.State = RoomBattleStateIns.WAITING
}
for index, value := range pR.JoinIndexBooleanArr {
if false == value {
pR.Players[playerId].JoinIndex = int32(index) + 1
pR.JoinIndexBooleanArr[index] = true
// Lazily assign the initial position of "Player" for "RoomDownsyncFrame".
playerPosList := *(pR.StrToVec2DListMap["PlayerStartingPos"])
if index >= len(playerPosList.Eles) {
panic(fmt.Sprintf("onPlayerAdded error, index >= len(playerPosList), roomId=%v, playerId=%v, roomState=%v, roomEffectivePlayerCount=%v", pR.Id, playerId, pR.State, pR.EffectivePlayerCount))
}
playerPos := playerPosList.Eles[index]
if nil == playerPos {
panic(fmt.Sprintf("onPlayerAdded error, nil == playerPos, roomId=%v, playerId=%v, roomState=%v, roomEffectivePlayerCount=%v", pR.Id, playerId, pR.State, pR.EffectivePlayerCount))
}
pR.Players[playerId].VirtualGridX, pR.Players[playerId].VirtualGridY = WorldToVirtualGridPos(playerPos.X, playerPos.Y, pR.WorldToVirtualGridRatio)
break
}
}
pR.updateScore()
Logger.Info("onPlayerAdded:", zap.Any("playerId", playerId), zap.Any("roomId", pR.Id), zap.Any("joinIndex", pR.Players[playerId].JoinIndex), zap.Any("EffectivePlayerCount", pR.EffectivePlayerCount), zap.Any("resulted pR.JoinIndexBooleanArr", pR.JoinIndexBooleanArr), zap.Any("RoomBattleState", pR.State))
}
func (pR *Room) onPlayerReAdded(playerId int32) {
/*
* [WARNING]
*
* If a player quits at "RoomBattleState.WAITING", then his/her re-joining will always invoke `AddPlayerIfPossible(...)`. Therefore, this
* function will only be invoked for players who quit the battle at ">RoomBattleState.WAITING" and re-join at "RoomBattleState.IN_BATTLE", during which the `pR.JoinIndexBooleanArr` doesn't change.
*/
Logger.Info("Room got `onPlayerReAdded` invoked,", zap.Any("roomId", pR.Id), zap.Any("playerId", playerId), zap.Any("resulted pR.JoinIndexBooleanArr", pR.JoinIndexBooleanArr))
pR.updateScore()
}
func (pR *Room) OnPlayerBattleColliderAcked(playerId int32) bool {
targetPlayer, existing := pR.Players[playerId]
if false == existing {
return false
}
// Broadcast added or readded player info to all players in the same room
for _, eachPlayer := range pR.Players {
/*
[WARNING]
This `playerAckedFrame` is the first ever "RoomDownsyncFrame" for every "PersistentSessionClient" on the frontend, and it goes right after each "BattleColliderInfo".
By making use of the sequential nature of each ws session, all later "RoomDownsyncFrame"s generated after `pRoom.StartBattle()` will be put behind this `playerAckedFrame`.
This function is triggered by an upsync message via WebSocket, thus downsync sending is also available by now.
*/
switch targetPlayer.BattleState {
case PlayerBattleStateIns.ADDED_PENDING_BATTLE_COLLIDER_ACK:
playerAckedFrame := &RoomDownsyncFrame{
Id: pR.RenderFrameId,
Players: toPbPlayers(pR.Players, true),
}
pR.sendSafely(playerAckedFrame, nil, DOWNSYNC_MSG_ACT_PLAYER_ADDED_AND_ACKED, eachPlayer.Id)
case PlayerBattleStateIns.READDED_PENDING_BATTLE_COLLIDER_ACK:
playerAckedFrame := &RoomDownsyncFrame{
Id: pR.RenderFrameId,
Players: toPbPlayers(pR.Players, true),
}
pR.sendSafely(playerAckedFrame, nil, DOWNSYNC_MSG_ACT_PLAYER_READDED_AND_ACKED, eachPlayer.Id)
default:
}
}
targetPlayer.BattleState = PlayerBattleStateIns.ACTIVE
Logger.Info(fmt.Sprintf("OnPlayerBattleColliderAcked: roomId=%v, roomState=%v, targetPlayerId=%v, targetPlayerBattleState=%v, capacity=%v, EffectivePlayerCount=%v", pR.Id, pR.State, targetPlayer.Id, targetPlayer.BattleState, pR.Capacity, pR.EffectivePlayerCount))
if pR.Capacity == int(pR.EffectivePlayerCount) {
allAcked := true
for _, p := range pR.Players {
if PlayerBattleStateIns.ACTIVE != p.BattleState {
Logger.Info("unexpectedly got an inactive player", zap.Any("roomId", pR.Id), zap.Any("playerId", p.Id), zap.Any("battleState", p.BattleState))
allAcked = false
break
}
}
if true == allAcked {
pR.StartBattle() // WON'T run if the battle state is not in WAITING.
}
}
pR.updateScore()
return true
}
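// sendSafely marshals a "WsResp" protobuf carrying an optional "RoomDownsyncFrame" and/or a batch of
// "InputFrameDownsync"s, then writes it as a single binary WebSocket message to the player's downsync session.
// Any panic (e.g. from marshaling or from writing to a dead connection) is caught by the deferred recover,
// which invokes the player's signal-to-close callback instead of crashing the caller.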
func (pR *Room) sendSafely(roomDownsyncFrame *RoomDownsyncFrame, toSendFrames []*InputFrameDownsync, act int32, playerId int32) {
defer func() {
if r := recover(); r != nil {
pR.PlayerSignalToCloseDict[playerId](Constants.RetCode.UnknownError, fmt.Sprintf("%v", r))
}
}()
pResp := &WsResp{
Ret: int32(Constants.RetCode.Ok),
Act: act,
Rdf: roomDownsyncFrame,
InputFrameDownsyncBatch: toSendFrames,
}
theBytes, marshalErr := proto.Marshal(pResp)
if nil != marshalErr {
panic(fmt.Sprintf("Error marshaling downsync message: roomId=%v, playerId=%v, roomState=%v, roomEffectivePlayerCount=%v", pR.Id, playerId, pR.State, pR.EffectivePlayerCount))
}
if err := pR.PlayerDownsyncSessionDict[playerId].WriteMessage(websocket.BinaryMessage, theBytes); nil != err {
panic(fmt.Sprintf("Error sending downsync message: roomId=%v, playerId=%v, roomState=%v, roomEffectivePlayerCount=%v, err=%v", pR.Id, playerId, pR.State, pR.EffectivePlayerCount, err))
}
}
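// shouldPrefabInputFrameDownsync is a power-of-two modulo check: with InputScaleFrames=2 it returns true for
// renderFrameIds 0, 4, 8, ..., i.e. one inputFrameDownsync is prefabbed every (1 << InputScaleFrames) render
// frames, mirroring the frontend's "shouldGenerateInputFrameUpsync" cadence.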
func (pR *Room) shouldPrefabInputFrameDownsync(renderFrameId int32) bool {
return ((renderFrameId & ((1 << pR.InputScaleFrames) - 1)) == 0)
}
func (pR *Room) prefabInputFrameDownsync(inputFrameId int32) *InputFrameDownsync {
/*
Kindly note that on the backend the prefab is much simpler than its frontend counterpart, because the frontend will upsync its latest command immediately if there's any change w.r.t. its own prev cmd; thus if no upsync is received from a frontend,
- EITHER it's due to local lag and bad network,
- OR there's no change w.r.t. its prev cmd.
*/
var currInputFrameDownsync *InputFrameDownsync = nil
if 0 == inputFrameId && 0 == pR.InputsBuffer.Cnt {
currInputFrameDownsync = &InputFrameDownsync{
InputFrameId: 0,
InputList: make([]uint64, pR.Capacity),
ConfirmedList: uint64(0),
}
} else {
tmp := pR.InputsBuffer.GetByFrameId(inputFrameId - 1) // There's no need for the backend to find the "lastAllConfirmed inputs" for prefabbing, whether "BackendDynamicsEnabled" is true or false
if nil == tmp {
panic(fmt.Sprintf("Error prefabbing inputFrameDownsync: roomId=%v, InputsBuffer=%v", pR.Id, pR.InputsBufferString(false)))
}
prevInputFrameDownsync := tmp.(*InputFrameDownsync)
currInputList := prevInputFrameDownsync.InputList // Would be a clone of the values
currInputFrameDownsync = &InputFrameDownsync{
InputFrameId: inputFrameId,
InputList: currInputList,
ConfirmedList: uint64(0),
}
}
pR.InputsBuffer.Put(currInputFrameDownsync)
return currInputFrameDownsync
}
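// markConfirmationIfApplicable scans forward from (LastAllConfirmedInputFrameId + 1): for each buffered
// inputFrameDownsync it drains any matching per-player upsync from "DiscreteInputsBuffer" into "InputList" and
// sets that player's bit in "ConfirmedList"; the scan stops at the first frame whose "ConfirmedList" is still
// short of "allConfirmedMask", so all-confirmation always advances over consecutive inputFrameIds.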
func (pR *Room) markConfirmationIfApplicable() {
inputFrameId1 := pR.LastAllConfirmedInputFrameId + 1
totPlayerCnt := uint32(pR.Capacity)
allConfirmedMask := uint64((1 << totPlayerCnt) - 1)
for inputFrameId := inputFrameId1; inputFrameId < pR.InputsBuffer.EdFrameId; inputFrameId++ {
tmp := pR.InputsBuffer.GetByFrameId(inputFrameId)
if nil == tmp {
panic(fmt.Sprintf("inputFrameId=%v doesn't exist for roomId=%v, this is abnormal because the server should prefab inputFrameDownsync in a most advanced pace, check the prefab logic! InputsBuffer=%v", inputFrameId, pR.Id, pR.InputsBufferString(false)))
}
inputFrameDownsync := tmp.(*InputFrameDownsync)
for _, player := range pR.Players {
bufIndex := pR.toDiscreteInputsBufferIndex(inputFrameId, player.JoinIndex)
tmp, loaded := pR.DiscreteInputsBuffer.LoadAndDelete(bufIndex) // It's safe to "LoadAndDelete" here because the "inputFrameUpsync" of this player is already remembered by the corresponding "inputFrameDownsync".
if !loaded {
continue
}
inputFrameUpsync := tmp.(*InputFrameUpsync)
indiceInJoinIndexBooleanArr := uint32(player.JoinIndex - 1)
inputFrameDownsync.InputList[indiceInJoinIndexBooleanArr] = inputFrameUpsync.Encoded
inputFrameDownsync.ConfirmedList |= (1 << indiceInJoinIndexBooleanArr)
}
if allConfirmedMask == inputFrameDownsync.ConfirmedList {
pR.onInputFrameDownsyncAllConfirmed(inputFrameDownsync, -1)
} else {
break
}
}
}
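// forceConfirmationIfApplicable runs on the same cadence as "shouldPrefabInputFrameDownsync" but looks
// "NstDelayFrames" behind the current renderFrameId. A rough illustration, assuming the defaults of
// ServerFps=60 and NstDelayFrames=8: an inputFrame not confirmed by every player within roughly 8/60 s of when
// it should have been rendered gets its "ConfirmedList" forcibly completed, and the bitmask of the players that
// missed it is returned so they can be resynced.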
func (pR *Room) forceConfirmationIfApplicable() uint64 {
// Force confirmation of non-all-confirmed inputFrame EXACTLY ONE AT A TIME, returns the non-confirmed mask of players, e.g. in a 4-player-battle returning 1001 means that players with JoinIndex=1 and JoinIndex=4 are non-confirmed for inputFrameId2
renderFrameId1 := (pR.RenderFrameId - pR.NstDelayFrames) // the renderFrameId which should've been rendered on frontend
if 0 > renderFrameId1 || !pR.shouldPrefabInputFrameDownsync(renderFrameId1) {
/*
The backend "shouldPrefabInputFrameDownsync" shares the same rule as frontend "shouldGenerateInputFrameUpsync".
It's also important that "forceConfirmationIfApplicable" is NOT EXECUTED for every renderFrame, such that when a player is forced to resync, it has some time, i.e. (1 << InputScaleFrames) renderFrames, to upsync again.
*/
return 0
}
inputFrameId2 := pR.ConvertToInputFrameId(renderFrameId1, 0) // The inputFrame to force confirmation (if necessary)
if inputFrameId2 < pR.LastAllConfirmedInputFrameId {
// No need to force confirmation, the inputFrames already arrived
2022-10-02 03:33:40 +00:00
Logger.Debug(fmt.Sprintf("inputFrameId2=%v is already all-confirmed for roomId=%v[type#1], no need to force confirmation of it", inputFrameId2, pR.Id))
2022-10-02 16:22:05 +00:00
return 0
}
2022-10-03 03:42:19 +00:00
tmp := pR.InputsBuffer.GetByFrameId(inputFrameId2)
2022-10-01 12:45:38 +00:00
if nil == tmp {
2022-10-03 03:42:19 +00:00
panic(fmt.Sprintf("inputFrameId2=%v doesn't exist for roomId=%v, this is abnormal because the server should prefab inputFrameDownsync in a most advanced pace, check the prefab logic! InputsBuffer=%v", inputFrameId2, pR.Id, pR.InputsBufferString(false)))
}
totPlayerCnt := uint32(pR.Capacity)
allConfirmedMask := uint64((1 << totPlayerCnt) - 1)
// Force confirmation of "inputFrame2"
inputFrame2 := tmp.(*InputFrameDownsync)
oldConfirmedList := inputFrame2.ConfirmedList
unconfirmedMask := (oldConfirmedList ^ allConfirmedMask)
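	// e.g. with Capacity=4, allConfirmedMask=0b1111; if oldConfirmedList=0b0110, then unconfirmedMask=0b1001,
	// i.e. players with JoinIndex=1 and JoinIndex=4 are the ones being force-confirmed below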
inputFrame2.ConfirmedList = allConfirmedMask
pR.onInputFrameDownsyncAllConfirmed(inputFrame2, -1)
return unconfirmedMask
}
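// applyInputFrameDownsyncDynamics advances the authoritative simulation from "fromRenderFrameId" (inclusive)
// to "toRenderFrameId" (exclusive), consuming only all-confirmed delayed inputFrames and pushing each newly
// computed renderFrame into "pR.RenderFrameBuffer".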
func (pR *Room) applyInputFrameDownsyncDynamics(fromRenderFrameId int32, toRenderFrameId int32, spaceOffsetX, spaceOffsetY float64) {
if fromRenderFrameId >= toRenderFrameId {
return
}
Logger.Debug(fmt.Sprintf("Applying inputFrame dynamics: roomId=%v, room.RenderFrameId=%v, fromRenderFrameId=%v, toRenderFrameId=%v", pR.Id, pR.RenderFrameId, fromRenderFrameId, toRenderFrameId))
totPlayerCnt := uint32(pR.Capacity)
allConfirmedMask := uint64((1 << totPlayerCnt) - 1)
for collisionSysRenderFrameId := fromRenderFrameId; collisionSysRenderFrameId < toRenderFrameId; collisionSysRenderFrameId++ {
currRenderFrameTmp := pR.RenderFrameBuffer.GetByFrameId(collisionSysRenderFrameId)
if nil == currRenderFrameTmp {
panic(fmt.Sprintf("collisionSysRenderFrameId=%v doesn't exist for roomId=%v, this is abnormal because it's to be used for applying dynamics to [fromRenderFrameId:%v, toRenderFrameId:%v)! RenderFrameBuffer=%v", collisionSysRenderFrameId, pR.Id, fromRenderFrameId, toRenderFrameId, pR.RenderFrameBufferString()))
}
currRenderFrame := currRenderFrameTmp.(*RoomDownsyncFrame)
delayedInputFrameId := pR.ConvertToInputFrameId(collisionSysRenderFrameId, pR.InputDelayFrames)
var delayedInputFrame *InputFrameDownsync = nil
if 0 <= delayedInputFrameId {
if delayedInputFrameId > pR.LastAllConfirmedInputFrameId {
panic(fmt.Sprintf("delayedInputFrameId=%v is not yet all-confirmed for roomId=%v, this is abnormal because it's to be used for applying dynamics to [fromRenderFrameId:%v, toRenderFrameId:%v) @ collisionSysRenderFrameId=%v! InputsBuffer=%v", delayedInputFrameId, pR.Id, fromRenderFrameId, toRenderFrameId, collisionSysRenderFrameId, pR.InputsBufferString(false)))
}
tmp := pR.InputsBuffer.GetByFrameId(delayedInputFrameId)
if nil == tmp {
panic(fmt.Sprintf("delayedInputFrameId=%v doesn't exist for roomId=%v, this is abnormal because it's to be used for applying dynamics to [fromRenderFrameId:%v, toRenderFrameId:%v) @ collisionSysRenderFrameId=%v! InputsBuffer=%v", delayedInputFrameId, pR.Id, fromRenderFrameId, toRenderFrameId, collisionSysRenderFrameId, pR.InputsBufferString(false)))
}
delayedInputFrame = tmp.(*InputFrameDownsync)
			// [WARNING] It's possible that by now "allConfirmedMask != delayedInputFrame.ConfirmedList && delayedInputFrameId <= pR.LastAllConfirmedInputFrameId"; in that case we trust "pR.LastAllConfirmedInputFrameId" as the TOP AUTHORITY.
atomic.StoreUint64(&(delayedInputFrame.ConfirmedList), allConfirmedMask)
}
nextRenderFrame := pR.applyInputFrameDownsyncDynamicsOnSingleRenderFrame(delayedInputFrame, currRenderFrame, pR.CollisionSysMap)
		// Propagate the newly computed states back into the latest player pointers
for playerId, playerDownsync := range nextRenderFrame.Players {
pR.Players[playerId].VirtualGridX = playerDownsync.VirtualGridX
pR.Players[playerId].VirtualGridY = playerDownsync.VirtualGridY
pR.Players[playerId].Dir.Dx = playerDownsync.Dir.Dx
pR.Players[playerId].Dir.Dy = playerDownsync.Dir.Dy
}
pR.RenderFrameBuffer.Put(nextRenderFrame)
pR.CurDynamicsRenderFrameId++
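		// Advance the dynamics frontier; "CurDynamicsRenderFrameId" points to the next renderFrameId still awaiting dynamics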
}
}
// TODO: Write a unit-test for this function to compare with its frontend counterpart
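// applyInputFrameDownsyncDynamicsOnSingleRenderFrame derives the next RoomDownsyncFrame from "currRenderFrame":
// it deep-copies the player states, applies the delayed inputs as simultaneous movements in the collision space,
// accumulates pushbacks against overlapping barriers, and finally maps the resolved collider positions back to
// virtual-grid coordinates.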
func (pR *Room) applyInputFrameDownsyncDynamicsOnSingleRenderFrame(delayedInputFrame *InputFrameDownsync, currRenderFrame *RoomDownsyncFrame, collisionSysMap map[int32]*resolv.Object) *RoomDownsyncFrame {
nextRenderFramePlayers := make(map[int32]*PlayerDownsync, pR.Capacity)
	// Make a deep copy of the player states first, so that "currRenderFrame" kept in the ring buffer stays untouched
for playerId, currPlayerDownsync := range currRenderFrame.Players {
nextRenderFramePlayers[playerId] = &PlayerDownsync{
Id: playerId,
VirtualGridX: currPlayerDownsync.VirtualGridX,
VirtualGridY: currPlayerDownsync.VirtualGridY,
Dir: &Direction{
Dx: currPlayerDownsync.Dir.Dx,
Dy: currPlayerDownsync.Dir.Dy,
},
Speed: currPlayerDownsync.Speed,
BattleState: currPlayerDownsync.BattleState,
Score: currPlayerDownsync.Score,
Removed: currPlayerDownsync.Removed,
JoinIndex: currPlayerDownsync.JoinIndex,
}
}
toRet := &RoomDownsyncFrame{
Id: currRenderFrame.Id + 1,
Players: nextRenderFramePlayers,
CountdownNanos: (pR.BattleDurationNanos - int64(currRenderFrame.Id)*pR.RollbackEstimatedDtNanos),
}
if nil != delayedInputFrame {
inputList := delayedInputFrame.InputList
effPushbacks := make([]Vec2D, pR.Capacity) // Guaranteed determinism regardless of traversal order
for playerId, player := range pR.Players {
joinIndex := player.JoinIndex
effPushbacks[joinIndex-1].X, effPushbacks[joinIndex-1].Y = float64(0), float64(0)
currPlayerDownsync := currRenderFrame.Players[playerId]
decodedInput := pR.decodeInput(inputList[joinIndex-1])
proposedVirtualGridDx, proposedVirtualGridDy := (decodedInput.Dx + decodedInput.Dx*currPlayerDownsync.Speed), (decodedInput.Dy + decodedInput.Dy*currPlayerDownsync.Speed)
newVx, newVy := (currPlayerDownsync.VirtualGridX + proposedVirtualGridDx), (currPlayerDownsync.VirtualGridY + proposedVirtualGridDy)
// Reset playerCollider position from the "virtual grid position"
collisionPlayerIndex := COLLISION_PLAYER_INDEX_PREFIX + joinIndex
playerCollider := collisionSysMap[collisionPlayerIndex]
playerCollider.X, playerCollider.Y = VirtualGridToPolygonColliderAnchorPos(newVx, newVy, player.ColliderRadius, player.ColliderRadius, pR.collisionSpaceOffsetX, pR.collisionSpaceOffsetY, pR.VirtualGridToWorldRatio)
// Update in the collision system
playerCollider.Update()
if 0 != decodedInput.Dx || 0 != decodedInput.Dy {
nextRenderFramePlayers[playerId].Dir.Dx = decodedInput.Dx
nextRenderFramePlayers[playerId].Dir.Dy = decodedInput.Dy
}
}
		// Handle pushbacks upon collision, with all movements within this renderFrame treated as simultaneous
for _, player := range pR.Players {
joinIndex := player.JoinIndex
collisionPlayerIndex := COLLISION_PLAYER_INDEX_PREFIX + joinIndex
playerCollider := collisionSysMap[collisionPlayerIndex]
if collision := playerCollider.Check(0, 0); collision != nil {
playerShape := playerCollider.Shape.(*resolv.ConvexPolygon)
for _, obj := range collision.Objects {
barrierShape := obj.Shape.(*resolv.ConvexPolygon)
if overlapped, pushbackX, pushbackY, overlapResult := CalcPushbacks(0, 0, playerShape, barrierShape); overlapped {
Logger.Debug(fmt.Sprintf("Overlapped: a=%v, b=%v, pushbackX=%v, pushbackY=%v", ConvexPolygonStr(playerShape), ConvexPolygonStr(barrierShape), pushbackX, pushbackY))
effPushbacks[joinIndex-1].X += pushbackX
effPushbacks[joinIndex-1].Y += pushbackY
} else {
Logger.Debug(fmt.Sprintf("Collided BUT not overlapped: a=%v, b=%v, overlapResult=%v", ConvexPolygonStr(playerShape), ConvexPolygonStr(barrierShape), overlapResult))
}
}
}
}
for playerId, player := range pR.Players {
joinIndex := player.JoinIndex
collisionPlayerIndex := COLLISION_PLAYER_INDEX_PREFIX + joinIndex
playerCollider := collisionSysMap[collisionPlayerIndex]
// Update "virtual grid position"
newVx, newVy := PolygonColliderAnchorToVirtualGridPos(playerCollider.X-effPushbacks[joinIndex-1].X, playerCollider.Y-effPushbacks[joinIndex-1].Y, player.ColliderRadius, player.ColliderRadius, pR.collisionSpaceOffsetX, pR.collisionSpaceOffsetY, pR.WorldToVirtualGridRatio)
nextRenderFramePlayers[playerId].VirtualGridX = newVx
nextRenderFramePlayers[playerId].VirtualGridY = newVy
}
Logger.Debug(fmt.Sprintf("After applyInputFrameDownsyncDynamicsOnSingleRenderFrame: currRenderFrame.Id=%v, inputList=%v, currRenderFrame.Players=%v, nextRenderFramePlayers=%v", currRenderFrame.Id, inputList, currRenderFrame.Players, nextRenderFramePlayers))
}
return toRet
}
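// decodeInput unpacks an encoded input: the lowest 4 bits index into DIRECTION_DECODER for the movement
// direction, and bit 4 carries the button-A level.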
func (pR *Room) decodeInput(encodedInput uint64) *InputFrameDecoded {
encodedDirection := (encodedInput & uint64(15))
btnALevel := int32((encodedInput >> 4) & 1)
return &InputFrameDecoded{
Dx: DIRECTION_DECODER[encodedDirection][0],
Dy: DIRECTION_DECODER[encodedDirection][1],
BtnALevel: btnALevel,
}
}
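// For illustration of "decodeInput" above: encodedInput=19 (0b10011) yields direction index 3,
// i.e. Dx=+2, Dy=0, with BtnALevel=1.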
func (pR *Room) inputFrameIdDebuggable(inputFrameId int32) bool {
return 0 == (inputFrameId % 10)
}
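// refreshColliders (re)builds the collision space for a battle: one rectangular collider per player
// (side length = 2*ColliderRadius, positioned from the player's virtual-grid position) plus one convex
// polygon collider per barrier; player colliders are also registered in "pR.CollisionSysMap" for lookup
// by the dynamics loop.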
func (pR *Room) refreshColliders(spaceW, spaceH int32) {
	// Kindly note that by now, all the shapes in the tmx file have already been loaded into "pR.(Players | Barriers)" by "ParseTmxLayersAndGroups"
	minStep := (int(float64(pR.PlayerDefaultSpeed)*pR.VirtualGridToWorldRatio) << 2) // the approximate minimum distance a player can move per frame, in world coordinates
	space := resolv.NewSpace(int(spaceW), int(spaceH), minStep, minStep) // allocate a new collision space every time after a battle is settled
for _, player := range pR.Players {
wx, wy := VirtualGridToWorldPos(player.VirtualGridX, player.VirtualGridY, pR.VirtualGridToWorldRatio)
playerCollider := GenerateRectCollider(wx, wy, player.ColliderRadius*2, player.ColliderRadius*2, pR.collisionSpaceOffsetX, pR.collisionSpaceOffsetY, "Player")
space.Add(playerCollider)
// Keep track of the collider in "pR.CollisionSysMap"
joinIndex := player.JoinIndex
pR.PlayersArr[joinIndex-1] = player
collisionPlayerIndex := COLLISION_PLAYER_INDEX_PREFIX + joinIndex
pR.CollisionSysMap[collisionPlayerIndex] = playerCollider
}
for _, barrier := range pR.Barriers {
boundaryUnaligned := barrier.Boundary
barrierCollider := GenerateConvexPolygonCollider(boundaryUnaligned, pR.collisionSpaceOffsetX, pR.collisionSpaceOffsetY, "Barrier")
space.Add(barrierCollider)
}
}
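// Note on the lookup convention above: the dynamics loop retrieves a player's collider with the same key,
// i.e. "collisionSysMap[COLLISION_PLAYER_INDEX_PREFIX + joinIndex]", so the prefix constants must stay
// consistent between "refreshColliders" and "applyInputFrameDownsyncDynamicsOnSingleRenderFrame".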
func (pR *Room) printBarrier(barrierCollider *resolv.Object) {
Logger.Info(fmt.Sprintf("Barrier in roomId=%v: w=%v, h=%v, shape=%v", pR.Id, barrierCollider.W, barrierCollider.H, barrierCollider.Shape))
}