Fixes for rollback on UDP peer upsync.

genxium 2023-02-03 22:06:03 +08:00
parent 70ae4a4c92
commit f367609276
2 changed files with 19 additions and 13 deletions

View File

@@ -394,6 +394,8 @@ cc.Class({
     self.networkDoctor = new NetworkDoctor(20);
     self.skipRenderFrameFlag = false;
+    self.allowRollbackOnPeerUpsync = true;
     self.countdownNanos = null;
     if (self.countdownLabel) {
       self.countdownLabel.string = "";
@@ -950,18 +952,8 @@ fromUDP=${fromUDP}`);
       const inputFrame = batch[k]; // could be either "pb.InputFrameDownsync" or "pb.InputFrameUpsync", depending on "fromUDP"
       const inputFrameId = inputFrame.inputFrameId;
       const peerEncodedInput = (true == fromUDP ? inputFrame.encoded : inputFrame.inputList[peerJoinIndex - 1]);
-      if (inputFrameId <= renderedInputFrameIdUpper) {
+      if (false == self.allowRollbackOnPeerUpsync && inputFrameId <= renderedInputFrameIdUpper) {
         // [WARNING] Avoid obfuscating already rendered history, even at "inputFrameId == renderedInputFrameIdUpper": due to the use of "INPUT_SCALE_FRAMES", some previous render frames might already be rendered with "inputFrameId"!
-        // TODO: Shall we update the "chaserRenderFrameId" if the rendered history was wrong? It doesn't seem to impact eventual correctness if we allow the update of "chaserRenderFrameId" upon "inputFrameId <= renderedInputFrameIdUpper" here; however, UDP upsync doesn't preserve order from the same sender and there might be multiple other senders, hence it might result in unnecessarily frequent chasing.
-        const localInputFrame = self.recentInputCache.GetByFrameId(inputFrameId);
-        if (null != localInputFrame
-          &&
-          null == firstPredictedYetIncorrectInputFrameId
-          &&
-          localInputFrame.InputList[peerJoinIndex - 1] != peerEncodedInput
-        ) {
-          firstPredictedYetIncorrectInputFrameId = inputFrameId;
-        }
         continue;
       }
       if (inputFrameId <= self.lastAllConfirmedInputFrameId) {
@@ -986,12 +978,26 @@ fromUDP=${fromUDP}`);
         const newInputFrameDownsyncLocal = gopkgs.NewInputFrameDownsync(inputFrameId, newInputList, newConfirmedList);
         //console.log(`Updated encoded input of peerJoinIndex=${peerJoinIndex} to ${peerEncodedInput} for inputFrameId=${inputFrameId}/renderedInputFrameIdUpper=${renderedInputFrameIdUpper} from ${JSON.stringify(inputFrame)}; newInputFrameDownsyncLocal=${self.gopkgsInputFrameDownsyncStr(newInputFrameDownsyncLocal)}; existingInputFrame=${self.gopkgsInputFrameDownsyncStr(existingInputFrame)}`);
         self.recentInputCache.SetByFrameId(newInputFrameDownsyncLocal, inputFrameId);
+        if (self.allowRollbackOnPeerUpsync) {
+          // Shall we update the "chaserRenderFrameId" if the rendered history was wrong? It doesn't seem to impact eventual correctness if we allow the update of "chaserRenderFrameId" upon "inputFrameId <= renderedInputFrameIdUpper" here; however, UDP upsync doesn't preserve order from the same sender and there might be multiple other senders, hence it might result in unnecessarily frequent chasing.
+          if (
+            null == firstPredictedYetIncorrectInputFrameId
+            &&
+            existingInputFrame.InputList[peerJoinIndex - 1] != peerEncodedInput
+          ) {
+            firstPredictedYetIncorrectInputFrameId = inputFrameId;
+          }
+        }
       }
       if (0 < effCnt) {
         //self._markConfirmationIfApplicable();
         self.networkDoctor.logPeerInputFrameUpsync(batch[0].inputFrameId, batch[batch.length - 1].inputFrameId);
       }
-      self._handleIncorrectlyRenderedPrediction(firstPredictedYetIncorrectInputFrameId, batch, fromUDP);
+      if (true == self.allowRollbackOnPeerUpsync) {
+        self._handleIncorrectlyRenderedPrediction(firstPredictedYetIncorrectInputFrameId, batch, fromUDP);
+      }
     },

     onPlayerAdded(rdf /* pb.RoomDownsyncFrame */ ) {
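
In sum, the JavaScript change makes rollback on peer upsync opt-in: when "allowRollbackOnPeerUpsync" is true, a peer input arriving (possibly late and out of order over UDP) for an already rendered input frame is still merged into "recentInputCache", the first input frame whose cached prediction disagrees with the peer's actual input is recorded, and "_handleIncorrectlyRenderedPrediction" is then invoked to schedule a re-simulation. Below is a minimal sketch of the rewind such a handler performs, NOT the repo's exact code: the standalone function shape, the "INPUT_SCALE_FRAMES" value, and the omission of input delay are all assumptions for illustration.

    // Sketch only; assumes 1 input frame drives 2^INPUT_SCALE_FRAMES render frames.
    const INPUT_SCALE_FRAMES = 2;

    function handleIncorrectlyRenderedPrediction(session, firstPredictedYetIncorrectInputFrameId) {
      if (null == firstPredictedYetIncorrectInputFrameId) return;
      // First render frame that consumed the mispredicted input (input delay ignored for brevity).
      const renderFrameId1 = (firstPredictedYetIncorrectInputFrameId << INPUT_SCALE_FRAMES);
      if (renderFrameId1 >= session.chaserRenderFrameId) return; // chaser hasn't passed it yet, so it'll be re-simulated anyway
      session.chaserRenderFrameId = renderFrameId1; // rewind so chasing replays the wrongly rendered history
    }

    // E.g. with the chaser at render frame 240, a mismatch at input frame 50 rewinds it to 50 << 2 = 200.
    const session = { chaserRenderFrameId: 240 };
    handleIncorrectlyRenderedPrediction(session, 50);
    console.log(session.chaserRenderFrameId); // 200

This is also why the in-diff comment worries about "unnecessarily frequent chasing": each rewind enlarges the span the chaser must re-simulate, and unordered UDP arrivals from multiple senders can trigger rewinds repeatedly.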

View File

@@ -6,7 +6,7 @@
 int const punchServerCnt = 3;
 int const punchPeerCnt = 3;
-int const broadcastUpsyncCnt = 1;
+int const broadcastUpsyncCnt = 2;
 uv_udp_t *udpRecvSocket = NULL, *udpSendSocket = NULL;
 uv_thread_t recvTid, sendTid;
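
The C-side change simply doubles how many times each upsync datagram is fired: UDP guarantees neither delivery nor order, so if a single datagram is lost with probability p, two copies are both lost with probability roughly p^2 (assuming independent losses), at the cost of doubled upsync bandwidth. Duplicates are harmless because the receiving path above keys everything on "inputFrameId". A Node-flavored sketch of the idea, purely illustrative since the real sender is the libuv C code in this file:

    const dgram = require('dgram');

    const broadcastUpsyncCnt = 2; // mirrors the constant bumped in this commit
    const sock = dgram.createSocket('udp4');

    // Send the same upsync payload "broadcastUpsyncCnt" times to every peer;
    // receivers dedupe by "inputFrameId", so extra copies are ignored safely.
    function broadcastUpsync(payload, peers) {
      for (const { host, port } of peers) {
        for (let i = 0; i < broadcastUpsyncCnt; i++) {
          sock.send(payload, port, host);
        }
      }
    }

    broadcastUpsync(Buffer.from([0x01, 0x02]), [{ host: '192.0.2.1', port: 3000 }]);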