|
|
|
|
@@ -3,23 +3,15 @@
|
|
|
|
|
#include "cocos/platform/CCApplication.h"
|
|
|
|
|
#include "cocos/base/CCScheduler.h"
|
|
|
|
|
#include "cocos/scripting/js-bindings/jswrapper/SeApi.h"
|
|
|
|
|
#include "uv/uv.h"
|
|
|
|
|
|
|
|
|
|
uv_udp_t* udpSocket = NULL;
|
|
|
|
|
uv_thread_t recvTid;
|
|
|
|
|
uv_async_t uvLoopStopSig;
|
|
|
|
|
uv_loop_t* loop = NULL; // Only this loop is used for this simple PoC
|
|
|
|
|
|
|
|
|
|
int const maxPeerCnt = 10;
|
|
|
|
|
struct PeerAddr {
|
|
|
|
|
struct sockaddr_in sockAddrIn;
|
|
|
|
|
uint32_t authKey;
|
|
|
|
|
};
|
|
|
|
|
struct PeerAddr peerAddrList[maxPeerCnt];
|
|
|
|
|
|
|
|
|
|
uv_mutex_t sendLock, recvLock;
|
|
|
|
|
|
|
|
|
|
CHARC * SRV_IP = NULL;
|
|
|
|
|
char SRV_IP[256];
|
|
|
|
|
int SRV_PORT = 0;
|
|
|
|
|
|
|
|
|
|
void _onRead(uv_udp_t* req, ssize_t nread, const uv_buf_t* buf, const struct sockaddr* addr, unsigned flags) {
|
|
|
|
|
@@ -31,10 +23,10 @@ void _onRead(uv_udp_t* req, ssize_t nread, const uv_buf_t* buf, const struct soc
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct sockaddr_in* sockAddr = (struct sockaddr_in*)addr;
|
|
|
|
|
char ip[64] = { 0 };
|
|
|
|
|
char ip[17] = { 0 };
|
|
|
|
|
uv_ip4_name(sockAddr, ip, sizeof ip);
|
|
|
|
|
int port = sockAddr->sin_port;
|
|
|
|
|
|
|
|
|
|
int port = ntohs(sockAddr->sin_port);
|
|
|
|
|
|
|
|
|
|
int const gameThreadMsgSize = 256;
|
|
|
|
|
char* const gameThreadMsg = (char* const)malloc(gameThreadMsgSize);
|
|
|
|
|
memset(gameThreadMsg, 0, gameThreadMsgSize);
|
|
|
|
|
@@ -43,7 +35,7 @@ void _onRead(uv_udp_t* req, ssize_t nread, const uv_buf_t* buf, const struct soc
|
|
|
|
|
CCLOG("UDP read %d bytes from %s:%d, converted to %d bytes for the JS callback", nread, ip, port, strlen(gameThreadMsg));
|
|
|
|
|
free(buf->base);
|
|
|
|
|
//uv_udp_recv_stop(req);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cocos2d::Application::getInstance()->getScheduler()->performFunctionInCocosThread([=]() {
|
|
|
|
|
// [WARNING] Use of the "ScriptEngine" is only allowed in "GameThread a.k.a. CocosThread"!
|
|
|
|
|
se::Value onUdpMessageCb;
|
|
|
|
|
@@ -64,24 +56,127 @@ void _onRead(uv_udp_t* req, ssize_t nread, const uv_buf_t* buf, const struct soc
|
|
|
|
|
|
|
|
|
|
// libuv allocation callback: hands back a heap buffer of the suggested size.
// The read callback is responsible for releasing buf->base (this file's
// "_onRead" frees it).
static void _allocBuffer(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
    (void)handle;
    // [FIX] The original had two consecutive malloc assignments (merge
    // residue); the first allocation leaked on every invocation. Allocate
    // exactly once.
    buf->base = (char*)malloc(suggested_size);
    buf->len = suggested_size;
}
|
|
|
|
|
|
|
|
|
|
// Prints the given message (plus errno text) to stderr and aborts the process.
// [FIX] Parameter widened to "const char*": passing a string literal through a
// plain "char*" is ill-formed in modern C++. The change is backward-compatible
// for every existing caller.
void diep(const char* s) {
    perror(s);
    exit(1);
}
|
|
|
|
|
// [FIX] The original "typedef struct client { ... };" declared a typedef with
// no name, so the alias was silently dropped (compilers warn "typedef requires
// a name"). Name the typedef; "struct client" remains usable exactly as before.
typedef struct client {
    int host;   // peer host, presumably an IPv4 address -- TODO confirm byte order with callers
    short port; // peer port
} client;
|
|
|
|
|
|
|
|
|
|
// Async-signal handler executed on the UvThread: breaks "uv_run" out of the
// file-scoped "loop" so the recv thread can wind down.
void _onUvStopSig(uv_async_t* handle) {
    (void)handle; // the wakeup itself carries no payload
    uv_stop(loop);
    CCLOG("UDP recv loop is signaled to stop in UvThread");
}
|
|
|
|
|
|
|
|
|
|
// Completion callback for every "uv_udp_send": reclaims the request object.
// The payload buffer is NOT freed here -- each "_afterXxx" callback owns its
// payload's lifetime.
void _onSend(uv_udp_send_t* req, int status) {
    free(req);
    if (0 != status) {
        CCLOGERROR("uv_udp_send_cb error: %s\n", uv_strerror(status));
    }
}
|
|
|
|
|
class PunchServerWork {
|
|
|
|
|
public:
|
|
|
|
|
BYTEC bytes[128]; // Wasting some RAM here thus no need for explicit recursive destruction
|
|
|
|
|
size_t bytesLen;
|
|
|
|
|
|
|
|
|
|
PunchServerWork(BYTEC* const newBytes, size_t newBytesLen) {
|
|
|
|
|
memset(this->bytes, 0, sizeof(this->bytes));
|
|
|
|
|
memcpy(this->bytes, newBytes, newBytesLen);
|
|
|
|
|
|
|
|
|
|
this->bytesLen = newBytesLen;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
// UvThread worker: fires the captured punch payload at the backend server once.
void _punchServerOnUvThread(uv_work_t* wrapper) {
    PunchServerWork* work = (PunchServerWork*)wrapper->data;
    uv_udp_send_t* req = (uv_udp_send_t*)malloc(sizeof(uv_udp_send_t));
    uv_buf_t sendBuffer = uv_buf_init(work->bytes, work->bytesLen);
    struct sockaddr_in destAddr;
    uv_ip4_addr(SRV_IP, SRV_PORT, &destAddr);
    // [FIX] On a synchronous failure libuv never invokes "_onSend", so the
    // request object must be reclaimed here to avoid a leak.
    int rc = uv_udp_send(req, udpSocket, &sendBuffer, 1, (struct sockaddr const*)&destAddr, _onSend);
    if (0 != rc) {
        free(req);
    }
}
|
|
|
|
|
// Completion of the punch-server work item (runs after the worker finishes).
void _afterPunchServer(uv_work_t* wrapper, int status) {
    (void)status;
    PunchServerWork* work = (PunchServerWork*)wrapper->data;
    delete work;
    // [FIX] "wrapper" is malloc'ed by the dispatcher ("punchToServer") and was
    // never released -- one uv_work_t leaked per call.
    free(wrapper);
}
|
|
|
|
|
|
|
|
|
|
// Describes one round of hole-punch attempts toward every other peer in a room.
class PunchPeerWork {
public:
    int roomCapacity;  // number of seats in the room
    int selfJoinIndex; // 1-based index of the local player; its slot is skipped

    PunchPeerWork(int newRoomCapacity, int newSelfJoinIndex)
        : roomCapacity(newRoomCapacity)
        , selfJoinIndex(newSelfJoinIndex) {
    }
};
|
|
|
|
|
// UvThread worker: sends a burst of hardcoded datagrams to every known peer
// except ourselves, opening pinholes for Address/Port-restricted Cone NATs.
void _punchPeerOnUvThread(uv_work_t* wrapper) {
    PunchPeerWork* work = (PunchPeerWork*)wrapper->data;
    int roomCapacity = work->roomCapacity;
    int selfJoinIndex = work->selfJoinIndex;
    // [FIX] "uv_buf_init" takes a non-const char*; passing the string literal
    // directly is ill-formed in modern C++. A static buffer also outlives the
    // asynchronous sends.
    static char punchMsg[] = "foobar"; // hardcoded for now
    for (int i = 0; i < roomCapacity; i++) {
        if (i + 1 == selfJoinIndex) {
            continue; // don't punch ourselves
        }
        if (0 == peerAddrList[i].sockAddrIn.sin_port) {
            // Peer addr not initialized
            continue;
        }
        char peerIp[17] = { 0 };
        uv_ip4_name(&(peerAddrList[i].sockAddrIn), peerIp, sizeof peerIp);
        for (int j = 0; j < 3; j++) {
            uv_udp_send_t* req = (uv_udp_send_t*)malloc(sizeof(uv_udp_send_t));
            uv_buf_t sendBuffer = uv_buf_init(punchMsg, 6);
            // [FIX] Address the sockaddr_in member explicitly instead of
            // casting the whole PeerAddr (which only worked because sockAddrIn
            // happens to be the first member), and reclaim "req" when libuv
            // rejects the send synchronously ("_onSend" won't run then).
            int rc = uv_udp_send(req, udpSocket, &sendBuffer, 1, (struct sockaddr const*)&peerAddrList[i].sockAddrIn, _onSend);
            if (0 != rc) {
                free(req);
            }
            CCLOG("UDP punched peer %s:%d by 6 bytes round-%d", peerIp, ntohs(peerAddrList[i].sockAddrIn.sin_port), j);
        }
    }
}
|
|
|
|
|
// Completion of the punch-peer work item (runs after the worker finishes).
void _afterPunchPeer(uv_work_t* wrapper, int status) {
    (void)status;
    PunchPeerWork* work = (PunchPeerWork*)wrapper->data;
    delete work;
    // [FIX] Release the dispatcher-malloc'ed uv_work_t as well; it leaked before.
    free(wrapper);
}
|
|
|
|
|
|
|
|
|
|
class BroadcastInputFrameUpsyncWork {
|
|
|
|
|
public:
|
|
|
|
|
BYTEC bytes[128]; // Wasting some RAM here thus no need for explicit recursive destruction
|
|
|
|
|
size_t bytesLen;
|
|
|
|
|
int roomCapacity;
|
|
|
|
|
int selfJoinIndex;
|
|
|
|
|
|
|
|
|
|
BroadcastInputFrameUpsyncWork(BYTEC* const newBytes, size_t newBytesLen, int newRoomCapacity, int newSelfJoinIndex) {
|
|
|
|
|
memset(this->bytes, 0, sizeof(this->bytes));
|
|
|
|
|
memcpy(this->bytes, newBytes, newBytesLen);
|
|
|
|
|
|
|
|
|
|
this->bytesLen = newBytesLen;
|
|
|
|
|
|
|
|
|
|
this->roomCapacity = newRoomCapacity;
|
|
|
|
|
this->selfJoinIndex = newSelfJoinIndex;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
// UvThread worker: broadcasts the captured upsync payload to every other peer.
void _broadcastInputFrameUpsyncOnUvThread(uv_work_t* wrapper) {
    BroadcastInputFrameUpsyncWork* work = (BroadcastInputFrameUpsyncWork*)wrapper->data;
    int roomCapacity = work->roomCapacity;
    int selfJoinIndex = work->selfJoinIndex;
    for (int i = 0; i < roomCapacity; i++) {
        if (i + 1 == selfJoinIndex) {
            continue; // don't send to ourselves
        }
        if (0 == peerAddrList[i].sockAddrIn.sin_port) {
            // Peer addr not initialized
            continue;
        }
        char peerIp[17] = { 0 };
        uv_ip4_name(&(peerAddrList[i].sockAddrIn), peerIp, sizeof peerIp);
        // Might want to send several times for better arrival rate
        for (int j = 0; j < 1; j++) {
            uv_udp_send_t* req = (uv_udp_send_t*)malloc(sizeof(uv_udp_send_t));
            uv_buf_t sendBuffer = uv_buf_init(work->bytes, work->bytesLen);
            // [FIX] Address the sockaddr_in member explicitly (relying on it
            // being PeerAddr's first member is fragile), and reclaim "req"
            // when libuv rejects the send synchronously.
            int rc = uv_udp_send(req, udpSocket, &sendBuffer, 1, (struct sockaddr const*)&peerAddrList[i].sockAddrIn, _onSend);
            if (0 != rc) {
                free(req);
            }
            // [FIX] "%u" mismatched the size_t "bytesLen" on 64-bit targets; cast explicitly.
            CCLOG("UDP broadcasted upsync to peer %s:%d by %u bytes round-%d", peerIp, ntohs(peerAddrList[i].sockAddrIn.sin_port), (unsigned int)work->bytesLen, j);
        }
    }
}
|
|
|
|
|
// Completion of the broadcast-upsync work item (runs after the worker finishes).
void _afterBroadcastInputFrameUpsync(uv_work_t* wrapper, int status) {
    (void)status;
    BroadcastInputFrameUpsyncWork* work = (BroadcastInputFrameUpsyncWork*)wrapper->data;
    delete work;
    // [FIX] Release the dispatcher-malloc'ed uv_work_t as well; it leaked before.
    free(wrapper);
}
|
|
|
|
|
|
|
|
|
|
void _onWalkCleanup(uv_handle_t* handle, void* data) {
|
|
|
|
|
(void)data;
|
|
|
|
|
uv_close(handle, NULL);
|
|
|
|
|
@@ -99,19 +194,17 @@ void startRecvLoop(void* arg) {
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool DelayNoMore::UdpSession::openUdpSession(int port) {
|
|
|
|
|
|
|
|
|
|
uv_mutex_init(&sendLock);
|
|
|
|
|
uv_mutex_init(&recvLock);
|
|
|
|
|
|
|
|
|
|
udpSocket = (uv_udp_t*)malloc(sizeof(uv_udp_t));
|
|
|
|
|
struct sockaddr_in recv_addr;
|
|
|
|
|
uv_ip4_addr("0.0.0.0", port, &recv_addr);
|
|
|
|
|
uv_udp_bind(udpSocket, (struct sockaddr const*)&recv_addr, UV_UDP_REUSEADDR);
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
CCLOG("About to open UDP session at port=%d...", port);
|
|
|
|
|
loop = uv_loop_new();
|
|
|
|
|
uv_udp_init(loop, udpSocket);
|
|
|
|
|
uv_async_init(loop, &uvLoopStopSig, _onUvStopSig);
|
|
|
|
|
|
|
|
|
|
uv_udp_recv_start(udpSocket, _allocBuffer, _onRead);
|
|
|
|
|
|
|
|
|
|
uv_thread_create(&recvTid, startRecvLoop, loop);
|
|
|
|
|
@@ -121,9 +214,9 @@ bool DelayNoMore::UdpSession::openUdpSession(int port) {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool DelayNoMore::UdpSession::closeUdpSession() {
|
|
|
|
|
bool DelayNoMore::UdpSession::closeUdpSession() {
|
|
|
|
|
CCLOG("About to close udp session and dealloc all resources...");
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < maxPeerCnt; i++) {
|
|
|
|
|
peerAddrList[i].authKey = -1; // hardcoded for now
|
|
|
|
|
memset((char*)&peerAddrList[i].sockAddrIn, 0, sizeof(peerAddrList[i].sockAddrIn));
|
|
|
|
|
@@ -136,73 +229,51 @@ bool DelayNoMore::UdpSession::closeUdpSession() {
|
|
|
|
|
free(udpSocket);
|
|
|
|
|
free(loop);
|
|
|
|
|
|
|
|
|
|
uv_mutex_destroy(&sendLock);
|
|
|
|
|
uv_mutex_destroy(&recvLock);
|
|
|
|
|
|
|
|
|
|
CCLOG("Closed udp session and dealloc all resources in GameThread...");
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// NOTE(review): this appears to be patch/merge residue -- it duplicates the
// "_onSend" defined earlier in this file (the earlier copy logs via
// CCLOGERROR instead of fprintf). Two definitions of the same symbol cannot
// coexist in one translation unit; confirm which copy is intended and drop
// the other.
void _onSend(uv_udp_send_t* req, int status) {
// Reclaim the completed send request; the payload buffer is owned elsewhere.
free(req);
if (status) {
// Report the libuv error code in human-readable form.
fprintf(stderr, "uv_udp_send_cb error: %s\n", uv_strerror(status));
}
}
|
|
|
|
|
|
|
|
|
|
// NOTE(review): this looks like the superseded overload of "upsertPeerUdpAddr"
// left behind by a bad merge -- a newer overload taking "struct PeerAddr*"
// appears later in this file. Confirm which one the JS binding layer actually
// calls before keeping both.
bool DelayNoMore::UdpSession::upsertPeerUdpAddr(int joinIndex, CHARC* const ip, int port, uint32_t authKey, int roomCapacity, int selfJoinIndex) {
// NOTE(review): "%lu" does not match the uint32_t "authKey" on LP64 targets -- confirm/replace with "%u".
CCLOG("upsertPeerUdpAddr called by js for joinIndex=%d, ip=%s, port=%d, authKey=%lu; roomCapacity=%d, selfJoinIndex=%d.", joinIndex, ip, port, authKey, roomCapacity, selfJoinIndex);

// Punching between existing peer-pairs for Address/Port-restricted Cone NAT (not need for Full Cone NAT)
// Serialize against other senders; "udpSocket" is shared across threads.
uv_mutex_lock(&sendLock);
for (int i = 0; i < roomCapacity; i++) {
// Skip our own slot ("selfJoinIndex" is 1-based, the array is 0-based).
if (i == selfJoinIndex - 1) continue;
// NOTE(review): every iteration writes the SAME ip/port into slot i --
// presumably the caller invokes this once per peer; verify against the caller.
uv_ip4_addr(ip, port, &(peerAddrList[i].sockAddrIn));
peerAddrList[i].authKey = authKey;
// Fire a burst of datagrams to open the NAT pinhole; delivery is best-effort.
for (int j = 0; j < 10; j++) {
uv_udp_send_t* req = (uv_udp_send_t*)malloc(sizeof(uv_udp_send_t));
uv_buf_t sendBuffer = uv_buf_init("foobar", 6); // hardcoded for now
uv_udp_send(req, udpSocket, &sendBuffer, 1, (struct sockaddr const*)&peerAddrList[i], _onSend);
}
}
uv_mutex_unlock(&sendLock);

return true;
}
|
|
|
|
|
|
|
|
|
|
// Records the backend endpoint and queues a punch datagram on the UvThread.
bool DelayNoMore::UdpSession::punchToServer(CHARC* const srvIp, int const srvPort, BYTEC* const bytes, size_t bytesLen) {
    /*
    [WARNING] The RAM space used for "bytes", either on stack or in heap, is preallocated and managed by the caller which runs on the GameThread. Actual sending will be made on UvThread.

    Therefore we make a copy of this message before dispatching it "GameThread -> UvThread".
    */
    // [FIX] This body contained interleaved remnants of two revisions,
    // including "SRV_IP = srvIp;" (which cannot compile against the
    // "char SRV_IP[256]" buffer) and comment text sitting outside any comment
    // block. Reconstructed to the copy-then-queue form that matches the rest
    // of the file; the IP copy is also bounded so an oversized "srvIp" cannot
    // overflow SRV_IP.
    memset(SRV_IP, 0, sizeof SRV_IP);
    size_t ipLen = strlen(srvIp);
    if (ipLen >= sizeof SRV_IP) {
        ipLen = sizeof SRV_IP - 1; // keep NUL-termination
    }
    memcpy(SRV_IP, srvIp, ipLen);
    SRV_PORT = srvPort;

    PunchServerWork* work = new PunchServerWork(bytes, bytesLen);
    uv_work_t* wrapper = (uv_work_t*)malloc(sizeof(uv_work_t));
    wrapper->data = work;
    uv_queue_work(loop, wrapper, _punchServerOnUvThread, _afterPunchServer);

    return true;
}
|
|
|
|
|
|
|
|
|
|
bool DelayNoMore::UdpSession::upsertPeerUdpAddr(struct PeerAddr* newPeerAddrList, int roomCapacity, int selfJoinIndex) {
|
|
|
|
|
CCLOG("upsertPeerUdpAddr called by js for roomCapacity=%d, selfJoinIndex=%d.", roomCapacity, selfJoinIndex);
|
|
|
|
|
|
|
|
|
|
// Punching between existing peer-pairs for Address/Port-restricted Cone NAT (not need for Full Cone NAT); UvThread never writes into "peerAddrList", so I assume that it's safe to skip locking for them
|
|
|
|
|
for (int i = 0; i < roomCapacity; i++) {
|
|
|
|
|
if (i == selfJoinIndex - 1) continue;
|
|
|
|
|
peerAddrList[i].sockAddrIn = (*(newPeerAddrList + i)).sockAddrIn;
|
|
|
|
|
peerAddrList[i].authKey = (*(newPeerAddrList + i)).authKey;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
PunchPeerWork* work = new PunchPeerWork(roomCapacity, selfJoinIndex);
|
|
|
|
|
uv_work_t* wrapper = (uv_work_t*)malloc(sizeof(uv_work_t));
|
|
|
|
|
wrapper->data = work;
|
|
|
|
|
uv_queue_work(loop, wrapper, _punchPeerOnUvThread, _afterPunchPeer);
|
|
|
|
|
|
|
|
|
|
uv_udp_send_t* req = (uv_udp_send_t*)malloc(sizeof(uv_udp_send_t));
|
|
|
|
|
uv_buf_t sendBuffer = uv_buf_init(bytes, bytesLen);
|
|
|
|
|
struct sockaddr_in destAddr;
|
|
|
|
|
|
|
|
|
|
uv_ip4_addr(SRV_IP, SRV_PORT, &destAddr);
|
|
|
|
|
uv_mutex_lock(&sendLock);
|
|
|
|
|
uv_udp_send(req, udpSocket, &sendBuffer, 1, (struct sockaddr const*)&destAddr, _onSend);
|
|
|
|
|
uv_mutex_unlock(&sendLock);
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Copies the upsync payload and queues it for broadcast to all other peers on
// the UvThread.
bool DelayNoMore::UdpSession::broadcastInputFrameUpsync(BYTEC* const bytes, size_t bytesLen, int roomCapacity, int selfJoinIndex) {
    // [FIX] Removed an interleaved older revision that sent "bytes" directly
    // from the GameThread under "sendLock" (10 datagrams per peer). Keeping
    // both paths would double-send every frame and touch "udpSocket" from the
    // calling thread. The queued-work path below matches the dedicated
    // "_broadcastInputFrameUpsyncOnUvThread"/"_afterBroadcastInputFrameUpsync"
    // pair defined earlier in this file.
    BroadcastInputFrameUpsyncWork* work = new BroadcastInputFrameUpsyncWork(bytes, bytesLen, roomCapacity, selfJoinIndex);
    uv_work_t* wrapper = (uv_work_t*)malloc(sizeof(uv_work_t));
    wrapper->data = work;
    uv_queue_work(loop, wrapper, _broadcastInputFrameUpsyncOnUvThread, _afterBroadcastInputFrameUpsync);

    return true;
}
|