689 lines
24 KiB
C++
689 lines
24 KiB
C++
/*
 * File: SplitApplication.h
 * Author: sgaebler
 *
 * Created on April 12, 2011, 04:37 PM
 */
|
#include "SplitApplication.h"
|
|
|
|
#include "Dispatcher.h"
|
|
#include "Moversight.h"
|
|
#include "app/Application.h"
|
|
#include "common/transport/TransportAddress.h"
|
|
#include "merge/events/MergeRequestEvent.h"
|
|
#include "ms/Invitation.h"
|
|
#include "ms/PeerResources.h"
|
|
#include "ms/events/PeerJoinedEvent.h"
|
|
#include "ms/events/JoinConfirmEvent.h"
|
|
#include "ms/events/JoinRequestEvent.h"
|
|
#include "ms/events/JoinRejectedEvent.h"
|
|
#include "ms/events/JoinAbortedEvent.h"
|
|
#include "ms/events/PeerLeftEvent.h"
|
|
#include "mt/msg/GroupData.h"
|
|
#include "mt/events/PendingPeersEvent.h"
|
|
#include "fd/events/PeerReconnectedEvent.h"
|
|
#include "simutils/statistics/DurationStatisticRecorder.h"
|
|
#include "simutils/OmnetppIniUtils.h"
|
|
#include "split/events/SplitDoneEvent.h"
|
|
|
|
|
|
|
|
#undef DEBUG
// Debug logging macro: prints "APP@<local id>" (or "APP@TA_<address>" while
// still disjoined) followed by msg — but ONLY when debug output is enabled.
//
// Fix: the original expansion guarded only the first statement with
// `if (module.isPrintDebugAPP())`, so the peer address/ID and the message
// itself were printed unconditionally even with debug output disabled.
// The whole expansion is now wrapped in the conditional, and in do{}while(0)
// so the macro behaves like a single statement (safe in unbraced if/else).
#define DEBUG(msg) do { \
    if (module.isPrintDebugAPP()) { \
        MOV_DEBUG << "APP@" << (dis->getLocalState() == DISJOINED ? "TA_" : ""); \
        if (dis->getLocalState() == DISJOINED) { MOV_DEBUG << module.getLocalAddress(); } \
        else { MOV_DEBUG << dis->getMembershipService().getLocalID(); } \
        MOV_DEBUG << " " << msg << std::endl; \
    } \
} while (0)
|
// Defines a test options parameter passed to splitGroup().
// Bit direction (MSB-first) -->
// bit 0: semantic flag
// bit 1: flush flag
// bit 2: reply flag
// bits 3-7: empty
// NOTE(review): the trailing comment suggests 0b00100000 (= 32 = 0x20, i.e.
// only the reply flag set under MSB-first numbering), but the literal is
// DECIMAL 20 (= 0b00010100). Possibly 0x20 was intended — confirm before use.
#define TEST_OPTIONS 20 //0b00100000
|
namespace ubeeme {
|
|
namespace moversight {
|
|
|
|
/**
|
|
* @brief constructor
|
|
* @param m The moversight module
|
|
*/
|
|
SplitApplication::SplitApplication(Moversight & m) : Application(m, "SplitApplication"), joinDelayStat(m, "joinDelay") {
|
|
WATCH(state);
|
|
splitOnce = false;
|
|
|
|
splitDuration.setName("splitDuration");
|
|
splitDuration.setUnit("s");
|
|
|
|
numberOfPeers = module.par("numberOfPeers");
|
|
numberOfClusters = numberOfPeers / module.getMaxPeerCount();
|
|
|
|
dt = 0.0;
|
|
}
|
|
|
|
/**
 * @brief destructor
 */
SplitApplication::~SplitApplication() {
    // Intentionally empty: all members release their resources themselves.
}
|
/// @brief Subscribes this application to every membership, split and merge
/// event it reacts to. Called once by the framework before the test runs.
void
SplitApplication::initialise() {
    // Join life-cycle events.
    dis->subscribe<PeerJoinedEvent>(this);
    dis->subscribe<JoinConfirmEvent>(this);
    dis->subscribe<JoinRequestEvent>(this);
    dis->subscribe<JoinRejectedEvent>(this);
    dis->subscribe<JoinAbortedEvent>(this);
    // Membership-change events.
    dis->subscribe<PeerLeftEvent>(this);
    dis->subscribe<PeerReconnectedEvent>(this);
    // Split / merge protocol events.
    dis->subscribe<SplitDoneEvent>(this);
    dis->subscribe<MergeRequestEvent>(this);
}
|
/// @brief Framework teardown hook; nothing to clean up for this application.
void
SplitApplication::finalise() {
}
|
/**
|
|
* @brief Invites a peer
|
|
* @param ta The address to invite
|
|
*/
|
|
void
|
|
SplitApplication::invitePeer(TransportAddress & ta) {
|
|
|
|
std::stringstream buf;
|
|
buf << "invitePeer - invite peer at address " << ta;
|
|
DEBUG(buf.str().c_str());
|
|
PeerDescription pDesc;
|
|
dis->invitePeer(ta, pDesc);
|
|
joinDelayStat.startRecord();
|
|
|
|
}
|
|
|
|
/**
|
|
* @brief Leaves the group
|
|
*/
|
|
void
|
|
SplitApplication::leaveGroup() {
|
|
|
|
DEBUG("leaveGroup - peer leave the group");
|
|
dis->leaveGroup();
|
|
}
|
|
|
|
/**
|
|
* @brief Sends dummy test data to the group
|
|
*/
|
|
void
|
|
SplitApplication::sendData() {
|
|
|
|
DEBUG("sendData - send 24 byte dummy data to the group");
|
|
GroupData data;
|
|
dis->sendMessage(data);
|
|
|
|
}//End
|
|
|
|
void
|
|
SplitApplication::splitGroup(unsigned char options, PeerIDList splitPeers) {
|
|
|
|
DEBUG("splitPeers - the group is splitting");
|
|
dis->splitGroup(options, splitPeers);
|
|
|
|
VectorStatistic vs("splitGroup");
|
|
vs.record(GenericTime::currentTime());
|
|
|
|
}//End
|
|
|
|
/**
|
|
* @brief Tries to start the given test case.
|
|
* @param i The test case to start
|
|
*/
|
|
void
|
|
SplitApplication::startTestCase(unsigned int i) {
|
|
|
|
switch (i) {
|
|
case 0:
|
|
testCase00();
|
|
break;
|
|
case 1:
|
|
testCase01();
|
|
break;
|
|
case 2:
|
|
testCase02();
|
|
break;
|
|
case 3:
|
|
testCase03();
|
|
break;
|
|
case 4:
|
|
testCase04();
|
|
break;
|
|
case 5:
|
|
testCase05(); // bis hier splitcases abgedeckt
|
|
break;
|
|
case 6:
|
|
testCase06();
|
|
break;
|
|
case 7:
|
|
testCase07();
|
|
break;
|
|
case 8:
|
|
testCase08();
|
|
break;
|
|
default:
|
|
break;
|
|
}//End switch
|
|
}
|
|
|
|
/**
|
|
* @brief Test case 0: complete Cluster
|
|
*/
|
|
void
|
|
SplitApplication::testCase00() {
|
|
|
|
// init app
|
|
if (initApp) {
|
|
createRoster(numberOfPeers + 1, numberOfClusters);
|
|
}
|
|
//------------------------------------------------------------------
|
|
//run the app
|
|
//------------------------------------------------------------------
|
|
|
|
if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
|
|
if (dis->getMembershipService().getLocalState() == JOINED) {
|
|
if (!splitOnce) {
|
|
splitOnce = true;
|
|
|
|
PeerList peerList = dis->getMembershipService().getPeerList();
|
|
PeerIDList pIdList;
|
|
for (size_t i = 0; i < peerList.size(); i++) {
|
|
if (peerList.get(i).getClusterID() == 0) {
|
|
pIdList.add(peerList.get(i).getPeerID());
|
|
}//End if
|
|
}//end for
|
|
|
|
splitGroup(TEST_OPTIONS, pIdList); //SPLITreq
|
|
module.scheduleTestCase(45);
|
|
}//End if
|
|
}//End if
|
|
}//End if
|
|
}
|
|
|
|
/**
|
|
* @brief Test case1: whole masters
|
|
*/
|
|
void
|
|
SplitApplication::testCase01() {
|
|
|
|
// init app
|
|
if (initApp) {
|
|
createRoster(numberOfPeers + 1, numberOfClusters);
|
|
}
|
|
//------------------------------------------------------------------
|
|
//run the app
|
|
//----------------------------------
|
|
|
|
if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
|
|
if (dis->getMembershipService().getLocalState() == JOINED) {
|
|
if (!splitOnce) {
|
|
splitOnce = true;
|
|
PeerIDList pidl;
|
|
pidl.add(dis->getMembershipService().getMasterPeerIDList());
|
|
|
|
splitGroup(TEST_OPTIONS, pidl); //SPLITreq
|
|
module.scheduleTestCase(45);
|
|
}//End if
|
|
}//End if
|
|
}//End if
|
|
}
|
|
|
|
/**
|
|
* @brief Test case2: <50% of slaves
|
|
*/
|
|
void
|
|
SplitApplication::testCase02() {
|
|
|
|
// init app
|
|
if (initApp) {
|
|
createRoster(numberOfPeers + 1, numberOfClusters);
|
|
}
|
|
//------------------------------------------------------------------
|
|
//run the app
|
|
//----------------------------------
|
|
|
|
if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
|
|
if (dis->getMembershipService().getLocalState() == JOINED) {
|
|
if (!splitOnce) {
|
|
splitOnce = true;
|
|
PeerIDList pidl;
|
|
int numberOfSlavesToAdd = (numberOfPeers - numberOfClusters) / 2 - 1;
|
|
PeerIDList peersToAdd = getSpecifiedNumberOfPeers(false, numberOfSlavesToAdd, numberOfClusters);
|
|
pidl.add(peersToAdd);
|
|
|
|
splitGroup(TEST_OPTIONS, pidl); //SPLITreq
|
|
module.scheduleTestCase(45);
|
|
}//End if
|
|
}//End if
|
|
}//End if
|
|
}
|
|
|
|
/**
|
|
* @brief Test case3: >= 50% of slaves
|
|
*/
|
|
void
|
|
SplitApplication::testCase03() {
|
|
|
|
// init app
|
|
if (initApp) {
|
|
createRoster(numberOfPeers + 1, numberOfClusters);
|
|
}
|
|
//------------------------------------------------------------------
|
|
//run the app
|
|
//----------------------------------
|
|
|
|
if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
|
|
if (dis->getMembershipService().getLocalState() == JOINED) {
|
|
if (!splitOnce) {
|
|
splitOnce = true;
|
|
PeerIDList pidl;
|
|
int numberOfSlavesToAdd = (numberOfPeers - numberOfClusters) / 2;
|
|
PeerIDList peersToAdd = getSpecifiedNumberOfPeers(false, numberOfSlavesToAdd, numberOfClusters);
|
|
pidl.add(peersToAdd);
|
|
|
|
splitGroup(TEST_OPTIONS, pidl); //SPLITreq
|
|
module.scheduleTestCase(45);
|
|
}//End if
|
|
}//End if
|
|
}//End if
|
|
}
|
|
|
|
/**
|
|
* @brief Test case4: <50% mixed
|
|
*/
|
|
void
|
|
SplitApplication::testCase04() {
|
|
|
|
// init app
|
|
if (initApp) {
|
|
createRoster(numberOfPeers + 1, numberOfClusters);
|
|
}
|
|
//------------------------------------------------------------------
|
|
//run the app
|
|
//----------------------------------
|
|
|
|
if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
|
|
if (dis->getMembershipService().getLocalState() == JOINED) {
|
|
if (!splitOnce) {
|
|
splitOnce = true;
|
|
PeerIDList pidl;
|
|
int numberOfSlavesToAdd = numberOfPeers / 2 - 1;
|
|
PeerIDList peersToAdd = getSpecifiedNumberOfPeers(true, numberOfSlavesToAdd, numberOfClusters);
|
|
pidl.add(peersToAdd);
|
|
|
|
splitGroup(TEST_OPTIONS, pidl); //SPLITreq
|
|
module.scheduleTestCase(45);
|
|
}//End if
|
|
}//End if
|
|
}//End if
|
|
}
|
|
|
|
/**
|
|
* @brief Test case5: >= 50% mixed
|
|
*/
|
|
void
|
|
SplitApplication::testCase05() {
|
|
|
|
// init app
|
|
if (initApp) {
|
|
createRoster(numberOfPeers + 1, numberOfClusters);
|
|
}
|
|
//------------------------------------------------------------------
|
|
//run the app
|
|
//----------------------------------
|
|
|
|
if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
|
|
if (dis->getMembershipService().getLocalState() == JOINED) {
|
|
if (!splitOnce) {
|
|
splitOnce = true;
|
|
PeerIDList pidl;
|
|
int numberOfSlavesToAdd = numberOfPeers / 2;
|
|
PeerIDList peersToAdd = getSpecifiedNumberOfPeers(true, numberOfSlavesToAdd, numberOfClusters);
|
|
pidl.add(peersToAdd);
|
|
|
|
splitGroup(TEST_OPTIONS, pidl); //SPLITreq
|
|
module.scheduleTestCase(45);
|
|
}//End if
|
|
}//End if
|
|
}//End if
|
|
}
|
|
|
|
/**
 * @brief Test case 6: 6 peers cs 6 (Join - split)
 *
 * Peer .1 invites every candidate address from omnetpp.ini one by one and,
 * once all invitations have been sent and the peer is JOINED, splits off
 * 3 peers (masters and slaves mixed) exactly once.
 */
void
SplitApplication::testCase06() {
    int moduleIndex = module.getLocalAddress().getHostAddress().get4().getDByte(3);

    // One-time setup: load the invitation candidates and, on the initiating
    // peer (host index .1), arm the periodic test-case scheduler.
    if (initApp) {
        candidates = OmnetppIniUtils::getDestinationAddressesFromOmnetppIni(module);
        waitCounter = 5;
        initApp = false;
        if (moduleIndex == 1) {
            module.scheduleTestCase(45);
        }//End if
    }//End if
    if (moduleIndex == 1) {
        //we have more candidates?
        if (candidates.size() > 0) {
            // NOTE(review): this branch decrements waitCounter and re-schedules,
            // but control still FALLS THROUGH to the invitation below, so the
            // "wait" never delays anything and scheduleTestCase(45) can be
            // called twice in a single pass — confirm whether an early return
            // was intended here.
            if (waitCounter > 0) {
                waitCounter--;
                module.scheduleTestCase(45);
            }
            if (candidatesIndex < candidates.size()) {
                // Invite the next candidate and come back later for the rest.
                invitePeer(candidates.get(candidatesIndex++));
                module.scheduleTestCase(45);
            }
            else {
                // All candidates invited: once JOINED, perform the split once.
                if (dis->getMembershipService().getLocalState() == JOINED) {
                    if (!splitOnce) {
                        splitOnce = true;
                        PeerIDList pidl;
                        int numberOfSlavesToAdd = 3;
                        PeerIDList peersToAdd = getSpecifiedNumberOfPeers(true, numberOfSlavesToAdd, 1);
                        pidl.add(peersToAdd);
                        splitGroup(TEST_OPTIONS, pidl); //SPLITreq
                        module.scheduleTestCase(45);
                    }
                }
            }
        }
    }
}
|
|
/**
 * @brief Test case 7: 24 peers cs 6 (Join - split)
 *
 * Peer .1 invites every candidate address from omnetpp.ini one by one and,
 * once all invitations have been sent and the peer is JOINED, splits off
 * 12 peers (masters and slaves mixed, drawn over 4 clusters) exactly once.
 */
void
SplitApplication::testCase07() {
    int moduleIndex = module.getLocalAddress().getHostAddress().get4().getDByte(3);

    // One-time setup: load the invitation candidates and, on the initiating
    // peer (host index .1), arm the periodic test-case scheduler.
    if (initApp) {
        candidates = OmnetppIniUtils::getDestinationAddressesFromOmnetppIni(module);
        waitCounter = 5;
        initApp = false;
        if (moduleIndex == 1) {
            module.scheduleTestCase(45);
        }//End if
    }//End if
    if (moduleIndex == 1) {
        //we have more candidates?
        if (candidates.size() > 0) {
            // NOTE(review): same fall-through as testCase06 — the waitCounter
            // branch does not return, so the invitation below still runs and
            // scheduleTestCase(45) may fire twice in one pass; confirm intent.
            if (waitCounter > 0) {
                waitCounter--;
                module.scheduleTestCase(45);
            }
            if (candidatesIndex < candidates.size()) {
                // Invite the next candidate and come back later for the rest.
                invitePeer(candidates.get(candidatesIndex++));
                module.scheduleTestCase(45);
            }
            else {
                // All candidates invited: once JOINED, perform the split once.
                if (dis->getMembershipService().getLocalState() == JOINED) {
                    if (!splitOnce) {
                        splitOnce = true;
                        PeerIDList pidl;
                        int numberOfSlavesToAdd = 12;
                        PeerIDList peersToAdd = getSpecifiedNumberOfPeers(true, numberOfSlavesToAdd, 4);
                        pidl.add(peersToAdd);
                        splitGroup(TEST_OPTIONS, pidl); //SPLITreq
                        module.scheduleTestCase(45);
                    }
                }
            }
        }
    }
}
|
|
/**
 * @brief Test case 8: 96 peers cs 6 (Join - split)
 *
 * Peer .1 invites every candidate address from omnetpp.ini one by one and,
 * once all invitations have been sent and the peer is JOINED, splits off
 * 48 peers (masters and slaves mixed, drawn over 8 clusters) exactly once.
 */
void
SplitApplication::testCase08() {
    int moduleIndex = module.getLocalAddress().getHostAddress().get4().getDByte(3);

    // One-time setup: load the invitation candidates and, on the initiating
    // peer (host index .1), arm the periodic test-case scheduler.
    if (initApp) {
        candidates = OmnetppIniUtils::getDestinationAddressesFromOmnetppIni(module);
        waitCounter = 5;
        initApp = false;
        if (moduleIndex == 1) {
            module.scheduleTestCase(45);
        }//End if
    }//End if
    if (moduleIndex == 1) {
        //we have more candidates?
        if (candidates.size() > 0) {
            // NOTE(review): same fall-through as testCase06 — the waitCounter
            // branch does not return, so the invitation below still runs and
            // scheduleTestCase(45) may fire twice in one pass; confirm intent.
            if (waitCounter > 0) {
                waitCounter--;
                module.scheduleTestCase(45);
            }
            if (candidatesIndex < candidates.size()) {
                // Invite the next candidate and come back later for the rest.
                invitePeer(candidates.get(candidatesIndex++));
                module.scheduleTestCase(45);
            }
            else {
                // All candidates invited: once JOINED, perform the split once.
                if (dis->getMembershipService().getLocalState() == JOINED) {
                    if (!splitOnce) {
                        splitOnce = true;
                        PeerIDList pidl;
                        int numberOfSlavesToAdd = 48;
                        PeerIDList peersToAdd = getSpecifiedNumberOfPeers(true, numberOfSlavesToAdd, 8);
                        pidl.add(peersToAdd);
                        splitGroup(TEST_OPTIONS, pidl); //SPLITreq
                        module.scheduleTestCase(45);
                    }
                }
            }
        }
    }
}
|
/**
|
|
* @brief Adding the specified number of peers to the peerIDList
|
|
* @param mixed - if true: master and slaves, if false: only slaves
|
|
* @param numberOfPeersToAdd - how many peers to add to the list
|
|
* @param numberOfClusters - number of clusters in the network
|
|
* @return PeerIDList with the peers to split
|
|
*/
|
|
PeerIDList
|
|
SplitApplication::getSpecifiedNumberOfPeers(bool mixed, int numberOfPeersToAdd, int numberOfClusters) {
|
|
PeerIDList peersToAdd;
|
|
for (int i = 0; i < numberOfClusters; i++) {
|
|
PeerIDList possiblePeers;
|
|
if (mixed) {
|
|
possiblePeers = dis->getMembershipService().getPeerIDList();
|
|
}
|
|
else {
|
|
possiblePeers = dis->getMembershipService().getClusterPeerIDListSlavesOnly(i);
|
|
}
|
|
for (unsigned int j = 0; j < possiblePeers.size(); j++) {
|
|
peersToAdd.add(possiblePeers.get(j));
|
|
if (peersToAdd.size() == (unsigned) numberOfPeersToAdd) {
|
|
break;
|
|
}
|
|
}
|
|
if (peersToAdd.size() == (unsigned) numberOfPeersToAdd) {
|
|
break;
|
|
}
|
|
}
|
|
return peersToAdd;
|
|
}
|
|
|
|
/**
|
|
* @brief Creates a new Roster for the given peers and clusters
|
|
* @param nextPeerId - the next peer id that can be used
|
|
* @param numberOfClusters - the number of clusters to create
|
|
*/
|
|
void
|
|
SplitApplication::createRoster(int nextPeerId, int numberOfClusters) {
|
|
|
|
Roster roster;
|
|
roster.setNextPeerID(nextPeerId);
|
|
roster.setViewID(nextPeerId);
|
|
|
|
PeerDescription dummyPDesc;
|
|
PeerResources dummyPRes;
|
|
|
|
ClusterID cID = 0;
|
|
for (unsigned int i = 1; i <= numberOfPeers; i++) {
|
|
|
|
TransportAddress ta = getNewTransportAddress(i);
|
|
ta.setPort(module.getLocalAddress().getPort());
|
|
MemberDescription mDesc(i, ta, JOINED, cID, dummyPDesc, dummyPRes);
|
|
roster.addMemberDescription(mDesc);
|
|
|
|
if ((i % module.getMaxPeerCount()) == 0) {
|
|
cID++;
|
|
}//End if
|
|
}//end for
|
|
|
|
//set up group for each peer
|
|
dis->setupGroupFromRoster(roster, module.getLocalAddress().getHostAddress().get4().getDByte(3));
|
|
dt = simTime();
|
|
}
|
|
|
|
/**
|
|
* @brief Create a new TransportAddress with the given index
|
|
* @param i - index to create the TransportAddress with
|
|
* @return the created transportaddress
|
|
*/
|
|
TransportAddress
|
|
SplitApplication::getNewTransportAddress(int i) {
|
|
std::stringstream taStream;
|
|
taStream << "192.168.0." << i;
|
|
std::string s(taStream.str());
|
|
return TransportAddress(IPvXAddress(s.c_str()));
|
|
}
|
|
|
|
void
|
|
SplitApplication::receiveGroupData(const GroupData & dat, const PeerID sender) {
|
|
std::stringstream buf;
|
|
buf << "receiveGroupData - receive group data from peer ID " << sender;
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
/**
|
|
* @brief Handle an incoming JoinRequestEvent.
|
|
* @param The event.
|
|
*
|
|
* Received an invitation from from a group
|
|
*/
|
|
void
|
|
SplitApplication::handleEvent(const JoinRequestEvent & e) {
|
|
|
|
DEBUG("invitationReceived - invitation received");
|
|
std::stringstream buf;
|
|
buf << "invitationReceived - invitationID: " << e.getInvitation().getInvitationID() << " inviterID: " << e.getInvitation().getInviterID();
|
|
DEBUG(buf.str().c_str());
|
|
|
|
DEBUG("invitationReceived - accept invitation");
|
|
PeerDescription pDesc;
|
|
PeerResources resources;
|
|
dis->acceptInvitation(e.getInvitation(), "accept invitation", pDesc, resources);
|
|
|
|
}
|
|
|
|
/**
|
|
* @brief Handle an incoming JoinConfirmEvent.
|
|
* @param The event.
|
|
*
|
|
* An invitation was accepted by the foreign host.
|
|
*/
|
|
void
|
|
SplitApplication::handleEvent(const JoinConfirmEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "receiveInvitationResponse - peer at TA: " << e.getInvitation().getInviteeAddress() << " accept invitation " << e.getInvitation().getInvitationID();
|
|
buf << " message: " << e.getMessage() << ", peer description: " << e.getDescription().getDescription();
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
void
|
|
SplitApplication::handleEvent(const JoinRejectedEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "invitationAborted - invitation ID " << e.getInvitation().getInvitationID() << " aborted";
|
|
buf << " reason: " << e.getReason();
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
void
|
|
SplitApplication::handleEvent(const JoinAbortedEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "invitationAborted - invitation to peer " << e.getTransportAddress() << " aborted";
|
|
buf << " reason: " << e.getReason();
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
void
|
|
SplitApplication::handleEvent(const PeerJoinedEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "peerJoined - peer " << e.getPeerID() << "@TA_" << e.getTransportAddress() << " joined group successfully (" << e.getDescription().getDescription() << ")";
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
void
|
|
SplitApplication::handleEvent(const PendingPeersEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "peerIsPending - peer " << e.getPeerIDList() << " is pending";
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
void
|
|
SplitApplication::handleEvent(const PeerReconnectedEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "peerReconnected - peer " << e.getPeerID() << " is reconnected";
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
/**
|
|
* @brief Signals, that a dedicated peer has left the group.
|
|
* @param peer The left peer.
|
|
*/
|
|
void
|
|
SplitApplication::handleEvent(const PeerLeftEvent & e) {
|
|
|
|
std::stringstream buf;
|
|
buf << "peerLeft - peer " << e.getPeer().getPeerID() << "@TA_" << e.getPeer().getLocalAddress() << " left the group";
|
|
DEBUG(buf.str().c_str());
|
|
}
|
|
|
|
/**
 * @brief Signals that a split is done
 *
 * Records the split duration (time since dt was set in createRoster) and,
 * on the peer with host index .1, immediately requests a merge with the
 * group reachable at host index 3.
 */
void
SplitApplication::handleEvent(const SplitDoneEvent & e) {

#if OMNETPP
    // Duration from roster setup until split completion, recorded against
    // the configured peer count.
    splitDuration.recordWithTimestamp(simTime() - dt, numberOfPeers);
    VectorStatistic vs("splitDone");
    vs.record(GenericTime::currentTime());
    // NOTE(review): host index 3 is hard-coded as the merge target — confirm
    // this matches the scenario layout for every test case.
    if (module.getLocalAddress().getHostAddress().get4().getDByte(3) == 1) {
        TransportAddress ta;
        ta = getNewTransportAddress(3);
        dis->mergeGroup(ta);
    }
#endif
}
|
/**
 * @brief Handle an incoming MergeRequestEvent.
 *
 * This application unconditionally accepts every merge request.
 */
void
SplitApplication::handleEvent(const MergeRequestEvent & e) {
    dis->acceptGroupMerge();
}
}//End namespace moversight
|
|
}//End namespace ubeeme
|
|
|