def __init__(self, addr, coder=None):
    """Wire up the server: one event loop, one listening Acceptor, and a client map.

    addr  -- address the Acceptor binds to (passed through unchanged)
    coder -- optional message codec handed to each client channel
             (NOTE(review): exact codec contract not visible here -- confirm)
    """
    self.loop = Loop()
    self.acceptor = Acceptor(addr, self.loop)
    self.coder = coder
    # Registry of connected clients; populated by process_new.
    self.clients = {}
    # Route acceptor failures and new connections back into this object.
    self.acceptor.set_error_callback(self.fatal_error)
    self.acceptor.set_connect_callback(self.process_new)
def run(self):
    """Drive a timed Paxos benchmark: build the initial configuration, send
    timed client requests, then repeat under NCONFIGS-1 reconfigurations.

    Per-request wall-clock durations (seconds) are collected in self.times.
    """
    self.successful_event = 0  # NOTE(review): never updated below -- confirm intent
    initialconfig = Config([], [], [])
    self.times = []
    c = 0  # configuration number 0 (initial configuration)
    # Spawn replicas, acceptors and leaders of the initial configuration.
    for i in range(self.no_of_replicas):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    for i in range(NACCEPTORS):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    # Timed client requests against the initial configuration.
    for i in range(self.no_of_requests):
        self.start_time = datetime.now()
        pid = "client %d.%d" % (c, i)
        for r in initialconfig.replicas:
            cmd = Command(pid, 0, "operation %d.%d" % (c, i))
            self.sendMessage(r, RequestMessage(pid, cmd))
        time.sleep(1)  # give the protocol time to settle before timing
        self.end_time = datetime.now()
        self.total_time = self.end_time - self.start_time
        # print(self.times)
        self.times.append(self.total_time.total_seconds())
    for c in range(1, NCONFIGS):
        # Create new configuration (replicas carry over; acceptors/leaders are new).
        config = Config(initialconfig.replicas, [], [])
        for i in range(NACCEPTORS):
            pid = "acceptor %d.%d" % (c, i)
            Acceptor(self, pid)
            config.acceptors.append(pid)
        for i in range(NLEADERS):
            pid = "leader %d.%d" % (c, i)
            Leader(self, pid, config)
            config.leaders.append(pid)
        # Send reconfiguration request.
        # NOTE(review): `i` here is the leftover index from the leaders loop
        # above, so the "master" pid is effectively fixed -- looks like an
        # upstream copy-paste quirk; confirm before relying on pid values.
        for r in config.replicas:
            pid = "master %d.%d" % (c, i)
            cmd = ReconfigCommand(pid, 0, str(config))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # WINDOW-1 noops push the reconfiguration through the command window.
        for i in range(WINDOW - 1):
            pid = "master %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation noop")
                self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # Client requests against the new configuration (not timed).
        for i in range(self.no_of_requests):
            pid = "client %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation %d.%d" % (c, i))
                self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
def _initActors(self):
    """Create this node's three Paxos roles, then restore persisted
    acceptor/learner state from disk if a state file exists."""
    self.proposer = Proposer(self.servers, self.id)
    self.acceptor = Acceptor(self.servers, self.id)
    self.learner = Learner(self.servers, self.id)
    # load saved state (crash recovery): actors must exist before _loadState
    # can import their dumps into them.
    if os.path.isfile(self.stateFileName):
        print('Loading from:', self.stateFileName)
        self._loadState()
def createAcceptor(self):
    """Construct and run an Acceptor bound to port+1; blocks in listen().

    Logging is disabled first when the owning object's logging switch is off.
    The 'exiting' log only runs after listen() returns (i.e. on shutdown).
    """
    acceptor = Acceptor(self.port + 1, self.ips, self.ip, self.num)
    if not self.logging_switch:
        acceptor.logging(False)
    acceptor.log('starting')
    acceptor.listen()
    acceptor.log('exiting')
def __init__(self, id, quorum_size, is_leader=False, promised_id=None,
             accepted_id=None, accepted_value=None):
    """Initialise a combined Paxos node playing all three roles.

    Each base class is initialised explicitly (not via super()) so that the
    multiple-inheritance bases each receive exactly their own arguments;
    changing this to cooperative super() would alter the call pattern.
    """
    Proposer.__init__(self, id, quorum_size, is_leader)
    Acceptor.__init__(self, id, promised_id, accepted_id, accepted_value)
    Learner.__init__(self, id, quorum_size)
def run(self):
    """Spawn the initial Paxos configuration, issue NREQUESTS client
    commands, then walk through NCONFIGS-1 reconfigurations, repeating the
    request workload against each new configuration."""
    initialconfig = Config([], [], [])
    c = 0  # configuration number 0
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    for i in range(NACCEPTORS):
        pid = "acceptor %d.%d" % (c,i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c,i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    # Broadcast each client command to every replica.
    for i in range(NREQUESTS):
        pid = "client %d.%d" % (c,i)
        for r in initialconfig.replicas:
            cmd = Command(pid,0,"operation %d.%d" % (c,i))
            self.sendMessage(r,RequestMessage(pid,cmd))
        time.sleep(1)
    for c in range(1, NCONFIGS):
        # Create new configuration (same replicas, fresh acceptors/leaders)
        config = Config(initialconfig.replicas, [], [])
        for i in range(NACCEPTORS):
            pid = "acceptor %d.%d" % (c,i)
            Acceptor(self, pid)
            config.acceptors.append(pid)
        for i in range(NLEADERS):
            pid = "leader %d.%d" % (c,i)
            Leader(self, pid, config)
            config.leaders.append(pid)
        # Send reconfiguration request
        # NOTE(review): `i` is stale here (left over from the loop above),
        # so the master pid does not vary per replica -- upstream quirk.
        for r in config.replicas:
            pid = "master %d.%d" % (c,i)
            cmd = ReconfigCommand(pid,0,str(config))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # WINDOW-1 noops advance the slot window past the reconfig command.
        for i in range(WINDOW-1):
            pid = "master %d.%d" % (c,i)
            for r in config.replicas:
                cmd = Command(pid,0,"operation noop")
                self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        for i in range(NREQUESTS):
            pid = "client %d.%d" % (c,i)
            for r in config.replicas:
                cmd = Command(pid,0,"operation %d.%d"%(c,i))
                self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
def run(self):
    """Build one Paxos configuration sized by self.clusterSize, then fire
    the client workload: once on a worker thread and once on this thread.

    Bug fix: the original called ``executor.submit(self.sendRequest(...))``,
    which invokes sendRequest synchronously and submits its *return value*
    (not a callable) to the pool -- no work ever ran on the executor.
    ``submit`` must receive the callable and its arguments separately.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number 0
    # Create replicas
    for i in range(self.clusterSize):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors (initial configuration)
    for i in range(self.clusterSize):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders (initial configuration)
    for i in range(self.clusterSize):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    self.start_time = time.time()
    executor = ThreadPoolExecutor(max_workers=self.clientSize)
    # FIX: pass callable + args; do not call sendRequest here.
    executor.submit(self.sendRequest, initialconfig, c)
    # Second send on the calling thread, now genuinely concurrent with the
    # pooled one (the original ran both sequentially due to the bug above).
    self.sendRequest(initialconfig, c)
def run(self):
    """Spawn replicas, a quorum of acceptors, leaders and concurrent Client
    processes, then block until every replica reports a result.

    Completion is detected by polling each replica's ``difference``
    attribute; None means that replica has not finished its workload yet.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number 0
    for i in range(NREPLICAS):
        # Each replica expects the total number of commands it will see.
        pid = "replica %d" % i
        Replica(self, pid, initialconfig, self.conc_clients * NREQUESTS, self.verbose)
        initialconfig.replicas.append(pid)
    for i in range(self.quorum_size):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid, self.verbose)
        initialconfig.acceptors.append(pid)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig, self.verbose)
        initialconfig.leaders.append(pid)
    # Concurrent clients drive NREQUESTS each against all replicas.
    for i in range(self.conc_clients):
        pid = f"client {c}.{i}"
        Client(self, pid, initialconfig.replicas, NREQUESTS, self.verbose)
    # Poll once per second until no replica still reports difference=None.
    completed = False
    while not completed:
        completed = True
        for i in range(NREPLICAS):
            if self.procs[initialconfig.replicas[i]].difference is None:
                completed = False
        time.sleep(1)
def run(self):
    """Build one configuration (clusterSize of each role), then fan the
    client workload out over clientSize OS processes and wait for them all.

    Uses multiprocessing rather than threads; self.p_list is assumed to be
    an empty list created elsewhere (TODO confirm in __init__).
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number 0
    # Create replicas
    for i in range(self.clusterSize):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors (initial configuration)
    for i in range(self.clusterSize):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders (initial configuration)
    for i in range(self.clusterSize):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    self.start_time = time.time()
    # One worker process per client; start immediately after creating each.
    for i in range(self.clientSize):
        self.p_list.append(
            mp.Process(target=self.sendRequest, args=(initialconfig, c)))
        self.p_list[i].start()
    # Block until every client process has finished.
    for i in self.p_list:
        i.join()
class TcpServer:
    """Reactor-style TCP server: a main acceptor loop hands accepted
    connections off to a pool of sub-reactor threads."""

    def __init__(self, ip, port, sub_reactor_num):
        # Acceptor owns the listening socket; AcceptorLoop drives it.
        self.__acceptor = Acceptor(ip, port)
        self.__loop = AcceptorLoop(self.__acceptor)
        self.__sub_reactors = SubReactorThreadPool(sub_reactor_num)
        pass

    def start(self):
        """Start sub-reactors first, then accept and dispatch (blocks in loop)."""
        self.__sub_reactors.start()
        self.__acceptor.ready()
        self.__loop.loop(self.__sub_reactors)
        pass

    def close(self):
        """Stop accepting new connections, then shut down the sub-reactors."""
        # Log message: "stopping acceptance of new connections"
        logger.simple_log('正在停止新连接的接收')
        self.__loop.un_loop()
        # Log message: "shutting down reactors"
        logger.simple_log('正在关闭 reactors')
        self.__sub_reactors.stop()
        pass
class TestAcceptor:
    """Tests for Acceptor: binding to a local port and accepting a peer."""

    def setup_class(self):
        # Acceptor reports accepted connections to this mocked receiver.
        self.receiver = Mock()
        self.acceptor = Acceptor(('127.0.0.1', PORT), self.receiver)

    def test_stream(self):
        # The listening socket must be bound to the requested address.
        assert self.acceptor.stream().getsockname() == ('127.0.0.1', PORT)

    def test_accept(self):
        # Non-blocking connect: EINPROGRESS is the expected in-flight state,
        # anything else is a genuine failure and is re-raised.
        peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        peer.setblocking(0)
        try:
            peer.connect(('127.0.0.1', PORT))
        except Exception as e:
            err = peer.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0 and err != errno.EINPROGRESS:
                raise e
        # Wait for readability, trigger the accept, and verify the callback.
        select.select([self.acceptor.stream()], [], [])
        self.acceptor.read_event()
        assert self.receiver.accepted_connection.called
class TestAcceptor:
    """Tests for Acceptor (duplicate of the variant above, differing only in
    whitespace): verifies local bind and the accept callback path."""

    def setup_class(self):
        # Mocked downstream object that should be notified of connections.
        self.receiver = Mock()
        self.acceptor = Acceptor(('127.0.0.1', PORT), self.receiver)

    def test_stream(self):
        # Listening socket bound where requested.
        assert self.acceptor.stream().getsockname() == ('127.0.0.1', PORT)

    def test_accept(self):
        # Non-blocking connect; only EINPROGRESS (or success) is tolerated.
        peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        peer.setblocking(0)
        try:
            peer.connect(('127.0.0.1', PORT))
        except Exception as e:
            err = peer.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0 and err != errno.EINPROGRESS:
                raise e
        # Block until the listener is readable, then accept and assert.
        select.select([self.acceptor.stream()],[],[])
        self.acceptor.read_event()
        assert self.receiver.accepted_connection.called
def main(): global incoming, incoming_lock, N, pid, port, root_port, send # Read global state. pid = int(sys.argv[1]) N = int(sys.argv[2]) port = int(sys.argv[3]) # Start and configure debugger LOG.basicConfig(filename='LOG/%d.log' % pid, level=LOG.DEBUG) # Create the necessary classes. mhandler = MasterHandler(pid, address, port) handler = WorkerThread(address, root_port + pid) communicator = Communicator(incoming, incoming_lock, pid, send, mhandler) #LOG.debug('Handlers initiated') acceptors = [(i, 'acceptor') for i in xrange(N)] leaders = [(i, 'leader') for i in xrange(N)] replicas = [(i, 'replica') for i in xrange(N)] acceptor = Acceptor(communicator) my_dict = dict() replica = Replica(leaders, my_dict, communicator) leader = Leader(acceptors, replicas, communicator) acceptor.start() handler.start() mhandler.start() while not os.path.isfile('LOG/%d.log' % (N - 1)): time.sleep(0.1) leader.start() replica.start() LOG.debug('main() ends IDENTITY pid: %d, port: %d ' % (pid, port))
def run():
    """Main detection/tracking loop over a video stream.

    Step 1: initialization.
      video: video_helper.py; detection: detector.py;
      result receiving: acceptor.py; configuration: config.py;
      orchestration: multiple_object_controller.py
    """
    configs = Configs()
    detector = Detector(configs)
    acceptor = Acceptor(configs)
    video_helper = VideoHelper(configs)
    object_controller = MultipleObjectController(configs, video_helper)
    # Step 2: main loop.
    #   A: detect every frame, no tracking (optionally smoothed)
    #   B: tracking enabled: a. frame with detection (+observation correction)
    #                        b. frame without detection (pure prediction)
    cur_frame_counter = 0
    detection_loop_counter = 0
    while video_helper.not_finished(cur_frame_counter):
        # 0. get frame
        frame = video_helper.get_frame()
        # 1.1 NUM_JUMP_FRAMES falsy -> detect on every frame
        if not configs.NUM_JUMP_FRAMES:
            detects = detector.detect(frame)
            object_controller.update(detects)
        else:
            # 1.2 detect only every NUM_JUMP_FRAMES-th frame
            # 1.2.1 this frame has a detection
            if detection_loop_counter % configs.NUM_JUMP_FRAMES == 0:
                detection_loop_counter = 0
                detects = detector.detect(frame)
                object_controller.update(detects)  # core update
            # 1.2.2 this frame has no detection: predict/track only
            else:
                object_controller.update_without_detection()  # core update
        # NOTE(review): `acceptor` is constructed but never consulted in the
        # loop ("ask acceptor do something" was left as a TODO upstream).
        cur_frame_counter += 1
        detection_loop_counter += 1
def __init__(self, site, hosts, messenger):
    """Assemble a Paxos site: stable storage, the three roles, and the
    application state (airport/planes) replayed from the learned log.

    Storage is initialised first so the acceptor/learner can be constructed
    from the recovered proposals and log; listeners are registered last so
    no messages arrive before the roles exist.
    """
    self.site = site
    self.hosts = hosts
    self.messenger = messenger
    self.store = StableStorage(site)
    # Recovered durable state: learned log, accepted proposals, highest
    # prepare seen, and accept counts.
    log, accProps, maxPrep, accepts = self.store.initialize()
    self.proposer = Proposer(site, hosts, messenger)
    self.acceptor = Acceptor(hosts, messenger, accProps, maxPrep)
    self.learner = Learner(hosts, messenger, self, log, accepts)
    # Rebuild application state from the recovered log.
    self.airport = Planes()
    self.airport.fillPlane(self.learner.log)
    self.messenger.addListener(self.proposer)
    self.messenger.addListener(self.acceptor)
    self.messenger.addListener(self.learner)
    # Catch up with any instances decided while this site was down.
    self.fillToCurrent()
class Server(object):
    """Event-loop TCP server (Python 2 code -- note print statements).

    An Acceptor produces raw sockets which are wrapped in Channels; the
    on_* methods are hooks intended to be overridden by subclasses.
    """

    def __init__(self, addr, coder=None):
        self.loop = Loop()
        self.acceptor = Acceptor(addr, self.loop)
        self.coder = coder
        # fd -> Channel for every live client connection.
        self.clients = {}
        self.acceptor.set_error_callback(self.fatal_error)
        self.acceptor.set_connect_callback(self.process_new)

    def start(self):
        """Begin listening and enter the event loop (blocks)."""
        self.acceptor.listen()
        self.loop.loop()

    def fatal_error(self, msg):
        """Acceptor failure: report and shut the whole server down."""
        print msg
        self.quit()

    def quit(self):
        """Close every client channel and stop the loop.

        Safe in Python 2 because dict.keys() returns a list snapshot,
        so popping during iteration does not invalidate it."""
        for fd in self.clients.keys():
            ch = self.clients.pop(fd)
            ch.close()
        self.loop.quit()

    def process_new(self, sock):
        """Wrap a freshly accepted socket in a Channel and register it."""
        ch = Channel(Socket(sock), self.loop, self.coder)
        ch.set_read_callback(self.on_msg_in)
        ch.set_write_callback(self.on_msg_sent)
        ch.set_error_callback(self.on_error)
        ch.set_peer_closed(self.peer_closed)
        self.clients[ch.fd] = ch
        self.on_connect(ch)

    def on_connect(self, ch=None):
        """Hook: called once per new client. Default just logs."""
        print 'client {} connect.'.format(ch.peer_addr)
        pass

    def on_msg_in(self, msg, ch=None):
        """Hook: a complete message arrived on a channel."""
        pass

    def on_msg_sent(self, ch=None):
        """Hook: a queued message finished sending."""
        pass

    def on_error(self, ch=None):
        """Hook: channel-level error; drop the client."""
        print 'error'
        self.clients.pop(ch.fd)

    def peer_closed(self, ch=None):
        """Hook: peer closed the connection; drop the client."""
        print 'client {} disconnect.'.format(ch.peer_addr)
        self.clients.pop(ch.fd)
def setup(self):
    """Populate self.conf with replica, acceptor, leader and client pids,
    constructing the corresponding actor objects as a side effect."""
    # Initiate system
    for i in range(self.NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, self.conf)
        self.conf.replicas.append(pid)
    for i in range(self.NACCEPTORS):
        pid = "acceptor %d" % i
        Acceptor(self, pid)
        self.conf.acceptors.append(pid)
    # NOTE(review): NLEADERS is a bare (module-level?) name while the other
    # counts come from self.* -- confirm this asymmetry is intentional.
    for i in range(NLEADERS):
        pid = "leader %d" % i
        Leader(self, pid, self.conf)
        self.conf.leaders.append(pid)
    # Initiate clients
    for c in range(self.num_clients):
        pid = "client %d" % c
        Client(self, pid)
        self.conf.clients.append(pid)
def run(self):
    """Throughput benchmark: build one configuration, run clients, poll
    until the replicas have accepted the full workload, then append the
    requests/second figure to data.txt."""
    initialconfig = Config([], [], [])
    c = 0  # configuration number 0
    for i in range(self.cluster_size):
        pid = "acceptor %d.%d" % (c,i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c,i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    for i in range(self.number_clients):
        pid = "client %d.%d" % (c,i)
        Client(self, pid, initialconfig.replicas, NREQUESTS)
    # Poll until some replica has accepted exactly the full workload.
    # NOTE(review): the `done = False` at the top of the loop body is
    # redundant (it is immediately re-derived below), and `time_end` is only
    # assigned when the condition fires -- order-sensitive; left untouched.
    done = False
    while not done:
        done = False
        for x in initialconfig.replicas:
            if self.procs[x].accepted - (NREQUESTS*self.number_clients) == 0:
                time_end = time.perf_counter()
                done = True
        time.sleep(1)
    # Timing variables
    # NOTE(review): `x` here is whatever replica the loop ended on, so the
    # stats are read from the last-iterated replica -- confirm intent.
    start = self.procs[x].start_time
    acc = self.procs[x].accepted
    time_total = (time_end - start)
    per_second = int(acc/time_total)
    with open('data.txt', 'a') as outfile:
        outfile.write(str(per_second))
        outfile.write("\n")
def __init__(self):
    """Start the torrent client: pick a free listening port, then run the
    reactor (this constructor blocks in Reactor().run()).

    The for/else is deliberate: the else arm runs only when no port in
    [_PORT_FIRST, _PORT_LAST] could be bound, in which case we exit(1).
    """
    # Peer id: fixed client prefix + zero-padded unix timestamp.
    self._peer_id = "-HS0001-" + str(int(time.time())).zfill(12)
    self._torrents = {}
    self._downloads = set()
    for self._port in range(_PORT_FIRST, _PORT_LAST + 1):
        try:
            self._acceptor = Acceptor(("localhost", self._port), self)
            break  # bound successfully; self._port holds the chosen port
        except Exception as err:
            logger.debug(err)
            continue
    else:
        logger.critical(
            ("Could not find free port in range {}-{} to "
             "accept connections").format(_PORT_FIRST, _PORT_LAST))
        sys.exit(1)
    logger.info("Listening on port {}".format(self._port))
    # Kick off downloads shortly after the reactor starts, then block.
    Reactor().schedule_timer(.01, self.start_downloads)
    Reactor().run()
def __init__(self, ip, port, sub_reactor_num):
    """Set up the acceptor, its driving loop, and the sub-reactor pool
    (same wiring as TcpServer.__init__ elsewhere in this file)."""
    self.__acceptor = Acceptor(ip, port)
    self.__loop = AcceptorLoop(self.__acceptor)
    self.__sub_reactors = SubReactorThreadPool(sub_reactor_num)
    pass
def setup_class(self):
    """Create one Acceptor bound to localhost:PORT, reporting accepted
    connections to a Mock receiver shared by the tests."""
    self.receiver = Mock()
    self.acceptor = Acceptor(('127.0.0.1', PORT), self.receiver)
# NOTE(review): this chunk is a fragment -- the leading sys.exit(0) appears to
# be the tail of a preceding (elided) branch, not an unconditional exit.
sys.exit(0)
if not accept_mode and not connect_mode:
    print(_helpText)
    sys.exit(0)
# Global tunnel lifecycle callbacks (shared by both modes).
Tunnel.set_tcp_fin_received_handler(tcptun.on_stream_fin_received)
Tunnel.set_tcp_closed_handler(tcptun.on_stream_closed)
Tunnel.set_udp_closed_handler(udptun.on_dgram_closed)
if accept_mode:
    # Server side additionally handles freshly initialised tunnels.
    Tunnel.set_tcp_initial_handler(tcptun.on_server_side_initialized)
    Tunnel.set_udp_initial_handler(udptun.on_server_side_initialized)
for addr, port, type_, arg in server_list:
    if accept_mode:
        # Server side: listen for incoming tunnel connections.
        acceptor = Acceptor('TUNNEL')
        acceptor.bind(addr, port)
        acceptor.listen()
        acceptor.set_on_accepted(server_side_on_accepted)
        acceptor.set_on_closed(acceptor_on_closed)
    else:
        # Client side: forward local tcp/udp traffic `via` a relay `to` a target.
        via, to = arg
        if type_ == 'tcp':
            acceptor = Acceptor('TCP')
            acceptor.bind(addr, port)
            acceptor.listen()
            acceptor.set_on_accepted(tcptun.gen_on_client_side_accepted(via, to))
            acceptor.set_on_closed(acceptor_on_closed)
        elif type_ == 'udp':
            receiver = Dgram()
            receiver.bind(addr, port)
from client import Client
from learner import Learner
from logger import get_logger
from logging import getLogger, CRITICAL, INFO, DEBUG

critical, debug, info = get_logger(__name__)

if __name__ == '__main__':
    # Drive a small Paxos deployment: values to propose are read from stdin,
    # one per line; acceptors/proposers/learners/clients run as greenlets.
    debug('Starting processes')
    from sys import stdin
    values = [line.strip() for line in stdin]
    config = 'config.txt'
    acceptors = [Acceptor(1, config), Acceptor(2, config), Acceptor(3, config)]
    proposers = [Proposer(1, config)]
    learners = [Learner(1, config)]
    clients = [Client(3, config, values)]
    # suppress logging for every role module except the learner, which is
    # kept at INFO so decided values remain visible.
    for module in (x.__module__ for x in {Acceptor, Proposer, Client}):
        getLogger(module).setLevel(level=CRITICAL)
    getLogger(Learner.__module__).setLevel(level=INFO)
    from sys import exit
    # FIX: the original was missing the closing parenthesis on this call,
    # which is a SyntaxError.
    joinall([spawn(x.reader_loop) for x in acceptors+proposers+learners+clients])
class Server(BaseActor):
    """Paxos server node combining proposer/acceptor/learner with a bully
    election, durable state snapshots (pickle), and a select()-based
    network loop (UDP for Paxos traffic, TCP for election traffic)."""

    def __init__(self, servers, sId, stateFileName):
        BaseActor.__init__(self, servers, sId)
        self.stateFileName = stateFileName
        # Networking fields are populated later by _initNetworking.
        self.SERVER_IP = None
        self.UDP_PORT = None
        self.TCP_PORT = None
        self.udpSock = None
        self.tcpSock = None
        self.inputs = None
        self.currentRequests = set()
        self.requestQueue = queue.Queue()
        self.election = Election(servers, sId)
        # Recovery flags: block client processing until the log is repaired
        # and the head of the log has been located (lookahead).
        self.recovery = True
        self.recoveryLookahead = True
        self.inRecovery = set()
        self.lookaheadIndex = None
        self.recoveryTimer = None
        # Actors are created later by _initActors.
        self.proposer = None
        self.acceptor = None
        self.learner = None

    # writes current state of node to disk
    def _saveState(self):
        acceptorDump = self.acceptor.exportDict()
        learnerDump = self.learner.exportDict()
        dump = dict()
        dump['acceptor'] = acceptorDump
        dump['learner'] = learnerDump
        with open(self.stateFileName, 'wb') as sf:
            pickle.dump(dump, sf)

    # load state from disk
    def _loadState(self):
        with open(self.stateFileName, 'rb') as sf:
            dump = pickle.load(sf)
        acceptorDump = dump['acceptor']
        learnerDump = dump['learner']
        self.acceptor.importDict(acceptorDump)
        self.learner.importDict(learnerDump)

    def _initActors(self):
        """Create the three Paxos roles, then restore any persisted state."""
        self.proposer = Proposer(self.servers, self.id)
        self.acceptor = Acceptor(self.servers, self.id)
        self.learner = Learner(self.servers, self.id)
        # load saved
        if os.path.isfile(self.stateFileName):
            print('Loading from:', self.stateFileName)
            self._loadState()

    # set up networking
    def _initNetworking(self):
        self.SERVER_IP, self.UDP_PORT, self.TCP_PORT = self.servers[self.id]
        print(self.SERVER_IP, self.UDP_PORT, self.TCP_PORT, self.id)
        self.udpSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.udpSock.bind((self.SERVER_IP, self.UDP_PORT))
        self.tcpSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tcpSock.bind((self.SERVER_IP, self.TCP_PORT))
        self.tcpSock.listen(5)
        # Both sockets are polled together in run() via select().
        self.inputs = [self.udpSock, self.tcpSock]

    # initialize actors and networking
    def init(self):
        self._initActors()
        self._initNetworking()

    # if not leader, forward message to the leader
    # if no leader, initiate leader election
    def forwardToLeader(self, msg):
        leader = self.election.getLeader()
        if leader is None:
            self.election.startElection()
        else:
            send(self.servers[leader], msg)

    def handleRequest(self, msg):
        # is the leader, check is already complete, add to queue, propose when ready
        if self.election.isLeader():
            self.requestQueue.put(msg)
        else:
            self.forwardToLeader(msg)

    def handleLog(self, msg):
        """Print the learned log (leader only); otherwise forward."""
        if self.election.isLeader():
            log = sorted([(k, v) for k, v in self.learner.learnedValues.items()])
            for entry in log:
                print(entry)
        else:
            self.forwardToLeader(msg)

    # find the current head of the log by creating proposals for the next slot until head is found
    def doLookahead(self):
        if not self.recoveryLookahead:
            self.lookaheadIndex = None
            return
        if not self.election.isLeader():
            return
        currentMaxIndex = self.learner.getMaxIndex()
        if currentMaxIndex is None:
            # Empty log: probe starting at the base index.
            if self.lookaheadIndex is None:
                self.lookaheadIndex = self.learner.getBaseIndex()
            msg = self.createEmptyRequest(self.lookaheadIndex)
            self.proposer.newReqProposal(msg, self.lookaheadIndex)
        elif self.lookaheadIndex is None or self.lookaheadIndex <= currentMaxIndex:
            # Probe the slot just past the highest learned index.
            self.lookaheadIndex = currentMaxIndex + 1
            msg = self.createEmptyRequest(self.lookaheadIndex)
            self.proposer.newReqProposal(msg, self.lookaheadIndex)
        elif self.lookaheadIndex == currentMaxIndex + 1:
            # still at current lookahead
            return
        else:
            print('Error: Lookahead is too far ahead', file=sys.stderr)

    # recover known missing slots in the log
    def doRecovery(self):
        self.startRecoveryTimer()
        if not self.recovery and not self.recoveryLookahead:
            return
        # if not leader, stop with recovery
        if not self.election.isLeader():
            return
        missing = self.learner.getMissingValues()
        print('Missing:', missing)
        print('In Recovery:', self.inRecovery)
        print('Perform Lookahead:', self.recoveryLookahead)
        print('Lookahead Index:', self.lookaheadIndex)
        if len(missing) == 0:
            self.recovery = False
            self.inRecovery.clear()
            # we're done with recovery, do lookahead if not done
            self.doLookahead()
            return
        # if we need recovery, do recovery also
        self.recoveryLookahead = True
        for index in missing:
            # already in recovery
            if index in self.inRecovery:
                continue
            self.inRecovery.add(index)
            # create dummy message to fill in gap
            msg = self.createEmptyRequest(index)
            self.proposer.newReqProposal(msg, index)

    def resetRecoveryState(self):
        self.recovery = True

    def startRecoveryTimer(self):
        """Periodically re-enable recovery (every 5s) unless already pending."""
        if self.recoveryTimer is not None and self.recoveryTimer.is_alive():
            return
        self.recoveryTimer = threading.Timer(5.0, self.resetRecoveryState)
        self.recoveryTimer.start()

    def createEmptyRequest(self, index):
        """Build a no-op request used to fill log gaps during recovery."""
        msg = dict()
        msg['type'] = 'request'
        msg['clientid'] = self.id*-1  # negative server id will indicate server request
        msg['reqid'] = index  # fake reqid
        msg['value'] = ""
        return msg

    def processRequest(self):
        """Propose the next queued client request (leader, post-recovery only).

        Already-completed requests are answered immediately and the method
        recurses to drain further completed entries from the queue."""
        if self.recovery or self.recoveryLookahead or self.requestQueue.empty():
            return
        msg = self.requestQueue.get()
        clientId = msg['clientid']
        clientReqId = msg['reqid']
        gReqId = getGlobalReqId(clientId, clientReqId)
        completed = self.learner.getCompleted(gReqId)
        if completed is not None:
            clientRetIP = msg.get('retip')  # str
            clientRetPort = msg.get('retport')  # int
            self.sendReply(clientRetIP, clientRetPort, completed)
            self.processRequest()
            return
        index = self.learner.getNextIndex()
        self.proposer.newReqProposal(msg, index)

    def handleAccepted(self, msg):
        """Feed an 'accepted' message to learner and proposer, and reconcile:
        overridden proposals get their original request re-queued (unless it
        was a recovery dummy); successful ones trigger the client reply."""
        resultLearner = self.learner.handleAccepted(msg)
        acceptedProposal = self.proposer.handleAccepted(msg)
        if resultLearner is False or acceptedProposal is None:
            return
        if resultLearner != acceptedProposal.isLearned():
            print('Error: Learner and Proposer disagree', acceptedProposal.isLearned(), file=sys.stderr)
        wasOverridden = acceptedProposal.isOverridden()
        origRetried = acceptedProposal.isOrigRetried()
        if wasOverridden and not origRetried:
            retryMsg = acceptedProposal.recreateOrigRequest()
            # if request was dummy, don't resubmit
            if retryMsg.get('clientid') < 0:
                acceptedProposal.setOrigRetried()
                return
            self.requestQueue.put(retryMsg)
            acceptedProposal.setOrigRetried()
        elif not wasOverridden:
            self.recoveryLookahead = False
            retIp, retPort = acceptedProposal.getReturnInfo()
            index = acceptedProposal.getIndex()
            if not self.learner.checkReply(index):
                self.sendReply(retIp, retPort, index)

    def sendReply(self, retIp, retPort, index):
        """Send the learned value at `index` back to the client, if it left
        return coordinates, and record that the reply was sent."""
        if retIp is None or retPort is None:
            return
        print('Sent Reply', retIp, retPort)
        value = self.learner.getLearnedValue(index)
        omsg = dict()
        omsg['type'] = 'response'
        omsg['value'] = value
        send((retIp, retPort, None), omsg)
        self.learner.addReply(index)

    def handleMsg(self, msg):
        """Dispatch a Paxos/application message (UDP path) by its 'type'."""
        if msg is None:
            return
        msgType = msg['type']
        if msgType == 'request':
            print('request')
            self.handleRequest(msg)
        elif msgType == 'log':
            print('log')
            self.handleLog(msg)
        elif msgType == 'prepare':
            print('prepare')
            print(msg)
            self.acceptor.handlePrepare(msg)
        elif msgType == 'promise':
            print('promise')
            result = self.proposer.handlePromise(msg)
            if result:
                self.recoveryLookahead = False
        elif msgType == 'accept':
            print('accept')
            self.acceptor.handleAccept(msg)
        elif msgType == 'accepted':
            print('accepted')
            self.handleAccepted(msg)
        else:
            print('msgType not recognized', file=sys.stderr)

    def handleElectionMsg(self, msg):
        """Dispatch a bully-election message (TCP path) by its 'type'."""
        if msg is None:
            return
        msgType = msg['type']
        if msgType == 'election':
            self.election.handleElection(msg)
        elif msgType == 'answer':
            self.election.handleAnswer(msg)
        elif msgType == 'coordinator':
            self.election.handleCoordinator(msg)
            self.proposer.setLeader(self.election.isLeader())
            print('Leader is:', self.election.getLeader())
        else:
            print('msgType not recognized', file=sys.stderr)

    def run(self):
        """Main loop: select() over UDP (Paxos) and TCP (election) sockets
        with a 1s timeout, then persist state, drive recovery, and propose."""
        if self.election.getLeader() is None:
            self.election.startElection()
        while True:
            readable, writable, exceptional = select.select(self.inputs, [], [], 1)
            for s in readable:
                # tcpSock
                if s is self.tcpSock:
                    conn, addr = s.accept()
                    msg = recv(conn)
                    print('Received', msg)
                    conn.close()
                    self.handleElectionMsg(msg)
                # udpSock
                else:
                    msg = recv(s)
                    self.handleMsg(msg)
            self._saveState()
            self.doRecovery()
            self.processRequest()
def secondProposal():
    """Scripted Paxos trace: proposer alpha gets 'ABC' fully accepted and
    learned, then proposer beta runs a second prepare round for 'XYZ'.

    Every promise lands at the proposing site's messenger, which is why
    receivePromise always reads that one site's 'promise' mailbox.
    """
    print("secondProposal")
    processes = ['alpha', 'beta', 'gamma']
    um = UniversalMessenger()
    m1 = MockMessenger('alpha', processes, um)
    m2 = MockMessenger('beta', processes, um)
    m3 = MockMessenger('gamma', processes, um)
    p1 = Proposer('alpha', processes, m1)
    p2 = Proposer('beta', processes, m2)
    p3 = Proposer('gamma', processes, m3)
    a1 = Acceptor(processes, m1)
    a2 = Acceptor(processes, m2)
    a3 = Acceptor(processes, m3)
    l1 = Learner(processes, m1)
    l2 = Learner(processes, m2)
    l3 = Learner(processes, m3)
    # Round 1: alpha proposes 'ABC'; all three acceptors promise.
    p1.prepare('ABC', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    # All three acceptors accept; every learner observes each 'accepted'.
    a1.accept(um.lastMessageToSite('alpha', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a2.accept(um.lastMessageToSite('beta', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a3.accept(um.lastMessageToSite('gamma', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    # Round 2: beta proposes 'XYZ'; prepare/promise phase only.
    p2.prepare('XYZ', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    um.printMessages()
def run(self):
    """Canonical Paxos demo driver: run the initial configuration through
    NREQUESTS client commands, then repeat under NCONFIGS-1
    reconfigurations of the acceptor/leader sets."""
    initialconfig = Config([], [], [])
    c = 0  # configuration number 0
    # Create replicas
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors (initial configuration)
    for i in range(NACCEPTORS):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders (initial configuration)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    # Send client requests to replicas
    for i in range(NREQUESTS):
        pid = "client %d.%d" % (c, i)
        for r in initialconfig.replicas:
            cmd = Command(pid, 0, "operation %d.%d" % (c, i))
            self.sendMessage(r, RequestMessage(pid, cmd))
        time.sleep(1)
    # Create new configurations. The configuration contains the
    # leaders and the acceptors (but not the replicas).
    for c in range(1, NCONFIGS):
        config = Config(initialconfig.replicas, [], [])
        # Create acceptors in the new configuration
        for i in range(NACCEPTORS):
            pid = "acceptor %d.%d" % (c, i)
            Acceptor(self, pid)
            config.acceptors.append(pid)
        # Create leaders in the new configuration
        for i in range(NLEADERS):
            pid = "leader %d.%d" % (c, i)
            Leader(self, pid, config)
            config.leaders.append(pid)
        # Send reconfiguration request
        # NOTE(review): `i` is the leftover leaders-loop index, so the master
        # pid is constant across replicas here -- long-standing upstream quirk.
        for r in config.replicas:
            pid = "master %d.%d" % (c, i)
            cmd = ReconfigCommand(pid, 0, str(config))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # Send WINDOW noops to speed up reconfiguration
        for i in range(WINDOW - 1):
            pid = "master %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation noop")
                self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # Send client requests to replicas
        for i in range(NREQUESTS):
            pid = "client %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation %d.%d" % (c, i))
                self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
def rivalProposal():
    """Scripted Paxos trace of two rival proposers: alpha's 'ABC' round is
    deliberately left short of a majority (commented lines), then beta's
    'XYZ' proposal gathers a quorum and is accepted and learned."""
    print("rivalProposal")
    processes = ['alpha', 'beta', 'gamma']
    um = UniversalMessenger()
    m1 = MockMessenger('alpha', processes, um)
    m2 = MockMessenger('beta', processes, um)
    m3 = MockMessenger('gamma', processes, um)
    p1 = Proposer('alpha', processes, m1)
    p2 = Proposer('beta', processes, m2)
    p3 = Proposer('gamma', processes, m3)
    a1 = Acceptor(processes, m1)
    a2 = Acceptor(processes, m2)
    a3 = Acceptor(processes, m3)
    l1 = Learner(processes, m1)
    l2 = Learner(processes, m2)
    l3 = Learner(processes, m3)
    # alpha's proposal: only two promises are exchanged (no quorum progress
    # past prepare -- the accept phase below stays commented out).
    p1.prepare('ABC', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    #a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    #p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    #a1.accept(um.lastMessageToSite('alpha', 'accept'))
    #a2.accept(um.lastMessageToSite('beta', 'accept'))
    #a3.accept(um.lastMessageToSite('gamma', 'accept'))
    p2.prepare(
        'XYZ', 0)
    # A majority of these need to be uncommented for XYZ to be accepted
    #a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    #p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    # beta reached a quorum: all acceptors accept, all learners learn.
    a1.accept(um.lastMessageToSite('alpha', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a2.accept(um.lastMessageToSite('beta', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a3.accept(um.lastMessageToSite('gamma', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    um.printMessages()
def readqueue(self):
    """Restore the persisted user queue from 'user.queue'.

    Each line is an eval()-able serialized deque; the last line wins.
    Returns an empty Queue when the file is missing or unreadable.

    Fixes over the original: the file handle was never closed (no with/
    close); the bare ``except: ""`` silently swallowed *every* exception
    including KeyboardInterrupt/SystemExit; and ``return`` inside
    ``finally`` masked any in-flight exception.
    """
    queue = Queue.Queue()
    try:
        # 'with' guarantees the handle is closed on every path.
        with open('user.queue', 'r') as f:
            for line in f:
                # SECURITY NOTE: eval() on file contents executes arbitrary
                # code if 'user.queue' is attacker-writable; prefer
                # ast.literal_eval or json if the format allows.
                queue.queue = deque(eval(line))
    except Exception:
        # Best-effort restore: fall through with whatever was loaded
        # (possibly empty), but no longer swallow interpreter-exit signals.
        pass
    return queue

if __name__=="__main__":
    # Elect a coordinator, then run producer/consumer/acceptor as daemon
    # threads; the main thread idles until the process is interrupted.
    bullyalgorithm(opt=False)
    producer = Producer()
    producer.setDaemon(True)
    producer.start()
    consumer = Consumer(producer.queue)
    consumer.setDaemon(True)
    consumer.start()
    acceptor = Acceptor()
    acceptor.setDaemon(True)
    acceptor.start()
    # NOTE(review): active_count() includes the main thread, so this loop
    # only ends via an external interrupt -- appears intentional.
    while threading.active_count() > 0:
        time.sleep(0.1)
def server(server_id, config_file='../config/servers.yaml'):
    """Run one Paxos replica: recover state, bind a socket, and serve
    request/promise/prepare/propose/accept messages in a loop.

    Args:
        server_id: numeric id of this replica (coerced with int()).
        config_file: path to the YAML deployment config; must provide
            'f', 'state_backup_folder', 'servers_list', 'msg_drop_rate'
            and may provide test hooks 'x' and 'num_failed_primary'.

    Never returns normally; the loop runs until the process exits
    (including deliberate test-case crashes via exit()).
    """
    server_id = int(server_id)
    # Load deployment config.
    # NOTE(review): yaml.load without an explicit Loader executes
    # arbitrary tags on untrusted input -- confirm the config source is
    # trusted or switch to yaml.safe_load.
    with open(config_file, 'r') as config_handler:
        config = yaml.load(config_handler)
    f = int(config['f'])  # number of failures that can be tolerated
    state_backup_folder = config['state_backup_folder']
    if not os.path.exists(state_backup_folder):
        call(['mkdir', '-p', state_backup_folder])
    # 2f+1 servers tolerate f failures; quorum is a strict majority.
    num_server = 2 * f + 1
    servers_list = {
        server_idx: config['servers_list'][server_idx]
        for server_idx in range(num_server)
    }
    # NOTE(review): true division -- under Python 3 this quorum is a
    # float; confirm the intended integer majority (// would be explicit).
    quorum = num_server / 2 + 1

    # Load (or initialize) durable state so a restarted replica keeps
    # its promises and decided log.
    state_backup = get_state_backup(server_id, state_backup_folder)
    if not os.path.exists(state_backup):
        state = dict(view=0, decided_log={}, promised_proposal_id=None,
                     accepted_proposal_id={}, accepted_proposal_val={},
                     accepted_client_info={})
        save_state(state_backup, state)
    else:
        MyLogging.info("Recovering server")
        state = load_state(state_backup)

    loss_rate = config['msg_drop_rate']
    proposer = Proposer(server_id, servers_list, loss_rate)
    acceptor = Acceptor(server_id, servers_list,
                        state['promised_proposal_id'],
                        state['accepted_proposal_id'],
                        state['accepted_proposal_val'],
                        state['accepted_client_info'],
                        state_backup, loss_rate)
    learner = Learner(server_id, quorum, state['decided_log'],
                      state_backup, loss_rate)

    # Initialize view; the view doubles as the proposal_id of the
    # elected leader (leader = view % num_acceptors).
    view = state['view']
    num_acceptors = num_server

    HOST = servers_list[server_id]['host']
    PORT = servers_list[server_id]['port']
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, PORT))
    s.listen(100)

    # Test case 4: slot index to skip (creates a hole in the log).
    if 'x' in config and config['x'] >= 0:
        x = int(config['x'])
    else:
        x = None
    # Test cases 2/3: force the first N primaries to crash.
    if 'num_failed_primary' in config and config['num_failed_primary'] >= 0:
        num_failed_primary = int(config['num_failed_primary'])
    else:
        num_failed_primary = None
    # Test 4: which server the skipped slot occurs on.
    server_skip = 0

    # Pending client requests buffered until they can be proposed.
    request_val_queue = collections.deque()
    client_info_queue = collections.deque()

    while True:
        # If this replica is the current leader, maybe crash (fault
        # injection).  NOTE(review): crash_rate is not defined in this
        # function -- presumably a module-level constant; verify.
        if view % num_acceptors == server_id:
            server_crash(server_id, crash_rate)

        MyLogging.debug("wait for connection")
        conn, addr = s.accept()
        MyLogging.debug('Connection by ' + str(addr))
        # One pickled message per connection.
        # NOTE(review): pickle.loads on network data is unsafe if peers
        # are untrusted; also assumes the whole message fits in 8 KiB.
        data = conn.recv(4096 * 2)
        msg = pickle.loads(data)
        MyLogging.debug('RCVD: ' + str(msg))

        if msg['type'] == 'request':
            if msg['resend_idx'] != 0:
                # A resent client request signals a failed leader:
                # trigger a view change.  Persist the new view first.
                state = load_state(state_backup)
                state['view'] = view + 1
                save_state(state_backup, state)
                view += 1
                # The new leader starts with an empty request queue.
                request_val_queue.clear()
                client_info_queue.clear()
                proposer.need_prepare = True
                MyLogging.debug("change to view %s" % (str(view)))
            if view % num_acceptors == server_id:
                # This replica is the leader for the current view.
                # Test cases 2 and 3: forced primary crash.
                if num_failed_primary is not None and server_id < num_failed_primary:
                    MyLogging.info("force the primary %s to crash" % (str(server_id)))
                    MyLogging.error("server id %s crashes" % (str(server_id)))
                    exit()
                # Test case 4: crash once the slot after the skipped one
                # has been learned.
                if x is not None and x + 1 in learner.decided_log and server_skip == server_id:
                    #server_skip = server_id
                    MyLogging.info('server id %s has learned slot %s' % (str(server_id), str(x + 1)))
                    MyLogging.error("server id %s crashes" % (str(server_id)))
                    exit()
                request_val_queue.append(msg['request_val'])
                client_info_queue.append(msg['client_info'])
                if proposer.need_prepare is True:
                    # New leader: run phase 1 before proposing.
                    proposer.prepare(view)
                else:
                    # Stable leader: propose directly (multi-Paxos
                    # skips the prepare phase).
                    proposal_pack = {}
                    MyLogging.debug("no need to prepare")
                    MyLogging.debug(request_val_queue)
                    # Drain every buffered request into the pack.
                    for _ in range(len(request_val_queue)):
                        request_val = request_val_queue.popleft()
                        client_info = client_info_queue.popleft()
                        proposal_pack = proposer.addNewRequest(
                            proposal_pack, request_val, client_info)
                    # Test case 4: drop slot x from the pack.
                    if x is not None and x in proposal_pack and server_skip == server_id:
                        MyLogging.debug('At slot %s: %s' %
                                        (str(x), str(proposal_pack[x])))
                        MyLogging.debug(
                            'proposer %s skips slot %s for server_skip %s' %
                            (str(server_id), str(x), str(server_skip)))
                        del proposal_pack[x]
                    proposer.propose(proposal_pack, without_prepare=True)

        elif msg['type'] == 'promise':
            proposer.addVote(msg)
            if proposer.checkQuorumSatisfied() is True:
                if proposer.need_prepare is True:
                    # Quorum of promises: first fill log holes reported
                    # by acceptors, then append the buffered requests.
                    proposal_pack = proposer.getProposalPack(
                        learner.getDecidedLog())
                    MyLogging.debug("proposal pack for holes: %s" %
                                    (str(proposal_pack)))
                    for _ in range(len(request_val_queue)):
                        request_val = request_val_queue.popleft()
                        client_info = client_info_queue.popleft()
                        proposal_pack = proposer.addNewRequest(
                            proposal_pack, request_val, client_info)
                    # Test case 4: drop slot x from the pack.
                    if x is not None and x in proposal_pack and server_skip == server_id:
                        MyLogging.debug('At slot %s: %s' %
                                        (str(x), str(proposal_pack[x])))
                        MyLogging.debug(
                            'proposer %s skips slot %s for server_skip %s' %
                            (str(server_id), str(x), str(server_skip)))
                        del proposal_pack[x]
                    proposer.propose(proposal_pack)
                    # Phase 1 is done for this view.
                    proposer.need_prepare = False

        elif msg['type'] == 'prepare':
            # Persist the (possibly advanced) view before promising.
            state = load_state(state_backup)
            state['view'] = max(view, msg['proposal_id'])
            save_state(state_backup, state)
            view = max(view, msg['proposal_id']
                       )  # catch up with the most recent view
            MyLogging.debug("change to max view %s" % (str(view)))
            acceptor.promise(msg)

        elif msg['type'] == 'propose':
            acceptor.accept(msg)

        elif msg['type'] == 'accept':
            # Accepted votes feed the learner; decide once a quorum of
            # acceptors has accepted the same slot.
            slot_idx = msg['slot_idx']
            learner.addVote(msg, slot_idx)
            if learner.checkQuorumSatisfied(slot_idx) is True:
                learner.decide(slot_idx)

        conn.close()
#print("recv " + str(mv)) conn.send("*1\r\n$4\r\npong\r\n") return MessageCallback.__call__(self, conn, mv) class ConnectHandler(OnConnectCallback): def __call__(self, conn): OnConnectCallback.__call__(self, conn) #print("on connect, send ping") #conn.send("*3\r\n$3\r\nset\r\n$3\r\nage\r\n$3\r\n711\r\n") class NewConnHandler(NewConnectionCallback): def __call__(self, conn): NewConnectionCallback.__call__(self, conn) conn.setMsgCallback(MsgHandler()) conn.setOnConnectCallback(ConnectHandler()) def print_time(a='default'): import time print("From print_time", time.time(), a) loop = IOLoop() loop._sched.enter(delay=0.4, count=10, action=print_time) acceptor = Acceptor(port=6379, loop=loop) acceptor.setNewConnCallback(NewConnHandler()) #connector = Connector(loop = loop) #connector.setNewConnCallback(NewConnHandler()) #connector.connect(port = 6379) print("start") loop.start()
def readqueue(self):
    """Restore the persisted user queue from the 'user.queue' file.

    Every line of the file is eval'ed into a deque that replaces the
    queue's backing store, so in practice only the final line of the
    file determines the restored contents (behavior preserved).

    Returns:
        Queue.Queue: the restored queue; empty when the file is absent
        or cannot be parsed (best-effort restore).
    """
    queue = Queue.Queue()
    try:
        # Use a context manager so the file handle is closed even when
        # parsing fails (the original never closed it).
        with open('user.queue', 'r') as f:
            for line in f:
                # NOTE(review): eval() on file contents is a code-
                # injection risk if the file is writable by others;
                # flagged but kept to preserve behavior.
                queue.queue = deque(eval(line))
    except Exception:
        # Missing or corrupt file: return an empty queue.  Replaces the
        # original bare `except: ""` and the `return` placed inside
        # `finally`, which together swallowed every exception --
        # including SystemExit and KeyboardInterrupt.
        pass
    # Returning outside `finally` lets fatal signals propagate again.
    return queue


if __name__ == "__main__":
    # Run leader election, then spin up the three worker threads.
    bullyalgorithm(opt=False)
    producer = Producer()
    producer.setDaemon(True)
    producer.start()
    consumer = Consumer(producer.queue)
    consumer.setDaemon(True)
    consumer.start()
    acceptor = Acceptor()
    acceptor.setDaemon(True)
    acceptor.start()
    # Keep the main thread alive (daemon threads exit with it) while
    # still responding promptly to Ctrl-C.
    while threading.active_count() > 0:
        time.sleep(0.1)