def run(self):
    """Drive a multi-Paxos benchmark: build the initial configuration
    (replicas, acceptors, leaders), send timed client requests, then cycle
    through NCONFIGS reconfigurations, issuing requests after each.

    Per-request wall-clock durations are accumulated in ``self.times``
    (seconds, as floats).
    """
    self.successful_event = 0
    initialconfig = Config([], [], [])
    self.times = []  # per-request durations in seconds
    c = 0  # configuration index; 0 is the initial configuration
    # Spawn replicas; registering the pid in the config makes it addressable.
    for i in range(self.no_of_replicas):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Spawn acceptors for configuration 0.
    for i in range(NACCEPTORS):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Spawn leaders for configuration 0.
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    # Timed request phase: each request is broadcast to every replica.
    for i in range(self.no_of_requests):
        self.start_time = datetime.now()
        pid = "client %d.%d" % (c, i)
        for r in initialconfig.replicas:
            cmd = Command(pid, 0, "operation %d.%d" % (c, i))
            self.sendMessage(r, RequestMessage(pid, cmd))
            # crude pacing; the 1 s sleep dominates the measured duration
            time.sleep(1)
        self.end_time = datetime.now()
        self.total_time = self.end_time - self.start_time
        # print(self.times)
        self.times.append(self.total_time.total_seconds())
    # Reconfiguration phase: configs 1..NCONFIGS-1 reuse the replicas but
    # get fresh acceptors and leaders.
    for c in range(1, NCONFIGS):
        # Create new configuration
        config = Config(initialconfig.replicas, [], [])
        for i in range(NACCEPTORS):
            pid = "acceptor %d.%d" % (c, i)
            Acceptor(self, pid)
            config.acceptors.append(pid)
        for i in range(NLEADERS):
            pid = "leader %d.%d" % (c, i)
            Leader(self, pid, config)
            config.leaders.append(pid)
        # Send reconfiguration request
        # NOTE(review): `i` here is the stale value left over from the leader
        # loop above, so every master pid in this loop is identical — looks
        # unintended but is harmless for message delivery; confirm upstream.
        for r in config.replicas:
            pid = "master %d.%d" % (c, i)
            cmd = ReconfigCommand(pid, 0, str(config))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # WINDOW-1 noops push the reconfiguration through the command window.
        for i in range(WINDOW - 1):
            pid = "master %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation noop")
                self.sendMessage(r, RequestMessage(pid, cmd))
                time.sleep(1)
        # Client requests against the new configuration (not timed).
        for i in range(self.no_of_requests):
            pid = "client %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation %d.%d" % (c, i))
                self.sendMessage(r, RequestMessage(pid, cmd))
                time.sleep(1)
def rivalProposal():
    """Scripted single-process Paxos trace demonstrating two rival proposers.

    Proposer alpha prepares value 'ABC' and collects promises from a1/a2;
    before it can get its value accepted, proposer beta prepares 'XYZ' with
    a majority (a2, a3), so beta's proposal supersedes alpha's. The accept /
    accepted rounds then drive the learners to the chosen value. The exact
    call order below IS the scenario — do not reorder.
    """
    print("rivalProposal")
    processes = ['alpha', 'beta', 'gamma']
    # One UniversalMessenger records every message; each MockMessenger
    # routes through it on behalf of one site.
    um = UniversalMessenger()
    m1 = MockMessenger('alpha', processes, um)
    m2 = MockMessenger('beta', processes, um)
    m3 = MockMessenger('gamma', processes, um)
    p1 = Proposer('alpha', processes, m1)
    p2 = Proposer('beta', processes, m2)
    p3 = Proposer('gamma', processes, m3)
    a1 = Acceptor(processes, m1)
    a2 = Acceptor(processes, m2)
    a3 = Acceptor(processes, m3)
    l1 = Learner(processes, m1)
    l2 = Learner(processes, m2)
    l3 = Learner(processes, m3)
    # alpha's round: prepare 'ABC', gather promises from a1 and a2 only.
    p1.prepare('ABC', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    #a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    #p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    #a1.accept(um.lastMessageToSite('alpha', 'accept'))
    #a2.accept(um.lastMessageToSite('beta', 'accept'))
    #a3.accept(um.lastMessageToSite('gamma', 'accept'))
    # beta's rival round for 'XYZ'.
    p2.prepare('XYZ', 0)
    # A majority of these need to be uncommented for XYZ to be accepted
    #a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    #p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    # Accept phase: each acceptor processes its accept, then all three
    # learners consume the resulting 'accepted' notifications.
    a1.accept(um.lastMessageToSite('alpha', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a2.accept(um.lastMessageToSite('beta', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a3.accept(um.lastMessageToSite('gamma', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    # Dump the full message log for inspection.
    um.printMessages()
def run(self):
    """Stand up a multi-Paxos system and exercise it through NCONFIGS
    configurations: build replicas/acceptors/leaders, send NREQUESTS client
    commands, then repeatedly reconfigure and send more requests.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number of the initial configuration
    # Create replicas and register their pids in the configuration.
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors for configuration 0.
    for i in range(NACCEPTORS):
        pid = "acceptor %d.%d" % (c,i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders for configuration 0.
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c,i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    # Broadcast each client request to every replica, pacing with sleep(1).
    for i in range(NREQUESTS):
        pid = "client %d.%d" % (c,i)
        for r in initialconfig.replicas:
            cmd = Command(pid,0,"operation %d.%d" % (c,i))
            self.sendMessage(r,RequestMessage(pid,cmd))
            time.sleep(1)
    # Later configurations keep the replicas but use new acceptors/leaders.
    for c in range(1, NCONFIGS):
        # Create new configuration
        config = Config(initialconfig.replicas, [], [])
        for i in range(NACCEPTORS):
            pid = "acceptor %d.%d" % (c,i)
            Acceptor(self, pid)
            config.acceptors.append(pid)
        for i in range(NLEADERS):
            pid = "leader %d.%d" % (c,i)
            Leader(self, pid, config)
            config.leaders.append(pid)
        # Send reconfiguration request
        # NOTE(review): `i` is left over from the leader loop, so all master
        # pids here are identical — matches the upstream demo; confirm intent.
        for r in config.replicas:
            pid = "master %d.%d" % (c,i)
            cmd = ReconfigCommand(pid,0,str(config))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # WINDOW-1 noop commands flush the reconfiguration through the window.
        for i in range(WINDOW-1):
            pid = "master %d.%d" % (c,i)
            for r in config.replicas:
                cmd = Command(pid,0,"operation noop")
                self.sendMessage(r, RequestMessage(pid, cmd))
                time.sleep(1)
        # Client requests against the new configuration.
        for i in range(NREQUESTS):
            pid = "client %d.%d" % (c,i)
            for r in config.replicas:
                cmd = Command(pid,0,"operation %d.%d"%(c,i))
                self.sendMessage(r, RequestMessage(pid, cmd))
                time.sleep(1)
def __init__(self, addr, coder=None):
    """Set up the server: an event loop, a listening Acceptor bound to
    *addr*, and an (initially empty) client registry.

    The acceptor reports fatal errors to ``self.fatal_error`` and hands
    new connections to ``self.process_new``. *coder* is stored as-is for
    later message encoding/decoding (may be None).
    """
    self.coder = coder
    self.clients = {}
    self.loop = Loop()
    acceptor = Acceptor(addr, self.loop)
    acceptor.set_error_callback(self.fatal_error)
    acceptor.set_connect_callback(self.process_new)
    self.acceptor = acceptor
def run(self):
    """Build the initial Paxos configuration and fan out client load via
    one multiprocessing.Process per client, joining them all at the end.

    ``self.start_time`` records when the client phase began; worker
    processes are kept in ``self.p_list``.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number (only the initial configuration is used)
    # Create replicas
    for i in range(self.clusterSize):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors (initial configuration)
    for i in range(self.clusterSize):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders (initial configuration)
    for i in range(self.clusterSize):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    self.start_time = time.time()
    # One OS process per client, all running self.sendRequest concurrently.
    # NOTE(review): mp.Process pickles the target; self.sendRequest drags the
    # whole object across — confirm it is picklable on this platform.
    for i in range(self.clientSize):
        self.p_list.append(
            mp.Process(target=self.sendRequest, args=(initialconfig, c)))
        self.p_list[i].start()
    # Block until every client process has finished.
    for i in self.p_list:
        i.join()
def run(self):
    """Build the initial Paxos configuration and drive client load with a
    thread pool plus one in-line invocation of ``sendRequest``.

    Bug fixed: the original ``executor.submit(self.sendRequest(initialconfig, c))``
    invoked ``sendRequest`` eagerly and submitted its *return value* (not a
    callable) to the pool, so the "background" task raised in the worker and
    nothing actually ran concurrently. ``submit`` must receive the callable
    and its arguments separately.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number (only the initial configuration is used)
    # Create replicas
    for i in range(self.clusterSize):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors (initial configuration)
    for i in range(self.clusterSize):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders (initial configuration)
    for i in range(self.clusterSize):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    self.start_time = time.time()
    executor = ThreadPoolExecutor(max_workers=self.clientSize)
    # Schedule one request stream on the pool (callable + args, not a call).
    executor.submit(self.sendRequest, initialconfig, c)
    # Second request stream runs on the calling thread, as before.
    self.sendRequest(initialconfig, c)
def run(self):
    """Set up replicas/acceptors/leaders with verbosity plumbed through,
    launch ``self.conc_clients`` Client processes, then poll once per second
    until every replica reports a non-None ``difference`` (i.e. finished).
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number
    # Replicas are told the total expected command count up front.
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig, self.conc_clients * NREQUESTS, self.verbose)
        initialconfig.replicas.append(pid)
    # Acceptor count is the configured quorum size (not a global constant).
    for i in range(self.quorum_size):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid, self.verbose)
        initialconfig.acceptors.append(pid)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig, self.verbose)
        initialconfig.leaders.append(pid)
    # Each Client issues NREQUESTS commands against all replicas.
    for i in range(self.conc_clients):
        pid = f"client {c}.{i}"
        Client(self, pid, initialconfig.replicas, NREQUESTS, self.verbose)
    # Busy-wait (1 s period) until every replica has computed a result;
    # `difference` is None until a replica finishes.
    completed = False
    while not completed:
        completed = True
        for i in range(NREPLICAS):
            if self.procs[initialconfig.replicas[i]].difference is None:
                completed = False
        time.sleep(1)
def secondProposal():
    """Scripted Paxos trace: alpha drives 'ABC' through a complete round
    (promises, accepts, learner notifications), after which beta prepares a
    second proposal 'XYZ' and collects promises from all three acceptors.
    The exact call sequence is the scenario — do not reorder.
    """
    print("secondProposal")
    processes = ['alpha', 'beta', 'gamma']
    # One UniversalMessenger records all traffic; MockMessengers route per site.
    um = UniversalMessenger()
    m1 = MockMessenger('alpha', processes, um)
    m2 = MockMessenger('beta', processes, um)
    m3 = MockMessenger('gamma', processes, um)
    p1 = Proposer('alpha', processes, m1)
    p2 = Proposer('beta', processes, m2)
    p3 = Proposer('gamma', processes, m3)
    a1 = Acceptor(processes, m1)
    a2 = Acceptor(processes, m2)
    a3 = Acceptor(processes, m3)
    l1 = Learner(processes, m1)
    l2 = Learner(processes, m2)
    l3 = Learner(processes, m3)
    # Round 1: alpha prepares 'ABC' and gets promises from all acceptors.
    p1.prepare('ABC', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p1.receivePromise(um.lastMessageToSite('alpha', 'promise'))
    # Accept phase for 'ABC': each acceptor accepts, then all learners
    # consume the resulting 'accepted' notifications.
    a1.accept(um.lastMessageToSite('alpha', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a2.accept(um.lastMessageToSite('beta', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    a3.accept(um.lastMessageToSite('gamma', 'accept'))
    l1.receiveAccepted(um.lastMessageToSite('alpha', 'accepted'))
    l2.receiveAccepted(um.lastMessageToSite('beta', 'accepted'))
    l3.receiveAccepted(um.lastMessageToSite('gamma', 'accepted'))
    # Round 2: beta prepares 'XYZ'; all three acceptors promise.
    p2.prepare('XYZ', 0)
    a1.promise(um.lastMessageToSite('alpha', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a2.promise(um.lastMessageToSite('beta', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    a3.promise(um.lastMessageToSite('gamma', 'prepare'))
    p2.receivePromise(um.lastMessageToSite('beta', 'promise'))
    # Dump the full message log for inspection.
    um.printMessages()
def _initActors(self):
    """Instantiate this node's three Paxos roles (proposer, acceptor,
    learner) and restore persisted state from ``self.stateFileName`` if
    such a file exists on disk.
    """
    role_args = (self.servers, self.id)
    self.proposer = Proposer(*role_args)
    self.acceptor = Acceptor(*role_args)
    self.learner = Learner(*role_args)
    # Resume from a previous run when a saved-state file is present.
    state_file = self.stateFileName
    if os.path.isfile(state_file):
        print('Loading from:', state_file)
        self._loadState()
def createAcceptor(self):
    """Construct this node's Acceptor (listening on ``self.port + 1``) and
    run its blocking listen loop, logging around it.

    Acceptor logging is disabled up front when ``self.logging_switch`` is
    off; the 'starting'/'exiting' log calls bracket the listen loop.
    """
    node = Acceptor(self.port + 1, self.ips, self.ip, self.num)
    if not self.logging_switch:
        node.logging(False)
    node.log('starting')
    node.listen()  # blocks until the acceptor shuts down
    node.log('exiting')
def run():
    """Object-detection/tracking main loop: pull frames from the video
    helper and either run the detector every frame, or detect every
    ``NUM_JUMP_FRAMES``-th frame and pure-predict (track only) in between.
    """
    # Step 1: Initialization
    # video source:            video_helper.py
    # detection:               detector.py
    # result receiving:        acceptor.py
    # parameter configuration: config.py
    # overall control:         multiple_object_controller.py
    configs = Configs()
    detector = Detector(configs)
    acceptor = Acceptor(configs)
    video_helper = VideoHelper(configs)
    object_controller = MultipleObjectController(configs, video_helper)
    # Step 2: main loop
    # A: detect objects every frame, no tracking (smoothing optional)
    # B: track objects: a. frame WITH detection (+ observation correction)
    #                   b. frame WITHOUT detection (tracking only, pure prediction)
    cur_frame_counter = 0
    detection_loop_counter = 0
    while video_helper.not_finished(cur_frame_counter):
        # 0. get frame
        frame = video_helper.get_frame()
        # 1.1 NUM_JUMP_FRAMES falsy/zero -> detect on every frame
        if not configs.NUM_JUMP_FRAMES:
            detects = detector.detect(frame)
            object_controller.update(detects)
        else:
            # 1.2 detect only every NUM_JUMP_FRAMES-th frame
            # 1.2.1 this frame has a detection
            if detection_loop_counter % configs.NUM_JUMP_FRAMES == 0:
                detection_loop_counter = 0  # keep the counter bounded
                detects = detector.detect(frame)
                object_controller.update(detects)  # core update with observation
            # 1.2.2 this frame has no detection: prediction-only update
            else:
                object_controller.update_without_detection()  # core predict-only
        # NOTE(review): `acceptor` is constructed but never used in this loop
        # ("deal with acceptor / ask acceptor do something" was left as TODO).
        cur_frame_counter += 1
        detection_loop_counter += 1
def setup(self):
    """Populate ``self.conf`` with the full system: replicas, acceptors,
    leaders, and clients. Each actor is constructed for its side effects
    and its pid is recorded in the matching config list.
    """
    conf = self.conf
    # System actors: replicas, then acceptors, then leaders.
    for idx in range(self.NREPLICAS):
        name = "replica %d" % idx
        Replica(self, name, conf)
        conf.replicas.append(name)
    for idx in range(self.NACCEPTORS):
        name = "acceptor %d" % idx
        Acceptor(self, name)
        conf.acceptors.append(name)
    # NLEADERS is a module-level constant, unlike the per-instance counts above.
    for idx in range(NLEADERS):
        name = "leader %d" % idx
        Leader(self, name, conf)
        conf.leaders.append(name)
    # Client actors.
    for idx in range(self.num_clients):
        name = "client %d" % idx
        Client(self, name)
        conf.clients.append(name)
def __init__(self, site, hosts, messenger):
    """Wire up one site: restore durable Paxos state from StableStorage,
    build the three roles from it, rebuild the airport model from the
    learner's log, register all roles as messenger listeners, and finally
    catch up to the current state via ``fillToCurrent``.

    Order matters: storage must be initialized before the roles, and the
    roles must exist before they are registered as listeners.
    """
    self.site = site
    self.hosts = hosts
    self.messenger = messenger
    self.store = StableStorage(site)
    # Recover persisted state: decided log, accepted proposals, highest
    # prepare seen, and accept counts.
    log, accProps, maxPrep, accepts = self.store.initialize()
    self.proposer = Proposer(site, hosts, messenger)
    self.acceptor = Acceptor(hosts, messenger, accProps, maxPrep)
    self.learner = Learner(hosts, messenger, self, log, accepts)
    # Rebuild the application (plane reservations) from the decided log.
    self.airport = Planes()
    self.airport.fillPlane(self.learner.log)
    # All three roles receive incoming messages from the shared messenger.
    self.messenger.addListener(self.proposer)
    self.messenger.addListener(self.acceptor)
    self.messenger.addListener(self.learner)
    self.fillToCurrent()
def run(self):
    """Throughput benchmark: build acceptors/leaders/replicas, start
    ``self.number_clients`` clients issuing NREQUESTS each, poll until a
    replica has accepted every expected command, then append the measured
    requests-per-second figure to ``data.txt``.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number
    for i in range(self.cluster_size):
        pid = "acceptor %d.%d" % (c,i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c,i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Clients drive the workload against all replicas.
    for i in range(self.number_clients):
        pid = "client %d.%d" % (c,i)
        Client(self, pid, initialconfig.replicas, NREQUESTS)
    # Poll (1 s period) until some replica has accepted the full workload.
    done = False
    while not done:
        done = False  # NOTE(review): redundant — already False on entry; loop
                      # exits as soon as any replica hits the target count.
        for x in initialconfig.replicas:
            if self.procs[x].accepted - (NREQUESTS*self.number_clients) == 0:
                time_end = time.perf_counter()
                done = True
        time.sleep(1)
    #Timing variables
    # `x` is whichever replica the loop last visited — TODO confirm all
    # replicas agree on start_time/accepted when this fires.
    start = self.procs[x].start_time
    acc = self.procs[x].accepted
    time_total = (time_end - start)
    per_second = int(acc/time_total)
    # Append the throughput sample; one integer per line.
    with open('data.txt', 'a') as outfile:
        outfile.write(str(per_second))
        outfile.write("\n")
def __init__(self):
    """BitTorrent-style client setup: derive a peer id, probe the port
    range for a free listening port, then start the reactor loop.

    Exits the process (status 1) when no port in the range can be bound.
    Note: ``Reactor().run()`` blocks, so this constructor never returns
    until the reactor stops.
    """
    # Peer id: client prefix + zero-padded epoch seconds.
    self._peer_id = "-HS0001-" + str(int(time.time())).zfill(12)
    self._torrents = {}
    self._downloads = set()
    # Probe ports until Acceptor binds successfully; the for/else `else`
    # runs only if NO port in the range worked (loop never hit `break`).
    for self._port in range(_PORT_FIRST, _PORT_LAST + 1):
        try:
            self._acceptor = Acceptor(("localhost", self._port), self)
            break
        except Exception as err:
            # Bind failed (port in use etc.) — log and try the next port.
            logger.debug(err)
            continue
    else:
        logger.critical(
            ("Could not find free port in range {}-{} to "
             "accept connections").format(_PORT_FIRST, _PORT_LAST))
        sys.exit(1)
    logger.info("Listening on port {}".format(self._port))
    # Kick off downloads shortly after the reactor starts, then block.
    Reactor().schedule_timer(.01, self.start_downloads)
    Reactor().run()
def main(): global incoming, incoming_lock, N, pid, port, root_port, send # Read global state. pid = int(sys.argv[1]) N = int(sys.argv[2]) port = int(sys.argv[3]) # Start and configure debugger LOG.basicConfig(filename='LOG/%d.log' % pid, level=LOG.DEBUG) # Create the necessary classes. mhandler = MasterHandler(pid, address, port) handler = WorkerThread(address, root_port + pid) communicator = Communicator(incoming, incoming_lock, pid, send, mhandler) #LOG.debug('Handlers initiated') acceptors = [(i, 'acceptor') for i in xrange(N)] leaders = [(i, 'leader') for i in xrange(N)] replicas = [(i, 'replica') for i in xrange(N)] acceptor = Acceptor(communicator) my_dict = dict() replica = Replica(leaders, my_dict, communicator) leader = Leader(acceptors, replicas, communicator) acceptor.start() handler.start() mhandler.start() while not os.path.isfile('LOG/%d.log' % (N - 1)): time.sleep(0.1) leader.start() replica.start() LOG.debug('main() ends IDENTITY pid: %d, port: %d ' % (pid, port))
def setup_class(self):
    """Class-level test fixture: build an Acceptor listening on
    127.0.0.1:PORT whose receiver is a Mock, so tests can inspect what
    the acceptor delivers.
    """
    receiver = Mock()
    self.receiver = receiver
    self.acceptor = Acceptor(('127.0.0.1', PORT), receiver)
def __init__(self, ip, port, sub_reactor_num):
    """Main-reactor setup: a listening Acceptor on (*ip*, *port*), the
    accept loop driving it, and a pool of *sub_reactor_num* sub-reactor
    threads that will own accepted connections.

    (Removed a stray trailing ``pass`` — dead code after real statements.)
    """
    self.__acceptor = Acceptor(ip, port)
    self.__loop = AcceptorLoop(self.__acceptor)
    self.__sub_reactors = SubReactorThreadPool(sub_reactor_num)
from client import Client
from learner import Learner
from logger import get_logger
from logging import getLogger, CRITICAL, INFO, DEBUG

critical, debug, info = get_logger(__name__)

if __name__ == '__main__':
    # Single-host Paxos demo: values come from stdin, one per line.
    debug('Starting processes')
    from sys import stdin
    values = [line.strip() for line in stdin]
    config = 'config.txt'
    acceptors = [Acceptor(1, config), Acceptor(2, config), Acceptor(3, config)]
    proposers = [Proposer(1, config)]
    learners = [Learner(1, config)]
    clients = [Client(3, config, values)]
    # suppress logging for every role module except the learner (kept at INFO
    # so decided values are still visible)
    for module in (x.__module__ for x in {Acceptor, Proposer, Client}):
        getLogger(module).setLevel(level=CRITICAL)
    getLogger(Learner.__module__).setLevel(level=INFO)
    from sys import exit
    # Bug fixed: the original line was missing the closing parenthesis on
    # joinall(...), which made this module a SyntaxError.
    joinall([spawn(x.reader_loop) for x in acceptors + proposers + learners + clients])
def run(self):
    """Reference multi-Paxos driver: build the initial configuration, send
    NREQUESTS client commands, then walk through NCONFIGS reconfigurations
    (fresh acceptors and leaders each time; replicas are reused), sending
    noops to advance the window and more client commands after each.
    """
    initialconfig = Config([], [], [])
    c = 0  # configuration number of the initial configuration
    # Create replicas
    for i in range(NREPLICAS):
        pid = "replica %d" % i
        Replica(self, pid, initialconfig)
        initialconfig.replicas.append(pid)
    # Create acceptors (initial configuration)
    for i in range(NACCEPTORS):
        pid = "acceptor %d.%d" % (c, i)
        Acceptor(self, pid)
        initialconfig.acceptors.append(pid)
    # Create leaders (initial configuration)
    for i in range(NLEADERS):
        pid = "leader %d.%d" % (c, i)
        Leader(self, pid, initialconfig)
        initialconfig.leaders.append(pid)
    # Send client requests to replicas
    for i in range(NREQUESTS):
        pid = "client %d.%d" % (c, i)
        for r in initialconfig.replicas:
            cmd = Command(pid, 0, "operation %d.%d" % (c, i))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
    # Create new configurations. The configuration contains the
    # leaders and the acceptors (but not the replicas).
    for c in range(1, NCONFIGS):
        config = Config(initialconfig.replicas, [], [])
        # Create acceptors in the new configuration
        for i in range(NACCEPTORS):
            pid = "acceptor %d.%d" % (c, i)
            Acceptor(self, pid)
            config.acceptors.append(pid)
        # Create leaders in the new configuration
        for i in range(NLEADERS):
            pid = "leader %d.%d" % (c, i)
            Leader(self, pid, config)
            config.leaders.append(pid)
        # Send reconfiguration request
        # NOTE(review): `i` is the stale value from the leader loop, so the
        # master pid is the same for every replica here; confirm intent.
        for r in config.replicas:
            pid = "master %d.%d" % (c, i)
            cmd = ReconfigCommand(pid, 0, str(config))
            self.sendMessage(r, RequestMessage(pid, cmd))
            time.sleep(1)
        # Send WINDOW noops to speed up reconfiguration
        for i in range(WINDOW - 1):
            pid = "master %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation noop")
                self.sendMessage(r, RequestMessage(pid, cmd))
                time.sleep(1)
        # Send client requests to replicas
        for i in range(NREQUESTS):
            pid = "client %d.%d" % (c, i)
            for r in config.replicas:
                cmd = Command(pid, 0, "operation %d.%d" % (c, i))
                self.sendMessage(r, RequestMessage(pid, cmd))
                time.sleep(1)
def readqueue(self):
    """Restore the persisted user queue from 'user.queue'.

    Each line of the file holds a Python-literal sequence; the last line
    read wins (queue.queue is replaced, not extended). On any failure the
    method falls back to returning an empty queue.

    Fixes over the original: the file handle is closed (with-block), the
    bare ``except: ""`` no-op is replaced by an explicit best-effort
    ``except Exception``, and the ``return`` is moved out of ``finally``
    (return-in-finally silently swallowed every exception, including
    KeyboardInterrupt).
    """
    queue = Queue.Queue()
    try:
        with open('user.queue', 'r') as f:
            for line in f:
                # NOTE(review): eval() on file contents is unsafe if
                # 'user.queue' can be written by untrusted parties.
                queue.queue = deque(eval(line))
    except Exception:
        pass  # best-effort: missing/corrupt file yields an empty queue
    return queue


if __name__ == "__main__":
    # Elect a coordinator, then run the producer/consumer/acceptor daemons.
    bullyalgorithm(opt=False)
    producer = Producer()
    producer.setDaemon(True)
    producer.start()
    consumer = Consumer(producer.queue)
    consumer.setDaemon(True)
    consumer.start()
    acceptor = Acceptor()
    acceptor.setDaemon(True)
    acceptor.start()
    # Keep the main thread alive while any thread is running so the
    # daemon threads are not torn down immediately.
    while threading.active_count() > 0:
        time.sleep(0.1)
def server(server_id, config_file='../config/servers.yaml'):
    """Single-threaded Paxos server loop.

    Loads the YAML cluster config, restores (or initializes) durable state,
    builds the proposer/acceptor/learner roles, then accepts one TCP
    connection at a time and dispatches the pickled message by its 'type'
    field: request / promise / prepare / propose / accept. Several
    config-driven hooks deliberately crash or skip a slot for test cases.

    NOTE(review):
      * ``crash_rate`` is used below but never defined in this function —
        presumably a module-level global; NameError if not. Confirm.
      * ``yaml.load`` without a Loader and ``pickle.loads`` on socket data
        are both unsafe on untrusted input.
      * ``quorum = num_server / 2 + 1`` relies on Python 2 integer
        division; under Python 3 it yields a float.
    """
    server_id = int(server_id)
    #load config file
    with open(config_file, 'r') as config_handler:
        config = yaml.load(config_handler)
    f = int(config['f'])  #the number of failure that can be tolerated
    state_backup_folder = config['state_backup_folder']
    if not os.path.exists(state_backup_folder):
        call(['mkdir', '-p', state_backup_folder])
    num_server = 2 * f + 1
    servers_list = {
        server_idx: config['servers_list'][server_idx]
        for server_idx in range(num_server)
    }
    quorum = num_server / 2 + 1
    # load state: start fresh (and persist the blank state) when no backup
    # exists, otherwise recover from disk.
    state_backup = get_state_backup(server_id, state_backup_folder)
    if not os.path.exists(state_backup):
        state = dict(view=0,
                     decided_log={},
                     promised_proposal_id=None,
                     accepted_proposal_id={},
                     accepted_proposal_val={},
                     accepted_client_info={})
        save_state(state_backup, state)
    else:
        MyLogging.info("Recovering server")
        state = load_state(state_backup)
    loss_rate = config['msg_drop_rate']
    # Build the three Paxos roles from the recovered state.
    proposer = Proposer(server_id, servers_list, loss_rate)
    acceptor = Acceptor(server_id, servers_list,
                        state['promised_proposal_id'],
                        state['accepted_proposal_id'],
                        state['accepted_proposal_val'],
                        state['accepted_client_info'], state_backup,
                        loss_rate)
    learner = Learner(server_id, quorum, state['decided_log'], state_backup,
                      loss_rate)
    #initialize view. The view will be used for proposal_id for elected leader
    view = state['view']
    num_acceptors = num_server
    HOST = servers_list[server_id]['host']
    PORT = servers_list[server_id]['port']
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, PORT))
    s.listen(100)
    #for test case 4 to skip slot x
    if 'x' in config and config['x'] >= 0:
        x = int(config['x'])
    else:
        x = None
    if 'num_failed_primary' in config and config['num_failed_primary'] >= 0:
        num_failed_primary = int(config['num_failed_primary'])
    else:
        num_failed_primary = None
    #for test 4 to specify the server on which the skipped slot occurs
    server_skip = 0
    # Pending client requests (and their client metadata) awaiting proposal.
    request_val_queue = collections.deque()
    client_info_queue = collections.deque()
    while True:
        #try to crash — only the current leader (view mod N == id) may crash.
        if view % num_acceptors == server_id:
            server_crash(server_id, crash_rate)
        MyLogging.debug("wait for connection")
        conn, addr = s.accept()
        MyLogging.debug('Connection by ' + str(addr))
        data = conn.recv(4096 * 2)
        msg = pickle.loads(data)
        MyLogging.debug('RCVD: ' + str(msg))
        if msg['type'] == 'request':
            if msg['resend_idx'] != 0:
                #if this is an resent message, triger view change
                # save updated state first
                state = load_state(state_backup)
                state['view'] = view + 1
                save_state(state_backup, state)
                view += 1
                #new leader clears the request queue
                request_val_queue.clear()
                client_info_queue.clear()
                proposer.need_prepare = True
                MyLogging.debug("change to view %s" % (str(view)))
            if view % num_acceptors == server_id:
                #this is leader
                #testcase 2 and 3: force early primaries to crash
                if num_failed_primary is not None and server_id < num_failed_primary:
                    MyLogging.info("force the primary %s to crash" %
                                   (str(server_id)))
                    MyLogging.error("server id %s crashes" % (str(server_id)))
                    exit()
                #testcase 4: crash once slot x+1 has been learned
                if x is not None and x + 1 in learner.decided_log and server_skip == server_id:
                    #server_skip = server_id
                    MyLogging.info('server id %s has learned slot %s' %
                                   (str(server_id), str(x + 1)))
                    MyLogging.error("server id %s crashes" % (str(server_id)))
                    exit()
                request_val_queue.append(msg['request_val'])
                client_info_queue.append(msg['client_info'])
                if proposer.need_prepare is True:
                    proposer.prepare(view)
                else:
                    #directly propose without prepare stage
                    proposal_pack = {}
                    MyLogging.debug("no need to prepare")
                    MyLogging.debug(request_val_queue)
                    # Drain every queued request into the proposal pack.
                    for _ in range(len(request_val_queue)):
                        request_val = request_val_queue.popleft()
                        client_info = client_info_queue.popleft()
                        proposal_pack = proposer.addNewRequest(
                            proposal_pack, request_val, client_info)
                    #testcase 4: deliberately drop slot x from the pack
                    if x is not None and x in proposal_pack and server_skip == server_id:
                        MyLogging.debug('At slot %s: %s' %
                                        (str(x), str(proposal_pack[x])))
                        MyLogging.debug(
                            'proposer %s skips slot %s for server_skip %s' %
                            (str(server_id), str(x), str(server_skip)))
                        del proposal_pack[x]
                    proposer.propose(proposal_pack, without_prepare=True)
        elif msg['type'] == 'promise':
            proposer.addVote(msg)
            if proposer.checkQuorumSatisfied() is True:
                if proposer.need_prepare is True:
                    # Quorum of promises reached: fill decided-log holes,
                    # then add the queued client requests and propose.
                    proposal_pack = proposer.getProposalPack(
                        learner.getDecidedLog())
                    MyLogging.debug("proposal pack for holes: %s" %
                                    (str(proposal_pack)))
                    for _ in range(len(request_val_queue)):
                        request_val = request_val_queue.popleft()
                        client_info = client_info_queue.popleft()
                        proposal_pack = proposer.addNewRequest(
                            proposal_pack, request_val, client_info)
                    #testcase 4: deliberately drop slot x from the pack
                    if x is not None and x in proposal_pack and server_skip == server_id:
                        MyLogging.debug('At slot %s: %s' %
                                        (str(x), str(proposal_pack[x])))
                        MyLogging.debug(
                            'proposer %s skips slot %s for server_skip %s' %
                            (str(server_id), str(x), str(server_skip)))
                        del proposal_pack[x]
                    proposer.propose(proposal_pack)
                    proposer.need_prepare = False
        elif msg['type'] == 'prepare':
            # save updated state first
            state = load_state(state_backup)
            state['view'] = max(view, msg['proposal_id'])
            save_state(state_backup, state)
            view = max(view, msg['proposal_id']
                       )  # try to catch up with the most recent view
            MyLogging.debug("change to max view %s" % (str(view)))
            acceptor.promise(msg)
        elif msg['type'] == 'propose':
            acceptor.accept(msg)
        elif msg['type'] == 'accept':
            # Learner side: tally accepted votes per slot; decide on quorum.
            slot_idx = msg['slot_idx']
            learner.addVote(msg, slot_idx)
            if learner.checkQuorumSatisfied(slot_idx) is True:
                learner.decide(slot_idx)
        conn.close()
#print("recv " + str(mv)) conn.send("*1\r\n$4\r\npong\r\n") return MessageCallback.__call__(self, conn, mv) class ConnectHandler(OnConnectCallback): def __call__(self, conn): OnConnectCallback.__call__(self, conn) #print("on connect, send ping") #conn.send("*3\r\n$3\r\nset\r\n$3\r\nage\r\n$3\r\n711\r\n") class NewConnHandler(NewConnectionCallback): def __call__(self, conn): NewConnectionCallback.__call__(self, conn) conn.setMsgCallback(MsgHandler()) conn.setOnConnectCallback(ConnectHandler()) def print_time(a='default'): import time print("From print_time", time.time(), a) loop = IOLoop() loop._sched.enter(delay=0.4, count=10, action=print_time) acceptor = Acceptor(port=6379, loop=loop) acceptor.setNewConnCallback(NewConnHandler()) #connector = Connector(loop = loop) #connector.setNewConnCallback(NewConnHandler()) #connector.connect(port = 6379) print("start") loop.start()