def __init__(self, my_addr, partners_addrs):
    """Set up local node state and join the replication cluster.

    my_addr        -- this node's "host:port" raft address
    partners_addrs -- raft addresses of the other cluster members
    """
    self.log = {}
    # Replicated adjacency list, kept in sync across the cluster.
    self.al = ReplDict()
    self.lock = threading.Lock()
    self.rwlock = RWLock()
    # Clients for talking with other servers, filled in lazily.
    self.interfaces = {}
    # NOTE(review): the SyncObj handle is bound to a local and never stored
    # on self — presumably pysyncobj's background threads keep it alive, but
    # confirm this is intentional.
    replicator = SyncObj(my_addr, partners_addrs, consumers=[self.al])
def __init__(self, logger, host, port):
    """Wire up the XML-RPC endpoint and the raft-replicated master state.

    logger -- application logger
    host   -- interface the RPC server binds to
    port   -- TCP port the RPC server listens on
    """
    raft_conf = SyncObjConf()
    raft_conf.fullDumpFile = 'raft.bin'
    raft_conf.logCompactionMinTime = 10
    raft_conf.useFork = True
    # Expose only the whitelisted RPC methods over XML-RPC.
    self.serv = ThreadXMLRPCServer((host, port), logRequests=True)
    for rpc_name in self._rpc_methods:
        self.serv.register_function(getattr(self, rpc_name))
    self.logger = logger
    self.host = host
    self.port = port
    self.lock = RWLock()
    self.act_vol_serv = {}
    self.writable_vid = ReplList()  # ids of volumes that currently accept writes
    self.vid = ReplCounter()
    self.fkey = ReplCounter()
    self.db = ReplDict()
    # Cluster topology comes from the module-level ``config`` object.
    super(Master, self).__init__(
        config.addr, config.clusters, raft_conf,
        consumers=[self.vid, self.fkey, self.db, self.writable_vid])
def __init__(self, port, members, secret=""):
    """Create a raft node on ``localhost:port`` and block until the cluster is ready.

    port    -- local TCP port for this raft node
    members -- member specification, parsed by the ``members`` property
    secret  -- optional shared cluster password; empty/None disables encryption
    """
    JSBASE.__init__(self)
    self._members = members
    self.port = port
    self.dict1 = ReplDict()
    remotes = ["%s:%s" % item for item in self.members]
    cfg = SyncObjConf(autoTick=True)
    cfg.onReady = self.onReady
    # BUG FIX: was ``secret is not "" and secret is not None`` — identity
    # comparison against a str literal (SyntaxWarning, interning-dependent).
    # Truthiness expresses the intent: non-empty and not None.
    if secret:
        print("SECRET")
        cfg.password = secret
    cfg.appendEntriesPeriod = 0.01
    cfg.appendEntriesUseBatch = True
    cfg.raftMinTimeout = 0.4
    cfg.raftMaxTimeout = 1.4
    cfg.dynamicMembershipChange = True
    cfg.onStateChanged = None
    cfg.commandsWaitLeader = False
    cfg.connectionRetryTime = 5.0  # connect to other down nodes every so many secs
    cfg.connectionTimeout = 3.5
    # BUG FIX: leaderFallbackTimeout was assigned twice; the later
    # ``= True`` clobbered this float with a bool. Keep the float only.
    cfg.leaderFallbackTimeout = 10.0
    # NOTE(review): /tmp/raft must already exist or journalling will fail.
    cfg.journalFile = "/tmp/raft/raft_%s" % self.port
    cfg.logCompactionMinEntries = 1000
    cfg.logCompactionMinTime = 60
    self._log_debug("port:%s" % self.port)
    self._log_debug("members:%s" % remotes)
    # self._log_debug("secret:%s"%secret)
    self.syncobj = SyncObj("localhost:%s" % port, remotes, consumers=[self.dict1], conf=cfg)
    # IDIOM FIX: ``isReady() == False`` -> ``not isReady()``.
    while not self.syncobj.isReady():
        time.sleep(1)
        print("wait sync")
    time.sleep(1)
    self.start()
def __init__(self, serverport, other_members):
    """Join the shard's raft cluster and expose the replicated game state.

    serverport    -- this node's raft address
    other_members -- addresses of the remaining cluster members
    """
    logger.info("Initializing SyncObj with serverport %s (others: %s)" % (serverport, other_members))
    self.replicated_game_state = ReplDict()
    # Membership is fixed for the lifetime of the shard.
    sync_conf = SyncObjConf(dynamicMembershipChange=False)
    super(ShardSyncWatcher, self).__init__(
        serverport, other_members,
        conf=sync_conf,
        consumers=[self.replicated_game_state])
    self.__action_clock = 0
def __initDistributedDict(self):
    """Create the replicated dict and block until the raft cluster is ready.

    Returns the ReplDict that is kept in sync across all configured nodes.
    """
    import time  # local import: the enclosing module may not import time

    rr_raft = ReplDict()
    config = SyncObjConf(appendEntriesUseBatch=True)
    syncObj = SyncObj(self.__node, self.__other_nodes, consumers=[rr_raft], conf=config)
    # BUG FIX: py2-only ``print "..."`` statements replaced with the
    # parenthesized single-argument form, valid on both Python 2 and 3.
    if not self.__silent:
        print("Initializing Raft...")
    # PERF FIX: the original spun in a tight ``continue`` loop, pinning a
    # CPU core while waiting; sleep briefly between readiness polls.
    while not syncObj.isReady():
        time.sleep(0.05)
    if not self.__silent:
        print("Raft initialized!")
    return rr_raft
"""Minimal pysyncobj reader: poll a replicated dict and print one key."""
import time
from pysyncobj import SyncObj
from pysyncobj.batteries import ReplCounter, ReplDict

dict1 = ReplDict()
# Single-node cluster; add partner addresses (e.g. localhost:4322/4323)
# to replicate for real.
syncObj = SyncObj('localhost:4321', [], consumers=[dict1])
# BUG FIX: py2-only ``print dir(...)`` / ``print dict1.get(...)`` replaced
# with parenthesized calls, valid on both Python 2 and 3.
print(dir(syncObj))
while True:
    if syncObj.isReady():
        print(dict1.get('key'))
    time.sleep(1)
def test_ReplDict():
    """Exercise every mutator/accessor of ReplDict, applying writes locally."""
    d = ReplDict()
    d.reset({1: 1, 2: 22}, _doApply=True)
    assert d.rawData() == {1: 1, 2: 22}
    d.__setitem__(1, 10, _doApply=True)
    assert d.rawData() == {1: 10, 2: 22}
    d.set(1, 20, _doApply=True)
    assert d.rawData() == {1: 20, 2: 22}
    # setdefault returns the existing value for a present key ...
    assert d.setdefault(1, 50, _doApply=True) == 20
    # ... and the default for a missing one.
    assert d.setdefault(3, 50, _doApply=True) == 50
    d.update({5: 5, 6: 7}, _doApply=True)
    assert d.rawData() == {1: 20, 2: 22, 3: 50, 5: 5, 6: 7}
    assert d.pop(3, _doApply=True) == 50
    assert d.pop(6, _doApply=True) == 7
    # IDIOM FIX: ``== None`` -> ``is None`` (here and below).
    assert d.pop(6, _doApply=True) is None
    assert d.pop(6, 0, _doApply=True) == 0
    assert d.rawData() == {1: 20, 2: 22, 5: 5}
    assert d[1] == 20
    assert d.get(2) == 22
    assert d.get(22) is None
    assert d.get(22, 10) == 10
    assert len(d) == 3
    assert 2 in d
    assert 22 not in d
    assert sorted(d.keys()) == [1, 2, 5]
    assert sorted(d.values()) == [5, 20, 22]
    assert d.items() == d.rawData().items()
    d.clear(_doApply=True)
    assert len(d) == 0
def test_batteriesCommon():
    """End-to-end check of ReplDict and ReplLockManager across a 2-node cluster."""
    d1 = ReplDict()
    l1 = ReplLockManager(autoUnlockTime=30.0)
    d2 = ReplDict()
    l2 = ReplLockManager(autoUnlockTime=30.0)
    a = [getNextAddr(), getNextAddr()]
    o1 = TestObj(a[0], [a[1]], TEST_TYPE.AUTO_TICK_1, consumers=[d1, l1])
    o2 = TestObj(a[1], [a[0]], TEST_TYPE.AUTO_TICK_1, consumers=[d2, l2])
    doAutoTicks(10.0, stopFunc=lambda: o1.isReady() and o2.isReady())
    assert o1.isReady() and o2.isReady()
    # Dict replication: a write on node 1 becomes visible on node 2.
    d1.set('testKey', 'testValue', sync=True)
    doAutoTicks(3.0, stopFunc=lambda: d2.get('testKey') == 'testValue')
    assert d2['testKey'] == 'testValue'
    # ... and a pop on node 2 propagates back to node 1.
    d2.pop('testKey', sync=True)
    # IDIOM FIX: ``== None`` -> ``is None``; ``== True/False`` on booleans
    # replaced with plain / negated asserts (here and below).
    doAutoTicks(3.0, stopFunc=lambda: d1.get('testKey') is None)
    assert d1.get('testKey') is None
    # Lock manager: the lock is exclusive across nodes until released.
    assert l1.tryAcquire('test.lock1', sync=True)
    assert not l2.tryAcquire('test.lock1', sync=True)
    assert not l2.isAcquired('test.lock1')
    l1.release('test.lock1', sync=True)
    assert l2.tryAcquire('test.lock1', sync=True)
    # setdefault is first-writer-wins until the key is popped.
    assert d1.setdefault('keyA', 'valueA', sync=True) == 'valueA'
    assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueA'
    d2.pop('keyA', sync=True)
    assert d2.setdefault('keyA', 'valueB', sync=True) == 'valueB'
    o1.destroy()
    o2.destroy()
    l1.destroy()
    l2.destroy()
class RaftServer(JSBASE):
    """Demo raft node that increments a replicated per-port counter forever."""

    def __init__(self, port, members, secret=""):
        """Start a raft node on ``localhost:port`` and begin the demo loop.

        port    -- local TCP port for this node
        members -- comma-separated "addr:port" list, parsed by ``members``
        secret  -- cluster password
        """
        JSBASE.__init__(self)
        self._members = members
        self.port = port
        self.dict1 = ReplDict()
        remotes = ["%s:%s" % item for item in self.members]
        cfg = SyncObjConf(autoTick=True)
        cfg.onReady = self.onReady
        # NOTE(review): the password is applied even when ``secret`` is the
        # default empty string — confirm that is intended.
        cfg.password = secret
        cfg.appendEntriesPeriod = 0.01
        # cfg.appendEntriesUseBatch = True
        cfg.raftMinTimeout = 0.4
        cfg.raftMaxTimeout = 1.4
        cfg.dynamicMembershipChange = True
        cfg.onStateChanged = None
        cfg.commandsWaitLeader = False
        cfg.connectionRetryTime = 5.0  # connect to other down nodes every so many secs
        cfg.connectionTimeout = 3.5
        cfg.leaderFallbackTimeout = 10.0
        # cfg.journalFile = "/tmp/raft_%s"%self.port
        cfg.logCompactionMinEntries = 1000
        cfg.logCompactionMinTime = 60
        self.logger.debug("port:%s" % self.port)
        self.logger.debug("members:%s" % remotes)
        # self.logger.debug("secret:%s"%secret)
        self.syncobj = SyncObj('localhost:%s' % port, remotes, consumers=[self.dict1], conf=cfg)
        # IDIOM FIX: ``isReady() == False`` -> ``not isReady()``.
        while not self.syncobj.isReady():
            time.sleep(1)
            print("wait sync")
        time.sleep(1)
        self.start()

    def onReady(self):
        """Raft on-ready callback: log readiness and dump the replicated dict."""
        self.logger.debug("READY")
        print(self.dict1.items())
        # self.start()

    @property
    def members(self):
        """Parse the comma-separated member string into (addr, port) tuples."""
        res = []
        for item in self._members.split(","):
            addr, port = item.split(":")
            res.append((addr.strip(), int(port)))
        return res

    def start(self):
        """Increment 'test:<port>' roughly every 0.1s forever, printing the
        whole replicated dict about once a second."""
        key = 'test:%s' % self.port
        # IDIOM FIX: ``== None`` -> ``is None``.
        if self.dict1.get(key) is None:
            self.logger.debug("initial value for:%s" % self.port)
            self.dict1.set(key, 0, sync=True)
            self.logger.debug("initial value DONE:%s" % self.port)
        # (removed unused local ``speed``)
        previnsert = 0.0
        prevprint = 0.0
        for _ in range(100000000):
            time.sleep(0.001)
            last = time.time()
            if last > previnsert + 0.1:
                c = self.dict1.get(key) + 1
                res = False
                # Retry until the replicated write is accepted.
                while not res:
                    try:
                        self.dict1.set(key, c, sync=True, timeout=5)
                        res = True
                    except Exception as e:
                        # BUG FIX: a generic Exception has no ``errorCode``
                        # attribute; only pysyncobj errors carry one.
                        print("error in set:%s" % getattr(e, 'errorCode', None))
                        print(e)
                        time.sleep(0.1)
                previnsert = last
            if last > prevprint + 1.0:
                print(self.dict1.items())
                prevprint = last
class Master(SyncObj):
    """Raft-replicated master for a distributed volume store.

    Serves cluster metadata over XML-RPC, tracks live volume servers,
    assigns volumes/file ids, and migrates replicas off dead servers.
    """

    # Methods exposed over XML-RPC.
    _rpc_methods = ['assign_volumn', 'assign_fid', 'find_volumn',
                    'find_writable_volumn', 'volumn_status', 'node_status']

    def __init__(self, logger, host, port):
        """Bind the XML-RPC server and join the raft cluster.

        logger -- application logger
        host   -- interface the RPC server binds to
        port   -- TCP port the RPC server listens on
        """
        cfg = SyncObjConf()
        cfg.fullDumpFile = 'raft.bin'
        cfg.logCompactionMinTime = 10
        cfg.useFork = True
        self.serv = ThreadXMLRPCServer((host, port), logRequests=True)
        for name in self._rpc_methods:
            self.serv.register_function(getattr(self, name))
        self.logger = logger
        self.host = host
        self.port = port
        self.lock = RWLock()
        self.act_vol_serv = dict()      # live volume servers: id -> address
        self.writable_vid = ReplList()  # writable volume ids (replicated)
        self.vid = ReplCounter()        # volume id allocator (replicated)
        self.fkey = ReplCounter()       # file key allocator (replicated)
        self.db = ReplDict()            # vid -> list of replica server ids
        super(Master, self).__init__(config.addr, config.clusters, cfg,
                                     consumers=[self.vid, self.fkey, self.db, self.writable_vid])

    def update_master(self, masters):
        """No-op hook for master-set updates."""
        pass

    def _recover(self, vid, dead_vid, from_vid, to_vid):
        """Copy volume ``vid`` from server ``from_vid`` to ``to_vid`` and
        replace ``dead_vid`` in the replica list."""
        from_proxy = ServerProxy(self.act_vol_serv[from_vid])
        to_addr = self.act_vol_serv[to_vid]
        self.logger.info('Begin to migrate volumn %d from %s to %s...!' % (vid, from_vid, to_vid))
        from_proxy.migrate_volumn_to(vid, to_addr)
        self.logger.info('Migrate volumn %d from %s to %s succeed!' % (vid, from_vid, to_vid))
        vids = self.db[vid]
        vids.remove(dead_vid)
        vids.append(to_vid)
        self.db.set(vid, vids, sync=True)
        self.update_writable_volumn()
        self.logger.info('Remove %s, append %s' % (dead_vid, to_vid))

    def _check(self, dead_vid):
        """Watch a presumed-dead server for 60s; if it stays dead, kick off
        recovery of every volume that had a replica on it. Runs in a thread."""
        self.logger.info('Monitor dead volumn server %s ...' % dead_vid)
        t = 60
        while t > 0:
            time.sleep(1)
            # IDIOM FIX: ``in d.keys()`` -> ``in d`` (here and below).
            if dead_vid in self.act_vol_serv:
                self.logger.info('Volumn %s becomes live. Stop recover' % dead_vid)
                _thread.exit()
            t -= 1
        for vid, vvids in self.db.items():
            if dead_vid in vvids:
                # Pick any live replica as the migration source.
                for recov_vid in vvids:
                    if recov_vid != dead_vid and recov_vid in self.act_vol_serv:
                        from_vid = recov_vid
                avl_vids = list(set(self.act_vol_serv.keys()) - set(vvids))
                if avl_vids:
                    to_vid = random.choice(avl_vids)
                    _thread.start_new_thread(self._recover, (vid, dead_vid, from_vid, to_vid))
                else:
                    self.logger.warn('No available volumns to migrate')
                    break

    def update_writable_volumn(self, checkLeader=True):
        """Recompute the replicated list of vids whose replicas are all online.

        checkLeader -- when True, only the raft leader performs the update.
        """
        if checkLeader and not self._isLeader():
            return
        writable_vid = [vid for vid, vvids in self.db.items()
                        if all(vvid in self.act_vol_serv for vvid in vvids)]
        self.writable_vid.reset(writable_vid, sync=True)

    # Detect volume servers going offline and migrate their data.
    def update_volumn(self, volumns):
        """Refresh the live-server table from a (id, addr) list; on the
        leader, spawn a checker thread for each server that went offline."""
        if self._isLeader():
            old_volumns = set(self.act_vol_serv.keys())
            new_volumns = set(volumn[0] for volumn in volumns)
            off_volumns = list(old_volumns - new_volumns)
            if off_volumns:
                self.logger.info('{} volumns become offline'.format(off_volumns))
                for off_volumn in off_volumns:
                    _thread.start_new_thread(self._check, (off_volumn,))
        self.act_vol_serv.clear()
        for volumn in volumns:
            self.act_vol_serv[volumn[0]] = volumn[1]
        while not self._isReady():
            time.sleep(1)
        self.update_writable_volumn()

    def assign_volumn(self, size):
        """Allocate a new volume id, create it on two random live servers,
        and record the replica set. Returns the new vid."""
        vid = self.vid.inc(sync=True)
        # BUG FIX: random.sample() requires a sequence; a dict view raises
        # TypeError on modern Python.
        vids = random.sample(list(self.act_vol_serv), 2)
        for vvid in vids:
            s = ServerProxy(self.act_vol_serv[vvid])
            s.assign_volumn(vid, size)
        self.db.set(vid, vids, sync=True)
        self.update_writable_volumn(False)
        return vid

    def assign_fid(self):
        """Return a fresh file id '<vid>,<fkey>' on a writable volume,
        or '' when no volume is writable."""
        if not self.writable_vid:
            return ''
        vid = random.choice(list(self.writable_vid))
        fkey = self.fkey.inc(sync=True)
        fid = '%d,%d' % (vid, fkey)
        return fid

    def find_volumn(self, vid):
        """Return the addresses of the live servers holding volume ``vid``."""
        vids = self.db[vid]
        addrs = []
        # CLEANUP: loop variable no longer shadows the ``vid`` parameter.
        for vvid in vids:
            if vvid in self.act_vol_serv:
                addrs.append(self.act_vol_serv[vvid])
        return addrs

    def find_writable_volumn(self, vid):
        """Like find_volumn, but only when ``vid`` is currently writable."""
        if vid in self.writable_vid:
            return self.find_volumn(vid)
        else:
            return []

    def volumn_status(self):
        """Return per-volume status: replica counts and size usage."""
        res = dict()
        vol_status = dict()
        for vol_serv_id, vol_serv in self.act_vol_serv.items():
            # Best-effort: unreachable servers are simply skipped.
            # ROBUSTNESS FIX: bare ``except: pass`` narrowed and logged,
            # consistent with node_status below.
            try:
                s = ServerProxy(vol_serv)
                vv = s.status()
                vol_status[vol_serv_id] = vv
            except Exception:
                self.logger.exception('Got an exception')
        for vid, vvids in self.db.items():
            sdoc = dict()
            ava_nodes = list(set(vol_status.keys()) & set(vvids))
            sdoc['tat_node_num'] = len(vvids)
            sdoc['ava_node_num'] = len(ava_nodes)
            if ava_nodes:
                vol_sdoc = vol_status[ava_nodes[0]]
                vdoc = vol_sdoc['vdb'][str(vid)]
                sdoc['total_size'] = vdoc['size']
                sdoc['used_size'] = vdoc['counter']
                sdoc['free_size'] = sdoc['total_size'] - sdoc['used_size']
            else:
                sdoc['total_size'] = 0
                sdoc['used_size'] = 0
                sdoc['free_size'] = 0
            res[str(vid)] = sdoc
        return res

    def node_status(self):
        """Return per-server status: address, capacity, and the volumes this
        master believes the server legitimately holds."""
        res = dict()
        for vol_serv_id, vol_serv in self.act_vol_serv.items():
            try:
                s = ServerProxy(vol_serv)
                vv = s.status()
                vol_status = dict()
                vol_status['addr'] = vol_serv
                vol_status['total'] = vv['total']
                vol_status['used'] = vv['used']
                vol_status['free'] = vv['free']
                # BUG FIX: the original removed entries from the list while
                # iterating it, which skips elements; build a filtered list.
                vol_status['nodes'] = [node for node in vv['vdb'].keys()
                                       if vol_serv_id in self.db.get(int(node), [])]
                res[vol_serv_id] = vol_status
            except Exception:
                self.logger.exception('Got an exception')
        return res

    def start(self):
        """Serve XML-RPC requests forever."""
        self.logger.info('Start serving at %s:%d' % (self.host, self.port))
        self.serv.serve_forever()
"""Minimal pysyncobj writer: publish the current time into a replicated dict."""
import time
from pysyncobj import SyncObj
from pysyncobj.batteries import ReplCounter, ReplDict

dict1 = ReplDict()
syncObj = SyncObj('localhost:4322', ['localhost:4321', 'localhost:4323'],
                  consumers=[dict1])
while True:
    dict1.set('key', time.time(), sync=True)
    # BUG FIX: py2-only ``print dict1['key']`` replaced with a parenthesized
    # call, valid on both Python 2 and 3.
    print(dict1['key'])
    time.sleep(1)
from pywebio import start_server
from pywebio.input import *
from pywebio.output import *
from pywebio.session import *
from pywebio import session
from raft_server import join_cluster, get_node_info

# Maximum number of chat messages kept in history.
MAX_MESSAGES_CNT = 10**4
# Reserved administrator account name.
ADMIN_USER = '******'

# NOTE(review): ReplList/ReplDict are not imported by the visible imports
# above — confirm where they come from.
chat_msgs = ReplList()        # chat history: (name, msg) pairs, replicated
node_user_cnt = ReplDict()    # per-node online user count, replicated
node_webui_addr = ReplDict()  # per-node web chat UI address, replicated
local_online_users = set()    # users online on this node only
raft_server = None


def onStateChanged(oldState, newState, node):
    """Raft callback fired when this node's role changes; announce it in chat.

    oldState/newState -- pysyncobj state indices (0=follower, 1=candidate, 2=leader)
    node              -- the node whose role changed
    """
    # BUG FIX: user-visible role name "folower" corrected to "follower".
    states = ["follower", "candidate", "leader"]
    send_msg(ADMIN_USER,
             '节点`%s`角色发生变化, `%s` -> `%s`' % (node, states[oldState], states[newState]),
             instant_output=False)
class GraphHandler(SyncObj):
    """Thrift RPC handler for a graph partitioned across clusters.

    Vertices are assigned to clusters by hashing their name; operations on
    vertices owned by another cluster are forwarded over Thrift to the first
    reachable replica of that cluster.  The local partition (``self.al``) is
    replicated to raft partners via pysyncobj's ReplDict.

    NOTE(review): list/get methods return ``str(...)`` renderings which the
    graph-walking methods later ``eval()`` — confirm inputs are trusted, as
    eval on RPC data is a code-execution risk.
    """

    def __init__(self, my_addr, partners_addrs):
        # my_addr / partners_addrs: raft addresses of this node and its peers.
        self.log = {}
        self.al = ReplDict()  # adjacency list: vertex name -> Vertex
        self.lock = threading.Lock()
        self.rwlock = RWLock()
        self.interfaces = dict()  # for talking with other servers: port -> Thrift client
        # NOTE(review): the SyncObj handle is bound to a local and discarded,
        # and SyncObj.__init__ is never called on ``self`` even though this
        # class subclasses SyncObj — confirm this is intentional.
        syncObj = SyncObj(my_addr, partners_addrs, consumers=[self.al])
        # if os.path.isfile("graph.pickle"):
        #     with open('graph.pickle', 'rb') as f:
        #         self.al = pickle.load(f)
        #     print("loaded data from graph.pickle")
        # else:
        #     self.al = dict()
        #     with open('graph.pickle', 'wb') as f:
        #         pickle.dump(self.al,f)
        #     print("created graph.pickle")

    def init_interface(self, port):
        """Open a Thrift client connection to ``localhost:port`` and cache it."""
        transport = TSocket.TSocket('localhost', port)
        # Buffering is critical. Raw sockets are very slow
        transport = TTransport.TBufferedTransport(transport)
        # Wrap in a protocol
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        # Create a client to use the protocol encoder
        client = Graph.Client(protocol)
        # Connect!
        transport.open()
        self.interfaces[port] = client

    def hash(self, vertex):
        """Map a vertex name to its owning cluster index in [0, cluster_qty)."""
        h = hashlib.md5(str(vertex).encode('utf-8')).hexdigest()
        return int(h, 16) % cluster_qty

    def check_remote(self, vertex):
        """Return the port of a live server owning ``vertex``.

        Returns ``server_index`` (this server) when the vertex is local;
        otherwise probes each replica of the owning cluster and returns the
        first reachable one.  Raises NotFound when no replica answers.
        """
        cluster = self.hash(vertex)
        server = server_index
        if cluster != my_cluster:
            for i in range(total_replicas):
                server = self.check_replica(i, cluster)
                if server != -1:
                    return server
            if server == -1:
                print("error requesting cluster{}".format(cluster))
                x = NotFound()
                x.dsc = "falha no servidor"
                raise x
        return server

    def check_replica(self, replica, cluster):
        """Ping one replica of ``cluster``; return its port, or -1 if unreachable."""
        server = get_server_port(cluster, replica)
        try:
            if server not in self.interfaces:
                self.init_interface(server)
            self.interfaces[server].ping()
        except Exception as e:
            print("server{} from cluster{} is unreachable".format(
                server % 1000, cluster))
            server = -1
        return server

    def ping(self):
        """Liveness probe used by check_replica."""
        print('ping()')

    def add_upd_vertex(self, nome, cor, desc, peso):
        """Create or update vertex ``nome``; forwards when remotely owned.

        NOTE(review): the write lock is not released via try/finally — an
        exception between acquire and release would leak it (applies to the
        other lock-holding methods below as well).
        """
        server = self.check_remote(nome)
        if server != server_index:
            print("add/update vertex {}: requesting server{}".format(
                nome, server))
            res = self.interfaces[server].add_upd_vertex(nome, cor, desc, peso)
            print(res)
            return res
        print("add/update vertex {}".format(nome))
        self.rwlock.acquire_write()
        if nome not in self.al:
            self.al.set(nome, Vertex(nome, cor, desc, peso), sync=True)
            res = "vertice {} criado".format(nome)
        else:
            self.al.set(nome, Vertex(nome, cor, desc, peso), sync=True)
            res = "vertice {} alterado".format(nome)
        # with open('graph.pickle', 'wb') as f:
        #     pickle.dump(self.al,f)
        self.rwlock.release()
        print(res)
        return res

    def add_upd_edge(self, v1, v2, peso, bi_flag):
        """Create/update the edge v1->v2 (and v2->v1 when ``bi_flag``),
        recording it on both endpoints' owners."""
        try:
            # Both endpoints must exist before an edge can be added.
            self.get_vertex(v1)
            self.get_vertex(v2)
        except Exception as e:
            print(str(e))
            raise e
        res = self.add_upd_edge2(v1, v2, peso, bi_flag)
        self.add_edge_in(v1, v2, peso, bi_flag)
        if bi_flag:
            self.add_upd_edge2(v2, v1, peso, bi_flag)
            self.add_edge_in(v2, v1, peso, bi_flag)
        return res

    def add_upd_edge2(self, v1, v2, peso, bi_flag):
        """Write the outgoing edge v1->v2 on v1's owner; forwards when remote."""
        server = self.check_remote(v1)
        if server != server_index:
            print("add/update edge {},{}: requesting server{}".format(
                v1, v2, server))
            res = self.interfaces[server].add_upd_edge2(v1, v2, peso, bi_flag)
            print(res)
            return res
        print("add/update edge {},{}".format(v1, v2))
        self.rwlock.acquire_write()
        ver1 = self.al[v1]
        if v2 not in ver1.edges_out:
            ver1.edges_out[v2] = Edge(v1, v2, peso, bi_flag)
            self.al.set(v1, ver1, sync=True)
            self.rwlock.release()
            res = "aresta {},{} criada".format(v1, v2)
        else:
            ver1.edges_out[v2].set_att(v1, v2, peso, bi_flag)
            self.al.set(v1, ver1, sync=True)
            self.rwlock.release()
            res = "aresta {},{} alterada".format(v1, v2)
        # if bi_flag:
        #     print(ver1.edges_out[v2])
        #     self.rwlock.acquire_write()
        #     ver1.edges_in[v2] = ver1.edges_out[v2]
        #     self.rwlock.release()
        #     self.add_edge_bi(v1, v2, peso, bi_flag)
        # with open('graph.pickle', 'wb') as f:
        #     pickle.dump(self.al,f)
        print(res)
        return res

    def add_edge_in(self, v1, v2, peso, bi_flag):
        """Record the incoming edge v1->v2 on v2's owner; forwards when remote."""
        server = self.check_remote(v2)
        if server != server_index:
            print("edge_in {},{}: requesting server{}".format(v1, v2, server))
            res = self.interfaces[server].add_edge_in(v1, v2, peso, bi_flag)
            return res
        print("edge_in {},{}".format(v1, v2))
        self.rwlock.acquire_write()
        ver2 = self.al[v2]
        ver2.edges_in[v1] = Edge(v1, v2, peso, bi_flag)
        self.al.set(v2, ver2, sync=True)
        self.rwlock.release()
        return "ok"

    def get_vertex(self, v):
        """Return str(vertex) for ``v``; raises NotFound when absent."""
        server = self.check_remote(v)
        if server != server_index:
            print("get vertex {}: requesting server{}".format(v, server))
            res = self.interfaces[server].get_vertex(v)
            print(res)
            return res
        print("get vertex {}".format(v))
        self.rwlock.acquire_read()
        #time.sleep(5)
        if v not in self.al:
            x = NotFound()
            x.dsc = "vertice não encontrado"
            self.rwlock.release()
            print(x.dsc)
            raise x
        res = str(self.al[v])
        self.rwlock.release()
        print(res)
        return res

    def get_edge(self, v1, v2):
        """Return str(edge v1->v2); raises NotFound when absent."""
        server = self.check_remote(v1)
        if server != server_index:
            print("get edge {},{}: requesting server{}".format(v1, v2, server))
            res = self.interfaces[server].get_edge(v1, v2)
            print(res)
            return res
        print("get edge {},{}".format(v1, v2))
        self.rwlock.acquire_read()
        if v1 not in self.al or v2 not in self.al[v1].edges_out:
            x = NotFound()
            x.dsc = "aresta não encontrada"
            self.rwlock.release()
            print(str(x))
            raise x
        res = str(self.al[v1].edges_out[v2])
        self.rwlock.release()
        print(res)
        return res

    def del_vertex(self, v):
        """Delete vertex ``v`` after best-effort removal of all its edges."""
        try:
            # eval() turns the stringified neighbor lists back into lists.
            edges_out = eval(self.list_neighbors(v))
            edges_in = eval(self.list_neighbors_in(v))
        except Exception as e:
            print(str(e))
            raise e
        for v2 in edges_out:
            try:
                self.del_edge2(v, v2)
            except:
                pass
            try:
                self.del_edge_in(v, v2)
            except:
                pass
        for v2 in edges_in:
            try:
                self.del_edge2(v2, v)
            except:
                pass
            try:
                self.del_edge_in(v2, v)
            except:
                pass
        res = self.del_vertex2(v)
        return res

    def del_vertex2(self, v):
        """Remove ``v`` from the local partition; forwards when remotely owned."""
        server = self.check_remote(v)
        if server != server_index:
            print("delete vertex {}: requesting server{}".format(v, server))
            res = self.interfaces[server].del_vertex(v)
            print(res)
            return res
        print("delete vertex {}".format(v))
        # Existence is checked under the read lock, then the vertex is
        # removed under the write lock.
        self.rwlock.acquire_read()
        if v not in self.al:
            x = NotFound()
            x.dsc = "vertice não encontrado"
            self.rwlock.release()
            raise x
        self.rwlock.release()
        self.rwlock.acquire_write()
        self.al.pop(v, sync=True)
        # with open('graph.pickle', 'wb') as f:
        #     pickle.dump(self.al,f)
        self.rwlock.release()
        res = "vertice {} deletado".format(v)
        print(res)
        return res

    def del_edge(self, v1, v2):
        """Delete edge v1->v2 (and v2->v1 when it was bidirectional)."""
        try:
            # The edge's trailing field is its bi_flag; recover it from the
            # stringified edge via eval.
            bi_flag = eval(self.get_edge(v1, v2) + "[-1]")
            res = self.del_edge2(v1, v2)
        except Exception as e:
            print(str(e))
            raise e
        try:
            self.del_edge_in(v1, v2)
            if bi_flag:
                print("delete edge {},{} (bidirectional)".format(v2, v1))
                self.del_edge2(v2, v1)
                self.del_edge_in(v2, v1)
        except:
            pass
        return res

    def del_edge2(self, v1, v2):
        """Remove the outgoing edge v1->v2 on v1's owner; forwards when remote."""
        server = self.check_remote(v1)
        if server != server_index:
            print("delete edge {},{}: requesting server{}".format(
                v1, v2, server))
            res = self.interfaces[server].del_edge2(v1, v2)
            print(res)
            return res
        print("delete edge {},{}".format(v1, v2))
        self.rwlock.acquire_read()
        if v1 not in self.al or v2 not in self.al[v1].edges_out:
            x = NotFound()
            x.dsc = "aresta não encontrada"
            self.rwlock.release()
            raise x
        self.rwlock.release()
        self.rwlock.acquire_write()
        ver1 = self.al[v1]
        ver1.edges_out.pop(v2, None)
        self.al.set(v1, ver1, sync=True)
        # with open('graph.pickle', 'wb') as f:
        #     pickle.dump(self.al,f)
        self.rwlock.release()
        res = "aresta {},{} deletada".format(v1, v2)
        print(res)
        return res

    def del_edge_in(self, v1, v2):
        """Remove the incoming edge v1->v2 on v2's owner; forwards when remote."""
        server = self.check_remote(v2)
        if server != server_index:
            print("del edge_in {},{}: requesting server{}".format(
                v1, v2, server))
            res = self.interfaces[server].del_edge_in(v1, v2)
            return res
        print("edge_in {},{}".format(v1, v2))
        self.rwlock.acquire_write()
        ver2 = self.al[v2]
        ver2.edges_in.pop(v1, None)
        self.al.set(v2, ver2, sync=True)
        self.rwlock.release()
        return "ok"

    def list_edges(self, v):
        """Return str(list of outgoing Edge objects) of vertex ``v``."""
        server = self.check_remote(v)
        if server != server_index:
            print("list edges from vertex {}: requesting server{}".format(
                v, server))
            res = self.interfaces[server].list_edges(v)
            print(res)
            return res
        print("list edges from vertex {}".format(v))
        self.rwlock.acquire_read()
        if v not in self.al:
            x = NotFound()
            x.dsc = "vertice não encontrado"
            self.rwlock.release()
            raise x
        res = str([self.al[v].edges_out[x] for x in self.al[v].edges_out])
        self.rwlock.release()
        print(res)
        return res

    def list_vertices(self, v1, v2):
        """Return str([str(v1), str(v2)]) after verifying edge v1->v2 exists.

        NOTE(review): the local branch calls get_edge/get_vertex while
        already holding the read lock — requires RWLock to be reentrant for
        readers; confirm.
        """
        server = self.check_remote(v1)
        if server != server_index:
            print("list vertices of edge {},{}: requesting server{}".format(
                v1, v2, server))
            self.interfaces[server].get_edge(v1, v2)
            res = [self.interfaces[server].get_vertex(v1)]
        else:
            print("list vertices of edge {},{}".format(v1, v2))
            self.rwlock.acquire_read()
            self.get_edge(v1, v2)
            res = [self.get_vertex(v1)]
            self.rwlock.release()
        server = self.check_remote(v2)
        if server != server_index:
            res.append(self.interfaces[server].get_vertex(v2))
        else:
            self.rwlock.acquire_read()
            res.append(self.get_vertex(v2))
            self.rwlock.release()
        print(res)
        return str(res)

    def list_neighbors(self, v):
        """Return str(list of names reachable via outgoing edges of ``v``)."""
        server = self.check_remote(v)
        if server != server_index:
            print("list neighbors of vertex {}: requesting server{}".format(
                v, server))
            res = self.interfaces[server].list_neighbors(v)
            print(res)
            return res
        print("list neighbors of vertex {}".format(v))
        self.rwlock.acquire_read()
        if v not in self.al:
            x = NotFound()
            x.dsc = "vertice não encontrado"
            self.rwlock.release()
            raise x
        res = str([x for x in self.al[v].edges_out])
        self.rwlock.release()
        print(res)
        return res

    def list_neighbors_in(self, v):
        """Return str(list of names with edges pointing into ``v``)."""
        server = self.check_remote(v)
        if server != server_index:
            print("list neighbors_in of vertex {}: requesting server{}".format(
                v, server))
            res = self.interfaces[server].list_neighbors_in(v)
            print(res)
            return res
        print("list neighbors_in of vertex {}".format(v))
        self.rwlock.acquire_read()
        if v not in self.al:
            x = NotFound()
            x.dsc = "vertice não encontrado"
            self.rwlock.release()
            raise x
        res = str([x for x in self.al[v].edges_in])
        self.rwlock.release()
        print(res)
        return res

    #dijkstra
    def shortest_path(self, v1, v2):
        """Shortest path v1->v2 over edge weights (edge[2]), Dijkstra-style.

        Neighbor and edge data come back as strings from the RPC layer and
        are rebuilt via eval().  Returns str(path) in reverse order from v2
        to v1, or a no-path message.
        """
        Q = [v1]
        dist = dict()
        prev = dict()
        visited = dict()
        dist[v1] = 0
        while Q != []:
            u = Q.pop(0)
            visited[u] = 1
            neighbors = eval(self.list_neighbors(u))
            for n in neighbors:
                edge = eval(self.get_edge(u, n))
                length = edge[2]
                alt = dist[u] + length
                if n not in dist or alt < dist[n]:
                    dist[n] = alt
                    prev[n] = u
                    if n not in visited:
                        Q.append(n)
            Q.sort()
        if (v2 not in dist):
            res = "nao existe caminho entre {} e {}".format(v1, v2)
        else:
            # Walk predecessors back from v2 to v1.
            u = v2
            path = []
            while u != v1:
                path.append(u)
                u = prev[u]
            path.append(v1)
            res = str(path)
        print(res)
        return res