def devices(self):
    try:
        connection = self.master.db_connect()
        with connection.cursor() as cursor:
            sql = f'''SELECT nd.roll,nd.country,nd.area,nd.uid,nd.ip,nd.hostname,nd.platform,ds.x,ds.y
                      FROM network_devices as nd
                      inner join diagram_state_net_devices as ds ON nd.uid=ds.net_device_uid
                      where ds.diagram_state_uid={self.uid}'''
            cursor.execute(sql)
            data = cursor.fetchall()
        attrs = {row['ip']: row for row in data}
        platforms = {row['ip']: row['platform'] for row in data}
        devices = Devices(ip_list=attrs.keys(), platfforms=platforms, check_up=False, master=self.master)
        devices.set_attrs(data=attrs)
        devices.set_uid = True
        self.devices_uid = {device.uid: device for device in devices}
        connection.close()
        self.devs = devices
        return devices
    except Exception as e:
        self.db_log.warning(f'devices unable to load {self.diagram.name} {sql} {e}')
        connection.close()
        return []
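# Hedged usage sketch for the loader above (behaviour of Diagram/Devices is assumed
# from the other snippets in this repo; attribute names mirror the SELECT clause):
#
# diagram = Diagram(master=master, name='ospf_regional')
# diagram.get_newer_state()
# devs = diagram.state.devices()
# for dev in devs:
#     print(dev.uid, dev.ip, dev.hostname, dev.platform, dev.x, dev.y)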
def te_json():
    data = request.get_json()['data']
    nodes = {edge['from_id'] for edge in data} | {edge['to_id'] for edge in data}
    G = nx.Graph()
    G.add_nodes_from(nodes)
    for edge in data:
        G.add_edge(edge['to_id'], edge['from_id'])
        G.edges[edge['to_id'], edge['from_id']]['attr'] = {
            str(edge['to_id']) + 'ip': edge['to_ip'],
            str(edge['from_id']) + 'ip': edge['from_ip']
        }
    # the two nodes with a single edge are the endpoints of the chain
    final = [node for node in G.nodes if len(G.edges(node)) == 1]
    shortest = nx.shortest_path(G, source=final[0], target=final[1])
    dev1 = list(Devices.load_uids(master=master, uids=[final[0]]))[0]
    dev2 = list(Devices.load_uids(master=master, uids=[final[1]]))[0]
    ip_go = []
    ip_return = []
    for index in range(len(shortest) - 1):
        ip_return.append(G.edges[shortest[index], shortest[index + 1]]['attr'][f'{shortest[index]}ip'])
        ip_go.append(G.edges[shortest[index], shortest[index + 1]]['attr'][f'{shortest[index + 1]}ip'])
    ip_return = list(reversed(ip_return))
    # hand-built explicit-path strings; superseded by new_ip_explicit_path() below
    path1 = f"explicit-path name {dev1.hostname}_{dev2.hostname} <br>"
    path2 = f"explicit-path name {dev2.hostname}_{dev1.hostname} <br>"
    print(dev1.platform)
    print(dev2.platform)
    for index, hop in enumerate(ip_go):
        if dev1.platform == "CiscoXR":
            path1 += f'index {(index + 1) * 5} '
        path1 += f"next-address {hop} <br>"
    for index, hop in enumerate(ip_return):
        if dev2.platform == "CiscoXR":
            path2 += f'index {(index + 1) * 5} '
        path2 += f"next-address {hop} <br>"
    path1 = dev1.new_ip_explicit_path(to_device=dev2, ip_hops=ip_go).replace("\n", "<br>")
    path2 = dev2.new_ip_explicit_path(to_device=dev1, ip_hops=ip_return).replace("\n", "<br>")
    path1 = f' <h3>{dev1.hostname} {dev1.ip}</h3> <br> {path1}'
    path2 = f' <h3>{dev2.hostname} {dev2.ip}</h3> <br> {path2}'
    return jsonify({
        "a-b": ip_go, 'b-a': ip_return,
        "a": final[0], "b": final[1],
        "str_a": path1, "str_b": path2
    })
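# Shape of the request body this route expects, inferred from the edge fields read
# above (uids and IPs are illustrative). The edges must form a simple chain, so that
# exactly two nodes have degree 1 and become the path endpoints in `final`:
#
# {"data": [
#     {"from_id": 11, "from_ip": "10.0.0.1", "to_id": 12, "to_ip": "10.0.0.2"},
#     {"from_id": 12, "from_ip": "10.0.1.1", "to_id": 13, "to_ip": "10.0.1.2"}
# ]}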
# 'network_name': '_ospf_ufinet_regional'
# }
# guatemala = {
#     "ip_seed_router": "172.17.22.52",
#     "process_id": '502', "area": '502008', 'network_name': 'RCE_GUATEMALA'
# }
# dbd = ospf_database(isp=isp, source='real_time', **honduras)
#
# hn_ips = list(dbd.devices.devices.keys())
# with open("hn_ips", "w") as f:
#     f.write('\n'.join(hn_ips))

with open("hn_ips", "r") as f:
    ips = [line.replace("\n", "") for line in f]

devs = Devices(ip_list=ips, master=isp.master, check_up=True)
methods = {'set_ip_ospf_interfaces', 'set_interfaces_snmp'}
# note: built but never passed to execute() below, and its key does not match the method names above
kwargs = {dev.ip: {'set_ospf_interfaces': {'as_ospf': '504'}} for dev in devs}
devs.execute(methods=methods)

data_devices = {}
suffix = ' L3:MPLOSP D:B L1:DP '
for dev in devs:
    data_devices[dev.ip] = []
    for index, ospf_data in dev.ospf_interfaces.items():
        data_config = f' interface {index} \n description {suffix} {dev.interfaces[index].description}'
        data_devices[dev.ip].append(data_config)

pickle.dump(data_devices, open("hn.p", "wb"))
# pickle.dump(def_dict, open("gt.p", "wb"))
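# Illustrative content of one data_devices entry produced above (the interface name
# and existing description are made up; only the suffix is real):
#  interface GigabitEthernet0/0/0/1
#  description  L3:MPLOSP D:B L1:DP  <existing interface description>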
class ospf_database:
    edge_roundness = {1: .00, 2: .065, 3: .065, 4: .14, 5: .14, 6: .21, 7: .21, 8: .28, 9: .28, 10: .35,
                      11: .35, 12: .40, 13: .40}

    def __init__(self, ip_seed_router, isp, process_id='1', area='0', interface_method="interfaces_from_db_today",
                 network_name='ospf_regional', source="real_time", period_start="", period_end="",
                 sort_field="output_rate"):
        self.neighbors_occurrences_count = defaultdict(int)
        self.source = source
        self.area = area
        self.isp = isp
        self.period_start = period_start
        self.period_end = period_end
        self.diagram = Diagram(master=isp.master, name=network_name)
        self.diagram.get_newer_state()
        self.lock = Lock()
        if source == 'real_time':
            self.set_ospf_data(ip_seed_router, process_id, area)
        elif source == "db":
            self.set_db_data()
        elif source == 'period':
            self.diagram.get_state_between(period_end=period_end)
            self.set_db_data()
        if not source == 'delay_start':
            self.set_interfaces_data()
            self.set_p2p()
            self.ospf_down_interfaces()
            self.verbose.warning("OSPF DATABASE INIT FINISH")
        # if len(self.adjacencies) < len(p2ps):
        #     difference = set(p2ps.keys()).difference(set(self.adjacencies.keys()))
        #     self.verbose.warning(f"__INIT__Finish missing nets {difference} ")

    def delay_start(self, source='period'):
        self.source = source
        if source == 'period':
            self.verbose.warning('delay_start period START')
            self.diagram.get_state_between(period_end=self.period_end)
            self.set_db_data()
            self.set_interfaces_data()
            self.set_p2p()
            self.ospf_down_interfaces()
            self.verbose.warning("OSPF DATABASE INIT FINISH")

    def set_ospf_data(self, ip_seed_router, process_id, area):
        seed_router = Devices.factory_device(master=self.isp.master, ip=ip_seed_router)
        self.p2p_data, routers = seed_router.ospf_area_adjacency_p2p(process_id=process_id, area=area)
        self.devices = Devices(master=self.isp.master, ip_list=routers, check_up=False)
        self.devices.execute_processes(methods=['set_snmp_location_attr'])
        self.devices.set_uids()
        # self.devices.execute_processes(methods=['set_interfaces'])
        self.verbose.warning(f"_INIT_real_time p2p :{len(self.p2p_data)} routers {len(routers)}")
        self.real_routers = routers
        try:
            old_devices = self.diagram.state.devices()
            self.devices.copy_attr(other_devices=old_devices, attrs=["x", "y"])
        except AttributeError as e:
            self.dev.warning('ospf_database no initial state')

    def set_p2p(self):
        threads = []
        result_p2p = []
        self.verbose.warning(f"__INIT__Finish diagram.state.p2p {len(self.p2p_data)} ")
        for network, neighbors in self.p2p_data.items():
            kwargs = {'lock': self.lock, "list_networks": result_p2p, 'network_id': network,
                      'ospf_database': self, 'neighbors': neighbors, 'network_type': 'p2p'}
            t = Thread(target=ospf_adjacency.add_ospf_adjacency, name=network, kwargs=kwargs)
            t.start()
            threads.append(t)
        for index, t in enumerate(threads):
            self.verbose.debug(f"tried p2p {index} {t.name} ")
            t.join()
            self.verbose.debug(f"joined p2p {index} {t.name} ")
        self.verbose.debug(f"__INIT__Finish join add_ospf_adjacency p2p {len(result_p2p)} ")
        self.p2p = {p2p.network_id: p2p for p2p in result_p2p}
        self.verbose.warning(f"__INIT__Finish join add_ospf_adjacency real p2p {len(self.p2p)} ")

    def set_interfaces_data(self):
        if self.source == 'period':
            self.devices.set_interfaces_db_period(period_start=self.period_start, period_end=self.period_end)
        else:
            self.devices.set_interfaces_db()

    def set_db_data(self):
        self.devices = self.diagram.state.devices()
        self.p2p_data = self.diagram.state.p2p()
        self.verbose.warning(f"_INIT_ db p2p:{len(self.p2p_data)} dev:{len(self.devices)}")

    def get_vs_period(self, period_start, period_end):
        self.devices.set_interfaces_db_period(period_start=period_start, period_end=period_end)
        self.set_p2p()
        return self.get_vs()

    @property
    def adjacencies(self):
        return ChainMap(self.p2p, self.p2p_down_links)

    @property
    def routers(self):
        return self.devices.devices

    def ospf_down_interfaces(self):
        if self.source == 'real_time':
            p2p = {}
            down_ospf_interfaces = [interface for device in self.devices for interface in device.interfaces.values()
                                    if (interface.protocol_state == 'down' or interface.link_state == 'down')
                                    and interface.l3_protocol == "MPLS" and interface.l3_protocol_attr == "OSP"]
            for interface in down_ospf_interfaces:
                net_id = str(ipaddress.IPv4Network(str(interface.ip) + "/30", strict=False).network_address)
                if interface.parent_device.ip in []:  # debugging hook, never true as written
                    self.verbose.warning(f' nt:{net_id} n:{interface.parent_device.ip} i:{interface.ip}')
                try:
                    if net_id != "127.0.0.0":
                        if net_id in p2p:
                            if len(p2p[net_id]) == 1:
                                data = {'router_id': interface.parent_device.ip,
                                        'neighbor_ip': p2p[net_id][0]['router_id'],
                                        'interface_ip': str(interface.ip),
                                        'area': self.area,
                                        'metric': "100000",
                                        'network': net_id}
                                p2p[net_id].append(data)
                                p2p[net_id][0]['neighbor_ip'] = interface.parent_device.ip
                        else:
                            data = {'router_id': interface.parent_device.ip,
                                    'interface_ip': str(interface.ip),
                                    'area': self.area,
                                    'metric': "100000",
                                    'network': net_id}
                            p2p[net_id] = [data]
                        self.verbose.debug(
                            f'set_down_mpls_interfaces p2p down {net_id} {interface.parent_device.ip} {len(p2p)}')
                except Exception as e:
                    self.verbose.warning(f'set_down_mpls_interfaces {interface} error {e}')
            p2p_down_adj = []
            threads = []
            for network, neighbors in p2p.items():
                if len(neighbors) == 2:
                    kwargs = {'lock': self.lock, "list_networks": p2p_down_adj, 'network_id': network,
                              'ospf_database': self, 'neighbors': neighbors, 'network_type': 'p2p_down',
                              'state': 'down'}
                    t = Thread(target=ospf_adjacency.add_ospf_adjacency, kwargs=kwargs)
                    t.start()
                    threads.append(t)
            for t in threads:
                t.join()
            self.p2p_down_links = {p2p.network_id: p2p for p2p in p2p_down_adj}
            self.verbose.warning(f'OSPF LINKS DOWN {len(self.p2p_down_links)}')
        else:
            self.p2p_down_links = {}

    def get_yed_file(self, filename):
        for ip, router in self.routers.items():
            self.graph.nodes[router.ip] = router.get_yed_node()
        for network, p2p in self.p2p.items():
            self.graph.edges[network] = p2p.yed_edge()
        with open(filename + ".graphml", "w") as file:
            file.write(self.graph.get_graph())

    def get_vs(self):
        edges = [edge.get_vs() for edge in self.p2p.values()]
        edges_down = [edge.get_vs() for edge in self.p2p_down_links.values()]
        topology = {"nodes": [router.get_vs() for router in self.routers.values()],
                    "edges": edges + edges_down,
                    'options': self.get_vs_options()}
        return topology

    def get_vs_options(self):
        physics = {'enabled': False, 'solver': 'barnesHut',
                   'barnesHut': {
                       'gravitationalConstant': -2000,
                       'centralGravity': 0.3,
                       'springLength': 195,
                       'springConstant': 0.14,
                       'damping': 0.09,
                       'avoidOverlap': 1
                   }}
        edges = {"smooth": {"type": "discrete", "forceDirection": "vertical"}}
        layout = {'randomSeed': 4444}
        return {'physics': physics, 'edges': edges, 'layout': layout}

    @staticmethod
    def get_vs_from_shelve(shelve_name):
        with shelve.open(shelve_name) as sh:
            try:
                dict_ospf = sh["db"]
                return dict_ospf
            except Exception as e:
                print("not able to load shelve")
                return {"label": "no_data_in_date"}

    @staticmethod
    def save_xy(master, data):
        ids = data.keys()
        pass

    def save_state(self):
        self.diagram.save_state(devices=self.devices, adjacencies=self.adjacencies)
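# Minimal usage sketch for the class above, assuming an InternetServiceProvider
# instance with a configured master as in the scripts elsewhere in this repo; seed
# router, process id and area are taken from the commented-out examples above:
#
# isp = ISP()
# isp.master = Master()
# db = ospf_database(ip_seed_router='172.17.22.52', isp=isp,
#                    process_id='502', area='502008',
#                    network_name='RCE_GUATEMALA', source='real_time')
# topology = db.get_vs()   # nodes/edges/options topology dict for the front end
# db.save_state()          # persist devices and adjacencies to the diagram state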
#
# interfaces = {index: {'index': index, 'description': interface.description, 'ip': interface.ip, 'mask': interface.mask}
#               for index, interface in cisco.interfaces.items() if "BDI" in index}
#
# with open('co_sc', 'wb') as co:
#     pickle.dump({'pw': pseudo, 'interface': interfaces}, file=co)
#     print('saved')

with open('co_sc', 'rb') as co:
    data = pickle.load(co)

interfaces = data['interface']
pseudo = data['pw']
ips = {pw['destination_ip'] for pw in pseudo.values()}
pprint(ips)

devs = Devices(master=master, ip_list=ips)
methods = ['set_pseudowires']
full_command = ""
devs.execute(methods=methods)

# table = []
# full_command = ""
# command_failed = ""
# for interface in interfaces.values():
#     if "BD" in interface['index']:
#         try:
#             vlan_id = interface['index'].replace("BDI", "")
#             new_index = "BVI" + vlan_id
#             pw = pseudo[vlan_id]
#             command = f''' interface {new_index}
#             description L3:CONCUS D:D L1:WR {interface['description'].strip()} pw:{pw['destination_ip']}:{pw['vc_id']}
def load_device_uid(self, uid):
    return Devices.load_uid(master=self.master, uid=uid)

def load_devices_uid(self, uids):
    return Devices.load_uids(master=self.master, uids=uids)
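# Hedged usage sketch (assumes these helpers live on an object exposing self.master,
# as the surrounding methods do; the uids are illustrative):
#
# device = obj.load_device_uid(42)
# devices = obj.load_devices_uid([42, 43])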
from model.InternetServiceProvider import InternetServiceProvider as ISP
from config.Master import Master
from model.Devices import Devices
from pprint import pprint
import argparse

isp = ISP()
isp.master = Master()

parser = argparse.ArgumentParser(description='BGP neighbors state saver')
parser.add_argument('host', type=str, help='host_ip')
args = parser.parse_args()
ip = args.host

device = list(iter(Devices(master=isp.master, ip_list=[ip])))[0]
device.save_bgp_neighbors_states(special_community='INTERNET_UFINET')
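# Example invocation (the script filename is hypothetical, the IP illustrative):
#   python save_bgp_states.py 172.16.30.247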
connection = master.db_connect()
with connection.cursor() as cursor:
    cursor.execute(sql)
    data = cursor.fetchall()

uids = {row['net_device_uid'] for row in data}
window = 15
for diagram in diagrams:
    not_polled = set(diagram.state.devices_uid.keys()).difference(uids)
    if not_polled:
        ips = [diagram.state.devices_uid[k].ip for k in not_polled]
        ips_uid = {diagram.state.devices_uid[k].ip: k for k in not_polled}
        # keep at most `window` devices per run
        top_ten_ips = ips[:window] if len(ips) > window else ips
        ips_uid = {ip: ips_uid[ip] for ip in top_ten_ips}
        break
#
#
devs = Devices(master=Master(), ip_list=top_ten_ips, check_up=False)
for dev in devs:
    dev.uid = ips_uid[dev.ip]
devs.add_snmp_event(event='interfaces', type='asyc')
devs.execute(methods=["set_snmp_community"], thread_window=50)
devs.execute_processes(methods=["set_interfaces_snmp"], thread_window=50)
#
#
# asyncio.get_event_loop().run_until_complete(devs.interfaces_async())
#
# devs.save_interfaces()
from model.Devices import Devices
from config.Master import Master
from model.Diagram import Diagram
import asyncio
import datetime

top_ten_ips = ['172.16.30.247']
#
devs = Devices(master=Master(), ip_list=top_ten_ips, check_up=False)
devs.devices['172.16.30.247'].uid_db()
devs.add_snmp_event(event='interfaces', type='asyc')
devs.devices['172.16.30.247'].community = 'pnrw-all'
devs.execute(methods=['set_interfaces_snmp'])
#
#
# asyncio.get_event_loop().run_until_complete(devs.set_interfaces_snmp())
print(devs.devices['172.16.30.247'].interfaces['BVI2150'].util_in)
#
#
def save_interfaces_state_db(self, template='internet_ufinet.yml'):
    template_data = yaml.safe_load(open(template).read())
    methods = ['set_interfaces']
    for device_group, data in template_data.items():
        if 'network' in data:
            devices = Devices(self.master, network=data['network'])
        else:
            devices = Devices(self.master, ip_list=data['devices'])
        devices.remove_duplicates_db()
        devices.execute(methods=methods, thread_window=10)
        for filter_ in data['filters']:
            devices.execute(methods=["save_interfaces_states"],
                            kwargs={ip: {"save_interfaces_states": {'filters': filter_}}
                                    for ip in devices.keys()},
                            thread_window=25)
        devices.execute(methods=["correct_ip_interfaces"], thread_window=55)
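# Hedged sketch of the YAML template consumed above (keys inferred from the loop:
# each group carries either a 'network' or a 'devices' list plus 'filters'; group
# names, addresses and filter payloads are purely illustrative, and each filter
# entry is handed verbatim to save_interfaces_states):
#
# edge_routers:
#   network: '10.0.0.0/24'
#   filters:
#     - {}
# core_routers:
#   devices:
#     - '172.16.30.247'
#   filters:
#     - {}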
sql = f'''SELECT net_device_uid from poll_events where timestamp>'{date_last_ten_minutes}' '''
connection = master.db_connect()
with connection.cursor() as cursor:
    cursor.execute(sql)
    data = cursor.fetchall()

uids = {row['net_device_uid'] for row in data}
window = 15
for diagram in diagrams:
    not_polled = set(diagram.state.devices_uid.keys()).difference(uids)
    if not_polled:
        ips = [diagram.state.devices_uid[k].ip for k in not_polled]
        ips_uid = {diagram.state.devices_uid[k].ip: k for k in not_polled}
        # keep at most `window` devices per run
        top_ten_ips = ips[:window] if len(ips) > window else ips
        ips_uid = {ip: ips_uid[ip] for ip in top_ten_ips}
        break
#
#
devs = Devices(master=Master(), ip_list=top_ten_ips, check_up=False)
for dev in devs:
    dev.uid = ips_uid[dev.ip]
devs.add_snmp_event(event='interfaces', type='asyc')
devs.execute(methods=["set_snmp_community"], thread_window=50)
#
# asyncio.get_event_loop().run_until_complete(devs.interfaces_async())
#
# devs.save_interfaces()