def test_get_opposite_dpid(self):
    """Both endpoints of a registered link resolve to their peer's dpid."""
    topo = Topology()
    topo.register_link(1, 24, 2, 19)
    # (receiving dpid, ingress port) -> expected sender dpid
    for dpid, port, expected in ((1, 19, 2), (2, 24, 1)):
        self.assertEqual(topo.get_opposite_dpid(dpid, port), expected)
def test(self, router):
    """Routers are sorted into the per-link-type lists while parsing."""
    routers = defaultdict(list)

    def _fake_router(link_type):
        # Stand-in border router whose single interface has the given type.
        mock_br = create_mock(["interface"])
        mock_br.interface = create_mock(["link_type"])
        mock_br.interface.link_type = link_type
        routers[link_type].append(mock_br)
        return mock_br

    router_dict = {
        "br-parent": "PARENT",
        "br-child": "CHILD",
        "br-peer": "PEER",
        "br-core0": "CORE",
        "br-core1": "CORE",
    }
    inst = Topology()
    router.side_effect = lambda v, k: _fake_router(v)
    # Call
    inst._parse_router_dicts({"BorderRouters": router_dict})
    # Tests
    for attr, kind in (
        ("parent_border_routers", "PARENT"),
        ("child_border_routers", "CHILD"),
        ("peer_border_routers", "PEER"),
        ("core_border_routers", "CORE"),
    ):
        ntools.assert_count_equal(getattr(inst, attr), routers[kind])
def test(self, server):
    """Every service entry is passed to the server factory and stored."""
    topo_dict = {
        'BeaconService': {"bs1": "bs1 val"},
        'CertificateService': {"cs1": "cs1 val"},
        'PathService': {"ps1": "ps1 val", "ps2": "ps2 val"},
        'SIG': {"sig1": "sig1 val"},
        'DiscoveryService': {"ds1": "ds1 val"},
    }
    inst = Topology()
    server.side_effect = lambda v, k: "%s-%s" % (k, v)
    # Call
    inst._parse_srv_dicts(topo_dict)
    # Tests
    expected_calls = [
        call("bs1 val", "bs1"),
        call("cs1 val", "cs1"),
        call("ps1 val", "ps1"),
        call("ps2 val", "ps2"),
        call("sig1 val", "sig1"),
        call("ds1 val", "ds1"),
    ]
    assert_these_calls(server, expected_calls, any_order=True)
    ntools.eq_(inst.beacon_servers, ["bs1-bs1 val"])
    ntools.eq_(inst.certificate_servers, ["cs1-cs1 val"])
    ntools.eq_(sorted(inst.path_servers),
               sorted(["ps1-ps1 val", "ps2-ps2 val"]))
    ntools.eq_(inst.discovery_servers, ["ds1-ds1 val"])
def test_basic(self):
    """get_own_config finds the beacon server whose name matches."""
    inst = Topology()
    servers = []
    for idx in range(4):
        bs = create_mock(["name"])
        bs.name = "bs%d" % idx
        servers.append(bs)
    inst.beacon_servers.extend(servers)
    # Call
    ntools.eq_(inst.get_own_config("bs", "bs3"), servers[3])
def _write_as_topos(self):
    """Serialize every AS topology to JSON and sanity-parse it back."""
    for topo_id, as_topo, base in _srv_iter(
            self.topo_dicts, self.out_dir, common=True):
        topo_path = os.path.join(base, TOPO_FILE)
        serialized = json.dumps(self.topo_dicts[topo_id],
                                default=_json_default, indent=2)
        write_file(topo_path, serialized + '\n')
        # Round-trip check: the file we just wrote must parse cleanly.
        Topology.from_file(topo_path)
def _write_as_topos(self):
    """Serialize every AS topology to YAML and sanity-parse it back."""
    for topo_id, as_topo, base in _srv_iter(self.topo_dicts, self.out_dir,
                                            common=True):
        topo_path = os.path.join(base, TOPO_FILE)
        serialized = yaml.dump(self.topo_dicts[topo_id],
                               default_flow_style=False)
        write_file(topo_path, serialized)
        # Round-trip check: the file we just wrote must parse cleanly.
        Topology.from_file(topo_path)
def test(self, parse):
    """Zookeeper entries are rendered as "[addr]:port" strings."""
    zk_dict = {
        'zk0': {'Addr': 'zkv4', 'L4Port': 2181},
        'zk1': {'Addr': 'zkv6', 'L4Port': 2182},
    }
    inst = Topology()
    parse.side_effect = lambda addr: addr  # identity: keep the raw value
    # Call
    inst._parse_zk_dicts({"ZookeeperService": zk_dict})
    # Tests
    expected = ["[zkv4]:2181", "[zkv6]:2182"]
    ntools.assert_count_equal(inst.zookeepers, expected)
def test(self, router):
    """Parsed routers end up in the single border_routers list."""
    routers = defaultdict(list)

    def _fake_router(link_type):
        # Stand-in BR with one interface (id 0) of the given link type.
        mock_br = create_mock(["interfaces"])
        mock_br.interfaces = {0: create_mock(["link_type"])}
        mock_br.interfaces[0].link_type = link_type
        routers[link_type].append(mock_br)
        return mock_br

    router_dict = {"br-parent": "parent"}
    inst = Topology()
    router.side_effect = lambda v, k: _fake_router(v)
    # Call
    inst._parse_router_dicts({"BorderRouters": router_dict})
    # Tests
    ntools.assert_count_equal(inst.border_routers, routers["parent"])
def __init__(self, server_id, conf_dir, public=None, bind=None):
    """
    Base constructor for a SCION network element.

    :param str server_id: server identifier.
    :param str conf_dir: configuration directory.
    :param list public:
        (host_addr, port) of the element's public address
        (i.e. the address visible to other network elements).
    :param list bind:
        (host_addr, port) of the element's bind address, if any
        (i.e. the address the element uses to identify itself to the local
        operating system, if it differs from the public address due to NAT).
    """
    self.id = server_id
    self.conf_dir = conf_dir
    # Interface ID -> border router mapping; filled by init_ifid2br().
    self.ifid2br = {}
    self.topology = Topology.from_file(
        os.path.join(self.conf_dir, TOPO_FILE))
    self.config = Config.from_file(
        os.path.join(self.conf_dir, AS_CONF_FILE))
    # Must be over-ridden by child classes:
    self.CTRL_PLD_CLASS_MAP = {}
    self.SCMP_PLD_CLASS_MAP = {}
    self.public = public
    self.bind = bind
    if self.SERVICE_TYPE:
        # Fall back to the topology-configured addresses for anything the
        # caller did not supply explicitly.
        own_config = self.topology.get_own_config(self.SERVICE_TYPE,
                                                  server_id)
        if public is None:
            self.public = own_config.public
        if bind is None:
            self.bind = own_config.bind
    self.init_ifid2br()
    self.trust_store = TrustStore(self.conf_dir)
    self.total_dropped = 0
    self._core_ases = defaultdict(
        list)  # Mapping ISD_ID->list of core ASes
    self.init_core_ases()
    self.run_flag = threading.Event()
    self.run_flag.set()
    self.stopped_flag = threading.Event()
    self.stopped_flag.clear()
    self._in_buf = queue.Queue(MAX_QUEUE)
    self._socks = SocketMgr()
    self._startup = time.time()
    # Metadata class for outgoing messages depends on the transport.
    if self.USE_TCP:
        self._DefaultMeta = TCPMetadata
    else:
        self._DefaultMeta = UDPMetadata
    # In-flight verification state, each guarded by its own lock.
    self.unverified_segs = set()
    self.unv_segs_lock = threading.RLock()
    self.requested_trcs = {}
    self.req_trcs_lock = threading.Lock()
    self.requested_certs = {}
    self.req_certs_lock = threading.Lock()
    # TODO(jonghoonkwon): Fix me to setup sockets for multiple public addresses
    host_addr, self._port = self.public[0]
    self.addr = SCIONAddr.from_values(self.topology.isd_as, host_addr)
    self._setup_sockets(True)
def test(self):
    """BR lists are concatenated in parent, child, peer, core order."""
    topology = Topology()
    groups = {
        "parent_border_routers": [0, 1],
        "child_border_routers": [2],
        "peer_border_routers": [3, 4, 5],
        "core_border_routers": [6, 7],
    }
    for attr, routers in groups.items():
        setattr(topology, attr, routers)
    ntools.eq_(topology.get_all_border_routers(), list(range(8)))
def test(self):
    """Edge-router lists concatenate in parent, child, peer, routing order."""
    topology = Topology()
    groups = {
        "parent_edge_routers": [0, 1],
        "child_edge_routers": [2],
        "peer_edge_routers": [3, 4, 5],
        "routing_edge_routers": [6, 7],
    }
    for attr, routers in groups.items():
        setattr(topology, attr, routers)
    ntools.eq_(topology.get_all_edge_routers(), list(range(8)))
def test(self):
    """Interface lists concatenate in parent, child, peer, core order."""
    topology = Topology()
    groups = {
        "parent_interfaces": [0, 1],
        "child_interfaces": [2],
        "peer_interfaces": [3, 4, 5],
        "core_interfaces": [6, 7],
    }
    for attr, ifaces in groups.items():
        setattr(topology, attr, ifaces)
    ntools.eq_(topology.get_all_interfaces(), list(range(8)))
def _load_credentials(as_path, isd_as):
    """Load certificate chain, TRC and key material for one AS.

    The credentials are read from the beacon-server instance directory,
    which is assumed to exist in every AS configuration.

    :param str as_path:
        unused; the path is recomputed below from PROJECT_ROOT/GEN_PATH.
        Kept for interface compatibility.
    :param ISD_AS isd_as: the ISD-AS to load credentials for.
    :returns: an ASCredential with the cert chain, TRC, AS keys and (for
        core ASes) the core signing/online/offline keys.
    """
    print("Updating AS%s" % isd_as)
    # The element to get the credentials from.
    # We assume that the beacon server exists in every AS configuration.
    key_dict = {}
    core_key_dict = {}
    as_path = os.path.join(
        PROJECT_ROOT, GEN_PATH,
        'ISD%s/AS%s' % (isd_as.isd_str(), isd_as.as_file_fmt()))
    instance_id = "bs%s-%s-1" % (isd_as.isd_str(), isd_as.as_file_fmt())
    instance_path = os.path.join(as_path, instance_id)
    topo_path = os.path.join(instance_path, TOPO_FILE)
    # Credential files for all ASes
    as_key_path = {
        'cert_path': get_cert_chain_file_path(instance_path, isd_as,
                                              INITIAL_CERT_VERSION),
        'trc_path': get_trc_file_path(instance_path, isd_as[0],
                                      INITIAL_TRC_VERSION),
        'enc_key_path': get_enc_key_file_path(instance_path),
        'sig_key_path': get_sig_key_file_path(instance_path),
        'sig_key_raw_path': get_sig_key_raw_file_path(instance_path),
        'as_config_path': os.path.join(instance_path, AS_CONF_FILE),
    }
    # Credential files for core ASes only
    core_key_path = {
        'core_sig_key_path': get_core_sig_key_file_path(instance_path),
        'core_sig_key_raw_path': get_core_sig_key_raw_file_path(instance_path),
        'online_key_path': get_online_key_file_path(instance_path),
        'online_key_raw_path': get_online_key_raw_file_path(instance_path),
        'offline_key_path': get_offline_key_file_path(instance_path),
        'offline_key_raw_path': get_offline_key_raw_file_path(instance_path),
    }
    for key, path in as_key_path.items():
        try:
            if key.startswith('cert'):
                cert = _json_file_to_str(path)
            elif key.startswith('trc'):
                trc = _json_file_to_str(path)
            elif key.startswith('as'):
                as_config_dict = _yaml_file_to_dict(path)
                key_dict['master_as_key'] = as_config_dict['MasterASKey']
            else:
                # Strip the '_path' suffix from the dict key, and the
                # trailing newline from the file contents.
                key_name = key[:-5]
                key_dict[key_name] = read_file(path)[:-1]
        except IOError as err:
            # BUG FIX: was `"IOError({0}): {1}" % (err, path)` — a
            # str.format-style template used with the % operator raises
            # TypeError instead of printing the error.
            print("IOError({0}): {1}".format(err, path))
            exit(1)
    tp = Topology.from_file(topo_path)
    if tp.is_core_as:
        for key, path in core_key_path.items():
            try:
                key_name = key[:-5]
                core_key_dict[key_name] = read_file(path)[:-1]
            except IOError as err:
                # BUG FIX: same %-on-format-template defect as above.
                print("IOError({0}): {1}".format(err, path))
                exit(1)
    return ASCredential(cert, trc, key_dict, core_key_dict)
def main_default(type_, local_type=None, trace_=False, **kwargs):
    """
    Default main() method. Parses cmdline args, sets up signal handling,
    logging, creates the appropriate object and runs it.

    :param type type_: Primary type to instantiate.
    :param type local_type:
        If not `None`, load the topology to check if this is a core or local
        AS. If it's a core AS, instantiate the primary type, otherwise the
        local type.
    :param bool trace_: Should a periodic thread stacktrace report be created?
    """
    handle_signals()
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_dir', default="logs/",
                        help='Log dir (Default: logs/)')
    parser.add_argument(
        '--spki_cache_dir', default="gen-cache/",
        help='Cache dir for SCION TRCs and cert chains (Default: gen-cache/)')
    parser.add_argument('--prom', type=str,
                        help='Address to export prometheus metrics on')
    parser.add_argument('server_id', help='Server identifier')
    parser.add_argument('conf_dir', nargs='?', default='.',
                        help='Configuration directory (Default: ./)')
    args = parser.parse_args()
    init_logging(os.path.join(args.log_dir, args.server_id))
    # Pick the class to instantiate: callers passing no local_type, and core
    # ASes, get the primary type; non-core ASes get local_type.
    cls = type_
    if local_type is not None:
        topo = Topology.from_file(os.path.join(args.conf_dir, TOPO_FILE))
        if not topo.is_core_as:
            cls = local_type
    inst = cls(args.server_id, args.conf_dir, prom_export=args.prom,
               spki_cache_dir=args.spki_cache_dir, **kwargs)
    if trace_:
        trace(inst.id)
    logging.info("Started %s", args.server_id)
    inst.run()
def test_register_link__multiple_times(self):
    """Re-registering the same link must not duplicate or corrupt state."""
    topo = Topology()
    repeats = 10
    while repeats:
        topo.register_link(1, 24, 2, 19)
        repeats -= 1
    self.assertEqual(topo.get_ports(2), [19])
    self.assertEqual(topo.get_opposite_dpid(1, 19), 2)
    # check side effects: unrelated dpids stay untouched
    self.assertEqual(topo.get_ports(10), [])
    self.assertEqual(topo.get_opposite_dpid(15, 4), 0)
def test(self, server):
    """Control-service entries are passed to the factory and stored."""
    topo_dict = {
        'ControlService': {"cs1": "cs1 val"},
        'SIG': {"sig1": "sig1 val"},
    }
    inst = Topology()
    server.side_effect = lambda v, k: "%s-%s" % (k, v)
    # Call
    inst._parse_srv_dicts(topo_dict)
    # Tests
    expected_calls = [
        call("cs1 val", "cs1"),
        call("sig1 val", "sig1"),
    ]
    assert_these_calls(server, expected_calls, any_order=True)
    ntools.eq_(inst.control_servers, ["cs1-cs1 val"])
def __init__(self, server_id: str, conf_dir: str,
             host_addr: HostAddrBase = None, port: int = None) -> None:
    """
    Base constructor for a SCION network element.

    :param str server_id: server identifier.
    :param str conf_dir: configuration directory.
    :param `HostAddrBase` host_addr:
        the interface to bind to. Overrides the address in the topology
        config.
    :param int port:
        the port to bind to. Overrides the address in the topology config.
    """
    self.id = server_id
    self.conf_dir = conf_dir
    # Interface ID -> border router mapping; filled by init_ifid2br().
    self.ifid2br = {}  # type: Dict[int, RouterElement]
    self._port = port
    self.topology = Topology.from_file(
        os.path.join(self.conf_dir, TOPO_FILE))
    self.config = Config.from_file(
        os.path.join(self.conf_dir, AS_CONF_FILE))
    # Must be over-ridden by child classes:
    # self.CTRL_PLD_CLASS_MAP = {}
    # type: Dict[str, Dict[Optional[int], Callable[[object, object, object], None]]]
    # self.SCMP_PLD_CLASS_MAP = {}
    # type: Dict[int, Dict[Optional[int], Callable[[object, object], None]]]
    if self._service_type():
        # Fall back to the topology-configured address/port for anything
        # the caller did not supply explicitly.
        own_config = self.topology.get_own_config(self._service_type(),
                                                  server_id)
        if host_addr is None:
            host_addr = own_config.addr
        if self._port is None:
            self._port = own_config.port
    self.addr = SCIONAddr.from_values(
        self.topology.isd_as, host_addr)  # type: SCIONAddr
    self.init_ifid2br()
    self.trust_store = TrustStore(self.conf_dir)
    self.total_dropped = 0
    # BUG FIX: was `defaultdict(list_object)` — `list_object` is undefined
    # and raised NameError at construction; the factory must be the builtin
    # `list` (matching the untyped variant of this constructor).
    self._core_ases = defaultdict(
        list
    )  # type: defaultdict[int, List[object]]  # Mapping ISD_ID->list of core ASes
    self.init_core_ases()
    self.run_flag = threading.Event()
    self.run_flag.set()
    self.stopped_flag = threading.Event()
    self.stopped_flag.clear()
    self._in_buf = queue.Queue(MAX_QUEUE)  # type: queue.Queue[object]
    self._socks = SocketMgr()
    self._setup_sockets(True)
    self._startup = time.time()
    # Metadata class for outgoing messages depends on the transport.
    if SCIONElement.USE_TCP:
        self.DefaultMeta = TCPMetadata  # type: Type[MetadataBase]
    else:
        self.DefaultMeta = UDPMetadata
def __init__(self, server_id, conf_dir, host_addr=None, port=None):
    """
    Base constructor for a SCION network element.

    :param str server_id: server identifier.
    :param str conf_dir: configuration directory.
    :param `HostAddrBase` host_addr:
        the interface to bind to. Overrides the address in the topology
        config.
    :param int port:
        the port to bind to. Overrides the address in the topology config.
    """
    self.id = server_id
    self.conf_dir = conf_dir
    # Interface ID -> border router mapping; filled by init_ifid2br().
    self.ifid2br = {}
    self._port = port
    self.topology = Topology.from_file(
        os.path.join(self.conf_dir, TOPO_FILE))
    self.config = Config.from_file(
        os.path.join(self.conf_dir, AS_CONF_FILE))
    # Must be over-ridden by child classes:
    self.CTRL_PLD_CLASS_MAP = {}
    self.SCMP_PLD_CLASS_MAP = {}
    if self.SERVICE_TYPE:
        # Fall back to the topology-configured address/port for anything
        # the caller did not supply explicitly.
        own_config = self.topology.get_own_config(self.SERVICE_TYPE,
                                                  server_id)
        if host_addr is None:
            host_addr = own_config.addr
        if self._port is None:
            self._port = own_config.port
    self.addr = SCIONAddr.from_values(self.topology.isd_as, host_addr)
    self.init_ifid2br()
    self.trust_store = TrustStore(self.conf_dir)
    self.total_dropped = 0
    self._core_ases = defaultdict(list)  # Mapping ISD_ID->list of core ASes
    self.init_core_ases()
    self.run_flag = threading.Event()
    self.run_flag.set()
    self.stopped_flag = threading.Event()
    self.stopped_flag.clear()
    self._in_buf = queue.Queue(MAX_QUEUE)
    self._socks = SocketMgr()
    self._setup_sockets(True)
    self._startup = time.time()
    # Metadata class for outgoing messages depends on the transport.
    if self.USE_TCP:
        self.DefaultMeta = TCPMetadata
    else:
        self.DefaultMeta = UDPMetadata
    # In-flight verification state, each guarded by its own lock.
    self.unverified_segs = set()
    self.unv_segs_lock = threading.RLock()
    self.requested_trcs = set()
    self.req_trcs_lock = threading.Lock()
    self.requested_certs = set()
    self.req_certs_lock = threading.Lock()
def test_get_ports(self):
    """get_ports lists the local ports of all links touching a dpid."""
    topo = Topology()
    # Unknown dpid before anything is registered.
    self.assertEqual(topo.get_ports(1), [])
    links = ((1, 24, 2, 19), (1, 21, 3, 10))
    for src_dpid, src_port, dst_dpid, dst_port in links:
        topo.register_link(src_dpid, src_port, dst_dpid, dst_port)
    expectations = {1: [24, 21], 2: [19], 3: [10]}
    for dpid, ports in expectations.items():
        self.assertEqual(topo.get_ports(dpid), ports)
def __init__(self, server_id, conf_dir, host_addr=None, port=SCION_UDP_PORT):
    """
    Base constructor for a SCION network element.

    :param str server_id: server identifier.
    :param str conf_dir: configuration directory.
    :param `HostAddrBase` host_addr:
        the interface to bind to. Overrides the address in the topology
        config.
    :param int port: the port to bind to.
    """
    self.id = server_id
    self.conf_dir = conf_dir
    # Interface ID -> edge router mapping; filled by init_ifid2er().
    self.ifid2er = {}
    self._port = port
    self.topology = Topology.from_file(
        os.path.join(self.conf_dir, TOPO_FILE))
    self.config = Config.from_file(
        os.path.join(self.conf_dir, AS_CONF_FILE))
    # Must be over-ridden by child classes:
    self.CTRL_PLD_CLASS_MAP = {}
    self.SCMP_PLD_CLASS_MAP = {}
    if host_addr is None:
        # Fall back to the topology-configured address.
        own_config = self.topology.get_own_config(self.SERVICE_TYPE,
                                                  server_id)
        host_addr = own_config.addr
    self.addr = SCIONAddr.from_values(self.topology.isd_as, host_addr)
    # Caching DNS client seeded with this AS's DNS servers and domain.
    self._dns = DNSCachingClient(
        [str(s.addr) for s in self.topology.dns_servers],
        self.topology.dns_domain)
    self.init_ifid2er()
    self.trust_store = TrustStore(self.conf_dir)
    self.total_dropped = 0
    self._core_ases = defaultdict(
        list)  # Mapping ISD_ID->list of core ASes
    self.init_core_ases()
    self.run_flag = threading.Event()
    self.run_flag.set()
    self.stopped_flag = threading.Event()
    self.stopped_flag.clear()
    self._in_buf = queue.Queue(MAX_QUEUE)
    self._socks = SocketMgr()
    self._setup_socket(True)
    self._startup = time.time()
def test(self, isd_as):
    """parse_dict sets the scalar fields and delegates to the sub-parsers."""
    topo_dict = {'Core': True, 'ISD_AS': '1-ff00:0:312', 'MTU': 440,
                 'Overlay': 'UDP/IPv4'}
    inst = Topology()
    parsers = ("_parse_srv_dicts", "_parse_router_dicts", "_parse_zk_dicts")
    for name in parsers:
        setattr(inst, name, create_mock())
    # Call
    inst.parse_dict(topo_dict)
    # Tests
    ntools.eq_(inst.is_core_as, True)
    ntools.eq_(inst.isd_as, isd_as.return_value)
    ntools.eq_(inst.mtu, 440)
    for name in parsers:
        getattr(inst, name).assert_called_once_with(topo_dict)
def test(self, isd_as):
    """parse_dict sets the scalar fields and delegates to the sub-parsers."""
    topo_dict = {'Core': True, 'ISD_AS': '1-2', 'DnsDomain': 3, 'MTU': 440}
    inst = Topology()
    parsers = ("_parse_srv_dicts", "_parse_router_dicts", "_parse_zk_dicts")
    for name in parsers:
        setattr(inst, name, create_mock())
    # Call
    inst.parse_dict(topo_dict)
    # Tests
    ntools.eq_(inst.is_core_as, True)
    ntools.eq_(inst.isd_as, isd_as.return_value)
    ntools.eq_(inst.dns_domain, 3)
    ntools.eq_(inst.mtu, 440)
    for name in parsers:
        getattr(inst, name).assert_called_once_with(topo_dict)
##################
#   Main Stuff   #
##################
# Pipeline: parse the .dlin input, simplify the formula, build the parse
# tree, derive the topology, solve track lengths with Prism, and plot.
# Parsed contents: [str_symbols, str_formula, str_path, str_trivLen, str_tol]
parsed = cls_parser().fun_dlin_parse(sys.argv[1])

# Simplify the propositional formula into a nested list representation.
nested_formula = cls_symbolic_logic().fun_simplifyLogic(parsed[0], parsed[1])

# Build a digraph out of the nested logic statement list.
formula_graph = cls_digraph_formula().fun_formula_parse_tree(
    nested_formula, str_time)

# Compute the topology digraph from the formula digraph.
topology_graph = Topology().fun_nestedLogicDigraph_2_topologyDigraph(
    formula_graph, str_time)

# Find track lengths with Prism.
track_lengths = Prism_Compiler().fun_iterate_through_graph(
    topology_graph, parsed[2], parsed[3], parsed[4])

# Plot the final track layout.
Track_Layout().plot_final(topology_graph, track_lengths, str_time)
class LinkMonitor(app_manager.RyuApp):
    """Ryu application that measures link latency with probe packets.

    Periodically floods timestamped probe frames out of every known
    inter-switch port and records them when they re-enter the controller
    via packet-in on the far side of the link.
    """

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    # Probe frames use fixed MACs and a non-standard ethertype so they can
    # be recognized on the way back in.
    SRC_MAC = '00:00:00:00:00:00'
    DST_MAC = 'ff:ff:ff:ff:ff:ff'
    ETH_TYPE = 0x0815

    def __init__(self, *args, **kwargs):
        super(LinkMonitor, self).__init__(*args, **kwargs)
        self.datapaths = {}  # dpid -> datapath of registered switches
        self.link_latency_repository = LinkLatencyRepository()
        self.topology = Topology()
        # Background green thread driving the periodic probing loop.
        self.monitor_thread = hub.spawn(self._monitor)

    def _monitor(self):
        # Every 5 seconds, send one probe out of every known port of every
        # registered datapath, then dump the collected latencies.
        while True:
            time.sleep(5)
            for dp in self.datapaths.values():
                for port in self.topology.get_ports(dp.id):
                    self.send_test_packet(dp, TestPacket(port), port)
            self.logger.debug(self.link_latency_repository)

    def send_test_packet(self, datapath, packet_payload, out_port):
        # Build an Ethernet frame carrying the serialized probe payload and
        # emit it via OFPPacketOut on the given port.
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        pkt = packet.Packet()
        pkt.add_protocol(
            ethernet.ethernet(src=self.SRC_MAC, dst=self.DST_MAC,
                              ethertype=self.ETH_TYPE))
        pkt.add_protocol(str(packet_payload).encode())
        pkt.serialize()
        actions = [parser.OFPActionOutput(out_port)]
        out = parser.OFPPacketOut(datapath=datapath,
                                  buffer_id=ofproto.OFP_NO_BUFFER,
                                  in_port=ofproto.OFPP_CONTROLLER,
                                  actions=actions,
                                  data=pkt.data)
        self.logger.debug('sending msg %s to %016x; out_port %d',
                          packet_payload, datapath.id, out_port)
        datapath.send_msg(out)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # Intercept returning probe frames and feed them into the latency
        # repository; all other packet-ins are only logged.
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto  # NOTE(review): unused in this handler
        parser = datapath.ofproto_parser  # NOTE(review): unused in this handler
        pkt = packet.Packet(msg.data)
        eth_pkt = pkt.get_protocol(ethernet.ethernet)
        dst = eth_pkt.dst
        src = eth_pkt.src
        self.logger.debug("packet in from %016x; src=%s dst=%s",
                          datapath.id, src, dst)
        if src == self.SRC_MAC and dst == self.DST_MAC:
            # The last protocol entry is the raw probe payload bytes.
            payload = pkt.protocols[-1]
            self.logger.debug("test packet received")
            payload_string = payload.decode()
            self.logger.debug("payload: %s", payload_string)
            pkt = TestPacket.from_string(payload_string)
            dst_dpid = datapath.id
            # The sender is the switch on the far side of the sending port.
            src_dpid = self.topology.get_opposite_dpid(dst_dpid,
                                                       pkt._src_port)
            rpkt = ReceivedTestPacket(src_dpid, dst_dpid, pkt._send_ts)
            self.link_latency_repository.parse_test_packet(rpkt)

    @set_ev_cls(event.EventSwitchEnter)
    def handler_switch_enter(self, ev):
        # Refresh the port topology from the currently discovered links.
        for link in copy.copy(get_link(self)):
            self.topology.register_link(link.src.dpid, link.src.port_no,
                                        link.dst.dpid, link.dst.port_no)

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # install the table-miss flow entry so unmatched packets reach the
        # controller as packet-ins.
        match = parser.OFPMatch()
        actions = [
            parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                   ofproto.OFPCML_NO_BUFFER)
        ]
        self.add_flow(datapath, 0, match, actions)
        self.logger.debug('datapath %016x registered', datapath.id)
        self.datapaths[datapath.id] = datapath

    def add_flow(self, datapath, priority, match, actions):
        # Install a flow entry applying `actions` to packets matching
        # `match` at the given priority.
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [
            parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)
        ]
        mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                match=match, instructions=inst)
        datapath.send_msg(mod)
def test_get_ports__empty_topology(self):
    """An unknown dpid on an empty topology yields an empty port list."""
    self.assertEqual(Topology().get_ports(1), [])
def __init__(self, *args, **kwargs):
    """Initialize state and start the background monitoring loop."""
    super(LinkMonitor, self).__init__(*args, **kwargs)
    self.datapaths = {}  # dpid -> datapath of registered switches
    self.topology = Topology()
    self.link_latency_repository = LinkLatencyRepository()
    # Background green thread that periodically sends probe packets.
    self.monitor_thread = hub.spawn(self._monitor)
def reload_data_from_files(topology_files, on_the_fly_refs=False):
    """Rebuild ISD/AS database objects from a set of yaml topology files.

    :param list topology_files: paths of yaml topology files to load.
    :param bool on_the_fly_refs:
        passed through to AD.fill_from_topology as auto_refs.
    """
    ad_num = len(topology_files)
    print("> {} yaml topology files found".format(ad_num))
    isds = {}
    as_topos = []
    as_topo_dicts = {}
    same_as_ids = False
    # Iterate over all topology files and fill some data structures
    for topo_file in topology_files:
        topo_dict = get_topology(topo_file)
        topology = Topology.from_dict(topo_dict)
        isds[topology.isd_as[0]] = None
        # Detect duplicate AS IDs across files (handled below).
        if not same_as_ids and topology.isd_as[1] in as_topo_dicts:
            same_as_ids = True
        as_topo_dicts[topology.isd_as[1]] = topo_dict
        as_topos.append(topology)
    as_topos = sorted(as_topos, key=lambda t: t.isd_as[1])
    assert len(as_topos) == ad_num
    if same_as_ids:
        id_map = {}
        print("> Several ASes with identical IDs are found. Currently, this "
              "case is not supported. Renumerating ASes...")
        ad_id = 1
        for topo in as_topos:
            id_map[(topo.ad_id, topo.isd_id)] = ad_id
            topo.ad_id = ad_id
            ad_id += 1
        # Fixing routers: remap each router's neighbor to its new AS id.
        for topo in as_topos:
            routers = topo.get_all_border_routers()
            for router in routers:
                neighbor_id = router.interface.neighbor_ad
                new_neighbor_id = id_map[(neighbor_id,
                                          router.interface.neighbor_isd)]
                router.interface.neighbor_ad = new_neighbor_id
    # Create ISD objects
    for isd_id in sorted(isds.keys()):  # TODO: Does it need sorting?
        isd = ISD(id=isd_id)
        isd.save()
        isds[isd_id] = isd
    # First, save all ASes to avoid IntegrityError.
    # report_ranges maps an AS index to a progress percentage to print.
    report_ranges = {int(ad_num / 10.0 * x): x * 10 for x in range(1, 11)}
    for i, as_topo in enumerate(as_topos, start=1):
        if i in report_ranges:
            print("{}%".format(report_ranges[i]))
        AD.objects.update_or_create(as_id=as_topo.isd_as[1],
                                    isd=isds[as_topo.isd_as[0]],
                                    is_core_ad=as_topo.is_core_as)
    transaction.commit()
    print("> ASes instances were added")
    # Second, add routers, servers, etc.
    for as_topo in as_topos:
        # isd_as[0] is the ISD id, isd_as[1] the AS id.
        ad = AD.objects.get(id=as_topo.isd_as[1],
                            isd=isds[as_topo.isd_as[0]])
        topo_dict = as_topo_dicts[ad.id]
        ad.fill_from_topology(topo_dict, auto_refs=on_the_fly_refs)
        print('> AS {} is loaded'.format(ad))
    transaction.commit()
    transaction.set_autocommit(True)
def test_unknown_type(self):
    """An unrecognized service type raises SCIONKeyError."""
    with ntools.assert_raises(SCIONKeyError):
        Topology().get_own_config("asdf", 1)
# Pipeline: parse the .dlin input, simplify the formula, build the parse
# tree, derive the topology, solve track lengths with Prism, and plot.
# Parsed contents: [str_symbols, str_formula, str_path, str_trivLen, str_tol]
parsed = cls_parser().fun_dlin_parse(sys.argv[1])

# Simplify the propositional formula into a nested list representation.
nested_formula = cls_symbolic_logic().fun_simplifyLogic(parsed[0], parsed[1])

# Build a digraph out of the nested logic statement list.
formula_graph = cls_digraph_formula().fun_formula_parse_tree(
    nested_formula, str_time)

# Compute the topology digraph from the formula digraph.
topology_graph = Topology().fun_nestedLogicDigraph_2_topologyDigraph(
    formula_graph, str_time)

# Find track lengths with Prism.
track_lengths = Prism_Compiler().fun_iterate_through_graph(
    topology_graph, parsed[2], parsed[3], parsed[4])

# Plot the final track layout.
Track_Layout().plot_final(topology_graph, track_lengths, str_time)
def test_unknown_server(self):
    """A server name not present in the topology raises SCIONKeyError."""
    with ntools.assert_raises(SCIONKeyError):
        Topology().get_own_config("bs", "name")
def test_get_opposite_dpid__empty_topology(self):
    """A lookup on an empty topology falls back to dpid 0."""
    self.assertEqual(Topology().get_opposite_dpid(1, 19), 0)