def restartRouter(self):
    # Starts actual daemons without init (ie restart)
    # cd to per node directory
    self.cmd('cd {}/{}'.format(self.logdir, self.name))
    self.cmd('umask 000')
    # Re-enable to allow for report per run
    self.reportCores = True
    if self.version == None:
        self.version = self.cmd(
            os.path.join(self.daemondir, 'bgpd') + ' -v').split()[2]
        logger.info('{}: running version: {}'.format(self.name, self.version))
    # Start Zebra first
    if self.daemons['zebra'] == 1:
        zebra_path = os.path.join(self.daemondir, 'zebra')
        zebra_option = self.daemons_options['zebra']
        self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
            zebra_path, zebra_option, self.logdir, self.name
        ))
        self.waitOutput()
        logger.debug('{}: {} zebra started'.format(self, self.routertype))
        sleep(1, '{}: waiting for zebra to start'.format(self.name))
    # Start staticd next if required
    if self.daemons['staticd'] == 1:
        staticd_path = os.path.join(self.daemondir, 'staticd')
        staticd_option = self.daemons_options['staticd']
        self.cmd('{0} {1} > staticd.out 2> staticd.err &'.format(
            staticd_path, staticd_option, self.logdir, self.name
        ))
        self.waitOutput()
        logger.debug('{}: {} staticd started'.format(self, self.routertype))
    # Fix Link-Local Addresses
    # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local
    # addresses on start. Fix this
    self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
    # Now start all the other daemons
    for daemon in self.daemons:
        # Skip disabled daemons and zebra
        if self.daemons[daemon] == 0 or daemon == 'zebra' or daemon == 'staticd':
            continue
        daemon_path = os.path.join(self.daemondir, daemon)
        self.cmd('{0} {1} > {2}.out 2> {2}.err &'.format(
            daemon_path, self.daemons_options.get(daemon, ''), daemon
        ))
        self.waitOutput()
        logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))
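
# Illustrative sketch (not part of the original library): how a caller might
# relaunch the FRR daemons on a Mininet-level router object. The "router"
# handle and the availability of stopRouter() are assumptions about the
# surrounding topotest Router class, not code confirmed by this module.
def _example_restart(router):
    router.stopRouter()     # stop the currently running daemons (assumed helper)
    router.restartRouter()  # start zebra, staticd, then the remaining daemons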
def pytest_runtest_makereport(item, call): "Log all assert messages to default logger with error level" # Nothing happened if call.when == "call": pause = topotest_extra_config["pause"] else: pause = False title = "unset" if call.excinfo is None: error = False else: parent = item.parent modname = parent.module.__name__ # Treat skips as non errors, don't pause after if call.excinfo.typename == "Skipped": pause = False error = False logger.info('test skipped at "{}/{}": {}'.format( modname, item.name, call.excinfo.value)) else: error = True # Handle assert failures parent._previousfailed = item # pylint: disable=W0212 logger.error('test failed at "{}/{}": {}'.format( modname, item.name, call.excinfo.value)) title = "{}/{}".format(modname, item.name) # We want to pause, if requested, on any error not just test cases # (e.g., call.when == "setup") if not pause: pause = (topotest_extra_config["pause_on_error"] or topotest_extra_config["pause"]) # (topogen) Set topology error to avoid advancing in the test. tgen = get_topogen() if tgen is not None: # This will cause topogen to report error on `routers_have_failure`. tgen.set_error("{}/{}".format(modname, item.name)) commander = Commander("pytest") isatty = sys.stdout.isatty() error_cmd = None if error and topotest_extra_config["vtysh_on_error"]: error_cmd = commander.get_exec_path(["vtysh"]) elif error and topotest_extra_config["shell_on_error"]: error_cmd = os.getenv("SHELL", commander.get_exec_path(["bash"])) if error_cmd: is_tmux = bool(os.getenv("TMUX", "")) is_screen = not is_tmux and bool(os.getenv("STY", "")) is_xterm = not is_tmux and not is_screen and bool( os.getenv("DISPLAY", "")) channel = None win_info = None wait_for_channels = [] wait_for_procs = [] # Really would like something better than using this global here. # Not all tests use topogen though so get_topogen() won't work. for node in Mininet.g_mnet_inst.hosts.values(): pause = True if is_tmux: channel = ("{}-{}".format(os.getpid(), Commander.tmux_wait_gen) if not isatty else None) Commander.tmux_wait_gen += 1 wait_for_channels.append(channel) pane_info = node.run_in_window( error_cmd, new_window=win_info is None, background=True, title="{} ({})".format(title, node.name), name=title, tmux_target=win_info, wait_for=channel, ) if is_tmux: if win_info is None: win_info = pane_info elif is_xterm: assert isinstance(pane_info, subprocess.Popen) wait_for_procs.append(pane_info) # Now wait on any channels for channel in wait_for_channels: logger.debug("Waiting on TMUX channel %s", channel) commander.cmd_raises( [commander.get_exec_path("tmux"), "wait", channel]) for p in wait_for_procs: logger.debug("Waiting on TMUX xterm process %s", p) o, e = p.communicate() if p.wait(): logger.warning("xterm proc failed: %s:", proc_error(p, o, e)) if error and topotest_extra_config["cli_on_error"]: # Really would like something better than using this global here. # Not all tests use topogen though so get_topogen() won't work. if Mininet.g_mnet_inst: cli(Mininet.g_mnet_inst, title=title, background=False) else: logger.error("Could not launch CLI b/c no mininet exists yet") while pause and isatty: try: user = raw_input( 'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ') except NameError: user = input( 'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ') user = user.strip() if user == "cli": cli(Mininet.g_mnet_inst) elif user == "pdb": pdb.set_trace() elif user: print('Unrecognized input: "%s"' % user) else: break
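
# Illustrative sketch (assumption, not the original conftest code): the hook
# above reads flags from topotest_extra_config, which a pytest_configure()
# hook could populate from command-line options. The option names below are
# examples only and must match whatever pytest_addoption() actually registers.
def _example_pytest_configure(config):
    topotest_extra_config["pause"] = config.getoption("--pause", False)
    topotest_extra_config["pause_on_error"] = config.getoption(
        "--pause-on-error", False)
    topotest_extra_config["vtysh_on_error"] = config.getoption(
        "--vtysh-on-error", False)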
def create_route_maps(tgen, input_dict, build=False):
    """
    Create route-map on the devices as per the arguments passed

    Parameters
    ----------
    * `tgen` : Topogen object
    * `input_dict` : Input dict data, required when configuring from testcase
    * `build` : Only for initial setup phase this is set as True.

    Usage
    -----
    # route_maps: key, value pair for route-map name and its attribute
    # rmap_match_prefix_list_1: user given name for route-map
    # action: PERMIT/DENY
    # match: key, value pair for match criteria. prefix_lists, community-list,
    #        large-community-list or tag. Only one option at a time.
    # prefix_lists: name of prefix list
    # large-community-list: name of large community list
    # community-list: name of community list
    # tag: tag id for static routes
    # set: key, value pair for modifying route attributes
    # localpref: preference value for the network
    # med: metric value advertised for AS
    # aspath: set AS path value
    # weight: weight for the route
    # community: standard community value to be attached
    # large_community: large community value to be attached
    # community_additive: if set to "additive", adds community/large-community
    #                     value to the existing values of the network prefix

    Example:
    --------
    input_dict = {
        "r1": {
            "route_maps": {
                "rmap_match_prefix_list_1": [
                    {
                        "action": "PERMIT",
                        "match": {
                            "ipv4": {
                                "prefix_lists": "pf_list_1"
                            },
                            "ipv6": {
                                "prefix_lists": "pf_list_1"
                            },
                            "large-community-list": {
                                "id": "community_1",
                                "exact_match": True
                            },
                            "community-list": {
                                "id": "community_2",
                                "exact_match": True
                            },
                            "tag": "tag_id"
                        },
                        "set": {
                            "localpref": 150,
                            "med": 30,
                            "aspath": {
                                "as_num": 20000,
                                "as_action": "prepend",
                            },
                            "weight": 500,
                            "community": {
                                "num": "1:2 2:3",
                                "action": "additive"
                            },
                            "large_community": {
                                "num": "1:2:3 4:5:6",
                                "action": "additive"
                            },
                        }
                    }
                ]
            }
        }
    }

    Returns
    -------
    errormsg(str) or True
    """

    result = False
    logger.debug("Entering lib API: create_route_maps()")

    try:
        for router in input_dict.keys():
            if "route_maps" not in input_dict[router]:
                errormsg = "route_maps not present in input_dict"
                logger.info(errormsg)
                continue

            rmap_data = []
            for rmap_name, rmap_value in \
                    input_dict[router]["route_maps"].iteritems():

                for rmap_dict in rmap_value:
                    del_action = rmap_dict.setdefault("delete", False)

                    if del_action:
                        rmap_data.append("no route-map {}".format(rmap_name))
                        continue

                    if "action" not in rmap_dict:
                        errormsg = "action not present in input_dict"
                        logger.error(errormsg)
                        return False

                    rmap_action = rmap_dict.setdefault("action", "deny")

                    seq_id = rmap_dict.setdefault("seq_id", None)
                    if seq_id is None:
                        seq_id = get_seq_id("route_maps", router, rmap_name)
                    else:
                        set_seq_id("route_maps", router, seq_id, rmap_name)

                    rmap_data.append("route-map {} {} {}".format(
                        rmap_name, rmap_action, seq_id))

                    # Verifying if SET criteria is defined
                    if "set" in rmap_dict:
                        set_data = rmap_dict["set"]

                        local_preference = set_data.setdefault(
                            "localpref", None)
                        metric = set_data.setdefault("med", None)
                        as_path = set_data.setdefault("aspath", {})
                        weight = set_data.setdefault("weight", None)
                        community = set_data.setdefault("community", {})
                        large_community = set_data.setdefault(
                            "large_community", {})
                        set_action = set_data.setdefault("set_action", None)

                        # Local Preference
                        if local_preference:
                            rmap_data.append("set local-preference {}".format(
                                local_preference))

                        # Metric
                        if metric:
                            rmap_data.append("set metric {} \n".format(metric))

                        # AS Path Prepend
                        if as_path:
                            as_num = as_path.setdefault("as_num", None)
                            as_action = as_path.setdefault("as_action", None)
                            if as_action and as_num:
                                rmap_data.append("set as-path {} {}".format(
                                    as_action, as_num))

                        # Community
                        if community:
                            num = community.setdefault("num", None)
                            comm_action = community.setdefault("action", None)
                            if num:
                                cmd = "set community {}".format(num)
                                if comm_action:
                                    cmd = "{} {}".format(cmd, comm_action)
                                rmap_data.append(cmd)
                            else:
                                logger.error("In community, AS Num not"
                                             " provided")
                                return False

                        if large_community:
                            num = large_community.setdefault("num", None)
                            comm_action = large_community.setdefault(
                                "action", None)
                            if num:
                                cmd = "set large-community {}".format(num)
                                if comm_action:
                                    cmd = "{} {}".format(cmd, comm_action)
                                rmap_data.append(cmd)
                            else:
                                logger.error("In large_community, AS Num not"
                                             " provided")
                                return False

                        # Weight
                        if weight:
                            rmap_data.append("set weight {} \n".format(weight))

                    # Adding MATCH and SET sequence to RMAP if defined
                    if "match" in rmap_dict:
                        match_data = rmap_dict["match"]
                        ipv4_data = match_data.setdefault("ipv4", {})
                        ipv6_data = match_data.setdefault("ipv6", {})
                        community = match_data.setdefault(
                            "community-list", {})
                        large_community = match_data.setdefault(
                            "large-community-list", {})
                        tag = match_data.setdefault("tag", None)

                        if ipv4_data:
                            prefix_name = ipv4_data.setdefault(
                                "prefix_lists", None)
                            if prefix_name:
                                rmap_data.append("match ip address prefix-list"
                                                 " {}".format(prefix_name))

                        if ipv6_data:
                            prefix_name = ipv6_data.setdefault(
                                "prefix_lists", None)
                            if prefix_name:
                                rmap_data.append(
                                    "match ipv6 address "
                                    "prefix-list {}".format(prefix_name))

                        if tag:
                            rmap_data.append("match tag {}".format(tag))

                        if community:
                            if "id" not in community:
                                logger.error("'id' is mandatory for "
                                             "community-list in match"
                                             " criteria")
                                return False
                            cmd = "match community {}".format(community["id"])
                            exact_match = community.setdefault(
                                "exact_match", False)
                            if exact_match:
                                cmd = "{} exact-match".format(cmd)
                            rmap_data.append(cmd)

                        if large_community:
                            if "id" not in large_community:
                                logger.error("'id' is mandatory for "
                                             "large-community-list in match "
                                             "criteria")
                                return False
                            cmd = "match large-community {}".format(
                                large_community["id"])
                            exact_match = large_community.setdefault(
                                "exact_match", False)
                            if exact_match:
                                cmd = "{} exact-match".format(cmd)
                            rmap_data.append(cmd)

            result = create_common_configuration(tgen, router, rmap_data,
                                                 "route_maps", build=build)

    except InvalidCLIError:
        # Traceback
        errormsg = traceback.format_exc()
        logger.error(errormsg)
        return errormsg

    logger.debug("Exiting lib API: create_route_maps()")
    return result
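
# Illustrative usage sketch for create_route_maps(); the router name "r1" and
# the prefix-list name "pf_list_1" are hypothetical and assume the prefix list
# was configured earlier in the test.
def _example_create_route_maps(tgen):
    input_dict = {
        "r1": {
            "route_maps": {
                "rmap_match_pf_1": [{
                    "action": "permit",
                    "match": {"ipv4": {"prefix_lists": "pf_list_1"}},
                    "set": {"localpref": 150},
                }]
            }
        }
    }
    result = create_route_maps(tgen, input_dict)
    assert result is True, "route-map creation failed: {}".format(result)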
def create_prefix_lists(tgen, input_dict, build=False):
    """
    Create ip prefix lists as per the config provided in input
    JSON or input_dict

    Parameters
    ----------
    * `tgen` : Topogen object
    * `input_dict` : Input dict data, required when configuring from testcase
    * `build` : Only for initial setup phase this is set as True.

    Usage
    -----
    # pf_list_1: name of prefix-list, user defined
    # seqid: prefix-list seqid, auto-generated if not given by user
    # network: criteria for applying prefix-list
    # action: permit/deny
    # le: less than or equal number of bits
    # ge: greater than or equal number of bits

    Example
    -------
    input_dict = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {
                            "seqid": 10,
                            "network": "any",
                            "action": "permit",
                            "le": "32",
                            "ge": "30",
                            "delete": True
                        }
                    ]
                }
            }
        }
    }

    Returns
    -------
    errormsg or True
    """

    logger.debug("Entering lib API: create_prefix_lists()")
    result = False
    try:
        for router in input_dict.keys():
            if "prefix_lists" not in input_dict[router]:
                errormsg = "prefix_lists not present in input_dict"
                logger.info(errormsg)
                continue

            config_data = []
            prefix_lists = input_dict[router]["prefix_lists"]
            for addr_type, prefix_data in prefix_lists.iteritems():
                if not check_address_types(addr_type):
                    continue

                for prefix_name, prefix_list in prefix_data.iteritems():
                    for prefix_dict in prefix_list:
                        if "action" not in prefix_dict or \
                                "network" not in prefix_dict:
                            errormsg = "'action' or 'network' missing in" \
                                       " input_dict"
                            return errormsg

                        network_addr = prefix_dict["network"]
                        action = prefix_dict["action"]
                        le = prefix_dict.setdefault("le", None)
                        ge = prefix_dict.setdefault("ge", None)
                        seqid = prefix_dict.setdefault("seqid", None)
                        del_action = prefix_dict.setdefault("delete", False)
                        if seqid is None:
                            seqid = get_seq_id("prefix_lists", router,
                                               prefix_name)
                        else:
                            set_seq_id("prefix_lists", router, seqid,
                                       prefix_name)

                        if addr_type == "ipv4":
                            protocol = "ip"
                        else:
                            protocol = "ipv6"

                        cmd = "{} prefix-list {} seq {} {} {}".format(
                            protocol, prefix_name, seqid, action, network_addr)

                        if le:
                            cmd = "{} le {}".format(cmd, le)
                        if ge:
                            cmd = "{} ge {}".format(cmd, ge)

                        if del_action:
                            cmd = "no {}".format(cmd)

                        config_data.append(cmd)
            result = create_common_configuration(tgen, router, config_data,
                                                 "prefix_list", build=build)

    except InvalidCLIError:
        # Traceback
        errormsg = traceback.format_exc()
        logger.error(errormsg)
        return errormsg

    logger.debug("Exiting lib API: create_prefix_lists()")
    return result
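
# Illustrative usage sketch for create_prefix_lists(); the router name,
# prefix-list name and network below are hypothetical.
def _example_create_prefix_lists(tgen):
    input_dict = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_list_1": [
                        {"seqid": 10, "network": "10.0.20.0/24",
                         "action": "permit"}
                    ]
                }
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict)
    assert result is True, "prefix-list creation failed: {}".format(result)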
def create_static_routes(tgen, input_dict, build=False):
    """
    Create static routes for given router as defined in input_dict

    Parameters
    ----------
    * `tgen` : Topogen object
    * `input_dict` : Input dict data, required when configuring from testcase
    * `build` : Only for initial setup phase this is set as True.

    Usage
    -----
    input_dict should be in the format below:
    # static_routes: list of all routes
    # network: network address
    # no_of_ip: number of network addresses (routes) that will be configured
    # admin_distance: admin distance for route/routes.
    # next_hop: starting next-hop address
    # tag: tag id for static routes
    # delete: True if config to be removed. Default False.

    Example:
    "routers": {
        "r1": {
            "static_routes": [
                {
                    "network": "100.0.20.1/32",
                    "no_of_ip": 9,
                    "admin_distance": 100,
                    "next_hop": "10.0.0.1",
                    "tag": 4001,
                    "delete": true
                }
            ]
        }
    }

    Returns
    -------
    errormsg(str) or True
    """
    result = False
    logger.debug("Entering lib API: create_static_routes()")

    try:
        for router in input_dict.keys():
            if "static_routes" not in input_dict[router]:
                errormsg = "static_routes not present in input_dict"
                logger.info(errormsg)
                continue

            static_routes_list = []
            static_routes = input_dict[router]["static_routes"]
            for static_route in static_routes:
                del_action = static_route.setdefault("delete", False)
                # No of IPs
                no_of_ip = static_route.setdefault("no_of_ip", 1)
                admin_distance = static_route.setdefault(
                    "admin_distance", None)
                tag = static_route.setdefault("tag", None)
                if "next_hop" not in static_route or \
                        "network" not in static_route:
                    errormsg = "'next_hop' or 'network' missing in" \
                               " input_dict"
                    return errormsg

                next_hop = static_route["next_hop"]
                network = static_route["network"]
                ip_list = generate_ips([network], no_of_ip)
                for ip in ip_list:
                    addr_type = validate_ip_address(ip)
                    if addr_type == "ipv4":
                        cmd = "ip route {} {}".format(ip, next_hop)
                    else:
                        cmd = "ipv6 route {} {}".format(ip, next_hop)

                    if tag:
                        cmd = "{} tag {}".format(cmd, str(tag))

                    if admin_distance:
                        cmd = "{} {}".format(cmd, admin_distance)

                    if del_action:
                        cmd = "no {}".format(cmd)

                    static_routes_list.append(cmd)

            result = create_common_configuration(tgen, router,
                                                 static_routes_list,
                                                 "static_route", build=build)

    except InvalidCLIError:
        # Traceback
        errormsg = traceback.format_exc()
        logger.error(errormsg)
        return errormsg

    logger.debug("Exiting lib API: create_static_routes()")
    return result
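
# Illustrative usage sketch for create_static_routes(); the addresses, tag and
# router name are hypothetical.
def _example_create_static_routes(tgen):
    input_dict = {
        "r1": {
            "static_routes": [
                {"network": "100.0.20.1/32", "no_of_ip": 2,
                 "next_hop": "10.0.0.2", "tag": 4001}
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "static route creation failed: {}".format(result)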
def reset_config_on_routers(tgen, routerName=None):
    """
    Resets configuration on routers to the snapshot created using input JSON
    file. It replaces existing router configuration with FRRCFG_BKUP_FILE

    Parameters
    ----------
    * `tgen` : Topogen object
    * `routerName` : router config is to be reset
    """

    logger.debug("Entering API: reset_config_on_routers")

    router_list = tgen.routers()
    for rname, router in router_list.iteritems():
        if routerName and routerName != rname:
            continue

        cfg = router.run("vtysh -c 'show running'")
        fname = "{}/{}/frr.sav".format(TMPDIR, rname)
        dname = "{}/{}/delta.conf".format(TMPDIR, rname)
        f = open(fname, "w")
        for line in cfg.split("\n"):
            line = line.strip()

            if (line == "Building configuration..." or
                    line == "Current configuration:" or
                    not line):
                continue
            f.write(line)
            f.write("\n")

        f.close()

        command = "/usr/lib/frr/frr-reload.py --input {}/{}/frr.sav" \
                  " --test {}/{}/frr_json_initial.conf > {}". \
            format(TMPDIR, rname, TMPDIR, rname, dname)
        result = call(command, shell=True, stderr=SUB_STDOUT)

        # Assert if command fail
        if result > 0:
            errormsg = ("Command:{} is failed due to non-zero exit"
                        " code".format(command))
            return errormsg

        f = open(dname, "r")
        delta = StringIO.StringIO()
        delta.write("configure terminal\n")
        t_delta = f.read()
        for line in t_delta.split("\n"):
            line = line.strip()
            if (line == "Lines To Delete" or
                    line == "===============" or
                    line == "Lines To Add" or
                    line == "============" or
                    not line):
                continue
            delta.write(line)
            delta.write("\n")

        delta.write("end\n")
        output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False)

        logger.info("New configuration for router {}:".format(rname))
        delta.close()
        delta = StringIO.StringIO()
        cfg = router.run("vtysh -c 'show running'")
        for line in cfg.split("\n"):
            line = line.strip()
            delta.write(line)
            delta.write("\n")

        # Router current configuration to log file or console if
        # "show_router_config" is defined in "pytest.ini"
        if show_router_config:
            logger.info(delta.getvalue())

        delta.close()

    logger.debug("Exiting API: reset_config_on_routers")
    return True
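
# Illustrative usage sketch: a per-testcase cleanup step that restores the
# initial JSON-derived configuration, either on every router or on a single
# router. The router name "r1" is hypothetical.
def _example_reset_config(tgen):
    result = reset_config_on_routers(tgen)                    # all routers
    assert result is True, result
    result = reset_config_on_routers(tgen, routerName="r1")   # one router only
    assert result is True, result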
def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): """ This API is to verify ospf neighborship by running show ip ospf neighbour command, Parameters ---------- * `tgen` : Topogen object * `topo` : json file data * `dut`: device under test * `input_dict` : Input dict data, required when configuring from testcase * `lan` : verify neighbors in lan topology Usage ----- 1. To check FULL neighbors. verify_ospf_neighbor(tgen, topo, dut=dut) 2. To check neighbors with their roles. input_dict = { "r0": { "ospf": { "neighbors": { "r1": { "state": "Full", "role": "DR" }, "r2": { "state": "Full", "role": "DROther" }, "r3": { "state": "Full", "role": "DROther" } } } } } result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True) Returns ------- True or False (Error Message) """ logger.debug("Entering lib API: verify_ospf_neighbor()") result = False if input_dict: for router, rnode in tgen.routers().items(): if "ospf" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF neighborship on router %s:", router) show_ospf_json = run_frr_cmd(rnode, "show ip ospf neighbor all json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" return errormsg ospf_data_list = input_dict[router]["ospf"] ospf_nbr_list = ospf_data_list["neighbors"] for ospf_nbr, nbr_data in ospf_nbr_list.items(): data_ip = topo["routers"][ospf_nbr]["links"] data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"] if ospf_nbr in data_ip: nbr_details = nbr_data[ospf_nbr] elif lan: for switch in topo["switches"]: if "ospf" in topo["switches"][switch]["links"][router]: neighbor_ip = data_ip[switch]["ipv4"].split("/")[0] else: continue else: neighbor_ip = data_ip[router]["ipv4"].split("/")[0] nh_state = None neighbor_ip = neighbor_ip.lower() nbr_rid = data_rid try: nh_state = show_ospf_json[nbr_rid][0]["state"].split( "/")[0] intf_state = show_ospf_json[nbr_rid][0]["state"].split( "/")[1] except KeyError: errormsg = "[DUT: {}] OSPF peer {} missing".format( router, nbr_rid) return errormsg nbr_state = nbr_data.setdefault("state", None) nbr_role = nbr_data.setdefault("role", None) if nbr_state: if nbr_state == nh_state: logger.info( "[DUT: {}] OSPF Nbr is {}:{} State {}".format( router, ospf_nbr, nbr_rid, nh_state)) result = True else: errormsg = ("[DUT: {}] OSPF is not Converged, neighbor" " state is {}".format(router, nh_state)) return errormsg if nbr_role: if nbr_role == intf_state: logger.info( "[DUT: {}] OSPF Nbr is {}: {} Role {}".format( router, ospf_nbr, nbr_rid, nbr_role)) else: errormsg = ("[DUT: {}] OSPF is not Converged with rid" "{}, role is {}".format( router, nbr_rid, intf_state)) return errormsg continue else: for router, rnode in tgen.routers().items(): if "ospf" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF neighborship on router %s:", router) show_ospf_json = run_frr_cmd(rnode, "show ip ospf neighbor all json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" return errormsg ospf_data_list = topo["routers"][router]["ospf"] ospf_neighbors = ospf_data_list["neighbors"] total_peer = 0 total_peer = len(ospf_neighbors.keys()) no_of_ospf_nbr = 0 ospf_nbr_list = ospf_data_list["neighbors"] no_of_peer = 0 for ospf_nbr, nbr_data in ospf_nbr_list.items(): if nbr_data: data_ip = 
topo["routers"][nbr_data["nbr"]]["links"] data_rid = topo["routers"][ nbr_data["nbr"]]["ospf"]["router_id"] else: data_ip = topo["routers"][ospf_nbr]["links"] data_rid = topo["routers"][ospf_nbr]["ospf"]["router_id"] if ospf_nbr in data_ip: nbr_details = nbr_data[ospf_nbr] elif lan: for switch in topo["switches"]: if "ospf" in topo["switches"][switch]["links"][router]: neighbor_ip = data_ip[switch]["ipv4"].split("/")[0] else: continue else: neighbor_ip = data_ip[router]["ipv4"].split("/")[0] nh_state = None neighbor_ip = neighbor_ip.lower() nbr_rid = data_rid try: nh_state = show_ospf_json[nbr_rid][0]["state"].split( "/")[0] except KeyError: errormsg = "[DUT: {}] OSPF peer {} missing,from " "{} ".format( router, nbr_rid, ospf_nbr) return errormsg if nh_state == "Full": no_of_peer += 1 if no_of_peer == total_peer: logger.info("[DUT: {}] OSPF is Converged".format(router)) result = True else: errormsg = "[DUT: {}] OSPF is not Converged".format(router) return errormsg logger.debug("Exiting API: verify_ospf_neighbor()") return result
def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True): """ Helper API to create ospf global configuration. Parameters ---------- * `tgen` : Topogen object * `input_dict` : Input dict data, required when configuring from testcase * `router` : router to be configured. * `build` : Only for initial setup phase this is set as True. * `load_config` : Loading the config to router this is set as True. Returns ------- True or False """ result = False logger.debug("Entering lib API: __create_ospf_global()") try: ospf_data = input_dict[router]["ospf"] del_ospf_action = ospf_data.setdefault("delete", False) if del_ospf_action: config_data = ["no router ospf"] result = create_common_configuration(tgen, router, config_data, "ospf", build, load_config) return result config_data = [] cmd = "router ospf" config_data.append(cmd) # router id router_id = ospf_data.setdefault("router_id", None) del_router_id = ospf_data.setdefault("del_router_id", False) if del_router_id: config_data.append("no ospf router-id") if router_id: config_data.append("ospf router-id {}".format(router_id)) # redistribute command redistribute_data = ospf_data.setdefault("redistribute", {}) if redistribute_data: for redistribute in redistribute_data: if "redist_type" not in redistribute: logger.debug( "Router %s: 'redist_type' not present in " "input_dict", router) else: cmd = "redistribute {}".format(redistribute["redist_type"]) for red_type in redistribute_data: if "route_map" in red_type: cmd = cmd + " route-map {}".format( red_type["route_map"]) del_action = redistribute.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) # area information area_data = ospf_data.setdefault("area", {}) if area_data: for area in area_data: if "id" not in area: logger.debug( "Router %s: 'area id' not present in " "input_dict", router) else: cmd = "area {}".format(area["id"]) if "type" in area: cmd = cmd + " {}".format(area["type"]) del_action = area.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) result = create_common_configuration(tgen, router, config_data, "ospf", build, load_config) # summary information summary_data = ospf_data.setdefault("summary-address", {}) if summary_data: for summary in summary_data: if "prefix" not in summary: logger.debug( "Router %s: 'summary-address' not present in " "input_dict", router, ) else: cmd = "summary {}/{}".format(summary["prefix"], summary["mask"]) _tag = summary.setdefault("tag", None) if _tag: cmd = "{} tag {}".format(cmd, _tag) _advertise = summary.setdefault("advertise", True) if not _advertise: cmd = "{} no-advertise".format(cmd) del_action = summary.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) result = create_common_configuration(tgen, router, config_data, "ospf", build, load_config) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() logger.error(errormsg) return errormsg logger.debug("Exiting lib API: create_ospf_global()") return result
def verify_ospf_summary(tgen, topo, dut, input_dict):
    """
    This API is to verify ospf summary addresses by running
    show ip ospf summary detail command.

    Parameters
    ----------
    * `tgen` : Topogen object
    * `topo` : topology descriptions
    * `dut`: device under test
    * `input_dict` : Input dict data, required when configuring from testcase

    Usage
    -----
    input_dict = {
        "11.0.0.0/8": {
            "Summary address": "11.0.0.0/8",
            "Metric-type": "E2",
            "Metric": 20,
            "Tag": 0,
            "External route count": 5
        }
    }
    result = verify_ospf_summary(tgen, topo, dut, input_dict)

    Returns
    -------
    True or False (Error Message)
    """

    logger.debug("Entering lib API: verify_ospf_summary()")
    result = False
    router = dut

    logger.info("Verifying OSPF summary on router %s:", router)

    if "ospf" not in topo["routers"][dut]:
        errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
            router)
        return errormsg

    rnode = tgen.routers()[dut]
    show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json",
                                 isjson=True)

    # Verifying output dictionary show_ospf_json is empty or not
    if not bool(show_ospf_json):
        errormsg = "OSPF is not running"
        return errormsg

    # To find neighbor ip type
    ospf_summary_data = input_dict

    for ospf_summ, summ_data in ospf_summary_data.items():
        if ospf_summ not in show_ospf_json:
            continue
        summary = ospf_summary_data[ospf_summ]["Summary address"]
        if summary in show_ospf_json:
            for summ in summ_data:
                if summ_data[summ] == show_ospf_json[summary][summ]:
                    logger.info(
                        "[DUT: %s] OSPF summary %s:%s is %s",
                        router, summary, summ, summ_data[summ],
                    )
                    result = True
                else:
                    errormsg = ("[DUT: {}] OSPF summary {}:{} is {},"
                                " Expected is {}".format(
                                    router, summary, summ,
                                    show_ospf_json[summary][summ],
                                    summ_data[summ]))
                    return errormsg

    logger.debug("Exiting API: verify_ospf_summary()")
    return result
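
# Illustrative usage sketch for verify_ospf_summary(); the summary prefix and
# expected attributes are hypothetical, and "r0" is a hypothetical DUT name.
def _example_verify_ospf_summary(tgen, topo):
    input_dict = {
        "11.0.0.0/8": {
            "Summary address": "11.0.0.0/8",
            "Metric-type": "E2",
            "Metric": 20,
        }
    }
    result = verify_ospf_summary(tgen, topo, "r0", input_dict)
    assert result is True, result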
def verify_ospf_database(tgen, topo, dut, input_dict): """ This API is to verify ospf lsa's by running show ip ospf database command. Parameters ---------- * `tgen` : Topogen object * `dut`: device under test * `input_dict` : Input dict data, required when configuring from testcase * `topo` : next to be verified Usage ----- input_dict = { "areas": { "0.0.0.0": { "Router Link States": { "100.1.1.0-100.1.1.0": { "LSID": "100.1.1.0", "Advertised router": "100.1.1.0", "LSA Age": 130, "Sequence Number": "80000006", "Checksum": "a703", "Router links": 3 } }, "Net Link States": { "10.0.0.2-100.1.1.1": { "LSID": "10.0.0.2", "Advertised router": "100.1.1.1", "LSA Age": 137, "Sequence Number": "80000001", "Checksum": "9583" } }, }, } } result = verify_ospf_database(tgen, topo, dut, input_dict) Returns ------- True or False (Error Message) """ result = False router = dut logger.debug("Entering lib API: verify_ospf_database()") if "ospf" not in topo["routers"][dut]: errormsg = "[DUT: {}] OSPF is not configured on the router.".format( dut) return errormsg rnode = tgen.routers()[dut] logger.info("Verifying OSPF interface on router %s:", dut) show_ospf_json = run_frr_cmd(rnode, "show ip ospf database json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" return errormsg # for inter and inter lsa's ospf_db_data = input_dict.setdefault("areas", None) ospf_external_lsa = input_dict.setdefault("AS External Link States", None) if ospf_db_data: for ospf_area, area_lsa in ospf_db_data.items(): if ospf_area in show_ospf_json["areas"]: if "Router Link States" in area_lsa: for lsa in area_lsa["Router Link States"]: if (lsa in show_ospf_json["areas"][ospf_area] ["Router Link States"]): logger.info( "[DUT: %s] OSPF LSDB area %s:Router " "LSA %s", router, ospf_area, lsa, ) result = True else: errormsg = ( "[DUT: {}] OSPF LSDB area {}: expected" " Router LSA is {}".format( router, ospf_area, lsa)) return errormsg if "Net Link States" in area_lsa: for lsa in area_lsa["Net Link States"]: if lsa in show_ospf_json["areas"][ospf_area][ "Net Link States"]: logger.info( "[DUT: %s] OSPF LSDB area %s:Network " "LSA %s", router, ospf_area, lsa, ) result = True else: errormsg = ( "[DUT: {}] OSPF LSDB area {}: expected" " Network LSA is {}".format( router, ospf_area, lsa)) return errormsg if "Summary Link States" in area_lsa: for lsa in area_lsa["Summary Link States"]: if (lsa in show_ospf_json["areas"][ospf_area] ["Summary Link States"]): logger.info( "[DUT: %s] OSPF LSDB area %s:Summary " "LSA %s", router, ospf_area, lsa, ) result = True else: errormsg = ( "[DUT: {}] OSPF LSDB area {}: expected" " Summary LSA is {}".format( router, ospf_area, lsa)) return errormsg if "ASBR-Summary Link States" in area_lsa: for lsa in area_lsa["ASBR-Summary Link States"]: if (lsa in show_ospf_json["areas"][ospf_area] ["ASBR-Summary Link States"]): logger.info( "[DUT: %s] OSPF LSDB area %s:ASBR Summary " "LSA %s", router, ospf_area, lsa, ) result = True else: errormsg = ( "[DUT: {}] OSPF LSDB area {}: expected" " ASBR Summary LSA is {}".format( router, ospf_area, lsa)) return errormsg if ospf_external_lsa: for ospf_ext_lsa, ext_lsa_data in ospf_external_lsa.items(): if ospf_ext_lsa in show_ospf_json["AS External Link States"]: logger.info("[DUT: %s] OSPF LSDB:External LSA %s", router, ospf_ext_lsa) result = True else: errormsg = ("[DUT: {}] OSPF LSDB : expected" " External LSA is {}".format(router, ospf_ext_lsa)) return errormsg logger.debug("Exiting API: 
verify_ospf_database()") return result
def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None): """ This API is to verify ospf routes by running show ip ospf interface command. Parameters ---------- * `tgen` : Topogen object * `topo` : topology descriptions * `dut`: device under test * `lan`: if set to true this interface belongs to LAN. * `input_dict` : Input dict data, required when configuring from testcase Usage ----- input_dict= { 'r0': { 'links':{ 's1': { 'ospf':{ 'priority':98, 'timerDeadSecs': 4, 'area': '0.0.0.3', 'mcastMemberOspfDesignatedRouters': True, 'mcastMemberOspfAllRouters': True, 'ospfEnabled': True, } } } } } result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) Returns ------- True or False (Error Message) """ logger.debug("Entering lib API: verify_ospf_interface()") result = False for router, rnode in tgen.routers().items(): if "ospf" not in topo["routers"][router]: continue if dut is not None and dut != router: continue logger.info("Verifying OSPF interface on router %s:", router) show_ospf_json = run_frr_cmd(rnode, "show ip ospf interface json", isjson=True) # Verifying output dictionary show_ospf_json is empty or not if not bool(show_ospf_json): errormsg = "OSPF is not running" return errormsg # To find neighbor ip type ospf_intf_data = input_dict[router]["links"] for ospf_intf, intf_data in ospf_intf_data.items(): intf = topo["routers"][router]["links"][ospf_intf]["interface"] if intf in show_ospf_json["interfaces"]: for intf_attribute in intf_data["ospf"]: if (intf_data["ospf"][intf_attribute] == show_ospf_json[ "interfaces"][intf][intf_attribute]): logger.info( "[DUT: %s] OSPF interface %s: %s is %s", router, intf, intf_attribute, intf_data["ospf"][intf_attribute], ) else: errormsg = "[DUT: {}] OSPF interface {}: {} is {}, \ Expected is {}".format( router, intf, intf_attribute, intf_data["ospf"][intf_attribute], show_ospf_json["interfaces"][intf][intf_attribute], ) return errormsg result = True logger.debug("Exiting API: verify_ospf_interface()") return result
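
# Illustrative usage sketch for verify_ospf_interface(), mirroring its
# docstring; the expected interface attributes for the r0-s1 link are
# hypothetical values.
def _example_verify_ospf_interface(tgen, topo):
    input_dict = {
        "r0": {
            "links": {
                "s1": {"ospf": {"priority": 98, "timerDeadSecs": 4}}
            }
        }
    }
    result = verify_ospf_interface(tgen, topo, dut="r0", input_dict=input_dict)
    assert result is True, result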
def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True, ospf="ospf"): """ Helper API to create ospf global configuration. Parameters ---------- * `tgen` : Topogen object * `input_dict` : Input dict data, required when configuring from testcase * `router` : router to be configured. * `build` : Only for initial setup phase this is set as True. * `load_config` : Loading the config to router this is set as True. * `ospf` : either 'ospf' or 'ospf6' Usage ----- input_dict = { "routers": { "r1": { "links": { "r3": { "ipv6": "2013:13::1/64", "ospf6": { "hello_interval": 1, "dead_interval": 4, "network": "point-to-point" } } }, "ospf6": { "router_id": "1.1.1.1", "neighbors": { "r3": { "area": "1.1.1.1" } } } } } Returns ------- True or False """ result = False logger.debug("Entering lib API: __create_ospf_global()") try: ospf_data = input_dict[router][ospf] del_ospf_action = ospf_data.setdefault("delete", False) if del_ospf_action: config_data = ["no router {}".format(ospf)] result = create_common_configuration(tgen, router, config_data, ospf, build, load_config) return result config_data = [] cmd = "router {}".format(ospf) config_data.append(cmd) # router id router_id = ospf_data.setdefault("router_id", None) del_router_id = ospf_data.setdefault("del_router_id", False) if del_router_id: config_data.append("no {} router-id".format(ospf)) if router_id: config_data.append("{} router-id {}".format(ospf, router_id)) # redistribute command redistribute_data = ospf_data.setdefault("redistribute", {}) if redistribute_data: for redistribute in redistribute_data: if "redist_type" not in redistribute: logger.debug( "Router %s: 'redist_type' not present in " "input_dict", router) else: cmd = "redistribute {}".format(redistribute["redist_type"]) for red_type in redistribute_data: if "route_map" in red_type: cmd = cmd + " route-map {}".format( red_type["route_map"]) del_action = redistribute.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) # area information area_data = ospf_data.setdefault("area", {}) if area_data: for area in area_data: if "id" not in area: logger.debug( "Router %s: 'area id' not present in " "input_dict", router) else: cmd = "area {}".format(area["id"]) if "type" in area: cmd = cmd + " {}".format(area["type"]) del_action = area.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) # area interface information for ospf6d only if ospf == "ospf6": area_iface = ospf_data.setdefault("neighbors", {}) if area_iface: for neighbor in area_iface: if "area" in area_iface[neighbor]: iface = input_dict[router]["links"][neighbor][ "interface"] cmd = "interface {} area {}".format( iface, area_iface[neighbor]["area"]) if area_iface[neighbor].setdefault("delete", False): cmd = "no {}".format(cmd) config_data.append(cmd) # summary information summary_data = ospf_data.setdefault("summary-address", {}) if summary_data: for summary in summary_data: if "prefix" not in summary: logger.debug( "Router %s: 'summary-address' not present in " "input_dict", router, ) else: cmd = "summary {}/{}".format(summary["prefix"], summary["mask"]) _tag = summary.setdefault("tag", None) if _tag: cmd = "{} tag {}".format(cmd, _tag) _advertise = summary.setdefault("advertise", True) if not _advertise: cmd = "{} no-advertise".format(cmd) del_action = summary.setdefault("delete", False) if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) result = create_common_configuration(tgen, router, config_data, ospf, build, 
load_config) except InvalidCLIError: # Traceback errormsg = traceback.format_exc() logger.error(errormsg) return errormsg logger.debug("Exiting lib API: create_ospf_global()") return result
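
# Illustrative usage sketch: __create_ospf_global() is a helper that is
# normally reached through a public create_router_ospf()-style wrapper;
# calling it directly as below is only for illustration, and the router name
# and router-id are hypothetical.
def _example_create_ospf_global(tgen):
    input_dict = {"r1": {"ospf6": {"router_id": "1.1.1.1"}}}
    result = __create_ospf_global(tgen, input_dict, "r1", ospf="ospf6")
    assert result is True, result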
def build_topo_from_json(tgen, topo): """ Reads configuration from JSON file. Adds routers, creates interface names dynamically and link routers as defined in JSON to create topology. Assigns IPs dynamically to all interfaces of each router. * `tgen`: Topogen object * `topo`: json file data """ ROUTER_LIST = sorted(topo['routers'].keys(), key=lambda x: int(re_search('\d+', x).group(0))) listRouters = ROUTER_LIST[:] for routerN in ROUTER_LIST: logger.info('Topo: Add router {}'.format(routerN)) tgen.add_router(routerN) listRouters.append(routerN) if 'ipv4base' in topo: ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4']) ipv4Step = 2**(32 - topo['link_ip_start']['v4mask']) if topo['link_ip_start']['v4mask'] < 32: ipv4Next += 1 if 'ipv6base' in topo: ipv6Next = ipaddr.IPv6Address(topo['link_ip_start']['ipv6']) ipv6Step = 2**(128 - topo['link_ip_start']['v6mask']) if topo['link_ip_start']['v6mask'] < 127: ipv6Next += 1 for router in listRouters: topo['routers'][router]['nextIfname'] = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces if 'links' in topo['routers'][curRouter]: def link_sort(x): if x == 'lo': return 0 elif 'link' in x: return int(x.split('-link')[1]) else: return int(re_search('\d+', x).group(0)) for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \ iteritems(), key=lambda x: link_sort(x[0])): currRouter_lo_json = \ topo['routers'][curRouter]['links'][destRouterLink] # Loopback interfaces if 'type' in data and data['type'] == 'loopback': if 'ipv4' in currRouter_lo_json and \ currRouter_lo_json['ipv4'] == 'auto': currRouter_lo_json['ipv4'] = '{}{}.{}/{}'. \ format(topo['lo_prefix']['ipv4'], number_to_row(curRouter), \ number_to_column(curRouter), topo['lo_prefix']['v4mask']) if 'ipv6' in currRouter_lo_json and \ currRouter_lo_json['ipv6'] == 'auto': currRouter_lo_json['ipv6'] = '{}{}:{}/{}'. 
\ format(topo['lo_prefix']['ipv6'], number_to_row(curRouter), \ number_to_column(curRouter), topo['lo_prefix']['v6mask']) if "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList tempList = destRouterLink.split("-") # destRouter destRouter = tempList.pop(0) # Current Router Link tempList.insert(0, curRouter) curRouterLink = "-".join(tempList) else: destRouter = destRouterLink curRouterLink = curRouter if destRouter in listRouters: currRouter_link_json = \ topo['routers'][curRouter]['links'][destRouterLink] destRouter_link_json = \ topo['routers'][destRouter]['links'][curRouterLink] # Assigning name to interfaces currRouter_link_json['interface'] = \ '{}-{}-eth{}'.format(curRouter, destRouter, topo['routers'] \ [curRouter]['nextIfname']) destRouter_link_json['interface'] = \ '{}-{}-eth{}'.format(destRouter, curRouter, topo['routers'] \ [destRouter]['nextIfname']) topo['routers'][curRouter]['nextIfname'] += 1 topo['routers'][destRouter]['nextIfname'] += 1 # Linking routers to each other as defined in JSON file tgen.gears[curRouter].add_link(tgen.gears[destRouter], topo['routers'][curRouter]['links'][destRouterLink] \ ['interface'], topo['routers'][destRouter]['links'] \ [curRouterLink]['interface']) # IPv4 if 'ipv4' in currRouter_link_json: if currRouter_link_json['ipv4'] == 'auto': currRouter_link_json['ipv4'] = \ '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \ 'v4mask']) destRouter_link_json['ipv4'] = \ '{}/{}'.format(ipv4Next + 1, topo['link_ip_start'][ \ 'v4mask']) ipv4Next += ipv4Step # IPv6 if 'ipv6' in currRouter_link_json: if currRouter_link_json['ipv6'] == 'auto': currRouter_link_json['ipv6'] = \ '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \ 'v6mask']) destRouter_link_json['ipv6'] = \ '{}/{}'.format(ipv6Next + 1, topo['link_ip_start'][ \ 'v6mask']) ipv6Next = ipaddr.IPv6Address( int(ipv6Next) + ipv6Step) logger.debug( "Generated link data for router: %s\n%s", curRouter, json_dumps(topo["routers"][curRouter]["links"], indent=4, sort_keys=True))
def build_topo_from_json(tgen, topo=None): """ Reads configuration from JSON file. Adds routers, creates interface names dynamically and link routers as defined in JSON to create topology. Assigns IPs dynamically to all interfaces of each router. * `tgen`: Topogen object * `topo`: json file data, or use tgen.json_topo if None """ if topo is None: topo = tgen.json_topo router_list = sorted(topo["routers"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))) switch_list = [] if "switches" in topo: switch_list = sorted(topo["switches"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))) listRouters = sorted(router_list[:]) listSwitches = sorted(switch_list[:]) listAllRouters = deepcopy(listRouters) dictSwitches = {} for routerN in router_list: logger.info("Topo: Add router {}".format(routerN)) tgen.add_router(routerN) for switchN in switch_list: logger.info("Topo: Add switch {}".format(switchN)) dictSwitches[switchN] = tgen.add_switch(switchN) if "ipv4base" in topo: ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"]) ipv4Step = 2**(32 - topo["link_ip_start"]["v4mask"]) if topo["link_ip_start"]["v4mask"] < 32: ipv4Next += 1 if "ipv6base" in topo: ipv6Next = ipaddress.IPv6Address(topo["link_ip_start"]["ipv6"]) ipv6Step = 2**(128 - topo["link_ip_start"]["v6mask"]) if topo["link_ip_start"]["v6mask"] < 127: ipv6Next += 1 for router in listRouters: topo["routers"][router]["nextIfname"] = 0 router_count = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces if "links" in topo["routers"][curRouter]: for destRouterLink, data in sorted( topo["routers"][curRouter]["links"].items()): currRouter_lo_json = topo["routers"][curRouter]["links"][ destRouterLink] # Loopback interfaces if "type" in data and data["type"] == "loopback": router_count += 1 if ("ipv4" in currRouter_lo_json and currRouter_lo_json["ipv4"] == "auto"): currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format( topo["lo_prefix"]["ipv4"], router_count, number_to_column(curRouter), topo["lo_prefix"]["v4mask"], ) if ("ipv6" in currRouter_lo_json and currRouter_lo_json["ipv6"] == "auto"): currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format( topo["lo_prefix"]["ipv6"], router_count, number_to_column(curRouter), topo["lo_prefix"]["v6mask"], ) if "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList tempList = destRouterLink.split("-") # destRouter destRouter = tempList.pop(0) # Current Router Link tempList.insert(0, curRouter) curRouterLink = "-".join(tempList) else: destRouter = destRouterLink curRouterLink = curRouter if destRouter in listRouters: currRouter_link_json = topo["routers"][curRouter]["links"][ destRouterLink] destRouter_link_json = topo["routers"][destRouter][ "links"][curRouterLink] # Assigning name to interfaces currRouter_link_json["interface"] = "{}-{}-eth{}".format( curRouter, destRouter, topo["routers"][curRouter]["nextIfname"]) destRouter_link_json["interface"] = "{}-{}-eth{}".format( destRouter, curRouter, topo["routers"][destRouter]["nextIfname"]) # add link interface destRouter_link_json[ "peer-interface"] = "{}-{}-eth{}".format( curRouter, destRouter, topo["routers"][curRouter]["nextIfname"]) currRouter_link_json[ "peer-interface"] = "{}-{}-eth{}".format( destRouter, curRouter, topo["routers"][destRouter]["nextIfname"]) topo["routers"][curRouter]["nextIfname"] += 1 topo["routers"][destRouter]["nextIfname"] += 1 # Linking routers to each other as defined in JSON file tgen.gears[curRouter].add_link( tgen.gears[destRouter], 
topo["routers"][curRouter]["links"][destRouterLink] ["interface"], topo["routers"][destRouter]["links"][curRouterLink] ["interface"], ) # IPv4 if "ipv4" in currRouter_link_json: if currRouter_link_json["ipv4"] == "auto": currRouter_link_json["ipv4"] = "{}/{}".format( ipv4Next, topo["link_ip_start"]["v4mask"]) destRouter_link_json["ipv4"] = "{}/{}".format( ipv4Next + 1, topo["link_ip_start"]["v4mask"]) ipv4Next += ipv4Step # IPv6 if "ipv6" in currRouter_link_json: if currRouter_link_json["ipv6"] == "auto": currRouter_link_json["ipv6"] = "{}/{}".format( ipv6Next, topo["link_ip_start"]["v6mask"]) destRouter_link_json["ipv6"] = "{}/{}".format( ipv6Next + 1, topo["link_ip_start"]["v6mask"]) ipv6Next = ipaddress.IPv6Address( int(ipv6Next) + ipv6Step) logger.debug( "Generated link data for router: %s\n%s", curRouter, json.dumps(topo["routers"][curRouter]["links"], indent=4, sort_keys=True), ) switch_count = 0 add_switch_to_topo = [] while listSwitches != []: curSwitch = listSwitches.pop(0) # Physical Interfaces if "links" in topo["switches"][curSwitch]: for destRouterLink, data in sorted( topo["switches"][curSwitch]["links"].items()): # Loopback interfaces if "dst_node" in data: destRouter = data["dst_node"] elif "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList tempList = destRouterLink.split("-") # destRouter destRouter = tempList.pop(0) else: destRouter = destRouterLink if destRouter in listAllRouters: topo["routers"][destRouter]["links"][curSwitch] = deepcopy( topo["switches"][curSwitch]["links"][destRouterLink]) # Assigning name to interfaces topo["routers"][destRouter]["links"][curSwitch][ "interface"] = "{}-{}-eth{}".format( destRouter, curSwitch, topo["routers"][destRouter]["nextIfname"]) topo["switches"][curSwitch]["links"][destRouter][ "interface"] = "{}-{}-eth{}".format( curSwitch, destRouter, topo["routers"][destRouter]["nextIfname"]) topo["routers"][destRouter]["nextIfname"] += 1 # Add links dictSwitches[curSwitch].add_link( tgen.gears[destRouter], topo["switches"][curSwitch]["links"][destRouter] ["interface"], topo["routers"][destRouter]["links"][curSwitch] ["interface"], ) # IPv4 if "ipv4" in topo["routers"][destRouter]["links"][ curSwitch]: if (topo["routers"][destRouter]["links"][curSwitch] ["ipv4"] == "auto"): topo["routers"][destRouter]["links"][curSwitch][ "ipv4"] = "{}/{}".format( ipv4Next, topo["link_ip_start"]["v4mask"]) ipv4Next += 1 # IPv6 if "ipv6" in topo["routers"][destRouter]["links"][ curSwitch]: if (topo["routers"][destRouter]["links"][curSwitch] ["ipv6"] == "auto"): topo["routers"][destRouter]["links"][curSwitch][ "ipv6"] = "{}/{}".format( ipv6Next, topo["link_ip_start"]["v6mask"]) ipv6Next = ipaddress.IPv6Address( int(ipv6Next) + ipv6Step) logger.debug( "Generated link data for router: %s\n%s", curRouter, json.dumps(topo["routers"][curRouter]["links"], indent=4, sort_keys=True), )
def config_ospf_interface(tgen, topo, input_dict=None, build=False,
                          load_config=True):
    """
    API to configure ospf on router.

    Parameters
    ----------
    * `tgen` : Topogen object
    * `topo` : json file data
    * `input_dict` : Input dict data, required when configuring from testcase
    * `build` : Only for initial setup phase this is set as True.
    * `load_config` : Loading the config to router this is set as True.

    Usage
    -----
    r1_ospf_auth = {
        "r1": {
            "links": {
                "r2": {
                    "ospf": {
                        "authentication": "message-digest",
                        "authentication-key": "ospf",
                        "message-digest-key": "10"
                    }
                }
            }
        }
    }
    result = config_ospf_interface(tgen, topo, r1_ospf_auth)

    Returns
    -------
    True or False
    """
    logger.debug("Enter lib config_ospf_interface")
    if not input_dict:
        input_dict = deepcopy(topo)
    else:
        input_dict = deepcopy(input_dict)
    for router in input_dict.keys():
        config_data = []
        for lnk in input_dict[router]["links"].keys():
            if "ospf" not in input_dict[router]["links"][lnk]:
                logger.debug(
                    "Router %s: ospf config is not present in "
                    "input_dict, passed input_dict %s",
                    router, input_dict,
                )
                continue
            ospf_data = input_dict[router]["links"][lnk]["ospf"]
            data_ospf_area = ospf_data.setdefault("area", None)
            data_ospf_auth = ospf_data.setdefault("authentication", None)
            data_ospf_dr_priority = ospf_data.setdefault("priority", None)
            data_ospf_cost = ospf_data.setdefault("cost", None)

            try:
                intf = topo["routers"][router]["links"][lnk]["interface"]
            except KeyError:
                intf = topo["switches"][router]["links"][lnk]["interface"]

            # interface
            cmd = "interface {}".format(intf)
            config_data.append(cmd)

            # interface area config
            if data_ospf_area:
                cmd = "ip ospf area {}".format(data_ospf_area)
                config_data.append(cmd)

            # interface ospf auth
            if data_ospf_auth:
                if data_ospf_auth == "null":
                    cmd = "ip ospf authentication null"
                elif data_ospf_auth == "message-digest":
                    cmd = "ip ospf authentication message-digest"
                else:
                    cmd = "ip ospf authentication"

                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

                if "message-digest-key" in ospf_data:
                    cmd = "ip ospf message-digest-key {} md5 {}".format(
                        ospf_data["message-digest-key"],
                        ospf_data["authentication-key"])
                    if "del_action" in ospf_data:
                        cmd = "no {}".format(cmd)
                    config_data.append(cmd)

                if ("authentication-key" in ospf_data and
                        "message-digest-key" not in ospf_data):
                    cmd = "ip ospf authentication-key {}".format(
                        ospf_data["authentication-key"])
                    if "del_action" in ospf_data:
                        cmd = "no {}".format(cmd)
                    config_data.append(cmd)

            # interface ospf dr priority
            if data_ospf_dr_priority:
                cmd = "ip ospf priority {}".format(ospf_data["priority"])
                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

            # interface ospf cost
            if data_ospf_cost:
                cmd = "ip ospf cost {}".format(ospf_data["cost"])
                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

        if build:
            return config_data
        else:
            result = create_common_configuration(tgen, router, config_data,
                                                 "interface_config",
                                                 build=build)
    logger.debug("Exiting lib API: config_ospf_interface()")
    return result
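
# Illustrative usage sketch for config_ospf_interface(), following its
# docstring; the MD5 key id and password values are hypothetical.
def _example_config_ospf_interface(tgen, topo):
    r1_ospf_auth = {
        "r1": {
            "links": {
                "r2": {
                    "ospf": {
                        "authentication": "message-digest",
                        "authentication-key": "ospf",
                        "message-digest-key": "10",
                    }
                }
            }
        }
    }
    result = config_ospf_interface(tgen, topo, r1_ospf_auth)
    assert result is True, result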
def build_topo_from_json(tgen, topo): """ Reads configuration from JSON file. Adds routers, creates interface names dynamically and link routers as defined in JSON to create topology. Assigns IPs dynamically to all interfaces of each router. * `tgen`: Topogen object * `topo`: json file data """ ROUTER_LIST = sorted(topo["routers"].keys(), key=lambda x: int(re_search("\d+", x).group(0))) SWITCH_LIST = [] if "switches" in topo: SWITCH_LIST = sorted(topo["switches"].keys(), key=lambda x: int(re_search("\d+", x).group(0))) listRouters = ROUTER_LIST[:] listSwitches = SWITCH_LIST[:] listAllRouters = deepcopy(listRouters) dictSwitches = {} for routerN in ROUTER_LIST: logger.info("Topo: Add router {}".format(routerN)) tgen.add_router(routerN) listRouters.append(routerN) for switchN in SWITCH_LIST: logger.info("Topo: Add switch {}".format(switchN)) dictSwitches[switchN] = tgen.add_switch(switchN) listSwitches.append(switchN) if "ipv4base" in topo: ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"]) ipv4Step = 2**(32 - topo["link_ip_start"]["v4mask"]) if topo["link_ip_start"]["v4mask"] < 32: ipv4Next += 1 if "ipv6base" in topo: ipv6Next = ipaddress.IPv6Address(topo["link_ip_start"]["ipv6"]) ipv6Step = 2**(128 - topo["link_ip_start"]["v6mask"]) if topo["link_ip_start"]["v6mask"] < 127: ipv6Next += 1 for router in listRouters: topo["routers"][router]["nextIfname"] = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces if "links" in topo["routers"][curRouter]: def link_sort(x): if x == "lo": return 0 elif "link" in x: return int(x.split("-link")[1]) else: return int(re_search("\d+", x).group(0)) for destRouterLink, data in sorted( topo["routers"][curRouter]["links"].items(), key=lambda x: link_sort(x[0]), ): currRouter_lo_json = topo["routers"][curRouter]["links"][ destRouterLink] # Loopback interfaces if "type" in data and data["type"] == "loopback": if ("ipv4" in currRouter_lo_json and currRouter_lo_json["ipv4"] == "auto"): currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format( topo["lo_prefix"]["ipv4"], number_to_row(curRouter), number_to_column(curRouter), topo["lo_prefix"]["v4mask"], ) if ("ipv6" in currRouter_lo_json and currRouter_lo_json["ipv6"] == "auto"): currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format( topo["lo_prefix"]["ipv6"], number_to_row(curRouter), number_to_column(curRouter), topo["lo_prefix"]["v6mask"], ) if "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList tempList = destRouterLink.split("-") # destRouter destRouter = tempList.pop(0) # Current Router Link tempList.insert(0, curRouter) curRouterLink = "-".join(tempList) else: destRouter = destRouterLink curRouterLink = curRouter if destRouter in listRouters: currRouter_link_json = topo["routers"][curRouter]["links"][ destRouterLink] destRouter_link_json = topo["routers"][destRouter][ "links"][curRouterLink] # Assigning name to interfaces currRouter_link_json["interface"] = "{}-{}-eth{}".format( curRouter, destRouter, topo["routers"][curRouter]["nextIfname"]) destRouter_link_json["interface"] = "{}-{}-eth{}".format( destRouter, curRouter, topo["routers"][destRouter]["nextIfname"]) topo["routers"][curRouter]["nextIfname"] += 1 topo["routers"][destRouter]["nextIfname"] += 1 # Linking routers to each other as defined in JSON file tgen.gears[curRouter].add_link( tgen.gears[destRouter], topo["routers"][curRouter]["links"][destRouterLink] ["interface"], topo["routers"][destRouter]["links"][curRouterLink] ["interface"], ) # IPv4 if "ipv4" in currRouter_link_json: if 
currRouter_link_json["ipv4"] == "auto": currRouter_link_json["ipv4"] = "{}/{}".format( ipv4Next, topo["link_ip_start"]["v4mask"]) destRouter_link_json["ipv4"] = "{}/{}".format( ipv4Next + 1, topo["link_ip_start"]["v4mask"]) ipv4Next += ipv4Step # IPv6 if "ipv6" in currRouter_link_json: if currRouter_link_json["ipv6"] == "auto": currRouter_link_json["ipv6"] = "{}/{}".format( ipv6Next, topo["link_ip_start"]["v6mask"]) destRouter_link_json["ipv6"] = "{}/{}".format( ipv6Next + 1, topo["link_ip_start"]["v6mask"]) ipv6Next = ipaddress.IPv6Address( int(ipv6Next) + ipv6Step) logger.debug( "Generated link data for router: %s\n%s", curRouter, json_dumps(topo["routers"][curRouter]["links"], indent=4, sort_keys=True), ) switch_count = 0 add_switch_to_topo = [] while listSwitches != []: curSwitch = listSwitches.pop(0) # Physical Interfaces if "links" in topo['switches'][curSwitch]: for destRouterLink, data in sorted( topo['switches'][curSwitch]['links'].items()): # Loopback interfaces if "dst_node" in data: destRouter = data['dst_node'] elif "-" in destRouterLink: # Spliting and storing destRouterLink data in tempList tempList = destRouterLink.split("-") # destRouter destRouter = tempList.pop(0) else: destRouter = destRouterLink if destRouter in listAllRouters: topo['routers'][destRouter]['links'][curSwitch] = \ deepcopy(topo['switches'][curSwitch]['links'][destRouterLink]) # Assigning name to interfaces topo['routers'][destRouter]['links'][curSwitch]['interface'] = \ '{}-{}-eth{}'.format(destRouter, curSwitch, topo['routers'] \ [destRouter]['nextIfname']) topo['switches'][curSwitch]['links'][destRouter]['interface'] = \ '{}-{}-eth{}'.format(curSwitch, destRouter, topo['routers'] \ [destRouter]['nextIfname']) topo['routers'][destRouter]['nextIfname'] += 1 # Add links dictSwitches[curSwitch].add_link(tgen.gears[destRouter], \ topo['switches'][curSwitch]['links'][destRouter]['interface'], topo['routers'][destRouter]['links'][curSwitch]['interface'], ) # IPv4 if 'ipv4' in topo['routers'][destRouter]['links'][ curSwitch]: if topo['routers'][destRouter]['links'][curSwitch][ 'ipv4'] == 'auto': topo['routers'][destRouter]['links'][curSwitch]['ipv4'] = \ '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \ 'v4mask']) ipv4Next += 1 # IPv6 if 'ipv6' in topo['routers'][destRouter]['links'][ curSwitch]: if topo['routers'][destRouter]['links'][curSwitch][ 'ipv6'] == 'auto': topo['routers'][destRouter]['links'][curSwitch]['ipv6'] = \ '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \ 'v6mask']) ipv6Next = ipaddr.IPv6Address( int(ipv6Next) + ipv6Step) logger.debug( "Generated link data for router: %s\n%s", curRouter, json_dumps(topo["routers"][curRouter]["links"], indent=4, sort_keys=True), )
def verify_stale_routes_list(tgen, addr_type, dut, input_dict):
    """
    This API is used to verify stale routes, referring to the network
    together with its next-hop value.

    Parameters
    ----------
    * `tgen`: topogen object
    * `dut`: input dut router name
    * `addr_type` : ip type ipv4/ipv6
    * `input_dict` : input dict, has details of static routes

    Usage
    -----
    dut = 'r1'
    input_dict = {
        "r3": {
            "static_routes": [
                {
                    "network": [NETWORK1_1[addr_type]],
                    "no_of_ip": 2,
                    "vrf": "RED"
                }
            ]
        }
    }
    result = verify_stale_routes_list(tgen, addr_type, dut, input_dict)

    Returns
    -------
    errormsg(str) or True
    """
    logger.debug("Entering lib API: verify_stale_routes_list()")
    router_list = tgen.routers()
    additional_nexthops_in_required_nhs = []
    list1 = []
    list2 = []
    found_hops = []
    for routerInput in input_dict.keys():
        for router, rnode in router_list.items():
            if router != dut:
                continue
            # Verifying RIB routes
            command = "show bgp"
            # Static routes
            sleep(2)
            logger.info('Checking router {} BGP RIB:'.format(dut))
            if 'static_routes' in input_dict[routerInput]:
                static_routes = input_dict[routerInput]["static_routes"]
                for static_route in static_routes:
                    found_routes = []
                    missing_routes = []
                    st_found = False
                    nh_found = False
                    vrf = static_route.setdefault("vrf", None)
                    community = static_route.setdefault("community", None)
                    largeCommunity = static_route.setdefault(
                        "largeCommunity", None)
                    if vrf:
                        cmd = "{} vrf {} {}".format(command, vrf, addr_type)
                        if community:
                            cmd = "{} community {}".format(cmd, community)
                        if largeCommunity:
                            cmd = "{} large-community {}".format(
                                cmd, largeCommunity)
                    else:
                        cmd = "{} {}".format(command, addr_type)
                    cmd = "{} json".format(cmd)
                    rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)

                    # Verifying output dictionary rib_routes_json is not empty
                    if not bool(rib_routes_json):
                        errormsg = ("[DUT: {}]: No route found in rib of "
                                    "router".format(router))
                        return errormsg
                    elif "warning" in rib_routes_json:
                        errormsg = "[DUT: {}]: {}".format(
                            router, rib_routes_json["warning"])
                        return errormsg

                    network = static_route["network"]
                    if "no_of_ip" in static_route:
                        no_of_ip = static_route["no_of_ip"]
                    else:
                        no_of_ip = 1

                    # Generating IPs for verification
                    ip_list = generate_ips(network, no_of_ip)
                    for st_rt in ip_list:
                        st_rt = str(ipaddress.ip_network(st_rt))
                        _addr_type = validate_ip_address(st_rt)
                        if _addr_type != addr_type:
                            continue
                        if st_rt in rib_routes_json["routes"]:
                            st_found = True
                            found_routes.append(st_rt)
                            for mnh in range(
                                    0, len(rib_routes_json['routes'][st_rt])):
                                found_hops.append(
                                    [rib_r["ip"] for rib_r in
                                     rib_routes_json['routes'][st_rt][mnh]
                                     ["nexthops"]])
                            return found_hops
                        else:
                            return ("[DUT: {}]: No stale route/next-hops "
                                    "found for {} in BGP RIB".format(
                                        router, st_rt))
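
# Illustrative usage sketch for verify_stale_routes_list(); the network, VRF
# name and router names below are hypothetical test values.
def _example_verify_stale_routes(tgen):
    input_dict = {
        "r3": {
            "static_routes": [
                {"network": ["192.0.2.0/24"], "no_of_ip": 2, "vrf": "RED"}
            ]
        }
    }
    found_hops = verify_stale_routes_list(tgen, "ipv4", "r1", input_dict)
    assert isinstance(found_hops, list), found_hops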