def get_ospf_router_id(device, vrf, address_family, instance):
    """ Returns router-id for ospf

        Args:
            device ('obj'): device to run on
            vrf ('str'): vrf name
            address_family ('str'): address family
            instance ('str'): instance value
        Returns:
            str: single router id
            None: if empty
        Raises:
            None
    """
    try:
        parsed = device.parse("show ip protocols")
    except SchemaEmptyParserError:
        return None

    # Path down to the router_id leaf; capture its value as 'router_id'
    requirement = R([
        'protocols', 'ospf', 'vrf', vrf, 'address_family', address_family,
        'instance', instance, 'router_id', '(?P<router_id>.*)'
    ])
    matches = find([parsed], requirement, filter_=False, all_keys=True)
    if not matches:
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matches, all_keys=True)
    return groups.pop()['router_id']
def _verify_finds_root_interface(ops, requirements, **kwargs):
    '''Triggers in this file specified verify method. This is to check
       only 1 interface change to root after change the priority to highest
    '''
    log.info(banner("check only One interface change to root for each vlan"))
    matched = find([ops], R(requirements), filter_=False)
    if not matched:
        raise Exception(
            'There is no Root interfaces after changing the priority')

    group_keys = GroupKeys.group_keys(reqs=[requirements], ret_num={},
                                      source=matched)

    # Collect, per vlan, the set of interfaces reported as root
    vlan_dict = {}
    for entry in group_keys:
        vlan_dict.setdefault(entry['vlan'], {}).setdefault(
            entry['interface'], {})

    for vlan, intfs in vlan_dict.items():
        if len(intfs) != 1:
            raise Exception(
                'Expect ONE Root interface for vlan {v} but got {i}'.format(
                    v=vlan, i=list(intfs.keys())))
        log.info('Find ONE ROOT interface {i} for vlan {v}'.format(
            i=list(intfs.keys())[0], v=vlan))
def verify_isis_neighbor_in_state(device, interfaces, state='up',
                                  max_time=60, check_interval=20):
    ''' Verify ISIS neighbor state

        Args:
            device (`obj`): Device object
            interfaces (`list`): ISIS neighbor interfaces
            state (`str`): Expected state
            max_time (`int`): Max time
            check_interval (`int`): Check interval
        Returns:
            result (`bool`): Verified result
    '''
    cmd = 'show isis neighbors'
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse(cmd)
        except Exception as e:
            log.error("Failed to parse '{}':\n{}".format(cmd, e))
            timeout.sleep()
            continue

        # Match any of the requested interfaces in one alternation
        pattern = '|'.join(interfaces)
        reqs = R(['isis', '(.*)',
                  'vrf', '(.*)',
                  'interfaces', '(?P<interface>' + pattern + ')',
                  'neighbors', '(?P<neighbor>.*)',
                  'state', '(?P<state>.*)'])
        found = find([out], reqs, filter_=False, all_keys=True)

        # Every requested interface must be present before states are checked
        if not found or len(found) != len(interfaces):
            log.error(
                "Failed to find required ISIS neighbor interface: {}".format(
                    interfaces))
            timeout.sleep()
            continue

        keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                    source=found, all_keys=True)
        all_in_state = True
        for intf_dict in keys:
            log.info("Interface {} status is {}, expected value is {}".format(
                intf_dict['interface'], intf_dict['state'].lower(), state))
            if intf_dict['state'].lower() != state.lower():
                all_in_state = False

        if all_in_state:
            return True
        timeout.sleep()
    return False
def find_value(self, ops, values, r, path, **kwargs):
    """Resolve requirement values against a learnt ops object.

    NOTE(review): the body returns inside the first iteration of the
    ``for r_obj in r`` loop, so only one requirement object is processed;
    the original carries TODOs acknowledging this single-conf limitation.
    """
    # get the values for group_keys: unwrap one nesting level per key
    for key in values:
        sub_key = list(values[key].keys())[0]
        if sub_key:
            values = values[key]
        else:
            values = {key: values[key][sub_key]}

    keys = []
    # find required info from ops obj
    for r_obj in r:
        ret = find([ops], r_obj, filter_=False)
        if not ret:
            # Could not find anything satisfying
            return {}
        ret = GroupKeys.group_keys(ret_num=values, source=ret, reqs=path)
        # To Modify to support more than 1 conf object
        key = {}
        for v in values:
            if v not in ret[0]:
                raise Exception("'{v} could not be found in previously "
                                "learnt keys".format(v=v))
            key[v] = ret[0][v]
        keys.append(key)
        # TODO: To modify for multiple conf object
        return keys
    return keys
def get_hardware_slot_state(device, slot):
    """ Get slot state

        Args:
            device (`obj`): Device object
            slot (`str`): Slot
        Returns:
            state (`str`): Slot state
            None
        Raises:
            None
    """
    log.info("Getting slot {} state on device {}".format(slot, device.name))
    try:
        parsed = device.parse("show platform")
    except SchemaEmptyParserError:
        return None

    requirement = R([
        "slot",
        str(slot),
        "(?P<type>.*)",
        "(?P<name>.*)",
        "state",
        "(?P<state>.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups[0]["state"]
def get_mpls_forwarding_table_key_value_pairs(device, ip):
    """ Gets all key:value pairs from the mpls forwarding table

        Args:
            device (`obj`): Device object
            ip (`str`): IP address
        Returns:
            list of dicts, one per forwarding entry, with the captured keys
            (local_label, outgoing_label, prefix, interface, next_hop);
            empty dict ({}) if the parser output is empty or nothing matches
        Raises:
            N/A
    """
    try:
        out = device.parse('show mpls forwarding-table {}'.format(ip))
    except SchemaEmptyParserError:
        log.info("Device output is empty.")
        return {}

    # Capture every field of each forwarding entry
    reqs = R([
        'vrf', '(.*)',
        'local_label', '(?P<local_label>.*)',
        'outgoing_label_or_vc', '(?P<outgoing_label>.*)',
        'prefix_or_tunnel_id', '(?P<prefix>.*)',
        'outgoing_interface', '(?P<interface>.*)',
        'next_hop', '(?P<next_hop>.*)'
    ])
    found = find([out], reqs, filter_=False, all_keys=True)
    if found:
        return GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                    source=found, all_keys=True)
    return {}
def verify_cef_labels(device, route, expected_first_label,
                      expected_last_label=None, max_time=90,
                      check_interval=10):
    """ Verify first and last label on route

        Args:
            device ('obj'): Device object
            route ('str'): Route address
            expected_first_label ('str'): Expected first label
            expected_last_label ('str'): Expected last label
            max_time ('int'): Max time in seconds checking output
            check_interval ('int'): Interval in seconds of each checking
        Return:
            True/False
        Raises:
            None
    """
    reqs = R([
        'vrf', '(.*)',
        'address_family', '(.*)',
        'prefix', '(.*{}.*)'.format(route),
        'nexthop', '(.*)',
        'outgoing_interface', '(.*)',
        '(?P<val>.*)'
    ])
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse('show ip cef {}'.format(route))
        except SchemaEmptyParserError:
            out = None

        if not out:
            log.info(
                'Could not get information about show ip cef {}'.format(route))
            timeout.sleep()
            continue

        # NOTE(review): when nothing matches the requirement, labels_ok
        # stays True and the function returns True — confirm this vacuous
        # pass is intended.
        labels_ok = True
        found = find([out], reqs, filter_=False, all_keys=True)
        if found:
            keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                        source=found)
            for item in keys:
                first_label = item.get('val', {}).get('outgoing_label', None)
                if first_label and str(expected_first_label) not in str(
                        first_label):
                    labels_ok = False
                if expected_last_label:
                    sid = item.get('val', {}).get('sid', None)
                    if str(expected_last_label) != str(sid):
                        labels_ok = False

        if labels_ok:
            return True
        timeout.sleep()
    return False
def verify_mpls_forwarding_table_has_prefix_in_subnet_range(device, subnet,
                                                            max_time=30,
                                                            check_interval=10):
    """ Verifies local label for entries with a prefix inside subnet

        Args:
            device ('obj'): Device to use
            subnet ('str'): Subnet to verify inside
            max_time ('int'): Max time to check
            check_interval ('int'): How often to check

        returns:
            True/False

        raises:
            N/A
    """
    log.info('Checking atleast one entry has a prefix in subnet {subnet} range'
             .format(subnet=subnet))

    try:
        subnet = IPNetwork(subnet)
    except Exception:
        log.info('Bad subnet provided')
        return False

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse('show mpls forwarding-table')
        except SchemaEmptyParserError:
            log.info('Parser output is empty')
            timeout.sleep()
            continue

        reqs = R(['vrf', '(.*)',
                  'local_label', '(?P<local_label>.*)',
                  'outgoing_label_or_vc', '(.*)',
                  'prefix_or_tunnel_id', '(?P<prefix>.*)',
                  'outgoing_interface', '(.*)',
                  ])

        found = find([out], reqs, filter_=False, all_keys=True)
        if found:
            keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                        source=found, all_keys=True)
            for key in keys:
                try:
                    prefix = IPNetwork(key['prefix'])
                except Exception:
                    # Entry is not an IP prefix (e.g. tunnel id) - skip it
                    continue
                if prefix in subnet:
                    return True

        # Fix: previously returned False here on the first pass, which made
        # the Timeout retry loop dead code; retry until max_time expires.
        timeout.sleep()
    return False
def get_ospf_interfaces_with_neighbor(ops, neighbor):
    '''Get OSPF interfaces by given neighbor'''
    # find the neighbors on uut connected to the helper device
    paths = [[
        'info', 'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)', 'areas', '(?P<areas>.*)',
        'interfaces', '(?P<interfaces>.*)', 'neighbors', neighbor,
        '(?P<neighbors_info>.*)'
    ]]
    r_objects = [R(path) for path in paths]
    matched = find([ops], *r_objects, filter_=False)
    return GroupKeys.group_keys(ret_num={}, source=matched, reqs=paths)
def get_hardware_sfp_slot_dict(device, sfp_descr=".*"):
    """ Get SFP slot dict

        Args:
            device (`obj`): Device object
            sfp_descr (`str`): SFP descr
        Returns:
            sfp_slot_dict (`dict`): SFP slot dict
                example: {
                    '1/1/6':{'slot': '1',
                             'subslot': '1 transceiver 6',
                             'lc': 'ASR1000-SIP10',
                             'pid': 'SFP-GE-S',
                             'descr': 'GE SX'}}
        Raises:
            None
    """
    log.info("Getting inventory on {}".format(device.name))
    sfp_slot_dict = {}
    try:
        out = device.parse("show inventory")
    except SchemaEmptyParserError:
        # Fix: previously returned a list ([]) here although the function
        # is documented to (and otherwise does) return a dict.
        return sfp_slot_dict

    keys = []
    reqs = R([
        "slot",
        "(?P<slot>.*)",
        "lc",
        "(?P<lc>.*)",
        "subslot",
        "(?P<subslot>.*)",
        "(?P<pid>.*)",
        "descr",
        "(?P<descr>" + sfp_descr + ")",
    ])
    found = find([out], reqs, filter_=False, all_keys=True)
    if found:
        keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                    source=found, all_keys=True)

    # Turn 'slot 1, subslot "1 transceiver 6"' into key '1/1/6'
    p = re.compile(r"(?<=\d)( +\w+ )(?=\d)")
    for sfp in keys:
        slot = sfp["slot"] + "/" + re.sub(p, "/", sfp["subslot"])
        sfp_slot_dict.update({slot: sfp})
    return sfp_slot_dict
def get_hardware_rp_slot(device, state="standby", max_time=90,
                         check_interval=30):
    """ Get RP slot from device

        Args:
            device (`obj`): Device object
            state (`str`): RP state
            max_time (`int`): max wait time
            check_interval (`int`): check interval
        Returns:
            result (`str`): RP slot in required state
            None
        Raises:
            None
    """
    log.info("Finding {st} RP on device {dev}".format(st=state,
                                                      dev=device.name))
    reqs = R([
        "slot",
        "(?P<slot>.*)",
        "(?P<type>.*)",
        "(?P<name>.*)",
        "state",
        "(?P<state>.*)",
    ])
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse("show platform")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        found = find([out], reqs, filter_=False, all_keys=True)
        # Fix: guard the empty-result case before calling group_keys,
        # consistent with the other slot helpers in this module.
        if not found:
            timeout.sleep()
            continue

        keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                    source=found, all_keys=True)
        for key in keys:
            # RP slots are named like 'R0'/'R1'
            if "R" in key["slot"] and state in key["state"]:
                log.info(
                    "Found {st} RP {key[name]} on slot {key[slot]}".format(
                        st=state, key=key))
                return key["slot"]
        timeout.sleep()
    return None
def is_type_10_opaque_area_link_states_originated(device):
    """ Verifies if Type 10 opaque area link states are originated
        from command 'show ip ospf database opaque-area self-originate'

        Args:
            device (`obj`): Device to be executed command
        Raises:
            None
        Returns
            True
            False
    """
    try:
        parsed = device.parse(
            'show ip ospf database opaque-area self-originate')
    except SchemaEmptyParserError:
        return False

    requirement = R([
        'vrf', '(?P<vrf>.*)',
        'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)',
        'areas', '(?P<areas>.*)',
        'database', 'lsa_types', '(?P<lsa_types>.*)',
        'lsa_type', '(?P<lsa_type>.*)'
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return False

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    # Type 10 == area-local opaque LSA
    return groups.pop()['lsa_type'] == 10
def get_ntp_outgoing_interface(device, system_peer, vrf=None):
    """ Get the interface which is used to communicate with NTP system peer

        Args:
            device (`obj`): Device object
            system_peer (`str`): System peer ip
            vrf (`str`, optional): VRF name; defaults to None (global table)
        Returns:
            interface (`str`): Interface name
    """
    if vrf:
        cmd = "show ip cef vrf {vrf} {ip}".format(vrf=vrf, ip=system_peer)
    else:
        cmd = "show ip cef {ip}".format(ip=system_peer)

    try:
        parsed = device.parse(cmd)
    except Exception as e:
        log.error("Failed to parse cmd {cmd}: {e}".format(cmd=cmd, e=e))
        return None

    requirement = R([
        "vrf", "(?P<vrf>.*)",
        "address_family", "(?P<af>.*)",
        "prefix", "(?P<ip>.*)",
        "nexthop", "(?P<nexthop>.*)",
        "outgoing_interface", "(?P<intf>.*)",
        "(?:.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error("No interface was found")
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups[0]["intf"]
def get_interface_ip_address(device, interface, address_family):
    """ Get interface ip_address from device

        Args:
            interface('str'): Interface to get address
            device ('obj'): Device object
            address_family ('str'): Address family
        Returns:
            None
            ip_address ('str'): If has multiple addresses
                                will return the first one.

        Raises:
            None
    """
    if address_family not in ["ipv4", "ipv6", "inet", "inet6"]:
        log.info(
            'Must provide one of the following address families: "ipv4", "ipv6", "inet", "inet6"'
        )
        return

    # Normalize to the Junos address-family naming
    if address_family == "ipv4":
        address_family = "inet"
    elif address_family == "ipv6":
        address_family = "inet6"

    try:
        parsed = device.parse(
            'show interfaces {interface} terse'.format(interface=interface))
    except SchemaEmptyParserError:
        return

    requirement = R([
        interface, 'protocol', address_family, '(.*)', 'local',
        '(?P<local>.*)'
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if matched:
        groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                      source=matched, all_keys=True)
        return groups[0]['local']
def get_bgp_last_reset_list(device):
    """ Get last reset list from - show ip bgp all neighbors

        Args:
            device(`obj`): Device object
        Returns:
            key_list(`list`): result list
            table(`obj`): table to display
        Raises:
            SchemaEmptyParserError
    """
    try:
        parsed = device.parse("show ip bgp all neighbors")
    except SchemaEmptyParserError:
        log.info("Command has not returned any results")
        return [], None

    requirement = R([
        "vrf", "(?P<vrf>.*)",
        "neighbor", "(?P<ip>.*)",
        "bgp_session_transport", "connection",
        "last_reset", "(?P<reset>.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return [], None

    key_list = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                    source=matched, all_keys=True)

    # display in table
    table = PrettyTable()
    table.field_names = ["Vrf", "Neighbor", "Reset Count"]
    for key in key_list:
        table.add_row([key["vrf"], key["ip"], key["reset"]])

    return key_list, table
def get_interface_qlimit_bytes(device, interface):
    """ Get interface qlimit in bytes

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
        Returns:
            None
            qlimit_bytes (`int`): Interface qlimit_bytes

        Raises:
            None
    """
    try:
        parsed = device.parse(
            "show platform hardware qfp active infrastructure bqs "
            "queue output default interface {interface}".format(
                interface=interface
            )
        )
    except SchemaEmptyParserError:
        return

    requirement = R([
        interface,
        "index",
        "(?P<index>.*)",
        "software_control_info",
        "qlimit_bytes",
        "(?P<qlimit>.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups[0]["qlimit"]
def get_ntp_outgoing_interface(device, system_peer):
    """ Get the interface which is used to communicate with NTP system peer

        Args:
            device (`obj`): Device object
            system_peer (`str`): System peer ip
        Returns:
            interface (`str`): Interface name
    """
    try:
        parsed = device.parse("show ip cef {}".format(system_peer))
    except SchemaEmptyParserError:
        log.error("Command 'show ip cef {}' "
                  "did not return any results".format(system_peer))
        return None

    requirement = R([
        "vrf", "(?P<vrf>.*)",
        "address_family", "(?P<af>.*)",
        "prefix", "(?P<ip>.*)",
        "nexthop", "(?P<nexthop>.*)",
        "outgoing_interface", "(?P<intf>.*)",
        "(?:.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error("No interface was found")
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups[0]["intf"]
def get_ready_rps_lcs(ops):
    '''Get ready RPs/LCs from platform ops'''
    paths = [
        [
            'slot', 'rp', '(?P<rp>.*)', 'state',
            '(?P<state>ok, active|ok, standby|Ready)'
        ],
        # ['slot', 'lc', '(?P<lc>.*)','state', 'ok'],
        [
            'slot', 'oc', '(?P<oc>.*)', 'state',
            '(?P<oc_state>ok, active|ok, standby|ok|ps, fail)'
        ],
    ]
    r_objects = [R(path) for path in paths]
    matched = find([ops], *r_objects, filter_=False, all_keys=True)
    return GroupKeys.group_keys(ret_num={}, source=matched, reqs=paths,
                                all_keys=True)
def get_routes(device, protocol_codes=None):
    """ Retrieve all routes in specific protocal - show ip route

        Args:
            device ('obj'): Device object
            protocol_codes ('str'): Protocol codes
                If not provided, it will get all protocal routes
        Returns:
            routes ('list'): List of routes
    """
    routes = []
    cmd = 'show ip route'
    if protocol_codes is None:
        protocol_codes = '(.*)'

    try:
        out = device.parse(cmd)
    except Exception as e:
        log.error("Failed to parse '{}':\n{}".format(cmd, e))
        return routes

    requirement = R([
        'vrf', '(.*)', 'address_family', '(.*)', 'routes',
        '(?P<route>.*)', 'source_protocol_codes', protocol_codes
    ])
    matched = find([out], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error("Could not find any route with protocol_codes '{}'".format(
            protocol_codes))
        return routes

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    routes = [entry['route'] for entry in groups]
    return routes
def get_ospf_router_id(device, vrf='(.*)', address_family='(.*)',
                       instance='(.*)'):
    """ Get ospf router-id - show ip protocols

        Args:
            device ('obj'): device to run on
            vrf ('str'): vrf name
            address_family ('str'): address family
            instance ('str'): instance value
        Returns:
            str: single router id
            None: if empty
        Raises:
            None
    """
    log.info("Getting OSPF router-id")
    router_id = None
    cmd = 'show ip protocols'

    try:
        out = device.parse(cmd)
    except Exception as e:
        log.error("Failed to parse '{}':\n{}".format(cmd, e))
        return router_id

    requirement = R(['protocols', 'ospf', 'vrf', vrf,
                     'address_family', address_family,
                     'instance', instance,
                     'router_id', '(?P<router_id>.*)'])
    matched = find([out], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error("No ospf router id was found")
        return router_id

    key_list = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                    source=matched, all_keys=True)
    return key_list.pop()['router_id']
def get_topologies(self, tmp, instance, intf):
    '''Build the per-topology adjacency dict for one interface from the
       previously parsed adjacency data in ``tmp``.'''
    ret_dict = {}
    # Map adjacency level codes to the output naming convention
    mapping = {'L1': 'level-1', 'L2': 'level-2', 'L1L2': 'level-all'}
    adj_dict = tmp.get('adjacencies', {}).get(instance, {})

    requirement = R(['(?P<nbr_id>.*)', '(?P<lvl>.*)', 'interface', intf])
    matched = find([adj_dict], requirement, filter_=False, all_keys=True)
    if matched:
        groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                      source=matched, all_keys=True)
        nbr_id = groups[0]['nbr_id']
        lvl = groups[0]['lvl']
        target = adj_dict[nbr_id][lvl]

        snpa = target.get('snpa', '')
        state = target.get('state', '')
        uptime = target.get('uptime', '')
        holdtime = target.get('holdtime')
        priority = target.get('priority')
        circuit_id = target.get('circuit_id', '')

        for topo in target.get('topology', []):
            topo_dict = ret_dict.setdefault(topo, {})
            topo_dict.update({'name': topo})
            sub_dict = topo_dict.setdefault('adjacencies', {})\
                .setdefault(nbr_id, {})\
                .setdefault('neighbor_snpa', {}).setdefault(snpa, {})\
                .setdefault('level', {}).setdefault(mapping.get(lvl), {})
            sub_dict.update({
                'neighbor_extended_circuit_id': circuit_id,
                'hold_timer': holdtime,
                'neighbor_priority': priority,
                'lastuptime': uptime,
                'state': state.capitalize()
            })
    return ret_dict
def get_hardware_all_fans_speed(device):
    """ Get fan speed for all fans

        Args:
            device (`obj`): Device object
        Returns:
            fans (`list`): Fans info
        Raises:
            None
    """
    fans = []
    p = re.compile(r"Fan +Speed +(?P<speed>.*)%")
    try:
        out = device.parse("show environment | include Fan")
    except (SchemaEmptyParserError, SubCommandFailure):
        return fans

    reqs = R([
        "slot",
        "(?P<slot>.*)",
        "sensor",
        "(?P<sensor>.*)",
        "state",
        "(?P<state>.*)",
    ])
    found = find([out], reqs, filter_=False, all_keys=True)
    if found:
        fans = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                    source=found, all_keys=True)
        for fan in fans:
            # Fix: guard against state strings that carry no speed reading;
            # previously p.search(...) could return None -> AttributeError.
            match = p.search(fan["state"])
            if not match:
                continue
            fan["speed"] = int(match.groupdict()["speed"])
            log.info("Found fan on {fan[slot]} with Speed {fan[speed]}%".format(
                fan=fan))
    return fans
def get_ospf_interface_affinity_bits(device, interface):
    """ Get affinity bits value of an ospf interface

        Args:
            device ('obj'): Device object
            interface ('str'): Interface name
        Returns:
            bits ('str'): Affinity bits
    """
    log.info(
        "Getting Affinity bits of interface {intf}".format(intf=interface))
    cmd = 'show ip ospf interface {intf}'.format(intf=interface)

    try:
        out = device.parse(cmd)
    except Exception as e:
        log.error("Failed to parse '{cmd}': {e}".format(cmd=cmd, e=e))
        return None

    requirement = R([
        'vrf', '(.*)', 'address_family', '(.*)', 'instance', '(.*)',
        'areas', '(.*)', 'interfaces', '(.*)', 'teapp', '(.*)',
        'affinity', 'bits', '(?P<bits>.*)'
    ])
    matched = find([out], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error(
            "Failed to get affinity bits on {intf}".format(intf=interface))
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    bits = groups[0]['bits']
    log.info("Get affinity bits '{bits}' on {intf}".format(bits=bits,
                                                           intf=interface))
    return bits
def get_ospf_router_id(ops):
    '''Get OSPF router-id from ospf ops'''
    # R object holding the ops attribute path down to router_id
    rs_rd_helper = R([
        'info', 'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)', 'router_id', '(?P<router_id>.*)'
    ])

    # find() returns entries like
    # [('10.2.2.2', ['info', 'vrf', 'default', 'address_family', 'ipv4',
    #                'instance', '1', 'router_id'])]
    ret_rd_helper = find([ops], rs_rd_helper, filter_=False)
    if not ret_rd_helper:
        return None

    # Convert the find() result into a list of {key: value} dicts
    return GroupKeys.group_keys(ret_num={}, source=ret_rd_helper,
                                reqs=rs_rd_helper.args)
def get_ospf_process_id_on_interface(device, interface):
    """ Get ospf interface process id

        Args:
            device ('obj'): device object
            interface ('str'): interface name
        Returns:
            ospf_id ('str'): ospf process id
    """
    log.info(
        "Getting ospf interface {intf} process id from device {dev}".format(
            intf=interface, dev=device.name))
    cmd = 'show ip ospf interface {intf}'.format(intf=interface)

    try:
        out = device.parse(cmd)
    except Exception as e:
        log.error("Failed to parse '{cmd}': {e}".format(cmd=cmd, e=e))
        return None

    requirement = R([
        'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)', 'areas', '(?P<area>.*)',
        'interfaces', interface, 'name', '(?P<name>.*)'
    ])
    matched = find([out], requirement, filter_=False, all_keys=True)
    if not matched:
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    # 'instance' capture holds the OSPF process id
    return groups[0]['instance']
def get_ospf_interfaces(ops):
    '''Get OSPF interfaces by given neighbor'''
    # Requirement paths: any ospf 'up' interface plus its cost
    paths = [[
        'info', 'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)', 'areas', '(?P<areas>.*)',
        'interfaces', '(?P<interfaces>.*)', 'state',
        '(?P<state>(dr|bdr|dr_other|point_to_point))'
    ],
    [
        'info', 'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)', 'areas', '(?P<areas>.*)',
        'interfaces', '(?P<interfaces>.*)', 'cost', '(?P<cost>.*)'
    ]]
    r_objects = [R(path) for path in paths]
    matched = find([ops], *r_objects, filter_=False, all_keys=True)
    return GroupKeys.group_keys(ret_num={}, source=matched, reqs=paths,
                                all_keys=True)
def learn_routing(device, address_family, paths, ops_container=None,
                  ret_container=None):
    '''Dynamic learn routing information by using the paths that specified,
       and store the data into dictionary.

       Args:
         Mandatory:
           device (`obj`): Device object
           address_family (`str`) : Value of address_family, could be ipv4/ipv6
           paths (`list`) : Ops paths to look for the desired routing values
         Optional:
           ops_container (`dict`): Container to store the learned ops,
                                   in case multiple learning
           ret_container (`dict`) : Container to store the learned routes
                                    to let parent update on it

       Returns:
           None. Instead of returned values, it will store the learned
           information in the container to let parent update on it.

           The container values for ret_container looks like below
           10.9.1.0/24: {
               10.9.1.2: {'R5': {route: 10.9.1.0/24, intf: Vlan99, vrf: default}},
               10.9.1.1: {'R1': {route: 10.9.1.0/24, intf: Vlan99, vrf: default}},
           }
           10.9.1.0/24: {
               10.9.1.2: {'R5': {route: 10.9.1.0/24, intf: GigabitEthernet1/0/4, vrf: test2}},
               10.9.1.1: {'R5': {route: 10.9.1.0/24, intf: Vlan99, vrf: test1}},
           }

       Raises:
           Exception: Routing ops cannot sucessfully learned
    '''
    # Fix: defaults were mutable ([], {}) and shared across calls; worse,
    # ops_container defaulted to a *list* yet is indexed by device name
    # below, which raised TypeError whenever the default was used.
    if ops_container is None:
        ops_container = {}
    if ret_container is None:
        ret_container = {}

    log.info(banner("learn routing info on device {}".format(device.name)))
    # get ip and vrf
    routing_ops = ops.routing.iosxe.routing.Routing(device)

    # learn the routing ops
    try:
        routing_ops.learn()
    except Exception as e:
        raise Exception('cannot learn routing ops: {}'.format(e))

    ops_container[device.name] = routing_ops

    log.info(banner("Get routing groups from device {}".format(device.name)))
    rs = [R(p) for p in paths]
    ret = find([routing_ops], *rs, filter_=False, all_keys=True)
    if not ret:
        return

    groups = GroupKeys.group_keys(reqs=paths, ret_num={}, source=ret,
                                  all_keys=True)
    if not groups:
        return

    # learn interfaces ip
    if 'ipv4' in address_family:
        ip_out = ShowIpInterfaceBrief(device).parse()
    else:
        ip_out = ShowIpv6Interface(device).parse()

    for keys in groups:
        # find interface ip
        if 'ipv4' in address_family:
            intf_r = [
                R(['interface', keys['intf'], 'ip_address', '(?P<ip>.*)'])
            ]
        else:
            intf_r = [
                R([keys['intf'], 'ipv6', '(?P<ip_addr>.*)', 'ip',
                   '(?P<ip>.*)']),
                R([keys['intf'], 'ipv6', '(?P<ip_addr>.*)',
                   NotExists('origin')])
            ]
        ip = find([ip_out], *intf_r, filter_=False)
        if ip:
            ip = ip[0][0]
            ret_container.setdefault(keys['route'], {}).\
                setdefault(ip, {}).update({device.name: keys})
def verify_mpls_forwarding_table_outgoing_label(
        device, ip, expected_label="", same_as_local=False,
        max_time=30, check_interval=10):
    """ Verify local and remote binding labels for ipv4

        Args:
            device (`obj`): Device object
            ip (`str`): IP address
            expected_label (`str`): Expected label
            same_as_local (`bool`):
                True if verify outgoing labels with local label
                False if verify outgoing labels with expected label
            max_time (`int`): Max time, default: 30
            check_interval (`int`): Check interval, default: 10
        Returns:
            result (`bool`): Verified result
    """
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        result = True
        try:
            out = device.parse('show mpls forwarding-table {}'.format(ip))
        except SchemaEmptyParserError:
            log.info("Device output is empty.")
            result = False
            timeout.sleep()
            continue

        reqs = R([
            'vrf', '(.*)',
            'local_label', '(?P<local_label>.*)',
            'outgoing_label_or_vc', '(?P<outgoing_label>.*)',
            'prefix_or_tunnel_id', '(?P<prefix>.*)',
            'outgoing_interface', '(?P<interface>.*)',
            'next_hop', '(?P<next_hop>.*)'
        ])
        found = find([out], reqs, filter_=False, all_keys=True)
        if not found:
            log.error("Could not find any mpls route for {}".format(ip))
            result = False
        else:
            keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                        source=found, all_keys=True)
            for route in keys:
                if same_as_local:
                    log.info("Interface {route[interface]} has local label "
                             "'{route[local_label]}' and outgoing label "
                             "'{route[outgoing_label]}'".format(route=route))
                    if str(route['outgoing_label']) != str(
                            route['local_label']):
                        result = False
                else:
                    log.info(
                        "Interface {route[interface]} outgoing label is "
                        "'{route[outgoing_label]}', exepected to have label "
                        "'{expected}'".format(route=route,
                                              expected=expected_label))
                    if str(route['outgoing_label']) != str(expected_label):
                        result = False

        if result is True:
            return result
        timeout.sleep()
    return result
def verify_ospf_interface_cost(device, interface, expected_cost,
                               cost_type='ospf', instance=None, area=None,
                               max_time=60, check_interval=15):
    """ Verifies ospf cost on interface

        Args:
            device ('obj'): device to use
            interface ('str'): Interface to use
            expected_cost ('int'): Expected configured cost
            cost_type ('str'): Cost type configured
            instance ('str'): Instance to use
            area ('str'): Area to use
            max_time ('int'): Maximum time to keep checking
            check_interval ('int'): How often to check

        Returns:
            True/False

        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)

    if 'ospf' in cost_type.lower():
        while timeout.iterate():
            try:
                out = device.parse(
                    'show ospf interface {interface} detail'.format(
                        interface=interface))
            except SchemaEmptyParserError:
                log.info('Parser is empty')
                timeout.sleep()
                continue

            reqs = R([
                'instance', '{}'.format(instance if instance else 'master'),
                'areas', '{}'.format(area if area else '(.*)'),
                'interfaces', interface, 'cost', '(?P<cost>.*)'
            ])
            found = find([out], reqs, filter_=False, all_keys=True)
            if found:
                keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                            source=found, all_keys=True)
                if 'cost' in keys[0] and int(expected_cost) == int(
                        keys[0]['cost']):
                    return True
            timeout.sleep()
        return False

    elif 'te' in cost_type.lower():
        while timeout.iterate():
            # Resolve the interface's local inet address first
            try:
                out = device.parse('show interfaces {interface} terse'.format(
                    interface=interface))
            except SchemaEmptyParserError:
                log.info('Parser is empty')
                timeout.sleep()
                continue

            reqs = R([
                interface, 'protocol', 'inet', '(.*)', 'local',
                '(?P<local>.*)'
            ])
            found = find([out], reqs, filter_=False, all_keys=True)
            if found:
                keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                            source=found, all_keys=True)
                local_address = keys[0].get('local')

                # Then look the address up in the TED database for its metric
                try:
                    out = device.parse('show ted database extensive')
                except SchemaEmptyParserError:
                    log.info('Parser is empty')
                    timeout.sleep()
                    continue

                reqs = R([
                    'node', '(.*)', 'protocol', '(.*)', 'to', '(.*)',
                    'local', local_address.split('/')[0],
                    'remote', '(.*)',
                    'metric', '(?P<metric>.*)'
                ])
                found = find([out], reqs, filter_=False, all_keys=True)
                if found:
                    keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
                                                source=found, all_keys=True)
                    if 'metric' in keys[0] and int(expected_cost) == int(
                            keys[0]['metric']):
                        return True
            timeout.sleep()
        return False

    log.info('This api does not support cost type {}'.format(cost_type))
    # Fix: documented contract is True/False, but unsupported cost types
    # previously fell through and returned None implicitly.
    return False
def learn_ops(self, device, abstract, steps, timeout, **kwargs):
    '''Learn Ops object and populate the keys'''

    # Holds Ops object
    self._ops_ret = {}
    # Holds Conf object
    self._conf_ret = {}
    # How the keys learnt - Those are the regex values
    self.keys = []
    # TODO: Remove this
    self.req_list_flag = {}
    self.timeout = timeout

    # Loop over each requirement
    all_requirements = []

    # store hardcode values if provided
    # pop out the provided_values to leave the ops path only
    provided_values = self.requirements.pop('provided_values') \
        if 'provided_values' in self.requirements else {}

    for base, requirements in self.requirements.items():
        name = base.split('.')[-1]

        # Instantiate the abstracted base object
        # Create attrgetter for abstract which would find the right base
        # object
        abstracted_base = attrgetter(base)(abstract)
        is_ops = issubclass(abstracted_base, OpsBase)
        type_ = 'Ops' if is_ops else 'Conf'

        with steps.start("Learning '{n}' {t}".format(n=name,
                                                     t=type_)) as step:
            # Modify requirements so everything is learn
            if 'requirements' in requirements:
                req = requirements.copy()
                del req['requirements']
            try:
                o = self._learn_base(device, name, step, abstracted_base,
                                     is_ops, base, req)
            except StopIteration as e:
                step.failed("Could not learn '{n}'".format(n=name),
                            from_exception=e)

            if o:
                if is_ops:
                    # process ops requirements
                    self._ops_ret[base] = o
                else:
                    # process conf requirements
                    self._conf_ret[base] = o

        with steps.start('Verifying requirements') as step:
            # print out the log to show which are the hardcode values
            if provided_values:
                log.info('Updating the requirements with the information '
                         'provided: {}'.format(provided_values))

            reqs = requirements['requirements']

            # Normalize to a list of requirement lists ([[ ]] handling)
            if not any(isinstance(el, list) for el in reqs[0]):
                reqs_list = [reqs]
                self.req_list_flag[base] = False
            else:
                reqs_list = reqs
                self.req_list_flag[base] = True

            ret_reqs = []
            for reqs in reqs_list:
                # Needed for [[ ]] requirements
                if isinstance(reqs[0], list):
                    all_requirements.extend(reqs)

                req_msg = '\n'.join([str(re) for re in reqs])
                log.info("Requirements pattern to "
                         "verify:\n{r}\n\n".format(r=req_msg))

                # To populate the path first with hardcode values
                # to only store the attributes that contains the
                # hardcoded values
                reqs = self._populate_path(reqs, device,
                                           keys=[provided_values])

                # Populate the keys into R object
                # Useful if want to learn from previously learnt
                # requirements
                reqs = self._populate_path(reqs, device, keys=self.keys,
                                           device_only=True)

                all_keys = requirements.get('all_keys', False)

                # Check if the requirements is [Operator('info')]
                # if so, it will check if the ops output is empty
                # TODO - this particular case will be enhanced in find
                expect_empty = False
                for i in reqs:
                    if len(i) < 2:
                        attr = i[0] if isinstance(i[0], str) else i[0].value
                        if not hasattr(o, attr):
                            expect_empty = True
                            step.passed('The ops attribute {} is empty as '
                                        'expected'.format(attr))

                rs = [R(requirement) for requirement in reqs]

                if not isinstance(o, list):
                    o = [o]
                for item in sorted(o)[:1]:
                    # exclude the managemnet interface from the selected
                    # interfaces
                    find_obj = self.exclude_management_interface(
                        device, requirements, item)
                    ret = find([find_obj], *rs, filter_=False,
                               all_keys=all_keys)
                    if not ret:
                        # Requirements are not satisfied
                        err_msg = '\n'.join([str(re) for re in reqs])
                        step.skipped("Following requirements were not "
                                     "satisfied for '{n}':\n{e}".format(
                                         n=name, e=err_msg))

                    group_keys = GroupKeys.group_keys(reqs=reqs,
                                                      ret_num={},
                                                      source=ret,
                                                      all_keys=all_keys)
                    temp_keys = []
                    for key in group_keys:
                        temp_keys.extend(
                            GroupKeys.merge_all_keys(self.keys, [], key))
                    self.keys = temp_keys.copy()

        with steps.start('Merge requirements') as step:
            # update the self.keys with hardcode values for following needs
            if not self.keys:
                self.keys.append(provided_values)
            else:
                for item in self.keys:
                    item.update(provided_values)

            self.keys = GroupKeys.max_amount(self.keys, self.num_values)

            req = self._populate_path(all_requirements, device,
                                      keys=self.keys)
            if not req:
                step.skipped('Could not merge the requirements '
                             'together\n{k}'.format(k=self.keys))
            ret_reqs.extend(req)

    # Requirements were satisfied
    msg = '\n'.join([str(re) for re in ret_reqs])
    log.info("\n\nFound the following requirements:\n{e}".format(n=name,
                                                                 e=msg))

    # Search if any regex remaining.
    for item in ret_reqs:
        for elem in item:
            # expect_empty useage reason:
            # When requirements is [Operator('info')]
            # the ops output is empty, the full path won't be populated
            # it could contain ('(?P<'), in this case pass the step
            # TODO - will remove expect_empty when find is enhanced
            if isinstance(elem, str) and elem.startswith('(?P<') \
                    and not expect_empty:
                step.skipped('Could not satisfy all requirement\n{k}'
                             .format(k=self.keys))

    return self._ops_ret