def get_ospf_segment_routing_lb_srlb_base_and_range(device, process_id, router_id):
    """ Gets 'SRLB Base' and 'SRLB Range' values

        Args:
            device ('obj'): Device to use
            process_id ('str'): Ospf process_id
            router_id ('str'): Which router_id entry to use

        Returns:
            if can filter down to one result:
                (('int'): SRLB Base value, ('dict'): Output from parser)
            otherwise:
                (None, None)

        Raises:
            None
    """
    try:
        parsed = device.parse("show ip ospf segment-routing local-block")
    except SchemaEmptyParserError:
        return None, None

    # Both values live under the same instance/area/router_id path;
    # query each leaf key in turn and bail out if either is missing
    values = []
    for leaf, capture in (("srlb_base", "(?P<srlb_base>.*)"),
                          ("srlb_range", "(?P<srlb_range>.*)")):
        requirement = R([
            "instance",
            process_id,
            "areas",
            "(?P<area>.*)",
            "router_id",
            router_id,
            leaf,
            capture,
        ])
        matched = find(parsed, requirement, filter_=False, all_keys=True)
        if not matched:
            return None, None
        values.append(matched[0][0])

    return values[0], values[1]
class TriggerClearIPv6NeighborVrfAll(TriggerClear):
    """Clear trigger: learns the Nd (IPv6 neighbor discovery) ops model for
    all interfaces/neighbors, performs the clear, then verifies each
    neighbor's 'age' with CompareUptime.compare_uptime."""

    # Argument with dynamic value for verify callable
    # As verify callable can be re-used in multiple triggers
    # with different variable names. This dictionary is used to map
    # dynamic argument name to actual script argument name
    # <expected argument_name for callable>: <script argument name>
    mapping_extra_args = {
        'ops': 'ops_obj',
        'relation': 'sign',
        'threshold_time': 'compare_time'
    }

    # Learn every interface/neighbor pair; only the 'age' attribute is
    # collected (see 'attributes' kwarg), everything in 'exclude' is ignored
    # when comparing snapshots
    mapping = Mapping(requirements={'ops.nd.nd.Nd':{
                                        'requirements':[\
                                            ['info', 'interfaces', '(?P<interface>.*)'
                                             ,'neighbors', '(?P<neighbor>.*)','(.*)']],
                                        'kwargs': {'attributes': [
                                            'info[interfaces][(.*)][neighbors][(.*)][age]']},
                                        'exclude': exclude}},
                      num_values={'interface': 'all', 'neighbor': 'all'})

    # Verify callable definition
    verify = CompareUptime.compare_uptime
    ops_lib = 'ops.nd.nd.Nd'

    # Arguments for verify callable
    # Must be dictionary, key is argument from verify callable,
    # value should be the value to pass into the callable
    verify_func_args = {
        'r_obj': [
            R([
                'info', 'interfaces', '(?P<interface>.*)', 'neighbors',
                '(?P<neighbor>.*)', 'age', '(.*)'
            ])
        ]
    }
class TriggerClearCounters(TriggerClear): mapping_extra_args = { 'ops': 'ops_obj', 'relation': 'sign', 'threshold_time': 'compare_time' } # Learn interface ops for all interfaces on device mapping = Mapping(\ requirements={'ops.interface.interface.Interface':{ 'requirements':[\ ['info', '(?P<name>[\w\-\/]+$)', 'enabled', '(?P<enabled>.*)']], 'exclude': interface_exclude}}, num_values={'name': 'all'}) # Verify callable definition verify = CompareUptime.compare_uptime ops_lib = 'ops.interface.interface.Interface' # Arguments for verify callable verify_func_args = { 'r_obj': [R(['info', '(?P<name>.*)', 'counters', 'last_clear', '(.*)'])] }
def _get_static_rp_device(vrf):
    '''Get device which has static_rp from learned LTS in common_setup

       Can be controlled via sections parameters which is provided by the
       triggers/verification datafile

       Args:
           Mandatory:
               vrf (`str`) : vrf information that want the feature on

       Returns:
           list of `R` requirement objects to find static_rp in the LTS

       Raises:
           None
    '''
    # check if uut has static_rp feature; default to any non-default vrf
    # name when the caller did not supply one.
    # BUGFIX: raw string — the previous non-raw literal contained the
    # invalid escape sequence '\w' (DeprecationWarning, future SyntaxError)
    if not vrf:
        vrf = r'(?P<vrf>^(?!default)\w+$)'

    reqs = [[
        'ops.pim.pim.Pim', 'vrf', vrf, 'address_family', '(.*)', 'rp',
        'static_rp', '(?P<static_rp>.*)'
    ]]
    return [R(r) for r in reqs]
def get_hardware_slot_state(device, slot):
    """ Get slot state

        Args:
            device (`obj`): Device object
            slot (`str`): Slot

        Returns:
            state (`str`): Slot state
            None

        Raises:
            None
    """
    log.info("Getting slot {} state on device {}".format(slot, device.name))

    try:
        parsed = device.parse("show platform")
    except SchemaEmptyParserError:
        return None

    requirement = R([
        "slot",
        str(slot),
        "(?P<type>.*)",
        "(?P<name>.*)",
        "state",
        "(?P<state>.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return None

    grouped = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                   source=matched, all_keys=True)
    return grouped[0]["state"]
def _get_auto_rp_interface_device(vrf):
    '''Get device which has auto-rp 'up' interface from learned LTS in
       common_setup

       Can be controlled via sections parameters which is provided by the
       triggers/verification datafile

       Args:
           Mandatory:
               vrf (`str`) : vrf information that want the feature on

       Returns:
           list of `R` requirement objects to find the auto-rp announce
           interface in the LTS

       Raises:
           None
    '''
    # check if uut has auto_rp feature; default to any non-default vrf
    # name when the caller did not supply one.
    # BUGFIX: raw string — the previous non-raw literal contained the
    # invalid escape sequence '\w' (DeprecationWarning, future SyntaxError)
    if not vrf:
        vrf = r'(?P<vrf>^(?!default)\w+$)'

    # NOTE(review): '(?P<dev>.*)' appears twice in this path (top level and
    # under 'device_attr') — presumably both refer to the same device name;
    # confirm against the conf.pim.Pim structure.
    reqs = [[
        'conf.pim.Pim', '(?P<dev>.*)', 'device_attr', '(?P<dev>.*)',
        '_vrf_attr', vrf, '_address_family_attr', 'ipv4',
        'send_rp_announce_intf', '(?P<intf>.*)'
    ]]
    return [R(r) for r in reqs]
def _verify_finds_root_interface(ops, requirements, **kwargs):
    '''Triggers in this file specified verify method. This is to check only
       1 interface change to root after change the priority to highest'''
    log.info(banner("check only One interface change to root for each vlan"))

    matched = find([ops], R(requirements), filter_=False)
    if not matched:
        raise Exception(
            'There is no Root interfaces after changing the priority')

    grouped = GroupKeys.group_keys(reqs=[requirements], ret_num={},
                                   source=matched)

    # Collect, per vlan, the set of interfaces reported as root
    roots_per_vlan = {}
    for entry in grouped:
        roots_per_vlan.setdefault(entry['vlan'], {}).setdefault(
            entry['interface'], {})

    for vlan, interfaces in roots_per_vlan.items():
        names = list(interfaces.keys())
        if len(names) != 1:
            raise Exception(
                'Expect ONE Root interface for vlan {v} but got {i}'.format(
                    v=vlan, i=names))
        log.info('Find ONE ROOT interface {i} for vlan {v}'.format(
            i=names[0], v=vlan))
class TriggerClearBgpVpnv4UnicastVrfAll(TriggerClear):
    """Clear trigger for BGP vpnv4 unicast sessions in all vrfs: requires
    established vpnv4 unicast neighbors in the Bgp ops model, then verifies
    each neighbor's 'up_down' under routes_per_peer after the clear.

    NOTE(review): 'verify' and 'mapping_extra_args' are not defined here —
    presumably inherited from TriggerClearBgp; confirm in the base class.
    """

    # Two requirement groups (info session state + routes_per_peer data);
    # 'all_keys': True ties the captured keys of both groups together.
    # Counters that legitimately change on a clear (msg counts, up_down,
    # table version) are excluded from the snapshot comparison.
    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp': {
                                        'requirements': [ \
                                            [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                              '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                              'address_family','(?P<af>(vpnv4 unicast).*)',
                                              'session_state', 'established']],
                                            [['routes_per_peer', 'instance', 'default',\
                                              'vrf', '(?P<vrf>.*)','neighbor','(?P<neighbor>.*)',\
                                              'address_family', '(?P<af>(vpnv4 unicast).*)','(.*)']]],
                                        'all_keys': True,
                                        'kwargs': {'attributes': ['routes_per_peer','info']},
                                        'exclude': exclude + ['msg_sent','msg_rcvd','up_down','tbl_ver']}},
                      num_values={'vrf': 'all','neighbor': 'all', 'af': 'all'})

    # Arguments for verify callable: check the neighbor 'up_down' value
    # for every matched vpnv4 unicast peer
    verify_func_args = {
        'r_obj': [
            R([
                'routes_per_peer', 'instance', 'default', 'vrf',
                '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                'address_family', '(?P<af>vpnv4 unicast.*)', 'up_down',
                '(.*)'
            ])
        ]
    }
def _get_msdp_device(vrf):
    '''Get device which has msdp from learned LTS in common_setup

       Can be controlled via sections parameters which is provided by the
       triggers/verification datafile

       Args:
           Mandatory:
               vrf (`str`) : vrf information that want the feature on

       Returns:
           list of `R` requirement objects to find an established msdp
           peer in the LTS

       Raises:
           None
    '''
    # check if uut has msdp feature; any vrf is acceptable when none given
    if not vrf:
        vrf = '(?P<vrf>.*)'

    requirement = [
        'ops.msdp.msdp.Msdp', '(?P<dev>.*)', 'info', 'vrf', vrf, 'peer',
        '(?P<peer>.*)', 'session_state', 'established'
    ]
    return [R(requirement)]
def get_ospf_router_id(device, vrf, address_family, instance):
    """ Returns router-id for ospf

        Args:
            device ('obj'): device to run on
            vrf ('str'): vrf name
            address_family ('str'): address family
            instance ('str'): instance value

        Returns:
            str: single router id
            None: if empty

        Raises:
            None
    """
    try:
        parsed = device.parse("show ip protocols")
    except SchemaEmptyParserError:
        return None

    requirement = R([
        'protocols', 'ospf', 'vrf', vrf, 'address_family', address_family,
        'instance', instance, 'router_id', '(?P<router_id>.*)'
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups.pop()['router_id']
def _modify_ops_snapshot(self, original, current, path):
    # Confirm the path (minus its leaf value) exists in the original
    # snapshot, then copy the original leaf value into 'current'.
    wildcard = R(path[:-1] + ['(.*)'])
    matched = find([original], wildcard, filter_=False)
    if not matched:
        raise Exception("'{p}' does not exists on original "
                        "snapshot".format(p=path))
    self._modify_value(current, path[:-1], matched[0][0])
def check_issu_state(cls, device, slot, expected_state, attempt=3, sleep=5):
    ''' Check if the ISSU state is in the expected state

        Args:
            device (`obj`): Device Object.
            expected_state (`str`): Acceptable ISSU states are:
                                    - loadversion
                                    - runversion
                                    - acceptversion
                                    - commitversion
            slot (`str`): Slot for which we need to check ISSU state
            attempt (`int`): Attempt numbers when learn the feature.
            sleep (`int`): The sleep time.

        Returns:
            None

        Raises:
            AssertionError: 'expected_state' is not as expected
            Exception: Cannot parse 'show issu state detail' output
                       No output form 'show issu state detail'
                       Unable to execute 'show issu state detail'

        Example:
            >>> check_issu_state(device=uut, slot='R1',
                                 expected_state='commitversion')
    '''
    assert expected_state in [
        'loadversion', 'runversion', 'acceptversion', 'commitversion'
    ]
    lookup = Lookup.from_device(device)

    for i in range(attempt):
        try:
            issu_dict = lookup.parser.show_issu.\
                ShowIssuStateDetail(device=device).parse()
            rs = R(['slot', slot, 'last_operation', expected_state])
            ret = find([issu_dict], rs, filter_=False, all_keys=True)
            if ret:
                break
        except SchemaEmptyParserError as e:
            # BUGFIX: Exception() takes no keyword arguments — the previous
            # 'from_exception=e' raised a TypeError instead of the intended
            # error. Chain with 'raise ... from e' instead.
            raise Exception(
                "No output or unable to parse 'show issu state "
                "detail'") from e
        except Exception as e:
            # BUGFIX: same 'from_exception=' keyword issue as above
            raise Exception(
                "Unable to execute 'show issu state detail'") from e
        time.sleep(sleep)
    else:
        # Loop exhausted without break: state never reached expected_state
        raise AssertionError("FAIL: ISSU state not '{}' - this is "
                             "unexpected".format(expected_state))
def _modify_ops_snapshot(original, current, path):
    # Confirm the path (minus its leaf value) exists in the original
    # snapshot, then copy the original leaf value into 'current'.
    wildcard = R(path[:-1] + ['(.*)'])
    matched = find([original], wildcard, filter_=False)
    if not matched:
        raise ValueError(
            "'{p}' does not exist on original snapshot "
            "as per the original trigger requirement".format(p=path))
    _modify_value(current, path[:-1], matched[0][0])
def check_parsed_key(self, key, output, step):
    """Verify that the given key path exists in the parsed output."""
    key_path = str_to_list(key)
    with step.start("Verify that '{k}' is in the "
                    "output".format(k=key)) as step:
        requirement = R(list(key_path))
        matched = find([output], requirement, filter_=False, all_keys=True)
        if matched:
            log.info("Found {f}".format(f=matched))
        else:
            step.failed("Could not find '{k}'".format(k=key))
def check_feature_status(cls, device, expect, feature_name, abstract, attempt=3, sleep=5):
    ''' Check if the feature is disabled/enabled

        Args:
            device (`obj`): Device Object.
            abstract (`obj`): Abstract Lookup Object.
            expect (`str`): Feature status.
                            Only accept 'disabled' and 'enabled'
            feature_name (`str`): Feature name.
            sleep (`int`): The sleep time between attempts.
            attempt (`int`): Attempt numbers when learn the feature.

        Returns:
            None

        Raises:
            AssertionError: 'expect' is not 'disabled' or 'enabled'
                            Or the status is not same as expect value
            SyntaxError: Cannot parse show feature output

        Example:
            >>> check_feature_status(device=uut, expect='disabled',
                                     feature_name='bgp',abstract=abstract)
    '''
    # Reject anything other than the two supported status values early
    assert expect in ['disabled', 'enabled']
    for i in range(attempt):
        try:
            ret = abstract.parser.show_feature.ShowFeature(device=device)
            ret = ret.parse()
        except Exception as e:
            raise SyntaxError("Cannot parse command 'show "
                              "feature'") from e
        # Any instance of the feature reporting the expected state counts
        ret = find([ret], R([
            'feature', feature_name, 'instance', '(.*)', 'state', expect
        ]),
                   filter_=False)
        if ret:
            break
        time.sleep(sleep)
    else:
        # Loop exhausted without break: feature never reached 'expect'
        raise AssertionError('{n} is failed to {s}'.format(n=feature_name,
                                                           s=expect))
def get_ospf_interfaces_with_neighbor(ops, neighbor):
    '''Get OSPF interfaces by given neighbor'''
    # find the neighbors on uut connected to the helper device
    requirement = [
        'info', 'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)',
        'instance', '(?P<instance>.*)', 'areas', '(?P<areas>.*)',
        'interfaces', '(?P<interfaces>.*)', 'neighbors', neighbor,
        '(?P<neighbors_info>.*)'
    ]
    matched = find([ops], R(requirement), filter_=False)
    return GroupKeys.group_keys(ret_num={}, source=matched,
                                reqs=[requirement])
def get_hardware_rp_slot(device, state="standby", max_time=90,
                         check_interval=30):
    """ Get RP slot from device

        Args:
            device (`obj`): Device object
            state (`str`): RP state
            max_time (`int`): max wait time
            check_interval (`int`): check interval

        Returns:
            result (`str`): RP slot in required state
            None

        Raises:
            None
    """
    log.info("Finding {st} RP on device {dev}".format(st=state,
                                                      dev=device.name))
    reqs = R([
        "slot",
        "(?P<slot>.*)",
        "(?P<type>.*)",
        "(?P<name>.*)",
        "state",
        "(?P<state>.*)",
    ])
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse("show platform")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        found = find([out], reqs, filter_=False, all_keys=True)
        # ROBUSTNESS: guard the empty find() result before grouping keys,
        # consistent with the other 'show platform' helpers in this module
        if not found:
            timeout.sleep()
            continue
        keys = GroupKeys.group_keys(reqs=reqs.args,
                                    ret_num={},
                                    source=found,
                                    all_keys=True)
        for key in keys:
            # RP slots contain 'R' in the slot name (e.g. 'R0', 'R1')
            if "R" in key["slot"] and state in key["state"]:
                log.info(
                    "Found {st} RP {key[name]} on slot {key[slot]}".format(
                        st=state, key=key))
                return key["slot"]
        timeout.sleep()
    return None
def get_hardware_sfp_slot_dict(device, sfp_descr=".*"):
    """ Get SFP slot dict

        Args:
            device (`obj`): Device object
            sfp_descr (`str`): SFP descr

        Returns:
            sfp_slot_dict (`dict`): SFP slot dict
                example: {
                    '1/1/6':{'slot': '1',
                             'subslot': '1 transceiver 6',
                             'lc': 'ASR1000-SIP10',
                             'pid': 'SFP-GE-S',
                             'descr': 'GE SX'}}

        Raises:
            None
    """
    log.info("Getting inventory on {}".format(device.name))
    sfp_slot_dict = {}
    try:
        out = device.parse("show inventory")
    except SchemaEmptyParserError:
        # BUGFIX: previously returned an empty *list* here while every
        # other path returns a dict; keep the documented dict contract
        return sfp_slot_dict

    reqs = R([
        "slot",
        "(?P<slot>.*)",
        "lc",
        "(?P<lc>.*)",
        "subslot",
        "(?P<subslot>.*)",
        "(?P<pid>.*)",
        "descr",
        "(?P<descr>" + sfp_descr + ")",
    ])
    found = find([out], reqs, filter_=False, all_keys=True)
    if not found:
        return sfp_slot_dict

    keys = GroupKeys.group_keys(reqs=reqs.args,
                                ret_num={},
                                source=found,
                                all_keys=True)
    # Collapse subslot text like '1 transceiver 6' into '1/6' so the
    # final key looks like '<slot>/<subslot>/<port>', e.g. '1/1/6'
    p = re.compile(r"(?<=\d)( +\w+ )(?=\d)")
    for sfp in keys:
        slot = sfp["slot"] + "/" + re.sub(p, "/", sfp["subslot"])
        sfp_slot_dict.update({slot: sfp})
    return sfp_slot_dict
def _verify_find(self, ops, requirements, missing=False, all_keys=False,
                 **kwargs):
    '''Verify the ops response to the requirements'''
    if not requirements:
        return
    r_objs = [R(req) for req in requirements]
    matched = find([ops], *r_objs, filter_=False, all_keys=all_keys)

    if missing:
        # Requirements are expected to be absent from the ops snapshot
        if matched:
            raise Exception("'{req}' exists in "
                            "'{o}' and it should not "
                            "exists".format(req=requirements, o=ops))
    elif not matched:
        # Requirements are expected to be present but were not found
        raise Exception("'{req}' does not exists in "
                        "'{o}'".format(req=requirements, o=ops))
def _verify(self, ops, pre_time, uut, **kwargs):
    '''Verify the post-clear ops snapshot, then (if the trigger defines a
       'verify' callable) populate its arguments and run it once per
       requirement path.

       Args:
           ops: ops snapshot taken after the clear operation
           pre_time: epoch time captured before the clear
           uut: device under test
    '''
    # See if it can be verified
    self.mapping._verify_same(ops=ops, **kwargs)
    # At the moment only support one for ops_obj but could be enhanced
    self.ops_obj = ops

    # If no verify attribute
    if not hasattr(self, 'verify'):
        return

    # in case of inherit, introduce local var
    # for holding class glob vars
    loc_args = self.verify_func_args.copy()

    # populate r_object path
    reqs = []
    if 'r_obj' in loc_args:
        for r in loc_args['r_obj']:
            reqs.extend(self.mapping._path_population(r.args, uut))

    # store the populate path back to self.verify_func_args as R object
    if reqs:
        loc_args['r_obj'] = []
        for req in reqs:
            loc_args['r_obj'].append(R(req))

    # diff the pre and post time to compare the uptime
    self.compare_time = int(time.time() - pre_time)

    # update the mapping extra_args with variables
    # (each attribute named in mapping_extra_args is read off 'self')
    try:
        for key, value in self.mapping_extra_args.items():
            loc_args[key] = getattr(self, value)
    except Exception as e:
        # relies on key/value leaking out of the for loop above
        self.errored('Failed to get key {k} value {v}'.format(k=key,
                                                              v=value),
                     from_exception=e)

    # compare the attributes that may changed as expected
    # (the verify callable is invoked with a single R object at a time)
    if 'r_obj' in loc_args:
        back_up = loc_args['r_obj'].copy()
        for r in back_up:
            loc_args['r_obj'] = [r]
            self.verify(**loc_args)
def is_type_10_opaque_area_link_states_originated(device):
    """ Verifies if Type 10 opaque area link states are originated
        from command 'show ip ospf database opaque-area self-originate'

        Args:
            device (`obj`): Device to be executed command

        Raises:
            None

        Returns:
            True
            False
    """
    try:
        parsed = device.parse(
            'show ip ospf database opaque-area self-originate')
    except SchemaEmptyParserError:
        return False

    requirement = R([
        'vrf', '(?P<vrf>.*)', 'address_family', '(?P<af>.*)', 'instance',
        '(?P<instance>.*)', 'areas', '(?P<areas>.*)', 'database',
        'lsa_types', '(?P<lsa_types>.*)', 'lsa_type', '(?P<lsa_type>.*)'
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return False

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    # LSA type 10 == area-local opaque LSA
    return groups.pop()['lsa_type'] == 10
def verify_clear_callable(ops, uut, pre_time, verify_func, mapping, **kwargs):
    '''Populate the trigger's verify_func_args and call 'verify_func' once
       per requirement path after a clear operation.

       Args:
           ops: ops snapshot taken after the clear
           uut: device under test
           pre_time: epoch time captured before the clear
           verify_func: callable to run for each populated R requirement
           mapping: Mapping object providing key/path population helpers
    '''
    # If no verify attribute
    if not kwargs.get('verify_func_args', None):
        return

    # in case of inherit, introduce local var
    # for holding class glob vars
    verify_func_args = kwargs['verify_func_args'].copy()

    # populate r_object path
    reqs = []
    if 'r_obj' in verify_func_args:
        reqs.extend(mapping._path_population(verify_func_args['r_obj'], uut))
    # store the populate path back to self.verify_func_args as R object
    extra_args = {}  # NOTE(review): appears unused in this function
    if reqs:
        verify_func_args['r_obj'] = []
        for req in reqs:
            verify_func_args['r_obj'].append(R(req))

    # diff the pre and post time to compare the uptime
    # + 1 is fuzzy time that may diff from routers timing and script
    compare_time = int(time.time() - pre_time + 1)

    # update the mapping extra_args with variables
    for key, value in verify_func_args.items():
        if isinstance(value, str):
            # get the value from the inital ops to compare
            if value.startswith('(?P'):
                value = mapping._populate_path([[value]], ops.device,
                                               mapping.keys)
                verify_func_args[key] = value[0][0]
            else:
                # resolves names like 'compare_time' against this
                # function's locals(); 0 is a legal resolved value
                if locals().get(value) or locals().get(value) == 0:
                    verify_func_args[key] = locals().get(value)
                else:
                    verify_func_args[key] = value

    # compare the attributes that may changed as expected
    # (verify_func is invoked with a single R object at a time)
    if 'r_obj' in verify_func_args:
        back_up = verify_func_args['r_obj'].copy()
        for r in back_up:
            verify_func_args['r_obj'] = [r]
            verify_func(**verify_func_args)
def verify_ntp_synchronized_alias(self, device, alias=None):
    '''Verify that NTP is synchronized on this device

       verify NTP is synchronized on device "<device>"
    '''
    ops = self.genie_ops_on_device_alias('ntp', device, alias)
    requirement = R([
        'info', 'clock_state', 'system_status', 'associations_address',
        '(?P<neighbors>.*)'
    ])
    if not find([ops], requirement, filter_=False, all_keys=True):
        self.builtin.fail(
            "{} does not have NTP synchronized".format(device))
def is_issu_in_state(device, slot, expected_state, max_time=1200,
                     interval=30):
    """ Verify if ISSU is in state for a specific slot

        Args:
            device ('obj'): Device object
            slot ('str'): Slot for which we need to check ISSU state
            expected_state ('str'): Acceptable ISSU states are:
                - loadversion
                - runversion
                - acceptversion
                - commitversion
            max_time ('int'): Max time checking issu state
            interval ('int'): Interval checking

        Raises:
            None

        Returns:
            True
            False
    """
    assert expected_state in [
        "loadversion",
        "runversion",
        "acceptversion",
        "commitversion",
    ]
    requirement = R(["slot", slot, "last_operation", expected_state])
    timeout = Timeout(max_time=max_time, interval=interval)

    while timeout.iterate():
        try:
            parsed = device.parse("show issu state detail")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        if find([parsed], requirement, filter_=False, all_keys=True):
            return True
        timeout.sleep()
    return False
def get_ntp_outgoing_interface(device, system_peer):
    """ Get the interface which is used to communicate with NTP system peer

        Args:
            device (`obj`): Device object
            system_peer (`str`): System peer ip

        Returns:
            interface (`str`): Interface name
    """
    try:
        parsed = device.parse("show ip cef {}".format(system_peer))
    except SchemaEmptyParserError:
        log.error("Command 'show ip cef {}' "
                  "did not return any results".format(system_peer))
        return None

    requirement = R([
        "vrf",
        "(?P<vrf>.*)",
        "address_family",
        "(?P<af>.*)",
        "prefix",
        "(?P<ip>.*)",
        "nexthop",
        "(?P<nexthop>.*)",
        "outgoing_interface",
        "(?P<intf>.*)",
        "(?:.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error("No interface was found")
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups[0]["intf"]
def get_ready_rps_lcs(ops):
    '''Get ready RPs/LCs from platform ops'''
    # Match RP slots in an active/standby/Ready state, and 'oc' slots in
    # any of the accepted states
    requirements = [
        [
            'slot', 'rp', '(?P<rp>.*)', 'state',
            '(?P<state>ok, active|ok, standby|Ready)'
        ],
        [
            'slot', 'oc', '(?P<oc>.*)', 'state',
            '(?P<oc_state>ok, active|ok, standby|ok|ps, fail)'
        ]
    ]
    r_objs = [R(req) for req in requirements]
    matched = find([ops], *r_objs, filter_=False, all_keys=True)
    return GroupKeys.group_keys(ret_num={}, source=matched,
                                reqs=requirements, all_keys=True)
def get_ospf_router_id(device,
                       vrf='(.*)',
                       address_family='(.*)',
                       instance='(.*)'):
    """ Get ospf router-id - show ip protocols

        Args:
            device ('obj'): device to run on
            vrf ('str'): vrf name
            address_family ('str'): address family
            instance ('str'): instance value

        Returns:
            str: single router id
            None: if empty

        Raises:
            None
    """
    log.info("Getting OSPF router-id")
    cmd = 'show ip protocols'

    try:
        parsed = device.parse(cmd)
    except Exception as e:
        log.error("Failed to parse '{}':\n{}".format(cmd, e))
        return None

    requirement = R(['protocols', 'ospf', 'vrf', vrf, 'address_family',
                     address_family, 'instance', instance, 'router_id',
                     '(?P<router_id>.*)'])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        log.error("No ospf router id was found")
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups.pop()['router_id']
class TriggerClearIpOspfNeighborVrfAll(TriggerClear):
    """Clear trigger: learns the Ospf ops model for all vrfs/instances/
    areas/interfaces/neighbors, performs the clear, then verifies each
    neighbor's 'last_state_change' with CompareUptime.compare_uptime."""

    # Argument with dynamic value for verify callable
    # As verify callable can be re-used in multiple triggers
    # with different variable names. This dictionary is used to map
    # dynamic argument name to actual script argument name
    # <expected argument_name for callable>: <script argument name>
    mapping_extra_args = {
        'ops': 'ops_obj',
        'relation': 'sign',
        'threshold_time': 'compare_time'
    }

    mapping = Mapping(requirements={'ops.ospf.ospf.Ospf':{
                                        'requirements': [\
                                            ['info', 'vrf', '(?P<vrf>.*)',
                                             'address_family','(?P<af>.*)',
                                             'instance', '(?P<instance>.*)', 'areas','(?P<area>.*)',
                                             'interfaces', '(?P<intf>.*)', 'neighbors',
                                             '(?P<neighbor>.*)','(.*)']],
                                        'all_keys': True,
                                        'kwargs': {'attributes':['info']},
                                        'exclude': exclude }},
                      num_values={'vrf':'all', 'instance':'all','neighbor':'all' ,
                                  'intf':'all', 'area': 'all'})

    # Verify callable definition
    verify = CompareUptime.compare_uptime

    # Arguments for verify callable
    # Must be dictionary, key is argument from verify callable,
    # value should be the value to pass into the callable
    verify_func_args = {
        'r_obj': [
            R([
                'info', 'vrf', '(?P<vrf>.*)', 'address_family',
                '(?P<af>.*)', 'instance', '(?P<instance>.*)', 'areas',
                # BUGFIX: was '(?P<area>).*' — the misplaced parenthesis
                # made the 'area' capture group always empty; fixed to
                # match the mapping requirement above
                '(?P<area>.*)', 'interfaces', '(?P<intf>.*)', 'neighbors',
                '(?P<neighbor>.*)', 'last_state_change', '(.*)'
            ])
        ]
    }
def is_platform_slot_in_state(device, slot, state="ok, active",
                              max_time=1200, interval=120):
    """ Verify if slot is in state

        Args:
            device ('obj'): Device object
            slot ('str'): Slot number
            state ('str'): State being checked
            max_time ('int'): Max time checking
            interval ('int'): Interval checking

        Return:
            True
            False

        Raises:
            None
    """
    log.info("Verifying state of slot {slot}".format(slot=slot))
    requirement = R(["slot", slot, "rp", "(?P<val2>.*)", "state", state])
    timeout = Timeout(max_time=max_time, interval=interval)

    while timeout.iterate():
        try:
            parsed = device.parse("show platform")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        if find([parsed], requirement, filter_=False, all_keys=True):
            log.info("Slot {slot} reached state '{state}'".format(
                slot=slot, state=state))
            return True
        timeout.sleep()
    return False
def get_interface_qlimit_bytes(device, interface):
    """ Get interface qlimit in bytes

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None
            qlimit_bytes (`int`): Interface qlimit_bytes

        Raises:
            None
    """
    try:
        parsed = device.parse(
            "show platform hardware qfp active infrastructure bqs "
            "queue output default interface {interface}".format(
                interface=interface))
    except SchemaEmptyParserError:
        return None

    requirement = R([
        interface,
        "index",
        "(?P<index>.*)",
        "software_control_info",
        "qlimit_bytes",
        "(?P<qlimit>.*)",
    ])
    matched = find([parsed], requirement, filter_=False, all_keys=True)
    if not matched:
        return None

    groups = GroupKeys.group_keys(reqs=requirement.args, ret_num={},
                                  source=matched, all_keys=True)
    return groups[0]["qlimit"]