Example #1
def verify_segment_routing_traffic_eng_policies(
        device,
        policy_name=None,
        expected_preference=None,
        expected_admin_status=None,
        expected_oper_status=None,
        expected_metric_type=None,
        expected_path_accumulated_metric=None,
        expected_path_status=None,
        expected_affinity_type=None,
        expected_affinities=None,
        expected_endpoint_ip=None,
        max_time=30,
        check_interval=10):
    """ Verifies configured traffic_eng policies have expected configurations

        Args:
            device ('obj'): Device to use
            policy_name ('str'): Policy name to verify. If not specified will verify all
            expected_admin_status ('str'): Expected admin status
            expected_oper_status ('str'): Expected operational status
            expected_metric_type ('str'): Expected metric type
            expected_path_accumulated_metric ('int'): Expected path accumulated metric
            expected_path_status ('str'): Expected path status
            expected_affinity_type ('str'): Expected affinity type
            expected_affinities ('list'): Expected affinities
            expected_preference ('int'): Expected candidate-path preference
            expected_endpoint_ip ('str'): Expected Endpoint IP
            max_time ('int'): Maximum amount of time to keep checking
            check_interval ('int'): How often to check

        Returns:
            True/False

        Raises:
            N/A
    """
    if (not expected_admin_status and not expected_oper_status
            and not expected_metric_type
            and not expected_path_accumulated_metric
            and not expected_path_status and not expected_affinity_type
            and not expected_affinities and not expected_endpoint_ip):
        log.info('Must provide at least one optional argument to verify')
        return False

    if policy_name:
        cmd = 'show segment-routing traffic-eng policy name {policy}'.format(
            policy=policy_name)
    else:
        cmd = 'show segment-routing traffic-eng policy all'

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse(cmd)
        except SchemaEmptyParserError:
            log.info('Parser output is empty')
            timeout.sleep()
            continue
        for policy in out:
            if expected_endpoint_ip:
                endpoint_ip = out[policy].get('end_point', None)
                if endpoint_ip != expected_endpoint_ip:
                    # Check other policy
                    continue
            admin = out[policy].get('status', {}).get('admin', '')
            if expected_admin_status and expected_admin_status != admin:
                log.info(
                    'Expected admin status is "{admin_status}" actual is "{admin}"'
                    .format(admin_status=expected_admin_status, admin=admin))
                break
            operational = out[policy].get('status',
                                          {}).get('operational',
                                                  {}).get('state', '')
            if expected_oper_status and expected_oper_status != operational:
                log.info(
                    'Expected operational status is "{operational_status}" actual is "{operational}"'
                    .format(operational_status=expected_oper_status,
                            operational=operational))
                break

            for preference in out[policy].get('candidate_paths',
                                              {}).get('preference', {}):
                if expected_preference and expected_preference != preference:
                    continue
                if out[policy]['candidate_paths']['preference'][
                        preference].get('path_type'):
                    path_type_dict = out[policy]['candidate_paths'][
                        'preference'][preference]['path_type']

                    if 'dynamic' in path_type_dict:
                        metric = path_type_dict['dynamic'].get(
                            'metric_type', '')
                        if expected_metric_type and expected_metric_type != metric:
                            log.info(
                                'Expected metric type for path {path} is "{expected}" actual is "{actual}"'
                                .format(expected=expected_metric_type,
                                        actual=metric,
                                        path=preference))
                            break

                        status = path_type_dict['dynamic'].get('status', '')
                        if expected_path_status and expected_path_status != status:
                            log.info(
                                'Expected status for path {path} is "{expected}" actual is "{actual}"'
                                .format(expected=expected_path_status,
                                        actual=status,
                                        path=preference))
                            break

                        accumulated_metric = path_type_dict['dynamic'].get(
                            'path_accumulated_metric', '')
                        if (expected_path_accumulated_metric
                                and isinstance(accumulated_metric, int)
                                and expected_path_accumulated_metric !=
                                accumulated_metric):
                            log.info(
                                'Expected accumulated metric for path {path} is "{expected}" actual is "{actual}"'
                                .format(
                                    expected=expected_path_accumulated_metric,
                                    actual=accumulated_metric,
                                    path=preference))
                            break

                    elif 'explicit' in path_type_dict:
                        for segment in path_type_dict['explicit'].get(
                                'segment_list', {}):
                            metric = path_type_dict['explicit'].get(
                                'segment_list',
                                {}).get(segment, {}).get('metric_type', '')

                            if expected_metric_type and expected_metric_type != metric:
                                log.info(
                                    'Expected metric type for path {path} is "{expected}" actual is "{actual}"'
                                    .format(expected=expected_metric_type,
                                            actual=metric,
                                            path=preference))
                                break

                            status = path_type_dict['explicit'].get(
                                'segment_list', {}).get(segment,
                                                        {}).get('status', '')
                            if expected_path_status and expected_path_status != status:
                                log.info(
                                    'Expected path status for path {path} is "{expected}" actual is "{actual}"'
                                    .format(expected=expected_path_status,
                                            actual=status,
                                            path=preference))
                                break

                        else:
                            continue
                        break

                    else:
                        log.info('Path type in parsed output is neither dynamic nor explicit.')
                        break

                if out[policy]['candidate_paths']['preference'][
                        preference].get('constraints'):
                    constraint_dict = out[policy]['candidate_paths'][
                        'preference'][preference]['constraints']

                    if expected_affinity_type and expected_affinity_type not in constraint_dict.get(
                            'affinity', {}):
                        log.info(
                            'Expected affinity_type for path {path} is "{expected}". Actual is "{actual}"'
                            .format(expected=expected_affinity_type,
                                    actual=list(constraint_dict.keys()),
                                    path=preference))
                        break

                    if expected_affinities:
                        for aff_type in constraint_dict.get('affinity', {}):
                            if not set(expected_affinities).issubset(
                                    constraint_dict['affinity'][aff_type]):
                                log.info(
                                    'Expected affinities "{expected}" for path {path} are not set'
                                    .format(expected=expected_affinities,
                                            path=preference))
                                break

                        else:
                            continue
                        break

            else:
                continue
            break

        else:
            return True

        timeout.sleep()

    return False
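
A minimal usage sketch, assuming a pyATS testbed file, device name, and policy name (all hypothetical) and that this API is reachable through the device.api dispatcher:

from genie.testbed import load

testbed = load('testbed.yaml')       # hypothetical testbed file
device = testbed.devices['PE1']      # hypothetical device name
device.connect()

# Verify the policy comes up administratively and operationally within 30 seconds
assert device.api.verify_segment_routing_traffic_eng_policies(
    policy_name='srte_c_100_ep_10.0.0.1',   # hypothetical policy name
    expected_admin_status='up',
    expected_oper_status='up',
    max_time=30,
    check_interval=10)
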
Example #2
def verify_ip_cef_nexthop_label(device,
                                ip,
                                expected_label=None,
                                vrf='default',
                                table=None,
                                max_time=30,
                                check_interval=10):
    """ Verify ip cef nexthop does (not) have expected label

        Args:
            device (`obj`): Device object
            ip (`str`): IP address
            expected_label (`str`): Expected label. None if no label expected
            vrf (`str`): Vrf name
            table (`str`): Not used on IOSXE
            max_time (`int`): Max time, default: 30
            check_interval (`int`): Check interval, default: 10
        Returns:
            result (`bool`): Verified result
        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)
    if vrf and vrf != 'default':
        cmd = 'show ip cef vrf {vrf} {ip} detail'.format(vrf=vrf, ip=ip)
    else:
        cmd = 'show ip cef {} detail'.format(ip)

    while timeout.iterate():
        try:
            out = device.parse(cmd)
        except Exception as e:
            log.error("Failed to parse '{}':\n{}".format(cmd, e))
            timeout.sleep()
            continue

        reqs = R([
            'vrf', vrf, 'address_family', '(.*)', 'prefix', '(.*)', 'nexthop',
            '(.*)', 'outgoing_interface', '(?P<interface>.*)',
            'outgoing_label', '(?P<outgoing_label>.*)'
        ])
        found = find([out], reqs, filter_=False, all_keys=True)

        if expected_label:
            if found:
                for item in found:
                    interface = item[1][-2]
                    label = ' '.join(item[0])
                    log.info(
                        "Found outgoing interface '{}' has outgoing label '{}', "
                        "expected to have label '{}'".format(
                            interface, label, expected_label))

                    if expected_label in label:
                        return True
            else:
                log.error("Failed to get outgoing label for '{}'".format(ip))
                timeout.sleep()
                continue
        else:
            if found:
                log.error("Found outgoing label for '{}', "
                          "but expected no label".format(ip))
                timeout.sleep()
                continue
            else:
                log.info(
                    "No outgoing label aftar the nexthop info for '{}'".format(
                        ip))
                return True

        timeout.sleep()

    return False
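
To illustrate how the R()/find() requirement walks the parsed output, here is a self-contained sketch against a hand-built dictionary; the nesting below is an assumption inferred from the requirement itself, not taken from a parser:

from pyats.utils.objects import find, R

# Hand-built stand-in for the parsed 'show ip cef <ip> detail' output (assumed schema)
out = {
    'vrf': {
        'default': {
            'address_family': {
                'ipv4': {
                    'prefix': {
                        '10.0.0.1/32': {
                            'nexthop': {
                                '10.1.1.2': {
                                    'outgoing_interface': {
                                        'GigabitEthernet2': {
                                            'outgoing_label': ['16002'],
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
    },
}

reqs = R(['vrf', 'default', 'address_family', '(.*)', 'prefix', '(.*)',
          'nexthop', '(.*)', 'outgoing_interface', '(?P<interface>.*)',
          'outgoing_label', '(?P<outgoing_label>.*)'])

# Each match carries the matched value and its key path; the function above
# reads the interface from the path and joins the label list into a string.
for item in find([out], reqs, filter_=False, all_keys=True):
    print(item[1][-2], ' '.join(item[0]))    # GigabitEthernet2 16002
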
Example #3
def verify_ping(device,
                address=None,
                ttl=None,
                tos=None,
                size=None,
                wait=None,
                mpls_rsvp=None,
                loss_rate=0,
                ping_size=None,
                count=None,
                source=None,
                rapid=False,
                do_not_fragment=False,
                max_time=30,
                check_interval=10):
    """ Verify ping loss rate on ip address provided

        Args:
            device ('obj'): Device object
            address ('str'): Address value
            size ('str'): Size value for ping command
            tos ('str'): tos value for ping command
            ping_size ('str'): data bytes expected
            ttl ('int'): ttl value passed in command
            wait ('int'): wait value passed in command
            mpls_rsvp ('str'): MPLS RSVP value
            loss_rate ('int'): Expected loss rate value
            count ('int'): Count value for ping command
            source ('str'): Source IP address, default: None
            rapid ('bool'): Appears in command or not, default: False
            do_not_fragment ('bool'): Appears in command or not, default: False
            max_time (`int`): Max time, default: 30
            check_interval (`int`): Check interval, default: 10
        Returns:
            Boolean
        Raises:
            None
    """
    timeout = Timeout(max_time, check_interval)

    while timeout.iterate():
        if tos:
            cmd = 'ping {address} source {source} size {size} count {count} tos {tos} rapid'.format(
                address=address,
                source=source,
                size=size,
                count=count,
                tos=tos)
        elif do_not_fragment and ping_size and count:
            cmd = 'ping {address} size {ping_size} count {count} do-not-fragment'.format(
                address=address,
                ping_size=ping_size,
                count=count,
            )
        elif address and count and source and rapid:
            cmd = 'ping {address} source {source} count {count} rapid'.format(
                address=address, source=source, count=count)
        elif address and count and source:
            cmd = 'ping {address} source {source} count {count}'.format(
                address=address, source=source, count=count)
        elif address and count and not ttl and not wait:
            cmd = 'ping {address} count {count}'.format(address=address,
                                                        count=count)
        elif address and count and ttl and wait:
            cmd = 'ping {address} ttl {ttl} count {count} wait {wait}'.format(
                address=address, ttl=ttl, count=count, wait=wait)
        elif not address and mpls_rsvp:
            cmd = 'ping mpls rsvp {rsvp}'.format(rsvp=mpls_rsvp)
        elif address:
            cmd = 'ping {address}'.format(address=address)
        else:
            log.info('Need to pass address as argument')
            return False
        try:
            out = device.parse(cmd)
        except SchemaEmptyParserError as e:
            timeout.sleep()
            continue
        # Example dictionary structure:
        #     {
        #         "ping": {
        #             "address": "10.189.5.94",
        #             "data-bytes": 56,
        #             "result": [
        #                 {
        #                     "bytes": 64,
        #                     "from": "10.189.5.94",
        #                     "icmp-seq": 0,
        #                     "time": "2.261",
        #                     "ttl": 62
        #                 },
        #             ],
        #             "source": "10.189.5.94",
        #             "statistics": {
        #                 "loss-rate": 0,
        #                 "received": 1,
        #                 "round-trip": {
        #                     "avg": "2.175",
        #                     "max": "2.399",
        #                     "min": "1.823",
        #                     "stddev": "0.191"
        #                 },
        #                 "send": 1
        #             }
        #         }
        #     }
        loss_rate_found = Dq(out).get_values("loss-rate", 0)

        if size:
            if (loss_rate_found == loss_rate
                    and Dq(out).get_values("data-bytes", 0) == int(size)):
                return True
        elif loss_rate_found == loss_rate:
            return True
        timeout.sleep()
    return False
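
A minimal usage sketch, assuming a connected pyATS device object and that the API is reachable via device.api (the addresses below are only illustrative):

# 'device' is assumed to be a connected pyATS device object
ok = device.api.verify_ping(address='10.189.5.94',
                            source='10.189.5.93',
                            count=5,
                            loss_rate=0,
                            max_time=30,
                            check_interval=10)
if not ok:
    raise Exception('ping verification failed')
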
Example #4
def verify_bgp_peer_prefixes_match(
    device: object,
    peer_address: str,
    active: bool = True,
    received: bool = True,
    accepted: bool = True,
    max_time: int = 60,
    check_interval: int = 10
) -> bool:
    """Verifies whether BGP peer prefixes match or not

    Args:
        device (object): Device object
        peer_address (str): Peer address
        active (bool, optional): Check active prefix. Defaults to True.
        received (bool, optional): Check received prefix. Defaults to True.
        accepted (bool, optional): Check accepted prefix. Defaults to True.
        max_time (int, optional): Maximum timeout time. Defaults to 60.
        check_interval (int, optional): Check interval. Defaults to 10.

    Returns:
        bool: True/False
    """

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse('show bgp neighbor')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # Example dict
        # "bgp-information": {
        #     "bgp-peer": [
        #         {
        #             "bgp-rib": [
        #                 {
        #                     "accepted-prefix-count": str,
        #                     "active-prefix-count": str,
        #                     "received-prefix-count": str,

        for peer in out.q.get_values('bgp-peer'):
            peer_interface_ = peer.get('peer-address')

            # 20.0.0.3+63208
            if '+' in peer_interface_:
                peer_interface_ = peer_interface_.split('+')[0]

            # 20.0.0.2/24
            if '/' in peer_address:
                peer_address = peer_address.split('/')[0]

            if peer_interface_ != peer_address:
                continue

            prefix_list_ = list()

            if active:
                prefix_list_.append(Dq(peer).get_values('active-prefix-count', 0))
            if accepted:
                prefix_list_.append(Dq(peer).get_values('accepted-prefix-count', 0))
            if received:
                prefix_list_.append(Dq(peer).get_values('received-prefix-count', 0))

            if len(set(prefix_list_)) == 1:
                return True

        timeout.sleep()
    return False
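
The final check reduces to set equality over the selected counters; a tiny standard-library sketch of that comparison (the values are illustrative):

# Prefix counts pulled from one bgp-peer entry (illustrative values)
active_prefix_count = '10'
accepted_prefix_count = '10'
received_prefix_count = '10'

prefix_list = [active_prefix_count, accepted_prefix_count, received_prefix_count]

# All selected counters agree exactly when the set collapses to one element
assert len(set(prefix_list)) == 1
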
Example #5
def verify_vpn_route_targets(
    device,
    route_targets,
    rt_type,
    address_family,
    vrf=None,
    route_distinguisher=None,
    max_time=15,
    check_interval=5,
):
    """ Verify route target are imported, exported or both

        Args:
            device ('obj'): Device object
            route_targets ('list'): list of route targets to check
                ex.)
                    [
                        '65109:4005',
                        '65109:4006'
                    ]
            rt_type ('str'): route target type
                ex.) rt_type = 'import' OR
                     rt_type = 'export' OR
                     rt_type = 'both'
            address_family ('str'): address family to check
            vrf ('str'): vrf name
            route_distinguisher ('str'): route distinguisher value
            max_time (int): Maximum wait time for the trigger,
                            in second. Default: 15
            check_interval (int): Wait time between iterations when looping is needed,
                            in second. Default: 5
            
        Returns:
            True
            False
    """

    # Check that the provided route targets exist on the device
    if route_targets:
        if not isinstance(route_targets, list):
            log.error("route_targets must be list")
            return False

    result = True

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        result = True
        for rt in route_targets:
            current_rt = None
            try:
                current_rt = get_vrf_route_targets(
                    device=device,
                    rt_type=rt_type,
                    address_family=address_family,
                    vrf=vrf,
                    route_distinguisher=route_distinguisher,
                )
            except Exception as e:
                log.error(str(e))

            if not current_rt:
                log.info(
                    "Route target of type {} not found for VRF {} on device {}"
                    .format(rt_type, vrf, device.name))
                result = False
        if result:
            return result
        timeout.sleep()
    return result
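
A minimal usage sketch, assuming a connected pyATS device object; the VRF name is hypothetical and the route targets are taken from the docstring example:

# 'device' is assumed to be a connected pyATS device object
assert device.api.verify_vpn_route_targets(
    route_targets=['65109:4005', '65109:4006'],
    rt_type='both',
    address_family='ipv4',
    vrf='VRF1',            # hypothetical VRF name
    max_time=15,
    check_interval=5)
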
Example #6
def verify_bgp_error_message(device, interface, expected_message, expected_error_message, max_time=60, check_interval=10):
    """
    Verify bgp last error

    Args:
        device('obj'): device to use
        interface('str'): Peer interface   
        expected_message('str'): Expected message
        expected_error_message('str') : Expected error message
        max_time ('int', optional): Maximum time to keep checking. Default to 60
        check_interval ('int', optional): How often to check. Default to 10

    Returns:  
        Boolean       
    Raises:
        N/A    
    """
    timeout = Timeout(max_time, check_interval)

    # show commands: "show bgp neighbor"

    # {'bgp-information': {'bgp-peer': [{
    #                                 "flap-count": '0',
    #                                 "peer-address": '20.0.0.3+63208',
    #                                 "peer-restart-flags-received": 'Notification',
    #                                 "bgp-error": [
    #                                 {
    #                                     "name": "Hold Timer Expired " "Error",
    #                                     "receive-count": "40",
    #                                     "send-count": "27",
    #                                 },
    #                                 {"name": "Cease", "receive-count": "0", "send-count": "16"},
    #                             ],
    #                                   .
    #                                   .
    #                                   .

    while timeout.iterate():
        try:
            out = device.parse('show bgp neighbor')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        peers_list = out.q.get_values("bgp-peer")

        for peer in peers_list:

            peer_interface = peer.get('peer-address')

            # 20.0.0.3+63208
            if '+' in peer_interface:
                peer_interface = peer_interface.split('+')[0]

            notification_message = Dq(peer).get_values(
                "peer-restart-flags-received", 0)

            error_message_dict = Dq(peer).get_values("bgp-error", 0)

            if len(notification_message) > 0 and error_message_dict:
                if peer_interface == interface and notification_message == expected_message and error_message_dict.get('name') == expected_error_message:
                    return True

        timeout.sleep()
    return False
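
The Dq lookups above can be exercised against a hand-built peer entry; a small sketch mirroring how the function consumes them (the import path is assumed and the data is lifted from the commented example output):

from genie.utils import Dq   # import path assumed

peer = {
    "peer-address": "20.0.0.3+63208",
    "peer-restart-flags-received": "Notification",
    "bgp-error": [
        {"name": "Hold Timer Expired Error", "receive-count": "40", "send-count": "27"},
        {"name": "Cease", "receive-count": "0", "send-count": "16"},
    ],
}

# get_values(key, 0) returns the first match found for the key
notification_message = Dq(peer).get_values("peer-restart-flags-received", 0)
error_message_dict = Dq(peer).get_values("bgp-error", 0)

print(notification_message)             # Notification
print(error_message_dict.get('name'))   # Hold Timer Expired Error
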
Example #7
def verify_bgp_updown_time(device, given_seconds, invert=False, max_time=60, check_interval=10):
    """
    Verify the up/down time of all neighbors is less than given_seconds

    Args:
        device('obj'): device to use
        given_seconds('int') : Given time in seconds
        invert('bool', optional): Verify up/down time is greater than or equal to given_seconds instead. Default to False
        max_time ('int', optional): Maximum time to keep checking. Default to 60 seconds
        check_interval ('int', optional): How often to check. Default to 10 seconds

    Returns:  
        Boolean       
    Raises:
        N/A    
    """
    op = operator.ge
    if invert:
        op = operator.lt

    timeout = Timeout(max_time, check_interval)

    # show commands: "show bgp summary"

    while timeout.iterate():
        try:
            out = device.parse('show bgp summary')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # 'bgp-peer': [{'bgp-rib': [{'accepted-prefix-count': '0',
        #                             'active-prefix-count': '0',
        #                             'name': 'inet.0',
        #                             'received-prefix-count': '0',
        #                             'suppressed-prefix-count': '0'}],
        #             'elapsed-time': {'#text': '1:01'}, <-------------
        #             'flap-count': '1',
        
        # time_list: ['1:01', '57']
        time_list = out.q.get_values('#text')

        seconds_per_hour = 60*60
        seconds_per_day = 24*seconds_per_hour
        seconds_per_week = 7*seconds_per_day

        for elapsed_time in time_list:

            # '1:01'
            p_min = re.compile(r'^\d+\:\d+$')

            # '11:11:11'
            p_hour = re.compile(r'^\d+\:\d+\:\d+$')

            # "29w5d 22:42:36"
            p_day = re.compile(r'^(?P<week>\d+)w(?P<day>\d+)d\s+(?P<hour>\d+)\:(?P<minute>\d+)\:(?P<second>\d+)$')
            p_day_match = p_day.match(elapsed_time)

            if p_min.match(elapsed_time):
                min_sec_list = elapsed_time.split(':')
                elapsed_time = int(min_sec_list[0])*60 + int(min_sec_list[1])

            
            elif p_hour.match(elapsed_time):
                hour_min_sec_list = elapsed_time.split(':')
                elapsed_time = int(hour_min_sec_list[0])*seconds_per_hour+\
                    int(hour_min_sec_list[1])*60+\
                    int(hour_min_sec_list[2])

            
            elif p_day_match:
                group = p_day_match.groupdict()
                elapsed_time = int(group['week'])*seconds_per_week+\
                    int(group['day'])*seconds_per_day+\
                    int(group['hour'])*seconds_per_hour+\
                    int(group['minute'])*60+\
                    int(group['second'])
            
            else:
                elapsed_time = int(elapsed_time)

            # compare current up/dwn time with given time
            if op(elapsed_time, given_seconds):
                return False

        timeout.sleep()
    return True
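
The elapsed-time parsing is the core of this verification; a self-contained sketch of the same conversion using only the standard library:

import re

def updown_to_seconds(elapsed):
    """Convert a 'show bgp summary' Up/Dwn string to seconds (same rules as above)."""
    m = re.match(r'^(?P<week>\d+)w(?P<day>\d+)d\s+(?P<hour>\d+):(?P<minute>\d+):(?P<second>\d+)$',
                 elapsed)
    if m:
        g = {k: int(v) for k, v in m.groupdict().items()}
        return (((g['week'] * 7 + g['day']) * 24 + g['hour']) * 60 + g['minute']) * 60 + g['second']
    parts = [int(p) for p in elapsed.split(':')]
    if len(parts) == 3:        # 'hh:mm:ss'
        return parts[0] * 3600 + parts[1] * 60 + parts[2]
    if len(parts) == 2:        # 'mm:ss'
        return parts[0] * 60 + parts[1]
    return parts[0]            # plain seconds

assert updown_to_seconds('1:01') == 61
assert updown_to_seconds('29w5d 22:42:36') == 29 * 604800 + 5 * 86400 + 22 * 3600 + 42 * 60 + 36
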
Example #8
def verify_no_ospf_neigbor_output(device,
                                  expected_interface=None,
                                  extensive=False,
                                  max_time=60,
                                  check_interval=10):
    """ Verifies ospf neighbor doesn't exists

        Args:
            device ('obj'): device to use
            expected_interface ('str'): Interface being searched for
            extensive ('bool'): If ospf command is extensive
            max_time ('int'): Maximum time to keep checking
            check_interval ('int'): How often to check

        Returns:
            True/False

        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)
    exists = False

    while timeout.iterate():

        if extensive:
            try:
                output = device.parse('show ospf neighbor extensive')
            except SchemaEmptyParserError:
                output = None
                timeout.sleep()
                continue

        else:
            try:
                output = device.parse('show ospf neighbor')
            except SchemaEmptyParserError:
                output = None
                timeout.sleep()
                continue

        for neighbor in Dq(output).get_values('ospf-neighbor'):
            if neighbor.get('interface-name') == expected_interface:
                exists = True
                break
            else:
                exists = False

        timeout.sleep()

    if not output or not exists:
        return True
    else:
        return False
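
A minimal usage sketch, assuming a connected pyATS device object and an illustrative interface name (the call keeps the original spelling of the function identifier):

# 'device' is assumed to be a connected pyATS device object
assert device.api.verify_no_ospf_neigbor_output(
    expected_interface='ge-0/0/0.0',   # illustrative interface
    extensive=False,
    max_time=60,
    check_interval=10)
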
Example #9
def verify_ospf_interface_cost(device,
                               interface,
                               expected_cost,
                               cost_type='ospf',
                               instance=None,
                               area=None,
                               max_time=60,
                               check_interval=15):
    """ Verifies ospf cost on interface

        Args:
            device ('obj'): device to use
            interface ('str'): Interface to use
            cost_type ('str'): Cost type configured
            expected_cost ('int'): Expected configured cost
            instance ('str'): Instance to use
            area ('str'): Area to use
            max_time ('int'): Maximum time to keep checking
            check_interval ('int'): How often to check

        Returns:
            True/False

        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)

    if 'ospf' in cost_type.lower():
        while timeout.iterate():
            try:
                out = device.parse(
                    'show ospf interface {interface} detail'.format(
                        interface=interface))
            except SchemaEmptyParserError:
                log.info('Parser is empty')
                timeout.sleep()
                continue

            reqs = R([
                'instance', '{}'.format(instance if instance else 'master'),
                'areas', '{}'.format(area if area else '(.*)'), 'interfaces',
                interface, 'cost', '(?P<cost>.*)'
            ])

            found = find([out], reqs, filter_=False, all_keys=True)
            if found:
                keys = GroupKeys.group_keys(
                    reqs=reqs.args, ret_num={}, source=found, all_keys=True)
                if 'cost' in keys[0] and int(expected_cost) == int(
                        keys[0]['cost']):
                    return True

            timeout.sleep()
        return False
    elif 'te' in cost_type.lower():
        while timeout.iterate():
            try:
                out = device.parse('show interfaces {interface} terse'.format(
                    interface=interface))
            except SchemaEmptyParserError:
                log.info('Parser is empty')
                timeout.sleep()
                continue

            reqs = R([
                interface, 'protocol', 'inet', '(.*)', 'local', '(?P<local>.*)'
            ])

            found = find([out], reqs, filter_=False, all_keys=True)
            if found:
                keys = GroupKeys.group_keys(
                    reqs=reqs.args, ret_num={}, source=found, all_keys=True)
                local_address = keys[0].get('local')

                try:
                    out = device.parse('show ted database extensive')
                except SchemaEmptyParserError:
                    log.info('Parser is empty')
                    timeout.sleep()
                    continue

                reqs = R([
                    'node', '(.*)', 'protocol', '(.*)', 'to', '(.*)', 'local',
                    local_address.split('/')[0], 'remote', '(.*)', 'metric',
                    '(?P<metric>.*)'
                ])
                found = find([out], reqs, filter_=False, all_keys=True)
                if found:
                    keys = GroupKeys.group_keys(
                        reqs=reqs.args,
                        ret_num={},
                        source=found,
                        all_keys=True)
                    if 'metric' in keys[0] and int(expected_cost) == int(
                            keys[0]['metric']):
                        return True

            timeout.sleep()
        return False

    log.info('This api does not support cost type {}'.format(cost_type))
    return False
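
A minimal usage sketch, assuming a connected pyATS device object and illustrative interface and cost values:

# 'device' is assumed to be a connected pyATS device object
assert device.api.verify_ospf_interface_cost(
    interface='ge-0/0/1.0',    # illustrative interface
    expected_cost=100,
    cost_type='ospf',
    max_time=60,
    check_interval=15)
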
Example #10
def install_image(section, steps, device, images, save_system_config=False,
                  install_timeout=500, reload_timeout=800):
    """
    Clean yaml file schema:
    -----------------------
    install_image:
        images: <Image to install `list`> (Mandatory)
        save_system_config: <Whether or not to save the system config if it was modified `bool`> (Optional) Default: False
        install_timeout: <Maximum time to wait for install process to finish `int`> (Optional) Default: 500
        reload_timeout: <Maximum time to wait for reload process to finish `int`> (Optional) Default: 800

    Example:
    --------
    install_image:
        images:
          - /auto/some-location/that-this/image/stay-isr-image.bin
        save_system_config: True
        install_timeout: 1000
        reload_timeout: 1000

    """

    # Need to get directory that the image would be unpacked to
    # This is the same directory the image is in
    directory = images[0].split(':')[0]

    # packages.conf is hardcoded because it is required to set
    # the boot variable to packages.conf for 'install mode'
    # ----------------------------------------------------------
    # 'Bundle mode' is when we set the boot var to the image.bin
    new_boot_var = directory+':packages.conf'

    with steps.start("Configure boot variables for 'install mode' on '{dev}'".format(dev=device.hostname)) as step:
        with step.start("Delete all boot variables") as substep:
            # Get list of existing boot variables
            try:
                curr_boot_images = device.api.get_boot_variables(boot_var='current')
            except Exception as e:
                substep.failed("Unable to retrieve boot variables\nError: {e}".format(e=str(e)))

            if not curr_boot_images:
                substep.passed("No boot variables are configured")

            # delete the existing boot variables
            try:
                device.api.execute_delete_boot_variable(
                    boot_images=curr_boot_images, timeout=60)
            except Exception as e:
                substep.failed("Failed to delete all boot variables\nError: {e}".format(e=str(e)))
            else:
                substep.passed("Deleted all boot variables")

        with step.start("Configure boot system variable to '{boot_var}'".format(boot_var=new_boot_var)) as substep:
            try:
                device.api.execute_set_boot_variable(
                    boot_images=[new_boot_var], timeout=60)
            except Exception as e:
                substep.failed("Failed to configure new boot variables\nError: {e}".format(e=str(e)))
            else:
                substep.passed("Configured boot system variable")

        with step.start("Copy running-configuration to startup-configuration") as substep:
            try:
                device.api.execute_copy_run_to_start(
                    max_time=60, check_interval=30)
            except Exception as e:
                substep.failed("Failed to copy running-config to startup-config\nError: {e}".format(e=str(e)))
            else:
                substep.passed("Copied running-config to startup-config")

        # Verify next reload boot variables are correctly set
        with step.start("Verify next reload boot variables are correctly set") as substep:
            if not device.api.verify_boot_variable(boot_images=[new_boot_var]):
                substep.failed("Boot variables are not correctly set to {}"
                               .format([new_boot_var]))
            else:
                substep.passed("Boot variables are correctly set".format(device.name))

    with steps.start("Installing image '{img}' onto {dev}".format(
            img=images[0], dev=device.hostname)) as step:

        install_add_one_shot_dialog = Dialog([
            Statement(pattern=r".*Press Quit\(q\) to exit, you may save "
                              r"configuration and re-enter the command\. "
                              r"\[y\/n\/q\]",
                      action='sendline(y)' if save_system_config else 'sendline(n)',
                      loop_continue=True,
                      continue_timer=False),
            Statement(pattern=r".*Please confirm you have changed boot config "
                              r"to flash\:packages\.conf \[y\/n\]",
                      action='sendline(y)',
                      loop_continue=True,
                      continue_timer=False),
            Statement(pattern=r".*This operation may require a reload of the "
                              r"system\. Do you want to proceed\? \[y\/n\]",
                      action='sendline(y)',
                      loop_continue=True,
                      continue_timer=False),
        ])

        try:
            device.execute('install add file {} activate commit'.format(images[0]),
                           reply=install_add_one_shot_dialog,
                           error_pattern=['.*FAILED: install_add_activate_commit.*'],
                           timeout=install_timeout)
        except Exception as e:
            step.failed("Installing image '{img}' failed. Error: {e}"
                        .format(img=images[0], e=str(e)))

    with steps.start("Waiting for {dev} to reload".format(dev=device.hostname)) as step:

        timeout = Timeout(reload_timeout, 60)
        while timeout.iterate():
            timeout.sleep()
            device.destroy()

            try:
                device.connect(learn_hostname=True)
            except Exception:
                log.info("{dev} is not reloaded".format(dev=device.hostname))
            else:
                step.passed("{dev} has successfully reloaded".format(dev=device.hostname))

        step.failed("{dev} failed to reboot".format(dev=device.hostname))

    image_mapping = section.history['install_image'].parameters.setdefault(
        'image_mapping', {})
    image_mapping.update({images[0]: new_boot_var})
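
The 'install mode' boot variable derivation above can be seen in isolation; a short sketch with an illustrative image location:

# Illustrative image location on the device filesystem
image = 'bootflash:/isr4400-universalk9.bin'

# Same derivation as above: keep the filesystem prefix, point boot at packages.conf
directory = image.split(':')[0]              # 'bootflash'
new_boot_var = directory + ':packages.conf'  # 'bootflash:packages.conf'
assert new_boot_var == 'bootflash:packages.conf'
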
Example #11
    def _perform_issu(self, steps, upgrade_image, timeout=300):
        """Perform the ISSU steps in sequence on the ASR1K device:

            1.  Execute 'issu loadversion' to begin ISSU process
            2.  Poll until standby RP reaches 'ok' state
            3.  Verify ISSU state is now 'loadversion'
            4.  Execute 'issu runversion' to initiate RP failover
            5.  Reconnect to the device
            6.  Verify ISSU state is now 'runversion'
            7.  Execute 'issu acceptversion' to cancel rollback timer
            8.  Verify ISSU state is now 'acceptversion'
            9.  Verify ISSU rollback timer has been cancelled
            10. Poll until standby RP reaches 'ok' state
            11. Save running-configuration to startup-configuration
            12. Execute 'issu commitversion' to complete ISSU process
            13. Reload the device and then reconnect to it
            14. Verify device is now booted with ISSU upgrade image

        Raises:
            Unicon errors
            Exception

        Example:
            >>> _perform_issu(steps=steps, upgrade_image='someimage')
        """

        # Init
        device = self.device
        lookup = Lookup.from_device(device)
        filetransfer = FileUtils.from_device(device)
        image_name = basename(upgrade_image)

        # ======================================================================
        #                           Get standby RP
        # ======================================================================
        with steps.start("Get standby RP information", continue_=True) as step:
            platform_dict = lookup.parser.show_platform.\
                            ShowPlatform(device=device).parse()
            # Standby RP
            rs = R([
                'slot', '(?P<val1>.*)', 'rp', '(?P<val2>.*)', 'state',
                'ok, standby'
            ])
            ret = find([platform_dict], rs, filter_=False, all_keys=True)
            if not ret:
                raise Exception(
                    "Device '{}' does not have standby RP - cannot "
                    "perform ISSU".format(device.name))
            standby_rp = ret[0][1][1]
            srp = re.search(r'(?P<srp>(\d))', standby_rp).groupdict()['srp']
            logger.info("Standby RP on '{dev}' is: '{standby_rp}'".format(
                dev=device.name, standby_rp=standby_rp))

        # ======================================================================
        #                          issu loadversion
        # ======================================================================
        with steps.start("Execute 'issu loadversion' to begin ISSU process",
                         continue_=True) as step:
            try:
                output = device.execute('issu loadversion rp {srp} file '
                                        'stby-harddisk:{image}'.format(
                                            srp=srp, image=image_name),
                                        timeout=600)
                if 'FAILED' in output:
                    device.execute('issu abortversion', timeout=timeout)
                    raise Exception("Unable to execute 'issu loadversion'")
            except Exception as e:
                raise Exception("Unable to execute 'issu loadversion'")

            # Poll until standby RP reaches 'ok' state in 'show platform'
            logger.info("Poll until standby RP reaches 'ok' state")
            platform_timeout = Timeout(max_time=1200, interval=120)
            while platform_timeout.iterate():
                platform_dict = lookup.parser.show_platform.\
                                ShowPlatform(device=device).parse()
                # Create requirement to find standby-RP with 'ok, standby' state
                rs = R([
                    'slot', '(?P<val1>.*)', 'rp', '(?P<val2>.*)', 'state',
                    'ok, standby'
                ])
                ret = find([platform_dict], rs, filter_=False, all_keys=True)
                if ret:
                    logger.info("Stanby RP '{}' is in 'ok' state".\
                                format(standby_rp))
                    break
                # Standby RP is not 'ok' state as yet, sleep and recheck
                platform_timeout.sleep()

            # Verify issu state
            logger.info("Verify ISSU state is now 'loadversion'")
            try:
                self.check_issu_state(device=device,
                                      slot=standby_rp,
                                      expected_state='loadversion')
                logger.info("ISSU state is 'loadversion' as exepcted")
            except Exception as e:
                raise Exception(str(e))

        # ======================================================================
        #                          issu runversion
        # ======================================================================
        with steps.start("Execute 'issu runversion' to initiate RP failover",
                         continue_=True) as step:
            try:
                output = device.execute('issu runversion', timeout=timeout)
            except SubCommandFailure:
                # Timeout Unicon SubCommandFailure expected
                # Wait a bit as the device is booting with the ISSU upgrade image
                time.sleep(timeout)
                pass

            # Reconnect to device
            logger.info("Reconnect to the device after runversion")
            reconnect_timeout = Timeout(max_time=1200, interval=120)
            self._reconnect(steps=steps, timeout=reconnect_timeout)

            # Verify issu state
            logger.info("Verify ISSU state is now 'runversion'")
            try:
                self.check_issu_state(device=device,
                                      slot=standby_rp,
                                      expected_state='runversion')
                logger.info("ISSU state is 'runversion' as exepcted")
            except Exception as e:
                raise Exception(str(e))

        # ======================================================================
        #                          issu acceptversion
        # ======================================================================
        with steps.start(
                "Execute 'issu acceptversion' to cancel rollback timer",
                continue_=True) as step:
            try:
                output = device.execute('issu acceptversion', timeout=timeout)
                if 'FAILED' in output:
                    raise Exception("Unable to execute 'issu acceptversion'")
            except Exception as e:
                raise Exception(
                    "Unable to execute 'issu acceptversion'") from e

            # Verify issu state
            logger.info("Verify ISSU state is now 'acceptversion'")
            try:
                self.check_issu_state(device=device,
                                      slot=standby_rp,
                                      expected_state='acceptversion')
                logger.info("ISSU state is 'acceptversion' as exepcted")
            except Exception as e:
                raise Exception(str(e))

            # Verify rollback timer
            logger.info("Verify ISSU rollback timer is now 'inactive'")
            try:
                self.check_issu_rollback_timer(device=device,
                                               slot=standby_rp,
                                               expected_state='inactive')
                logger.info("ISSU rollback timer is 'inactive' as exepcted")
            except Exception as e:
                raise Exception(str(e))

            # Poll until standby RP reaches 'ok' state in 'show platform'
            logger.info("Poll until standby RP reaches 'ok' state")
            platform_timeout = Timeout(max_time=1200, interval=120)
            while platform_timeout.iterate():
                platform_dict = lookup.parser.show_platform.\
                                ShowPlatform(device=device).parse()
                # Create requirement to find standby-RP with 'ok, standby' state
                rs = R([
                    'slot', '(?P<val1>.*)', 'rp', '(?P<val2>.*)', 'state',
                    'ok, standby'
                ])
                ret = find([platform_dict], rs, filter_=False, all_keys=True)
                if ret:
                    logger.info("Stanby RP '{}' is in 'ok' state".\
                                format(standby_rp))
                    break
                # Standby RP is not 'ok' state as yet, sleep and recheck
                platform_timeout.sleep()

            # Save running-configuration to startup-configuration
            logger.info("Save running-configuration to startup-configuration")
            filetransfer.copyconfiguration(source='running-config',
                                           destination='startup-config',
                                           device=device)

        # ======================================================================
        #                          issu commitversion
        # ======================================================================
        with steps.start("Execute 'issu commitversion'",
                         continue_=True) as step:
            try:
                output = device.execute('issu commitversion', timeout=timeout)
                if 'FAILED' in output:
                    raise Exception("Unable to execute 'issu commitversion'")
            except Exception as e:
                raise Exception(
                    "Unable to execute 'issu commitversion'") from e

        # ======================================================================
        #                          reload device
        # ======================================================================
        try:
            reload_timeout = Timeout(max_time=1200, interval=120)
            self.reload(steps=steps, timeout=reload_timeout)
        except Exception as e:
            raise Exception(
                "Unable to reload the device after ISSU completed") from e

        # ======================================================================
        #                          verify image version
        # ======================================================================
        with steps.start(
                "Verify device is loaded with upgraded image after ISSU",
                continue_=True) as step:
            try:
                output = device.execute('show version | i image')
                if image_name in output:
                    logger.info("ISSU upgrade image is successfully loaded on "
                                "the device '{}'".format(device.name))
            except Exception as e:
                raise Exception(
                    "Unable to execute 'show version'") from e
Example #12
def fabric_clean(section,
                 steps,
                 device,
                 cleaning_timeout=90,
                 reload_timeout=800,
                 sleep_after_reload=60):
    """ This stage will clean APIC controllers.

    The stage will execute 'acidiag touch clean' and then reload the controller.

    Stage Schema
    ------------
    fabric_clean:

        cleaning_timeout (int, optional): Max time for cleaning scripts to execute.
            Defaults to 90.

        reload_timeout (int, optional): Max time for reload. Defaults to 800.

        sleep_after_reload (int, optional): Time in seconds to sleep after the
            device completes reloading. Defaults to 60.

    Example
    -------
    fabric_clean:
        cleaning_timeout: 90
    """

    with steps.start("Cleaning the device") as step:
        result = device.api.execute_clean_controller_fabric(
            max_time=cleaning_timeout)

        if not result:
            step.failed("Failed to clean the device")
        else:
            step.passed("Successfully cleaned the device")

    with steps.start("Reloading '{dev}'".format(dev=device.name)):

        reload_dialog = Dialog([
            Statement(
                pattern=
                r".*This command will restart this device\, Proceed\? \[y\/N\].*",
                action='sendline(y)')
        ])

        device.sendline('acidiag reboot')
        reload_dialog.process(device.spawn)

    with steps.start(
            "Waiting for {dev} to reload".format(dev=device.hostname)) as step:
        timeout = Timeout(reload_timeout, 60)
        while timeout.iterate():
            timeout.sleep()
            device.destroy()

            try:
                device.connect(learn_hostname=True)
            except Exception:
                log.info("{dev} is not reloaded".format(dev=device.hostname))
            else:
                step.passed("{dev} has successfully reloaded".format(
                    dev=device.hostname))

        step.failed("{dev} failed to reboot".format(dev=device.hostname))

    log.info("Sleeping for '{}' seconds for '{}' to stabilize.".format(
        sleep_after_reload, device.name))

    time.sleep(sleep_after_reload)
Example #13
def verify_ospf_neighbor_address(device,
                                 neighbor_address,
                                 expected_state='Full',
                                 max_time=90,
                                 check_interval=10,
                                 expected_failure=False):
    """ Verifies ospf neighbors address
        Args:
            device ('obj'): device to use
            max_time ('int'): Maximum time to keep checking
                              Default to 90 secs
            check_interval ('int'): How often to check
                                    Default to 10 secs
            neighbor_address ('str'): neighbor_address
            expected_state (`str`): expected neighbor state
                                    Default to `Full`
            expected_failure (`bool`): flag to make result opposite
                                       Default to False
        Returns:
            True/False
        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        out = None
        try:
            out = device.parse('show ospf neighbor')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # example of out
        # {
        #   "ospf-neighbor-information": {
        #     "ospf-neighbor": [ # <-----
        #       {
        #         "activity-timer": "32",
        #         "interface-name": "ge-0/0/0.0",
        #         "neighbor-address": "10.189.5.94",
        #         "neighbor-id": "10.189.5.253",
        #         "neighbor-priority": "128",
        #         "ospf-neighbor-state": "Full"
        #       },
        ospf_neighbors = out.q.get_values('ospf-neighbor')

        result = []
        for neighbor in ospf_neighbors:
            if (neighbor['neighbor-address'] == neighbor_address
                    and neighbor['ospf-neighbor-state'] == expected_state):
                result.append(True)
            else:
                continue

        if expected_failure:
            if not result:
                return True
        else:
            if result and all(result):
                return True

        timeout.sleep()

    return False
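
A minimal usage sketch, assuming a connected pyATS device object and the neighbor address from the commented example output:

# 'device' is assumed to be a connected pyATS device object
assert device.api.verify_ospf_neighbor_address(
    neighbor_address='10.189.5.94',
    expected_state='Full',
    max_time=90,
    check_interval=10)
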
Example #14
def verify_ospf_database(device,
                         lsa_type=None,
                         expected_lsa_id=None,
                         max_time=60,
                         check_interval=10,
                         extensive=True,
                         invert=False):
    """ Verifies information from show ospf database

    Args:
        device ([obj]): Device object
        lsa_type ([str], optional): LSA type to check. Defaults to None.
        expected_lsa_id ([str], optional): Expected LSA ID to find. Defaults to None.
        max_time (int, optional): Maximum timeout time. Defaults to 60.
        check_interval (int, optional): Check interval. Defaults to 10.
        extensive (bool, optional): Extensive or not. Default to True.
        invert (bool, optional): Inverts verification to check that the criteria doesn't exist. Defaults to False.

    Returns:
        True/False
    """

    op = operator.ne
    if invert:
        op = operator.eq

    timeout = Timeout(max_time, check_interval)

    while timeout.iterate():
        out = None
        try:
            if extensive:
                if lsa_type:
                    out = device.parse(
                        'show ospf database {lsa_type} extensive'.format(
                            lsa_type=lsa_type.lower()))
                else:
                    out = device.parse('show ospf database extensive')
            else:
                if lsa_type:
                    out = device.parse('show ospf database {lsa_type}'.format(
                        lsa_type=lsa_type.lower()))
                else:
                    out = device.parse('show ospf database')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        count = 0

        ospf_database_ = out.q.get_values('ospf-database')
        for database in ospf_database_:
            if expected_lsa_id and op(
                    expected_lsa_id.split('/')[0], database.get('lsa-id')):
                continue

            # Add criteria to check against

            count += 1
            if not invert:
                return True
            else:
                if count == len(ospf_database_):
                    return True

        timeout.sleep()

    return False
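
A minimal usage sketch, assuming a connected pyATS device object; the second call shows the inverted check (LSA type and IDs are illustrative):

# 'device' is assumed to be a connected pyATS device object
# Expect the LSA ID to be present in the Router LSA database
assert device.api.verify_ospf_database(
    lsa_type='router',
    expected_lsa_id='10.189.5.253',
    max_time=60,
    check_interval=10)

# Inverted: pass only if the LSA ID is absent from the database
assert device.api.verify_ospf_database(
    lsa_type='router',
    expected_lsa_id='10.64.64.64',
    invert=True)
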
Example #15
def check_xe_sanity_device_ready(self,
                                 testbed,
                                 steps,
                                 max_time=60,
                                 interval=10):
    """Check redudancy status, critial processes status and chassis properties

       Args:
           testbed (`obj`): Testbed object
           steps (`obj`): aetest steps object

       Returns:
           None

       Raises:
           pyATS Results
    """
    log.info(
        banner('Check redundancy status,\n'
               'critical processes status,\n'
               'chassis properties'))
    # get uut
    devices = testbed.find_devices(alias='uut')

    for uut in devices:
        lookup = Lookup.from_device(uut)
        # get platform pts

        platform_pts = self.parameters.get('pts', {}).get('platform',
                                                          {}).get('uut', None)

    # check redundancy
        with steps.start("Perform Redundancy Check on device {} - "
                         "to check if device reach 'SSO' status".format(
                             uut.name)) as step:

            # create timeout object
            timeout = Timeout(max_time=int(max_time), interval=int(interval))
            try:
                lookup.sdk.libs.abstracted_libs\
                  .subsection.stack_ha_redundancy_state(
                      device=uut, timeout=timeout, platform_pts=platform_pts)
            except Exception as e:
                step.passx('Redundancy state SSO not reached in the stack',
                           from_exception=e)

        if hasattr(uut, 'workers'):
            with uut.allocate() as worker:
                # check Process
                with steps.start("Verify that all critical process are up "
                                 "and running on device {}".format(
                                     uut.name)) as step:
                    try:
                        lookup.sdk.libs.abstracted_libs\
                          .subsection.process_check(device=worker)
                    except Exception as e:
                        step.passx('Processes verification test failed')

                # check Chasfs
                with steps.start("Verify that Chasfs properties are updated "
                                 "by Stack Manager on device {}".format(
                                     uut.name)) as step:
                    try:
                        lookup.sdk.libs.abstracted_libs\
                          .subsection.chasfs_properties(device=worker, timeout=timeout)
                    except Exception as e:
                        step.passx(
                            'Chasfs verification test failed\n{}'.format(e))
        else:
            # check Process
            with steps.start("Verify that all critical process are up "
                             "and running on device {}".format(
                                 uut.name)) as step:
                try:
                    lookup.sdk.libs.abstracted_libs\
                      .subsection.process_check(device=uut)
                except Exception as e:
                    step.passx('Processes verification test failed')

            # check Chasfs
            with steps.start("Verify that Chasfs properties are updated "
                             "by Stack Manager on device {}".format(
                                 uut.name)) as step:
                try:
                    lookup.sdk.libs.abstracted_libs\
                      .subsection.chasfs_properties(device=uut, timeout=timeout)
                except Exception as e:
                    step.passx('Chasfs verification test failed\n{}'.format(e))
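
Nearly every API in this collection is built on the same poll-until-timeout skeleton. A minimal sketch of that pattern, assuming the Timeout helper used above (commonly imported from genie.utils.timeout in these libraries) and a stand-in check function:

from genie.utils.timeout import Timeout

def poll_until(check, max_time=60, check_interval=10):
    """Re-run check() until it returns True or max_time seconds elapse."""
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        if check():
            return True
        timeout.sleep()   # wait check_interval seconds before the next attempt
    return False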
Beispiel #16
0
def verify_ospf_interface(device,
                          expected_interface=None,
                          expected_interface_type=None,
                          expected_state=None,
                          extensive=True,
                          max_time=60,
                          check_interval=10,
                          expected_hello_interval=None):
    """ Verifies ospf interface exists with criteria

        Args:
            device ('obj'): device to use
            expected_interface ('str'): Interface to use
            expected_interface_type ('str'): Interface type
            expected_state ('str'): Interface state
            extensive ('boolean'): Flag for extensive command
            max_time ('int'): Maximum time to keep checking
            check_interval ('int'): How often to check
            expected_hello_interval ('str'): Expected hello interval

        Returns:
            True/False

        Raises:
            N/A
    """
    timeout = Timeout(max_time, check_interval)

    while timeout.iterate():
        out = None
        try:
            if extensive:
                out = device.parse('show ospf interface extensive')
            else:
                out = device.parse('show ospf interface')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # {
        # "ospf-interface-information": {
        #     "ospf-interface": [
        #         {
        #             "interface-name": "ge-0/0/0.0",
        #             "interface-type": "P2P",
        #             "ospf-interface-state": "PtToPt",
        #             "hello-interval": "100",
        #         },

        for ospf_interface in out.q.get_values('ospf-interface'):
            # check variables
            intf = ospf_interface.get('interface-name', None)
            if expected_interface and expected_interface != intf:
                continue

            intf_type = ospf_interface.get('interface-type', None)
            if expected_interface_type and expected_interface_type != intf_type:
                continue

            intf_state = ospf_interface.get('ospf-interface-state', None)
            if expected_state and expected_state != intf_state:
                continue

            intf_hello_interval = ospf_interface.get('hello-interval', None)
            if expected_hello_interval and str(expected_hello_interval) != intf_hello_interval:
                continue

            return True
        timeout.sleep()
    return False
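
The .q chain used above is Genie's Dq query helper, which flattens the parsed output so a key can be collected regardless of nesting. A small sketch against a dict shaped like the commented parser output (assuming Dq is importable as from genie.utils import Dq in your environment):

from genie.utils import Dq

parsed = {
    "ospf-interface-information": {
        "ospf-interface": [
            {"interface-name": "ge-0/0/0.0", "interface-type": "P2P",
             "ospf-interface-state": "PtToPt", "hello-interval": "100"},
        ]
    }
}

# get_values() collects every entry found under the given key
for intf in Dq(parsed).get_values('ospf-interface'):
    print(intf.get('interface-name'), intf.get('ospf-interface-state'))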
Beispiel #17
0
def verify_bgp_peer_state(device, interface, expected_state,
                          max_time=60, check_interval=10):
    """
    Verify bgp peer state

    Args:
        device('obj'): device to use
        interface('str'): Peer interface
        expected_state('str'): Expected peer state
        max_time ('int', optional): Maximum time to keep checking. Default to 60
        check_interval ('int', optional): How often to check. Default to 10

    Returns:
        Boolean
    Raises:
        N/A
    """
    timeout = Timeout(max_time, check_interval)

    # show commands: "show bgp neighbor"

    # {'bgp-information': {'bgp-peer': [{
    #                                 'flap-count': '0',
    #                                 'peer-address': '20.0.0.3+63208',
    #                                 'peer-state': 'Established',
    #                                   .
    #                                   .
    #                                   .

    while timeout.iterate():
        try:
            out = device.parse('show bgp neighbor')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        
        is_bgp_running = out.q.get_values("is-bgp-running")

        if False in is_bgp_running:
            timeout.sleep()
            continue
            
        peers_list = out.q.get_values("bgp-peer")

        for peer in peers_list:
            peer_interface = peer.get('peer-address')
            peer_state = peer.get("peer-state")

            # 20.0.0.3+63208
            if '+' in peer_interface:
                peer_interface = peer_interface.split('+')[0]

            # 20.0.0.2/24
            if '/' in interface:
                interface = interface.split('/')[0]

            if peer_interface == interface and peer_state == expected_state:
                return True

        timeout.sleep()
    return False
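
The peer matching above strips the TCP port ("+63208") from the parsed peer address and the prefix length ("/24") from the supplied interface so both reduce to a bare IP address. The same normalization as a tiny standalone helper:

def normalize_peer_address(address):
    # "20.0.0.3+63208" -> "20.0.0.3", "20.0.0.2/24" -> "20.0.0.2"
    return address.split('+')[0].split('/')[0]

assert normalize_peer_address('20.0.0.3+63208') == '20.0.0.3'
assert normalize_peer_address('20.0.0.2/24') == '20.0.0.2'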
Beispiel #18
0
def verify_ospf_neighbor_number(device,
                                expected_interface=None,
                                expected_number=None,
                                expected_state=None,
                                extensive=False,
                                max_time=60,
                                check_interval=10):
    """ Verifies the number of ospf neighbors that meets the criteria

        Args:
            device ('obj'): device to use
            expected_interface ('str'): Interface to use
            expected_number ('int'): Expected number of matching neighbors
            expected_state ('str'): Interface state
            extensive ('bool'): Flag to differentiate show commands
            max_time ('int'): Maximum time to keep checking
            check_interval ('int'): How often to check

        Returns:
            Boolean

        Raises:
            N/A
    """

    timeout = Timeout(max_time, check_interval)

    while timeout.iterate():
        try:
            if not extensive:
                out = device.parse("show ospf neighbor")
            else:
                out = device.parse("show ospf neighbor extensive")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # {
        # "ospf3-neighbor-information": {
        #     "ospf3-neighbor": [
        #         {
        #             "interface-name": "ge-0/0/0.0",
        #             "ospf-neighbor-state": "Full"
        #         },

        count = 0
        for neighbor in out.q.get_values('ospf-neighbor'):
            # check variables

            interface_name = neighbor.get('interface-name', None)
            if expected_interface and expected_interface != interface_name:
                continue

            neighbor_state = neighbor.get('ospf-neighbor-state', None)
            if expected_state and (not neighbor_state or
                                   expected_state.lower() != neighbor_state.lower()):
                continue

            # if all variables exist, count plus 1
            count += 1

        if expected_number is not None and count >= int(expected_number):
            return True

        timeout.sleep()

    return False
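
A hypothetical usage sketch: the testbed file name and device alias below are placeholders, and the call waits for at least two Full neighbors on ge-0/0/0.0.

from genie.testbed import load

testbed = load('testbed.yaml')        # placeholder testbed file
uut = testbed.devices['uut']
uut.connect()

ok = verify_ospf_neighbor_number(uut,
                                 expected_interface='ge-0/0/0.0',
                                 expected_state='Full',
                                 expected_number=2,
                                 max_time=120)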
Beispiel #19
0
def is_bgp_neighbor_authentication_key_configured(device, interface=None, max_time=60, check_interval=10):
    """
    Verify that BGP neighbors have an authentication key configured (all peers, or a specific peer when interface is given)

    Args:
        device('obj'): device to use
        interface('str', optional): Peer interface to check. Defaults to None (check all peers).
        max_time ('int', optional): Maximum time to keep checking. Default to 60
        check_interval ('int', optional): How often to check. Default to 10

    Returns:
        Boolean
    Raises:
        N/A
    """
    timeout = Timeout(max_time, check_interval)

    # show commands: "show bgp neighbor"

    # {'bgp-information': {'bgp-peer': [{
    #                                 'flap-count': '0',
    #                                 'peer-address': '20.0.0.3+63208',
    #                                 'peer-state': 'Established',
    #                                  "bgp-option-information": {
    #                                           "address-families": "inet-unicast " "inet-labeled-unicast",
    #                                           "authentication-configured": True,

    while timeout.iterate():
        auth_key_not_configured = False
        try:
            out = device.parse('show bgp neighbor')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        peers_list = out.q.get_values("bgp-peer")
        if interface:
            # 20.0.0.3+63208
            # 20.0.0.2/24
            if '+' in interface:
                interface = interface.split('+')[0]
            elif '/' in interface:
                interface = interface.split('/')[0]

        for peer in peers_list:
            # If an interface is given, the API checks that the authentication key is
            # configured for that peer; otherwise it checks every peer.
            if interface:
                peer_interface = peer.get('peer-address', '')

                # 20.0.0.3+63208
                # 20.0.0.2/24
                if '+' in peer_interface:
                    peer_interface = peer_interface.split('+')[0]
                elif '/' in peer_interface:
                    peer_interface = peer_interface.split('/')[0]

                if peer_interface == interface and peer.get('bgp-option-information', {}).get(
                        'authentication-configured', False):
                    return True

            elif not peer.get('bgp-option-information', {}).get(
                    'authentication-configured', False):
                auth_key_not_configured = True
                break

        if not interface and not auth_key_not_configured:
            return True

        timeout.sleep()
    return False
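
The authentication flag above is read through chained dict.get() calls with defaults, so a peer without the bgp-option-information block simply evaluates as "not configured" instead of raising KeyError:

peer = {'peer-address': '20.0.0.3+63208'}   # no 'bgp-option-information' block
configured = peer.get('bgp-option-information', {}).get('authentication-configured', False)
print(configured)   # False -- missing keys fall back to the defaults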
Beispiel #20
0
def verify_ospf_interface_in_database(device,
                                      expected_interface,
                                      expected_interface_type=None,
                                      subnet_mask=None,
                                      expected_metric=None,
                                      adv_router=False,
                                      max_time=60,
                                      check_interval=10):
    """ Verifies ospf interface exists with criteria

        Args:
            device ('obj'): device to use
            expected_interface ('str'): Interface to use
            expected_interface_type ('str'): Interface type
            subnet_mask ('str'): Subnet mask
            expected_metric ('str'): Metric of Interface
            adv_router ('bool'): Whether to look for the address in the advertising router field
            max_time ('int'): Maximum time to keep checking
            check_interval ('int'): How often to check

        Returns:
            Boolean
        Raises:
            N/A
    """            
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse('show ospf database extensive')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue


        #'ospf-database':[
        #    {'lsa-type':
        #        'Router',
        #        'lsa-id': '1.1.1.1'
        #    }
        #]

        for ospf_database in Dq(out).get_values('ospf-database'):

            #'ospf-external-lsa':
            #   {'address-mask': '255.255.255.255',
            #   'ospf-external-lsa-topology': {
            #        'ospf-topology-name':
            #            'default'}}
            ospf_external_lsa = ospf_database.get('ospf-external-lsa',{})
            if not adv_router:
                if subnet_mask is not None:
                    if 'address-mask' not in ospf_external_lsa:
                        continue
                    else:
                        #{'address-mask': '255.255.255.255'}
                        current_mask = IPAddress(ospf_external_lsa.get('address-mask')).netmask_bits()
                        if str(current_mask) != subnet_mask:
                            continue

                #'type-value': '2'
                if not ospf_external_lsa:
                    continue
                if not ospf_external_lsa.get('ospf-external-lsa-topology',{}):
                    continue
                if str(expected_metric) != ospf_external_lsa.get('ospf-external-lsa-topology',{}).get('type-value',{}):
                    continue

                if expected_interface_type is not None:
                    #'lsa-type': 'Extern'
                    lsa_type = ospf_database.get('lsa-type', None)
                    lsa_type = lsa_type.lower() if lsa_type else lsa_type

                    if expected_interface_type.lower() != lsa_type:
                        continue

                #'lsa-id': '11.11.11.11'
                lsa_id = ospf_database.get('lsa-id', None)
                if expected_interface != lsa_id:
                    continue
            else:
                advertising_router = ospf_database.get('advertising-router', None)
                if expected_interface != advertising_router:
                    continue

            return True

        timeout.sleep()

    return False
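
The subnet_mask comparison above converts the dotted address mask from the LSA into a prefix length with IPAddress.netmask_bits(), assuming IPAddress comes from the netaddr package in this module:

from netaddr import IPAddress

print(IPAddress('255.255.255.255').netmask_bits())  # 32
print(IPAddress('255.255.255.0').netmask_bits())    # 24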
Beispiel #21
0
def get_bgp_neighbor_prefixes_count(device, interface, max_time=60, check_interval=10):
    """
    Get bgp neighbor accepted, received or advertised prefixes count

    Args:
        device('obj'): device to use
        interface('str'): Peer interface
        max_time ('int', optional): Maximum time to keep checking. Default to 60 seconds.
        check_interval ('int', optional): How often to check. Default to 10 seconds.

    Returns:
        dict: accepted/received/advertised prefix counts; empty if the peer is not found
    Raises:
        N/A
    """
    timeout = Timeout(max_time, check_interval)
    prefixes_count_dict = {}

    # show commands: "show bgp neighbor"

    # {'bgp-information': {'bgp-peer': [{
    #                                 "bgp-rib": [
    #                                         {
    #                                             "accepted-prefix-count": "684",
    #                                             "active-prefix-count": "682",
    #                                             "advertised-prefix-count": "0",
    #                                             "bgp-rib-state": "BGP restart " "is complete",
    #                                             "name": "inet.0",
    #                                             "received-prefix-count": "684",
    #                                             "rib-bit": "20000",
    #                                             "send-state": "in sync",
    #                                             "suppressed-prefix-count": "0",
    #                                         },

    while timeout.iterate():
        try:
            out = device.parse('show bgp neighbor')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        peers_list = out.q.get_values("bgp-peer")
        # 20.0.0.3+63208
        # 20.0.0.2/24
        if '+' in interface:
            interface = interface.split('+')[0]
        elif '/' in interface:
            interface = interface.split('/')[0]

        for peer in peers_list:
            peer_interface = peer.get('peer-address', '')

            # 20.0.0.3+63208
            # 20.0.0.2/24
            if '+' in peer_interface:
                peer_interface = peer_interface.split('+')[0]
            elif '/' in peer_interface:
                peer_interface = peer_interface.split('/')[0]

            if peer_interface == interface:
                bgp_rib_list = peer.get('bgp-rib', [])
                for bgp_rib in bgp_rib_list:
                    if bgp_rib.get('accepted-prefix-count', None):
                        prefixes_count_dict['accepted_prefix_count'] = int(bgp_rib.get('accepted-prefix-count'))
                    if bgp_rib.get('received-prefix-count', None):
                        prefixes_count_dict['received_prefix_count'] = int(bgp_rib.get('received-prefix-count'))
                    if bgp_rib.get('advertised-prefix-count', None):
                        prefixes_count_dict['advertised_prefix_count'] = int(bgp_rib.get('advertised-prefix-count'))
                return prefixes_count_dict

        timeout.sleep()
    return prefixes_count_dict
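
A hypothetical call, assuming uut is a connected device object from a pyATS testbed; the keys below appear only when the corresponding counters are present in the parsed output:

counts = get_bgp_neighbor_prefixes_count(uut, interface='20.0.0.3')
# e.g. {'accepted_prefix_count': 684,
#       'received_prefix_count': 684,
#       'advertised_prefix_count': 0}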
Beispiel #22
0
def verify_diff_timestamp(device, expected_spf_delay=None, ospf_trace_log=None,\
                       max_time=60, check_interval=10):
    """
    Verify that the time difference between the SPF scheduled and SPF start log entries matches the expected SPF delay

    Args:
        device('obj'): device to use
        expected_spf_delay('float'): Expected SPF delay in seconds
        ospf_trace_log('str'): OSPF trace log file name
        max_time ('int'): Maximum time to keep checking
        check_interval ('int'): How often to check

    Returns:  
        Boolean       
    Raises:
        N/A    
    """
    timeout = Timeout(max_time, check_interval)

    # show commands: "show log {ospf_trace_log}"
    while timeout.iterate():
        try:
            output = device.parse('show log {ospf_trace_log}'.format(
                ospf_trace_log=ospf_trace_log))
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        # Example parsed output:
        #
        # {
        #     "file-content": [
        #     "        show log messages",
        #     "        Mar  5 00:45:00 sr_hktGCS001 newsyslog[89037]: "
        #     "logfile turned over due to size>1024K",
        #     "        Mar  5 02:42:53  sr_hktGCS001 sshd[87374]: Received "
        #     "disconnect from 10.1.0.1 port 46480:11: disconnected by user",
        #     "        Mar  5 02:42:53  sr_hktGCS001 sshd[87374]: "
        #     "Disconnected from 10.1.0.1 port 46480",
        #     "        Mar  5 02:42:53  sr_hktGCS001 inetd[6841]: "
        #     "/usr/sbin/sshd[87371]: exited, status 255",
        # }

        file_content_list = output['file-content']

        scheduled_time = start_time = datetime.datetime.now()

        for i in file_content_list:
            scheduled_time_str = device.api.get_ospf_spf_scheduled_time(i)
            if scheduled_time_str:
                scheduled_time = datetime.datetime.strptime(
                    scheduled_time_str, '%H:%M:%S.%f')

            start_time_str = device.api.get_ospf_spf_start_time(i)
            if start_time_str:
                start_time = datetime.datetime.strptime(
                    start_time_str, '%H:%M:%S.%f')

            time_change = (start_time - scheduled_time).seconds
            if time_change == expected_spf_delay:
                return True

        timeout.sleep()
    return False
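
The delay calculation above parses both timestamps with the same %H:%M:%S.%f format and subtracts them, so the comparison is done on the .seconds of the resulting timedelta:

import datetime

scheduled = datetime.datetime.strptime('02:42:53.109', '%H:%M:%S.%f')
start = datetime.datetime.strptime('02:42:55.109', '%H:%M:%S.%f')
print((start - scheduled).seconds)   # 2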
Beispiel #23
0
def verify_bgp_peer_option(device,
                           interface,
                           protocol,
                           expected_bgp_option,
                           invert=False,
                           max_time=60,
                           check_interval=10):
    """
    Verify bgp peer's bgp option

    Args:
        device('obj'): device to use
        interface('str'): Peer interface
        protocol('str'): protocol name
        expected_bgp_option('str') : Expected peer bgp-option flag
        invert (bool, optional): True if output does not contain expected_bgp_option. Defaults to False.
        max_time ('int', optional): Maximum time to keep checking. Default to 60 seconds.
        check_interval ('int', optional): How often to check. Default to 10 seconds.

    Returns:
        Boolean
    Raises:
        N/A
    """
    op = operator.contains
    if invert:
        op = lambda data, check: operator.not_(operator.contains(data, check))

    timeout = Timeout(max_time, check_interval)

    # show commands: "show bgp neighbor"

    # {'bgp-information': {'bgp-peer': [{
    #                                 'peer-address': '20.0.0.3+63208',
    #                                 'peer-state': 'Established',
    #                                  {'bgp-option-information': {
    #                                       "bgp-options": "Confed",
    #                                   .

    # 20.0.0.2/24
    interface = interface.split('/')[0]

    while timeout.iterate():
        try:
            out = device.parse('show {protocol} neighbor'.format(protocol=protocol))
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        peers_list = out.q.get_values("bgp-peer")

        for peer in peers_list:
            peer_interface = peer.get('peer-address')
            peer_bgp_option = Dq(peer).get_values('bgp-options')

            # 20.0.0.3+63208
            peer_interface = peer_interface.split('+')[0]
            if peer_interface == interface and op(''.join(peer_bgp_option), expected_bgp_option):
                return True

        timeout.sleep()
    return False
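
A hypothetical call, assuming uut is a connected device object: with invert=True the API passes only once the 'Confed' flag is absent from the peer's bgp-options.

ok = verify_bgp_peer_option(uut,
                            interface='20.0.0.2/24',
                            protocol='bgp',
                            expected_bgp_option='Confed',
                            invert=True)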
Beispiel #24
0
def get_cef_internal_repair_interface(device,
                                      prefix,
                                      vrf=None,
                                      max_time=60,
                                      check_interval=15):
    """ Get cef internal output repair interface

        Args:
            device (`obj`): Device object
            vrf (`str`, optional): VRF to check. Defaults to None
            prefix (`str`): Prefix to check
            max_time (`int`): Maximum time to keep checking
            check_interval (`int`): How long to wait between checks

        Raises:
            N/A

        Returns:
            interface name/None
    """
    if vrf:
        cmd = 'show ip cef vrf {vrf} {prefix} internal'.format(vrf=vrf,
                                                               prefix=prefix)
    else:
        cmd = 'show ip cef {prefix} internal'.format(prefix=prefix)

    vrf = vrf if vrf else "default"

    reqs = R([
        "vrf",
        "(?P<vrf>{vrf})".format(vrf=vrf),
        "address_family",
        "(?P<address_family>.*)",
        "prefix",
        "(?P<prefix>.*)",
        "output_chain",
        "tag_midchain",
        "(?P<tag_midchain>.*)",
        "frr",
        "primary",
        "repair",
        "tag_adj",
        "(?P<interface>.*)",
    ])

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse(cmd)
        except SchemaEmptyParserError:
            log.info("Parser output is empty")
            timeout.sleep()
            continue

        found = find([out], reqs, filter_=False, all_keys=True)
        if found:
            for interface in found[0][0]:
                return interface

        timeout.sleep()

    log.error("Failed to get repair interface")
    return None
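
A hypothetical call, assuming uut is a connected IOS-XE device object; the return value is the FRR repair interface name, or None if the lookup never succeeds before the timeout:

repair_intf = get_cef_internal_repair_interface(uut,
                                                prefix='10.0.0.1/32',
                                                vrf='red',
                                                max_time=30,
                                                check_interval=10)
print(repair_intf)   # e.g. 'GigabitEthernet3', or None on failure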
Beispiel #25
0
def verify_rib_fib_lfib_consistency(device,
                                    route,
                                    none_pattern='',
                                    max_time=30,
                                    check_interval=10):
    """ Verify the outgoing label for route are the same in:
        - show ip route <route>
        - show ip cef <route>
        - show mpls forwarding-table <route>

        Args:
            device (`obj`): Device object
            route (`str`): Route or IP prefix
            none_pattern (`list`, optional): Additional label patterns treated as 'no label'
            max_time (`int`): Max time, default: 30
            check_interval (`int`): Check interval, default: 10
        Returns:
            result (`bool`): Verified result
    """
    route = route.split('/')
    if len(route) > 1:
        route = '{} {}'.format(route[0], device.api.int_to_mask(int(route[1])))
    else:
        route = route[0]

    patterns = ['No Label', 'implicit-null', 'Pop Label', 'none', '']
    if none_pattern:
        patterns.extend(none_pattern)

    cmd1 = "show ip route {}".format(route)
    cmd2 = "show ip cef {}".format(route)
    cmd3 = "show mpls forwarding-table {}".format(route)

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        result = True
        table = PrettyTable()
        table.field_names = [
            'RIB (INTF)', 'RIB (NH)', 'RIB (LABEL)', 'FIB (INTF)', 'FIB (NH)',
            'FIB (LABEL)', 'FIB (LOCAL LABEL)', 'LFIB (INTF)', 'LFIB (NH)',
            'LFIB (LABEL)', 'LFIB (LOCAL LABEL)', 'PASS/FAIL'
        ]

        try:
            out1 = device.parse(cmd1)
        except Exception as e:
            log.error("Failed to parse '{}':\n{}".format(cmd1, e))
            result = False
            timeout.sleep()
            continue

        try:
            out2 = device.parse(cmd2)
        except Exception as e:
            log.error("Failed to parse '{}':\n{}".format(cmd2, e))
            result = False
            timeout.sleep()
            continue

        try:
            out3 = device.parse(cmd3)
        except Exception as e:
            log.error("Failed to parse '{}':\n{}".format(cmd3, e))
            result = False
            timeout.sleep()
            continue

        reqs1 = R(['entry', '(.*)', 'paths', '(.*)', '(?P<route>.*)'])
        found1 = find([out1], reqs1, filter_=False, all_keys=True)
        route_dict1 = {}
        # eg: {"GigabitEthernet2": {
        #         "nexthop": "10.0.0.5",
        #         "from": "10.16.2.2",
        #         "age": "2w0d",
        #         "interface": "GigabitEthernet2",
        #         "prefer_non_rib_labels": true,
        #         "merge_labels": true,
        #         "metric": "3",
        #         "share_count": "1",
        #         "mpls_label": "16002",
        #         "mpls_flags": "NSF"}}

        if found1:
            for item in found1:
                route_dict1.update({item[0]['interface']: item[0]})
        else:
            log.error("Failed to find route from '{}'".format(cmd1))
            result = False
            timeout.sleep()
            continue

        reqs2 = R([
            'vrf', '(.*)', 'address_family', '(.*)', 'prefix', '(.*)',
            'nexthop', '(?P<nexthop>.*)', 'outgoing_interface',
            '(?P<interface>.*)', '(?P<sub>.*)'
        ])
        found2 = find([out2], reqs2, filter_=False, all_keys=True)
        route_dict2 = {}
        # eg: {'GigabitEthernet2': {
        #        'local_label': 16002,
        #        'outgoing_label': ['16002'],
        #        'nexthop': '10.0.0.5'}}

        if found2:
            for item in found2:
                interface = item[1][-1]
                nexthop = item[1][-3]
                item[0].update({'nexthop': nexthop})
                route_dict2.update({interface: item[0]})
        else:
            log.error(
                "Failed to find outgoing interface from '{}'".format(cmd2))
            result = False
            timeout.sleep()
            continue

        reqs3 = R([
            'vrf', '(.*)', 'local_label', '(?P<local_label>.*)',
            'outgoing_label_or_vc', '(?P<outgoing_label>.*)',
            'prefix_or_tunnel_id', '(?P<prefix>.*)', 'outgoing_interface',
            '(?P<interface>.*)', 'next_hop', '(?P<next_hop>.*)'
        ])
        found3 = find([out3], reqs3, filter_=False, all_keys=True)
        route_dict3 = {}
        # eg: {'GigabitEthernet4': {
        #         "local_label": 16,
        #         "outgoing_label": "Pop Label",
        #         "prefix": "10.0.0.13-A",
        #         "interface": "GigabitEthernet4",
        #         "next_hop": "10.0.0.13"}}

        if found3:
            keys = GroupKeys.group_keys(reqs=reqs3.args,
                                        ret_num={},
                                        source=found3,
                                        all_keys=True)
            for item in keys:
                route_dict3.update({item['interface']: item})
        else:
            log.error(
                "Failed to get outgoing interface from '{}'".format(cmd3))
            result = False
            timeout.sleep()
            continue

        if not (len(route_dict1) == len(route_dict2) == len(route_dict3)):
            log.error("The number of routes differs across the 3 outputs")
            result = False
            timeout.sleep()
            continue

        for interface in route_dict1.keys():
            # get info from show ip route
            rib_intf = interface
            rib_nh = route_dict1[interface].get('nexthop', '')
            rib_label = route_dict1[interface].get('mpls_label', '')
            tmp_rib_label = None if rib_label in patterns else rib_label

            # get info from show ip cef
            fib_intf = interface if route_dict2.get(interface, '') else ''
            fib_nh = route_dict2.get(interface, {}).get('nexthop', '')
            fib_label = ' '.join(
                route_dict2.get(interface, {}).get('outgoing_label', []))
            fib_local = route_dict2.get(interface, {}).get('local_label', '')
            tmp_fib_label = None if fib_label in patterns else fib_label

            # get info from show mpls forwarding table
            lfib_intf = interface if route_dict3.get(interface, '') else ''
            lfib_nh = route_dict3.get(interface, {}).get('next_hop', '')
            lfib_label = route_dict3.get(interface,
                                         {}).get('outgoing_label', '')
            lfib_local = route_dict3.get(interface, {}).get('local_label', '')
            tmp_lfib_label = None if lfib_label in patterns else lfib_label

            # If the forwarding table has multiple entries and non-RIB labels are
            # preferred, only compare the RIB and LFIB labels; otherwise compare all labels
            if (len(found3)>1 and route_dict1[interface].get('prefer_non_rib_labels') and (
                    tmp_rib_label == tmp_lfib_label and
                    rib_intf == fib_intf == lfib_intf and
                    rib_nh == fib_nh == lfib_nh)) \
                or (rib_intf == fib_intf == lfib_intf and
                 rib_nh == fib_nh == lfib_nh and
                 tmp_rib_label == tmp_fib_label == tmp_lfib_label):

                status = 'PASS'

            else:
                result = False
                status = 'FAIL'

            table.add_row([
                rib_intf, rib_nh, rib_label, fib_intf, fib_nh, fib_label,
                fib_local, lfib_intf, lfib_nh, lfib_label, lfib_local, status
            ])

        log.info("Summary Result for {}:\n{}".format(route, table))

        if result is True:
            return result

        timeout.sleep()

    return result
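
A note on the length check inside the loop above: Python chains comparisons pairwise, so a != b != c only asserts a != b and b != c and silently misses the case where the first two values are equal; the explicit equality chain avoids that.

a, b, c = 2, 2, 3
print(a != b != c)          # False -- the mismatch with c goes undetected
print(not (a == b == c))    # True  -- correctly reports that the counts differ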
def _loop_iterator(self,
                   steps,
                   testbed,
                   section,
                   name,
                   ret_list,
                   loop_variable_name=None,
                   loop_until=False,
                   max_time=None,
                   check_interval=None,
                   until=None,
                   do_until=None,
                   actions=None,
                   iterator_item=None,
                   every_seconds=None,
                   parallel=None,
                   **kwargs):
    """actually iterate over the actions under loop and call them"""

    # TODO cant save vars in loop, enhancement needed

    # timeout guard in case until, do_until or loop_until would otherwise loop forever
    timeout = Timeout(max_time, check_interval)

    iterator_index = 0
    iterator_len = max(len(itm)
                       for itm in iterator_item) if iterator_item else None
    pcall_payload = []

    # the until condition is sent to blitz_control to evaluate whether it is
    # already true; if it is, the loop does not start and the step is passed
    until_condition = False
    if until and blitz_control(self, section, until, 'until'):
        until_condition = True
        log.info('Until condition is met. Loop terminated')

    while not until_condition:

        # capturing the start time of the action to calculate
        # the duration of the action
        loop_start_time = datetime.now()
        log.debug('loop start time: {}'.format(str(loop_start_time)))

        if _check_pre_iteration(iterator_len, iterator_index, max_time,
                                timeout, parallel):
            break

        # assign each item in an iterable_item to a variable
        # to be used later on within the actions
        # the loop_variable_name should exist in the testcase
        _save_iterator_items(self, section, loop_variable_name, iterator_item,
                             iterator_index)

        # running all the actions and adding the outputs of those actions to return list
        kwargs = {
            'self': self,
            'steps': steps,
            'testbed': testbed,
            'section': section,
            'data': actions,
            'loop_until': loop_until,
            'parallel': parallel,
            'name': name,
        }

        list_of_kwargs = list(callback_blitz_dispatcher_gen(**kwargs))

        # increase the iterator_index until it hits the iterator_len
        # it terminates the iteration when reaches the end of iterator
        if iterator_len:
            iterator_index += 1

        # if parallel then create a pcall_payload of all the actions kwargs
        # to call them later on in parallel
        # NOTE: parallel would not work with until, do_until and loop_until
        if parallel:
            pcall_payload.extend(list_of_kwargs)
            continue

        ret_list.extend(list_of_kwargs)

        # check if loop_until is true and get the item that meet the loop_until value
        loop_until_last_index = _check_loop_until(self, ret_list, loop_until)
        if loop_until_last_index is not None:
            ret_list = ret_list[loop_until_last_index:loop_until_last_index +
                                1]

        until_condition = _check_post_iteration(self, section, until, do_until,
                                                loop_until_last_index)

        _report_every_seconds(loop_start_time, every_seconds)

    # execute each iteration of the loop in parallel
    _actions_execute_in_loop_in_parallel(self, section, pcall_payload,
                                         ret_list, steps)
    return ret_list
Beispiel #27
0
def verify_cef_internal_label_stack(device, vrf, prefix, stack, max_time=60, check_interval=15):
    """ Verify stack is programmed for prefix

        Args:
            device (`obj`): Device object
            vrf (`str`): VRF to check
            prefix (`str`): Prefix to check
            stack (`list`): Stack list to verify exists
            max_time (`int`): Maximum time to keep checking
            check_interval (`int`): How long to wait between checks

        Raises:
            N/A

        Returns:
            True/False
    """
    reqs = R(
        [
            "vrf",
            "(?P<vrf>{vrf})".format(vrf=vrf),
            "address_family",
            "(?P<address_family>.*)",
            "prefix",
            "(?P<prefix>.*)",
            "output_chain",
            "tag_midchain",
            "(?P<tag_midchain>.*)",
            "label",
            "(?P<label>.*)"
        ]
    )

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = device.parse('show ip cef vrf {vrf} {prefix} internal'.format(vrf=vrf, prefix=prefix))
        except SchemaEmptyParserError:
            log.info("Parser output is empty")
            timeout.sleep()
            continue

        stack_copy = list(stack)
        found = find([out], reqs, filter_=False, all_keys=True)
        if found:
            for label in found[0][0]:
                for stack_item in stack_copy:
                    if str(stack_item) in label:
                        # If item found, break to prevent else block from running
                        stack_copy.remove(stack_item)
                        break
                else:
                    # If the label is not any of the stack_items break
                    # to prevent running next else block
                    break
            else:
                # The loop completed without break; any items left in stack_copy
                # are expected labels that were missing from the output
                if not stack_copy:
                    return True
                else:
                    log.info('The following labels are not in the output: {labels}'
                             .format(labels=stack_copy))

        timeout.sleep()
    return False
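
The nested for/else blocks above are easy to misread: an else attached to a for loop runs only when the loop finishes without hitting break. A condensed version of the same matching logic:

expected = ['16002', '16012']
labels_in_output = ['label 16002', 'label 16012 exp 0']

remaining = list(expected)
for label in labels_in_output:
    for item in remaining:
        if str(item) in label:
            remaining.remove(item)
            break                     # label matched -> skip the inner else
    else:
        break                         # label matched nothing -> give up
else:
    # every output label matched something; success if nothing is left over
    print('all expected labels found:', not remaining)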
def _output_query_template(self,
                           output,
                           steps,
                           device,
                           command,
                           include,
                           exclude,
                           max_time,
                           check_interval,
                           continue_,
                           action,
                           reply=None,
                           arguments=None,
                           rest_device_alias=None,
                           rest_kwargs={}):

    keys = _include_exclude_list(include, exclude)
    max_time, check_interval = _get_timeout_from_ratios(
        device=device, max_time=max_time, check_interval=check_interval)

    timeout = Timeout(max_time, check_interval)
    for query, style in keys:
        # dict that would be sent with various data for inclusion/exclusion check
        kwargs = {}
        send_cmd = False
        # for each query and style
        with steps.start("Verify that '{query}' is {style} in the output".\
            format(query=query, style=style), continue_=continue_) as substep:

            while True:

                if send_cmd:

                    output = _send_command(command,
                                           device,
                                           action,
                                           arguments=arguments,
                                           reply=reply,
                                           rest_device_alias=rest_device_alias,
                                           rest_kwargs=rest_kwargs)

                if action == 'execute':
                    # validating the inclusion/exclusion of action execute,
                    pattern = re.compile(str(query))
                    found = pattern.search(str(output))
                    kwargs.update({
                        'action_output': found,
                        'operation': None,
                        'expected_value': None,
                        'style': style,
                        'key': query,
                        'query_type': 'execute_query'
                    })
                else:
                    # verifying the inclusion/exclusion of actions : learn, parse and api
                    found = _get_output_from_query_validators(output, query)
                    kwargs = found
                    kwargs.update({'style': style, 'key': None})

                # Function would return (pass | fail | error)
                step_result, message = _verify_include_exclude(**kwargs)

                if step_result == Passed:
                    substep.passed(message)

                send_cmd = True
                timeout.sleep()
                if not timeout.iterate():
                    break

            # failing logic in case of timeout
            substep.failed(message)

    return output
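
In the execute branch above, the include/exclude query is compiled as a regular expression and searched against the stringified command output; whether a match is required or forbidden is then decided by the include/exclude style. For example:

import re

output = 'Tunnel1 is up, line protocol is up\nSessions: 4'
query = r'Sessions:\s+\d+'
found = re.compile(str(query)).search(str(output))
print(bool(found))   # True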
Beispiel #29
0
def verify_services_accounting_flow(device,
                                    expected_flow_packets_ten_second_rate=None,
                                    expected_active_flows=None,
                                    max_time=60,
                                    check_interval=10,
                                    invert=False):
    """ Verify 'show services accounting flow' against criteria

    Args:
        device (obj): Device object
        expected_flow_packets_ten_second_rate (str): Expected flow packets ten-second rate
        expected_active_flows (str): Expected number of active flows
        max_time (int, optional): Maximum timeout time. Defaults to 60.
        check_interval (int, optional): Check interval. Defaults to 10.
        invert (bool, optional): Inverts the verification. Defaults to False.

    Returns:  
        Boolean

    Raises:
        N/A
    """

    op = operator.ne
    if invert:
        op = operator.eq

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        out = None
        try:
            out = device.parse('show services accounting flow')
        except SchemaEmptyParserError:
            timeout.sleep()
            continue

        #"flow-information": [
        #    {
        #        "interface-name": "ms-9/0/0",
        #        "local-ifd-index": "140",
        #        "flow-packets": "0",
        #        "flow-bytes": "0",
        #        "flow-packets-ten-second-rate": "0",
        #        "flow-bytes-ten-second-rate": "0",
        #        "active-flows": "0",
        #        "flows": "0",
        #        "flows-exported": "0",
        #        "flow-packets-exported": "9",
        #        "flows-expired": "0",
        #        "flows-aged": "0",
        #    }
        #]

        if expected_flow_packets_ten_second_rate and op(
                out.q.get_values('flow-packets-ten-second-rate', 0),
                str(expected_flow_packets_ten_second_rate)):
            timeout.sleep()
            continue
        if expected_active_flows and op(out.q.get_values('active-flows', 0),
                                        str(expected_active_flows)):
            timeout.sleep()
            continue

        return True

    return False
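
A hypothetical call, assuming uut is a connected Junos device object: wait up to a minute for the active-flows counter to report 4 (values are compared as strings against the parsed output).

ok = verify_services_accounting_flow(uut,
                                     expected_active_flows='4',
                                     max_time=60,
                                     check_interval=10)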
Beispiel #30
0
def verify_ip_and_sid_in_segment_routing_mapping_server(
        device,
        address_sid_dict,
        address_family,
        algorithm,
        mapping_server,
        max_time=300,
        check_interval=30,
        expected_result=True,
        output=None):
    """ Verifies if IP address and SID is present in Segment Routing mapping server
        from show segment-routing mpls mapping-server {address_family}'
        Args:
            device (`obj`): Device to be executed command
            address_family (`str`): Address family
            address_sid_dict (`dict`): Dictionary containing ip address and SID as key and value pair
            ex.)
                {
                    '10.4.1.1/32': 1,
                    '10.4.1.2/32': 2,
                } 
            algorithm (`str`): Algorithm to check
            ex.) 
                algorithm = 'ALGO_0' 
            mapping_server (`str`): mapping server to check
            ex.)
                mapping_server = 'PREFIX_SID_EXPORT_MAP'   or
                mapping_server = 'PREFIX_SID_REMOTE_EXPORT_MAP'
            max_time ('int'): maximum time to wait
            check_interval ('int'): how often to check
            expected_result ('bool'): Expected result
                set expected_result = False if method should fail
                set expected_result = True if method should pass (default value)
            output (`str`, optional): Cached command output to parse instead of re-running the show command

        Raises:
            None
        Returns
            True/False

    """

    mapping_dict_export = {
        'ipv4': 'ipv4_prefix_sid_export_map',
        'ipv6': 'ipv6_prefix_sid_export_map',
    }

    mapping_dict_remote_export = {
        'ipv4': 'ipv4_prefix_sid_remote_export_map',
        'ipv6': 'ipv6_prefix_sid_remote_export_map',
    }

    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        try:
            out = None
            if output:
                out = device.parse(
                    "show segment-routing mpls mapping-server {}".format(
                        address_family),
                    output=output)
            else:
                out = device.parse(
                    "show segment-routing mpls mapping-server {}".format(
                        address_family))
            output = None
        except SchemaEmptyParserError:
            pass

        found = None

        for ip_address, sid in address_sid_dict.items():

            # find using Prefix SID local
            # It will use ipv4_prefix_sid_local or ipv6_prefix_sid_local as key for search data
            # based on address_family provided
            if out:
                reqs = R([
                    'segment_routing', 'bindings', 'mapping_server', 'policy',
                    mapping_server.lower(), address_family, 'mapping_entry',
                    ip_address, 'algorithm', algorithm, 'sid', sid
                ])

                found = find([out], reqs, filter_=False, all_keys=True)

            # Returns false if SID is not found Prefix SID or Prefix SID local
            if not expected_result and not found:
                return expected_result
        if expected_result and found:
            return expected_result

        if not found:
            timeout.sleep()

    return False
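
A hypothetical call, assuming uut is a connected IOS-XE device object; the argument values mirror the examples in the docstring above.

ok = verify_ip_and_sid_in_segment_routing_mapping_server(
    uut,
    address_sid_dict={'10.4.1.1/32': 1, '10.4.1.2/32': 2},
    address_family='ipv4',
    algorithm='ALGO_0',
    mapping_server='PREFIX_SID_EXPORT_MAP')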