Example #1
def threshold(ctx, port_name, queue_index, queue_type):
    """ Clear queue threshold for a queue on a port """
    # If no params are provided, clear all queue threshold entries.
    config_db = ConfigDBConnector()
    config_db.connect()

    clear_all = False

    if port_name is None and queue_index is None and queue_type is None:
        # clear all entries.
        key = 'queue'
        clear_all = True
    elif port_name is None or queue_index is None or queue_type is None:
        ctx.fail(
            "port_name, queue_index and queue_type are mandatory parameters.")
    else:
        if queue_index not in range(0, 8):
            ctx.fail("queue index must be in range 0-7")
        if interface_name_is_valid(port_name) is False:
            ctx.fail("Interface name is invalid!!")
        key = 'queue' + '|' + queue_type + '|' + port_name + '|' + str(
            queue_index)

    if clear_all:
        entry_table = config_db.get_keys('THRESHOLD_TABLE')
        # Clear data for all keys
        for k in natsorted(entry_table):
            if k[0] == 'queue':
                config_db.set_entry('THRESHOLD_TABLE', k, None)
    else:
        entry = config_db.get_entry('THRESHOLD_TABLE', key)
        if entry:
            config_db.set_entry('THRESHOLD_TABLE', key, None)
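
A minimal sketch of the key format used above, clearing one queue threshold entry directly (the queue type, port and index are hypothetical; as in the example, set_entry() with None deletes the key):

config_db = ConfigDBConnector()
config_db.connect()
# Key format: queue|<queue_type>|<port_name>|<queue_index>
key = 'queue|unicast|Ethernet0|3'
config_db.set_entry('THRESHOLD_TABLE', key, None)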
Example #2
def name(vxlan_name):
    """Show vxlan name <vxlan_name> information"""
    config_db = ConfigDBConnector()
    config_db.connect()
    header = [
        'vxlan tunnel name', 'source ip', 'destination ip', 'tunnel map name',
        'tunnel map mapping(vni -> vlan)'
    ]

    # Fetching data from config_db for VXLAN TUNNEL
    vxlan_data = config_db.get_entry('VXLAN_TUNNEL', vxlan_name)

    table = []
    if vxlan_data:
        r = []
        r.append(vxlan_name)
        r.append(vxlan_data.get('src_ip'))
        r.append(vxlan_data.get('dst_ip'))
        vxlan_map_keys = config_db.keys(
            config_db.CONFIG_DB,
            'VXLAN_TUNNEL_MAP{}{}{}*'.format(config_db.KEY_SEPARATOR,
                                             vxlan_name,
                                             config_db.KEY_SEPARATOR))
        if vxlan_map_keys:
            vxlan_map_mapping = config_db.get_all(config_db.CONFIG_DB,
                                                  vxlan_map_keys[0])
            r.append(vxlan_map_keys[0].split(config_db.KEY_SEPARATOR, 2)[2])
            r.append("{} -> {}".format(vxlan_map_mapping.get('vni'),
                                       vxlan_map_mapping.get('vlan')))
        table.append(r)

    click.echo(tabulate(table, header))
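
The map-name extraction above splits the full CONFIG_DB key on KEY_SEPARATOR at most twice. A small sketch with a hypothetical tunnel and map name (KEY_SEPARATOR is '|' for CONFIG_DB):

key = 'VXLAN_TUNNEL_MAP|vtep1|map_1000_Vlan100'
map_name = key.split('|', 2)[2]  # -> 'map_1000_Vlan100'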
Example #3
def getAllLines():
    config_db = ConfigDBConnector()
    config_db.connect()

    # Querying CONFIG_DB to get configured console ports
    keys = config_db.get_keys(CONSOLE_PORT_TABLE)
    lines = []
    for k in keys:
        line = config_db.get_entry(CONSOLE_PORT_TABLE, k)
        line[LINE_KEY] = k
        lines.append(line)

    # Querying device directory to get all available console ports
    cmd = "ls " + DEVICE_PREFIX + "*"
    output = run_command(cmd)
    availableTtys = output.split('\n')
    availableTtys = list(
        filter(lambda dev: re.match(DEVICE_PREFIX + r"\d+", dev) is not None,
               availableTtys))
    for tty in availableTtys:
        k = tty[len(DEVICE_PREFIX):]
        if k not in keys:
            line = {LINE_KEY: k}
            lines.append(line)
    return lines
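
Each element returned above is a CONSOLE_PORT entry dict with the line number folded in under LINE_KEY; ports found only in the device directory yield a dict holding just LINE_KEY. A sketch of the result shape, assuming DEVICE_PREFIX is something like '/dev/ttyUSB' and LINE_KEY is 'LINE' (values hypothetical):

# [{'remote_device': 'switch1', 'baud_rate': '9600', 'LINE': '0'},
#  {'LINE': '5'}]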
Example #4
def _is_neighbor_ipaddress(ipaddress):
    """Returns True if a neighbor has the IP address <ipaddress>, False if not
    """
    config_db = ConfigDBConnector()
    config_db.connect()
    entry = config_db.get_entry('BGP_NEIGHBOR', ipaddress)
    return True if entry else False
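
get_entry() returns an empty dict when the key is absent, so the ternary above is equivalent to bool(entry). A usage sketch with a hypothetical address:

if _is_neighbor_ipaddress('10.0.0.1'):
    click.echo('10.0.0.1 is a configured BGP neighbor')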
Example #5
def add(address, timeout, key, auth_type, port, pri, use_mgmt_vrf):
    """Specify a TACACS+ server"""
    if not is_ipaddress(address):
        click.echo('Invalid ip address')
        return

    config_db = ConfigDBConnector()
    config_db.connect()
    old_data = config_db.get_entry('TACPLUS_SERVER', address)
    if old_data != {}:
        click.echo('server %s already exists' % address)
    else:
        data = {
            'tcp_port': str(port),
            'priority': pri
        }
        if auth_type is not None:
            data['auth_type'] = auth_type
        if timeout is not None:
            data['timeout'] = str(timeout)
        if key is not None:
            data['passkey'] = key
        if use_mgmt_vrf:
            data['vrf'] = "mgmt"
        config_db.set_entry('TACPLUS_SERVER', address, data)
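
A sketch of reading the entry back after the set_entry() call above, assuming a connected ConfigDBConnector (address and field values hypothetical):

entry = config_db.get_entry('TACPLUS_SERVER', '10.0.0.9')
# e.g. {'tcp_port': '49', 'priority': '1', 'timeout': '5', 'vrf': 'mgmt'}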
Example #6
def getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, dynamic_key):
    """Get the twice nat id count with dynamic binding"""

    config_db = ConfigDBConnector()
    config_db.connect()

    nat_binding_dict = config_db.get_table('NAT_BINDINGS')
    twice_id_count = count

    if not nat_binding_dict:
        return twice_id_count

    for key, values in nat_binding_dict.items():
        nat_pool_data = config_db.get_entry('NAT_POOL', values["nat_pool"])
        twice_id = 0

        if dynamic_key is not None:
            if dynamic_key == key:
                continue

        if not nat_pool_data:
            continue

        if "twice_nat_id" in values:
            if values["twice_nat_id"] == "NULL":
                continue
            else:
                twice_id = int(values["twice_nat_id"])
        else:
            continue

        if twice_id == twice_nat_id:
            twice_id_count += 1

    return twice_id_count
Example #7
def remove_pool(ctx, pool_name):
    """Remove Pool for Dynamic NAT-related configutation"""

    entryFound = False
    table = "NAT_POOL"
    key = pool_name

    if len(pool_name) > 32:
        ctx.fail(
            "Invalid pool name. Maximum allowed pool name is 32 characters !!")

    config_db = ConfigDBConnector()
    config_db.connect()

    data = config_db.get_entry(table, key)
    if not data:
        click.echo("Trying to delete pool, which is not present.")
        entryFound = True

    binding_dict = config_db.get_table('NAT_BINDINGS')
    if binding_dict and entryFound == False:
        for binding_name, binding_values in binding_dict.items():
            if binding_values['nat_pool'] == pool_name:
                click.echo(
                    "Pool is not removed, as it is mapped to Binding {}, remove the pool binding first !!"
                    .format(binding_name))
                entryFound = True
                break

    if entryFound == False:
        config_db.set_entry(table, key, None)
Example #8
def remove_basic(ctx, global_ip, local_ip):
    """Remove Static NAT-related configutation"""

    # Verify the ip address format
    if is_valid_ipv4_address(local_ip) is False:
        ctx.fail(
            "Given local ip address {} is invalid. Please enter a valid local ip address !!"
            .format(local_ip))

    if is_valid_ipv4_address(global_ip) is False:
        ctx.fail(
            "Given global ip address {} is invalid. Please enter a valid global ip address !!"
            .format(global_ip))

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = 'STATIC_NAT'
    key = global_ip
    dataKey = 'local_ip'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey] == local_ip:
            config_db.set_entry(table, key, None)
            entryFound = True

    if entryFound is False:
        click.echo("Trying to delete static nat entry, which is not present.")
Example #9
def remove_udp(ctx, global_ip, global_port, local_ip, local_port):
    """Remove Static UDP Protocol NAPT-related configutation"""

    # Verify the ip address format
    if is_valid_ipv4_address(local_ip) is False:
        ctx.fail(
            "Given local ip address {} is invalid. Please enter a valid local ip address !!"
            .format(local_ip))

    if is_valid_ipv4_address(global_ip) is False:
        ctx.fail(
            "Given global ip address {} is invalid. Please enter a valid global ip address !!"
            .format(global_ip))

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = "STATIC_NAPT"
    key = "{}|UDP|{}".format(global_ip, global_port)
    dataKey1 = 'local_ip'
    dataKey2 = 'local_port'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == local_ip and data[dataKey2] == str(local_port):
            config_db.set_entry(table, key, None)
            entryFound = True

    if entryFound is False:
        click.echo("Trying to delete static napt entry, which is not present.")
Example #10
def filter_out_local_interfaces(keys):
    rt = []
    local_if = set(['eth0', 'lo', 'docker0'])

    db = ConfigDBConnector()
    db.db_connect('APPL_DB')

    for k in keys:
        e = db.get_entry('ROUTE_TABLE', k)
        if not e:
            # Prefix might have been added. So try w/o it.
            e = db.get_entry('ROUTE_TABLE', k.split("/")[0])
        if not e or (e['ifname'] not in local_if):
            rt.append(k)

    return rt
Example #11
def del_table_key(table, entry, key):
    config_db = ConfigDBConnector()
    config_db.connect()
    data = config_db.get_entry(table, entry)
    if data:
        if key in data:
            del data[key]
        config_db.set_entry(table, entry, data)
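
A hedged usage sketch for the helper above; the table, entry and field names are hypothetical. Only the named field is dropped, since set_entry() is called with the remaining data dict:

del_table_key('PORT', 'Ethernet0', 'description')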
Example #12
def checkDevice(linenum):
    config_db = ConfigDBConnector()
    config_db.connect()

    entry = config_db.get_entry(CONSOLE_PORT_TABLE, str(linenum))
    if not entry:
        click.echo("Line number {} does not exist".format(linenum))
        sys.exit(ERR_DEV)
Example #13
def breakout(ctx):
    """Show Breakout Mode information by interfaces"""
    # Reading data from Redis configDb
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'db': config_db}

    try:
        cur_brkout_tbl = config_db.get_table('BREAKOUT_CFG')
    except Exception as e:
        click.echo("Breakout table is not present in Config DB")
        raise click.Abort()

    if ctx.invoked_subcommand is None:
        # Get port capability from platform and hwsku related files
        hwsku_path = device_info.get_path_to_hwsku_dir()
        platform_file = device_info.get_path_to_port_config_file()
        platform_dict = readJsonFile(platform_file)['interfaces']
        hwsku_file = os.path.join(hwsku_path, HWSKU_JSON)
        hwsku_dict = readJsonFile(hwsku_file)['interfaces']

        if not platform_dict or not hwsku_dict:
            click.echo("Can not load port config from {} or {} file".format(
                platform_file, hwsku_file))
            raise click.Abort()

        for port_name in platform_dict:
            cur_brkout_mode = cur_brkout_tbl[port_name]["brkout_mode"]

            # Update default breakout mode and current breakout mode in platform_dict
            platform_dict[port_name].update(hwsku_dict[port_name])
            platform_dict[port_name]["Current Breakout Mode"] = cur_brkout_mode

            # List all the child ports if present
            child_port_dict = get_child_ports(port_name, cur_brkout_mode,
                                              platform_file)
            if not child_port_dict:
                click.echo(
                    "Cannot find ports from {} file ".format(platform_file))
                raise click.Abort()

            child_ports = natsorted(list(child_port_dict.keys()))

            children, speeds = [], []
            # Update portname and speed of child ports if present
            for port in child_ports:
                speed = config_db.get_entry('PORT', port).get('speed')
                if speed is not None:
                    speeds.append(str(int(speed) // 1000) + 'G')
                    children.append(port)

            platform_dict[port_name]["child ports"] = ",".join(children)
            platform_dict[port_name]["child port speeds"] = ",".join(speeds)

        # Sorted keys by name in natural sort Order for human readability
        parsed = OrderedDict((k, platform_dict[k])
                             for k in natsorted(list(platform_dict.keys())))
        click.echo(json.dumps(parsed, indent=4))
Example #14
def getConnectionInfo(linenum):
    config_db = ConfigDBConnector()
    config_db.connect()
    entry = config_db.get_entry(CONSOLE_PORT_TABLE, str(linenum))

    conf_baud = "-" if BAUD_KEY not in entry else entry[BAUD_KEY]
    act_baud = DEFAULT_BAUD if conf_baud == "-" else conf_baud
    flow_control = False
    if FLOW_KEY in entry and entry[FLOW_KEY] == "1":
        flow_control = True

    return (act_baud, conf_baud, flow_control)
Example #15
def get_routes():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for routes")
    keys = db.get_keys('ROUTE_TABLE')
    print_message(MODE_DEBUG, json.dumps({"ROUTE_TABLE": keys}, indent=4))

    valid_rt = []
    skip_rt = []
    for k in keys:
        if db.get_entry('ROUTE_TABLE', k).get('nexthop', '') != '':
            valid_rt.append(add_prefix_ifnot(k))
        else:
            skip_rt.append(k)

    print_message(MODE_INFO, json.dumps({"skipped_routes" : skip_rt}, indent=4))
    return sorted(valid_rt)
Example #16
def getLineNumber(target, deviceBool):
    if not deviceBool:
        return target

    config_db = ConfigDBConnector()
    config_db.connect()

    devices = getAllDevices()
    linenums = list(map(lambda dev: dev[len(DEVICE_PREFIX):], devices))

    for linenum in linenums:
        entry = config_db.get_entry(CONSOLE_PORT_TABLE, linenum)
        if DEVICE_KEY in entry and entry[DEVICE_KEY] == target:
            return linenum

    click.echo("Device {} does not exist".format(target))
    sys.exit(ERR_DEV)
    return ""
Example #17
def remove_binding(ctx, binding_name):
    """Remove Binding for Dynamic NAT-related configutation"""

    entryFound = False
    table = 'NAT_BINDINGS'
    key = binding_name

    if len(binding_name) > 32:
        ctx.fail(
            "Invalid binding name. Maximum allowed binding name is 32 characters !!"
        )

    config_db = ConfigDBConnector()
    config_db.connect()

    data = config_db.get_entry(table, key)
    if not data:
        click.echo("Trying to delete binding, which is not present.")
        entryFound = True

    if entryFound == False:
        config_db.set_entry(table, key, None)
Example #18
def add_pool(ctx, pool_name, global_ip_range, global_port_range):
    """Add Pool for Dynamic NAT-related configutation"""

    if len(pool_name) > 32:
        ctx.fail(
            "Invalid pool name. Maximum allowed pool name is 32 characters !!")

    # Verify the ip address range and format
    ip_address = global_ip_range.split("-")
    if len(ip_address) > 2:
        ctx.fail(
            "Given ip address range {} is invalid. Please enter a valid ip address range !!"
            .format(global_ip_range))
    elif len(ip_address) == 2:
        if is_valid_ipv4_address(ip_address[0]) is False:
            ctx.fail(
                "Given ip address {} is not valid global address. Please enter a valid ip address !!"
                .format(ip_address[0]))

        if is_valid_ipv4_address(ip_address[1]) is False:
            ctx.fail(
                "Given ip address {} is not valid global address. Please enter a valid ip address !!"
                .format(ip_address[1]))

        ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))
        ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))
        if ipLowLimit >= ipHighLimit:
            ctx.fail(
                "Given ip address range {} is invalid. Please enter a valid ip address range !!"
                .format(global_ip_range))
    else:
        if is_valid_ipv4_address(ip_address[0]) is False:
            ctx.fail(
                "Given ip address {} is not valid global address. Please enter a valid ip address !!"
                .format(ip_address[0]))
        ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))
        ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))

    # Verify the port address range and format
    if global_port_range is not None:
        port_address = global_port_range.split("-")

        if len(port_address) > 2:
            ctx.fail(
                "Given port address range {} is invalid. Please enter a valid port address range !!"
                .format(global_port_range))
        elif len(port_address) == 2:
            if is_valid_port_address(port_address[0]) is False:
                ctx.fail(
                    "Given port value {} is invalid. Please enter a valid port value !!"
                    .format(port_address[0]))

            if is_valid_port_address(port_address[1]) is False:
                ctx.fail(
                    "Given port value {} is invalid. Please enter a valid port value !!"
                    .format(port_address[1]))

            portLowLimit = int(port_address[0])
            portHighLimit = int(port_address[1])
            if portLowLimit >= portHighLimit:
                ctx.fail(
                    "Given port address range {} is invalid. Please enter a valid port address range !!"
                    .format(global_port_range))
        else:
            if is_valid_port_address(port_address[0]) is False:
                ctx.fail(
                    "Given port value {} is invalid. Please enter a valid port value !!"
                    .format(port_address[0]))
            portLowLimit = int(port_address[0])
            portHighLimit = int(port_address[0])
    else:
        global_port_range = "NULL"

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = "NAT_POOL"
    key = pool_name
    dataKey1 = 'nat_ip'
    dataKey2 = 'nat_port'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == global_ip_range and data[
                dataKey2] == global_port_range:
            click.echo("Trying to add pool, which is already present.")
            entryFound = True

    pool_dict = config_db.get_table(table)
    if len(pool_dict) >= 16:
        click.echo(
            "Failed to add pool, as already reached maximum pool limit 16.")
        entryFound = True

    # Verify the Ip address is overlapping with any Static NAT entry
    if entryFound == False:
        static_dict = config_db.get_table('STATIC_NAT')
        if static_dict:
            for staticKey, staticValues in static_dict.items():
                global_ip = "---"
                local_ip = "---"
                nat_type = "dnat"

                if isinstance(staticKey, unicode) is True:
                    global_ip = staticKey
                else:
                    continue

                local_ip = staticValues["local_ip"]

                if "nat_type" in staticValues:
                    nat_type = staticValues["nat_type"]

                if nat_type == "snat":
                    global_ip = local_ip

                ipAddress = int(ipaddress.IPv4Address(unicode(global_ip)))
                if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):
                    ctx.fail(
                        "Given Ip address entry is overlapping with existing Static NAT entry !!"
                    )

    if entryFound == False:
        config_db.set_entry(table, key, {
            dataKey1: global_ip_range,
            dataKey2: global_port_range
        })
Example #19
class Ts(object):
    def __init__(self):
        # connect CONFIG DB
        self.config_db = ConfigDBConnector()
        self.config_db.connect()

        # connect COUNTER DB
        self.counters_db = ConfigDBConnector()
        self.counters_db.db_connect('COUNTERS_DB')

        # connect APPL DB
        self.app_db = ConfigDBConnector()
        self.app_db.db_connect('APPL_DB')

    def config_enable(self, args):
        """ Enable ifa """
        key = 'feature'
        self.config_db.set_entry(TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX, key,
                                 {'enable': "true"})
        print "Enabled IFA"

        return

    def config_disable(self, args):
        """ Disable ifa """
        key = 'feature'
        self.config_db.set_entry(TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX, key,
                                 {'enable': "false"})
        print "Disabled IFA"

        return

    def config_flow(self, args):
        key = TAM_INT_IFA_FLOW_TS_TABLE_PREFIX + '|' + args.flowname
        entry = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        if entry is None:
            if args.acl_table_name:
                self.config_db.mod_entry(
                    TAM_INT_IFA_FLOW_TS_TABLE_PREFIX, args.flowname,
                    {'acl-table-name': args.acl_table_name})
            if args.acl_rule_name:
                self.config_db.mod_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX,
                                         args.flowname,
                                         {'acl-rule-name': args.acl_rule_name})
        else:
            print "Entry Already Exists"
            return False
        return

    def clear_each_flow(self, flowname):
        entry = self.config_db.get_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX,
                                         flowname)
        if entry:
            self.config_db.set_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX,
                                     flowname, None)
        else:
            print "Entry Not Found"
            return False

        return

    def clear_flow(self, args):
        key = args.flowname
        if key == "all":
            # Get all the flow keys
            table_data = self.config_db.get_keys(
                TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            if not table_data:
                return True
            # Clear each flow key
            for key in table_data:
                self.clear_each_flow(key)
        else:
            # Clear the specified flow entry
            self.clear_each_flow(key)

        return

    def show_flow(self, args):
        self.get_print_all_ifa_flows(args.flowname)
        return

    def show_status(self):
        # Get data for all keys
        flowtable_keys = self.config_db.get_keys(
            TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)

        api_response = {}
        key = TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX + '|' + 'feature'
        raw_data_feature = self.config_db.get_all(self.config_db.CONFIG_DB,
                                                  key)
        api_response['ietf-ts:feature-data'] = raw_data_feature
        api_inner_response = {}
        api_inner_response["num-of-flows"] = len(flowtable_keys)
        api_response['ietf-ts:num-of-flows'] = api_inner_response
        key = TAM_DEVICE_TABLE_PREFIX + '|' + 'device'
        raw_data_device = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:device-data'] = raw_data_device
        show_cli_output("show_status.j2", api_response)

        return

    def get_ifa_flow_stat(self, flowname):
        api_response_stat = {}
        api_response, entryfound = self.get_ifa_flow_info(flowname)
        api_response_stat['flow-name'] = flowname
        # Initialize so the ACL lookup below cannot raise a NameError when
        # the flow entry is missing
        acl_rule_name = None
        if entryfound is not None:
            for k in api_response:
                if k == "ietf-ts:each-flow-data":
                    acl_rule_name = api_response['ietf-ts:each-flow-data'][
                        'acl-rule-name']
                    acl_table_name = api_response['ietf-ts:each-flow-data'][
                        'acl-table-name']
                    api_response_stat['rule-name'] = acl_rule_name
                    api_response_stat['table-name'] = acl_table_name

        acl_rule_keys = self.config_db.get_keys(ACL_RULE_TABLE_PREFIX)
        for acl_rule_key in acl_rule_keys:
            if acl_rule_key[1] == acl_rule_name:
                acl_counter_key = 'COUNTERS:' + acl_rule_key[
                    0] + ':' + acl_rule_key[1]
                raw_ifa_stats = self.counters_db.get_all(
                    self.counters_db.COUNTERS_DB, acl_counter_key)
                api_response_stat['ietf-ts:ifa-stats'] = raw_ifa_stats

        return api_response_stat, entryfound

    def get_print_all_ifa_stats(self, name):
        stat_dict = {}
        stat_list = []
        if name != 'all':
            api_response, entryfound = self.get_ifa_flow_stat(name)
            if entryfound is not None:
                stat_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(
                TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_stat_response, entryfound = self.get_ifa_flow_stat(k)
                if entryfound is not None:
                    stat_list.append(api_each_stat_response)

        stat_dict['stat-list'] = stat_list
        show_cli_output("show_statistics_flow.j2", stat_dict)
        return

    def show_statistics(self, args):
        self.get_print_all_ifa_stats(args.flowname)
        return

    def get_ifa_flow_info(self, k):
        flow_data = {}
        flow_data['acl-table-name'] = ''
        flow_data['sampling-rate'] = ''
        flow_data['collector'] = ''

        api_response = {}
        key = TAM_INT_IFA_FLOW_TS_TABLE_PREFIX + '|' + k
        raw_flow_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:flow-key'] = k
        api_response['ietf-ts:each-flow-data'] = raw_flow_data
        return api_response, raw_flow_data

    def get_print_all_ifa_flows(self, name):
        flow_dict = {}
        flow_list = []
        if name != 'all':
            api_response, entryfound = self.get_ifa_flow_info(name)
            if entryfound is not None:
                flow_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(
                TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_ifa_flow_info(k)
                if entryfound is not None:
                    flow_list.append(api_each_flow_response)

        flow_dict['flow-list'] = flow_list
        show_cli_output("show_flow.j2", flow_dict)
        return

    def get_ifa_supported_info(self):
        key = 'TAM_INT_IFA_TS_FEATURE_TABLE|feature'
        data = self.config_db.get_all(self.config_db.CONFIG_DB, key)

        if data is None:
            return

        if data['enable'] == "true":
            print("TAM INT IFA TS Supported - True")
            return True
        elif data['enable'] == "false":
            print("TAM INT IFA TS Supported - False")
            return False

        return

    def get_ifa_enabled_info(self):
        print "In get_ifa_enabled_info"
        key = 'SWITCH_TABLE:switch'
        data = self.app_db.get(self.app_db.APPL_DB, key, 'ifa_enabled')

        if data == 'True':
            return True

        return False
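
A minimal usage sketch for the class above, assuming the TAM_* table-prefix constants are defined and using an argparse-style namespace for args (all names hypothetical):

import argparse

args = argparse.Namespace(flowname='flow1', acl_table_name=None,
                          acl_rule_name=None)
ts = Ts()
ts.config_enable(args)  # writes {'enable': "true"} under the feature key
ts.show_status()        # renders show_status.j2 with feature/device data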
Example #20
def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type,
            twice_nat_id):
    """Add Static UDP Protocol NAPT-related configutation"""

    # Verify the ip address format
    if is_valid_ipv4_address(local_ip) is False:
        ctx.fail(
            "Given local ip address {} is invalid. Please enter a valid local ip address !!"
            .format(local_ip))

    if is_valid_ipv4_address(global_ip) is False:
        ctx.fail(
            "Given global ip address {} is invalid. Please enter a valid global ip address !!"
            .format(global_ip))

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = "STATIC_NAPT"
    key = "{}|UDP|{}".format(global_ip, global_port)
    dataKey1 = 'local_ip'
    dataKey2 = 'local_port'
    dataKey3 = 'nat_type'
    dataKey4 = 'twice_nat_id'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == local_ip and data[dataKey2] == str(local_port):
            click.echo(
                "Trying to add static napt entry, which is already present.")
            entryFound = True

    if nat_type == 'snat':
        ipAddress = local_ip
    else:
        ipAddress = global_ip

    if isIpOverlappingWithAnyStaticEntry(ipAddress, 'STATIC_NAT') is True:
        ctx.fail("Given entry is overlapping with existing NAT entry !!")

    if entryFound is False:
        counters_db = SonicV2Connector()
        counters_db.connect(counters_db.COUNTERS_DB)
        snat_entries = 0
        max_entries = 0
        exists = counters_db.exists(counters_db.COUNTERS_DB,
                                    'COUNTERS_GLOBAL_NAT:Values')
        if exists:
            counter_entry = counters_db.get_all(counters_db.COUNTERS_DB,
                                                'COUNTERS_GLOBAL_NAT:Values')
            if 'SNAT_ENTRIES' in counter_entry:
                snat_entries = counter_entry['SNAT_ENTRIES']
            if 'MAX_NAT_ENTRIES' in counter_entry:
                max_entries = counter_entry['MAX_NAT_ENTRIES']

        if int(snat_entries) >= int(max_entries):
            click.echo(
                "Max limit is reached for NAT entries, skipping adding the entry."
            )
            entryFound = True

    if entryFound is False:
        count = 0
        if twice_nat_id is not None:
            count = getTwiceNatIdCountWithStaticEntries(
                twice_nat_id, table, count)
            count = getTwiceNatIdCountWithDynamicBinding(
                twice_nat_id, count, None)
            if count > 1:
                ctx.fail(
                    "Same Twice nat id is not allowed for more than 2 entries!!"
                )

        if nat_type is not None and twice_nat_id is not None:
            config_db.set_entry(
                table, key, {
                    dataKey1: local_ip,
                    dataKey2: local_port,
                    dataKey3: nat_type,
                    dataKey4: twice_nat_id
                })
        elif nat_type is not None:
            config_db.set_entry(table, key, {
                dataKey1: local_ip,
                dataKey2: local_port,
                dataKey3: nat_type
            })
        elif twice_nat_id is not None:
            config_db.set_entry(table, key, {
                dataKey1: local_ip,
                dataKey2: local_port,
                dataKey4: twice_nat_id
            })
        else:
            config_db.set_entry(table, key, {
                dataKey1: local_ip,
                dataKey2: local_port
            })
Example #21

# NOTE: excerpt from a module-level script; the imports and the connected
# global `configdb` below are assumed, not part of the original source.
import json
import subprocess

from sonic_py_common import device_info
from swsssdk import ConfigDBConnector

configdb = ConfigDBConnector()
configdb.connect()


def copy_profile_list_with_profile_replaced(table, pl, port, profile_list):
    pl['profile_list'] = profile_list
    configdb.set_entry(table, port, pl)


def update_buffer_pool_size(poolname, default_config):
    pool = configdb.get_entry('BUFFER_POOL', poolname)
    pool['size'] = buffers['BUFFER_POOL'][poolname]['size']
    configdb.set_entry('BUFFER_POOL', poolname, pool)


# step 0: preparation: get all the necessary info
# fetch the meta data
metadata = configdb.get_entry('DEVICE_METADATA', 'localhost')
platform = metadata['platform']
hwsku = metadata['hwsku']

# fetch the default buffer configuration
_, skudir = device_info.get_paths_to_platform_and_hwsku_dirs()
buffer_file = '/tmp/buffers.json'
RENDER_BUFFER_CONF_CMD = '/usr/local/bin/sonic-cfggen -d -t {}/buffers.json.j2 > {}'.format(
    skudir, buffer_file)
p = subprocess.Popen(RENDER_BUFFER_CONF_CMD,
                     shell=True,
                     stdout=subprocess.PIPE)
out, err = p.communicate()
with open(buffer_file) as bf:
    buffers = json.load(bf)
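
With buffers loaded from the rendered template, the helper above can refresh a pool size in CONFIG_DB. A sketch with a hypothetical pool name (note the helper reads the size from the global buffers dict; its second argument is unused):

update_buffer_pool_size('ingress_lossless_pool',
                        buffers['BUFFER_POOL']['ingress_lossless_pool'])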
Example #22
class DBMigrator():
    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     non-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_3'

        self.TABLE_NAME      = 'VERSIONS'
        self.TABLE_KEY       = 'DATABASE'
        self.TABLE_FIELD     = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        self.appDB = SonicV2Connector(host='127.0.0.1')
        if self.appDB is not None:
            self.appDB.connect(self.appDB.APPL_DB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if an IP address is present in the key. If it
        is present, the key will be a tuple; otherwise, it will be
        a string.
        '''
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from the existing INTERFACE table with an IP prefix
        to have ONE additional entry without the IP prefix. E.g., for an entry
        "Vlan1000|192.168.0.1/21": {}, this function shall add an entry without
        the IP prefix, "Vlan1000": {}. This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
                     'INTERFACE',
                     'PORTCHANNEL_INTERFACE',
                     'VLAN_INTERFACE',
                     'LOOPBACK_INTERFACE'
                    }
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue

        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from the existing INTF table in APP DB during warmboot with an IP prefix
        to have ONE additional entry without the IP prefix. E.g., for an entry
        "Vlan1000:192.168.0.1/21": {}, this function shall add an entry without
        the IP prefix, "Vlan1000": {}. This also migrates the 'lo' entry to the 'Loopback0' interface.
        '''

        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")

        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')

            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def mlnx_migrate_buffer_pool_size(self):
        """
        On the Mellanox platform the buffer pool size changed in the
        version with the new SDK 4.3.3052; a SONiC-to-SONiC update
        from a version with the old SDK will be broken without migration.
        This migration is specific to the Mellanox platform.
        """
        # Buffer pools defined in version 1_0_2
        buffer_pools = ['ingress_lossless_pool', 'egress_lossless_pool', 'ingress_lossy_pool', 'egress_lossy_pool']

        # Old default buffer pool values on Mellanox platform 
        spc1_t0_default_value = [{'ingress_lossless_pool': '4194304'}, {'egress_lossless_pool': '16777152'}, {'ingress_lossy_pool': '7340032'}, {'egress_lossy_pool': '7340032'}]
        spc1_t1_default_value = [{'ingress_lossless_pool': '2097152'}, {'egress_lossless_pool': '16777152'}, {'ingress_lossy_pool': '5242880'}, {'egress_lossy_pool': '5242880'}]
        spc2_t0_default_value = [{'ingress_lossless_pool': '8224768'}, {'egress_lossless_pool': '35966016'}, {'ingress_lossy_pool': '8224768'}, {'egress_lossy_pool': '8224768'}]
        spc2_t1_default_value = [{'ingress_lossless_pool': '12042240'}, {'egress_lossless_pool': '35966016'}, {'ingress_lossy_pool': '12042240'}, {'egress_lossy_pool': '12042240'}]

        # New default buffer pool configuration on Mellanox platform
        spc1_t0_default_config = {"ingress_lossless_pool": { "size": "5029836", "type": "ingress", "mode": "dynamic" },
                                  "ingress_lossy_pool": { "size": "5029836", "type": "ingress", "mode": "dynamic" },
                                  "egress_lossless_pool": { "size": "14024599", "type": "egress", "mode": "dynamic" },
                                  "egress_lossy_pool": {"size": "5029836", "type": "egress", "mode": "dynamic" } }
        spc1_t1_default_config = {"ingress_lossless_pool": { "size": "2097100", "type": "ingress", "mode": "dynamic" },
                                  "ingress_lossy_pool": { "size": "2097100", "type": "ingress", "mode": "dynamic" },
                                  "egress_lossless_pool": { "size": "14024599", "type": "egress", "mode": "dynamic" },
                                  "egress_lossy_pool": {"size": "2097100", "type": "egress", "mode": "dynamic" } }
        spc2_t0_default_config = {"ingress_lossless_pool": { "size": "14983147", "type": "ingress", "mode": "dynamic" },
                                  "ingress_lossy_pool": { "size": "14983147", "type": "ingress", "mode": "dynamic" },
                                  "egress_lossless_pool": { "size": "34340822", "type": "egress", "mode": "dynamic" },
                                  "egress_lossy_pool": {"size": "14983147", "type": "egress", "mode": "dynamic" } }
        spc2_t1_default_config = {"ingress_lossless_pool": { "size": "9158635", "type": "ingress", "mode": "dynamic" },
                                  "ingress_lossy_pool": { "size": "9158635", "type": "ingress", "mode": "dynamic" },
                                  "egress_lossless_pool": { "size": "34340822", "type": "egress", "mode": "dynamic" },
                                  "egress_lossy_pool": {"size": "9158635", "type": "egress", "mode": "dynamic" } }
        # 3800 platform has gearbox installed so the buffer pool size is different with other Spectrum2 platform
        spc2_3800_t0_default_config = {"ingress_lossless_pool": { "size": "28196784", "type": "ingress", "mode": "dynamic" },
                                  "ingress_lossy_pool": { "size": "28196784", "type": "ingress", "mode": "dynamic" },
                                  "egress_lossless_pool": { "size": "34340832", "type": "egress", "mode": "dynamic" },
                                  "egress_lossy_pool": {"size": "28196784", "type": "egress", "mode": "dynamic" } }
        spc2_3800_t1_default_config = {"ingress_lossless_pool": { "size": "17891280", "type": "ingress", "mode": "dynamic" },
                                  "ingress_lossy_pool": { "size": "17891280", "type": "ingress", "mode": "dynamic" },
                                  "egress_lossless_pool": { "size": "34340832", "type": "egress", "mode": "dynamic" },
                                  "egress_lossy_pool": {"size": "17891280", "type": "egress", "mode": "dynamic" } }
 
        # Try to get related info from DB
        buffer_pool_conf = {}
        device_data = self.configDB.get_table('DEVICE_METADATA')
        if 'localhost' in device_data.keys():
            hwsku = device_data['localhost']['hwsku']
            platform = device_data['localhost']['platform']
        else:
            log_error("Trying to get DEVICE_METADATA from DB but doesn't exist, skip migration")
            return False
        buffer_pool_conf = self.configDB.get_table('BUFFER_POOL')

        # Get the current buffer pool configuration; only migrate configurations
        # that carry default values. If a value is not default, leave it as is.
        pool_size_in_db_list = []
        pools_in_db = buffer_pool_conf.keys()

        # The number of buffer pools differs from the default, so no migration is needed
        if len(pools_in_db) != len(buffer_pools):
            return True

        # If some buffer pool is not one of the default ones, no migration is needed
        for buffer_pool in buffer_pools:
            if buffer_pool not in pools_in_db:
                return True
            pool_size_in_db_list.append({buffer_pool: buffer_pool_conf[buffer_pool]['size']})
        
        # Check whether the buffer pool sizes equal the old default values
        new_buffer_pool_conf = None
        if pool_size_in_db_list == spc1_t0_default_value:
            new_buffer_pool_conf = spc1_t0_default_config
        elif pool_size_in_db_list == spc1_t1_default_value:
            new_buffer_pool_conf = spc1_t1_default_config
        elif pool_size_in_db_list == spc2_t0_default_value:
            if platform == 'x86_64-mlnx_msn3800-r0':
                new_buffer_pool_conf = spc2_3800_t0_default_config
            else:
                new_buffer_pool_conf = spc2_t0_default_config
        elif pool_size_in_db_list == spc2_t1_default_value:
            if platform == 'x86_64-mlnx_msn3800-r0':
                new_buffer_pool_conf = spc2_3800_t1_default_config
            else:
                new_buffer_pool_conf = spc2_t1_default_config
        else:
            # It's not using default buffer pool configuration, no migration needed.
            log_info("buffer pool size is not old default value, no need to migrate")
            return True
        # Migrate old buffer conf to latest.
        for pool in buffer_pools:
            self.configDB.set_entry('BUFFER_POOL', pool, new_buffer_pool_conf[pool])
        log_info("Successfully migrate mlnx buffer pool size to the latest.")
        return True

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that don't have a version
        string defined in config_DB.
        Nothing can be assumed when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in the expected format
        before migrating data to the next version.
        """

        log_info('Handling version_unknown')

        # NOTE: Uncomment next 3 lines of code when the migration code is in
        #       place. Note that returning specific string is intentional,
        #       here we only intended to migrate to DB version 1.0.1.
        #       If new DB version is added in the future, the incremental
        #       upgrade will take care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log_info('Handling version_1_0_1')

        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        version_info = sonic_device_util.get_sonic_version_info()
        if version_info['asic_type'] == "mellanox":
            if self.mlnx_migrate_buffer_pool_size():
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        return None

    def version_1_0_3(self):
        """
        Current latest version. Nothing to do here.
        """
        log_info('Handling version_1_0_3')

        return None

    def get_version(self):
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]

        return 'version_unknown'


    def set_version(self, version=None):
        if not version:
            version = self.CURRENT_VERSION
        log_info('Setting version to ' + version)
        entry = { self.TABLE_FIELD : version }
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)


    def migrate(self):
        version = self.get_version()
        log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version
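
The migrate() loop above dispatches on the stored version string until a handler returns None. A minimal sketch of driving it (passing None selects the default namespace):

migrator = DBMigrator(None)
migrator.migrate()
# e.g. starts at version_unknown, whose handler migrates and returns
# 'version_1_0_2'; the version_1_0_2 handler bumps the DB to version_1_0_3
# and returns None, ending the loop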
Example #23
class DBMigrator():
    def __init__(self):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     non-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_1'

        self.TABLE_NAME      = 'VERSIONS'
        self.TABLE_KEY       = 'DATABASE'
        self.TABLE_FIELD     = 'VERSION'
        self.configDB        = ConfigDBConnector()
        self.configDB.db_connect('CONFIG_DB')


    def migrate_pfc_wd_table(self):
        # Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')


    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that don't have a version
        string defined in config_DB.
        Nothing can be assumed when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in the expected format
        before migrating data to the next version.
        """

        log_info('Handling version_unknown')

        # NOTE: Uncomment next 3 lines of code when the migration code is in
        #       place. Note that returning specific string is intentional,
        #       here we only intended to migrate to DB version 1.0.1.
        #       If new DB version is added in the future, the incremental
        #       upgrade will take care of the subsequent migrations.
        # self.migrate_pfc_wd_table()
        # self.set_version('version_1_0_1')
        # return 'version_1_0_1'


    def version_1_0_1(self):
        """
        Current latest version. Nothing to do here.
        """
        log_info('Handling version_1_0_1')

        return None


    def get_version(self):
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]

        return 'version_unknown'


    def set_version(self, version=None):
        if not version:
            version = self.CURRENT_VERSION
        log_info('Setting version to ' + version)
        entry = { self.TABLE_FIELD : version }
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)


    def migrate(self):
        version = self.get_version()
        log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version
Example #24
class DBMigrator():
    def __init__(self, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     non-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_1'

        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        self.configDB = ConfigDBConnector(**db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if an IP address is present in the key. If it
        is present, the key will be a tuple; otherwise, it will be
        a string.
        '''
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from the existing INTERFACE table with an IP prefix
        to have ONE additional entry without the IP prefix. E.g., for an entry
        "Vlan1000|192.168.0.1/21": {}, this function shall add an entry without
        the IP prefix, "Vlan1000": {}. This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {'INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE'}
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue

        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that don't have a version
        string defined in config_DB.
        Nothing can be assumed when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in the expected format
        before migrating data to the next version.
        """

        log_info('Handling version_unknown')

        # NOTE: Uncomment next 3 lines of code when the migration code is in
        #       place. Note that returning specific string is intentional,
        #       here we only intended to migrate to DB version 1.0.1.
        #       If new DB version is added in the future, the incremental
        #       upgrade will take care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.set_version('version_1_0_1')
        return 'version_1_0_1'

    def version_1_0_1(self):
        """
        Current latest version. Nothing to do here.
        """
        log_info('Handling version_1_0_1')

        return None

    def get_version(self):
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]

        return 'version_unknown'

    def set_version(self, version=None):
        if not version:
            version = self.CURRENT_VERSION
        log_info('Setting version to ' + version)
        entry = {self.TABLE_FIELD: version}
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def migrate(self):
        version = self.get_version()
        log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception(
                    'Version migrate from %s stuck in same version' % version)
            version = next_version
Example #25
class DBMigrator():
    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     non-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_4'

        self.TABLE_NAME      = 'VERSIONS'
        self.TABLE_KEY       = 'DATABASE'
        self.TABLE_FIELD     = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        self.appDB = SonicV2Connector(host='127.0.0.1')
        if self.appDB is not None:
            self.appDB.connect(self.appDB.APPL_DB)

        version_info = device_info.get_sonic_version_info()
        asic_type = version_info.get('asic_type')
        self.asic_type = asic_type

        if asic_type == "mellanox":
            from mellanox_buffer_migrator import MellanoxBufferMigrator
            self.mellanox_buffer_migrator = MellanoxBufferMigrator(self.configDB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Check if an IP address is present in the key. If it is present,
        the key is a tuple; otherwise, it is a string.
        '''
        return isinstance(key, tuple)
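    # For illustration: CONFIG_DB returns keys such as
    # ('Vlan1000', '192.168.0.1/21') as tuples when an IP prefix is present,
    # and keys such as 'Vlan1000' as plain strings when it is not.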

    def migrate_interface_table(self):
        '''
        Migrate all data from the existing INTERFACE table with IP prefix
        to have ONE additional entry without the IP prefix. E.g., for an entry
        "Vlan1000|192.168.0.1/21": {}, this function shall add an entry without
        the IP prefix as "Vlan1000": {}. This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
                     'INTERFACE',
                     'PORTCHANNEL_INTERFACE',
                     'VLAN_INTERFACE',
                     'LOOPBACK_INTERFACE'
                    }
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue

        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log.log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from the existing INTF table in APP DB during warm boot
        with IP prefix to have ONE additional entry without the IP prefix. E.g.,
        for an entry "Vlan1000:192.168.0.1/21": {}, this function shall add an
        entry without the IP prefix as "Vlan1000": {}. It also migrates the 'lo'
        interface to 'Loopback0'.
        '''

        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")

        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log.log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')

            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log.log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that don't have a version
        string defined in config_DB.
        Nothing can be assumed when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in the expected
        format before migrating data to the next version.
        """

        log.log_info('Handling version_unknown')

        # NOTE: Returning a specific version string here is intentional; we
        #       only intend to migrate to DB version 1.0.2. If a newer DB
        #       version is added in the future, the incremental upgrade will
        #       take care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log.log_info('Handling version_1_0_1')

        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log.log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_2', 'version_1_0_3'):
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        return 'version_1_0_3'

    def version_1_0_3(self):
        """
        Version 1_0_3.
        """
        log.log_info('Handling version_1_0_3')

        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_3', 'version_1_0_4') and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_3', 'version_1_0_4'):
                self.set_version('version_1_0_4')
        else:
            self.set_version('version_1_0_4')

        return 'version_1_0_4'

    def version_1_0_4(self):
        """
        Current latest version. Nothing to do here.
        """
        log.log_info('Handling version_1_0_4')

        return None

    def get_version(self):
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version.get(self.TABLE_FIELD):
            return version[self.TABLE_FIELD]

        return 'version_unknown'


    def set_version(self, version=None):
        if not version:
            version = self.CURRENT_VERSION
        log.log_info('Setting version to ' + version)
        entry = { self.TABLE_FIELD : version }
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)


    def common_migration_ops(self):
        try:
            with open(INIT_CFG_FILE) as f:
                init_db = json.load(f)
        except Exception as e:
            raise Exception(str(e))

        for init_cfg_table, table_val in init_db.items():
            data = self.configDB.get_table(init_cfg_table)
            if data:
                # Ignore overriding the values that pre-exist in configDB
                continue
            log.log_info("Migrating table {} from INIT_CFG to config_db".format(init_cfg_table))
            # Update all tables that do not exist in configDB but are present in INIT_CFG
            for init_table_key, init_table_val in table_val.items():
                self.configDB.set_entry(init_cfg_table, init_table_key, init_table_val)

    def migrate(self):
        version = self.get_version()
        log.log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version
        # Perform common migration ops
        self.common_migration_ops()
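
# A minimal usage sketch for the migrator above (the entry point shown is an
# assumption for illustration, not part of the original snippet):
#
#     migrator = DBMigrator(namespace=None)
#     migrator.migrate()
#
# migrate() walks the version_* handlers via getattr() until one returns
# None, then applies common_migration_ops().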
Exemplo n.º 30
0
        click.echo('Downloading image...')
        try:
            urllib.urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook)
        except Exception as e:
            click.echo("Download error: {}".format(e))
            return
        image_path = DEFAULT_IMAGE_PATH
    else:
        image_path = os.path.join("./", url)

    # TODO: Validate the new docker image before disrupting existing images.

    warm = False
    config_db = ConfigDBConnector()
    config_db.connect()
    entry = config_db.get_entry('WARM_RESTART', container_name)
    if entry and entry.get('enable', '').lower() == 'true':
        warm = True

    # warm restart specific processing for the swss, bgp and teamd dockers.
    if warm:
        # make sure orchagent is in clean state if swss is to be upgraded
        if container_name == "swss":
            skipPendingTaskCheck = " -s"
            if enforce_check:
                skipPendingTaskCheck = ""

            cmd = "docker exec -i swss orchagent_restart_check -w 1000 " + skipPendingTaskCheck
            for i in range(1, 6):
                proc = subprocess.Popen(cmd,
                                        stdout=subprocess.PIPE,
Exemplo n.º 31
0
def add_binding(ctx, binding_name, pool_name, acl_name, nat_type,
                twice_nat_id):
    """Add Binding for Dynamic NAT-related configutation"""

    entryFound = False
    table = 'NAT_BINDINGS'
    key = binding_name
    dataKey1 = 'access_list'
    dataKey2 = 'nat_pool'
    dataKey3 = 'nat_type'
    dataKey4 = 'twice_nat_id'

    if acl_name is None:
        acl_name = ""

    if len(binding_name) > 32:
        ctx.fail(
            "Invalid binding name. Maximum allowed binding name is 32 characters !!"
        )

    config_db = ConfigDBConnector()
    config_db.connect()

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == acl_name and data[dataKey2] == pool_name:
            click.echo("Trying to add binding, which is already present.")
            entryFound = True

    binding_dict = config_db.get_table(table)
    if len(binding_dict) >= 16:
        click.echo(
            "Failed to add binding, as already reached maximum binding limit 16."
        )
        entryFound = True

    if nat_type is not None:
        if nat_type == "dnat":
            click.echo("Ignored, DNAT is not yet suported for Binding ")
            entryFound = True
    else:
        nat_type = "snat"

    if twice_nat_id is None:
        twice_nat_id = "NULL"

    if entryFound is False:
        count = 0
        if twice_nat_id is not None:
            count = getTwiceNatIdCountWithStaticEntries(
                twice_nat_id, 'STATIC_NAT', count)
            count = getTwiceNatIdCountWithStaticEntries(
                twice_nat_id, 'STATIC_NAPT', count)
            count = getTwiceNatIdCountWithDynamicBinding(
                twice_nat_id, count, key)
            if count > 1:
                ctx.fail(
                    "Same Twice nat id is not allowed for more than 2 entries!!"
                )

        config_db.set_entry(
            table, key, {
                dataKey1: acl_name,
                dataKey2: pool_name,
                dataKey3: nat_type,
                dataKey4: twice_nat_id
            })
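
# For illustration, a successful call above writes a CONFIG_DB entry of
# roughly this shape (the values shown are hypothetical placeholders):
#
#   NAT_BINDINGS|<binding_name>:
#       access_list  = <acl_name or "">
#       nat_pool     = <pool_name>
#       nat_type     = "snat" (the default when not specified)
#       twice_nat_id = <twice_nat_id or "NULL">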
Exemplo n.º 32
0
class DropMon(object):
    def __init__(self):
        # connect CONFIG DB
        self.config_db = ConfigDBConnector()
        self.config_db.connect()

        # connect COUNTERS_DB
        self.counters_db = ConfigDBConnector()
        self.counters_db.db_connect('COUNTERS_DB')

        # connect APPL DB
        self.app_db = ConfigDBConnector()
        self.app_db.db_connect('APPL_DB')

    def config_drop_mon(self, args):
        self.config_db.mod_entry(
            TAM_DROP_MONITOR_FLOW_TABLE, args.flowname, {
                'acl-table': args.acl_table,
                'acl-rule': args.acl_rule,
                'collector': args.dropcollector,
                'sample': args.dropsample
            })
        return

    def config_drop_mon_aging(self, args):
        self.config_db.mod_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                 "aging",
                                 {'aging-interval': args.aginginterval})
        return

    def config_drop_mon_sample(self, args):
        self.config_db.mod_entry(SAMPLE_RATE_TABLE, args.samplename,
                                 {'sampling-rate': args.rate})
        return

    def clear_single_drop_mon_flow(self, key):
        entry = self.config_db.get_entry(TAM_DROP_MONITOR_FLOW_TABLE, key)
        if entry:
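            # Passing None as the entry data deletes the key from CONFIG_DB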
            self.config_db.set_entry(TAM_DROP_MONITOR_FLOW_TABLE, key, None)
        else:
            return False
        return

    def clear_drop_mon_flow(self, args):
        key = args.flowname
        if key == "all":
            # Get all the flow keys
            table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE)
            if not table_data:
                return True
            # Clear each flow key
            for key in table_data:
                self.clear_single_drop_mon_flow(key)
        else:
            # Clear the specified flow entry
            self.clear_single_drop_mon_flow(key)

        return

    def clear_drop_mon_sample(self, args):
        key = args.samplename
        entry = self.config_db.get_entry(SAMPLE_RATE_TABLE, key)
        if entry:
            self.config_db.set_entry(SAMPLE_RATE_TABLE, key, None)
        else:
            print "Entry Not Found"
            return False
        return

    def clear_drop_mon_aging_int(self, args):
        key = "aging"
        entry = self.config_db.get_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                         key)
        if entry:
            self.config_db.set_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                     key, None)
        else:
            return False
        return

    def show_flow(self, args):
        self.get_print_all_dropmon_flows(args.flowname)
        return

    def get_dropmon_flow_stat(self, flowname):
        api_response_stat = {}
        api_response, entryfound = self.get_dropmon_flow_info(flowname)
        api_response_stat['flow-name'] = flowname
        if entryfound is not None:
            for k in api_response:
                if k == "ietf-ts:each-flow-data":
                    acl_rule = api_response['ietf-ts:each-flow-data'][
                        'acl-rule']
                    acl_table = api_response['ietf-ts:each-flow-data'][
                        'acl-table']
                    api_response_stat['rule-name'] = acl_rule
                    api_response_stat['table-name'] = acl_table

            acl_rule_keys = self.config_db.get_keys(ACL_RULE_TABLE_PREFIX)
            for acl_rule_key in acl_rule_keys:
                if acl_rule_key[1] == acl_rule:
                    acl_counter_key = 'COUNTERS:' + acl_rule_key[
                        0] + ':' + acl_rule_key[1]
                    raw_dropmon_stats = self.counters_db.get_all(
                        self.counters_db.COUNTERS_DB, acl_counter_key)
                    api_response_stat[
                        'ietf-ts:dropmon-stats'] = raw_dropmon_stats

        return api_response_stat, entryfound

    def get_print_all_dropmon_stats(self, name):
        stat_dict = {}
        stat_list = []
        if name != 'all':
            api_response, entryfound = self.get_dropmon_flow_stat(name)
            if entryfound is not None:
                stat_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE)
            # Get data for all keys
            for k in table_data:
                api_each_stat_response, entryfound = self.get_dropmon_flow_stat(
                    k)
                if entryfound is not None:
                    stat_list.append(api_each_stat_response)

        stat_dict['stat-list'] = stat_list
        show_cli_output("show_statistics_flow.j2", stat_dict)
        return

    def show_statistics(self, args):
        self.get_print_all_dropmon_stats(args.flowname)
        return

    def show_aging_interval(self, args):
        key = "aging"
        entry = self.config_db.get_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                         key)
        if entry:
            print "Aging interval : {}".format(entry['aging-interval'])
        return

    def show_sample(self, args):
        self.get_print_all_sample(args.samplename)
        return

    def get_dropmon_flow_info(self, k):
        flow_data = {}
        flow_data['acl-table-name'] = ''
        flow_data['sampling-rate'] = ''
        flow_data['collector'] = ''

        api_response = {}
        key = TAM_DROP_MONITOR_FLOW_TABLE + '|' + k
        raw_flow_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        if raw_flow_data:
            sample = raw_flow_data['sample']
            rate = self.config_db.get_entry(SAMPLE_RATE_TABLE, sample)
            raw_flow_data['sample'] = rate['sampling-rate']
        api_response['ietf-ts:flow-key'] = k
        api_response['ietf-ts:each-flow-data'] = raw_flow_data
        return api_response, raw_flow_data

    def get_print_all_dropmon_flows(self, name):
        flow_dict = {}
        flow_list = []
        if name != 'all':
            api_response, entryfound = self.get_dropmon_flow_info(name)
            if entryfound is not None:
                flow_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_dropmon_flow_info(
                    k)
                if entryfound is not None:
                    flow_list.append(api_each_flow_response)

        flow_dict['flow-list'] = flow_list
        show_cli_output("show_drop_monitor_flow.j2", flow_dict)
        return

    def get_sample_info(self, k):
        sample_data = {}
        sample_data['sampling-rate'] = ''

        api_response = {}
        key = SAMPLE_RATE_TABLE + '|' + k
        raw_sample_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:sample-key'] = k
        api_response['ietf-ts:each-sample-data'] = raw_sample_data
        return api_response, raw_sample_data

    def get_print_all_sample(self, name):
        sample_dict = {}
        sample_list = []
        if name != 'all':
            api_response, entryfound = self.get_sample_info(name)
            if entryfound is not None:
                sample_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(SAMPLE_RATE_TABLE)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_sample_info(k)
                if entryfound is not None:
                    sample_list.append(api_each_flow_response)

        sample_dict['sample-list'] = sample_list
        show_cli_output("show_sample.j2", sample_dict)
        return
Exemplo n.º 33
0
class TunnelPacketHandler(object):

    def __init__(self):
        self.config_db = ConfigDBConnector()
        self.config_db.connect()
        self.state_db = SonicV2Connector()
        self.state_db.connect(STATE_DB)
        self._portchannel_intfs = None
        self.up_portchannels = None
        self.netlink_api = IPRoute()

    @property
    def portchannel_intfs(self):
        """
        Gets all portchannel interfaces and IPv4 addresses in config DB

        Returns:
            (list) Tuples of a portchannel interface name (str) and
                   associated IPv4 address (str)
        """
        if self._portchannel_intfs is None:
            intf_keys = self.config_db.get_keys(PORTCHANNEL_INTERFACE_TABLE)
            portchannel_intfs = []

            for key in intf_keys:
                if isinstance(key, tuple) and len(key) > 1:
                    if ip_interface(key[1]).version == 4:
                        portchannel_intfs.append(key)

            self._portchannel_intfs = portchannel_intfs

        return self._portchannel_intfs
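
    # For illustration, the property above yields tuples such as
    # ('PortChannel0001', '10.0.0.56/31'); the interface name and address
    # here are hypothetical examples, not values from the original snippet.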

    def get_portchannel_index_mapping(self):
        """
        Gets a mapping of interface kernel indices to portchannel interfaces

        Returns:
            (list) integers representing kernel indices
        """
        index_map = {}
        for portchannel in self.portchannel_intfs:
            index = self.netlink_api.link_lookup(ifname=portchannel[0])[0]
            index_map[index] = portchannel

        return index_map

    def get_up_portchannels(self):
        """
        Returns the portchannels which are operationally up

        Returns:
            (list) of interface names which are up, as strings
        """
        pc_index_map = self.get_portchannel_index_mapping()
        pc_indices = list(pc_index_map.keys())
        link_statuses = self.netlink_api.get_links(*pc_indices)
        up_portchannels = []

        for status in link_statuses:
            if status['state'] == 'up':
                port_index = status['index']
                up_portchannels.append(pc_index_map[port_index][0])

        return up_portchannels

    def all_portchannels_established(self):
        """
        Checks if the portchannel interfaces are established

        Note that this status does not indicate operational state
        Returns:
            (bool) True, if all interfaces are established
                   False, otherwise
        """
        intfs = self.portchannel_intfs
        for intf in intfs:
            intf_table_name = INTF_TABLE_TEMPLATE.format(intf[0], intf[1])
            intf_state = self.state_db.get(
                                STATE_DB,
                                intf_table_name,
                                STATE_KEY
                              )

            if intf_state and intf_state.lower() != 'ok':
                return False

        return True

    def wait_for_portchannels(self, interval=5, timeout=60):
        """
        Continuously checks if all portchannel host interfaces are established

        Args:
            interval: the interval (in seconds) at which to perform the check
            timeout: maximum allowed duration (in seconds) to wait for
                     interfaces to come up

        Raises:
            RuntimeError if the timeout duration is reached and interfaces are
                still not up
        """
        start = datetime.now()

        while (datetime.now() - start).seconds < timeout:
            if self.all_portchannels_established():
                logger.log_info("All portchannel intfs are established")
                return None
            logger.log_info("Not all portchannel intfs are established")
            time.sleep(interval)

        raise RuntimeError('Portchannel intfs were not established '
                           'within {} seconds'.format(timeout))

    def get_ipinip_tunnel_addrs(self):
        """
        Get the IP addresses used for the IPinIP tunnel

        These should be the Loopback0 addresses for this device and the
        peer device

        Returns:
            ((str) self_loopback_ip, (str) peer_loopback_ip)
            or
            (None, None) If the tunnel type is not IPinIP
                         or
                         if an error is encountered. This most likely means
                         the host device is not a dual ToR device
        """
        try:
            peer_switch = self.config_db.get_keys(PEER_SWITCH_TABLE)[0]
            tunnel = self.config_db.get_keys(TUNNEL_TABLE)[0]
        except IndexError:
            logger.log_warning('PEER_SWITCH or TUNNEL table '
                               'not found in config DB')
            return None, None

        try:
            tunnel_table = self.config_db.get_entry(TUNNEL_TABLE, tunnel)
            tunnel_type = tunnel_table[TUNNEL_TYPE_KEY].lower()
            self_loopback_ip = tunnel_table[DST_IP_KEY]
            peer_loopback_ip = self.config_db.get_entry(
                                    PEER_SWITCH_TABLE, peer_switch
                                    )[ADDRESS_IPV4_KEY]
        except KeyError as e:
            logger.log_warning(
                'PEER_SWITCH or TUNNEL table missing data, '
                'could not find key {}'
                .format(e)
            )
            return None, None

        if tunnel_type == IPINIP_TUNNEL:
            return self_loopback_ip, peer_loopback_ip

        return None, None

    def get_inner_pkt_type(self, packet):
        """
        Get the type of an inner encapsulated packet

        Returns:
            (str)  'v4' if the inner packet is IPv4
            (str)  'v6' if the inner packet is IPv6
            (bool) False if `packet` is not an IPinIP packet
        """
        if packet.haslayer(IP):
            # Determine inner packet type based on IP protocol number
            # The outer packet type should always be IPv4
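            # (IANA protocol numbers: 4 = IP-in-IP, 41 = IPv6 encapsulation)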
            if packet[IP].proto == 4:
                return IP
            elif packet[IP].proto == 41:
                return IPv6
        return False

    def wait_for_netlink_msgs(self):
        """
        Gathers any RTM_NEWLINK messages

        Returns:
            (list) containing any received messages
        """
        msgs = []
        with IPRoute() as ipr:
            ipr.bind()
            for msg in ipr.get():
                if msg['event'] == RTM_NEWLINK:
                    msgs.append(msg)

        return msgs

    def sniffer_restart_required(self, messages):
        """
        Determines if the packet sniffer needs to be restarted

        A restart is required if all of the following conditions are met:
            1. A netlink message of type RTM_NEWLINK is received
               (this is checked by `wait_for_netlink_msgs`)
            2. The interface index of the message corresponds to a portchannel
               interface
            3. The state of the interface in the message is 'up'
                    Here, we do not care about an interface going down since
                    the sniffer is able to continue sniffing on the other
                    interfaces. However, if an interface has gone down and
                    come back up, we need to restart the sniffer to be able
                    to sniff traffic on the interface that has come back up.
        """
        pc_index_map = self.get_portchannel_index_mapping()
        for msg in messages:
            if msg['index'] in pc_index_map:
                if msg['state'] == 'up':
                    logger.log_info('{} came back up, sniffer restart required'
                                    .format(pc_index_map[msg['index']]))
                    return True
        return False

    def listen_for_tunnel_pkts(self):
        """
        Listens for tunnel packets that are trapped to CPU

        These packets may be trapped if there is no neighbor info for the
        inner packet destination IP in the hardware.
        """

        def _ping_inner_dst(packet):
            """
            Pings the inner destination IP for an encapsulated packet

            Args:
                packet: The encapsulated packet received
            """
            inner_packet_type = self.get_inner_pkt_type(packet)
            if inner_packet_type and packet[IP].dst == self_ip:
                cmds = ['timeout', '0.2', 'ping', '-c1',
                        '-W1', '-i0', '-n', '-q']
                if inner_packet_type == IPv6:
                    cmds.append('-6')
                dst_ip = packet[IP].payload[inner_packet_type].dst
                cmds.append(dst_ip)
                logger.log_info("Running command '{}'".format(' '.join(cmds)))
                subprocess.run(cmds, stdout=subprocess.DEVNULL)

        self_ip, peer_ip = self.get_ipinip_tunnel_addrs()
        if self_ip is None or peer_ip is None:
            logger.log_notice('Could not get tunnel addresses from '
                              'config DB, exiting...')
            return None

        packet_filter = 'host {} and host {}'.format(self_ip, peer_ip)
        logger.log_notice('Starting tunnel packet handler for {}'
                          .format(packet_filter))

        sniff_intfs = self.get_up_portchannels()
        logger.log_info("Listening on interfaces {}".format(sniff_intfs))

        sniffer = AsyncSniffer(
            iface=sniff_intfs,
            filter=packet_filter,
            prn=_ping_inner_dst
        )
        sniffer.start()
        while True:
            msgs = self.wait_for_netlink_msgs()
            if self.sniffer_restart_required(msgs):
                sniffer.stop()
                sniff_intfs = self.get_up_portchannels()
                logger.log_notice('Restarting tunnel packet handler on '
                                  'interfaces {}'.format(sniff_intfs))
                sniffer = AsyncSniffer(
                    iface=sniff_intfs,
                    filter=packet_filter,
                    prn=_ping_inner_dst
                )
                sniffer.start()

    def run(self):
        self.wait_for_portchannels()
        self.listen_for_tunnel_pkts()
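
# A minimal usage sketch, assuming the constants referenced above (STATE_DB,
# PORTCHANNEL_INTERFACE_TABLE, tunnel/peer table names) are defined as in the
# surrounding module; run() blocks while sniffing for trapped tunnel packets:
#
#     handler = TunnelPacketHandler()
#     handler.run()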
Exemplo n.º 34
0
class ZTPEngine():
    '''!
    \brief This class performs core functions of ZTP service.
    '''
    def __init__(self):
        ## Flag to indicate if configuration ztp restart is requested
        self.__ztp_restart = False

        ## start time of ZTP engine
        self.__ztp_engine_start_time = None

        ## Flag to indicate if ZTP configuration has been loaded
        self.__ztp_profile_loaded = False

        ## Run ZTP engine in unit test mode
        self.test_mode = False

        ## Flag to determine if interfaces link scan has to be enabled or not
        self.__link_scan_enabled = None

        ## Interface on which ZTP information has been discovered using DHCP
        self.__ztp_interface = None

        ## ZTP JSON object
        self.objztpJson = None

        ## Flag to indicate reboot
        self.reboot_on_completion = False

        ## Interfaces state table
        self.__intf_state = dict()

        ## Redis DB connectors
        self.configDB = None
        self.applDB = None

    def __connect_to_redis(self):
        '''!
        Establishes connection to the redis DB
           @return  False - If connection to the redis DB failed
                    True  - If connection to the redis DB is successful
        '''
        # Connect to ConfigDB
        try:
            if self.configDB is None:
                self.configDB = ConfigDBConnector()
                self.configDB.connect()
        except:
            self.configDB = None
            return False

        # Connect to AppDB
        try:
            if self.applDB is None:
                self.applDB = SonicV2Connector()
                self.applDB.connect(self.applDB.APPL_DB)
        except:
            self.applDB = None
            return False
        return True

    def __detect_intf_state(self):
        '''!
        Identifies all the interfaces on which ZTP discovery needs to be performed.
        Link state of each identified interface is checked and stored in a dictionary
        for reference.

           @return  True   - If an interface moved from link down to link up state
                    False  - If no interface transitions have been observed
        '''
        link_up_detected = False
        intf_data = os.listdir('/sys/class/net')
        if getCfg('feat-inband'):
            r_intf = re.compile("Ethernet.*|eth.*")
        else:
            r_intf = re.compile("eth.*")
        intf_list = list(filter(r_intf.match, intf_data))
        for intf in natsorted(intf_list):
            try:
                if intf[0:3] == 'eth':
                    fh = open('/sys/class/net/{}/operstate'.format(intf), 'r')
                    operstate = fh.readline().strip().lower()
                    fh.close()
                else:
                    port_entry = self.applDB.get_all(self.applDB.APPL_DB,
                                                     'PORT_TABLE:' + intf)
                    operstate = port_entry.get(b'oper_status').decode(
                        'utf-8').lower()
            except:
                operstate = 'down'
            if ((self.__intf_state.get(intf) is None) or \
                (self.__intf_state.get(intf).get('operstate') != operstate)) and \
                operstate == 'up':
                link_up_detected = True
                logger.info('Link up detected for interface %s' % intf)
            if self.__intf_state.get(intf) is None:
                self.__intf_state[intf] = dict()
            self.__intf_state[intf]['operstate'] = operstate

        # Weed out any stale interfaces that may exist when an expanded port is joined back
        intf_snapshot = list(self.__intf_state.keys())
        for intf in intf_snapshot:
            if intf not in intf_list:
                del self.__intf_state[intf]

        return link_up_detected

    def __is_ztp_profile_active(self):
        '''!
        Checks if the ZTP configuration profile is loaded as the switch running
        configuration and is active

           @return  False - ZTP configuration profile is not active
                    True  - ZTP configuration profile is active
        '''
        profile_active = False
        if self.__connect_to_redis():
            # Check if ZTP configuration is active
            data = self.configDB.get_entry("ZTP", "mode")
            if data is not None and data.get("profile") == 'active':
                profile_active = True
        return profile_active

    def __link_scan(self):
        '''!
        Scan all in-band interfaces' operational status to detect a link up event
           @return  False - If a link scan did not detect at least one switch port link up event
                    True  - If at least one switch port link up event has been detected
        '''

        # Do not attempt link scan when in test mode
        if self.test_mode:
            return False

        if self.__connect_to_redis() is False:
            self.__link_scan_enabled = None
            return False

        if self.__link_scan_enabled is None:
            # Check if ZTP configuration is active
            if self.__is_ztp_profile_active():
                self.__link_scan_enabled = 'True'
            else:
                self.__link_scan_enabled = 'False'

        if self.__link_scan_enabled == 'False':
            return False

        # Populate data of all ztp eligible interfaces
        link_scan_result = self.__detect_intf_state()
        return link_scan_result

    def __cleanup_dhcp_leases(self):

        # Remove stale DHCP lease files from the interfaces used to obtain provisioning information
        runCommand('rm -f /var/lib/dhcp/dhclient*.eth0.leases',
                   capture_stdout=False)
        if getCfg('feat-inband'):
            runCommand('rm -f /var/lib/dhcp/dhclient*.Ethernet*.leases',
                       capture_stdout=False)

    def __removeZTPProfile(self):
        '''!
         If ZTP configuration profile is operational, remove ZTP configuration profile and load
         startup configuration file. If there is no startup configuration file,
         load factory default configuration.
        '''

        # Do not attempt to remove ZTP configuration if working in unit test mode
        if self.test_mode:
            return

        # Remove ZTP configuration profile if loaded
        updateActivity('Verifying configuration')

        # Use a fallback default configuration if configured to
        _config_fallback = ''
        if (self.objztpJson is not None and (self.objztpJson['status'] == 'FAILED' or self.objztpJson['status'] == 'SUCCESS') \
            and self.objztpJson['config-fallback']) or \
           (self.objztpJson is None and getCfg('config-fallback') is True):
            _config_fallback = ' config-fallback'

        # Execute profile removal command with appropriate options
        rc = runCommand(getCfg('ztp-lib-dir') + '/ztp-profile.sh remove' +
                        _config_fallback,
                        capture_stdout=False)

        # Remove ZTP configuration startup-config
        if os.path.isfile(getCfg('config-db-json')) is True:
            try:
                config_db = None
                with open(getCfg('config-db-json')) as json_file:
                    config_db = json.load(json_file)
                    json_file.close()
                if config_db is not None and config_db.get('ZTP'):
                    logger.info("Deleting ZTP configuration saved in '%s'." %
                                (getCfg('config-db-json')))
                    del config_db['ZTP']
                    with open(getCfg('config-db-json'), 'w') as json_file:
                        json.dump(config_db, json_file, indent=4)
                        json_file.close()
            except Exception as e:
                logger.error(
                    "Exception [%s] encountered while verifying '%s'." %
                    (str(e), getCfg('config-db-json')))

        self.__ztp_profile_loaded = False

    def __loadZTPProfile(self, event):
        '''!
         Load ZTP configuration profile if there is no saved configuration file.
         This establishes connectivity to all interfaces and starts DHCP discovery.
           @return  False - If ZTP configuration profile is not loaded
                    True  - If ZTP configuration profile is loaded
        '''
        # Do not attempt to install ZTP configuration if working in unit test mode
        if self.test_mode:
            return False

        if self.__ztp_profile_loaded is False:
            updateActivity('Checking running configuration')
            logger.info(
                'Checking running configuration to load ZTP configuration profile.'
            )
            cmd = getCfg('ztp-lib-dir') + '/ztp-profile.sh install ' + event
            # When performing ZTP discovery, force load ZTP profile. When
            # ZTP is resuming previous session, use configuration already loaded during
            # config-setup
            rc = runCommand(cmd, capture_stdout=False)
            self.__ztp_profile_loaded = True
            return True
        return False

    def __createProvScriptJson(self):
        '''!
         Create ZTP JSON data to execute provisioning script specified by DHCP Option 239 URL.
        '''

        json_data = '{"ztp": {"provisioning-script":{"plugin":{"url":"file://' + getCfg(
            'provisioning-script') + '","ignore-section-data":true}}\
                   ,"restart-ztp-no-config":false}}'

        f = open(getCfg('ztp-json'), 'w')
        f.write(json_data)
        f.close()

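    # An equivalent construction of the same payload with json.dumps (a
    # sketch, not part of the original snippet; avoids hand-built JSON):
    #
    #     json_data = json.dumps({'ztp': {
    #         'provisioning-script': {'plugin': {
    #             'url': 'file://' + getCfg('provisioning-script'),
    #             'ignore-section-data': True}},
    #         'restart-ztp-no-config': False}})
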
    def __createGraphserviceJson(self):
        '''!
         Create ZTP JSON data to load graph file specified by DHCP Option 225 URL. Also
         includes ACL JSON file if specified by DHCP Option 226.
        '''

        # Verify that graph file can be downloaded
        if self.__downloadURL(getCfg('graph-url'),
                              '/tmp/test_minigraph.xml') is False:
            return False
        else:
            # Clean up
            os.remove('/tmp/test_minigraph.xml')

        # Verify that acl json file can be downloaded
        if os.path.isfile(getCfg('acl-url')):
            if self.__downloadURL(getCfg('acl-url'),
                                  '/tmp/test_acl.json') is False:
                return False
            else:
                # Clean up
                os.remove('/tmp/test_acl.json')

        # Read the url file and identify the URL to be downloaded
        f = open(getCfg('graph-url'), 'r')
        graph_url_str = f.readline().strip()
        f.close()

        acl_url_str = None
        if os.path.isfile(getCfg('acl-url')):
            f = open(getCfg('acl-url'), 'r')
            acl_url_str = f.readline().strip()
            f.close()
        json_data = '{"ztp":{"graphservice": { "minigraph-url" : { "url":"' + graph_url_str + '"}'
        if acl_url_str is not None and len(acl_url_str) != 0:
            json_data = json_data + ', "acl-url" : { "url":"' + acl_url_str + '"}'
        json_data = json_data + '}, "restart-ztp-no-config":false} }'
        f = open(getCfg('ztp-json'), 'w')
        f.write(json_data)
        f.close()
        return True

    def __rebootAction(self, section, delayed_reboot=False):
        '''!
         Perform system reboot if reboot-on-success or reboot-on-failure is defined in the
         configuration section data.

         @param section (dict) Configuration section data containing status and reboot-on flags

        '''

        # Obtain section status
        status = section.get('status')

        # Check if flag is set to reboot on SUCCESS and status is SUCCESS as well
        if getField(section, 'reboot-on-success', bool,
                    False) is True and status == 'SUCCESS':
            logger.warning(
                'ZTP is rebooting the device as reboot-on-success flag is set.'
            )
            updateActivity('System reboot requested on success')
            if self.test_mode and delayed_reboot == False:
                sys.exit(0)
            else:
                if delayed_reboot:
                    self.reboot_on_completion = True
                else:
                    systemReboot()

        # Check if flag is set to reboot on FAIL and status is FAILED as well
        if getField(section, 'reboot-on-failure', bool,
                    False) is True and status == 'FAILED':
            logger.warning(
                'ZTP is rebooting the device as reboot-on-failure flag is set.'
            )
            updateActivity('System reboot requested on failure')
            if self.test_mode and delayed_reboot == False:
                sys.exit(0)
            else:
                if delayed_reboot:
                    self.reboot_on_completion = True
                else:
                    systemReboot()

    def __evalZTPResult(self):
        '''!
         Determines the final result of ZTP after processing all configuration
         sections and their results. Also performs a system reboot if a
         reboot-on flag is set.

         ZTP result is determined as SUCCESS if - Configuration section(s) status is SUCCESS
                                                or (configuration section(s) status is FAILED and
                                                    configuration section(s) ignore-result is True)
                                                or ZTP ignore-result is True

         Disabled Configuration sections are ignored.
        '''

        updateActivity('Evaluating ZTP result')
        # Check if overall ZTP ignore-result flag is set
        if self.objztpJson['ignore-result']:
            self.objztpJson['status'] = 'SUCCESS'
            logger.info(
                'ZTP result is marked as SUCCESS at %s. ZTP ignore-result flag is set.'
                % self.objztpJson['timestamp'])

        else:
            # Look through individual configuration sections
            for sec in self.objztpJson.section_names:
                # Retrieve section data
                section = self.objztpJson.ztpDict.get(sec)
                logger.info('Checking configuration section %s result: %s, ignore-result: %r.' % \
                              (sec, section.get('status'), section.get('ignore-result')))
                # Check if configuration section has failed and ignore-result flag is not set
                if section.get('status') == 'FAILED' and section.get(
                        'ignore-result') is False:
                    # Mark ZTP as failed and bail out
                    self.objztpJson['error'] = '%s FAILED' % sec
                    self.objztpJson['status'] = 'FAILED'
                    logger.info(
                        'ZTP failed at %s as configuration section %s FAILED.'
                        % (self.objztpJson['timestamp'], sec))
                    return

        # Mark ZTP as SUCCESS
        self.objztpJson['status'] = 'SUCCESS'
        logger.info('ZTP successfully completed at %s.' %
                    self.objztpJson['timestamp'])

        # Check reboot on result flags and take action
        self.__rebootAction(self.objztpJson.ztpDict, delayed_reboot=True)

    def __processConfigSections(self):
        '''!
         Process and execute individual configuration sections defined in ZTP JSON. Plugin for each
         configuration section is resolved and executed. Configuration section data is provided as
         command line argument to the plugin. Each and every section is processed before this function
         returns.

        '''

        # Obtain a copy of the list of configuration sections
        section_names = list(self.objztpJson.section_names)

        # set temporary flags
        abort = False
        sort = True

        logger.debug('Processing configuration sections: %s' %
                     ', '.join(section_names))
        # Loop through each sections till all of them are processed
        while section_names and abort is False:
            # Start from a freshly sorted list; re-sort whenever the list changes during processing
            if sort:
                sorted_list = sorted(section_names)
                sort = False
            # Loop through configuration section in a sorted order
            for sec in sorted_list:
                # Retrieve configuration section data
                section = self.objztpJson.ztpDict.get(sec)
                try:
                    # Retrieve individual section's progress
                    sec_status = section.get('status')
                    if sec_status == 'BOOT' or sec_status == 'SUSPEND':
                        # Mark section status as in progress
                        self.objztpJson.updateStatus(section, 'IN-PROGRESS')
                        if section.get('start-timestamp') is None:
                            section['start-timestamp'] = section['timestamp']
                            self.objztpJson.objJson.writeJson()
                        logger.info(
                            'Processing configuration section %s at %s.' %
                            (sec, section['timestamp']))
                    elif sec_status != 'IN-PROGRESS':
                        # Skip completed sections
                        logger.debug(
                            'Removing section %s from list. Status %s.' %
                            (sec, sec_status))
                        section_names.remove(sec)
                        # set flag to sort the configuration sections list again
                        sort = True
                        # Ignore disabled configuration sections
                        if sec_status == 'DISABLED':
                            logger.info(
                                'Configuration section %s skipped as its status is set to DISABLED.'
                                % sec)
                        continue
                    updateActivity('Processing configuration section %s' % sec)
                    # Get the appropriate plugin to be used for this configuration section
                    plugin = self.objztpJson.plugin(sec)
                    # Get the location of this configuration section's input data parsed from the input ZTP JSON file
                    plugin_input = getCfg(
                        'ztp-tmp-persistent') + '/' + sec + '/' + getCfg(
                            'section-input-file')
                    # Initialize result flag to FAILED
                    finalResult = 'FAILED'
                    rc = 1
                    # Check if plugin could not be resolved
                    if plugin is None:
                        logger.error(
                            'Unable to resolve plugin to be used for configuration section %s. Marking it as FAILED.'
                            % sec)
                        section[
                            'error'] = 'Unable to find or download requested plugin'
                    elif os.path.isfile(plugin) and os.path.isfile(
                            plugin_input):
                        plugin_args = self.objztpJson.pluginArgs(sec)
                        plugin_data = section.get('plugin')

                        # Determine if shell has to be used to execute plugin
                        _shell = getField(plugin_data, 'shell', bool, False)

                        # Construct the full plugin command string along with arguments
                        plugin_cmd = plugin
                        if plugin_args is not None:
                            plugin_cmd = plugin_cmd + ' ' + plugin_args

                        # A plugin has been resolved and its input configuration section data as well
                        logger.debug('Executing plugin %s.' % (plugin_cmd))
                        # Execute identified plugin
                        rc = runCommand(plugin_cmd,
                                        capture_stdout=False,
                                        use_shell=_shell)

                        logger.debug('Plugin %s exit code = %d.' %
                                     (plugin_cmd, rc))
                        # Compare plugin exit code
                        if rc == 0:
                            finalResult = 'SUCCESS'
                        elif section.get('suspend-exit-code'
                                         ) is not None and section.get(
                                             'suspend-exit-code') == rc:
                            finalResult = 'SUSPEND'
                        else:
                            finalResult = 'FAILED'
                except Exception as e:
                    logger.debug(
                        'Exception [%s] encountered for configuration section %s.'
                        % (str(e), sec))
                    logger.info(
                        'Exception encountered while processing configuration section %s. Marking it as FAILED.'
                        % sec)
                    section[
                        'error'] = 'Exception [%s] encountered while executing the plugin' % (
                            str(e))
                    finalResult = 'FAILED'

                # Update this configuration section's result in ztp json file
                logger.info(
                    'Processed Configuration section %s with result %s, exit code (%d) at %s.'
                    % (sec, finalResult, rc, section['timestamp']))
                if finalResult == 'FAILED' and section.get('error') is None:
                    section['error'] = 'Plugin failed'
                section['exit-code'] = rc
                self.objztpJson.updateStatus(section, finalResult)

                # Check if abort ZTP on failure flag is set
                if getField(section, 'halt-on-failure', bool,
                            False) is True and finalResult == 'FAILED':
                    logger.info(
                        'Halting ZTP as Configuration section %s FAILED and halt-on-failure flag is set.'
                        % sec)
                    abort = True
                    break

                # Check reboot on result flags
                self.__rebootAction(section)

    def __processZTPJson(self):
        '''!
         Process ZTP JSON file downloaded using URL provided by DHCP Option 67, DHCPv6 Option 59 or
         local ZTP JSON file.

        '''
        logger.debug('Starting to process ZTP JSON file %s.' % self.json_src)
        updateActivity('Processing ZTP JSON file %s' % self.json_src)
        try:
            # Read provided ZTP JSON file and load it
            self.objztpJson = ZTPJson(self.json_src, getCfg('ztp-json'))
        except ValueError as e:
            logger.error(
                'Exception [%s] occurred while processing ZTP JSON file %s.' %
                (str(e), self.json_src))
            logger.error('ZTP JSON file %s processing failed.' %
                         (self.json_src))
            try:
                os.remove(getCfg('ztp-json'))
                if os.path.isfile(getCfg('ztp-json-shadow')):
                    os.remove(getCfg('ztp-json-shadow'))
            except OSError as v:
                if v.errno != errno.ENOENT:
                    logger.warning(
                        'Exception [%s] encountered while deleting ZTP JSON file %s.'
                        % (str(v), getCfg('ztp-json')))
                    raise
            self.objztpJson = None
            # Restart networking after a wait time to discover new provisioning data
            if getCfg('restart-ztp-on-invalid-data'):
                return ("restart", "Invalid provisioning data processed")
            else:
                return ("stop", "Invalid provisioning data processed")

        if self.objztpJson['ztp-json-source'] is None:
            self.objztpJson['ztp-json-source'] = self.ztp_mode

        # Check if ZTP process has already completed. If not mark start of ZTP.
        if self.objztpJson['status'] == 'BOOT':
            self.objztpJson['status'] = 'IN-PROGRESS'
            if self.objztpJson['start-timestamp'] is None:
                self.objztpJson[
                    'start-timestamp'] = self.__ztp_engine_start_time
                self.objztpJson.objJson.writeJson()
        elif self.objztpJson['status'] != 'IN-PROGRESS':
            # Re-start ZTP if requested
            if getCfg('monitor-startup-config') is True and self.__ztp_restart:
                self.__ztp_restart = False
                # Discover new ZTP data after deleting historic ZTP data
                logger.info(
                    "ZTP restart requested. Deleting previous ZTP session JSON data."
                )
                os.remove(getCfg('ztp-json'))
                if os.path.isfile(getCfg('ztp-json-shadow')):
                    os.remove(getCfg('ztp-json-shadow'))
                self.objztpJson = None
                return ("retry", "ZTP restart requested")
            else:
                # ZTP was successfully completed in previous session. No need to proceed, return and exit service.
                logger.info(
                    "ZTP already completed with result %s at %s." %
                    (self.objztpJson['status'], self.objztpJson['timestamp']))
                return ("stop", "ZTP completed")

        logger.info('Starting ZTP using JSON file %s at %s.' %
                    (self.json_src, self.objztpJson['timestamp']))

        # Initialize connectivity if not done already
        self.__loadZTPProfile("resume")

        # Process available configuration sections in ZTP JSON
        self.__processConfigSections()

        # Determine ZTP result
        self.__evalZTPResult()

        # Check the restart-ZTP conditions:
        # (1) ZTP result is FAILED and restart-ztp-on-failure is set, or
        _restart_ztp_on_failure = (self.objztpJson['status'] == 'FAILED' and \
                        self.objztpJson['restart-ztp-on-failure'] == True)

        # (2) ZTP completed, no startup-config is found, restart-ztp-no-config
        #     is set and config-fallback is not set
        _restart_ztp_missing_config = ( (self.objztpJson['status'] == 'SUCCESS' or self.objztpJson['status'] == 'FAILED') and \
                           self.objztpJson['restart-ztp-no-config'] == True and \
                           self.objztpJson['config-fallback'] == False and
                           os.path.isfile(getCfg('config-db-json')) is False )

        # Mark ZTP for restart
        if _restart_ztp_missing_config or _restart_ztp_on_failure:
            os.remove(getCfg('ztp-json'))
            if os.path.isfile(getCfg('ztp-json-shadow')):
                os.remove(getCfg('ztp-json-shadow'))
            self.objztpJson = None
            # Remove startup-config file to obtain a new one through ZTP
            if getCfg('monitor-startup-config') is True and os.path.isfile(
                    getCfg('config-db-json')):
                os.remove(getCfg('config-db-json'))
            if _restart_ztp_missing_config:
                return (
                    "restart",
                    "ZTP completed but startup configuration '%s' not found" %
                    (getCfg('config-db-json')))
            elif _restart_ztp_on_failure:
                return ("restart", "ZTP completed with FAILED status")

        return ("stop", "ZTP completed")

    def __updateZTPMode(self, mode, src_file):
        '''!
         Identify source of ZTP JSON file. Store ZTP mode of operation.

         @param mode (str) Indicates how provisioning data has been provided to the switch
                               - Local file
                               - DHCP Option 67
                               - DHCPv6 Option 59
                               - DHCP Option 239
                               - DHCPv6 Option 239
                               - Minigraph URL Option 225, ACL URL Option 226

         @param src_file (str) File used as ZTP JSON file source

         @return          Always returns True

        '''
        logger.debug('Set ZTP mode as %s and provisioning data is %s.' %
                     (mode, src_file))
        dhcp_list = [
            'dhcp-opt67', 'dhcp6-opt59', 'dhcp-opt239', 'dhcp6-opt239',
            'dhcp-opt225-graph-url'
        ]
        self.json_src = src_file
        self.ztp_mode = mode
        if self.ztp_mode == 'local-fs':
            self.ztp_mode = self.ztp_mode + ' (' + src_file + ')'
        elif self.ztp_mode in dhcp_list and self.__ztp_interface is not None:
            self.ztp_mode = self.ztp_mode + ' (' + self.__ztp_interface + ')'
        return True
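
    # Illustration, not part of the original source: __updateZTPMode above
    # decorates the stored mode string with its provenance. For example
    # (values hypothetical):
    #   mode='local-fs',   src_file='/host/ztp_data_local.json'
    #       -> ztp_mode == 'local-fs (/host/ztp_data_local.json)'
    #   mode='dhcp-opt67', discovered interface 'Ethernet0'
    #       -> ztp_mode == 'dhcp-opt67 (Ethernet0)'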

    def __read_ztp_interface(self):
        intf_file = getCfg('ztp-run-dir') + '/ztp.lock/interface'
        if os.path.isfile(intf_file):
            try:
                # Expected format: a single line '<token>:<interface>'
                with open(intf_file, 'r') as f:
                    self.__ztp_interface = f.readline().strip().split(':')[1]
            except (IOError, OSError, IndexError):
                self.__ztp_interface = None

    def __downloadURL(self, url_file, dst_file, url_prefix=None):
        '''!
         Helper API to read url information from a file, download the
         file using the url and store contents as a dst_file.

         @param url_file (str) File containing URL to be downloaded
         @param dst_file (str) Destination file to be used
         @param url_prefix (str) Optional string to be prepended to url

         @return   True - If url_file was successfully downloaded
                   False - Failed to download url_file

        '''

        logger.debug('Downloading provided URL %s and saving as %s.' %
                     (url_file, dst_file))
        # Initialize so the exception handler below can reference url_str
        # even if reading url_file fails
        url_str = url_file
        try:
            # Read the url file and identify the URL to be downloaded
            with open(url_file, 'r') as f:
                url_str = f.readline().strip()

            # urlparse() never returns None; a missing scheme means the URL
            # is relative and needs the optional url_prefix to complete it
            res = urlparse(url_str)
            if res.scheme == '':
                # Use passed url_prefix to construct final URL
                if url_prefix is not None:
                    url_str = url_prefix + url_str
                    # Re-validate the combined URL before attempting download
                    if urlparse(url_str).scheme == '':
                        logger.error(
                            'Failed to download provided URL %s, malformed url.'
                            % (url_str))
                        return False
                else:
                    logger.error(
                        'Failed to download provided URL %s, malformed url.' %
                        (url_str))
                    return False

            # Create a downloader object using source and destination information
            updateActivity('Downloading provisioning data from %s to %s' %
                           (url_str, dst_file))
            logger.info('Downloading provisioning data from %s to %s' %
                        (url_str, dst_file))
            objDownloader = Downloader(url_str, dst_file)
            # Initiate download
            rc, fname = objDownloader.getUrl()
            # Check download result
            if rc == 0 and fname is not None and os.path.isfile(dst_file):
                # Get the interface on which ZTP data was received
                self.__read_ztp_interface()
                return True
            else:
                logger.error(
                    'Failed to download provided URL %s returncode=%d.' %
                    (url_str, rc))
                return False
        except (IOError, OSError) as e:
            logger.error(
                'Exception [%s] encountered during download of provided URL %s.'
                % (str(e), url_str))
            return False
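
    @staticmethod
    def _url_has_scheme_sketch(url_str):
        # Hypothetical helper, not part of the original source: a minimal
        # sketch of the validation __downloadURL above performs with
        # urlparse(). A usable URL needs at least a scheme (e.g. 'http',
        # 'tftp'); otherwise the optional url_prefix must supply one.
        return urlparse(url_str).scheme != ''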

    def __discover(self):
        '''!
         ZTP data discovery logic. The following order of precedence is used:

             Processed or under-process ZTP JSON file
           > ZTP JSON file specified in pre-defined location as part of the image
           > ZTP JSON URL specified via DHCP Option-67
           > ZTP JSON URL specified via DHCPv6 Option-59
           > Simple provisioning script URL specified via DHCP Option-239
           > Simple provisioning script URL specified via DHCPv6 Option-239
           > Minigraph URL and ACL URL specified via DHCP Option 225, 226

           @return  False - No ZTP data was found or ZTP data could not be downloaded.
                    True  - ZTP data was recognized and the ZTP JSON / provisioning
                            script was successfully downloaded, or a startup
                            configuration file was detected.

        '''

        logger.debug('Start discovery.')
        if os.path.isfile(getCfg('ztp-json')):
            return self.__updateZTPMode('ztp-session', getCfg('ztp-json'))

        if os.path.isfile(
                getCfg('config-db-json')) and getCfg('monitor-startup-config'):
            self.ztp_mode = 'MANUAL_CONFIG'
            return True

        if os.path.isfile(getCfg('ztp-json-local')):
            return self.__updateZTPMode('local-fs', getCfg('ztp-json-local'))
        if os.path.isfile(getCfg('opt67-url')):
            _tftp_server = None
            _url_prefix = None
            # Check if tftp-server name has been passed
            if os.path.isfile(getCfg('opt66-tftp-server')):
                with open(getCfg('opt66-tftp-server'), 'r') as fh:
                    _tftp_server = fh.readline().strip()
                if _tftp_server is not None and _tftp_server != '':
                    _url_prefix = 'tftp://' + _tftp_server + '/'
            if self.__downloadURL(getCfg('opt67-url'),
                                  getCfg('ztp-json-opt67'),
                                  url_prefix=_url_prefix):
                return self.__updateZTPMode('dhcp-opt67',
                                            getCfg('ztp-json-opt67'))
        if os.path.isfile(getCfg('opt59-v6-url')):
            if self.__downloadURL(getCfg('opt59-v6-url'),
                                  getCfg('ztp-json-opt59')):
                return self.__updateZTPMode('dhcp6-opt59',
                                            getCfg('ztp-json-opt59'))
        if os.path.isfile(getCfg('opt239-url')):
            if self.__downloadURL(getCfg('opt239-url'),
                                  getCfg('provisioning-script')):
                self.__createProvScriptJson()
                return self.__updateZTPMode('dhcp-opt239', getCfg('ztp-json'))
        if os.path.isfile(getCfg('opt239-v6-url')):
            if self.__downloadURL(getCfg('opt239-v6-url'),
                                  getCfg('provisioning-script')):
                self.__createProvScriptJson()
                return self.__updateZTPMode('dhcp6-opt239', getCfg('ztp-json'))
        if os.path.isfile(getCfg('graph-url')):
            if self.__createGraphserviceJson():
                return self.__updateZTPMode('dhcp-opt225-graph-url',
                                            getCfg('ztp-json'))
        return False
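
    # Design note, not part of the original source: the DHCP branch of the
    # discovery chain above is effectively a priority-ordered table. A
    # hypothetical data-driven restatement (cfg keys as used above):
    #
    #   _DHCP_SOURCES = [  # (url-file cfg key, destination cfg key, mode)
    #       ('opt67-url',     'ztp-json-opt67',      'dhcp-opt67'),
    #       ('opt59-v6-url',  'ztp-json-opt59',      'dhcp6-opt59'),
    #       ('opt239-url',    'provisioning-script', 'dhcp-opt239'),
    #       ('opt239-v6-url', 'provisioning-script', 'dhcp6-opt239'),
    #   ]
    #
    # The opt239 script sources additionally pass through
    # __createProvScriptJson() before the mode is recorded.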

    def __forceRestartDiscovery(self, msg):
        # Remove existing leases to source new provisioning data
        self.__cleanup_dhcp_leases()
        _msg = '%s. Waiting for %d seconds before restarting ZTP.' % (
            msg, getCfg('restart-ztp-interval'))
        logger.warning(_msg)
        updateActivity(_msg)
        time.sleep(getCfg('restart-ztp-interval'))
        self.ztp_mode = 'DISCOVERY'
        # Force install of ZTP configuration profile
        self.__ztp_profile_loaded = False
        # Restart link-scan
        self.__intf_state = dict()
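
    # Note, not part of the original source: after __forceRestartDiscovery
    # returns, ztp_mode is back to 'DISCOVERY' and __ztp_profile_loaded is
    # cleared, so the while-loop in executeLoop below reinstalls the ZTP
    # profile and re-enters __discover() on its next iteration.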

    def executeLoop(self, test_mode=False):
        '''!
         ZTP service loop which performs provisioning data discovery and initiates processing.
        '''

        updateActivity('Initializing')

        # Set testing mode
        self.test_mode = test_mode

        # Check if ZTP is disabled administratively, bail out if disabled
        if getCfg('admin-mode') is False:
            logger.info('ZTP is administratively disabled.')
            self.__removeZTPProfile()
            return

        # Check if ZTP data restart flag is set
        if os.path.isfile(getCfg('ztp-restart-flag')):
            self.__ztp_restart = True
            os.remove(getCfg('ztp-restart-flag'))

        if self.test_mode:
            logger.warning(
                'ZTP service started in test mode with restricted functionality.'
            )
        else:
            logger.info('ZTP service started.')

        self.__ztp_engine_start_time = getTimestamp()
        _start_time = None
        self.ztp_mode = 'DISCOVERY'
        # Main provisioning data discovery loop
        while self.ztp_mode == 'DISCOVERY':
            updateActivity('Discovering provisioning data', overwrite=False)
            try:
                result = self.__discover()
            except Exception as e:
                logger.error(
                    "Exception [%s] encountered while running the discovery logic."
                    % (str(e)))
                _exc_type, _exc_value, _exc_traceback = sys.exc_info()
                __tb = traceback.extract_tb(_exc_traceback)
                for tb_entry in __tb:
                    logger.debug('  File ' + tb_entry[0] + ', line ' +
                                 str(tb_entry[1]) + ', in ' + str(tb_entry[2]))
                    logger.debug('    ' + str(tb_entry[3]))
                self.__forceRestartDiscovery(
                    "Invalid provisioning data received")
                continue

            if result:
                if self.ztp_mode == 'MANUAL_CONFIG':
                    logger.info(
                        "Configuration file '%s' detected. Shutting down ZTP service."
                        % (getCfg('config-db-json')))
                    break
                elif self.ztp_mode != 'DISCOVERY':
                    (rv, msg) = self.__processZTPJson()
                    if rv == "retry":
                        self.ztp_mode = 'DISCOVERY'
                    elif rv == "restart":
                        self.__forceRestartDiscovery(msg)
                    else:
                        break

            # Initialize in-band interfaces to establish connectivity if not done already
            self.__loadZTPProfile("discovery")
            logger.debug('Provisioning data not found.')

            # Scan for inband interfaces to link up and restart interface connectivity
            if self.__link_scan():
                updateActivity('Restarting network discovery after link scan')
                logger.info('Restarting network discovery after link scan.')
                runCommand('systemctl restart interfaces-config',
                           capture_stdout=False)
                logger.info('Restarted network discovery after link scan.')
                _start_time = time.time()
                continue

            # Start keeping time of last time restart networking was done
            if _start_time is None:
                _start_time = time.time()

            # Check if we have to restart networking
            if (time.time() - _start_time > getCfg('restart-ztp-interval')):
                updateActivity('Restarting network discovery')
                if self.test_mode is False:
                    # Remove existing leases to source new provisioning data
                    self.__cleanup_dhcp_leases()
                    logger.info('Restarting network discovery.')
                    runCommand('systemctl restart interfaces-config',
                               capture_stdout=False)
                    logger.info('Restarted network discovery.')
                _start_time = time.time()
                continue

            # Try after sometime
            time.sleep(getCfg('discovery-interval'))

        # Cleanup installed ZTP configuration profile
        self.__removeZTPProfile()
        if self.reboot_on_completion and self.test_mode is False:
            updateActivity('System reboot requested')
            systemReboot()
        updateActivity('Exiting ZTP server')
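
# Usage sketch, not part of the original source: a hypothetical service
# entry point driving the loop above (the engine class name is assumed).
#
#   if __name__ == '__main__':
#       engine = ZTPEngine()
#       engine.executeLoop(test_mode=False)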