Example #1
def show():
    """ Show the counter configuration """
    configdb = ConfigDBConnector()
    configdb.connect()
    queue_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE')
    port_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PORT')
    port_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PORT_BUFFER_DROP)
    rif_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'RIF')
    queue_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE_WATERMARK')
    pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK')
    pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP)
    buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK)
    
    header = ("Type", "Interval (in ms)", "Status")
    data = []
    if queue_info:
        data.append(["QUEUE_STAT", queue_info.get("POLL_INTERVAL", DEFLT_10_SEC), queue_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if port_info:
        data.append(["PORT_STAT", port_info.get("POLL_INTERVAL", DEFLT_1_SEC), port_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if port_drop_info:
        data.append([PORT_BUFFER_DROP, port_drop_info.get("POLL_INTERVAL", DEFLT_60_SEC), port_drop_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if rif_info:
        data.append(["RIF_STAT", rif_info.get("POLL_INTERVAL", DEFLT_1_SEC), rif_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if queue_wm_info:
        data.append(["QUEUE_WATERMARK_STAT", queue_wm_info.get("POLL_INTERVAL", DEFLT_10_SEC), queue_wm_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if pg_wm_info:
        data.append(["PG_WATERMARK_STAT", pg_wm_info.get("POLL_INTERVAL", DEFLT_10_SEC), pg_wm_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if pg_drop_info:
        data.append(['PG_DROP_STAT', pg_drop_info.get("POLL_INTERVAL", DEFLT_10_SEC), pg_drop_info.get("FLEX_COUNTER_STATUS", DISABLE)])
    if buffer_pool_wm_info:
        data.append(["BUFFER_POOL_WATERMARK_STAT", buffer_pool_wm_info.get("POLL_INTERVAL", DEFLT_10_SEC), buffer_pool_wm_info.get("FLEX_COUNTER_STATUS", DISABLE)])

    click.echo(tabulate(data, headers=header, tablefmt="simple", missingval=""))
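
# Example #1 relies on module-level constants defined elsewhere in the CLI module.
# A minimal sketch of plausible definitions, assuming the polling intervals are
# expressed in milliseconds (names come from the code above; values are assumptions):
PORT_BUFFER_DROP = "PORT_BUFFER_DROP"
PG_DROP = "PG_DROP"
BUFFER_POOL_WATERMARK = "BUFFER_POOL_WATERMARK"
DEFLT_1_SEC = "1000"
DEFLT_10_SEC = "10000"
DEFLT_60_SEC = "60000"
DISABLE = "disable"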
Example #2
def name(vxlan_name):
    """Show vxlan name <vxlan_name> information"""
    config_db = ConfigDBConnector()
    config_db.connect()
    header = [
        'vxlan tunnel name', 'source ip', 'destination ip', 'tunnel map name',
        'tunnel map mapping(vni -> vlan)'
    ]

    # Fetching data from config_db for VXLAN TUNNEL
    vxlan_data = config_db.get_entry('VXLAN_TUNNEL', vxlan_name)

    table = []
    if vxlan_data:
        r = []
        r.append(vxlan_name)
        r.append(vxlan_data.get('src_ip'))
        r.append(vxlan_data.get('dst_ip'))
        vxlan_map_keys = config_db.keys(
            config_db.CONFIG_DB,
            'VXLAN_TUNNEL_MAP{}{}{}*'.format(config_db.KEY_SEPARATOR,
                                             vxlan_name,
                                             config_db.KEY_SEPARATOR))
        if vxlan_map_keys:
            vxlan_map_mapping = config_db.get_all(config_db.CONFIG_DB,
                                                  vxlan_map_keys[0])
            r.append(vxlan_map_keys[0].split(config_db.KEY_SEPARATOR, 2)[2])
            r.append("{} -> {}".format(vxlan_map_mapping.get('vni'),
                                       vxlan_map_mapping.get('vlan')))
        table.append(r)

    click.echo(tabulate(table, header))
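
# For context, a VXLAN_TUNNEL_MAP key in CONFIG_DB has the layout
# "VXLAN_TUNNEL_MAP|<tunnel name>|<map name>" (KEY_SEPARATOR is '|' for CONFIG_DB),
# so splitting on the separator twice yields the map name. A hypothetical key:
sample_key = 'VXLAN_TUNNEL_MAP|vtep1|map_100_Vlan100'
assert sample_key.split('|', 2)[2] == 'map_100_Vlan100'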
Example #3
def showPfcPrio(interface):
    """
    PFC handler to display PFC enabled priority information.
    """
    header = ('Interface', 'Lossless priorities')
    table = []
        
    configdb = ConfigDBConnector()
    configdb.connect()

    # Get all the interfaces with QoS map information
    intfs = configdb.get_keys('PORT_QOS_MAP')

    # The user specifies an interface but we cannot find it
    if interface and interface not in intfs:
        click.echo('Cannot find interface {0}'.format(interface))
        return 
    
    if interface:
        intfs = [interface]
    
    for intf in intfs: 
        entry = configdb.get_entry('PORT_QOS_MAP', intf)
        table.append([intf, entry.get('pfc_enable', 'N/A')])
    
    sorted_table = natsorted(table)
    click.echo()
    click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval=""))
    click.echo()
Example #4
def remove_basic(ctx, global_ip, local_ip):
    """Remove Static NAT-related configuration"""

    # Verify the ip address format
    if is_valid_ipv4_address(local_ip) is False:
        ctx.fail(
            "Given local ip address {} is invalid. Please enter a valid local ip address !!"
            .format(local_ip))

    if is_valid_ipv4_address(global_ip) is False:
        ctx.fail(
            "Given global ip address {} is invalid. Please enter a valid global ip address !!"
            .format(global_ip))

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = 'STATIC_NAT'
    key = global_ip
    dataKey = 'local_ip'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey] == local_ip:
            config_db.set_entry(table, key, None)
            entryFound = True

    if entryFound is False:
        click.echo("Trying to delete static nat entry, which is not present.")
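
# Note: in ConfigDBConnector, set_entry(table, key, None) deletes the entry, which
# is how the removal above takes effect, e.g. with hypothetical values:
#     config_db.set_entry('STATIC_NAT', '65.55.42.1', None)  # drops STATIC_NAT|65.55.42.1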
Example #5
def showPfcAsym(interface):
    """
    PFC handler to display asymmetric PFC information.
    """
    header = ('Interface', 'Asymmetric')

    configdb = ConfigDBConnector()
    configdb.connect()

    if interface:
        db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface))
    else:
        db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*')

    table = []
        
    for i in db_keys or [None]:
        key = None 
        if i:
            key = i.split('|')[-1]

        if key and key.startswith('Ethernet'):
            entry = configdb.get_entry('PORT', key)
            table.append([key, entry.get('pfc_asym', 'N/A')])

    sorted_table = natsorted(table)

    click.echo()
    click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval=""))
    click.echo()
Example #6
def add(address, timeout, key, auth_type, port, pri, use_mgmt_vrf):
    """Specify a TACACS+ server"""
    if not clicommon.is_ipaddress(address):
        click.echo('Invalid ip address')
        return

    config_db = ConfigDBConnector()
    config_db.connect()
    old_data = config_db.get_entry('TACPLUS_SERVER', address)
    if old_data != {}:
        click.echo('server %s already exists' % address)
    else:
        data = {
            'tcp_port': str(port),
            'priority': pri
        }
        if auth_type is not None:
            data['auth_type'] = auth_type
        if timeout is not None:
            data['timeout'] = str(timeout)
        if key is not None:
            data['passkey'] = key
        if use_mgmt_vrf :
            data['vrf'] = "mgmt"
        config_db.set_entry('TACPLUS_SERVER', address, data)
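
# Illustration with hypothetical argument values: add('10.0.0.100', None, 'secret',
# None, 49, 1, False) would write a CONFIG_DB entry roughly like
#     TACPLUS_SERVER|10.0.0.100 -> {'tcp_port': '49', 'priority': 1, 'passkey': 'secret'}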
Example #7
def remove_udp(ctx, global_ip, global_port, local_ip, local_port):
    """Remove Static UDP Protocol NAPT-related configuration"""

    # Verify the ip address format
    if is_valid_ipv4_address(local_ip) is False:
        ctx.fail(
            "Given local ip address {} is invalid. Please enter a valid local ip address !!"
            .format(local_ip))

    if is_valid_ipv4_address(global_ip) is False:
        ctx.fail(
            "Given global ip address {} is invalid. Please enter a valid global ip address !!"
            .format(global_ip))

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = "STATIC_NAPT"
    key = "{}|UDP|{}".format(global_ip, global_port)
    dataKey1 = 'local_ip'
    dataKey2 = 'local_port'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == local_ip and data[dataKey2] == str(local_port):
            config_db.set_entry(table, key, None)
            entryFound = True

    if entryFound is False:
        click.echo("Trying to delete static napt entry, which is not present.")
Example #8
def getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, dynamic_key):
    """Get the twice nat id count with dynamic binding"""

    config_db = ConfigDBConnector()
    config_db.connect()

    nat_binding_dict = config_db.get_table('NAT_BINDINGS')
    twice_id_count = count

    if not nat_binding_dict:
        return twice_id_count

    for key, values in nat_binding_dict.items():
        nat_pool_data = config_db.get_entry('NAT_POOL', values["nat_pool"])
        twice_id = 0

        if dynamic_key is not None:
            if dynamic_key == key:
                continue

        if not nat_pool_data:
            continue

        if "twice_nat_id" in values:
            if values["twice_nat_id"] == "NULL":
                continue
            else:
                twice_id = int(values["twice_nat_id"])
        else:
            continue

        if twice_id == twice_nat_id:
            twice_id_count += 1

    return twice_id_count
Example #9
def configPfcPrio(status, interface, priority):
    """Enable or disable a PFC lossless priority on an interface."""
    configdb = ConfigDBConnector()
    configdb.connect()

    if interface not in configdb.get_keys('PORT_QOS_MAP'):
        click.echo('Cannot find interface {0}'.format(interface))
        return

    # Current lossless priorities on the interface
    entry = configdb.get_entry('PORT_QOS_MAP', interface)
    enable_prio = entry.get('pfc_enable', '').split(',')

    # Avoid '' in enable_prio
    enable_prio = [x.strip() for x in enable_prio if x.strip()]
    
    if status == 'on' and priority in enable_prio:
        click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface))
        return
    
    if status == 'off' and priority not in enable_prio:
        click.echo('Priority {0} is not enabled on {1}'.format(priority, interface))
        return
    
    if status == 'on':
        enable_prio.append(priority)
    
    else:
        enable_prio.remove(priority)
     
    enable_prio.sort()    
    configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)})

    # Show the latest PFC configuration
    showPfcPrio(interface)
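
# Worked example with hypothetical values: if PORT_QOS_MAP|Ethernet0 currently has
# pfc_enable = '3,4', then configPfcPrio('on', 'Ethernet0', '5') rewrites the field
# to '3,4,5'; a subsequent configPfcPrio('off', 'Ethernet0', '4') would leave '3,5'.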
Example #10
def remove_pool(ctx, pool_name):
    """Remove Pool for Dynamic NAT-related configuration"""

    entryFound = False
    table = "NAT_POOL"
    key = pool_name

    if len(pool_name) > 32:
        ctx.fail(
            "Invalid pool name. Maximum allowed pool name is 32 characters !!")

    config_db = ConfigDBConnector()
    config_db.connect()

    data = config_db.get_entry(table, key)
    if not data:
        click.echo("Trying to delete pool, which is not present.")
        entryFound = True

    binding_dict = config_db.get_table('NAT_BINDINGS')
    if binding_dict and entryFound == False:
        for binding_name, binding_values in binding_dict.items():
            if binding_values['nat_pool'] == pool_name:
                click.echo(
                    "Pool is not removed, as it is mapped to Binding {}, remove the pool binding first !!"
                    .format(binding_name))
                entryFound = True
                break

    if entryFound == False:
        config_db.set_entry(table, key, None)
Example #11
def del_table_key(table, entry, key):
    config_db = ConfigDBConnector()
    config_db.connect()
    data = config_db.get_entry(table, entry)
    if data:
        if key in data:
            del data[key]
        config_db.set_entry(table, entry, data)
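
# Hypothetical usage: remove only the 'bgp_asn' field from DEVICE_METADATA|localhost
# while keeping the rest of the entry intact:
#     del_table_key('DEVICE_METADATA', 'localhost', 'bgp_asn')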
Example #12
def breakout(ctx):
    """Show Breakout Mode information by interfaces"""
    # Reading data from Redis configDb
    config_db = ConfigDBConnector()
    config_db.connect()
    ctx.obj = {'db': config_db}

    try:
        cur_brkout_tbl = config_db.get_table('BREAKOUT_CFG')
    except Exception as e:
        click.echo("Breakout table is not present in Config DB")
        raise click.Abort()

    if ctx.invoked_subcommand is None:
        # Get port capability from platform and hwsku related files
        hwsku_path = device_info.get_path_to_hwsku_dir()
        platform_file = device_info.get_path_to_port_config_file()
        platform_dict = readJsonFile(platform_file)['interfaces']
        hwsku_file = os.path.join(hwsku_path, HWSKU_JSON)
        hwsku_dict = readJsonFile(hwsku_file)['interfaces']

        if not platform_dict or not hwsku_dict:
            click.echo("Cannot load port config from {} or {} file".format(
                platform_file, hwsku_file))
            raise click.Abort()

        for port_name in platform_dict:
            cur_brkout_mode = cur_brkout_tbl[port_name]["brkout_mode"]

            # Update default breakout mode and current breakout mode to platform_dict
            platform_dict[port_name].update(hwsku_dict[port_name])
            platform_dict[port_name]["Current Breakout Mode"] = cur_brkout_mode

            # List all the child ports if present
            child_port_dict = get_child_ports(port_name, cur_brkout_mode,
                                              platform_file)
            if not child_port_dict:
                click.echo(
                    "Cannot find ports from {} file ".format(platform_file))
                raise click.Abort()

            child_ports = natsorted(list(child_port_dict.keys()))

            children, speeds = [], []
            # Update portname and speed of child ports if present
            for port in child_ports:
                speed = config_db.get_entry('PORT', port).get('speed')
                if speed is not None:
                    speeds.append(str(int(speed) // 1000) + 'G')
                    children.append(port)

            platform_dict[port_name]["child ports"] = ",".join(children)
            platform_dict[port_name]["child port speeds"] = ",".join(speeds)

        # Sorted keys by name in natural sort Order for human readability
        parsed = OrderedDict((k, platform_dict[k])
                             for k in natsorted(list(platform_dict.keys())))
        click.echo(json.dumps(parsed, indent=4))
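
# readJsonFile() is used above but defined elsewhere in the module; a minimal sketch
# of what such a helper might look like (an assumption, not the original helper):
def readJsonFile(file_path):
    """Load a JSON file and return its contents as a dict."""
    with open(file_path) as f:
        return json.load(f)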
Example #13
    def show_thresholds(self, resource):
        """
        CRM Handler to display thresholds information.
        """

        configdb = self.cfgdb
        if configdb is None:
            # Get the namespace list
            namespaces = multi_asic.get_namespace_list()

            configdb = ConfigDBConnector(namespace=namespaces[0])
            configdb.connect()

        crm_info = configdb.get_entry('CRM', 'Config')

        header = ("Resource Name", "Threshold Type", "Low Threshold",
                  "High Threshold")
        data = []

        if crm_info:
            if resource == 'all':
                for res in [
                        "ipv4_route", "ipv6_route", "ipv4_nexthop",
                        "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor",
                        "nexthop_group_member", "nexthop_group", "acl_table",
                        "acl_group", "acl_entry", "acl_counter", "fdb_entry",
                        "ipmc_entry", "snat_entry", "dnat_entry", "mpls_inseg",
                        "mpls_nexthop"
                ]:
                    try:
                        data.append([
                            res, crm_info[res + "_threshold_type"],
                            crm_info[res + "_low_threshold"],
                            crm_info[res + "_high_threshold"]
                        ])
                    except KeyError:
                        pass
            else:
                try:
                    data.append([
                        resource, crm_info[resource + "_threshold_type"],
                        crm_info[resource + "_low_threshold"],
                        crm_info[resource + "_high_threshold"]
                    ])
                except KeyError:
                    pass
        else:
            click.echo('\nError! Could not get CRM configuration.')

        click.echo()
        click.echo(
            tabulate(data, headers=header, tablefmt="simple", missingval=""))
        click.echo()
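
# For reference, the CRM|Config entry read above stores three fields per resource,
# named <resource>_threshold_type, <resource>_low_threshold and <resource>_high_threshold,
# e.g. with illustrative values:
#     'ipv4_route_threshold_type': 'percentage',
#     'ipv4_route_low_threshold': '70',
#     'ipv4_route_high_threshold': '85'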
Example #14
def name(vnet_name):
    """Show vnet name <vnet name> information"""
    config_db = ConfigDBConnector()
    config_db.connect()
    header = ['vnet name', 'vxlan tunnel', 'vni', 'peer list']

    # Fetching data from config_db for VNET
    vnet_data = config_db.get_entry('VNET', vnet_name)

    def tablelize(vnet_key, vnet_data):
        table = []
        if vnet_data:
            r = []
            r.append(vnet_key)
            r.append(vnet_data.get('vxlan_tunnel'))
            r.append(vnet_data.get('vni'))
            r.append(vnet_data.get('peer_list'))
            table.append(r)
        return table

    click.echo(tabulate(tablelize(vnet_name, vnet_data), header))
Example #15
def remove_binding(ctx, binding_name):
    """Remove Binding for Dynamic NAT-related configuration"""

    entryFound = False
    table = 'NAT_BINDINGS'
    key = binding_name

    if len(binding_name) > 32:
        ctx.fail(
            "Invalid binding name. Maximum allowed binding name is 32 characters !!"
        )

    config_db = ConfigDBConnector()
    config_db.connect()

    data = config_db.get_entry(table, key)
    if not data:
        click.echo("Trying to delete binding, which is not present.")
        entryFound = True

    if entryFound == False:
        config_db.set_entry(table, key, None)
Example #16
    def show_summary(self):
        """
        CRM Handler to display general information.
        """

        configdb = self.cfgdb
        if configdb is None:
            # Get the namespace list
            namespaces = multi_asic.get_namespace_list()

            configdb = ConfigDBConnector(namespace=namespaces[0])
            configdb.connect()

        crm_info = configdb.get_entry('CRM', 'Config')

        if crm_info:
            try:
                click.echo('\nPolling Interval: ' + crm_info['polling_interval'] + ' second(s)\n')
            except KeyError:
                click.echo('\nError! Could not get CRM configuration.\n')
                click.echo('\nError! Please configure polling interval.\n')
        else:
            click.echo('\nError! Could not get CRM configuration.\n')
Example #17

    metadata['buffer_model'] = 'dynamic'
    config_db.set_entry('DEVICE_METADATA', 'localhost', metadata)

    logger.log_notice("buffer_model has been updated to dynamic")

    # Start the buffermgrd
    command = 'docker exec swss supervisorctl start buffermgrd'
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, err = proc.communicate()
    if proc.returncode != 0:
        print("Failed to start buffermgrd {}".format(err))
        exit(1)

    logger.log_notice("Daemon buffermgrd has been started")


# Connect to the CONFIG_DB
db_kwargs = {}
config_db = ConfigDBConnector(**db_kwargs)
config_db.db_connect('CONFIG_DB')

# Don't enable dynamic buffer calculation if it is not a default SKU
metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
if 'ACS-MSN' not in metadata['hwsku']:
    print("Don't enable dynamic buffer calculation for non-default SKUs")
    exit(0)

lossless_pgs = stop_traditional_buffer_model(config_db)

start_dynamic_buffer_model(config_db, lossless_pgs, metadata)
Example #18
def add_binding(ctx, binding_name, pool_name, acl_name, nat_type,
                twice_nat_id):
    """Add Binding for Dynamic NAT-related configuration"""

    entryFound = False
    table = 'NAT_BINDINGS'
    key = binding_name
    dataKey1 = 'access_list'
    dataKey2 = 'nat_pool'
    dataKey3 = 'nat_type'
    dataKey4 = 'twice_nat_id'

    if acl_name is None:
        acl_name = ""

    if len(binding_name) > 32:
        ctx.fail(
            "Invalid binding name. Maximum allowed binding name is 32 characters !!"
        )

    config_db = ConfigDBConnector()
    config_db.connect()

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == acl_name and data[dataKey2] == pool_name:
            click.echo("Trying to add binding, which is already present.")
            entryFound = True

    binding_dict = config_db.get_table(table)
    if len(binding_dict) == 16:
        click.echo(
            "Failed to add binding, as already reached maximum binding limit 16."
        )
        entryFound = True

    if nat_type is not None:
        if nat_type == "dnat":
            click.echo("Ignored, DNAT is not yet supported for Binding")
            entryFound = True
    else:
        nat_type = "snat"

    if twice_nat_id is None:
        twice_nat_id = "NULL"

    if entryFound is False:
        count = 0
        if twice_nat_id is not None:
            count = getTwiceNatIdCountWithStaticEntries(
                twice_nat_id, 'STATIC_NAT', count)
            count = getTwiceNatIdCountWithStaticEntries(
                twice_nat_id, 'STATIC_NAPT', count)
            count = getTwiceNatIdCountWithDynamicBinding(
                twice_nat_id, count, key)
            if count > 1:
                ctx.fail(
                    "Same Twice nat id is not allowed for more than 2 entries!!"
                )

        config_db.set_entry(
            table, key, {
                dataKey1: acl_name,
                dataKey2: pool_name,
                dataKey3: nat_type,
                dataKey4: twice_nat_id
            })
Example #19
def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type,
            twice_nat_id):
    """Add Static UDP Protocol NAPT-related configuration"""

    # Verify the ip address format
    if is_valid_ipv4_address(local_ip) is False:
        ctx.fail(
            "Given local ip address {} is invalid. Please enter a valid local ip address !!"
            .format(local_ip))

    if is_valid_ipv4_address(global_ip) is False:
        ctx.fail(
            "Given global ip address {} is invalid. Please enter a valid global ip address !!"
            .format(global_ip))

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = "STATIC_NAPT"
    key = "{}|UDP|{}".format(global_ip, global_port)
    dataKey1 = 'local_ip'
    dataKey2 = 'local_port'
    dataKey3 = 'nat_type'
    dataKey4 = 'twice_nat_id'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == local_ip and data[dataKey2] == str(local_port):
            click.echo(
                "Trying to add static napt entry, which is already present.")
            entryFound = True

    if nat_type == 'snat':
        ipAddress = local_ip
    else:
        ipAddress = global_ip

    if isIpOverlappingWithAnyStaticEntry(ipAddress, 'STATIC_NAT') is True:
        ctx.fail("Given entry is overlapping with existing NAT entry !!")

    if entryFound is False:
        counters_db = SonicV2Connector()
        counters_db.connect(counters_db.COUNTERS_DB)
        snat_entries = 0
        max_entries = 0
        exists = counters_db.exists(counters_db.COUNTERS_DB,
                                    'COUNTERS_GLOBAL_NAT:Values')
        if exists:
            counter_entry = counters_db.get_all(counters_db.COUNTERS_DB,
                                                'COUNTERS_GLOBAL_NAT:Values')
            if 'SNAT_ENTRIES' in counter_entry:
                snat_entries = counter_entry['SNAT_ENTRIES']
            if 'MAX_NAT_ENTRIES' in counter_entry:
                max_entries = counter_entry['MAX_NAT_ENTRIES']

        if int(snat_entries) >= int(max_entries):
            click.echo(
                "Max limit is reached for NAT entries, skipping adding the entry."
            )
            entryFound = True

    if entryFound is False:
        count = 0
        if twice_nat_id is not None:
            count = getTwiceNatIdCountWithStaticEntries(
                twice_nat_id, table, count)
            count = getTwiceNatIdCountWithDynamicBinding(
                twice_nat_id, count, None)
            if count > 1:
                ctx.fail(
                    "Same Twice nat id is not allowed for more than 2 entries!!"
                )

        if nat_type is not None and twice_nat_id is not None:
            config_db.set_entry(
                table, key, {
                    dataKey1: local_ip,
                    dataKey2: local_port,
                    dataKey3: nat_type,
                    dataKey4: twice_nat_id
                })
        elif nat_type is not None:
            config_db.set_entry(table, key, {
                dataKey1: local_ip,
                dataKey2: local_port,
                dataKey3: nat_type
            })
        elif twice_nat_id is not None:
            config_db.set_entry(table, key, {
                dataKey1: local_ip,
                dataKey2: local_port,
                dataKey4: twice_nat_id
            })
        else:
            config_db.set_entry(table, key, {
                dataKey1: local_ip,
                dataKey2: local_port
            })
Example #20
def add_pool(ctx, pool_name, global_ip_range, global_port_range):
    """Add Pool for Dynamic NAT-related configuration"""

    if len(pool_name) > 32:
        ctx.fail(
            "Invalid pool name. Maximum allowed pool name is 32 characters !!")

    # Verify the ip address range and format
    ip_address = global_ip_range.split("-")
    if len(ip_address) > 2:
        ctx.fail(
            "Given ip address range {} is invalid. Please enter a valid ip address range !!"
            .format(global_ip_range))
    elif len(ip_address) == 2:
        if is_valid_ipv4_address(ip_address[0]) is False:
            ctx.fail(
                "Given ip address {} is not valid global address. Please enter a valid ip address !!"
                .format(ip_address[0]))

        if is_valid_ipv4_address(ip_address[1]) is False:
            ctx.fail(
                "Given ip address {} is not valid global address. Please enter a valid ip address !!"
                .format(ip_address[1]))

        ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))
        ipHighLimit = int(ipaddress.IPv4Address(ip_address[1]))
        if ipLowLimit >= ipHighLimit:
            ctx.fail(
                "Given ip address range {} is invalid. Please enter a valid ip address range !!"
                .format(global_ip_range))
    else:
        if is_valid_ipv4_address(ip_address[0]) is False:
            ctx.fail(
                "Given ip address {} is not valid global address. Please enter a valid ip address !!"
                .format(ip_address[0]))
        ipLowLimit = int(ipaddress.IPv4Address(ip_address[0]))
        ipHighLimit = int(ipaddress.IPv4Address(ip_address[0]))

    # Verify the port address range and format
    if global_port_range is not None:
        port_address = global_port_range.split("-")

        if len(port_address) > 2:
            ctx.fail(
                "Given port address range {} is invalid. Please enter a valid port address range !!"
                .format(global_port_range))
        elif len(port_address) == 2:
            if is_valid_port_address(port_address[0]) is False:
                ctx.fail(
                    "Given port value {} is invalid. Please enter a valid port value !!"
                    .format(port_address[0]))

            if is_valid_port_address(port_address[1]) is False:
                ctx.fail(
                    "Given port value {} is invalid. Please enter a valid port value !!"
                    .format(port_address[1]))

            portLowLimit = int(port_address[0])
            portHighLimit = int(port_address[1])
            if portLowLimit >= portHighLimit:
                ctx.fail(
                    "Given port address range {} is invalid. Please enter a valid port address range !!"
                    .format(global_port_range))
        else:
            if is_valid_port_address(port_address[0]) is False:
                ctx.fail(
                    "Given port value {} is invalid. Please enter a valid port value !!"
                    .format(port_address[0]))
    else:
        global_port_range = "NULL"

    config_db = ConfigDBConnector()
    config_db.connect()

    entryFound = False
    table = "NAT_POOL"
    key = pool_name
    dataKey1 = 'nat_ip'
    dataKey2 = 'nat_port'

    data = config_db.get_entry(table, key)
    if data:
        if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range:
            click.echo("Trying to add pool, which is already present.")
            entryFound = True

    pool_dict = config_db.get_table(table)
    if len(pool_dict) == 16:
        click.echo(
            "Failed to add pool, as already reached maximum pool limit 16.")
        entryFound = True

    # Verify the Ip address is overlapping with any Static NAT entry
    if entryFound == False:
        static_dict = config_db.get_table('STATIC_NAT')
        if static_dict:
            for staticKey, staticValues in static_dict.items():
                global_ip = "---"
                local_ip = "---"
                nat_type = "dnat"

                if isinstance(staticKey, str) is True:
                    global_ip = staticKey
                else:
                    continue

                local_ip = staticValues["local_ip"]

                if "nat_type" in staticValues:
                    nat_type = staticValues["nat_type"]

                if nat_type == "snat":
                    global_ip = local_ip

                ipAddress = int(ipaddress.IPv4Address(global_ip))
                if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit):
                    ctx.fail(
                        "Given Ip address entry is overlapping with existing Static NAT entry !!"
                    )

    if entryFound == False:
        config_db.set_entry(table, key, {
            dataKey1: global_ip_range,
            dataKey2: global_port_range
        })
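
# Worked example with hypothetical values: global_ip_range '10.0.0.1-10.0.0.10' gives
# ipLowLimit = 167772161 and ipHighLimit = 167772170; a single address such as
# '10.0.0.5' sets both limits to the same value, and a missing port range is stored
# as the literal string "NULL".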
Example #21
class DBMigrator():
    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays at 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     non-zero values.
              build: sequentially increases within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_2_0_4'

        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True,
                                              namespace=namespace,
                                              **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        if namespace is None:
            self.appDB = ConfigDBConnector(**db_kwargs)
        else:
            self.appDB = ConfigDBConnector(use_unix_socket_path=True,
                                           namespace=namespace,
                                           **db_kwargs)
        self.appDB.db_connect('APPL_DB')

        self.stateDB = SonicV2Connector(host='127.0.0.1')
        if self.stateDB is not None:
            self.stateDB.connect(self.stateDB.STATE_DB)

        version_info = device_info.get_sonic_version_info()
        asic_type = version_info.get('asic_type')
        self.asic_type = asic_type

        if asic_type == "mellanox":
            from mellanox_buffer_migrator import MellanoxBufferMigrator
            self.mellanox_buffer_migrator = MellanoxBufferMigrator(
                self.configDB, self.appDB, self.stateDB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data:
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if an IP address is present in the key. If it
        is present, the key would be a tuple; otherwise, it would be a
        string.
        '''
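        # e.g. (illustration): ('Vlan1000', '192.168.0.1/21') -> True, 'Vlan1000' -> False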
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from the existing INTERFACE table with an IP prefix
        to have ONE additional entry without the IP prefix. E.g., for an entry
        "Vlan1000|192.168.0.1/21": {}, this function shall add an entry without
        the IP prefix as "Vlan1000": {}. This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
            'INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE',
            'LOOPBACK_INTERFACE'
        }
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data:
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue

        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data:
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log.log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from the existing INTF table in APPL_DB during warm boot with an IP prefix
        to have ONE additional entry without the IP prefix. E.g., for an entry
        "Vlan1000:192.168.0.1/21": {}, this function shall add an entry without
        the IP prefix as "Vlan1000": {}. This also migrates the 'lo' interface to 'Loopback0'.
        '''
        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")

        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log.log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')

            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log.log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def migrate_copp_table(self):
        '''
        Delete the existing COPP table
        '''
        if self.appDB is None:
            return

        keys = self.appDB.keys(self.appDB.APPL_DB, "COPP_TABLE:*")
        if keys is None:
            return
        for copp_key in keys:
            self.appDB.delete(self.appDB.APPL_DB, copp_key)

    def migrate_feature_table(self):
        '''
        Combine CONTAINER_FEATURE and FEATURE tables into FEATURE table.
        '''
        feature_table = self.configDB.get_table('FEATURE')
        for feature, config in feature_table.items():
            state = config.get('status')
            if state is not None:
                config['state'] = state
                config.pop('status')
                self.configDB.set_entry('FEATURE', feature, config)

        container_feature_table = self.configDB.get_table('CONTAINER_FEATURE')
        for feature, config in container_feature_table.items():
            self.configDB.mod_entry('FEATURE', feature, config)
            self.configDB.set_entry('CONTAINER_FEATURE', feature, None)

    def migrate_config_db_buffer_tables_for_dynamic_calculation(
            self, speed_list, cable_len_list, default_dynamic_th,
            abandon_method, append_item_method):
        '''
        Migrate buffer tables to dynamic calculation mode
        parameters
        @speed_list - list of speed supported
        @cable_len_list - list of cable length supported
        @default_dynamic_th - default dynamic th
        @abandon_method - a function which is called to abandon the migration and keep the current configuration
                          if the current one doesn't match the default one
        @append_item_method - a function which is called to append an item to the list of pending commit items
                              any update to buffer configuration will be pended and won't be applied until
                              all configuration is checked and aligns with the default one

        1. Buffer profiles for lossless PGs in BUFFER_PROFILE table will be removed
           if their names have the convention of pg_lossless_<speed>_<cable_length>_profile
           where the speed and cable_length belong to speed_list and cable_len_list respectively
           and the dynamic_th is equal to default_dynamic_th
        2. Insert tables required for dynamic buffer calculation
           - DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE: {'default_dynamic_th': default_dynamic_th}
           - LOSSLESS_TRAFFIC_PATTERN|AZURE: {'mtu': '1024', 'small_packet_percentage': '100'}
        3. For lossless dynamic PGs, remove the explicit referencing buffer profiles
           Before: BUFFER_PG|<port>|3-4: {'profile': 'BUFFER_PROFILE|pg_lossless_<speed>_<cable_length>_profile'}
           After:  BUFFER_PG|<port>|3-4: {'profile': 'NULL'}
        '''
        # Migrate BUFFER_PROFILEs, removing dynamically generated profiles
        dynamic_profile = self.configDB.get_table('BUFFER_PROFILE')
        profile_pattern = 'pg_lossless_([1-9][0-9]*000)_([1-9][0-9]*m)_profile'
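        # Illustration: the name 'pg_lossless_100000_5m_profile' matches with
        # group(1) == '100000' (the speed) and group(2) == '5m' (the cable length).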
        for name, info in dynamic_profile.items():
            m = re.search(profile_pattern, name)
            if not m:
                continue
            speed = m.group(1)
            cable_length = m.group(2)
            if speed in speed_list and cable_length in cable_len_list:
                append_item_method(('BUFFER_PROFILE', name, None))
                log.log_info(
                    "Lossless profile {} has been removed".format(name))

        # Migrate BUFFER_PGs, removing the explicit designated profiles
        buffer_pgs = self.configDB.get_table('BUFFER_PG')
        ports = self.configDB.get_table('PORT')
        all_cable_lengths = self.configDB.get_table('CABLE_LENGTH')
        if not buffer_pgs or not ports or not all_cable_lengths:
            log.log_notice(
                "At least one of the tables BUFFER_PG, PORT and CABLE_LENGTH hasn't been defined, skip the following migration"
            )
            abandon_method()
            return True

        cable_lengths = all_cable_lengths[list(all_cable_lengths.keys())[0]]
        for name, profile in buffer_pgs.items():
            # do the db migration
            try:
                port, pg = name
                profile_name = profile['profile'][1:-1].split('|')[1]
                if pg == '0':
                    if profile_name != 'ingress_lossy_profile':
                        log.log_notice(
                            "BUFFER_PG table entry {} has non default profile {} configured"
                            .format(name, profile_name))
                        abandon_method()
                        return True
                    else:
                        continue
                elif pg != '3-4':
                    log.log_notice(
                        "BUFFER_PG table entry {} isn't default PG(0 or 3-4)".
                        format(name))
                    abandon_method()
                    return True
                m = re.search(profile_pattern, profile_name)
                if not m:
                    log.log_notice(
                        "BUFFER_PG table entry {} has non-default profile name {}"
                        .format(name, profile_name))
                    abandon_method()
                    return True
                speed = m.group(1)
                cable_length = m.group(2)

                if speed == ports[port]['speed'] and cable_length == cable_lengths[port]:
                    append_item_method(('BUFFER_PG', name, {
                        'profile': 'NULL'
                    }))
                else:
                    log.log_notice(
                        "Lossless PG profile {} for port {} doesn't match its speed {} or cable length {}, keep using traditional buffer calculation mode"
                        .format(profile_name, port, speed, cable_length))
                    abandon_method()
                    return True
            except Exception:
                log.log_notice("Exception occurred while parsing the profiles")
                abandon_method()
                return True

        # Insert other tables required for dynamic buffer calculation
        metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
        metadata['buffer_model'] = 'dynamic'
        append_item_method(('DEVICE_METADATA', 'localhost', metadata))
        append_item_method(('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE', {
            'default_dynamic_th': default_dynamic_th
        }))
        append_item_method(('LOSSLESS_TRAFFIC_PATTERN', 'AZURE', {
            'mtu': '1024',
            'small_packet_percentage': '100'
        }))

        return True

    def prepare_dynamic_buffer_for_warm_reboot(self,
                                               buffer_pools=None,
                                               buffer_profiles=None,
                                               buffer_pgs=None):
        '''
        This handles the very first warm reboot of buffermgrd (dynamic) when the system is warm-rebooted from an old image.
        In this case steps need to be taken to get buffermgrd prepared for the warm reboot.

        During warm reboot, buffer tables should be installed in the first place.
        However, that cannot be achieved when the system is warm-rebooted from an old image
        without dynamic buffer support, because the buffer info wasn't in the APPL_DB in the old image.
        The solution is to copy that info from CONFIG_DB into APPL_DB in db_migrator.
        During warm-reboot, db_migrator adjusts buffer info in CONFIG_DB by removing some fields
        according to requirement from dynamic buffer calculation.
        The buffer info before that adjustment needs to be copied to APPL_DB.

        1. set WARM_RESTART_TABLE|buffermgrd as {restore_count: 0}
        2. Copy the following tables from CONFIG_DB into APPL_DB in case of warm reboot
           The separator in fields that reference objects in other table needs to be updated from '|' to ':'
           - BUFFER_POOL
           - BUFFER_PROFILE, separator updated for field 'pool'
           - BUFFER_PG, separator updated for field 'profile'
           - BUFFER_QUEUE, separator updated for field 'profile'
           - BUFFER_PORT_INGRESS_PROFILE_LIST, separator updated for field 'profile_list'
           - BUFFER_PORT_EGRESS_PROFILE_LIST, separator updated for field 'profile_list'

        '''
        warmreboot_state = self.stateDB.get(
            self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system',
            'enable')
        mmu_size = self.stateDB.get(self.stateDB.STATE_DB,
                                    'BUFFER_MAX_PARAM_TABLE|global',
                                    'mmu_size')
        if warmreboot_state == 'true' and not mmu_size:
            log.log_notice(
                "This is the very first run of buffermgrd (dynamic), prepare info required from warm reboot"
            )
        else:
            return True

        buffer_table_list = [
            ('BUFFER_POOL', buffer_pools, None),
            ('BUFFER_PROFILE', buffer_profiles, 'pool'),
            ('BUFFER_PG', buffer_pgs, 'profile'),
            ('BUFFER_QUEUE', None, 'profile'),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST', None, 'profile_list'),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST', None, 'profile_list')
        ]

        for pair in buffer_table_list:
            keys_copied = []
            keys_ignored = []
            table_name, entries, reference_field_name = pair
            app_table_name = table_name + "_TABLE"
            if not entries:
                entries = self.configDB.get_table(table_name)
            for key, items in entries.items():
                # copy items to appl db
                if reference_field_name:
                    confdb_ref = items.get(reference_field_name)
                    if not confdb_ref or confdb_ref == "NULL":
                        keys_ignored.append(key)
                        continue
                    items_referenced = confdb_ref.split(',')
                    appdb_ref = ""
                    first_item = True
                    for item in items_referenced:
                        if first_item:
                            first_item = False
                        else:
                            appdb_ref += ','
                        subitems = item.split('|')
                        first_key = True
                        for subitem in subitems:
                            if first_key:
                                appdb_ref += subitem + '_TABLE'
                                first_key = False
                            else:
                                appdb_ref += ':' + subitem
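                    # Illustration: a CONFIG_DB reference such as
                    # 'BUFFER_POOL|ingress_lossless_pool' becomes
                    # 'BUFFER_POOL_TABLE:ingress_lossless_pool' in APPL_DB.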

                    items[reference_field_name] = appdb_ref
                keys_copied.append(key)
                if type(key) is tuple:
                    appl_db_key = app_table_name + ':' + ':'.join(key)
                else:
                    appl_db_key = app_table_name + ':' + key
                for field, data in items.items():
                    self.appDB.set(self.appDB.APPL_DB, appl_db_key, field,
                                   data)

            if keys_copied:
                log.log_info(
                    "The following items in table {} in CONFIG_DB have been copied to APPL_DB: {}"
                    .format(table_name, keys_copied))
            if keys_ignored:
                log.log_info(
                    "The following items in table {} in CONFIG_DB have been ignored: {}"
                    .format(table_name, keys_ignored))

        return True

    def migrate_config_db_port_table_for_auto_neg(self):
        table_name = 'PORT'
        port_table = self.configDB.get_table(table_name)
        for key, value in port_table.items():
            if 'autoneg' in value:
                if value['autoneg'] == '1':
                    self.configDB.set(self.configDB.CONFIG_DB,
                                      '{}|{}'.format(table_name,
                                                     key), 'autoneg', 'on')
                    if 'speed' in value and 'adv_speeds' not in value:
                        self.configDB.set(self.configDB.CONFIG_DB,
                                          '{}|{}'.format(table_name, key),
                                          'adv_speeds', value['speed'])
                elif value['autoneg'] == '0':
                    self.configDB.set(self.configDB.CONFIG_DB,
                                      '{}|{}'.format(table_name,
                                                     key), 'autoneg', 'off')

    def migrate_qos_db_fieldval_reference_remove(self, table_list, db, db_num,
                                                 db_delimeter):
        for pair in table_list:
            table_name, fields_list = pair
            qos_table = db.get_table(table_name)
            for key, value in qos_table.items():
                if type(key) is tuple:
                    db_key = table_name + db_delimeter + db_delimeter.join(key)
                else:
                    db_key = table_name + db_delimeter + key
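                # Illustration with a hypothetical value: an ABNF reference such as
                # '[BUFFER_PROFILE|ingress_lossless_profile]' is rewritten in the
                # loop below to the plain string 'ingress_lossless_profile'.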

                for field in fields_list:
                    if field in value:
                        fieldVal = value.get(field)
                        if not fieldVal or fieldVal == "NULL":
                            continue
                        newFieldVal = ""
                        # Check for ABNF format presence and convert ABNF to string
                        if "[" in fieldVal and db_delimeter in fieldVal and "]" in fieldVal:
                            log.log_info(
                                "Found ABNF format field value in table {} key {} field {} val {}"
                                .format(table_name, db_key, field, fieldVal))
                            value_list = fieldVal.split(",")
                            for item in value_list:
                                if item[0] != "[" or db_delimeter not in item or item[-1] != "]":
                                    continue
                                newFieldVal = newFieldVal + item[1:-1].split(
                                    db_delimeter)[1] + ','
                            newFieldVal = newFieldVal[:-1]
                            db.set(db_num, db_key, field, newFieldVal)
                            log.log_info(
                                "Modified ABNF format field value to string in table {} key {} field {} val {}"
                                .format(table_name, db_key, field, newFieldVal))
        return True

    def migrate_qos_fieldval_reference_format(self):
        '''
        This is a one-time change to remove field references in ABNF format
        in APPL_DB for warm boot,
        i.e. "[Table_name:name]" becomes a plain string in APPL_DB. Reasons for doing this:
         - To be consistent with all other SONiC CONFIG_DB/APPL_DB tables and fields
         - References in the DB are not required; this will be taken care of by the YANG model leafref.
        '''
        qos_app_table_list = [
            ('BUFFER_PG_TABLE', ['profile']),
            ('BUFFER_QUEUE_TABLE', ['profile']),
            ('BUFFER_PROFILE_TABLE', ['pool']),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE', ['profile_list']),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE', ['profile_list'])
        ]

        log.log_info("Remove APPL_DB QOS tables field reference ABNF format")
        self.migrate_qos_db_fieldval_reference_remove(qos_app_table_list,
                                                      self.appDB,
                                                      self.appDB.APPL_DB, ':')

        qos_table_list = [
            ('QUEUE', ['scheduler', 'wred_profile']),
            ('PORT_QOS_MAP', [
                'dscp_to_tc_map', 'dot1p_to_tc_map', 'pfc_to_queue_map',
                'tc_to_pg_map', 'tc_to_queue_map', 'pfc_to_pg_map'
            ]), ('BUFFER_PG', ['profile']), ('BUFFER_QUEUE', ['profile']),
            ('BUFFER_PROFILE', ['pool']),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST', ['profile_list']),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST', ['profile_list'])
        ]
        log.log_info("Remove CONFIG_DB QOS tables field reference ABNF format")
        self.migrate_qos_db_fieldval_reference_remove(qos_table_list,
                                                      self.configDB,
                                                      self.configDB.CONFIG_DB,
                                                      '|')
        return True

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that don't have a version
        string defined in CONFIG_DB.
        Nothing can be assumed when migrating from this version to the next
        version.
        Any migration operation needs to test whether the DB is in the expected format
        before migrating data to the next version.
        """

        log.log_info('Handling version_unknown')

        # NOTE: Returning a specific version string here is intentional;
        #       we only intend to migrate to DB version 1.0.2 at this point.
        #       If a new DB version is added in the future, the incremental
        #       upgrade will take care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log.log_info('Handling version_1_0_1')

        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log.log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_2', 'version_1_0_3') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        return 'version_1_0_3'

    def version_1_0_3(self):
        """
        Version 1_0_3.
        """
        log.log_info('Handling version_1_0_3')

        self.migrate_feature_table()

        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_3', 'version_1_0_4') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_3', 'version_1_0_4') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_4')
        else:
            self.set_version('version_1_0_4')

        return 'version_1_0_4'

    def version_1_0_4(self):
        """
        Version 1_0_4.
        """
        log.log_info('Handling version_1_0_4')

        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_4', 'version_1_0_5') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_4', 'version_1_0_5') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_5')
        else:
            self.set_version('version_1_0_5')

        return 'version_1_0_5'

    def version_1_0_5(self):
        """
        Version 1_0_5.
        """
        log.log_info('Handling version_1_0_5')

        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_5', 'version_1_0_6') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_5', 'version_1_0_6') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_6')
        else:
            self.set_version('version_1_0_6')

        return 'version_1_0_6'

    def version_1_0_6(self):
        """
        Version 1_0_6.
        """
        log.log_info('Handling version_1_0_6')
        if self.asic_type == "mellanox":
            speed_list = self.mellanox_buffer_migrator.default_speed_list
            cable_len_list = self.mellanox_buffer_migrator.default_cable_len_list
            buffer_pools = self.configDB.get_table('BUFFER_POOL')
            buffer_profiles = self.configDB.get_table('BUFFER_PROFILE')
            buffer_pgs = self.configDB.get_table('BUFFER_PG')
            abandon_method = self.mellanox_buffer_migrator.mlnx_abandon_pending_buffer_configuration
            append_method = self.mellanox_buffer_migrator.mlnx_append_item_on_pending_configuration_list

            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_6', 'version_2_0_0') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_6', 'version_2_0_0') \
               and (not self.mellanox_buffer_migrator.mlnx_is_buffer_model_dynamic() or \
                    self.migrate_config_db_buffer_tables_for_dynamic_calculation(speed_list, cable_len_list, '0', abandon_method, append_method)) \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration() \
               and self.prepare_dynamic_buffer_for_warm_reboot(buffer_pools, buffer_profiles, buffer_pgs):
                self.set_version('version_2_0_0')
        else:
            self.prepare_dynamic_buffer_for_warm_reboot()

            metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
            metadata['buffer_model'] = 'traditional'
            self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata)
            log.log_notice('Setting buffer_model to traditional')

            self.set_version('version_2_0_0')

        return 'version_2_0_0'

    def version_2_0_0(self):
        """
        Version 2_0_0.
        """
        log.log_info('Handling version_2_0_0')
        self.migrate_config_db_port_table_for_auto_neg()
        self.set_version('version_2_0_1')
        return 'version_2_0_1'

    def version_2_0_1(self):
        """
        Version 2_0_1.
        """
        log.log_info('Handling version_2_0_1')
        warmreboot_state = self.stateDB.get(
            self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system',
            'enable')

        if warmreboot_state != 'true':
            portchannel_table = self.configDB.get_table('PORTCHANNEL')
            for name, data in portchannel_table.items():
                data['lacp_key'] = 'auto'
                self.configDB.set_entry('PORTCHANNEL', name, data)
        self.set_version('version_2_0_2')
        return 'version_2_0_2'

    def version_2_0_2(self):
        """
        Version 2_0_2.
        """
        log.log_info('Handling version_2_0_2')
        self.migrate_qos_fieldval_reference_format()
        self.set_version('version_2_0_3')
        return 'version_2_0_3'

    def version_2_0_3(self):
        """
        Version 2_0_3.
        """
        log.log_info('Handling version_2_0_3')
        if self.asic_type == "mellanox":
            self.mellanox_buffer_migrator.mlnx_reclaiming_unused_buffer()
        self.set_version('version_2_0_4')
        return 'version_2_0_4'

    def version_2_0_4(self):
        """
        Current latest version. Nothing to do here.
        """
        log.log_info('Handling version_2_0_4')
        return None

    def get_version(self):
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]

        return 'version_unknown'

    def set_version(self, version=None):
        if not version:
            version = self.CURRENT_VERSION
        log.log_info('Setting version to ' + version)
        entry = {self.TABLE_FIELD: version}
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def common_migration_ops(self):
        try:
            with open(INIT_CFG_FILE) as f:
                init_db = json.load(f)
        except Exception as e:
            raise Exception(str(e))

        for init_cfg_table, table_val in init_db.items():
            log.log_info(
                "Migrating table {} from INIT_CFG to config_db".format(
                    init_cfg_table))
            for key in table_val:
                curr_cfg = self.configDB.get_entry(init_cfg_table, key)
                init_cfg = table_val[key]

                # Override init_cfg with curr_cfg: fields that exist only in
                # init_cfg are added to new_cfg, while values already present
                # in the current configuration are preserved.
                new_cfg = {**init_cfg, **curr_cfg}
                self.configDB.set_entry(init_cfg_table, key, new_cfg)

        self.migrate_copp_table()

    def migrate(self):
        version = self.get_version()
        log.log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception(
                    'Version migrate from %s stuck in same version' % version)
            version = next_version
        # Perform common migration ops
        self.common_migration_ops()
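
The migrate() driver above walks a chain of version handlers: each handler performs one upgrade step, records the new version with set_version(), and returns the name of the next handler; returning None from the latest handler ends the chain. The sketch below is a minimal, self-contained illustration of that pattern only; the ToyMigrator class and its in-memory dictionary are stand-ins invented for this example, not part of sonic-utilities, where the real migrator reads and writes CONFIG_DB through ConfigDBConnector.

class ToyMigrator:
    CURRENT_VERSION = 'version_2'

    def __init__(self):
        # In-memory stand-in for the CONFIG_DB VERSIONS|DATABASE entry
        self.db = {'VERSIONS': {'DATABASE': {'VERSION': 'version_1'}}}

    def get_version(self):
        return self.db['VERSIONS']['DATABASE'].get('VERSION', 'version_unknown')

    def set_version(self, version=None):
        self.db['VERSIONS']['DATABASE']['VERSION'] = version or self.CURRENT_VERSION

    def version_1(self):
        # Each handler migrates one step and returns the next handler's name
        self.set_version('version_2')
        return 'version_2'

    def version_2(self):
        # Latest version: returning None stops the chain
        return None

    def migrate(self):
        version = self.get_version()
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version


if __name__ == '__main__':
    m = ToyMigrator()
    m.migrate()
    assert m.get_version() == 'version_2'
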
Example #22
0
class ZTPEngine():
    '''!
    \brief This class performs core functions of ZTP service.
    '''
    def __init__(self):
        ## Flag to indicate if a ZTP configuration restart has been requested
        self.__ztp_restart = False

        ## start time of ZTP engine
        self.__ztp_engine_start_time = None

        ## Flag to indicate if ZTP configuration has been loaded
        self.__ztp_profile_loaded = False

        ## Run ZTP engine in unit test mode
        self.test_mode = False

        ## Flag to determine whether interface link scan has to be enabled
        self.__link_scan_enabled = None

        ## Interface on which ZTP information has been discovered using DHCP
        self.__ztp_interface = None

        ## ZTP JSON object
        self.objztpJson = None

        ## Flag to indicate reboot
        self.reboot_on_completion = False

        ## Interfaces state table
        self.__intf_state = dict()

        ## Redis DB connectors
        self.configDB = None
        self.applDB = None

    def __connect_to_redis(self):
        '''!
        Establishes connection to the redis DB
           @return  False - If connection to the redis DB failed
                    True  - If connection to the redis DB is successful
        '''
        # Connect to ConfigDB
        try:
            if self.configDB is None:
                self.configDB = ConfigDBConnector()
                self.configDB.connect()
        except:
            self.configDB = None
            return False

        # Connect to AppDB
        try:
            if self.applDB is None:
                self.applDB = SonicV2Connector()
                self.applDB.connect(self.applDB.APPL_DB)
        except:
            self.applDB = None
            return False
        return True

    def __detect_intf_state(self):
        '''!
        Identifies all the interfaces on which ZTP discovery needs to be performed.
        Link state of each identified interface is checked and stored in a dictionary
        for reference.

           @return  True   - If an interface moved from link down to link up state
                    False  - If no interface transitions have been observed
        '''
        link_up_detected = False
        intf_data = os.listdir('/sys/class/net')
        if getCfg('feat-inband'):
            r_intf = re.compile("Ethernet.*|eth.*")
        else:
            r_intf = re.compile("eth.*")
        intf_list = list(filter(r_intf.match, intf_data))
        for intf in natsorted(intf_list):
            try:
                if intf[0:3] == 'eth':
                    fh = open('/sys/class/net/{}/operstate'.format(intf), 'r')
                    operstate = fh.readline().strip().lower()
                    fh.close()
                else:
                    if self.applDB.exists(self.applDB.APPL_DB,
                                          'PORT_TABLE:' + intf):
                        port_entry = self.applDB.get_all(
                            self.applDB.APPL_DB, 'PORT_TABLE:' + intf)
                        operstate = port_entry.get('oper_status').lower()
                    else:
                        operstate = 'down'
            except:
                operstate = 'down'
            if ((self.__intf_state.get(intf) is None) or \
                (self.__intf_state.get(intf).get('operstate') != operstate)) and \
                operstate == 'up':
                link_up_detected = True
                logger.info('Link up detected for interface %s' % intf)
            if self.__intf_state.get(intf) is None:
                self.__intf_state[intf] = dict()
            self.__intf_state[intf]['operstate'] = operstate

        # Weed out any stale interfaces that may exist when an expanded port is joined back
        intf_snapshot = list(self.__intf_state.keys())
        for intf in intf_snapshot:
            if intf not in intf_list:
                del self.__intf_state[intf]

        return link_up_detected
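    # Illustrative note (not part of the original source): for management ports
    # (eth*) the link state is read from /sys/class/net/<intf>/operstate, while
    # for front-panel ports (Ethernet*) it is read from the PORT_TABLE entry in
    # APPL_DB; a transition to 'up' on any monitored interface is what makes
    # this method return True.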

    def __is_ztp_profile_active(self):
        '''!
        Checks if the ZTP configuration profile is loaded as the switch running
        configuration and is active

           @return  False - ZTP configuration profile is not active
                    True  - ZTP configuration profile is active
        '''
        profile_active = False
        if self.__connect_to_redis():
            # Check if ZTP configuration is active
            data = self.configDB.get_entry("ZTP", "mode")
            if data is not None and data.get("profile") is not None:
                if data.get("profile") == 'active':
                    profile_active = True
        return profile_active

    def __link_scan(self):
        '''!
        Scan all in-band interfaces' operational status to detect a link up event
           @return  False - If a link scan did not detect at least one switch port link up event
                    True  - If at least one switch port link up event has been detected
        '''

        # Do not attempt link scan when in test mode
        if self.test_mode:
            return False

        if self.__connect_to_redis() is False:
            self.__link_scan_enabled = None
            return False

        if self.__link_scan_enabled is None:
            # Check if ZTP configuration is active
            if self.__is_ztp_profile_active():
                self.__link_scan_enabled = 'True'
            else:
                self.__link_scan_enabled = 'False'

        if self.__link_scan_enabled == 'False':
            return False

        # Populate data of all ztp eligible interfaces
        link_scan_result = self.__detect_intf_state()
        return link_scan_result

    def __cleanup_dhcp_leases(self):

        # Remove stale DHCP leases on the interfaces used to obtain provisioning information
        runCommand('rm -f /var/lib/dhcp/dhclient*.eth0.leases',
                   capture_stdout=False)
        if getCfg('feat-inband'):
            runCommand('rm -f /var/lib/dhcp/dhclient*.Ethernet*.leases',
                       capture_stdout=False)

    def __removeZTPProfile(self):
        '''!
         If ZTP configuration profile is operational, remove ZTP configuration profile and load
         startup configuration file. If there is no startup configuration file,
         load factory default configuration.
        '''

        # Do not attempt to remove ZTP configuration if working in unit test mode
        if self.test_mode:
            return

        # Remove ZTP configuration profile if loaded
        updateActivity('Verifying configuration')

        # Use a fallback default configuration if configured to do so
        _config_fallback = ''
        if (self.objztpJson is not None and (self.objztpJson['status'] == 'FAILED' or self.objztpJson['status'] == 'SUCCESS') \
            and self.objztpJson['config-fallback']) or \
           (self.objztpJson is None and getCfg('config-fallback') is True):
            _config_fallback = ' config-fallback'

        # Execute profile removal command with appropriate options
        rc = runCommand(getCfg('ztp-lib-dir') + '/ztp-profile.sh remove' +
                        _config_fallback,
                        capture_stdout=False)

        # Remove ZTP configuration startup-config
        if os.path.isfile(getCfg('config-db-json')) is True:
            try:
                config_db = None
                with open(getCfg('config-db-json')) as json_file:
                    config_db = json.load(json_file)
                    json_file.close()
                if config_db is not None and config_db.get('ZTP'):
                    logger.info("Deleting ZTP configuration saved in '%s'." %
                                (getCfg('config-db-json')))
                    del config_db['ZTP']
                    with open(getCfg('config-db-json'), 'w') as json_file:
                        json.dump(config_db, json_file, indent=4)
                        json_file.close()
            except Exception as e:
                logger.error(
                    "Exception [%s] encountered while verifying '%s'." %
                    (str(e), getCfg('config-db-json')))

        self.__ztp_profile_loaded = False

    def __loadZTPProfile(self, event):
        '''!
         Load ZTP configuration profile if there is no saved configuration file.
         This establishes connectivity to all interfaces and starts DHCP discovery.
           @return  False - If ZTP configuration profile is not loaded
                    True  - If ZTP configuration profile is loaded
        '''
        # Do not attempt to install ZTP configuration if working in unit test mode
        if self.test_mode:
            return False

        if self.__ztp_profile_loaded is False:
            updateActivity('Checking running configuration')
            logger.info(
                'Checking running configuration to load ZTP configuration profile.'
            )
            cmd = getCfg('ztp-lib-dir') + '/ztp-profile.sh install ' + event
            # When performing ZTP discovery, force load ZTP profile. When
            # ZTP is resuming previous session, use configuration already loaded during
            # config-setup
            rc = runCommand(cmd, capture_stdout=False)
            self.__ztp_profile_loaded = True
            return True
        return False

    def __createProvScriptJson(self):
        '''!
         Create ZTP JSON data to execute provisioning script specified by DHCP Option 239 URL.
        '''

        json_data = '{"ztp": {"provisioning-script":{"plugin":{"url":"file://' + getCfg(
            'provisioning-script') + '","ignore-section-data":true}}\
                   ,"restart-ztp-no-config":false}}'

        f = open(getCfg('ztp-json'), 'w')
        f.write(json_data)
        f.close()

    def __createGraphserviceJson(self):
        '''!
         Create ZTP JSON data to load graph file specified by DHCP Option 225 URL. Also
         includes ACL JSON file if specified by DHCP Option 226.
        '''

        # Verify that graph file can be downloaded
        if self.__downloadURL(getCfg('graph-url'),
                              '/tmp/test_minigraph.xml') is False:
            return False
        else:
            # Clean up
            os.remove('/tmp/test_minigraph.xml')

        # Verify that acl json file can be downloaded
        if os.path.isfile(getCfg('acl-url')):
            if self.__downloadURL(getCfg('acl-url'),
                                  '/tmp/test_acl.json') is False:
                return False
            else:
                # Clean up
                os.remove('/tmp/test_acl.json')

        # Read the url file and identify the URL to be downloaded
        f = open(getCfg('graph-url'), 'r')
        graph_url_str = f.readline().strip()
        f.close()

        acl_url_str = None
        if os.path.isfile(getCfg('acl-url')):
            f = open(getCfg('acl-url'), 'r')
            acl_url_str = f.readline().strip()
            f.close()
        json_data = '{"ztp":{"graphservice": { "minigraph-url" : { "url":"' + graph_url_str + '"}'
        if acl_url_str is not None and len(acl_url_str) != 0:
            json_data = json_data + ', "acl-url" : { "url":"' + acl_url_str + '"}'
        json_data = json_data + '}, "restart-ztp-no-config":false} }'
        f = open(getCfg('ztp-json'), 'w')
        f.write(json_data)
        f.close()
        return True

    def __rebootAction(self, section, delayed_reboot=False):
        '''!
         Perform system reboot if reboot-on-success or reboot-on-failure is defined in the
         configuration section data.

         @param section (dict) Configuration section data containing status and reboot-on flags

        '''

        # Obtain section status
        status = section.get('status')

        # Check if flag is set to reboot on SUCCESS and status is SUCCESS as well
        if getField(section, 'reboot-on-success', bool,
                    False) is True and status == 'SUCCESS':
            logger.warning(
                'ZTP is rebooting the device as reboot-on-success flag is set.'
            )
            updateActivity('System reboot requested on success')
            if self.test_mode and delayed_reboot == False:
                sys.exit(0)
            else:
                if delayed_reboot:
                    self.reboot_on_completion = True
                else:
                    systemReboot()

        # Check if flag is set to reboot on FAIL and status is FAILED as well
        if getField(section, 'reboot-on-failure', bool,
                    False) is True and status == 'FAILED':
            logger.warning(
                'ZTP is rebooting the device as reboot-on-failure flag is set.'
            )
            updateActivity('System reboot requested on failure')
            if self.test_mode and delayed_reboot == False:
                sys.exit(0)
            else:
                if delayed_reboot:
                    self.reboot_on_completion = True
                else:
                    systemReboot()
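    # Illustrative note (not part of the original source): __rebootAction() only
    # acts when the section status matches the corresponding flag. For example, a
    # section such as {'status': 'SUCCESS', 'reboot-on-success': True} triggers a
    # reboot (deferred until ZTP completes when delayed_reboot=True), whereas
    # {'status': 'FAILED', 'reboot-on-success': True} does not.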

    def __evalZTPResult(self):
        '''!
         Determines the final result of ZTP after processing all configuration sections and
         their results. Also performs a system reboot if the reboot-on flag is set.

         ZTP result is determined as SUCCESS if - Configuration section(s) status is SUCCESS
                                                or (configuration section(s) status is FAILED and
                                                    configuration section(s) ignore-result is True)
                                                or ZTP ignore-result is True

         Disabled Configuration sections are ignored.
        '''

        updateActivity('Evaluating ZTP result')
        # Check if overall ZTP ignore-result flag is set
        if self.objztpJson['ignore-result']:
            self.objztpJson['status'] = 'SUCCESS'
            logger.info(
                'ZTP result is marked as SUCCESS at %s. ZTP ignore-result flag is set.'
                % self.objztpJson['timestamp'])

        else:
            # Look through individual configuration sections
            for sec in self.objztpJson.section_names:
                # Retrieve section data
                section = self.objztpJson.ztpDict.get(sec)
                logger.info('Checking configuration section %s result: %s, ignore-result: %r.' % \
                              (sec, section.get('status'), section.get('ignore-result')))
                # Check if configuration section has failed and ignore-result flag is not set
                if section.get('status') == 'FAILED' and section.get(
                        'ignore-result') is False:
                    # Mark ZTP as failed and bail out
                    self.objztpJson['error'] = '%s FAILED' % sec
                    self.objztpJson['status'] = 'FAILED'
                    logger.info(
                        'ZTP failed at %s as configuration section %s FAILED.'
                        % (self.objztpJson['timestamp'], sec))
                    return

        # Mark ZTP as SUCCESS
        self.objztpJson['status'] = 'SUCCESS'
        logger.info('ZTP successfully completed at %s.' %
                    self.objztpJson['timestamp'])

        # Check reboot on result flags and take action
        self.__rebootAction(self.objztpJson.ztpDict, delayed_reboot=True)
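    # Illustrative summary (not part of the original source) of how the overall
    # ZTP result is derived above:
    #   top-level ignore-result set                   -> SUCCESS
    #   any enabled section FAILED with its
    #   ignore-result set to False                    -> FAILED
    #   otherwise                                     -> SUCCESS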

    def __processConfigSections(self):
        '''!
         Process and execute individual configuration sections defined in ZTP JSON. Plugin for each
         configuration section is resolved and executed. Configuration section data is provided as a
         command line argument to the plugin. Every section is processed before this function
         returns.

        '''

        # Obtain a copy of the list of configuration sections
        section_names = list(self.objztpJson.section_names)

        # set temporary flags
        abort = False
        sort = True

        logger.debug('Processing configuration sections: %s' %
                     ', '.join(section_names))
        # Loop through each section until all of them are processed
        while section_names and abort is False:
            # Take a fresh sorted list to begin with and re-sort it whenever the list changes while processing
            if sort:
                sorted_list = sorted(section_names)
                sort = False
            # Loop through configuration sections in sorted order
            for sec in sorted_list:
                # Retrieve configuration section data
                section = self.objztpJson.ztpDict.get(sec)
                try:
                    # Retrieve individual section's progress
                    sec_status = section.get('status')
                    if sec_status == 'BOOT' or sec_status == 'SUSPEND':
                        # Mark section status as in progress
                        self.objztpJson.updateStatus(section, 'IN-PROGRESS')
                        if section.get('start-timestamp') is None:
                            section['start-timestamp'] = section['timestamp']
                            self.objztpJson.objJson.writeJson()
                        logger.info(
                            'Processing configuration section %s at %s.' %
                            (sec, section['timestamp']))
                    elif sec_status != 'IN-PROGRESS':
                        # Skip completed sections
                        logger.debug(
                            'Removing section %s from list. Status %s.' %
                            (sec, sec_status))
                        section_names.remove(sec)
                        # set flag to sort the configuration sections list again
                        sort = True
                        # Ignore disabled configuration sections
                        if sec_status == 'DISABLED':
                            logger.info(
                                'Configuration section %s skipped as its status is set to DISABLED.'
                                % sec)
                        continue
                    updateActivity('Processing configuration section %s' % sec)
                    # Get the appropriate plugin to be used for this configuration section
                    plugin = self.objztpJson.plugin(sec)
                    # Get the location of this configuration section's input data parsed from the input ZTP JSON file
                    plugin_input = getCfg(
                        'ztp-tmp-persistent') + '/' + sec + '/' + getCfg(
                            'section-input-file')
                    # Initialize result flag to FAILED
                    finalResult = 'FAILED'
                    rc = 1
                    # Check if plugin could not be resolved
                    if plugin is None:
                        logger.error(
                            'Unable to resolve plugin to be used for configuration section %s. Marking it as FAILED.'
                            % sec)
                        section[
                            'error'] = 'Unable to find or download requested plugin'
                    elif os.path.isfile(plugin) and os.path.isfile(
                            plugin_input):
                        plugin_args = self.objztpJson.pluginArgs(sec)
                        plugin_data = section.get('plugin')

                        # Determine if shell has to be used to execute plugin
                        _shell = getField(plugin_data, 'shell', bool, False)

                        # Construct the full plugin command string along with arguments
                        plugin_cmd = plugin
                        if plugin_args is not None:
                            plugin_cmd = plugin_cmd + ' ' + plugin_args

                        # Both the plugin and its input configuration section data have been resolved
                        logger.debug('Executing plugin %s.' % (plugin_cmd))
                        # Execute identified plugin
                        rc = runCommand(plugin_cmd,
                                        capture_stdout=False,
                                        use_shell=_shell)

                        logger.debug('Plugin %s exit code = %d.' %
                                     (plugin_cmd, rc))
                        # Compare plugin exit code
                        if rc == 0:
                            finalResult = 'SUCCESS'
                        elif section.get('suspend-exit-code'
                                         ) is not None and section.get(
                                             'suspend-exit-code') == rc:
                            finalResult = 'SUSPEND'
                        else:
                            finalResult = 'FAILED'
                except Exception as e:
                    logger.debug(
                        'Exception [%s] encountered for configuration section %s.'
                        % (str(e), sec))
                    logger.info(
                        'Exception encountered while processing configuration section %s. Marking it as FAILED.'
                        % sec)
                    section[
                        'error'] = 'Exception [%s] encountered while executing the plugin' % (
                            str(e))
                    finalResult = 'FAILED'

                # Update this configuration section's result in ztp json file
                logger.info(
                    'Processed Configuration section %s with result %s, exit code (%d) at %s.'
                    % (sec, finalResult, rc, section['timestamp']))
                if finalResult == 'FAILED' and section.get('error') is None:
                    section['error'] = 'Plugin failed'
                section['exit-code'] = rc
                self.objztpJson.updateStatus(section, finalResult)

                # Check if abort ZTP on failure flag is set
                if getField(section, 'halt-on-failure', bool,
                            False) is True and finalResult == 'FAILED':
                    logger.info(
                        'Halting ZTP as Configuration section %s FAILED and halt-on-failure flag is set.'
                        % sec)
                    abort = True
                    break

                # Check reboot on result flags
                self.__rebootAction(section)
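    # Illustrative note (not part of the original source): a plugin's exit code is
    # mapped to the section result as follows:
    #   0                               -> SUCCESS
    #   value of 'suspend-exit-code'    -> SUSPEND (the section is retried on a
    #                                      later pass of the processing loop)
    #   any other value                 -> FAILED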

    def __processZTPJson(self):
        '''!
         Process ZTP JSON file downloaded using URL provided by DHCP Option 67, DHCPv6 Option 59 or
         local ZTP JSON file.

        '''
        logger.debug('Starting to process ZTP JSON file %s.' % self.json_src)
        updateActivity('Processing ZTP JSON file %s' % self.json_src)
        try:
            # Read provided ZTP JSON file and load it
            self.objztpJson = ZTPJson(self.json_src, getCfg('ztp-json'))
        except ValueError as e:
            logger.error(
                'Exception [%s] occurred while processing ZTP JSON file %s.' %
                (str(e), self.json_src))
            logger.error('ZTP JSON file %s processing failed.' %
                         (self.json_src))
            try:
                os.remove(getCfg('ztp-json'))
                if os.path.isfile(getCfg('ztp-json-shadow')):
                    os.remove(getCfg('ztp-json-shadow'))
            except OSError as v:
                if v.errno != errno.ENOENT:
                    logger.warning(
                        'Exception [%s] encountered while deleting ZTP JSON file %s.'
                        % (str(v), getCfg('ztp-json')))
                    raise
            self.objztpJson = None
            # Restart networking after a wait time to discover new provisioning data
            if getCfg('restart-ztp-on-invalid-data'):
                return ("restart", "Invalid provisioning data processed")
            else:
                return ("stop", "Invalid provisioning data processed")

        if self.objztpJson['ztp-json-source'] is None:
            self.objztpJson['ztp-json-source'] = self.ztp_mode

        # Check if ZTP process has already completed. If not mark start of ZTP.
        if self.objztpJson['status'] == 'BOOT':
            self.objztpJson['status'] = 'IN-PROGRESS'
            if self.objztpJson['start-timestamp'] is None:
                self.objztpJson[
                    'start-timestamp'] = self.__ztp_engine_start_time
                self.objztpJson.objJson.writeJson()
        elif self.objztpJson['status'] != 'IN-PROGRESS':
            # Re-start ZTP if requested
            if getCfg('monitor-startup-config') is True and self.__ztp_restart:
                self.__ztp_restart = False
                # Discover new ZTP data after deleting historic ZTP data
                logger.info(
                    "ZTP restart requested. Deleting previous ZTP session JSON data."
                )
                os.remove(getCfg('ztp-json'))
                if os.path.isfile(getCfg('ztp-json-shadow')):
                    os.remove(getCfg('ztp-json-shadow'))
                self.objztpJson = None
                return ("retry", "ZTP restart requested")
            else:
                # ZTP already completed in a previous session. No need to proceed; return and exit the service.
                logger.info(
                    "ZTP already completed with result %s at %s." %
                    (self.objztpJson['status'], self.objztpJson['timestamp']))
                return ("stop", "ZTP completed")

        logger.info('Starting ZTP using JSON file %s at %s.' %
                    (self.json_src, self.objztpJson['timestamp']))

        # Initialize connectivity if not done already
        self.__loadZTPProfile("resume")

        # Process available configuration sections in ZTP JSON
        self.__processConfigSections()

        # Determine ZTP result
        self.__evalZTPResult()

        # Check restart ZTP condition
        # ZTP result is FAILED and restart-ztp-on-failure is set, or
        _restart_ztp_on_failure = (self.objztpJson['status'] == 'FAILED' and \
                        self.objztpJson['restart-ztp-on-failure'] == True)

        # ZTP completed but no startup-config was found, restart-ztp-no-config is set and config-fallback is not set
        _restart_ztp_missing_config = ( (self.objztpJson['status'] == 'SUCCESS' or self.objztpJson['status'] == 'FAILED') and \
                           self.objztpJson['restart-ztp-no-config'] == True and \
                           self.objztpJson['config-fallback'] == False and
                           os.path.isfile(getCfg('config-db-json')) is False )

        # Mark ZTP for restart
        if _restart_ztp_missing_config or _restart_ztp_on_failure:
            os.remove(getCfg('ztp-json'))
            if os.path.isfile(getCfg('ztp-json-shadow')):
                os.remove(getCfg('ztp-json-shadow'))
            self.objztpJson = None
            # Remove startup-config file to obtain a new one through ZTP
            if getCfg('monitor-startup-config') is True and os.path.isfile(
                    getCfg('config-db-json')):
                os.remove(getCfg('config-db-json'))
            if _restart_ztp_missing_config:
                return (
                    "restart",
                    "ZTP completed but startup configuration '%s' not found" %
                    (getCfg('config-db-json')))
            elif _restart_ztp_on_failure:
                return ("restart", "ZTP completed with FAILED status")

        return ("stop", "ZTP completed")

    def __updateZTPMode(self, mode, src_file):
        '''!
         Identify source of ZTP JSON file. Store ZTP mode of operation.

         @param mode (str) Indicates how provisioning data has been provided to the switch
                               - Local file
                               - DHCP Option 67
                               - DHCPv6 Option 59
                               - DHCP Option 239
                               - DHCPv6 Option 239
                               - Minigraph URL Option 225, ACL URL Option 226

         @param src_file (str) File used as ZTP JSON file source

         @return          Always returns True

        '''
        logger.debug('Set ZTP mode as %s and provisioning data is %s.' %
                     (mode, src_file))
        dhcp_list = [
            'dhcp-opt67', 'dhcp6-opt59', 'dhcp-opt239', 'dhcp6-opt239',
            'dhcp-opt225-graph-url'
        ]
        self.json_src = src_file
        self.ztp_mode = mode
        if self.ztp_mode == 'local-fs':
            self.ztp_mode = self.ztp_mode + ' (' + src_file + ')'
        elif self.ztp_mode in dhcp_list and self.__ztp_interface is not None:
            self.ztp_mode = self.ztp_mode + ' (' + self.__ztp_interface + ')'
        return True

    def __read_ztp_interface(self):
        intf_file = getCfg('ztp-run-dir') + '/ztp.lock/interface'
        if os.path.isfile(intf_file):
            f = open(intf_file, 'r')
            try:
                self.__ztp_interface = f.readline().strip().split(':')[1]
            except:
                self.__ztp_interface = None
                pass
            f.close()

    def __downloadURL(self, url_file, dst_file, url_prefix=None):
        '''!
         Helper API to read url information from a file, download the
         file using the url and store contents as a dst_file.

         @param url_file (str) File containing URL to be downloaded
         @param dst_file (str) Destination file to be used
         @param url_prefix (str) Optional string to be prepended to url

         @return   True - If url_file was successfully downloaded
                   False - Failed to download url_file

        '''

        logger.debug('Downloading provided URL %s and saving as %s.' %
                     (url_file, dst_file))
        try:
            # Read the url file and identify the URL to be downloaded
            f = open(url_file, 'r')
            url_str = f.readline().strip()
            f.close()

            res = urlparse(url_str)
            if res is None or res.scheme == '':
                # Use passed url_prefix to construct final URL
                if url_prefix is not None:
                    url_str = url_prefix + url_str
                    if urlparse(url_str) is None:
                        logger.error(
                            'Failed to download provided URL %s, malformed url.'
                            % (url_str))
                        return False
                else:
                    logger.error(
                        'Failed to download provided URL %s, malformed url.' %
                        (url_str))
                    return False

            # Create a downloader object using source and destination information
            updateActivity('Downloading provisioning data from %s to %s' %
                           (url_str, dst_file))
            logger.info('Downloading provisioning data from %s to %s' %
                        (url_str, dst_file))
            objDownloader = Downloader(url_str, dst_file)
            # Initiate download
            rc, fname = objDownloader.getUrl()
            # Check download result
            if rc == 0 and fname is not None and os.path.isfile(dst_file):
                # Get the interface on which ZTP data was received
                self.__read_ztp_interface()
                return True
            else:
                logger.error(
                    'Failed to download provided URL %s returncode=%d.' %
                    (url_str, rc))
                return False
        except (IOError, OSError) as e:
            logger.error(
                'Exception [%s] encountered during download of provided URL %s.'
                % (str(e), url_str))
            return False
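    # Hypothetical usage (illustration only, values are made up): download the ZTP
    # JSON referenced by a DHCP Option 67 URL file, adding a TFTP prefix when the
    # URL in the file has no scheme:
    #   self.__downloadURL(getCfg('opt67-url'), getCfg('ztp-json-opt67'),
    #                      url_prefix='tftp://192.0.2.1/')
    # In __discover() below the prefix is built from the DHCP Option 66
    # tftp-server name rather than hard-coded.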

    def __discover(self):
        '''!
         ZTP data discover logic. Following is the order of precedence followed:

             Processed or under-process ZTP JSON file
           > ZTP JSON file specified in pre-defined location as part of the image
           > ZTP JSON URL specified via DHCP Option-67
           > ZTP JSON URL specified via DHCPv6 Option-59
           > Simple provisioning script URL specified via DHCP Option-239
           > Simple provisioning script URL specified via DHCPv6 Option-239
           > Minigraph URL and ACL URL specified via DHCP Option 225, 226

           @return  False - If no ZTP data is found or ZTP data could not be downloaded.
                    True  - ZTP data was recognized and the ZTP JSON / provisioning script was
                            successfully downloaded, or a startup configuration file was detected.

        '''

        logger.debug('Start discovery.')
        if os.path.isfile(getCfg('ztp-json')):
            return self.__updateZTPMode('ztp-session', getCfg('ztp-json'))

        if os.path.isfile(
                getCfg('config-db-json')) and getCfg('monitor-startup-config'):
            self.ztp_mode = 'MANUAL_CONFIG'
            return True

        if os.path.isfile(getCfg('ztp-json-local')):
            return self.__updateZTPMode('local-fs', getCfg('ztp-json-local'))
        if os.path.isfile(getCfg('opt67-url')):
            _tftp_server = None
            _url_prefix = None
            # Check if tftp-server name has been passed
            if os.path.isfile(getCfg('opt66-tftp-server')):
                fh = open(getCfg('opt66-tftp-server'), 'r')
                _tftp_server = fh.readline().strip()
                fh.close()
                if _tftp_server is not None and _tftp_server != '':
                    _url_prefix = 'tftp://' + _tftp_server + '/'
            if self.__downloadURL(getCfg('opt67-url'),
                                  getCfg('ztp-json-opt67'),
                                  url_prefix=_url_prefix):
                return self.__updateZTPMode('dhcp-opt67',
                                            getCfg('ztp-json-opt67'))
        if os.path.isfile(getCfg('opt59-v6-url')):
            if self.__downloadURL(getCfg('opt59-v6-url'),
                                  getCfg('ztp-json-opt59')):
                return self.__updateZTPMode('dhcp6-opt59',
                                            getCfg('ztp-json-opt59'))
        if os.path.isfile(getCfg('opt239-url')):
            if self.__downloadURL(getCfg('opt239-url'),
                                  getCfg('provisioning-script')):
                self.__createProvScriptJson()
                return self.__updateZTPMode('dhcp-opt239', getCfg('ztp-json'))
        if os.path.isfile(getCfg('opt239-v6-url')):
            if self.__downloadURL(getCfg('opt239-v6-url'),
                                  getCfg('provisioning-script')):
                self.__createProvScriptJson()
                return self.__updateZTPMode('dhcp6-opt239', getCfg('ztp-json'))
        if os.path.isfile(getCfg('graph-url')):
            if self.__createGraphserviceJson():
                return self.__updateZTPMode('dhcp-opt225-graph-url',
                                            getCfg('ztp-json'))
        return False

    def __forceRestartDiscovery(self, msg):
        # Remove existing leases to source new provisioning data
        self.__cleanup_dhcp_leases()
        _msg = '%s. Waiting for %d seconds before restarting ZTP.' % (
            msg, getCfg('restart-ztp-interval'))
        logger.warning(_msg)
        updateActivity(_msg)
        time.sleep(getCfg('restart-ztp-interval'))
        self.ztp_mode = 'DISCOVERY'
        # Force install of ZTP configuration profile
        self.__ztp_profile_loaded = False
        # Restart link-scan
        self.__intf_state = dict()

    def executeLoop(self, test_mode=False):
        '''!
         ZTP service loop which performs provisioning data discovery and initiates processing.
        '''

        updateActivity('Initializing')

        # Set testing mode
        self.test_mode = test_mode

        # Check if ZTP is disabled administratively, bail out if disabled
        if getCfg('admin-mode') is False:
            logger.info('ZTP is administratively disabled.')
            self.__removeZTPProfile()
            return

        # Check if ZTP data restart flag is set
        if os.path.isfile(getCfg('ztp-restart-flag')):
            self.__ztp_restart = True
            os.remove(getCfg('ztp-restart-flag'))

        if self.test_mode:
            logger.warning(
                'ZTP service started in test mode with restricted functionality.'
            )
        else:
            logger.info('ZTP service started.')

        self.__ztp_engine_start_time = getTimestamp()
        _start_time = None
        self.ztp_mode = 'DISCOVERY'
        # Main provisioning data discovery loop
        while self.ztp_mode == 'DISCOVERY':
            updateActivity('Discovering provisioning data', overwrite=False)
            try:
                result = self.__discover()
            except Exception as e:
                logger.error(
                    "Exception [%s] encountered while running the discovery logic."
                    % (str(e)))
                _exc_type, _exc_value, _exc_traceback = sys.exc_info()
                __tb = traceback.extract_tb(_exc_traceback)
                for l in __tb:
                    logger.debug('  File ' + l[0] + ', line ' + str(l[1]) +
                                 ', in ' + str(l[2]))
                    logger.debug('    ' + str(l[3]))
                self.__forceRestartDiscovery(
                    "Invalid provisioning data received")
                continue

            if result:
                if self.ztp_mode == 'MANUAL_CONFIG':
                    logger.info(
                        "Configuration file '%s' detected. Shutting down ZTP service."
                        % (getCfg('config-db-json')))
                    break
                elif self.ztp_mode != 'DISCOVERY':
                    (rv, msg) = self.__processZTPJson()
                    if rv == "retry":
                        self.ztp_mode = 'DISCOVERY'
                    elif rv == "restart":
                        self.__forceRestartDiscovery(msg)
                    else:
                        break

            # Initialize in-band interfaces to establish connectivity if not done already
            self.__loadZTPProfile("discovery")
            logger.debug('Provisioning data not found.')

            # Scan in-band interfaces for link up and restart interface connectivity
            if self.__link_scan():
                updateActivity('Restarting network discovery after link scan')
                logger.info('Restarting network discovery after link scan.')
                runCommand('systemctl restart interfaces-config',
                           capture_stdout=False)
                logger.info('Restarted network discovery after link scan.')
                _start_time = time.time()
                continue

            # Start tracking the time since networking was last restarted
            if _start_time is None:
                _start_time = time.time()

            # Check if we have to restart networking
            if (time.time() - _start_time > getCfg('restart-ztp-interval')):
                updateActivity('Restarting network discovery')
                if self.test_mode is False:
                    # Remove existing leases to source new provisioning data
                    self.__cleanup_dhcp_leases()
                    logger.info('Restarting network discovery.')
                    runCommand('systemctl restart interfaces-config',
                               capture_stdout=False)
                    logger.info('Restarted network discovery.')
                _start_time = time.time()
                continue

            # Try again after some time
            time.sleep(getCfg('discovery-interval'))

        # Cleanup installed ZTP configuration profile
        self.__removeZTPProfile()
        if self.reboot_on_completion and self.test_mode == False:
            updateActivity('System reboot requested')
            systemReboot()
        updateActivity('Exiting ZTP server')
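
The executeLoop() service loop above repeats a discover / process / wait cycle: it tries to locate provisioning data, processes it when found, and otherwise restarts network discovery once restart-ztp-interval elapses. Below is a minimal, self-contained sketch of that control flow only; run_provisioning_loop() and its discover/process callables are placeholders invented for this illustration, not sonic-ztp APIs.

import time


def run_provisioning_loop(discover, process, restart_interval=30, poll_interval=5):
    """Toy version of the ZTP service loop: discover provisioning data, process
    it, and periodically restart discovery while nothing is found.
    discover() returns provisioning data or None; process(data) returns one of
    'stop', 'retry' or 'restart'."""
    last_restart = time.time()
    while True:
        data = discover()
        if data is not None:
            verdict = process(data)
            if verdict == 'stop':
                break                       # provisioning finished
            if verdict == 'restart':
                last_restart = time.time()  # begin a fresh discovery cycle
            continue                        # 'retry' re-enters discovery at once
        if time.time() - last_restart > restart_interval:
            last_restart = time.time()      # a real implementation restarts networking here
        time.sleep(poll_interval)


if __name__ == '__main__':
    attempts = iter([None, {'ztp': {}}])
    run_provisioning_loop(lambda: next(attempts),
                          lambda data: 'stop',
                          poll_interval=0)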