def udp_timeout(ctx):
    """Reset NAT UDP timeout configuration to default value (300 seconds)"""
    db = ConfigDBConnector()
    db.connect()
    # Factory default for the NAT UDP entry aging timeout.
    default_timeout = 300
    db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": default_timeout})
def disable():
    """ Disable queue counter query """
    db = ConfigDBConnector()
    db.connect()
    # Flip the flex-counter status flag for the QUEUE group.
    db.mod_entry("FLEX_COUNTER_TABLE", "QUEUE", {'FLEX_COUNTER_STATUS': 'disable'})
def configPfcPrio(status, interface, priority):
    """Enable or disable a PFC lossless priority on an interface.

    status    -- 'on' to enable the priority, 'off' to disable it
    interface -- interface name; must exist in PORT_QOS_MAP
    priority  -- priority value to toggle, as a string (e.g. '3')
    """
    configdb = ConfigDBConnector()
    configdb.connect()
    if interface not in configdb.get_keys('PORT_QOS_MAP'):
        click.echo('Cannot find interface {0}'.format(interface))
        return
    # Current lossless priorities on the interface.
    # Default to '' so a missing 'pfc_enable' field does not raise
    # AttributeError (original called .split() on a possible None).
    entry = configdb.get_entry('PORT_QOS_MAP', interface)
    enable_prio = entry.get('pfc_enable', '').split(',')
    # Avoid '' in enable_prio
    enable_prio = [x.strip() for x in enable_prio if x.strip()]
    if status == 'on' and priority in enable_prio:
        click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface))
        return
    if status == 'off' and priority not in enable_prio:
        click.echo('Priority {0} is not enabled on {1}'.format(priority, interface))
        return
    if status == 'on':
        enable_prio.append(priority)
    else:
        enable_prio.remove(priority)
    # Priorities are single-digit strings, so a lexicographic sort is correct.
    enable_prio.sort()
    configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)})
    # Show the latest PFC configuration
    showPfcPrio(interface)
def disable():
    """ Disable rif counter query """
    db = ConfigDBConnector()
    db.connect()
    # Flip the flex-counter status flag for the RIF group.
    db.mod_entry("FLEX_COUNTER_TABLE", "RIF", {'FLEX_COUNTER_STATUS': 'disable'})
def disable():
    """ Disable port counter query """
    db = ConfigDBConnector()
    db.connect()
    # Flip the flex-counter status flag for the PORT group.
    db.mod_entry("FLEX_COUNTER_TABLE", "PORT", {'FLEX_COUNTER_STATUS': 'disable'})
def disable():
    """ Disable port_buffer_drop counter query """
    configdb = ConfigDBConnector()
    configdb.connect()
    port_info = {}
    # DISABLE and PORT_BUFFER_DROP are module-level constants defined
    # elsewhere in this file.
    port_info['FLEX_COUNTER_STATUS'] = DISABLE
    configdb.mod_entry("FLEX_COUNTER_TABLE", PORT_BUFFER_DROP, port_info)
def interval(poll_interval):
    """ Set tunnel counter query interval """
    db = ConfigDBConnector()
    db.connect()
    # Unconditionally write the polling interval for the TUNNEL group.
    db.mod_entry("FLEX_COUNTER_TABLE", "TUNNEL", {'POLL_INTERVAL': poll_interval})
def disable():
    """ Disable tunnel counter query """
    db = ConfigDBConnector()
    db.connect()
    # DISABLE is a module-level constant defined elsewhere in this file.
    db.mod_entry("FLEX_COUNTER_TABLE", "TUNNEL", {'FLEX_COUNTER_STATUS': DISABLE})
def remove_interface(ctx, interface_name):
    """Remove interface related NAT configuration"""
    config_db = ConfigDBConnector()
    config_db.connect()
    if nat_interface_name_is_valid(interface_name) is False:
        ctx.fail(
            "Interface name is invalid. Please enter a valid interface name!!"
        )
    # Map the interface-name prefix to its CONFIG_DB table. Validation above
    # guarantees exactly one prefix matches.
    prefix_table_map = [
        ("Ethernet", "INTERFACE"),
        ("PortChannel", "PORTCHANNEL_INTERFACE"),
        ("Vlan", "VLAN_INTERFACE"),
        ("Loopback", "LOOPBACK_INTERFACE"),
    ]
    for prefix, table in prefix_table_map:
        if interface_name.startswith(prefix):
            interface_table_type = table
            break
    interface_table_dict = config_db.get_table(interface_table_type)
    if not interface_table_dict or interface_name not in interface_table_dict:
        ctx.fail(
            "Interface table is not present. Ignoring the nat zone configuration"
        )
    # Resetting nat_zone to "0" removes the NAT zone binding.
    config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": "0"})
def add_interface(ctx, interface_name, nat_zone):
    """Add interface related nat configuration"""
    config_db = ConfigDBConnector()
    config_db.connect()
    if nat_interface_name_is_valid(interface_name) is False:
        ctx.fail(
            "Interface name is invalid. Please enter a valid interface name!!"
        )
    # Map the interface-name prefix to its CONFIG_DB table. Validation above
    # guarantees exactly one prefix matches.
    prefix_table_map = [
        ("Ethernet", "INTERFACE"),
        ("PortChannel", "PORTCHANNEL_INTERFACE"),
        ("Vlan", "VLAN_INTERFACE"),
        ("Loopback", "LOOPBACK_INTERFACE"),
    ]
    for prefix, table in prefix_table_map:
        if interface_name.startswith(prefix):
            interface_table_type = table
            break
    interface_table_dict = config_db.get_table(interface_table_type)
    if not interface_table_dict or interface_name not in interface_table_dict:
        ctx.fail(
            "Interface table is not present. Please configure ip-address on {} and apply the nat zone !!"
            .format(interface_name))
    config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": nat_zone})
def profile(profile):
    """Switch the Barefoot P4 profile and restart swss to apply it."""
    # Check if profile can be changed: the default install has no
    # /opt/bfn/install symlink inside syncd.
    completed_process = subprocess.run(
        ['docker', 'exec', '-it', 'syncd', 'test', '-h', '/opt/bfn/install'])
    if completed_process.returncode != 0:
        click.echo('Cannot change profile: default one is in use')
        raise click.Abort()

    # Get chip family from the hwsku's SAI config.
    hwsku_dir = device_info.get_path_to_hwsku_dir()
    with open(hwsku_dir + '/switch-tna-sai.conf') as file:
        chip_family = json.load(file)['chip_list'][0]['chip_family'].lower()

    # 'y'-profiles are Tofino2-only, 'x'-profiles are Tofino-only.
    unsupported = (
        (chip_family == 'tofino' and profile[0] == 'y') or
        (chip_family == 'tofino2' and profile[0] == 'x')
    )
    if unsupported:
        click.echo('Specified profile is unsupported on the system')
        raise click.Abort()

    # Check if the profile's install directory exists inside syncd.
    completed_process = subprocess.run([
        'docker', 'exec', '-it', 'syncd', 'test', '-d',
        '/opt/bfn/install_' + profile + '_profile'
    ])
    if completed_process.returncode != 0:
        click.echo('No profile with the provided name found')
        raise click.Abort()

    # Record the new profile and restart swss to pick it up.
    config_db = ConfigDBConnector()
    config_db.connect()
    config_db.mod_entry('DEVICE_METADATA', 'localhost',
                        {'p4_profile': profile + '_profile'})
    subprocess.run(['systemctl', 'restart', 'swss'], check=True)
def interval(poll_interval):
    """ Set queue counter query interval """
    configdb = ConfigDBConnector()
    configdb.connect()
    port_info = {}
    if poll_interval is not None:
        port_info['POLL_INTERVAL'] = poll_interval
    # NOTE(review): the help text says "queue", but this writes the PORT
    # group's poll interval ("FLEX_COUNTER_TABLE", "PORT") — confirm whether
    # the table key should be "QUEUE" or the docstring should say "port".
    configdb.mod_entry("FLEX_COUNTER_TABLE", "PORT", port_info)
def interval(poll_interval):
    """ Set rif counter query interval """
    db = ConfigDBConnector()
    db.connect()
    # Only touch the field when an interval was actually supplied;
    # mod_entry with an empty dict leaves the entry unchanged.
    fields = {} if poll_interval is None else {'POLL_INTERVAL': poll_interval}
    db.mod_entry("FLEX_COUNTER_TABLE", "RIF", fields)
def configPfcAsym(interface, pfc_asym):
    """ PFC handler to configure asymmetric PFC. """
    configdb = ConfigDBConnector()
    configdb.connect()
    # Write the pfc_asym mode ('on'/'off') straight onto the PORT entry.
    configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym})
def interval(poll_interval):
    """ Set port_buffer_drop counter query interval
        This counter group causes high CPU usage when polled,
        hence the allowed interval is between 30s and 300s.
        This is a short term solution and
        should be changed once the performance is enhanced
    """
    db = ConfigDBConnector()
    db.connect()
    # Range enforcement (30s-300s) is presumably handled by the CLI option
    # declaration — TODO confirm; this function only stores the value.
    fields = {}
    if poll_interval:
        fields['POLL_INTERVAL'] = poll_interval
    db.mod_entry("FLEX_COUNTER_TABLE", PORT_BUFFER_DROP, fields)
def disable():
    """ Disable watermark counter query """
    db = ConfigDBConnector()
    db.connect()
    status = {'FLEX_COUNTER_STATUS': 'disable'}
    # All three watermark groups are toggled together.
    for group in ("QUEUE_WATERMARK", "PG_WATERMARK", BUFFER_POOL_WATERMARK):
        db.mod_entry("FLEX_COUNTER_TABLE", group, status)
def interval(poll_interval):
    """ Set watermark counter query interval for both queue and PG watermarks """
    db = ConfigDBConnector()
    db.connect()
    fields = {}
    if poll_interval is not None:
        fields['POLL_INTERVAL'] = poll_interval
    # The same interval applies to all three watermark groups.
    for group in ("QUEUE_WATERMARK", "PG_WATERMARK", BUFFER_POOL_WATERMARK):
        db.mod_entry("FLEX_COUNTER_TABLE", group, dict(fields))
class DBMigrator():
    """Incrementally migrates SONiC DB content between schema versions.

    The version stored in CONFIG_DB (VERSIONS|DATABASE) selects the entry
    point; migrate() chains version_X handlers until the latest version
    returns None.
    """

    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     none-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_2_0_4'
        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        # Multi-ASIC devices need a namespaced, unix-socket connection.
        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        if namespace is None:
            self.appDB = ConfigDBConnector(**db_kwargs)
        else:
            self.appDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs)
        self.appDB.db_connect('APPL_DB')

        self.stateDB = SonicV2Connector(host='127.0.0.1')
        if self.stateDB is not None:
            self.stateDB.connect(self.stateDB.STATE_DB)

        version_info = device_info.get_sonic_version_info()
        asic_type = version_info.get('asic_type')
        self.asic_type = asic_type

        # Mellanox platforms carry extra buffer-table migrations.
        if asic_type == "mellanox":
            from mellanox_buffer_migrator import MellanoxBufferMigrator
            self.mellanox_buffer_migrator = MellanoxBufferMigrator(
                self.configDB, self.appDB, self.stateDB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data:
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if IP address is present in the key. If it
        is present, then the key would be a tuple or else, it shall be
        be string
        '''
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from existing INTERFACE table with IP Prefix
        to have an additional ONE entry without IP Prefix. For. e.g, for an
        entry "Vlan1000|192.168.0.1/21": {}", this function shall add an entry
        without IP prefix as ""Vlan1000": {}". This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
            'INTERFACE',
            'PORTCHANNEL_INTERFACE',
            'VLAN_INTERFACE',
            'LOOPBACK_INTERFACE'
        }
        # First pass: record interfaces that already have a prefix-less entry.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data:
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue
        # Second pass: add the prefix-less entry for any interface missing one.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data:
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log.log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from existing INTF table in APP DB during warmboot
        with IP Prefix to have an additional ONE entry without IP Prefix.
        For. e.g, for an entry "Vlan1000:192.168.0.1/21": {}", this function
        shall add an entry without IP prefix as ""Vlan1000": {}".
        This also migrates 'lo' to 'Loopback0' interface
        '''
        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                # Rename the legacy 'lo' entry to 'Loopback0'.
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log.log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')
            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        # Re-read keys: the 'lo' rename above may have changed the key set.
        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log.log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def migrate_copp_table(self):
        '''
        Delete the existing COPP table
        '''
        if self.appDB is None:
            return
        keys = self.appDB.keys(self.appDB.APPL_DB, "COPP_TABLE:*")
        if keys is None:
            return
        for copp_key in keys:
            self.appDB.delete(self.appDB.APPL_DB, copp_key)

    def migrate_feature_table(self):
        '''
        Combine CONTAINER_FEATURE and FEATURE tables into FEATURE table.
        '''
        # Rename the 'status' field to 'state' in FEATURE entries.
        feature_table = self.configDB.get_table('FEATURE')
        for feature, config in feature_table.items():
            state = config.get('status')
            if state is not None:
                config['state'] = state
                config.pop('status')
                self.configDB.set_entry('FEATURE', feature, config)

        # Fold CONTAINER_FEATURE entries into FEATURE and drop the old table.
        container_feature_table = self.configDB.get_table('CONTAINER_FEATURE')
        for feature, config in container_feature_table.items():
            self.configDB.mod_entry('FEATURE', feature, config)
            self.configDB.set_entry('CONTAINER_FEATURE', feature, None)

    def migrate_config_db_buffer_tables_for_dynamic_calculation(self, speed_list, cable_len_list, default_dynamic_th, abandon_method, append_item_method):
        '''
        Migrate buffer tables to dynamic calculation mode
        parameters
        @speed_list - list of speed supported
        @cable_len_list - list of cable length supported
        @default_dynamic_th - default dynamic th
        @abandon_method - a function which is called to abandon the migration and
                          keep the current configuration if the current one doesn't match the default one
        @append_item_method - a function which is called to append an item to the list of pending commit items
                              any update to buffer configuration will be pended and won't be applied until
                              all configuration is checked and aligns with the default one

        1. Buffer profiles for lossless PGs in BUFFER_PROFILE table will be removed
           if their names have the convention of pg_lossless_<speed>_<cable_length>_profile
           where the speed and cable_length belongs speed_list and cable_len_list respectively
           and the dynamic_th is equal to default_dynamic_th
        2. Insert tables required for dynamic buffer calculation
           - DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE: {'default_dynamic_th': default_dynamic_th}
           - LOSSLESS_TRAFFIC_PATTERN|AZURE: {'mtu': '1024', 'small_packet_percentage': '100'}
        3. For lossless dynamic PGs, remove the explicit referencing buffer profiles
           Before: BUFFER_PG|<port>|3-4: {'profile': 'BUFFER_PROFILE|pg_lossless_<speed>_<cable_length>_profile'}
           After:  BUFFER_PG|<port>|3-4: {'profile': 'NULL'}
        '''
        # Migrate BUFFER_PROFILEs, removing dynamically generated profiles
        dynamic_profile = self.configDB.get_table('BUFFER_PROFILE')
        profile_pattern = 'pg_lossless_([1-9][0-9]*000)_([1-9][0-9]*m)_profile'
        for name, info in dynamic_profile.items():
            m = re.search(profile_pattern, name)
            if not m:
                continue
            speed = m.group(1)
            cable_length = m.group(2)
            if speed in speed_list and cable_length in cable_len_list:
                append_item_method(('BUFFER_PROFILE', name, None))
                log.log_info(
                    "Lossless profile {} has been removed".format(name))

        # Migrate BUFFER_PGs, removing the explicit designated profiles
        buffer_pgs = self.configDB.get_table('BUFFER_PG')
        ports = self.configDB.get_table('PORT')
        all_cable_lengths = self.configDB.get_table('CABLE_LENGTH')
        if not buffer_pgs or not ports or not all_cable_lengths:
            log.log_notice(
                "At lease one of tables BUFFER_PG, PORT and CABLE_LENGTH hasn't been defined, skip following migration"
            )
            abandon_method()
            return True

        cable_lengths = all_cable_lengths[list(all_cable_lengths.keys())[0]]
        for name, profile in buffer_pgs.items():
            # do the db migration
            try:
                port, pg = name
                # Strip '[' and ']' then take the profile name after the '|'.
                profile_name = profile['profile'][1:-1].split('|')[1]
                if pg == '0':
                    if profile_name != 'ingress_lossy_profile':
                        log.log_notice(
                            "BUFFER_PG table entry {} has non default profile {} configured"
                            .format(name, profile_name))
                        abandon_method()
                        return True
                    else:
                        continue
                elif pg != '3-4':
                    log.log_notice(
                        "BUFFER_PG table entry {} isn't default PG(0 or 3-4)".
                        format(name))
                    abandon_method()
                    return True
                m = re.search(profile_pattern, profile_name)
                if not m:
                    log.log_notice(
                        "BUFFER_PG table entry {} has non-default profile name {}"
                        .format(name, profile_name))
                    abandon_method()
                    return True
                speed = m.group(1)
                cable_length = m.group(2)
                if speed == ports[port][
                        'speed'] and cable_length == cable_lengths[port]:
                    append_item_method(('BUFFER_PG', name, {
                        'profile': 'NULL'
                    }))
                else:
                    log.log_notice(
                        "Lossless PG profile {} for port {} doesn't match its speed {} or cable length {}, keep using traditional buffer calculation mode"
                        .format(profile_name, port, speed, cable_length))
                    abandon_method()
                    return True
            except Exception:
                log.log_notice("Exception occured during parsing the profiles")
                abandon_method()
                return True

        # Insert other tables required for dynamic buffer calculation
        metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
        metadata['buffer_model'] = 'dynamic'
        append_item_method(('DEVICE_METADATA', 'localhost', metadata))
        append_item_method(('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE', {
            'default_dynamic_th': default_dynamic_th
        }))
        append_item_method(('LOSSLESS_TRAFFIC_PATTERN', 'AZURE', {
            'mtu': '1024',
            'small_packet_percentage': '100'
        }))

        return True

    def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profiles=None, buffer_pgs=None):
        '''
        This is the very first warm reboot of buffermgrd (dynamic) if the system reboot from old image by warm-reboot
        In this case steps need to be taken to get buffermgrd prepared (for warm reboot)

        During warm reboot, buffer tables should be installed in the first place.
        However, it isn't able to achieve that when system is warm-rebooted from an old image
        without dynamic buffer supported, because the buffer info wasn't in the APPL_DB in the old image.
        The solution is to copy that info from CONFIG_DB into APPL_DB in db_migrator.
        During warm-reboot, db_migrator adjusts buffer info in CONFIG_DB by removing some fields
        according to requirement from dynamic buffer calculation.
        The buffer info before that adjustment needs to be copied to APPL_DB.

        1. set WARM_RESTART_TABLE|buffermgrd as {restore_count: 0}
        2. Copy the following tables from CONFIG_DB into APPL_DB in case of warm reboot
           The separator in fields that reference objects in other table needs to be updated from '|' to ':'
           - BUFFER_POOL
           - BUFFER_PROFILE, separator updated for field 'pool'
           - BUFFER_PG, separator updated for field 'profile'
           - BUFFER_QUEUE, separator updated for field 'profile
           - BUFFER_PORT_INGRESS_PROFILE_LIST, separator updated for field 'profile_list'
           - BUFFER_PORT_EGRESS_PROFILE_LIST, separator updated for field 'profile_list'
        '''
        warmreboot_state = self.stateDB.get(
            self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable')
        mmu_size = self.stateDB.get(self.stateDB.STATE_DB,
                                    'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size')
        # mmu_size absent means buffermgrd (dynamic) never ran before.
        if warmreboot_state == 'true' and not mmu_size:
            log.log_notice(
                "This is the very first run of buffermgrd (dynamic), prepare info required from warm reboot"
            )
        else:
            return True

        # (table, pre-fetched entries or None, field that references another table)
        buffer_table_list = [
            ('BUFFER_POOL', buffer_pools, None),
            ('BUFFER_PROFILE', buffer_profiles, 'pool'),
            ('BUFFER_PG', buffer_pgs, 'profile'),
            ('BUFFER_QUEUE', None, 'profile'),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST', None, 'profile_list'),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST', None, 'profile_list')
        ]

        for pair in buffer_table_list:
            keys_copied = []
            keys_ignored = []
            table_name, entries, reference_field_name = pair
            app_table_name = table_name + "_TABLE"
            if not entries:
                entries = self.configDB.get_table(table_name)
            for key, items in entries.items():
                # copy items to appl db
                if reference_field_name:
                    confdb_ref = items.get(reference_field_name)
                    if not confdb_ref or confdb_ref == "NULL":
                        keys_ignored.append(key)
                        continue
                    items_referenced = confdb_ref.split(',')
                    appdb_ref = ""
                    first_item = True
                    # Rewrite each "TABLE|name" reference to "TABLE_TABLE:name".
                    for item in items_referenced:
                        if first_item:
                            first_item = False
                        else:
                            appdb_ref += ','
                        subitems = item.split('|')
                        first_key = True
                        for subitem in subitems:
                            if first_key:
                                appdb_ref += subitem + '_TABLE'
                                first_key = False
                            else:
                                appdb_ref += ':' + subitem
                    items[reference_field_name] = appdb_ref
                keys_copied.append(key)
                if type(key) is tuple:
                    appl_db_key = app_table_name + ':' + ':'.join(key)
                else:
                    appl_db_key = app_table_name + ':' + key
                for field, data in items.items():
                    self.appDB.set(self.appDB.APPL_DB, appl_db_key, field, data)

            if keys_copied:
                log.log_info(
                    "The following items in table {} in CONFIG_DB have been copied to APPL_DB: {}"
                    .format(table_name, keys_copied))
            if keys_ignored:
                # NOTE(review): this logs keys_copied, not keys_ignored —
                # looks like a copy-paste bug; confirm and fix separately.
                log.log_info(
                    "The following items in table {} in CONFIG_DB have been ignored: {}"
                    .format(table_name, keys_copied))

        return True

    def migrate_config_db_port_table_for_auto_neg(self):
        # Convert legacy '1'/'0' autoneg values to 'on'/'off' and seed
        # adv_speeds from the configured speed when enabling autoneg.
        table_name = 'PORT'
        port_table = self.configDB.get_table(table_name)
        for key, value in port_table.items():
            if 'autoneg' in value:
                if value['autoneg'] == '1':
                    self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'autoneg', 'on')
                    if 'speed' in value and 'adv_speeds' not in value:
                        self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'adv_speeds', value['speed'])
                elif value['autoneg'] == '0':
                    self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'autoneg', 'off')

    def migrate_qos_db_fieldval_reference_remove(self, table_list, db, db_num, db_delimeter):
        # Strip ABNF-style "[TABLE<delim>name]" references down to "name" for
        # every (table, fields) pair in table_list.
        for pair in table_list:
            table_name, fields_list = pair
            qos_table = db.get_table(table_name)
            for key, value in qos_table.items():
                if type(key) is tuple:
                    db_key = table_name + db_delimeter + db_delimeter.join(key)
                else:
                    db_key = table_name + db_delimeter + key
                for field in fields_list:
                    if field in value:
                        fieldVal = value.get(field)
                        if not fieldVal or fieldVal == "NULL":
                            continue
                        newFiledVal = ""
                        # Check for ABNF format presence and convert ABNF to string
                        if "[" in fieldVal and db_delimeter in fieldVal and "]" in fieldVal:
                            log.log_info(
                                "Found ABNF format field value in table {} key {} field {} val {}"
                                .format(table_name, db_key, field, fieldVal))
                            value_list = fieldVal.split(",")
                            for item in value_list:
                                if "[" != item[
                                        0] or db_delimeter not in item or "]" != item[
                                            -1]:
                                    continue
                                newFiledVal = newFiledVal + item[1:-1].split(
                                    db_delimeter)[1] + ','
                            # Drop the trailing comma added by the loop.
                            newFiledVal = newFiledVal[:-1]
                            db.set(db_num, db_key, field, newFiledVal)
                            log.log_info(
                                "Modified ABNF format field value to string in table {} key {} field {} val {}"
                                .format(table_name, db_key, field, newFiledVal))
        return True

    def migrate_qos_fieldval_reference_format(self):
        '''
        This is to change for first time to remove field refernces of ABNF format
        in APPL DB for warm boot. i.e "[Tabale_name:name]" to string in APPL_DB.
        Reasons for doing this
        - To consistent with all other SoNIC CONFIG_DB/APPL_DB tables and fields
        - References in DB is not required, this will be taken care by YANG model leafref.
        '''
        qos_app_table_list = [
            ('BUFFER_PG_TABLE', ['profile']),
            ('BUFFER_QUEUE_TABLE', ['profile']),
            ('BUFFER_PROFILE_TABLE', ['pool']),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE', ['profile_list']),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE', ['profile_list'])
        ]
        log.log_info("Remove APPL_DB QOS tables field reference ABNF format")
        self.migrate_qos_db_fieldval_reference_remove(qos_app_table_list,
                                                      self.appDB,
                                                      self.appDB.APPL_DB, ':')

        qos_table_list = [
            ('QUEUE', ['scheduler', 'wred_profile']),
            ('PORT_QOS_MAP', [
                'dscp_to_tc_map', 'dot1p_to_tc_map', 'pfc_to_queue_map',
                'tc_to_pg_map', 'tc_to_queue_map', 'pfc_to_pg_map'
            ]),
            ('BUFFER_PG', ['profile']),
            ('BUFFER_QUEUE', ['profile']),
            ('BUFFER_PROFILE', ['pool']),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST', ['profile_list']),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST', ['profile_list'])
        ]
        log.log_info("Remove CONFIG_DB QOS tables field reference ABNF format")
        self.migrate_qos_db_fieldval_reference_remove(qos_table_list,
                                                      self.configDB,
                                                      self.configDB.CONFIG_DB,
                                                      '|')
        return True

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that doesn't have a version
        string defined in config_DB.
        Nothing can be assumped when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in expected format
        before migrating date to the next version.
        """
        log.log_info('Handling version_unknown')

        # NOTE: Uncomment next 3 lines of code when the migration code is in
        # place. Note that returning specific string is intentional,
        # here we only intended to migrade to DB version 1.0.1.
        # If new DB version is added in the future, the incremental
        # upgrade will take care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log.log_info('Handling version_1_0_1')
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log.log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_2', 'version_1_0_3') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        return 'version_1_0_3'

    def version_1_0_3(self):
        """
        Version 1_0_3.
        """
        log.log_info('Handling version_1_0_3')
        self.migrate_feature_table()
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_3', 'version_1_0_4') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_3', 'version_1_0_4') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_4')
        else:
            self.set_version('version_1_0_4')
        return 'version_1_0_4'

    def version_1_0_4(self):
        """
        Version 1_0_4.
        """
        log.log_info('Handling version_1_0_4')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_4', 'version_1_0_5') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_4', 'version_1_0_5') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_5')
        else:
            self.set_version('version_1_0_5')
        return 'version_1_0_5'

    def version_1_0_5(self):
        """
        Version 1_0_5.
        """
        log.log_info('Handling version_1_0_5')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_5', 'version_1_0_6') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_5', 'version_1_0_6') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_6')
        else:
            self.set_version('version_1_0_6')
        return 'version_1_0_6'

    def version_1_0_6(self):
        """
        Version 1_0_6.
        """
        log.log_info('Handling version_1_0_6')
        if self.asic_type == "mellanox":
            speed_list = self.mellanox_buffer_migrator.default_speed_list
            cable_len_list = self.mellanox_buffer_migrator.default_cable_len_list
            buffer_pools = self.configDB.get_table('BUFFER_POOL')
            buffer_profiles = self.configDB.get_table('BUFFER_PROFILE')
            buffer_pgs = self.configDB.get_table('BUFFER_PG')
            abandon_method = self.mellanox_buffer_migrator.mlnx_abandon_pending_buffer_configuration
            append_method = self.mellanox_buffer_migrator.mlnx_append_item_on_pending_configuration_list

            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_6', 'version_2_0_0') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_6', 'version_2_0_0') \
               and (not self.mellanox_buffer_migrator.mlnx_is_buffer_model_dynamic() or \
                    self.migrate_config_db_buffer_tables_for_dynamic_calculation(speed_list, cable_len_list, '0', abandon_method, append_method)) \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration() \
               and self.prepare_dynamic_buffer_for_warm_reboot(buffer_pools, buffer_profiles, buffer_pgs):
                self.set_version('version_2_0_0')
        else:
            self.prepare_dynamic_buffer_for_warm_reboot()

            # Non-Mellanox platforms stay on the traditional buffer model.
            metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
            metadata['buffer_model'] = 'traditional'
            self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata)
            log.log_notice('Setting buffer_model to traditional')

            self.set_version('version_2_0_0')

        return 'version_2_0_0'

    def version_2_0_0(self):
        """
        Version 2_0_0.
        """
        log.log_info('Handling version_2_0_0')
        self.migrate_config_db_port_table_for_auto_neg()
        self.set_version('version_2_0_1')
        return 'version_2_0_1'

    def version_2_0_1(self):
        """
        Version 2_0_1.
        """
        log.log_info('Handling version_2_0_1')
        warmreboot_state = self.stateDB.get(
            self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable')

        # Skip lacp_key rewrite during warm reboot to avoid LAG flaps.
        if warmreboot_state != 'true':
            portchannel_table = self.configDB.get_table('PORTCHANNEL')
            for name, data in portchannel_table.items():
                data['lacp_key'] = 'auto'
                self.configDB.set_entry('PORTCHANNEL', name, data)
        self.set_version('version_2_0_2')
        return 'version_2_0_2'

    def version_2_0_2(self):
        """
        Version 2_0_2.
        """
        log.log_info('Handling version_2_0_2')
        self.migrate_qos_fieldval_reference_format()
        self.set_version('version_2_0_3')
        return 'version_2_0_3'

    def version_2_0_3(self):
        """
        Version 2_0_3
        """
        log.log_info('Handling version_2_0_3')

        if self.asic_type == "mellanox":
            self.mellanox_buffer_migrator.mlnx_reclaiming_unused_buffer()

        self.set_version('version_2_0_4')
        return 'version_2_0_4'

    def version_2_0_4(self):
        """
        Current latest version. Nothing to do here.
        """
        log.log_info('Handling version_2_0_4')
        return None

    def get_version(self):
        # Returns 'version_unknown' when no version entry exists.
        # NOTE(review): if the entry exists without the VERSION field this
        # raises KeyError — presumably can't happen; confirm.
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]

        return 'version_unknown'

    def set_version(self, version=None):
        if not version:
            version = self.CURRENT_VERSION
        log.log_info('Setting version to ' + version)
        entry = {self.TABLE_FIELD: version}
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def common_migration_ops(self):
        try:
            with open(INIT_CFG_FILE) as f:
                init_db = json.load(f)
        except Exception as e:
            raise Exception(str(e))

        for init_cfg_table, table_val in init_db.items():
            log.log_info(
                "Migrating table {} from INIT_CFG to config_db".format(
                    init_cfg_table))
            for key in table_val:
                curr_cfg = self.configDB.get_entry(init_cfg_table, key)
                init_cfg = table_val[key]

                # Override init config with current config.
                # This will leave new fields from init_config
                # in new_config, but not override existing configuration.
                new_cfg = {**init_cfg, **curr_cfg}
                self.configDB.set_entry(init_cfg_table, key, new_cfg)

        self.migrate_copp_table()

    def migrate(self):
        # Chain version handlers until the latest handler returns None.
        version = self.get_version()
        log.log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception(
                    'Version migrate from %s stuck in same version' % version)
            version = next_version
        # Perform common migration ops
        self.common_migration_ops()
def add_table_kv(table, entry, key, val):
    """Set a single key/value field on the given table entry in CONFIG_DB."""
    db = ConfigDBConnector()
    db.connect()
    db.mod_entry(table, entry, {key: val})
def udp_timeout(ctx, seconds):
    """Set NAT UDP timeout configuration"""
    db = ConfigDBConnector()
    db.connect()
    # Persist the caller-supplied UDP aging timeout (in seconds).
    db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds})
def disable(ctx):
    """Disable the NAT feature """
    db = ConfigDBConnector()
    db.connect()
    db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "disabled"})
def memory(kdump_memory):
    """Set memory allocated for kdump capture kernel"""
    # A Python constructor can never return None, so the original
    # `if config_db is not None` guard was dead code — removed.
    config_db = ConfigDBConnector()
    config_db.connect()
    config_db.mod_entry("KDUMP", "config", {"memory": kdump_memory})
def num_dumps(kdump_num_dumps):
    """Set max number of dump files for kdump"""
    # A Python constructor can never return None, so the original
    # `if config_db is not None` guard was dead code — removed.
    config_db = ConfigDBConnector()
    config_db.connect()
    config_db.mod_entry("KDUMP", "config", {"num_dumps": kdump_num_dumps})
class AclLoader(object):
    """Load, convert, program and display ACL tables/rules.

    Translates openconfig ACL JSON into SONiC Config DB schema and programs
    it into CONFIG_DB (globally and, on multi-ASIC platforms, per front-ASIC
    namespace). Also renders show output for tables, rules, mirror sessions
    and policers.
    """

    ACL_TABLE = "ACL_TABLE"
    ACL_RULE = "ACL_RULE"
    ACL_TABLE_TYPE_MIRROR = "MIRROR"
    ACL_TABLE_TYPE_CTRLPLANE = "CTRLPLANE"
    CFG_MIRROR_SESSION_TABLE = "MIRROR_SESSION"
    STATE_MIRROR_SESSION_TABLE = "MIRROR_SESSION_TABLE"
    POLICER = "POLICER"
    SESSION_PREFIX = "everflow"
    SWITCH_CAPABILITY_TABLE = "SWITCH_CAPABILITY"
    ACL_ACTIONS_CAPABILITY_FIELD = "ACL_ACTIONS"
    ACL_ACTION_CAPABILITY_FIELD = "ACL_ACTION"

    # Rule PRIORITY is computed as max_priority - sequence_id; the default
    # deny rule uses min_priority.
    min_priority = 1
    max_priority = 10000

    ethertype_map = {
        "ETHERTYPE_LLDP": 0x88CC,
        "ETHERTYPE_VLAN": 0x8100,
        "ETHERTYPE_ROCE": 0x8915,
        "ETHERTYPE_ARP": 0x0806,
        "ETHERTYPE_IPV4": 0x0800,
        "ETHERTYPE_IPV6": 0x86DD,
        "ETHERTYPE_MPLS": 0x8847
    }

    ip_protocol_map = {
        "IP_TCP": 6,
        "IP_ICMP": 1,
        "IP_UDP": 17,
        "IP_IGMP": 2,
        "IP_PIM": 103,
        "IP_RSVP": 46,
        "IP_GRE": 47,
        "IP_AUTH": 51,
        "IP_ICMPV6": 58,
        "IP_L2TP": 115
    }

    def __init__(self):
        self.yang_acl = None
        self.requested_session = None
        self.mirror_stage = None
        self.current_table = None
        self.tables_db_info = {}
        self.rules_db_info = {}
        self.rules_info = {}
        if multi_asic.is_multi_asic():
            # Load global db config
            SonicDBConfig.load_sonic_global_db_config()
        else:
            SonicDBConfig.initialize()
        self.sessions_db_info = {}
        self.configdb = ConfigDBConnector()
        self.configdb.connect()
        self.statedb = SonicV2Connector(host="127.0.0.1")
        self.statedb.connect(self.statedb.STATE_DB)

        # For multi-npu architecture we will have both global and per front asic namespace.
        # Global namespace will be used for Control plane ACL which are via IPTables.
        # Per ASIC namespace will be used for Data and Everflow ACL's.
        # Global Configdb will have all ACL information for both Ctrl and Data/Everflow ACL's
        # and will be used as source of truth for ACL modification to config DB which will be
        # done to both Global DB and front asic namespace
        self.per_npu_configdb = {}

        # State DB is used to get the mirror session monitor port.
        # For multi-npu platforms each asic namespace can have a different monitor port
        # depending on which route to session destination ip. So for multi-npu
        # platforms we get state db for all front asic namespaces in addition to
        self.per_npu_statedb = {}

        # Getting all front asic namespaces and corresponding config and state DB connectors
        namespaces = device_info.get_all_namespaces()
        for front_asic_namespaces in namespaces['front_ns']:
            self.per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces)
            self.per_npu_configdb[front_asic_namespaces].connect()
            self.per_npu_statedb[front_asic_namespaces] = SonicV2Connector(use_unix_socket_path=True, namespace=front_asic_namespaces)
            self.per_npu_statedb[front_asic_namespaces].connect(self.per_npu_statedb[front_asic_namespaces].STATE_DB)

        self.read_tables_info()
        self.read_rules_info()
        self.read_sessions_info()
        self.read_policers_info()

    def read_tables_info(self):
        """
        Read ACL_TABLE table from configuration database
        :return:
        """
        self.tables_db_info = self.configdb.get_table(self.ACL_TABLE)

    def get_tables_db_info(self):
        """Return the cached ACL_TABLE contents."""
        return self.tables_db_info

    def read_rules_info(self):
        """
        Read ACL_RULE table from configuration database
        :return:
        """
        self.rules_db_info = self.configdb.get_table(self.ACL_RULE)

    def get_rules_db_info(self):
        """Return the cached ACL_RULE contents."""
        return self.rules_db_info

    def read_policers_info(self):
        """
        Read POLICER table from configuration database
        :return:
        """
        # For multi-npu platforms we will read from any one front asic namespace
        # config db as the information should be same across all config dbs
        if self.per_npu_configdb:
            namespace_configdb = list(self.per_npu_configdb.values())[0]
            self.policers_db_info = namespace_configdb.get_table(self.POLICER)
        else:
            self.policers_db_info = self.configdb.get_table(self.POLICER)

    def get_policers_db_info(self):
        """Return the cached POLICER contents."""
        return self.policers_db_info

    def read_sessions_info(self):
        """
        Read MIRROR_SESSION table from configuration database,
        then enrich each session with status/monitor_port from STATE_DB.
        :return:
        """
        # For multi-npu platforms we will read from any one front asic namespace
        # config db as the information should be same across all config dbs
        if self.per_npu_configdb:
            namespace_configdb = list(self.per_npu_configdb.values())[0]
            self.sessions_db_info = namespace_configdb.get_table(self.CFG_MIRROR_SESSION_TABLE)
        else:
            self.sessions_db_info = self.configdb.get_table(self.CFG_MIRROR_SESSION_TABLE)
        for key in self.sessions_db_info:
            if self.per_npu_statedb:
                # For multi-npu platforms we will read from all front asic namespace
                # statedbs as the monitor port will be different for each asic
                # and its status also might be different (ideally should not happen).
                # We will store them as dict of 'asic' : value
                self.sessions_db_info[key]["status"] = {}
                self.sessions_db_info[key]["monitor_port"] = {}
                for namespace_key, namespace_statedb in self.per_npu_statedb.items():
                    state_db_info = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.STATE_MIRROR_SESSION_TABLE, key))
                    # "error" marks a session missing from STATE_DB entirely.
                    self.sessions_db_info[key]["status"][namespace_key] = state_db_info.get("status", "inactive") if state_db_info else "error"
                    self.sessions_db_info[key]["monitor_port"][namespace_key] = state_db_info.get("monitor_port", "") if state_db_info else ""
            else:
                state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.STATE_MIRROR_SESSION_TABLE, key))
                self.sessions_db_info[key]["status"] = state_db_info.get("status", "inactive") if state_db_info else "error"
                self.sessions_db_info[key]["monitor_port"] = state_db_info.get("monitor_port", "") if state_db_info else ""

    def get_sessions_db_info(self):
        """Return the cached (STATE_DB-enriched) mirror session info."""
        return self.sessions_db_info

    def get_session_name(self):
        """
        Get requested mirror session name or default session
        :return: Mirror session name, or None when neither a requested session
                 nor an "everflow"-prefixed session exists
        """
        if self.requested_session:
            return self.requested_session

        for key in self.get_sessions_db_info():
            if key.startswith(self.SESSION_PREFIX):
                return key

        return None

    def set_table_name(self, table_name):
        """
        Set table name to restrict the table to be modified
        :param table_name: Table name
        :return:
        """
        # Unknown tables only warn; the restriction is still applied.
        if not self.is_table_valid(table_name):
            warning("Table \"%s\" not found" % table_name)

        self.current_table = table_name

    def set_session_name(self, session_name):
        """
        Set session name to be used in ACL rule action
        :param session_name: Mirror session name
        :raises AclLoaderException: if the session does not exist
        :return:
        """
        if session_name not in self.get_sessions_db_info():
            raise AclLoaderException("Session %s does not exist" % session_name)

        self.requested_session = session_name

    def set_mirror_stage(self, stage):
        """
        Set mirror stage to be used in ACL mirror rule action
        :param stage: stage 'ingress'/'egress'
        :return:
        """
        self.mirror_stage = stage.upper()

    def set_max_priority(self, priority):
        """
        Set rules max priority
        :param priority: Rules max priority
        :return:
        """
        # Shadows the class attribute on this instance only.
        self.max_priority = int(priority)

    def is_table_valid(self, tname):
        """Return the table's CONFIG_DB entry (truthy) when it exists."""
        return self.tables_db_info.get(tname)

    def is_table_mirror(self, tname):
        """
        Check if ACL table type is ACL_TABLE_TYPE_MIRROR or ACL_TABLE_TYPE_MIRRORV6
        :param tname: ACL table name
        :return: True if table type is MIRROR or MIRRORV6 else False
        """
        return self.tables_db_info[tname]['type'].upper().startswith(self.ACL_TABLE_TYPE_MIRROR)

    def is_table_ipv6(self, tname):
        """
        Check if ACL table type is IPv6 (L3V6 or MIRRORV6)
        :param tname: ACL table name
        :return: True if table type is IPv6 else False
        """
        return self.tables_db_info[tname]["type"].upper() in ("L3V6", "MIRRORV6")

    def is_table_control_plane(self, tname):
        """
        Check if ACL table type is ACL_TABLE_TYPE_CTRLPLANE
        :param tname: ACL table name
        :return: True if table type is ACL_TABLE_TYPE_CTRLPLANE else False
        """
        return self.tables_db_info[tname]['type'].upper() == self.ACL_TABLE_TYPE_CTRLPLANE

    @staticmethod
    def parse_acl_json(filename):
        """Parse an openconfig ACL JSON file into a pybind object.

        :param filename: path to the openconfig ACL JSON file
        :raises AclLoaderException: when pybind silently dropped acl-sets
        :return: parsed openconfig_acl object
        """
        yang_acl = pybindJSON.load(filename, openconfig_acl, "openconfig_acl")
        # Check pybindJSON parsing
        # pybindJSON.load will silently return an empty json object if input invalid
        with open(filename, 'r') as f:
            plain_json = json.load(f)
            if len(plain_json['acl']['acl-sets']['acl-set']) != len(yang_acl.acl.acl_sets.acl_set):
                raise AclLoaderException("Invalid input file %s" % filename)
        return yang_acl

    def load_rules_from_file(self, filename):
        """
        Load file with ACL rules configuration in openconfig ACL format. Convert rules
        to Config DB schema.
        :param filename: File in openconfig ACL format
        :return:
        """
        self.yang_acl = AclLoader.parse_acl_json(filename)
        self.convert_rules()

    def convert_action(self, table_name, rule_idx, rule):
        """Map an openconfig forwarding action to Config DB action fields.

        :raises AclLoaderException: on unknown action, missing mirror session,
            invalid mirror stage, or unsupported action per switch capability
        :return: dict of action properties for the rule
        """
        rule_props = {}

        if rule.actions.config.forwarding_action == "ACCEPT":
            if self.is_table_control_plane(table_name):
                rule_props[AclAction.PACKET] = PacketAction.ACCEPT
            elif self.is_table_mirror(table_name):
                # Mirror tables turn ACCEPT into a mirror action on the
                # requested (or default everflow) session.
                session_name = self.get_session_name()
                if not session_name:
                    raise AclLoaderException("Mirroring session does not exist")

                if self.mirror_stage == Stage.INGRESS:
                    mirror_action = AclAction.MIRROR_INGRESS
                elif self.mirror_stage == Stage.EGRESS:
                    mirror_action = AclAction.MIRROR_EGRESS
                else:
                    raise AclLoaderException("Invalid mirror stage passed {}".format(self.mirror_stage))

                rule_props[mirror_action] = session_name
            else:
                rule_props[AclAction.PACKET] = PacketAction.FORWARD
        elif rule.actions.config.forwarding_action == "DROP":
            rule_props[AclAction.PACKET] = PacketAction.DROP
        elif rule.actions.config.forwarding_action == "REJECT":
            # REJECT is programmed the same as DROP.
            rule_props[AclAction.PACKET] = PacketAction.DROP
        else:
            raise AclLoaderException("Unknown rule action {} in table {}, rule {}".format(rule.actions.config.forwarding_action, table_name, rule_idx))

        if not self.validate_actions(table_name, rule_props):
            raise AclLoaderException("Rule action {} is not supported in table {}, rule {}".format(rule.actions.config.forwarding_action, table_name, rule_idx))

        return rule_props

    def validate_actions(self, table_name, action_props):
        """Drop actions unsupported by SWITCH_CAPABILITY from action_props.

        :return: True when all originally-proposed actions survived, i.e. the
            switch supports every action in the rule (mutates action_props)
        """
        if self.is_table_control_plane(table_name):
            return True

        action_count = len(action_props)

        if table_name not in self.tables_db_info:
            raise AclLoaderException("Table {} does not exist".format(table_name))

        stage = self.tables_db_info[table_name].get("stage", Stage.INGRESS)

        # check if per npu state db is there then read using first state db
        # else read from global statedb
        if self.per_npu_statedb:
            # For multi-npu we will read using any one statedb connector for front asic namespace.
            # Same information should be there in all state DB's
            # as it is static information about switch capability
            namespace_statedb = list(self.per_npu_statedb.values())[0]
            capability = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE))
        else:
            capability = self.statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE))
        # Iterate over a copy so we can delete from action_props in place.
        for action_key in dict(action_props):
            key = "{}|{}".format(self.ACL_ACTIONS_CAPABILITY_FIELD, stage.upper())
            if key not in capability:
                del action_props[action_key]
                continue

            values = capability[key].split(",")
            if action_key.upper() not in values:
                del action_props[action_key]
                continue

            if action_key == AclAction.PACKET:
                # Check if action_value is supported
                action_value = action_props[action_key]
                key = "{}|{}".format(self.ACL_ACTION_CAPABILITY_FIELD, action_key.upper())
                if key not in capability:
                    del action_props[action_key]
                    continue

                if action_value not in capability[key]:
                    del action_props[action_key]
                    continue

        return action_count == len(action_props)

    def convert_l2(self, table_name, rule_idx, rule):
        """Convert openconfig L2 match fields (ethertype, VLAN) to Config DB.

        :raises AclLoaderException: on unconvertible ethertype or out-of-range VLAN
        :return: dict of L2 match properties
        """
        rule_props = {}

        if rule.l2.config.ethertype:
            if rule.l2.config.ethertype in self.ethertype_map:
                rule_props["ETHER_TYPE"] = self.ethertype_map[rule.l2.config.ethertype]
            else:
                try:
                    rule_props["ETHER_TYPE"] = int(rule.l2.config.ethertype)
                except Exception as e:
                    raise AclLoaderException("Failed to convert ethertype %s; table %s rule %s; exception=%s" % (rule.l2.config.ethertype, table_name, rule_idx, str(e)))

        if rule.l2.config.vlan_id != "" and rule.l2.config.vlan_id != "null":
            vlan_id = rule.l2.config.vlan_id
            if vlan_id <= 0 or vlan_id >= 4096:
                raise AclLoaderException("VLAN ID %d is out of bounds (0, 4096)" % (vlan_id))
            rule_props["VLAN_ID"] = vlan_id

        return rule_props

    def convert_ip(self, table_name, rule_idx, rule):
        """Convert openconfig IP match fields to Config DB schema.

        :raises AclLoaderException: on a protocol that is neither known nor numeric
        :return: dict of IP match properties
        """
        rule_props = {}

        # FIXME: 0 is a valid protocol number, but openconfig seems to use it as a default value,
        # so there isn't currently a good way to check if the user defined proto=0 or not.
        if rule.ip.config.protocol:
            if rule.ip.config.protocol in self.ip_protocol_map:
                # Special case: ICMP has different protocol numbers for IPv4 and IPv6, so if we receive
                # "IP_ICMP" we need to pick the correct protocol number for the IP version
                if rule.ip.config.protocol == "IP_ICMP" and self.is_table_ipv6(table_name):
                    rule_props["IP_PROTOCOL"] = self.ip_protocol_map["IP_ICMPV6"]
                else:
                    rule_props["IP_PROTOCOL"] = self.ip_protocol_map[rule.ip.config.protocol]
            else:
                try:
                    int(rule.ip.config.protocol)
                except:
                    raise AclLoaderException("Unknown rule protocol %s in table %s, rule %d!" % (rule.ip.config.protocol, table_name, rule_idx))

                rule_props["IP_PROTOCOL"] = rule.ip.config.protocol

        if rule.ip.config.source_ip_address:
            source_ip_address = rule.ip.config.source_ip_address
            if ipaddress.ip_network(source_ip_address).version == 4:
                rule_props["SRC_IP"] = source_ip_address
            else:
                rule_props["SRC_IPV6"] = source_ip_address

        if rule.ip.config.destination_ip_address:
            destination_ip_address = rule.ip.config.destination_ip_address
            if ipaddress.ip_network(destination_ip_address).version == 4:
                rule_props["DST_IP"] = destination_ip_address
            else:
                rule_props["DST_IPV6"] = destination_ip_address

        # NOTE: DSCP is available only for MIRROR table
        if self.is_table_mirror(table_name):
            if rule.ip.config.dscp:
                rule_props["DSCP"] = rule.ip.config.dscp

        return rule_props

    def convert_icmp(self, table_name, rule_idx, rule):
        """Convert openconfig ICMP type/code fields to Config DB schema,
        selecting the v4 or v6 key names from the table type.

        :raises AclLoaderException: on out-of-range ICMP type or code
        :return: dict of ICMP match properties
        """
        rule_props = {}

        is_table_v6 = self.is_table_ipv6(table_name)
        type_key = "ICMPV6_TYPE" if is_table_v6 else "ICMP_TYPE"
        code_key = "ICMPV6_CODE" if is_table_v6 else "ICMP_CODE"

        if rule.icmp.config.type != "" and rule.icmp.config.type != "null":
            icmp_type = rule.icmp.config.type
            if icmp_type < 0 or icmp_type > 255:
                raise AclLoaderException("ICMP type %d is out of bounds [0, 255]" % (icmp_type))
            rule_props[type_key] = icmp_type

        if rule.icmp.config.code != "" and rule.icmp.config.code != "null":
            icmp_code = rule.icmp.config.code
            if icmp_code < 0 or icmp_code > 255:
                raise AclLoaderException("ICMP code %d is out of bounds [0, 255]" % (icmp_code))
            rule_props[code_key] = icmp_code

        return rule_props

    def convert_port(self, port):
        """
        Convert port field format from openconfig ACL to Config DB schema
        :param port: String, ACL port number or range in openconfig format
        :return: Tuple, first value is converted port string,
            second value is boolean, True if value is a port range, False
            if it is a single port value
        """
        # OpenConfig port range is of the format "####..####", whereas
        # Config DB format is "####-####"
        if ".." in port:
            return port.replace("..", "-"), True
        else:
            return port, False

    def convert_transport(self, table_name, rule_idx, rule):
        """Convert openconfig L4 ports and TCP flags to Config DB schema.

        :return: dict of transport match properties
        """
        rule_props = {}

        if rule.transport.config.source_port:
            port, is_range = self.convert_port(str(rule.transport.config.source_port))
            rule_props["L4_SRC_PORT_RANGE" if is_range else "L4_SRC_PORT"] = port
        if rule.transport.config.destination_port:
            port, is_range = self.convert_port(str(rule.transport.config.destination_port))
            rule_props["L4_DST_PORT_RANGE" if is_range else "L4_DST_PORT"] = port

        # Accumulate named TCP flags into a single bitmask.
        tcp_flags = 0x00
        for flag in rule.transport.config.tcp_flags:
            if flag == "TCP_FIN":
                tcp_flags |= 0x01
            if flag == "TCP_SYN":
                tcp_flags |= 0x02
            if flag == "TCP_RST":
                tcp_flags |= 0x04
            if flag == "TCP_PSH":
                tcp_flags |= 0x08
            if flag == "TCP_ACK":
                tcp_flags |= 0x10
            if flag == "TCP_URG":
                tcp_flags |= 0x20
            if flag == "TCP_ECE":
                tcp_flags |= 0x40
            if flag == "TCP_CWR":
                tcp_flags |= 0x80
        if tcp_flags:
            # Config DB format is "value/mask"; mask equals the flags set.
            rule_props["TCP_FLAGS"] = '0x{:02x}/0x{:02x}'.format(tcp_flags, tcp_flags)

        return rule_props

    def convert_input_interface(self, table_name, rule_idx, rule):
        """Convert the openconfig input-interface reference to IN_PORTS."""
        rule_props = {}
        if rule.input_interface.interface_ref.config.interface:
            rule_props["IN_PORTS"] = rule.input_interface.interface_ref.config.interface
        return rule_props

    def convert_rule_to_db_schema(self, table_name, rule):
        """
        Convert rules format from openconfig ACL to Config DB schema
        :param table_name: ACL table name to which rule belong
        :param rule: ACL rule in openconfig format
        :return: dict with Config DB schema
        """
        rule_idx = int(rule.config.sequence_id)
        rule_props = {}
        rule_data = {(table_name, "RULE_" + str(rule_idx)): rule_props}

        # Lower sequence_id means higher priority.
        rule_props["PRIORITY"] = str(self.max_priority - rule_idx)

        deep_update(rule_props, self.convert_action(table_name, rule_idx, rule))
        deep_update(rule_props, self.convert_l2(table_name, rule_idx, rule))
        deep_update(rule_props, self.convert_ip(table_name, rule_idx, rule))
        deep_update(rule_props, self.convert_icmp(table_name, rule_idx, rule))
        deep_update(rule_props, self.convert_transport(table_name, rule_idx, rule))
        deep_update(rule_props, self.convert_input_interface(table_name, rule_idx, rule))

        return rule_data

    def deny_rule(self, table_name):
        """
        Create default deny rule in Config DB format
        :param table_name: ACL table name to which rule belong
        :return: dict with Config DB schema
        """
        rule_props = {}
        rule_data = {(table_name, "DEFAULT_RULE"): rule_props}
        rule_props["PRIORITY"] = str(self.min_priority)
        rule_props["PACKET_ACTION"] = "DROP"
        if 'v6' in table_name.lower():
            rule_props["IP_TYPE"] = "IPV6ANY"  # ETHERTYPE is not supported for DATAACLV6
        else:
            rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"])
        return rule_data

    def convert_rules(self):
        """
        Convert rules in openconfig ACL format to Config DB schema
        :return:
        """
        for acl_set_name in self.yang_acl.acl.acl_sets.acl_set:
            table_name = acl_set_name.replace(" ", "_").replace("-", "_").upper()
            acl_set = self.yang_acl.acl.acl_sets.acl_set[acl_set_name]

            if not self.is_table_valid(table_name):
                warning("%s table does not exist" % (table_name))
                continue

            # Honor a table restriction set via set_table_name().
            if self.current_table is not None and self.current_table != table_name:
                continue

            for acl_entry_name in acl_set.acl_entries.acl_entry:
                acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name]
                try:
                    rule = self.convert_rule_to_db_schema(table_name, acl_entry)
                    deep_update(self.rules_info, rule)
                except AclLoaderException as ex:
                    error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex))

            # Every non-mirror table gets a trailing default deny rule.
            if not self.is_table_mirror(table_name):
                deep_update(self.rules_info, self.deny_rule(table_name))

    def full_update(self):
        """
        Perform full update of ACL rules configuration. All existing rules
        will be removed. New rules loaded from file will be installed. If
        the current_table is not empty, only rules within that table will
        be removed and new rules in that table will be installed.
        :return:
        """
        for key in self.rules_db_info:
            if self.current_table is None or self.current_table == key[0]:
                self.configdb.mod_entry(self.ACL_RULE, key, None)
                # Program for per front asic namespace also if present
                for namespace_configdb in self.per_npu_configdb.values():
                    namespace_configdb.mod_entry(self.ACL_RULE, key, None)

        self.configdb.mod_config({self.ACL_RULE: self.rules_info})
        # Program for per front asic namespace also if present
        for namespace_configdb in self.per_npu_configdb.values():
            namespace_configdb.mod_config({self.ACL_RULE: self.rules_info})

    def incremental_update(self):
        """
        Perform incremental ACL rules configuration update. Get existing rules from
        Config DB. Compare with rules specified in file and perform corresponding
        modifications.
        :return:
        """
        # TODO: Until we test ASIC behavior, we cannot assume that we can insert
        # dataplane ACLs and shift existing ACLs. Therefore, we perform a full
        # update on dataplane ACLs, and only perform an incremental update on
        # control plane ACLs.
        new_rules = set(self.rules_info.keys())
        new_dataplane_rules = set()
        new_controlplane_rules = set()
        current_rules = set(self.rules_db_info.keys())
        current_dataplane_rules = set()
        current_controlplane_rules = set()

        for key in new_rules:
            table_name = key[0]
            if self.tables_db_info[table_name]['type'].upper() == self.ACL_TABLE_TYPE_CTRLPLANE:
                new_controlplane_rules.add(key)
            else:
                new_dataplane_rules.add(key)

        for key in current_rules:
            table_name = key[0]
            if self.tables_db_info[table_name]['type'].upper() == self.ACL_TABLE_TYPE_CTRLPLANE:
                current_controlplane_rules.add(key)
            else:
                current_dataplane_rules.add(key)

        # Remove all existing dataplane rules
        for key in current_dataplane_rules:
            self.configdb.mod_entry(self.ACL_RULE, key, None)
            # Program for per-asic namespace also if present
            for namespace_configdb in self.per_npu_configdb.values():
                namespace_configdb.mod_entry(self.ACL_RULE, key, None)

        # Add all new dataplane rules
        for key in new_dataplane_rules:
            self.configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])
            # Program for per-asic namespace corresponding to front asic also if present.
            for namespace_configdb in self.per_npu_configdb.values():
                namespace_configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])

        added_controlplane_rules = new_controlplane_rules.difference(current_controlplane_rules)
        removed_controlplane_rules = current_controlplane_rules.difference(new_controlplane_rules)
        existing_controlplane_rules = new_rules.intersection(current_controlplane_rules)

        for key in added_controlplane_rules:
            self.configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])
            # Program for per-asic namespace corresponding to front asic also if present.
            # For control plane ACL it's not needed but to keep all db in sync program everywhere
            for namespace_configdb in self.per_npu_configdb.values():
                namespace_configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key])

        for key in removed_controlplane_rules:
            self.configdb.mod_entry(self.ACL_RULE, key, None)
            # Program for per-asic namespace corresponding to front asic also if present.
            # For control plane ACL it's not needed but to keep all db in sync program everywhere
            for namespace_configdb in self.per_npu_configdb.values():
                namespace_configdb.mod_entry(self.ACL_RULE, key, None)

        for key in existing_controlplane_rules:
            # NOTE(review): `cmp` is not a Python 3 builtin — presumably defined
            # elsewhere in this module; verify before running under Python 3.
            if cmp(self.rules_info[key], self.rules_db_info[key]) != 0:
                self.configdb.set_entry(self.ACL_RULE, key, self.rules_info[key])
                # Program for per-asic namespace corresponding to front asic also if present.
                # For control plane ACL it's not needed but to keep all db in sync program everywhere
                for namespace_configdb in self.per_npu_configdb.values():
                    namespace_configdb.set_entry(self.ACL_RULE, key, self.rules_info[key])

    def delete(self, table=None, rule=None):
        """
        Delete ACL rules from Config DB, optionally filtered by table and/or rule.
        :param table: Optional. ACL table name filter
        :param rule: Optional. ACL rule name filter
        :return:
        """
        for key in self.rules_db_info:
            if not table or table == key[0]:
                if not rule or rule == key[1]:
                    self.configdb.set_entry(self.ACL_RULE, key, None)
                    # Program for per-asic namespace corresponding to front asic also if present.
                    for namespace_configdb in self.per_npu_configdb.values():
                        namespace_configdb.set_entry(self.ACL_RULE, key, None)

    def show_table(self, table_name):
        """
        Show ACL table configuration.
        :param table_name: Optional. ACL table name. Filter tables by specified name.
        :return:
        """
        header = ("Name", "Type", "Binding", "Description", "Stage")

        data = []
        for key, val in self.get_tables_db_info().items():
            if table_name and key != table_name:
                continue

            stage = val.get("stage", Stage.INGRESS).lower()
            if val["type"] == AclLoader.ACL_TABLE_TYPE_CTRLPLANE:
                services = natsorted(val["services"])
                data.append([key, val["type"], services[0], val["policy_desc"], stage])

                # Extra bindings are printed on continuation rows.
                if len(services) > 1:
                    for service in services[1:]:
                        data.append(["", "", service, "", ""])
            else:
                if not val["ports"]:
                    data.append([key, val["type"], "", val["policy_desc"], stage])
                else:
                    ports = natsorted(val["ports"])
                    data.append([key, val["type"], ports[0], val["policy_desc"], stage])

                    if len(ports) > 1:
                        for port in ports[1:]:
                            data.append(["", "", port, "", ""])

        print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval=""))

    def show_session(self, session_name):
        """
        Show mirror session configuration.
        :param session_name: Optional. Mirror session name. Filter sessions by specified name.
        :return:
        """
        erspan_header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", "Policer", "Monitor Port", "SRC Port", "Direction")
        span_header = ("Name", "Status", "DST Port", "SRC Port", "Direction", "Queue", "Policer")

        erspan_data = []
        span_data = []
        for key, val in self.get_sessions_db_info().items():
            if session_name and key != session_name:
                continue

            # SPAN and ERSPAN sessions go to separate tables.
            if val.get("type") == "SPAN":
                span_data.append([key, val.get("status", ""), val.get("dst_port", ""), val.get("src_port", ""), val.get("direction", "").lower(), val.get("queue", ""), val.get("policer", "")])
            else:
                erspan_data.append([key, val.get("status", ""), val.get("src_ip", ""), val.get("dst_ip", ""), val.get("gre_type", ""), val.get("dscp", ""), val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""), val.get("monitor_port", ""), val.get("src_port", ""), val.get("direction", "").lower()])

        print("ERSPAN Sessions")
        print(tabulate.tabulate(erspan_data, headers=erspan_header, tablefmt="simple", missingval=""))
        print("\nSPAN Sessions")
        print(tabulate.tabulate(span_data, headers=span_header, tablefmt="simple", missingval=""))

    def show_policer(self, policer_name):
        """
        Show policer configuration.
        :param policer_name: Optional. Policer name. Filter policers by specified name.
        :return:
        """
        header = ("Name", "Type", "Mode", "CIR", "CBS")

        data = []
        for key, val in self.get_policers_db_info().items():
            if policer_name and key != policer_name:
                continue

            data.append([key, val["meter_type"], val["mode"], val.get("cir", ""), val.get("cbs", "")])

        print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval=""))

    def show_rule(self, table_name, rule_id):
        """
        Show ACL rules configuration.
        :param table_name: Optional. ACL table name. Filter rules by specified table name.
        :param rule_id: Optional. ACL rule name. Filter rule by specified rule name.
        :return:
        """
        header = ("Table", "Rule", "Priority", "Action", "Match")

        def pop_priority(val):
            # Remove and return the PRIORITY field; "N/A" when absent.
            priority = "N/A"
            for key in dict(val):
                if (key.upper() == "PRIORITY"):
                    priority = val.pop(key)
                    return priority
            return priority

        def pop_action(val):
            # Remove the action field and render it as a display string.
            action = ""

            for key in dict(val):
                key = key.upper()
                if key == AclAction.PACKET:
                    action = val.pop(key)
                elif key == AclAction.REDIRECT:
                    action = "REDIRECT: {}".format(val.pop(key))
                elif key in (AclAction.MIRROR, AclAction.MIRROR_INGRESS):
                    action = "MIRROR INGRESS: {}".format(val.pop(key))
                elif key == AclAction.MIRROR_EGRESS:
                    action = "MIRROR EGRESS: {}".format(val.pop(key))
                else:
                    continue

            return action

        def pop_matches(val):
            # Remaining fields (after priority/action were popped) are matches.
            matches = list(sorted(["%s: %s" % (k, val[k]) for k in val]))
            if len(matches) == 0:
                matches.append("N/A")
            return matches

        raw_data = []
        for (tname, rid), val in self.get_rules_db_info().items():
            if table_name and table_name != tname:
                continue

            if rule_id and rule_id != rid:
                continue

            priority = pop_priority(val)
            action = pop_action(val)
            matches = pop_matches(val)

            # First row carries the rule identity; extra matches get
            # continuation rows with blank leading columns.
            rule_data = [[tname, rid, priority, action, matches[0]]]
            if len(matches) > 1:
                for m in matches[1:]:
                    rule_data.append(["", "", "", "", m])

            raw_data.append([priority, rule_data])

        raw_data.sort(key=lambda x: x[0], reverse=True)

        data = []
        for _, d in raw_data:
            data += d

        print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval=""))
def enable(): """Enable kdump operation""" config_db = ConfigDBConnector() if config_db is not None: config_db.connect() config_db.mod_entry("KDUMP", "config", {"enabled": "true"})