def get_tam_int_ifa_ts_flow_stats(args):
    """Fetch TAM INT IFA TS flow entries via REST, attach ACL counters, and render them.

    args: CLI argument list; args[0] is a flow name, or "all" (or empty) for every flow.
    Counters are read from COUNTERS_DB under 'COUNTERS:<acl-table>:<acl-rule>'.
    """
    api_response = {}
    api = cc.ApiClient()

    # connect to COUNTERS_DB
    counters_db = ConfigDBConnector()
    counters_db.db_connect('COUNTERS_DB')

    if len(args) == 1 and args[0] != "all":
        # Single named flow.
        path = cc.Path('/restconf/data/sonic-tam-int-ifa-ts:sonic-tam-int-ifa-ts/TAM_INT_IFA_TS_FLOW_TABLE/TAM_INT_IFA_TS_FLOW_TABLE_LIST={name}', name=args[0])
    else:
        # Whole flow table.
        path = cc.Path('/restconf/data/sonic-tam-int-ifa-ts:sonic-tam-int-ifa-ts/TAM_INT_IFA_TS_FLOW_TABLE')

    response = api.get(path)
    if response.ok():
        if response.content:
            if len(args) == 1 and args[0] != "all":
                api_response = response.content['sonic-tam-int-ifa-ts:TAM_INT_IFA_TS_FLOW_TABLE_LIST']
            else:
                api_response = response.content['sonic-tam-int-ifa-ts:TAM_INT_IFA_TS_FLOW_TABLE']['TAM_INT_IFA_TS_FLOW_TABLE_LIST']
            for i in range(len(api_response)):
                api_response[i]['Packets'] = 0
                api_response[i]['Bytes'] = 0
                # BUGFIX: was `and` + `return` — an entry missing only one of the
                # two ACL keys fell through to the key concatenation below and
                # raised KeyError, and a missing entry aborted the whole display.
                # Skip just the incomplete entry instead.
                if "acl-table-name" not in api_response[i] or "acl-rule-name" not in api_response[i]:
                    continue
                acl_counter_key = 'COUNTERS:' + api_response[i]['acl-table-name'] + ':' + api_response[i]['acl-rule-name']
                flow_stats = counters_db.get_all(counters_db.COUNTERS_DB, acl_counter_key)
                if flow_stats is not None:
                    api_response[i]['Packets'] = flow_stats['Packets']
                    api_response[i]['Bytes'] = flow_stats['Bytes']
            show_cli_output("show_tam_int_ifa_ts_flow_stats.j2", api_response)
def get_route_entries():
    """Return the sorted list of route prefixes present in ASIC_DB."""
    asic_db = ConfigDBConnector()
    asic_db.db_connect('ASIC_DB')
    print_message(MODE_DEBUG, "ASIC DB connected")
    route_keys = asic_db.get_keys('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY', False)
    print_message(MODE_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": route_keys}, indent=4))
    # The prefix is the 4th quote-delimited token of the serialized key.
    prefixes = [key.split("\"", -1)[3] for key in route_keys]
    return sorted(prefixes)
def get_routes():
    """Return sorted, lower-cased, prefix-normalized routes from APPL_DB ROUTE_TABLE,
    excluding routes on local interfaces."""
    appl_db = ConfigDBConnector()
    appl_db.db_connect('APPL_DB')
    print_message(syslog.LOG_DEBUG, "APPL DB connected for routes")
    route_keys = appl_db.get_keys('ROUTE_TABLE')
    valid_rt = [add_prefix_ifnot(key.lower()) for key in route_keys if not is_local(key)]
    print_message(syslog.LOG_DEBUG, json.dumps({"ROUTE_TABLE": sorted(valid_rt)}, indent=4))
    return sorted(valid_rt)
def get_route_entries():
    """Return sorted, lower-cased route prefixes from ASIC_DB, excluding local ones."""
    asic_db = ConfigDBConnector()
    asic_db.db_connect('ASIC_DB')
    print_message(syslog.LOG_DEBUG, "ASIC DB connected")
    route_keys = asic_db.get_keys('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY', False)
    rt = []
    for key in route_keys:
        # The prefix is the 4th quote-delimited token of the serialized key.
        prefix = key.lower().split("\"", -1)[3]
        if is_local(prefix):
            continue
        rt.append(prefix)
    print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": sorted(rt)}, indent=4))
    return sorted(rt)
def filter_out_local_interfaces(keys):
    """Return the subset of route keys whose ROUTE_TABLE entry is not bound to a
    local interface (eth0/lo/docker0).

    keys: iterable of route keys, possibly carrying an IP prefix suffix.
    """
    rt = []
    local_if = set(['eth0', 'lo', 'docker0'])
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    for k in keys:
        e = db.get_entry('ROUTE_TABLE', k)
        if not e:
            # Prefix might have been added. So try w/o it.
            e = db.get_entry('ROUTE_TABLE', k.split("/")[0])
        # BUGFIX: entries without an 'ifname' field raised KeyError here;
        # .get() treats them as non-local and keeps the route.
        if not e or (e.get('ifname') not in local_if):
            rt.append(k)
    return rt
def get_tam_int_ifa_ts_supported(args):
    """Render whether the TAM INT IFA TS feature is supported, per the
    'tam_int_ifa_ts_supported' flag in APPL_DB SWITCH_TABLE."""
    # connect to APPL_DB
    app_db = ConfigDBConnector()
    app_db.db_connect('APPL_DB')
    key = 'SWITCH_TABLE:switch'
    data = app_db.get(app_db.APPL_DB, key, 'tam_int_ifa_ts_supported')
    # Anything other than the literal string 'True' is reported as 'False'.
    api_response = {'feature': data if data == 'True' else 'False'}
    show_cli_output("show_tam_ifa_ts_feature_supported.j2", api_response)
def get_routes():
    """Return sorted, prefix-normalized routes from APPL_DB ROUTE_TABLE,
    skipping entries whose nexthop is empty (and logging those skips)."""
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for routes")
    keys = db.get_keys('ROUTE_TABLE')
    print_message(MODE_DEBUG, json.dumps({"ROUTE_TABLE": keys}, indent=4))
    valid_rt = []
    skip_rt = []
    for k in keys:
        # BUGFIX: entries without a 'nexthop' field raised KeyError; treat a
        # missing field the same as an empty nexthop and skip the route.
        entry = db.get_entry('ROUTE_TABLE', k)
        if entry.get('nexthop', '') != '':
            valid_rt.append(add_prefix_ifnot(k))
        else:
            skip_rt.append(k)
    print_message(MODE_INFO, json.dumps({"skipped_routes" : skip_rt}, indent=4))
    return sorted(valid_rt)
def get_routes():
    """Return sorted, prefix-normalized routes from APPL_DB ROUTE_TABLE,
    skipping entries whose nexthop is empty (and logging those skips)."""
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for routes")
    keys = db.get_keys('ROUTE_TABLE')
    print_message(MODE_DEBUG, json.dumps({"ROUTE_TABLE": keys}, indent=4))
    valid_rt = []
    skip_rt = []
    for k in keys:
        # BUGFIX: entries without a 'nexthop' field raised KeyError; treat a
        # missing field the same as an empty nexthop and skip the route.
        entry = db.get_entry('ROUTE_TABLE', k)
        if entry.get('nexthop', '') != '':
            valid_rt.append(add_prefix_ifnot(k))
        else:
            skip_rt.append(k)
    print_message(MODE_INFO, json.dumps({"skipped_routes": skip_rt}, indent=4))
    return sorted(valid_rt)
def update_dhcp_mgmt_ip_info():
    """Add or delete a DHCP-learned management IP entry in APPL_DB.

    Command-line contract (sys.argv):
      [1] interface name, [2] operation ("add" or "del"),
      [3] IP address, [4] netmask.
    """
    app_db = ConfigDBConnector()
    app_db.db_connect('APPL_DB', wait_for_init=False, retry_on=True)
    appdb_entry = {"NULL": "NULL"}
    op = sys.argv[2]
    # Derive the prefix length from the dotted netmask argument.
    plen = ipaddress.ip_network((0, sys.argv[4])).prefixlen
    key = "{}:{}/{}".format(sys.argv[1], sys.argv[3], plen)
    syslog.syslog(
        syslog.LOG_INFO,
        "update_dhcp_mgmt_ip_info : op - {}, key - {}".format(op, key))
    if op == "add":
        app_db.set_entry(APP_MGMT_INTF_TABLE, key, appdb_entry)
    elif op == "del":
        app_db.delete_entry(APP_MGMT_INTF_TABLE, key)
    return
def get_interfaces():
    # Collect interface IP prefixes and addresses from APPL_DB INTF_TABLE,
    # skipping the management (eth0) and docker0 interfaces.
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for interfaces")
    intf = []
    keys = db.get_keys('INTF_TABLE')
    print_message(MODE_DEBUG, json.dumps({"APPL_DB_INTF": keys}, indent=4))
    for k in keys:
        # Key format: "<alias>:<ip>/<plen>"; rejoin with ':' so IPv6
        # addresses (which themselves contain ':') survive the split.
        subk = k.split(':', -1)
        alias = subk[0]
        ip_prefix = ":".join(subk[1:])
        ip = add_prefix(ip_prefix.split("/", -1)[0])
        if (subk[0] == "eth0") or (subk[0] == "docker0"):
            continue
        # NOTE(review): indentation reconstructed from a collapsed source —
        # the subnet is presumably appended for all but 'lo', while the bare
        # address is appended for 'lo' too; confirm against upstream.
        if (subk[0] != "lo"):
            intf.append(ip_subnet(ip_prefix))
        intf.append(ip)
    return sorted(intf)
def get_interfaces():
    """Return sorted, lower-cased, non-local interface IPs from APPL_DB INTF_TABLE."""
    appl_db = ConfigDBConnector()
    appl_db.db_connect('APPL_DB')
    print_message(syslog.LOG_DEBUG, "APPL DB connected for interfaces")
    intf = []
    for key in appl_db.get_keys('INTF_TABLE'):
        # Split only on the first ':' so IPv6 addresses stay intact.
        parts = re.split(':', key.lower(), maxsplit=1)
        if len(parts) == 1:
            # No IP address in key; ignore
            continue
        ip = add_prefix(parts[1].split("/", -1)[0])
        if not is_local(ip):
            intf.append(ip)
    print_message(syslog.LOG_DEBUG, json.dumps({"APPL_DB_INTF": sorted(intf)}, indent=4))
    return sorted(intf)
class DBMigrator():
    """Walks CONFIG_DB through versioned migration steps.

    Each version_* method performs one migration and returns the name of the
    next version method to run (or None when at the latest version).
    """
    def __init__(self):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     none-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_1'
        self.TABLE_NAME      = 'VERSIONS'
        self.TABLE_KEY       = 'DATABASE'
        self.TABLE_FIELD     = 'VERSION'
        self.configDB        = ConfigDBConnector()
        self.configDB.db_connect('CONFIG_DB')

    def migrate_pfc_wd_table(self):
        # Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that doesn't have a version
        string defined in config_DB.
        Nothing can be assumped when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in expected format
        before migrating date to the next version.
        """
        log_info('Handling version_unknown')

        # NOTE: Uncomment next 3 lines of code when the migration code is in
        #       place. Note that returning specific string is intentional,
        #       here we only intended to migrade to DB version 1.0.1.
        #       If new DB version is added in the future, the incremental
        #       upgrade will take care of the subsequent migrations.
        # self.migrate_pfc_wd_table()
        # self.set_version('version_1_0_1')
        # return 'version_1_0_1'

    def version_1_0_1(self):
        """
        Current latest version. Nothing to do here.
        """
        log_info('Handling version_1_0_1')
        return None

    def get_version(self):
        # Returns the stored DB version, or 'version_unknown' if absent.
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]
        return 'version_unknown'

    def set_version(self, version=None):
        # Persists the given (or current) version string into CONFIG_DB.
        if not version:
            version = self.CURRENT_VERSION
        log_info('Setting version to ' + version)
        entry = { self.TABLE_FIELD : version }
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def migrate(self):
        # Runs version handlers in a chain until one returns None; guards
        # against a handler that fails to advance the version.
        version = self.get_version()
        log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version
class DBMigrator():
    """Walks CONFIG_DB (and related APPL/STATE DB data) through versioned
    migration steps up to version_2_0_0.

    Each version_* method performs one migration and returns the name of the
    next version method to run (or None when at the latest version).
    """
    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     none-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_2_0_0'
        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        self.appDB = SonicV2Connector(host='127.0.0.1')
        if self.appDB is not None:
            self.appDB.connect(self.appDB.APPL_DB)

        self.stateDB = SonicV2Connector(host='127.0.0.1')
        if self.stateDB is not None:
            self.stateDB.connect(self.stateDB.STATE_DB)

        version_info = device_info.get_sonic_version_info()
        asic_type = version_info.get('asic_type')
        self.asic_type = asic_type

        if asic_type == "mellanox":
            from mellanox_buffer_migrator import MellanoxBufferMigrator
            self.mellanox_buffer_migrator = MellanoxBufferMigrator(self.configDB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data:
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if IP address is present in the key. If it
        is present, then the key would be a tuple or else, it shall be
        be string
        '''
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from existing INTERFACE table with IP Prefix
        to have an additional ONE entry without IP Prefix. For. e.g, for an
        entry "Vlan1000|192.168.0.1/21": {}", this function shall add an entry
        without IP prefix as ""Vlan1000": {}". This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
            'INTERFACE',
            'PORTCHANNEL_INTERFACE',
            'VLAN_INTERFACE',
            'LOOPBACK_INTERFACE'
        }
        # First pass: record interfaces that already have a prefix-less entry.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data:
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue
        # Second pass: add a prefix-less entry for every remaining interface.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data:
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log.log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from existing INTF table in APP DB during warmboot
        with IP Prefix to have an additional ONE entry without IP Prefix.
        For. e.g, for an entry "Vlan1000:192.168.0.1/21": {}", this function
        shall add an entry without IP prefix as ""Vlan1000": {}". This also
        migrates 'lo' to 'Loopback0' interface.
        '''
        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                # Rename legacy 'lo' keys to 'Loopback0'.
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log.log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')
            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log.log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def migrate_copp_table(self):
        '''
        Delete the existing COPP table
        '''
        if self.appDB is None:
            return
        keys = self.appDB.keys(self.appDB.APPL_DB, "COPP_TABLE:*")
        if keys is None:
            return
        for copp_key in keys:
            self.appDB.delete(self.appDB.APPL_DB, copp_key)

    def migrate_config_db_buffer_tables_for_dynamic_calculation(self, speed_list, cable_len_list, default_dynamic_th,
                                                                default_lossless_profiles, abandon_method, append_item_method):
        '''
        Migrate buffer tables to dynamic calculation mode
        parameters
        @speed_list - list of speed supported
        @cable_len_list - list of cable length supported
        @default_dynamic_th - default dynamic th
        @default_lossless_profiles - default lossless profiles from the previous image
        @abandon_method - a function which is called to abandon the migration and
                          keep the current configuration if the current one doesn't
                          match the default one
        @append_item_method - a function which is called to append an item to the
                              list of pending commit items; any update to buffer
                              configuration will be pended and won't be applied until
                              all configuration is checked and aligns with the default one

        1. Buffer profiles for lossless PGs in BUFFER_PROFILE table will be removed
           if their names have the convention of pg_lossless_<speed>_<cable_length>_profile
           where the speed and cable_length belongs speed_list and cable_len_list
           respectively and the dynamic_th is equal to default_dynamic_th
        2. Insert tables required for dynamic buffer calculation
           - DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE: {'default_dynamic_th': default_dynamic_th}
           - LOSSLESS_TRAFFIC_PATTERN|AZURE: {'mtu': '1500', 'small_packet_percentage': '100'}
        3. For lossless dynamic PGs, remove the explicit referencing buffer profiles
           Before: BUFFER_PG|<port>|3-4: {'profile': 'BUFFER_PROFILE|pg_lossless_<speed>_<cable_length>_profile'}
           After:  BUFFER_PG|<port>|3-4: {'profile': 'NULL'}
        '''
        # Migrate BUFFER_PROFILEs, removing dynamically generated profiles
        dynamic_profile = self.configDB.get_table('BUFFER_PROFILE')
        profile_pattern = 'pg_lossless_([1-9][0-9]*000)_([1-9][0-9]*m)_profile'
        for name, info in dynamic_profile.items():
            m = re.search(profile_pattern, name)
            if not m:
                continue
            speed = m.group(1)
            cable_length = m.group(2)
            if speed in speed_list and cable_length in cable_len_list:
                log.log_info("current profile {} {}".format(name, info))
                log.log_info("default profile {} {}".format(name, default_lossless_profiles.get(name)))
                default_profile = default_lossless_profiles.get(name)
                if info.get("xon") == default_profile.get("xon") and info.get("size") == default_profile.get("size") \
                        and info.get('dynamic_th') == default_dynamic_th:
                    append_item_method(('BUFFER_PROFILE', name, None))
                    log.log_info("Lossless profile {} has been removed".format(name))
                else:
                    # BUGFIX: the message contained a '{}' placeholder but
                    # .format(name) was never applied.
                    log.log_notice("Lossless profile {} doesn't match the default configuration, keep using traditional buffer calculation mode".format(name))
                    abandon_method()
                    return True

        # Migrate BUFFER_PGs, removing the explicit designated profiles
        buffer_pgs = self.configDB.get_table('BUFFER_PG')
        ports = self.configDB.get_table('PORT')
        all_cable_lengths = self.configDB.get_table('CABLE_LENGTH')
        if not buffer_pgs or not ports or not all_cable_lengths:
            log.log_notice("At lease one of tables BUFFER_PG, PORT and CABLE_LENGTH hasn't been defined, skip following migration")
            abandon_method()
            return True

        cable_lengths = all_cable_lengths[list(all_cable_lengths.keys())[0]]
        for name, profile in buffer_pgs.items():
            # do the db migration
            port, pg = name
            if pg != '3-4':
                continue
            try:
                profile_name = profile['profile'][1:-1].split('|')[1]
                m = re.search(profile_pattern, profile_name)
            except Exception:
                continue
            if not m:
                continue
            speed = m.group(1)
            cable_length = m.group(2)
            try:
                if speed == ports[port]['speed'] and cable_length == cable_lengths[port]:
                    append_item_method(('BUFFER_PG', name, {'profile': 'NULL'}))
                else:
                    log.log_notice("Lossless PG profile {} for port {} doesn't match its speed {} or cable length {}, keep using traditional buffer calculation mode"
                                   .format(profile_name, port, speed, cable_length))
                    abandon_method()
                    return True
            except Exception:
                continue

        # Insert other tables required for dynamic buffer calculation
        metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
        metadata['buffer_model'] = 'dynamic'
        append_item_method(('DEVICE_METADATA', 'localhost', metadata))
        append_item_method(('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE', {'default_dynamic_th': default_dynamic_th}))
        append_item_method(('LOSSLESS_TRAFFIC_PATTERN', 'AZURE', {'mtu': '1500', 'small_packet_percentage': '100'}))

        return True

    def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profiles=None, buffer_pgs=None):
        '''
        This is the very first warm reboot of buffermgrd (dynamic) if the
        system reboot from old image by warm-reboot.
        In this case steps need to be taken to get buffermgrd prepared (for warm reboot)

        During warm reboot, buffer tables should be installed in the first place.
        However, it isn't able to achieve that when system is warm-rebooted from an
        old image without dynamic buffer supported, because the buffer info wasn't
        in the APPL_DB in the old image. The solution is to copy that info from
        CONFIG_DB into APPL_DB in db_migrator. During warm-reboot, db_migrator
        adjusts buffer info in CONFIG_DB by removing some fields according to
        requirement from dynamic buffer calculation. The buffer info before that
        adjustment needs to be copied to APPL_DB.

        1. set WARM_RESTART_TABLE|buffermgrd as {restore_count: 0}
        2. Copy the following tables from CONFIG_DB into APPL_DB in case of warm reboot.
           The separator in fields that reference objects in other table needs to be
           updated from '|' to ':'
           - BUFFER_POOL
           - BUFFER_PROFILE, separator updated for field 'pool'
           - BUFFER_PG, separator updated for field 'profile'
           - BUFFER_QUEUE, separator updated for field 'profile'
           - BUFFER_PORT_INGRESS_PROFILE_LIST, separator updated for field 'profile_list'
           - BUFFER_PORT_EGRESS_PROFILE_LIST, separator updated for field 'profile_list'
        '''
        warmreboot_state = self.stateDB.get(self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable')
        mmu_size = self.stateDB.get(self.stateDB.STATE_DB, 'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size')
        if warmreboot_state == 'true' and not mmu_size:
            log.log_notice("This is the very first run of buffermgrd (dynamic), prepare info required from warm reboot")
        else:
            return True

        # (table, pre-fetched entries or None, field whose '|' separators must become ':')
        buffer_table_list = [
            ('BUFFER_POOL', buffer_pools, None),
            ('BUFFER_PROFILE', buffer_profiles, 'pool'),
            ('BUFFER_PG', buffer_pgs, 'profile'),
            ('BUFFER_QUEUE', None, 'profile'),
            ('BUFFER_PORT_INGRESS_PROFILE_LIST', None, 'profile_list'),
            ('BUFFER_PORT_EGRESS_PROFILE_LIST', None, 'profile_list')
        ]

        for pair in buffer_table_list:
            keys_copied = []
            keys_ignored = []
            table_name, entries, reference_field_name = pair
            app_table_name = table_name + "_TABLE"
            if not entries:
                entries = self.configDB.get_table(table_name)
            for key, items in entries.items():
                # copy items to appl db
                if reference_field_name:
                    confdb_ref = items.get(reference_field_name)
                    if not confdb_ref or confdb_ref == "NULL":
                        keys_ignored.append(key)
                        continue
                    items_referenced = confdb_ref.split(',')
                    appdb_ref = ""
                    first_item = True
                    for item in items_referenced:
                        if first_item:
                            first_item = False
                        else:
                            appdb_ref += ','
                        subitems = item.split('|')
                        first_key = True
                        for subitem in subitems:
                            if first_key:
                                # '[TABLE' -> '[TABLE_TABLE', then ':'-joined keys.
                                appdb_ref += subitem + '_TABLE'
                                first_key = False
                            else:
                                appdb_ref += ':' + subitem
                    items[reference_field_name] = appdb_ref

                keys_copied.append(key)
                if type(key) is tuple:
                    appl_db_key = app_table_name + ':' + ':'.join(key)
                else:
                    appl_db_key = app_table_name + ':' + key
                for field, data in items.items():
                    self.appDB.set(self.appDB.APPL_DB, appl_db_key, field, data)

            if keys_copied:
                log.log_info("The following items in table {} in CONFIG_DB have been copied to APPL_DB: {}"
                             .format(table_name, keys_copied))
            if keys_ignored:
                # BUGFIX: this log previously reported keys_copied instead of
                # the actually ignored keys.
                log.log_info("The following items in table {} in CONFIG_DB have been ignored: {}"
                             .format(table_name, keys_ignored))

        return True

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that doesn't have a version
        string defined in config_DB.
        Nothing can be assumped when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in expected format
        before migrating date to the next version.
        """
        log.log_info('Handling version_unknown')

        # NOTE: Uncomment next 3 lines of code when the migration code is in
        #       place. Note that returning specific string is intentional,
        #       here we only intended to migrade to DB version 1.0.1.
        #       If new DB version is added in the future, the incremental
        #       upgrade will take care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log.log_info('Handling version_1_0_1')
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log.log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_2', 'version_1_0_3') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        return 'version_1_0_3'

    def version_1_0_3(self):
        """
        Version 1_0_3.
        """
        log.log_info('Handling version_1_0_3')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_3', 'version_1_0_4') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_3', 'version_1_0_4') \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():
                self.set_version('version_1_0_4')
        else:
            self.set_version('version_1_0_4')
        return 'version_1_0_4'

    def version_1_0_4(self):
        """
        Version 1_0_4: migrate buffer tables for dynamic buffer calculation
        and record the buffer model in DEVICE_METADATA.
        """
        log.log_info('Handling version_1_0_4')

        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            speed_list = self.mellanox_buffer_migrator.default_speed_list
            cable_len_list = self.mellanox_buffer_migrator.default_cable_len_list
            buffer_pools = self.configDB.get_table('BUFFER_POOL')
            buffer_profiles = self.configDB.get_table('BUFFER_PROFILE')
            buffer_pgs = self.configDB.get_table('BUFFER_PG')
            default_lossless_profiles = self.mellanox_buffer_migrator.mlnx_get_default_lossless_profile('version_1_0_4')
            abandon_method = self.mellanox_buffer_migrator.mlnx_abandon_pending_buffer_configuration
            append_method = self.mellanox_buffer_migrator.mlnx_append_item_on_pending_configuration_list

            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_4', 'version_2_0_0') \
               and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_4', 'version_2_0_0') \
               and self.migrate_config_db_buffer_tables_for_dynamic_calculation(speed_list, cable_len_list, '0',
                                                                                default_lossless_profiles,
                                                                                abandon_method, append_method) \
               and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration() \
               and self.prepare_dynamic_buffer_for_warm_reboot(buffer_pools, buffer_profiles, buffer_pgs):
                metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
                if not metadata.get('buffer_model'):
                    metadata['buffer_model'] = 'traditional'
                    self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata)
                    log.log_notice('Setting buffer_model to traditional')
                else:
                    log.log_notice('Got buffer_model {}'.format(metadata.get('buffer_model')))
                self.set_version('version_2_0_0')
        else:
            self.prepare_dynamic_buffer_for_warm_reboot()
            metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')
            metadata['buffer_model'] = 'traditional'
            self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata)
            log.log_notice('Setting buffer_model to traditional')
            self.set_version('version_2_0_0')
        return 'version_2_0_0'

    def version_2_0_0(self):
        """
        Current latest version. Nothing to do here.
        """
        log.log_info('Handling version_2_0_0')
        return None

    def get_version(self):
        # Returns the stored DB version, or 'version_unknown' if absent.
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if version and version[self.TABLE_FIELD]:
            return version[self.TABLE_FIELD]
        return 'version_unknown'

    def set_version(self, version=None):
        # Persists the given (or current) version string into CONFIG_DB.
        if not version:
            version = self.CURRENT_VERSION
        log.log_info('Setting version to ' + version)
        entry = {self.TABLE_FIELD: version}
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def common_migration_ops(self):
        # Seed CONFIG_DB tables from INIT_CFG for tables not yet present,
        # then drop the legacy COPP table from APPL_DB.
        try:
            with open(INIT_CFG_FILE) as f:
                init_db = json.load(f)
        except Exception as e:
            raise Exception(str(e))

        for init_cfg_table, table_val in init_db.items():
            data = self.configDB.get_table(init_cfg_table)
            if data:
                # Ignore overriding the values that pre-exist in configDB
                continue
            log.log_info("Migrating table {} from INIT_CFG to config_db".format(init_cfg_table))
            # Update all tables that do not exist in configDB but are present in INIT_CFG
            for init_table_key, init_table_val in table_val.items():
                self.configDB.set_entry(init_cfg_table, init_table_key, init_table_val)

        self.migrate_copp_table()

    def migrate(self):
        # Runs version handlers in a chain until one returns None; guards
        # against a handler that fails to advance the version.
        version = self.get_version()
        log.log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version

        # Perform common migration ops
        self.common_migration_ops()
class Ts(object):
    """CLI helper for TAM INT IFA Tail-Stamping: configures flows in CONFIG_DB
    and renders status/statistics from CONFIG/COUNTERS/APPL DB.

    NOTE(review): this class uses Python-2 ``print`` statements.
    """
    def __init__(self):
        # connect CONFIG DB
        self.config_db = ConfigDBConnector()
        self.config_db.connect()

        # connect COUNTER DB
        self.counters_db = ConfigDBConnector()
        self.counters_db.db_connect('COUNTERS_DB')

        # connect APPL DB
        self.app_db = ConfigDBConnector()
        self.app_db.db_connect('APPL_DB')

    def config_enable(self, args):
        """ Enable ifa """
        key = 'feature'
        self.config_db.set_entry(TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX, key, {'enable': "true"})
        print "Enabled IFA"
        return

    def config_disable(self, args):
        """ Disable ifa """
        key = 'feature'
        self.config_db.set_entry(TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX, key, {'enable': "false"})
        print "Disabled IFA"
        return

    def config_flow(self, args):
        # Create a new flow entry (ACL table/rule binding); refuses to
        # overwrite an existing flow of the same name.
        key = TAM_INT_IFA_FLOW_TS_TABLE_PREFIX + '|' + args.flowname
        entry = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        if entry is None:
            if args.acl_table_name:
                self.config_db.mod_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX, args.flowname,
                                         {'acl-table-name': args.acl_table_name})
            if args.acl_rule_name:
                self.config_db.mod_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX, args.flowname,
                                         {'acl-rule-name': args.acl_rule_name})
        else:
            print "Entry Already Exists"
            return False
        return

    def clear_each_flow(self, flowname):
        # Delete one flow entry; returns False if it does not exist.
        entry = self.config_db.get_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX, flowname)
        if entry:
            self.config_db.set_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX, flowname, None)
        else:
            print "Entry Not Found"
            return False
        return

    def clear_flow(self, args):
        # Delete one flow, or every flow when the name is "all".
        key = args.flowname
        if key == "all":
            # Get all the flow keys
            table_data = self.config_db.get_keys(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            if not table_data:
                return True
            # Clear each flow key
            for key in table_data:
                self.clear_each_flow(key)
        else:
            # Clear the specified flow entry
            self.clear_each_flow(key)
        return

    def show_flow(self, args):
        self.get_print_all_ifa_flows(args.flowname)
        return

    def show_status(self):
        # Render feature state, flow count and device config via Jinja template.
        # Get data for all keys
        flowtable_keys = self.config_db.get_keys(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)

        api_response = {}
        key = TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX + '|' + 'feature'
        raw_data_feature = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:feature-data'] = raw_data_feature
        api_inner_response = {}
        api_inner_response["num-of-flows"] = len(flowtable_keys)
        api_response['ietf-ts:num-of-flows'] = api_inner_response
        key = TAM_DEVICE_TABLE_PREFIX + '|' + 'device'
        raw_data_device = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:device-data'] = raw_data_device
        show_cli_output("show_status.j2", api_response)
        return

    def get_ifa_flow_stat(self, flowname):
        # Build a per-flow statistics dict by joining the flow's ACL binding
        # with the matching 'COUNTERS:<table>:<rule>' entry in COUNTERS_DB.
        api_response_stat = {}
        api_response, entryfound = self.get_ifa_flow_info(flowname)
        api_response_stat['flow-name'] = flowname
        if entryfound is not None:
            for k in api_response:
                if k == "ietf-ts:each-flow-data":
                    acl_rule_name = api_response['ietf-ts:each-flow-data']['acl-rule-name']
                    acl_table_name = api_response['ietf-ts:each-flow-data']['acl-table-name']
                    api_response_stat['rule-name'] = acl_rule_name
                    api_response_stat['table-name'] = acl_table_name
                    # NOTE(review): nesting reconstructed from a collapsed
                    # source; the counter lookup is placed here so that
                    # acl_rule_name is always bound — confirm against upstream.
                    acl_rule_keys = self.config_db.get_keys(ACL_RULE_TABLE_PREFIX)
                    for acl_rule_key in acl_rule_keys:
                        if acl_rule_key[1] == acl_rule_name:
                            acl_counter_key = 'COUNTERS:' + acl_rule_key[0] + ':' + acl_rule_key[1]
                            raw_ifa_stats = self.counters_db.get_all(self.counters_db.COUNTERS_DB, acl_counter_key)
                            api_response_stat['ietf-ts:ifa-stats'] = raw_ifa_stats
        return api_response_stat, entryfound

    def get_print_all_ifa_stats(self, name):
        # Collect statistics for one flow, or for every flow when name == 'all',
        # and render them via Jinja template.
        stat_dict = {}
        stat_list = []
        if name != 'all':
            api_response, entryfound = self.get_ifa_flow_stat(name)
            if entryfound is not None:
                stat_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_stat_response, entryfound = self.get_ifa_flow_stat(k)
                if entryfound is not None:
                    stat_list.append(api_each_stat_response)

        stat_dict['stat-list'] = stat_list
        show_cli_output("show_statistics_flow.j2", stat_dict)
        return

    def show_statistics(self, args):
        self.get_print_all_ifa_stats(args.flowname)
        return

    def get_ifa_flow_info(self, k):
        # Fetch one flow's raw CONFIG_DB data; the second return value is the
        # raw entry itself (None when the flow does not exist).
        flow_data = {}
        flow_data['acl-table-name'] = ''
        flow_data['sampling-rate'] = ''
        flow_data['collector'] = ''

        api_response = {}
        key = TAM_INT_IFA_FLOW_TS_TABLE_PREFIX + '|' + k
        raw_flow_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:flow-key'] = k
        api_response['ietf-ts:each-flow-data'] = raw_flow_data
        return api_response, raw_flow_data

    def get_print_all_ifa_flows(self, name):
        # Collect one flow, or every flow when name == 'all', and render
        # them via Jinja template.
        flow_dict = {}
        flow_list = []
        if name != 'all':
            api_response, entryfound = self.get_ifa_flow_info(name)
            if entryfound is not None:
                flow_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_ifa_flow_info(k)
                if entryfound is not None:
                    flow_list.append(api_each_flow_response)

        flow_dict['flow-list'] = flow_list
        show_cli_output("show_flow.j2", flow_dict)
        return

    def get_ifa_supported_info(self):
        # Report feature support from CONFIG_DB; returns None when the
        # feature table entry is absent.
        key = 'TAM_INT_IFA_TS_FEATURE_TABLE|feature'
        data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        if data is None:
            return
        if data['enable'] == "true":
            print "TAM INT IFA TS Supported - True"
            return True
        elif data['enable'] == "false":
            print "TAM INT IFA TS Supported - False "
            return False
        return

    def get_ifa_enabled_info(self):
        # NOTE(review): this method returns True on every path — the final
        # `return True` looks like it was meant to be `return False`;
        # left unchanged here (documentation-only edit).
        print "In get_ifa_enabled_info"
        key = 'SWITCH_TABLE:switch'
        data = self.app_db.get(self.app_db.APPL_DB, key, 'ifa_enabled')
        if data and data == 'True':
            return True
        return True
class DBMigrator():
    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     none-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_4'

        # VERSIONS|DATABASE { VERSION: ... } tracks the schema version in CONFIG_DB.
        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True,
                                              namespace=namespace, **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        self.appDB = SonicV2Connector(host='127.0.0.1')
        if self.appDB is not None:
            self.appDB.connect(self.appDB.APPL_DB)

        version_info = device_info.get_sonic_version_info()
        asic_type = version_info.get('asic_type')
        self.asic_type = asic_type

        if asic_type == "mellanox":
            # Imported lazily so non-Mellanox platforms don't need the module.
            from mellanox_buffer_migrator import MellanoxBufferMigrator
            self.mellanox_buffer_migrator = MellanoxBufferMigrator(self.configDB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if IP address is present in the key. If it
        is present, then the key would be a tuple or else, it shall be
        be string
        '''
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from existing INTERFACE table with IP Prefix
        to have an additional ONE entry without IP Prefix. For. e.g, for an
        entry "Vlan1000|192.168.0.1/21": {}", this function shall add an entry
        without IP prefix as ""Vlan1000": {}". This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
            'INTERFACE',
            'PORTCHANNEL_INTERFACE',
            'VLAN_INTERFACE',
            'LOOPBACK_INTERFACE'
        }
        # First pass: remember interfaces that already have a prefix-less entry.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue
        # Second pass: add a prefix-less entry for every interface that lacks one.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log.log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from existing INTF table in APP DB during warmboot with IP Prefix
        to have an additional ONE entry without IP Prefix. For. e.g, for an entry
        "Vlan1000:192.168.0.1/21": {}", this function shall add an entry without IP prefix
        as ""Vlan1000": {}". This also migrates 'lo' to 'Loopback0' interface
        '''
        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                # Rename the legacy loopback key in place.
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log.log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')
            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log.log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that doesn't have a version
        string defined in config_DB.
        Nothing can be assumped when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in expected format
        before migrating date to the next version.
        """
        log.log_info('Handling version_unknown')

        # Returning this specific string is intentional: we only migrate as
        # far as version 1.0.2 here; the incremental upgrade chain in
        # migrate() takes care of the subsequent migrations.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log.log_info('Handling version_1_0_1')

        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log.log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_2', 'version_1_0_3'):
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        return 'version_1_0_3'

    def version_1_0_3(self):
        """
        Version 1_0_3.
        """
        log.log_info('Handling version_1_0_3')
        # Check ASIC type, if Mellanox platform then need DB migration
        if self.asic_type == "mellanox":
            if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_3', 'version_1_0_4') \
                    and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_3', 'version_1_0_4'):
                self.set_version('version_1_0_4')
        else:
            self.set_version('version_1_0_4')
        return 'version_1_0_4'

    def version_1_0_4(self):
        """
        Current latest version. Nothing to do here.
        """
        log.log_info('Handling version_1_0_4')
        return None

    def get_version(self):
        """Return the version string stored in CONFIG_DB, or 'version_unknown'."""
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        # Use .get() so an entry that exists without a VERSION field degrades
        # to 'version_unknown' instead of raising KeyError.
        if version and version.get(self.TABLE_FIELD):
            return version[self.TABLE_FIELD]

        return 'version_unknown'

    def set_version(self, version=None):
        """Write *version* (default: CURRENT_VERSION) into CONFIG_DB."""
        if not version:
            version = self.CURRENT_VERSION
        log.log_info('Setting version to ' + version)
        entry = { self.TABLE_FIELD : version }
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def common_migration_ops(self):
        """Seed tables present in INIT_CFG_FILE but absent from CONFIG_DB."""
        try:
            with open(INIT_CFG_FILE) as f:
                init_db = json.load(f)
        except Exception as e:
            raise Exception(str(e))

        for init_cfg_table, table_val in init_db.items():
            data = self.configDB.get_table(init_cfg_table)
            if data:
                # Ignore overriding the values that pre-exist in configDB
                continue
            log.log_info("Migrating table {} from INIT_CFG to config_db".format(init_cfg_table))
            # Update all tables that do not exist in configDB but are present in INIT_CFG
            for init_table_key, init_table_val in table_val.items():
                self.configDB.set_entry(init_cfg_table, init_table_key, init_table_val)

    def migrate(self):
        """Run version handlers in a chain until one returns None."""
        version = self.get_version()
        log.log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version
        # Perform common migration ops
        self.common_migration_ops()
#!/usr/bin/python from sonic_py_common import device_info, logger from swsssdk import ConfigDBConnector import time configdb = ConfigDBConnector(**{}) configdb.db_connect('CONFIG_DB') def copy_profile_with_pool_replaced(profile, new_name, new_pool): profile['pool'] = '[BUFFER_POOL|{}]'.format(new_pool) configdb.set_entry('BUFFER_PROFILE', new_name, profile) def copy_profile_list_with_profile_replaced(table, pl, port, profile_list): pl['profile_list'] = profile_list configdb.set_entry(table, port, pl) # step 1: Create a new buffer pools for lossy and lossless: ingress_lossless_pool_new. # It can be copied from ingress_lossless_pool with size updated properly. ingress_pool = {'type': 'ingress', 'mode': 'dynamic', 'size': '7719936'} egress_lossy_pool = {'type': 'egress', 'mode': 'dynamic', 'size': '7719936'} configdb.set_entry('BUFFER_POOL', 'ingress_pool', ingress_pool) configdb.set_entry('BUFFER_POOL', 'egress_lossy_pool_new', egress_lossy_pool) # step 2: Create the following new buffer profiles based on the new ingress pool profiles = configdb.get_table('BUFFER_PROFILE') for name, profile in profiles.iteritems(): if name[0:12] == 'pg_lossless_' or name[0:12] == 'ingress_loss':
class DBMigrator():
    def __init__(self, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     none-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_1'

        # CONFIG_DB location where the schema version string is persisted.
        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        self.configDB = ConfigDBConnector(**db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

    def migrate_pfc_wd_table(self):
        '''
        Copy every entry of PFC_WD_TABLE into PFC_WD, then drop the old table.
        '''
        old_entries = self.configDB.get_table('PFC_WD_TABLE')
        for entry_key, entry_val in old_entries.items():
            self.configDB.set_entry('PFC_WD', entry_key, entry_val)
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        A key that carries an IP prefix is represented as a tuple;
        a plain interface name is a bare string.
        '''
        return isinstance(key, tuple)

    def migrate_interface_table(self):
        '''
        For every "Vlan1000|192.168.0.1/21"-style entry in the interface
        tables, ensure a companion prefix-less entry ("Vlan1000") exists.
        Needed for VRF compatibility.
        '''
        if_tables = {'INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE'}

        # Pass 1: record interfaces that already have a prefix-less entry.
        seen_plain = []
        for table in if_tables:
            for key in self.configDB.get_table(table):
                if not self.is_ip_prefix_in_key(key):
                    seen_plain.append(key)

        # Pass 2: create the missing prefix-less entries.
        for table in if_tables:
            table_entries = self.configDB.get_table(table)
            for key in table_entries:
                if not self.is_ip_prefix_in_key(key) or key[0] in seen_plain:
                    continue
                log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], table_entries[key])
                seen_plain.append(key[0])

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that doesn't have a version
        string defined in config_DB.
        Nothing can be assumped when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in expected format
        before migrating date to the next version.
        """
        log_info('Handling version_unknown')

        # Returning the explicit target string hands control back to
        # migrate(), which walks the chain one version at a time.
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.set_version('version_1_0_1')
        return 'version_1_0_1'

    def version_1_0_1(self):
        """
        Current latest version. Nothing to do here.
        """
        log_info('Handling version_1_0_1')
        return None

    def get_version(self):
        """Return the stored schema version, or 'version_unknown' if unset."""
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        if not version or not version[self.TABLE_FIELD]:
            return 'version_unknown'
        return version[self.TABLE_FIELD]

    def set_version(self, version=None):
        """Persist *version* (defaults to CURRENT_VERSION) into CONFIG_DB."""
        version = version or self.CURRENT_VERSION
        log_info('Setting version to ' + version)
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY,
                                {self.TABLE_FIELD: version})

    def migrate(self):
        """Invoke version handlers in sequence until one returns None."""
        version = self.get_version()
        log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception(
                    'Version migrate from %s stuck in same version' % version)
            version = next_version
class DropMon(object): def __init__(self): # connect CONFIG DB self.config_db = ConfigDBConnector() self.config_db.connect() # connect COUNTERS_DB self.counters_db = ConfigDBConnector() self.counters_db.db_connect('COUNTERS_DB') # connect APPL DB self.app_db = ConfigDBConnector() self.app_db.db_connect('APPL_DB') def config_drop_mon(self, args): self.config_db.mod_entry( TAM_DROP_MONITOR_FLOW_TABLE, args.flowname, { 'acl-table': args.acl_table, 'acl-rule': args.acl_rule, 'collector': args.dropcollector, 'sample': args.dropsample }) return def config_drop_mon_aging(self, args): self.config_db.mod_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE, "aging", {'aging-interval': args.aginginterval}) return def config_drop_mon_sample(self, args): self.config_db.mod_entry(SAMPLE_RATE_TABLE, args.samplename, {'sampling-rate': args.rate}) return def clear_single_drop_mon_flow(self, key): entry = self.config_db.get_entry(TAM_DROP_MONITOR_FLOW_TABLE, key) if entry: self.config_db.set_entry(TAM_DROP_MONITOR_FLOW_TABLE, key, None) else: return False return def clear_drop_mon_flow(self, args): key = args.flowname if key == "all": # Get all the flow keys table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE) if not table_data: return True # Clear each flow key for key in table_data: self.clear_single_drop_mon_flow(key) else: # Clear the specified flow entry self.clear_single_drop_mon_flow(key) return def clear_drop_mon_sample(self, args): key = args.samplename entry = self.config_db.get_entry(SAMPLE_RATE_TABLE, key) if entry: self.config_db.set_entry(SAMPLE_RATE_TABLE, key, None) else: print "Entry Not Found" return False return def clear_drop_mon_aging_int(self, args): key = "aging" entry = self.config_db.get_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE, key) if entry: self.config_db.set_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE, key, None) else: return False return def show_flow(self, args): self.get_print_all_dropmon_flows(args.flowname) return def 
get_dropmon_flow_stat(self, flowname): api_response_stat = {} api_response, entryfound = self.get_dropmon_flow_info(flowname) api_response_stat['flow-name'] = flowname if entryfound is not None: for k in api_response: if k == "ietf-ts:each-flow-data": acl_rule = api_response['ietf-ts:each-flow-data'][ 'acl-rule'] acl_table = api_response['ietf-ts:each-flow-data'][ 'acl-table'] api_response_stat['rule-name'] = acl_rule api_response_stat['table-name'] = acl_table acl_rule_keys = self.config_db.get_keys(ACL_RULE_TABLE_PREFIX) for acl_rule_key in acl_rule_keys: if acl_rule_key[1] == acl_rule: acl_counter_key = 'COUNTERS:' + acl_rule_key[ 0] + ':' + acl_rule_key[1] raw_dropmon_stats = self.counters_db.get_all( self.counters_db.COUNTERS_DB, acl_counter_key) api_response_stat['ietf-ts:dropmon-stats'] = raw_ifa_stats return api_response_stat, entryfound def get_print_all_dropmon_stats(self, name): stat_dict = {} stat_list = [] if name != 'all': api_response, entryfound = self.get_dropmon_flow_stat(name) if entryfound is not None: stat_list.append(api_response) else: table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE) # Get data for all keys for k in table_data: api_each_stat_response, entryfound = self.get_dropmon_flow_stat( k) if entryfound is not None: stat_list.append(api_each_stat_response) stat_dict['stat-list'] = stat_list show_cli_output("show_statistics_flow.j2", stat_dict) return def show_statistics(self, args): self.get_print_all_dropmon_stats(args.flowname) return def show_aging_interval(self, args): key = "aging" entry = self.config_db.get_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE, key) if entry: print "Aging interval : {}".format(entry['aging-interval']) return def show_sample(self, args): self.get_print_all_sample(args.samplename) return def get_dropmon_flow_info(self, k): flow_data = {} flow_data['acl-table-name'] = '' flow_data['sampling-rate'] = '' flow_data['collector'] = '' api_response = {} key = TAM_DROP_MONITOR_FLOW_TABLE + '|' + k 
raw_flow_data = self.config_db.get_all(self.config_db.CONFIG_DB, key) if raw_flow_data: sample = raw_flow_data['sample'] rate = self.config_db.get_entry(SAMPLE_RATE_TABLE, sample) raw_flow_data['sample'] = rate['sampling-rate'] api_response['ietf-ts:flow-key'] = k api_response['ietf-ts:each-flow-data'] = raw_flow_data return api_response, raw_flow_data def get_print_all_dropmon_flows(self, name): flow_dict = {} flow_list = [] if name != 'all': api_response, entryfound = self.get_dropmon_flow_info(name) if entryfound is not None: flow_list.append(api_response) else: table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE) # Get data for all keys for k in table_data: api_each_flow_response, entryfound = self.get_dropmon_flow_info( k) if entryfound is not None: flow_list.append(api_each_flow_response) flow_dict['flow-list'] = flow_list show_cli_output("show_drop_monitor_flow.j2", flow_dict) return def get_sample_info(self, k): sample_data = {} sample_data['sampling-rate'] = '' api_response = {} key = SAMPLE_RATE_TABLE + '|' + k raw_sample_data = self.config_db.get_all(self.config_db.CONFIG_DB, key) api_response['ietf-ts:sample-key'] = k api_response['ietf-ts:each-sample-data'] = raw_sample_data return api_response, raw_sample_data def get_print_all_sample(self, name): sample_dict = {} sample_list = [] if name != 'all': api_response, entryfound = self.get_sample_info(name) if entryfound is not None: sample_list.append(api_response) else: table_data = self.config_db.get_keys(SAMPLE_RATE_TABLE) # Get data for all keys for k in table_data: api_each_flow_response, entryfound = self.get_sample_info(k) if entryfound is not None: sample_list.append(api_each_flow_response) sample_dict['sample-list'] = sample_list show_cli_output("show_sample.j2", sample_dict) return
class DBMigrator():
    def __init__(self, namespace, socket=None):
        """
        Version string format:
           version_<major>_<minor>_<build>
              major: starting from 1, sequentially incrementing in master
                     branch.
              minor: in github branches, minor version stays in 0. This minor
                     version creates space for private branches derived from
                     github public branches. These private branches shall use
                     none-zero values.
              build: sequentially increase within a minor version domain.
        """
        self.CURRENT_VERSION = 'version_1_0_3'

        # CONFIG_DB location where the schema version string is persisted.
        self.TABLE_NAME = 'VERSIONS'
        self.TABLE_KEY = 'DATABASE'
        self.TABLE_FIELD = 'VERSION'

        db_kwargs = {}
        if socket:
            db_kwargs['unix_socket_path'] = socket

        if namespace is None:
            self.configDB = ConfigDBConnector(**db_kwargs)
        else:
            self.configDB = ConfigDBConnector(use_unix_socket_path=True,
                                              namespace=namespace, **db_kwargs)
        self.configDB.db_connect('CONFIG_DB')

        self.appDB = SonicV2Connector(host='127.0.0.1')
        if self.appDB is not None:
            self.appDB.connect(self.appDB.APPL_DB)

    def migrate_pfc_wd_table(self):
        '''
        Migrate all data entries from table PFC_WD_TABLE to PFC_WD
        '''
        data = self.configDB.get_table('PFC_WD_TABLE')
        for key in data.keys():
            self.configDB.set_entry('PFC_WD', key, data[key])
        self.configDB.delete_table('PFC_WD_TABLE')

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if IP address is present in the key. If it
        is present, then the key would be a tuple or else, it shall be
        be string
        '''
        return (isinstance(key, tuple))

    def migrate_interface_table(self):
        '''
        Migrate all data from existing INTERFACE table with IP Prefix
        to have an additional ONE entry without IP Prefix. For. e.g, for an
        entry "Vlan1000|192.168.0.1/21": {}", this function shall add an entry
        without IP prefix as ""Vlan1000": {}". This is for VRF compatibility.
        '''
        if_db = []
        if_tables = {
            'INTERFACE',
            'PORTCHANNEL_INTERFACE',
            'VLAN_INTERFACE',
            'LOOPBACK_INTERFACE'
        }
        # Pass 1: remember interfaces that already have a prefix-less entry.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key):
                    if_db.append(key)
                    continue
        # Pass 2: create a prefix-less entry for every remaining interface.
        for table in if_tables:
            data = self.configDB.get_table(table)
            for key in data.keys():
                if not self.is_ip_prefix_in_key(key) or key[0] in if_db:
                    continue
                log_info('Migrating interface table for ' + key[0])
                self.configDB.set_entry(table, key[0], data[key])
                if_db.append(key[0])

    def migrate_intf_table(self):
        '''
        Migrate all data from existing INTF table in APP DB during warmboot with IP Prefix
        to have an additional ONE entry without IP Prefix. For. e.g, for an entry
        "Vlan1000:192.168.0.1/21": {}", this function shall add an entry without IP prefix
        as ""Vlan1000": {}". This also migrates 'lo' to 'Loopback0' interface
        '''
        if self.appDB is None:
            return

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        if data is None:
            return

        if_db = []
        for key in data:
            if_name = key.split(":")[1]
            if if_name == "lo":
                # Rename the legacy loopback key in place.
                self.appDB.delete(self.appDB.APPL_DB, key)
                key = key.replace(if_name, "Loopback0")
                log_info('Migrating lo entry to ' + key)
                self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')
            if '/' not in key:
                if_db.append(key.split(":")[1])
                continue

        data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*")
        for key in data:
            if_name = key.split(":")[1]
            if if_name in if_db:
                continue
            log_info('Migrating intf table for ' + if_name)
            table = "INTF_TABLE:" + if_name
            self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')
            if_db.append(if_name)

    def mlnx_migrate_buffer_pool_size(self):
        """
        On Mellanox platform the buffer pool size changed since
        version with new SDK 4.3.3052, SONiC to SONiC update
        from version with old SDK will be broken without migration.
        This migration is specifically for Mellanox platform.
        """
        # Buffer pools defined in version 1_0_2
        buffer_pools = ['ingress_lossless_pool', 'egress_lossless_pool',
                        'ingress_lossy_pool', 'egress_lossy_pool']

        # Old default buffer pool values on Mellanox platform
        spc1_t0_default_value = [{'ingress_lossless_pool': '4194304'},
                                 {'egress_lossless_pool': '16777152'},
                                 {'ingress_lossy_pool': '7340032'},
                                 {'egress_lossy_pool': '7340032'}]
        spc1_t1_default_value = [{'ingress_lossless_pool': '2097152'},
                                 {'egress_lossless_pool': '16777152'},
                                 {'ingress_lossy_pool': '5242880'},
                                 {'egress_lossy_pool': '5242880'}]
        spc2_t0_default_value = [{'ingress_lossless_pool': '8224768'},
                                 {'egress_lossless_pool': '35966016'},
                                 {'ingress_lossy_pool': '8224768'},
                                 {'egress_lossy_pool': '8224768'}]
        spc2_t1_default_value = [{'ingress_lossless_pool': '12042240'},
                                 {'egress_lossless_pool': '35966016'},
                                 {'ingress_lossy_pool': '12042240'},
                                 {'egress_lossy_pool': '12042240'}]

        # New default buffer pool configuration on Mellanox platform
        spc1_t0_default_config = {
            "ingress_lossless_pool": {"size": "5029836", "type": "ingress", "mode": "dynamic"},
            "ingress_lossy_pool": {"size": "5029836", "type": "ingress", "mode": "dynamic"},
            "egress_lossless_pool": {"size": "14024599", "type": "egress", "mode": "dynamic"},
            "egress_lossy_pool": {"size": "5029836", "type": "egress", "mode": "dynamic"}
        }
        spc1_t1_default_config = {
            "ingress_lossless_pool": {"size": "2097100", "type": "ingress", "mode": "dynamic"},
            "ingress_lossy_pool": {"size": "2097100", "type": "ingress", "mode": "dynamic"},
            "egress_lossless_pool": {"size": "14024599", "type": "egress", "mode": "dynamic"},
            "egress_lossy_pool": {"size": "2097100", "type": "egress", "mode": "dynamic"}
        }
        spc2_t0_default_config = {
            "ingress_lossless_pool": {"size": "14983147", "type": "ingress", "mode": "dynamic"},
            "ingress_lossy_pool": {"size": "14983147", "type": "ingress", "mode": "dynamic"},
            "egress_lossless_pool": {"size": "34340822", "type": "egress", "mode": "dynamic"},
            "egress_lossy_pool": {"size": "14983147", "type": "egress", "mode": "dynamic"}
        }
        spc2_t1_default_config = {
            "ingress_lossless_pool": {"size": "9158635", "type": "ingress", "mode": "dynamic"},
            "ingress_lossy_pool": {"size": "9158635", "type": "ingress", "mode": "dynamic"},
            "egress_lossless_pool": {"size": "34340822", "type": "egress", "mode": "dynamic"},
            "egress_lossy_pool": {"size": "9158635", "type": "egress", "mode": "dynamic"}
        }
        # 3800 platform has gearbox installed so the buffer pool size is different with other Spectrum2 platform
        spc2_3800_t0_default_config = {
            "ingress_lossless_pool": {"size": "28196784", "type": "ingress", "mode": "dynamic"},
            "ingress_lossy_pool": {"size": "28196784", "type": "ingress", "mode": "dynamic"},
            "egress_lossless_pool": {"size": "34340832", "type": "egress", "mode": "dynamic"},
            "egress_lossy_pool": {"size": "28196784", "type": "egress", "mode": "dynamic"}
        }
        spc2_3800_t1_default_config = {
            "ingress_lossless_pool": {"size": "17891280", "type": "ingress", "mode": "dynamic"},
            "ingress_lossy_pool": {"size": "17891280", "type": "ingress", "mode": "dynamic"},
            "egress_lossless_pool": {"size": "34340832", "type": "egress", "mode": "dynamic"},
            "egress_lossy_pool": {"size": "17891280", "type": "egress", "mode": "dynamic"}
        }

        # Try to get related info from DB
        buffer_pool_conf = {}
        device_data = self.configDB.get_table('DEVICE_METADATA')
        if 'localhost' in device_data.keys():
            hwsku = device_data['localhost']['hwsku']
            platform = device_data['localhost']['platform']
        else:
            log_error("Trying to get DEVICE_METADATA from DB but doesn't exist, skip migration")
            return False
        buffer_pool_conf = self.configDB.get_table('BUFFER_POOL')

        # Get current buffer pool configuration, only migrate configuration which
        # with default values, if it's not default, leave it as is.
        pool_size_in_db_list = []
        pools_in_db = buffer_pool_conf.keys()

        # Buffer pool numbers is different with default, don't need migrate
        if len(pools_in_db) != len(buffer_pools):
            return True

        # If some buffer pool is not default ones, don't need migrate
        for buffer_pool in buffer_pools:
            if buffer_pool not in pools_in_db:
                return True
            pool_size_in_db_list.append({buffer_pool: buffer_pool_conf[buffer_pool]['size']})

        # To check if the buffer pool size is equal to old default values
        new_buffer_pool_conf = None
        if pool_size_in_db_list == spc1_t0_default_value:
            new_buffer_pool_conf = spc1_t0_default_config
        elif pool_size_in_db_list == spc1_t1_default_value:
            new_buffer_pool_conf = spc1_t1_default_config
        elif pool_size_in_db_list == spc2_t0_default_value:
            if platform == 'x86_64-mlnx_msn3800-r0':
                new_buffer_pool_conf = spc2_3800_t0_default_config
            else:
                new_buffer_pool_conf = spc2_t0_default_config
        elif pool_size_in_db_list == spc2_t1_default_value:
            if platform == 'x86_64-mlnx_msn3800-r0':
                new_buffer_pool_conf = spc2_3800_t1_default_config
            else:
                new_buffer_pool_conf = spc2_t1_default_config
        else:
            # It's not using default buffer pool configuration, no migration needed.
            log_info("buffer pool size is not old default value, no need to migrate")
            return True

        # Migrate old buffer conf to latest.
        for pool in buffer_pools:
            self.configDB.set_entry('BUFFER_POOL', pool, new_buffer_pool_conf[pool])
        log_info("Successfully migrate mlnx buffer pool size to the latest.")
        return True

    def version_unknown(self):
        """
        version_unknown tracks all SONiC versions that doesn't have a version
        string defined in config_DB.
        Nothing can be assumped when migrating from this version to the next
        version.
        Any migration operation needs to test if the DB is in expected format
        before migrating date to the next version.
        """
        log_info('Handling version_unknown')

        # Returning this specific string is intentional: we only migrate as
        # far as version 1.0.2 here; migrate() chains the rest incrementally.
        # (This comment was previously garbled across a line break.)
        self.migrate_pfc_wd_table()
        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_1(self):
        """
        Version 1_0_1.
        """
        log_info('Handling version_1_0_1')

        self.migrate_interface_table()
        self.migrate_intf_table()
        self.set_version('version_1_0_2')
        return 'version_1_0_2'

    def version_1_0_2(self):
        """
        Version 1_0_2.
        """
        log_info('Handling version_1_0_2')
        # Check ASIC type, if Mellanox platform then need DB migration
        version_info = sonic_device_util.get_sonic_version_info()
        if version_info['asic_type'] == "mellanox":
            if self.mlnx_migrate_buffer_pool_size():
                self.set_version('version_1_0_3')
        else:
            self.set_version('version_1_0_3')
        # BUGFIX: previously returned None here, which stopped the migrate()
        # chain before the version_1_0_3 handler ran.  Return the next version
        # so the chain continues (matching the other version handlers).
        return 'version_1_0_3'

    def version_1_0_3(self):
        """
        Current latest version. Nothing to do here.
        """
        log_info('Handling version_1_0_3')
        return None

    def get_version(self):
        """Return the version string stored in CONFIG_DB, or 'version_unknown'."""
        version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)
        # Use .get() so an entry that exists without a VERSION field degrades
        # to 'version_unknown' instead of raising KeyError.
        if version and version.get(self.TABLE_FIELD):
            return version[self.TABLE_FIELD]

        return 'version_unknown'

    def set_version(self, version=None):
        """Write *version* (default: CURRENT_VERSION) into CONFIG_DB."""
        if not version:
            version = self.CURRENT_VERSION
        log_info('Setting version to ' + version)
        entry = { self.TABLE_FIELD : version }
        self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)

    def migrate(self):
        """Run version handlers in a chain until one returns None."""
        version = self.get_version()
        log_info('Upgrading from version ' + version)
        while version:
            next_version = getattr(self, version)()
            if next_version == version:
                raise Exception('Version migrate from %s stuck in same version' % version)
            version = next_version
class Tam(object): def __init__(self): # connect CONFIG DB self.config_db = ConfigDBConnector() self.config_db.connect() # connect APPL DB self.app_db = ConfigDBConnector() self.app_db.db_connect('APPL_DB') def get_tam_collector_info(self, k): api_response = {} key = TAM_COLLECTOR_TABLE_PREFIX + '|' + k raw_coll_data = self.config_db.get_all(self.config_db.CONFIG_DB, key) api_response['coll-key'] = k api_response['each-coll-data'] = raw_coll_data return api_response, raw_coll_data def get_print_all_tam_collectors(self, name): coll_dict = {} coll_list = [] if name != 'all': api_response, entryfound = self.get_tam_collector_info(name) if entryfound is not None: coll_list.append(api_response) else: table_data = self.config_db.get_keys(TAM_COLLECTOR_TABLE_PREFIX) # Get data for all keys for k in table_data: api_each_flow_response, entryfound = self.get_tam_collector_info( k) if entryfound is not None: coll_list.append(api_each_flow_response) coll_dict['flow-list'] = coll_list show_cli_output("show_collector.j2", coll_dict) return def config_device_id(self, args): key = 'device' entry = self.config_db.get_entry(TAM_DEVICE_TABLE_PREFIX, key) if entry is None: if args.deviceid: self.config_db.set_entry(TAM_DEVICE_TABLE_PREFIX, key, {'deviceid': args.deviceid}) else: if args.deviceid: entry_value = entry.get('deviceid', []) if entry_value != args.deviceid: self.config_db.mod_entry(TAM_DEVICE_TABLE_PREFIX, key, {'deviceid': args.deviceid}) return def config_collector(self, args): if args.iptype == 'ipv4': if args.ipaddr == "0.0.0.0": print "Collector IP should be non-zero ip address" return False if args.iptype == 'ipv6': print "IPv6 Collector type not supported" return False self.config_db.mod_entry( TAM_COLLECTOR_TABLE_PREFIX, args.collectorname, { 'ipaddress-type': args.iptype, 'ipaddress': args.ipaddr, 'port': args.port }) return def clear_device_id(self): key = 'device' entry = self.config_db.get_entry(TAM_DEVICE_TABLE_PREFIX, key) if entry: 
self.config_db.set_entry(TAM_DEVICE_TABLE_PREFIX, key, None) return def clear_collector(self, args): key = args.collectorname entry = self.config_db.get_entry(TAM_COLLECTOR_TABLE_PREFIX, key) if entry: self.config_db.set_entry(TAM_COLLECTOR_TABLE_PREFIX, key, None) else: print "Entry Not Found" return False return def show_device_id(self): key = TAM_DEVICE_TABLE_PREFIX + '|' + 'device' data = self.config_db.get_all(self.config_db.CONFIG_DB, key) print "TAM Device identifier" print "-------------------------------" if data: if 'deviceid' in data: print "Device Identifier - ", data['deviceid'] return def show_collector(self, args): self.get_print_all_tam_collectors(args.collectorname) return