def __init__(self):
    """Initialize the ACL loader: connect to CONFIG_DB and STATE_DB in the
    default namespace and, on multi-ASIC platforms, in every front-end ASIC
    namespace, then read tables/rules/sessions/policers into memory."""
    # Parsed/requested ACL state, filled in by later read_* / load calls.
    self.yang_acl = None
    self.requested_session = None
    self.mirror_stage = None
    self.current_table = None
    self.tables_db_info = {}
    self.rules_db_info = {}
    self.rules_info = {}
    if multi_asic.is_multi_asic():
        # Load global db config
        SonicDBConfig.load_sonic_global_db_config()
    else:
        SonicDBConfig.initialize()
    self.sessions_db_info = {}
    self.configdb = ConfigDBConnector()
    self.configdb.connect()
    self.statedb = SonicV2Connector(host="127.0.0.1")
    self.statedb.connect(self.statedb.STATE_DB)
    # For multi-NPU architecture we will have both global and per-front-ASIC
    # namespaces. The global namespace is used for control-plane ACLs (via
    # IPTables); the per-ASIC namespaces are used for data and Everflow ACLs.
    # The global CONFIG_DB holds ACL information for both control and
    # data/Everflow ACLs and is the source of truth for ACL modification,
    # which is applied to both the global DB and the front-ASIC namespaces.
    self.per_npu_configdb = {}
    # STATE_DB is used to get the mirror session monitor port. On multi-NPU
    # platforms each ASIC namespace can have a different monitor port
    # depending on which route reaches the session destination IP, so we
    # also open STATE_DB in every front-ASIC namespace.
    self.per_npu_statedb = {}
    # Get all front-ASIC namespaces and the corresponding CONFIG_DB and
    # STATE_DB connectors.
    namespaces = device_info.get_all_namespaces()
    for front_asic_namespaces in namespaces['front_ns']:
        self.per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(
            use_unix_socket_path=True, namespace=front_asic_namespaces)
        self.per_npu_configdb[front_asic_namespaces].connect()
        self.per_npu_statedb[front_asic_namespaces] = SonicV2Connector(
            use_unix_socket_path=True, namespace=front_asic_namespaces)
        self.per_npu_statedb[front_asic_namespaces].connect(
            self.per_npu_statedb[front_asic_namespaces].STATE_DB)
    self.read_tables_info()
    self.read_rules_info()
    self.read_sessions_info()
    self.read_policers_info()
def generate_fdb_entries(filename):
    """Dump FDB entries from ASIC_DB to *filename* as JSON.

    Returns a tuple (all_available_macs, map_mac_ip_per_vlan) where the
    first element is the set of (vlan, mac) pairs seen and the second maps
    each VLAN interface name to its per-MAC data from get_fdb().
    """
    db = SonicV2Connector(use_unix_socket_path=False)
    db.connect(db.ASIC_DB, False)  # Make one attempt only
    bridge_map = get_map_bridge_port_id_2_iface_name(db)
    fdb_dump = []
    seen_macs = set()
    mac_ip_by_vlan = {}
    for vlan in get_vlan_ifaces():
        vlan_id = int(vlan.replace('Vlan', ''))
        vlan_fdb, vlan_macs, mac_ip_by_vlan[vlan] = get_fdb(
            db, vlan, vlan_id, bridge_map)
        seen_macs |= vlan_macs
        fdb_dump += vlan_fdb
    db.close(db.ASIC_DB)
    with open(filename, 'w') as fp:
        json.dump(fdb_dump, fp, indent=2, separators=(',', ': '))
    return seen_macs, mac_ip_by_vlan
def generate_media_config(filename):
    """Dump per-port SerDes media settings from APPL_DB to *filename* as JSON.

    Reads every PORT_TABLE:* entry, keeps only the known SerDes attributes,
    and writes a list of {<key>: attrs, 'OP': 'SET'} objects. Returns that
    list.
    """
    db = SonicV2Connector(host='127.0.0.1')
    db.connect(db.APPL_DB, False)  # Make one attempt only
    media_config = []
    port_serdes_keys = ["preemphasis", "idriver", "ipredriver", "pre1",
                        "pre2", "pre3", "main", "post1", "post2", "post3",
                        "attn"]
    keys = db.keys(db.APPL_DB, 'PORT_TABLE:*')
    keys = [] if keys is None else keys
    for key in keys:
        entry = db.get_all(db.APPL_DB, key)
        # Keep only the SerDes-related fields of the port entry.
        media_attributes = {attr: value for attr, value in entry.items()
                            if attr in port_serdes_keys}
        # 'key' is already a string; the original's "'%s' % (key)" was a no-op.
        media_config.append({key: media_attributes, 'OP': 'SET'})
    db.close(db.APPL_DB)
    with open(filename, 'w') as fp:
        json.dump(media_config, fp, indent=2, separators=(',', ': '))
    return media_config
def main():
    """Parse monit's MONIT_DESCRIPTION and trigger the rate-limited
    techsupport command for the memory-threshold event.

    Returns EXIT_SUCCESS on success, EXIT_FAILURE on malformed input.
    """
    output = os.environ.get("MONIT_DESCRIPTION")
    syslog.openlog(logoption=syslog.LOG_PID)
    db = SonicV2Connector(use_unix_socket_path=True)
    db.connect(CFG_DB)
    db.connect(STATE_DB)
    if not output:
        syslog.syslog(
            syslog.LOG_ERR,
            "Expected to get output from environment variable MONIT_DESCRIPTION"
        )
        return EXIT_FAILURE
    if "--" not in output:
        syslog.syslog(syslog.LOG_ERR, "Unexpected value in environment variable MONIT_DESCRIPTION")
        return EXIT_FAILURE
    monit_output = output.split("--")[1].strip()
    # When memory_threshold_check produced no output the check failed for
    # the host itself; monit then puts "no output" in MONIT_DESCRIPTION.
    container = None if monit_output == "no output" else monit_output
    invoke_ts_command_rate_limited(db, EVENT_TYPE_MEMORY, container)
    return EXIT_SUCCESS
def midplane_status(chassis_module_name):
    """Show chassis-modules midplane-status"""
    header = ['Name', 'IP-Address', 'Reachability']
    state_db = SonicV2Connector(host="127.0.0.1")
    state_db.connect(state_db.STATE_DB)
    # Narrow to a single module when a name was given, else list all.
    key_pattern = '|' + chassis_module_name if chassis_module_name else '*'
    keys = state_db.keys(state_db.STATE_DB, CHASSIS_MIDPLANE_INFO_TABLE + key_pattern)
    if not keys:
        print('Key {} not found in {} table'.format(key_pattern, CHASSIS_MIDPLANE_INFO_TABLE))
        return
    rows = []
    for key in natsorted(keys):
        parts = key.split('|')
        if len(parts) != 2:
            # Malformed DB key: report and skip it.
            print('Warn: Invalid Key {} in {} table'.format(key, CHASSIS_MIDPLANE_INFO_TABLE))
            continue
        entry = state_db.get_all(state_db.STATE_DB, key)
        rows.append((parts[1],
                     entry[CHASSIS_MIDPLANE_INFO_IP_FIELD],
                     entry[CHASSIS_MIDPLANE_INFO_ACCESS_FIELD]))
    if rows:
        click.echo(tabulate(rows, header, tablefmt='simple', stralign='right'))
    else:
        click.echo('No data available in CHASSIS_MIDPLANE_TABLE\n')
def history():
    """Show history of reboot-cause"""
    REBOOT_CAUSE_TABLE_NAME = "REBOOT_CAUSE"
    TABLE_NAME_SEPARATOR = '|'
    db = SonicV2Connector(host='127.0.0.1')
    db.connect(db.STATE_DB, False)  # Make one attempt only
    prefix = REBOOT_CAUSE_TABLE_NAME + TABLE_NAME_SEPARATOR
    _hash = '{}{}'.format(prefix, '*')
    table_keys = db.keys(db.STATE_DB, _hash)
    if table_keys is not None:
        # Newest entries first (keys start with the reboot timestamp).
        table_keys.sort(reverse=True)
        table = []
        for tk in table_keys:
            entry = db.get_all(db.STATE_DB, tk)
            # dict.get() replaces the original "x if k in entry else ''" chains.
            table.append([
                tk.replace(prefix, ""),
                entry.get('cause', ""),
                entry.get('time', ""),
                entry.get('user', ""),
                entry.get('comment', ""),
            ])
        header = ['Name', 'Cause', 'Time', 'User', 'Comment']
        click.echo(tabulate(table, header, numalign="left"))
    else:
        click.echo("Reboot-cause history is not yet available in StateDB")
        sys.exit(1)
def match_engine():
    """Fixture: yield a MatchEngine backed by a mock connection pool."""
    print("SETUP")
    os.environ["VERBOSE"] = "1"
    # Monkey-patch the SonicV2Connector object
    from ...mock_tables import dbconnector
    mock_conn = SonicV2Connector()
    # Populate the mock DB with the dedicated dump files
    db_names = list(dedicated_dbs.keys())
    try:
        populate_mock(mock_conn, db_names, dedicated_dbs)
    except Exception as e:
        assert False, "Mock initialization failed: " + str(e)
    # Build the connection pool and hand it to the match engine
    pool = ConnectionPool()
    pool.fill(DEFAULT_NAMESPACE, mock_conn, db_names)
    yield MatchEngine(pool)
    print("TEARDOWN")
    os.environ["VERBOSE"] = "0"
def generate_neighbor_entries(filename, all_available_macs):
    """Dump NEIGH_TABLE entries whose (vlan, mac) is in *all_available_macs*
    to *filename* as JSON, and return a list of (vlan, mac, ip) tuples."""
    db = SonicV2Connector(use_unix_socket_path=False)
    db.connect(db.APPL_DB, False)  # Make one attempt only
    arp_output = []
    neighbor_entries = []
    keys = db.keys(db.APPL_DB, 'NEIGH_TABLE:*')
    for key in (keys if keys is not None else []):
        vlan_name = key.split(':')[1]
        entry = db.get_all(db.APPL_DB, key)
        mac = entry['neigh'].lower()
        # Skip neighbors whose MAC is not present in the FDB dump.
        if (vlan_name, mac) not in all_available_macs:
            # FIXME: print me to log
            continue
        arp_output.append({key: entry, 'OP': 'SET'})
        ip_addr = key.split(':', 2)[2]
        neighbor_entries.append((vlan_name, mac, ip_addr))
        syslog.syslog(
            syslog.LOG_INFO,
            "Neighbor entry: [Vlan: %s, Mac: %s, Ip: %s]" % (vlan_name, mac, ip_addr))
    db.close(db.APPL_DB)
    with open(filename, 'w') as fp:
        json.dump(arp_output, fp, indent=2, separators=(',', ': '))
    return neighbor_entries
def initialize_connector(self, ns):
    """Return a unix-socket SonicV2Connector for namespace *ns*, loading the
    appropriate (global or single-ASIC) DB config first if needed."""
    if not SonicDBConfig.isInit():
        loader = (SonicDBConfig.load_sonic_global_db_config
                  if multi_asic.is_multi_asic()
                  else SonicDBConfig.load_sonic_db_config)
        loader()
    return SonicV2Connector(namespace=ns, use_unix_socket_path=True)
def get_route_flow_counter_capability():
    """Read the route flow counter capability entry from STATE_DB."""
    sdb = SonicV2Connector(host="127.0.0.1")
    sdb.connect(sdb.STATE_DB)
    capability_key = '{}|{}'.format(FLOW_COUNTER_CAPABILITY_TABLE,
                                    FLOW_COUNTER_CAPABILITY_KEY)
    return sdb.get_all(sdb.STATE_DB, capability_key)
def match_engine():
    """Fixture: yield a MatchEngine loaded with the default dump files."""
    print("SETUP")
    os.environ["VERBOSE"] = "1"
    dump_port_input = os.path.join(os.path.dirname(__file__), "../dump_input/dump/default")
    # Map each DB name to its JSON dump file.
    dedicated_dbs = {
        'CONFIG_DB': os.path.join(dump_port_input, "config_db.json"),
        'APPL_DB': os.path.join(dump_port_input, "appl_db.json"),
        'STATE_DB': os.path.join(dump_port_input, "state_db.json"),
        'ASIC_DB': os.path.join(dump_port_input, "asic_db.json"),
    }
    mock_conn = SonicV2Connector()
    # Populate the mock connector from the dump files.
    db_names = list(dedicated_dbs.keys())
    try:
        populate_mock(mock_conn, db_names, dedicated_dbs)
    except Exception as e:
        assert False, "Mock initialization failed: " + str(e)
    pool = ConnectionPool()
    pool.fill(DEFAULT_NAMESPACE, mock_conn, db_names)
    yield MatchEngine(pool)
    print("TEARDOWN")
def __init__(self):
    """Open CONFIG_DB and all SonicV2 databases for the default namespace
    and, on multi-ASIC platforms, for every ASIC namespace."""
    self.cfgdb_clients = {}
    self.db_clients = {}
    self.cfgdb = ConfigDBConnector()
    self.cfgdb.connect()
    self.cfgdb_pipe = ConfigDBPipeConnector()
    self.cfgdb_pipe.connect()
    self.db = SonicV2Connector(host="127.0.0.1")
    # Skip connecting to chassis databases in line cards.
    self.db_list = list(self.db.get_db_list())
    if not device_info.is_supervisor():
        # Remove each chassis DB independently: the original wrapped both
        # remove() calls in one try/except, so a missing CHASSIS_APP_DB
        # silently prevented CHASSIS_STATE_DB from being removed too.
        for chassis_db in ('CHASSIS_APP_DB', 'CHASSIS_STATE_DB'):
            if chassis_db in self.db_list:
                self.db_list.remove(chassis_db)
    for db_id in self.db_list:
        self.db.connect(db_id)
    self.cfgdb_clients[constants.DEFAULT_NAMESPACE] = self.cfgdb
    self.db_clients[constants.DEFAULT_NAMESPACE] = self.db
    if multi_asic.is_multi_asic():
        self.ns_list = multi_asic_ns_choices()
        for ns in self.ns_list:
            self.cfgdb_clients[ns] = (
                multi_asic.connect_config_db_for_ns(ns))
            self.db_clients[ns] = multi_asic.connect_to_all_dbs_for_ns(ns)
def error_status(port, fetch_from_hardware):
    """Display error status of SFP transceiver(s)

    Validates *port* (when given), then reads error status either from the
    platform API (``fetch_from_hardware``) or from STATE_DB, and prints it
    as a table. Exits with ERROR_INVALID_PORT on an invalid port name.
    """
    output_table = []
    table_header = ["Port", "Error Status"]
    # Create a list containing the logical port names of all ports we're interested in
    if port and platform_sfputil.is_logical_port(port) == 0:
        click.echo("Error: invalid port '{}'\n".format(port))
        click.echo("Valid values for port: {}\n".format(
            str(platform_sfputil.logical)))
        sys.exit(ERROR_INVALID_PORT)
    if fetch_from_hardware:
        output_table = fetch_error_status_from_platform_api(port)
    else:
        # Connect to STATE_DB. SonicV2Connector() never returns None, so the
        # original "if state_db is not None" guard (and its error branch)
        # was unreachable and has been removed.
        state_db = SonicV2Connector(host='127.0.0.1')
        state_db.connect(state_db.STATE_DB)
        output_table = fetch_error_status_from_state_db(port, state_db)
    click.echo(tabulate(output_table, table_header, tablefmt='simple'))
def thread_coming_data():
    """Test helper thread: write a key to TEST_DB after waiting twice the
    pub-sub notification timeout."""
    print("Start thread: thread_coming_data")
    conn = SonicV2Connector(use_unix_socket_path=True)
    conn.connect("TEST_DB")
    # Wait long enough that any pending pub-sub notification window expires.
    time.sleep(DBInterface.PUB_SUB_NOTIFICATION_TIMEOUT * 2)
    conn.set("TEST_DB", "key0_coming", "field1", "value2")
    print("Leave thread: thread_coming_data")
def match_engine():
    """Fixture: yield a MatchEngine whose pool cache is seeded directly."""
    print("SETUP")
    os.environ["VERBOSE"] = "1"
    # Monkey-patch the SonicV2Connector object
    from ...mock_tables import dbconnector
    mock_conn = SonicV2Connector()
    # Populate the mock DB with the dedicated dump files
    db_names = list(dedicated_dbs.keys())
    try:
        populate_mock(mock_conn, db_names, dedicated_dbs)
    except Exception as e:
        assert False, "Mock initialization failed: " + str(e)
    # Seed the pool cache directly instead of going through fill().
    pool = ConnectionPool()
    DEF_NS = ''  # Default Namespace
    pool.cache = {DEF_NS: {'conn': mock_conn, 'connected_to': set(db_names)}}
    yield MatchEngine(pool)
    print("TEARDOWN")
    os.environ["VERBOSE"] = "0"
def __init__(self, namespace, socket=None):
    """
    Version string format: version_<major>_<minor>_<build>
        major: starting from 1, sequentially incrementing in master branch.
        minor: in github branches, minor version stays in 0. This minor
            version creates space for private branches derived from github
            public branches. These private branches shall use none-zero
            values.
        build: sequentially increase within a minor version domain.
    """
    self.CURRENT_VERSION = 'version_2_0_0'
    self.TABLE_NAME = 'VERSIONS'
    self.TABLE_KEY = 'DATABASE'
    self.TABLE_FIELD = 'VERSION'
    db_kwargs = {}
    if socket:
        db_kwargs['unix_socket_path'] = socket
    if namespace is None:
        self.configDB = ConfigDBConnector(**db_kwargs)
    else:
        self.configDB = ConfigDBConnector(use_unix_socket_path=True,
                                          namespace=namespace, **db_kwargs)
    self.configDB.db_connect('CONFIG_DB')
    # SonicV2Connector() never returns None, so the original
    # "if self.appDB is not None" / "if self.stateDB is not None"
    # guards were dead code and have been removed.
    self.appDB = SonicV2Connector(host='127.0.0.1')
    self.appDB.connect(self.appDB.APPL_DB)
    self.stateDB = SonicV2Connector(host='127.0.0.1')
    self.stateDB.connect(self.stateDB.STATE_DB)
    version_info = device_info.get_sonic_version_info()
    asic_type = version_info.get('asic_type')
    self.asic_type = asic_type
    if asic_type == "mellanox":
        # Imported lazily: the module only exists on Mellanox platforms.
        from mellanox_buffer_migrator import MellanoxBufferMigrator
        self.mellanox_buffer_migrator = MellanoxBufferMigrator(
            self.configDB)
def generate_fdb_entries(filename):
    """Dump FDB entries (derived from ASIC_DB and APPL_DB) to *filename*
    as JSON and return (all_available_macs, map_mac_ip_per_vlan)."""
    asic_db = SonicV2Connector(use_unix_socket_path=False)
    app_db = SonicV2Connector(use_unix_socket_path=False)
    asic_db.connect(asic_db.ASIC_DB, False)   # Make one attempt only
    app_db.connect(app_db.APPL_DB, False)     # Make one attempt only
    fdb_entries, all_available_macs, map_mac_ip_per_vlan = \
        generate_fdb_entries_logic(asic_db, app_db, get_vlan_ifaces())
    asic_db.close(asic_db.ASIC_DB)
    app_db.close(app_db.APPL_DB)
    with open(filename, 'w') as fp:
        json.dump(fdb_entries, fp, indent=2, separators=(',', ': '))
    return all_available_macs, map_mac_ip_per_vlan
def __init__(self, update_interval=None):
    """Create the LLDP sync daemon and connect to APPL_DB.

    update_interval: seconds between updates; any falsy value (None/0)
    falls back to DEFAULT_UPDATE_INTERVAL.
    """
    super(LldpSyncDaemon, self).__init__()
    self._update_interval = update_interval if update_interval else DEFAULT_UPDATE_INTERVAL
    self.db_connector = SonicV2Connector()
    self.db_connector.connect(self.db_connector.APPL_DB)
    # Caches of the last-pushed chassis/interface state.
    self.chassis_cache = {}
    self.interfaces_cache = {}
def neighbors():
    """Show vnet neighbors information"""
    config_db = ConfigDBConnector()
    config_db.connect()
    header = ['<vnet_name>', 'neighbor', 'mac_address', 'interfaces']
    # Collect vnet -> member-interface mapping from both interface tables.
    # (The original duplicated this loop for INTERFACE and VLAN_INTERFACE.)
    vnet_intfs = {}
    for table_name in ("INTERFACE", "VLAN_INTERFACE"):
        for intf, attrs in config_db.get_table(table_name).items():
            if 'vnet_name' in attrs:
                vnet_intfs.setdefault(attrs['vnet_name'], []).append(intf)
    appl_db = SonicV2Connector()
    appl_db.connect(appl_db.APPL_DB)
    # Fetching data from appl_db for neighbors
    nbrs = appl_db.keys(appl_db.APPL_DB, "NEIGH_TABLE:*")
    nbrs_data = {}
    for nbr in nbrs if nbrs else []:
        _, intf, ip = nbr.split(":", 2)
        mac = appl_db.get(appl_db.APPL_DB, nbr, 'neigh')
        nbrs_data.setdefault(intf, []).append((ip, mac))
    table = []
    for vnet_name, intfs in vnet_intfs.items():
        # Reuse the header's first column as the vnet name (original quirk).
        header[0] = vnet_name
        table = []
        for intf in natsorted(intfs):
            for ip, mac in nbrs_data.get(intf, []):
                table.append(["", ip, mac, intf])
        click.echo(tabulate(table, header))
        click.echo()
    if not vnet_intfs:
        # No vnets configured: print the empty table with the default header.
        click.echo(tabulate(table, header))
def asic_db(self):
    """ Returns the ASIC DB connector, creating and connecting it lazily
    on first access. """
    if self.asic_db_connector is None:
        connector = SonicV2Connector()
        connector.connect('ASIC_DB')
        self.asic_db_connector = connector
    return self.asic_db_connector
def main():
    """Entry point: parse the techsupport dump name and dispatch the
    creation event with CONFIG_DB/STATE_DB connections open."""
    parser = argparse.ArgumentParser(
        description='Auto Techsupport Invocation and CoreDump Mgmt Script')
    parser.add_argument('name', type=str, help='TechSupport Dump Name')
    args = parser.parse_args()
    syslog.openlog(logoption=syslog.LOG_PID)
    conn = SonicV2Connector(use_unix_socket_path=True)
    for db_id in (CFG_DB, STATE_DB):
        conn.connect(db_id)
    handle_techsupport_creation_event(args.name, conn)
def init_db():
    """
    Connects to DB
    :return: db_conn
    """
    SonicDBConfig.load_sonic_global_db_config()
    # SyncD database connector. THIS MUST BE INITIALIZED ON A PER-THREAD
    # BASIS: Redis PubSub objects (such as those within swsssdk) are NOT
    # thread-safe.
    return SonicV2Connector(**redis_kwargs)
def init_namespace_dbs():
    """Create and connect a SonicV2Connector per configured namespace;
    return the list of connectors."""
    SonicDBConfig.load_sonic_global_db_config()
    db_conn = [
        SonicV2Connector(use_unix_socket_path=True,
                         namespace=namespace,
                         decode_responses=True)
        for namespace in SonicDBConfig.get_ns_list()
    ]
    Namespace.connect_namespace_dbs(db_conn)
    return db_conn
def breakOutPort(self, delPorts=None, portJson=None, force=False, \
    loadDefConfig=True):
    '''
    This is the main function for port breakout. Exposed to caller.

    Parameters:
        delPorts (list): ports to be deleted.
        portJson (dict): Config DB json Part of all Ports, generated from
            platform.json.
        force (bool): if false return dependecies, else delete dependencies.
        loadDefConfig (bool): if True, add default config for ports as well.

    Returns:
        (deps, ret) (tuple)[list, bool]: dependecies and success/failure.
    '''
    # Normalize defaults here: the original used mutable default arguments
    # (delPorts=list(), portJson=dict()), which are shared across calls.
    delPorts = list() if delPorts is None else delPorts
    portJson = dict() if portJson is None else portJson
    MAX_WAIT = 60  # seconds to wait for ASIC DB to reflect port deletion
    try:
        # delete Port and get the Config diff, deps and True/False
        delConfigToLoad, deps, ret = self._deletePorts(ports=delPorts, \
            force=force)
        # return dependencies if delete port fails
        if ret == False:
            return deps, ret

        # add Ports and get the config diff and True/False
        addConfigtoLoad, ret = self._addPorts(portJson=portJson, \
            loadDefConfig=loadDefConfig)
        # return if ret is False; no change has been made to Config DB yet
        if ret == False:
            return None, ret

        # Save Port OIDs Mapping Before Deleting Port
        dataBase = SonicV2Connector(host="127.0.0.1")
        if_name_map, if_oid_map = port_util.get_interface_oid_map(dataBase)
        self.sysLog(syslog.LOG_DEBUG, 'if_name_map {}'.format(if_name_map))

        # If we are here, then get ready to update the Config DB as below:
        # -- shutdown the ports,
        # -- Update deletion of ports in Config DB,
        # -- verify Asic DB for port deletion,
        # -- then update addition of ports in config DB.
        self._shutdownIntf(delPorts)
        self.writeConfigDB(delConfigToLoad)
        # Verify in Asic DB,
        self._verifyAsicDB(db=dataBase, ports=delPorts, portMap=if_name_map, \
            timeout=MAX_WAIT)
        self.writeConfigDB(addConfigtoLoad)
    except Exception as e:
        self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e))
        return None, False
    return None, True
def main():
    """Entry point: parse the core dump name/container and run the
    critical-process core dump handling and cleanup."""
    parser = argparse.ArgumentParser(
        description='Auto Techsupport Invocation and CoreDump Mgmt Script')
    parser.add_argument('name', type=str, help='Core Dump Name')
    parser.add_argument('container', type=str, help='Container Name')
    args = parser.parse_args()
    syslog.openlog(logoption=syslog.LOG_PID)
    conn = SonicV2Connector(use_unix_socket_path=True)
    for db_id in (CFG_DB, STATE_DB):
        conn.connect(db_id)
    handler = CriticalProcCoreDumpHandle(args.name, args.container, conn)
    handler.handle_core_dump_creation_event()
    handle_coredump_cleanup(args.name, conn)
def test_DBInterface():
    """Round-trip a field through TEST_DB via SonicV2Connector."""
    dbintf = DBInterface()
    dbintf.set_redis_kwargs("", "127.0.0.1", 6379)
    dbintf.connect(15, "TEST_DB")
    conn = SonicV2Connector(use_unix_socket_path=True, namespace='')
    assert conn.namespace == ''
    conn.connect("TEST_DB")
    conn.set("TEST_DB", "key0", "field1", "value2")
    fields = conn.get_all("TEST_DB", "key0")
    assert "field1" in fields
    assert fields["field1"] == "value2"
def connect(self, db, ns):
    """Connect self.conn to database *db* in namespace *ns*.

    Loads the (global or single-ASIC) DB config on first use. Returns True
    on success, False on any failure (logged via verbose_print).
    """
    try:
        if not SonicDBConfig.isInit():
            loader = (SonicDBConfig.load_sonic_global_db_config
                      if multi_asic.is_multi_asic()
                      else SonicDBConfig.load_sonic_db_config)
            loader()
        self.conn = SonicV2Connector(namespace=ns, use_unix_socket_path=True)
        self.conn.connect(db)
        return True
    except Exception as e:
        verbose_print("RedisSource: Connection Failed\n" + str(e))
        return False
def get_per_npu_statedb(per_npu_statedb, port_table_keys):
    """Populate *per_npu_statedb* with a connected STATE_DB connector per
    front-end ASIC namespace and *port_table_keys* with each namespace's
    MUX_CABLE_TABLE keys."""
    for namespace in multi_asic.get_front_end_namespaces():
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        statedb = SonicV2Connector(use_unix_socket_path=True, namespace=namespace)
        statedb.connect(statedb.STATE_DB)
        per_npu_statedb[asic_id] = statedb
        port_table_keys[asic_id] = statedb.keys(
            statedb.STATE_DB, 'MUX_CABLE_TABLE|*')
def radius(db):
    """Show RADIUS configuration"""
    output = ''
    config_db = db.cfgdb
    data = config_db.get_table('RADIUS')
    # Defaults shown when CONFIG_DB has no explicit global setting.
    radius = {
        'global': {
            'auth_type': 'pap (default)',
            'retransmit': '3 (default)',
            'timeout': '5 (default)',
            'passkey': '<EMPTY_STRING> (default)'
        }
    }
    # Overlay any configured global values on top of the defaults.
    if 'global' in data:
        radius['global'].update(data['global'])
    for key in radius['global']:
        output += ('RADIUS global %s %s\n' % (str(key), str(radius['global'][key])))
    data = config_db.get_table('RADIUS_SERVER')
    if data != {}:
        for row in data:
            entry = data[row]
            output += ('\nRADIUS_SERVER address %s\n' % row)
            for key in entry:
                output += ('    %s %s\n' % (key, str(entry[key])))
    counters_db = SonicV2Connector(host='127.0.0.1')
    counters_db.connect(counters_db.COUNTERS_DB, retry_on=False)
    # Per-server statistics are shown only when the global 'statistics'
    # flag is set and at least one server is configured.
    if radius['global'].get('statistics', False) and (data != {}):
        for row in data:
            exists = counters_db.exists(counters_db.COUNTERS_DB,
                                        'RADIUS_SERVER_STATS:{}'.format(row))
            if not exists:
                continue
            counter_entry = counters_db.get_all(
                counters_db.COUNTERS_DB,
                'RADIUS_SERVER_STATS:{}'.format(row))
            output += ('\nStatistics for RADIUS_SERVER address %s\n' % row)
            # Only non-zero counters are displayed.
            for key in counter_entry:
                if counter_entry[key] != "0":
                    output += ('    %s %s\n' % (key, str(counter_entry[key])))
    # Best-effort close; ignore any error from an already-closed connection.
    try:
        counters_db.close(counters_db.COUNTERS_DB)
    except Exception as e:
        pass
    click.echo(output)
def is_gearbox_configured():
    """ Checks whether Gearbox is configured or not.

    Returns True if any _GEARBOX_TABLE:phy:* record is present in APPL_DB,
    False otherwise.
    """
    app_db = SonicV2Connector()
    app_db.connect(app_db.APPL_DB)
    keys = app_db.keys(app_db.APPL_DB, '*')
    # db.keys() can return None when nothing matches (guarded elsewhere in
    # this file); the original would raise TypeError iterating None.
    if not keys:
        return False
    return any(re.match(GEARBOX_TABLE_PHY_PATTERN, key) for key in keys)