Example #1
def getAllLines():
    config_db = ConfigDBConnector()
    config_db.connect()

    # Querying CONFIG_DB to get configured console ports
    keys = config_db.get_keys(CONSOLE_PORT_TABLE)
    lines = []
    for k in keys:
        line = config_db.get_entry(CONSOLE_PORT_TABLE, k)
        line[LINE_KEY] = k
        lines.append(line)

    # Querying device directory to get all available console ports
    cmd = "ls " + DEVICE_PREFIX + "*"
    output = run_command(cmd)
    availableTtys = output.split('\n')
    availableTtys = [
        dev for dev in availableTtys
        if re.match(DEVICE_PREFIX + r"\d+", dev) is not None
    ]
    for tty in availableTtys:
        k = tty[len(DEVICE_PREFIX):]
        if k not in keys:
            line = {LINE_KEY: k}
            lines.append(line)
    return lines
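For reference, the filter above only keeps device paths with a numeric suffix. A minimal, self-contained sketch of that step (DEVICE_PREFIX is a stand-in value here; the real module defines its own constant):

import re

DEVICE_PREFIX = "/dev/ttyUSB"  # assumed value, for illustration only

candidates = ["/dev/ttyUSB0", "/dev/ttyUSB12", "/dev/ttyS0", "/dev/ttyUSB"]

# Keep only <DEVICE_PREFIX><digits>, mirroring the filter in getAllLines()
ttys = [dev for dev in candidates
        if re.match(DEVICE_PREFIX + r"\d+", dev) is not None]

print(ttys)                                    # ['/dev/ttyUSB0', '/dev/ttyUSB12']
print([t[len(DEVICE_PREFIX):] for t in ttys])  # line keys: ['0', '12']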
Example #2
def threshold(ctx, port_name, queue_index, queue_type):
    """ Clear queue threshold for a queue on a port """
    # If no params are provided, clear all queue threshold entries.
    config_db = ConfigDBConnector()
    config_db.connect()

    clear_all = False

    if port_name is None and queue_index is None and queue_type is None:
        # clear all entries.
        key = 'queue'
        clear_all = True
    elif port_name is None or queue_index is None or queue_type is None:
        ctx.fail(
            "port_name, queue_index and queue_type are mandatory parameters.")
    else:
        if queue_index not in range(0, 8):
            ctx.fail("queue index must be in range 0-7")
        if not interface_name_is_valid(port_name):
            ctx.fail("Interface name is invalid!!")
        key = 'queue' + '|' + queue_type + '|' + port_name + '|' + str(
            queue_index)

    if clear_all:
        entry_table = config_db.get_keys('THRESHOLD_TABLE')
        # Clear data for all keys
        for k in natsorted(entry_table):
            if k[0] == 'queue':
                config_db.set_entry('THRESHOLD_TABLE', k, None)
    else:
        entry = config_db.get_entry('THRESHOLD_TABLE', key)
        if entry:
            config_db.set_entry('THRESHOLD_TABLE', key, None)
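Two conventions do the heavy lifting here: queue keys follow the 'queue|<type>|<port>|<index>' layout built above, and ConfigDBConnector treats set_entry(table, key, None) as a delete. A small stand-alone sketch of the key handling (values are stand-ins):

queue_type, port_name, queue_index = 'unicast', 'Ethernet0', 3

# Single-entry key, as built in threshold()
key = 'queue' + '|' + queue_type + '|' + port_name + '|' + str(queue_index)
print(key)  # queue|unicast|Ethernet0|3

# Multi-field keys come back from get_keys() as tuples, so k[0] == 'queue'
# selects queue thresholds and skips e.g. priority-group entries.
sample_keys = [('queue', 'unicast', 'Ethernet0', '3'),
               ('priority-group', 'shared', 'Ethernet0', '3')]
print([k for k in sample_keys if k[0] == 'queue'])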
Example #3
def get_route_entries():
    db = ConfigDBConnector()
    db.db_connect('ASIC_DB')
    print_message(MODE_DEBUG, "ASIC DB connected")
    keys = db.get_keys('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY', False)
    print_message(MODE_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": keys}, indent=4))

    rt = []
    for k in keys:
        rt.append(k.split('"')[3])  # prefix is the 4th '"'-delimited token
    return sorted(rt)
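The split('"')[3] trick works because ASIC_DB route keys embed a JSON blob whose first quoted value is the destination prefix. A self-contained illustration (the key below shows the representative shape, not captured output):

key = ('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:'
       '{"dest":"10.1.0.0/24","switch_id":"oid:0x21","vr":"oid:0x3"}')

# Splitting on '"' yields [..., '{', 'dest', ':', '10.1.0.0/24', ...],
# so index 3 is the prefix itself.
print(key.split('"')[3])  # 10.1.0.0/24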
Example #4
def get_route_entries():
    db = ConfigDBConnector()
    db.db_connect('ASIC_DB')
    print_message(MODE_DEBUG, "ASIC DB connected")
    keys = db.get_keys('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY', False)
    print_message(MODE_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": keys}, indent=4))

    rt = []
    for k in keys:
        rt.append(k.split('"')[3])  # prefix is the 4th '"'-delimited token
    return sorted(rt)
Example #5
def get_route_entries():
    db = ConfigDBConnector()
    db.db_connect('ASIC_DB')
    print_message(syslog.LOG_DEBUG, "ASIC DB connected")
    keys = db.get_keys('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY', False)

    rt = []
    for k in keys:
        e = k.lower().split('"')[3]  # prefix is the 4th '"'-delimited token
        if not is_local(e):
            rt.append(e)
    rt = sorted(rt)
    print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": rt}, indent=4))
    return rt
Example #6
def get_routes():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(syslog.LOG_DEBUG, "APPL DB connected for routes")
    keys = db.get_keys('ROUTE_TABLE')

    valid_rt = []
    for k in keys:
        if not is_local(k):
            valid_rt.append(add_prefix_ifnot(k.lower()))

    print_message(syslog.LOG_DEBUG, json.dumps({"ROUTE_TABLE": sorted(valid_rt)}, indent=4))
    return sorted(valid_rt)
Example #7
def get_routes():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for routes")
    keys = db.get_keys('ROUTE_TABLE')
    print_message(MODE_DEBUG, json.dumps({"ROUTE_TABLE": keys}, indent=4))

    valid_rt = []
    skip_rt = []
    for k in keys:
        if db.get_entry('ROUTE_TABLE', k).get('nexthop', '') != '':
            valid_rt.append(add_prefix_ifnot(k))
        else:
            skip_rt.append(k)

    print_message(MODE_INFO, json.dumps({"skipped_routes" : skip_rt}, indent=4))
    return sorted(valid_rt)
Example #8
def get_routes():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for routes")
    keys = db.get_keys('ROUTE_TABLE')
    print_message(MODE_DEBUG, json.dumps({"ROUTE_TABLE": keys}, indent=4))

    valid_rt = []
    skip_rt = []
    for k in keys:
        if db.get_entry('ROUTE_TABLE', k).get('nexthop', '') != '':
            valid_rt.append(add_prefix_ifnot(k))
        else:
            skip_rt.append(k)

    print_message(MODE_INFO, json.dumps({"skipped_routes": skip_rt}, indent=4))
    return sorted(valid_rt)
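Examples #7 and #8 keep a route only when its entry carries a non-empty nexthop. The filtering reduces to a dictionary scan, sketched here with stand-in data:

route_table = {
    '10.0.0.0/24': {'nexthop': '10.0.0.1', 'ifname': 'Ethernet0'},
    '10.0.1.0/24': {'nexthop': '', 'ifname': 'Ethernet4'},
}

valid_rt = sorted(k for k, e in route_table.items() if e.get('nexthop', '') != '')
skip_rt = [k for k, e in route_table.items() if e.get('nexthop', '') == '']
print(valid_rt)  # ['10.0.0.0/24']
print(skip_rt)   # ['10.0.1.0/24']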
Example #9
def get_interfaces():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for interfaces")

    intf = []
    keys = db.get_keys('INTF_TABLE')
    print_message(MODE_DEBUG, json.dumps({"APPL_DB_INTF": keys}, indent=4))

    for k in keys:
        subk = k.split(':')
        alias = subk[0]
        # Re-join the tail so IPv6 addresses (which contain ':') stay intact
        ip_prefix = ":".join(subk[1:])
        ip = add_prefix(ip_prefix.split("/")[0])
        if alias in ("eth0", "docker0"):
            continue
        if alias != "lo":
            intf.append(ip_subnet(ip_prefix))
        intf.append(ip)
    return sorted(intf)
Example #10
def get_interfaces():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(MODE_DEBUG, "APPL DB connected for interfaces")

    intf = []
    keys = db.get_keys('INTF_TABLE')
    print_message(MODE_DEBUG, json.dumps({"APPL_DB_INTF": keys}, indent=4))

    for k in keys:
        subk = k.split(':')
        alias = subk[0]
        # Re-join the tail so IPv6 addresses (which contain ':') stay intact
        ip_prefix = ":".join(subk[1:])
        ip = add_prefix(ip_prefix.split("/")[0])
        if alias in ("eth0", "docker0"):
            continue
        if alias != "lo":
            intf.append(ip_subnet(ip_prefix))
        intf.append(ip)
    return sorted(intf)
Example #11
def get_interfaces():
    db = ConfigDBConnector()
    db.db_connect('APPL_DB')
    print_message(syslog.LOG_DEBUG, "APPL DB connected for interfaces")

    intf = []
    keys = db.get_keys('INTF_TABLE')

    for k in keys:
        lst = re.split(':', k.lower(), maxsplit=1)
        if len(lst) == 1:
            # No IP address in key; ignore
            continue

        ip = add_prefix(lst[1].split("/", -1)[0])
        if not is_local(ip):
            intf.append(ip)

    print_message(syslog.LOG_DEBUG, json.dumps({"APPL_DB_INTF": sorted(intf)}, indent=4))
    return sorted(intf)
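Example #11 switches to maxsplit=1, which matters for IPv6: the address itself contains ':' characters, so an unbounded split would shred it. A quick illustration:

import re

for k in ['Ethernet0:10.0.0.1/31', 'Ethernet0:fc00::1/126']:
    print(re.split(':', k.lower(), maxsplit=1))
# ['ethernet0', '10.0.0.1/31']
# ['ethernet0', 'fc00::1/126']  <- the IPv6 address survives intact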
Example #12
class TunnelPacketHandler(object):

    def __init__(self):
        self.config_db = ConfigDBConnector()
        self.config_db.connect()
        self.state_db = SonicV2Connector()
        self.state_db.connect(STATE_DB)
        self._portchannel_intfs = None
        self.up_portchannels = None
        self.netlink_api = IPRoute()

    @property
    def portchannel_intfs(self):
        """
        Gets all portchannel interfaces and IPv4 addresses in config DB

        Returns:
            (list) Tuples of a portchannel interface name (str) and
                   associated IPv4 address (str)
        """
        if self._portchannel_intfs is None:
            intf_keys = self.config_db.get_keys(PORTCHANNEL_INTERFACE_TABLE)
            portchannel_intfs = []

            for key in intf_keys:
                if isinstance(key, tuple) and len(key) > 1:
                    if ip_interface(key[1]).version == 4:
                        portchannel_intfs.append(key)

            self._portchannel_intfs = portchannel_intfs

        return self._portchannel_intfs

    def get_portchannel_index_mapping(self):
        """
        Gets a mapping of interface kernel indices to portchannel interfaces

        Returns:
            (dict) mapping kernel interface indices (int) to portchannel
                   interface tuples
        """
        index_map = {}
        for portchannel in self.portchannel_intfs:
            index = self.netlink_api.link_lookup(ifname=portchannel[0])[0]
            index_map[index] = portchannel

        return index_map

    def get_up_portchannels(self):
        """
        Returns the portchannels which are operationally up

        Returns:
            (list) of interface names which are up, as strings
        """
        pc_index_map = self.get_portchannel_index_mapping()
        pc_indices = list(pc_index_map.keys())
        link_statuses = self.netlink_api.get_links(*pc_indices)
        up_portchannels = []

        for status in link_statuses:
            if status['state'] == 'up':
                port_index = status['index']
                up_portchannels.append(pc_index_map[port_index][0])

        return up_portchannels

    def all_portchannels_established(self):
        """
        Checks if the portchannel interfaces are established

        Note that this status does not indicate operational state
        Returns:
            (bool) True, if all interfaces are established
                   False, otherwise
        """
        intfs = self.portchannel_intfs
        for intf in intfs:
            intf_table_name = INTF_TABLE_TEMPLATE.format(intf[0], intf[1])
            intf_state = self.state_db.get(
                                STATE_DB,
                                intf_table_name,
                                STATE_KEY
                              )

            if intf_state and intf_state.lower() != 'ok':
                return False

        return True

    def wait_for_portchannels(self, interval=5, timeout=60):
        """
        Continuously checks if all portchannel host interfaces are established

        Args:
            interval: the interval (in seconds) at which to perform the check
            timeout: maximum allowed duration (in seconds) to wait for
                     interfaces to come up

        Raises:
            RuntimeError if the timeout duration is reached and interfaces are
                still not up
        """
        start = datetime.now()

        while (datetime.now() - start).seconds < timeout:
            if self.all_portchannels_established():
                logger.log_info("All portchannel intfs are established")
                return None
            logger.log_info("Not all portchannel intfs are established")
            time.sleep(interval)

        raise RuntimeError('Portchannel intfs were not established '
                           'within {} seconds'.format(timeout))

    def get_ipinip_tunnel_addrs(self):
        """
        Get the IP addresses used for the IPinIP tunnel

        These should be the Loopback0 addresses for this device and the
        peer device

        Returns:
            ((str) self_loopback_ip, (str) peer_loopback_ip)
            or
            (None, None) If the tunnel type is not IPinIP
                         or
                         if an error is encountered. This most likely means
                         the host device is not a dual ToR device
        """
        try:
            peer_switch = self.config_db.get_keys(PEER_SWITCH_TABLE)[0]
            tunnel = self.config_db.get_keys(TUNNEL_TABLE)[0]
        except IndexError:
            logger.log_warning('PEER_SWITCH or TUNNEL table '
                               'not found in config DB')
            return None, None

        try:
            tunnel_table = self.config_db.get_entry(TUNNEL_TABLE, tunnel)
            tunnel_type = tunnel_table[TUNNEL_TYPE_KEY].lower()
            self_loopback_ip = tunnel_table[DST_IP_KEY]
            peer_loopback_ip = self.config_db.get_entry(
                                    PEER_SWITCH_TABLE, peer_switch
                                    )[ADDRESS_IPV4_KEY]
        except KeyError as e:
            logger.log_warning(
                'PEER_SWITCH or TUNNEL table missing data, '
                'could not find key {}'
                .format(e)
            )
            return None, None

        if tunnel_type == IPINIP_TUNNEL:
            return self_loopback_ip, peer_loopback_ip

        return None, None

    def get_inner_pkt_type(self, packet):
        """
        Get the type of an inner encapsulated packet

        Returns:
            (class) scapy's IP class if the inner packet is IPv4
            (class) scapy's IPv6 class if the inner packet is IPv6
            (bool)  False if `packet` is not an IPinIP packet
        """
        if packet.haslayer(IP):
            # Determine inner packet type based on IP protocol number
            # The outer packet type should always be IPv4
            if packet[IP].proto == 4:
                return IP
            elif packet[IP].proto == 41:
                return IPv6
        return False

    def wait_for_netlink_msgs(self):
        """
        Gathers any RTM_NEWLINK messages

        Returns:
            (list) containing any received messages
        """
        msgs = []
        with IPRoute() as ipr:
            ipr.bind()
            for msg in ipr.get():
                if msg['event'] == RTM_NEWLINK:
                    msgs.append(msg)

        return msgs

    def sniffer_restart_required(self, messages):
        """
        Determines if the packet sniffer needs to be restarted

        A restart is required if all of the following conditions are met:
            1. A netlink message of type RTM_NEWLINK is received
               (this is checked by `wait_for_netlink_msgs`)
            2. The interface index of the message corresponds to a portchannel
               interface
            3. The state of the interface in the message is 'up'
                    Here, we do not care about an interface going down since
                    the sniffer is able to continue sniffing on the other
                    interfaces. However, if an interface has gone down and
                    come back up, we need to restart the sniffer to be able
                    to sniff traffic on the interface that has come back up.
        """
        pc_index_map = self.get_portchannel_index_mapping()
        for msg in messages:
            if msg['index'] in pc_index_map:
                if msg['state'] == 'up':
                    logger.log_info('{} came back up, sniffer restart required'
                                    .format(pc_index_map[msg['index']]))
                    return True
        return False

    def listen_for_tunnel_pkts(self):
        """
        Listens for tunnel packets that are trapped to CPU

        These packets may be trapped if there is no neighbor info for the
        inner packet destination IP in the hardware.
        """

        def _ping_inner_dst(packet):
            """
            Pings the inner destination IP for an encapsulated packet

            Args:
                packet: The encapsulated packet received
            """
            inner_packet_type = self.get_inner_pkt_type(packet)
            if inner_packet_type and packet[IP].dst == self_ip:
                cmds = ['timeout', '0.2', 'ping', '-c1',
                        '-W1', '-i0', '-n', '-q']
                if inner_packet_type == IPv6:
                    cmds.append('-6')
                dst_ip = packet[IP].payload[inner_packet_type].dst
                cmds.append(dst_ip)
                logger.log_info("Running command '{}'".format(' '.join(cmds)))
                subprocess.run(cmds, stdout=subprocess.DEVNULL)

        self_ip, peer_ip = self.get_ipinip_tunnel_addrs()
        if self_ip is None or peer_ip is None:
            logger.log_notice('Could not get tunnel addresses from '
                              'config DB, exiting...')
            return None

        packet_filter = 'host {} and host {}'.format(self_ip, peer_ip)
        logger.log_notice('Starting tunnel packet handler for {}'
                          .format(packet_filter))

        sniff_intfs = self.get_up_portchannels()
        logger.log_info("Listening on interfaces {}".format(sniff_intfs))

        sniffer = AsyncSniffer(
            iface=sniff_intfs,
            filter=packet_filter,
            prn=_ping_inner_dst
        )
        sniffer.start()
        while True:
            msgs = self.wait_for_netlink_msgs()
            if self.sniffer_restart_required(msgs):
                sniffer.stop()
                sniff_intfs = self.get_up_portchannels()
                logger.log_notice('Restarting tunnel packet handler on '
                                  'interfaces {}'.format(sniff_intfs))
                sniffer = AsyncSniffer(
                    iface=sniff_intfs,
                    filter=packet_filter,
                    prn=_ping_inner_dst
                )
                sniffer.start()

    def run(self):
        self.wait_for_portchannels()
        self.listen_for_tunnel_pkts()
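The restart decision in sniffer_restart_required reduces to a small predicate over netlink-style messages. A self-contained sketch of that logic with stand-in data (real messages come from pyroute2):

pc_index_map = {17: ('PortChannel0001', '10.0.0.56/31')}  # stand-in mapping
messages = [
    {'index': 42, 'state': 'up'},    # not a portchannel: ignored
    {'index': 17, 'state': 'down'},  # going down: sniffing continues elsewhere
    {'index': 17, 'state': 'up'},    # portchannel came back up: restart
]

def restart_required(msgs, index_map):
    return any(m['index'] in index_map and m['state'] == 'up' for m in msgs)

print(restart_required(messages, pc_index_map))  # True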
Example #13
class DropMon(object):
    def __init__(self):
        # connect CONFIG DB
        self.config_db = ConfigDBConnector()
        self.config_db.connect()

        # connect COUNTERS_DB
        self.counters_db = ConfigDBConnector()
        self.counters_db.db_connect('COUNTERS_DB')

        # connect APPL DB
        self.app_db = ConfigDBConnector()
        self.app_db.db_connect('APPL_DB')

    def config_drop_mon(self, args):
        self.config_db.mod_entry(
            TAM_DROP_MONITOR_FLOW_TABLE, args.flowname, {
                'acl-table': args.acl_table,
                'acl-rule': args.acl_rule,
                'collector': args.dropcollector,
                'sample': args.dropsample
            })
        return

    def config_drop_mon_aging(self, args):
        self.config_db.mod_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                 "aging",
                                 {'aging-interval': args.aginginterval})
        return

    def config_drop_mon_sample(self, args):
        self.config_db.mod_entry(SAMPLE_RATE_TABLE, args.samplename,
                                 {'sampling-rate': args.rate})
        return

    def clear_single_drop_mon_flow(self, key):
        entry = self.config_db.get_entry(TAM_DROP_MONITOR_FLOW_TABLE, key)
        if entry:
            self.config_db.set_entry(TAM_DROP_MONITOR_FLOW_TABLE, key, None)
        else:
            return False
        return

    def clear_drop_mon_flow(self, args):
        key = args.flowname
        if key == "all":
            # Get all the flow keys
            table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE)
            if not table_data:
                return True
            # Clear each flow key
            for key in table_data:
                self.clear_single_drop_mon_flow(key)
        else:
            # Clear the specified flow entry
            self.clear_single_drop_mon_flow(key)

        return

    def clear_drop_mon_sample(self, args):
        key = args.samplename
        entry = self.config_db.get_entry(SAMPLE_RATE_TABLE, key)
        if entry:
            self.config_db.set_entry(SAMPLE_RATE_TABLE, key, None)
        else:
            print "Entry Not Found"
            return False
        return

    def clear_drop_mon_aging_int(self, args):
        key = "aging"
        entry = self.config_db.get_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                         key)
        if entry:
            self.config_db.set_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                     key, None)
        else:
            return False
        return

    def show_flow(self, args):
        self.get_print_all_dropmon_flows(args.flowname)
        return

    def get_dropmon_flow_stat(self, flowname):
        api_response_stat = {}
        api_response, entryfound = self.get_dropmon_flow_info(flowname)
        api_response_stat['flow-name'] = flowname
        if entryfound is not None:
            for k in api_response:
                if k == "ietf-ts:each-flow-data":
                    acl_rule = api_response['ietf-ts:each-flow-data'][
                        'acl-rule']
                    acl_table = api_response['ietf-ts:each-flow-data'][
                        'acl-table']
                    api_response_stat['rule-name'] = acl_rule
                    api_response_stat['table-name'] = acl_table

            # Consult the ACL counters only when the flow entry exists;
            # otherwise acl_rule would be unbound here
            acl_rule_keys = self.config_db.get_keys(ACL_RULE_TABLE_PREFIX)
            for acl_rule_key in acl_rule_keys:
                if acl_rule_key[1] == acl_rule:
                    acl_counter_key = 'COUNTERS:' + acl_rule_key[
                        0] + ':' + acl_rule_key[1]
                    raw_dropmon_stats = self.counters_db.get_all(
                        self.counters_db.COUNTERS_DB, acl_counter_key)
                    api_response_stat['ietf-ts:dropmon-stats'] = \
                        raw_dropmon_stats

        return api_response_stat, entryfound

    def get_print_all_dropmon_stats(self, name):
        stat_dict = {}
        stat_list = []
        if name != 'all':
            api_response, entryfound = self.get_dropmon_flow_stat(name)
            if entryfound is not None:
                stat_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE)
            # Get data for all keys
            for k in table_data:
                api_each_stat_response, entryfound = self.get_dropmon_flow_stat(
                    k)
                if entryfound is not None:
                    stat_list.append(api_each_stat_response)

        stat_dict['stat-list'] = stat_list
        show_cli_output("show_statistics_flow.j2", stat_dict)
        return

    def show_statistics(self, args):
        self.get_print_all_dropmon_stats(args.flowname)
        return

    def show_aging_interval(self, args):
        key = "aging"
        entry = self.config_db.get_entry(TAM_DROP_MONITOR_AGING_INTERVAL_TABLE,
                                         key)
        if entry:
            print "Aging interval : {}".format(entry['aging-interval'])
        return

    def show_sample(self, args):
        self.get_print_all_sample(args.samplename)
        return

    def get_dropmon_flow_info(self, k):
        flow_data = {}
        flow_data['acl-table-name'] = ''
        flow_data['sampling-rate'] = ''
        flow_data['collector'] = ''

        api_response = {}
        key = TAM_DROP_MONITOR_FLOW_TABLE + '|' + k
        raw_flow_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        if raw_flow_data and 'sample' in raw_flow_data:
            sample = raw_flow_data['sample']
            rate = self.config_db.get_entry(SAMPLE_RATE_TABLE, sample)
            raw_flow_data['sample'] = rate.get('sampling-rate', '')
        api_response['ietf-ts:flow-key'] = k
        api_response['ietf-ts:each-flow-data'] = raw_flow_data
        return api_response, raw_flow_data

    def get_print_all_dropmon_flows(self, name):
        flow_dict = {}
        flow_list = []
        if name != 'all':
            api_response, entryfound = self.get_dropmon_flow_info(name)
            if entryfound is not None:
                flow_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_DROP_MONITOR_FLOW_TABLE)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_dropmon_flow_info(
                    k)
                if entryfound is not None:
                    flow_list.append(api_each_flow_response)

        flow_dict['flow-list'] = flow_list
        show_cli_output("show_drop_monitor_flow.j2", flow_dict)
        return

    def get_sample_info(self, k):
        sample_data = {}
        sample_data['sampling-rate'] = ''

        api_response = {}
        key = SAMPLE_RATE_TABLE + '|' + k
        raw_sample_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:sample-key'] = k
        api_response['ietf-ts:each-sample-data'] = raw_sample_data
        return api_response, raw_sample_data

    def get_print_all_sample(self, name):
        sample_dict = {}
        sample_list = []
        if name != 'all':
            api_response, entryfound = self.get_sample_info(name)
            if entryfound is not None:
                sample_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(SAMPLE_RATE_TABLE)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_sample_info(k)
                if entryfound is not None:
                    sample_list.append(api_each_flow_response)

        sample_dict['sample-list'] = sample_list
        show_cli_output("show_sample.j2", sample_dict)
        return
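DropMon reads raw hashes with get_all(), which expects the fully qualified Redis key ('<TABLE>|<key>' in CONFIG_DB), while get_entry()/set_entry() take the table and key separately. A sketch of the two key styles with stand-in names:

TAM_DROP_MONITOR_FLOW_TABLE = 'TAM_DROP_MONITOR_FLOW_TABLE'  # stand-in constant

# CONFIG_DB uses '|' between table and key, as in get_dropmon_flow_info()
redis_key = TAM_DROP_MONITOR_FLOW_TABLE + '|' + 'flow1'
print(redis_key)  # TAM_DROP_MONITOR_FLOW_TABLE|flow1

# COUNTERS_DB uses ':' instead, as in get_dropmon_flow_stat()
acl_counter_key = 'COUNTERS:' + 'DROP_ACL' + ':' + 'RULE_1'
print(acl_counter_key)  # COUNTERS:DROP_ACL:RULE_1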
Example #14
class Ts(object):
    def __init__(self):
        # connect CONFIG DB
        self.config_db = ConfigDBConnector()
        self.config_db.connect()

        # connect COUNTER DB
        self.counters_db = ConfigDBConnector()
        self.counters_db.db_connect('COUNTERS_DB')

        # connect APPL DB
        self.app_db = ConfigDBConnector()
        self.app_db.db_connect('APPL_DB')

    def config_enable(self, args):
        """ Enable ifa """
        key = 'feature'
        self.config_db.set_entry(TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX, key,
                                 {'enable': "true"})
        print "Enabled IFA"

        return

    def config_disable(self, args):
        """ Disable ifa """
        key = 'feature'
        self.config_db.set_entry(TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX, key,
                                 {'enable': "false"})
        print "Disabled IFA"

        return

    def config_flow(self, args):
        key = TAM_INT_IFA_FLOW_TS_TABLE_PREFIX + '|' + args.flowname
        entry = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        if entry is None:
            if args.acl_table_name:
                self.config_db.mod_entry(
                    TAM_INT_IFA_FLOW_TS_TABLE_PREFIX, args.flowname,
                    {'acl-table-name': args.acl_table_name})
            if args.acl_rule_name:
                self.config_db.mod_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX,
                                         args.flowname,
                                         {'acl-rule-name': args.acl_rule_name})
        else:
            print "Entry Already Exists"
            return False
        return

    def clear_each_flow(self, flowname):
        entry = self.config_db.get_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX,
                                         flowname)
        if entry:
            self.config_db.set_entry(TAM_INT_IFA_FLOW_TS_TABLE_PREFIX,
                                     flowname, None)
        else:
            print "Entry Not Found"
            return False

        return

    def clear_flow(self, args):
        key = args.flowname
        if key == "all":
            # Get all the flow keys
            table_data = self.config_db.get_keys(
                TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            if not table_data:
                return True
            # Clear each flow key
            for key in table_data:
                self.clear_each_flow(key)
        else:
            # Clear the specified flow entry
            self.clear_each_flow(key)

        return

    def show_flow(self, args):
        self.get_print_all_ifa_flows(args.flowname)
        return

    def show_status(self):
        # Get data for all keys
        flowtable_keys = self.config_db.get_keys(
            TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)

        api_response = {}
        key = TAM_INT_IFA_TS_FEATURE_TABLE_PREFIX + '|' + 'feature'
        raw_data_feature = self.config_db.get_all(self.config_db.CONFIG_DB,
                                                  key)
        api_response['ietf-ts:feature-data'] = raw_data_feature
        api_inner_response = {}
        api_inner_response["num-of-flows"] = len(flowtable_keys)
        api_response['ietf-ts:num-of-flows'] = api_inner_response
        key = TAM_DEVICE_TABLE_PREFIX + '|' + 'device'
        raw_data_device = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:device-data'] = raw_data_device
        show_cli_output("show_status.j2", api_response)

        return

    def get_ifa_flow_stat(self, flowname):
        api_response_stat = {}
        api_response, entryfound = self.get_ifa_flow_info(flowname)
        api_response_stat['flow-name'] = flowname
        if entryfound is not None:
            for k in api_response:
                if k == "ietf-ts:each-flow-data":
                    acl_rule_name = api_response['ietf-ts:each-flow-data'][
                        'acl-rule-name']
                    acl_table_name = api_response['ietf-ts:each-flow-data'][
                        'acl-table-name']
                    api_response_stat['rule-name'] = acl_rule_name
                    api_response_stat['table-name'] = acl_table_name

            # Consult the ACL counters only when the flow entry exists;
            # otherwise acl_rule_name would be unbound here
            acl_rule_keys = self.config_db.get_keys(ACL_RULE_TABLE_PREFIX)
            for acl_rule_key in acl_rule_keys:
                if acl_rule_key[1] == acl_rule_name:
                    acl_counter_key = 'COUNTERS:' + acl_rule_key[
                        0] + ':' + acl_rule_key[1]
                    raw_ifa_stats = self.counters_db.get_all(
                        self.counters_db.COUNTERS_DB, acl_counter_key)
                    api_response_stat['ietf-ts:ifa-stats'] = raw_ifa_stats

        return api_response_stat, entryfound

    def get_print_all_ifa_stats(self, name):
        stat_dict = {}
        stat_list = []
        if name != 'all':
            api_response, entryfound = self.get_ifa_flow_stat(name)
            if entryfound is not None:
                stat_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(
                TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_stat_response, entryfound = self.get_ifa_flow_stat(k)
                if entryfound is not None:
                    stat_list.append(api_each_stat_response)

        stat_dict['stat-list'] = stat_list
        show_cli_output("show_statistics_flow.j2", stat_dict)
        return

    def show_statistics(self, args):
        self.get_print_all_ifa_stats(args.flowname)
        return

    def get_ifa_flow_info(self, k):
        flow_data = {}
        flow_data['acl-table-name'] = ''
        flow_data['sampling-rate'] = ''
        flow_data['collector'] = ''

        api_response = {}
        key = TAM_INT_IFA_FLOW_TS_TABLE_PREFIX + '|' + k
        raw_flow_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['ietf-ts:flow-key'] = k
        api_response['ietf-ts:each-flow-data'] = raw_flow_data
        return api_response, raw_flow_data

    def get_print_all_ifa_flows(self, name):
        flow_dict = {}
        flow_list = []
        if name != 'all':
            api_response, entryfound = self.get_ifa_flow_info(name)
            if entryfound is not None:
                flow_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(
                TAM_INT_IFA_FLOW_TS_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_ifa_flow_info(k)
                if entryfound is not None:
                    flow_list.append(api_each_flow_response)

        flow_dict['flow-list'] = flow_list
        show_cli_output("show_flow.j2", flow_dict)
        return

    def get_ifa_supported_info(self):
        key = 'TAM_INT_IFA_TS_FEATURE_TABLE|feature'
        data = self.config_db.get_all(self.config_db.CONFIG_DB, key)

        if data is None:
            return

        if data['enable'] == "true":
            print "TAM INT IFA TS Supported - True"
            return True
        elif data['enable'] == "false":
            print "TAM INT IFA TS Supported - False "
            return False

        return

    def get_ifa_enabled_info(self):
        key = 'SWITCH_TABLE:switch'
        data = self.app_db.get(self.app_db.APPL_DB, key, 'ifa_enabled')

        if data == 'True':
            return True

        return False
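Note the split between mod_entry (merge fields into an existing entry, as config_flow uses so the two ACL fields accumulate) and set_entry (replace the entry wholesale; None deletes it). The difference, sketched against an in-memory stand-in table:

table = {'flow1': {'acl-table-name': 'T1'}}

def mod_entry(key, data):   # merge semantics, like ConfigDBConnector.mod_entry
    table.setdefault(key, {}).update(data)

def set_entry(key, data):   # replace semantics; None deletes
    if data is None:
        table.pop(key, None)
    else:
        table[key] = data

mod_entry('flow1', {'acl-rule-name': 'R1'})
print(table['flow1'])  # {'acl-table-name': 'T1', 'acl-rule-name': 'R1'}
set_entry('flow1', None)
print(table)           # {}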
Example #15
class Tam(object):
    def __init__(self):
        # connect CONFIG DB
        self.config_db = ConfigDBConnector()
        self.config_db.connect()

        # connect APPL DB
        self.app_db = ConfigDBConnector()
        self.app_db.db_connect('APPL_DB')

    def get_tam_collector_info(self, k):
        api_response = {}
        key = TAM_COLLECTOR_TABLE_PREFIX + '|' + k
        raw_coll_data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        api_response['coll-key'] = k
        api_response['each-coll-data'] = raw_coll_data
        return api_response, raw_coll_data

    def get_print_all_tam_collectors(self, name):
        coll_dict = {}
        coll_list = []
        if name != 'all':
            api_response, entryfound = self.get_tam_collector_info(name)
            if entryfound is not None:
                coll_list.append(api_response)
        else:
            table_data = self.config_db.get_keys(TAM_COLLECTOR_TABLE_PREFIX)
            # Get data for all keys
            for k in table_data:
                api_each_flow_response, entryfound = self.get_tam_collector_info(
                    k)
                if entryfound is not None:
                    coll_list.append(api_each_flow_response)

        coll_dict['flow-list'] = coll_list
        show_cli_output("show_collector.j2", coll_dict)
        return

    def config_device_id(self, args):
        key = 'device'
        entry = self.config_db.get_entry(TAM_DEVICE_TABLE_PREFIX, key)
        if entry is None:
            if args.deviceid:
                self.config_db.set_entry(TAM_DEVICE_TABLE_PREFIX, key,
                                         {'deviceid': args.deviceid})
        else:
            if args.deviceid:
                entry_value = entry.get('deviceid', '')

                if entry_value != args.deviceid:
                    self.config_db.mod_entry(TAM_DEVICE_TABLE_PREFIX, key,
                                             {'deviceid': args.deviceid})
        return

    def config_collector(self, args):
        if args.iptype == 'ipv4':
            if args.ipaddr == "0.0.0.0":
                print "Collector IP should be non-zero ip address"
                return False

        if args.iptype == 'ipv6':
            print "IPv6 Collector type not supported"
            return False

        self.config_db.mod_entry(
            TAM_COLLECTOR_TABLE_PREFIX, args.collectorname, {
                'ipaddress-type': args.iptype,
                'ipaddress': args.ipaddr,
                'port': args.port
            })

        return

    def clear_device_id(self):
        key = 'device'
        entry = self.config_db.get_entry(TAM_DEVICE_TABLE_PREFIX, key)
        if entry:
            self.config_db.set_entry(TAM_DEVICE_TABLE_PREFIX, key, None)
        return

    def clear_collector(self, args):
        key = args.collectorname
        entry = self.config_db.get_entry(TAM_COLLECTOR_TABLE_PREFIX, key)
        if entry:
            self.config_db.set_entry(TAM_COLLECTOR_TABLE_PREFIX, key, None)
        else:
            print "Entry Not Found"
            return False
        return

    def show_device_id(self):
        key = TAM_DEVICE_TABLE_PREFIX + '|' + 'device'
        data = self.config_db.get_all(self.config_db.CONFIG_DB, key)
        print "TAM Device identifier"
        print "-------------------------------"
        if data:
            if 'deviceid' in data:
                print "Device Identifier    - ", data['deviceid']
        return

    def show_collector(self, args):
        self.get_print_all_tam_collectors(args.collectorname)
        return
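For completeness, a hedged usage sketch of the collector workflow; the args object here is a stand-in namespace, whereas the real CLI would build it with argparse:

from types import SimpleNamespace

args = SimpleNamespace(collectorname='col1', iptype='ipv4',
                       ipaddr='10.10.10.1', port='9070')

# Requires a live SONiC CONFIG_DB, so left commented out:
# tam = Tam()
# tam.config_collector(args)   # writes TAM_COLLECTOR_TABLE|col1
# tam.show_collector(SimpleNamespace(collectorname='col1'))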