Example #1
    def __init__(self, *args, **kwargs):
        self._view = None
        self._session = None
        self._active = False
        self._filters = AttrDict(
            {n: f(provider=self, name=n)
             for n, f in self.FILTERS.items()})

        rules = AttrDict(self.config.rules.label,
                         **config.settings.profile.rules.label)

        labels = AttrDict(self.config.labels, **config.settings.profile.labels)

        self.rule_map = AttrDict([(re.compile(k, re.IGNORECASE), v)
                                  for k, v in [(r, rules[r])
                                               for r in rules.keys()]])

        self.highlight_map = AttrDict([(re.compile(k,
                                                   re.IGNORECASE), labels[v])
                                       for k, v in rules.items()])

        self.highlight_re = re.compile(
            "(" + "|".join([k.pattern
                            for k in self.highlight_map.keys()]) + ")",
            re.IGNORECASE)
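
A minimal sketch of how these rule and highlight maps behave, using toy rules and labels dicts in place of the provider/profile config (all names and patterns below are illustrative, not from the original code):

import re

rules = {"football|soccer": "sports", "election": "news"}
labels = {"sports": {"color": "green"}, "news": {"color": "blue"}}

rule_map = {re.compile(k, re.IGNORECASE): v for k, v in rules.items()}
highlight_map = {re.compile(k, re.IGNORECASE): labels[v] for k, v in rules.items()}
highlight_re = re.compile(
    "(" + "|".join(k.pattern for k in highlight_map) + ")", re.IGNORECASE)

title = "Soccer highlights"
print(next(v for r, v in rule_map.items() if r.search(title)))   # sports
print(bool(highlight_re.search(title)))                          # True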
Example #2
    def get_port_to_mac(mac_address_table, fmt='std', port_prefix=' '):
        """Convert MAC address table to dictionary.

        Args:
            mac_address_table (str): MAC address table. Lines delimited
                with line feed.  Assumes a header row with "Port" as a
                column header, followed by a delimiter row composed of
                dashes ('-') which delimit columns.  Handles MAC addresses
                formatted as 'cc:cc:cc:cc:cc:cc' or 'cccc.cccc.cccc'.
            fmt (str): 'std' normalizes MAC addresses to the
                'cc:cc:cc:cc:cc:cc' form and strips port_prefix from port
                names; any other value leaves them as found in the table.
            port_prefix (str): Prefix stripped from port names when
                fmt='std'.

        Returns:
            AttrDict: Keys are port numbers (str) and values are lists of
                MAC addresses.
        """
        import lib.logger as logger
        log = logger.getlogger()
        pos = None
        mac_dict = AttrDict()

        _mac_iee802 = r'([\dA-F]{2}[\.:-]){5}([\dA-F]{2})'
        _mac_cisco = r'([\dA-F]{4}\.){2}[\dA-F]{4}'
        _mac_all = "%s|%s" % (_mac_iee802, _mac_cisco)
        _mac_regex = re.compile(_mac_all, re.I)

        mac_address_table = mac_address_table.splitlines()
        p2 = re.compile('Port', re.IGNORECASE)
        for line in mac_address_table:
            # find row with 'Port' label
            match = p2.search(line)
            if match:
                pos = match.start()
            # find header separator row
            if re.search(r'--+', line):
                log.debug('Found header separator row: {}'.format(line))
                matches = re.finditer(r'--+', line)
                for i, match in enumerate(matches):
                    # find column aligned with 'Port'
                    if pos >= match.span()[0] and pos < match.span()[1]:
                        port_span = (match.span()[0], match.span()[1])
            # find rows with MACs
            match = _mac_regex.search(line)

            if match:
                mac = match.group()
                log.debug('Found mac address: {}'.format(mac))
                _mac = mac
                if fmt == 'std':
                    _mac = mac[0:2]
                    mac = re.sub(r'\.|\:', '', mac)
                    for i in (2, 4, 6, 8, 10):
                        _mac = _mac + ':' + mac[i:i + 2]
                # Extract port section of row
                port = line[port_span[0] - 1:port_span[1]].strip(' ')
                if fmt == 'std':
                    port = port.replace(port_prefix, '')
                if port not in mac_dict.keys():
                    mac_dict[port] = [_mac]
                else:
                    mac_dict[port].append(_mac)
        return mac_dict
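
A usage sketch with a fabricated two-row MAC address table (assuming the surrounding imports such as AttrDict and lib.logger are available and the function is reachable as a plain function or staticmethod; real switch output varies):

sample_table = (
    ' Port     MAC Address\n'
    ' -------  -----------------\n'
    ' 1        cc:cc:cc:cc:cc:01\n'
    ' 2        cccc.cccc.cc02\n'
)
print(get_port_to_mac(sample_table))
# With the default fmt='std', port '1' maps to ['cc:cc:cc:cc:cc:01']
# and port '2' to ['cc:cc:cc:cc:cc:02'].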
Example #3
class VLANS:
    '''
    Manage a bunch of VLANs, as a dictionary
    '''
    def __init__(self, vlans=None, delemiter=",", range_delemiter="-"):
        super().__init__()
        self._delemiter = delemiter
        self._range_delemiter = range_delemiter
        self._vlans = AttrDict()
        if vlans:
            self.__iadd__(vlans)

    def __add__(self, other):
        """
        Add two VLANS to each other
        """
        if not isinstance(other, VLANS):
            raise TypeError("Error: Can only handle object of VLANS()")
        tmp = self.copy()
        for vlan in other._vlans.values():
            tmp._vlans[vlan.id] = vlan
        return tmp

    def __iadd__(self, other):
        if isinstance(other, VLANS):
            for vlan in other._vlans.values():
                self._vlans[vlan.id] = vlan
        elif isinstance(other, VLAN):
            self._vlans[other.id] = other
        else:
            raise TypeError(
                "Error: Can only handle object of VLANS() or VLAN() got %s" %
                type(other))
        return self

    def __str__(self):
        return dict_to_vlan_str(self._vlans,
                                delemiter=self._delemiter,
                                range_delemiter=self._range_delemiter)

    def __repr__(self):
        s = ""
        for vlan in self._vlans.values():
            s += "(%s)" % vlan.to_str()
        return "VLANS(%s)" % s

    def __iter__(self):
        return iter(self._vlans)

    def items(self):
        for item in self._vlans.items():
            yield item

    def keys(self):
        for item in self._vlans.keys():
            yield item

    def values(self):
        for item in self._vlans.values():
            yield item
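
A hedged usage sketch; the real VLAN class and the dict_to_vlan_str helper are not shown above, so a minimal stand-in with an id attribute and a to_str() method is used here (AttrDict is assumed to be imported as in the class itself):

class VLAN:                              # illustrative stand-in only
    def __init__(self, vlan_id):
        self.id = vlan_id
    def to_str(self):
        return str(self.id)

vlans = VLANS()
vlans += VLAN(10)                        # __iadd__ accepts a single VLAN
vlans += VLAN(20)
vlans += VLANS(VLAN(30))                 # ...or another VLANS instance
print(list(vlans.keys()))                # [10, 20, 30]
print(repr(vlans))                       # VLANS((10)(20)(30))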
Example #4
def updateReport(_id):
    data = request.form
    data = AttrDict(data)
    for key in data.keys():
        print("key", key, "value", data[key] )
    _id = db.conn.update("report", d=data, primary_key="_id")
    return jsonify( id=_id )
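
The AttrDict wrapper only adds attribute-style access on top of the form mapping; a minimal sketch with a plain dict standing in for request.form (db.conn.update and the route decorator are outside this snippet, and AttrDict is assumed imported as above):

form = {"title": "Weekly report", "status": "draft"}     # stand-in for request.form
data = AttrDict(form)
print(data["title"], data.status)                         # same values, two access styles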
Example #6
def load():
    global PROVIDERS
    global DEFAULT_PROVIDER
    mgr = extension.ExtensionManager(
        namespace='streamglob.providers',
        on_load_failure_callback=log_plugin_exception,
    )
    PROVIDERS = AttrDict((x.name, x.plugin()) for x in mgr)

    load_config()

    if len(config.settings.profile.providers):
        # first listed in config
        DEFAULT_PROVIDER = list(config.settings.profile.providers.keys())[0]
    else:
        # first loaded
        DEFAULT_PROVIDER = list(PROVIDERS.keys())[0]
Example #7
class ValidateClusterHardware(object):
    """Discover and validate cluster hardware

    Args:
        config_file (str, optional): Path to the cluster config file
    """
    def __init__(self, config_file=None):
        self.log = logger.getlogger()
        try:
            self.cfg = Config(config_file)
            self.inv = Inventory(None, config_file)
        except UserException as exc:
            self.log.critical(exc)
            raise UserException(exc)
        # initialize ipmi list of access info
        self.ran_ipmi = False
        self.bmc_ai = {}
        vlan_ipmi = self.cfg.get_depl_netw_client_vlan(if_type='ipmi')[0]
        vlan_pxe = self.cfg.get_depl_netw_client_vlan(if_type='pxe')[0]
        self.dhcp_pxe_leases_file = GEN_PATH + \
            'logs/dnsmasq{}.leases'.format(vlan_pxe)
        self.dhcp_ipmi_leases_file = GEN_PATH + \
            'logs/dnsmasq{}.leases'.format(vlan_ipmi)
        self.tcp_dump_file = GEN_PATH + \
            'logs/tcpdump{}.out'.format(vlan_pxe)
        self.node_table_ipmi = AttrDict()
        self.node_table_pxe = AttrDict()
        self.node_list = []

    def _add_offset_to_address(self, addr, offset):
        """calculates an address with an offset added.
        Args:
            addr (str): ipv4 or cidr representation of address
            offset (int): integer offset
        Returns:
            addr_.ip (str) address in ipv4 representation
        """
        addr_ = IPNetwork(addr)
        addr_.value += offset
        return str(addr_.ip)
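
A quick worked example of the offset arithmetic, using netaddr's IPNetwork as the surrounding code does (the addresses are illustrative):

from netaddr import IPNetwork

addr_ = IPNetwork('192.168.12.0/24')
addr_.value += 5                         # shift the underlying integer address
print(str(addr_.ip))                     # 192.168.12.5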

    def _get_port_cnts(self):
        labels = self.cfg.get_sw_mgmt_label()
        ipmi_cnt = 0
        pxe_cnt = 0

        for label in labels:
            ipmi_cnt += len(self.cfg.get_client_switch_ports(label, 'ipmi'))
            pxe_cnt += len(self.cfg.get_client_switch_ports(label, 'pxe'))
        return ipmi_cnt, pxe_cnt

    def _get_credentials(self, node_addr_list, cred_list):
        """ Attempts to discover bmc credentials and generate a list of all
        discovered nodes.  For each node try all available credentials.  If no
        credentials allow access, the node is not marked as succesful.

        Args:
            node_addr_list (list): list of ipv4 addresses for the discovered
            nodes. (ie those that previously fetched an address from the DHCP
             server.
            cred_list (list of lists): Each list item is a list containing the
            the userid, password, bmc_type and number of nodes for a node template.
        return: bmc access info (dict) : Values hold tuple of userid, password,
                bmc_type
        """
        tot = sum(cred[3] for cred in cred_list)
        left = tot
        max_attempts = 20
        delay = 5
        attempt = 0
        timeout = 4
        print()
        self.log.info("Discover BMC credentials and verify communications")
        print()
        nodes = {}
        bmc_ai = {}
        for node in node_addr_list:
            nodes[node] = False
        while not all(nodes.values()) and attempt <= max_attempts:
            print(f'\rAttempt count: {max_attempts - attempt}  ', end='')
            sys.stdout.flush()
            attempt += 1
            timeout += 1
            node_list = [x for x in nodes if not nodes[x]]
            for node in node_list:
                # each time through re-sort cred_list based on nodes left with
                # those credentials to maximize the probability
                # of using the correct credentials with minimum attempts
                cred_list.sort(key=lambda x: x[3], reverse=True)
                for j, creds in enumerate(cred_list):
                    self.log.debug(
                        f'BMC {node} - Trying userid: {creds[0]} | '
                        f'password: {creds[1]} | bmc type: {creds[2]}')
                    bmc = _bmc.Bmc(node, *creds[:-1], timeout=timeout)
                    if bmc.is_connected():
                        r = bmc.chassis_power('status')
                        self.log.debug(f'Chassis power status: {r}')
                        if r:
                            nodes[node] = True
                            time.sleep(1)
                            self.log.debug(f'Node {node} is powered {r}')
                            bmc_ai[node] = tuple(cred_list[j][:-1])
                            cred_list[j][3] -= 1
                            left -= 1
                            print(
                                f'\r{tot - left} of {tot} nodes communicating via IPMI',
                                end='')
                            sys.stdout.flush()
                            bmc.logout()
                        else:
                            self.log.debug(
                                f'No power status response from node {node}')
            time.sleep(delay)
        if left != 0:
            self.log.error(
                f'IPMI communication successful with only {tot - left} '
                f'of {tot} nodes')
        print('\n')
        return bmc_ai

    def _get_ipmi_ports(self, switch_lbl):
        """ Get all of the ipmi ports for a given switch
        Args:
            switch_lbl (str): switch label
        Returns:
            ports (list of str): port name or number
        """
        ports = []
        for node_tmpl_idx in self.cfg.yield_ntmpl_ind():
            for sw_idx in self.cfg.yield_ntmpl_phyintf_ipmi_ind(node_tmpl_idx):
                if switch_lbl == self.cfg.get_ntmpl_phyintf_ipmi_switch(
                        node_tmpl_idx, sw_idx):
                    ports += self.cfg.get_ntmpl_phyintf_ipmi_ports(
                        node_tmpl_idx, sw_idx)
        ports = [str(port) for port in ports]
        return ports

    def _get_pxe_ports(self, switch_lbl):
        """ Get all of the pxe ports for a given switch
        Args:
            switch_lbl (str): switch label
        Returns:
            ports (list of str): port name or number
        """
        ports = []
        for node_tmpl_idx in self.cfg.yield_ntmpl_ind():
            for sw_idx in self.cfg.yield_ntmpl_phyintf_pxe_ind(node_tmpl_idx):
                if switch_lbl == self.cfg.get_ntmpl_phyintf_pxe_switch(
                        node_tmpl_idx, sw_idx):
                    ports += self.cfg.get_ntmpl_phyintf_pxe_ports(
                        node_tmpl_idx, sw_idx)
        ports = [str(port) for port in ports]
        return ports

    def _get_port_table_ipmi(self, node_list):
        """ Build table of discovered nodes.  The responding IP addresses are
        correlated to MAC addresses in the dnsmasq.leases file.  The MAC
        address is then used to correlate the IP address to a switch port.
        Args:
            node_list (list of str): IPV4 addresses
        Results are accumulated in self.node_table_ipmi, keyed by switch
        label, with [port, MAC address, IPV4 address] entries.
        """
        dhcp_leases = GetDhcpLeases(self.dhcp_ipmi_leases_file)
        dhcp_mac_ip = dhcp_leases.get_mac_ip()

        dhcp_mac_table = AttrDict()
        for ip in node_list:
            for item in dhcp_mac_ip.items():
                if ip in item:
                    dhcp_mac_table[item[0]] = item[1]
        self.log.debug('ipmi mac-ip table')
        self.log.debug(dhcp_mac_table)

        for sw_ai in self.cfg.yield_sw_mgmt_access_info():
            sw = SwitchFactory.factory(*sw_ai[1:])
            label = sw_ai[0]
            ipmi_ports = self._get_ipmi_ports(label)
            mgmt_sw_cfg_mac_lists = \
                sw.show_mac_address_table(format='std')
            # Get switch ipmi port mac address table
            # Logic below maintains same port order as config.yml
            sw_ipmi_mac_table = AttrDict()
            for port in ipmi_ports:
                if port in mgmt_sw_cfg_mac_lists:
                    sw_ipmi_mac_table[port] = mgmt_sw_cfg_mac_lists[port]
            self.log.debug('Switch ipmi port mac table')
            self.log.debug(sw_ipmi_mac_table)

            if label not in self.node_table_ipmi.keys():
                self.node_table_ipmi[label] = []

            for port in sw_ipmi_mac_table:
                for mac in dhcp_mac_table:
                    if mac in sw_ipmi_mac_table[port]:
                        if not self._is_port_in_table(
                                self.node_table_ipmi[label], port):
                            self.node_table_ipmi[label].append(
                                [port, mac, dhcp_mac_table[mac]])

    def _build_port_table_pxe(self, mac_list):
        """ Build table of discovered nodes.  The responding mac addresses
        discovered by tcpdump are correlated to switch ports from cluster
        switches. If nodes have taken an ip address (via dnsmasq) the ip
        address is included in the table.
        Args:
            mac_list (list of str): MAC addresses discovered by tcpdump
        Results are accumulated in self.node_table_pxe, keyed by switch
        label, with [port, MAC address, IPV4 address] entries.
        """
        dhcp_leases = GetDhcpLeases(self.dhcp_pxe_leases_file)
        dhcp_mac_ip = dhcp_leases.get_mac_ip()

        dhcp_mac_table = AttrDict()
        for mac in mac_list:
            for item in dhcp_mac_ip.items():
                if mac in item:
                    dhcp_mac_table[item[0]] = item[1]
        self.log.debug('pxe dhcp mac table')
        self.log.debug(dhcp_mac_table)

        for sw_ai in self.cfg.yield_sw_mgmt_access_info():
            sw = SwitchFactory.factory(*sw_ai[1:])
            sw_label = sw_ai[0]
            pxe_ports = self._get_pxe_ports(sw_label)
            mgmt_sw_mac_lists = \
                sw.show_mac_address_table(format='std')

            # Get switch pxe port mac address table
            # Logic below maintains same port order as config.yml
            sw_pxe_mac_table = AttrDict()
            for port in pxe_ports:
                if port in mgmt_sw_mac_lists:
                    sw_pxe_mac_table[port] = mgmt_sw_mac_lists[port]
            self.log.debug('Switch pxe port mac table')
            self.log.debug(sw_pxe_mac_table)

            # self.node_table_pxe is structured around switches
            if sw_label not in self.node_table_pxe.keys():
                self.node_table_pxe[sw_label] = []

            for mac in mac_list:
                _port = '-'
                for port in sw_pxe_mac_table:
                    if mac in sw_pxe_mac_table[port]:
                        _port = port
                        break
                if mac in dhcp_mac_table:
                    ip = dhcp_mac_table[mac]
                else:
                    ip = '-'
                if not self._is_val_in_table(self.node_table_pxe[sw_label],
                                             mac):
                    self.node_table_pxe[sw_label].append([_port, mac, ip])

    def _reset_existing_bmcs(self, node_addr_list, cred_list):
        """ Attempts to reset any BMCs which have existing IP addresses since
        we don't have control over their address lease time.
        Args:
            node_addr_list (list): list of ipv4 addresses for the discovered
                nodes (ie those that previously fetched an address from the
                DHCP server).
            cred_list (list of lists): Each list item is a list containing the
                userid, password and number of nodes for a node template.
        """
        for node in node_addr_list:
            for j, creds in enumerate(cred_list):
                bmc = _bmc.Bmc(node, creds[0], creds[1], creds[2])
                if bmc.is_connected():
                    self.log.info(
                        f'Resetting BMC with existing ip address: {node}')
                    if not bmc.bmc_reset('cold'):
                        self.log.error(
                            f'Failed attempting BMC reset on {node}')
                    bmc.logout()
                    break

    def validate_ipmi(self):
        self.log.info("Discover and validate cluster nodes")
        # if self.inv.check_all_nodes_ipmi_macs() and self.inv.check_all_nodes_pxe_macs():
        #     self.log.info("Inventory exists with IPMI and PXE MACs populated.")
        #     print("\nPress Enter to continue cluster deployment without "
        #           "running IPMI hardware validation.")
        #     print("Type 'C' to validate cluster nodes defined in current "
        #           "'config.yml'")
        #     resp = input("Type 'T' to terminate Power-Up ")
        #     if resp == 'T':
        #         resp = input("Type 'y' to confirm ")
        #         if resp == 'y':
        #             self.log.info("'{}' entered. Terminating Power-Up at user "
        #                           "request".format(resp))
        #             sys.exit(1)
        #     elif resp == 'C':
        #         self.log.info("'{}' entered. Continuing with hardware "
        #                       "validation".format(resp))
        #     else:
        #         print()
        #         return
        ipmi_cnt, pxe_cnt = self._get_port_cnts()
        ipmi_addr, bridge_addr, ipmi_prefix, ipmi_vlan = self._get_network(
            'ipmi')
        ipmi_network = ipmi_addr + '/' + str(ipmi_prefix)
        addr = IPNetwork(bridge_addr + '/' + str(ipmi_prefix))
        netmask = str(addr.netmask)
        ipmi_size = addr.size
        addr.value += NAME_SPACE_OFFSET_ADDR
        addr = str(addr)
        cred_list = self._get_cred_list()
        rc = False
        dhcp_st = get_dhcp_pool_start()
        self.ipmi_ns = NetNameSpace('ipmi-ns-', 'br-ipmi-' + str(ipmi_vlan),
                                    addr)

        # setup DHCP, unless already running in namespace
        # save start and end addr raw numeric values
        self.log.debug('Installing DHCP server in network namespace')
        addr_st = self._add_offset_to_address(ipmi_network, dhcp_st)
        addr_end = self._add_offset_to_address(ipmi_network, ipmi_size - 2)
        dhcp_end = self._add_offset_to_address(ipmi_network,
                                               dhcp_st + ipmi_cnt + 2)

        # scan ipmi network for nodes with pre-existing ip addresses
        cmd = 'fping -r0 -a -g {} {}'.format(addr_st, addr_end)
        node_list, stderr, rc = sub_proc_exec(cmd)
        if rc not in (0, 1):
            self.log.warning(f'Error scanning IPMI network. rc: {rc}')
        self.log.debug('Pre-existing node list: \n{}'.format(node_list))
        node_list = node_list.splitlines()

        self._reset_existing_bmcs(node_list, cred_list)

        if len(node_list) > 0:
            print('Pause 60s for BMCs to begin reset')
            time.sleep(60)

        dns_list, stderr, rc = sub_proc_exec('pgrep dnsmasq')
        if rc not in [0, 1]:
            self.log.warning(f'Error looking for dnsmasq. rc: {rc}')
        dns_list = dns_list.splitlines()

        for pid in dns_list:
            ns_name, stderr, rc = sub_proc_exec(
                'ip netns identify {}'.format(pid))
            if self.ipmi_ns._get_name_sp_name() in ns_name:
                self.log.debug('DHCP already running in {}'.format(ns_name))
                break
        else:
            cmd = (f'dnsmasq --dhcp-leasefile={self.dhcp_ipmi_leases_file} '
                   f'--interface={self.ipmi_ns._get_name_sp_ifc_name()} '
                   f'--dhcp-range={addr_st},{dhcp_end},{netmask},600')
            stdout, stderr, rc = self.ipmi_ns._exec_cmd(cmd)
            if rc != 0:
                self.log.warning(f'Error setting up dnsmasq. rc: {rc}')
            print(stderr)

        # Scan up to 25 times, 5 seconds between scans. The user can repeat
        # the scan loop indefinitely from the prompt below.
        self.log.info('Scanning BMC network on 5 s intervals')
        cnt = 0
        cnt_down = 25
        while cnt < ipmi_cnt:
            print()
            for i in range(cnt_down):
                print(
                    '\r{} of {} nodes requesting DHCP address. Scan count: {} '
                    .format(cnt, ipmi_cnt, cnt_down - i),
                    end="")
                sys.stdout.flush()
                time.sleep(5)
                cmd = 'fping -r0 -a -g {} {}'.format(addr_st, dhcp_end)
                stdout, stderr, rc = sub_proc_exec(cmd)
                node_list = stdout.splitlines()
                cnt = len(node_list)
                if cnt >= ipmi_cnt:
                    rc = True
                    print(
                        '\r{} of {} nodes requesting DHCP address. Scan count: {} '
                        .format(cnt, ipmi_cnt, cnt_down - i),
                        end="")
                    break

            self._get_port_table_ipmi(node_list)
            self.log.debug('Table of found IPMI ports: {}'.format(
                self.node_table_ipmi))
            for switch in self.node_table_ipmi:
                print('\n\nSwitch: {}                '.format(switch))
                print(
                    tabulate(self.node_table_ipmi[switch],
                             headers=('port', 'MAC address', 'IP address')))
                print()

            if cnt >= ipmi_cnt:
                break
            print(
                '\n\nPress Enter to continue scanning for cluster nodes.')
            print(
                "Or enter 'C' to continue cluster deployment with a subset of nodes"
            )
            resp = input("Or enter 'T' to terminate Power-Up ")
            if resp == 'T':
                resp = input("Enter 'y' to confirm ")
                if resp == 'y':
                    self.log.info(
                        "'{}' entered. Terminating Power-Up at user request".
                        format(resp))
                    self._teardown_ns(self.ipmi_ns)
                    sys.exit(1)
            elif resp == 'C':
                print('\nNot all nodes have been discovered')
                resp = input("Enter 'y' to confirm continuation of"
                             " deployment without all nodes ")
                if resp == 'y':
                    self.log.info(
                        "'{}' entered. Continuing PowerUp".format(resp))
                    break
        self.node_list = node_list
        if cnt < ipmi_cnt:
            self.log.warning('Failed to validate expected number of nodes')

        if len(node_list) > 0 and len(cred_list) > 0:
            # Verify and power off nodes
            self.bmc_ai = self._get_credentials(node_list, cred_list)
            if not self.bmc_ai:
                self.log.error(
                    'Critical error. Unable to establish BMC communication '
                    'with any cluster nodes.\n')
                sys.exit('Exiting.')

        # set_power_clients('off') has built in 60 s delay
        self.log.info('\nPowering off cluster nodes')
        set_power_clients('off', clients=self.bmc_ai)

        set_power_clients('on', clients=self.bmc_ai)

        self.log.debug('\nSetting "network" boot device on all nodes')
        set_bootdev_clients('network', clients=self.bmc_ai)

        self.log.debug('Cluster nodes IPMI validation complete')
        self.ran_ipmi = True
        if not rc:
            raise UserException('Not all node IPMI ports validated')

    def _get_cred_list(self):
        """Returns list of list.  Each list has the credentials
        for a node template(userid, password, bmc_type).
        Note that there is no association to any ip address.
        """
        cred_list = []
        for idx in self.cfg.yield_ntmpl_ind():
            for idx_ipmi in self.cfg.yield_ntmpl_phyintf_ipmi_ind(idx):
                port_cnt = self.cfg.get_ntmpl_phyintf_ipmi_pt_cnt(
                    idx, idx_ipmi)
            cred_list.append([
                self.cfg.get_ntmpl_ipmi_userid(index=idx),
                self.cfg.get_ntmpl_ipmi_password(index=idx),
                self.cfg.get_ntmpl_bmc_type(index=idx), port_cnt
            ])
        return cred_list

    def _teardown_ns(self, ns):
        # kill dnsmasq
        dns_list, stderr, rc = sub_proc_exec('pgrep dnsmasq')
        dns_list = dns_list.splitlines()

        for pid in dns_list:
            ns_name, stderr, rc = sub_proc_exec('ip netns identify ' + pid)
            if ns._get_name_sp_name() in ns_name:
                self.log.debug('Killing dnsmasq {}'.format(pid))
                stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        # kill tcpdump
        tcpdump_list, stderr, rc = sub_proc_exec('pgrep tcpdump')
        tcpdump_list = tcpdump_list.splitlines()

        for pid in tcpdump_list:
            ns_name, stderr, rc = sub_proc_exec('ip netns identify ' + pid)
            if ns._get_name_sp_name() in ns_name:
                self.log.debug('Killing tcpdump {}'.format(pid))
                stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        # reconnect the veth pair to the container
        ns._reconnect_container()

        # Destroy the namespace
        self.log.debug('Destroying namespace')
        ns._destroy_name_sp()

    def _get_macs(self, mac_list, dump):
        """ Parse the data returned by tcpdump looking for pxe boot
        requests.
        Args:
            mac_list (list): list of already found mac addresses
            dump (str): tcpdump output from the tcpdump file
        Returns:
            list: mac_list with any newly discovered MAC addresses appended
        """
        _mac_iee802 = r'([\dA-F]{2}[\.:-]){5}([\dA-F]{2})'
        _mac_regex = re.compile(_mac_iee802, re.I)

        dump = dump.split('BOOTP/DHCP, Request')

        for item in dump:
            # look first for 'magic cookie'
            pos = item.find('6382 5363')
            if pos >= 0:
                bootp = item[pos:]
                match = re.search(' ff|ff ', bootp, re.DOTALL)
                if match is not None:
                    bootp = bootp[:2 + match.start()]
                    # look for pxe request info.
                    # 0x37 = 55 (parameter list request)
                    # 0x43 = 67 (boot filename request)
                    # 0xd1 = 209 (pxeconfig file request)
                    if ('37 ' in bootp or ' 37' in bootp):
                        if (' d1' in bootp or 'd1 ' in bootp) or \
                                ('43 ' in bootp or ' 43' in bootp):
                            self.log.debug('bootp param request field: '
                                           f'{bootp}')
                            mac = _mac_regex.search(item).group()
                            if mac not in mac_list:
                                mac_list.append(mac)
        return mac_list
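
To show what the parser keys on, here is a fabricated and heavily abridged tcpdump fragment pushed through the method via a throwaway stand-in object (the hex is illustrative only, not a complete DHCP packet; it merely contains the magic cookie '6382 5363', a '37' option marker, a 'd1' marker and an 'ff' terminator, and the module-level imports such as re are assumed to be in place):

import logging
from types import SimpleNamespace

dump = ('BOOTP/DHCP, Request from a0:42:3f:00:00:01, length 300\n'
        '        0x0000:  6382 5363 3501 0137 0403 0143 d1ff 0000\n')

stub = SimpleNamespace(log=logging.getLogger('demo'))   # supplies the .log the method uses
print(ValidateClusterHardware._get_macs(stub, [], dump))
# ['a0:42:3f:00:00:01']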

    def validate_pxe(self, bootdev='default', persist=True):
        # if self.inv.check_all_nodes_pxe_macs():
        #     self.log.info("Inventory exists with PXE MACs populated.")
        #     if not self.ran_ipmi:
        #         return
        #     print("\nPress Enter to continue cluster deployment without "
        #           "running PXE hardware validation.")
        #     print("Type 'C' to validate cluster nodes defined in current "
        #           "'config.yml'")
        #     resp = input("Type 'T' to terminate Power-Up ")
        #     if resp == 'T':
        #         resp = input("Type 'y' to confirm ")
        #         if resp == 'y':
        #             self.log.info("'{}' entered. Terminating Power-Up at user "
        #                           "request".format(resp))
        #             sys.exit(1)
        #     elif resp == 'C':
        #         self.log.info("'{}' entered. Continuing with hardware "
        #                       "validation".format(resp))
        #     else:
        #         print()
        #         return
        # if not self.ran_ipmi:
        #     return
        if not self.node_table_ipmi:
            raise UserCriticalException('No BMCs discovered')
        self.log.debug("Checking PXE networks and client PXE"
                       " ports ________\n")
        self.log.debug('Boot device: {}'.format(bootdev))
        ipmi_cnt, pxe_cnt = self._get_port_cnts()
        pxe_addr, bridge_addr, pxe_prefix, pxe_vlan = self._get_network('pxe')
        pxe_network = pxe_addr + '/' + str(pxe_prefix)
        addr = IPNetwork(bridge_addr + '/' + str(pxe_prefix))
        netmask = str(addr.netmask)
        addr.value += NAME_SPACE_OFFSET_ADDR
        addr = str(addr)
        foundall = False
        dhcp_st = get_dhcp_pool_start()
        pxe_ns = NetNameSpace('pxe-ns-', 'br-pxe-' + str(pxe_vlan), addr)

        # setup DHCP. save start and end addr raw numeric values
        self.log.debug('Installing DHCP server in network namespace')
        addr_st = self._add_offset_to_address(pxe_network, dhcp_st)
        addr_end = self._add_offset_to_address(pxe_network,
                                               dhcp_st + pxe_cnt + 2)

        dns_list, stderr, rc = sub_proc_exec('pgrep dnsmasq')
        dns_list = dns_list.splitlines()

        if os.path.exists(self.dhcp_pxe_leases_file):
            os.remove(self.dhcp_pxe_leases_file)

        # delete any remnant dnsmasq processes
        for pid in dns_list:
            ns_name, stderr, rc = sub_proc_exec(
                'ip netns identify {}'.format(pid))
            if pxe_ns._get_name_sp_name() in ns_name:
                self.log.debug('Killing dnsmasq. pid {}'.format(pid))
                stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        cmd = (f'dnsmasq --dhcp-leasefile={self.dhcp_pxe_leases_file} '
               f'--interface={pxe_ns._get_name_sp_ifc_name()} '
               f'--dhcp-range={addr_st},{addr_end},{netmask},3600')
        stdout, stderr, rc = pxe_ns._exec_cmd(cmd)
        if rc != 0:
            self.log.warning(f'Error configuring dnsmasq. rc: {rc}')

        if os.path.exists(self.tcp_dump_file):
            os.remove(self.tcp_dump_file)

        tcpdump_list, stderr, rc = sub_proc_exec('pgrep tcpdump')
        tcpdump_list = tcpdump_list.splitlines()

        # delete any remnant tcpdump processes
        for pid in tcpdump_list:
            ns_name, stderr, rc = sub_proc_exec('ip netns identify ' + pid)
            if pxe_ns._get_name_sp_name() in ns_name:
                self.log.debug('Killing tcpdump. pid {}'.format(pid))
                stdout, stderr, rc = sub_proc_exec('kill -15 ' + pid)

        cmd = (f'sudo tcpdump -X -U -i {pxe_ns._get_name_sp_ifc_name()} '
               f'-w {self.tcp_dump_file} --immediate-mode  port 67')
        proc = pxe_ns._launch_cmd(cmd)
        if not isinstance(proc, object):
            self.log.error(
                f'Failure to launch process of tcpdump monitor {proc}')

        # Scan up to 25 times, 10 seconds between scans. The user can repeat
        # the scan loop indefinitely from the prompt below.
        self.log.info('Scanning pxe network on 10 s intervals.')
        cnt = 0
        cnt_prev = 0
        cnt_down = 25
        mac_list = []
        dump = ''
        while cnt < pxe_cnt:
            print()
            cmd = 'sudo tcpdump -r {} -xx'.format(self.tcp_dump_file)
            for i in range(cnt_down):
                print('\r{} of {} nodes requesting PXE boot. Scan cnt: {} '.
                      format(cnt, pxe_cnt, cnt_down - i),
                      end="")
                sys.stdout.flush()
                time.sleep(10)
                # read the tcpdump file if size is not 0
                if os.path.exists(self.tcp_dump_file) and os.path.getsize(
                        self.tcp_dump_file):
                    dump, stderr, rc = sub_proc_exec(cmd)
                    if rc != 0:
                        self.log.warning(
                            f'Error reading tcpdump file. rc: {rc}')
                    if 'reading' not in stderr:
                        self.log.warning(
                            f'Failure reading tcpdump file - {stderr}')
                mac_list = self._get_macs(mac_list, dump)
                cnt = len(mac_list)
                if cnt > cnt_prev:
                    cnt_prev = cnt
                    # Pause briefly for in flight DHCP to complete and lease file to update
                    time.sleep(5)
                    self._build_port_table_pxe(mac_list)
                if cnt >= pxe_cnt:
                    foundall = True
                    print(
                        '\r{} of {} nodes requesting PXE boot. Scan count: {} '
                        .format(cnt, pxe_cnt, cnt_down - i),
                        end="")
                    break
            self.log.debug('Table of found PXE ports: {}'.format(
                self.node_table_pxe))
            for switch in self.node_table_pxe:
                print('\n\nSwitch: {}'.format(switch))
                print(
                    tabulate(self.node_table_pxe[switch],
                             headers=('port', 'MAC address', 'IP address')))
                print()

            if cnt >= pxe_cnt:
                break
            print('\n\nPress Enter to continue scanning for cluster nodes.')
            print(
                "Or enter 'C' to continue cluster deployment with a subset of nodes"
            )
            print("Or enter 'R' to cycle power to missing nodes")
            resp = input("Or enter 'T' to terminate Power-Up ")
            if resp == 'T':
                resp = input("Enter 'y' to confirm ")
                if resp == 'y':
                    self.log.info("'{}' entered. Terminating Power-Up at user"
                                  " request".format(resp))
                    self._teardown_ns(self.ipmi_ns)
                    self._teardown_ns(pxe_ns)
                    sys.exit(1)
            elif resp == 'R':
                self._reset_unfound_nodes()
            elif resp == 'C':
                print('\nNot all nodes have been discovered')
                resp = input("Enter 'y' to confirm continuation of"
                             " deployment without all nodes ")
                if resp == 'y':
                    self.log.info(
                        "'{}' entered. Continuing Power-Up".format(resp))
                    break
        if cnt < pxe_cnt:
            self.log.warning('Failed to validate expected number of nodes')

        self._teardown_ns(pxe_ns)

        # Cycle power on all discovered nodes if bootdev set to 'network'
        if bootdev == 'network':
            self.log.debug('\nCycling power to discovered nodes.\n')
            set_power_clients('off', clients=self.bmc_ai)

            set_power_clients('on', clients=self.bmc_ai)

            set_bootdev_clients('network', clients=self.bmc_ai)

        self._teardown_ns(self.ipmi_ns)

        # Reset BMCs to ensure they acquire a new address from container
        # during inv_add_ports. Avoids conflicting addresses during redeploy
        self._reset_existing_bmcs(self.node_list, self._get_cred_list())

        self.log.info('Cluster nodes validation complete')
        if not foundall:
            raise UserException('Not all node PXE ports validated')

    def _reset_unfound_nodes(self):
        """ Power cycle the nodes who's PXE ports are not requesting pxe boot.
        """
        ipmi_missing_list_ai = {}
        for label in self.cfg.yield_sw_mgmt_label():
            pxe_ports = self._get_pxe_ports(label)
            ipmi_ports = self._get_ipmi_ports(label)
            for node in self.node_table_ipmi[label]:
                if node[0] in ipmi_ports:
                    idx = ipmi_ports.index(node[0])
                    if label not in self.node_table_pxe or not self._is_port_in_table(
                            self.node_table_pxe[label], pxe_ports[idx]):
                        ipmi_missing_list_ai[node[2]] = self.bmc_ai[node[2]]
        self.log.debug(
            f'Cycling power to missing nodes list: {ipmi_missing_list_ai}')

        print('Cycling power to non responding nodes:')
        for node in ipmi_missing_list_ai:
            print(node)
        t1 = time.time()
        set_power_clients('off', clients=ipmi_missing_list_ai)

        while time.time() < t1 + 10:
            time.sleep(0.5)

        set_bootdev_clients('network', clients=ipmi_missing_list_ai)
        set_power_clients('on', clients=ipmi_missing_list_ai)

    def _is_port_in_table(self, table, port):
        for node in table:
            if port == node[0]:
                self.log.debug('Table port: {} port: {}'.format(node[0], port))
                return True
        return False

    def _is_val_in_table(self, table, val):
        for item in table:
            if val == item[0] or val == item[1]:
                self.log.debug('Found in table: {} item: {}'.format(val, item))
                return True
        return False

    def _get_network(self, type_):
        """Returns details of a Power-Up network.
        Args:
            type_ (str): Either 'pxe' or 'ipmi'
        Returns:
            network_addr: (str) ipv4 addr
            bridge_ipaddr: (str) ipv4 addr
            netprefix: (str)
            vlan: (str)
        """
        types = self.cfg.get_depl_netw_client_type()
        bridge_ipaddr = self.cfg.get_depl_netw_client_brg_ip()
        vlan = self.cfg.get_depl_netw_client_vlan()
        netprefix = self.cfg.get_depl_netw_client_prefix()
        idx = types.index(type_)

        network = IPNetwork(bridge_ipaddr[idx] + '/' + str(netprefix[idx]))
        network_addr = str(network.network)
        return network_addr, bridge_ipaddr[idx], netprefix[idx], vlan[idx]
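
For reference, the network address derivation above is plain netaddr arithmetic; a tiny illustration with made-up values:

from netaddr import IPNetwork

network = IPNetwork('192.168.5.2' + '/' + '24')
print(str(network.network))   # 192.168.5.0 -> returned as network_addr for that interface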

    def validate_data_switches(self):
        self.log.info('Verifying data switches')

        sw_cnt = self.cfg.get_sw_data_cnt()
        self.log.debug(
            'Number of data switches defined in config file: {}'.format(
                sw_cnt))

        rc = True
        for index, switch_label in enumerate(self.cfg.yield_sw_data_label()):
            print('.', end="")
            sys.stdout.flush()
            label = self.cfg.get_sw_data_label(index)
            self.log.debug('switch_label: {}'.format(switch_label))

            switch_class = self.cfg.get_sw_data_class(index)
            if not switch_class:
                self.log.error('No switch class found')
                return False
            userid = None
            password = None

            try:
                userid = self.cfg.get_sw_data_userid(index)
            except AttributeError:
                self.log.info('Passive switch mode specified')
                return True

            try:
                password = self.cfg.get_sw_data_password(index)
            except AttributeError:
                try:
                    self.cfg.get_sw_data_ssh_key(index)
                except AttributeError:
                    return True
                else:
                    self.log.error(
                        'Switch authentication via ssh keys not yet supported')
                    return False
            # Verify communication on each defined interface
            for ip in self.cfg.yield_sw_data_interfaces_ip(index):
                self.log.debug('Verifying switch communication on ip'
                               ' address: {}'.format(ip))
                sw = SwitchFactory.factory(switch_class, ip, userid, password,
                                           'active')
                if sw.is_pingable():
                    self.log.debug(
                        'Successfully pinged data switch \"%s\" at %s' %
                        (label, ip))
                else:
                    self.log.warning(
                        'Failed to ping data switch \"%s\" at %s' %
                        (label, ip))
                    rc = False
                try:
                    vlans = sw.show_vlans()
                    if vlans and len(vlans) > 1:
                        self.log.debug(
                            'Successfully communicated with data switch \"%s\"'
                            ' at %s' % (label, ip))
                    else:
                        self.log.warning(
                            'Failed to communicate with data switch \"%s\"'
                            ' at %s' % (label, ip))
                        rc = False
                except (SwitchException, SSH_Exception):
                    self.log.error('Failed communicating with data switch'
                                   ' at address {}'.format(ip))
                    rc = False
        print()
        if rc:
            self.log.debug(' OK - All data switches verified')
        else:
            raise UserException('Failed verification of data switches')

    def validate_mgmt_switches(self):
        self.log.info('Verifying management switches')

        sw_cnt = self.cfg.get_sw_mgmt_cnt()
        self.log.debug(
            'Number of management switches defined in config file: {}'.format(
                sw_cnt))

        rc = True
        for index, switch_label in enumerate(self.cfg.yield_sw_mgmt_label()):
            print('.', end="")
            sys.stdout.flush()
            label = self.cfg.get_sw_mgmt_label(index)
            self.log.debug('switch_label: {}'.format(switch_label))

            switch_class = self.cfg.get_sw_mgmt_class(index)
            if not switch_class:
                self.log.error('No switch class found')
                return False
            userid = None
            password = None

            try:
                userid = self.cfg.get_sw_mgmt_userid(index)
            except AttributeError:
                self.log.debug('Passive switch mode specified')
                return rc

            try:
                password = self.cfg.get_sw_mgmt_password(index)
            except AttributeError:
                try:
                    self.cfg.get_sw_mgmt_ssh_key(index)
                except AttributeError:
                    return rc
                else:
                    self.log.error(
                        'Switch authentication via ssh keys not yet supported')
                    rc = False
            # Verify communication on each defined interface
            for ip in self.cfg.yield_sw_mgmt_interfaces_ip(index):
                self.log.debug('Verifying switch communication on ip address:'
                               ' {}'.format(ip))
                sw = SwitchFactory.factory(switch_class, ip, userid, password,
                                           'active')
                if sw.is_pingable():
                    self.log.debug(
                        'Successfully pinged management switch \"%s\" at %s' %
                        (label, ip))
                else:
                    self.log.warning(
                        'Failed to ping management switch \"%s\" at %s' %
                        (label, ip))
                    rc = False
                try:
                    vlans = sw.show_vlans()
                    if vlans and len(vlans) > 1:
                        self.log.debug(
                            'Successfully communicated with management switch'
                            ' \"%s\" at %s' % (label, ip))
                    else:
                        self.log.warning(
                            'Failed to communicate with management switch'
                            ' \"%s\" at %s' % (label, ip))
                        rc = False
                except (SwitchException, SSH_Exception):
                    self.log.error(
                        'Failed communicating with management switch'
                        ' at address {}'.format(ip))
                    rc = False
        print()
        if rc:
            self.log.debug(' OK - All management switches verified')
        else:
            raise UserCriticalException(
                'Failed verification of management switches')
Example #8
class BaseProvider(abc.ABC):
    """
    Abstract base class from which providers should inherit
    """

    SESSION_CLASS = StreamSession
    ITEM_CLASS = model.MediaItem
    # VIEW_CLASS = SimpleProviderView
    FILTERS = AttrDict()
    ATTRIBUTES = AttrDict(title={"width": ("weight", 1)})
    MEDIA_TYPES = None

    def __init__(self, *args, **kwargs):
        self._view = None
        self._session = None
        self._active = False
        self._filters = AttrDict(
            {n: f(provider=self, name=n)
             for n, f in self.FILTERS.items()})

        rules = AttrDict(self.config.rules.label,
                         **config.settings.profile.rules.label)

        labels = AttrDict(self.config.labels, **config.settings.profile.labels)

        self.rule_map = AttrDict([(re.compile(k, re.IGNORECASE), v)
                                  for k, v in [(r, rules[r])
                                               for r in rules.keys()]])

        self.highlight_map = AttrDict([(re.compile(k,
                                                   re.IGNORECASE), labels[v])
                                       for k, v in rules.items()])

        self.highlight_re = re.compile(
            "(" + "|".join([k.pattern
                            for k in self.highlight_map.keys()]) + ")",
            re.IGNORECASE)

    def init_config(self):
        pass

    @property
    def LISTING_CLASS(self):
        for cls in [self.__class__] + list(self.__class__.__bases__):
            pkg = sys.modules.get(cls.__module__)
            pkgname = pkg.__name__.split(".")[-1]
            try:
                return next(
                    v for k, v in pkg.__dict__.items()
                    if pkgname in k.lower() and k.endswith("MediaListing"))
            except StopIteration:
                continue
        return model.MediaListing

    @property
    def MEDIA_SOURCE_CLASS(self):
        for cls in [self.__class__] + list(self.__class__.mro()):
            pkg = sys.modules.get(cls.__module__)
            pkgname = pkg.__name__.split(".")[-1]
            try:
                return next(
                    v for k, v in pkg.__dict__.items()
                    if pkgname in k.lower() and k.endswith("MediaSource"))
            except (StopIteration, AttributeError):
                continue
        return model.MediaSource

    @property
    def session_params(self):
        return {"proxies": config.settings.profile.get("proxies")}

    @property
    def session(self):
        if self._session is None:
            session_params = self.session_params
            self._session = self.SESSION_CLASS.new(self.IDENTIFIER,
                                                   **session_params)
        return self._session

    @property
    def gui(self):
        return self._view is not None

    @property
    def filters(self):
        return self._filters

    @property
    def view(self):
        if not self._view:
            self._view = self.make_view()
            self._view.update()
        return self._view

    @property
    def is_active(self):
        return self._active

    def activate(self):
        if self.is_active:
            return
        self._active = True
        self.on_activate()

    def deactivate(self):
        if not self.is_active:
            return
        self.on_deactivate()
        self._active = False

    def on_activate(self):
        pass

    def on_deactivate(self):
        pass

    @abc.abstractmethod
    def make_view(self):
        pass

    @classproperty
    def IDENTIFIER(cls):
        return next(c.__module__ for c in cls.__mro__
                    if __package__ in c.__module__).split(".")[-1]

    @classproperty
    @abc.abstractmethod
    def NAME(cls):
        return cls.__name__.replace("Provider", "")

    @property
    def FILTERS_BROWSE(self):
        return AttrDict()

    @property
    def FILTERS_OPTIONS(self):
        return AttrDict()

    @property
    def FILTERS(self):
        d = getattr(self, "FILTERS_BROWSE", AttrDict())
        d.update(getattr(self, "FILTERS_OPTIONS", {}))
        return d

    def parse_identifier(self, identifier):
        return

    def new_media_source(self, *args, **kwargs):
        return self.MEDIA_SOURCE_CLASS(self.IDENTIFIER, *args, **kwargs)

    def new_listing(self, **kwargs):
        return self.LISTING_CLASS(self.IDENTIFIER, **kwargs)

    @abc.abstractmethod
    def listings(self, filters=None):
        pass

    def should_download(self, listing):
        return listing.label in (list(self.config.rules) +
                                 list(config.settings.profile.rules.download))

    def on_new_listing(self, listing):
        try:
            label = next(l for r, l in self.rule_map.items()
                         if r.search(listing.title))
            listing.label = label
            if self.should_download(listing):
                self.download(listing)

        except StopIteration:
            pass

    @property
    def config(self):
        return config.ConfigTree(
            config.settings.profile.providers.get(self.IDENTIFIER, {}))

    @property
    def config_is_valid(self):
        def check_config(required, cfg):
            if isinstance(required, dict):
                for k, v in required.items():
                    if k not in cfg:
                        return False
                    if not check_config(required[k], cfg[k]):
                        return False
            else:
                for k in required:
                    if k not in cfg:
                        return False
            return True

        # return all([ self.config.get(x, None) is not None
        # for x in getattr(self, "REQUIRED_CONFIG", [])
        return check_config(getattr(self, "REQUIRED_CONFIG", []), self.config)

    def parse_options(self, options):
        if not options:
            return AttrDict()
        return AttrDict(
            [(list(self.FILTERS_OPTIONS.keys())[n], v) for n, v in enumerate(
                [o for o in options.split(",") if "=" not in o])],
            **dict(o.split("=") for o in options.split(",") if "=" in o))
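
How the options string is split, assuming FILTERS_OPTIONS exposes, say, 'feed' and 'status' filters (the filter names and values are illustrative):

filter_keys = ["feed", "status"]         # stand-in for list(FILTERS_OPTIONS.keys())
options = "sports,status=unread"

positional = [o for o in options.split(",") if "=" not in o]
keyword = dict(o.split("=") for o in options.split(",") if "=" in o)
print(dict(zip(filter_keys, positional), **keyword))
# {'feed': 'sports', 'status': 'unread'}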

    def get_source(self, selection, **kwargs):
        source = selection.content
        if not isinstance(source, list):
            source = [source]
        return source

    def play_args(self, selection, **kwargs):
        source = self.get_source(selection, **kwargs)
        kwargs = {
            k: v
            for k, v in list(kwargs.items()) +
            [(f, self.filters[f].value)
             for f in self.filters if f not in kwargs]
        }
        return (source, kwargs)

    def play(self, selection, no_task_manager=False, **kwargs):

        try:
            sources, kwargs = self.play_args(selection, **kwargs)
        except SGStreamNotFound as e:
            logger.error(f"stream not found: {e}")
            return
        # media_type = kwargs.pop("media_type", None)

        # FIXME: For now, we just throw playlists of media items at the default
        # player program and hope it can handle all of them.

        player_spec = None
        helper_spec = None

        if not isinstance(sources, list):
            sources = [sources]

        for s in sources:
            if not s.media_type:
                # Try to set the content types of the source(s) with a HTTP HEAD
                # request if the provider didn't specify one.
                s.media_type = self.session.head(
                    s.locator).headers.get("Content-Type").split("/")[0]

        media_types = set([s.media_type for s in sources if s.media_type])
        player_spec = {"media_types": media_types}
        if media_types == {"image"}:
            helper_spec = {None: None}
        else:
            helper_spec = getattr(self.config, "helpers",
                                  None) or sources[0].helper

        task = model.PlayMediaTask(provider=self.NAME,
                                   title=selection.title,
                                   sources=sources)

        if not (no_task_manager or state.options.debug_console):
            return state.task_manager.play(task, player_spec, helper_spec,
                                           **kwargs)
        else:
            return state.asyncio_loop.create_task(
                Player.play(task, player_spec, helper_spec, **kwargs))

    def download(self, selection, no_task_manager=False, **kwargs):

        sources, kwargs = self.play_args(selection, **kwargs)
        # filename = selection.download_filename

        if not isinstance(sources, list):
            sources = [sources]

        for i, s in enumerate(sources):
            # filename = s.download_filename
            # kwargs = {"ext": getattr(s, "ext", None)}
            if len(sources) > 1:
                kwargs["index"] = i
            try:
                filename = selection.download_filename(**kwargs)
            except SGInvalidFilenameTemplate as e:
                logger.warning(
                    f"filename template for provider {self.IDENTIFIER} is invalid: {e}"
                )
            helper_spec = getattr(self.config, "helpers",
                                  None) or s.download_helper
            # logger.info(f"helper: {helper_spec}")

            task = model.DownloadMediaTask(provider=self.NAME,
                                           title=selection.title,
                                           sources=[s],
                                           dest=filename)

            # s = AttrDict(dataclasses.asdict(s))
            # s.provider = self.NAME
            # s.title = selection.title
            # s.dest = filename

            if not (no_task_manager or state.options.debug_console):
                return state.task_manager.download(task, filename, helper_spec,
                                                   **kwargs)
            else:
                return state.asyncio_loop.create_task(
                    Downloader.download(task, filename, helper_spec, **kwargs))

    def on_select(self, widget, selection):
        self.play(selection)

    @property
    def limit(self):
        return None

    def refresh(self):
        self.view.refresh()

    def reset(self):
        self.view.reset()

    def __str__(self):
        return self.NAME

    def __repr__(self):
        return f"<{type(self)}: {self.NAME}>"
Example #9
0
    def get_ports(self):
        dhcp_leases = GetDhcpLeases(self.dhcp_leases_file)
        dhcp_mac_ip = dhcp_leases.get_mac_ip()
        self.log.debug('DHCP leases: {}'.format(dhcp_mac_ip))

        mgmt_sw_cfg_mac_lists = AttrDict()

        if self.cfg.is_passive_mgmt_switches():
            self.log.debug('passive mode')
            for switch_label in self.cfg.yield_sw_mgmt_label():
                file_path = os.path.join(GEN_PASSIVE_PATH, switch_label)
                mac_info = {}
                try:
                    with open(file_path, 'r') as f:
                        mac_info = f.read()

                except IOError as error:
                    self.log.error(
                        'Passive switch MAC address table file not found {}'
                        .format(error))
                    raise
                mgmt_sw_cfg_mac_lists[switch_label] = \
                    SwitchCommon.get_port_to_mac(mac_info, self.log)
        else:
            for switch in self.sw_dict:
                self.log.debug('Switch: {}'.format(switch))
                mgmt_sw_cfg_mac_lists[switch] = \
                    self.sw_dict[switch].show_mac_address_table(format='std')

        self.log.debug('Management switches MAC address tables: {}'.format(
            mgmt_sw_cfg_mac_lists))

        # For each port, keep only the MAC addresses that appear in the DHCP
        # leases table, and drop any port that has no matching MAC address.
        for switch in mgmt_sw_cfg_mac_lists.keys():
            for port in list(mgmt_sw_cfg_mac_lists[switch].keys()):
                port_macs = mgmt_sw_cfg_mac_lists[switch][port]
                found_mac = False
                for mac in dhcp_mac_ip.keys():
                    if mac in port_macs:
                        found_mac = True
                        # keep only the mac which has a dhcp address
                        mgmt_sw_cfg_mac_lists[switch][port] = [mac]
                if not found_mac:
                    del mgmt_sw_cfg_mac_lists[switch][port]
        self.log.debug('Management switches MAC address table of ports with '
                       'dhcp leases: {}'.format(mgmt_sw_cfg_mac_lists))

        if self.port_type == "ipmi":
            self.inv.add_macs_ipmi(mgmt_sw_cfg_mac_lists)
            self.inv.add_ipaddrs_ipmi(dhcp_mac_ip)
        elif self.port_type == "pxe":
            self.inv.add_macs_pxe(mgmt_sw_cfg_mac_lists)
            self.inv.add_ipaddrs_pxe(dhcp_mac_ip)

        if self.port_type == 'ipmi':
            self.node_table, self.ports_found, self.ports_total = \
                self._build_node_table_ipmi(self.cfg, dhcp_mac_ip, mgmt_sw_cfg_mac_lists)

        if self.port_type == 'pxe':
            self.node_table, self.ports_found, self.ports_total = \
                self._build_node_table_pxe(self.cfg, dhcp_mac_ip, mgmt_sw_cfg_mac_lists)
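
The DHCP filtering step in the middle of get_ports() can be illustrated on its own. The sketch below is a hypothetical stand-alone version (plain dicts, made-up MAC addresses, not part of the class above): ports whose MAC list contains no leased address are dropped, and matched ports keep only the leased MAC(s).

def filter_ports_by_dhcp(port_macs, dhcp_mac_ip):
    """Keep only ports whose MAC list contains at least one leased MAC address."""
    filtered = {}
    for port, macs in port_macs.items():
        leased = [mac for mac in macs if mac in dhcp_mac_ip]
        if leased:
            # keep only the MAC(s) that actually hold a DHCP lease
            filtered[port] = leased
    return filtered

ports = {'1': ['0c:c4:7a:00:00:01', '0c:c4:7a:00:00:02'],
         '2': ['0c:c4:7a:00:00:03']}
leases = {'0c:c4:7a:00:00:02': '192.168.3.21'}
print(filter_ports_by_dhcp(ports, leases))  # {'1': ['0c:c4:7a:00:00:02']}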
Example #10
0
def loadConfig(mode):
    """
    Build config objects for the application launch instance (appConfig) and model arguments.
    Configuration is built by merging:
        Options specified at the command line (highest priority).
        Domain-specific defaults specified in the domain directory (next priority).
        Global defaults defined above.

    Returns:
        Tuple of appConfig and modelArgs.
    """
    def __basic_arguments_parser(add_help):
        """
        Creates a command line parser with a minimal set of folder level arguments.
        """
        parser = argparse.ArgumentParser(add_help=add_help)

        # Basic load/store settings.
        parser.add_argument('--domain',
                            action='store',
                            dest='domain',
                            default="toy2",
                            help='The app domain to use.')
        parser.add_argument(
            '--inputs_root_dir',
            action='store',
            dest='inputs_root_dir',
            default="data/inputs/",
            help=
            'Path to folder containing dev, train and test input data folders.'
        )
        if mode in [AppMode.Train, AppMode.Evaluate]:
            parser.add_argument('--training_dir',
                                action='store',
                                dest='training_dir',
                                default='./data/training/'
                                if mode != AppMode.Test else "./data/testing/",
                                help='Path to experiment directory.')
            parser.add_argument(
                "--run",
                action='store',
                dest='run',
                default=None,
                type=int,
                help="Index of the run that should be operated upon.")

            parser.add_argument("--schemaVersion",
                                type=int,
                                default=None,
                                help="Version of the XML schema to use.")

            parser.add_argument(
                '--resume',
                default=False if mode == AppMode.Test else None,
                type=str2bool3,
                help=
                'Indicates whether training should be resumed from the latest checkpoint.'
            )

        # Testing data folders.
        if mode == AppMode.Evaluate:
            parser.add_argument(
                '--test_dataset',
                default="test",
                type=str,
                help='Dataset(test/dev/train) to use for evaluation.')

        return parser

    # Some config defaults depend on the app config, so peek into it before configuring the rest.
    basicAppConfig, _ = __basic_arguments_parser(False).parse_known_args()
    postProcessAppConfig(basicAppConfig, mode)
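    # (parse_known_args ignores arguments the basic parser doesn't know yet; the
    # full parser, including domain-dependent defaults, is built below.)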

    # Get domain defaults and merge them.
    domainModule = import_module("domains." + basicAppConfig.domain)
    appConfigDefaults = AttrDict({
        **appConfigGlobalDefaults,
        **domainModule.appConfigDefaults
    })
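    # Later entries in the dict unpacking win, so domain defaults override the
    # global defaults here.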
    if mode == AppMode.Generate:
        domainGeneratorModule = import_module("domains." +
                                              basicAppConfig.domain +
                                              ".generate")
        generatorArgsDefaults = AttrDict({
            **generatorArgsGlobalDefaults,
            **domainGeneratorModule.generatorArgsDefaults
        })
    else:
        modelArgsDefaults = AttrDict({
            **modelArgsGlobalDefaults,
            **domainModule.modelArgsDefaults
        })

    # Create the parser which parses basic arguments and will also parse the entire kitchen sink, below.
    parser = __basic_arguments_parser(True)

    # Testing multiple times.
    if mode == AppMode.Test:
        parser.add_argument("--repetitionCount",
                            type=int,
                            default=5,
                            help="Number of times to repeat test.")

    if mode in [AppMode.Train]:
        # Domain customizable load/store settings.
        parser.add_argument(
            "--checkpoint_every",
            type=int,
            default=appConfigDefaults.checkpoint_every,
            help="Number of epochs after which we take a checkpoint.")
        parser.add_argument(
            "--print_every",
            type=int,
            default=10,
            help="Print progress information after every so many batches.")

    if mode in [AppMode.Train, AppMode.Evaluate]:
        parser.add_argument(
            "--input_select_percent",
            type=float,
            default=None,
            help=
            "Percentage of inputs to actually select for training. This helps when training"
            + " with a smaller dataset than what is available.")

    # Randomization settings.
    parser.add_argument(
        '--random_seed',
        dest='random_seed',
        type=float,
        default=None,
        help='Random seed to use before the start of training.')

    # Various logging and debug settings.
    parser.add_argument('--log_level',
                        dest='log_level',
                        default='info',
                        help='Logging level.')
    parser.add_argument(
        "--tensorboard",
        type=int,
        default=0,
        help="Frequency of logging data into tensorboard. Set to 0 to disable."
    )
    parser.add_argument(
        "--profile",
        type=str2bool,
        default=False,
        help="Set to true to enable profiling info printing mode.")
    if mode in [AppMode.Train, AppMode.Evaluate]:
        parser.add_argument(
            "--runtests",
            type=str2bool,
            default=False,
            help="Set to true to enable unit testing of components.")
        parser.add_argument(
            "--debugAttention",
            dest="attention",
            type=str2bool,
            default=False,
            help="Debug attention by logging it into tensorboard.")

    parser.add_argument("--batch_size",
                        type=int,
                        default=1000,
                        help="Batch size for training.")

    if mode in [AppMode.Train, AppMode.Evaluate]:
        # Build args needed during training.
        parser.add_argument(
            "--epochs",
            type=int,
            default=random.randint(1, 10) if mode == AppMode.Test else 400,
            help="Number of epochs to train for.")

        # parser.add_argument("--num_samples", type = int, default = None,
        #                    help="Number of samples to train on.")

    if mode in [AppMode.Train, AppMode.Evaluate, AppMode.Test]:
        # XML Schema params.
        parser.add_argument("--max_node_count",
                            type=int,
                            default=modelArgsDefaults.max_node_count,
                            help="Maximum number of nodes in an XML file.")
        parser.add_argument(
            "--total_attrs_count",
            type=int,
            default=modelArgsDefaults.total_attrs_count,
            help="Total number of known attributes in the schema.")
        parser.add_argument(
            "--value_symbols_count",
            type=int,
            default=modelArgsDefaults.value_symbols_count,
            help="Total number of symbols used in attribute value strings.")
        parser.add_argument("--max_node_fanout",
                            type=int,
                            default=modelArgsDefaults.max_node_fanout,
                            help="Maximum connectivity fanout of an XML node.")
        parser.add_argument("--max_node_text_len",
                            type=int,
                            default=modelArgsDefaults.max_node_text_len,
                            help="Maximum length of text attribute in a node.")
        parser.add_argument(
            "--max_attrib_value_len",
            type=int,
            default=modelArgsDefaults.max_attrib_value_len,
            help="Maximum length of any text attribute value in a node.")
        parser.add_argument("--max_output_len",
                            type=int,
                            default=modelArgsDefaults.max_output_len,
                            help="Maximum length of the output file.")

        # Size meta-parameters of the generated neural network.
        parser.add_argument("--node_text_vec_len",
                            type=int,
                            default=modelArgsDefaults.node_text_vec_len,
                            help="Length of encoded vector for node text.")
        parser.add_argument("--attrib_value_vec_len",
                            type=int,
                            default=modelArgsDefaults.attrib_value_vec_len,
                            help="Length of an encoded attribute value.")
        parser.add_argument(
            "--node_info_propagator_stack_depth",
            type=int,
            default=modelArgsDefaults.node_info_propagator_stack_depth,
            help=
            "Depth of the graph layer stack. This determines the number of " +
            "hops that information would propagate in the graph inside nodeInfoPropagator."
        )
        parser.add_argument(
            "--propagated_info_len",
            type=int,
            default=modelArgsDefaults.propagated_info_len,
            help="Length of node information vector, when being propagated.")
        parser.add_argument(
            "--output_decoder_stack_depth",
            type=int,
            default=modelArgsDefaults.output_decoder_stack_depth,
            help="Stack depth of node decoder.")
        parser.add_argument(
            "--output_decoder_state_width",
            type=int,
            default=modelArgsDefaults.output_decoder_state_width,
            help="Width of GRU cell in output decoder.")
        parser.add_argument(
            "--attentionSubspaceVecLen",
            type=int,
            default=modelArgsDefaults.attentionSubspaceVecLen,
            help=
            "Vector length of the subspace of attnReadyVecs and decoder hidden state"
            + " that is used to compute attention factors.")

        # Other meta-parameters for training the neural network.
        parser.add_argument("--input_dropout_p",
                            type=float,
                            default=None if basicAppConfig.resume else
                            modelArgsDefaults.input_dropout_p,
                            help="Input dropout probability.")
        parser.add_argument("--dropout_p",
                            type=float,
                            default=None if basicAppConfig.resume else
                            modelArgsDefaults.dropout_p,
                            help="Dropout probability.")
        parser.add_argument(
            "--use_attention",
            type=int,
            default=modelArgsDefaults.use_attention,
            help="Use attention while selecting the most appropriate.")
        parser.add_argument(
            "--teacher_forcing_ratio",
            type=int,
            default=modelArgsDefaults.teacher_forcing_ratio,
            help="Teacher forcing ratio to use during decoder training.")
        parser.add_argument("--learning_rate",
                            type=float,
                            default=modelArgsDefaults.learning_rate,
                            help="Learning rate to use during training.")
        parser.add_argument('--clip_gradient',
                            type=float,
                            default=modelArgsDefaults.clip_gradient,
                            help='Gradient clipping value.')
        parser.add_argument(
            "--disable_batch_norm",
            type=str2bool,
            default=modelArgsDefaults.disable_batch_norm,
            help="Disable batch norm. Needed for running some tests.")
        parser.add_argument(
            "--enableSpotlight",
            type=str2bool,
            default=modelArgsDefaults.enableSpotlight,
            help="Whether to enable spotlight, which is designed to optimize" +
            " search.")
        parser.add_argument(
            "--spotlightThreshold",
            type=float,
            default=modelArgsDefaults.spotlightThreshold,
            help="Threshold used to identify encoder positions to be considered"
            + " for evaluation.")
        parser.add_argument(
            "--useSrcPtr",
            type=str2bool,
            default=modelArgsDefaults.useSrcPtr,
            help=
            "When set to true, the model is enabled to directly copy symbols from source."
        )

    if mode == AppMode.Evaluate:
        parser.add_argument(
            "--beam_count",
            type=int,
            default=None,
            help=
            "Number of beams to use when decoding. Leave as None to disable beam decoding."
        )

    if mode == AppMode.Generate:
        parser.add_argument('--count',
                            help="Total number of data entries to generate",
                            default=generatorArgsDefaults.count)
        parser = domainGeneratorModule.addArguments(parser,
                                                    generatorArgsDefaults)

    # Parse args to build app config dictionary.
    appConfig = parser.parse_args()

    # Apply random seed.
    if appConfig.random_seed is not None:
        random.seed(appConfig.random_seed)
        torch.manual_seed(appConfig.random_seed)

    appConfig.mode = int(mode)

    if mode == AppMode.Generate:
        domainGeneratorModule.postProcessArguments(appConfig)
        generatorArgs = levelDown(appConfig, "generatorArgs",
                                  generatorArgsDefaults.keys())
        return appConfig, generatorArgs
    else:
        # Post process app config.
        postProcessAppConfig(appConfig, mode)

        # Spin out model arguments from app configuration.
        modelArgs = levelDown(appConfig, "modelArgs", modelArgsDefaults.keys())

        return appConfig, modelArgs
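
The priority scheme described in the docstring (command line over domain defaults over global defaults) comes down to ordered dict merging plus argparse defaults. A minimal, self-contained sketch of the same idea, with hypothetical option names unrelated to this code base:

import argparse

globalDefaults = {"batch_size": 1000, "learning_rate": 0.001}
domainDefaults = {"learning_rate": 0.01}         # domain overrides the global value

defaults = {**globalDefaults, **domainDefaults}  # later dicts win

parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=defaults["batch_size"])
parser.add_argument("--learning_rate", type=float, default=defaults["learning_rate"])

# Command-line values (highest priority) override the merged defaults.
args = parser.parse_args(["--batch_size", "64"])
print(args.batch_size, args.learning_rate)       # 64 0.01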
Example #11
0
class Dropdown(urwid.PopUpLauncher):
    # Based in part on SelectOne widget from
    # https://github.com/tuffy/python-audio-tools

    signals = ["change"]

    label = None
    empty_label = u"\N{EMPTY SET}"
    margin = 0

    def __init__(
        self,
        items=None,
        label=None,
        default=None,
        border=False,
        scrollbar=False,
        margin=None,
        left_chars=None,
        right_chars=None,
        left_chars_top=None,
        right_chars_top=None,
        auto_complete=False,
        max_height=10,
        # keymap = {}
    ):

        if items is not None:
            self._items = items
        if label is not None:
            self.label = label
        self.default = default

        self.border = border
        self.scrollbar = scrollbar
        self.auto_complete = auto_complete
        # self.keymap = keymap

        if margin:
            self.margin = margin

        if isinstance(self.items, list):
            if len(self.items):
                if isinstance(self.items[0], tuple):
                    self._items = AttrDict(self.items)
                else:
                    logger.debug(self.items)
                    self._items = AttrDict(
                        ((item, n) for n, item in enumerate(self.items)))
            else:
                self._items = AttrDict()
        else:
            self._items = self.items
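        # After this block self._items is always dict-like: a list of (label, value)
        # tuples is used directly, a plain list maps each label to its index, and
        # dict-like input is kept as-is.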

        self.button = DropdownItem(
            u"",
            None,
            margin=self.margin,
            left_chars=left_chars_top if left_chars_top else left_chars,
            right_chars=right_chars_top if right_chars_top else right_chars)

        self.pop_up = DropdownDialog(
            self,
            self._items,
            self.default,
            label=self.label,
            border=self.border,
            margin=self.margin,
            left_chars=left_chars,
            right_chars=right_chars,
            auto_complete=self.auto_complete,
            scrollbar=scrollbar,
            max_height=max_height,
            # keymap = self.KEYMAP
        )

        urwid.connect_signal(self.pop_up, "select",
                             lambda source, selection: self.select(selection))

        urwid.connect_signal(self.pop_up, "close",
                             lambda button: self.close_pop_up())

        if self.default is not None:
            try:
                if isinstance(self.default, str):
                    self.select_label(self.default)
                else:
                    raise StopIteration
            except StopIteration:
                try:
                    self.select_value(self.default)
                except StopIteration:
                    self.focus_position = 0

        if len(self):
            self.select(self.selection)
        else:
            self.button.set_label(("dropdown_text", self.empty_label))

        cols = [(self.button_width, self.button)]

        if self.label:
            cols[0:0] = [
                ("pack", urwid.Text([("dropdown_label", "%s: " % (self.label))
                                     ])),
            ]
        self.columns = urwid.Columns(cols, dividechars=0)

        w = self.columns
        if self.border:
            w = urwid.LineBox(self.columns)
        w = urwid.Padding(w, width=self.width)

        super(Dropdown, self).__init__(w)
        urwid.connect_signal(self.button, 'click',
                             lambda button: self.open_pop_up())

    @classmethod
    def get_palette_entries(cls):
        return {
            "dropdown_text":
            PaletteEntry(
                foreground="light gray",
                background="dark blue",
                foreground_high="light gray",
                background_high="#003",
            ),
            "dropdown_focused":
            PaletteEntry(
                foreground="white",
                background="light blue",
                foreground_high="white",
                background_high="#009",
            ),
            "dropdown_highlight":
            PaletteEntry(
                foreground="yellow",
                background="light blue",
                foreground_high="yellow",
                background_high="#009",
            ),
            "dropdown_label":
            PaletteEntry(foreground="white", background="black"),
            "dropdown_prompt":
            PaletteEntry(foreground="light blue", background="black")
        }

    @keymap_command()
    def complete_prefix(self):
        if not self.auto_complete:
            return
        self.open_pop_up()
        self.pop_up.complete_prefix()

    @keymap_command()
    def complete_substring(self):
        if not self.auto_complete:
            return
        self.open_pop_up()
        self.pop_up.complete_substring()

    def create_pop_up(self):
        # print("create")
        return self.pop_up

    @property
    def button_width(self):
        return self.pop_up.max_item_width + self.button.decoration_width

    @property
    def pop_up_width(self):
        w = self.button_width
        if self.border:
            w += 2
        return w

    @property
    def contents_width(self):
        # raise Exception(self.button.width)
        w = self.button_width
        if self.label:
            w += len(self.label) + 2
        return max(self.pop_up.width, w)

    @property
    def width(self):
        width = max(self.contents_width, self.pop_up.width)
        if self.border:
            width += 2
        return width

    @property
    def height(self):
        height = self.pop_up.height + 1
        return height

    def pack(self, size, focus=False):
        return (self.width, self.height)

    @property
    def page_size(self):
        return self.pop_up.height

    def open_pop_up(self):
        # print("open")
        super(Dropdown, self).open_pop_up()

    def close_pop_up(self):
        super(Dropdown, self).close_pop_up()

    def get_pop_up_parameters(self):
        return {
            'left': (len(self.label) + 2 if self.label else 0),
            'top': 0,
            'overlay_width': self.pop_up_width,
            'overlay_height': self.pop_up.height
        }

    @property
    def focus_position(self):
        return self.pop_up.focus_position

    @focus_position.setter
    def focus_position(self, pos):
        # self.select_index(pos)
        old_pos = self.focus_position
        self.pop_up.selected_button = self.pop_up.focus_position = pos
        self.select(self.selection)

    @property
    def items(self):
        return self._items

    @property
    def selection(self):
        return self.pop_up.selection

    def select_label(self, label, case_sensitive=False):

        old_value = self.value

        def f(x):
            return x

        if not case_sensitive:

            def f(x):
                return x.lower()

        index = next(
            itertools.dropwhile(lambda x: f(x[1]) != f(label),
                                enumerate((self._items.keys()))))[0]
        self.focus_position = index
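        # e.g. with items [("Red", "r"), ("Green", "g")], select_label("green")
        # matches case-insensitively and sets focus_position to 1.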

    def select_value(self, value):

        index = next(
            itertools.dropwhile(lambda x: x[1] != value,
                                enumerate((self._items.values()))))[0]
        self.focus_position = index

    @property
    def labels(self):
        return self._items.keys()

    @property
    def values(self):
        return self._items.values()

    @property
    def selected_label(self):
        return self.selection.label

    @selected_label.setter
    def selected_label(self, label):
        return self.select_label(label)

    @property
    def selected_value(self):
        if not self.selection:
            return None
        return self.selection.value

    @selected_value.setter
    def selected_value(self, value):
        return self.select_value(value)

    @property
    def value(self):
        return self.selected_value

    @value.setter
    def value(self, value):
        old_value = self.value

        # try to set by value.  if not found, try to set by label
        try:
            self.selected_value = value
        except StopIteration:
            self.selected_label = value

    def cycle_prev(self):
        self.cycle(-1)

    @keymap_command("cycle")
    def cycle(self, n):
        pos = self.focus_position + n
        if pos > len(self) - 1:
            pos = len(self) - 1
        elif pos < 0:
            pos = 0
        # self.focus_position = pos
        self.focus_position = pos

    def select(self, button):
        logger.debug("select: %s" % (button))
        self.button.set_label(("dropdown_text", button.label))
        self.pop_up.dropdown_buttons.listbox.set_focus_valign("top")
        # if old_pos != pos:
        self._emit("change", self.selected_label, self.selected_value)

    # def set_items(self, items, selected_value):
    #     self._items = items
    #     self.make_selection([label for (label, value) in items if
    #                          value is selected_value][0],
    #                         selected_value)
    def __len__(self):
        return len(self.items)
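
A rough usage sketch for the widget above, assuming the rest of the panwid package is importable and that the palette entries from get_palette_entries() are registered elsewhere; urwid pop-up widgets additionally require a main loop created with pop_ups=True:

import urwid

dropdown = Dropdown(
    items=[("Red", "r"), ("Green", "g"), ("Blue", "b")],
    label="Color",
    default="Green",
    auto_complete=True,
)

# "change" is emitted with the newly selected label and value.
urwid.connect_signal(dropdown, "change",
                     lambda widget, label, value: None)

loop = urwid.MainLoop(urwid.Filler(dropdown, valign="top"), pop_ups=True)
loop.run()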
Example #12
0
class BaseProvider(abc.ABC, Observable):
    """
    Abstract base class from which providers should inherit
    """

    SESSION_CLASS = StreamSession
    LISTING_CLASS = model.TitledMediaListing
    # VIEW_CLASS = SimpleProviderView
    # FILTERS = AttrDict()
    ATTRIBUTES = AttrDict(title={"width": ("weight", 1)})
    MEDIA_TYPES = None
    RPC_METHODS = []

    def __init__(self, *args, **kwargs):
        self._view = None
        self._session = None
        self._active = False
        self._filters = AttrDict(
            {n: f(provider=self, name=n)
             for n, f in self.FILTERS.items()})

        rules = AttrDict(self.config.rules.label or {},
                         **config.settings.profile.rules.label or {})

        labels = AttrDict(self.config.labels, **config.settings.profile.labels)

        self.rule_map = AttrDict([(re.compile(k, re.IGNORECASE), v)
                                  for k, v in [(r, rules[r])
                                               for r in rules.keys()]])

        self.highlight_map = AttrDict([(re.compile(k,
                                                   re.IGNORECASE), labels[v])
                                       for k, v in rules.items()])

        self.highlight_re = re.compile(
            "(" + "|".join([k.pattern
                            for k in self.highlight_map.keys()]) + ")",
            re.IGNORECASE)
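        # e.g. rules {"finals?": "sports"} with labels {"sports": <label spec>}
        # yield rule_map {re("finals?"): "sports"}, highlight_map
        # {re("finals?"): <label spec>}, and highlight_re r"(finals?)", one combined
        # pattern covering every rule regex.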
        # print(self.filters)
        self.filters["search"].connect("changed", self.on_search_change)

    def init_config(self):
        with db_session:
            try:
                self.provider_data = model.ProviderData.get(
                    name=self.IDENTIFIER).settings
            except AttributeError:
                self.provider_data = model.ProviderData(
                    name=self.IDENTIFIER).settings

        for name, f in self.filters.items():
            value = self.default_filter_values.get(name, None)
            if value:
                try:
                    f.value = value
                except (ValueError, ):
                    # import ipdb; ipdb.set_trace()
                    pass

    @property
    def default_filter_values(self):
        return AttrDict()

    @db_session
    def save_provider_data(self):
        model.ProviderData.get(
            name=self.IDENTIFIER).settings = self.provider_data
        commit()

    @property
    def LISTING_CLASS(self):
        for cls in [self.__class__] + list(self.__class__.__bases__):
            pkg = sys.modules.get(cls.__module__)
            pkgname = pkg.__name__.split(".")[-1]
            try:
                return next(
                    v for k, v in pkg.__dict__.items()
                    if pkgname in k.lower() and k.endswith("MediaListing"))
            except StopIteration:
                continue
        return model.TitledMediaListing

    @property
    def MEDIA_SOURCE_CLASS(self):
        for cls in [self.__class__] + list(self.__class__.mro()):
            pkg = sys.modules.get(cls.__module__)
            pkgname = pkg.__name__.split(".")[-1]
            try:
                return next(
                    v for k, v in pkg.__dict__.items()
                    if pkgname in k.lower() and k.endswith("MediaSource"))
            except (StopIteration, AttributeError):
                continue
        return model.MediaSource

    @property
    def helper(self):
        return None

    @property
    def session_params(self):
        return {"proxies": config.settings.profile.get("proxies")}

    @property
    def PREVIEW_TYPES(self):
        return ["default"]

    @property
    def session(self):
        if self._session is None:
            session_params = self.session_params
            self._session = self.SESSION_CLASS.new(self.IDENTIFIER,
                                                   **session_params)
        return self._session

    @property
    def gui(self):
        return self._view is not None

    @property
    def filters(self):
        return self._filters

    @property
    def view(self):
        if not self._view:
            self._view = self.make_view()
            self._view.update()
        return self._view

    @property
    def toolbar(self):
        return self.view.toolbar

    @property
    def is_active(self):
        return self._active

    def activate(self):
        if self.is_active:
            return
        self._active = True
        self.on_activate()

    def deactivate(self):
        if not self.is_active:
            return
        self.on_deactivate()
        self._active = False

    def on_activate(self):
        self.reset()

    def on_deactivate(self):
        self.view.on_deactivate()

    @property
    def VIEW(self):
        return SimpleProviderView(self, ProviderDataTable(self))

    # @abc.abstractmethod
    def make_view(self):
        if not self.config_is_valid:
            return InvalidConfigView(self.NAME, self.REQUIRED_CONFIG)
        return self.VIEW

    @classproperty
    def IDENTIFIER(cls):
        return next(c.__module__ for c in cls.__mro__
                    if __package__ in c.__module__).split(".")[-1]

    @classproperty
    @abc.abstractmethod
    def NAME(cls):
        return cls.__name__.replace("Provider", "")

    @property
    def FILTERS_BROWSE(self):
        return AttrDict()

    @property
    def FILTERS_OPTIONS(self):
        return AttrDict([("search", TextFilter)])

    @property
    def FILTERS(self):
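        # Merge browse filters with option filters: subclasses that define
        # FILTERS_BROWSE get those first, followed by the default "search" filter.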
        d = getattr(self, "FILTERS_BROWSE", AttrDict())
        d.update(getattr(self, "FILTERS_OPTIONS", {}))
        return d

    def on_search_change(self, value, *args):
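        # Debounce: each change cancels the previously scheduled search and waits
        # a second before the query is actually applied.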

        if getattr(self, "search_task", False):
            self.search_task.cancel()

        async def apply_search_async():
            await asyncio.sleep(1)
            await self.apply_search_query(value)

        self.search_task = state.event_loop.create_task(apply_search_async())

    async def apply_search_query(self, query):
        self.view.apply_search_query(query)

    def parse_spec(self, spec):

        (identifier, options) = MEDIA_SPEC_RE.search(spec).groups()
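        # The identifier part drives filter selection (parse_identifier /
        # apply_identifier below); the options part is parsed by parse_options()
        # and applied on top of any options derived from the identifier.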

        try:
            selection, filters, identifier_options = self.parse_identifier(
                identifier)
            self.apply_identifier(selection, filters, identifier_options)
        except SGIncompleteIdentifier:
            selection, identifier_options = None, {}

        options = AttrDict(identifier_options, **self.parse_options(options))
        self.apply_options(options)
        return (selection, options)

    def parse_identifier(self, identifier):
        return (None, identifier, {})

    def apply_identifier(self, selection, filters, options):

        if filters:
            selected_filters = zip(self.filters.keys(), filters)

            for f, value in selected_filters:
                if value is None or value in [
                        getattr(self.filters[f], "selected_label", None),
                        self.filters[f].value
                ]:
                    continue
                try:
                    self.filters[f].selected_label = value
                except ValueError:
                    self.filters[f].value = value

    def parse_options(self, options):
        if not options:
            options = ""

        d = AttrDict(
            [(list(self.FILTERS_OPTIONS.keys())[n], v) for n, v in enumerate(
                [o for o in options.split(",") if "=" not in o]) if v],
            **dict(o.split("=") for o in options.split(",") if "=" in o))
        return d

    def apply_options(self, options):

        for k, v in options.items():
            if v is None:
                continue
            if k in self.filters:
                logger.debug(f"option: {k}={v}")
                try:
                    if self.filters[k].value != v:
                        self.filters[k].value = v
                except StopIteration:
                    raise SGException("invalid value for %s: %s" % (k, v))

    def new_media_source(self, *args, **kwargs):
        return self.MEDIA_SOURCE_CLASS.attr_class(provider_id=self.IDENTIFIER,
                                                  *args,
                                                  **kwargs)

    def new_listing(self, **kwargs):
        return self.LISTING_CLASS.attr_class(provider_id=self.IDENTIFIER,
                                             **kwargs)

    async def play(self, listing, **kwargs):
        # sources, kwargs = self.extract_sources(listing, **kwargs)
        task = self.create_play_task(listing, **kwargs)
        yield state.task_manager.play(task)

    async def download(self,
                       listing,
                       index=None,
                       no_task_manager=False,
                       **kwargs):
        for task in self.create_download_tasks(listing, index=index, **kwargs):
            yield state.task_manager.download(task)

    def translate_template(self, template):
        return template

    # def new_listing_attr(self, **kwargs):
    #     return self.LISTING_CLASS.attr_class(
    #         provider_id = self.IDENTIFIER,
    #         **kwargs
    #     )

    def sort(self, field, reverse=False):
        self.view.sort(field, reverse=reverse)

    @abc.abstractmethod
    def listings(self, filters=None):
        pass

    def should_download(self, listing):
        return listing.label in (list(self.config.rules) +
                                 list(config.settings.profile.rules.download))

    def on_new_listing(self, listing):
        try:
            label = next(l for r, l in self.rule_map.items()
                         if r.search(listing.title))
            listing.label = label
            if self.should_download(listing):
                self.download(listing)

        except StopIteration:
            pass

    @property
    def config(self):
        return config.ConfigTree(
            config.settings.profile.providers.get(self.IDENTIFIER, {}))

    @property
    def config_is_valid(self):
        def check_config(required, cfg):
            if isinstance(required, dict):
                for k, v in required.items():
                    if k not in cfg:
                        return False
                    if not check_config(required[k], cfg[k]):
                        return False
            else:
                for k in required:
                    if k not in cfg:
                        return False
            return True
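        # e.g. REQUIRED_CONFIG = {"credentials": ["username", "password"]} requires
        # a "credentials" section containing both keys, while a flat list such as
        # ["username", "password"] only requires each named key to be present.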

        # return all([ self.config.get(x, None) is not None
        # for x in getattr(self, "REQUIRED_CONFIG", [])
        return check_config(getattr(self, "REQUIRED_CONFIG", []), self.config)

    def get_source(self, selection, **kwargs):
        sources = sorted(selection.sources,
                         key=lambda s: getattr(s, "rank", 0))
        if not isinstance(sources, list):
            sources = [sources]

        return sources

    def play_args(self, selection, **kwargs):
        source = self.get_source(selection, **kwargs)
        return (source, kwargs)

    def filter_args(self):
        return {f: self.filters[f].value for f in self.filters}

    def extract_sources(self, listing, **kwargs):
        try:
            sources, kwargs = self.play_args(listing, **kwargs)
            kwargs.update({
                k: v
                for k, v in self.filter_args().items() if k not in kwargs
            })
        except SGStreamNotFound as e:
            logger.error(f"stream not found: {e}")
            return

        # FIXME: For now, we just throw playlists of media items at the default
        # player program and hope it can handle all of them.

        player_spec = None
        downloader_spec = None

        if not isinstance(sources, list):
            sources = [sources]

        for s in sources:
            if not s.media_type:
                # Try to set the content types of the source(s) with a HTTP HEAD
                # request if the provider didn't specify one.
                s.media_type = self.session.head(
                    s.locator).headers.get("Content-Type", "").split("/")[0]

        return sources, kwargs

    def create_play_task(self, listing, **kwargs):

        sources, kwargs = self.extract_sources(listing, **kwargs)

        media_types = set([s.media_type for s in sources if s.media_type])

        player_spec = {"media_types": media_types}
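        # Pure image listings get no helper ({None: None}); otherwise fall back
        # from the provider config's helpers to the first source's helper, then
        # to the provider-level default.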

        if media_types == {"image"}:
            downloader_spec = {None: None}
        else:
            downloader_spec = (getattr(self.config, "helpers", None)
                               or getattr(sources[0], "helper", None)
                               or self.helper)

        return ListingsPlayMediaTask.attr_class(provider=self.NAME,
                                                title=listing.title,
                                                listing=listing,
                                                sources=sources,
                                                args=(player_spec,
                                                      downloader_spec),
                                                kwargs=kwargs)

    @property
    def limit(self):
        return None

    def refresh(self):
        self.view.refresh()

    def reset(self):
        self.view.reset()

    def __str__(self):
        return self.NAME

    def __repr__(self):
        return f"<{type(self)}: {self.NAME}>"

    @property
    def playlist_title(self):
        return f"[{self.IDENTIFIER}]"

    @property
    def auto_preview_enabled(self):
        return not self.config.auto_preview.disabled

    @property
    def auto_preview_default(self):
        return self.config.auto_preview.default if self.auto_preview_enabled else "default"

    @property
    def strip_emoji(self):
        return (self.config.get("strip_emoji")
                or config.settings.profile.tables.get("strip_emoji") or False)

    @property
    def translate(self):
        return (self.config.get("translate")
                or config.settings.profile.tables.get("translate") or False)

    @property
    def translate_src(self):
        return "auto"

    @property
    def translate_dest(self):
        return (self.config.get("translate_dest")
                or config.settings.profile.tables.get("translate_dest")
                or "en")

    @property
    def output_path(self):
        return (self.config.get_path("output.path")
                or config.settings.profile.get_path("output.path"))