class CliFetchInstanceVnicsBase(CliAccess):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def get(self, id):
        instance_uuid = id[:id.rindex('-')]
        instance = self.inv.get_by_id(self.get_env(), instance_uuid)
        if not instance:
            return []
        host = self.inv.get_by_id(self.get_env(), instance["host"])
        if not host or "Compute" not in host["host_type"]:
            return []
        lines = self.run_fetch_lines("virsh list", instance["host"])
        del lines[:2]  # remove header
        virsh_ids = [l.split()[0] for l in lines if l > ""]
        results = []
        # Note: virsh lists instances by their host-local names and IDs, which
        # cannot be matched against the data we have for the instance so far.
        # We therefore decide whether a virsh domain is the correct instance
        # by comparing its UUID with the one in the dumpxml output.
        for id in virsh_ids:
            results.extend(self.get_vnics_from_dumpxml(id, instance))
        return results

    def get_vnics_from_dumpxml(self, id, instance):
        xml_string = self.run("virsh dumpxml " + id, instance["host"])
        if not xml_string.strip():
            return []
        response = xmltodict.parse(xml_string)
        if instance["uuid"] != response["domain"]["uuid"]:
            # this is the wrong instance - skip it
            return []
        try:
            vnics = response["domain"]["devices"]["interface"]
        except KeyError:
            return []
        if isinstance(vnics, dict):
            vnics = [vnics]
        for v in vnics:
            self.set_vnic_properties(v, instance)
        return vnics

    def set_vnic_properties(self, v, instance):
        v["name"] = self.get_vnic_name(v, instance)
        v["id"] = "{}-{}".format(instance["host"], v["name"])
        v["vnic_type"] = "instance_vnic"
        v["host"] = instance["host"]
        v["instance_id"] = instance["id"]
        v["instance_db_id"] = instance["_id"]
        v["mac_address"] = v["mac"]["@address"]
        instance["mac_address"] = v["mac_address"]
        self.inv.set(instance)
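
# A minimal, self-contained sketch (sample XML and values are illustrative,
# not taken from a real deployment) of the UUID check used above: virsh domain
# names are host-local, so only the <uuid> element of "virsh dumpxml" ties a
# libvirt domain back to the Nova instance.
import xmltodict

sample_dumpxml = """
<domain type='kvm'>
  <uuid>3a1b2c3d-4e5f-6789-abcd-ef0123456789</uuid>
  <devices>
    <interface type='bridge'>
      <mac address='fa:16:3e:aa:bb:cc'/>
    </interface>
  </devices>
</domain>
"""

parsed = xmltodict.parse(sample_dumpxml)
instance = {"uuid": "3a1b2c3d-4e5f-6789-abcd-ef0123456789"}
if parsed["domain"]["uuid"] == instance["uuid"]:
    vnics = parsed["domain"]["devices"]["interface"]
    # a single <interface> element parses to a dict, several parse to a list
    if isinstance(vnics, dict):
        vnics = [vnics]
    print([v["mac"]["@address"] for v in vnics])  # ['fa:16:3e:aa:bb:cc']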
Example #2
class ApiFetchPort(ApiAccess):
    def __init__(self):
        super(ApiFetchPort, self).__init__()
        self.inv = InventoryMgr()

    def get(self, project_id):
        if not project_id:
            self.log.info("Get method needs ID parameter")
            return []
        # use project admin credentials, to be able to fetch all ports
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        ret = []
        for region in self.regions:
            port = self.get_port(region, token, project_id)
            if port:
                ret.append(port)
        if not ret:
            self.log.info("ApiFetchPort: Port not found.")
        return ret

    def get_port(self, region, token, id):
        endpoint = self.get_region_url_nover(region, "neutron")
        req_url = endpoint + "/v2.0/ports/" + id
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        if not "port" in response:
            return []

        doc = response["port"]
        doc["master_parent_type"] = "network"
        doc["master_parent_id"] = doc["network_id"]
        doc["parent_type"] = "ports_folder"
        doc["parent_id"] = doc["network_id"] + "-ports"
        doc["parent_text"] = "Ports"
        # get the project name
        net = self.inv.get_by_id(self.get_env(), doc["network_id"])
        if net:
            doc["name"] = doc["mac_address"]
        else:
            doc["name"] = doc["id"]
        project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
        if project:
            doc["project"] = project["name"]
        return doc
Example #3
class ApiFetchPorts(ApiAccess):
    def __init__(self):
        super(ApiFetchPorts, self).__init__()
        self.inv = InventoryMgr()

    def get(self, project_id):
        # use project admin credentials, to be able to fetch all ports
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        ret = []
        for region in self.regions:
            ret.extend(self.get_ports_for_region(region, token))
        return ret

    def get_ports_for_region(self, region, token):
        endpoint = self.get_region_url_nover(region, "neutron")
        req_url = endpoint + "/v2.0/ports"
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        if not "ports" in response:
            return []
        ports = response["ports"]
        for doc in ports:
            doc["master_parent_type"] = "network"
            doc["master_parent_id"] = doc["network_id"]
            doc["parent_type"] = "ports_folder"
            doc["parent_id"] = doc["network_id"] + "-ports"
            doc["parent_text"] = "Ports"
            # get the project name
            net = self.inv.get_by_id(self.get_env(), doc["network_id"])
            if net:
                doc["name"] = doc["mac_address"]
            else:
                doc["name"] = doc["id"]
            project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
            if project:
                doc["project"] = project["name"]
        return ports
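
# A minimal sketch (endpoint URL and token are placeholders) of the plain
# Neutron REST call that get_ports_for_region() wraps through get_url():
# list all ports at <neutron endpoint>/v2.0/ports, authenticating with a
# keystone token in the X-Auth-Token header.
import requests

endpoint = "http://neutron.example.com:9696"   # placeholder region endpoint
token_id = "placeholder-keystone-token"

response = requests.get(endpoint + "/v2.0/ports",
                        headers={"X-Auth-Token": token_id}).json()
for port in response.get("ports", []):
    print(port["id"], port["network_id"], port["mac_address"])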
Example #4
class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton):
    def __init__(self):
        super(ApiFetchHostInstances, self).__init__()
        self.inv = InventoryMgr()
        self.endpoint = ApiAccess.base_url.replace(":5000", ":8774")
        self.projects = None
        self.db_fetcher = DbFetchInstances()

    def get_projects(self):
        if not self.projects:
            projects_list = self.inv.get(self.get_env(), "project", None)
            self.projects = [p["name"] for p in projects_list]

    def get(self, id):
        self.get_projects()
        host_id = id[:id.rindex("-")]
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host or "Compute" not in host.get("host_type", ""):
            return []
        instances_found = self.get_instances_from_api(host_id)
        self.db_fetcher.get_instance_data(instances_found)
        return instances_found

    def get_instances_from_api(self, host_name):
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        tenant_id = token["tenant"]["id"]
        req_url = self.endpoint + "/v2/" + tenant_id + \
                  "/os-hypervisors/" + host_name + "/servers"
        response = self.get_url(req_url, {"X-Auth-Token": token["id"]})
        ret = []
        if not "hypervisors" in response:
            return []
        if not "servers" in response["hypervisors"][0]:
            return []
        for doc in response["hypervisors"][0]["servers"]:
            doc["id"] = doc["uuid"]
            doc["host"] = host_name
            doc["local_name"] = doc.pop("name")
            ret.append(doc)
        self.log.info("found %s instances for host: %s", str(len(ret)),
                      host_name)
        return ret
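
# A minimal sketch (response content is illustrative) of how the
# os-hypervisors response above is reshaped: Nova reports each server with
# 'uuid' and 'name', which the fetcher renames to the inventory's 'id' and
# 'local_name' and stamps with the host name.
sample_response = {
    "hypervisors": [{
        "hypervisor_hostname": "compute-1.example.com",
        "servers": [
            {"uuid": "9c0a7e46-0000-4000-8000-000000000001",
             "name": "instance-00000001"},
            {"uuid": "9c0a7e46-0000-4000-8000-000000000002",
             "name": "instance-00000002"},
        ]
    }]
}

host_name = "compute-1.example.com"
instances = []
for doc in sample_response["hypervisors"][0].get("servers", []):
    doc["id"] = doc["uuid"]
    doc["host"] = host_name
    doc["local_name"] = doc.pop("name")
    instances.append(doc)

print(len(instances), instances[0]["local_name"])  # 2 instance-00000001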
Example #5
    def get(self, id):
        query = """
            SELECT CONCAT('aggregate-', a.name, '-', host) AS id, host AS name
            FROM nova.aggregate_hosts ah
                JOIN nova.aggregates a ON a.id = ah.aggregate_id
            WHERE ah.deleted = 0 AND aggregate_id = %s
        """
        hosts = self.get_objects_list_for_id(query, "host", id)
        if hosts:
            inv = InventoryMgr()
            for host_rec in hosts:
                host_id = host_rec['name']
                host = inv.get_by_id(self.get_env(), host_id)
                if not host:
                    self.log.error('unable to find host {} '
                                   'from aggregate {} in inventory'.format(
                                       host_id, id))
                    continue
                host_rec['ref_id'] = bson.ObjectId(host['_id'])
        return hosts
Example #6
class CliFetchVconnectors(CliAccess, metaclass=ABCSingleton):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    @abstractmethod
    def get_vconnectors(self, host):
        raise NotImplementedError("Subclass must override get_vconnectors()")

    def get(self, id):
        host_id = id[:id.rindex('-')]
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("CliFetchVconnectors: host not found: " + host_id)
            return []
        if "host_type" not in host:
            self.log.error("host does not have host_type: " + host_id + \
                           ", host: " + str(host))
            return []
        return self.get_vconnectors(host)
Example #7
class DbFetchVedgesVpp(DbAccess, CliAccess, metaclass=Singleton):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def get(self, id):
        host_id = id[:id.rindex('-')]
        vedge = {
            'host': host_id,
            'id': host_id + '-VPP',
            'name': 'VPP-' + host_id,
            'agent_type': 'VPP'
        }
        ver = self.run_fetch_lines('vppctl show ver', host_id)
        if ver:
            ver = ver[0]
            vedge['binary'] = ver[:ver.index(' ', ver.index(' ') + 1)]
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("unable to find host in inventory: %s", host_id)
            return []
        host_types = host["host_type"]
        if "Network" not in host_types and "Compute" not in host_types:
            return []
        interfaces = self.run_fetch_lines('vppctl show int', host_id)
        vedge['ports'] = self.fetch_ports(interfaces)
        return [vedge]

    def fetch_ports(self, interfaces):
        ports = {}
        for i in interfaces:
            if not i or i.startswith(' '):
                continue
            parts = i.split()
            port = {
                'id': parts[1],
                'state': parts[2],
                'name': parts[0]
            }
            ports[port['name']] = port
        return ports
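
# A minimal, self-contained sketch (sample "vppctl show int" output is
# illustrative) of the parsing in fetch_ports() above: the header line and
# indented continuation lines are skipped, and the first three columns of
# each interface line map to name, index and state.
sample_int_lines = [
    "              Name               Idx    State  Counter     Count",
    "GigabitEthernet0/8/0              1      up     rx packets  1234",
    "  rx bytes                                              567890",
    "local0                            0     down",
]

ports = {}
for line in sample_int_lines:
    if not line or line.startswith(' '):
        continue
    parts = line.split()
    ports[parts[0]] = {'id': parts[1], 'state': parts[2], 'name': parts[0]}

print(ports['GigabitEthernet0/8/0'])
# {'id': '1', 'state': 'up', 'name': 'GigabitEthernet0/8/0'}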
Example #8
class CliFetchVserviceVnics(CliAccess):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        self.if_header = re.compile(r'^\d+: ([^:]+): (.+)')
        self.regexps = [{
            'name': 'mac_address',
            're': r'^.*\slink/ether\s(\S+)\s'
        }, {
            'name': 'IP Address',
            're': r'^\s*inet ([0-9.]+)/'
        }, {
            'name': 'netmask',
            're': r'^\s*inet [0-9.]+/([0-9]+)'
        }, {
            'name': 'IPv6 Address',
            're': r'^\s*inet6 ([^/]+)/.* global '
        }]

    def get(self, host_id):
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("host not found: " + host_id)
            return []
        if "host_type" not in host:
            self.log.error("host does not have host_type: " + host_id +
                           ", host: " + str(host))
            return []
        if "Network" not in host["host_type"]:
            return []
        lines = self.run_fetch_lines("ip netns list", host_id)
        ret = []
        for l in [
                l for l in lines
                if l.startswith("qdhcp") or l.startswith("qrouter")
        ]:
            service = l.strip()
            service = service if ' ' not in service \
                else service[:service.index(' ')]
            ret.extend(self.handle_service(host_id, service))
        return ret

    def handle_service(self, host, service, enable_cache=True):
        cmd = "ip netns exec " + service + " ip address show"
        lines = self.run_fetch_lines(cmd, host, enable_cache)
        interfaces = []
        current = None
        for line in lines:
            matches = self.if_header.match(line)
            if matches:
                if current:
                    self.set_interface_data(current)
                name = matches.group(1).strip(":")
                # ignore 'lo' interface
                if name == 'lo':
                    current = None
                else:
                    line_remainder = matches.group(2)
                    master_parent_id = "{}-{}".format(host, service)
                    current = {
                        "id": host + "-" + name,
                        "vnic_type": "vservice_vnic",
                        "host": host,
                        "name": name,
                        "lines": []
                    }
                    self.set_folder_parent(current,
                                           object_type="vnic",
                                           master_parent_type="vservice",
                                           master_parent_id=master_parent_id,
                                           parent_text="vNICs")
                    interfaces.append(current)
                    self.handle_line(current, line_remainder)
            else:
                if current:
                    self.handle_line(current, line)
        if current:
            self.set_interface_data(current)
        return interfaces

    def handle_line(self, interface, line):
        self.find_matching_regexps(interface, line, self.regexps)
        interface["lines"].append(line.strip())

    def set_interface_data(self, interface):
        if not interface or 'IP Address' not in interface or 'netmask' not in interface:
            return

        interface["data"] = "\n".join(interface.pop("lines", None))
        interface["cidr"] = self.get_cidr_for_vnic(interface)
        network = self.inv.get_by_field(self.get_env(),
                                        "network",
                                        "cidrs",
                                        interface["cidr"],
                                        get_single=True)
        if not network:
            return
        interface["network"] = network["id"]
        # set network for the vservice, to check network on clique creation
        vservice = self.inv.get_by_id(self.get_env(),
                                      interface["master_parent_id"])
        network_id = network["id"]
        if "network" not in vservice:
            vservice["network"] = list()
        if network_id not in vservice["network"]:
            vservice["network"].append(network_id)
        self.inv.set(vservice)

    # find CIDR string by IP address and netmask
    def get_cidr_for_vnic(self, vnic):
        if "IP Address" not in vnic:
            vnic["IP Address"] = "No IP Address"
            return "No IP Address"
        ipaddr = vnic["IP Address"].split('.')
        vnic['netmask'] = self.convert_netmask(vnic['netmask'])
        netmask = vnic["netmask"].split('.')

        # calculate network start
        net_start = []
        for pos in range(0, 4):
            net_start.append(str(int(ipaddr[pos]) & int(netmask[pos])))

        cidr_string = '.'.join(net_start) + '/'
        cidr_string = cidr_string + self.get_net_size(netmask)
        return cidr_string

    def get_net_size(self, netmask):
        binary_str = ''
        for octet in netmask:
            binary_str += bin(int(octet))[2:].zfill(8)
        return str(len(binary_str.rstrip('0')))

    @staticmethod
    def convert_netmask(cidr):
        netmask_conversion = {
            '32': '255.255.255.255',
            '31': '255.255.255.254',
            '30': '255.255.255.252',
            '29': '255.255.255.248',
            '28': '255.255.255.240',
            '27': '255.255.255.224',
            '26': '255.255.255.192',
            '25': '255.255.255.128',
            '24': '255.255.255.0',
            '23': '255.255.254.0',
            '22': '255.255.252.0',
            '21': '255.255.248.0',
            '20': '255.255.240.0',
            '19': '255.255.224.0',
            '18': '255.255.192.0',
            '17': '255.255.128.0',
            '16': '255.255.0.0',
            '15': '255.254.0.0',
            '14': '255.252.0.0',
            '13': '255.248.0.0',
            '12': '255.240.0.0',
            '11': '255.224.0.0',
            '10': '255.192.0.0',
            '9': '255.128.0.0',
            '8': '255.0.0.0',
            '7': '254.0.0.0',
            '6': '252.0.0.0',
            '5': '248.0.0.0',
            '4': '240.0.0.0',
            '3': '224.0.0.0',
            '2': '192.0.0.0',
            '1': '128.0.0.0',
            '0': '0.0.0.0'
        }
        if cidr not in netmask_conversion:
            raise ValueError("can't convert to netmask: {}".format(cidr))
        return netmask_conversion.get(cidr)
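
# A minimal sketch (IP and prefix are illustrative) of the CIDR calculation
# above: AND each address octet with the corresponding netmask octet to get
# the network start, then count the leading one-bits of the mask for the
# prefix length. The result matches what the standard ipaddress module gives.
import ipaddress

ip, prefix_len = '10.0.0.14', '24'        # as parsed from 'inet 10.0.0.14/24'
netmask = '255.255.255.0'                 # convert_netmask('24')

net_start = [str(int(a) & int(m))
             for a, m in zip(ip.split('.'), netmask.split('.'))]
mask_bits = ''.join(bin(int(o))[2:].zfill(8) for o in netmask.split('.'))
manual_cidr = '.'.join(net_start) + '/' + str(len(mask_bits.rstrip('0')))

expected = str(ipaddress.ip_network('{}/{}'.format(ip, prefix_len),
                                    strict=False))
assert manual_cidr == expected
print(manual_cidr)  # 10.0.0.0/24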
Example #9
class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        self.port_re = re.compile(r"^\s*port (\d+): ([^(]+)( \(internal\))?$")
        self.port_line_header_prefix = " " * 8 + "Port "

    def get(self, parent_id):
        host_id = parent_id[:parent_id.rindex('-')]
        results = self.get_objects_list_for_id(
            """
              SELECT *
              FROM {}.agents
              WHERE host = %s AND agent_type = 'Open vSwitch agent'
            """.format(self.neutron_db),
            "vedge", host_id)
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("unable to find host in inventory: %s", host_id)
            return []
        host_types = host["host_type"]
        if "Network" not in host_types and "Compute" not in host_types:
            return []
        vsctl_lines = self.run_fetch_lines("ovs-vsctl show", host["id"])
        ports = self.fetch_ports(host, vsctl_lines)
        for doc in results:
            doc["name"] = doc["host"] + "-OVS"
            doc["configurations"] = json.loads(doc["configurations"])
            doc["ports"] = ports
            doc["tunnel_ports"] = self.get_overlay_tunnels(doc, vsctl_lines)
        return results

    def fetch_ports(self, host, vsctl_lines):
        host_types = host["host_type"]
        if "Network" not in host_types and "Compute" not in host_types:
            return {}
        ports = self.fetch_ports_from_dpctl(host["id"])
        self.fetch_port_tags_from_vsctl(vsctl_lines, ports)
        return ports

    def fetch_ports_from_dpctl(self, host_id):
        cmd = "ovs-dpctl show"
        lines = self.run_fetch_lines(cmd, host_id)
        ports = {}
        for l in lines:
            port_matches = self.port_re.match(l)
            if not port_matches:
                continue
            port = {}
            port_id = port_matches.group(1)
            name = port_matches.group(2)
            is_internal = port_matches.group(3) == " (internal)"
            port["internal"] = is_internal
            port["id"] = port_id
            port["name"] = name
            ports[name] = port
        return ports

    # from ovs-vsctl, fetch tags of ports
    # example format of ovs-vsctl output for a specific port:
    #        Port "tap9f94d28e-7b"
    #            tag: 5
    #            Interface "tap9f94d28e-7b"
    #                type: internal
    def fetch_port_tags_from_vsctl(self, vsctl_lines, ports):
        port = None
        for l in vsctl_lines:
            if l.startswith(self.port_line_header_prefix):
                port = None
                port_name = l[len(self.port_line_header_prefix):]
                # remove quotes from port name
                if '"' in port_name:
                    port_name = port_name[1:][:-1]
                if port_name in ports:
                    port = ports[port_name]
                continue
            if not port:
                continue
            if l.startswith(" " * 12 + "tag: "):
                port["tag"] = l[l.index(":") + 2:]
                ports[port["name"]] = port
        return ports

    def get_overlay_tunnels(self, doc, vsctl_lines):
        if doc["agent_type"] != "Open vSwitch agent":
            return {}
        if "tunneling_ip" not in doc["configurations"]:
            return {}
        if not doc["configurations"]["tunneling_ip"]:
            self.get_pnics(doc)
            return {}

        # read the 'br-tun' interface ports
        # this will be used later in the OTEP
        tunnel_bridge_header = " " * 4 + "Bridge br-tun"
        try:
            br_tun_loc = vsctl_lines.index(tunnel_bridge_header)
        except ValueError:
            return []
        lines = vsctl_lines[br_tun_loc + 1:]
        tunnel_ports = {}
        port = None
        for l in lines:
            # the br-tun section ends when indentation drops to four spaces
            # or fewer, so stop processing at that point
            if not l.startswith(" " * 5):
                break
            if l.startswith(self.port_line_header_prefix):
                if port:
                    tunnel_ports[port["name"]] = port
                name = l[len(self.port_line_header_prefix):].strip('" ')
                port = {"name": name}
            elif port and l.startswith(" " * 12 + "Interface "):
                interface = l[10 + len("Interface ") + 1:].strip('" ')
                port["interface"] = interface
            elif port and l.startswith(" " * 16):
                colon_pos = l.index(":")
                attr = l[:colon_pos].strip()
                val = l[colon_pos + 2:].strip('" ')
                if attr == "options":
                    opts = val.strip('{}')
                    val = {}
                    for opt in opts.split(", "):
                        opt_name = opt[:opt.index("=")]
                        opt_val = opt[opt.index("=") + 1:].strip('" ')
                        val[opt_name] = opt_val
                port[attr] = val
        if port:
            tunnel_ports[port["name"]] = port
        return tunnel_ports

    def get_pnics(self, vedge) -> dict:
        bridges = vedge["configurations"].get("bridge_mappings", {})
        pnics = {}
        for bridge in bridges.values():
            self.get_bridge_pnic(pnics, vedge, bridge)
        return pnics

    MIRANTIS_DIST = "Mirantis"

    def get_bridge_pnic(self, pnics: dict, vedge: dict, bridge: str):
        cmd = "ovs-vsctl list-ifaces {}".format(bridge)
        ifaces_list_lines = self.run_fetch_lines(cmd, vedge["host"])
        env_config = self.configuration.get_env_config()
        distribution = env_config.get("distribution")
        dist_version = env_config.get("distribution_version")
        use_br_postfix = distribution == self.MIRANTIS_DIST and \
            dist_version in ["6.0", "7.0", "8.0"]
        for l in ifaces_list_lines:
            if use_br_postfix:
                br_pnic_postfix = "{}--br-".format(bridge)
                interface = l[len(br_pnic_postfix):] \
                    if l.startswith(br_pnic_postfix) \
                    else ""
            else:
                interface = l
            if interface:
                pnic = self.find_pnic_for_interface(vedge, interface)
                if pnic:
                    pnics[pnic["name"]] = pnic

    def find_pnic_for_interface(self, vedge, interface):
        # add port ID to pNIC
        pnic = self.inv.find_items({
            "environment": self.get_env(),
            "type": "host_pnic",
            "host": vedge["host"],
            "name": interface
        }, get_single=True)
        if not pnic:
            return
        vedge["pnic"] = interface
        port = vedge["ports"].get(interface, {})
        pnic["port_id"] = port.get("id", "")
        self.inv.set(pnic)
        return pnic
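
# A minimal, self-contained sketch (sample "ovs-vsctl show" output is
# illustrative) of the indentation-based parsing in fetch_port_tags_from_vsctl:
# a "Port" line indented 8 spaces opens a section, and a "tag:" line indented
# 12 spaces is attributed to that port.
sample_vsctl_lines = [
    ' ' * 8 + 'Port "tap9f94d28e-7b"',
    ' ' * 12 + 'tag: 5',
    ' ' * 12 + 'Interface "tap9f94d28e-7b"',
    ' ' * 16 + 'type: internal',
]

ports = {'tap9f94d28e-7b': {'id': '3', 'name': 'tap9f94d28e-7b',
                            'internal': True}}
port_header = ' ' * 8 + 'Port '
current = None
for line in sample_vsctl_lines:
    if line.startswith(port_header):
        name = line[len(port_header):].strip('"')
        current = ports.get(name)
    elif current and line.startswith(' ' * 12 + 'tag: '):
        current['tag'] = line.split(':', 1)[1].strip()

print(ports['tap9f94d28e-7b']['tag'])  # 5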
Example #10
class SensuClientInstaller(MonitoringHandler):

    UBUNTU = 'ubuntu'
    CENTOS = 'centos'

    INSTALL_CMD = {
        UBUNTU: 'dpkg -i {}',
        CENTOS: 'rpm -i {}'
    }
    PERMISSIONS_CMD = {
        UBUNTU: '',
        CENTOS: 'usermod -aG wheel sensu'
    }
    SUDOERS_FILE = '/etc/sudoers'

    available_downloads = {}

    def __init__(self, env: str, host_id: str):
        super().__init__(env)
        self.cli_ssh = self.get_ssh(host_id)
        self.inv = InventoryMgr()
        self.host = self.inv.get_by_id(env, host_id)
        self.server = self.env_monitoring_config.get('server_ip')
        self.server_cli_ssh = self.get_ssh(self.server)
        self.ubuntu_dist = None
        self.required_package = None

    def install(self):
        pkg_to_install = self.get_pkg_to_install()
        if not pkg_to_install:
            return
        try:
            self.fetch_package(pkg_to_install)
            self.install_package(pkg_to_install)
            self.set_permissions()
        except SystemError as e:
            self.log.error('Sensu install on host {} failed: {}'
                           .format(self.host, str(e)))
            return

    @staticmethod
    def get_attr_from_output(output_lines: list, attr: str) -> str:
        matches = [l for l in output_lines if l.startswith(attr)]
        if not matches:
            return ''
        line = matches[0]
        return SensuClientInstaller.get_attr_from_output_line(line)

    @staticmethod
    def get_attr_from_output_line(output_line: str):
        val = output_line[output_line.index(':')+1:].strip()
        return val

    INSTALLED = 'Installed: '
    CANDIDATE = 'Candidate: '
    SENSU_DIR = '/opt/sensu'
    SENSU_PKG_DIR = '/etc/sensu/pkg'
    SENSU_PKG_DIR_LOCAL = '/tmp/sensu_pkg'
    SENSU_VERSION_FILE = '/opt/sensu/version-manifest.txt'

    def find_available_downloads(self):
        ls_output = self.server_cli_ssh.exec('ls -R {}'
                                             .format(self.SENSU_PKG_DIR))
        ls_lines = ls_output.splitlines()
        last_target_dir = None
        for line in ls_lines:
            if line[-4:] in ['/32:', '/64:']:
                last_target_dir = line.replace(self.SENSU_PKG_DIR, '')
                continue
            elif last_target_dir:
                target_dir = last_target_dir.strip(os.path.sep).strip(':')
                self.available_downloads[target_dir] = line
                last_target_dir = None
            else:
                last_target_dir = None

    def find_available_package(self, os_details: dict):
        if not self.available_downloads:
            self.find_available_downloads()
        distribution = os_details['ID']
        version = os_details['version'].split()[-2].lower()
        arch = os_details['architecure'][-2:]
        download_dir = os.path.join(distribution, version, arch)
        download_file = self.available_downloads.get(download_dir)
        full_path = '' if not download_file \
            else os.path.join(self.SENSU_PKG_DIR, download_dir, download_file)
        return download_file, full_path

    @staticmethod
    def find_available_version(download_file: str) -> str:
        ver = download_file.replace('sensu', '').strip('-_')
        ver = ver[:ver.index('-')]
        return ver

    def get_pkg_to_install(self) -> str:
        if self.provision == self.provision_levels['none']:
            return ''
        if not self.host:
            return ''
        supported_os = [self.UBUNTU, self.CENTOS]
        distribution = self.host['OS']['ID']
        if distribution not in [self.UBUNTU, self.CENTOS]:
            self.log.error('Sensu client auto-install only supported for: {}'
                           .format(', '.join(supported_os)))
            return ''
        cmd = 'if [ -d {} ];  then head -1 {} | sed "s/sensu //"; fi' \
            .format(self.SENSU_DIR, self.SENSU_VERSION_FILE)
        installed_version = self.cli_ssh.exec(cmd).strip()
        os_details = self.host['OS']
        available_pkg, pkg_path = self.find_available_package(os_details)
        available_version = self.find_available_version(available_pkg)
        if parse_version(available_version) <= parse_version(installed_version):
            return ''
        return pkg_path

    def get_local_path(self, pkg_to_install: str):
        return os.path.join(self.SENSU_PKG_DIR_LOCAL,
                            os.path.basename(pkg_to_install))

    def fetch_package(self, pkg_to_install: str):
        self.make_directory(self.SENSU_PKG_DIR_LOCAL)
        self.get_file(self.server, pkg_to_install,
                      self.get_local_path(pkg_to_install))
        local_path = self.get_local_path(pkg_to_install)
        self.copy_to_remote_host(self.host['host'],
                                 local_path=local_path,
                                 remote_path=local_path)

    def install_package(self, pkg_to_install):
        local_path = self.get_local_path(pkg_to_install)
        install_cmd = self.INSTALL_CMD[self.host['OS']['ID']]
        self.cli_ssh.exec(install_cmd.format(local_path))

    def set_permissions(self):
        cmd = self.PERMISSIONS_CMD[self.host['OS']['ID']]
        if cmd:
            self.cli_ssh.exec(cmd)
        # add to sudoers file
        sudoer_permission = 'sensu        ALL=(ALL)       NOPASSWD: ALL'
        sudoer_cmd = 'grep --silent -w sensu {} || echo "{}" >> {}'\
            .format(self.SUDOERS_FILE, sudoer_permission, self.SUDOERS_FILE)
        self.cli_ssh.exec(sudoer_cmd)
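
# A minimal sketch (version strings and file name are illustrative, and
# parse_version is assumed to come from packaging.version or pkg_resources,
# since the original import is not shown) of the version gate in
# get_pkg_to_install(): the package is only installed when the candidate
# found on the server is newer than what the host already reports.
from packaging.version import parse as parse_version

installed = '0.26.5'                     # from /opt/sensu/version-manifest.txt
download_file = 'sensu_0.29.0-2_amd64.deb'

available = download_file.replace('sensu', '').strip('-_')
available = available[:available.index('-')]   # find_available_version() logic

print(available)                                             # 0.29.0
print(parse_version(available) > parse_version(installed))   # True -> install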
Example #11
class CliFetchHostPnics(CliAccess):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        self.ethtool_attr = re.compile(r'^\s+([^:]+):\s(.*)$')
        self.regexps = [{
            'name': 'mac_address',
            're': r'^.*\slink/ether\s(\S+)\s',
            'description': 'MAC address'
        }, {
            'name': 'IP Address',
            're': r'^\s*inet ([0-9.]+)/',
            'description': 'IP Address v4'
        }, {
            'name': 'IPv6 Address',
            're': r'^\s*inet6 (\S+) .* global ',
            'description': 'IPv6 Address'
        }]

    def get(self, id):
        host_id = id[:id.rindex("-")]
        cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("CliFetchHostPnics: host not found: " + host_id)
            return []
        if "host_type" not in host:
            self.log.error("host does not have host_type: " + host_id +
                           ", host: " + str(host))
            return []
        host_types = host["host_type"]
        if "Network" not in host_types and "Compute" not in host_types:
            return []
        interface_lines = self.run_fetch_lines(cmd, host_id)
        interfaces = []
        for line in interface_lines:
            interface_name = line[line.rindex('/') + 1:]
            interface_name = interface_name.strip()
            # run 'ip address show' with a specific interface name, since
            # running it without a name yields a list that omits inactive pNICs
            interface = self.find_interface_details(host_id, interface_name)
            if interface:
                interfaces.append(interface)
        return interfaces

    def find_interface_details(self, host_id, interface_name):
        cmd = "ip address show {}".format(interface_name)
        lines = self.run_fetch_lines(cmd, host_id)
        interface = None
        status_up = None
        for line in [l for l in lines if l != '']:
            tokens = None
            if interface is None:
                tokens = line.split()
                line_remainder = line.split(":")[2].strip()
                interface = {
                    "host": host_id,
                    "name": interface_name,
                    "local_name": interface_name,
                    "lines": []
                }
                self.handle_line(interface, line_remainder)
                if '<UP,' in line:
                    status_up = True
            if status_up is None:
                if tokens is None:
                    tokens = line.split()
                if 'BROADCAST' in tokens:
                    status_up = 'UP' in tokens
            if interface:
                self.handle_line(interface, line)
        self.set_interface_data(interface)
        interface['state'] = 'UP' if status_up else 'DOWN'
        if 'id' not in interface:
            interface['id'] = interface_name + '-unknown_mac'
        return interface

    def handle_line(self, interface, line):
        self.find_matching_regexps(interface, line, self.regexps)
        if 'mac_address' in interface:
            interface["id"] = "{}-{}".format(interface["name"],
                                             interface["mac_address"])
        interface["lines"].append(line.strip())

    def set_interface_data(self, interface):
        if not interface:
            return
        interface["data"] = "\n".join(interface["lines"])
        interface.pop("lines", None)
        ethtool_ifname = interface["local_name"]
        if "@" in interface["local_name"]:
            pos = interface["local_name"].index("@")
            ethtool_ifname = ethtool_ifname[pos + 1:]
        cmd = "ethtool " + ethtool_ifname
        lines = self.run_fetch_lines(cmd, interface["host"])
        attr = None
        for line in lines[1:]:
            matches = self.ethtool_attr.match(line)
            if matches:
                # add this attribute to the interface
                attr = matches.group(1)
                value = matches.group(2)
                interface[attr] = value.strip()
            else:
                # add more values to the current attribute as an array
                if isinstance(interface[attr], str):
                    interface[attr] = [interface[attr], line.strip()]
                else:
                    interface[attr].append(line.strip())
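
# A minimal sketch (sample "ethtool" output is illustrative) of the attribute
# parsing in set_interface_data(): "key: value" lines become attributes, and
# continuation lines without a colon are appended to the previous attribute,
# turning its value into a list.
import re

ethtool_attr = re.compile(r'^\s+([^:]+):\s(.*)$')
sample_lines = [
    "\tSupported link modes:   1000baseT/Full",
    "\t                        10000baseT/Full",
    "\tSpeed: 10000Mb/s",
]

interface, attr = {}, None
for line in sample_lines:
    matches = ethtool_attr.match(line)
    if matches:
        attr = matches.group(1)
        interface[attr] = matches.group(2).strip()
    elif attr:
        prev = interface[attr]
        interface[attr] = [prev, line.strip()] if isinstance(prev, str) \
            else prev + [line.strip()]

print(interface['Speed'])                 # 10000Mb/s
print(interface['Supported link modes'])  # ['1000baseT/Full', '10000baseT/Full']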
Example #12
class MonitoringCheckHandler(SpecialCharConverter):
    status_labels = {}
    TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'

    def __init__(self, args):
        super().__init__()
        self.log = FullLogger()
        self.log.set_loglevel(args.loglevel)
        self.env = args.env
        try:
            self.conf = Configuration(args.mongo_config)
            self.inv = InventoryMgr()
            self.inv.log.set_loglevel(args.loglevel)
            self.inv.set_collections(args.inventory)
            self.status_labels = self.get_status_labels()
        except FileNotFoundError:
            sys.exit(1)

    def get_status_labels(self):
        statuses_name_search = {'name': 'monitoring_check_statuses'}
        labels_data = self.inv.find_one(search=statuses_name_search,
                                        collection='constants')
        if not isinstance(labels_data, dict) or 'data' not in labels_data:
            return {}
        labels = {}
        for status_data in labels_data['data']:
            if not isinstance(status_data, dict):
                continue
            val = int(status_data['value'])
            label = status_data['label']
            labels[val] = label
        return labels

    def get_label_for_status(self, status: int) -> str:
        if status not in self.status_labels.keys():
            return ''
        return self.status_labels.get(status, '')

    def doc_by_id(self, object_id):
        doc = self.inv.get_by_id(self.env, object_id)
        if not doc:
            self.log.warn('No matching object found with ID: ' + object_id)
        return doc

    def doc_by_db_id(self, db_id, coll_name=None):
        coll = self.inv.collections[coll_name] if coll_name else None
        doc = self.inv.find({'_id': ObjectId(db_id)},
                            get_single=True,
                            collection=coll)
        if not doc:
            self.log.warn('No matching object found with DB ID: ' + db_id)
        return doc

    def set_doc_status(self, doc, status, status_text, timestamp):
        doc['status_value'] = status
        doc['status'] = self.get_label_for_status(status) \
            if isinstance(status, int) \
            else status
        if status_text:
            doc['status_text'] = status_text
        doc['status_timestamp'] = strftime(self.TIME_FORMAT, timestamp)
        if 'link_type' in doc:
            self.inv.write_link(doc)
        else:
            self.inv.set(doc)

    @staticmethod
    def check_ts(check_result):
        return gmtime(check_result['executed'])

    def keep_result(self, doc, check_result):
        status = check_result['status']
        ts = self.check_ts(check_result)
        self.set_doc_status(doc, status, check_result['output'], ts)
        self.keep_message(doc, check_result)

    def keep_message(self, doc, check_result, error_level=None):
        is_link = 'link_type' in doc
        msg_id = check_result['id']
        obj_id = 'link_{}_{}'.format(doc['source_id'], doc['target_id']) \
            if is_link \
            else doc['id']
        obj_type = 'link_{}'.format(doc['link_type']) if is_link else \
            doc['type']
        display_context = obj_id if is_link \
            else doc['network_id'] if doc['type'] == 'port' else doc['id']
        level = error_level if error_level\
            else ERROR_LEVEL[check_result['status']]
        dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
        message = Message(msg_id=msg_id,
                          env=self.env,
                          source=SOURCE_SYSTEM,
                          object_id=obj_id,
                          object_type=obj_type,
                          display_context=display_context,
                          level=level,
                          msg=check_result,
                          ts=dt)
        collection = self.inv.collections['messages']
        collection.insert_one(message.get())
Example #13
class CliFetchBondHostPnics(CliAccess):
    BOND_DIR = '/proc/net/bonding/'
    SLAVE_INTERFACE_HEADER = 'Slave Interface: '

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def get(self, parent_id: str):
        self.log.info(
            'CliFetchBondHostPnics: checking under {}'.format(parent_id))
        host_id = parent_id[:parent_id.rindex('-')]
        cmd = 'ls -1 {} 2>&1'.format(self.BOND_DIR)
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error('CliFetchBondHostPnics: host not found: ' + host_id)
            return []
        host_types = host['host_type']
        if 'Network' not in host_types and 'Compute' not in host_types:
            return []
        lines = self.run_fetch_lines(cmd, host_id)
        if lines and 'No such file or directory' in lines[0]:
            return []  # no bonds so directory does not exist
        bonds = []
        for line in [l.strip() for l in lines]:
            bond = self.get_bond_details(host_id, line)
            if bond:
                bonds.append(bond)
        return bonds

    def get_bond_details(self, host_id: str, interface_name: str) -> dict:
        lines = self.run_fetch_lines(
            'cat {}{}'.format(self.BOND_DIR, interface_name), host_id)
        status, mac_address = \
            self.get_bond_status_and_mac_address(host_id, interface_name)
        interface_id = '{}-{}'.format(interface_name, mac_address)
        interface = {
            'host': host_id,
            'name': interface_name,
            'id': interface_id,
            'local_name': interface_name,
            'mac_address': mac_address,
            'Link detected': 'yes' if status == 'up' else 'no',
            'EtherChannel': True,
            'EtherChannel Master': '',
            'members': {}
        }
        # keep stack of info objects to support multi-level info
        info_objects = deque([interface])
        for line in [line for line in lines if line != '']:
            if line.startswith(self.SLAVE_INTERFACE_HEADER):
                name = line[line.index(':') + 1:].strip()
                slave = {'name': name, 'EtherChannel Master': interface_id}
                # remove any pending info objects, keep only interface
                info_objects = deque([interface])
                info_objects.append(slave)
                interface['members'][name] = slave
            elif line.rstrip(':').lower().endswith('info'):
                # move to lower level info object
                info_name = line.rstrip(':')
                upper_info_obj = info_objects[-1]
                info_obj = {}
                upper_info_obj[info_name] = info_obj
                info_objects.append(info_obj)
            else:
                self.get_attribute_from_line(info_objects[-1], line)
        for slave in list(interface['members'].values()):
            self.set_slave_host_pnic_bond_attributes(host_id, slave,
                                                     interface_id)
        return interface

    def get_bond_status_and_mac_address(self, host_id: str, name: str):
        output = self.run_fetch_lines('ip link show {}'.format(name), host_id)
        status_line = output[0]
        status = status_line[status_line.index(' state ') + len(' state '):]
        status = status[:status.index(' ')]
        matches = [line.strip() for line in output if 'link/ether' in line]
        if not matches:
            self.log.error('Failed to find line with MAC address '
                           'for bond {} (host: {})'.format(name, host_id))
            return status.lower(), ''
        tokens = matches[0].split()
        if len(tokens) < 2:
            self.log.error('Failed to find MAC address in line: {}'.format(
                matches[0]))
            return status.lower(), ''
        mac_address = tokens[1]
        return status.lower(), mac_address

    def get_attribute_from_line(self, obj: dict, line: str):
        if ':' not in line:
            self.log.error('object {}: failed to find ":" in line: {}'.format(
                obj['name'], line))
            return
        attr = line[:line.index(':')]
        value = line[len(attr) + 1:]
        obj[attr.strip()] = value.strip()

    def set_slave_host_pnic_bond_attributes(self, host, slave, interface_id):
        pnic = self.inv.find_one({
            'environment': self.get_env(),
            'host': host,
            'type': 'host_pnic',
            'name': slave['name']
        })
        if not pnic:
            self.log.error('unable to find slave pNIC {} under bond {}'.format(
                slave['name'], interface_id))
            return
        mac_address = pnic['mac_address']
        slave_id = '{}-{}'.format(slave.get('name', ''), mac_address)
        slave['mac_address'] = mac_address
        slave['id'] = slave_id
        pnic['EtherChannel'] = True
        pnic['EtherChannel Master'] = interface_id
        self.inv.set(pnic)
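
# A minimal, self-contained sketch (file content is illustrative) of the
# structure parsed by get_bond_details(): /proc/net/bonding/<bond> interleaves
# bond-level attributes with "Slave Interface:" sections, each carrying its
# own attribute lines.
sample_bond_file = """\
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
MII Status: up

Slave Interface: eno1
MII Status: up
Speed: 10000 Mbps

Slave Interface: eno2
MII Status: up
Speed: 10000 Mbps
"""

bond, current = {'members': {}}, None
for line in filter(None, sample_bond_file.splitlines()):
    if line.startswith('Slave Interface: '):
        name = line.split(': ', 1)[1]
        current = {'name': name}
        bond['members'][name] = current
    elif ':' in line:
        attr, value = line.split(':', 1)
        (current if current else bond)[attr.strip()] = value.strip()

print(sorted(bond['members']))            # ['eno1', 'eno2']
print(bond['members']['eno1']['Speed'])   # 10000 Mbps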
Example #14
class Monitor:
    DEFAULTS = {
        'env': 'WebEX-Mirantis@Cisco',
        'inventory': 'inventory',
        'loglevel': 'WARNING'
    }

    def __init__(self):
        self.args = self.get_args()
        MongoAccess.set_config_file(self.args.mongo_config)
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        self.configuration = Configuration()
        self.input_text = None
        self.converter = SpecialCharConverter()

    def get_args(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default="",
                            help="name of config file with MongoDB server " +
                            "access details")
        parser.add_argument("-e",
                            "--env",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['env'],
                            help="name of environment to scan \n" +
                            "(default: {})".format(self.DEFAULTS['env']))
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['inventory'],
                            help="name of inventory collection \n" +
                            "(default: {}".format(self.DEFAULTS['inventory']))
        parser.add_argument('-i',
                            '--inputfile',
                            nargs='?',
                            type=str,
                            default='',
                            help="read input from the specifed file \n" +
                            "(default: from stdin)")
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')".format(
                                self.DEFAULTS["loglevel"]))
        args = parser.parse_args()
        return args

    def get_type_list(self, type_name) -> list:
        types_list = []
        docs = self.inv.find_items({'name': type_name}, collection='constants')
        for doc in docs:
            types_list = [t['value'] for t in doc['data']]
        if not types_list:
            raise ValueError('Unable to fetch {}'.format(
                type_name.replace('_', ' ')))
        return types_list

    def match_object_types(self, check_name: str) -> list:
        object_types = self.get_type_list('object_types')
        matches = [t for t in object_types if check_name.startswith(t + '_')]
        return matches

    def match_link_types(self, check_name: str) -> list:
        object_types = self.get_type_list('link_types')
        matches = [
            t for t in object_types if check_name.startswith('link_' + t + '_')
        ]
        return matches

    def find_object_type_and_id(self, check_name: str):
        # if multiple object types match the check name, take the longest
        # (most specific) one. For example, if the matches are
        # ['host', 'host_pnic'], then take 'host_pnic'.
        # Sorting the matches in reverse order puts the longest match first.
        is_link_check = check_name.startswith('link_')
        check_type = 'link' if is_link_check else 'object'
        if is_link_check:
            matching_types = sorted(self.match_link_types(check_name),
                                    reverse=True)
        else:
            matching_types = sorted(self.match_object_types(check_name),
                                    reverse=True)
        if not matching_types:
            raise ValueError(
                'Unable to match check name "{}" with {} type'.format(
                    check_name, check_type))
        obj_type = matching_types[0]
        postfix_len = len('link_') if is_link_check else 0
        obj_id = (obj_type + '_' if is_link_check else '') + \
            check_name[len(obj_type)+1+postfix_len:]
        return check_type, obj_type, obj_id

    def read_input(self):
        if self.args.inputfile:
            try:
                with open(self.args.inputfile, 'r') as input_file:
                    self.input_text = input_file.read()
            except Exception as e:
                raise FileNotFoundError(
                    "failed to open input file {}: {}".format(
                        self.args.inputfile, str(e)))
        else:
            self.input_text = sys.stdin.read()
            if not self.input_text:
                raise ValueError("No input provided on stdin")

    def get_handler_by_type(self, check_type, obj_type):
        module_name = 'handle_link' if check_type == 'link' \
                else 'handle_' + obj_type
        package = 'monitoring.handlers'
        handler = ClassResolver.get_instance_single_arg(
            self.args, module_name=module_name, package_name=package)
        return handler

    def get_handler(self, check_type, obj_type):
        basic_handling_types = ['instance', 'vedge', 'vservice', 'vconnector']
        if obj_type not in basic_handling_types:
            return self.get_handler_by_type(check_type, obj_type)
        from monitoring.handlers.basic_check_handler \
            import BasicCheckHandler
        return BasicCheckHandler(self.args)

    def check_link_interdependency_for(self,
                                       object_id: str,
                                       from_type: str = None,
                                       to_type: str = None):
        if (from_type is not None and to_type is not None) or \
                (from_type is None and to_type is None):
            raise ValueError('check_link_interdependency: '
                             'supply one of from_type/to_type')
        obj_id = self.converter.decode_special_characters(object_id)
        obj = self.inv.get_by_id(environment=self.args.env, item_id=obj_id)
        if not obj:
            self.inv.log.error(
                'check_link_interdependency: '
                'failed to find object with ID: {}'.format(object_id))
            return
        if 'status' not in obj:
            return
        id_attr = 'source_id' if from_type is None else 'target_id'
        link_type = '{}-{}'.format(
            from_type if from_type is not None else obj['type'],
            to_type if to_type is not None else obj['type'])
        condition = {
            'environment': self.args.env,
            'link_type': link_type,
            id_attr: obj_id
        }
        link = self.inv.find_one(search=condition, collection='links')
        if not link:
            self.inv.log.error('check_link_interdependency: '
                               'failed to find {} link with {}: {}'.format(
                                   link_type, id_attr, obj_id))
            return
        other_id_attr = '{}_id' \
            .format('source' if from_type is not None else 'target')
        other_obj = self.inv.get_by_id(environment=self.args.env,
                                       item_id=link[other_id_attr])
        if not other_obj:
            self.inv.log.error(
                'check_link_interdependency: '
                'failed to find {} with ID: {} (link type: {})'.format(
                    other_id_attr, link[other_id_attr], link_type))
            return
        if 'status' not in other_obj:
            return
        status = 'Warning'
        if obj['status'] == 'OK' and other_obj['status'] == 'OK':
            status = 'OK'
        link['status'] = status
        time_format = MonitoringCheckHandler.TIME_FORMAT
        timestamp1 = obj['status_timestamp']
        t1 = datetime.datetime.strptime(timestamp1, time_format)
        timestamp2 = other_obj['status_timestamp']
        t2 = datetime.datetime.strptime(timestamp2, time_format)
        timestamp = max(t1, t2)
        link['status_timestamp'] = datetime.datetime.strftime(
            timestamp, time_format)
        self.inv.set(link, self.inv.collections['links'])

    def check_link_interdependency(self, object_id: str, object_type: str):
        conf = self.configuration.get_env_config()
        if 'OVS' in conf['mechanism_drivers']:
            if object_type == 'vedge':
                self.check_link_interdependency_for(object_id,
                                                    to_type='host_pnic')
            if object_type == 'host_pnic':
                self.check_link_interdependency_for(object_id,
                                                    from_type='vedge')

    def process_input(self):
        check_result_full = json.loads(self.input_text)
        check_client = check_result_full['client']
        check_result = check_result_full['check']
        check_result['id'] = check_result_full['id']
        name = check_result['name']
        check_type, object_type, object_id = \
            self.find_object_type_and_id(name)
        if 'environment' in check_client:
            self.args.env = check_client['environment']
        else:
            raise ValueError('Check client should contain environment name')
        self.configuration.use_env(self.args.env)

        check_handler = self.get_handler(check_type, object_type)
        if check_handler:
            check_handler.handle(object_id, check_result)
        self.check_link_interdependency(object_id, object_type)

    def process_check_result(self):
        self.read_input()
        self.process_input()
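
# A minimal sketch (type list and check name are illustrative) of the
# longest-match rule in find_object_type_and_id(): with both 'host' and
# 'host_pnic' registered as object types, the check name must resolve to the
# more specific 'host_pnic', and the remainder becomes the object ID.
object_types = ['host', 'host_pnic', 'instance', 'vedge']
check_name = 'host_pnic_node-1.local-eth0'

matches = sorted((t for t in object_types
                  if check_name.startswith(t + '_')), reverse=True)
obj_type = matches[0]
obj_id = check_name[len(obj_type) + 1:]
print(obj_type, obj_id)   # host_pnic node-1.local-eth0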
Example #15
class MonitoringCheckHandler(SpecialCharConverter):
    STATUS_LABEL = ['OK', 'Warning', 'Error']

    def __init__(self, args):
        super().__init__()
        self.log = FullLogger()
        self.log.set_loglevel(args.loglevel)
        self.env = args.env
        try:
            self.conf = Configuration(args.mongo_config)
            self.inv = InventoryMgr()
            self.inv.log.set_loglevel(args.loglevel)
            self.inv.set_collections(args.inventory)
        except FileNotFoundError:
            sys.exit(1)

    def doc_by_id(self, object_id):
        doc = self.inv.get_by_id(self.env, object_id)
        if not doc:
            self.log.warn('No matching object found with ID: ' + object_id)
        return doc

    def doc_by_db_id(self, db_id, coll_name=None):
        coll = self.inv.collections[coll_name] if coll_name else None
        doc = self.inv.find({'_id': ObjectId(db_id)},
                            get_single=True,
                            collection=coll)
        if not doc:
            self.log.warn('No matching object found with DB ID: ' + db_id)
        return doc

    def set_doc_status(self, doc, status, status_text, timestamp):
        doc['status'] = self.STATUS_LABEL[status] if isinstance(status, int) \
            else status
        if status_text:
            doc['status_text'] = status_text
        doc['status_timestamp'] = strftime(TIME_FORMAT, timestamp)
        if 'link_type' in doc:
            self.inv.write_link(doc)
        else:
            self.inv.set(doc)

    @staticmethod
    def check_ts(check_result):
        return gmtime(check_result['executed'])

    def keep_result(self, doc, check_result):
        status = check_result['status']
        ts = self.check_ts(check_result)
        self.set_doc_status(doc, status, check_result['output'], ts)
        self.keep_message(doc, check_result)

    def keep_message(self, doc, check_result, error_level=None):
        is_link = 'link_type' in doc
        msg_id = check_result['id']
        obj_id = 'link_{}_{}'.format(doc['source_id'], doc['target_id']) \
            if is_link \
            else doc['id']
        obj_type = 'link_{}'.format(
            doc['link_type']) if is_link else doc['type']
        display_context = obj_id if is_link \
            else doc['network_id'] if doc['type'] == 'port' else doc['id']
        level = error_level if error_level\
            else ERROR_LEVEL[check_result['status']]
        dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
        message = Message(msg_id=msg_id,
                          env=self.env,
                          source=SOURCE_SYSTEM,
                          object_id=obj_id,
                          object_type=obj_type,
                          display_context=display_context,
                          level=level,
                          msg=check_result,
                          ts=dt)
        collection = self.inv.collections['messages']
        collection.insert_one(message.get())
Example #16
class ApiFetchNetwork(ApiAccess):
    def __init__(self):
        super(ApiFetchNetwork, self).__init__()
        self.inv = InventoryMgr()

    def get(self, project_id):
        # use project admin credentials, to be able to fetch all networks
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        ret = []
        for region in self.regions:
            # TODO: refactor legacy code
            # (Unresolved reference - self.get_for_region)
            ret.extend(self.get_for_region(region, token, project_id))
        return ret

    def get_network(self, region, token, subnet_id):
        endpoint = self.get_region_url_nover(region, "neutron")

        # get target network network document
        req_url = endpoint + "/v2.0/networks/" + subnet_id
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        if "network" not in response:
            return []
        network = response["network"]
        subnets = network['subnets']

        # get subnets documents.
        subnets_hash = {}
        cidrs = []
        subnet_ids = []
        for subnet_id in subnets:
            req_url = endpoint + "/v2.0/subnets/" + subnet_id
            response = self.get_url(req_url, headers)
            if "subnet" in response:
                # build a hash of subnets keyed by name, for easy lookup
                subnet = response["subnet"]
                subnets_hash[subnet["name"]] = subnet
                cidrs.append(subnet["cidr"])
                subnet_ids.append(subnet["id"])

        network["subnets"] = subnets_hash
        network["cidrs"] = cidrs
        network["subnet_ids"] = subnet_ids

        self.set_folder_parent(network,
                               object_type="network",
                               master_parent_type="project",
                               master_parent_id=network["tenant_id"])
        # set the 'network' attribute for network objects to the network ID,
        # to allow setting a constraint on the network when creating the
        # network clique
        network['network'] = network["id"]
        # get the project name
        project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
        if project:
            network["project"] = project["name"]

        return network
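A brief usage sketch, assuming a fully configured environment (ApiAccess credentials, regions and the inventory already initialized); the network UUID below is a placeholder.

fetcher = ApiFetchNetwork()
# one enriched network document is returned per configured region
for network in fetcher.get("network-uuid-placeholder"):
    print(network["name"], network["cidrs"], network["subnet_ids"])
    # network["subnets"] now holds full subnet documents, keyed by subnet name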
Example no. 17
class ApiFetchNetworks(ApiAccess):
    def __init__(self):
        super(ApiFetchNetworks, self).__init__()
        self.inv = InventoryMgr()

    def get(self, project_id=None):
        # use project admin credentials, to be able to fetch all networks
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        ret = []
        for region in self.regions:
            ret.extend(self.get_networks(region, token))
        return ret

    def get_networks(self, region, token):
        endpoint = self.get_region_url_nover(region, "neutron")
        req_url = endpoint + "/v2.0/networks"
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        if "networks" not in response:
            return []
        networks = response["networks"]
        req_url = endpoint + "/v2.0/subnets"
        response = self.get_url(req_url, headers)
        subnets_hash = {}
        if "subnets" in response:
            # build a hash of subnets keyed by ID, for easy lookup
            subnets = response["subnets"]
            for s in subnets:
                subnets_hash[s["id"]] = s
        for doc in networks:
            project_id = doc["tenant_id"]
            if not project_id:
                # find project ID of admin project
                project = self.inv.get_by_field(self.get_env(),
                                                "project",
                                                "name",
                                                self.admin_project,
                                                get_single=True)
                if not project:
                    self.log.error("failed to find admin project in DB")
                    continue
                project_id = project["id"]
            self.set_folder_parent(doc,
                                   object_type='network',
                                   master_parent_id=project_id,
                                   master_parent_type='project')
            # set the 'network' attribute for network objects to the network
            # ID, to allow setting a constraint on the network when creating
            # the network clique
            doc['network'] = doc["id"]
            # get the project name
            project = self.inv.get_by_id(self.get_env(), project_id)
            if project:
                doc["project"] = project["name"]
            subnets_details = {}
            cidrs = []
            subnet_ids = []
            for s in doc["subnets"]:
                try:
                    subnet = subnets_hash[s]
                    cidrs.append(subnet["cidr"])
                    subnet_ids.append(subnet["id"])
                    subnets_details[subnet["name"]] = subnet
                except KeyError:
                    pass

            doc["subnets"] = subnets_details
            doc["cidrs"] = cidrs
            doc["subnet_ids"] = subnet_ids
        return networks
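To make the subnet join in get_networks() easier to follow, here is the same indexing pattern as a standalone sketch over invented data (IDs, names and CIDRs are all made up):

subnets = [
    {"id": "sub-1", "name": "private-a", "cidr": "10.0.0.0/24"},
    {"id": "sub-2", "name": "private-b", "cidr": "10.0.1.0/24"},
]
networks = [{"id": "net-1", "subnets": ["sub-1", "sub-2", "sub-missing"]}]

subnets_hash = {s["id"]: s for s in subnets}
for doc in networks:
    found = [subnets_hash[s] for s in doc["subnets"] if s in subnets_hash]
    doc["subnets"] = {s["name"]: s for s in found}
    doc["cidrs"] = [s["cidr"] for s in found]
    doc["subnet_ids"] = [s["id"] for s in found]

# unknown subnet IDs ("sub-missing") are skipped, mirroring the KeyError handler
print(networks[0]["cidrs"])          # ['10.0.0.0/24', '10.0.1.0/24']
print(list(networks[0]["subnets"]))  # ['private-a', 'private-b']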
Example no. 18
class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        self.port_re = re.compile(r"^\s*port (\d+): ([^(]+)( \(internal\))?$")

    def get(self, id):
        vedge = self.inv.get_by_id(self.get_env(), id)
        if not vedge:
            return []
        if not vedge.get("configurations", {}).get("tunnel_types"):
            return []
        tunnel_type = vedge["configurations"]["tunnel_types"][0]
        host_id = vedge["host"]
        table_name = "{}.ml2_{}_endpoints".format(self.neutron_db, tunnel_type)
        env_config = self.config.get_env_config()
        distribution = env_config["distribution"]
        distribution_version = env_config["distribution_version"]
        dist_ver = "{}-{}".format(distribution, distribution_version)
        if dist_ver == "Canonical-icehouse":
            # for Icehouse we only get the IP address from the DB, so take
            # the host IP address from the host data in Mongo instead
            host = self.inv.get_by_id(self.get_env(), host_id)
            results = [{"host": host_id, "ip_address": host["ip_address"]}]
        else:
            results = self.get_objects_list_for_id(
                """
                SELECT *
                FROM {}
                WHERE host = %s
                """.format(table_name),
                "vedge", host_id)
        for doc in results:
            doc["id"] = host_id + "-otep"
            doc["name"] = doc["id"]
            doc["host"] = host_id
            doc["overlay_type"] = tunnel_type
            doc["ports"] = vedge["tunnel_ports"] if "tunnel_ports" in vedge else []
            if "udp_port" not in doc:
                doc["udp_port"] = "67"
            self.get_vconnector(doc, host_id, vedge)

        return results

    # find matching vConnector by tunneling_ip of vEdge
    # look for that IP address in 'ip address show' output for the host
    def get_vconnector(self, doc, host_id, vedge):
        tunneling_ip = vedge["configurations"]["tunneling_ip"]
        output_lines = self.run_fetch_lines("ip address show", host_id)
        interface = None
        ip_string = "    inet {}/".format(tunneling_ip)
        vconnector = None
        for l in output_lines:
            if l.startswith(" "):
                if interface and l.startswith(ip_string):
                    vconnector = interface
                    break
            else:
                if " " in l:
                    # line format is like this:
                    # <interface number>: <interface name>: ....
                    interface = l.split(":")[1].strip()

        if vconnector:
            doc["vconnector"] = vconnector
Example no. 19
class AciFetchLeafToSpinePnics(AciAccess):

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def fetch_switches_by_role(self, role_name):
        query_filter = {"query-target-filter":
                        "eq(fabricNode.role, \"{}\")".format(role_name)}
        switches = self.fetch_objects_by_class("fabricNode", query_filter)
        return [switch["attributes"] for switch in switches]

    def fetch_adjacent_connections(self, device_id):
        dn = "/".join((device_id, "sys"))

        response = self.fetch_mo_data(dn,
                                      {"query-target": "subtree",
                                       "target-subtree-class": "lldpAdjEp"})

        connections = self.get_objects_by_field_names(response,
                                                      "lldpAdjEp", "attributes")
        return connections

    # Returns a list of dicts, one per spine switch connected to the leaf:
    # 1. "device": the spine switch (fabricNode with role "spine")
    # 2. "downlink_pnic": pnic id on the spine side of the link
    # 3. "uplink_pnic": pnic id on the leaf side of the link
    def fetch_spines_and_pnics_by_leaf_id(self, leaf_id):
        spine_switches = self.fetch_switches_by_role("spine")
        adjacent_devices = self.fetch_adjacent_connections(leaf_id)
        spines = []
        for spine in spine_switches:
            # Check if spine switch is connected to current leaf switch
            connection = next((d for d in adjacent_devices
                               if spine["name"] == d["sysName"]),
                              None)
            if connection:
                try:
                    # Extract pnics from adjacency data
                    uplink_pnic = re.match(r".*\[(.+?)\].*",
                                           connection["dn"]).group(1)
                    downlink_pnic = re.match(r".*\[(.+?)\].*",
                                             connection["portDesc"]).group(1)
                    spines.append({
                        "device": spine,
                        "downlink_pnic": downlink_pnic,
                        "uplink_pnic": uplink_pnic
                    })
                except AttributeError:
                    continue  # TODO: probably raise an exception

        return spines

    @aci_config_required(default=[])
    def get(self, db_leaf_pnic_id):
        environment = self.get_env()
        leaf_pnic = self.inv.get_by_id(environment=environment,
                                       item_id=db_leaf_pnic_id)
        leaf_switch_id = leaf_pnic['switch']

        # Decode aci leaf switch id from db format
        aci_leaf_pnic_id = decode_aci_dn(db_leaf_pnic_id)
        aci_leaf_id = re.match("switch-(.+?)-leaf", aci_leaf_pnic_id).group(1)

        # Fetch all leaf-to-spine connectivity data
        spines_with_pnics = self.fetch_spines_and_pnics_by_leaf_id(aci_leaf_id)
        pnics = []
        for spine_with_pnic in spines_with_pnics:
            spine = spine_with_pnic["device"]
            downlink_pnic_id = spine_with_pnic["downlink_pnic"]
            uplink_pnic_id = spine_with_pnic["uplink_pnic"]

            # Add spine switch to db if it's not there yet
            spine_id_match = re.match("topology/(.+)", spine["dn"])
            if not spine_id_match:
                raise ValueError("Failed to fetch spine switch id "
                                 "from switch dn: {}".format(spine["dn"]))

            aci_spine_id = spine_id_match.group(1)
            db_spine_id = "-".join(("switch", encode_aci_dn(aci_spine_id),
                                    spine["role"]))
            if not self.inv.get_by_id(environment, db_spine_id):
                spine_json = {
                    "id": db_spine_id,
                    "type": "switch",
                    "switch": db_spine_id,
                    "aci_document": spine
                }
                # Region name is the same as region id
                region_id = get_object_path_part(leaf_pnic["name_path"],
                                                 "Regions")
                region = self.inv.get_by_id(environment, region_id)
                self.inv.save_inventory_object(o=spine_json, parent=region,
                                               environment=environment)

            # Add downlink and uplink pnics to results list,
            # including their mutual connection data
            # (see "connected_to" field).
            db_downlink_pnic_id = "-".join((db_spine_id,
                                            encode_aci_dn(downlink_pnic_id)))
            db_uplink_pnic_id = "-".join((leaf_pnic["switch"],
                                          encode_aci_dn(uplink_pnic_id)))

            downlink_pnic_json = {
                "id": db_downlink_pnic_id,
                "object_name": downlink_pnic_id,
                "type": "switch_pnic",
                "role": "downlink",
                "connected_to": db_uplink_pnic_id,
                "switch": db_spine_id,
                "parent_id": db_spine_id,
                "parent_type": "switch",
                "aci_document": {}  # TODO: what can we add here?
            }

            uplink_pnic_json = {
                "id": db_uplink_pnic_id,
                "object_name": uplink_pnic_id,
                "type": "switch_pnic",
                "role": "uplink",
                "connected_to": db_downlink_pnic_id,
                "switch": leaf_switch_id,
                "parent_id": leaf_switch_id,
                "parent_type": "switch",
                "aci_document": {}  # TODO: what can we add here?
            }

            pnics.extend([downlink_pnic_json, uplink_pnic_json])

        return pnics
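As a standalone illustration of the pnic extraction in fetch_spines_and_pnics_by_leaf_id(), the sketch below applies the same bracket-capturing regex to invented lldpAdjEp attribute values; the dn and portDesc strings are only illustrative stand-ins for what the APIC returns.

import re

connection = {
    "dn": "topology/pod-1/node-101/sys/lldp/inst/if-[eth1/49]/adj-1",
    "portDesc": "topology/pod-1/paths-201/pathep-[eth2/1]",
}

# the non-greedy group captures the interface name inside the square brackets
uplink_pnic = re.match(r".*\[(.+?)\].*", connection["dn"]).group(1)
downlink_pnic = re.match(r".*\[(.+?)\].*", connection["portDesc"]).group(1)

print(uplink_pnic)    # 'eth1/49' -> local port on the leaf switch
print(downlink_pnic)  # 'eth2/1'  -> remote port on the spine switch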