Example #3
 def __init__(self):
     super().__init__()
     self.configuration = Configuration()
     self.aci_enabled = self.configuration.get_env_config() \
         .get('aci_enabled', False)
     self.aci_configuration = None
     self.host = None
     if self.aci_enabled:
         self.aci_configuration = self.configuration.get("ACI")
         self.host = self.aci_configuration["host"]
Example #5
 def __init__(self, mysql_config=None):
     super().__init__()
     self.config = {'mysql': mysql_config} if mysql_config \
         else Configuration()
     self.conf = self.config.get("mysql")
     self.connect_to_db()
     self.neutron_db = self.get_neutron_db_name()
Example #6
 def setUp(self):
     super().setUp()
     self.configure_environment()
     self.cli_access = CliAccess()
     self.conf = Configuration()
     self.cli_access.configuration = self.conf
     self.conf.use_env = MagicMock()
     self.conf.environment = CONFIGURATIONS
     self.conf.configuration = CONFIGURATIONS["configuration"]
Example #7
 def __init__(self, mysql_config=None):
     super().__init__()
     self.config = {'mysql': mysql_config} if mysql_config \
         else Configuration()
     self.conf = self.config.get("mysql")
     self.connect_timeout = int(self.conf['connect_timeout']) \
         if 'connect_timeout' in self.conf \
         else self.TIMEOUT
     self.connect_to_db()
     self.neutron_db = self.get_neutron_db_name()
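
Both this example and Example #5 rely on a plain dict and a Configuration object exposing the same get() accessor: an explicit mysql_config is wrapped in a one-section dict, otherwise the shared Configuration is used. A minimal, self-contained sketch of that duck-typing (values are illustrative):

    # a plain dict stands in for Configuration: both answer .get('mysql')
    mysql_config = {'host': 'localhost', 'connect_timeout': '5'}
    config = {'mysql': mysql_config}   # Configuration() when no override given
    conf = config.get('mysql')
    # mirrors the connect_timeout guard above (TIMEOUT would be the fallback)
    timeout = int(conf['connect_timeout']) if 'connect_timeout' in conf else 15
    print(timeout)  # 5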
Example #8
 def __init__(self, args):
     super().__init__()
     self.log = FullLogger()
     self.log.set_loglevel(args.loglevel)
     self.env = args.env
     try:
         self.conf = Configuration(args.mongo_config)
         self.inv = InventoryMgr()
         self.inv.log.set_loglevel(args.loglevel)
         self.inv.set_collections(args.inventory)
     except FileNotFoundError:
         sys.exit(1)
Example #9
 def __init__(self, api_name=None, config=None):
     super().__init__()
     if api_name is None:
         raise ValueError('ApiAccessBase: api_name must be defined')
     self.config = {api_name: config} if config else Configuration()
     self.api_config = self.config.get(api_name)
     if self.api_config is None:
         raise ValueError('ApiAccessBase: section "{}" missing in config'
                          .format(api_name))
     self.host = self.api_config.get('host', '')
     self.port = self.api_config.get('port', '')
     if not (self.host and self.port):
         raise ValueError('Missing definition of host or port ' +
                          'for {} API access'
                          .format(api_name))
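
A subclass would pass its section name through api_name and let this base class validate host and port. A hedged sketch of such a subclass (NovaApi and its section name are hypothetical, not taken from the project):

    # hypothetical subclass of the ApiAccessBase shown above
    class NovaApi(ApiAccessBase):
        def __init__(self, config=None):
            # raises ValueError unless the 'Nova' section defines host + port
            super().__init__(api_name='Nova', config=config)

    api = NovaApi(config={'host': '10.0.0.1', 'port': '8774'})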
Example #10
 def __init__(self):
     self.get_args()
     MongoAccess.set_config_file(self.args.mongo_config)
     MongoAccess.__init__(self)
     self.log = FullLogger()
     self.log.set_loglevel(self.args.loglevel)
     self.conf = Configuration()
     self.inv = InventoryMgr()
     self.inv.set_collections(self.args.inventory)
     stats_coll = self.inv.get_coll_name('statistics')
     self.stats = self.db[stats_coll]
     # consume messages from topic
     self.consumer = KafkaConsumer('VPP.stats',
                                   group_id='calipso_test',
                                   auto_offset_reset=self.args.offset,
                                   bootstrap_servers=['localhost:9092'])
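
KafkaConsumer (from kafka-python) is iterable, so a matching consumption loop could simply iterate over self.consumer; the handling below is an illustrative sketch, not part of the original:

    # each iteration yields a ConsumerRecord whose value is the raw bytes
    for message in self.consumer:
        payload = message.value.decode('utf-8')
        self.log.info('VPP.stats message: {}'.format(payload))
        self.stats.insert_one({'message': payload})  # illustrative storage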
Example #11
    def configure_environment(self):
        self.env = ENV_CONFIG
        self.inventory_collection = COLLECTION_CONFIG
        # mock the Mongo Access
        MongoAccess.mongo_connect = MagicMock()
        MongoAccess.db = MagicMock()

        self.conf = Configuration()
        self.conf.use_env = MagicMock()
        self.conf.environment = CONFIGURATIONS
        self.conf.configuration = CONFIGURATIONS["configuration"]

        self.inv = InventoryMgr()
        self.inv.set_collections(self.inventory_collection)
        DbAccess.conn = MagicMock()
        DbAccess.get_neutron_db_name = MagicMock()
        DbAccess.get_neutron_db_name.return_value = "neutron"
        SshConnection.connect = MagicMock()
        SshConnection.check_definitions = MagicMock()
        SshConn.check_definitions = MagicMock()
Example #12
    def configure_environment(self):
        self.env = ENV_CONFIG
        self.inventory_collection = COLLECTION_CONFIG
        # mock the mongo access
        MongoAccess.mongo_connect = MagicMock()
        MongoAccess.db = MagicMock()
        # mock log
        FullLogger.info = MagicMock()

        self.conf = Configuration()
        self.conf.use_env = MagicMock()
        self.conf.environment = CONFIGURATIONS
        self.conf.configuration = CONFIGURATIONS["configuration"]

        self.inv = InventoryMgr()
        self.inv.clear = MagicMock()
        self.inv.set_collections(self.inventory_collection)

        MonitoringSetupManager.server_setup = MagicMock()

        DbAccess.get_neutron_db_name = MagicMock()
        DbAccess.get_neutron_db_name.return_value = "neutron"
Example #13
    def __init__(self, config=None):
        super(ApiAccess, self).__init__()
        if ApiAccess.initialized:
            return
        ApiAccess.config = {'OpenStack': config} if config else Configuration()
        ApiAccess.api_config = ApiAccess.config.get("OpenStack")
        host = ApiAccess.api_config.get("host", "")
        ApiAccess.host = host
        port = ApiAccess.api_config.get("port", "")
        if not (host and port):
            raise ValueError('Missing definition of host or port ' +
                             'for OpenStack API access')
        ApiAccess.base_url = "http://" + host + ":" + port
        ApiAccess.admin_token = ApiAccess.api_config.get("admin_token", "")
        ApiAccess.admin_project = ApiAccess.api_config.get(
            "admin_project", "admin")
        ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"

        token = self.v2_auth_pwd(ApiAccess.admin_project)
        if not token:
            raise ValueError("Authentication failed. Failed to obtain token")
        else:
            self.subject_token = token
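
Note that every attribute here is set on the ApiAccess class rather than the instance, and the initialized guard turns later constructions into no-ops, so the class behaves as a singleton sharing one token. Assuming initialized is set to True after the first successful construction (not shown in this snippet), usage would look like:

    # first construction authenticates and populates class-level state
    api = ApiAccess(config={'host': '10.0.0.1', 'port': '5000',
                            'admin_token': 'secret'})
    # later constructions return early and reuse the same base_url and token
    another = ApiAccess()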
Example #14
class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
    PRODUCTION_CONFIG_DIR = '/etc/sensu/conf.d'
    APP_SCRIPTS_FOLDER = 'monitoring/checks'
    REMOTE_SCRIPTS_FOLDER = '/etc/sensu/plugins'

    provision_levels = {
        'none': 0,
        'db': 1,
        'files': 2,
        'deploy': 3
    }

    pending_changes = {}

    fetch_ssl_files = []

    def __init__(self, env):
        super().__init__()
        self.log = FullLogger()
        self.configuration = Configuration()
        self.mechanism_drivers = \
            self.configuration.environment['mechanism_drivers']
        self.env = env
        self.had_errors = False
        self.monitoring_config = self.db.monitoring_config_templates
        try:
            self.env_monitoring_config = self.configuration.get('Monitoring')
        except IndexError:
            self.env_monitoring_config = {}
        self.local_host = self.env_monitoring_config.get('server_ip', '')
        self.scripts_prepared_for_host = {}
        self.replacements = self.env_monitoring_config
        self.inv = InventoryMgr()
        self.config_db = self.db[self.inv.get_coll_name('monitoring_config')]
        self.provision = self.provision_levels['none']
        if self.env_monitoring_config:
            provision = self.env_monitoring_config.get('provision', 'none')
            provision = str.lower(provision)
            self.provision =\
                self.provision_levels.get(provision,
                                          self.provision_levels['none'])

    # create a directory if it does not exist
    @staticmethod
    def make_directory(directory):
        if not os.path.exists(directory):
            os.makedirs(directory)
        return directory

    def get_config_dir(self, sub_dir=''):
        config_folder = self.env_monitoring_config['config_folder'] + \
            (os.sep + sub_dir if sub_dir else '')
        return self.make_directory(config_folder).rstrip(os.sep)

    def prepare_config_file(self, file_type, base_condition):
        condition = base_condition
        condition['type'] = file_type
        sort = [('order', pymongo.ASCENDING)]
        docs = self.monitoring_config.find(condition, sort=sort)
        content = {}
        for doc in docs:
            if not self.check_env_condition(doc):
                return {}
            content.update(doc)
        self.replacements['app_path'] = \
            self.configuration.environment['app_path']
        config = self.content_replace({'config': content.get('config', {})})
        return config

    def check_env_condition(self, doc):
        if 'condition' not in doc:
            return True
        condition = doc['condition']
        if not isinstance(condition, dict):
            self.log.error('incorrect condition in monitoring ({}): '
                           'condition must be a dict'
                           .format(doc.get('type', '')))
            return False
        for key, required_value in condition.items():
            if not self.check_env_config(key, required_value):
                return False
        return True

    def check_env_config(self, config_name, required_config_value):
        required_config_values = required_config_value \
            if isinstance(required_config_value, list) \
            else [required_config_value]
        conf_values = self.configuration.environment.get(config_name, [])
        conf_values = conf_values if isinstance(conf_values, list) \
            else [conf_values]
        intersection = [val for val in required_config_values
                        if val in conf_values]
        return bool(intersection)
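
    # Worked example (illustrative): a condition value of ['OVS', 'VPP']
    # checked against a configured scalar 'OVS' normalizes both sides to
    # lists and passes, since the intersection ['OVS'] is non-empty; with
    # a configured value of 'LXB' the intersection is empty and it fails.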

    def content_replace(self, content):
        content_remapped = remap(content, visit=self.fill_values)
        return content_remapped

    def format_string(self, val):
        formatted = val if not isinstance(val, str) or '{' not in val \
            else val.format_map(self.replacements)
        return formatted

    def fill_values(self, path, key, value):
        if not path:
            return key, value
        key_formatted = self.format_string(key)
        value_formatted = self.format_string(value)
        return key_formatted, value_formatted
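
    # Illustrative walk-through: content_replace() hands the nested config
    # to boltons' remap(), which calls fill_values() for every key and
    # value; a value like '{server_ip}:4567' is rewritten through
    # str.format_map(self.replacements) to e.g. '10.0.0.1:4567', while the
    # root level (empty path) is left untouched.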

    def get_config_from_db(self, host, file_type):
        find_tuple = {
            'environment': self.env,
            'host': host,
            'type': file_type
        }
        doc = self.config_db.find_one(find_tuple)
        if not doc:
            return {}
        doc.pop("_id", None)
        return self.decode_mongo_keys(doc)

    def write_config_to_db(self, host, config, file_type):
        find_tuple = {
            'environment': self.env,
            'host': host,
            'type': file_type
        }
        doc = copy.copy(find_tuple)
        doc['config'] = config
        doc = self.encode_mongo_keys(doc)
        if not doc:
            return {}
        self.config_db.update_one(find_tuple, {'$set': doc}, upsert=True)

    def merge_config(self, host, file_type, content):
        """
        merge current monitoring config of host
        with newer content.
        return the merged config
        """
        doc = self.get_config_from_db(host, file_type)
        config = remerge([doc['config'], content.get('config')]) if doc \
            else content.get('config', {})
        self.write_config_to_db(host, config, file_type)
        return config
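
    # Assumed remerge() semantics (a deep-merge helper, not shown here):
    # nested dicts from the stored config and the newer content are merged
    # recursively, with values from the newer content winning, e.g.
    #   remerge([{'client': {'name': 'sensu', 'port': 3030}},
    #            {'client': {'port': 4567}}])
    #   => {'client': {'name': 'sensu', 'port': 4567}}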

    def write_config_file(self, file_name, sub_dir, host, content,
                          is_container=False, is_server=False):
        """
        apply environment definitions to the config,
        e.g. replace {server_ip} with the IP or host name for the server
        """
        # save the config to DB first, and while doing that
        # merge it with any existing config on same host
        content = self.merge_config(host, file_name, content)

        if self.provision == self.provision_levels['db']:
            self.log.debug('Monitoring setup kept only in DB')
            return
        # now dump the config to the file
        content_json = json.dumps(content.get('config', content),
                                  sort_keys=True, indent=4)
        content_json += '\n'
        # always write the file locally first
        local_dir = self.make_directory(os.path.join(self.get_config_dir(),
                                        sub_dir.strip(os.path.sep)))
        local_path = os.path.join(local_dir, file_name)
        self.write_to_local_host(local_path, content_json)
        self.track_setup_changes(host, is_container, file_name, local_path,
                                 sub_dir, is_server=is_server)

    def add_changes_for_all_clients(self):
        """
        to debug deployment, add simulated track changes entries.
        no need to add for server, as these are done by server_setup()
        """
        docs = self.config_db.find({'environment': self.env})
        for doc in docs:
            host = doc['host']
            sub_dir = os.path.join('host', host)
            file_name = doc['type']
            config_folder = self.env_monitoring_config['config_folder']
            local_path = os.path.join(config_folder, sub_dir, file_name)
            if host == self.env_monitoring_config['server_ip']:
                continue
            self.track_setup_changes(host, False, file_name, local_path,
                                     sub_dir)

    def get_ssh(self, host, is_container=False, for_sftp=False):
        ssh = SshConnection.get_ssh(host, for_sftp)
        if not ssh:
            conf = self.env_monitoring_config
            if is_container or host == conf['server_ip']:
                host = conf['server_ip']
                port = int(conf['ssh_port'])
                user = conf['ssh_user']
                pwd = conf['ssh_password']
                ssh = SshConnection(host, user, _pwd=pwd, _port=port,
                                    for_sftp=for_sftp)
            else:
                ssh = SshConn(host, for_sftp=for_sftp)
        return ssh

    def track_setup_changes(self, host=None, is_container=False, file_name=None,
                            local_path=None, sub_dir=None,
                            is_server=False,
                            target_mode=None,
                            target_path=PRODUCTION_CONFIG_DIR):
        if host not in self.pending_changes:
            self.pending_changes[host] = {}
        if file_name not in self.pending_changes[host]:
            self.pending_changes[host][file_name] = {
                "host": host,
                "is_container": is_container,
                "is_server": is_server,
                "file_name": file_name,
                "local_path": local_path,
                "sub_dir": sub_dir,
                "target_path": target_path,
                "target_mode": target_mode
            }

    def handle_pending_setup_changes(self):
        if self.provision < self.provision_levels['files']:
            if self.provision == self.provision_levels['db']:
                self.log.info('Monitoring config applied only in DB')
            return True
        self.log.info('applying monitoring setup')
        hosts = {}
        scripts_to_hosts = {}
        for host, host_changes in self.pending_changes.items():
            self.handle_pending_host_setup_changes(host_changes, hosts,
                                                   scripts_to_hosts)
        if self.provision < self.provision_levels['deploy']:
            return True
        if self.fetch_ssl_files:
            self.deploy_ssl_files(list(scripts_to_hosts.keys()))
        for host_details in scripts_to_hosts.values():
            self.deploy_scripts_to_host(host_details)
        for host_details in hosts.values():
            self.deploy_config_to_target(host_details)
        had_errors = ', with some error(s)' if self.had_errors else ''
        self.log.info('done applying monitoring setup{}'.format(had_errors))
        return not self.had_errors

    def handle_pending_host_setup_changes(self, host_changes, hosts,
                                          scripts_to_hosts):
        if self.provision < self.provision_levels['deploy']:
            self.log.info('Monitoring config not deployed to remote host')
        for file_type, changes in host_changes.items():
            host = changes['host']
            is_container = changes['is_container']
            is_server = changes['is_server']
            local_dir = changes['local_path']
            if local_dir == "scripts":
                scripts_to_hosts[host] = {'host': host, 'is_server': is_server}
                continue
            self.log.debug('applying monitoring setup changes ' +
                           'for host ' + host + ', file type: ' + file_type)
            is_local_host = host == self.local_host
            file_path = os.path.join(self.PRODUCTION_CONFIG_DIR, file_type)
            if not is_server and host not in hosts:
                hosts[host] = {
                    'host': host,
                    'local_dir': local_dir,
                    'is_local_host': is_local_host,
                    'is_container': is_container,
                    'is_server': is_server
                }
            if is_server:
                remote_path = self.PRODUCTION_CONFIG_DIR
                if os.path.isfile(local_dir):
                    remote_path += os.path.sep + os.path.basename(local_dir)
                try:
                    self.write_to_server(local_dir,
                                         remote_path=remote_path,
                                         is_container=is_container)
                except SshError:
                    self.had_errors = True
            elif is_local_host:
                # write to production configuration directory on local host
                self.make_directory(self.PRODUCTION_CONFIG_DIR)
                shutil.copy(changes['local_path'], file_path)
            else:
                # write to remote host prepare dir - use sftp
                if self.provision < self.provision_levels['deploy']:
                    continue
                try:
                    self.write_to_remote_host(host, changes['local_path'])
                except SshError:
                    self.had_errors = True

    def prepare_scripts(self, host, is_server):
        if self.scripts_prepared_for_host.get(host, False):
            return
        gateway_host = SshConn.get_gateway_host(host)
        # copy scripts to host
        scripts_dir = os.path.join(self.env_monitoring_config['app_path'],
                                   self.APP_SCRIPTS_FOLDER)
        script_files = [f for f in os.listdir(scripts_dir)
                        if os.path.isfile(os.path.join(scripts_dir, f))]
        script_mode = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | \
            stat.S_IROTH | stat.S_IXOTH
        target_host = host if is_server else gateway_host
        self.make_remote_dir(target_host, self.REMOTE_SCRIPTS_FOLDER)
        for file_name in script_files:
            remote_path = os.path.join(self.REMOTE_SCRIPTS_FOLDER, file_name)
            local_path = os.path.join(scripts_dir, file_name)
            if not os.path.isfile(local_path):
                continue
            if is_server:
                ssh = self.get_ssh(target_host, for_sftp=True)
                ssh.copy_file(local_path, remote_path, mode=script_mode)
            else:
                self.copy_to_remote_host(target_host, local_path, remote_path,
                                         mode=script_mode,
                                         make_remote_dir=False)
        self.scripts_prepared_for_host[host] = True

    def deploy_ssl_files(self, hosts: list):
        try:
            monitoring_server = self.env_monitoring_config['server_ip']
            gateway_host = SshConn.get_gateway_host(hosts[0])
            temp_dir = tempfile.TemporaryDirectory()
            for file_path in self.fetch_ssl_files:
                # copy SSL files from the monitoring server
                file_name = os.path.basename(file_path)
                local_path = os.path.join(temp_dir.name, file_name)
                self.get_file(monitoring_server, file_path, local_path)
                #  first copy the files to the gateway
                self.write_to_remote_host(gateway_host, local_path,
                                          remote_path=file_path)
            ssl_path = os.path.commonprefix(self.fetch_ssl_files)
            for host in hosts:
                self.copy_from_gateway_to_host(host, ssl_path, ssl_path)
        except SshError:
            self.had_errors = True

    def deploy_scripts_to_host(self, host_details):
        try:
            host = host_details['host']
            is_server = host_details['is_server']
            self.prepare_scripts(host, is_server)
            remote_path = self.REMOTE_SCRIPTS_FOLDER
            local_path = remote_path + os.path.sep + '*.py'
            if is_server:
                return  # this was done earlier
            self.copy_from_gateway_to_host(host, local_path, remote_path)
        except SshError:
            self.had_errors = True

    def restart_service(self, host: str = None,
                        service: str = 'sensu-client',
                        is_server: bool = False,
                        msg: str = None):
        ssh = self.get_ssh(host)
        cmd = 'sudo service {} restart'.format(service)
        log_msg = msg if msg else 'deploying config to host {}'.format(host)
        self.log.info(log_msg)
        try:
            if is_server:
                ssh.exec(cmd)
            else:
                self.run(cmd, ssh_to_host=host, ssh=ssh)
        except SshError as e:
            if 'Error: Redirecting to /bin/systemctl restart' not in str(e):
                self.had_errors = True

    def deploy_config_to_target(self, host_details):
        try:
            host = host_details['host']
            is_local_host = host_details['is_local_host']
            is_container = host_details['is_container']
            is_server = host_details['is_server']
            local_dir = host_details['local_dir']
            if is_container or is_server or not is_local_host:
                local_dir = os.path.dirname(local_dir)
                if not is_server:
                    self.move_setup_files_to_remote_host(host, local_dir)
                # restart the Sensu client on the remote host,
                # so it takes the new setup
                self.restart_service(host)
        except SshError:
            self.had_errors = True

    def run_cmd_locally(self, cmd):
        try:
            # subprocess.Popen (capital P); shell=True is dropped because
            # the command is already split into an argument list
            subprocess.Popen(cmd.split(),
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        except OSError as e:
            print("Error running command: " + cmd +
                  ", error: " + str(e) + "\n")

    def move_setup_files_to_remote_host(self, host, local_dir):
        if self.provision < self.provision_levels['deploy']:
            self.log.info('Monitoring config not written to remote host')
            return
        # need to scp the files from the gateway host to the target host
        remote_path = self.PRODUCTION_CONFIG_DIR
        self.copy_from_gateway_to_host(host, local_dir, remote_path)

    def copy_from_gateway_to_host(self, host, local_dir, remote_path):
        ssh = self.get_ssh(host)
        what_to_copy = local_dir if '*' in local_dir else local_dir + '/*'
        if ssh.is_gateway_host(host):
            # on gateway host, perform a simple copy
            # make sure the source and destination are not the same
            local_dir_base = local_dir[:local_dir.rindex('/*')] \
                if '/*' in local_dir else local_dir
            if local_dir_base.strip('/*') == remote_path.strip('/*'):
                return  # same directory - nothing to do
            self.make_remote_dir(host, remote_path)
            cmd = 'cp {} {}'.format(what_to_copy, remote_path)
            self.run(cmd, ssh=ssh)
            return
        self.make_remote_dir(host, remote_path)
        remote_path = ssh.get_user() + '@' + host + ':' + \
            remote_path + os.sep
        self.run_on_gateway('scp {} {}'.format(what_to_copy, remote_path),
                            enable_cache=False,
                            use_sudo=None)

    def make_remote_dir_on_host(self, ssh, host, path, path_is_file=False):
        # make sure we have write permissions in target directories
        dir_path = path
        if path_is_file:
            dir_path = os.path.dirname(dir_path)
        cmd = 'sudo mkdir -p ' + dir_path
        try:
            self.run(cmd, ssh_to_host=host, ssh=ssh)
        except timeout:
            self.log.error('timed out trying to create directory {} on host {}'
                           .format(dir_path, host))
            return
        cmd = 'sudo chown -R ' + ssh.get_user() + ' ' + dir_path
        self.run(cmd, ssh_to_host=host, ssh=ssh)

    def make_remote_dir(self, host, path, path_is_file=False):
        ssh = self.get_ssh(host, for_sftp=True)
        self.make_remote_dir_on_host(ssh, host, path, path_is_file)

    def copy_to_remote_host(self, host, local_path, remote_path, mode=None,
                            make_remote_dir=True):
        # copy the local file to the preparation folder for the remote host
        # on the gateway host
        ssh = self.get_ssh(host)
        gateway_host = ssh.get_gateway_host(host)
        if make_remote_dir:
            self.make_remote_dir(gateway_host, remote_path, path_is_file=True)
        ftp_ssh = self.get_ssh(gateway_host, for_sftp=True)
        ftp_ssh.copy_file(local_path, remote_path, mode)

    def write_to_remote_host(self, host, local_path=None, remote_path=None):
        remote_path = remote_path if remote_path else local_path
        self.copy_to_remote_host(host, local_path, remote_path)

    def write_to_server(self, local_path, remote_path=None, is_container=False):
        host = self.env_monitoring_config['server_ip']
        ssh = self.get_ssh(host, is_container=is_container)
        remote_path = remote_path if remote_path else local_path
        self.make_remote_dir_on_host(ssh, host, remote_path, True)
        # copy to config dir first
        ftp_ssh = self.get_ssh(host, is_container=is_container, for_sftp=True)
        ftp_ssh.copy_file(local_path, remote_path)

    @staticmethod
    def write_to_local_host(file_path, content):
        with open(file_path, "w") as f:
            f.write(content)
        return file_path

    def get_file(self, host, remote_path, local_path):
        ftp_ssh = self.get_ssh(host, for_sftp=True)
        ftp_ssh.copy_file_from_remote(remote_path, local_path)
Example #16
class ScanController(Fetcher):
    DEFAULTS = {
        "env": "",
        "mongo_config": "",
        "type": "",
        "inventory": "inventory",
        "scan_self": False,
        "parent_id": "",
        "parent_type": "",
        "id_field": "id",
        "loglevel": "INFO",
        "inventory_only": False,
        "links_only": False,
        "cliques_only": False,
        "monitoring_setup_only": False,
        "clear": False,
        "clear_all": False
    }

    def __init__(self):
        super().__init__()
        self.conf = None
        self.inv = None

    def get_args(self):
        # try to read scan plan from command line parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["mongo_config"],
                            help="name of config file " +
                            "with MongoDB server access details")
        parser.add_argument("-e",
                            "--env",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["env"],
                            help="name of environment to scan \n"
                            "(default: " + self.DEFAULTS["env"] + ")")
        parser.add_argument("-t",
                            "--type",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["type"],
                            help="type of object to scan \n"
                            "(default: environment)")
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["inventory"],
                            help="name of inventory collection \n"
                            "(default: 'inventory')")
        parser.add_argument("-s",
                            "--scan_self",
                            action="store_true",
                            help="scan changes to a specific object \n"
                            "(default: False)")
        parser.add_argument("-i",
                            "--id",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["env"],
                            help="ID of object to scan (when scan_self=true)")
        parser.add_argument("-p",
                            "--parent_id",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["parent_id"],
                            help="ID of parent object (when scan_self=true)")
        parser.add_argument("-a",
                            "--parent_type",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["parent_type"],
                            help="type of parent object (when scan_self=true)")
        parser.add_argument("-f",
                            "--id_field",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["id_field"],
                            help="name of ID field (when scan_self=true) \n"
                            "(default: 'id', use 'name' for projects)")
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')".format(
                                self.DEFAULTS["loglevel"]))
        parser.add_argument("--clear",
                            action="store_true",
                            help="clear all data related to "
                            "the specified environment prior to scanning\n"
                            "(default: False)")
        parser.add_argument("--clear_all",
                            action="store_true",
                            help="clear all data prior to scanning\n"
                            "(default: False)")
        parser.add_argument("--monitoring_setup_only",
                            action="store_true",
                            help="do only monitoring setup deployment \n"
                            "(default: False)")

        # At most one of these arguments may be present
        scan_only_group = parser.add_mutually_exclusive_group()
        scan_only_group.add_argument("--inventory_only",
                                     action="store_true",
                                     help="do only scan to inventory\n" +
                                     "(default: False)")
        scan_only_group.add_argument("--links_only",
                                     action="store_true",
                                     help="do only links creation \n" +
                                     "(default: False)")
        scan_only_group.add_argument("--cliques_only",
                                     action="store_true",
                                     help="do only cliques creation \n" +
                                     "(default: False)")

        return parser.parse_args()

    def get_scan_plan(self, args):
        # PyCharm type checker can't reliably check types of document
        # noinspection PyTypeChecker
        return self.prepare_scan_plan(ScanPlan(args))

    def prepare_scan_plan(self, plan):
        # Find out object type if not specified in arguments
        if not plan.object_type:
            if not plan.object_id:
                plan.object_type = "environment"
            else:
                # If we scan a specific object, it has to exist in db
                scanned_object = self.inv.get_by_id(plan.env, plan.object_id)
                if not scanned_object:
                    exc_msg = "No object found with specified id: '{}'" \
                        .format(plan.object_id)
                    raise ScanArgumentsError(exc_msg)
                plan.object_type = scanned_object["type"]
                plan.parent_id = scanned_object["parent_id"]
                plan.type_to_scan = scanned_object["parent_type"]

        class_module = plan.object_type
        if not plan.scan_self:
            plan.scan_self = plan.object_type != "environment"

        plan.object_type = plan.object_type.title().replace("_", "")

        if not plan.scan_self:
            plan.child_type = None
        else:
            plan.child_id = plan.object_id
            plan.object_id = plan.parent_id
            if plan.type_to_scan.endswith("_folder"):
                class_module = plan.child_type + "s_root"
            else:
                class_module = plan.type_to_scan
            plan.object_type = class_module.title().replace("_", "")

        if class_module == "environment":
            plan.obj = {"id": plan.env}
        else:
            # fetch object from inventory
            obj = self.inv.get_by_id(plan.env, plan.object_id)
            if not obj:
                raise ValueError("No match for object ID: {}".format(
                    plan.object_id))
            plan.obj = obj

        plan.scanner_type = "Scan" + plan.object_type
        return plan

    def run(self, args: dict = None):
        args = setup_args(args, self.DEFAULTS, self.get_args)
        # After this setup we assume args dictionary has all keys
        # defined in self.DEFAULTS
        self.log.set_loglevel(args['loglevel'])

        try:
            MongoAccess.set_config_file(args['mongo_config'])
            self.inv = InventoryMgr()
            self.inv.log.set_loglevel(args['loglevel'])
            self.inv.set_collections(args['inventory'])
            self.conf = Configuration()
        except FileNotFoundError as e:
            return False, 'Mongo configuration file not found: {}'\
                .format(str(e))

        scan_plan = self.get_scan_plan(args)
        if scan_plan.clear or scan_plan.clear_all:
            self.inv.clear(scan_plan)
        self.conf.log.set_loglevel(scan_plan.loglevel)

        env_name = scan_plan.env
        self.conf.use_env(env_name)

        # generate ScanObject Class and instance.
        scanner = Scanner()
        scanner.log.set_loglevel(args['loglevel'])
        scanner.set_env(env_name)
        scanner.found_errors[env_name] = False

        # decide what scanning operations to do
        inventory_only = scan_plan.inventory_only
        links_only = scan_plan.links_only
        cliques_only = scan_plan.cliques_only
        monitoring_setup_only = scan_plan.monitoring_setup_only
        run_all = False if inventory_only or links_only or cliques_only \
            or monitoring_setup_only else True

        # setup monitoring server
        monitoring = \
            self.inv.is_feature_supported(env_name,
                                          EnvironmentFeatures.MONITORING)
        if monitoring:
            self.inv.monitoring_setup_manager = \
                MonitoringSetupManager(env_name)
            self.inv.monitoring_setup_manager.server_setup()

        # do the actual scanning
        try:
            if inventory_only or run_all:
                scanner.run_scan(scan_plan.scanner_type, scan_plan.obj,
                                 scan_plan.id_field, scan_plan.child_id,
                                 scan_plan.child_type)
            if links_only or run_all:
                scanner.scan_links()
            if cliques_only or run_all:
                scanner.scan_cliques()
            if monitoring:
                if monitoring_setup_only:
                    self.inv.monitoring_setup_manager.simulate_track_changes()
                if not (inventory_only or links_only or cliques_only):
                    scanner.deploy_monitoring_setup()
        except ScanError as e:
            return False, "scan error: " + str(e)
        SshConnection.disconnect_all()
        status = 'ok' if not scanner.found_errors.get(env_name, False) \
            else 'errors detected'
        if status == 'ok' and scan_plan.object_type == "environment":
            self.mark_env_scanned(scan_plan.env)
        self.log.info('Scan completed, status: {}'.format(status))
        return True, status

    def mark_env_scanned(self, env):
        environments_collection = self.inv.collection['environments_config']
        environments_collection \
            .update_one(filter={'name': env},
                        update={'$set': {'scanned': True}})
Example #17
class Scanner(Fetcher):

    config = None
    environment = None
    env = None
    root_pattern = None
    scan_queue = queue.Queue()
    scan_queue_track = {}

    # keep errors indication per environment
    found_errors = {}

    def __init__(self):
        """
        Scanner is the base class for scanners.
        """
        super().__init__()
        self.config = Configuration()
        self.inv = InventoryMgr()
        self.scanners_package = None
        self.scanners = {}
        self.link_finders = []
        self.load_scanners_metadata()
        self.load_link_finders_metadata()

    def scan(self,
             scanner_type,
             obj,
             id_field="id",
             limit_to_child_id=None,
             limit_to_child_type=None):
        types_to_fetch = self.get_scanner(scanner_type)
        types_children = []
        if not limit_to_child_type:
            limit_to_child_type = []
        elif isinstance(limit_to_child_type, str):
            limit_to_child_type = [limit_to_child_type]
        try:
            for t in types_to_fetch:
                if limit_to_child_type and t["type"] not in limit_to_child_type:
                    continue
                children = self.scan_type(t, obj, id_field)
                if limit_to_child_id:
                    children = [
                        c for c in children if c[id_field] == limit_to_child_id
                    ]
                    if not children:
                        continue
                types_children.append({
                    "type": t["type"],
                    "children": children
                })
        except ValueError:
            return False
        except SshError:
            # mark the error
            self.found_errors[self.get_env()] = True
        if limit_to_child_id and len(types_children) > 0:
            t = types_children[0]
            children = t["children"]
            return children[0]
        return obj

    def check_type_env(self, type_to_fetch):
        # check if type is to be run in this environment
        basic_cond = {'environment_type': self.ENV_TYPE_OPENSTACK}
        env_cond = type_to_fetch.get("environment_condition", {}) \
            if "environment_condition" in type_to_fetch \
            else basic_cond
        if not env_cond:
            env_cond = basic_cond
        if 'environment_type' not in env_cond.keys():
            env_cond.update(basic_cond)
        if not isinstance(env_cond, dict):
            self.log.warn('Illegal environment_condition given '
                          'for type {type}'.format(type=type_to_fetch['type']))
            return True
        conf = self.config.get_env_config()
        if 'environment_type' not in conf:
            conf.update(basic_cond)
        for attr, required_val in env_cond.items():
            if attr == "mechanism_drivers":
                if "mechanism_drivers" not in conf:
                    self.log.warn('Illegal environment configuration: '
                                  'missing mechanism_drivers')
                    return False
                if not isinstance(required_val, list):
                    required_val = [required_val]
                value_ok = bool(
                    set(required_val) & set(conf["mechanism_drivers"]))
                if not value_ok:
                    return False
            elif attr not in conf:
                return False
            else:
                if isinstance(required_val, list):
                    if conf[attr] not in required_val:
                        return False
                else:
                    if conf[attr] != required_val:
                        return False
        # no check failed
        return True
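
    # Worked example (illustrative): for a type with environment_condition
    # {'mechanism_drivers': 'OVS'}, the required value is normalized to
    # ['OVS'] and intersected with conf['mechanism_drivers']; an
    # environment configured as ['OVS', 'LXB'] passes the check, while a
    # VPP-only environment makes scan_type() skip this type.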

    def scan_type(self, type_to_fetch, parent, id_field):
        # check if type is to be run in this environment
        if not self.check_type_env(type_to_fetch):
            return []

        if not parent:
            obj_id = None
        else:
            obj_id = str(parent[id_field])
            if not obj_id or not obj_id.rstrip():
                raise ValueError("Object missing " + id_field + " attribute")

        # get Fetcher instance
        fetcher = type_to_fetch["fetcher"]
        if not isinstance(fetcher, Fetcher):
            type_to_fetch['fetcher'] = fetcher()  # make it an instance
            fetcher = type_to_fetch["fetcher"]
        fetcher.setup(env=self.get_env(), origin=self.origin)

        # get children_scanner instance
        children_scanner = type_to_fetch.get("children_scanner")

        escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
        self.log.info("Scanning: type={type}, "
                      "parent: (type={parent_type}, "
                      "name={parent_name}, "
                      "id={parent_id})".format(
                          type=type_to_fetch["type"],
                          parent_type=parent.get('type', 'environment'),
                          parent_name=parent.get('name', ''),
                          parent_id=escaped_id))

        # fetch OpenStack data from environment by CLI, API or MySQL
        # or physical devices data from ACI API
        # It depends on the Fetcher's config.
        try:
            db_results = fetcher.get(escaped_id)
        except SshError:
            self.found_errors[self.get_env()] = True
            return []
        except Exception as e:
            self.log.error(
                "Error while scanning: fetcher={fetcher}, type={type}, "
                "parent: (type={parent_type}, name={parent_name}, "
                "id={parent_id}), "
                "error: {error}".format(
                    fetcher=fetcher.__class__.__name__,
                    type=type_to_fetch["type"],
                    parent_type="environment"
                    if "type" not in parent else parent["type"],
                    parent_name="" if "name" not in parent else parent["name"],
                    parent_id=escaped_id,
                    error=e))

            traceback.print_exc()
            raise ScanError(str(e))

        # format results
        if isinstance(db_results, dict):
            results = db_results["rows"] if db_results["rows"] else [
                db_results
            ]
        elif isinstance(db_results, str):
            results = json.loads(db_results)
        else:
            results = db_results

        # get child_id_field
        try:
            child_id_field = type_to_fetch["object_id_to_use_in_child"]
        except KeyError:
            child_id_field = "id"

        environment = self.get_env()
        children = []

        for o in results:
            saved = self.inv.save_inventory_object(o,
                                                   parent=parent,
                                                   environment=environment,
                                                   type_to_fetch=type_to_fetch)

            if saved:
                # add objects into children list.
                children.append(o)

                # put children scanner into queue
                if children_scanner:
                    self.queue_for_scan(o, child_id_field, children_scanner)
        return children

    # scanning queued items, rather than going depth-first (DFS)
    # this is done to allow collecting all required data for objects
    # before continuing to next level
    # for example, get host ID from API os-hypervisors call, so later
    # we can use this ID in the "os-hypervisors/<ID>/servers" call
    @staticmethod
    def queue_for_scan(o, child_id_field, children_scanner):
        # track queued objects by the same composite type;id key used for
        # storing, so the same type+id pair is not queued twice
        key = o["type"] + ";" + o["id"]
        if key in Scanner.scan_queue_track:
            return
        Scanner.scan_queue_track[key] = 1
        Scanner.scan_queue.put({
            "object": o,
            "child_id_field": child_id_field,
            "scanner": children_scanner
        })

    def run_scan(self, scanner_type, obj, id_field, child_id, child_type):
        results = self.scan(scanner_type, obj, id_field, child_id, child_type)

        # run children scanner from queue.
        self.scan_from_queue()
        return results

    def scan_from_queue(self):
        while not Scanner.scan_queue.empty():
            item = Scanner.scan_queue.get()
            scanner_type = item["scanner"]

            # scan the queued item
            self.scan(scanner_type, item["object"], item["child_id_field"])
        self.log.info("Scan complete")

    def scan_links(self):
        self.log.info("Scanning for links")
        for fetcher in self.link_finders:
            fetcher.setup(env=self.get_env(), origin=self.origin)
            fetcher.add_links()

    def scan_cliques(self):
        clique_scanner = CliqueFinder()
        clique_scanner.setup(env=self.get_env(), origin=self.origin)
        clique_scanner.find_cliques()

    def deploy_monitoring_setup(self):
        ret = self.inv.monitoring_setup_manager.handle_pending_setup_changes()
        if not ret:
            self.found_errors[self.get_env()] = True

    def get_run_app_path(self):
        conf = self.config.get_env_config()
        run_app_path = conf.get('run_app_path', '')
        if not run_app_path:
            run_app_path = conf.get('app_path', '/etc/calipso')
        return run_app_path

    def load_scanners_metadata(self):
        parser = ScanMetadataParser(self.inv)
        scanners_file = os.path.join(self.get_run_app_path(), 'config',
                                     ScanMetadataParser.SCANNERS_FILE)

        metadata = parser.parse_metadata_file(scanners_file)
        self.scanners_package = metadata[ScanMetadataParser.SCANNERS_PACKAGE]
        self.scanners = metadata[ScanMetadataParser.SCANNERS]

    def load_link_finders_metadata(self):
        parser = FindLinksMetadataParser()
        finders_file = os.path.join(self.get_run_app_path(), 'config',
                                    FindLinksMetadataParser.FINDERS_FILE)
        metadata = parser.parse_metadata_file(finders_file)
        self.link_finders = metadata[FindLinksMetadataParser.LINK_FINDERS]

    def get_scanner_package(self):
        return self.scanners_package

    def get_scanner(self, scanner_type: str) -> dict:
        return self.scanners.get(scanner_type)
Example #18
    def listen(args: dict = None):

        args = setup_args(args, DefaultListener.DEFAULTS, get_args)
        if 'process_vars' not in args:
            args['process_vars'] = {}

        env_name = args["env"]
        inventory_collection = args["inventory"]

        MongoAccess.set_config_file(args["mongo_config"])
        inv = InventoryMgr()
        inv.set_collections(inventory_collection)
        conf = Configuration(args["environments_collection"])
        conf.use_env(env_name)

        event_handler = EventHandler(env_name, inventory_collection)
        event_queues = []

        env_config = conf.get_env_config()
        common_metadata_file = os.path.join(env_config.get('app_path', '/etc/calipso'),
                                            'config',
                                            DefaultListener.COMMON_METADATA_FILE)

        # import common metadata
        import_metadata(event_handler, event_queues, common_metadata_file)

        # import custom metadata if supplied
        if args["metadata_file"]:
            import_metadata(event_handler, event_queues, args["metadata_file"])

        logger = FullLogger()
        logger.set_loglevel(args["loglevel"])

        amqp_config = conf.get("AMQP")
        connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \
            .format(user=amqp_config["user"],
                    pwd=amqp_config["pwd"],
                    host=amqp_config["host"],
                    port=amqp_config["port"])

        with Connection(connect_url) as conn:
            try:
                print(conn)
                conn.connect()
                args['process_vars']['operational'] = OperationalStatus.RUNNING
                terminator = SignalHandler()
                worker = \
                    DefaultListener(connection=conn,
                                    event_handler=event_handler,
                                    event_queues=event_queues,
                                    retry_limit=args["retry_limit"],
                                    consume_all=args["consume_all"],
                                    inventory_collection=inventory_collection,
                                    env_name=env_name)
                worker.run()
                if terminator.terminated:
                    args.get('process_vars', {})['operational'] = \
                        OperationalStatus.STOPPED
            except KeyboardInterrupt:
                print('Stopped')
                args['process_vars']['operational'] = OperationalStatus.STOPPED
            except Exception as e:
                logger.log.exception(e)
                args['process_vars']['operational'] = OperationalStatus.ERROR
            finally:
                # This should enable safe saving of shared variables
                time.sleep(0.1)
Example #19
 def set_env(self, env):
     self.env = env
     self.log.set_env(env)
     self.configuration = Configuration()
Example #20
class Monitor:
    DEFAULTS = {
        'env': 'WebEX-Mirantis@Cisco',
        'inventory': 'inventory',
        'loglevel': 'WARNING'
    }

    def __init__(self):
        self.args = self.get_args()
        MongoAccess.set_config_file(self.args.mongo_config)
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        self.configuration = Configuration()
        self.input_text = None
        self.converter = SpecialCharConverter()

    def get_args(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default="",
                            help="name of config file with MongoDB server " +
                            "access details")
        parser.add_argument("-e",
                            "--env",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['env'],
                            help="name of environment to scan \n" +
                            "(default: {})".format(self.DEFAULTS['env']))
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['inventory'],
                            help="name of inventory collection \n" +
                            "(default: {}".format(self.DEFAULTS['inventory']))
        parser.add_argument('-i',
                            '--inputfile',
                            nargs='?',
                            type=str,
                            default='',
                            help="read input from the specifed file \n" +
                            "(default: from stdin)")
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')".format(
                                self.DEFAULTS["loglevel"]))
        args = parser.parse_args()
        return args

    def get_type_list(self, type_name) -> list:
        types_list = []
        docs = self.inv.find_items({'name': type_name}, collection='constants')
        for doc in docs:
            types_list = [t['value'] for t in doc['data']]
        if not types_list:
            raise ValueError('Unable to fetch {}'.format(
                type_name.replace('_', ' ')))
        return types_list
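
    # A minimal sketch (not from the original source) of the 'constants'
    # document shape that get_type_list() expects:
    #   {'name': 'object_types',
    #    'data': [{'value': 'host'}, {'value': 'host_pnic'}, ...]}
    # get_type_list('object_types') would then return:
    #   ['host', 'host_pnic', ...]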

    def match_object_types(self, check_name: str) -> list:
        object_types = self.get_type_list('object_types')
        matches = [t for t in object_types if check_name.startswith(t + '_')]
        return matches

    def match_link_types(self, check_name: str) -> list:
        link_types = self.get_type_list('link_types')
        matches = [
            t for t in link_types if check_name.startswith('link_' + t + '_')
        ]
        return matches

    def find_object_type_and_id(self, check_name: str):
        # If we have multiple matching types, take the longest of these.
        # For example, if matches are ['host', 'host_pnic'],
        # then take 'host_pnic'.
        # To facilitate this, we sort the matches in reverse order.
        is_link_check = check_name.startswith('link_')
        check_type = 'link' if is_link_check else 'object'
        if is_link_check:
            matching_types = sorted(self.match_link_types(check_name),
                                    reverse=True)
        else:
            matching_types = sorted(self.match_object_types(check_name),
                                    reverse=True)
        if not matching_types:
            raise ValueError(
                'Unable to match check name "{}" with {} type'.format(
                    check_name, check_type))
        obj_type = matching_types[0]
        prefix_len = len('link_') if is_link_check else 0
        obj_id = (obj_type + '_' if is_link_check else '') + \
            check_name[len(obj_type)+1+prefix_len:]
        return check_type, obj_type, obj_id
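
    # Worked examples (type values assumed to be in the constants lists):
    #   with 'host' and 'host_pnic' among object_types, both match
    #   'host_pnic_eth0'; reverse sorting picks the longer 'host_pnic':
    #     find_object_type_and_id('host_pnic_eth0')
    #       -> ('object', 'host_pnic', 'eth0')
    #   with 'vedge_host_pnic' among link_types:
    #     find_object_type_and_id('link_vedge_host_pnic_eth0')
    #       -> ('link', 'vedge_host_pnic', 'vedge_host_pnic_eth0')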

    def read_input(self):
        if self.args.inputfile:
            try:
                with open(self.args.inputfile, 'r') as input_file:
                    self.input_text = input_file.read()
            except Exception as e:
                raise FileNotFoundError(
                    "failed to open input file {}: {}".format(
                        self.args.inputfile, str(e)))
        else:
            self.input_text = sys.stdin.read()
            if not self.input_text:
                raise ValueError("No input provided on stdin")

    def get_handler_by_type(self, check_type, obj_type):
        module_name = 'handle_link' if check_type == 'link' \
                else 'handle_' + obj_type
        package = 'monitoring.handlers'
        handler = ClassResolver.get_instance_single_arg(
            self.args, module_name=module_name, package_name=package)
        return handler

    def get_handler(self, check_type, obj_type):
        basic_handling_types = ['instance', 'vedge', 'vservice', 'vconnector']
        if obj_type not in basic_handling_types:
            return self.get_handler_by_type(check_type, obj_type)
        from monitoring.handlers.basic_check_handler \
            import BasicCheckHandler
        return BasicCheckHandler(self.args)
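
    # Illustrative dispatch (module names follow the 'handle_<type>' pattern
    # used above; the exact set depends on the monitoring.handlers package):
    #   get_handler('object', 'host_pnic') -> handle_host_pnic handler
    #   get_handler('link', 'vedge_host_pnic') -> handle_link handler
    #   get_handler('object', 'instance') -> BasicCheckHandler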

    def check_link_interdependency_for(self,
                                       object_id: str,
                                       from_type: str = None,
                                       to_type: str = None):
        if (from_type is not None and to_type is not None) or \
                (from_type is None and to_type is None):
            raise ValueError('check_link_interdependency: '
                             'supply one of from_type/to_type')
        obj_id = self.converter.decode_special_characters(object_id)
        obj = self.inv.get_by_id(environment=self.args.env, item_id=obj_id)
        if not obj:
            self.inv.log.error(
                'check_link_interdependency: '
                'failed to find object with ID: {}'.format(object_id))
            return
        if 'status' not in obj:
            return
        id_attr = 'source_id' if from_type is None else 'target_id'
        link_type = '{}-{}'.format(
            from_type if from_type is not None else obj['type'],
            to_type if to_type is not None else obj['type'])
        condition = {
            'environment': self.args.env,
            'link_type': link_type,
            id_attr: obj_id
        }
        link = self.inv.find_one(search=condition, collection='links')
        if not link:
            self.inv.log.error('check_link_interdependency: '
                               'failed to find {} link with {}: {}'.format(
                                   link_type, id_attr, obj_id))
            return
        other_id_attr = '{}_id' \
            .format('source' if from_type is not None else 'target')
        other_obj = self.inv.get_by_id(environment=self.args.env,
                                       item_id=link[other_id_attr])
        if not other_obj:
            self.inv.log.error(
                'check_link_interdependency: '
                'failed to find {} with ID: {} (link type: {})'.format(
                    other_id_attr, link[other_id_attr], link_type))
            return
        if 'status' not in other_obj:
            return
        status = 'Warning'
        if obj['status'] == 'OK' and other_obj['status'] == 'OK':
            status = 'OK'
        link['status'] = status
        time_format = MonitoringCheckHandler.TIME_FORMAT
        timestamp1 = obj['status_timestamp']
        t1 = datetime.datetime.strptime(timestamp1, time_format)
        timestamp2 = other_obj['status_timestamp']
        t2 = datetime.datetime.strptime(timestamp2, time_format)
        timestamp = max(t1, t2)
        link['status_timestamp'] = datetime.datetime.strftime(
            timestamp, time_format)
        self.inv.set(link, self.inv.collections['links'])
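
    # Net effect of the code above: the link is marked 'OK' only when both
    # endpoint objects report 'OK' (otherwise 'Warning'), and its timestamp
    # is the later of the two endpoints' status timestamps.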

    def check_link_interdependency(self, object_id: str, object_type: str):
        conf = self.configuration.get_env_config()
        if 'OVS' in conf['mechanism_drivers']:
            if object_type == 'vedge':
                self.check_link_interdependency_for(object_id,
                                                    to_type='host_pnic')
            if object_type == 'host_pnic':
                self.check_link_interdependency_for(object_id,
                                                    from_type='vedge')
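
    # Illustrative flow (object IDs assumed), given the code above:
    #   check_link_interdependency('node-1-vedge', 'vedge')
    #     -> check_link_interdependency_for('node-1-vedge',
    #                                       to_type='host_pnic')
    #   check_link_interdependency('node-1-eth0', 'host_pnic')
    #     -> check_link_interdependency_for('node-1-eth0',
    #                                       from_type='vedge')
    # Both apply only when 'OVS' is among the environment's mechanism_drivers.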

    def process_input(self):
        check_result_full = json.loads(self.input_text)
        check_client = check_result_full['client']
        check_result = check_result_full['check']
        check_result['id'] = check_result_full['id']
        name = check_result['name']
        check_type, object_type, object_id = \
            self.find_object_type_and_id(name)
        if 'environment' in check_client:
            self.args.env = check_client['environment']
        else:
            raise ValueError('Check client should contain environment name')
        self.configuration.use_env(self.args.env)

        check_handler = self.get_handler(check_type, object_type)
        if check_handler:
            check_handler.handle(object_id, check_result)
        self.check_link_interdependency(object_id, object_type)

    def process_check_result(self):
        self.read_input()
        self.process_input()
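
A minimal sketch of the input this class consumes (a Sensu-style check result; all field values here are illustrative):

    {
        "id": "check-id-1",
        "client": {"name": "node-1", "environment": "Mirantis-Liberty"},
        "check": {"name": "host_pnic_eth0", "status": 0, "output": "OK"}
    }

Piped to stdin (or read via --inputfile), Monitor().process_check_result() resolves the check name to ('object', 'host_pnic', 'eth0'), assuming 'host_pnic' is a known object type, and dispatches the result to the matching handler.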
Example #21
class AciAccess(Fetcher):

    RESPONSE_FORMAT = "json"
    cookie_token = None

    def __init__(self):
        super().__init__()
        self.configuration = Configuration()
        self.aci_enabled = self.configuration.get_env_config() \
            .get('aci_enabled', False)
        self.aci_configuration = None
        self.host = None
        if self.aci_enabled:
            self.aci_configuration = self.configuration.get("ACI")
            self.host = self.aci_configuration["host"]

    def get_base_url(self):
        return "https://{}/api".format(self.host)

    @staticmethod
    def get_objects_by_field_names(payload, *field_names):
        """Unwrap an ACI response payload and return a list of field values.

        Parameters
        ----------
        payload: dict
            Full json response payload returned by ACI
        *field_names: Tuple[str]
            Enumeration of fields that are used to traverse the ACI
            "imdata" array (order is important)

        Returns
        -------
        list
            List of unwrapped dictionaries (or primitives)

        Example
        -------
        Given payload:

            {
                "totalCount": "2",
                "imdata": [
                    {"aaa": {"bbb": {"ccc": "value1"}}},
                    {"aaa": {"bbb": {"ccc": "value2"}}}
                ]
            }

        get_objects_by_field_names(payload, "aaa", "bbb") yields:
            [{"ccc": "value1"}, {"ccc": "value2"}]

        get_objects_by_field_names(payload, "aaa", "bbb", "ccc") yields:
            ["value1", "value2"]
        """
        results = payload.get("imdata", [])
        if not results:
            return []

        for field in field_names:
            results = [entry[field] for entry in results]
        return results

    # Merge the auth token cookie into the request cookies
    @staticmethod
    def _insert_token_into_request(cookies):
        return dict(cookies, **AciAccess.cookie_token) \
            if cookies \
            else AciAccess.cookie_token
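
    # Illustrative behavior (token value assumed):
    #   AciAccess.cookie_token = {"APIC-Cookie": "abc123"}
    #   _insert_token_into_request({"foo": "bar"})
    #     -> {"foo": "bar", "APIC-Cookie": "abc123"}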

    @staticmethod
    def _set_token(response):
        tokens = AciAccess.get_objects_by_field_names(
            response.json(), "aaaLogin", "attributes", "token")
        token = tokens[0]

        AciAccess.cookie_token = {"APIC-Cookie": token}

    @aci_config_required()
    def login(self):
        url = "/".join((self.get_base_url(), "aaaLogin.json"))
        payload = {
            "aaaUser": {
                "attributes": {
                    "name": self.aci_configuration["user"],
                    "pwd": self.aci_configuration["pwd"]
                }
            }
        }

        response = requests.post(url, json=payload, verify=False)
        response.raise_for_status()

        AciAccess._set_token(response)

    # Refresh token or login if token has expired
    @aci_config_required()
    def refresh_token(self):
        # First time login
        if not AciAccess.cookie_token:
            self.login()
            return

        url = "/".join((self.get_base_url(), "aaaRefresh.json"))

        response = requests.get(url, verify=False)

        # Login again if the token has expired
        if response.status_code == requests.codes.forbidden:
            self.login()
            return
        # Propagate any other error
        elif response.status_code != requests.codes.ok:
            response.raise_for_status()

        AciAccess._set_token(response)

    @aci_config_required(default={})
    def send_get(self, url, params, headers, cookies):
        self.refresh_token()

        cookies = self._insert_token_into_request(cookies)

        response = requests.get(url, params=params, headers=headers,
                                cookies=cookies, verify=False)
        # Let client handle HTTP errors
        response.raise_for_status()

        return response.json()

    # Search ACI for Managed Objects (MOs) of a specific class
    @aci_config_required(default=[])
    def fetch_objects_by_class(self,
                               class_name: str,
                               params: dict = None,
                               headers: dict = None,
                               cookies: dict = None,
                               response_format: str = RESPONSE_FORMAT):
        url = "/".join((self.get_base_url(),
                        "class", "{cn}.{f}".format(cn=class_name, f=response_format)))

        response_json = self.send_get(url, params, headers, cookies)
        return self.get_objects_by_field_names(response_json, class_name)
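
    # Illustrative usage ('fvTenant' is a standard APIC object class):
    #   aci = AciAccess()
    #   tenants = aci.fetch_objects_by_class("fvTenant")
    #   # -> GET https://<host>/api/class/fvTenant.json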

    # Fetch data for a specific Managed Object (MO)
    @aci_config_required(default=[])
    def fetch_mo_data(self,
                      dn: str,
                      params: dict = None,
                      headers: dict = None,
                      cookies: dict = None,
                      response_format: str = RESPONSE_FORMAT):
        url = "/".join((self.get_base_url(), "mo", "topology",
                        "{dn}.{f}".format(dn=dn, f=response_format)))

        response_json = self.send_get(url, params, headers, cookies)
        return response_json
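
    # Illustrative usage (DN value assumed):
    #   aci.fetch_mo_data("pod-1/node-101")
    #   # -> GET https://<host>/api/mo/topology/pod-1/node-101.json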