Beispiel #1
0
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca InfluxDB check")
     # Watch the influxd daemon as part of the monitoring service.
     return watch_process(
         ['influxd'], 'monitoring', 'influxd', exact_match=False)
Beispiel #2
0
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tWatching the mon-thresh process.")
     config = monasca_setup.agent_config.Plugins()
     storm_daemons = ('storm.daemon.nimbus',
                      'storm.daemon.supervisor',
                      'storm.daemon.worker')
     # Only add a watch for storm daemons actually running on this host.
     for daemon in storm_daemons:
         if find_process_cmdline(daemon) is not None:
             config.merge(watch_process([daemon], 'monitoring',
                                        'apache-storm',
                                        exact_match=False,
                                        detailed=False))
     # The thresholder itself runs under the 'storm' user.
     config.merge(watch_process_by_username('storm', 'monasca-thresh',
                                            'monitoring', 'apache-storm'))
     return config
Beispiel #3
0
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tWatching the mon-thresh process.")
     config = monasca_setup.agent_config.Plugins()
     # Collect the storm daemons that are present, then watch each one.
     running = [proc for proc in ('storm.daemon.nimbus',
                                  'storm.daemon.supervisor',
                                  'storm.daemon.worker')
                if find_process_cmdline(proc) is not None]
     for proc in running:
         config.merge(watch_process([proc], 'monitoring', 'apache-storm',
                                    exact_match=False, detailed=False))
     # Watch the thresholder via its service account.
     config.merge(watch_process_by_username('storm', 'monasca-thresh',
                                            'monitoring', 'apache-storm'))
     return config
Beispiel #4
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Adds a process watch for every entry in self.found_processes and,
        when the service exposes an HTTP API that something is listening
        on locally, an http_check against it.  Side effect: clears
        self.service_api_url and self.search_pattern when the
        'disable_http_check' arg is set.
        """
        config = agent_config.Plugins()
        for process in self.found_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                process, self.service_name))
            config.merge(
                watch_process([process],
                              self.service_name,
                              process,
                              exact_match=False))

        # Skip the http_check if disable_http_check is set
        if self.args is not None and self.args.get('disable_http_check',
                                                   False):
            self.service_api_url = None
            self.search_pattern = None

        if self.service_api_url and self.search_pattern:
            # Check if there is something listening on the host/port
            parsed = urlparse.urlparse(self.service_api_url)
            # NOTE(review): assumes the URL carries an explicit ':port' —
            # a netloc without one would raise ValueError here. Confirm
            # callers always include the port.
            host, port = parsed.netloc.split(':')
            listening = []
            # Collect every local address with a LISTEN socket on that port.
            for connection in psutil.net_connections():
                if connection.status == psutil.CONN_LISTEN and connection.laddr[
                        1] == int(port):
                    listening.append(connection.laddr[0])

            if len(listening) > 0:
                # If not listening on localhost or ips then use another local ip
                if host == 'localhost' and len(
                        set(['127.0.0.1', '0.0.0.0', '::', '::1'])
                        & set(listening)) == 0:
                    # Rebuild the URL with the first listening address;
                    # index 1 of the 6-tuple parse result is the netloc.
                    new_url = list(parsed)
                    new_url[1] = listening[0] + ':' + port
                    api_url = urlparse.urlunparse(new_url)
                else:
                    api_url = self.service_api_url

                # Setup an active http_status check on the API
                log.info("\tConfiguring an http_check for the {0} API.".format(
                    self.service_name))
                config.merge(
                    service_api_check(self.service_name + '-api', api_url,
                                      self.search_pattern, self.service_name))
            else:
                # Nothing listening: skip the http_check but keep the
                # process watches already merged above.
                log.info("\tNo process found listening on {0} ".format(port) +
                         "skipping setup of http_check for the {0} API.".
                         format(self.service_name))

        return config
Beispiel #5
0
    def build_config(self):
        """Build the config as a Plugins object and return."""
        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the monasca-api process check")
        # Process watch for the API service itself.
        config.merge(watch_process(search_strings=['monasca-api'],
                                   service='monitoring',
                                   component='monasca-api',
                                   exact_match=False))

        # The implementation helper may contribute extra plugin config.
        extra = self._impl_helper.build_config()
        if extra:
            config.merge(extra)

        return config
Beispiel #6
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Discovers one cluster per <cluster_name>.conf file in the ceph
        config directory, watches every expected daemon process and
        configures the ceph plugin for each cluster.
        """
        config = agent_config.Plugins()

        # There may be multiple clusters; build a dict per cluster with
        # its name and config file path.
        clusters = []
        if os.path.exists(self.ceph_config_dir):
            conf_names = [name for name in os.listdir(self.ceph_config_dir)
                          if name.endswith('.conf')]
            if not conf_names:
                return config
            for name in conf_names:
                clusters.append({
                    'cluster_name': name[:-5],
                    'config_file': os.path.join(self.ceph_config_dir, name),
                })

        # Expected daemon processes for every ceph service type.
        expected_processes = []
        for daemon_type in ('mon', 'osd', 'mds', 'radosgw'):
            expected_processes.extend(
                self._service_config(clusters, daemon_type))

        for process in expected_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                process['name'], self.service_name))
            config.merge(watch_process(search_strings=process['search_string'],
                                       service=self.service_name,
                                       component=process['type'],
                                       process_name=process['name'],
                                       exact_match=False))

        # Configure ceph plugin
        instances = []
        for cluster in clusters:
            name = cluster['cluster_name']
            log.info("\tMonitoring ceph cluster: '{0}'.".format(name))
            instances.append({'cluster_name': name})
        config['ceph'] = {'init_config': None, 'instances': instances}
        return config
Beispiel #7
0
    def build_config(self):
        """Build the config as a Plugins object and return."""
        plugins = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the monasca-api process check")
        # Watch the monasca-api process under the monitoring service.
        api_watch = watch_process(search_strings=['monasca-api'],
                                  service='monitoring',
                                  component='monasca-api',
                                  exact_match=False)
        plugins.merge(api_watch)

        # Merge any additional config from the implementation helper.
        impl_config = self._impl_helper.build_config()
        if impl_config:
            plugins.merge(impl_config)

        return plugins
Beispiel #8
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        One cluster dict is built per <cluster_name>.conf file; a process
        watch is merged for every expected daemon.
        """
        config = agent_config.Plugins()

        # There may be multiple clusters, and we construct a list of dicts
        # containing cluster_name and config_file for each cluster
        clusters = []
        if os.path.exists(self.ceph_config_dir):
            conf_files = [entry for entry in os.listdir(self.ceph_config_dir)
                          if entry.endswith('.conf')]
            if not conf_files:
                return config
            clusters = [
                {'cluster_name': entry[:-5],
                 'config_file': os.path.join(self.ceph_config_dir, entry)}
                for entry in conf_files
            ]

        expected_processes = []
        expected_processes.extend(self._service_config(clusters, 'mon'))
        expected_processes.extend(self._service_config(clusters, 'osd'))
        expected_processes.extend(self._service_config(clusters, 'mds'))
        # RADOS Gateway is little different from other ceph-daemons hence
        # the process definition is handled differently
        expected_processes.extend(self._radosgw_config(clusters))

        for proc in expected_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                proc['name'], self.service_name))
            config.merge(watch_process(search_strings=proc['search_string'],
                                       service=self.service_name,
                                       component=proc['type'],
                                       process_name=proc['name'],
                                       exact_match=False))

        return config
Beispiel #9
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Single-cluster variant: the first <cluster_name>.conf found in the
        ceph config directory determines the cluster; otherwise defaults
        are used.
        """
        config = agent_config.Plugins()

        # Defaults used when the config directory does not exist.
        cluster_name = 'ceph'
        config_file = '/etc/ceph/ceph.conf'

        if os.path.exists(self.ceph_config_dir):
            conf_files = [name for name in os.listdir(self.ceph_config_dir)
                          if name.endswith('.conf')]
            if not conf_files:
                return config
            config_file = os.path.join(self.ceph_config_dir, conf_files[0])
            cluster_name = conf_files[0][:-5]

        expected_processes = []
        for svc in ('mon', 'osd', 'mds'):
            expected_processes.extend(self._service_config(cluster_name, svc))
        # RADOS Gateway is little different from other ceph-daemons hence
        # the process definition is handled differently
        expected_processes.extend(
            self._radosgw_config(cluster_name, config_file))

        for proc in expected_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                proc['name'], self.service_name))
            config.merge(watch_process(search_strings=proc['search_string'],
                                       service=self.service_name,
                                       component=proc['type'],
                                       process_name=proc['name'],
                                       exact_match=False))

        return config
Beispiel #10
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Multi-cluster variant: every <cluster_name>.conf file in the ceph
        config directory marks one cluster.
        """
        config = agent_config.Plugins()

        clusters = []
        if os.path.exists(self.ceph_config_dir):
            found = [f for f in os.listdir(self.ceph_config_dir)
                     if f.endswith('.conf')]
            if not found:
                return config
            for conf in found:
                clusters.append({
                    'cluster_name': conf[:-5],
                    'config_file': os.path.join(self.ceph_config_dir, conf),
                })

        expected = []
        for svc_type in ('mon', 'osd', 'mds'):
            expected.extend(self._service_config(clusters, svc_type))
        # RADOS Gateway is little different from other ceph-daemons hence
        # the process definition is handled differently
        expected.extend(self._radosgw_config(clusters))

        for proc in expected:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                proc['name'], self.service_name))
            config.merge(watch_process(search_strings=proc['search_string'],
                                       service=self.service_name,
                                       component=proc['type'],
                                       process_name=proc['name'],
                                       exact_match=False))

        return config
    def build_config(self):
        """Build the config as a Plugins object and return.

        Watches every found process and, when the API URL and search
        pattern are set and something is listening on the API port locally,
        adds an http_check.  Side effect: clears self.service_api_url and
        self.search_pattern when 'disable_http_check' is set in args.
        """
        config = agent_config.Plugins()
        for process in self.found_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(process, self.service_name))
            config.merge(watch_process([process], self.service_name, process, exact_match=False))

        # Skip the http_check if disable_http_check is set
        if self.args is not None and self.args.get('disable_http_check', False):
            self.service_api_url = None
            self.search_pattern = None

        if self.service_api_url and self.search_pattern:
            # Check if there is something listening on the host/port
            parsed = urlparse.urlparse(self.service_api_url)
            # NOTE(review): assumes an explicit ':port' in the URL; a netloc
            # without one raises ValueError here — confirm with callers.
            host, port = parsed.netloc.split(':')
            listening = []
            # Gather every local address with a LISTEN socket on that port.
            for connection in psutil.net_connections():
                if connection.status == psutil.CONN_LISTEN and connection.laddr[1] == int(port):
                    listening.append(connection.laddr[0])

            if len(listening) > 0:
                # If not listening on localhost or ips then use another local ip
                if host == 'localhost' and len(set(['127.0.0.1', '0.0.0.0', '::', '::1']) & set(listening)) == 0:
                    # Swap the netloc (index 1 of the parse 6-tuple) for the
                    # first listening address, keeping the original port.
                    new_url = list(parsed)
                    new_url[1] = listening[0] + ':' + port
                    api_url = urlparse.urlunparse(new_url)
                else:
                    api_url = self.service_api_url

                # Setup an active http_status check on the API
                log.info("\tConfiguring an http_check for the {0} API.".format(self.service_name))
                config.merge(service_api_check(self.service_name + '-api', api_url,
                                               self.search_pattern, self.service_name))
            else:
                # No listener found: skip the http_check, keep process watches.
                log.info("\tNo process found listening on {0} ".format(port) +
                         "skipping setup of http_check for the {0} API." .format(self.service_name))

        return config
Beispiel #12
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Uses the first <cluster_name>.conf found in the ceph config
        directory, falling back to the default cluster when the directory
        does not exist.
        """
        config = agent_config.Plugins()

        # Defaults used when no config directory is present.
        cluster_name = 'ceph'
        config_file = '/etc/ceph/ceph.conf'

        if os.path.exists(self.ceph_config_dir):
            conf_names = [entry for entry in os.listdir(self.ceph_config_dir)
                          if entry.endswith('.conf')]
            if not conf_names:
                return config
            first_conf = conf_names[0]
            config_file = os.path.join(self.ceph_config_dir, first_conf)
            cluster_name = first_conf[:-5]

        expected_processes = []
        expected_processes.extend(self._service_config(cluster_name, 'mon'))
        expected_processes.extend(self._service_config(cluster_name, 'osd'))
        expected_processes.extend(self._service_config(cluster_name, 'mds'))
        # RADOS Gateway is little different from other ceph-daemons hence
        # the process definition is handled differently
        expected_processes.extend(
            self._radosgw_config(cluster_name, config_file))

        for daemon in expected_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                daemon['name'], self.service_name))
            config.merge(watch_process(search_strings=daemon['search_string'],
                                       service=self.service_name,
                                       component=daemon['type'],
                                       process_name=daemon['name'],
                                       exact_match=False))

        return config
Beispiel #13
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Merges, in order: process watches by name, a process watch by
        username, file-size watches, directory-size watches and finally a
        Keystone-backed http_check against the service API.  Side effect:
        clears self.service_api_url and self.search_pattern when the
        'disable_http_check' arg is set.
        """
        config = agent_config.Plugins()
        if self.found_processes:
            log.info("\tMonitoring by process_name(s): {0} "
                     "for service: {1}.".format(",".join(self.found_processes),
                                                self.service_name))
            for process in self.found_processes:
                # Watch the service processes
                # Fall back to the process name when no component name is set.
                component_name = self.component_name if self.component_name else process
                config.merge(
                    watch_process(search_strings=[process],
                                  service=self.service_name,
                                  component=component_name,
                                  exact_match=False))

        if self.process_username:
            log.info("\tMonitoring by process_username: {0} for "
                     "service: {1}.".format(self.process_username,
                                            self.service_name))
            config.merge(
                watch_process_by_username(username=self.process_username,
                                          process_name=self.component_name,
                                          service=self.service_name,
                                          component=self.component_name))
        if self.file_dirs_names:
            for file_dir_name in self.file_dirs_names:
                # Watch file size
                # Each entry is (directory, [file_names]) with an optional
                # third element: a recursive flag (defaults to False).
                file_dir = file_dir_name[0]
                file_names = file_dir_name[1]
                if len(file_dir_name) == 3:
                    file_recursive = file_dir_name[2]
                else:
                    file_recursive = False
                # ['*'] is the wildcard meaning "all files in the directory".
                if file_names == ['*']:
                    log.info("\tMonitoring the size of all the files in the "
                             "directory {0}.".format(file_dir))
                else:
                    log.info("\tMonitoring the size of files {0} in the "
                             "directory {1}.".format(
                                 ", ".join(str(name) for name in file_names),
                                 file_dir))
                config.merge(
                    watch_file_size(directory_name=file_dir,
                                    file_names=file_names,
                                    file_recursive=file_recursive,
                                    service=self.service_name,
                                    component=self.component_name))

        if self.directory_names:
            for dir_name in self.directory_names:
                log.info(
                    "\tMonitoring the size of directory {0}.".format(dir_name))
                config.merge(
                    watch_directory(directory_name=dir_name,
                                    service=self.service_name,
                                    component=self.component_name))

        # Skip the http_check if disable_http_check is set
        if self.args is not None and self.args.get('disable_http_check',
                                                   False):
            self.service_api_url = None
            self.search_pattern = None

        if self.service_api_url and self.search_pattern:
            # Check if there is something listening on the host/port
            parsed = urllib.parse.urlparse(self.service_api_url)
            # NOTE(review): assumes an explicit ':port' in the URL; a netloc
            # without one raises ValueError here — confirm with callers.
            host, port = parsed.netloc.split(':')
            listening = find_addrs_listening_on_port(port)

            if len(listening) > 0:
                # If not listening on localhost or ips then use another local ip
                if host == 'localhost' and len(
                        set(['127.0.0.1', '0.0.0.0', '::', '::1'])
                        & set(listening)) == 0:
                    # Rebuild the URL with the first listening address;
                    # index 1 of the parse 6-tuple is the netloc.
                    new_url = list(parsed)
                    new_url[1] = listening[0] + ':' + port
                    api_url = urllib.parse.urlunparse(new_url)
                else:
                    api_url = self.service_api_url

                # Setup an active http_status check on the API
                log.info("\tConfiguring an http_check for the {0} API.".format(
                    self.service_name))
                config.merge(
                    service_api_check(name=self.service_name + '-api',
                                      url=api_url,
                                      pattern=self.search_pattern,
                                      use_keystone=True,
                                      service=self.service_name,
                                      component=self.component_name))
            else:
                # No listener found: skip the http_check but keep everything
                # merged so far.
                log.info("\tNo process found listening on {0} ".format(port) +
                         "skipping setup of http_check for the {0} API.".
                         format(self.service_name))

        return config
Beispiel #14
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Merges, in order: process watches by name, a process watch by
        username, file-size watches, directory-size watches and finally a
        Keystone-backed http_check against the service API.  Side effect:
        clears self.service_api_url and self.search_pattern when the
        'disable_http_check' arg is set.
        """
        config = agent_config.Plugins()
        if self.found_processes:
            log.info("\tMonitoring by process_name(s): {0} "
                     "for service: {1}.".format(",".join(self.found_processes), self.service_name))
            for process in self.found_processes:
                # Watch the service processes
                # Fall back to the process name when no component name is set.
                component_name = self.component_name if self.component_name else process
                config.merge(watch_process(search_strings=[process], service=self.service_name,
                                           component=component_name, exact_match=False))

        if self.process_username:
            log.info("\tMonitoring by process_username: {0} for "
                     "service: {1}.".format(self.process_username, self.service_name))
            config.merge(watch_process_by_username(username=self.process_username,
                                                   process_name=self.component_name,
                                                   service=self.service_name,
                                                   component=self.component_name))
        if self.file_dirs_names:
            for file_dir_name in self.file_dirs_names:
                # Watch file size
                # Each entry is (directory, [file_names]) with an optional
                # third element: a recursive flag (defaults to False).
                file_dir = file_dir_name[0]
                file_names = file_dir_name[1]
                if len(file_dir_name) == 3:
                    file_recursive = file_dir_name[2]
                else:
                    file_recursive = False
                # ['*'] is the wildcard meaning "all files in the directory".
                if file_names == ['*']:
                    log.info("\tMonitoring the size of all the files in the "
                             "directory {0}.".format(file_dir))
                else:
                    log.info("\tMonitoring the size of files {0} in the "
                             "directory {1}.".format(", ".join(str(name) for name in file_names), file_dir))
                config.merge(watch_file_size(directory_name=file_dir, file_names=file_names,
                                             file_recursive=file_recursive, service=self.service_name,
                                             component=self.component_name))

        if self.directory_names:
            for dir_name in self.directory_names:
                log.info("\tMonitoring the size of directory {0}.".format(
                    dir_name))
                config.merge(watch_directory(directory_name=dir_name, service=self.service_name, component=self.component_name))

        # Skip the http_check if disable_http_check is set
        if self.args is not None and self.args.get('disable_http_check', False):
            self.service_api_url = None
            self.search_pattern = None

        if self.service_api_url and self.search_pattern:
            # Check if there is something listening on the host/port
            parsed = urlparse.urlparse(self.service_api_url)
            # NOTE(review): assumes an explicit ':port' in the URL; a netloc
            # without one raises ValueError here — confirm with callers.
            host, port = parsed.netloc.split(':')
            listening = find_addrs_listening_on_port(port)

            if len(listening) > 0:
                # If not listening on localhost or ips then use another local ip
                if host == 'localhost' and len(set(['127.0.0.1', '0.0.0.0', '::', '::1']) & set(listening)) == 0:
                    # Rebuild the URL with the first listening address;
                    # index 1 of the parse 6-tuple is the netloc.
                    new_url = list(parsed)
                    new_url[1] = listening[0] + ':' + port
                    api_url = urlparse.urlunparse(new_url)
                else:
                    api_url = self.service_api_url

                # Setup an active http_status check on the API
                log.info("\tConfiguring an http_check for the {0} API.".format(self.service_name))
                config.merge(service_api_check(name=self.service_name + '-api',
                                               url=api_url,
                                               pattern=self.search_pattern,
                                               use_keystone=True,
                                               service=self.service_name,
                                               component=self.component_name))
            else:
                # No listener found: skip the http_check but keep everything
                # merged so far.
                log.info("\tNo process found listening on {0} ".format(port) +
                         "skipping setup of http_check for the {0} API." .format(self.service_name))

        return config
Beispiel #15
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Discovers one cluster per <cluster_name>.conf file, records whether
        a client admin keyring is installed for it, watches the expected
        daemon processes and configures the ceph plugin per cluster.
        """
        config = agent_config.Plugins()

        # One dict per cluster: name, config file path, and whether the
        # admin keyring is present (required for cluster-wide ceph commands).
        clusters = []
        if os.path.exists(self.ceph_config_dir):
            dir_entries = os.listdir(self.ceph_config_dir)
            conf_files = [f for f in dir_entries if f.endswith('.conf')]
            if not conf_files:
                return config
            for conf in conf_files:
                name = conf[:-5]
                clusters.append({
                    'cluster_name': name,
                    'config_file': os.path.join(self.ceph_config_dir, conf),
                    'admin_key':
                        name + '.client.admin.keyring' in dir_entries,
                })

        expected_processes = []
        for daemon_type in ('mon', 'osd', 'mds', 'radosgw'):
            expected_processes.extend(
                self._service_config(clusters, daemon_type))

        for proc in expected_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                proc['name'], self.service_name))
            config.merge(watch_process(search_strings=proc['search_string'],
                                       service=self.service_name,
                                       component=proc['type'],
                                       process_name=proc['name'],
                                       exact_match=False))

        # Configure ceph plugin
        instances = []
        for cluster in clusters:
            instance = {'cluster_name': cluster['cluster_name']}
            # If there is no client admin key installed for this cluster
            # then we cannot invoke Ceph commands for cluster monitoring.
            # In that case we only monitor the locally active processes.
            if not cluster['admin_key']:
                instance.update({
                    'collect_usage_metrics': False,
                    'collect_stats_metrics': False,
                    'collect_mon_metrics': False,
                    'collect_osd_metrics': False,
                    'collect_pool_metrics': False,
                })
            log.info("\tMonitoring ceph cluster: '{0}'.".format(
                cluster['cluster_name']))
            instances.append(instance)
        config['ceph'] = {'init_config': None, 'instances': instances}
        return config
Beispiel #16
0
    def build_config(self):
        """Build the config as a Plugins object and return.

        Determines the local cluster name, then discovers every OSD/MON/MDS
        daemon instance directory and merges a process watch for each
        daemon found.
        """
        config = agent_config.Plugins()

        # Default cluster name; a <cluster_name>.conf file in the ceph
        # config directory overrides it.
        cluster_name = 'ceph'
        if os.path.exists(self.ceph_config_dir):
            config_files = [f for f in os.listdir(self.ceph_config_dir)
                            if f.endswith('.conf')]
            # Guard: the original indexed config_files[0] unconditionally
            # and raised IndexError when the directory held no .conf file;
            # keep the default cluster name in that case instead.
            if config_files:
                cluster_name = config_files[0][:-5]

        # Daemon instance directories are named <cluster_name>-<id>.
        # For OSDs <id> is a unique numeric index (e.g. ceph-1, ceph-2);
        # for MON and MDS daemons it is alphanumeric and usually the
        # hostname (e.g. ceph-monitor1.dom, ceph-mds1.dom).
        daemon_specs = [
            (self.ceph_osd_path, self.ceph_osd_executable, 'osd'),
            (self.ceph_mon_path, self.ceph_mon_executable, 'mon'),
            (self.ceph_mds_path, self.ceph_mds_executable, 'mds'),
        ]

        expected_processes = []
        for daemon_path, executable, kind in daemon_specs:
            daemons = (os.listdir(daemon_path)
                       if os.path.exists(daemon_path) else [])
            for daemon in daemons:
                daemon_id = daemon.split(cluster_name + '-', 1)[1]
                process_args = ['--cluster %s' % cluster_name,
                                '--id %s' % daemon_id, '-f']
                expected_processes.append({
                    'search_string': self._build_search_string(
                        executable, process_args),
                    'name': '%s-%s.%s' % (cluster_name, kind, daemon_id),
                    'type': 'ceph-%s' % kind,
                })

        for process in expected_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(
                process['name'], self.service_name))
            config.merge(watch_process(search_strings=process['search_string'],
                                       service=self.service_name,
                                       component=process['type'],
                                       process_name=process['name'],
                                       exact_match=False))

        return config