Example #1
    def build_config(self):
        """Build the config as a Plugins object and return.
        """
        config = monasca_setup.agent_config.Plugins()

        try:
            # do not add another instance if there is already something configured
            if self._get_config():
                log.info("\tEnabling the InfluxDB check for {:s}".format(self.url))
                instance = {'name': 'localhost',
                            'url': self.url,
                            'collect_response_time':
                                self.collect_response_time,
                            }
                if self.username is not None and self.password is not None:
                    instance['username'] = self.username
                    instance['password'] = self.password
                if self.timeout is not None:
                    instance['timeout'] = self.timeout
                if self.whitelist is not None:
                    instance['whitelist'] = self.whitelist
                # extract stats continuously
                config['influxdb'] = {'init_config': None,
                                      'instances': [instance]}
                # watch processes using process plugin
                config.merge(detection.watch_process(['influxd'], component='influxdb', exact_match=False))
            else:
                log.warning('Unable to access the InfluxDB diagnostics URL;'
                            ' the InfluxDB plugin is not configured.'
                            ' Please correct and re-run monasca-setup.')
        except Exception as e:
            log.exception('Error configuring the InfluxDB check plugin: %s', repr(e))

        return config
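For reference, a minimal sketch (not from the source) of the mapping this example assembles when the diagnostics URL is reachable; the values are placeholders, and the process-watch section merged in by detection.watch_process is omitted:

    # Hypothetical illustration of the Plugins content built above.
    # URL, credentials and timeout are placeholders, not detected values.
    influxdb_config = {
        'influxdb': {
            'init_config': None,
            'instances': [{
                'name': 'localhost',
                'url': 'http://127.0.0.1:8086/ping',
                'collect_response_time': True,
                'username': 'monasca',   # only added when both user and password are set
                'password': 'secret',
                'timeout': 5,            # optional, as in the code above
            }],
        },
    }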
Example #2
    def build_config(self):
        """Build the config as a Plugins object and return."""
        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the Monasca api process check")
        config.merge(watch_process(['monasca-api'], 'monitoring', 'monasca-api', exact_match=False))

        log.info("\tEnabling the Monasca api healthcheck")
        config.merge(dropwizard_health_check('monitoring', 'monasca-api', 'http://localhost:8081/healthcheck'))

        log.info("\tEnabling the Monasca api metrics")
        whitelist = [
            {
                "name": "jvm.memory.total.max",
                "path": "gauges/jvm.memory.total.max/value",
                "type": "gauge"},
            {
                "name": "jvm.memory.total.used",
                "path": "gauges/jvm.memory.total.used/value",
                "type": "gauge"},
            {
                "name": "metrics.published",
                "path": "meters/monasca.api.app.MetricService.metrics.published/count",
                "type": "rate"},
            {
                "name": "raw-sql.time.avg",
                "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/mean",
                "type": "gauge"},
            {
                "name": "raw-sql.time.max",
                "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/max",
                "type": "gauge"},
        ]
        config.merge(dropwizard_metrics('monitoring', 'monasca-api', 'http://localhost:8081/metrics', whitelist))
        return config
Example #3
    def build_config(self):
        """Build the config as a Plugins object and return.
            Config includes: consumer_groups (include topics) and kafka_connection_str
        """
        # First watch the process
        self.config.merge(watch_process(['kafka.Kafka'], component='kafka', exact_match=False))
        log.info("\tWatching the kafka process.")

        if not self.dependencies_installed():
            log.warning("Dependencies not installed, skipping Kafka Consumer plugin configuration.")
        elif self.args is not None and len(self.args) > 0:
            kafka_connect_str = self._find_kafka_connection()
            consumers = {}
            service_name = kafka_connect_str
            # Check if the plugin passed in a service name
            # If it did, delete it after use so it doesn't become a consumer group
            if 'service_name' in self.args:
                service_name += '_' + str(self.args.pop('service_name'))
            for key, value in self.args.items():
                value_dict = {topic: [] for topic in value.split('/')}
                consumers[key] = value_dict
            self.config['kafka_consumer'] = {'init_config': None,
                                             'instances': [{'name': service_name,
                                                            'kafka_connect_str': kafka_connect_str,
                                                            'per_partition': False,
                                                            'consumer_groups': consumers}]}
        elif self.zk_url is not None:
            self._detect_consumers()
        return self.config
Example #4
    def build_config(self):
        """Build the config as a Plugins object and return.
            Config includes: consumer_groups (include topics) and kafka_connection_str
        """
        # First watch the process
        self.config.merge(watch_process(["kafka.Kafka"], exact_match=False))
        log.info("\tWatching the kafka process.")

        if not self.dependencies_installed():
            log.warning("Dependencies not installed, skipping Kafka Consumer plugin configuration.")
        elif self.args is not None and len(self.args) > 0:
            kafka_connect_str = self._find_kafka_connection()
            consumers = {}
            for key, value in self.args.items():
                value_dict = {topic: [] for topic in value.split("/")}
                consumers[key] = value_dict
            self.config["kafka_consumer"] = {
                "init_config": None,
                "instances": [
                    {
                        "name": kafka_connect_str,
                        "kafka_connect_str": kafka_connect_str,
                        "per_partition": False,
                        "consumer_groups": consumers,
                    }
                ],
            }
        elif self.zk_url is not None:
            self._detect_consumers()
        return self.config
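A short, self-contained sketch of how the detection args become the consumer_groups mapping built above; the group and topic names are hypothetical, and the '/'-split mirrors the dict comprehension in the example:

    # Hypothetical args: consumer group name -> '/'-separated list of topics.
    args = {'1_metrics': 'metrics',
            'thresh-event': 'events/alarm-state-transitions'}

    consumers = {}
    for key, value in args.items():
        # Each topic becomes a key mapped to an empty partition list.
        consumers[key] = {topic: [] for topic in value.split('/')}

    # consumers == {'1_metrics': {'metrics': []},
    #               'thresh-event': {'events': [], 'alarm-state-transitions': []}}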
Example #5
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca Vertica check")
     config = monasca_setup.agent_config.Plugins()
     for process in ['vertica', 'spread']:
         config.merge(watch_process([process], 'monitoring', process,
                                    exact_match=False))
     return config
Example #6
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tWatching the mon-thresh process.")
     config = monasca_setup.agent_config.Plugins()
     for process in ['backtype.storm.daemon.nimbus', 'backtype.storm.daemon.supervisor', 'backtype.storm.daemon.worker']:
         if find_process_cmdline(process) is not None:
             config.merge(watch_process([process], 'monitoring', 'storm', exact_match=False))
     return config
Example #7
    def _monitor_process(self):
        dimensions = {}
        if self.args and self.args.get(self.INFLUXDB_NODE_ARG_NAME):
            dimensions.update({self.INFLUXDB_NODE_ARG_NAME: self.args.get(self.INFLUXDB_NODE_ARG_NAME)})

        return detection.watch_process([self.PROC_NAME],
                                       service='influxdb',
                                       component='influxdb',
                                       exact_match=False,
                                       dimensions=dimensions)
Example #8
    def build_config(self):
        """Build the config as a Plugins object and return."""
        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the Monasca api process check")
        config.merge(watch_process(['monasca-api'], 'monitoring', 'monasca-api', exact_match=False))

        # configure DropWizard metrics only for Java API
        if self._admin_port:
            log.info("\tEnabling the Monasca api healthcheck")
            metrics_url = 'http://localhost:{}/healthcheck'.format(self._admin_port)
            config.merge(dropwizard_health_check('monitoring', 'monasca-api', metrics_url))

            log.info("\tEnabling the Monasca api metrics")
            whitelist = [
                {
                    "name": "jvm.memory.total.max",
                    "path": "gauges/jvm.memory.total.max/value",
                    "type": "gauge"
                },
                {
                    "name": "jvm.memory.total.used",
                    "path": "gauges/jvm.memory.total.used/value",
                    "type": "gauge"
                },
                {
                    "name": "metrics.published",
                    "path": "meters/monasca.api.app.MetricService.metrics.published/count",
                    "type": "rate"
                }
            ]

            if not self._is_hibernate_on():
                # if hibernate is not used, it is mysql with DBI
                # for that case having below entries makes sense
                log.debug('MonApi has not enabled Hibernate, adding DBI metrics')
                whitelist.extend([
                    {
                        "name": "raw-sql.time.avg",
                        "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/mean",
                        "type": "gauge"
                    },
                    {
                        "name": "raw-sql.time.max",
                        "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/max",
                        "type": "gauge"
                    }
                ])

            config.merge(dropwizard_metrics('monitoring',
                                            'monasca-api',
                                            metrics_url,
                                            whitelist))

        return config
Example #9
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca Vertica check")
     config = monasca_setup.agent_config.Plugins()
     for process in ['vertica', 'spread']:
         config.merge(
             watch_process([process],
                           'monitoring',
                           process,
                           exact_match=False))
     return config
Example #10
    def build_config(self):
        kibana_config = self._get_config_file()

        try:
            (kibana_host, kibana_port,
             kibana_protocol) = self._read_config(kibana_config)
        except Exception as ex:
            LOG.error('Failed to read configuration at %s' % kibana_config)
            LOG.exception(ex)
            return

        if kibana_protocol == 'https':
            LOG.error('"https" protocol is currently not supported')
            return None

        config = agent_config.Plugins()

        # retrieve user name and set in config
        # if passed in args (note args are optional)
        if (self.args and 'kibana-user' in self.args
                and self.args['kibana-user']):
            process = detection.watch_process_by_username(
                username=self.args['kibana-user'],
                process_name='kibana',
                service='monitoring',
                component='kibana')
        else:
            process = detection.watch_process(['kibana'],
                                              service='monitoring',
                                              component='kibana',
                                              process_name='kibana')

        config.merge(process)

        kibana_url = '%s://%s:%d' % (kibana_protocol, kibana_host, kibana_port)

        if not self._has_metrics_support(kibana_url):
            LOG.warning('Running kibana does not support metrics, skipping...')
            return None
        else:
            metrics = self._get_all_metrics(kibana_url)
            config['kibana'] = {
                'init_config': {
                    'url': '%s/%s' % (kibana_url, _API_STATUS),
                },
                'instances': [{
                    "name": kibana_url,
                    'metrics': metrics
                }]
            }

        LOG.info('\tWatching the kibana process.')

        return config
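A minimal sketch of the kibana section assembled above, using placeholder values; _API_STATUS is a module-level constant that is not shown in the snippet, so its value here is an assumption:

    kibana_url = 'http://localhost:5601'   # placeholder host/port
    _API_STATUS = 'api/status'             # assumed value of the module constant

    kibana_section = {
        'kibana': {
            'init_config': {'url': '%s/%s' % (kibana_url, _API_STATUS)},
            'instances': [{'name': kibana_url,
                           'metrics': ['heap_total', 'heap_used']}],  # placeholder metric names
        }
    }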
Example #11
    def _monitor_process(self):
        dimensions = {}
        if self.args and self.args.get(self.INFLUXDB_NODE_ARG_NAME):
            dimensions.update(
                {self.INFLUXDB_NODE_ARG_NAME: self.args.get(self.INFLUXDB_NODE_ARG_NAME)})

        return detection.watch_process([self.PROC_NAME],
                                       service='influxdb',
                                       component='influxdb',
                                       exact_match=False,
                                       dimensions=dimensions)
Example #12
    def _monitor_process(self):
        LOG.info("\tMonitoring the influxdb-relay process")

        dimensions = {}
        if self.args and self.args.get(self.RELAY_NODE_ARG_NAME):
            dimensions.update({self.RELAY_NODE_ARG_NAME: self.args.get(self.RELAY_NODE_ARG_NAME)})

        return detection.watch_process([self.PROC_NAME],
                                       service='influxdb',
                                       component='influxdb-relay',
                                       exact_match=False,
                                       dimensions=dimensions)
Example #13
    def _monitor_process(self):
        LOG.info("\tMonitoring the influxdb-relay process")

        dimensions = {}
        if self.args and self.args.get(self.RELAY_NODE_ARG_NAME):
            dimensions.update({self.RELAY_NODE_ARG_NAME: self.args.get(self.RELAY_NODE_ARG_NAME)})

        return detection.watch_process([self.PROC_NAME],
                                       service='influxdb',
                                       component='influxdb-relay',
                                       exact_match=False,
                                       dimensions=dimensions)
Example #14
    def build_config(self):
        """Build the config as a Plugins object and return."""
        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the Monasca api process check")
        config.merge(watch_process(['monasca-api'], 'monitoring', 'monasca-api', exact_match=False))

        log.info("\tEnabling the Monasca api healthcheck")
        config.merge(dropwizard_health_check('monitoring', 'monasca-api', 'http://localhost:8081/healthcheck'))

        log.info("\tEnabling the Monasca api metrics")
        whitelist = [
            {
                "name": "jvm.memory.total.max",
                "path": "gauges/jvm.memory.total.max/value",
                "type": "gauge"
            },
            {
                "name": "jvm.memory.total.used",
                "path": "gauges/jvm.memory.total.used/value",
                "type": "gauge"
            },
            {
                "name": "metrics.published",
                "path": "meters/monasca.api.app.MetricService.metrics.published/count",
                "type": "rate"
            }
        ]

        if not self._is_hibernate_on():
            # if hibernate is not used, it is mysql with DBI
            # for that case having below entries makes sense
            log.debug('MonApi has not enabled Hibernate, adding DBI metrics')
            whitelist.extend([
                {
                    "name": "raw-sql.time.avg",
                    "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/mean",
                    "type": "gauge"
                },
                {
                    "name": "raw-sql.time.max",
                    "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/max",
                    "type": "gauge"
                }
            ])

        config.merge(dropwizard_metrics('monitoring',
                                        'monasca-api',
                                        'http://localhost:8081/metrics',
                                        whitelist))

        return config
Example #15
    def build_config(self):
        """Build the config as a Plugins object and return.
            Config includes: consumer_groups (include topics) and kafka_connection_str
        """
        # First watch the process
        self.config.merge(watch_process(['kafka.Kafka'], exact_match=False))
        log.info("\tWatching the kafka process.")

        if self.dependencies_installed() and self.zk_url is not None:
            self._detect_consumers()
        else:
            log.warning("Dependencies not installed, skipping plugin configuration.")
        return self.config
Example #16
    def build_config(self):
        """Build the config as a Plugins object and return.
            Config includes: consumer_groups (include topics) and kafka_connection_str
        """
        # First watch the process
        self.config.merge(watch_process(['kafka.Kafka'], exact_match=False))
        log.info("\tWatching the kafka process.")

        if self.dependencies_installed() and self.zk_url is not None:
            self._detect_consumers()
        else:
            log.warning(
                "Dependencies not installed, skipping plugin configuration.")
        return self.config
Example #17
    def build_config(self):
        """Build the config as a Plugins object and return."""
        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the Monasca api process check")
        config.merge(
            watch_process(['monasca-api'],
                          'monitoring',
                          'monasca-api',
                          exact_match=False))

        log.info("\tEnabling the Monasca api healthcheck")
        config.merge(
            dropwizard_health_check('monitoring', 'monasca-api',
                                    'http://localhost:8081/healthcheck'))

        log.info("\tEnabling the Monasca api metrics")
        whitelist = [
            {
                "name": "jvm.memory.total.max",
                "path": "gauges/jvm.memory.total.max/value",
                "type": "gauge"
            },
            {
                "name": "jvm.memory.total.used",
                "path": "gauges/jvm.memory.total.used/value",
                "type": "gauge"
            },
            {
                "name": "metrics.published",
                "path":
                "meters/monasca.api.app.MetricService.metrics.published/count",
                "type": "rate"
            },
            {
                "name": "raw-sql.time.avg",
                "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/mean",
                "type": "gauge"
            },
            {
                "name": "raw-sql.time.max",
                "path": "timers/org.skife.jdbi.v2.DBI.raw-sql/max",
                "type": "gauge"
            },
        ]
        config.merge(
            dropwizard_metrics('monitoring', 'monasca-api',
                               'http://localhost:8081/metrics', whitelist))
        return config
Example #18
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tWatching the mon-thresh process.")
     config = monasca_setup.agent_config.Plugins()
     for process in [
             'backtype.storm.daemon.nimbus',
             'backtype.storm.daemon.supervisor',
             'backtype.storm.daemon.worker'
     ]:
         if find_process_cmdline(process) is not None:
             config.merge(
                 watch_process([process],
                               'monitoring',
                               'apache-storm',
                               exact_match=False))
     return config
Example #19
    def build_config(self):
        """Build the config as a Plugins object and return.

        """
        config = agent_config.Plugins()
        for process in self.found_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(process, self.service_name))
            config.merge(watch_process([process], self.service_name, process, exact_match=False))

        if self.service_api_url and self.search_pattern:
            # Setup an active http_status check on the API
            log.info("\tConfiguring an http_check for the {0} API.".format(self.service_name))
            config.merge(service_api_check(self.service_name + '-api', self.service_api_url,
                                           self.search_pattern, self.service_name))

        return config
Example #20
    def build_config(self):
        """Build the config as a Plugins object and return.

        """
        config = agent_config.Plugins()
        for process in self.found_processes:
            # Watch the service processes
            log.info("\tMonitoring the {0} {1} process.".format(process, self.service_name))
            config.merge(watch_process([process], self.service_name, process, exact_match=False))

        # Skip the http_check if disable_http_check is set
        if self.args is not None:
            args_dict = dict([a.split('=') for a in self.args.split()])
            if args_dict.get('disable_http_check', False):
                self.service_api_url = None
                self.search_pattern = None

        if self.service_api_url and self.search_pattern:
            # Check if there is something listening on the host/port
            parsed = urlparse(self.service_api_url)
            host, port = parsed.netloc.split(':')
            listening = []
            for connection in psutil.net_connections():
                if connection.status == psutil.CONN_LISTEN and connection.laddr[1] == int(port):
                    listening.append(connection.laddr[0])

            if len(listening) > 0:
                # If not listening on localhost or ips then use another local ip
                if host == 'localhost' and len(set(['0.0.0.0', '::', '::1']) & set(listening)) == 0:
                    api_url = listening[0] + ':' + port
                else:
                    api_url = self.service_api_url

                # Setup an active http_status check on the API
                log.info("\tConfiguring an http_check for the {0} API.".format(self.service_name))
                config.merge(service_api_check(self.service_name + '-api', api_url,
                                               self.search_pattern, self.service_name))
            else:
                log.info("\tNo process found listening on {0} ".format(port) +
                         "skipping setup of http_check for the {0} API." .format(self.service_name))

        return config
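The listening-address check in the example above can be read in isolation; here is a hedged, self-contained sketch of the same logic, assuming psutil is installed and the API URL carries an explicit port:

    import psutil
    from urllib.parse import urlparse   # the example itself targets Python 2's urlparse

    def local_addresses_listening_on(service_api_url):
        """Return the local addresses bound to the URL's port (may be empty)."""
        parsed = urlparse(service_api_url)
        _, port = parsed.netloc.split(':')
        return [conn.laddr[0] for conn in psutil.net_connections()
                if conn.status == psutil.CONN_LISTEN and conn.laddr[1] == int(port)]

    # e.g. local_addresses_listening_on('http://localhost:8070/v2.0') -> ['0.0.0.0']
    # when the API process is bound to all interfaces on port 8070.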
Example #21
    def build_config(self):
        """Build the config as a Plugins object and return.
            Config includes: consumer_groups (include topics) and kafka_connection_str
        """
        # First watch the process
        self.config.merge(
            watch_process(['kafka.Kafka'], 'kafka', exact_match=False))
        log.info("\tWatching the kafka process.")

        if not self.dependencies_installed():
            log.warning(
                "Dependencies not installed, skipping Kafka Consumer plugin configuration."
            )
        elif self.args is not None and len(self.args) > 0:
            kafka_connect_str = self._find_kafka_connection()
            consumers = {}
            service_name = kafka_connect_str
            # Check if the plugin passed in a service name
            # If it did, delete it after use so it doesn't become a consumer group
            if 'service_name' in self.args:
                service_name += '_' + str(self.args.pop('service_name'))
            for key, value in self.args.items():
                value_dict = {topic: [] for topic in value.split('/')}
                consumers[key] = value_dict
            self.config['kafka_consumer'] = {
                'init_config':
                None,
                'instances': [{
                    'name': service_name,
                    'kafka_connect_str': kafka_connect_str,
                    'per_partition': False,
                    'consumer_groups': consumers
                }]
            }
        elif self.zk_url is not None:
            self._detect_consumers()
        return self.config
Example #22
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca InfluxDB check")
     return watch_process(['influxd'], 'monitoring', 'influxd',
                          exact_match=False)
Example #23
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca Notification healthcheck")
     return watch_process(['monasca-notification'], 'monitoring', 'notification', exact_match=False)
Example #24
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca InfluxDB check")
     return watch_process(['influxd'], 'monitoring', 'influxd',
                          exact_match=False)
Example #25
    def build_config(self):
        """Build the config as a Plugins object and return."""
        """Read persister-config.yml file to find the exact numThreads."""
        try:
            with open('/etc/monasca/persister-config.yml', 'r') as config:
                self.persister_config = yaml.safe_load(config)
        except Exception:
            log.exception('Failed parsing /etc/monasca/persister-config.yml')
            self.available = False
            return

        alarm_num_threads = self.persister_config['alarmHistoryConfiguration']['numThreads']
        metric_num_threads = self.persister_config['metricConfiguration']['numThreads']

        database_type = self.persister_config['databaseConfiguration']['databaseType']

        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the Monasca persister process check")
        config.merge(watch_process(['monasca-persister'], 'monitoring', 'monasca-persister', exact_match=False))

        adminConnector = self.persister_config['server']['adminConnectors'][0]
        try:
            admin_endpoint_type = adminConnector['type']
        except Exception:
            admin_endpoint_type = "http"

        try:
            admin_endpoint_port = adminConnector['port']
        except Exception:
            admin_endpoint_port = 8091

        log.info("\tEnabling the Monasca persister healthcheck")
        config.merge(
            dropwizard_health_check(
                'monitoring',
                'monasca-persister',
                '{0}://localhost:{1}/healthcheck'.format(admin_endpoint_type,
                                                         admin_endpoint_port)))

        log.info("\tEnabling the Monasca persister metrics")
        whitelist = [
            {
                "name": "jvm.memory.total.max",
                "path": "gauges/jvm.memory.total.max/value",
                "type": "gauge"},
            {
                "name": "jvm.memory.total.used",
                "path": "gauges/jvm.memory.total.used/value",
                "type": "gauge"}
        ]

        # Generate initial whitelist based on the database type
        if database_type == 'influxdb':
            pass
        elif database_type == 'vertica':
            whitelist.extend([
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-hit-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-hit-meter/count",
                    "type": "rate"},
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-miss-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-miss-meter/count",
                    "type": "rate"},
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-hit-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-hit-meter/count",
                    "type": "rate"},
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-miss-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-miss-meter/count",
                    "type": "rate"},
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-hit-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-hit-meter/count",
                    "type": "rate"},
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-miss-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-miss-meter/count",
                    "type": "rate"},
                {
                    "name": "monasca.persister.repository.vertica.VerticaMetricRepo.measurement-meter",
                    "path": "meters/monasca.persister.repository.vertica.VerticaMetricRepo.measurement-meter/count",
                    "type": "rate"}
            ])
        else:
            log.warning('Failed finding database type in /etc/monasca/persister-config.yml')

        # Dynamic Whitelist
        for idx in range(alarm_num_threads):
            new_thread = {"name": "alarm-state-transitions-added-to-batch-counter[{0}]".format(idx),
                          "path": "counters/monasca.persister.pipeline.event.AlarmStateTransitionHandler[alarm-state-transition-{0}].alarm-state-transitions-added-to-batch-counter/count".format(idx),
                          "type": "rate"
                          }
            whitelist.append(new_thread)

        for idx in range(metric_num_threads):
            new_thread = {"name": "metrics-added-to-batch-counter[{0}]".format(idx),
                          "path": "counters/monasca.persister.pipeline.event.MetricHandler[metric-{0}].metrics-added-to-batch-counter/count".format(idx),
                          "type": "rate"
                          }
            whitelist.append(new_thread)

        config.merge(
            dropwizard_metrics(
                'monitoring',
                'monasca-persister',
                '{0}://localhost:{1}/metrics'.format(admin_endpoint_type,
                                                     admin_endpoint_port),
                whitelist))
        return config
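As a quick check of the dynamic whitelist logic above, a small sketch of what the metric-handler entries evaluate to for a hypothetical numThreads of 2 (the paths follow the format strings in the example):

    metric_num_threads = 2   # hypothetical value read from persister-config.yml

    dynamic_whitelist = [
        {"name": "metrics-added-to-batch-counter[{0}]".format(idx),
         "path": ("counters/monasca.persister.pipeline.event.MetricHandler"
                  "[metric-{0}].metrics-added-to-batch-counter/count").format(idx),
         "type": "rate"}
        for idx in range(metric_num_threads)
    ]
    # dynamic_whitelist[0]["path"] ends with
    # "MetricHandler[metric-0].metrics-added-to-batch-counter/count"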
Example #26
 def _monitor_process(self):
     LOG.info("\tMonitoring the influxdb-relay process")
     return detection.watch_process([self.PROC_NAME],
                                    service='influxdb',
                                    component='influxdb-relay',
                                    exact_match=False)
Example #27
 def _monitor_process(self):
     return detection.watch_process([self.PROC_NAME],
                                    service='influxdb',
                                    component='influxdb',
                                    exact_match=False)
Example #28
    def build_config(self):
        """Build the config as a Plugins object and return."""
        """Read persister-config.yml file to find the exact numThreads."""
        try:
            with open('/etc/monasca/persister-config.yml', 'r') as config:
                self.persister_config = yaml.safe_load(config)
        except Exception:
            log.exception('Failed parsing /etc/monasca/persister-config.yml')
            self.available = False
            return

        alarm_num_threads = self.persister_config['alarmHistoryConfiguration'][
            'numThreads']
        metric_num_threads = self.persister_config['metricConfiguration'][
            'numThreads']

        database_type = self.persister_config['databaseConfiguration'][
            'databaseType']

        config = monasca_setup.agent_config.Plugins()

        log.info("\tEnabling the Monasca persister process check")
        config.merge(
            watch_process(['monasca-persister'],
                          'monitoring',
                          'monasca-persister',
                          exact_match=False))

        log.info("\tEnabling the Monasca persister healthcheck")
        config.merge(
            dropwizard_health_check('monitoring', 'monasca-persister',
                                    'http://localhost:8091/healthcheck'))

        log.info("\tEnabling the Monasca persister metrics")
        whitelist = [{
            "name": "jvm.memory.total.max",
            "path": "gauges/jvm.memory.total.max/value",
            "type": "gauge"
        }, {
            "name": "jvm.memory.total.used",
            "path": "gauges/jvm.memory.total.used/value",
            "type": "gauge"
        }]

        # Generate initial whitelist based on the database type
        if database_type == 'influxdb':
            pass
        elif database_type == 'vertica':
            whitelist.extend([{
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-hit-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-hit-meter/count",
                "type": "rate"
            }, {
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-miss-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-cache-miss-meter/count",
                "type": "rate"
            }, {
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-hit-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-hit-meter/count",
                "type": "rate"
            }, {
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-miss-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.definition-dimension-cache-miss-meter/count",
                "type": "rate"
            }, {
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-hit-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-hit-meter/count",
                "type": "rate"
            }, {
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-miss-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.dimension-cache-miss-meter/count",
                "type": "rate"
            }, {
                "name":
                "monasca.persister.repository.vertica.VerticaMetricRepo.measurement-meter",
                "path":
                "meters/monasca.persister.repository.vertica.VerticaMetricRepo.measurement-meter/count",
                "type": "rate"
            }])
        else:
            log.warning(
                'Failed finding database type in /etc/monasca/persister-config.yml'
            )

        # Dynamic Whitelist
        for idx in range(alarm_num_threads):
            new_thread = {
                "name":
                "alarm-state-transitions-added-to-batch-counter[{0}]".format(
                    idx),
                "path":
                "counters/monasca.persister.pipeline.event.AlarmStateTransitionHandler[alarm-state-transition-{0}].alarm-state-transitions-added-to-batch-counter/count"
                .format(idx),
                "type":
                "rate"
            }
            whitelist.append(new_thread)

        for idx in range(metric_num_threads):
            new_thread = {
                "name":
                "metrics-added-to-batch-counter[{0}]".format(idx),
                "path":
                "counters/monasca.persister.pipeline.event.MetricHandler[metric-{0}].metrics-added-to-batch-counter/count"
                .format(idx),
                "type":
                "rate"
            }
            whitelist.append(new_thread)

        config.merge(
            dropwizard_metrics('monitoring', 'monasca-persister',
                               'http://localhost:8091/metrics', whitelist))
        return config
Example #29
    def build_config(self):
        kibana_config = self._get_config_file()

        try:
            (kibana_host,
             kibana_port,
             kibana_protocol) = self._read_config(kibana_config)
        except Exception as ex:
            LOG.error('Failed to read configuration at %s' % kibana_config)
            LOG.exception(ex)
            return

        if kibana_protocol == 'https':
            LOG.error('"https" protocol is currently not supported')
            return None

        config = agent_config.Plugins()

        # retrieve user name and set in config
        # if passed in args (note args are optional)
        if (self.args and 'kibana-user' in self.args and
                self.args['kibana-user']):
            process = detection.watch_process_by_username(
                username=self.args['kibana-user'],
                process_name='kibana',
                service='monitoring',
                component='kibana'
            )
        else:
            process = detection.watch_process(['kibana'],
                                              service='monitoring',
                                              component='kibana',
                                              process_name='kibana')

        config.merge(process)

        kibana_url = '%s://%s:%d' % (
            kibana_protocol,
            kibana_host,
            kibana_port
        )

        if not self._has_metrics_support(kibana_url):
            LOG.warning('Running kibana does not support metrics, skipping...')
            return None
        else:
            metrics = self._get_all_metrics(kibana_url)
            config['kibana'] = {
                'init_config': {
                    'url': '%s/%s' % (kibana_url, _API_STATUS),
                },
                'instances': [
                    {
                        "name": kibana_url,
                        'metrics': metrics
                    }
                ]
            }

        LOG.info('\tWatching the kibana process.')

        return config
Example #30
 def build_config(self):
     """Build the config as a Plugins object and return."""
     log.info("\tEnabling the Monasca Notification healthcheck")
     return watch_process(['monasca-notification'], 'monitoring', 'monasca-notification', exact_match=False)