Code example #1
    def _add_collectors(self):
        try:

            cf = "/opt/netuitive-agent/conf/netuitive-agent.conf"

            enabled_collectors = []

            if os.path.isfile(cf):
                serverconf = load_server_config(cf)

            c = serverconf['collectors']

            for k, v in c.iteritems():
                if v.get('enabled', False):
                    logging.debug(k + ' is enabled')
                    enabled_collectors.append(k.replace('Collector', ''))

            enabled_collectors.sort()
            collectors = ', '.join(enabled_collectors)

            self.element.add_tag('n.collectors', collectors)

            self.element.add_tag(
                'variant', 'SIMPLE' if c.get('SimpleCollector')
                and str_to_bool(c.get('SimpleCollector').get('enabled')) else
                'FULL')

        except Exception as e:
            logging.error(e)
            pass
Code example #2
    def _add_collectors(self):
        try:

            cf = "/opt/netuitive-agent/conf/netuitive-agent.conf"

            enabled_collectors = []

            if os.path.isfile(cf):
                serverconf = load_server_config(cf)

            c = serverconf['collectors']

            for k, v in c.iteritems():
                if v.get('enabled', False):
                    logging.debug(k + ' is enabled')
                    enabled_collectors.append(k.replace('Collector', ''))

            enabled_collectors.sort()
            collectors = ', '.join(enabled_collectors)

            self.element.add_tag('n.collectors', collectors)

            self.element.add_tag('variant', 'SIMPLE' if c.get('SimpleCollector') and str_to_bool(c.get('SimpleCollector').get('enabled')) else 'FULL')

        except Exception as e:
            logging.error(e)
            pass
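
For reference, here is a minimal sketch of the parsed configuration shape that _add_collectors expects, and the tag value it would derive from it. The dict below is an illustrative assumption; in the real agent it comes from load_server_config reading /opt/netuitive-agent/conf/netuitive-agent.conf.

# Illustrative only: an assumed shape for the parsed 'collectors' section.
serverconf = {
    'collectors': {
        'CPUCollector': {'enabled': True},
        'MemoryCollector': {'enabled': True},
        'NetworkCollector': {'enabled': False},
    }
}

c = serverconf['collectors']
enabled = sorted(k.replace('Collector', '')
                 for k, v in c.items() if v.get('enabled', False))
print(', '.join(enabled))  # -> CPU, Memory
# With no enabled SimpleCollector entry, the 'variant' tag would be 'FULL'.
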
Code example #3
 def disable_collector(self, config, collector):
     """
     disable a collector in its config
     """
     if collector in config['collectors'] and 'enabled' in config[
             'collectors'][collector] and str_to_bool(
                 config['collectors'][collector]['enabled']):
         config['collectors'][collector]['enabled'] = 'False'
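
The str_to_bool helper used in this and the surrounding examples converts configuration values, which are read from the file as strings, into booleans. Its exact implementation is not shown on this page; the version below is an assumed equivalent for illustration, and it shows why plain truthiness would not be enough:

def str_to_bool(value):
    """Assumed stand-in for Diamond's helper: config values arrive as
    strings such as 'True' or 'False', so truthiness alone misleads."""
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', '1', 'yes')

print(bool('False'))         # True  -- any non-empty string is truthy
print(str_to_bool('False'))  # False -- what disable_collector relies on
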
Code example #4
    def __init__(self, config=None):
        """
        initialize Netuitive api and populate agent host metadata
        """

        if not netuitive:
            self.log.error('netuitive import failed. Handler disabled')
            self.enabled = False
            return

        try:
            Handler.__init__(self, config)

            logging.debug("initialize Netuitive handler")

            self.version = self._get_version()
            self.api = netuitive.Client(self.config['url'],
                                        self.config['api_key'], self.version)

            self.element = netuitive.Element(
                location=self.config.get('location'))

            self.batch_size = int(self.config['batch'])

            self.max_backlog_multiplier = int(
                self.config['max_backlog_multiplier'])

            self.trim_backlog_multiplier = int(
                self.config['trim_backlog_multiplier'])

            self._add_sys_meta()
            self._add_aws_meta()
            self._add_docker_meta()
            self._add_azure_meta()
            self._add_config_tags()
            self._add_config_relations()
            self._add_collectors()

            self.flush_time = 0

            try:
                self.config['write_metric_fqns'] = str_to_bool(
                    self.config['write_metric_fqns'])

            except KeyError:
                self.log.warning('write_metric_fqns missing from the config')
                self.config['write_metric_fqns'] = False

            if self.config['write_metric_fqns']:
                self.metric_fqns_path = self.config['metric_fqns_path']
                truncate_fqn_file = open(self.metric_fqns_path, "w")
                truncate_fqn_file.close()

            logging.debug(self.config)

        except Exception as e:
            # Note: the example is cut off at this point; a closing handler of
            # roughly this shape is assumed so the outer try block is complete.
            self.log.error(e)
Code example #5
    def __init__(self, config=None):
        """
        initialize Netuitive api and populate agent host metadata
        """

        if not netuitive:
            self.log.error('netuitive import failed. Handler disabled')
            self.enabled = False
            return

        try:
            Handler.__init__(self, config)

            logging.debug("initialize Netuitive handler")

            self.version = self._get_version()
            self.api = netuitive.Client(self.config['url'], self.config[
                                        'api_key'], self.version)

            self.element = netuitive.Element(
                location=self.config.get('location'))

            self.batch_size = int(self.config['batch'])

            self.max_backlog_multiplier = int(
                self.config['max_backlog_multiplier'])

            self.trim_backlog_multiplier = int(
                self.config['trim_backlog_multiplier'])

            self._add_sys_meta()
            self._add_aws_meta()
            self._add_docker_meta()
            self._add_azure_meta()
            self._add_config_tags()
            self._add_config_relations()
            self._add_collectors()

            self.flush_time = 0

            try:
                self.config['write_metric_fqns'] = str_to_bool(self.config['write_metric_fqns'])

            except KeyError:
                self.log.warning('write_metric_fqns missing from the config')
                self.config['write_metric_fqns'] = False

            if self.config['write_metric_fqns']:
                self.metric_fqns_path = self.config['metric_fqns_path']
                truncate_fqn_file = open(self.metric_fqns_path, "w")
                truncate_fqn_file.close()

            logging.debug(self.config)

        except Exception as e:
            # Note: the example is cut off at this point; a closing handler of
            # roughly this shape is assumed so the outer try block is complete.
            self.log.error(e)
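
Collecting the keys that this constructor reads, a handler configuration would need roughly the entries sketched below. The key names are taken from the code above; every value is a placeholder, and the URL in particular is illustrative rather than a confirmed endpoint.

# Placeholder values; key names taken from the __init__ above.
handler_config = {
    'url': 'https://ingest.example.com',      # illustrative, not a confirmed endpoint
    'api_key': 'YOUR_API_KEY',
    'location': 'us-east-1',                  # optional; read with .get()
    'batch': '100',
    'max_backlog_multiplier': '5',
    'trim_backlog_multiplier': '4',
    'write_metric_fqns': 'False',             # optional; treated as False when missing
    'metric_fqns_path': '/tmp/metric_fqns',   # only consulted when write_metric_fqns is true
}
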
Code example #6
 def manage_base_collectors(self, config):
     """
     manage base collector and its delegated collectors
     so that they won't be enabled at the same time
     """
     if 'BaseCollector' in config['collectors'] and 'enabled' in config[
             'collectors']['BaseCollector'] and str_to_bool(
                 config['collectors']['BaseCollector']['enabled']):
         self.disable_collector(config, 'CPUCollector')
         self.disable_collector(config, 'DiskSpaceCollector')
         self.disable_collector(config, 'DiskUsageCollector')
         self.disable_collector(config, 'LoadAverageCollector')
         self.disable_collector(config, 'MemoryCollector')
         self.disable_collector(config, 'VMStatCollector')
         self.disable_collector(config, 'NetworkCollector')
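
A short walk-through of the effect on an illustrative config dict: when BaseCollector is enabled, the delegated system collectors are switched off in place so the same metrics are not gathered twice, while unrelated collectors are left alone. The name server below is a stand-in for whatever instance these methods are defined on.

# Illustrative only; 'server' stands in for the instance that owns these methods.
config = {
    'collectors': {
        'BaseCollector':   {'enabled': 'True'},
        'CPUCollector':    {'enabled': 'True'},
        'MemoryCollector': {'enabled': 'True'},
        'NginxCollector':  {'enabled': 'True'},   # not delegated, left untouched
    }
}

server.manage_base_collectors(config)

# Afterwards CPUCollector and MemoryCollector have enabled = 'False';
# BaseCollector and NginxCollector still have enabled = 'True'.
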
Code example #7
File: server.py  Project: pombredanne/Diamond-1
    def run(self):
        """
        Load handler and collector classes and then start collectors
        """

        #######################################################################
        # Config
        #######################################################################
        self.config = load_config(self.configfile)

        collectors = load_collectors(self.config['server']['collectors_path'])
        metric_queue_size = int(self.config['server'].get(
            'metric_queue_size', 16384))
        self.metric_queue = self.manager.Queue(maxsize=metric_queue_size)
        self.log.debug('metric_queue_size: %d', metric_queue_size)

        #######################################################################
        # Handlers
        #
        # TODO: Eventually move each handler to its own process space?
        #######################################################################

        if 'handlers_path' in self.config['server']:
            handlers_path = self.config['server']['handlers_path']

            # Make a list if it is not one already
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(',')
                handlers_path = map(str.strip, handlers_path)
                self.config['server']['handlers_path'] = handlers_path

            load_include_path(handlers_path)

        if 'handlers' not in self.config['server']:
            self.log.critical('handlers missing from server section in config')
            sys.exit(1)

        handlers = self.config['server'].get('handlers')
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if 'diamond.handler.queue.QueueHandler' in handlers:
            handlers.remove('diamond.handler.queue.QueueHandler')

        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class('diamond.handler.queue.QueueHandler',
                                          Handler)

        self.handler_queue = QueueHandler(config=self.config,
                                          queue=self.metric_queue,
                                          log=self.log)

        handlers_process = multiprocessing.Process(
            name="Handlers",
            target=handler_process,
            args=(self.handlers, self.metric_queue, self.log),
        )

        handlers_process.daemon = True
        handlers_process.start()

        #######################################################################
        # Signals
        #######################################################################

        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, signal_to_exception)

        #######################################################################

        while True:
            try:
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################

                running_collectors = []
                for collector, config in self.config['collectors'].iteritems():
                    if config.get('enabled', False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if 'Collector' not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                collector_classes = dict((cls.__name__.split('.')[-1], cls)
                                         for cls in collectors.values())

                load_delay = self.config['server'].get('collectors_load_delay',
                                                       1.0)
                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if 'Collector' not in collector_name:
                        continue

                    if collector_name not in collector_classes:
                        self.log.error('Can not find collector %s',
                                       collector_name)
                        continue

                    collector = initialize_collector(
                        collector_classes[collector_name],
                        name=process_name,
                        configfile=self.configfile,
                        handlers=[self.handler_queue])

                    if collector is None:
                        self.log.error('Failed to load collector %s',
                                       process_name)
                        continue

                    # Splay the loads
                    time.sleep(float(load_delay))

                    process = multiprocessing.Process(name=process_name,
                                                      target=collector_process,
                                                      args=(collector,
                                                            self.metric_queue,
                                                            self.log))
                    process.daemon = True
                    process.start()

                if not handlers_process.is_alive():
                    self.log.error('Handlers process exited')
                    if (str_to_bool(self.config['server'].get(
                            'abort_on_handlers_process_exit', 'False'))):
                        raise Exception('Handlers process exited')

                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                # ignore further SIGHUPs for now
                original_sighup_handler = signal.getsignal(signal.SIGHUP)
                signal.signal(signal.SIGHUP, signal.SIG_IGN)

                self.log.info('Reloading state due to HUP')
                self.config = load_config(self.configfile)
                collectors = load_collectors(
                    self.config['server']['collectors_path'])
                # restore SIGHUP handler
                signal.signal(signal.SIGHUP, original_sighup_handler)
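
At its core the loop above performs a set reconciliation: the collectors enabled in the config are compared against the names of the live child processes, and the two set differences decide what to terminate and what to start. A stripped-down, self-contained illustration of that pattern (the names are made up):

running_processes  = {'CPUCollector', 'MemoryCollector', 'Handlers'}   # live children
running_collectors = {'CPUCollector', 'NetworkCollector'}              # enabled in config

# Running but no longer wanted -> terminate (non-collector processes are skipped).
to_stop = {p for p in running_processes - running_collectors if 'Collector' in p}

# Wanted but not yet running -> initialize_collector() and start a new process.
to_start = running_collectors - running_processes

print(to_stop)   # {'MemoryCollector'}
print(to_start)  # {'NetworkCollector'}
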
Code example #8
File: server.py  Project: Affirm/Diamond
    def run(self):
        """
        Load handler and collector classes and then start collectors
        """

        #######################################################################
        # Config
        #######################################################################
        self.config = load_config(self.configfile)

        collectors = load_collectors(self.config['server']['collectors_path'])
        metric_queue_size = int(self.config['server'].get('metric_queue_size',
                                                          16384))
        self.metric_queue = self.manager.Queue(maxsize=metric_queue_size)
        self.log.debug('metric_queue_size: %d', metric_queue_size)

        #######################################################################
        # Handlers
        #
        # TODO: Eventually move each handler to its own process space?
        #######################################################################

        if 'handlers_path' in self.config['server']:
            handlers_path = self.config['server']['handlers_path']

            # Make a list if it is not one already
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(',')
                handlers_path = map(str.strip, handlers_path)
                self.config['server']['handlers_path'] = handlers_path

            load_include_path(handlers_path)

        if 'handlers' not in self.config['server']:
            self.log.critical('handlers missing from server section in config')
            sys.exit(1)

        handlers = self.config['server'].get('handlers')
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if 'diamond.handler.queue.QueueHandler' in handlers:
            handlers.remove('diamond.handler.queue.QueueHandler')

        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class(
            'diamond.handler.queue.QueueHandler',
            Handler
        )

        self.handler_queue = QueueHandler(
            config=self.config, queue=self.metric_queue, log=self.log)

        handlers_process = multiprocessing.Process(
            name="Handlers",
            target=handler_process,
            args=(self.handlers, self.metric_queue, self.log),
        )

        handlers_process.daemon = True
        handlers_process.start()

        #######################################################################
        # Signals
        #######################################################################

        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, signal_to_exception)

        #######################################################################

        while True:
            try:
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################

                running_collectors = []
                for collector, config in self.config['collectors'].iteritems():
                    if config.get('enabled', False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if 'Collector' not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                collector_classes = dict(
                    (cls.__name__.split('.')[-1], cls)
                    for cls in collectors.values()
                )

                load_delay = self.config['server'].get('collectors_load_delay',
                                                       1.0)
                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if 'Collector' not in collector_name:
                        continue

                    if collector_name not in collector_classes:
                        self.log.error('Can not find collector %s',
                                       collector_name)
                        continue

                    collector = initialize_collector(
                        collector_classes[collector_name],
                        name=process_name,
                        configfile=self.configfile,
                        handlers=[self.handler_queue])

                    if collector is None:
                        self.log.error('Failed to load collector %s',
                                       process_name)
                        continue

                    # Splay the loads
                    time.sleep(float(load_delay))

                    process = multiprocessing.Process(
                        name=process_name,
                        target=collector_process,
                        args=(collector, self.metric_queue, self.log)
                    )
                    process.daemon = True
                    process.start()

                if not handlers_process.is_alive():
                    self.log.error('Handlers process exited')
                    if (str_to_bool(self.config['server'].get(
                            'abort_on_handlers_process_exit', 'False'))):
                        raise Exception('Handlers process exited')

                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                # ignore further SIGHUPs for now
                original_sighup_handler = signal.getsignal(signal.SIGHUP)
                signal.signal(signal.SIGHUP, signal.SIG_IGN)

                self.log.info('Reloading state due to HUP')
                self.config = load_config(self.configfile)
                collectors = load_collectors(
                    self.config['server']['collectors_path'])
                # restore SIGHUP handler
                signal.signal(signal.SIGHUP, original_sighup_handler)
Code example #9
File: server.py  Project: Netuitive/netuitive-diamond
 def manage_base_collectors(self, config):
     """
     manage base collector and its delegated collectors
     so that they won't be enabled at the same time
     """
     if 'BaseCollector' in config['collectors'] and 'enabled' in config['collectors']['BaseCollector'] and str_to_bool(config['collectors']['BaseCollector']['enabled']):
         self.disable_collector(config, 'CPUCollector')
         self.disable_collector(config, 'DiskSpaceCollector')
         self.disable_collector(config, 'DiskUsageCollector')
         self.disable_collector(config, 'LoadAverageCollector')
         self.disable_collector(config, 'MemoryCollector')
         self.disable_collector(config, 'VMStatCollector')
         self.disable_collector(config, 'NetworkCollector')
Code example #10
File: server.py  Project: Netuitive/netuitive-diamond
 def disable_collector(self, config, collector):
     """
     disable a collector in its config
     """
     if collector in config['collectors'] and 'enabled' in config['collectors'][collector] and str_to_bool(config['collectors'][collector]['enabled']):
         config['collectors'][collector]['enabled'] = 'False'