Example #1
    def load_config(self, configfile=None, override_config=None):
        """
        Process a configfile, or reload if previously given one.
        """
        self.config = configobj.ConfigObj()

        # Load in the collector's defaults
        if self.get_default_config() is not None:
            self.config.merge(self.get_default_config())

        if configfile is not None:
            self.configfile = os.path.abspath(configfile)

        if self.configfile is not None:
            config = load_config(self.configfile)

            if 'collectors' in config:
                if 'default' in config['collectors']:
                    self.config.merge(config['collectors']['default'])

                if self.name in config['collectors']:
                    self.config.merge(config['collectors'][self.name])

        if override_config is not None:
            if 'collectors' in override_config:
                if 'default' in override_config['collectors']:
                    self.config.merge(override_config['collectors']['default'])

                if self.name in override_config['collectors']:
                    self.config.merge(override_config['collectors'][self.name])

        self.process_config()
Example #2
    def load_config(self, configfile=None, override_config=None):
        """
        Process a configfile, or reload if previously given one.
        """
        self.config = configobj.ConfigObj()

        # Load in the collector's defaults
        if self.get_default_config() is not None:
            self.config.merge(self.get_default_config())

        if configfile is not None:
            self.configfile = os.path.abspath(configfile)

        if self.configfile is not None:
            config = load_config(self.configfile)

            if "collectors" in config:
                if "default" in config["collectors"]:
                    self.config.merge(config["collectors"]["default"])

                if self.name in config["collectors"]:
                    self.config.merge(config["collectors"][self.name])

        if override_config is not None:
            if "collectors" in override_config:
                if "default" in override_config["collectors"]:
                    self.config.merge(override_config["collectors"]["default"])

                if self.name in override_config["collectors"]:
                    self.config.merge(override_config["collectors"][self.name])

        self.process_config()
Example #3
    def load_config(self, configfile=None, override_config=None):
        """
        Process a configfile, or reload if previously given one.
        """

        self.config = configobj.ConfigObj()

        # Load in the collector's defaults
        if self.get_default_config() is not None:
            self.config.merge(self.get_default_config())

        if configfile is not None:
            self.configfile = os.path.abspath(configfile)

        if self.configfile is not None:
            config = load_config(self.configfile)

            if 'handlers' in config:
                if 'NetuitiveHandler' in config['handlers']:
                    self.merge_config(config['handlers']['NetuitiveHandler'], prefix='netuitive_')

            if 'collectors' in config:
                if 'default' in config['collectors']:
                    self.config.merge(config['collectors']['default'])

                if self.name in config['collectors']:
                    self.config.merge(config['collectors'][self.name])

        if override_config is not None:
            if 'collectors' in override_config:
                if 'default' in override_config['collectors']:
                    self.config.merge(override_config['collectors']['default'])

                if self.name in override_config['collectors']:
                    self.config.merge(override_config['collectors'][self.name])

        self.process_config()
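
The three load_config variants above build the collector's configuration by merging layers in a fixed order: the collector's built-in defaults, the [collectors][default] section of the config file, the collector-specific section, and then the same two sections of override_config. Because configobj's merge lets later values overwrite earlier ones, the override sections take precedence. A minimal sketch of that layering with configobj; the option names and values are invented for illustration:

import configobj

config = configobj.ConfigObj()

# 1) the collector's own defaults (lowest precedence)
config.merge({'enabled': 'False', 'interval': '10'})

# 2) [collectors][default] from the config file
config.merge({'interval': '30'})

# 3) [collectors][<collector name>] from the config file
config.merge({'enabled': 'True'})

# Later merges win: enabled == 'True', interval == '30'.
print(dict(config))
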
Example #4
    def run(self):
        """
        Load handler and collector classes and then start collectors
        """

        ########################################################################
        # Config
        ########################################################################
        self.config = load_config(self.configfile)

        collectors = load_collectors(self.config['server']['collectors_path'])

        ########################################################################
        # Handlers
        #
        # TODO: Eventually move each handler to its own process space?
        ########################################################################

        if 'handlers_path' in self.config['server']:
            handlers_path = self.config['server']['handlers_path']

            # Make a list if not one
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(',')
                handlers_path = map(str.strip, handlers_path)
                self.config['server']['handlers_path'] = handlers_path

            load_include_path(handlers_path)

        if 'handlers' not in self.config['server']:
            self.log.critical('handlers missing from server section in config')
            sys.exit(1)

        handlers = self.config['server'].get('handlers')
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if 'diamond.handler.queue.QueueHandler' in handlers:
            handlers.remove('diamond.handler.queue.QueueHandler')

        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class('diamond.handler.queue.QueueHandler',
                                          Handler)

        self.handler_queue = QueueHandler(config=self.config,
                                          queue=self.metric_queue,
                                          log=self.log)

        process = multiprocessing.Process(
            name="Handlers",
            target=handler_process,
            args=(self.handlers, self.metric_queue, self.log),
        )

        process.daemon = True
        process.start()

        ########################################################################
        # Signals
        ########################################################################

        signal.signal(signal.SIGHUP, signal_to_exception)

        ########################################################################

        while True:
            try:
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################

                running_collectors = []
                for collector, config in self.config['collectors'].iteritems():
                    if config.get('enabled', False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if 'Collector' not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if 'Collector' not in collector_name:
                        continue

                    # Find the class
                    for cls in collectors.values():
                        cls_name = cls.__name__.split('.')[-1]
                        if cls_name == collector_name:
                            break
                    if cls_name != collector_name:
                        self.log.error('Can not find collector %s',
                                       collector_name)
                        continue

                    collector = initialize_collector(
                        cls,
                        name=process_name,
                        configfile=self.configfile,
                        handlers=[self.handler_queue])

                    if collector is None:
                        self.log.error('Failed to load collector %s',
                                       process_name)
                        continue

                    # Splay the loads
                    time.sleep(1)

                    process = multiprocessing.Process(name=process_name,
                                                      target=collector_process,
                                                      args=(collector,
                                                            self.metric_queue,
                                                            self.log))
                    process.daemon = True
                    process.start()

                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                self.log.info('Reloading state due to HUP')
                self.config = load_config(self.configfile)
                collectors = load_collectors(
                    self.config['server']['collectors_path'])
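
Each run() variant above normalizes handlers_path so that a comma-separated string from the config file and an already-parsed list are handled the same way before load_include_path is called. A standalone sketch of that normalization, written for Python 2 to match the basestring check in the examples; the directory values are invented, and the list comprehension is equivalent to the map(str.strip, ...) used above:

handlers_path = 'handlers/, /usr/share/diamond/handlers'

# Accept either a list or a comma-separated string of directories.
if isinstance(handlers_path, basestring):
    handlers_path = [path.strip() for path in handlers_path.split(',')]

print(handlers_path)   # ['handlers/', '/usr/share/diamond/handlers']
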
Example #5
    def run(self):
        """
        Load handler and collector classes and then start collectors
        """

        #######################################################################
        # Config
        #######################################################################
        self.config = load_config(self.configfile)

        collectors = load_collectors(self.config['server']['collectors_path'])

        #######################################################################
        # Handlers
        #
        # TODO: Eventually move each handler to its own process space?
        #######################################################################

        if 'handlers_path' in self.config['server']:
            handlers_path = self.config['server']['handlers_path']

            # Make a list if not one
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(',')
                handlers_path = map(str.strip, handlers_path)
                self.config['server']['handlers_path'] = handlers_path

            load_include_path(handlers_path)

        if 'handlers' not in self.config['server']:
            self.log.critical('handlers missing from server section in config')
            sys.exit(1)

        handlers = self.config['server'].get('handlers')
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if 'diamond.handler.queue.QueueHandler' in handlers:
            handlers.remove('diamond.handler.queue.QueueHandler')

        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class(
            'diamond.handler.queue.QueueHandler',
            Handler
        )

        self.handler_queue = QueueHandler(
            config=self.config, queue=self.metric_queue, log=self.log)

        process = multiprocessing.Process(
            name="Handlers",
            target=handler_process,
            args=(self.handlers, self.metric_queue, self.log),
        )

        process.daemon = True
        process.start()

        #######################################################################
        # Signals
        #######################################################################

        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, signal_to_exception)

        #######################################################################

        while True:
            try:
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################

                running_collectors = []
                for collector, config in self.config['collectors'].iteritems():
                    if config.get('enabled', False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if 'Collector' not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                collector_classes = dict(
                    (cls.__name__.split('.')[-1], cls)
                    for cls in collectors.values()
                )

                load_delay = self.config['server'].get('collectors_load_delay',
                                                       1.0)
                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if 'Collector' not in collector_name:
                        continue

                    if collector_name not in collector_classes:
                        self.log.error('Can not find collector %s',
                                       collector_name)
                        continue

                    collector = initialize_collector(
                        collector_classes[collector_name],
                        name=process_name,
                        configfile=self.configfile,
                        handlers=[self.handler_queue])

                    if collector is None:
                        self.log.error('Failed to load collector %s',
                                       process_name)
                        continue

                    # Splay the loads
                    time.sleep(float(load_delay))

                    process = multiprocessing.Process(
                        name=process_name,
                        target=collector_process,
                        args=(collector, self.metric_queue, self.log)
                    )
                    process.daemon = True
                    process.start()

                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                # ignore further SIGHUPs for now
                original_sighup_handler = signal.getsignal(signal.SIGHUP)
                signal.signal(signal.SIGHUP, signal.SIG_IGN)

                self.log.info('Reloading state due to HUP')
                self.config = load_config(self.configfile)
                collectors = load_collectors(
                    self.config['server']['collectors_path'])
                # restore SIGHUP handler
                signal.signal(signal.SIGHUP, original_sighup_handler)
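
Example #5 additionally guards the reload: it saves the current SIGHUP handler, ignores SIGHUP while the configuration and collectors are re-read, and then restores the handler, so a second HUP cannot interrupt the reload itself. A minimal standalone sketch of that pattern; reload_state is a stand-in invented for illustration:

import signal


def reload_state():
    # Stand-in for the load_config()/load_collectors() calls above.
    print('reloading configuration')


# SIGHUP only exists on Unix-like platforms, hence the hasattr guard
# also used in the examples.
if hasattr(signal, 'SIGHUP'):
    original_handler = signal.getsignal(signal.SIGHUP)
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    try:
        reload_state()
    finally:
        signal.signal(signal.SIGHUP, original_handler)
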
Example #6
    def run(self):
        """
        Load handler and collector classes and then start collectors
        """

        #######################################################################
        # Config
        #######################################################################
        self.config = load_config(self.configfile)

        collectors = load_collectors(self.config['server']['collectors_path'])
        metric_queue_size = int(self.config['server'].get(
            'metric_queue_size', 16384))
        self.metric_queue = self.manager.Queue(maxsize=metric_queue_size)
        self.log.debug('metric_queue_size: %d', metric_queue_size)

        #######################################################################
        # Handlers
        #
        # TODO: Eventually move each handler to its own process space?
        #######################################################################

        if 'handlers_path' in self.config['server']:
            handlers_path = self.config['server']['handlers_path']

            # Make a list if not one
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(',')
                handlers_path = map(str.strip, handlers_path)
                self.config['server']['handlers_path'] = handlers_path

            load_include_path(handlers_path)

        if 'handlers' not in self.config['server']:
            self.log.critical('handlers missing from server section in config')
            sys.exit(1)

        handlers = self.config['server'].get('handlers')
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if 'diamond.handler.queue.QueueHandler' in handlers:
            handlers.remove('diamond.handler.queue.QueueHandler')

        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class('diamond.handler.queue.QueueHandler',
                                          Handler)

        self.handler_queue = QueueHandler(config=self.config,
                                          queue=self.metric_queue,
                                          log=self.log)

        handlers_process = multiprocessing.Process(
            name="Handlers",
            target=handler_process,
            args=(self.handlers, self.metric_queue, self.log),
        )

        handlers_process.daemon = True
        handlers_process.start()

        #######################################################################
        # Signals
        #######################################################################

        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, signal_to_exception)

        #######################################################################

        while True:
            try:
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################

                running_collectors = []
                for collector, config in self.config['collectors'].iteritems():
                    if config.get('enabled', False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if 'Collector' not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                collector_classes = dict((cls.__name__.split('.')[-1], cls)
                                         for cls in collectors.values())

                load_delay = self.config['server'].get('collectors_load_delay',
                                                       1.0)
                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if 'Collector' not in collector_name:
                        continue

                    if collector_name not in collector_classes:
                        self.log.error('Can not find collector %s',
                                       collector_name)
                        continue

                    collector = initialize_collector(
                        collector_classes[collector_name],
                        name=process_name,
                        configfile=self.configfile,
                        handlers=[self.handler_queue])

                    if collector is None:
                        self.log.error('Failed to load collector %s',
                                       process_name)
                        continue

                    # Splay the loads
                    time.sleep(float(load_delay))

                    process = multiprocessing.Process(name=process_name,
                                                      target=collector_process,
                                                      args=(collector,
                                                            self.metric_queue,
                                                            self.log))
                    process.daemon = True
                    process.start()

                if not handlers_process.is_alive():
                    self.log.error('Handlers process exited')
                    if (str_to_bool(self.config['server'].get(
                            'abort_on_handlers_process_exit', 'False'))):
                        raise Exception('Handlers process exited')

                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                # ignore further SIGHUPs for now
                original_sighup_handler = signal.getsignal(signal.SIGHUP)
                signal.signal(signal.SIGHUP, signal.SIG_IGN)

                self.log.info('Reloading state due to HUP')
                self.config = load_config(self.configfile)
                collectors = load_collectors(
                    self.config['server']['collectors_path'])
                # restore SIGHUP handler
                signal.signal(signal.SIGHUP, original_sighup_handler)
Example #7
    def run(self):
        """
        Load handler and collector classes and then start collectors
        """

        ########################################################################
        # Config
        ########################################################################
        self.config = load_config(self.configfile)

        collectors = load_collectors(self.config["server"]["collectors_path"])

        ########################################################################
        # Handlers
        #
        # TODO: Eventually move each handler to its own process space?
        ########################################################################

        if "handlers_path" in self.config["server"]:
            handlers_path = self.config["server"]["handlers_path"]

            # Make a list if not one
            if isinstance(handlers_path, basestring):
                handlers_path = handlers_path.split(",")
                handlers_path = map(str.strip, handlers_path)
                self.config["server"]["handlers_path"] = handlers_path

            load_include_path(handlers_path)

        if "handlers" not in self.config["server"]:
            self.log.critical("handlers missing from server section in config")
            sys.exit(1)

        handlers = self.config["server"].get("handlers")
        if isinstance(handlers, basestring):
            handlers = [handlers]

        # Prevent the Queue Handler from being a normal handler
        if "diamond.handler.queue.QueueHandler" in handlers:
            handlers.remove("diamond.handler.queue.QueueHandler")

        self.handlers = load_handlers(self.config, handlers)

        QueueHandler = load_dynamic_class("diamond.handler.queue.QueueHandler", Handler)

        self.handler_queue = QueueHandler(config=self.config, queue=self.metric_queue, log=self.log)

        process = multiprocessing.Process(
            name="Handlers", target=handler_process, args=(self.handlers, self.metric_queue, self.log)
        )

        process.daemon = True
        process.start()

        ########################################################################
        # Signals
        ########################################################################

        signal.signal(signal.SIGHUP, signal_to_exception)

        ########################################################################

        while True:
            try:
                active_children = multiprocessing.active_children()
                running_processes = []
                for process in active_children:
                    running_processes.append(process.name)
                running_processes = set(running_processes)

                ##############################################################
                # Collectors
                ##############################################################

                running_collectors = []
                for collector, config in self.config["collectors"].iteritems():
                    if config.get("enabled", False) is not True:
                        continue
                    running_collectors.append(collector)
                running_collectors = set(running_collectors)

                # Collectors that are running but shouldn't be
                for process_name in running_processes - running_collectors:
                    if "Collector" not in process_name:
                        continue
                    for process in active_children:
                        if process.name == process_name:
                            process.terminate()

                for process_name in running_collectors - running_processes:
                    # To handle running multiple collectors concurrently, we
                    # split on white space and use the first word as the
                    # collector name to spin
                    collector_name = process_name.split()[0]

                    if "Collector" not in collector_name:
                        continue

                    # Find the class
                    for cls in collectors.values():
                        cls_name = cls.__name__.split(".")[-1]
                        if cls_name == collector_name:
                            break
                    if cls_name != collector_name:
                        self.log.error("Can not find collector %s", collector_name)
                        continue

                    collector = initialize_collector(
                        cls, name=process_name, configfile=self.configfile, handlers=[self.handler_queue]
                    )

                    if collector is None:
                        self.log.error("Failed to load collector %s", process_name)
                        continue

                    # Splay the loads
                    time.sleep(1)

                    process = multiprocessing.Process(
                        name=process_name, target=collector_process, args=(collector, self.metric_queue, self.log)
                    )
                    process.daemon = True
                    process.start()

                ##############################################################

                time.sleep(1)

            except SIGHUPException:
                self.log.info("Reloading state due to HUP")
                self.config = load_config(self.configfile)
                collectors = load_collectors(self.config["server"]["collectors_path"])