Code example #1
def make_monitoring_log(level, message):
    """
    Function used to build the monitoring log. Build a Brok typed as monitoring_log with
    the message to log

    TODO: replace with dedicated brok for each event to log

    :param level: log level as defined in logging
    :param message: message to insert into the monitoring log
    :return:
    """
    logger.debug("Monitoring log: %s / %s", level, message)
    return Brok({
        'type': 'monitoring_log',
        'data': {
            'level': level,
            'message': message
        }
    })
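
A hedged usage sketch: assuming make_monitoring_log is importable as above, a caller builds the brok and queues it like any other (the level string and message are illustrative):

    # Illustrative only: build and inspect a monitoring_log brok
    brok = make_monitoring_log('warning', 'TIMEPERIOD TRANSITION: 24x7;-1;1')
    assert brok.type == 'monitoring_log'  # assumes Brok exposes its type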
Code example #2
    def get_initial_status_brok(self):
        """
        Get a brok with hostgroup info (like id, name)
        Members contain list of (id, host_name)

        :return:Brok object
        :rtype: object
        :return: None
        """
        cls = self.__class__
        data = {}
        # Now config properties
        for prop, entry in cls.properties.items():
            if entry.fill_brok != []:
                if hasattr(self, prop):
                    data[prop] = getattr(self, prop)
        # Here members is just a bunch of hosts; we need (id, name) pairs instead
        data['members'] = []
        for i in self.members:
            # it looks like Lisp! ((( ..))), sorry....
            data['members'].append((i._id, i.get_name()))
        brok = Brok('initial_' + cls.my_type + '_status', data)
        return brok
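
For illustration, a hostgroup with two members would yield a brok shaped roughly like this (a sketch; the exact properties depend on which entries have fill_brok set):

    # Sketch of the resulting brok (names and ids are illustrative):
    # Brok('initial_hostgroup_status',
    #      {'hostgroup_name': 'web',
    #       'members': [('uuid1', 'srv001'), ('uuid2', 'srv002')]})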
Code example #3
    def setup_new_conf(self):
        # pylint: disable=too-many-statements, too-many-branches, too-many-locals
        """Setup new conf received for scheduler

        :return: None
        """
        # Execute the base class treatment...
        super(Alignak, self).setup_new_conf()

        # ...then our own specific treatment!
        with self.conf_lock:
            # self_conf is our own configuration from the alignak environment
            # self_conf = self.cur_conf['self_conf']
            logger.debug("Got config: %s", self.cur_conf)
            if 'conf_part' not in self.cur_conf:
                self.cur_conf['conf_part'] = None
            conf_part = self.cur_conf['conf_part']

            # Ok now we can save the retention data
            if self.sched.pushed_conf is not None:
                self.sched.update_retention()

            # Get the monitored objects configuration
            t00 = time.time()
            received_conf_part = None
            try:
                received_conf_part = unserialize(conf_part)
                assert received_conf_part is not None
            except AssertionError as exp:
                # This indicates that no configuration is managed by this scheduler...
                logger.warning(
                    "No managed configuration received from arbiter")
            except AlignakClassLookupException as exp:  # pragma: no cover
                # This indicates that the new configuration is not managed...
                self.new_conf = {
                    "_status":
                    "Cannot un-serialize configuration received from arbiter",
                    "_error": str(exp)
                }
                logger.error(self.new_conf)
                logger.error("Back trace of the error:\n%s",
                             traceback.format_exc())
                return
            except Exception as exp:  # pylint: disable=broad-except
                # This indicates that the new configuration is not managed...
                self.new_conf = {
                    "_status":
                    "Cannot un-serialize configuration received from arbiter",
                    "_error": str(exp)
                }
                logger.error(self.new_conf)
                self.exit_on_exception(exp, str(self.new_conf))

            # if not received_conf_part:
            #     return

            logger.info(
                "Monitored configuration %s received at %d. Un-serialized in %d secs",
                received_conf_part, t00,
                time.time() - t00)
            logger.info("Scheduler received configuration : %s",
                        received_conf_part)

            # Now we create our pollers, reactionners and brokers
            for link_type in ['pollers', 'reactionners', 'brokers']:
                if link_type not in self.cur_conf['satellites']:
                    logger.error("Missing %s in the configuration!", link_type)
                    continue

                my_satellites = getattr(self, link_type, {})
                received_satellites = self.cur_conf['satellites'][link_type]
                for link_uuid in received_satellites:
                    rs_conf = received_satellites[link_uuid]
                    logger.debug("- received %s - %s: %s",
                                 rs_conf['instance_id'], rs_conf['type'],
                                 rs_conf['name'])

                    # Must look if we already had a configuration and save our broks
                    already_got = rs_conf['instance_id'] in my_satellites
                    broks = []
                    actions = {}
                    wait_homerun = {}
                    external_commands = {}
                    running_id = 0
                    if already_got:
                        logger.warning("I already got: %s",
                                       rs_conf['instance_id'])
                        # Save some information
                        running_id = my_satellites[link_uuid].running_id
                        (broks, actions,
                         wait_homerun, external_commands) = \
                            my_satellites[link_uuid].get_and_clear_context()
                        # Delete the former link
                        del my_satellites[link_uuid]

                    # My new satellite link...
                    new_link = SatelliteLink.get_a_satellite_link(
                        link_type[:-1], rs_conf)
                    my_satellites[new_link.uuid] = new_link
                    logger.info("I got a new %s satellite: %s", link_type[:-1],
                                new_link)

                    new_link.running_id = running_id
                    new_link.external_commands = external_commands
                    new_link.broks = broks
                    new_link.wait_homerun = wait_homerun
                    new_link.actions = actions

                    # Replacing the satellite address and port by those defined in satellite_map
                    if new_link.name in self.cur_conf['override_conf'].get(
                            'satellite_map', {}):
                        override_conf = self.cur_conf['override_conf']
                        overriding = override_conf.get('satellite_map')[
                            new_link.name]
                        logger.warning(
                            "Do not override the configuration for: %s, with: %s. "
                            "Please check whether this is necessary!",
                            new_link.name, overriding)

            # First mix conf and override_conf to have our definitive conf
            # Note: cur_conf is a plain dict, so use .get() rather than getattr()
            for prop in self.cur_conf.get('override_conf', {}):
                logger.debug("Overridden: %s / %s ", prop,
                             getattr(received_conf_part, prop, None))
                logger.debug("Overriding: %s / %s ", prop,
                             self.cur_conf['override_conf'])
                setattr(received_conf_part, prop,
                        self.cur_conf['override_conf'].get(prop, None))

            # Scheduler modules
            if not self.have_modules:
                try:
                    logger.debug("Modules configuration: %s",
                                 self.cur_conf['modules'])
                    self.modules = unserialize(self.cur_conf['modules'],
                                               no_load=True)
                except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                    logger.error(
                        'Cannot un-serialize modules configuration '
                        'received from arbiter: %s', exp)
                if self.modules:
                    logger.debug("I received some modules configuration: %s",
                                 self.modules)
                    self.have_modules = True

                    self.do_load_modules(self.modules)
                    # and start external modules too
                    self.modules_manager.start_external_instances()
                else:
                    logger.info("I do not have modules")

            if received_conf_part:
                logger.info("Loading configuration...")

                # Propagate the global parameters to the configuration items
                received_conf_part.explode_global_conf()

                # We give the configuration to our scheduler
                self.sched.reset()
                self.sched.load_conf(self.cur_conf['instance_id'],
                                     self.cur_conf['instance_name'],
                                     received_conf_part)

                # Once loaded, the scheduler has an inner pushed_conf object
                logger.info("Loaded: %s", self.sched.pushed_conf)

                # Update the scheduler ticks according to the daemon configuration
                self.sched.update_recurrent_works_tick(self)

                # We must update our pushed configuration macros with correct values
                # from the configuration parameters
                # self.sched.pushed_conf.fill_resource_macros_names_macros()

                # Creating the Macroresolver Class & unique instance
                m_solver = MacroResolver()
                m_solver.init(received_conf_part)

                # Now create the external commands manager
                # We are an applyer: our role is not to dispatch commands, but to apply them
                ecm = ExternalCommandManager(
                    received_conf_part, 'applyer', self.sched,
                    received_conf_part.accept_passive_unknown_check_results,
                    received_conf_part.log_external_commands)

                # Scheduler needs to know about this external command manager to use it if necessary
                self.sched.external_commands_manager = ecm

                # Ok now we can load the retention data
                self.sched.retention_load()

                # Log hosts/services initial states
                self.sched.log_initial_states()

            # Create the 'new_conf' brok
            brok = Brok({'type': 'new_conf', 'data': {}})
            self.sched.add_brok(brok)

            # Initialize connection with all our satellites
            logger.info("Initializing connection with my satellites:")
            my_satellites = self.get_links_of_type(s_type='')
            for satellite in list(my_satellites.values()):
                logger.info("- : %s/%s", satellite.type, satellite.name)
                if not self.daemon_connection_init(satellite):
                    logger.error("Satellite connection failed: %s", satellite)

            if received_conf_part:
                # Enable the scheduling process
                logger.info("Loaded: %s", self.sched.pushed_conf)
                self.sched.start_scheduling()

        # Now I have a configuration!
        self.have_conf = True
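
The 'new_conf' brok pushed near the end of this method is the marker telling downstream consumers that the scheduler reloaded its configuration. A minimal, hypothetical handler sketch (manage_brok is the dispatch entry point used in the tests below; this body is an assumption, not the real module code):

    def manage_brok(self, brok):
        # Hypothetical sketch: react to the scheduler's reload marker
        if brok.type == 'new_conf':
            logger.info("The scheduler loaded a new configuration")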
Code example #4
File: stats.py Project: jgmel/alignak
    def gauge(self, key, value):
        """Set a gauge value

        If the inner key does not exist is is created

        :param key: gauge to update
        :type key: str
        :param value: counter value
        :type value: int
        :return: An alignak_stat brok if broks are enabled else None
        """
        _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0))
        count += 1
        _sum += value
        if _min is None or value < _min:
            _min = value
        if _max is None or value > _max:
            _max = value
        self.stats[key] = (_min, _max, count, _sum)

        # Manage local statsd part
        if self.statsd_enabled and self.statsd_sock:
            # Send the gauge value to statsd ('|g' marks a gauge metric)
            packet = '%s.%s.%s:%d|g' % (self.statsd_prefix, self.name, key,
                                        value)
            # Python 3 sockets need bytes (as in the counter() variant below)
            packet = packet.encode('utf-8')
            # Do not log because it is spamming the log file, but leave this code in place
            # for it may be restored easily if more tests are necessary... ;)
            # logger.info("Sending data: %s", packet)
            try:
                self.statsd_sock.sendto(packet, self.statsd_addr)
            except (socket.error, socket.gaierror):
                # cannot send? Not a huge problem here, and we cannot
                # log because it would be far too verbose :p
                pass

        # Manage file part
        if self.statsd_enabled and self.file_d:
            packet = self.line_fmt
            if not self.date_fmt:
                date = "%s" % time.time()
            else:
                date = datetime.datetime.utcnow().strftime(self.date_fmt)
            packet = packet.replace("#date#", date)
            packet = packet.replace(
                "#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key))
            packet = packet.replace("#value#", '%d' % value)
            packet = packet.replace("#uom#", 'g')
            # Do not log because it is spamming the log file, but leave this code in place
            # for it may be restored easily if more tests are necessary... ;)
            # logger.debug("Writing data: %s", packet)
            try:
                self.file_d.write(packet)
            except IOError:
                logger.warning("Could not write to the file: %s", packet)

        if self.broks_enabled:
            logger.debug("alignak stat brok: %s = %s", key, value)
            return Brok({
                'type': 'alignak_stat',
                'data': {
                    'type': 'gauge',
                    'metric':
                    '%s.%s.%s' % (self.statsd_prefix, self.name, key),
                    'value': value,
                    'uom': 'g'
                }
            })

        return None
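
A hedged usage sketch for gauge(), assuming a stats manager registered as in example #7 below (the metric key and the add_brok call are illustrative):

    # Record a gauge; forward the returned brok only when broks are enabled
    brok = statsmgr.gauge('loop.duration', 0.42)
    if brok is not None:
        self.sched.add_brok(brok)  # queuing call borrowed from example #3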
Code example #5
File: test_metrics.py Project: jbiousse/alignak
    def test_inner_module_checks_results(self):
        """ Test that inner metrics module is pushing data to Graphite

        :return: None
        """
        self.setup_with_file('cfg/cfg_metrics.cfg')
        # self.clear_logs()

        # Module is an internal one (no external process) in the broker daemon modules manager
        my_module = self._broker_daemon.modules_manager.instances[0]
        assert my_module.is_external is False
        my_module.metrics_flush_count = 1

        # When the broker daemon receives a Brok, it is propagated to the module

        # Host check result
        self.clear_logs()
        hcr = {
            "host_name": "srv001",

            "last_time_unreachable": 0,
            "last_problem_id": 0,
            "passive_check": False,
            "retry_interval": 1,
            "last_event_id": 0,
            "problem_has_been_acknowledged": False,
            "command_name": "pm-check_linux_host_alive",
            "last_state": "UP",
            "latency": 0.2317881584,
            "last_state_type": "HARD",
            "last_hard_state_change": 1444427108,
            "last_time_up": 0,
            "percent_state_change": 0.0,
            "state": "DOWN",
            "last_chk": 1444427104,
            "last_state_id": 0,
            "end_time": 0,
            "timeout": 0,
            "current_event_id": 10,
            "execution_time": 3.1496069431000002,
            "start_time": 0,
            "return_code": 2,
            "state_type": "SOFT",
            "output": "CRITICAL - Plugin timed out after 10 seconds",
            "in_checking": True,
            "early_timeout": 0,
            "in_scheduled_downtime": False,
            "attempt": 0,
            "state_type_id": 1,
            "acknowledgement_type": 1,
            "last_state_change": 1444427108.040841,
            "last_time_down": 1444427108,
            "instance_id": 0,
            "long_output": "",
            "current_problem_id": 0,
            "check_interval": 5,
            "state_id": 2,
            "has_been_checked": 1,
            "perf_data": "uptime=1200;rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
        }
        b = Brok({'data': hcr, 'type': 'host_check_result'}, False)
        self._broker_daemon.manage_brok(b)
        self.show_logs()
        self.assert_log_count(0)

        # Service check result
        self.clear_logs()
        scr = {
            "host_name": "srv001",
            "service_description": "ping",
            "command_name": "ping",

            "attempt": 1,
            "execution_time": 3.1496069431000002,
            "latency": 0.2317881584,
            "return_code": 2,
            "state": "OK",
            "state_type": "HARD",
            "state_id": 0,
            "state_type_id": 1,

            "output": "PING OK - Packet loss = 0%, RTA = 0.05 ms",
            "long_output": "Long output ...",
            "perf_data": "rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0",

            "passive_check": False,

            "problem_has_been_acknowledged": False,
            "acknowledgement_type": 1,
            "in_scheduled_downtime": False,

            "last_chk": 1473597375,
            "last_state_change": 1444427108.147903,
            "last_state_id": 0,
            "last_state": "UNKNOWN",
            "last_state_type": "HARD",
            "last_hard_state_change": 0.0,
            "last_time_unknown": 0,
            "last_time_unreachable": 0,
            "last_time_critical": 1473597376,
            "last_time_warning": 0,
            "last_time_ok": 0,

            "retry_interval": 2,
            "percent_state_change": 4.1,
            "check_interval": 5,

            "in_checking": False,
            "early_timeout": 0,
            "instance_id": "3ac88dd0c1c04b37a5d181622e93b5bc",
            "current_event_id": 1,
            "last_event_id": 0,
            "current_problem_id": 1,
            "last_problem_id": 0,
            "timeout": 0,
            "has_been_checked": 1,
            "start_time": 0,
            "end_time": 0
        }
        b = Brok({'data': scr, 'type': 'service_check_result'}, False)
        self._broker_daemon.manage_brok(b)
        self.show_logs()
        self.assert_log_count(0)
        print(my_module.my_metrics)
Code example #6
File: stats.py Project: jbiousse/alignak
    def counter(self, key, value, timestamp=None):
        """Set a counter value

        If the inner key does not exist is is created

        :param key: counter to update
        :type key: str
        :param value: counter value
        :type value: float
        :return: An alignak_stat brok if broks are enabled else None
        """
        _min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0))
        count += 1
        _sum += value
        if _min is None or value < _min:
            _min = value
        if _max is None or value > _max:
            _max = value
        self.stats[key] = (_min, _max, count, _sum)

        # Manage local statsd part
        if self.statsd_enabled and self.statsd_sock:
            # Send the counter increment to statsd ('|c' marks a counter metric)
            packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key,
                                        value)
            packet = packet.encode('utf-8')
            try:
                self.statsd_sock.sendto(packet, self.statsd_addr)
            except (socket.error, socket.gaierror):
                # cannot send? Not a huge problem here, and we cannot
                # log because it would be far too verbose :p
                pass

        # Manage Graphite part
        if self.statsd_enabled and self.carbon:
            self.send_to_graphite(key, value, timestamp=timestamp)

        # Manage file part
        if self.statsd_enabled and self.file_d:
            if timestamp is None:
                timestamp = int(time.time())

            packet = self.line_fmt
            if not self.date_fmt:
                date = "%s" % timestamp
            else:
                date = datetime.datetime.fromtimestamp(timestamp).strftime(
                    self.date_fmt)
            packet = packet.replace("#date#", date)
            packet = packet.replace(
                "#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key))
            packet = packet.replace("#value#", '%d' % value)
            packet = packet.replace("#uom#", 'c')
            try:
                self.file_d.write(packet)
            except IOError:
                logger.warning("Could not write to the file: %s", packet)

        if self.broks_enabled:
            logger.debug("alignak stat brok: %s = %s", key, value)
            if timestamp is None:
                timestamp = int(time.time())

            return Brok({
                'type': 'alignak_stat',
                'data': {
                    'ts': timestamp,
                    'type': 'counter',
                    'metric':
                    '%s.%s.%s' % (self.statsd_prefix, self.name, key),
                    'value': value,
                    'uom': 'c'
                }
            })

        return None
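
Usage follows the same pattern as gauge(), with an optional back-dated timestamp (a sketch; the key and the destination queue are illustrative):

    # Count one managed brok, stamped explicitly (defaults to now otherwise)
    brok = statsmgr.counter('broks.managed', 1, timestamp=int(time.time()))
    if brok is not None:
        self.sched.add_brok(brok)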
Code example #7
File: schedulerdaemon.py Project: jgmel/alignak
    def setup_new_conf(self):  # pylint: disable=too-many-statements
        """Setup new conf received for scheduler

        :return: None
        """
        with self.conf_lock:
            self.clean_previous_run()
            new_conf = self.new_conf
            logger.info("[%s] Sending us a configuration", self.name)
            conf_raw = new_conf['conf']
            override_conf = new_conf['override_conf']
            modules = new_conf['modules']
            satellites = new_conf['satellites']
            instance_name = new_conf['instance_name']

            # Ok now we can save the retention data
            if hasattr(self.sched, 'conf'):
                self.sched.update_retention_file(forced=True)

            # hooray, we got a name, we can set it in our stats objects
            statsmgr.register(instance_name,
                              'scheduler',
                              statsd_host=new_conf['statsd_host'],
                              statsd_port=new_conf['statsd_port'],
                              statsd_prefix=new_conf['statsd_prefix'],
                              statsd_enabled=new_conf['statsd_enabled'])

            t00 = time.time()
            try:
                conf = unserialize(conf_raw)
            except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                logger.error(
                    'Cannot un-serialize configuration received from arbiter: %s',
                    exp)
            logger.debug("Conf received at %d. Un-serialized in %d secs", t00,
                         time.time() - t00)
            self.new_conf = None

            if 'scheduler_name' in new_conf:
                name = new_conf['scheduler_name']
            else:
                name = instance_name
            self.name = name

            # Set my own process title
            self.set_proctitle(self.name)

            logger.info("[%s] Received a new configuration, containing: ",
                        self.name)
            for key in new_conf:
                logger.info("[%s] - %s", self.name, key)
            logger.info("[%s] configuration identifiers: %s (%s)", self.name,
                        new_conf['conf_uuid'], new_conf['push_flavor'])

            # Tag the conf with our data
            self.conf = conf
            self.conf.push_flavor = new_conf['push_flavor']
            self.conf.alignak_name = new_conf['alignak_name']
            self.conf.instance_name = instance_name
            self.conf.skip_initial_broks = new_conf['skip_initial_broks']
            self.conf.accept_passive_unknown_check_results = \
                new_conf['accept_passive_unknown_check_results']

            self.cur_conf = conf
            self.override_conf = override_conf
            self.modules = unserialize(modules, True)
            self.satellites = satellites

            # Now we create our pollers, reactionners and brokers
            for sat_type in ['pollers', 'reactionners', 'brokers']:
                if sat_type not in satellites:
                    continue
                for sat_id in satellites[sat_type]:
                    # Must look if we already have it
                    sats = getattr(self, sat_type)
                    sat = satellites[sat_type][sat_id]

                    sats[sat_id] = sat

                    if sat['name'] in override_conf['satellitemap']:
                        sat = dict(sat)  # make a copy
                        sat.update(override_conf['satellitemap'][sat['name']])

                    proto = 'http'
                    if sat['use_ssl']:
                        proto = 'https'
                    uri = '%s://%s:%s/' % (proto, sat['address'], sat['port'])

                    sats[sat_id]['uri'] = uri
                    sats[sat_id]['con'] = None
                    sats[sat_id]['running_id'] = 0
                    sats[sat_id]['last_connection'] = 0
                    sats[sat_id]['connection_attempt'] = 0
                    sats[sat_id]['max_failed_connections'] = 3
                    setattr(self, sat_type, sats)
                logger.debug("We have our %s: %s ", sat_type,
                             satellites[sat_type])
                logger.info("We have our %s:", sat_type)
                for daemon in satellites[sat_type].values():
                    logger.info(" - %s ", daemon['name'])

            # First mix conf and override_conf to have our definitive conf
            for prop in self.override_conf:
                val = self.override_conf[prop]
                setattr(self.conf, prop, val)

            if self.conf.use_timezone != '':
                logger.info("Setting our timezone to %s",
                            str(self.conf.use_timezone))
                os.environ['TZ'] = self.conf.use_timezone
                time.tzset()

            self.do_load_modules(self.modules)

            logger.info("Loading configuration.")
            self.conf.explode_global_conf()  # pylint: disable=E1101

            # we give the scheduler its conf
            self.sched.reset()
            self.sched.load_conf(self.conf)
            self.sched.load_satellites(self.pollers, self.reactionners,
                                       self.brokers)

            # We must update our Config macros with the correct values
            # from the configuration parameters
            self.sched.conf.fill_resource_macros_names_macros()

            # Creating the Macroresolver Class & unique instance
            m_solver = MacroResolver()
            m_solver.init(self.conf)

            # self.conf.dump()
            # self.conf.quick_debug()

            # Now create the external commands manager
            # We are an applyer: our role is not to dispatch commands, but to apply them
            ecm = ExternalCommandManager(self.conf, 'applyer', self.sched)

            # Scheduler needs to know about this external command manager to use it if necessary
            self.sched.set_external_commands_manager(ecm)
            # Update External Commands Manager
            self.sched.external_commands_manager.accept_passive_unknown_check_results = \
                self.sched.conf.accept_passive_unknown_check_results

            # We reset the list of schedulers we manage (it is just us :)
            # and register ourselves in it
            self.schedulers = {self.conf.uuid: self.sched}  # pylint: disable=E1101

            # Ok now we can load the retention data
            self.sched.retention_load()

            # Create the 'new_conf' brok
            brok = Brok({'type': 'new_conf', 'data': {}})
            self.sched.add_brok(brok)
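
For clarity, a sketch of the satellitemap override consumed in the loop above (keys mirror the dict accesses; the values are purely illustrative):

    # override_conf['satellitemap'] maps a satellite name to replacement
    # connection settings, applied before the URI is built, e.g.:
    #   {'poller-master': {'address': '10.0.0.5', 'port': 7771}}
    # -> the URI becomes 'http://10.0.0.5:7771/' for that poller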
Code example #8
File: test_metrics.py Project: jbiousse/alignak
    def test_inner_module_broks(self):
        """ Test that inner metrics module is managing broks with the default configuration

        :return: None
        """
        self.setup_with_file('cfg/cfg_metrics.cfg')

        # Specific configuration enables the module
        assert True == self._scheduler.pushed_conf.process_performance_data
        assert self._scheduler.pushed_conf.host_perfdata_file == 'go-hosts'
        assert self._scheduler.pushed_conf.service_perfdata_file == 'go-services'
        assert 1 == len(self._broker_daemon.modules)

        self.show_logs()

        # The declared module instance
        my_module = self._broker_daemon.modules[0]
        # Generic stuff
        assert my_module.python_name == 'alignak.modules.inner_metrics'
        assert my_module.type == 'metrics'
        assert my_module.alias == 'inner-metrics'
        assert my_module.enabled is True

        # Specific stuff - the content of the configuration parameters
        assert my_module.host_perfdata_file == 'go-hosts'
        assert my_module.service_perfdata_file == 'go-services'

        self.clear_logs()

        # Module is not yet initialized, let's do it in place of the daemon.
        # Create the modules manager for a daemon type
        self.modules_manager = ModulesManager(self._broker_daemon)

        # Load and initialize the modules:
        #  - load python module
        #  - get module properties and instances
        self.modules_manager.load_and_init([my_module])

        self.show_logs()

        # Module is an internal one (no external process) in the broker daemon modules manager
        my_module = self._broker_daemon.modules_manager.instances[0]
        assert my_module.is_external is False

        # Known hosts/services cache is empty
        assert my_module.hosts_cache == {}
        assert my_module.services_cache == {}

        # When the broker daemon receives a Brok, it is propagated to the module

        # Host check result
        self.clear_logs()
        hcr = {
            "host_name": "srv001",

            "last_time_unreachable": 0,
            "last_problem_id": 0,
            "passive_check": False,
            "retry_interval": 1,
            "last_event_id": 0,
            "problem_has_been_acknowledged": False,
            "command_name": "pm-check_linux_host_alive",
            "last_state": "UP",
            "latency": 0.2317881584,
            "last_state_type": "HARD",
            "last_hard_state_change": 1444427108,
            "last_time_up": 0,
            "percent_state_change": 0.0,
            "state": "DOWN",
            "last_chk": 1444427104,
            "last_state_id": 0,
            "end_time": 0,
            "timeout": 0,
            "current_event_id": 10,
            "execution_time": 3.1496069431000002,
            "start_time": 0,
            "return_code": 2,
            "state_type": "SOFT",
            "output": "CRITICAL - Plugin timed out after 10 seconds",
            "in_checking": True,
            "early_timeout": 0,
            "in_scheduled_downtime": False,
            "attempt": 0,
            "state_type_id": 1,
            "acknowledgement_type": 1,
            "last_state_change": 1444427108.040841,
            "last_time_down": 1444427108,
            "instance_id": 0,
            "long_output": "",
            "current_problem_id": 0,
            "check_interval": 5,
            "state_id": 2,
            "has_been_checked": 1,
            "perf_data": "uptime=1200;rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
        }
        b = Brok({'data': hcr, 'type': 'host_check_result'}, False)
        self._broker_daemon.manage_brok(b)
        self.show_logs()
        self.assert_log_count(0)

        # Service check result
        self.clear_logs()
        scr = {
            "host_name": "srv001",
            "service_description": "ping",
            "command_name": "ping",

            "attempt": 1,
            "execution_time": 3.1496069431000002,
            "latency": 0.2317881584,
            "return_code": 2,
            "state": "OK",
            "state_type": "HARD",
            "state_id": 0,
            "state_type_id": 1,

            "output": "PING OK - Packet loss = 0%, RTA = 0.05 ms",
            "long_output": "Long output ...",
            "perf_data": "rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0",

            "passive_check": False,

            "problem_has_been_acknowledged": False,
            "acknowledgement_type": 1,
            "in_scheduled_downtime": False,

            "last_chk": 1473597375,
            "last_state_change": 1444427108.147903,
            "last_state_id": 0,
            "last_state": "UNKNOWN",
            "last_state_type": "HARD",
            "last_hard_state_change": 0.0,
            "last_time_unknown": 0,
            "last_time_unreachable": 0,
            "last_time_critical": 1473597376,
            "last_time_warning": 0,
            "last_time_ok": 0,

            "retry_interval": 2,
            "percent_state_change": 4.1,
            "check_interval": 5,

            "in_checking": False,
            "early_timeout": 0,
            "instance_id": "3ac88dd0c1c04b37a5d181622e93b5bc",
            "current_event_id": 1,
            "last_event_id": 0,
            "current_problem_id": 1,
            "last_problem_id": 0,
            "timeout": 0,
            "has_been_checked": 1,
            "start_time": 0,
            "end_time": 0
        }
        b = Brok({'data': scr, 'type': 'service_check_result'}, False)
        self._broker_daemon.manage_brok(b)
        self.show_logs()
        self.assert_log_count(0)

        # Initial host status
        self.clear_logs()
        hcr = {
            "host_name": "srv001",
        }
        b = Brok({'data': hcr, 'type': 'initial_host_status'}, False)
        self._broker_daemon.manage_brok(b)
        self.show_logs()
        # The module inner cache stored the host
        assert 'srv001' in my_module.hosts_cache
        assert my_module.hosts_cache['srv001'] == {}
        assert my_module.services_cache == {}

        # Initial service status
        self.clear_logs()
        hcr = {
            "host_name": "srv001",
            "service_description": "ping"
        }
        b = Brok({'data': hcr, 'type': 'initial_service_status'}, False)
        self._broker_daemon.manage_brok(b)
        self.show_logs()
        # The module inner cache stored the host and the service
        assert 'srv001' in my_module.hosts_cache
        assert my_module.hosts_cache['srv001'] == {}
        assert 'srv001/ping' in my_module.services_cache
        assert my_module.services_cache['srv001/ping'] == {}
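
The assertions above imply that the module warms its host/service caches from the initial_*_status broks before charting check results. A hedged dispatch sketch of that behaviour (the body is an assumption; only the brok types and cache names come from the test):

    def manage_brok(self, brok):
        # Hypothetical sketch of the inner metrics module cache warming
        brok.prepare()  # deserialize brok.data if needed (see example #9)
        if brok.type == 'initial_host_status':
            self.hosts_cache[brok.data['host_name']] = {}
        elif brok.type == 'initial_service_status':
            key = '%s/%s' % (brok.data['host_name'],
                             brok.data['service_description'])
            self.services_cache[key] = {}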
Code example #9
    def test_01_program_status_brok(self):
        """Test with a brok for the program status update

        :return: None
        """
        # Get alignak endpoint resources before the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        for alignak_cfg in all_alignak['_items']:
            print("Alignak cfg: %s" % alignak_cfg)
        # No alignak configuration resource
        self.assertEqual(0, len(all_alignak['_items']))

        # Get a program status brok
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()

        # By default, we do not manage this brok!
        assert self.brokmodule.manage_update_program_status is False

        # Send program status brok
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # Still no alignak configuration resource
        self.assertEqual(0, len(all_alignak['_items']))

        # -------
        # Now we manage this brok!
        self.brokmodule.manage_update_program_status = True

        # Send program status brok
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # Now we have one resource
        self.assertEqual(1, len(all_alignak['_items']))

        alignak = all_alignak['_items'][0]
        # Remove backend Eve fields and store the creation and update timestamps
        _created = alignak.pop('_created')
        _updated = alignak.pop('_updated')
        alignak.pop('_id')
        alignak.pop('_links')
        alignak.pop('_etag')
        alignak.pop('schema_version')
        # TODO: these new fields need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            alignak.pop(field_name)

        expected = brok_data.copy()
        expected['name'] = expected.pop('alignak_name')
        # Some fields are valued as default by the backend
        expected['_sub_realm'] = True
        expected['alias'] = expected['name']
        expected['notes'] = ''
        expected['notes_url'] = ''
        expected['_realm'] = self.realm_all
        expected['global_host_event_handler'] = str(expected['global_host_event_handler'])
        expected['global_service_event_handler'] = 'None'
        self.assertEqual(expected, alignak)

        # --- 1
        time.sleep(1)
        # Re-send the same brok
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # We still have one resource
        self.assertEqual(1, len(all_alignak['_items']))

        alignak = all_alignak['_items'][0]
        # Remove backend Eve fields
        # Creation and update timestamps did not change because there was no backend update
        assert _created == alignak.pop('_created')
        assert _updated == alignak.pop('_updated')
        alignak.pop('_id')
        alignak.pop('_links')
        alignak.pop('_etag')
        alignak.pop('schema_version')
        # TODO: these new fields need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            alignak.pop(field_name)

        expected = brok_data.copy()
        expected['name'] = expected.pop('alignak_name')
        # Some fields are valued as default by the backend
        expected['_sub_realm'] = True
        expected['alias'] = expected['name']
        expected['notes'] = ''
        expected['notes_url'] = ''
        expected['_realm'] = self.realm_all
        expected['global_host_event_handler'] = str(expected['global_host_event_handler'])
        expected['global_service_event_handler'] = 'None'
        self.assertEqual(expected, alignak)

        # --- 2
        time.sleep(1)
        # Update the program status
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok_data['flap_detection_enabled'] = False
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()
        # Send program status brok
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # We still have one resource
        self.assertEqual(1, len(all_alignak['_items']))

        alignak = all_alignak['_items'][0]
        # Remove backend Eve fields
        # Creation timestamp did not change
        assert _created == alignak['_created']
        _created = alignak.pop('_created')
        # But the update timestamp changed!
        assert _updated != alignak['_updated']
        _updated = alignak.pop('_updated')
        alignak.pop('_id')
        alignak.pop('_links')
        alignak.pop('_etag')
        alignak.pop('schema_version')
        # TODO: these new fields need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            alignak.pop(field_name)

        expected = brok_data.copy()
        expected['name'] = expected.pop('alignak_name')
        # Some fields are valued as default by the backend
        expected['_sub_realm'] = True
        expected['alias'] = expected['name']
        expected['notes'] = ''
        expected['notes_url'] = ''
        expected['_realm'] = self.realm_all
        expected['global_host_event_handler'] = str(expected['global_host_event_handler'])
        expected['global_service_event_handler'] = 'None'
        self.assertEqual(expected, alignak)

        # --- 3
        time.sleep(1)
        # Re-send the same brok
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok_data['flap_detection_enabled'] = False
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # We still have one resource
        self.assertEqual(1, len(all_alignak['_items']))

        alignak = all_alignak['_items'][0]
        # Remove backend Eve fields
        # The creation timestamp did not change
        assert _created == alignak['_created']
        _created = alignak.pop('_created')
        # And the update timestamp did not change either!
        assert _updated == alignak['_updated']
        _updated = alignak.pop('_updated')
        alignak.pop('_id')
        alignak.pop('_links')
        alignak.pop('_etag')
        alignak.pop('schema_version')
        # TODO: these new fields need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            alignak.pop(field_name)

        expected = brok_data.copy()
        expected['name'] = expected.pop('alignak_name')
        # Some fields are valued as default by the backend
        expected['_sub_realm'] = True
        expected['alias'] = expected['name']
        expected['notes'] = ''
        expected['notes_url'] = ''
        expected['_realm'] = self.realm_all
        expected['global_host_event_handler'] = str(expected['global_host_event_handler'])
        expected['global_service_event_handler'] = 'None'
        self.assertEqual(expected, alignak)

        # --- 4
        time.sleep(1)
        # Update only the running properties
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok_data['flap_detection_enabled'] = False
        brok_data['last_alive'] = 123456789
        brok_data['last_command_check'] = 123456789
        brok_data['last_log_rotation'] = 123456789
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # We still have one resource
        self.assertEqual(1, len(all_alignak['_items']))

        alignak = all_alignak['_items'][0]
        # Remove backend Eve fields
        # The creation timestamp did not change
        assert _created == alignak['_created']
        _created = alignak.pop('_created')
        # And the update timestamp did not change either!
        assert _updated == alignak['_updated']
        _updated = alignak.pop('_updated')
        alignak.pop('_id')
        alignak.pop('_links')
        alignak.pop('_etag')
        alignak.pop('schema_version')
        # TODO: these new fields need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            alignak.pop(field_name)

        expected = brok_data.copy()
        expected['name'] = expected.pop('alignak_name')
        # Some fields are set to their default values by the backend
        expected['_sub_realm'] = True
        expected['alias'] = expected['name']
        expected['notes'] = ''
        expected['notes_url'] = ''
        expected['_realm'] = self.realm_all
        expected['global_host_event_handler'] = str(expected['global_host_event_handler'])
        expected['global_service_event_handler'] = 'None'
        self.assertEqual(expected, alignak)
Code example #10
    def test_00_refused_program_status_brok(self):
        """Test with a bad formatted brok for the program status

        :return: None
        """
        # Get alignak endpoint resources before the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        for alignak_cfg in all_alignak['_items']:
            print(("Alignak cfg: %s" % alignak_cfg))
        # No alignak configuration resource
        self.assertEqual(0, len(all_alignak['_items']))

        # Get a BAD program status brok
        brok_data = {
            # Some general information

            ### Missing alignak_name property!
            # 'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()

        # Send program status brok
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # Still no alignak configuration resource
        self.assertEqual(0, len(all_alignak['_items']))

        # Get a GOOD program status brok
        brok_data = {
            # Some general information

            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()

        # The module has no default realm ... this should never happen!
        self.brokmodule.default_realm = None

        # Send program status brok
        self.brokmodule.manage_brok(brok)

        # Get alignak endpoint resources after the brok
        name = 'my_alignak'
        params = {'sort': '_id', 'where': '{"name": "%s"}' % name}
        all_alignak = self.backend.get_all('alignak', params)
        # Still no alignak configuration resource
        self.assertEqual(0, len(all_alignak['_items']))
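
Both refusals above come down to the same early-exit guard: before touching the backend, the module verifies that the brok carries the mandatory alignak_name property and that a default realm is available, and silently gives up otherwise. A minimal sketch of that guard pattern, using a hypothetical accept_program_status() helper (the real module method names may differ):

import logging

logger = logging.getLogger(__name__)


def accept_program_status(brok_data, default_realm):
    """Return True when an update_program_status brok may be written to
    the backend, False when it must be refused."""
    # Without the alignak_name property, the brok cannot be mapped to an
    # `alignak` backend resource: refuse it silently
    if 'alignak_name' not in brok_data:
        logger.warning("Ignoring a program status brok without an alignak_name")
        return False
    # Without a default realm, the resource cannot be created either
    if default_realm is None:
        logger.error("No default realm, cannot create the alignak resource")
        return False
    return True

Either refusal leaves the backend untouched, which is exactly what the two assertEqual(0, len(all_alignak['_items'])) checks verify.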
Code example #11
    def test_inner_module_configuration(self):
        """ Test that inner metrics module may be configured in Alignak configuration

        With this configuration, hosts/services cache is enabled and tested. Broks for
        unknown hosts/services are ignored.

        :return: None
        """
        with requests_mock.mock() as mr:
            mr.get("http://localhost:8086/ping",
                   json={"results": [{
                       "statement_id": 0,
                       "version": "1.7.2"
                   }]},
                   status_code=204,
                   headers={"x-influxdb-version": "1.7.2"})
            mr.get("http://localhost:8086/query?q=SHOW+DATABASES&db=alignak",
                   json={
                       "results": [{
                           "statement_id":
                           0,
                           "series": [{
                               "name": "databases",
                               "columns": ["name"],
                               "values": [["_internal"]]
                           }]
                       }]
                   })
            mr.get("http://localhost:8086/query?q=SHOW+DATABASES&db=alignak",
                   json={"results": [{
                       "statement_id": 0
                   }]})
            mr.post(
                "http://localhost:8086/query?q=CREATE+DATABASE+%22alignak%22&db=alignak",
                json={"results": [{
                    "statement_id": 0
                }]})
            mr.post(
                "http://localhost:8086/query?q=CREATE+RETENTION+POLICY+%22alignak%22+ON+%22alignak%22+DURATION+1y+REPLICATION+1+SHARD+DURATION+0s&db=alignak",
                json={"results": [{
                    "statement_id": 0
                }]})
            mr.post("http://localhost:8086/write?db=alignak",
                    status_code=204,
                    json={"results": [{
                        "statement_id": 0
                    }]})

            self.setup_with_file('cfg/cfg_metrics.cfg',
                                 'cfg/inner_metrics/alignak.ini')

            # Specific configuration enables the module
            assert self._scheduler.pushed_conf.process_performance_data is True
            assert self._scheduler.pushed_conf.host_perfdata_file == 'go-hosts'
            assert self._scheduler.pushed_conf.service_perfdata_file == 'go-services'
            assert 1 == len(self._broker_daemon.modules)

            self.show_logs()

            # The declared module instance
            my_module = self._broker_daemon.modules[0]
            print(my_module)
            # Generic stuff
            assert my_module.python_name == 'alignak.modules.inner_metrics'
            assert my_module.type == 'metrics'
            # assert my_module.alias == 'inner-metrics'
            assert my_module.enabled is True

            # Specific stuff - the content of the configuration parameters
            # When the module is configured in the Alignak configuration, these parameters do not exist!
            # assert my_module.host_perfdata_file == 'go-hosts'
            # assert my_module.service_perfdata_file == 'go-services'
            assert my_module.output_file == '/tmp/alignak-metrics.log'

            self.clear_logs()

            # Module is not yet initialized, let's do it in place of the daemon.
            # Create the modules manager for a daemon type
            self.modules_manager = ModulesManager(self._broker_daemon)

            # Load and initialize the modules:
            #  - load python module
            #  - get module properties and instances
            self.modules_manager.load_and_init([my_module])

            self.show_logs()

            # self.assert_log_match(
            #     "Targets configuration: graphite: True, influxdb: True, "
            #     "file: /tmp/alignak-metrics.log", 10)
            #
            self.assert_log_match(
                "targets configuration: graphite: True, influxdb: True, "
                "file: /tmp/alignak-metrics.log", 11)

            self.assert_log_match(
                "Storing metrics in an output file is configured. Do not forget "
                "to regularly clean this file to avoid important disk usage!",
                12)

            self.assert_log_match("Trying to initialize module: inner-metrics",
                                  24)

            self.assert_log_match(
                "testing storage to /tmp/alignak-metrics.log ...", 25)
            self.assert_log_match("Ok", 26)

            self.assert_log_match(
                "testing connection to InfluxDB localhost:8086 ...", 27)
            self.assert_log_match("connected, InfluxDB version 1.7.2", 28)
            self.assert_log_match(
                "testing connection to Graphite localhost:2004 ...", 29)
            self.assert_log_match("Ok", 30)

            self.assert_log_match("creating database alignak...", 31)
            # self.assert_log_match("creating database retention policy: alignak - 1y - 1...", 32)
            # self.assert_log_match("Ok", 33)

            self.assert_log_match("Module inner-metrics is initialized.", 32)

            # Module is an internal one (no external process) in the broker daemon modules manager
            my_module = self._broker_daemon.modules_manager.instances[0]
            assert my_module.is_external is False

            # Known hosts/services cache is empty
            assert my_module.hosts_cache == {}
            assert my_module.services_cache == {}

            # File output - we still got a metric for the connection test!
            assert os.path.exists('/tmp/alignak-metrics.log')
            with open('/tmp/alignak-metrics.log') as f:
                lines = f.readlines()
                first_line = False
                for line in lines:
                    assert 3 == len(line.split(';'))
                    # Only the first line is the connection test metric
                    if not first_line:
                        first_line = True
                        line = line.strip()
                        metric = line.split(';')
                        assert metric[0] == metric[2]
                        assert metric[1] == 'connection-test'
                    print(line)
                # Some metrics were stored
                assert 2 == len(lines)

            # When the broker daemon receives a Brok, it is propagated to the module

            # Host check result
            self.clear_logs()
            hcr = {
                "host_name": "srv001",
                "last_time_unreachable": 0,
                "last_problem_id": 0,
                "passive_check": False,
                "retry_interval": 1,
                "last_event_id": 0,
                "problem_has_been_acknowledged": False,
                "command_name": "pm-check_linux_host_alive",
                "last_state": "UP",
                "latency": 0.2317881584,
                "last_state_type": "HARD",
                "last_hard_state_change": 1444427108,
                "last_time_up": 0,
                "percent_state_change": 0.0,
                "state": "DOWN",
                "last_chk": 1444427104,
                "last_state_id": 0,
                "end_time": 0,
                "timeout": 0,
                "current_event_id": 10,
                "execution_time": 3.1496069431000002,
                "start_time": 0,
                "return_code": 2,
                "state_type": "SOFT",
                "output": "CRITICAL - Plugin timed out after 10 seconds",
                "in_checking": True,
                "early_timeout": 0,
                "in_scheduled_downtime": False,
                "attempt": 0,
                "state_type_id": 1,
                "acknowledgement_type": 1,
                "last_state_change": 1444427108.040841,
                "last_time_down": 1444427108,
                "instance_id": 0,
                "long_output": "",
                "current_problem_id": 0,
                "check_interval": 5,
                "state_id": 2,
                "has_been_checked": 1,
                "perf_data": "uptime=1200;rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
            }
            b = Brok({'data': hcr, 'type': 'host_check_result'}, False)
            self._broker_daemon.manage_brok(b)
            self.show_logs()
            self.assert_log_count(2)
            self.assert_log_match("host check result: srv001", 0)
            self.assert_log_match(
                "received host check result for an unknown host: srv001", 1)

            # Service check result
            self.clear_logs()
            scr = {
                "host_name": "srv001",
                "service_description": "ping",
                "command_name": "ping",
                "attempt": 1,
                "execution_time": 3.1496069431000002,
                "latency": 0.2317881584,
                "return_code": 2,
                "state": "OK",
                "state_type": "HARD",
                "state_id": 0,
                "state_type_id": 1,
                "output": "PING OK - Packet loss = 0%, RTA = 0.05 ms",
                "long_output": "Long output ...",
                "perf_data":
                "rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0",
                "passive_check": False,
                "problem_has_been_acknowledged": False,
                "acknowledgement_type": 1,
                "in_scheduled_downtime": False,
                "last_chk": 1473597375,
                "last_state_change": 1444427108.147903,
                "last_state_id": 0,
                "last_state": "UNKNOWN",
                "last_state_type": "HARD",
                "last_hard_state_change": 0.0,
                "last_time_unknown": 0,
                "last_time_unreachable": 0,
                "last_time_critical": 1473597376,
                "last_time_warning": 0,
                "last_time_ok": 0,
                "retry_interval": 2,
                "percent_state_change": 4.1,
                "check_interval": 5,
                "in_checking": False,
                "early_timeout": 0,
                "instance_id": "3ac88dd0c1c04b37a5d181622e93b5bc",
                "current_event_id": 1,
                "last_event_id": 0,
                "current_problem_id": 1,
                "last_problem_id": 0,
                "timeout": 0,
                "has_been_checked": 1,
                "start_time": 0,
                "end_time": 0
            }
            b = Brok({'data': scr, 'type': 'service_check_result'}, False)
            self._broker_daemon.manage_brok(b)
            self.show_logs()
            self.assert_log_count(2)
            self.assert_log_match("service check result: srv001/ping", 0)
            self.assert_log_match(
                "received service check result for an unknown host", 1)

            # Initial host status
            self.clear_logs()
            hcr = {
                "host_name": "srv001",
            }
            b = Brok({'data': hcr, 'type': 'initial_host_status'}, False)
            self._broker_daemon.manage_brok(b)
            self.show_logs()
            # The module inner cache stored the host
            assert 'srv001' in my_module.hosts_cache
            assert my_module.hosts_cache['srv001'] == {'realm_name': 'All'}
            assert my_module.services_cache == {}

            # Initial service status
            self.clear_logs()
            hcr = {"host_name": "srv001", "service_description": "disks"}
            b = Brok({'data': hcr, 'type': 'initial_service_status'}, False)
            self._broker_daemon.manage_brok(b)
            self.show_logs()
            # The module inner cache stored the host and the service
            assert 'srv001' in my_module.hosts_cache
            assert my_module.hosts_cache['srv001'] == {'realm_name': 'All'}
            assert 'srv001/disks' in my_module.services_cache
            assert my_module.services_cache['srv001/disks'] == {}

            # Now the host srv001 is known to the module, let's raise a host brok

            # Host check result
            self.clear_logs()
            hcr = {
                "host_name": "srv001",
                "last_time_unreachable": 0,
                "last_problem_id": 0,
                "passive_check": False,
                "retry_interval": 1,
                "last_event_id": 0,
                "problem_has_been_acknowledged": False,
                "command_name": "pm-check_linux_host_alive",
                "last_state": "UP",
                "latency": 0.2317881584,
                "last_state_type": "HARD",
                "last_hard_state_change": 1444427108,
                "last_time_up": 0,
                "percent_state_change": 0.0,
                "state": "DOWN",
                "last_chk": 1444427104,
                "last_state_id": 0,
                "end_time": 0,
                "timeout": 0,
                "current_event_id": 10,
                "execution_time": 3.1496069431000002,
                "start_time": 0,
                "return_code": 2,
                "state_type": "SOFT",
                "output": "CRITICAL - Plugin timed out after 10 seconds",
                "in_checking": True,
                "early_timeout": 0,
                "in_scheduled_downtime": False,
                "attempt": 0,
                "state_type_id": 1,
                "acknowledgement_type": 1,
                "last_state_change": 1444427108.040841,
                "last_time_down": 1444427108,
                "instance_id": 0,
                "long_output": "",
                "current_problem_id": 0,
                "check_interval": 5,
                "state_id": 2,
                "has_been_checked": 1,
                "perf_data": "uptime=1200 rta=0.049000ms;2.000000;3.000000;0.000000 pl=0%;50;80;0"
            }
            b = Brok({'data': hcr, 'type': 'host_check_result'}, False)
            self._broker_daemon.manage_brok(b)

            self.show_logs()
            self.assert_log_count(9)
            self.assert_log_match("host check result: srv001", 0)
            self.assert_log_match("service: host_check, metric: ", 1)
            self.assert_log_match("service: host_check, metric: ", 2)
            self.assert_log_match("service: host_check, metric: ", 3)
            self.assert_log_match("Metrics: host_check - ", 4)
            self.assert_log_match("Data: ", 5)
            self.assert_log_match("Flushing 1 metrics to Graphite/carbon", 6)
            self.assert_log_match("Flushing 1 metrics to InfluxDB", 7)
            self.assert_log_match(
                "Storing 1 metrics to /tmp/alignak-metrics.log", 8)

            # Service check result
            self.clear_logs()
            scr = {
                "host_name": "srv001",
                "service_description": "disks",
                "last_time_unreachable": 0,
                "last_problem_id": 0,
                "passive_check": False,
                "retry_interval": 1,
                "last_event_id": 0,
                "problem_has_been_acknowledged": False,
                "command_name": "pm-check_linux_disks",
                "last_state": "UP",
                "latency": 0.2317881584,
                "last_state_type": "HARD",
                "last_hard_state_change": 1444427108,
                "last_time_up": 0,
                "percent_state_change": 0.0,
                "state": "OK",
                "last_chk": 1444427104,
                "last_state_id": 0,
                "end_time": 0,
                "timeout": 0,
                "current_event_id": 10,
                "execution_time": 3.1496069431000002,
                "start_time": 0,
                "return_code": 2,
                "state_type": "SOFT",
                "output": "DISK OK - free space: / 3326 MB (56%); / 15272 MB (77%);/boot 68 MB (69%);"
                          "/home 69357 MB (27%);/var/log 819 MB (84%);",
                "in_checking": True,
                "early_timeout": 0,
                "in_scheduled_downtime": False,
                "attempt": 0,
                "state_type_id": 1,
                "acknowledgement_type": 1,
                "last_state_change": 1444427108.040841,
                "last_time_down": 1444427108,
                "instance_id": 0,
                "long_output": "",
                "current_problem_id": 0,
                "check_interval": 5,
                "state_id": 2,
                "has_been_checked": 1,
                "perf_data": "/=2643MB;5948;5958;0;5968 /boot=68MB;88;93;0;98 "
                             "/home=69357MB;253404;253409;0;253414 /var/log=818MB;970;975;0;980"
            }
            b = Brok({'data': scr, 'type': 'service_check_result'}, False)
            self._broker_daemon.manage_brok(b)

            self.show_logs()
            self.assert_log_count(10)
            self.assert_log_match("service check result: srv001/disks", 0)
            self.assert_log_match(re.escape("service: disks, metric: "), 1)
            self.assert_log_match(re.escape("service: disks, metric: "), 2)
            self.assert_log_match(re.escape("service: disks, metric: "), 3)
            self.assert_log_match(re.escape("service: disks, metric: "), 4)
            self.assert_log_match(re.escape("Metrics: disks - "), 5)
            self.assert_log_match("Data: ", 6)
            self.assert_log_match("Flushing 1 metrics to Graphite/carbon", 7)
            self.assert_log_match("Flushing 1 metrics to InfluxDB", 8)
            self.assert_log_match(
                "Storing 1 metrics to /tmp/alignak-metrics.log", 9)

            # Metrics count

            # File output
            assert os.path.exists('/tmp/alignak-metrics.log')
            with open('/tmp/alignak-metrics.log') as f:
                lines = f.readlines()
                first_line = False
                for line in lines:
                    line = line.strip()
                    assert 3 == len(line.split(';'))
                    print(line)
                    if not first_line:
                        first_line = True
                        metric = line.split(';')
                        assert metric[0] == metric[2]
                        assert metric[1] == 'connection-test'
                # Some metrics were stored!
                assert 33 == len(lines)
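
The metrics extracted from these check results all come from the Nagios-style perf_data strings ("/=2643MB;5948;5958;0;5968 ..."). As an illustration only, not the inner_metrics module's actual parser, such a string can be split into (name, value, unit) tuples like this:

import re


def parse_perf_data(perf_data):
    """Split a Nagios-style perf_data string into (name, value, uom) tuples.

    Each space-separated chunk is 'name=value[uom];warn;crit;min;max';
    only the first semicolon-separated field carries the measured value.
    """
    metrics = []
    for chunk in perf_data.split(' '):
        if '=' not in chunk:
            continue
        name, data = chunk.split('=', 1)
        value = data.split(';')[0]
        # Separate the numeric part from the unit of measure (MB, ms, %...)
        match = re.match(r'^(-?[\d.]+)(.*)$', value)
        if not match:
            continue
        metrics.append((name, float(match.group(1)), match.group(2)))
    return metrics

For the service brok above, parse_perf_data would yield four metrics ('/', '/boot', '/home' and '/var/log'), matching the four "service: disks, metric:" log lines asserted in the test.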
Code example #12
File: log.py Project: jbiousse/alignak
def make_monitoring_log(level, message, timestamp=None, to_logger=False):
    """
    Function used to build the monitoring log.

    Either emit a log message with the provided level to the monitoring log logger,
    or build a Brok typed as monitoring_log with the provided message.

    When to_logger is True, the information is sent to the Python logger, else a monitoring_log
    Brok is returned. The Brok is managed by the daemons to build an Event that will be logged
    by the Arbiter when it collects all the events.

    TODO: replace with dedicated brok for each event to log - really useful?

    :param level: log level as defined in logging
    :type level: str
    :param message: message to send to the monitoring log logger
    :type message: str
    :param timestamp: if set, force the log event timestamp
    :param to_logger: when set, send to the logger, else return a brok
    :type to_logger: bool
    :return: a monitoring_log Brok
    :rtype: alignak.brok.Brok
    """
    level = level.lower()
    if level not in ['debug', 'info', 'warning', 'error', 'critical']:
        return False

    if to_logger:
        logging.getLogger(ALIGNAK_LOGGER_NAME).debug("Monitoring log: %s / %s",
                                                     level, message)

        # Emit to our monitoring log logger
        message = message.replace('\r', '\\r')
        message = message.replace('\n', '\\n')
        logger_ = logging.getLogger(MONITORING_LOGGER_NAME)
        logging_function = getattr(logger_, level)
        try:
            message = message.decode('utf8', 'ignore')
        except UnicodeDecodeError:
            # Python 2 bytes that cannot be decoded
            pass
        except AttributeError:
            # Python 3 str objects have no decode method
            pass

        if timestamp:
            st = datetime.datetime.fromtimestamp(timestamp).strftime(
                '%Y-%m-%d %H:%M:%S')
            logging_function(message, extra={'my_date': st})
        else:
            logging_function(message)

        return True

    # ... and returns a brok
    return Brok({
        'type': 'monitoring_log',
        'data': {
            'level': level,
            'message': message
        }
    })
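
For illustration, both modes of this function can be exercised as below. This is a minimal sketch, assuming make_monitoring_log is importable from alignak.log and that a Brok built with the dict-based constructor used above exposes its type attribute:

from alignak.log import make_monitoring_log

# Brok mode (default): the caller gets a monitoring_log Brok to dispatch
brok = make_monitoring_log('info', 'TIMEPERIOD TRANSITION: 24x7;-1;1')
assert brok.type == 'monitoring_log'

# Logger mode: the message goes straight to the monitoring logger
make_monitoring_log('warning', 'A service notification was dropped', to_logger=True)

# An unknown level is rejected
assert make_monitoring_log('verbose', 'this is ignored') is False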
Code example #13
    def test_alignak_configuration(self):
        """Test alignak configuration reading

        :return:
        """
        # Start broker module
        modconf = Module()
        modconf.module_alias = "backend_broker"
        modconf.username = "******"
        modconf.password = "******"
        modconf.api_url = 'http://127.0.0.1:5000'
        self.brokmodule = AlignakBackendBroker(modconf)

        # Get a program status brok
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': None,
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': None,
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()

        # -------
        # Configure the module to manage this brok (the default is to ignore it)!
        self.brokmodule.manage_update_program_status = True

        # Send program status brok
        self.brokmodule.manage_brok(brok)
        # This has created an `alignak` resource...

        # Now we call the Arbiter hook function to get this created configuration
        # Will get all the `alignak` resources because no arbiter name is defined ...
        fake_arb = Arbiter()
        self.arbmodule.hook_read_configuration(fake_arb)
        configuration = self.arbmodule.get_alignak_configuration()
        print(("Configuration: %s" % configuration))
        expected = brok_data.copy()
        print(("Expected: %s" % expected))
        expected['name'] = expected.pop('alignak_name')
        # Some fields are set to their default values by the backend
        configuration.pop('_created')
        configuration.pop('_updated')
        configuration.pop('_id')
        configuration.pop('_etag')
        configuration.pop('_realm')
        configuration.pop('_sub_realm')
        configuration.pop('_links')
        configuration.pop('schema_version')
        # TODO: these new fields still need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            configuration.pop(field_name)
        expected['alias'] = expected['name']
        expected['notes'] = ''
        expected['notes_url'] = ''
        expected['global_host_event_handler'] = str(expected['global_host_event_handler'])
        expected['global_service_event_handler'] = 'None'
        self.assertEqual(configuration, expected)

        # Get another program status brok
        brok_data = {
            # Some general information
            'alignak_name': 'my_alignak_2',
            'instance_id': '176064a1b30741d39452415097807ab0',
            'instance_name': 'scheduler-master',

            # Some running information
            'program_start': 1493969754,
            'daemon_mode': 1,
            'pid': 68989,
            'last_alive': 1493970641,
            'last_command_check': 1493970641,
            'last_log_rotation': 1493970641,
            'is_running': 1,

            # Some configuration parameters
            'process_performance_data': True,
            'passive_service_checks_enabled': True,
            'event_handlers_enabled': True,
            'command_file': '',
            'global_host_event_handler': 'None',
            'interval_length': 60,
            'modified_host_attributes': 0,
            'check_external_commands': True,
            'modified_service_attributes': 0,
            'passive_host_checks_enabled': True,
            'global_service_event_handler': 'None',
            'notifications_enabled': True,
            'check_service_freshness': True,
            'check_host_freshness': True,
            'flap_detection_enabled': True,
            'active_service_checks_enabled': True,
            'active_host_checks_enabled': True
        }
        brok = Brok({'type': 'update_program_status', 'data': brok_data})
        brok.prepare()

        # Send program status brok
        self.brokmodule.manage_brok(brok)
        # This has created an `alignak` resource...

        # Now we call the Arbiter hook function to get this created configuration
        # Get the configuration for a specific arbiter / alignak
        # It will be the first one created
        fake_arb = Arbiter(arbiter_name='my_alignak')
        self.arbmodule.hook_read_configuration(fake_arb)
        configuration = self.arbmodule.get_alignak_configuration()
        # Some fields are set to their default values by the backend
        configuration.pop('_created')
        configuration.pop('_updated')
        configuration.pop('_id')
        configuration.pop('_etag')
        configuration.pop('_realm')
        configuration.pop('_sub_realm')
        configuration.pop('_links')
        configuration.pop('schema_version')
        # TODO: these new fields still need to be added to the alignak brok creation
        for field_name in ['use_timezone',
                           'illegal_macro_output_chars', 'illegal_object_name_chars',
                           'cleaning_queues_interval', 'max_plugins_output_length',
                           'enable_environment_macros', 'log_initial_states', 'log_active_checks',
                           'log_host_retries', 'log_service_retries', 'log_passive_checks',
                           'log_notifications', 'log_event_handlers', 'log_external_commands',
                           'log_flappings', 'log_snapshots', 'enable_notifications',
                           'notification_timeout', 'timeout_exit_status', 'execute_host_checks',
                           'max_host_check_spread', 'host_check_timeout',
                           'check_for_orphaned_hosts', 'execute_service_checks',
                           'max_service_check_spread', 'service_check_timeout',
                           'check_for_orphaned_services', 'flap_history', 'low_host_flap_threshold',
                           'high_host_flap_threshold', 'low_service_flap_threshold',
                           'high_service_flap_threshold', 'event_handler_timeout',
                           'no_event_handlers_during_downtimes', 'host_perfdata_command',
                           'service_perfdata_command', 'accept_passive_host_checks',
                           'host_freshness_check_interval', 'accept_passive_service_checks',
                           'service_freshness_check_interval', 'additional_freshness_latency']:
            configuration.pop(field_name)
        self.assertEqual(configuration, expected)
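
Both comparisons in this test pop the same Eve metadata fields before checking the document against the brok payload. A sketch of how that repetition could be factored out; the helper below is hypothetical and not part of the module:

EVE_META_FIELDS = ('_created', '_updated', '_id', '_etag',
                   '_realm', '_sub_realm', '_links', 'schema_version')


def strip_eve_meta(document):
    """Return a copy of a backend document without the Eve metadata
    fields, so it can be compared with the brok payload that created it."""
    document = dict(document)
    for field_name in EVE_META_FIELDS:
        document.pop(field_name, None)
    return document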