Example 1
    def test_real(self):
        args = {
            'env_file': 'cfg/monitor/simple.ini',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.alignak_monitor = "http://alignak-mos-ws.kiosks.ipmfrance.com"
        my_arbiter.alignak_monitor_username = '******'
        my_arbiter.alignak_monitor_password = '******'

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s" % [
            "%s:%s" % (link.address, link.port)
            for link in my_dispatcher.all_daemons_links
        ])

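        # Presumably report the Alignak daemons states as passive checks to the monitor
        # configured above; details=False limits the reported information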
        my_arbiter.push_passive_check(details=False)
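
A minimal sketch (not part of the original test), assuming the requests_mock library already used in the other examples: the call to push_passive_check() can be kept from reaching a real host by mocking the HTTP layer. The monitor URL below is hypothetical and a catch-all matcher is used because the monitor Web Service endpoints and expected responses are not shown in this example.

    import requests_mock

    with requests_mock.mock() as mr:
        # Catch-all matcher: any method on any URL gets an empty JSON response
        mr.register_uri(requests_mock.ANY, requests_mock.ANY, json={})
        my_arbiter.alignak_monitor = "http://my-alignak-monitor"  # hypothetical URL
        my_arbiter.push_passive_check(details=False)
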
Example 2
    def setup_with_file(self, paths, add_default=True):
        self.time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        if not isinstance(paths, list):
            paths = [paths]  # Fix for modules tests
            add_default = False # Don't mix config
        if add_default:
            paths.insert(0, 'etc/alignak_1r_1h_1s.cfg')
        self.config_files = paths
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()

        # If we got one arbiter defined here (before the default one), we are probably in a
        # case where the tester wants to load/test a module, so we simulate an arbiter daemon
        # and the modules loading phase. As it has its own modules manager, this should
        # not impact the scheduler modules, especially because we are asking for the arbiter type :)
        if len(self.conf.arbiters) == 1:
            arbdaemon = Arbiter([''], [''], False, False, None, None)
            # Only load if the modules_dir really exists, i.e. it was set explicitly
            # in the test configuration
            if os.path.exists(getattr(self.conf, 'modules_dir', '')):
                arbdaemon.modules_dir = self.conf.modules_dir
                arbdaemon.load_modules_manager()
            
                # We request the instances without them being *started*
                # (for those that are concerned, the "external" modules):
                # we will *start* these instances after we have been daemonized (if requested)
                me = None
                for arb in self.conf.arbiters:
                    me = arb
                    arbdaemon.modules_manager.set_modules(arb.modules)
                    arbdaemon.do_load_modules()
                    arbdaemon.load_modules_configuration_objects(raw_objects)

        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
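        # Standard configuration preparation pipeline: link templates, apply inheritance,
        # explode hosts/services, fill defaults, linkify objects, apply dependencies and
        # build the business rules before checking the whole configuration correctness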
        self.conf.load_triggers()
        #import pdb;pdb.set_trace()
        self.conf.linkify_templates()
        #import pdb;pdb.set_trace()
        self.conf.apply_inheritance()
        #import pdb;pdb.set_trace()
        self.conf.explode()
        #print "Aconf.services has %d elements" % len(self.conf.services)
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        #print "conf.services has %d elements" % len(self.conf.services)
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print("The conf is not correct, I stop here")
            self.conf.dump()
            return
        self.conf.clean()

        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)

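        # Create a scheduler daemon, load its modules manager and give it the prepared
        # configuration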
        scheddaemon = Alignak(None, False, False, False, None, None)
        self.scheddaemon = scheddaemon
        self.sched = scheddaemon.sched
        scheddaemon.modules_dir = modules_dir
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()
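        # Initialize the macro resolver and the external commands managers: an 'applyer'
        # attached to the scheduler and a 'dispatcher' attached to this arbiter-like class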
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.conf.accept_passive_unknown_check_results = False

        self.sched.schedule()
Example 3
    def _dispatching(self, env_filename='cfg/dispatcher/simple.ini', loops=3, multi_realms=False):
        """ Dispatching process: prepare, check, dispatch

        This function performs all the dispatching operations:
        - load a monitoring configuration
        - prepare the dispatching
        - dispatch
        - check that the dispatching is correct, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the whole arbiter configuration. This would really be too complex to assert on :(

        Schedulers must have a port number ending with 7768 (e.g. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (e.g. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename, 'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True
        # logging.getLogger('alignak').setLevel(logging.DEBUG)

        objects_map = {}
        for _, _, strclss, _, _ in list(my_arbiter.conf.types_creations.values()):
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            objects_map[strclss] = {'count': len(objects_list), 'str': str(objects_list)}
            # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))

        # Freeze the time !
        initial_datetime = datetime.datetime.now()
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime

            # #1 - Get a new dispatcher
            my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
            print("*** All daemons WS: %s"
                  % ["%s:%s" % (link.address, link.port)
                     for link in my_dispatcher.all_daemons_links])

            assert my_dispatcher.dispatch_ok is False
            assert my_dispatcher.new_to_dispatch is False
            assert my_dispatcher.first_dispatch_done is False

            self.assert_any_log_match(re.escape("Dispatcher arbiters/satellites map:"))
            for link in my_dispatcher.all_daemons_links:
                self.assert_any_log_match(re.escape(" - %s: %s" % (link.name, link.uri)))

            # Simulate the daemons HTTP interface (very simple simulation !)
            with requests_mock.mock() as mr:
                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/ping' % (link.address, link.port),
                           json='pong')
                    mr.get('http://%s:%s/identity' % (link.address, link.port),
                           json={"running_id": 123456.123456})
                    mr.get('http://%s:%s/wait_new_conf' % (link.address, link.port),
                           json=True)
                    mr.get('http://%s:%s/fill_initial_broks' % (link.address, link.port),
                           json=[])
                    mr.post('http://%s:%s/_push_configuration' % (link.address, link.port),
                            json=True)
                    mr.get('http://%s:%s/managed_configurations' % (link.address, link.port),
                           json={})
                    mr.get('http://%s:%s/do_not_run' % (link.address, link.port),
                           json=True)

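                # Before any dispatching, the links hold no configuration information at all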
                for link in my_dispatcher.all_daemons_links:
                    # print("Satellite: %s / %s" % (link, link.cfg_to_manage))
                    assert not link.hash
                    assert not link.push_flavor
                    assert not link.cfg_to_manage
                    assert not link.cfg_managed

                # #2 - Initialize connection with all our satellites
                for satellite in my_dispatcher.all_daemons_links:
                    assert my_arbiter.daemon_connection_init(satellite)
                # All links have a running identifier
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    assert link.running_id == 123456.123456
                    self.assert_any_log_match(re.escape("got: 123456.123456"))

                # #3 - Check reachable - a configuration is not yet prepared,
                # so only check reachable state
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Not yet configured ...
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(re.escape(
                        "The %s %s do not have a configuration" % (link.type, link.name)
                    ))

                # #3 - Check reachable - daemons got pinged too early...
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(re.escape(
                            "Too early to ping %s" % (link.name)
                        ))
                self.assert_no_log_match(re.escape(
                    "Dispatcher, these daemons are not configured: "
                    "reactionner-master,poller-master,broker-master,receiver-master,"
                    "scheduler-master"
                    ", and a configuration is ready to dispatch, run the dispatching..."
                ))

                # Time warp 5 seconds - overpass the ping period...
                self.clear_logs()
                frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

                # #3 - Check reachable - daemons provide their configuration
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    # Still not configured ...
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(re.escape(
                            "My (%s) fresh managed configuration: {}" % link.name
                        ))

                # #4 - Prepare dispatching
                assert my_dispatcher.new_to_dispatch is False
                my_dispatcher.prepare_dispatch()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is True

                self.assert_any_log_match(re.escape(
                    "All configuration parts are assigned to schedulers and their satellites :)"
                ))
                # All links have a hash, push_flavor and cfg_to_manage
                for link in my_dispatcher.all_daemons_links:
                    print("Link: %s" % link)
                    assert getattr(link, 'hash', None) is not None
                    assert getattr(link, 'push_flavor', None) is not None
                    assert getattr(link, 'cfg_to_manage', None) is not None
                    assert not link.cfg_managed  # Not yet

                # #5 - Check reachable - a configuration is prepared,
                # this will force the daemons communication, no need for a time warp ;)
                my_dispatcher.check_reachable()
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(re.escape(
                            "My (%s) fresh managed configuration: {}" % link.name
                        ))

                self.assert_any_log_match(re.escape(
                    "Dispatcher, these daemons are not configured:"))
                self.assert_any_log_match(re.escape(
                    ", and a configuration is ready to dispatch, run the dispatching..."))

                self.assert_any_log_match(re.escape(
                    "Trying to send configuration to the satellites..."))
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(re.escape(
                        "Sending configuration to the %s %s" % (link.type, link.name)))

                # By now the configuration is prepared and has been dispatched to the daemons!
                # Dispatching again must raise an error: the configuration is already dispatched
                with pytest.raises(DispatcherError):
                    my_dispatcher.dispatch()
                self.show_logs()

                # Use the requests history to check the pushed configuration and to simulate
                # the configuration managed by each daemon...
                history = mr.request_history
                for index, request in enumerate(history):
                    if '_push_configuration' in request.url:
                        received = request.json()
                        print(index, request.url, received)
                        assert ['conf'] == list(received.keys())
                        conf = received['conf']

                        from pprint import pprint
                        pprint(conf)
                        assert 'alignak_name' in conf
                        assert conf['alignak_name'] == 'My Alignak'

                        assert 'self_conf' in conf
                        assert conf['self_conf']
                        i_am = None
                        for link in my_dispatcher.all_daemons_links:
                            if link.type == conf['self_conf']['type'] \
                                    and link.name == conf['self_conf']['name']:
                                i_am = link
                                break
                        else:
                            assert False
                        print(("I am: %s" % i_am))
                        print(("I have: %s" % conf))

                        # All links have a hash, push_flavor and cfg_to_manage
                        assert 'hash' in conf
                        assert 'managed_conf_id' in conf

                        assert 'arbiters' in conf
                        if conf['self_conf']['manage_arbiters']:
                            # All the known arbiters
                            assert list(conf['arbiters'].keys()) == [arbiter_link.uuid for arbiter_link
                                                               in my_dispatcher.arbiters]
                        else:
                            assert conf['arbiters'] == {}

                        assert 'schedulers' in conf
                        # Hack for the managed configurations
                        link.cfg_managed = {}
                        for scheduler_link in list(conf['schedulers'].values()):
                            link.cfg_managed[scheduler_link['instance_id']] = {
                                'hash': scheduler_link['hash'],
                                'push_flavor': scheduler_link['push_flavor'],
                                'managed_conf_id': scheduler_link['managed_conf_id']
                            }
                        print("Managed: %s" % link.cfg_managed)

                        assert 'modules' in conf
                        assert conf['modules'] == []

                        # Spare arbiter specific
                        if '8770/_push_configuration' in request.url:
                            # Spare arbiter receives all the monitored configuration
                            assert 'whole_conf' in conf
                            # String serialized configuration
                            assert isinstance(conf['whole_conf'], string_types)
                            managed_conf_part = unserialize(conf['whole_conf'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf['managed_conf_id']

                            # The spare arbiter got the same objects count as the master arbiter prepared!
                            for _, _, strclss, _, _ in list(managed_conf_part.types_creations.values()):
                                # These elements are not included in the serialized configuration!
                                if strclss in ['hostescalations', 'serviceescalations',
                                               'arbiters', 'schedulers', 'brokers',
                                               'pollers', 'reactionners', 'receivers', 'realms',
                                               'modules', 'hostsextinfo', 'servicesextinfo',
                                               'hostdependencies', 'servicedependencies']:
                                    continue

                                objects_list = getattr(managed_conf_part, strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                # Count and string dump are the same!
                                assert len(objects_list) == objects_map[strclss]['count']
                                assert str(objects_list) == objects_map[strclss]['str']

                        # Scheduler specific
                        elif '7768/_push_configuration' in request.url:
                            assert 'conf_part' in conf
                            # String serialized configuration
                            assert isinstance(conf['conf_part'], string_types)
                            managed_conf_part = unserialize(conf['conf_part'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf['managed_conf_id']

                            # Hack for the managed configurations
                            link.cfg_managed = {
                                conf['instance_id']: {
                                    'hash': conf['hash'],
                                    'push_flavor': conf['push_flavor'],
                                    'managed_conf_id': conf['managed_conf_id']
                                }
                            }
                            print("Managed: %s" % link.cfg_managed)

                            # The scheduler got the same objects count as the arbiter prepared!
                            for _, _, strclss, _, _ in list(managed_conf_part.types_creations.values()):
                                # These elements are not included in the serialized configuration!
                                if strclss in ['hostescalations', 'serviceescalations',
                                               'arbiters', 'schedulers', 'brokers',
                                               'pollers', 'reactionners', 'receivers', 'realms',
                                               'modules', 'hostsextinfo', 'servicesextinfo',
                                               'hostdependencies', 'servicedependencies']:
                                    continue

                                objects_list = getattr(managed_conf_part, strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                if not multi_realms:
                                    # Count and string dump are the same !
                                    assert len(objects_list) == objects_map[strclss]['count']
                                    assert str(objects_list) == objects_map[strclss]['str']

                        else:
                            # Satellites
                            print("I am: ")
                            print(index, request.url, received)
                            assert 'conf_part' not in conf
                            assert 'see_my_schedulers' == conf['managed_conf_id']

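                # Re-register the managed_configurations endpoint so that each daemon now
                # reports the configuration we just simulated as its managed one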
                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/managed_configurations' % (link.address, link.port),
                           json=link.cfg_managed)

                print("Check dispatching:")
                self.clear_logs()
                # assert my_dispatcher.check_dispatch() is True
                dispatched = my_dispatcher.check_dispatch()
                self.show_logs()
                assert dispatched

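                # Several check_reachable loops: within the ping period the daemons are not
                # pinged again, then, once the period has elapsed, their managed
                # configuration is fetched once more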
                for loop_count in range(0, loops):
                    for tw in range(0, 4):
                        # Time warp 1 second
                        frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                        print("Check reachable %s" % tw)
                        self.clear_logs()
                        my_dispatcher.check_reachable()
                        # Only for Python > 2.7, DEBUG logs ...
                        if os.sys.version_info > (2, 7):
                            for link in my_dispatcher.all_daemons_links:
                                if link == my_dispatcher.arbiter_link:
                                    continue
                                self.assert_any_log_match(re.escape(
                                    "Too early to ping %s" % (link.name)
                                ))

                    # Time warp 1 second
                    frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                    print("Check reachable response")
                    self.clear_logs()
                    my_dispatcher.check_reachable()
                    self.show_logs()
                    # Only for Python > 2.7, DEBUG logs ...
                    if os.sys.version_info > (2, 7):
                        for link in my_dispatcher.all_daemons_links:
                            if link == my_dispatcher.arbiter_link:
                                continue
                            self.assert_any_log_match(re.escape(
                                "My (%s) fresh managed configuration: %s"
                                % (link.name, link.cfg_managed)
                            ))
Example 4
class TestDispatcher(AlignakTest):
    """
    This class tests the dispatcher (distribute configuration to satellites)
    """
    def setUp(self):
        """Test starting"""
        super(TestDispatcher, self).setUp()

        # Log at DEBUG level
        self.set_unit_tests_logger_level()

    def _dispatching(self, env_filename='cfg/dispatcher/simple.ini', loops=3, multi_realms=False):
        """ Dispatching process: prepare, check, dispatch

        This function performs all the dispatching operations:
        - load a monitoring configuration
        - prepare the dispatching
        - dispatch
        - check that the dispatching is correct, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the whole arbiter configuration. This would really be too complex to assert on :(

        Schedulers must have a port number ending with 7768 (e.g. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (e.g. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename, 'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True
        # logging.getLogger('alignak').setLevel(logging.DEBUG)

        objects_map = {}
        for _, _, strclss, _, _ in list(my_arbiter.conf.types_creations.values()):
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            objects_map[strclss] = {'count': len(objects_list), 'str': str(objects_list)}
            # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))

        # Freeze the time !
        initial_datetime = datetime.datetime.now()
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime

            # #1 - Get a new dispatcher
            my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
            print("*** All daemons WS: %s"
                  % ["%s:%s" % (link.address, link.port)
                     for link in my_dispatcher.all_daemons_links])

            assert my_dispatcher.dispatch_ok is False
            assert my_dispatcher.new_to_dispatch is False
            assert my_dispatcher.first_dispatch_done is False

            self.assert_any_log_match(re.escape("Dispatcher arbiters/satellites map:"))
            for link in my_dispatcher.all_daemons_links:
                self.assert_any_log_match(re.escape(" - %s: %s" % (link.name, link.uri)))

            # Simulate the daemons HTTP interface (very simple simulation !)
            with requests_mock.mock() as mr:
                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/ping' % (link.address, link.port),
                           json='pong')
                    mr.get('http://%s:%s/identity' % (link.address, link.port),
                           json={"running_id": 123456.123456})
                    mr.get('http://%s:%s/wait_new_conf' % (link.address, link.port),
                           json=True)
                    mr.get('http://%s:%s/fill_initial_broks' % (link.address, link.port),
                           json=[])
                    mr.post('http://%s:%s/_push_configuration' % (link.address, link.port),
                            json=True)
                    mr.get('http://%s:%s/managed_configurations' % (link.address, link.port),
                           json={})
                    mr.get('http://%s:%s/do_not_run' % (link.address, link.port),
                           json=True)

                for link in my_dispatcher.all_daemons_links:
                    # print("Satellite: %s / %s" % (link, link.cfg_to_manage))
                    assert not link.hash
                    assert not link.push_flavor
                    assert not link.cfg_to_manage
                    assert not link.cfg_managed

                # #2 - Initialize connection with all our satellites
                for satellite in my_dispatcher.all_daemons_links:
                    assert my_arbiter.daemon_connection_init(satellite)
                # All links have a running identifier
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    assert link.running_id == 123456.123456
                    self.assert_any_log_match(re.escape("got: 123456.123456"))

                # #3 - Check reachable - a configuration is not yet prepared,
                # so only check reachable state
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Not yet configured ...
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(re.escape(
                        "The %s %s do not have a configuration" % (link.type, link.name)
                    ))

                # #3 - Check reachable - daemons got pinged too early...
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(re.escape(
                            "Too early to ping %s" % (link.name)
                        ))
                self.assert_no_log_match(re.escape(
                    "Dispatcher, these daemons are not configured: "
                    "reactionner-master,poller-master,broker-master,receiver-master,"
                    "scheduler-master"
                    ", and a configuration is ready to dispatch, run the dispatching..."
                ))

                # Time warp 5 seconds - overpass the ping period...
                self.clear_logs()
                frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

                # #3 - Check reachable - daemons provide their configuration
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    # Still not configured ...
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(re.escape(
                            "My (%s) fresh managed configuration: {}" % link.name
                        ))

                # #4 - Prepare dispatching
                assert my_dispatcher.new_to_dispatch is False
                my_dispatcher.prepare_dispatch()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is True

                self.assert_any_log_match(re.escape(
                    "All configuration parts are assigned to schedulers and their satellites :)"
                ))
                # All links have a hash, push_flavor and cfg_to_manage
                for link in my_dispatcher.all_daemons_links:
                    print("Link: %s" % link)
                    assert getattr(link, 'hash', None) is not None
                    assert getattr(link, 'push_flavor', None) is not None
                    assert getattr(link, 'cfg_to_manage', None) is not None
                    assert not link.cfg_managed  # Not yet

                # #5 - Check reachable - a configuration is prepared,
                # this will force the daemons communication, no need for a time warp ;)
                my_dispatcher.check_reachable()
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(re.escape(
                            "My (%s) fresh managed configuration: {}" % link.name
                        ))

                self.assert_any_log_match(re.escape(
                    "Dispatcher, these daemons are not configured:"))
                self.assert_any_log_match(re.escape(
                    ", and a configuration is ready to dispatch, run the dispatching..."))

                self.assert_any_log_match(re.escape(
                    "Trying to send configuration to the satellites..."))
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(re.escape(
                        "Sending configuration to the %s %s" % (link.type, link.name)))

                # By now the configuration is prepared and has been dispatched to the daemons!
                # Dispatching again must raise an error: the configuration is already dispatched
                with pytest.raises(DispatcherError):
                    my_dispatcher.dispatch()
                self.show_logs()

                # Use the requests history to check the pushed configuration and to simulate
                # the configuration managed by each daemon...
                history = mr.request_history
                for index, request in enumerate(history):
                    if '_push_configuration' in request.url:
                        received = request.json()
                        print(index, request.url, received)
                        assert ['conf'] == list(received.keys())
                        conf = received['conf']

                        from pprint import pprint
                        pprint(conf)
                        assert 'alignak_name' in conf
                        assert conf['alignak_name'] == 'My Alignak'

                        assert 'self_conf' in conf
                        assert conf['self_conf']
                        i_am = None
                        for link in my_dispatcher.all_daemons_links:
                            if link.type == conf['self_conf']['type'] \
                                    and link.name == conf['self_conf']['name']:
                                i_am = link
                                break
                        else:
                            assert False
                        print(("I am: %s" % i_am))
                        print(("I have: %s" % conf))

                        # All links have a hash, push_flavor and cfg_to_manage
                        assert 'hash' in conf
                        assert 'managed_conf_id' in conf

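                        # Only the daemons that manage arbiters receive the list of the known
                        # arbiter links; the others get an empty dict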
                        assert 'arbiters' in conf
                        if conf['self_conf']['manage_arbiters']:
                            # All the known arbiters
                            assert list(conf['arbiters'].keys()) == [arbiter_link.uuid for arbiter_link
                                                               in my_dispatcher.arbiters]
                        else:
                            assert conf['arbiters'] == {}

                        assert 'schedulers' in conf
                        # Hack for the managed configurations
                        link.cfg_managed = {}
                        for scheduler_link in list(conf['schedulers'].values()):
                            link.cfg_managed[scheduler_link['instance_id']] = {
                                'hash': scheduler_link['hash'],
                                'push_flavor': scheduler_link['push_flavor'],
                                'managed_conf_id': scheduler_link['managed_conf_id']
                            }
                        print("Managed: %s" % link.cfg_managed)

                        assert 'modules' in conf
                        assert conf['modules'] == []

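                        # The pushed payload depends on the daemon type: a spare arbiter gets the
                        # whole serialized configuration, a scheduler gets its own configuration
                        # part and the other satellites only get a 'see_my_schedulers' reference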
                        # Spare arbiter specific
                        if '8770/_push_configuration' in request.url:
                            # Spare arbiter receives all the monitored configuration
                            assert 'whole_conf' in conf
                            # String serialized configuration
                            assert isinstance(conf['whole_conf'], string_types)
                            managed_conf_part = unserialize(conf['whole_conf'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf['managed_conf_id']

                            # The spare arbiter got the same objects count as the master arbiter prepared!
                            for _, _, strclss, _, _ in list(managed_conf_part.types_creations.values()):
                                # These elements are not included in the serialized configuration!
                                if strclss in ['hostescalations', 'serviceescalations',
                                               'arbiters', 'schedulers', 'brokers',
                                               'pollers', 'reactionners', 'receivers', 'realms',
                                               'modules', 'hostsextinfo', 'servicesextinfo',
                                               'hostdependencies', 'servicedependencies']:
                                    continue

                                objects_list = getattr(managed_conf_part, strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                # Count and string dump are the same!
                                assert len(objects_list) == objects_map[strclss]['count']
                                assert str(objects_list) == objects_map[strclss]['str']

                        # Scheduler specific
                        elif '7768/_push_configuration' in request.url:
                            assert 'conf_part' in conf
                            # String serialized configuration
                            assert isinstance(conf['conf_part'], string_types)
                            managed_conf_part = unserialize(conf['conf_part'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf['managed_conf_id']

                            # Hack for the managed configurations
                            link.cfg_managed = {
                                conf['instance_id']: {
                                    'hash': conf['hash'],
                                    'push_flavor': conf['push_flavor'],
                                    'managed_conf_id': conf['managed_conf_id']
                                }
                            }
                            print("Managed: %s" % link.cfg_managed)

                            # The scheduler got the same objects count as the arbiter prepared!
                            for _, _, strclss, _, _ in list(managed_conf_part.types_creations.values()):
                                # These elements are not included in the serialized configuration!
                                if strclss in ['hostescalations', 'serviceescalations',
                                               'arbiters', 'schedulers', 'brokers',
                                               'pollers', 'reactionners', 'receivers', 'realms',
                                               'modules', 'hostsextinfo', 'servicesextinfo',
                                               'hostdependencies', 'servicedependencies']:
                                    continue

                                objects_list = getattr(managed_conf_part, strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                if not multi_realms:
                                    # Count and string dump are the same !
                                    assert len(objects_list) == objects_map[strclss]['count']
                                    assert str(objects_list) == objects_map[strclss]['str']

                        else:
                            # Satellites
                            print("I am: ")
                            print(index, request.url, received)
                            assert 'conf_part' not in conf
                            assert 'see_my_schedulers' == conf['managed_conf_id']

                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/managed_configurations' % (link.address, link.port),
                           json=link.cfg_managed)

                print("Check dispatching:")
                self.clear_logs()
                # assert my_dispatcher.check_dispatch() is True
                dispatched = my_dispatcher.check_dispatch()
                self.show_logs()
                assert dispatched

                for loop_count in range(0, loops):
                    for tw in range(0, 4):
                        # Time warp 1 second
                        frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                        print("Check reachable %s" % tw)
                        self.clear_logs()
                        my_dispatcher.check_reachable()
                        # Only for Python > 2.7, DEBUG logs ...
                        if os.sys.version_info > (2, 7):
                            for link in my_dispatcher.all_daemons_links:
                                if link == my_dispatcher.arbiter_link:
                                    continue
                                self.assert_any_log_match(re.escape(
                                    "Too early to ping %s" % (link.name)
                                ))

                    # Time warp 1 second
                    frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                    print("Check reachable response")
                    self.clear_logs()
                    my_dispatcher.check_reachable()
                    self.show_logs()
                    # Only for Python > 2.7, DEBUG logs ...
                    if os.sys.version_info > (2, 7):
                        for link in my_dispatcher.all_daemons_links:
                            if link == my_dispatcher.arbiter_link:
                                continue
                            self.assert_any_log_match(re.escape(
                                "My (%s) fresh managed configuration: %s"
                                % (link.name, link.cfg_managed)
                            ))

    def test_bad_init(self):
        """ Test that:
        - bad configuration
        - two master arbiters
        are not correct and raise an exception!

        :return: None
        """
        args = {
            'env_file': 'cfg/dispatcher/two_master_arbiters.ini',
            'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master'
        }
        self.my_arbiter = Arbiter(**args)

        # Get a new dispatcher - raise an exception
        with pytest.raises(DispatcherError):
            Dispatcher(None, self.my_arbiter.link_to_myself)

        # Get a new dispatcher - raise an exception
        with pytest.raises(DispatcherError):
            Dispatcher(self.my_arbiter.conf, None)

        # Prepare the Alignak configuration
        self.my_arbiter.load_modules_manager()
        self.my_arbiter.load_monitoring_config_file()
        assert self.my_arbiter.conf.conf_is_correct is True

        # Get a new dispatcher - raise an exception (two master arbiters)
        with pytest.raises(DispatcherError):
            Dispatcher(self.my_arbiter.conf, self.my_arbiter.link_to_myself)

    def test_dispatching_simple(self):
        """ Test the dispatching process: simple configuration

        :return: None
        """
        self._dispatching()

    def test_dispatching_multiple_schedulers(self):
        """ Test the dispatching process: 1 realm, 2 schedulers

        :return: None
        """
        self._dispatching('cfg/dispatcher/simple_multi_schedulers.ini', multi_realms=True)

    def test_dispatching_multiple_pollers(self):
        """ Test the dispatching process: 1 realm, 2 pollers

        :return: None
        """
        self._dispatching('cfg/dispatcher/simple_multi_pollers.ini')

    def test_dispatching_multiple_realms(self):
        """ Test the dispatching process: 2 realms, all daemons duplicated

        :return: None
        """
        self._dispatching('cfg/dispatcher/2-realms.ini', multi_realms=True)

    def test_dispatching_multiple_realms_sub_realms(self):
        """ Test the dispatching process: 2 realms, some daemons are sub_realms managers

        realm All:
        * 1 scheduler
        * 1 receiver

        realm realm2:
        * 1 receiver
        * 1 scheduler
        * 1 poller

        realm All + realm2 (sub realm):
        * 1 broker
        * 1 poller
        * 1 reactionner

        realm realm3:
        * 1 receiver
        * 1 scheduler
        * 1 reactionner
        * 1 broker
        * 1 poller

        :return: None
        """
        self._dispatching('cfg/dispatcher/realms_with_sub_realms.ini', multi_realms=True)

    def test_dispatching_multiple_realms_sub_realms_multi_schedulers(self):
        """ Test the dispatching process: 2 realms, some daemons are sub_realms managers and
        we have several schedulers. Daemons marked with (+) have manage_sub_realms=1

        realm All (6 hosts):
        * 2 schedulers (+)

        realm All / All1 (6 hosts):
        * 3 schedulers (+)

        realm All / All1 / All1a (4 hosts):
        * 2 schedulers (+)

        :return: None
        """
        self._dispatching('cfg/dispatcher/realms_with_sub_realms_multi_schedulers.ini',
                          multi_realms=True)

    @pytest.mark.skip("Currently disabled - spare feature - and whatever this test seems broken!")
    def test_dispatching_spare_arbiter(self):
        """ Test the dispatching process: 1 realm, 1 spare arbiter

        :return: None
        """
        self._dispatching('cfg/dispatcher/spare_arbiter.ini')

    @pytest.mark.skip("Currently disabled - spare feature - and whatever this test seems broken!")
    def test_simple_scheduler_spare(self):
        """ Test simple but with spare of scheduler

        :return: None
        """
        with requests_mock.mock() as mockreq:
            for port in ['7768', '7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/ping' % port, json='pong')

            self.setup_with_file('cfg/dispatcher/simple.cfg')
            self.show_logs()
            json_managed = {self._scheduler_daemon.conf.uuid:
                            self._scheduler_daemon.conf.push_flavor}
            for port in ['7768', '7772', '7771', '7769', '7773']:
                mockreq.get('http://localhost:%s/what_i_managed' % port, json=json_managed)
            mockreq.get('http://localhost:8002/what_i_managed', json='{}')

            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch_ok = True

            assert 2 == len(self._arbiter.dispatcher.schedulers)
            assert 4 == len(self._arbiter.dispatcher.satellites)
            master_sched = None
            spare_sched = None
            for scheduler in self._arbiter.dispatcher.schedulers:
                if scheduler.get_name() == 'scheduler-master':
                    scheduler.is_sent = True
                    master_sched = scheduler
                else:
                    spare_sched = scheduler

            assert master_sched.ping
            assert 1 == master_sched.attempt
            assert spare_sched.ping
            assert 0 == spare_sched.attempt

        for satellite in self._arbiter.dispatcher.satellites:
            assert 1 == len(satellite.cfg['schedulers'])
            scheduler = next(iter(satellite.cfg['schedulers'].values()))
            assert 'scheduler-master' == scheduler['name']

        # now simulate master sched down
        master_sched.check_interval = 1
        spare_sched.check_interval = 1
        for satellite in self._arbiter.dispatcher.receivers:
            satellite.check_interval = 1
        for satellite in self._arbiter.dispatcher.reactionners:
            satellite.check_interval = 1
        for satellite in self._arbiter.dispatcher.brokers:
            satellite.check_interval = 1
        for satellite in self._arbiter.dispatcher.pollers:
            satellite.check_interval = 1
        time.sleep(1)

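        # The master scheduler (port 7768) is no longer mocked: its ping fails, so its
        # attempt counter increases at each check until it is considered not alive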
        with requests_mock.mock() as mockreq:
            for port in ['7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/ping' % port, json='pong')

            for port in ['7772', '7771', '7769', '7773']:
                mockreq.get('http://localhost:%s/what_i_managed' % port, json=json_managed)
            mockreq.get('http://localhost:8002/what_i_managed', json='{}')

            for port in ['7772', '7771', '7769', '7773', '8002']:
                mockreq.post('http://localhost:%s/put_conf' % port, json='true')

            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.check_dispatch()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch()
            self._arbiter.dispatcher.check_bad_dispatch()

            assert master_sched.ping
            assert 2 == master_sched.attempt

            time.sleep(1)
            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.check_dispatch()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch()
            self._arbiter.dispatcher.check_bad_dispatch()

            assert master_sched.ping
            assert 3 == master_sched.attempt
            # assert master_sched.alive
            #
            # time.sleep(1)
            # self.arbiter.dispatcher.check_alive()
            # self.arbiter.dispatcher.check_dispatch()
            # self.arbiter.dispatcher.prepare_dispatch()
            # self.arbiter.dispatcher.dispatch()
            # self.arbiter.dispatcher.check_bad_dispatch()

            assert not master_sched.alive

            history = mockreq.request_history
            send_conf_to_sched_master = False
            conf_sent = {}
            for index, hist in enumerate(history):
                if hist.url == 'http://localhost:7768/put_conf':
                    send_conf_to_sched_master = True
                elif hist.url == 'http://localhost:8002/put_conf':
                    conf_sent['scheduler-spare'] = hist.json()
                elif hist.url == 'http://localhost:7772/put_conf':
                    conf_sent['broker'] = hist.json()
                elif hist.url == 'http://localhost:7771/put_conf':
                    conf_sent['poller'] = hist.json()
                elif hist.url == 'http://localhost:7769/put_conf':
                    conf_sent['reactionner'] = hist.json()
                elif hist.url == 'http://localhost:7773/put_conf':
                    conf_sent['receiver'] = hist.json()

            assert not send_conf_to_sched_master, 'Conf to scheduler master must not be sent ' \
                                                  'because it is not alive'
            self.show_logs()
            assert 5 == len(conf_sent)
            assert ['conf'] == list(conf_sent['scheduler-spare'].keys())

            json_managed_spare = {}
            for satellite in self._arbiter.dispatcher.satellites:
                assert 1 == len(satellite.cfg['schedulers'])
                scheduler = next(iter(satellite.cfg['schedulers'].values()))
                assert 'scheduler-spare' == scheduler['name']
                json_managed_spare[scheduler['instance_id']] = scheduler['push_flavor']

        # return of the scheduler master
        print("*********** Return of the king / master ***********")
        with requests_mock.mock() as mockreq:
            for port in ['7768', '7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/ping' % port, json='pong')

            mockreq.get('http://localhost:7768/what_i_managed', json=json_managed)
            for port in ['7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/what_i_managed' % port, json=json_managed_spare)

            for port in ['7768', '7772', '7771', '7769', '7773', '8002']:
                mockreq.post('http://localhost:%s/put_conf' % port, json='true')

            time.sleep(1)
            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.check_dispatch()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch()
            self._arbiter.dispatcher.check_bad_dispatch()

            assert master_sched.ping
            assert 0 == master_sched.attempt

            history = mockreq.request_history
            conf_sent = {}
            for index, hist in enumerate(history):
                if hist.url == 'http://localhost:7768/put_conf':
                    conf_sent['scheduler-master'] = hist.json()
                elif hist.url == 'http://localhost:8002/put_conf':
                    conf_sent['scheduler-spare'] = hist.json()
                elif hist.url == 'http://localhost:7772/put_conf':
                    conf_sent['broker'] = hist.json()
                elif hist.url == 'http://localhost:7771/put_conf':
                    conf_sent['poller'] = hist.json()
                elif hist.url == 'http://localhost:7769/put_conf':
                    conf_sent['reactionner'] = hist.json()
                elif hist.url == 'http://localhost:7773/put_conf':
                    conf_sent['receiver'] = hist.json()

            assert set(['scheduler-master', 'broker', 'poller', 'reactionner',
                        'receiver']) == set(conf_sent.keys())

            for satellite in self._arbiter.dispatcher.satellites:
                assert 1 == len(satellite.cfg['schedulers'])
                scheduler = next(iter(satellite.cfg['schedulers'].values()))
                assert 'scheduler-master' == scheduler['name']
Example 5
class TestLaunchDaemons(AlignakTest):
    def setUp(self):
        super(TestLaunchDaemons, self).setUp()

        self.cfg_folder = '/tmp/alignak'
        self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder)

        files = [
            '%s/etc/alignak.ini' % self.cfg_folder,
            '%s/etc/alignak.d/daemons.ini' % self.cfg_folder,
            '%s/etc/alignak.d/modules.ini' % self.cfg_folder
        ]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

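            # Let the arbiter launch the daemons it does not find alive and mark every
            # daemon as launched by Alignak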
            cfg.set('alignak-configuration', 'launch_missing_daemons', '1')
            cfg.set('daemon.arbiter-master', 'alignak_launched', '1')
            cfg.set('daemon.scheduler-master', 'alignak_launched', '1')
            cfg.set('daemon.poller-master', 'alignak_launched', '1')
            cfg.set('daemon.reactionner-master', 'alignak_launched', '1')
            cfg.set('daemon.receiver-master', 'alignak_launched', '1')
            cfg.set('daemon.broker-master', 'alignak_launched', '1')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

    def tearDown(self):
        # Restore the default test logger configuration
        if 'ALIGNAK_LOGGER_CONFIGURATION' in os.environ:
            del os.environ['ALIGNAK_LOGGER_CONFIGURATION']

        print("Test terminated!")

    def test_arbiter_missing_parameters(self):
        """ Running the Alignak Arbiter with missing command line parameters

        :return:
        """
        print("Launching arbiter with missing parameters...")
        args = ["../alignak/bin/alignak_arbiter.py"]
        arbiter = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_no_environment(self):
        """ Running the Alignak Arbiter without environment file

        :return:
        """
        print("Launching arbiter without environment file...")
        args = ["../alignak/bin/alignak_arbiter.py"]
        arbiter = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_class_no_environment(self):
        """ Instantiate the Alignak Arbiter class without environment file

        :return:
        """
        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter without environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': '',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master',
            'legacy_cfg_files': ['../etc/alignak.cfg']
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        assert self.arbiter.env_filename == ''
        assert self.arbiter.legacy_cfg_files == [
            os.path.abspath('../etc/alignak.cfg')
        ]

        # Configure the logger
        self.arbiter.log_level = 'ERROR'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()

    def test_arbiter_class_env_default(self):
        """ Instantiate the Alignak Arbiter class without legacy cfg files
        :return:
        """
        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter with default environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': "/tmp/alignak/etc/alignak.ini",
            'daemon_name': 'arbiter-master'
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        print("Arbiter: %s" % (self.arbiter.__dict__))
        assert self.arbiter.env_filename == '/tmp/alignak/etc/alignak.ini'
        assert self.arbiter.legacy_cfg_files == []
        assert len(self.arbiter.legacy_cfg_files) == 0

        # Configure the logger
        self.arbiter.log_level = 'INFO'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()
        # No legacy files found
        assert len(self.arbiter.legacy_cfg_files) == 0

    def test_arbiter_unexisting_environment(self):
        """ Running the Alignak Arbiter with a not existing environment file

        :return:
        """
        print("Launching arbiter with a not existing environment file...")
        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            "/tmp/etc/unexisting.ini"
        ]
        arbiter = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        assert b"Daemon 'arbiter-master' did not correctly read " \
               b"Alignak environment file: /tmp/etc/unexisting.ini" in stdout
        # Arbiter process must exit with a return code == 99
        assert ret == 99

    def test_arbiter_no_monitoring_configuration(self):
        """ Running the Alignak Arbiter with no monitoring configuration defined -
        no legacy cfg files

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        ret = self._run_command_with_timeout(args, 30)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'total number of hosts in all realms: 0' in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # No error must have been logged
        assert errors == 0
        assert ok

    def test_arbiter_unexisting_monitoring_configuration(self):
        """ Running the Alignak Arbiter with a not existing monitoring configuration file

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg',
                    '%(etcdir)s/alignak-missing.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'WARNING:' in line and "cannot open main file '/tmp/alignak/etc/alignak-missing.cfg' for reading" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Two errors must have been logged about the missing configuration file
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_bad_configuration(self):
        """ Running the Alignak Arbiter with bad monitoring configuration (unknown sub directory)

        :return:
        """
        print("Launching arbiter with a bad monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        # Update configuration with a bad file name
        files = ['%s/etc/alignak.cfg' % self.cfg_folder]
        replacements = {
            'cfg_dir=arbiter/templates': 'cfg_dir=unexisting/objects/realms'
        }
        self._files_update(files, replacements)

        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'ERROR:' in line and "*** One or more problems were encountered while processing the configuration (first check)..." in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Two errors must have been logged because of the bad configuration
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_i_am_not_configured(self):
        """ Running the Alignak Arbiter with missing arbiter configuration

        :return:
        """
        print("Launching arbiter with a missing arbiter configuration...")

        if os.path.exists('%s/my-arbiter-name.log' % self._launch_dir):
            os.remove('%s/my-arbiter-name.log' % self._launch_dir)

        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder, "-n", "my-arbiter-name"
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        # Note the log filename!
        with open('%s/my-arbiter-name.log' % self._launch_dir) as f:
            for line in f:
                if "I cannot find my own configuration (my-arbiter-name)" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Two errors must have been logged because the arbiter configuration is missing
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_verify(self):
        """ Running the Alignak Arbiter in verify mode only with the default shipped configuration

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ[
            'ALIGNAK_LOGGER_CONFIGURATION'] = './etc/warning_alignak-logger.json'

        print("Launching arbiter in verification mode...")
        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder, "-V"
        ]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        specific_log = False
        info_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'INFO:' in line:
                    info_log = True
                    if 'Arbiter is in configuration check mode' in line:
                        specific_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        # Arbiter changed the log level to INFO because of the verify mode
        assert specific_log is True
        assert info_log is True
        assert errors == 0
        assert ret == 0

    def test_arbiter_parameters_pid(self):
        """ Run the Alignak Arbiter with some parameters - set a pid file

        :return:
        """
        # All the default configuration files are in /tmp/etc

        print("Launching arbiter with forced PID file...")
        if os.path.exists('/tmp/arbiter.pid'):
            os.remove('/tmp/arbiter.pid')

        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder, "-V", "--pid_file",
            "/tmp/arbiter.pid"
        ]
        ret = self._run_command_with_timeout(args, 20)

        # The arbiter unlinks the pid file - I cannot assert it exists!
        # assert os.path.exists('/tmp/arbiter.pid')

        errors = 0
        # ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                # if 'Unlinking /tmp/arbiter.pid' in line:
                #     ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0
        # assert ok

    def test_arbiter_parameters_log(self):
        """ Run the Alignak Arbiter with some parameters - log file name

        :return:
        """
        # All the default configuration files are in /tmp/etc
        print("Launching arbiter with forced log file...")
        if os.path.exists('/tmp/arbiter.log'):
            os.remove('/tmp/arbiter.log')

        args = [
            "../alignak/bin/alignak_arbiter.py", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder, "-V", "-vv", "--log_file",
            "/tmp/arbiter.log"
        ]
        ret = self._run_command_with_timeout(args, 20)

        assert os.path.exists("/tmp/arbiter.log")

        errors = 0
        with open('/tmp/arbiter.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare_missing_configuration(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        print("Launching arbiter in spare mode...")
        args = [
            "../alignak/bin/alignak_arbiter.py", "-a",
            cfg_folder + "/alignak.cfg", "-c",
            cfg_folder + "/daemons/arbiterd.ini", "-n", "arbiter-spare"
        ]
        arbiter = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        sleep(5)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %s" % ret)
        assert ret is not None, "Arbiter is still running!"
        # Arbiter process must exit with a return code == 1
        assert ret == 1

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        print("Launching arbiter in spare mode...")
        args = [
            "../alignak/bin/alignak_arbiter.py", "-a",
            cfg_folder + "/alignak.cfg", "-c",
            cfg_folder + "/daemons/arbiterd.ini", "-n", "arbiter-spare"
        ]
        arbiter = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        ret = arbiter.poll()
        # Arbiter must still be running ... it is still trying to dispatch the configuration!
        assert ret is None, "Arbiter exited!"

        sleep(5)

        # Arbiter never stops trying to send its configuration! We must kill it...

        print("Asking arbiter to end...")
        os.kill(arbiter.pid, signal.SIGTERM)

        ret = arbiter.poll()
        print("*** Arbiter poll result after the kill: %s" % ret)
        assert ret is None, "Arbiter already exited!"
        # No ERRORS because the daemons are not alive !
        ok = 0
        for line in iter(arbiter.stdout.readline, b''):
            print(">>> %s" % line.rstrip())
            if b'INFO:' in line:
                # I must find this line
                if b'[alignak.daemons.arbiterdaemon] I found myself in the configuration: arbiter-spare' in line:
                    ok += 1
                # and this one also
                if b'[alignak.daemons.arbiterdaemon] I am a spare Arbiter: arbiter-spare' in line:
                    ok += 1
                if b'I am not the master arbiter, I stop parsing the configuration' in line:
                    ok += 1
                if b'Waiting for master...' in line:
                    ok += 1
                if b'Waiting for master death' in line:
                    ok += 1
                assert b'CRITICAL:' not in line
        for line in iter(arbiter.stderr.readline, b''):
            print("*** %s" % line.rstrip())
            if sys.version_info > (2, 7):
                assert False, "stderr output!"
        assert ok == 5

    def test_arbiter_normal(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity=None)

    def test_arbiter_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity='--verbose')
        self._arbiter(verbosity='-v')

    def test_arbiter_very_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity='--debug')
        # Execute only once, because it looks too verbose for Travis :/
        # self._arbiter(verbosity='-vv')

    def _arbiter(self, verbosity=None):
        """ Running the Alignak Arbiter with a specific verbosity

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ[
            'ALIGNAK_LOGGER_CONFIGURATION'] = './etc/warning_alignak-logger.json'

        print("Launching arbiter ...")
        args = [
            "../alignak/bin/alignak_arbiter.py", "-n", "arbiter-master", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        if verbosity:
            args.append(verbosity)
        arbiter = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Wait for the arbiter to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

        errors = 0
        info_log = False
        debug_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'DEBUG:' in line:
                    debug_log = True
                if 'INFO:' in line:
                    info_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # arbiter process may exit with no errors!
        # assert errors == 0
        # Arbiter changed the log level to INFO because of the verbose mode
        if verbosity in ['-v', '--verbose']:
            assert info_log is True
        # Arbiter changed the log level to DEBUG because of the debug mode
        if verbosity in ['-vv', '--debug']:
            assert debug_log is True

    def test_broker(self):
        """ Running the Alignak Broker

        :return:
        """
        print("Launching broker ...")
        args = [
            "../alignak/bin/alignak_broker.py", "-n", "broker-master", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        broker = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('broker', broker.pid))

        # Wait for the broker to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7772')

        errors = 0
        with open('/tmp/alignak/log/broker-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # broker process must exit with no errors
        assert errors == 0

    def test_poller(self):
        """ Running the Alignak poller

        :return:
        """
        print("Launching poller ...")
        args = [
            "../alignak/bin/alignak_poller.py", "-n", "poller-master", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        poller = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('poller', poller.pid))

        # Wait for the poller to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7771')

        errors = 0
        with open('/tmp/alignak/log/poller-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # poller process must exit with a return code == 0 and no errors
        assert errors == 0

    def test_reactionner(self):
        """ Running the Alignak reactionner

        :return:
        """
        print("Launching reactionner ...")
        args = [
            "../alignak/bin/alignak_reactionner.py", "-n",
            "reactionner-master", "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        reactionner = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('reactionner', reactionner.pid))

        # Wait for the reactionner to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7769')

        errors = 0
        with open('/tmp/alignak/log/reactionner-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # reactionner process must exit with a return code == 0 and no errors
        assert errors == 0

    def test_receiver(self):
        """ Running the Alignak receiver

        :return:
        """
        print("Launching receiver ...")
        args = [
            "../alignak/bin/alignak_receiver.py", "-n", "receiver-master",
            "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        receiver = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('receiver', receiver.pid))

        # Wait for the receiver to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7773')

        errors = 0
        with open('/tmp/alignak/log/receiver-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # receiver process must exit with a return code == 0 and no errors
        assert errors == 0

    def test_scheduler(self):
        """ Running the Alignak scheduler

        :return:
        """
        print("Launching scheduler ...")

        args = [
            "../alignak/bin/alignak_scheduler.py", "-n", "scheduler-master",
            "-e",
            '%s/etc/alignak.ini' % self.cfg_folder
        ]
        scheduler = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('scheduler', scheduler.pid))

        # Wait for the scheduler to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7768')

        errors = 0
        with open('/tmp/alignak/log/scheduler-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # scheduler process must exit with a return code == 0 and no errors
        assert errors == 0
Example no. 6
0
    def _monitoring(self,
                    env_filename='cfg/monitor/simple.ini',
                    loops=3,
                    multi_realms=False):
        """ monitoring process: prepare, check, dispatch

        This function performs all the monitoring operations:
        - load a monitoring configuration
        - prepare the monitoring
        - dispatch
        - check the correct monitoring, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the arbiter whole configuration. This would really be too complex to assert on :(

        Schedulers must have a port number ending with 7768 (e.g. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (e.g. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s" % [
            "%s:%s" % (link.address, link.port)
            for link in my_dispatcher.all_daemons_links
        ])

        assert my_arbiter.alignak_monitor == "http://super_alignak:7773/ws"
        assert my_arbiter.alignak_monitor_username == 'admin'
        assert my_arbiter.alignak_monitor_password == 'admin'

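        # Build the expected performance data: one counter per configuration objects type
        # (the escalations are not reported)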
        metrics = []
        for _type in sorted(my_arbiter.conf.types_creations):
            _, _, strclss, _, _ = my_arbiter.conf.types_creations[_type]
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            metrics.append("'%s'=%d" % (strclss, len(objects_list)))

        # Simulate the daemons HTTP interface (very simple simulation!)
        with requests_mock.mock() as mr:
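            # The monitor WS is mocked: the arbiter first logs in (POST /login) and then
            # reports its live state (PATCH /host)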
            mr.post('%s/login' % (my_arbiter.alignak_monitor),
                    json={
                        "_status":
                        "OK",
                        "_result":
                        ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                    })
            mr.patch(
                '%s/host' % (my_arbiter.alignak_monitor),
                json={
                    "_status":
                    "OK",
                    "_result":
                    ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                })

            # Time warp 5 seconds - overpass the ping period...
            self.clear_logs()
            # frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

            my_arbiter.get_alignak_status(details=False)

            self.show_logs()

            # Check the requests history to verify what was pushed to the Alignak monitor...
            history = mr.request_history
            for index, request in enumerate(history):
                # Check what is patched on /host ...
                if 'host' in request.url:
                    received = request.json()
                    print((index, request.url, received))

                    from pprint import pprint
                    pprint(received)

                    assert received['name'] == 'My Alignak'
                    assert received['livestate']['timestamp'] == 1519583400
                    assert received['livestate']['state'] == 'up'
                    assert received['livestate'][
                        'output'] == 'Some of my daemons are not reachable.'
                    for metric in metrics:
                        assert metric in received['livestate']['perf_data']
                    print(received['livestate']['long_output'])
                    # Long output is sorted by daemon name
                    assert received['livestate']['long_output'] == \
                           u'broker-master - daemon is not reachable.\n' \
                           u'poller-master - daemon is not reachable.\n' \
                           u'reactionner-master - daemon is not reachable.\n' \
                           u'receiver-master - daemon is not reachable.\n' \
                           u'scheduler-master - daemon is not reachable.'

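                    # Every known daemon link must be reported as a service of the Alignak host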
                    for link in my_dispatcher.all_daemons_links:
                        assert link.name in [
                            service['name'] for service in received['services']
                        ]

                    for service in received['services']:
                        assert 'name' in service
                        assert 'livestate' in service
                        assert 'timestamp' in service['livestate']
                        assert 'state' in service['livestate']
                        assert 'output' in service['livestate']
                        assert 'long_output' in service['livestate']
                        assert 'perf_data' in service['livestate']
Example no. 7
0
    def setup_with_file(self, paths, add_default=True):
        self.time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        if not isinstance(paths, list):
            paths = [paths]  # Fix for modules tests
            add_default = False # Don't mix config
        if add_default:
            paths.insert(0, 'etc/alignak_1r_1h_1s.cfg')
        self.config_files = paths
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()

        # If we got one arbiter defined here (before the default) we are probably in a case where
        # the tester wants to load/test a module, so we simulate an arbiter daemon
        # and the modules loading phase. As it has its own modules manager, this should
        # not impact the scheduler modules, especially since we are asking for the arbiter type :)
        if len(self.conf.arbiters) == 1:
            arbdaemon = Arbiter([''], [''], False, False, None, None)

            arbdaemon.load_modules_manager()

            # we request the instances without them being *started*
            # (for those that are concerned ("external" modules):
            # we will *start* these instances after we have been daemonized (if requested)
            me = None
            for arb in self.conf.arbiters:
                me = arb
                arbdaemon.do_load_modules(arb.modules)
                arbdaemon.load_modules_configuration_objects(raw_objects)

        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        #import pdb;pdb.set_trace()
        self.conf.linkify_templates()
        #import pdb;pdb.set_trace()
        self.conf.apply_inheritance()
        #import pdb;pdb.set_trace()
        self.conf.explode()
        #print "Aconf.services has %d elements" % len(self.conf.services)
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        #print "conf.services has %d elements" % len(self.conf.services)
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            self.conf.dump()
            return
        self.conf.clean()

        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)

        scheddaemon = Alignak(None, False, False, False, None, None)
        self.scheddaemon = scheddaemon
        self.sched = scheddaemon.sched
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.conf.accept_passive_unknown_check_results = False

        self.sched.schedule()