Example #1
    def test_arbiter_class_no_environment(self):
        """ Instantiate the Alignak Arbiter class without environment file

        :return:
        """
        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter without environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': '',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master',
            'legacy_cfg_files': ['../etc/alignak.cfg']
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        assert self.arbiter.env_filename == ''
        assert self.arbiter.legacy_cfg_files == [os.path.abspath('../etc/alignak.cfg')]

        # Configure the logger
        self.arbiter.log_level = 'ERROR'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This is to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()
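
The assertion on legacy_cfg_files above relies on os.path.abspath() resolving the relative
path against the current working directory of the test run; a minimal standard-library
illustration (the resulting path shown is only an example):

import os

# '../etc/alignak.cfg' is resolved against the current working directory,
# so the expected value depends on where the tests are started from.
print(os.path.abspath('../etc/alignak.cfg'))
# e.g. '/home/user/alignak/etc/alignak.cfg' when run from '/home/user/alignak/tests'
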
Example #2
    def test_bad_init(self):
        """ Test that:
        - bad configuration
        - two master arbiters
        are not correct and raise an exception!

        :return: None
        """
        args = {
            'env_file': 'cfg/dispatcher/two_master_arbiters.ini',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        self.my_arbiter = Arbiter(**args)

        # Get a new dispatcher - raise an exception
        with pytest.raises(DispatcherError):
            Dispatcher(None, self.my_arbiter.link_to_myself)

        # Get a new dispatcher - raise an exception
        with pytest.raises(DispatcherError):
            Dispatcher(self.my_arbiter.conf, None)

        # Prepare the Alignak configuration
        # self.my_arbiter.load_modules_manager()
        self.my_arbiter.load_monitoring_config_file()
        assert self.my_arbiter.conf.conf_is_correct is True

        # Get a new dispatcher - raise an exception (two master arbiters)
        with pytest.raises(DispatcherError):
            Dispatcher(self.my_arbiter.conf, self.my_arbiter.link_to_myself)
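
The guard clauses exercised above are asserted with plain pytest.raises. A minimal,
self-contained sketch (not Alignak code; DummyError and build() are made up for this
illustration) showing that the raised exception can also be inspected:

import pytest

class DummyError(Exception):
    """Stand-in for DispatcherError, only for this sketch."""

def build(conf, link):
    # Mimics the guard clauses above: both arguments are required
    if conf is None or link is None:
        raise DummyError("dispatcher configuration is incomplete")

def test_build_rejects_missing_arguments():
    with pytest.raises(DummyError) as exc_info:
        build(None, "a link")
    assert "incomplete" in str(exc_info.value)
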
Example #3
    def test_real(self):
        args = {
            'env_file': 'cfg/monitor/simple.ini',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.alignak_monitor = "http://alignak-mos-ws.kiosks.ipmfrance.com"
        my_arbiter.alignak_monitor_username = '******'
        my_arbiter.alignak_monitor_password = '******'

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s" % [
            "%s:%s" % (link.address, link.port)
            for link in my_dispatcher.all_daemons_links
        ])

        my_arbiter.push_passive_check(details=False)
Example #4
def main():
    """Parse args and run main daemon function

    :return: None
    """
    args = parse_daemon_args(True)

    # Protect against Windows multiprocessing, which will RELAUNCH the whole module
    while True:
        daemon = Arbiter(debug=args.debug_file is not None, **args.__dict__)
        daemon.main()
        if not daemon.need_config_reload:
            break
        daemon = None
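
The "Protect against Windows multiprocessing" comment refers to the spawn start method
re-importing the main module in every child process; a minimal sketch, unrelated to the
Alignak internals, of why daemon start-up has to stay behind the __main__ guard:

import multiprocessing

def worker():
    print("child process started")

if __name__ == '__main__':
    # Without this guard, a child process spawned on Windows would re-execute the
    # module-level code and relaunch everything.
    multiprocessing.freeze_support()  # no-op except in frozen Windows executables
    process = multiprocessing.Process(target=worker)
    process.start()
    process.join()
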
Example #5
    def test_arbiter_class_env_default(self):
        """ Instantiate the Alignak Arbiter class without legacy cfg files
        :return:
        """
        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter with default environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': "/tmp/alignak/etc/alignak.ini",
            'daemon_name': 'arbiter-master'
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        print("Arbiter: %s" % (self.arbiter.__dict__))
        assert self.arbiter.env_filename == '/tmp/alignak/etc/alignak.ini'
        assert self.arbiter.legacy_cfg_files == []
        assert len(self.arbiter.legacy_cfg_files) == 0

        # Configure the logger
        self.arbiter.log_level = 'INFO'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This is to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()
        # No legacy files found
        assert len(self.arbiter.legacy_cfg_files) == 0
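
The test rewrites the alignak.ini file with configparser before instantiating the arbiter.
A minimal standard-library sketch of the same read/modify/write cycle (reusing the
/tmp/alignak/etc/alignak.ini path from the test above):

import configparser

config = configparser.ConfigParser()
config.read('/tmp/alignak/etc/alignak.ini')

# Unset the Nagios legacy cfg files, exactly as the test does
if not config.has_section('alignak-configuration'):
    config.add_section('alignak-configuration')
config.set('alignak-configuration', 'cfg', '')

with open('/tmp/alignak/etc/alignak.ini', 'w') as modified:
    config.write(modified)
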
Example #6
def main():
    """Parse args and run main daemon function

    :return: None
    """
    try:
        args = parse_daemon_args(True)

        # Protect against Windows multiprocessing, which will RELAUNCH the whole module
        while True:
            daemon = Arbiter(**args.__dict__)
            daemon.main()
            if not daemon.need_config_reload:
                break
            daemon = None
    except Exception as exp:  # pylint: disable=broad-except
        sys.stderr.write("*** Daemon exited because: %s" % str(exp))
        traceback.print_exc()
        exit(1)
Example #7
    def _dispatching(self,
                     env_filename='cfg/dispatcher/simple.ini',
                     loops=3,
                     multi_realms=False):
        """ Dispatching process: prepare, check, dispatch

        This function realize all the dispatching operations:
        - load a monitoring configuration
        - prepare the dispatching
        - dispatch
        - check the correct dispatching, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        if multi_realms is True, the scheduler configuration received are not checked against
        the arbiter whole configuration. This would be really too complex to assert on this :(

        Schedulers must have a port number with 7768 (eg. 7768,17768,27768,...)

        Spare daemons must have a port number with 8770 (eg. 8770,18770,28770,...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        # my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True
        # logging.getLogger('alignak').setLevel(logging.DEBUG)

        objects_map = {}
        for _, _, strclss, _, _ in list(
                my_arbiter.conf.types_creations.values()):
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            objects_map[strclss] = {
                'count': len(objects_list),
                'str': str(objects_list)
            }
            # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))

        # Freeze the time !
        initial_datetime = datetime.datetime.now()
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime

            # #1 - Get a new dispatcher
            my_dispatcher = Dispatcher(my_arbiter.conf,
                                       my_arbiter.link_to_myself)
            print("*** All daemons WS: %s" % [
                "%s:%s" % (link.address, link.port)
                for link in my_dispatcher.all_daemons_links
            ])

            assert my_dispatcher.dispatch_ok is False
            assert my_dispatcher.new_to_dispatch is False
            assert my_dispatcher.first_dispatch_done is False

            self.assert_any_log_match(
                re.escape("Dispatcher arbiters/satellites map:"))
            for link in my_dispatcher.all_daemons_links:
                self.assert_any_log_match(
                    re.escape(" - %s: %s" % (link.name, link.uri)))

            # Simulate the daemons HTTP interface (very simple simulation !)
            with requests_mock.mock() as mr:
                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/ping' % (link.address, link.port),
                           json='pong')
                    mr.get('http://%s:%s/identity' % (link.address, link.port),
                           json={"running_id": 123456.123456})
                    mr.get('http://%s:%s/wait_new_conf' %
                           (link.address, link.port),
                           json=True)
                    mr.get('http://%s:%s/fill_initial_broks' %
                           (link.address, link.port),
                           json=[])
                    mr.post('http://%s:%s/_push_configuration' %
                            (link.address, link.port),
                            json=True)
                    mr.get('http://%s:%s/managed_configurations' %
                           (link.address, link.port),
                           json={})
                    mr.get('http://%s:%s/do_not_run' %
                           (link.address, link.port),
                           json=True)

                for link in my_dispatcher.all_daemons_links:
                    # print("Satellite: %s / %s" % (link, link.cfg_to_manage))
                    assert not link.hash
                    assert not link.push_flavor
                    assert not link.cfg_to_manage
                    assert not link.cfg_managed

                # #2 - Initialize connection with all our satellites
                for satellite in my_dispatcher.all_daemons_links:
                    assert my_arbiter.daemon_connection_init(satellite)
                # All links have a running identifier
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    assert link.running_id == 123456.123456
                    self.assert_any_log_match(re.escape("got: 123456.123456"))

                # #3 - Check reachable - a configuration is not yet prepared,
                # so only check reachable state
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Not yet configured ...
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(
                        re.escape("The %s %s do not have a configuration" %
                                  (link.type, link.name)))

                # #3 - Check reachable - daemons got pinged too early...
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape("Too early to ping %s" % (link.name)))
                self.assert_no_log_match(
                    re.escape(
                        "Dispatcher, these daemons are not configured: "
                        "reactionner-master,poller-master,broker-master,receiver-master,"
                        "scheduler-master"
                        ", and a configuration is ready to dispatch, run the dispatching..."
                    ))

                # Time warp 5 seconds - overpass the ping period...
                self.clear_logs()
                frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

                # #3 - Check reachable - daemons provide their configuration
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    # Still not configured ...
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape(
                                "My (%s) fresh managed configuration: {}" %
                                link.name))

                # #4 - Prepare dispatching
                assert my_dispatcher.new_to_dispatch is False
                my_dispatcher.prepare_dispatch()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is True

                self.assert_any_log_match(
                    re.escape(
                        "All configuration parts are assigned to schedulers and their satellites :)"
                    ))
                # All links have a hash, push_flavor and cfg_to_manage
                for link in my_dispatcher.all_daemons_links:
                    print("Link: %s" % link)
                    assert getattr(link, 'hash', None) is not None
                    assert getattr(link, 'push_flavor', None) is not None
                    assert getattr(link, 'cfg_to_manage', None) is not None
                    assert not link.cfg_managed  # Not yet

                # #5 - Check reachable - a configuration is prepared,
                # this will force the daemons communication, no need for a time warp ;)
                my_dispatcher.check_reachable()
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape(
                                "My (%s) fresh managed configuration: {}" %
                                link.name))

                self.assert_any_log_match(
                    re.escape("Dispatcher, these daemons are not configured:"))
                self.assert_any_log_match(
                    re.escape(
                        ", and a configuration is ready to dispatch, run the dispatching..."
                    ))

                self.assert_any_log_match(
                    re.escape(
                        "Trying to send configuration to the satellites..."))
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(
                        re.escape("Sending configuration to the %s %s" %
                                  (link.type, link.name)))

                # As of now the configuration is prepared and was dispatched to the daemons !
                # Configuration already dispatched!
                with pytest.raises(DispatcherError):
                    my_dispatcher.dispatch()
                self.show_logs()

                # Walk the requests history to check the pushed configuration and to
                # simulate the managed configurations...
                history = mr.request_history
                for index, request in enumerate(history):
                    if '_push_configuration' in request.url:
                        received = request.json()
                        print(index, request.url, received)
                        assert ['conf'] == list(received.keys())
                        conf = received['conf']

                        from pprint import pprint
                        pprint(conf)
                        assert 'alignak_name' in conf
                        assert conf['alignak_name'] == 'My Alignak'

                        assert 'self_conf' in conf
                        assert conf['self_conf']
                        i_am = None
                        for link in my_dispatcher.all_daemons_links:
                            if link.type == conf['self_conf']['type'] \
                                    and link.name == conf['self_conf']['name']:
                                i_am = link
                                break
                        else:
                            assert False
                        print(("I am: %s" % i_am))
                        print(("I have: %s" % conf))

                        # All links have a hash, push_flavor and cfg_to_manage
                        assert 'hash' in conf
                        assert 'managed_conf_id' in conf

                        assert 'arbiters' in conf
                        if conf['self_conf']['manage_arbiters']:
                            # All the known arbiters
                            assert list(conf['arbiters'].keys()) == [
                                arbiter_link.uuid
                                for arbiter_link in my_dispatcher.arbiters
                            ]
                        else:
                            assert conf['arbiters'] == {}

                        assert 'schedulers' in conf
                        # Hack for the managed configurations
                        link.cfg_managed = {}
                        for scheduler_link in list(
                                conf['schedulers'].values()):
                            link.cfg_managed[scheduler_link['instance_id']] = {
                                'hash':
                                scheduler_link['hash'],
                                'push_flavor':
                                scheduler_link['push_flavor'],
                                'managed_conf_id':
                                scheduler_link['managed_conf_id']
                            }
                        print("Managed: %s" % link.cfg_managed)

                        assert 'modules' in conf
                        assert conf['modules'] == []

                        # Spare arbiter specific
                        if '8770/_push_configuration' in request.url:
                            # Spare arbiter receives all the monitored configuration
                            assert 'whole_conf' in conf
                            # String serialized configuration
                            assert isinstance(conf['whole_conf'], string_types)
                            managed_conf_part = unserialize(conf['whole_conf'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf[
                                'managed_conf_id']

                            # The spare arbiter got the same objects count as the master arbiter prepared!
                            for _, _, strclss, _, _ in list(
                                    managed_conf_part.types_creations.values(
                                    )):
                                # These elements are not included in the serialized configuration!
                                if strclss in [
                                        'hostescalations',
                                        'serviceescalations', 'arbiters',
                                        'schedulers', 'brokers', 'pollers',
                                        'reactionners', 'receivers', 'realms',
                                        'modules', 'hostsextinfo',
                                        'servicesextinfo', 'hostdependencies',
                                        'servicedependencies'
                                ]:
                                    continue

                                objects_list = getattr(managed_conf_part,
                                                       strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                # Count and string dump are the same !
                                assert len(objects_list
                                           ) == objects_map[strclss]['count']
                                assert str(objects_list
                                           ) == objects_map[strclss]['str']

                        # Scheduler specific
                        elif '7768/_push_configuration' in request.url:
                            assert 'conf_part' in conf
                            # String serialized configuration
                            assert isinstance(conf['conf_part'], string_types)
                            managed_conf_part = unserialize(conf['conf_part'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf[
                                'managed_conf_id']

                            # Hack for the managed configurations
                            link.cfg_managed = {
                                conf['instance_id']: {
                                    'hash': conf['hash'],
                                    'push_flavor': conf['push_flavor'],
                                    'managed_conf_id': conf['managed_conf_id']
                                }
                            }
                            print("Managed: %s" % link.cfg_managed)

                            # The scheduler got the same objects count as the arbiter prepared!
                            for _, _, strclss, _, _ in list(
                                    managed_conf_part.types_creations.values(
                                    )):
                                # These elements are not included in the serialized configuration!
                                if strclss in [
                                        'hostescalations',
                                        'serviceescalations', 'arbiters',
                                        'schedulers', 'brokers', 'pollers',
                                        'reactionners', 'receivers', 'realms',
                                        'modules', 'hostsextinfo',
                                        'servicesextinfo', 'hostdependencies',
                                        'servicedependencies'
                                ]:
                                    continue

                                objects_list = getattr(managed_conf_part,
                                                       strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                if not multi_realms:
                                    # Count and string dump are the same !
                                    assert len(objects_list) == objects_map[
                                        strclss]['count']
                                    assert str(objects_list
                                               ) == objects_map[strclss]['str']

                        else:
                            # Satellites
                            print("I am: ")
                            print(index, request.url, received)
                            assert 'conf_part' not in conf
                            assert 'see_my_schedulers' == conf[
                                'managed_conf_id']

                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/managed_configurations' %
                           (link.address, link.port),
                           json=link.cfg_managed)

                print("Check dispatching:")
                self.clear_logs()
                # assert my_dispatcher.check_dispatch() is True
                dispatched = my_dispatcher.check_dispatch()
                self.show_logs()
                assert dispatched

                for loop_count in range(0, loops):
                    for tw in range(0, 4):
                        # Time warp 1 second
                        frozen_datetime.tick(delta=datetime.timedelta(
                            seconds=1))

                        print("Check reachable %s" % tw)
                        self.clear_logs()
                        my_dispatcher.check_reachable()
                        # Only for Python > 2.7, DEBUG logs ...
                        if os.sys.version_info > (2, 7):
                            for link in my_dispatcher.all_daemons_links:
                                if link == my_dispatcher.arbiter_link:
                                    continue
                                self.assert_any_log_match(
                                    re.escape("Too early to ping %s" %
                                              (link.name)))

                    # Time warp 1 second
                    frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                    print("Check reachable response")
                    self.clear_logs()
                    my_dispatcher.check_reachable()
                    self.show_logs()
                    # Only for Python > 2.7, DEBUG logs ...
                    if os.sys.version_info > (2, 7):
                        for link in my_dispatcher.all_daemons_links:
                            if link == my_dispatcher.arbiter_link:
                                continue
                            self.assert_any_log_match(
                                re.escape(
                                    "My (%s) fresh managed configuration: %s" %
                                    (link.name, link.cfg_managed)))
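
Two third-party helpers drive the test above: freezegun freezes and advances the clock so
the ping periods can be overpassed deterministically, and requests_mock both answers the
daemons' HTTP endpoints and records every call in request_history. A minimal, self-contained
sketch of both mechanisms (the address and port are illustrative; only the endpoint names
are taken from the test):

import datetime

import requests
import requests_mock
from freezegun import freeze_time

with freeze_time(datetime.datetime(2018, 2, 25, 18, 30)) as frozen_clock:
    # Advance the frozen clock by 5 seconds, as the test does to get past the ping period
    frozen_clock.tick(delta=datetime.timedelta(seconds=5))

    with requests_mock.mock() as mocked:
        mocked.get('http://127.0.0.1:7768/ping', json='pong')
        mocked.post('http://127.0.0.1:7768/_push_configuration', json=True)

        assert requests.get('http://127.0.0.1:7768/ping').json() == 'pong'
        requests.post('http://127.0.0.1:7768/_push_configuration',
                      json={'conf': {'alignak_name': 'My Alignak'}})

        # Every intercepted call is kept, so the pushed payload can be checked afterwards
        pushed = [r for r in mocked.request_history if '_push_configuration' in r.url]
        assert pushed[0].json() == {'conf': {'alignak_name': 'My Alignak'}}
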
Example #8
    def setup_with_file(self, configuration_file):
        """
        Load alignak with defined configuration file

        If the configuration loading fails, a SystemExit exception is raised to the caller.

        The conf_is_correct property indicates whether the configuration loading succeeded
        or failed.

        The configuration_errors property contains a list of the error messages that are
        normally logged as ERROR by the arbiter.

        @verified

        :param configuration_file: path + file name of the main configuration file
        :type configuration_file: str
        :return: None
        """
        self.broks = {}
        self.schedulers = {}
        self.brokers = {}
        self.pollers = {}
        self.receivers = {}
        self.reactionners = {}
        self.arbiter = None
        self.conf_is_correct = False
        self.configuration_warnings = []
        self.configuration_errors = []

        # Add collector for test purpose.
        self.setup_logger()

        # Initialize the Arbiter with no daemon configuration file
        self.arbiter = Arbiter(None, [configuration_file], False, False, False,
                               False, '/tmp/arbiter.log', 'arbiter-master')

        try:
            # The following is copy-pasted from setup_alignak_logger.
            # The only difference is that the logger is kept at INFO level to gather messages.
            # This is needed to assert later on the logs we received.
            self.logger.setLevel(logging.INFO)
            # Force the debug level if the daemon is said to start with such level
            if self.arbiter.debug:
                self.logger.setLevel(logging.DEBUG)

            # Log will be broks
            for line in self.arbiter.get_header():
                self.logger.info(line)

            self.arbiter.load_monitoring_config_file()

            # If this assertion does not match, then there is a bug in the arbiter :)
            self.assertTrue(self.arbiter.conf.conf_is_correct)
            self.conf_is_correct = True
            self.configuration_warnings = self.arbiter.conf.configuration_warnings
            self.configuration_errors = self.arbiter.conf.configuration_errors
        except SystemExit:
            self.configuration_warnings = self.arbiter.conf.configuration_warnings
            print("Configuration warnings:")
            for msg in self.configuration_warnings:
                print(" - %s" % msg)
            self.configuration_errors = self.arbiter.conf.configuration_errors
            print("Configuration errors:")
            for msg in self.configuration_errors:
                print(" - %s" % msg)
            raise

        for arb in self.arbiter.conf.arbiters:
            if arb.get_name() == self.arbiter.arbiter_name:
                self.arbiter.myself = arb
        self.arbiter.dispatcher = Dispatcher(self.arbiter.conf,
                                             self.arbiter.myself)
        self.arbiter.dispatcher.prepare_dispatch()

        # Build schedulers dictionary with the schedulers involved in the configuration
        for scheduler in self.arbiter.dispatcher.schedulers:
            sched = Alignak([], False, False, True, '/tmp/scheduler.log')
            sched.load_modules_manager(scheduler.name)
            sched.new_conf = scheduler.conf_package
            if sched.new_conf:
                sched.setup_new_conf()
            self.schedulers[scheduler.scheduler_name] = sched

        # Build pollers dictionary with the pollers involved in the configuration
        for poller in self.arbiter.dispatcher.pollers:
            self.pollers[poller.poller_name] = poller

        # Build receivers dictionary with the receivers involved in the configuration
        for receiver in self.arbiter.dispatcher.receivers:
            self.receivers[receiver.receiver_name] = receiver

        # Build reactionners dictionary with the reactionners involved in the configuration
        for reactionner in self.arbiter.dispatcher.reactionners:
            self.reactionners[reactionner.reactionner_name] = reactionner

        # Build brokers dictionary with the brokers involved in the configuration
        for broker in self.arbiter.dispatcher.brokers:
            self.brokers[broker.broker_name] = broker
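
A typical test built on this helper calls setup_with_file() and then works with the prepared
schedulers; a minimal hedged sketch (the test class, configuration file name and scheduler
name are assumptions, not taken from the project):

class TestMyConfiguration(AlignakTest):  # assumed name of the helper class above

    def test_loads_configuration(self):
        # Raises SystemExit if the configuration cannot be loaded
        self.setup_with_file('cfg/cfg_default.cfg')  # assumed file name
        assert self.conf_is_correct
        assert self.configuration_errors == []

        # The schedulers dictionary is keyed by scheduler name
        assert 'scheduler-master' in self.schedulers  # assumed scheduler name
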
Example #9
    def _monitoring(self,
                    env_filename='cfg/monitor/simple.ini',
                    loops=3,
                    multi_realms=False):
        """ monitoring process: prepare, check, dispatch

        This function realize all the monitoring operations:
        - load a monitoring configuration
        - prepare the monitoring
        - dispatch
        - check the correct monitoring, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        if multi_realms is True, the scheduler configuration received are not checked against
        the arbiter whole configuration. This would be really too complex to assert on this :(

        Schedulers must have a port number with 7768 (eg. 7768,17768,27768,...)

        Spare daemons must have a port number with 8770 (eg. 8770,18770,28770,...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s" % [
            "%s:%s" % (link.address, link.port)
            for link in my_dispatcher.all_daemons_links
        ])

        assert my_arbiter.alignak_monitor == "http://super_alignak:7773/ws"
        assert my_arbiter.alignak_monitor_username == 'admin'
        assert my_arbiter.alignak_monitor_password == 'admin'

        metrics = []
        for type in sorted(my_arbiter.conf.types_creations):
            _, _, strclss, _, _ = my_arbiter.conf.types_creations[type]
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            metrics.append("'%s'=%d" % (strclss, len(objects_list)))

        # Simulate the daemons HTTP interface (very simple simulation !)
        with requests_mock.mock() as mr:
            mr.post('%s/login' % (my_arbiter.alignak_monitor),
                    json={
                        "_status":
                        "OK",
                        "_result":
                        ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                    })
            mr.patch(
                '%s/host' % (my_arbiter.alignak_monitor),
                json={
                    "_status":
                    "OK",
                    "_result":
                    ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                })

            # Time warp 5 seconds - overpass the ping period...
            self.clear_logs()
            # frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

            my_arbiter.get_alignak_status(details=False)

            self.show_logs()

            # Walk the requests history to check what was pushed to the Alignak monitor...
            history = mr.request_history
            for index, request in enumerate(history):
                # Check what is patched on /host ...
                if 'host' in request.url:
                    received = request.json()
                    print((index, request.url, received))

                    from pprint import pprint
                    pprint(received)

                    assert received['name'] == 'My Alignak'
                    assert received['livestate']['timestamp'] == 1519583400
                    assert received['livestate']['state'] == 'up'
                    assert received['livestate'][
                        'output'] == 'Some of my daemons are not reachable.'
                    for metric in metrics:
                        assert metric in received['livestate']['perf_data']
                    print(received['livestate']['long_output'])
                    # Long output is sorted by daemon name
                    assert received['livestate']['long_output'] == \
                           u'broker-master - daemon is not reachable.\n' \
                           u'poller-master - daemon is not reachable.\n' \
                           u'reactionner-master - daemon is not reachable.\n' \
                           u'receiver-master - daemon is not reachable.\n' \
                           u'scheduler-master - daemon is not reachable.'

                    for link in my_dispatcher.all_daemons_links:
                        assert link.name in [
                            service['name'] for service in received['services']
                        ]

                    for service in received['services']:
                        assert 'name' in service
                        assert 'livestate' in service
                        assert 'timestamp' in service['livestate']
                        assert 'state' in service['livestate']
                        assert 'output' in service['livestate']
                        assert 'long_output' in service['livestate']
                        assert 'perf_data' in service['livestate']
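
The assertions above pin down the shape of the payload that is PATCHed to the monitor's
/host endpoint. For reference, a minimal payload that would satisfy them (values are taken
from the assertions where possible, the rest is only illustrative):

example_host_update = {
    'name': 'My Alignak',
    'livestate': {
        'timestamp': 1519583400,
        'state': 'up',
        'output': 'Some of my daemons are not reachable.',
        'long_output': 'broker-master - daemon is not reachable.\n'
                       'poller-master - daemon is not reachable.\n'
                       'reactionner-master - daemon is not reachable.\n'
                       'receiver-master - daemon is not reachable.\n'
                       'scheduler-master - daemon is not reachable.',
        'perf_data': "'hosts'=1 'services'=2",  # illustrative metrics only
    },
    'services': [
        {
            'name': 'broker-master',
            'livestate': {
                'timestamp': 1519583400,
                'state': 'unreachable',  # illustrative value
                'output': 'daemon is not reachable.',
                'long_output': '',
                'perf_data': '',
            },
        },
        # ... one entry per daemon link
    ],
}
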
Example #10
    def setup_with_file(self, paths, add_default=True):
        self.time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        if not isinstance(paths, list):
            paths = [paths]  # Fix for modules tests
            add_default = False # Don't mix config
        if add_default:
            paths.insert(0, 'etc/alignak_1r_1h_1s.cfg')
        self.config_files = paths
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()

        # If we get one arbiter defined here (before the default) we should be in a case where
        # the tester wants to load/test a module, so we simulate an arbiter daemon
        # and the modules loading phase. As it has its own modules manager, this should
        # not impact the scheduler modules, especially since we are asking for the arbiter type :)
        if len(self.conf.arbiters) == 1:
            arbdaemon = Arbiter([''], [''], False, False, None, None)

            arbdaemon.load_modules_manager()

            # we request the instances without them being *started*
            # (for those that are concerned ("external" modules):
            # we will *start* these instances after we have been daemonized (if requested)
            me = None
            for arb in self.conf.arbiters:
                me = arb
                arbdaemon.do_load_modules(arb.modules)
                arbdaemon.load_modules_configuration_objects(raw_objects)

        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        #import pdb;pdb.set_trace()
        self.conf.linkify_templates()
        #import pdb;pdb.set_trace()
        self.conf.apply_inheritance()
        #import pdb;pdb.set_trace()
        self.conf.explode()
        #print "Aconf.services has %d elements" % len(self.conf.services)
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        #print "conf.services has %d elements" % len(self.conf.services)
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            self.conf.dump()
            return
        self.conf.clean()

        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)

        scheddaemon = Alignak(None, False, False, False, None, None)
        self.scheddaemon = scheddaemon
        self.sched = scheddaemon.sched
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.conf.accept_passive_unknown_check_results = False

        self.sched.schedule()
Example #11
def main():
    """Parse args and run main daemon function

    :return: None
    """
    parser = optparse.OptionParser(
        "%prog [options] -c configfile [-c additional_config_file]",
        version="%prog: " + VERSION)
    parser.add_option(
        '-c',
        '--config',
        action='append',
        dest="config_files",
        metavar="CONFIG-FILE",
        help=('Config file (your nagios.cfg). Multiple -c can be '
              'used, as if all the files were just one'))
    parser.add_option('-d',
                      '--daemon',
                      action='store_true',
                      dest="is_daemon",
                      help="Run in daemon mode")
    parser.add_option('-r',
                      '--replace',
                      action='store_true',
                      dest="do_replace",
                      help="Replace previous running arbiter")
    parser.add_option('--debugfile',
                      dest='debug_file',
                      help=("Debug file. Default: not used "
                            "(why debug a bug free program? :) )"))
    parser.add_option("-v",
                      "--verify-config",
                      dest="verify_only",
                      action="store_true",
                      help="Verify config file and exit")
    parser.add_option(
        "-p",
        "--profile",
        dest="profile",
        help="Dump a profile file. Need the python cProfile librairy")
    parser.add_option("-a",
                      "--analyse",
                      dest="analyse",
                      help="Dump an analyse statistics file, for support")
    parser.add_option(
        "-m",
        "--migrate",
        dest="migrate",
        help="Migrate the raw configuration read from the arbiter to another "
        "module. --> VERY EXPERIMENTAL!")
    parser.add_option(
        "-n",
        "--name",
        dest="arb_name",
        help="Give the arbiter name to use. Optionnal, will use the hostaddress "
        "if not provide to find it.")

    opts, args = parser.parse_args()

    if not opts.config_files:
        parser.error("Requires at least one config file (option -c/--config")
    if args:
        parser.error("Does not accept any argument. Use option -c/--config")

    # Protect for windows multiprocessing that will RELAUNCH all
    daemon = Arbiter(debug=opts.debug_file is not None, **opts.__dict__)
    if not opts.profile:
        daemon.main()
    else:
        # For perf tuning:
        import cProfile
        cProfile.run('''daemon.main()''', opts.profile)
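
When the --profile option is used, cProfile.run() writes its statistics to the given file.
A minimal standard-library sketch of reading such a dump afterwards (the dump file name is
only illustrative):

import pstats

stats = pstats.Stats('/tmp/arbiter.profile')  # file given to the -p/--profile option
stats.strip_dirs().sort_stats('cumulative').print_stats(20)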