Example no. 1
    def test_real(self):
        args = {
            'env_file': 'cfg/monitor/simple.ini',
            'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.alignak_monitor = "http://alignak-mos-ws.kiosks.ipmfrance.com"
        my_arbiter.alignak_monitor_username = '******'
        my_arbiter.alignak_monitor_password = '******'

        # my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s"
              % ["%s:%s" % (link.address, link.port)
                 for link in my_dispatcher.all_daemons_links])

        my_arbiter.push_passive_check(details=False)
Example no. 2
    def test_real(self):
        args = {
            'env_file': 'cfg/monitor/simple.ini',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.alignak_monitor = "http://alignak-mos-ws.kiosks.ipmfrance.com"
        my_arbiter.alignak_monitor_username = '******'
        my_arbiter.alignak_monitor_password = '******'

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s" % [
            "%s:%s" % (link.address, link.port)
            for link in my_dispatcher.all_daemons_links
        ])

        my_arbiter.push_passive_check(details=False)
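
Both variants of test_real above push passive checks to a live monitor web service (alignak-mos-ws.kiosks.ipmfrance.com). When that host is not reachable, the HTTP calls issued by push_passive_check() can be stubbed with requests_mock, the library already used by the dispatcher examples further down. A minimal sketch that could replace the last line of the test body; the catch-all matchers and the JSON answer format are assumptions, not taken from the original test:

        # Hypothetical variant: stub every HTTP call issued by push_passive_check()
        import requests_mock

        with requests_mock.mock() as mr:
            # Catch-all matchers: any GET/POST gets a simple JSON answer (assumed format)
            mr.get(requests_mock.ANY, json={'_status': 'OK'})
            mr.post(requests_mock.ANY, json={'_status': 'OK'})
            my_arbiter.push_passive_check(details=False)
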
Example no. 3
class TestLaunchDaemons(AlignakTest):
    def setUp(self):
        super(TestLaunchDaemons, self).setUp()

        self.cfg_folder = '/tmp/alignak'
        self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder)

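        # Force the arbiter to launch the other daemons itself:
        # 'launch_missing_daemons' is enabled and every daemon is flagged as 'alignak_launched'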
        files = ['%s/etc/alignak.ini' % self.cfg_folder,
                 '%s/etc/alignak.d/daemons.ini' % self.cfg_folder,
                 '%s/etc/alignak.d/modules.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            cfg.set('alignak-configuration', 'launch_missing_daemons', '1')
            cfg.set('daemon.arbiter-master', 'alignak_launched', '1')
            cfg.set('daemon.scheduler-master', 'alignak_launched', '1')
            cfg.set('daemon.poller-master', 'alignak_launched', '1')
            cfg.set('daemon.reactionner-master', 'alignak_launched', '1')
            cfg.set('daemon.receiver-master', 'alignak_launched', '1')
            cfg.set('daemon.broker-master', 'alignak_launched', '1')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

    def tearDown(self):
        # Restore the default test logger configuration
        if 'ALIGNAK_LOGGER_CONFIGURATION' in os.environ:
            del os.environ['ALIGNAK_LOGGER_CONFIGURATION']

        print("Test terminated!")

    def test_arbiter_missing_parameters(self):
        """ Running the Alignak Arbiter with missing command line parameters

        :return:
        """
        print("Launching arbiter with missing parameters...")
        args = ["../alignak/bin/alignak_arbiter.py"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_no_environment(self):
        """ Running the Alignak Arbiter without environment file

        :return:
        """
        print("Launching arbiter without environment file...")
        args = ["../alignak/bin/alignak_arbiter.py"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_class_no_environment(self):
        """ Instantiate the Alignak Arbiter class without environment file

        :return:
        """
        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter without environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': '',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master',
            'legacy_cfg_files': ['../etc/alignak.cfg']
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        assert self.arbiter.env_filename == ''
        assert self.arbiter.legacy_cfg_files == [os.path.abspath('../etc/alignak.cfg')]

        # Configure the logger
        self.arbiter.log_level = 'ERROR'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This is to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()

    def test_arbiter_class_env_default(self):
        """ Instantiate the Alignak Arbiter class without legacy cfg files
        :return:
        """
        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter with default environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': "/tmp/alignak/etc/alignak.ini",
            'daemon_name': 'arbiter-master'
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        print("Arbiter: %s" % (self.arbiter.__dict__))
        assert self.arbiter.env_filename == '/tmp/alignak/etc/alignak.ini'
        assert self.arbiter.legacy_cfg_files == []
        assert len(self.arbiter.legacy_cfg_files) == 0

        # Configure the logger
        self.arbiter.log_level = 'INFO'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This is to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()
        # No legacy files found
        assert len(self.arbiter.legacy_cfg_files) == 0

    def test_arbiter_unexisting_environment(self):
        """ Running the Alignak Arbiter with a not existing environment file

        :return:
        """
        print("Launching arbiter with a not existing environment file...")
        args = ["../alignak/bin/alignak_arbiter.py", "-e", "/tmp/etc/unexisting.ini"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        assert b"Daemon 'arbiter-master' did not correctly read " \
               b"Alignak environment file: /tmp/etc/unexisting.ini" in stdout
        # Arbiter process must exit with a return code == 99
        assert ret == 99

    def test_arbiter_no_monitoring_configuration(self):
        """ Running the Alignak Arbiter with no monitoring configuration defined -
        no legacy cfg files

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        ret = self._run_command_with_timeout(args, 30)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'total number of hosts in all realms: 0' in line:
                    ok = True
        assert errors == 0
        assert ok

    def test_arbiter_unexisting_monitoring_configuration(self):
        """ Running the Alignak Arbiter with a not existing monitoring configuration file

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak-missing.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'WARNING:' in line and "cannot open main file '/tmp/alignak/etc/alignak-missing.cfg' for reading" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The arbiter log must contain exactly two errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_bad_configuration(self):
        """ Running the Alignak Arbiter with bad monitoring configuration (unknown sub directory)

        :return:
        """
        print("Launching arbiter with a bad monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        # Update configuration with a bad file name
        files = ['%s/etc/alignak.cfg' % self.cfg_folder]
        replacements = {
            'cfg_dir=arbiter/templates': 'cfg_dir=unexisting/objects/realms'
        }
        self._files_update(files, replacements)

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'ERROR:' in line and "*** One or more problems were encountered while processing the configuration (first check)..." in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The arbiter log must contain exactly two errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_i_am_not_configured(self):
        """ Running the Alignak Arbiter with missing arbiter configuration

        :return:
        """
        print("Launching arbiter with a missing arbiter configuration...")

        if os.path.exists('%s/my-arbiter-name.log' % self._launch_dir):
            os.remove('%s/my-arbiter-name.log' % self._launch_dir)

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder, "-n", "my-arbiter-name"]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        # Note the log filename!
        with open('%s/my-arbiter-name.log' % self._launch_dir) as f:
            for line in f:
                if "I cannot find my own configuration (my-arbiter-name)" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The arbiter log must contain exactly two errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_verify(self):
        """ Running the Alignak Arbiter in verify mode only with the default shipped configuration

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ['ALIGNAK_LOGGER_CONFIGURATION'] = './etc/warning_alignak-logger.json'

        print("Launching arbiter in verification mode...")
        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder, "-V"]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        specific_log = False
        info_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'INFO:' in line:
                    info_log = True
                    if 'Arbiter is in configuration check mode' in line:
                        specific_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        # Arbiter changed the log level to INFO because of the verify mode
        assert specific_log is True
        assert info_log is True
        assert errors == 0
        assert ret == 0

    def test_arbiter_parameters_pid(self):
        """ Run the Alignak Arbiter with some parameters - set a pid file

        :return:
        """
        # All the default configuration files are in /tmp/alignak/etc

        print("Launching arbiter with forced PID file...")
        if os.path.exists('/tmp/arbiter.pid'):
            os.remove('/tmp/arbiter.pid')

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder, "-V",
                "--pid_file", "/tmp/arbiter.pid"]
        ret = self._run_command_with_timeout(args, 20)

        # The arbiter unlinks the pid file - I cannot assert it exists!
        # assert os.path.exists('/tmp/arbiter.pid')

        errors = 0
        # ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                # if 'Unlinking /tmp/arbiter.pid' in line:
                #     ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0
        # assert ok

    def test_arbiter_parameters_log(self):
        """ Run the Alignak Arbiter with some parameters - log file name

        :return:
        """
        # All the default configuration files are in /tmp/alignak/etc
        print("Launching arbiter with forced log file...")
        if os.path.exists('/tmp/arbiter.log'):
            os.remove('/tmp/arbiter.log')

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder,
                "-V", "-vv", "--log_file", "/tmp/arbiter.log"]
        ret = self._run_command_with_timeout(args, 20)

        # Log file created because of the -V option
        assert os.path.exists("/tmp/arbiter.log")

        errors = 0
        with open('/tmp/arbiter.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare_missing_configuration(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        print("Launching arbiter in spare mode...")
        args = ["../alignak/bin/alignak_arbiter.py",
                "-a", cfg_folder + "/alignak.cfg",
                "-c", cfg_folder + "/daemons/arbiterd.ini",
                "-n", "arbiter-spare"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        sleep(5)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %s" % ret)
        assert ret is not None, "Arbiter is still running!"
        # Arbiter process must exit with a return code == 1
        assert ret == 1

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        print("Launching arbiter in spare mode...")
        args = ["../alignak/bin/alignak_arbiter.py",
                "-a", cfg_folder + "/alignak.cfg",
                "-c", cfg_folder + "/daemons/arbiterd.ini",
                "-n", "arbiter-spare"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        ret = arbiter.poll()
        # Arbiter must still be running ... it is still trying to dispatch the configuration!
        assert ret is None, "Arbiter exited!"

        sleep(5)

        # Arbiter never stops trying to send its configuration! We must kill it...

        print("Asking arbiter to end...")
        os.kill(arbiter.pid, signal.SIGTERM)

        ret = arbiter.poll()
        print("*** Arbiter exited on kill, no return code!")
        assert ret is None, "Arbiter exited!"
        # No ERROR logs expected because the daemons are not alive!
        ok = 0
        for line in iter(arbiter.stdout.readline, b''):
            print(">>> %s" % line.rstrip())
            if b'INFO:' in line:
                # I must find this line
                if b'[alignak.daemons.arbiterdaemon] I found myself in the configuration: arbiter-spare' in line:
                    ok += 1
                # and this one also
                if b'[alignak.daemons.arbiterdaemon] I am a spare Arbiter: arbiter-spare' in line:
                    ok += 1
                if b'I am not the master arbiter, I stop parsing the configuration' in line:
                    ok += 1
                if b'Waiting for master...' in line:
                    ok += 1
                if b'Waiting for master death' in line:
                    ok += 1
                assert b'CRITICAL:' not in line
        for line in iter(arbiter.stderr.readline, b''):
            print("*** %s" % line.rstrip())
            if sys.version_info > (2, 7):
                assert False, "stderr output!"
        assert ok == 5

    def test_arbiter_normal(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity=None)

    def test_arbiter_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity='--verbose')
        self._arbiter(verbosity='-v')

    def test_arbiter_very_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity='--debug')
        # Execute only once, because it looks too verbose for Travis :/
        # self._arbiter(verbosity='-vv')

    def _arbiter(self, verbosity=None):
        """ Running the Alignak Arbiter with a specific verbosity

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ['ALIGNAK_LOGGER_CONFIGURATION'] = './etc/warning_alignak-logger.json'

        print("Launching arbiter ...")
        args = ["../alignak/bin/alignak_arbiter.py", "-n", "arbiter-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        if verbosity:
            args.append(verbosity)
        arbiter = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Wait for the arbiter to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

        errors = 0
        info_log = False
        debug_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'DEBUG:' in line:
                    debug_log = True
                if 'INFO:' in line:
                    info_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # arbiter process may exit with no errors!
        # assert errors == 0
        # Arbiter changed the log level to INFO because of the verbose option
        if verbosity in ['-v', '--verbose']:
            assert info_log is True
        # Arbiter changed the log level to DEBUG because of the debug option
        if verbosity in ['-vv', '--debug']:
            assert debug_log is True

    def test_broker(self):
        """ Running the Alignak Broker

        :return:
        """
        print("Launching broker ...")
        args = ["../alignak/bin/alignak_broker.py", "-n", "broker-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        broker = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('broker', broker.pid))

        # Wait for the broker to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7772')

        errors = 0
        with open('/tmp/alignak/log/broker-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The broker log must not contain any error
        assert errors == 0

    def test_poller(self):
        """ Running the Alignak poller

        :return:
        """
        print("Launching poller ...")
        args = ["../alignak/bin/alignak_poller.py", "-n", "poller-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        poller = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('poller', poller.pid))

        # Wait for the poller to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7771')

        errors = 0
        with open('/tmp/alignak/log/poller-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The poller log must not contain any error
        assert errors == 0

    def test_reactionner(self):
        """ Running the Alignak reactionner

        :return:
        """
        print("Launching reactionner ...")
        args = ["../alignak/bin/alignak_reactionner.py", "-n", "reactionner-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        reactionner = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('reactionner', reactionner.pid))

        # Wait for the reactionner to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7769')

        errors = 0
        with open('/tmp/alignak/log/reactionner-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The reactionner log must not contain any error
        assert errors == 0

    def test_receiver(self):
        """ Running the Alignak receiver

        :return:
        """
        print("Launching receiver ...")
        args = ["../alignak/bin/alignak_receiver.py", "-n", "receiver-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        receiver = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('receiver', receiver.pid))

        # Wait for the receiver to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7773')

        errors = 0
        with open('/tmp/alignak/log/receiver-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The receiver log must not contain any error
        assert errors == 0

    def test_scheduler(self):
        """ Running the Alignak scheduler

        :return:
        """
        print("Launching scheduler ...")

        args = ["../alignak/bin/alignak_scheduler.py", "-n", "scheduler-master",
                "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        scheduler = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('scheduler', scheduler.pid))

        # Wait for the scheduler to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7768')

        errors = 0
        with open('/tmp/alignak/log/scheduler-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The scheduler log must not contain any error
        assert errors == 0
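
The six daemon tests above repeat the same launch / stop / log-scan pattern. A possible factoring is sketched below; the helper name is an assumption, but everything it wraps (the subprocess launch, _stop_alignak_daemons, the log scan) comes directly from the tests above:

    def _run_daemon_and_check_log(self, script, daemon_name, stop_port):
        """ Hypothetical helper: launch a daemon, request its stop, fail on ERROR/CRITICAL logs """
        args = [script, "-n", daemon_name, "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        daemon = subprocess.Popen(args)
        print("%s launched (pid=%d)" % (daemon_name, daemon.pid))

        # Wait for the daemon to get started
        time.sleep(2)

        # Request the daemon to stop through its HTTP interface
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:%d' % stop_port)

        errors = 0
        with open('/tmp/alignak/log/%s.log' % daemon_name) as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors += 1
        # The daemon log must not contain any error
        assert errors == 0

With such a helper, test_broker would reduce to self._run_daemon_and_check_log('../alignak/bin/alignak_broker.py', 'broker-master', 7772), and likewise for the other daemons.
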
Example no. 4
    def _dispatching(self,
                     env_filename='cfg/dispatcher/simple.ini',
                     loops=3,
                     multi_realms=False):
        """ Dispatching process: prepare, check, dispatch

        This function performs all the dispatching operations:
        - load a monitoring configuration
        - prepare the dispatching
        - dispatch
        - check the correct dispatching, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the whole arbiter configuration. This would really be too complex to assert on :(

        Schedulers must have a port number ending with 7768 (e.g. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (e.g. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        # my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True
        # logging.getLogger('alignak').setLevel(logging.DEBUG)

        objects_map = {}
        for _, _, strclss, _, _ in list(
                my_arbiter.conf.types_creations.values()):
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            objects_map[strclss] = {
                'count': len(objects_list),
                'str': str(objects_list)
            }
            # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))

        # Freeze the time !
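        # (freezing the clock lets the test tick time manually to cross the satellites ping period)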
        initial_datetime = datetime.datetime.now()
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime

            # #1 - Get a new dispatcher
            my_dispatcher = Dispatcher(my_arbiter.conf,
                                       my_arbiter.link_to_myself)
            print("*** All daemons WS: %s" % [
                "%s:%s" % (link.address, link.port)
                for link in my_dispatcher.all_daemons_links
            ])

            assert my_dispatcher.dispatch_ok is False
            assert my_dispatcher.new_to_dispatch is False
            assert my_dispatcher.first_dispatch_done is False

            self.assert_any_log_match(
                re.escape("Dispatcher arbiters/satellites map:"))
            for link in my_dispatcher.all_daemons_links:
                self.assert_any_log_match(
                    re.escape(" - %s: %s" % (link.name, link.uri)))

            # Simulate the daemons HTTP interface (very simple simulation !)
            with requests_mock.mock() as mr:
                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/ping' % (link.address, link.port),
                           json='pong')
                    mr.get('http://%s:%s/identity' % (link.address, link.port),
                           json={"running_id": 123456.123456})
                    mr.get('http://%s:%s/wait_new_conf' %
                           (link.address, link.port),
                           json=True)
                    mr.get('http://%s:%s/fill_initial_broks' %
                           (link.address, link.port),
                           json=[])
                    mr.post('http://%s:%s/_push_configuration' %
                            (link.address, link.port),
                            json=True)
                    mr.get('http://%s:%s/managed_configurations' %
                           (link.address, link.port),
                           json={})
                    mr.get('http://%s:%s/do_not_run' %
                           (link.address, link.port),
                           json=True)

                for link in my_dispatcher.all_daemons_links:
                    # print("Satellite: %s / %s" % (link, link.cfg_to_manage))
                    assert not link.hash
                    assert not link.push_flavor
                    assert not link.cfg_to_manage
                    assert not link.cfg_managed

                # #2 - Initialize connection with all our satellites
                for satellite in my_dispatcher.all_daemons_links:
                    assert my_arbiter.daemon_connection_init(satellite)
                # All links have a running identifier
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    assert link.running_id == 123456.123456
                    self.assert_any_log_match(re.escape("got: 123456.123456"))

                # #3 - Check reachable - a configuration is not yet prepared,
                # so only check reachable state
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Not yet configured ...
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(
                        re.escape("The %s %s do not have a configuration" %
                                  (link.type, link.name)))

                # #3 - Check reachable - daemons got pinged too early...
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape("Too early to ping %s" % (link.name)))
                self.assert_no_log_match(
                    re.escape(
                        "Dispatcher, these daemons are not configured: "
                        "reactionner-master,poller-master,broker-master,receiver-master,"
                        "scheduler-master"
                        ", and a configuration is ready to dispatch, run the dispatching..."
                    ))

                # Time warp 5 seconds - overpass the ping period...
                self.clear_logs()
                frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

                # #3 - Check reachable - daemons provide their configuration
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    # Still not configured ...
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape(
                                "My (%s) fresh managed configuration: {}" %
                                link.name))

                # #4 - Prepare dispatching
                assert my_dispatcher.new_to_dispatch is False
                my_dispatcher.prepare_dispatch()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is True

                self.assert_any_log_match(
                    re.escape(
                        "All configuration parts are assigned to schedulers and their satellites :)"
                    ))
                # All links have a hash, push_flavor and cfg_to_manage
                for link in my_dispatcher.all_daemons_links:
                    print("Link: %s" % link)
                    assert getattr(link, 'hash', None) is not None
                    assert getattr(link, 'push_flavor', None) is not None
                    assert getattr(link, 'cfg_to_manage', None) is not None
                    assert not link.cfg_managed  # Not yet

                # #5 - Check reachable - a configuration is prepared,
                # this will force the daemons communication, no need for a time warp ;)
                my_dispatcher.check_reachable()
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape(
                                "My (%s) fresh managed configuration: {}" %
                                link.name))

                self.assert_any_log_match(
                    re.escape("Dispatcher, these daemons are not configured:"))
                self.assert_any_log_match(
                    re.escape(
                        ", and a configuration is ready to dispatch, run the dispatching..."
                    ))

                self.assert_any_log_match(
                    re.escape(
                        "Trying to send configuration to the satellites..."))
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(
                        re.escape("Sending configuration to the %s %s" %
                                  (link.type, link.name)))

                # As of now the configuration is prepared and was dispatched to the daemons !
                # Configuration already dispatched!
                with pytest.raises(DispatcherError):
                    my_dispatcher.dispatch()
                self.show_logs()

                # Hack the requests history to check and simulate the pushed configuration...
                history = mr.request_history
                for index, request in enumerate(history):
                    if '_push_configuration' in request.url:
                        received = request.json()
                        print(index, request.url, received)
                        assert ['conf'] == list(received.keys())
                        conf = received['conf']

                        from pprint import pprint
                        pprint(conf)
                        assert 'alignak_name' in conf
                        assert conf['alignak_name'] == 'My Alignak'

                        assert 'self_conf' in conf
                        assert conf['self_conf']
                        i_am = None
                        for link in my_dispatcher.all_daemons_links:
                            if link.type == conf['self_conf']['type'] \
                                    and link.name == conf['self_conf']['name']:
                                i_am = link
                                break
                        else:
                            assert False
                        print(("I am: %s" % i_am))
                        print(("I have: %s" % conf))

                        # All links have a hash, push_flavor and cfg_to_manage
                        assert 'hash' in conf
                        assert 'managed_conf_id' in conf

                        assert 'arbiters' in conf
                        if conf['self_conf']['manage_arbiters']:
                            # All the known arbiters
                            assert list(conf['arbiters'].keys()) == [
                                arbiter_link.uuid
                                for arbiter_link in my_dispatcher.arbiters
                            ]
                        else:
                            assert conf['arbiters'] == {}

                        assert 'schedulers' in conf
                        # Hack for the managed configurations
                        link.cfg_managed = {}
                        for scheduler_link in list(
                                conf['schedulers'].values()):
                            link.cfg_managed[scheduler_link['instance_id']] = {
                                'hash':
                                scheduler_link['hash'],
                                'push_flavor':
                                scheduler_link['push_flavor'],
                                'managed_conf_id':
                                scheduler_link['managed_conf_id']
                            }
                        print("Managed: %s" % link.cfg_managed)

                        assert 'modules' in conf
                        assert conf['modules'] == []

                        # Spare arbiter specific
                        if '8770/_push_configuration' in request.url:
                            # Spare arbiter receives all the monitored configuration
                            assert 'whole_conf' in conf
                            # String serialized configuration
                            assert isinstance(conf['whole_conf'], string_types)
                            managed_conf_part = unserialize(conf['whole_conf'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf[
                                'managed_conf_id']

                            # The spare arbiter got the same objects count as the master arbiter prepared!
                            for _, _, strclss, _, _ in list(
                                    managed_conf_part.types_creations.values(
                                    )):
                                # These elements are not included in the serialized configuration!
                                if strclss in [
                                        'hostescalations',
                                        'serviceescalations', 'arbiters',
                                        'schedulers', 'brokers', 'pollers',
                                        'reactionners', 'receivers', 'realms',
                                        'modules', 'hostsextinfo',
                                        'servicesextinfo', 'hostdependencies',
                                        'servicedependencies'
                                ]:
                                    continue

                                objects_list = getattr(managed_conf_part,
                                                       strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                # Count and string dump are the same!
                                assert len(objects_list
                                           ) == objects_map[strclss]['count']
                                assert str(objects_list
                                           ) == objects_map[strclss]['str']

                        # Scheduler specific
                        elif '7768/_push_configuration' in request.url:
                            assert 'conf_part' in conf
                            # String serialized configuration
                            assert isinstance(conf['conf_part'], string_types)
                            managed_conf_part = unserialize(conf['conf_part'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf[
                                'managed_conf_id']

                            # Hack for the managed configurations
                            link.cfg_managed = {
                                conf['instance_id']: {
                                    'hash': conf['hash'],
                                    'push_flavor': conf['push_flavor'],
                                    'managed_conf_id': conf['managed_conf_id']
                                }
                            }
                            print("Managed: %s" % link.cfg_managed)

                            # The scheduler got the same objects count as the arbiter prepared!
                            for _, _, strclss, _, _ in list(
                                    managed_conf_part.types_creations.values(
                                    )):
                                # These elements are not included in the serialized configuration!
                                if strclss in [
                                        'hostescalations',
                                        'serviceescalations', 'arbiters',
                                        'schedulers', 'brokers', 'pollers',
                                        'reactionners', 'receivers', 'realms',
                                        'modules', 'hostsextinfo',
                                        'servicesextinfo', 'hostdependencies',
                                        'servicedependencies'
                                ]:
                                    continue

                                objects_list = getattr(managed_conf_part,
                                                       strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                if not multi_realms:
                                    # Count and string dump are the same !
                                    assert len(objects_list) == objects_map[
                                        strclss]['count']
                                    assert str(objects_list
                                               ) == objects_map[strclss]['str']

                        else:
                            # Satellites
                            print("I am: ")
                            print(index, request.url, received)
                            assert 'conf_part' not in conf
                            assert 'see_my_schedulers' == conf[
                                'managed_conf_id']

                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/managed_configurations' %
                           (link.address, link.port),
                           json=link.cfg_managed)

                print("Check dispatching:")
                self.clear_logs()
                # assert my_dispatcher.check_dispatch() is True
                dispatched = my_dispatcher.check_dispatch()
                self.show_logs()
                assert dispatched

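                # Each loop: four 1-second ticks where pinging the daemons is still too early,
                # then a fifth tick that triggers a real ping and refreshes the managed configuration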
                for loop_count in range(0, loops):
                    for tw in range(0, 4):
                        # Time warp 1 second
                        frozen_datetime.tick(delta=datetime.timedelta(
                            seconds=1))

                        print("Check reachable %s" % tw)
                        self.clear_logs()
                        my_dispatcher.check_reachable()
                        # Only for Python > 2.7, DEBUG logs ...
                        if os.sys.version_info > (2, 7):
                            for link in my_dispatcher.all_daemons_links:
                                if link == my_dispatcher.arbiter_link:
                                    continue
                                self.assert_any_log_match(
                                    re.escape("Too early to ping %s" %
                                              (link.name)))

                    # Time warp 1 second
                    frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                    print("Check reachable response")
                    self.clear_logs()
                    my_dispatcher.check_reachable()
                    self.show_logs()
                    # Only for Python > 2.7, DEBUG logs ...
                    if os.sys.version_info > (2, 7):
                        for link in my_dispatcher.all_daemons_links:
                            if link == my_dispatcher.arbiter_link:
                                continue
                            self.assert_any_log_match(
                                re.escape(
                                    "My (%s) fresh managed configuration: %s" %
                                    (link.name, link.cfg_managed)))
Example no. 5
class TestDispatcher(AlignakTest):
    """
    This class tests the dispatcher (distributing the configuration to the satellites)
    """
    def setUp(self):
        """Test starting"""
        super(TestDispatcher, self).setUp()

        # Log at DEBUG level
        self.set_unit_tests_logger_level()

    def _dispatching(self,
                     env_filename='cfg/dispatcher/simple.ini',
                     loops=3,
                     multi_realms=False):
        """ Dispatching process: prepare, check, dispatch

        This function performs all the dispatching operations:
        - load a monitoring configuration
        - prepare the dispatching
        - dispatch
        - check the correct dispatching, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the whole arbiter configuration. This would really be too complex to assert on :(

        Schedulers must have a port number ending with 7768 (e.g. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (e.g. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        # my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True
        # logging.getLogger('alignak').setLevel(logging.DEBUG)

        objects_map = {}
        for _, _, strclss, _, _ in list(
                my_arbiter.conf.types_creations.values()):
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            objects_map[strclss] = {
                'count': len(objects_list),
                'str': str(objects_list)
            }
            # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))

        # Freeze the time !
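        # (freezing the clock lets the test tick time manually to cross the satellites ping period)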
        initial_datetime = datetime.datetime.now()
        with freeze_time(initial_datetime) as frozen_datetime:
            assert frozen_datetime() == initial_datetime

            # #1 - Get a new dispatcher
            my_dispatcher = Dispatcher(my_arbiter.conf,
                                       my_arbiter.link_to_myself)
            print("*** All daemons WS: %s" % [
                "%s:%s" % (link.address, link.port)
                for link in my_dispatcher.all_daemons_links
            ])

            assert my_dispatcher.dispatch_ok is False
            assert my_dispatcher.new_to_dispatch is False
            assert my_dispatcher.first_dispatch_done is False

            self.assert_any_log_match(
                re.escape("Dispatcher arbiters/satellites map:"))
            for link in my_dispatcher.all_daemons_links:
                self.assert_any_log_match(
                    re.escape(" - %s: %s" % (link.name, link.uri)))

            # Simulate the daemons HTTP interface (very simple simulation !)
            with requests_mock.mock() as mr:
                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/ping' % (link.address, link.port),
                           json='pong')
                    mr.get('http://%s:%s/identity' % (link.address, link.port),
                           json={"running_id": 123456.123456})
                    mr.get('http://%s:%s/wait_new_conf' %
                           (link.address, link.port),
                           json=True)
                    mr.get('http://%s:%s/fill_initial_broks' %
                           (link.address, link.port),
                           json=[])
                    mr.post('http://%s:%s/_push_configuration' %
                            (link.address, link.port),
                            json=True)
                    mr.get('http://%s:%s/managed_configurations' %
                           (link.address, link.port),
                           json={})
                    mr.get('http://%s:%s/do_not_run' %
                           (link.address, link.port),
                           json=True)

                for link in my_dispatcher.all_daemons_links:
                    # print("Satellite: %s / %s" % (link, link.cfg_to_manage))
                    assert not link.hash
                    assert not link.push_flavor
                    assert not link.cfg_to_manage
                    assert not link.cfg_managed

                # #2 - Initialize connection with all our satellites
                for satellite in my_dispatcher.all_daemons_links:
                    assert my_arbiter.daemon_connection_init(satellite)
                # All links have a running identifier
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    assert link.running_id == 123456.123456
                    self.assert_any_log_match(re.escape("got: 123456.123456"))

                # #3 - Check reachable - a configuration is not yet prepared,
                # so only check reachable state
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Not yet configured ...
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(
                        re.escape("The %s %s do not have a configuration" %
                                  (link.type, link.name)))

                # #3 - Check reachable - daemons got pinged too early...
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape("Too early to ping %s" % (link.name)))
                self.assert_no_log_match(
                    re.escape(
                        "Dispatcher, these daemons are not configured: "
                        "reactionner-master,poller-master,broker-master,receiver-master,"
                        "scheduler-master"
                        ", and a configuration is ready to dispatch, run the dispatching..."
                    ))

                # Time warp 5 seconds - go past the ping period...
                self.clear_logs()
                frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

                # #3 - Check reachable - daemons provide their configuration
                my_dispatcher.check_reachable()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is False
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    # Still not configured ...
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape(
                                "My (%s) fresh managed configuration: {}" %
                                link.name))

                # #4 - Prepare dispatching
                assert my_dispatcher.new_to_dispatch is False
                my_dispatcher.prepare_dispatch()
                assert my_dispatcher.dispatch_ok is False
                assert my_dispatcher.first_dispatch_done is False
                assert my_dispatcher.new_to_dispatch is True

                self.assert_any_log_match(
                    re.escape(
                        "All configuration parts are assigned to schedulers and their satellites :)"
                    ))
                # All links have a hash, push_flavor and cfg_to_manage
                for link in my_dispatcher.all_daemons_links:
                    print("Link: %s" % link)
                    assert getattr(link, 'hash', None) is not None
                    assert getattr(link, 'push_flavor', None) is not None
                    assert getattr(link, 'cfg_to_manage', None) is not None
                    assert not link.cfg_managed  # Not yet

                # #5 - Check reachable - a configuration is prepared,
                # this will force the daemons communication, no need for a time warp ;)
                my_dispatcher.check_reachable()
                # Only for Python > 2.7, DEBUG logs ...
                if os.sys.version_info > (2, 7):
                    for link in my_dispatcher.all_daemons_links:
                        if link == my_dispatcher.arbiter_link:
                            continue
                        self.assert_any_log_match(
                            re.escape(
                                "My (%s) fresh managed configuration: {}" %
                                link.name))

                self.assert_any_log_match(
                    re.escape("Dispatcher, these daemons are not configured:"))
                self.assert_any_log_match(
                    re.escape(
                        ", and a configuration is ready to dispatch, run the dispatching..."
                    ))

                self.assert_any_log_match(
                    re.escape(
                        "Trying to send configuration to the satellites..."))
                for link in my_dispatcher.all_daemons_links:
                    if link == my_dispatcher.arbiter_link:
                        continue
                    self.assert_any_log_match(
                        re.escape("Sending configuration to the %s %s" %
                                  (link.type, link.name)))

                # At this point the configuration is prepared and has been dispatched to the daemons!
                # Configuration already dispatched!
                with pytest.raises(DispatcherError):
                    my_dispatcher.dispatch()
                self.show_logs()

                # Inspect the requests history to check the pushed configuration and simulate the daemons' answers...
                history = mr.request_history
                for index, request in enumerate(history):
                    if '_push_configuration' in request.url:
                        received = request.json()
                        print(index, request.url, received)
                        assert ['conf'] == list(received.keys())
                        conf = received['conf']

                        from pprint import pprint
                        pprint(conf)
                        assert 'alignak_name' in conf
                        assert conf['alignak_name'] == 'My Alignak'

                        assert 'self_conf' in conf
                        assert conf['self_conf']
                        i_am = None
                        for link in my_dispatcher.all_daemons_links:
                            if link.type == conf['self_conf']['type'] \
                                    and link.name == conf['self_conf']['name']:
                                i_am = link
                                break
                        else:
                            assert False
                        print(("I am: %s" % i_am))
                        print(("I have: %s" % conf))

                        # All links have a hash, push_flavor and cfg_to_manage
                        assert 'hash' in conf
                        assert 'managed_conf_id' in conf

                        assert 'arbiters' in conf
                        if conf['self_conf']['manage_arbiters']:
                            # All the known arbiters
                            assert list(conf['arbiters'].keys()) == [
                                arbiter_link.uuid
                                for arbiter_link in my_dispatcher.arbiters
                            ]
                        else:
                            assert conf['arbiters'] == {}

                        assert 'schedulers' in conf
                        # Hack for the managed configurations
                        link.cfg_managed = {}
                        for scheduler_link in list(
                                conf['schedulers'].values()):
                            link.cfg_managed[scheduler_link['instance_id']] = {
                                'hash':
                                scheduler_link['hash'],
                                'push_flavor':
                                scheduler_link['push_flavor'],
                                'managed_conf_id':
                                scheduler_link['managed_conf_id']
                            }
                        print("Managed: %s" % link.cfg_managed)

                        assert 'modules' in conf
                        assert conf['modules'] == []

                        # Spare arbiter specific
                        if '8770/_push_configuration' in request.url:
                            # Spare arbiter receives all the monitored configuration
                            assert 'whole_conf' in conf
                            # String serialized configuration
                            assert isinstance(conf['whole_conf'], string_types)
                            managed_conf_part = unserialize(conf['whole_conf'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf[
                                'managed_conf_id']

                            # The spare arbiter got the same objects count as the master arbiter prepared!
                            for _, _, strclss, _, _ in list(
                                    managed_conf_part.types_creations.values(
                                    )):
                                # These elements are not included in the serialized configuration!
                                if strclss in [
                                        'hostescalations',
                                        'serviceescalations', 'arbiters',
                                        'schedulers', 'brokers', 'pollers',
                                        'reactionners', 'receivers', 'realms',
                                        'modules', 'hostsextinfo',
                                        'servicesextinfo', 'hostdependencies',
                                        'servicedependencies'
                                ]:
                                    continue

                                objects_list = getattr(managed_conf_part,
                                                       strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                # Count and string dump are the same!
                                assert len(objects_list
                                           ) == objects_map[strclss]['count']
                                assert str(objects_list
                                           ) == objects_map[strclss]['str']

                        # Scheduler specific
                        elif '7768/_push_configuration' in request.url:
                            assert 'conf_part' in conf
                            # String serialized configuration
                            assert isinstance(conf['conf_part'], string_types)
                            managed_conf_part = unserialize(conf['conf_part'])
                            # Test a property to be sure conf loaded correctly
                            assert managed_conf_part.instance_id == conf[
                                'managed_conf_id']

                            # Hack for the managed configurations
                            link.cfg_managed = {
                                conf['instance_id']: {
                                    'hash': conf['hash'],
                                    'push_flavor': conf['push_flavor'],
                                    'managed_conf_id': conf['managed_conf_id']
                                }
                            }
                            print("Managed: %s" % link.cfg_managed)

                            # The scheduler got the same objects count as the arbiter prepared!
                            for _, _, strclss, _, _ in list(
                                    managed_conf_part.types_creations.values(
                                    )):
                                # These elements are not included in the serialized configuration!
                                if strclss in [
                                        'hostescalations',
                                        'serviceescalations', 'arbiters',
                                        'schedulers', 'brokers', 'pollers',
                                        'reactionners', 'receivers', 'realms',
                                        'modules', 'hostsextinfo',
                                        'servicesextinfo', 'hostdependencies',
                                        'servicedependencies'
                                ]:
                                    continue

                                objects_list = getattr(managed_conf_part,
                                                       strclss, [])
                                # print("Got %d %s: %s" % (len(objects_list), strclss, objects_list))
                                if not multi_realms:
                                    # Count and string dump are the same !
                                    assert len(objects_list) == objects_map[
                                        strclss]['count']
                                    assert str(objects_list
                                               ) == objects_map[strclss]['str']

                        else:
                            # Satellites
                            print("I am: ")
                            print(index, request.url, received)
                            assert 'conf_part' not in conf
                            assert 'see_my_schedulers' == conf[
                                'managed_conf_id']

                for link in my_dispatcher.all_daemons_links:
                    mr.get('http://%s:%s/managed_configurations' %
                           (link.address, link.port),
                           json=link.cfg_managed)
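                # Added commentary (not in the original test): the /managed_configurations
                # endpoint is re-registered so that each daemon now reports the
                # configuration that was just pushed to it; the check_dispatch()
                # call below relies on these answers to consider the dispatch successful.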

                print("Check dispatching:")
                self.clear_logs()
                # assert my_dispatcher.check_dispatch() is True
                dispatched = my_dispatcher.check_dispatch()
                self.show_logs()
                assert dispatched

                for loop_count in range(0, loops):
                    for tw in range(0, 4):
                        # Time warp 1 second
                        frozen_datetime.tick(delta=datetime.timedelta(
                            seconds=1))

                        print("Check reachable %s" % tw)
                        self.clear_logs()
                        my_dispatcher.check_reachable()
                        # Only for Python > 2.7, DEBUG logs ...
                        if os.sys.version_info > (2, 7):
                            for link in my_dispatcher.all_daemons_links:
                                if link == my_dispatcher.arbiter_link:
                                    continue
                                self.assert_any_log_match(
                                    re.escape("Too early to ping %s" %
                                              (link.name)))

                    # Time warp 1 second
                    frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

                    print("Check reachable response")
                    self.clear_logs()
                    my_dispatcher.check_reachable()
                    self.show_logs()
                    # Only for Python > 2.7, DEBUG logs ...
                    if os.sys.version_info > (2, 7):
                        for link in my_dispatcher.all_daemons_links:
                            if link == my_dispatcher.arbiter_link:
                                continue
                            self.assert_any_log_match(
                                re.escape(
                                    "My (%s) fresh managed configuration: %s" %
                                    (link.name, link.cfg_managed)))

    def test_bad_init(self):
        """ Test that:
        - bad configuration
        - two master arbiters
        are not correct and raise an exception!

        :return: None
        """
        args = {
            'env_file': 'cfg/dispatcher/two_master_arbiters.ini',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        self.my_arbiter = Arbiter(**args)

        # Get a new dispatcher - raise an exception
        with pytest.raises(DispatcherError):
            Dispatcher(None, self.my_arbiter.link_to_myself)

        # Get a new dispatcher - raise an exception
        with pytest.raises(DispatcherError):
            Dispatcher(self.my_arbiter.conf, None)

        # Prepare the Alignak configuration
        # self.my_arbiter.load_modules_manager()
        self.my_arbiter.load_monitoring_config_file()
        assert self.my_arbiter.conf.conf_is_correct is True

        # Get a new dispatcher - raise an exception (two master arbiters)
        with pytest.raises(DispatcherError):
            Dispatcher(self.my_arbiter.conf, self.my_arbiter.link_to_myself)

    def test_dispatching_simple(self):
        """ Test the dispatching process: simple configuration

        :return: None
        """
        self._dispatching()

    def test_dispatching_multiple_schedulers(self):
        """ Test the dispatching process: 1 realm, 2 schedulers

        :return: None
        """
        self._dispatching('cfg/dispatcher/simple_multi_schedulers.ini',
                          multi_realms=True)

    def test_dispatching_multiple_pollers(self):
        """ Test the dispatching process: 1 realm, 2 pollers

        :return: None
        """
        self._dispatching('cfg/dispatcher/simple_multi_pollers.ini')

    def test_dispatching_multiple_realms(self):
        """ Test the dispatching process: 2 realms, all daemons duplicated

        :return: None
        """
        self._dispatching('cfg/dispatcher/2-realms.ini', multi_realms=True)

    def test_dispatching_multiple_realms_sub_realms(self):
        """ Test the dispatching process: 2 realms, some daemons are sub_realms managers

        realm All:
        * 1 scheduler
        * 1 receiver

        realm realm2:
        * 1 receiver
        * 1 scheduler
        * 1 poller

        realm All + realm2 (sub realm):
        * 1 broker
        * 1 poller
        * 1 reactionner

        realm realm3:
        * 1 receiver
        * 1 scheduler
        * 1 reactionner
        * 1 broker
        * 1 poller

        :return: None
        """
        self._dispatching('cfg/dispatcher/realms_with_sub_realms.ini',
                          multi_realms=True)

    def test_dispatching_multiple_realms_sub_realms_multi_schedulers(self):
        """ Test the dispatching process: 2 realms, some daemons are sub_realms managers and
        we have several schedulers. Daemons marked with (+) have manage_sub_realms=1

        realm All (6 hosts):
        * 2 schedulers (+)

        realm All / All1 (6 hosts):
        * 3 schedulers (+)

        realm All / All1 / All1a (4 hosts):
        * 2 schedulers (+)

        :return: None
        """
        self._dispatching(
            'cfg/dispatcher/realms_with_sub_realms_multi_schedulers.ini',
            multi_realms=True)

    @pytest.mark.skip(
        "Currently disabled - spare feature - and whatever this test seems broken!"
    )
    def test_dispatching_spare_arbiter(self):
        """ Test the dispatching process: 1 realm, 1 spare arbiter

        :return: None
        """
        self._dispatching('cfg/dispatcher/spare_arbiter.ini')

    @pytest.mark.skip(
        "Currently disabled - spare feature - and whatever this test seems broken!"
    )
    def test_simple_scheduler_spare(self):
        """ Test simple but with spare of scheduler

        :return: None
        """
        with requests_mock.mock() as mockreq:
            for port in ['7768', '7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/ping' % port, json='pong')

            self.setup_with_file('cfg/dispatcher/simple.cfg')
            self.show_logs()
            json_managed = {
                self._scheduler_daemon.conf.uuid:
                self._scheduler_daemon.conf.push_flavor
            }
            for port in ['7768', '7772', '7771', '7769', '7773']:
                mockreq.get('http://localhost:%s/what_i_managed' % port,
                            json=json_managed)
            mockreq.get('http://localhost:8002/what_i_managed', json='{}')

            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch_ok = True

            assert 2 == len(self._arbiter.dispatcher.schedulers)
            assert 4 == len(self._arbiter.dispatcher.satellites)
            master_sched = None
            spare_sched = None
            for scheduler in self._arbiter.dispatcher.schedulers:
                if scheduler.get_name() == 'scheduler-master':
                    scheduler.is_sent = True
                    master_sched = scheduler
                else:
                    spare_sched = scheduler

            assert master_sched.ping
            assert 1 == master_sched.attempt
            assert spare_sched.ping
            assert 0 == spare_sched.attempt

        for satellite in self._arbiter.dispatcher.satellites:
            assert 1 == len(satellite.cfg['schedulers'])
            scheduler = next(iter(satellite.cfg['schedulers'].values()))
            assert 'scheduler-master' == scheduler['name']

        # now simulate master sched down
        master_sched.check_interval = 1
        spare_sched.check_interval = 1
        for satellite in self._arbiter.dispatcher.receivers:
            satellite.check_interval = 1
        for satellite in self._arbiter.dispatcher.reactionners:
            satellite.check_interval = 1
        for satellite in self._arbiter.dispatcher.brokers:
            satellite.check_interval = 1
        for satellite in self._arbiter.dispatcher.pollers:
            satellite.check_interval = 1
        time.sleep(1)

        with requests_mock.mock() as mockreq:
            for port in ['7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/ping' % port, json='pong')

            for port in ['7772', '7771', '7769', '7773']:
                mockreq.get('http://localhost:%s/what_i_managed' % port,
                            json=json_managed)
            mockreq.get('http://localhost:8002/what_i_managed', json='{}')

            for port in ['7772', '7771', '7769', '7773', '8002']:
                mockreq.post('http://localhost:%s/put_conf' % port,
                             json='true')

            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.check_dispatch()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch()
            self._arbiter.dispatcher.check_bad_dispatch()

            assert master_sched.ping
            assert 2 == master_sched.attempt

            time.sleep(1)
            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.check_dispatch()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch()
            self._arbiter.dispatcher.check_bad_dispatch()

            assert master_sched.ping
            assert 3 == master_sched.attempt
            # assert master_sched.alive
            #
            # time.sleep(1)
            # self.arbiter.dispatcher.check_alive()
            # self.arbiter.dispatcher.check_dispatch()
            # self.arbiter.dispatcher.prepare_dispatch()
            # self.arbiter.dispatcher.dispatch()
            # self.arbiter.dispatcher.check_bad_dispatch()

            assert not master_sched.alive

            history = mockreq.request_history
            send_conf_to_sched_master = False
            conf_sent = {}
            for index, hist in enumerate(history):
                if hist.url == 'http://localhost:7768/put_conf':
                    send_conf_to_sched_master = True
                elif hist.url == 'http://localhost:8002/put_conf':
                    conf_sent['scheduler-spare'] = hist.json()
                elif hist.url == 'http://localhost:7772/put_conf':
                    conf_sent['broker'] = hist.json()
                elif hist.url == 'http://localhost:7771/put_conf':
                    conf_sent['poller'] = hist.json()
                elif hist.url == 'http://localhost:7769/put_conf':
                    conf_sent['reactionner'] = hist.json()
                elif hist.url == 'http://localhost:7773/put_conf':
                    conf_sent['receiver'] = hist.json()

            assert not send_conf_to_sched_master, \
                'Conf to scheduler master must not be sent because it is not alive'
            self.show_logs()
            assert 5 == len(conf_sent)
            assert ['conf'] == list(conf_sent['scheduler-spare'].keys())

            json_managed_spare = {}
            for satellite in self._arbiter.dispatcher.satellites:
                assert 1 == len(satellite.cfg['schedulers'])
                scheduler = next(iter(satellite.cfg['schedulers'].values()))
                assert 'scheduler-spare' == scheduler['name']
                json_managed_spare[
                    scheduler['instance_id']] = scheduler['push_flavor']

        # return of the scheduler master
        print("*********** Return of the king / master ***********")
        with requests_mock.mock() as mockreq:
            for port in ['7768', '7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/ping' % port, json='pong')

            mockreq.get('http://localhost:7768/what_i_managed',
                        json=json_managed)
            for port in ['7772', '7771', '7769', '7773', '8002']:
                mockreq.get('http://localhost:%s/what_i_managed' % port,
                            json=json_managed_spare)

            for port in ['7768', '7772', '7771', '7769', '7773', '8002']:
                mockreq.post('http://localhost:%s/put_conf' % port,
                             json='true')

            time.sleep(1)
            self._arbiter.dispatcher.check_reachable()
            self._arbiter.dispatcher.check_dispatch()
            self._arbiter.dispatcher.prepare_dispatch()
            self._arbiter.dispatcher.dispatch()
            self._arbiter.dispatcher.check_bad_dispatch()

            assert master_sched.ping
            assert 0 == master_sched.attempt

            history = mockreq.request_history
            conf_sent = {}
            for index, hist in enumerate(history):
                if hist.url == 'http://localhost:7768/put_conf':
                    conf_sent['scheduler-master'] = hist.json()
                elif hist.url == 'http://localhost:8002/put_conf':
                    conf_sent['scheduler-spare'] = hist.json()
                elif hist.url == 'http://localhost:7772/put_conf':
                    conf_sent['broker'] = hist.json()
                elif hist.url == 'http://localhost:7771/put_conf':
                    conf_sent['poller'] = hist.json()
                elif hist.url == 'http://localhost:7769/put_conf':
                    conf_sent['reactionner'] = hist.json()
                elif hist.url == 'http://localhost:7773/put_conf':
                    conf_sent['receiver'] = hist.json()

            assert set(['scheduler-master', 'broker', 'poller', 'reactionner',
                                  'receiver']) == \
                             set(conf_sent.keys())

            for satellite in self._arbiter.dispatcher.satellites:
                assert 1 == len(satellite.cfg['schedulers'])
                scheduler = next(iter(satellite.cfg['schedulers'].values()))
                assert 'scheduler-master' == scheduler['name']
Example n. 6
0
    def _monitoring(self, env_filename='cfg/monitor/simple.ini', loops=3, multi_realms=False):
        """ monitoring process: prepare, check, dispatch

        This function performs all the monitoring operations:
        - load a monitoring configuration
        - prepare the monitoring
        - dispatch
        - check that the monitoring is correct, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the whole arbiter configuration. This would really be too complex to assert on :(

        Schedulers must have a port number ending with 7768 (e.g. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (e.g. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test', 'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        # my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s"
              % ["%s:%s" % (link.address, link.port)
                 for link in my_dispatcher.all_daemons_links])

        assert my_arbiter.alignak_monitor == "http://super_alignak:7773/ws"
        assert my_arbiter.alignak_monitor_username == 'admin'
        assert my_arbiter.alignak_monitor_password == 'admin'

        metrics = []
        for type in sorted(my_arbiter.conf.types_creations):
            _, _, strclss, _, _ = my_arbiter.conf.types_creations[type]
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            metrics.append("'%s'=%d" % (strclss, len(objects_list)))
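        # Added commentary (not in the original test): each "'objects'=count"
        # string built above is a performance data metric; they are checked
        # later against the perf_data patched on the monitor /host endpoint.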

        # Simulate the daemons HTTP interface (very simple simulation !)
        with requests_mock.mock() as mr:
            mr.post('%s/login' % (my_arbiter.alignak_monitor),
                    json={
                        "_status": "OK",
                        "_result": ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                    })
            mr.patch('%s/host' % (my_arbiter.alignak_monitor),
                   json={
                       "_status": "OK",
                       "_result": ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                   })

            # Time warp 5 seconds - go past the ping period...
            self.clear_logs()
            # frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

            my_arbiter.get_alignak_status(details=False)

            self.show_logs()

            # Inspect the requests history to check the data patched on the monitor /host endpoint...
            history = mr.request_history
            for index, request in enumerate(history):
                # Check what is patched on /host ...
                if 'host' in request.url:
                    received = request.json()
                    print((index, request.url, received))

                    from pprint import pprint
                    pprint(received)

                    assert received['name'] == 'My Alignak'
                    assert received['livestate']['timestamp'] == 1519583400
                    assert received['livestate']['state'] == 'up'
                    assert received['livestate']['output'] == 'Some of my daemons are not reachable.'
                    for metric in metrics:
                        assert metric in received['livestate']['perf_data']
                    print(received['livestate']['long_output'])
                    # Long output is sorted by daemon name
                    assert received['livestate']['long_output'] == \
                           u'broker-master - daemon is not reachable.\n' \
                           u'poller-master - daemon is not reachable.\n' \
                           u'reactionner-master - daemon is not reachable.\n' \
                           u'receiver-master - daemon is not reachable.\n' \
                           u'scheduler-master - daemon is not reachable.'

                    for link in my_dispatcher.all_daemons_links:
                        assert link.name in [service['name'] for service in received['services']]

                    for service in received['services']:
                        assert 'name' in service
                        assert 'livestate' in service
                        assert 'timestamp' in service['livestate']
                        assert 'state' in service['livestate']
                        assert 'output' in service['livestate']
                        assert 'long_output' in service['livestate']
                        assert 'perf_data' in service['livestate']
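
    # Illustrative sketch (added commentary, not part of the original snippet):
    # _monitoring() is a helper meant to be wrapped by thin test methods, much
    # like the _dispatching() wrappers earlier in this file. The wrapper below
    # is a hypothetical example, not a test taken from the original source.
    def test_monitoring_simple(self):
        """ Monitoring process: simple configuration (hypothetical wrapper) """
        self._monitoring()
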
Example n. 7
0
class AlignakTest(unittest.TestCase):

    time_hacker = TimeHacker()
    maxDiff = None

    if sys.version_info < (2, 7):

        def assertRegex(self, *args, **kwargs):
            return self.assertRegexpMatches(*args, **kwargs)

    def setup_logger(self):
        """
        Setup a log collector
        :return:
        """
        self.logger = logging.getLogger("alignak")

        # Add collector for test purpose.
        collector_h = CollectorHandler()
        collector_h.setFormatter(DEFAULT_FORMATTER_NAMED)
        self.logger.addHandler(collector_h)

    def files_update(self, files, replacements):
        """Update files content with the defined replacements

        :param files: list of files to parse and replace
        :param replacements: dict mapping each string to replace to its replacement value
        :return:
        """
        for filename in files:
            lines = []
            with open(filename) as infile:
                for line in infile:
                    for src, target in replacements.items():
                        line = line.replace(src, target)
                    lines.append(line)
            with open(filename, 'w') as outfile:
                for line in lines:
                    outfile.write(line)
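
    # Illustrative usage (added commentary, not in the original source): the
    # replacements argument maps old substrings to new ones, e.g. pointing a
    # configuration file at a temporary working directory (the file name and
    # keys below are hypothetical):
    #   self.files_update(['/tmp/alignak/etc/alignak.ini'],
    #                     {'/usr/local/var/run/alignak': '/tmp/alignak/run'})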

    def setup_with_file(self, configuration_file):
        """
        Load alignak with defined configuration file

        If the configuration loading fails, a SystemExit exception is raised to the caller.

        The conf_is_correct property indicates if the configuration loading succeeded or failed.

        The configuration errors property contains a list of the error messages that are normally
        logged as ERROR by the arbiter.

        @verified

        :param configuration_file: path + file name of the main configuration file
        :type configuration_file: str
        :return: None
        """
        self.broks = {}
        self.schedulers = {}
        self.brokers = {}
        self.pollers = {}
        self.receivers = {}
        self.reactionners = {}
        self.arbiter = None
        self.conf_is_correct = False
        self.configuration_warnings = []
        self.configuration_errors = []

        # Add collector for test purpose.
        self.setup_logger()

        # Initialize the Arbiter with no daemon configuration file
        self.arbiter = Arbiter(None, [configuration_file], False, False, False,
                               False, '/tmp/arbiter.log', 'arbiter-master')

        try:
            # The following is copied from setup_alignak_logger.
            # The only difference is that the logger is kept at INFO level to gather messages.
            # This is needed to assert later on the logs we received.
            self.logger.setLevel(logging.INFO)
            # Force the debug level if the daemon is said to start with such level
            if self.arbiter.debug:
                self.logger.setLevel(logging.DEBUG)

            # Log will be broks
            for line in self.arbiter.get_header():
                self.logger.info(line)

            self.arbiter.load_monitoring_config_file()

            # If this assertion does not match, then there is a bug in the arbiter :)
            self.assertTrue(self.arbiter.conf.conf_is_correct)
            self.conf_is_correct = True
            self.configuration_warnings = self.arbiter.conf.configuration_warnings
            self.configuration_errors = self.arbiter.conf.configuration_errors
        except SystemExit:
            self.configuration_warnings = self.arbiter.conf.configuration_warnings
            print("Configuration warnings:")
            for msg in self.configuration_warnings:
                print(" - %s" % msg)
            self.configuration_errors = self.arbiter.conf.configuration_errors
            print("Configuration errors:")
            for msg in self.configuration_errors:
                print(" - %s" % msg)
            raise

        for arb in self.arbiter.conf.arbiters:
            if arb.get_name() == self.arbiter.arbiter_name:
                self.arbiter.myself = arb
        self.arbiter.dispatcher = Dispatcher(self.arbiter.conf,
                                             self.arbiter.myself)
        self.arbiter.dispatcher.prepare_dispatch()

        # Build schedulers dictionary with the schedulers involved in the configuration
        for scheduler in self.arbiter.dispatcher.schedulers:
            sched = Alignak([], False, False, True, '/tmp/scheduler.log')
            sched.load_modules_manager(scheduler.name)
            sched.new_conf = scheduler.conf_package
            if sched.new_conf:
                sched.setup_new_conf()
            self.schedulers[scheduler.scheduler_name] = sched

        # Build pollers dictionary with the pollers involved in the configuration
        for poller in self.arbiter.dispatcher.pollers:
            self.pollers[poller.poller_name] = poller

        # Build receivers dictionary with the receivers involved in the configuration
        for receiver in self.arbiter.dispatcher.receivers:
            self.receivers[receiver.receiver_name] = receiver

        # Build reactionners dictionary with the reactionners involved in the configuration
        for reactionner in self.arbiter.dispatcher.reactionners:
            self.reactionners[reactionner.reactionner_name] = reactionner

        # Build brokers dictionary with the brokers involved in the configuration
        for broker in self.arbiter.dispatcher.brokers:
            self.brokers[broker.broker_name] = broker

    def add(self, b):
        if isinstance(b, Brok):
            self.broks[b.uuid] = b
            return
        if isinstance(b, ExternalCommand):
            self.schedulers['scheduler-master'].run_external_command(
                b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """
        Simulate a check execution and result
        :param ref: host/service concerned by the check
        :param exit_status: check exit status code (0, 1, ...).
               If set to None, the check is simply scheduled but not "executed"
        :param output: check output (output + perf data)
        :return:
        """

        now = time.time()
        check = ref.schedule(
            self.schedulers['scheduler-master'].sched.hosts,
            self.schedulers['scheduler-master'].sched.services,
            self.schedulers['scheduler-master'].sched.timeperiods,
            self.schedulers['scheduler-master'].sched.macromodulations,
            self.schedulers['scheduler-master'].sched.checkmodulations,
            self.schedulers['scheduler-master'].sched.checks,
            force=True,
            force_time=None)
        # now the check is scheduled and we get it in the action queue
        self.schedulers['scheduler-master'].sched.add(
            check)  # check is now in sched.checks[]

        # Allows forcing check scheduling without setting its status or output.
        # Useful for manual business rules rescheduling, for instance.
        if exit_status is None:
            return

        # fake execution
        check.check_time = now

        # and lie about when we will launch it: otherwise the schedule call
        # for ref would not really reschedule it because there is already
        # a valid value in the future
        ref.next_chk = now - 0.5

        # Max plugin output defaults to 8192
        check.get_outputs(output, 8192)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'

        # Put the check result in the waiting results for the scheduler ...
        self.schedulers['scheduler-master'].sched.waiting_results.put(check)
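
    # Illustrative usage (added commentary, not in the original source): one
    # possible pattern is to fake a CRITICAL result and then run a scheduler
    # turn so that the waiting result gets consumed (svc and the output string
    # are hypothetical placeholders):
    #   self.fake_check(svc, 2, 'CRITICAL - something is broken')
    #   self.scheduler_loop(1, [])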

    def scheduler_loop(self, count, items, mysched=None):
        """
        Manage scheduler checks

        @verified

        :param count: number of checks to pass
        :type count: int
        :param items: list of lists [[object, exit_status, output]]
        :type items: list
        :param mysched: The scheduler
        :type mysched: None | object
        :return: None
        """
        if mysched is None:
            mysched = self.schedulers['scheduler-master']

        macroresolver = MacroResolver()
        macroresolver.init(mysched.conf)

        for num in range(count):
            for item in items:
                (obj, exit_status, output) = item
                if len(obj.checks_in_progress) == 0:
                    for i in mysched.sched.recurrent_works:
                        (name, fun,
                         nb_ticks) = mysched.sched.recurrent_works[i]
                        if nb_ticks == 1:
                            fun()
                self.assertGreater(len(obj.checks_in_progress), 0)
                chk = mysched.sched.checks[obj.checks_in_progress[0]]
                chk.set_type_active()
                chk.check_time = time.time()
                chk.wait_time = 0.0001
                chk.last_poll = chk.check_time
                chk.output = output
                chk.exit_status = exit_status
                mysched.sched.waiting_results.put(chk)

            for i in mysched.sched.recurrent_works:
                (name, fun, nb_ticks) = mysched.sched.recurrent_works[i]
                if nb_ticks == 1:
                    fun()
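
    # Illustrative usage (added commentary, not in the original source): drive
    # one scheduling turn while faking an OK check result for a host object
    # taken from the scheduler (host is a hypothetical placeholder):
    #   self.scheduler_loop(1, [[host, 0, 'UP']])
    # where 0 is the faked exit status and 'UP' the faked plugin output.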

    def external_command_loop(self):
        """
        Execute the scheduler actions for external commands.

        Yes, why not, but the scheduler is not an ECM 'dispatcher' but an 'applier'...

        @verified
        :return:
        """
        for i in self.schedulers['scheduler-master'].sched.recurrent_works:
            (name, fun, nb_ticks
             ) = self.schedulers['scheduler-master'].sched.recurrent_works[i]
            if nb_ticks == 1:
                fun()
        self.assert_no_log_match(
            "External command Brok could not be sent to any daemon!")

    def worker_loop(self, verbose=True):
        self.schedulers['scheduler-master'].sched.delete_zombie_checks()
        self.schedulers['scheduler-master'].sched.delete_zombie_actions()
        checks = self.schedulers['scheduler-master'].sched.get_to_run_checks(
            True, False, worker_name='tester')
        actions = self.schedulers['scheduler-master'].sched.get_to_run_checks(
            False, True, worker_name='tester')
        if verbose is True:
            self.show_actions()
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.schedulers['scheduler-master'].sched.put_results(a)
        if verbose is True:
            self.show_actions()

    def launch_internal_check(self, svc_br):
        """ Launch an internal check for the business rule service provided """
        self._sched = self.schedulers['scheduler-master'].sched

        # Launch an internal check
        now = time.time()
        self._sched.add(
            svc_br.launch_check(now - 1, self._sched.hosts,
                                self._sched.services, self._sched.timeperiods,
                                self._sched.macromodulations,
                                self._sched.checkmodulations,
                                self._sched.checks))
        c = svc_br.actions[0]
        self.assertEqual(True, c.internal)
        self.assertTrue(c.is_launchable(now))

        # ask the scheduler to launch this check
        # and ask 2 loops: one to launch the check
        # and another to get the result
        self.scheduler_loop(2, [])

        # We should not have the check anymore
        self.assertEqual(0, len(svc_br.actions))

    def show_logs(self, scheduler=False):
        """
        Show logs. Get logs collected by the collector handler and print them

        @verified
        :param scheduler:
        :return:
        """
        print "--- logs <<<----------------------------------"
        collector_h = [
            hand for hand in self.logger.handlers
            if isinstance(hand, CollectorHandler)
        ][0]
        for log in collector_h.collector:
            safe_print(log)

        print "--- logs >>>----------------------------------"

    def show_actions(self):
        print "--- actions <<<----------------------------------"
        actions = sorted(
            self.schedulers['scheduler-master'].sched.actions.values(),
            key=lambda x: x.creation_time)
        for a in actions:
            if a.is_a == 'notification':
                item = self.schedulers[
                    'scheduler-master'].sched.find_item_by_id(a.ref)
                if item.my_type == "host":
                    ref = "host: %s" % item.get_name()
                else:
                    hst = self.schedulers[
                        'scheduler-master'].sched.find_item_by_id(item.host)
                    ref = "host: %s svc: %s" % (hst.get_name(),
                                                item.get_name())
                print "NOTIFICATION %s %s %s %s %s" % (
                    a.uuid, ref, a.type, time.asctime(time.localtime(
                        a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_checks(self):
        """
        Show checks from the scheduler
        :return:
        """
        print "--- checks <<<--------------------------------"
        checks = sorted(
            self.schedulers['scheduler-master'].sched.checks.values(),
            key=lambda x: x.creation_time)
        for check in checks:
            print("- %s" % check)
        print "--- checks >>>--------------------------------"

    def show_and_clear_logs(self):
        """
        Prints and then deletes the current logs stored in the log collector

        @verified
        :return:
        """
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        """
        Count the log lines collected by the log collector handler.

        @verified
        :return:
        """
        collector_h = [
            hand for hand in self.logger.handlers
            if isinstance(hand, CollectorHandler)
        ][0]
        return len(collector_h.collector)

    def count_actions(self):
        """
        Count the actions in the scheduler's actions.

        @verified
        :return:
        """
        return len(self.schedulers['scheduler-master'].sched.actions.values())

    def clear_logs(self):
        """
        Remove all the logs stored in the logs collector

        @verified
        :return:
        """
        collector_h = [
            hand for hand in self.logger.handlers
            if isinstance(hand, CollectorHandler)
        ][0]
        collector_h.collector = []

    def clear_actions(self):
        """
        Clear the actions in the scheduler's actions.

        @verified
        :return:
        """
        self.schedulers['scheduler-master'].sched.actions = {}

    def assert_actions_count(self, number):
        """
        Check the number of actions

        @verified

        :param number: number of actions we must have
        :type number: int
        :return: None
        """
        actions = sorted(
            self.schedulers['scheduler-master'].sched.actions.values(),
            key=lambda x: x.creation_time)
        self.assertEqual(
            number, len(self.schedulers['scheduler-master'].sched.actions),
            "Not found expected number of actions:\nactions_logs=[[[\n%s\n]]]"
            %
            ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, '
                       'planned: %s, command: %s' %
                       (idx, b.creation_time, b.is_a, b.type, b.status,
                        b.t_to_go, b.command)
                       for idx, b in enumerate(actions))))

    def assert_actions_match(self, index, pattern, field):
        """
        Check if the pattern is found in the given field (property) of the action at the given index in the actions list

        @verified

        :param index: index in the actions list. If index is -1, all the actions in the list are
        searched for a matching pattern
        :type index: int
        :param pattern: pattern to verify is in the action
        :type pattern: str
        :param field: name of the field (property) of the action
        :type field: str
        :return: None
        """
        regex = re.compile(pattern)
        actions = sorted(
            self.schedulers['scheduler-master'].sched.actions.values(),
            key=lambda x: x.creation_time)
        if index != -1:
            myaction = actions[index]
            self.assertTrue(
                regex.search(getattr(myaction, field)),
                "Not found a matching pattern in actions:\n"
                "index=%s field=%s pattern=%r\n"
                "action_line=creation: %s, is_a: %s, type: %s, "
                "status: %s, planned: %s, command: %s" %
                (index, field, pattern, myaction.creation_time, myaction.is_a,
                 myaction.type, myaction.status, myaction.t_to_go,
                 myaction.command))
            return

        for myaction in actions:
            if regex.search(getattr(myaction, field)):
                return

        self.assertTrue(
            False,
            "Not found a matching pattern in actions:\nfield=%s pattern=%r\n" %
            (field, pattern))
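
    # Illustrative usage (added commentary, not in the original source):
    #   self.assert_actions_match(0, 'notifier.pl', 'command')
    # checks that the command of the oldest action matches the pattern, while
    # index=-1 searches all the actions (the pattern here is hypothetical).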

    def assert_log_match(self, pattern, index=None):
        """
        Search if the log with the index number has the pattern in the Arbiter logs.

        If index is None, then all the collected logs are searched for the pattern

        Logs numbering starts from 0 (the oldest stored log line)

        This function asserts on the search result: if no log matching the search
        criteria is found, an assertion is raised and the test stops on error.

        :param pattern: string to search in log
        :type pattern: str
        :param index: index number
        :type index: int
        :return: None
        """
        self.assertIsNotNone(pattern, "Searched pattern can not be None!")

        collector_h = [
            hand for hand in self.logger.handlers
            if isinstance(hand, CollectorHandler)
        ][0]

        regex = re.compile(pattern)
        log_num = 0

        found = False
        for log in collector_h.collector:
            if index is None:
                if regex.search(log):
                    found = True
                    break
            elif index == log_num:
                if regex.search(log):
                    found = True
                    break
            log_num += 1

        self.assertTrue(
            found,
            "Not found a matching log line in logs:\nindex=%s pattern=%r\n"
            "logs=[[[\n%s\n]]]" % (index, pattern, '\n'.join(
                '\t%s=%s' % (idx, b.strip())
                for idx, b in enumerate(collector_h.collector))))
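
    # Illustrative usage (added commentary, not in the original source):
    #   self.assert_log_match(re.escape("[alignak] Dispatching..."), index=0)
    # asserts that the very first collected log line matches the pattern;
    # omitting index searches the whole collection (the pattern is hypothetical).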

    def assert_checks_count(self, number):
        """
        Check the number of checks

        @verified

        :param number: number of checks we must have
        :type number: int
        :return: None
        """
        checks = sorted(
            self.schedulers['scheduler-master'].sched.checks.values(),
            key=lambda x: x.creation_time)
        self.assertEqual(
            number, len(checks),
            "Not found expected number of checks:\nchecks_logs=[[[\n%s\n]]]" %
            ('\n'.join(
                '\t%s = creation: %s, is_a: %s, type: %s, status: %s, planned: %s, '
                'command: %s' % (idx, b.creation_time, b.is_a, b.type,
                                 b.status, b.t_to_go, b.command)
                for idx, b in enumerate(checks))))

    def assert_checks_match(self, index, pattern, field):
        """
        Check if the pattern is found in the given field (property) of the check at the given index in the checks list

        @verified

        :param index: index number of checks list
        :type index: int
        :param pattern: pattern to verify is in the check
        :type pattern: str
        :param field: name of the field (property) of the check
        :type field: str
        :return: None
        """
        regex = re.compile(pattern)
        checks = sorted(
            self.schedulers['scheduler-master'].sched.checks.values(),
            key=lambda x: x.creation_time)
        mycheck = checks[index]
        self.assertTrue(
            regex.search(getattr(mycheck, field)),
            "Not found a matching pattern in checks:\nindex=%s field=%s pattern=%r\n"
            "check_line=creation: %s, is_a: %s, type: %s, status: %s, planned: %s, "
            "command: %s" %
            (index, field, pattern, mycheck.creation_time, mycheck.is_a,
             mycheck.type, mycheck.status, mycheck.t_to_go, mycheck.command))

    def _any_check_match(self, pattern, field, assert_not):
        """
        Search if any check matches the requested pattern

        @verified
        :param pattern:
        :param field to search with pattern:
        :param assert_not:
        :return:
        """
        regex = re.compile(pattern)
        checks = sorted(
            self.schedulers['scheduler-master'].sched.checks.values(),
            key=lambda x: x.creation_time)
        for check in checks:
            if re.search(regex, getattr(check, field)):
                self.assertTrue(
                    not assert_not, "Found check:\nfield=%s pattern=%r\n"
                    "check_line=creation: %s, is_a: %s, type: %s, status: %s, "
                    "planned: %s, command: %s" %
                    (field, pattern, check.creation_time, check.is_a,
                     check.type, check.status, check.t_to_go, check.command))
                return
        self.assertTrue(
            assert_not, "No matching check found:\n"
            "pattern = %r\n"
            "checks = %r" % (pattern, checks))

    def assert_any_check_match(self, pattern, field):
        """
        Assert if any check matches the pattern

        @verified
        :param pattern:
        :param field to search with pattern:
        :return:
        """
        self._any_check_match(pattern, field, assert_not=False)

    def assert_no_check_match(self, pattern, field):
        """
        Assert if no check matches the pattern

        @verified
        :param pattern:
        :param field to search with pattern:
        :return:
        """
        self._any_check_match(pattern, field, assert_not=True)

    def _any_log_match(self, pattern, assert_not):
        """
        Search if any collected log line matches the requested pattern

        @verified
        :param pattern:
        :param assert_not:
        :return:
        """
        regex = re.compile(pattern)

        collector_h = [
            hand for hand in self.logger.handlers
            if isinstance(hand, CollectorHandler)
        ][0]

        for log in collector_h.collector:
            if re.search(regex, log):
                self.assertTrue(
                    not assert_not, "Found matching log line:\n"
                    "pattern = %r\nbrok log = %r" % (pattern, log))
                return

        self.assertTrue(
            assert_not, "No matching log line found:\n"
            "pattern = %r\n"
            "logs broks = %r" % (pattern, collector_h.collector))

    def assert_any_log_match(self, pattern):
        """
        Assert that at least one collected log line matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_log_match(pattern, assert_not=False)

    def assert_no_log_match(self, pattern):
        """
        Assert that no collected log line matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_log_match(pattern, assert_not=True)

    def _any_brok_match(self, pattern, level, assert_not):
        """
        Search if any brok message in the Scheduler broks matches the requested pattern and
        requested level

        @verified
        :param pattern:
        :param assert_not:
        :return:
        """
        regex = re.compile(pattern)

        monitoring_logs = []
        for brok in self._sched.brokers['broker-master']['broks'].values():
            if brok.type == 'monitoring_log':
                data = unserialize(brok.data)
                monitoring_logs.append((data['level'], data['message']))
                if re.search(regex,
                             data['message']) and (level is None
                                                   or data['level'] == level):
                    self.assertTrue(
                        not assert_not, "Found matching brok:\n"
                        "pattern = %r\nbrok message = %r" %
                        (pattern, data['message']))
                    return

        self.assertTrue(
            assert_not, "No matching brok found:\n"
            "pattern = %r\n"
            "brok message = %r" % (pattern, monitoring_logs))

    def assert_any_brok_match(self, pattern, level=None):
        """
        Search if any brok message in the Scheduler broks matches the requested pattern and
        requested level

        @verified
        :param pattern:
        :param scheduler:
        :return:
        """
        self._any_brok_match(pattern, level, assert_not=False)

    def assert_no_brok_match(self, pattern, level=None):
        """
        Search if no brok message in the Scheduler broks matches the requested pattern and
        requested level

        @verified
        :param pattern:
        :param scheduler:
        :return:
        """
        self._any_brok_match(pattern, level, assert_not=True)
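
    def example_brok_match_usage(self):
        """Illustrative usage sketch only - not part of the original test suite.

        Assumes the scheduler already raised a 'monitoring_log' brok whose
        message mentions a hypothetical host named 'test_host_0', and that
        'error' is one of the possible level values.
        """
        # Match on the message only...
        self.assert_any_brok_match('test_host_0')
        # ... or on both the message and the level ('error' is assumed here)
        self.assert_any_brok_match('test_host_0', level='error')
        self.assert_no_brok_match('never_monitored_host')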

    def get_log_match(self, pattern):
        regex = re.compile(pattern)
        res = []
        collector_h = [
            hand for hand in self.logger.handlers
            if isinstance(hand, CollectorHandler)
        ][0]

        for log in collector_h.collector:
            if re.search(regex, log):
                res.append(log)
        return res

    def print_header(self):
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        self.print_header()
        self.assertTrue(self.conf.conf_is_correct)

    def show_configuration_logs(self):
        """
        Prints the configuration logs

        @verified
        :return:
        """
        print("Configuration warnings:")
        for msg in self.configuration_warnings:
            print(" - %s" % msg)
        print("Configuration errors:")
        for msg in self.configuration_errors:
            print(" - %s" % msg)

    def _any_cfg_log_match(self, pattern, assert_not):
        """
        Search a pattern in configuration log (warning and error)

        @verified
        :param pattern:
        :return:
        """
        regex = re.compile(pattern)

        cfg_logs = self.configuration_warnings + self.configuration_errors

        for log in cfg_logs:
            if re.search(regex, log):
                self.assertTrue(
                    not assert_not, "Found matching log line:\n"
                    "pattern = %r\nlog = %r" % (pattern, log))
                return

        self.assertTrue(
            assert_not, "No matching log line found:\n"
            "pattern = %r\n"
            "logs = %r" % (pattern, cfg_logs))

    def assert_any_cfg_log_match(self, pattern):
        """
        Assert if any configuration log matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_cfg_log_match(pattern, assert_not=False)

    def assert_no_cfg_log_match(self, pattern):
        """
        Assert if no configuration log matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_cfg_log_match(pattern, assert_not=True)
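
    def example_cfg_log_match_usage(self):
        """Illustrative usage sketch only - not part of the original test suite.

        Assumes a configuration was loaded and produced at least one warning
        mentioning a hypothetical 'unknown_contact'.
        """
        self.show_configuration_logs()
        self.assert_any_cfg_log_match('unknown_contact')
        self.assert_no_cfg_log_match('this message was never raised')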
Example n. 8
0
class TestLaunchDaemons(AlignakTest):
    def setUp(self):
        super(TestLaunchDaemons, self).setUp()

        self.cfg_folder = '/tmp/alignak'
        self._prepare_configuration(copy=True, cfg_folder=self.cfg_folder)

        files = ['%s/etc/alignak.ini' % self.cfg_folder,
                 '%s/etc/alignak.d/daemons.ini' % self.cfg_folder,
                 '%s/etc/alignak.d/modules.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            cfg.set('alignak-configuration', 'launch_missing_daemons', '1')
            cfg.set('daemon.arbiter-master', 'alignak_launched', '1')
            cfg.set('daemon.scheduler-master', 'alignak_launched', '1')
            cfg.set('daemon.poller-master', 'alignak_launched', '1')
            cfg.set('daemon.reactionner-master', 'alignak_launched', '1')
            cfg.set('daemon.receiver-master', 'alignak_launched', '1')
            cfg.set('daemon.broker-master', 'alignak_launched', '1')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False
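
        # For reference, a minimal sketch (an assumption, not the shipped file) of
        # what the touched sections of the rewritten alignak.ini look like after
        # this setUp:
        #
        #   [alignak-configuration]
        #   launch_missing_daemons = 1
        #
        #   [daemon.arbiter-master]
        #   alignak_launched = 1
        #
        #   ... and the same alignak_launched flag for the scheduler, poller,
        #   reactionner, receiver and broker master daemon sections.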

    def tearDown(self):
        # Restore the default test logger configuration
        if 'ALIGNAK_LOGGER_CONFIGURATION' in os.environ:
            del os.environ['ALIGNAK_LOGGER_CONFIGURATION']

        print("Test terminated!")

    def test_arbiter_missing_parameters(self):
        """ Running the Alignak Arbiter with missing command line parameters

        :return:
        """
        print("Launching arbiter with missing parameters...")
        args = ["../alignak/bin/alignak_arbiter.py"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_no_environment(self):
        """ Running the Alignak Arbiter without environment file

        :return:
        """
        print("Launching arbiter without environment file...")
        args = ["../alignak/bin/alignak_arbiter.py"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        stderr = arbiter.stderr.read()
        print(stderr)
        assert b"usage: alignak_arbiter.py" in stderr
        # Arbiter process must exit with a return code == 2
        assert ret == 2

    def test_arbiter_class_no_environment(self):
        """ Instantiate the Alignak Arbiter class without environment file

        :return:
        """
        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter without environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': '',
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master',
            'legacy_cfg_files': ['../etc/alignak.cfg']
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        assert self.arbiter.env_filename == ''
        assert self.arbiter.legacy_cfg_files == [os.path.abspath('../etc/alignak.cfg')]

        # Configure the logger
        self.arbiter.log_level = 'ERROR'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()

    def test_arbiter_class_env_default(self):
        """ Instantiate the Alignak Arbiter class without legacy cfg files
        :return:
        """
        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        from alignak.daemons.arbiterdaemon import Arbiter
        print("Instantiate arbiter with default environment file...")
        # Using values that are usually provided by the command line parameters
        args = {
            'env_file': "/tmp/alignak/etc/alignak.ini",
            'daemon_name': 'arbiter-master'
        }
        self.arbiter = Arbiter(**args)

        print("Arbiter: %s" % (self.arbiter))
        print("Arbiter: %s" % (self.arbiter.__dict__))
        assert self.arbiter.env_filename == '/tmp/alignak/etc/alignak.ini'
        assert self.arbiter.legacy_cfg_files == []
        assert len(self.arbiter.legacy_cfg_files) == 0

        # Configure the logger
        self.arbiter.log_level = 'INFO'
        self.arbiter.setup_alignak_logger()

        # Setup our modules manager
        # self.arbiter.load_modules_manager()

        # Load and initialize the arbiter configuration
        # This to check that the configuration is correct!
        self.arbiter.load_monitoring_config_file()
        # No legacy files found
        assert len(self.arbiter.legacy_cfg_files) == 0

    def test_arbiter_unexisting_environment(self):
        """ Running the Alignak Arbiter with a not existing environment file

        :return:
        """
        print("Launching arbiter with a not existing environment file...")
        args = ["../alignak/bin/alignak_arbiter.py", "-e", "/tmp/etc/unexisting.ini"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Waiting for arbiter to parse the configuration
        sleep(3)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %d" % ret)
        assert ret is not None, "Arbiter is still running!"
        stdout = arbiter.stdout.read()
        print(stdout)
        assert b"Daemon 'arbiter-master' did not correctly read " \
               b"Alignak environment file: /tmp/etc/unexisting.ini" in stdout
        # Arbiter process must exit with a return code == 99
        assert ret == 99

    def test_arbiter_no_monitoring_configuration(self):
        """ Running the Alignak Arbiter with no monitoring configuration defined -
        no legacy cfg files

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        # Unset legacy configuration files
        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files - not configured
            cfg.set('alignak-configuration', 'cfg', '')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        ret = self._run_command_with_timeout(args, 30)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'total number of hosts in all realms: 0' in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        assert errors == 0
        assert ok

    def test_arbiter_unexisting_monitoring_configuration(self):
        """ Running the Alignak Arbiter with a not existing monitoring configuration file

        :return:
        """
        print("Launching arbiter with no monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak-missing.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'WARNING:' in line and "cannot open main file '/tmp/alignak/etc/alignak-missing.cfg' for reading" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The arbiter log must contain 2 errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_bad_configuration(self):
        """ Running the Alignak Arbiter with bad monitoring configuration (unknown sub directory)

        :return:
        """
        print("Launching arbiter with a bad monitoring configuration...")

        files = ['%s/etc/alignak.ini' % self.cfg_folder]
        try:
            cfg = configparser.ConfigParser()
            cfg.read(files)

            # Nagios legacy files
            cfg.set('alignak-configuration', 'cfg', '%(etcdir)s/alignak.cfg')

            with open('%s/etc/alignak.ini' % self.cfg_folder, "w") as modified:
                cfg.write(modified)
        except Exception as exp:
            print("* parsing error in config file: %s" % exp)
            assert False

        # Update configuration with a bad file name
        files = ['%s/etc/alignak.cfg' % self.cfg_folder]
        replacements = {
            'cfg_dir=arbiter/templates': 'cfg_dir=unexisting/objects/realms'
        }
        self._files_update(files, replacements)

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'ERROR:' in line and "*** One or more problems were encountered while processing the configuration (first check)..." in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The arbiter log must contain 2 errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_i_am_not_configured(self):
        """ Running the Alignak Arbiter with missing arbiter configuration

        :return:
        """
        print("Launching arbiter with a missing arbiter configuration...")

        if os.path.exists('%s/my-arbiter-name.log' % self._launch_dir):
            os.remove('%s/my-arbiter-name.log' % self._launch_dir)

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder, "-n", "my-arbiter-name"]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        ok = False
        # Note the log filename!
        with open('%s/my-arbiter-name.log' % self._launch_dir) as f:
            for line in f:
                if "I cannot find my own configuration (my-arbiter-name)" in line:
                    ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # The arbiter log must contain 2 errors
        assert errors == 2
        # Arbiter process must exit with a return code == 1
        assert ret == 1
        assert ok

    def test_arbiter_verify(self):
        """ Running the Alignak Arbiter in verify mode only with the default shipped configuration

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ['ALIGNAK_LOGGER_CONFIGURATION'] = './etc/warning_alignak-logger.json'

        print("Launching arbiter in verification mode...")
        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder, "-V"]
        ret = self._run_command_with_timeout(args, 20)

        errors = 0
        specific_log = False
        info_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'INFO:' in line:
                    info_log = True
                    if 'Arbiter is in configuration check mode' in line:
                        specific_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        # Arbiter changed the log level to INFO because of the verify mode
        assert specific_log is True
        assert info_log is True
        assert errors == 0
        assert ret == 0

    def test_arbiter_parameters_pid(self):
        """ Run the Alignak Arbiter with some parameters - set a pid file

        :return:
        """
        # All the default configuration files are in /tmp/etc

        print("Launching arbiter with forced PID file...")
        if os.path.exists('/tmp/arbiter.pid'):
            os.remove('/tmp/arbiter.pid')

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder, "-V",
                "--pid_file", "/tmp/arbiter.pid"]
        ret = self._run_command_with_timeout(args, 20)

        # The arbiter unlinks the pid file - I cannot assert it exists!
        # assert os.path.exists('/tmp/arbiter.pid')

        errors = 0
        # ok = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                # if 'Unlinking /tmp/arbiter.pid' in line:
                #     ok = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0
        # assert ok

    def test_arbiter_parameters_log(self):
        """ Run the Alignak Arbiter with some parameters - log file name

        :return:
        """
        # All the default configuration files are in /tmp/etc
        print("Launching arbiter with forced log file...")
        if os.path.exists('/tmp/arbiter.log'):
            os.remove('/tmp/arbiter.log')

        args = ["../alignak/bin/alignak_arbiter.py", "-e", '%s/etc/alignak.ini' % self.cfg_folder,
                "-V", "-vv", "--log_file", "/tmp/arbiter.log"]
        ret = self._run_command_with_timeout(args, 20)

        # Log file created because of the -V option
        assert os.path.exists("/tmp/arbiter.log")

        errors = 0
        with open('/tmp/arbiter.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # Arbiter process must exit with a return code == 0 and no errors
        assert errors == 0
        assert ret == 0
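
    def _count_log_errors(self, log_file):
        """Possible helper sketch (not in the original suite): factor out the
        ERROR/CRITICAL log scan that the surrounding tests repeat inline."""
        errors = 0
        with open(log_file) as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors += 1
        return errors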

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare_missing_configuration(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        print("Launching arbiter in spare mode...")
        args = ["../alignak/bin/alignak_arbiter.py",
                "-a", cfg_folder + "/alignak.cfg",
                "-c", cfg_folder + "/daemons/arbiterd.ini",
                "-n", "arbiter-spare"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        sleep(5)

        ret = arbiter.poll()
        print("*** Arbiter exited with code: %s" % ret)
        assert ret is not None, "Arbiter is still running!"
        # Arbiter process must exit with a return code == 1
        assert ret == 1

    @pytest.mark.skip("To be re-activated with spare mode")
    def test_arbiter_spare(self):
        """ Run the Alignak Arbiter in spare mode - missing spare configuration

        :return:
        """
        print("Launching arbiter in spare mode...")
        args = ["../alignak/bin/alignak_arbiter.py",
                "-a", cfg_folder + "/alignak.cfg",
                "-c", cfg_folder + "/daemons/arbiterd.ini",
                "-n", "arbiter-spare"]
        arbiter = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        ret = arbiter.poll()
        # Arbiter must still be running ... it is still trying to dispatch the configuration!
        assert ret is None, "Arbiter exited!"

        sleep(5)

        # Arbiter never stops trying to send its configuration! We must kill it...

        print("Asking arbiter to end...")
        os.kill(arbiter.pid, signal.SIGTERM)

        ret = arbiter.poll()
        print("*** Arbiter exited on kill, no return code!")
        assert ret is None, "Arbiter is still running!"
        # No ERRORS because the daemons are not alive !
        ok = 0
        for line in iter(arbiter.stdout.readline, b''):
            print(">>> %s" % line.rstrip())
            if b'INFO:' in line:
                # I must find this line
                if b'[alignak.daemons.arbiterdaemon] I found myself in the configuration: arbiter-spare' in line:
                    ok += 1
                # and this one also
                if b'[alignak.daemons.arbiterdaemon] I am a spare Arbiter: arbiter-spare' in line:
                    ok += 1
                if b'I am not the master arbiter, I stop parsing the configuration' in line:
                    ok += 1
                if b'Waiting for master...' in line:
                    ok += 1
                if b'Waiting for master death' in line:
                    ok += 1
                assert b'CRITICAL:' not in line
        for line in iter(arbiter.stderr.readline, b''):
            print("*** %s" % line.rstrip())
            if sys.version_info > (2, 7):
                assert False, "stderr output!"
        assert ok == 5

    def test_arbiter_normal(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity=None)

    def test_arbiter_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity='--verbose')
        self._arbiter(verbosity='-v')

    def test_arbiter_very_verbose(self):
        """ Running the Alignak Arbiter - normal verbosity

        :return:
        """
        self._arbiter(verbosity='--debug')
        # Execute only once, because it looks too verbose for Travis :/
        # self._arbiter(verbosity='-vv')

    def _arbiter(self, verbosity=None):
        """ Running the Alignak Arbiter with a specific verbosity

        :return:
        """
        # Set a specific logger configuration - do not use the default test configuration
        # to use the default shipped configuration
        os.environ['ALIGNAK_LOGGER_CONFIGURATION'] = './etc/warning_alignak-logger.json'

        print("Launching arbiter ...")
        args = ["../alignak/bin/alignak_arbiter.py", "-n", "arbiter-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        if verbosity:
            args.append(verbosity)
        arbiter = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('arbiter', arbiter.pid))

        # Wait for the arbiter to get started
        time.sleep(5)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7770')

        errors = 0
        info_log = False
        debug_log = False
        with open('/tmp/alignak/log/arbiter-master.log') as f:
            for line in f:
                if 'DEBUG:' in line:
                    debug_log = True
                if 'INFO:' in line:
                    info_log = True
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1

        # arbiter process may exit with no errors!
        # assert errors == 0
        # Arbiter changed the log level to INFO because of the verbose mode
        if verbosity in ['-v', '--verbose']:
            assert info_log is True
        # Arbiter changed the log level to DEBUG because of the debug mode
        if verbosity in ['-vv', '--debug']:
            assert debug_log is True

    def test_broker(self):
        """ Running the Alignak Broker

        :return:
        """
        print("Launching broker ...")
        args = ["../alignak/bin/alignak_broker.py", "-n", "broker-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        broker = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('broker', broker.pid))

        # Wait for the broker to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7772')

        errors = 0
        with open('/tmp/alignak/log/broker-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # broker process must exit with no errors
        assert errors == 0

    def test_poller(self):
        """ Running the Alignak poller

        :return:
        """
        print("Launching poller ...")
        args = ["../alignak/bin/alignak_poller.py", "-n", "poller-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        poller = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('poller', poller.pid))

        # Wait for the poller to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7771')

        errors = 0
        with open('/tmp/alignak/log/poller-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # the poller log must not contain any ERROR or CRITICAL line
        assert errors == 0

    def test_reactionner(self):
        """ Running the Alignak reactionner

        :return:
        """
        print("Launching reactionner ...")
        args = ["../alignak/bin/alignak_reactionner.py", "-n", "reactionner-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        reactionner = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('reactionner', reactionner.pid))

        # Wait for the reactionner to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7769')

        errors = 0
        with open('/tmp/alignak/log/reactionner-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # the reactionner log must not contain any ERROR or CRITICAL line
        assert errors == 0

    def test_receiver(self):
        """ Running the Alignak receiver

        :return:
        """
        print("Launching receiver ...")
        args = ["../alignak/bin/alignak_receiver.py", "-n", "receiver-master", "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        receiver = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('receiver', receiver.pid))

        # Wait for the receiver to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7773')

        errors = 0
        with open('/tmp/alignak/log/receiver-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # the receiver log must not contain any ERROR or CRITICAL line
        assert errors == 0

    def test_scheduler(self):
        """ Running the Alignak scheduler

        :return:
        """
        print("Launching scheduler ...")

        args = ["../alignak/bin/alignak_scheduler.py", "-n", "scheduler-master",
                "-e", '%s/etc/alignak.ini' % self.cfg_folder]
        scheduler = subprocess.Popen(args)
        print("%s launched (pid=%d)" % ('scheduler', scheduler.pid))

        # Wait for the scheduler to get started
        time.sleep(2)

        # This function will request the arbiter daemon to stop
        self._stop_alignak_daemons(request_stop_uri='http://127.0.0.1:7768')

        errors = 0
        with open('/tmp/alignak/log/scheduler-master.log') as f:
            for line in f:
                if 'ERROR:' in line or 'CRITICAL:' in line:
                    print("*** %s" % line.rstrip())
                    errors = errors + 1
        # the scheduler log must not contain any ERROR or CRITICAL line
        assert errors == 0
Example n. 9
0
    def _monitoring(self,
                    env_filename='cfg/monitor/simple.ini',
                    loops=3,
                    multi_realms=False):
        """ monitoring process: prepare, check, dispatch

        This function realizes all the monitoring operations:
        - load a monitoring configuration
        - prepare the monitoring
        - dispatch
        - check the correct monitoring, including:
            - check the configuration dispatched to the schedulers
            - check the configuration dispatched to the spare arbiter (if any)
        - run the check_reachable loop several times

        If multi_realms is True, the scheduler configurations received are not checked against
        the whole arbiter configuration. This would be far too complex to assert on :(

        Schedulers must have a port number ending with 7768 (eg. 7768, 17768, 27768, ...)

        Spare daemons must have a port number ending with 8770 (eg. 8770, 18770, 28770, ...)

        :return: None
        """
        args = {
            'env_file': env_filename,
            'alignak_name': 'alignak-test',
            'daemon_name': 'arbiter-master'
        }
        my_arbiter = Arbiter(**args)
        my_arbiter.setup_alignak_logger()

        # Clear logs
        self.clear_logs()

        my_arbiter.load_modules_manager()
        my_arbiter.load_monitoring_config_file()
        assert my_arbiter.conf.conf_is_correct is True

        # #1 - Get a new dispatcher
        my_dispatcher = Dispatcher(my_arbiter.conf, my_arbiter.link_to_myself)
        my_arbiter.dispatcher = my_dispatcher
        print("*** All daemons WS: %s" % [
            "%s:%s" % (link.address, link.port)
            for link in my_dispatcher.all_daemons_links
        ])

        assert my_arbiter.alignak_monitor == "http://super_alignak:7773/ws"
        assert my_arbiter.alignak_monitor_username == 'admin'
        assert my_arbiter.alignak_monitor_password == 'admin'

        metrics = []
        for _type in sorted(my_arbiter.conf.types_creations):
            _, _, strclss, _, _ = my_arbiter.conf.types_creations[_type]
            if strclss in ['hostescalations', 'serviceescalations']:
                continue

            objects_list = getattr(my_arbiter.conf, strclss, [])
            metrics.append("'%s'=%d" % (strclss, len(objects_list)))

        # Simulate the daemons HTTP interface (very simple simulation !)
        with requests_mock.mock() as mr:
            mr.post('%s/login' % my_arbiter.alignak_monitor,
                    json={
                        "_status": "OK",
                        "_result": ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                    })
            mr.patch('%s/host' % my_arbiter.alignak_monitor,
                     json={
                         "_status": "OK",
                         "_result": ["1508507175582-c21a7d8e-ace0-47f2-9b10-280a17152c7c"]
                     })

            # Time warp 5 seconds - overpass the ping period...
            self.clear_logs()
            # frozen_datetime.tick(delta=datetime.timedelta(seconds=5))

            my_arbiter.get_alignak_status(details=False)

            self.show_logs()

            # Inspect the requests history to check the configuration that was pushed...
            history = mr.request_history
            for index, request in enumerate(history):
                # Check what is patched on /host ...
                if 'host' in request.url:
                    received = request.json()
                    print((index, request.url, received))

                    from pprint import pprint
                    pprint(received)

                    assert received['name'] == 'My Alignak'
                    assert received['livestate']['timestamp'] == 1519583400
                    assert received['livestate']['state'] == 'up'
                    assert received['livestate']['output'] == \
                        'Some of my daemons are not reachable.'
                    for metric in metrics:
                        assert metric in received['livestate']['perf_data']
                    print(received['livestate']['long_output'])
                    # Long output is sorted by daemon name
                    assert received['livestate']['long_output'] == \
                           u'broker-master - daemon is not reachable.\n' \
                           u'poller-master - daemon is not reachable.\n' \
                           u'reactionner-master - daemon is not reachable.\n' \
                           u'receiver-master - daemon is not reachable.\n' \
                           u'scheduler-master - daemon is not reachable.'

                    for link in my_dispatcher.all_daemons_links:
                        assert link.name in [
                            service['name'] for service in received['services']
                        ]

                    for service in received['services']:
                        assert 'name' in service
                        assert 'livestate' in service
                        assert 'timestamp' in service['livestate']
                        assert 'state' in service['livestate']
                        assert 'output' in service['livestate']
                        assert 'long_output' in service['livestate']
                        assert 'perf_data' in service['livestate']