Example no. 1
 def init_livestatus(self):
     self.livelogs = 'tmp/livelogs.db' + self.testid
     self.db_archives = os.path.join(os.path.dirname(self.livelogs), 'archives')
     self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid
     self.livestatus_broker = Livestatus_broker(livestatus_modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, self.db_archives, 365, self.pnp4nagios, True)
     self.livestatus_broker.create_queues()
     #self.livestatus_broker.properties = {
     #    'to_queue' : 0,
     #    'from_queue' : 0
     #
     #    }
     self.livestatus_broker.init()
     self.livestatus_broker.db = LiveStatusDb(self.livestatus_broker.database_file, self.livestatus_broker.archive_path, self.livestatus_broker.max_logs_age)
     self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.use_aggressive_sql, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
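For orientation, here is a minimal sketch of how a test could query the broker once init_livestatus has run; handle_request and the LQL query format are taken from Example no. 4, while the exact columns requested are illustrative assumptions.

def query_hosts_sketch(self):
    # hedged sketch, not part of the original suite: send a simple LQL
    # request to the freshly initialized livestatus module
    request = """GET hosts
Columns: name state
ColumnHeaders: on
"""
    response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
    print response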
Example no. 2
 def setUp(self):
     self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
     Comment.id = 1
     self.testid = str(os.getpid() + random.randint(1, 1000))
     self.livelogs = 'tmp/livelogs.db' + self.testid
     self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid
     self.livestatus_broker = Livestatus_broker(modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, 365, self.pnp4nagios)
     self.livestatus_broker.create_queues()
     #self.livestatus_broker.properties = {
     #    'to_queue' : 0,
     #    'from_queue' : 0
     #
     #    }
     self.livestatus_broker.init()
     print "Cleaning old broks?"
     self.sched.fill_initial_broks()
     self.update_broker()
     self.nagios_path = None
     self.livestatus_path = None
     self.nagios_config = None
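The setUp above relies on self.update_broker(), which is not shown in these examples; a plausible sketch, assuming the module exposes the standard manage_brok entry point, would be:

def update_broker(self, verbose=False):
    # hedged sketch of the missing helper: push every pending brok from the
    # scheduler into the livestatus module, then clear the scheduler's queue
    for brok in self.sched.broks.values():
        self.livestatus_broker.manage_brok(brok)
    self.sched.broks = {}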
Example no. 3
class ShinkenTest(unittest.TestCase):
    def setUp(self):
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        # this test case acts as an arbiter-like object
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.old_properties_names_to_new()
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.create_reversed_list()
        self.conf.remove_twins()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.create_reversed_list()
        self.conf.pythonize()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        self.confs = self.conf.cut_into_parts()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)
        
        scheddaemon = Shinken(None, False, False, False, None)
        self.sched = Scheduler(scheddaemon)
        
        scheddaemon.sched = self.sched
                
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.schedule()


    def add(self, b):
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)


    def fake_check(self, ref, exit_status, output="OK"):
        #print "fake", ref
        now = time.time()
        ref.schedule(force=True)
        # now checks are scheduled and we get them in
        # the action queue
        check = ref.actions.pop()
        self.sched.add(check)  # check is now in sched.checks[]
        # fake execution
        check.check_time = now
        
        # and lie about when we will launch it, because otherwise
        # the schedule call for ref would not really reschedule it:
        # there is already a valid value in the future
        ref.next_chk = now - 0.5

        check.get_outputs(output, 9000)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)


    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61):
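        """Run `count` fake scheduling iterations: for every (obj, exit_status,
        output) tuple in `reflist` inject a fake check result, then let the
        scheduler consume the results and produce new actions and broks."""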
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.worker_loop()
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            #time.sleep(ref.retry_interval * 60 + 1)
            if do_sleep:
                time.sleep(sleep_time)


    def worker_loop(self):
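        """Simulate a poller/reactionner: every action the scheduler wants
        launched is marked as executed with exit status 0 and fed back as a
        result."""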
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False, worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True, worker_name='tester')
        #print "------------ worker loop checks ----------------"
        #print checks
        #print "------------ worker loop actions ----------------"
        self.show_actions()
        #print "------------ worker loop new ----------------"
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        self.show_actions()
        #print "------------ worker loop end ----------------"


    def show_logs(self):
        print "--- logs <<<----------------------------------"
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                print "LOG:", brok.data['log']
        print "--- logs >>>----------------------------------"


    def show_actions(self):
        print "--- actions <<<----------------------------------"
        for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"


    def show_and_clear_logs(self):
        self.show_logs()
        self.clear_logs()


    def show_and_clear_actions(self):
        self.show_actions()
        self.clear_actions()


    def count_logs(self):
        return len([b for b in self.sched.broks.values() if b.type == 'log'])


    def count_actions(self):
        return len(self.sched.actions.values())


    def clear_logs(self):
        id_to_del = []
        for b in self.sched.broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del self.sched.broks[id]


    def clear_actions(self):
        self.sched.actions = {}


    def log_match(self, index, pattern):
        # log messages are counted 1...n, so index=1 for the first message
        if index > self.count_logs():
            return False
        else:
            regex = re.compile(pattern)
            lognum = 1
            for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
                if brok.type == 'log':
                    if index == lognum:
                        if re.search(regex, brok.data['log']):
                            return True
                    lognum += 1
        return False


    def any_log_match(self, pattern):
        regex = re.compile(pattern)
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    return True
        return False


    def get_log_match(self, pattern):
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res



    def print_header(self):
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"




    def xtest_conf_is_correct(self):
        self.print_header()
        self.assert_(self.conf.conf_is_correct)


    def init_livestatus(self):
        self.livelogs = 'tmp/livelogs.db' + self.testid
        self.db_archives = os.path.join(os.path.dirname(self.livelogs), 'archives')
        self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid
        self.livestatus_broker = Livestatus_broker(livestatus_modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, self.db_archives, 365, self.pnp4nagios, True)
        self.livestatus_broker.create_queues()
        #self.livestatus_broker.properties = {
        #    'to_queue' : 0,
        #    'from_queue' : 0
        #
        #    }
        self.livestatus_broker.init()
        self.livestatus_broker.db = LiveStatusDb(self.livestatus_broker.database_file, self.livestatus_broker.archive_path, self.livestatus_broker.max_logs_age)
        self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.use_aggressive_sql, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
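A brief usage sketch of the ShinkenTest helpers above; the subclass name is hypothetical, the host name mirrors the other examples, and the asserted log pattern is purely illustrative.

class TestExampleUsage(ShinkenTest):
    def test_host_goes_down(self):
        # drive three fake check results through the scheduler
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []
        self.scheduler_loop(3, [[host, 2, 'DOWN']])
        # illustrative pattern: some log brok should mention the host
        self.assert_(self.any_log_match('test_host_0'))
        self.show_and_clear_logs()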
Example no. 4
class TestConfigSmall(TestConfig):
    def setUp(self):
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
        Comment.id = 1
        self.testid = str(os.getpid() + random.randint(1, 1000))
        self.livelogs = 'tmp/livelogs.db' + self.testid
        self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid
        self.livestatus_broker = Livestatus_broker(modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, 365, self.pnp4nagios)
        self.livestatus_broker.create_queues()
        #self.livestatus_broker.properties = {
        #    'to_queue' : 0,
        #    'from_queue' : 0
        #
        #    }
        self.livestatus_broker.init()
        print "Cleaning old broks?"
        self.sched.fill_initial_broks()
        self.update_broker()
        self.nagios_path = None
        self.livestatus_path = None
        self.nagios_config = None



    def tearDown(self):
        self.stop_nagios()
        self.livestatus_broker.dbconn.commit()
        self.livestatus_broker.dbconn.close()
        if os.path.exists(self.livelogs):
            os.remove(self.livelogs)
        if os.path.exists(self.pnp4nagios):
            shutil.rmtree(self.pnp4nagios)
        if os.path.exists('var/nagios.log'):
            os.remove('var/nagios.log')
        if os.path.exists('var/retention.dat'):
            os.remove('var/retention.dat')
        if os.path.exists('var/status.dat'):
            os.remove('var/status.dat')
        to_del = [attr for attr in self.livestatus_broker.livestatus.__class__.out_map['Host'].keys() if attr.startswith('host_')]
        for attr in to_del:
            del self.livestatus_broker.livestatus.__class__.out_map['Host'][attr]
        self.livestatus_broker = None



    def test_host_wait(self):
        self.print_header()
        if self.nagios_installed():
            self.start_nagios('1r_1h_1s')
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = [] # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = [] # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = [] # no hostchecks on critical checkresults
        self.scheduler_loop(2, [[host, 0, 'UP'], [router, 0, 'UP'], [svc, 2, 'BAD']])
        self.update_broker(True)
        print ".#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#."
        print "i updated the broker at", time.time()
        print ".#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#."

        #---------------------------------------------------------------
        # get only the host names and addresses
        #---------------------------------------------------------------
        request = """
GET hosts
Columns: name state address
ColumnHeaders: on
Filter: host_name = test_host_0
"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        good_response = """name;state;address
test_host_0;0;127.0.0.1
"""
        self.assert_(isinstance(response, str))
        self.assert_(self.lines_equal(response, good_response))

        request = """
GET hosts
Columns: name state address last_check
ColumnHeaders: on
Filter: host_name = test_host_0
"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response

        time.sleep(1)
        now = time.time()
        print ".#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#."
        print "i query with trigger at", now
        print ".#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#."

        request = """
COMMAND [%d] SCHEDULE_FORCED_HOST_CHECK;test_host_0;%d

GET hosts
WaitObject: test_host_0
WaitCondition: last_check >= %d
WaitTimeout: 10000
WaitTrigger: check
Columns: last_check state plugin_output
Filter: host_name = test_host_0
Localtime: %d
OutputFormat: python
KeepAlive: on
ResponseHeader: fixed16
ColumnHeaders: off
""" % (now, now, now, now)

        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print "response is", response
        self.assert_(isinstance(response, list))
        self.assert_('wait' in [q.my_type for q in response])
        self.assert_('query' in [q.my_type for q in response])

        # launch the wait part first, which must return an empty result
        query = [q for q in response if q.my_type == "query"][0]
        wait = [q for q in response if q.my_type == "wait"][0]
        result = wait.launch_query()
        response = wait.response
        print response
        response.format_live_data(result, wait.columns, wait.aliases)
        output, keepalive = response.respond()
        self.assert_(not output.strip())

        #result = query.launch_query()
        #response = query.response
        #response.format_live_data(result, query.columns, query.aliases)
        #output, keepalive = response.respond()
        #print "output is", output

        time.sleep(1)
        # update the broker
        # wait....launch the wait
        # launch the query again, which must return a result
        self.scheduler_loop(3, [[host, 2, 'DOWN']])
        self.update_broker(True)

        print wait.filter_stack.qsize()
        result = wait.launch_query()
        response = wait.response
        response.columnheaders = "on"
        print response
        response.format_live_data(result, wait.columns, wait.aliases)
        output, keepalive = response.respond()
        print "output of the wait is (%s)" % output
        self.assert_(output.strip())






        query = """
COMMAND [1303116582] SCHEDULE_FORCED_SVC_CHECK;test_host_0;test_ok_0;1303116582

GET services
WaitObject: test_host_0 test_ok_0
WaitCondition: last_check >= 1303116582
WaitTimeout: 10000
WaitTrigger: check
Columns: last_check state plugin_output
Filter: host_name = test_host_0
Filter: service_description = test_ok_0
Localtime: 1303116582
OutputFormat: python
KeepAlive: on
ResponseHeader: fixed16
ColumnHeaders: off
"""


    def test_multiple_externals(self):
        self.print_header()
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = [] # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = [] # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = [] # no hostchecks on critical checkresults
        self.scheduler_loop(2, [[host, 0, 'UP'], [router, 0, 'UP'], [svc, 2, 'BAD']])
        self.update_broker(True)
        print ".#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#."
        print "i updated the broker at", time.time()
        print ".#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#.#."

        #---------------------------------------------------------------
        # get only the host names and addresses
        #---------------------------------------------------------------
        request = """COMMAND [1303425876] SCHEDULE_FORCED_HOST_CHECK;test_host_0;1303425870

COMMAND [1303425876] SCHEDULE_FORCED_HOST_CHECK;test_host_0;1303425870

COMMAND [1303425876] SCHEDULE_FORCED_HOST_CHECK;test_host_0;1303425870

COMMAND [1303425876] SCHEDULE_FORCED_HOST_CHECK;test_host_0;1303425870

"""
        response, keepalive = self.livestatus_broker.livestatus.handle_request(request)
        print response
        good_response = ""
        self.assert_(isinstance(response, str))
        self.assert_(self.lines_equal(response, good_response))
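The assertions above also rely on a lines_equal helper that is not shown here; a minimal sketch, assuming it compares responses line by line while ignoring ordering and surrounding whitespace, could look like:

def lines_equal(self, text1, text2):
    # hedged assumption about the helper's behaviour, not the original code:
    # compare the stripped, non-empty lines of both responses regardless of order
    lines1 = sorted(line.strip() for line in text1.splitlines() if line.strip())
    lines2 = sorted(line.strip() for line in text2.splitlines() if line.strip())
    return lines1 == lines2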