def loadconfig(self):
    """Load and parse the configuration file at ``self.configfile``.

    On success the parsed buffer is stored in ``self.config``.

    Returns:
        tuple: ``(True, "Config loaded")`` on success, or
        ``(False, <error message>)`` when reading/parsing fails.
    """
    try:
        c = Config()
        # Suppress Config's own console output while reading.
        c.read_config_silent = 1
        r = c.read_config(self.configfile)
        self.config = c.read_config_buf(r)
        return (True, "Config loaded")
    except Exception:
        # Narrowed from a bare `except:` which would also swallow
        # SystemExit / KeyboardInterrupt.
        return (False, "There was an error reading the configuration file")
def loadconfig(configfile):
    """Load and parse the Shinken configuration file *configfile*.

    Returns:
        tuple: ``(True, <parsed buffer>)`` on success, or
        ``(False, <error message>)`` when reading/parsing fails.
    """
    try:
        c = Config()
        # Suppress Config's own console output while reading.
        c.read_config_silent = 1
        r = c.read_config(configfile)
        b = c.read_config_buf(r)
        return (True, b)
    except Exception:
        # Narrowed from a bare `except:` which would also swallow
        # SystemExit / KeyboardInterrupt.
        return (False, "There was an error reading the configuration file")
def setup_with_file(self, path):
    """Arbiter-like bootstrap: read *path*, run the configuration
    preparation pipeline and wire a scheduler plus external-command
    managers for testing.
    """
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    self.config_files = [path]
    self.conf = Config()
    # BUGFIX: the configuration files were read twice in a row and the
    # first buffer was discarded; a single read is sufficient.
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Configuration-preparation pipeline; the call order matters.
    self.conf.linkify_templates()
    self.conf.apply_inheritance()
    self.conf.explode()
    self.conf.create_reversed_list()
    self.conf.remove_twins()
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.clean_useless()
    self.conf.pythonize()
    self.conf.linkify()
    self.conf.apply_dependancies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    self.confs = self.conf.cut_into_parts()
    self.dispatcher = Dispatcher(self.conf, self.me)
    # Build a scheduler daemon and load the prepared configuration.
    scheddaemon = Shinken(None, False, False, False, None)
    self.sched = Scheduler(scheddaemon)
    scheddaemon.sched = self.sched
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf)
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2
    self.sched.schedule()
def __init__(self, file):
    """Read the Shinken configuration in *file* and instantiate the
    configured sqlite / mongodb logstore modules."""
    # Route log output to a dummy receiver.
    logger.load_obj(Dummy())
    self.conf = Config()
    raw_objects = self.conf.read_config_buf(self.conf.read_config([file]))
    for obj_type in ('arbiter', 'module'):
        self.conf.create_objects_for_type(raw_objects, obj_type)
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    # Instantiate whichever logstore backends appear in the config.
    for module in self.conf.modules:
        if module.module_type == 'logstore_sqlite':
            self.mod_sqlite = get_instance_sqlite(module)
            self.mod_sqlite.init()
        if module.module_type == 'logstore_mongodb':
            self.mod_mongodb = get_instance_mongodb(module)
class Converter(object):
    """Load a Shinken configuration file and set up the configured
    sqlite / mongodb logstore modules."""

    def __init__(self, file):
        # Send log output to a dummy object.
        logger.load_obj(Dummy())
        self.conf = Config()
        parsed = self.conf.read_config_buf(self.conf.read_config([file]))
        self.conf.create_objects_for_type(parsed, 'arbiter')
        self.conf.create_objects_for_type(parsed, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(parsed)
        # Instantiate the logstore backends declared in the config.
        for m in self.conf.modules:
            mtype = m.module_type
            if mtype == 'logstore_sqlite':
                self.mod_sqlite = get_instance_sqlite(m)
                self.mod_sqlite.init()
            if mtype == 'logstore_mongodb':
                self.mod_mongodb = get_instance_mongodb(m)
def getconf(self, config):
    """Return host properties from the live arbiter together with the
    service dependencies parsed from the *config* file."""
    cfg = Config()
    cfg.read_config_silent = 1
    # Hosts are queried from the arbiter directly.
    wanted = ['host_name', 'use', 'act_depend_of']
    hosts = self.arb.get_objects_properties('hosts', wanted)
    # Service dependencies are re-read from the flat config file.
    raw = cfg.read_config([config])
    svc_dep = cfg.read_config_buf(raw)['servicedependency']
    return (hosts, svc_dep)
def setup_with_file(self, path):
    """Arbiter-like test bootstrap: read *path*, run the full
    configuration-preparation pipeline (call order matters) and wire
    up a scheduler plus external-command managers.

    NOTE(review): Python 2 code (print statements).
    """
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    self.config_files = [path]
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    self.conf.old_properties_names_to_new()
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    # Preparation pipeline: templates, inheritance, explosion, linking.
    self.conf.linkify_templates()
    self.conf.apply_inheritance()
    self.conf.explode()
    print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.create_reversed_list()
    self.conf.remove_twins()
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    print "conf.services has %d elements" % len(self.conf.services)
    self.conf.create_reversed_list()
    self.conf.pythonize()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    self.confs = self.conf.cut_into_parts()
    self.conf.show_errors()
    self.dispatcher = Dispatcher(self.conf, self.me)
    # Scheduler daemon wiring.
    scheddaemon = Shinken(None, False, False, False, None)
    self.sched = Scheduler(scheddaemon)
    scheddaemon.sched = self.sched
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf)
    # External command managers: one applies commands on the scheduler,
    # the other dispatches through the (fake) arbiter, i.e. self.
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2
    self.sched.schedule()
def getconf(self, config):
    """Fetch host properties via the arbiter and the service
    dependencies parsed straight from the *config* file."""
    conf = Config()
    conf.read_config_silent = 1
    # Host objects come from the live arbiter.
    hosts = self.arb.get_objects_properties(
        "hosts", ["host_name", "use", "act_depend_of"])
    # Service dependencies come from parsing the flat file.
    buf = conf.read_config([config])
    parsed = conf.read_config_buf(buf)
    return (hosts, parsed["servicedependency"])
def manage_program_status_brok(self, b):
    """Build a Config object from a program-status brok payload and
    register it under its instance id."""
    data = b.data
    c_id = data['instance_id']
    c = Config()
    # Copy every field of the brok payload onto the config object.
    for prop, value in data.items():
        setattr(c, prop, value)
    self.configs[c_id] = c
def getconf(self, config):
    """Collect host properties from the arbiter plus the service
    dependency definitions read from *config*."""
    cfg_files = [config]
    parser = Config()
    parser.read_config_silent = 1
    # Hosts: queried live from the arbiter.
    host_props = ['host_name', 'use', 'act_depend_of']
    hosts = self.arb.get_objects_properties('hosts', host_props)
    # Service dependencies: parsed from the configuration file itself.
    raw_buf = parser.read_config(cfg_files)
    svc_dep = parser.read_config_buf(raw_buf)['servicedependency']
    return (hosts, svc_dep)
class ShinkenTest(unittest.TestCase):
    """Base test case that loads a Shinken configuration and fakes the
    arbiter/scheduler machinery so checks can be driven by hand.

    NOTE(review): Python 2 code (print statements, cmp-style sorted).
    """

    def setUp(self):
        self.setup_with_file('etc/shinken_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        """Arbiter-like bootstrap: parse *path*, run the configuration
        preparation pipeline (call order matters) and wire a scheduler."""
        time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        # If we got one arbiter defined here (before default) we should be in a case where
        # the tester want to load/test a module, so we simulate an arbiter daemon
        # and the modules loading phase. As it has its own modulesmanager, should
        # not impact scheduler modules ones, especially we are asking for arbiter type :)
        if len(self.conf.arbiters) == 1:
            arbdaemon = Arbiter([''], [''], False, False, None, None)
            # only load if the module_dir is really existing, so was set explicitly
            # in the test configuration
            if os.path.exists(getattr(self.conf, 'modules_dir', '')):
                arbdaemon.modules_dir = self.conf.modules_dir
            arbdaemon.load_modules_manager()
            # we request the instances without them being *started*
            # (for those that are concerned ("external" modules):
            # we will *start* these instances after we have been daemonized (if requested)
            me = None
            for arb in self.conf.arbiters:
                me = arb
                arbdaemon.modules_manager.set_modules(arb.modules)
            arbdaemon.do_load_modules()
            arbdaemon.load_modules_configuration_objects(raw_objects)
        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        #print "Aconf.services has %d elements" % len(self.conf.services)
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.compute_hash()
        #print "conf.services has %d elements" % len(self.conf.services)
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.set_initial_state()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            self.conf.dump()
            return
        self.conf.clean()
        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)
        scheddaemon = Shinken(None, False, False, False, None, None)
        self.scheddaemon = scheddaemon
        self.sched = scheddaemon.sched
        scheddaemon.modules_dir = modules_dir
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf, in_test=True)
        # External command managers: applyer for the scheduler side,
        # dispatcher for the (fake) arbiter side, i.e. self.
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.conf.accept_passive_unknown_check_results = False
        self.sched.schedule()

    def add(self, b):
        # Broks are stored locally; external commands go to the scheduler.
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """Schedule a check for *ref* and inject a fabricated result."""
        #print "fake", ref
        now = time.time()
        ref.schedule(force=True)
        # now checks are schedule and we get them in
        # the action queue
        #check = ref.actions.pop()
        check = ref.checks_in_progress[0]
        self.sched.add(check)  # check is now in sched.checks[]
        # Allows to force check scheduling without setting its status nor
        # output. Useful for manual business rules rescheduling, for instance.
        if exit_status is None:
            return
        # fake execution
        check.check_time = now
        # and lie about when we will launch it because
        # if not, the schedule call for ref
        # will not really reschedule it because there
        # is a valid value in the future
        ref.next_chk = now - 0.5
        check.get_outputs(output, 9000)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61, verbose=True):
        """Run *count* scheduler iterations, faking check results for
        every (obj, exit_status, output) triple in *reflist*."""
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            if verbose is True:
                print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.sched.scatter_master_notifications()
            self.worker_loop(verbose)
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            #time.sleep(ref.retry_interval * 60 + 1)
            if do_sleep:
                time.sleep(sleep_time)

    def worker_loop(self, verbose=True):
        """Pretend to be a poller: fetch pending actions and report
        them back as immediately successful."""
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False, worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True, worker_name='tester')
        #print "------------ worker loop checks ----------------"
        #print checks
        #print "------------ worker loop actions ----------------"
        if verbose is True:
            self.show_actions()
        #print "------------ worker loop new ----------------"
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        if verbose is True:
            self.show_actions()
        #print "------------ worker loop end ----------------"

    def show_logs(self):
        # Dump every 'log' brok, preferring the scheduler's broks.
        print "--- logs <<<----------------------------------"
        if hasattr(self, "sched"):
            broks = self.sched.broks
        else:
            broks = self.broks
        for brok in sorted(broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                safe_print("LOG: ", brok.data['log'])
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        # Dump pending notifications and event handlers, by id order.
        print "--- actions <<<----------------------------------"
        if hasattr(self, "sched"):
            actions = self.sched.actions
        else:
            actions = self.actions
        for a in sorted(actions.values(), lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        # Count 'log' broks, preferring the scheduler's broks.
        if hasattr(self, "sched"):
            broks = self.sched.broks
        else:
            broks = self.broks
        return len([b for b in broks.values() if b.type == 'log'])

    def count_actions(self):
        if hasattr(self, "sched"):
            actions = self.sched.actions
        else:
            actions = self.actions
        return len(actions.values())

    def clear_logs(self):
        # Remove every 'log' brok, collecting ids first to avoid
        # mutating the dict while iterating it.
        if hasattr(self, "sched"):
            broks = self.sched.broks
        else:
            broks = self.broks
        id_to_del = []
        for b in broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del broks[id]

    def clear_actions(self):
        if hasattr(self, "sched"):
            self.sched.actions = {}
        else:
            self.actions = {}

    def assert_log_match(self, index, pattern, no_match=False):
        """Assert the *index*-th log brok matches *pattern* (or, with
        no_match=True, that no such match exists)."""
        # log messages are counted 1...n, so index=1 for the first message
        if not no_match:
            self.assertGreaterEqual(self.count_logs(), index)
        regex = re.compile(pattern)
        lognum = 1
        broks = sorted(self.sched.broks.values(), key=lambda x: x.id)
        for brok in broks:
            if brok.type == 'log':
                brok.prepare()
                if index == lognum:
                    if re.search(regex, brok.data['log']):
                        return
                lognum += 1
        self.assertTrue(no_match, "%s found a matched log line in broks :\n"
                                  "index=%s pattern=%r\n"
                                  "broks_logs=[[[\n%s\n]]]" % (
            '*HAVE*' if no_match else 'Not',
            index, pattern, '\n'.join(
                '\t%s=%s' % (idx, b.strip())
                for idx, b in enumerate(
                    (b.data['log'] for b in broks if b.type == 'log'), 1)
            )
        ))

    def _any_log_match(self, pattern, assert_not):
        # Shared engine for assert_any_log_match / assert_no_log_match.
        regex = re.compile(pattern)
        broks = getattr(self, 'sched', self).broks
        broks = sorted(broks.values(), lambda x, y: x.id - y.id)
        for brok in broks:
            if brok.type == 'log':
                brok.prepare()
                if re.search(regex, brok.data['log']):
                    self.assertTrue(not assert_not,
                                    "Found matching log line:\n"
                                    "pattern = %r\nbrok log = %r" % (pattern, brok.data['log'])
                    )
                    return
        self.assertTrue(assert_not,
                        "No matching log line found:\n"
                        "pattern = %r\n" "broks = %r" % (pattern, broks)
        )

    def assert_any_log_match(self, pattern):
        self._any_log_match(pattern, assert_not=False)

    def assert_no_log_match(self, pattern):
        self._any_log_match(pattern, assert_not=True)

    def get_log_match(self, pattern):
        # Return the list of log lines matching *pattern*.
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        # Banner with the current test id, for readable test output.
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        self.print_header()
        self.assertTrue(self.conf.conf_is_correct)
def setup_with_file(self, path):
    """Arbiter-like bootstrap: read *path*, run the configuration
    preparation pipeline (call order matters), then wire a scheduler
    and external-command managers.

    NOTE(review): Python 2 code (print statements).
    """
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    self.config_files = [path]
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    self.conf.old_properties_names_to_new()
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    self.conf.linkify_templates()
    self.conf.apply_inheritance()
    self.conf.explode()
    #print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.create_reversed_list()
    self.conf.remove_twins()
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.compute_hash()
    #print "conf.services has %d elements" % len(self.conf.services)
    self.conf.create_reversed_list()
    self.conf.pythonize()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    # Bail out early on a broken configuration.
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        return
    self.confs = self.conf.cut_into_parts()
    self.conf.prepare_for_sending()
    self.conf.show_errors()
    self.dispatcher = Dispatcher(self.conf, self.me)
    # Scheduler daemon wiring.
    scheddaemon = Shinken(None, False, False, False, None)
    self.sched = Scheduler(scheddaemon)
    scheddaemon.sched = self.sched
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf, in_test=True)
    # External command managers: applyer on the scheduler side,
    # dispatcher on the (fake) arbiter side, i.e. self.
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2
    self.sched.schedule()
class ShinkenTest(unittest.TestCase):
    """Base test case that loads a Nagios-style configuration, fakes
    the arbiter/scheduler machinery, and can also bring up a
    LiveStatus broker with a sqlite logstore for livestatus tests.

    NOTE(review): Python 2 code (print statements, cmp-style sorted).
    """

    def setUp(self):
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        """Arbiter-like bootstrap: parse *path*, run the configuration
        preparation pipeline (call order matters) and wire a scheduler."""
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.old_properties_names_to_new()
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        #print "Aconf.services has %d elements" % len(self.conf.services)
        self.conf.create_reversed_list()
        self.conf.remove_twins()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.compute_hash()
        #print "conf.services has %d elements" % len(self.conf.services)
        self.conf.create_reversed_list()
        self.conf.pythonize()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        # Bail out early on a broken configuration.
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            return
        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)
        # Scheduler daemon wiring.
        scheddaemon = Shinken(None, False, False, False, None)
        self.sched = Scheduler(scheddaemon)
        scheddaemon.sched = self.sched
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf, in_test=True)
        # External command managers: applyer on the scheduler side,
        # dispatcher on the (fake) arbiter side, i.e. self.
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.schedule()

    def add(self, b):
        # Broks are stored locally; external commands go to the scheduler.
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """Schedule a check for *ref* and inject a fabricated result."""
        #print "fake", ref
        now = time.time()
        ref.schedule(force=True)
        # now checks are schedule and we get them in
        # the action queue
        #check = ref.actions.pop()
        check = ref.checks_in_progress[0]
        self.sched.add(check)  # check is now in sched.checks[]
        # fake execution
        check.check_time = now
        # and lie about when we will launch it because
        # if not, the schedule call for ref
        # will not really reschedule it because there
        # is a valid value in the future
        ref.next_chk = now - 0.5
        check.get_outputs(output, 9000)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61):
        """Run *count* scheduler iterations, faking check results for
        every (obj, exit_status, output) triple in *reflist*."""
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.worker_loop()
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            #time.sleep(ref.retry_interval * 60 + 1)
            if do_sleep:
                time.sleep(sleep_time)

    def worker_loop(self):
        """Pretend to be a poller: fetch pending actions and report
        them back as immediately successful."""
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False, worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True, worker_name='tester')
        #print "------------ worker loop checks ----------------"
        #print checks
        #print "------------ worker loop actions ----------------"
        self.show_actions()
        #print "------------ worker loop new ----------------"
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        self.show_actions()
        #print "------------ worker loop end ----------------"

    def show_logs(self):
        # Dump every 'log' brok, in id order.
        print "--- logs <<<----------------------------------"
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                print "LOG:", brok.data['log']
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        # Dump pending notifications and event handlers, in id order.
        print "--- actions <<<----------------------------------"
        for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (
                    a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        return len([b for b in self.sched.broks.values() if b.type == 'log'])

    def count_actions(self):
        return len(self.sched.actions.values())

    def clear_logs(self):
        # Collect ids first to avoid mutating the dict while iterating.
        id_to_del = []
        for b in self.sched.broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del self.sched.broks[id]

    def clear_actions(self):
        self.sched.actions = {}

    def log_match(self, index, pattern):
        """Return True if the *index*-th log brok matches *pattern*."""
        # log messages are counted 1...n, so index=1 for the first message
        if index > self.count_logs():
            return False
        else:
            regex = re.compile(pattern)
            lognum = 1
            for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
                if brok.type == 'log':
                    brok.prepare()
                    if index == lognum:
                        if re.search(regex, brok.data['log']):
                            return True
                    lognum += 1
        return False

    def any_log_match(self, pattern):
        """Return True if any log brok matches *pattern*."""
        regex = re.compile(pattern)
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                if re.search(regex, brok.data['log']):
                    return True
        return False

    def get_log_match(self, pattern):
        # Return the list of log lines matching *pattern*.
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        # Banner with the current test id, for readable test output.
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        self.print_header()
        self.assert_(self.conf.conf_is_correct)

    def find_modules_path(self):
        """ Find the absolute path of the shinken module directory and returns it.  """
        import shinken
        # BEWARE: this way of finding path is good if we still
        # DO NOT HAVE CHANGE PWD!!!
        # Now get the module path. It's in fact the directory modules
        # inside the shinken directory. So let's find it.
        print "modulemanager file", shinken.modulesmanager.__file__
        modulespath = os.path.abspath(shinken.modulesmanager.__file__)
        print "modulemanager absolute file", modulespath
        # We got one of the files of
        parent_path = os.path.dirname(os.path.dirname(modulespath))
        modulespath = os.path.join(parent_path, 'shinken', 'modules')
        print("Using modules path: %s" % (modulespath))
        return modulespath

    def do_load_modules(self):
        self.modules_manager.load_and_init()
        self.log.log("I correctly loaded the modules: [%s]" % (','.join(
            [inst.get_name() for inst in self.modules_manager.instances])))

    def init_livestatus(self, modconf=None):
        """Bring up a LiveStatus broker backed by a per-test sqlite
        logstore (``tmp/livelogs.db<testid>``)."""
        self.livelogs = 'tmp/livelogs.db' + self.testid
        if modconf is None:
            modconf = Module({
                'module_name': 'LiveStatus',
                'module_type': 'livestatus',
                'port': str(50000 + os.getpid()),
                'pnp_path': 'tmp/pnp4nagios_test' + self.testid,
                'host': '127.0.0.1',
                'socket': 'live',
                'name': 'test',  #?
            })
        dbmodconf = Module({
            'module_name': 'LogStore',
            'module_type': 'logstore_sqlite',
            'use_aggressive_sql': "0",
            'database_file': self.livelogs,
            'archive_path': os.path.join(os.path.dirname(self.livelogs), 'archives'),
        })
        modconf.modules = [dbmodconf]
        self.livestatus_broker = LiveStatus_broker(modconf)
        self.livestatus_broker.create_queues()
        #--- livestatus_broker.main
        self.livestatus_broker.log = logger
        # this seems to damage the logger so that the scheduler can't use it
        #self.livestatus_broker.log.load_obj(self.livestatus_broker)
        self.livestatus_broker.debug_output = []
        self.livestatus_broker.modules_manager = ModulesManager(
            'livestatus', self.livestatus_broker.find_modules_path(), [])
        self.livestatus_broker.modules_manager.set_modules(
            self.livestatus_broker.modules)
        # We can now output some previouly silented debug ouput
        self.livestatus_broker.do_load_modules()
        # Attach the logstore backend to the broker.
        for inst in self.livestatus_broker.modules_manager.instances:
            if inst.properties["type"].startswith('logstore'):
                f = getattr(inst, 'load', None)
                if f and callable(f):
                    f(self.livestatus_broker)  # !!! NOT self here !!!!
                break
        for s in self.livestatus_broker.debug_output:
            print "errors during load", s
        del self.livestatus_broker.debug_output
        self.livestatus_broker.rg = LiveStatusRegenerator()
        self.livestatus_broker.datamgr = datamgr
        datamgr.load(self.livestatus_broker.rg)
        self.livestatus_broker.query_cache = LiveStatusQueryCache()
        self.livestatus_broker.query_cache.disable()
        self.livestatus_broker.rg.register_cache(
            self.livestatus_broker.query_cache)
        #--- livestatus_broker.main
        self.livestatus_broker.init()
        self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0]
        self.livestatus_broker.livestatus = LiveStatus(
            self.livestatus_broker.datamgr,
            self.livestatus_broker.query_cache,
            self.livestatus_broker.db,
            self.livestatus_broker.pnp_path,
            self.livestatus_broker.from_q)
        #--- livestatus_broker.do_main
        self.livestatus_broker.db.open()
def __init__(self):
    """Arbiter-like bootstrap: load /etc/shinken/shinken.cfg, load the
    arbiter modules, and run the configuration-preparation pipeline
    (call order matters).

    NOTE(review): Python 2 code (print statements).
    """
    self.conf = Config()
    buf = self.conf.read_config(['/etc/shinken/shinken.cfg'])
    self.raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(self.raw_objects, 'arbiter')
    self.conf.create_objects_for_type(self.raw_objects, 'module')
    self.conf.early_arbiter_linking()
    # With exactly one arbiter defined, simulate an arbiter daemon and
    # its module-loading phase.
    if len(self.conf.arbiters) == 1:
        self.arbdaemon = Arbiter(
            config_files=[''],
            is_daemon=[''],
            do_replace=False,
            verify_only=False,
            debug=None,
            debug_file=None,
            arb_name='arbtest'
        )
        self.arbdaemon.modules_dir = '/var/lib/shinken/modules/'
        self.arbdaemon.load_modules_manager()
        me = None
        for arb in self.conf.arbiters:
            me = arb
            self.arbdaemon.modules_manager.set_modules(arb.modules)
        self.arbdaemon.do_load_modules()
        self.arbdaemon.load_modules_configuration_objects(self.raw_objects)
    self.conf.create_objects(self.raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    # import pdb;pdb.set_trace()
    self.conf.linkify_templates()
    # import pdb;pdb.set_trace()
    self.conf.apply_inheritance()
    # import pdb;pdb.set_trace()
    self.conf.explode()
    # print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.compute_hash()
    # print "conf.services has %d elements" % len(self.conf.services)
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.set_initial_state()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    # Bail out early on a broken configuration.
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        self.conf.dump()
        return
    self.conf.clean()
    self.arbdaemon.conf = self.conf
def setup_with_file(self, path):
    """Arbiter-like bootstrap: parse *path*, run the configuration
    preparation pipeline (call order matters) and wire a scheduler
    plus external-command managers.

    NOTE(review): Python 2 code (print statements).
    """
    time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    self.config_files = [path]
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    #import pdb;pdb.set_trace()
    self.conf.linkify_templates()
    #import pdb;pdb.set_trace()
    self.conf.apply_inheritance()
    #import pdb;pdb.set_trace()
    self.conf.explode()
    #print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.compute_hash()
    #print "conf.services has %d elements" % len(self.conf.services)
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    # Bail out early on a broken configuration.
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        self.conf.dump()
        return
    self.conf.clean()
    self.confs = self.conf.cut_into_parts()
    self.conf.prepare_for_sending()
    self.conf.show_errors()
    self.dispatcher = Dispatcher(self.conf, self.me)
    # Scheduler daemon wiring.
    scheddaemon = Shinken(None, False, False, False, None, None)
    self.sched = Scheduler(scheddaemon)
    scheddaemon.sched = self.sched
    scheddaemon.modules_dir = modules_dir
    scheddaemon.load_modules_manager()
    # Remember to clean the logs we just created before launching tests
    self.clear_logs()
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf, in_test=True)
    # External command managers: applyer on the scheduler side,
    # dispatcher on the (fake) arbiter side, i.e. self.
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2
    self.sched.conf.accept_passive_unknown_check_results = False
    self.sched.schedule()
def test_types(self):
    """Load a sample configuration and check that object properties hold
    values of the type declared in each class's ``properties`` mapping
    (via self.map_type, a helper defined elsewhere in this file).
    """
    path = 'etc/shinken_1r_1h_1s.cfg'
    time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like: broks are collected by the test case itself
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.setLevel("INFO")
    self.log.load_obj(self)
    self.config_files = [path]

    # Full arbiter-style configuration loading and compilation pipeline;
    # the call order matters.
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is normally set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    self.conf.linkify_templates()
    self.conf.apply_inheritance()
    self.conf.explode()
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.compute_hash()
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()

    # Cannot do it for all obj for now.
    # We have to ensure unicode everywhere first.
    for objs in [self.conf.arbiters]:
        for obj in objs:
            for prop in obj.properties:
                if hasattr(obj, prop):
                    value = getattr(obj, prop)
                    # We should get rid of None; maybe use the "neutral"
                    # value for the type instead.
                    if value is not None:
                        self.assertIsInstance(value, self.map_type(obj.properties[prop]))
                    else:
                        print("Skipping %s " % prop)

    # Manual check of several attrs for self.conf.contacts,
    # because contacts contain unicode attrs.
    for contact in self.conf.contacts:
        for prop in ["notificationways", "host_notification_commands",
                     "service_notification_commands"]:
            if hasattr(contact, prop):
                value = getattr(contact, prop)
                if value is not None:
                    print("TESTING %s with value %s" % (prop, value))
                    self.assertIsInstance(value, self.map_type(contact.properties[prop]))
                else:
                    print("Skipping %s " % prop)

    # Same check for notification ways.
    for notifway in self.conf.notificationways:
        for prop in ["host_notification_commands",
                     "service_notification_commands"]:
            if hasattr(notifway, prop):
                value = getattr(notifway, prop)
                if value is not None:
                    print("TESTING %s with value %s" % (prop, value))
                    self.assertIsInstance(value, self.map_type(notifway.properties[prop]))
                else:
                    print("Skipping %s " % prop)
def test_types(self):
    """Duplicate variant of test_types (same statements, different
    original line wrapping): load a sample configuration and assert that
    object property values match the types declared in ``properties``
    (resolved through self.map_type, defined elsewhere in this file).
    """
    path = 'etc/shinken_1r_1h_1s.cfg'
    time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like: broks are collected by the test case itself
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.setLevel("INFO")
    self.log.load_obj(self)
    self.config_files = [path]

    # Arbiter-style configuration loading/compilation; order matters.
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is normally set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    self.conf.linkify_templates()
    self.conf.apply_inheritance()
    self.conf.explode()
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.compute_hash()
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()

    # Cannot do it for all obj for now.
    # We have to ensure unicode everywhere first.
    for objs in [self.conf.arbiters]:
        for obj in objs:
            for prop in obj.properties:
                if hasattr(obj, prop):
                    value = getattr(obj, prop)
                    # We should get rid of None; maybe use the "neutral"
                    # value for the type instead.
                    if value is not None:
                        self.assertIsInstance(
                            value, self.map_type(obj.properties[prop]))
                    else:
                        print("Skipping %s " % prop)

    # Manual check of several attrs for self.conf.contacts,
    # because contacts contain unicode attrs.
    for contact in self.conf.contacts:
        for prop in [
                "notificationways", "host_notification_commands",
                "service_notification_commands"
        ]:
            if hasattr(contact, prop):
                value = getattr(contact, prop)
                if value is not None:
                    print("TESTING %s with value %s" % (prop, value))
                    self.assertIsInstance(
                        value, self.map_type(contact.properties[prop]))
                else:
                    print("Skipping %s " % prop)

    # Same check for notification ways.
    for notifway in self.conf.notificationways:
        for prop in [
                "host_notification_commands",
                "service_notification_commands"
        ]:
            if hasattr(notifway, prop):
                value = getattr(notifway, prop)
                if value is not None:
                    print("TESTING %s with value %s" % (prop, value))
                    self.assertIsInstance(
                        value, self.map_type(notifway.properties[prop]))
                else:
                    print("Skipping %s " % prop)
def setUp(self):
    """Give every test a fresh, empty Config object as self.item."""
    # Local import: keeps the shinken dependency out of module import time.
    from shinken.objects.config import Config
    config = Config()
    self.item = config
def setup_with_file(self, path):
    """Load the Shinken configuration at *path* and build an in-test
    scheduler, optionally simulating an arbiter daemon to load modules.

    Side effects: sets self.conf, self.confs, self.dispatcher,
    self.scheddaemon, self.sched and self.external_command_dispatcher,
    then schedules initial checks. Returns early when the configuration
    is reported incorrect.
    """
    time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like: broks are collected by the test case itself
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    self.config_files = [path]
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    # If we got one arbiter defined here (before default) we should be in a
    # case where the tester wants to load/test a module, so we simulate an
    # arbiter daemon and the modules loading phase. As it has its own
    # modulesmanager it should not impact scheduler modules, especially as
    # we ask for the arbiter type.
    if len(self.conf.arbiters) == 1:
        arbdaemon = Arbiter([''], [''], False, False, None, None)
        # Only load if modules_dir really exists, i.e. was set explicitly
        # in the test configuration.
        # NOTE(review): nesting of the statements below under this check is
        # inferred from the comment above — confirm against upstream source.
        if os.path.exists(getattr(self.conf, 'modules_dir', '')):
            arbdaemon.modules_dir = self.conf.modules_dir
            arbdaemon.load_modules_manager()
            # We request the instances without them being *started*
            # (for those that are concerned, i.e. "external" modules):
            # we will *start* these instances after being daemonized
            # (if requested).
            me = None
            for arb in self.conf.arbiters:
                me = arb
                arbdaemon.modules_manager.set_modules(arb.modules)
            arbdaemon.do_load_modules()
            arbdaemon.load_modules_configuration_objects(raw_objects)
    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is normally set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()

    # Configuration compilation pipeline; the call order matters.
    self.conf.linkify_templates()
    self.conf.apply_inheritance()
    self.conf.explode()
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.set_initial_state()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        self.conf.dump()
        return
    self.conf.clean()

    self.confs = self.conf.cut_into_parts()
    self.conf.prepare_for_sending()
    self.conf.show_errors()
    self.dispatcher = Dispatcher(self.conf, self.me)

    scheddaemon = Shinken(None, False, False, False, None, None)
    self.scheddaemon = scheddaemon
    self.sched = scheddaemon.sched
    scheddaemon.modules_dir = modules_dir
    scheddaemon.load_modules_manager()
    # Remember to clean the logs we just created before launching tests
    self.clear_logs()

    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf, in_test=True)
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2
    self.sched.conf.accept_passive_unknown_check_results = False
    self.sched.schedule()
class ShinkenTest(unittest.TestCase):
    """Base test case that loads a Shinken configuration, runs an in-test
    scheduler and offers helpers to fake check results and inspect the
    produced broks (log lines) and actions.
    """

    def setUp(self):
        """Load the default one-realm/one-host/one-service configuration."""
        self.setup_with_file('etc/shinken_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        """Load the configuration at *path* and build the in-test
        scheduler; simulates an arbiter daemon for module loading when
        exactly one arbiter is defined. Returns early when the
        configuration is reported incorrect.
        """
        time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like: broks are collected by the test case itself
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        # If we got one arbiter defined here (before default) we should be
        # in a case where the tester wants to load/test a module, so we
        # simulate an arbiter daemon and the modules loading phase. As it
        # has its own modulesmanager it should not impact scheduler
        # modules, especially as we ask for the arbiter type.
        if len(self.conf.arbiters) == 1:
            arbdaemon = Arbiter([''], [''], False, False, None, None)
            # Only load if modules_dir really exists, i.e. was set
            # explicitly in the test configuration.
            # NOTE(review): nesting below this check is inferred from the
            # comment — confirm against upstream source.
            if os.path.exists(getattr(self.conf, 'modules_dir', '')):
                arbdaemon.modules_dir = self.conf.modules_dir
                arbdaemon.load_modules_manager()
                # We request the instances without them being *started*
                # ("external" modules are started only after daemonizing).
                me = None
                for arb in self.conf.arbiters:
                    me = arb
                    arbdaemon.modules_manager.set_modules(arb.modules)
                arbdaemon.do_load_modules()
                arbdaemon.load_modules_configuration_objects(raw_objects)
        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is normally set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()

        # Configuration compilation pipeline; the call order matters.
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.set_initial_state()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            self.conf.dump()
            return
        self.conf.clean()

        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)

        scheddaemon = Shinken(None, False, False, False, None, None)
        self.scheddaemon = scheddaemon
        self.sched = scheddaemon.sched
        scheddaemon.modules_dir = modules_dir
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()

        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf, in_test=True)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.conf.accept_passive_unknown_check_results = False
        self.sched.schedule()

    def add(self, b):
        """Broker-like sink: store Broks, forward ExternalCommands to the
        scheduler.
        """
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """Schedule a check on *ref* and inject a fabricated result with
        the given exit status and output. exit_status=None schedules
        without faking a result (used for manual rescheduling tests).
        """
        now = time.time()
        ref.schedule(force=True)
        # now checks are scheduled and we get them in the action queue
        check = ref.checks_in_progress[0]
        self.sched.add(check)  # check is now in sched.checks[]
        # Allows to force check scheduling without setting its status nor
        # output. Useful for manual business rules rescheduling, for
        # instance.
        if exit_status is None:
            return
        # fake execution
        check.check_time = now
        # and lie about when we will launch it, because if not the
        # schedule call for ref will not really reschedule it (there is a
        # valid value in the future)
        ref.next_chk = now - 0.5
        check.get_outputs(output, 9000)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61,
                       verbose=True):
        """Run *count* scheduler iterations, faking the (obj, exit_status,
        output) results in *reflist* on each pass.
        """
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            if verbose is True:
                print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.sched.scatter_master_notifications()
            self.worker_loop(verbose)
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            if do_sleep:
                time.sleep(sleep_time)

    def worker_loop(self, verbose=True):
        """Play the poller/reactionner role: mark pending actions as
        executed with exit status 0 and feed results back to the
        scheduler.
        """
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False,
                                              worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True,
                                               worker_name='tester')
        if verbose is True:
            self.show_actions()
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        if verbose is True:
            self.show_actions()

    def show_logs(self):
        """Print all 'log' broks, from the scheduler if one exists,
        otherwise from the test case's own brok store.
        """
        print "--- logs <<<----------------------------------"
        if hasattr(self, "sched"):
            broks = self.sched.broks
        else:
            broks = self.broks
        for brok in sorted(broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                safe_print("LOG: ", brok.data['log'])
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        """Print pending notifications and event handlers in id order."""
        print "--- actions <<<----------------------------------"
        if hasattr(self, "sched"):
            actions = self.sched.actions
        else:
            actions = self.actions
        for a in sorted(actions.values(), lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(),
                                                a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (
                    a.id, ref, a.type,
                    time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        """Print the collected log broks, then delete them."""
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        """Print the pending actions, then delete them."""
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        """Return the number of 'log' broks currently collected."""
        if hasattr(self, "sched"):
            broks = self.sched.broks
        else:
            broks = self.broks
        return len([b for b in broks.values() if b.type == 'log'])

    def count_actions(self):
        """Return the number of actions currently pending."""
        if hasattr(self, "sched"):
            actions = self.sched.actions
        else:
            actions = self.actions
        return len(actions.values())

    def clear_logs(self):
        """Delete every 'log' brok from the active brok store."""
        if hasattr(self, "sched"):
            broks = self.sched.broks
        else:
            broks = self.broks
        id_to_del = []
        for b in broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del broks[id]

    def clear_actions(self):
        """Reset the active action store to empty."""
        if hasattr(self, "sched"):
            self.sched.actions = {}
        else:
            self.actions = {}

    def assert_log_match(self, index, pattern, no_match=False):
        """Assert the *index*-th log brok matches *pattern* (regex
        search). With no_match=True, assert it does NOT match.
        """
        # log messages are counted 1...n, so index=1 for the first message
        if not no_match:
            self.assertGreaterEqual(self.count_logs(), index)
        regex = re.compile(pattern)
        lognum = 1
        broks = sorted(self.sched.broks.values(), key=lambda x: x.id)
        for brok in broks:
            if brok.type == 'log':
                brok.prepare()
                if index == lognum:
                    if re.search(regex, brok.data['log']):
                        return
                lognum += 1
        self.assertTrue(no_match,
                        "%s found a matched log line in broks :\n"
                        "index=%s pattern=%r\n"
                        "broks_logs=[[[\n%s\n]]]" % (
                            '*HAVE*' if no_match else 'Not',
                            index, pattern,
                            '\n'.join('\t%s=%s' % (idx, b.strip())
                                      for idx, b in enumerate(
                                          (b.data['log'] for b in broks
                                           if b.type == 'log'), 1))))

    def _any_log_match(self, pattern, assert_not):
        """Shared body for assert_any_log_match / assert_no_log_match."""
        regex = re.compile(pattern)
        broks = getattr(self, 'sched', self).broks
        broks = sorted(broks.values(), lambda x, y: x.id - y.id)
        for brok in broks:
            if brok.type == 'log':
                brok.prepare()
                if re.search(regex, brok.data['log']):
                    self.assertTrue(not assert_not,
                                    "Found matching log line:\n"
                                    "pattern = %r\nbrok log = %r" % (
                                        pattern, brok.data['log']))
                    return
        self.assertTrue(assert_not,
                        "No matching log line found:\n"
                        "pattern = %r\n"
                        "broks = %r" % (pattern, broks))

    def assert_any_log_match(self, pattern):
        """Assert at least one log brok matches *pattern*."""
        self._any_log_match(pattern, assert_not=False)

    def assert_no_log_match(self, pattern):
        """Assert no log brok matches *pattern*."""
        self._any_log_match(pattern, assert_not=True)

    def get_log_match(self, pattern):
        """Return the list of log lines matching *pattern* (regex
        search), in brok id order.
        """
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        """Print a banner with the current test id."""
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        # Prefixed with x so it is not auto-collected as a test.
        self.print_header()
        self.assertTrue(self.conf.conf_is_correct)
class ShinkenTest(unittest.TestCase):
    """Older variant of the Shinken base test case (still uses
    pythonize/create_reversed_list in the compilation pipeline): loads a
    configuration, runs an in-test scheduler and offers helpers to fake
    check results and inspect broks and actions.
    """

    def setUp(self):
        """Load the default one-realm/one-host/one-service configuration."""
        self.setup_with_file('etc/shinken_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        """Load the configuration at *path* and build the in-test
        scheduler. Returns early when the configuration is reported
        incorrect.
        """
        # i am arbiter-like: broks are collected by the test case itself
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.old_properties_names_to_new()
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is normally set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()

        # Configuration compilation pipeline; the call order matters.
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.create_reversed_list()
        self.conf.remove_twins()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.compute_hash()
        # Rebuild the reversed list after template removal.
        self.conf.create_reversed_list()
        self.conf.override_properties()
        self.conf.pythonize()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            return
        self.conf.clean()

        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)

        scheddaemon = Shinken(None, False, False, False, None, None)
        self.sched = Scheduler(scheddaemon)
        scheddaemon.sched = self.sched
        scheddaemon.modules_dir = modules_dir
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()

        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf, in_test=True)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.schedule()

    def add(self, b):
        """Broker-like sink: store Broks, forward ExternalCommands to the
        scheduler.
        """
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """Schedule a check on *ref* and inject a fabricated result;
        exit_status=None schedules without faking a result.
        """
        now = time.time()
        ref.schedule(force=True)
        # now checks are scheduled and we get them in the action queue
        check = ref.checks_in_progress[0]
        self.sched.add(check)  # check is now in sched.checks[]
        # Allows to force check scheduling without setting its status nor
        # output. Useful for manual business rules rescheduling.
        if exit_status is None:
            return
        # fake execution
        check.check_time = now
        # and lie about when we will launch it, because if not the
        # schedule call for ref will not really reschedule it (there is a
        # valid value in the future)
        ref.next_chk = now - 0.5
        check.get_outputs(output, 9000)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61,
                       verbose=True):
        """Run *count* scheduler iterations, faking the (obj, exit_status,
        output) results in *reflist* on each pass.
        """
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            if verbose is True:
                print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.worker_loop(verbose)
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            if do_sleep:
                time.sleep(sleep_time)

    def worker_loop(self, verbose=True):
        """Play the poller/reactionner role: mark pending actions as
        executed with exit status 0 and feed results back.
        """
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False,
                                              worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True,
                                               worker_name='tester')
        if verbose is True:
            self.show_actions()
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        if verbose is True:
            self.show_actions()

    def show_logs(self):
        """Print all 'log' broks collected by the scheduler."""
        print "--- logs <<<----------------------------------"
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                print "LOG:", brok.data['log']
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        """Print pending notifications and event handlers in id order."""
        print "--- actions <<<----------------------------------"
        for a in sorted(self.sched.actions.values(),
                        lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(),
                                                a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (
                    a.id, ref, a.type,
                    time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        """Print the collected log broks, then delete them."""
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        """Print the pending actions, then delete them."""
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        """Return the number of 'log' broks in the scheduler."""
        return len([b for b in self.sched.broks.values()
                    if b.type == 'log'])

    def count_actions(self):
        """Return the number of pending scheduler actions."""
        return len(self.sched.actions.values())

    def clear_logs(self):
        """Delete every 'log' brok from the scheduler."""
        id_to_del = []
        for b in self.sched.broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del self.sched.broks[id]

    def clear_actions(self):
        """Reset the scheduler's action store to empty."""
        self.sched.actions = {}

    def log_match(self, index, pattern):
        """Return True if the *index*-th log brok matches *pattern*
        (regex search); False otherwise.
        """
        # log messages are counted 1...n, so index=1 for the first message
        if index > self.count_logs():
            return False
        else:
            regex = re.compile(pattern)
            lognum = 1
            for brok in sorted(self.sched.broks.values(),
                               lambda x, y: x.id - y.id):
                if brok.type == 'log':
                    brok.prepare()
                    if index == lognum:
                        if re.search(regex, brok.data['log']):
                            return True
                    lognum += 1
        return False

    def any_log_match(self, pattern):
        """Return True if any log brok matches *pattern*."""
        regex = re.compile(pattern)
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                if re.search(regex, brok.data['log']):
                    return True
        return False

    def get_log_match(self, pattern):
        """Return the list of log lines matching *pattern*, in brok id
        order.
        """
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        """Print a banner with the current test id."""
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        # Prefixed with x so it is not auto-collected as a test.
        self.print_header()
        self.assert_(self.conf.conf_is_correct)
class ShinkenTest(unittest.TestCase):
    """Oldest variant of the Shinken base test case (nagios-style config
    names, clean_useless/apply_dependancies pipeline, 5-argument Shinken
    constructor): loads a configuration, runs an in-test scheduler and
    offers helpers to fake check results and inspect broks and actions.
    """

    def setUp(self):
        """Load the default one-realm/one-host/one-service configuration."""
        self.setup_with_file('etc/nagios_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        """Load the configuration at *path* and build the in-test
        scheduler.
        """
        # i am arbiter-like: broks are collected by the test case itself
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        # NOTE(review): read_config is called twice here; the first
        # result is discarded. Looks redundant — confirm before changing.
        self.conf.read_config(self.config_files)
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'

        # Configuration compilation pipeline; the call order matters.
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.create_reversed_list()
        self.conf.remove_twins()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.clean_useless()
        self.conf.pythonize()
        self.conf.linkify()
        # (sic) "apply_dependancies" is the method name in this codebase
        # generation.
        self.conf.apply_dependancies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()

        self.confs = self.conf.cut_into_parts()
        self.dispatcher = Dispatcher(self.conf, self.me)
        scheddaemon = Shinken(None, False, False, False, None)
        self.sched = Scheduler(scheddaemon)
        scheddaemon.sched = self.sched

        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.schedule()

    def add(self, b):
        """Broker-like sink: store Broks, forward ExternalCommands to the
        scheduler.
        """
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """Schedule a check on *ref* and inject a fabricated result.
        Output is split on '|' into plain output and perfdata.
        """
        now = time.time()
        ref.schedule(force=True)
        # now checks are scheduled and we get them in the action queue
        check = ref.actions.pop()
        self.sched.add(check)  # check is now in sched.checks[]
        # fake execution
        check.check_time = now
        elts_line1 = output.split('|')
        # Before | is the output; after | is the perfdata.
        check.output = elts_line1[0]
        if len(elts_line1) > 1:
            check.perf_data = elts_line1[1]
        else:
            check.perf_data = ''
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61):
        """Run *count* scheduler iterations, faking the (obj, exit_status,
        output) results in *reflist* on each pass.
        """
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.worker_loop()
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            if do_sleep:
                time.sleep(sleep_time)

    def worker_loop(self):
        """Play the poller/reactionner role: mark pending actions as
        executed with exit status 0 and feed results back.
        """
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False,
                                              worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True,
                                               worker_name='tester')
        self.show_actions()
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        self.show_actions()

    def show_logs(self):
        """Print all 'log' broks collected by the scheduler."""
        print "--- logs <<<----------------------------------"
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                print "LOG:", brok.data['log']
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        """Print pending notifications and event handlers in id order."""
        print "--- actions <<<----------------------------------"
        for a in sorted(self.sched.actions.values(),
                        lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(),
                                                a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (
                    a.id, ref, a.type,
                    time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        """Print the collected log broks, then delete them."""
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        """Print the pending actions, then delete them."""
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        """Return the number of 'log' broks in the scheduler."""
        return len([b for b in self.sched.broks.values()
                    if b.type == 'log'])

    def count_actions(self):
        """Return the number of pending scheduler actions."""
        return len(self.sched.actions.values())

    def clear_logs(self):
        """Delete every 'log' brok from the scheduler."""
        id_to_del = []
        for b in self.sched.broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del self.sched.broks[id]

    def clear_actions(self):
        """Reset the scheduler's action store to empty."""
        self.sched.actions = {}

    def log_match(self, index, pattern):
        """Return True if the *index*-th log brok matches *pattern*
        (regex search); False otherwise.
        """
        # log messages are counted 1...n, so index=1 for the first message
        if index > self.count_logs():
            return False
        else:
            regex = re.compile(pattern)
            lognum = 1
            for brok in sorted(self.sched.broks.values(),
                               lambda x, y: x.id - y.id):
                if brok.type == 'log':
                    if index == lognum:
                        if re.search(regex, brok.data['log']):
                            return True
                    lognum += 1
        return False

    def any_log_match(self, pattern):
        """Return True if any log brok matches *pattern*."""
        regex = re.compile(pattern)
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    return True
        return False

    def get_log_match(self, pattern):
        """Return the list of log lines matching *pattern*, in brok id
        order.
        """
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(),
                           lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        """Print a banner with the current test id."""
        print "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        # Prefixed with x so it is not auto-collected as a test.
        self.print_header()
        self.assert_(self.conf.conf_is_correct)
def setup_with_file(self, path):
    """Load the configuration file *path*, run the full Shinken config
    compilation pipeline, and wire an in-process scheduler plus
    external-command managers onto self (which acts as the arbiter).

    NOTE(review): the pipeline calls below are strictly ordered; do not
    reorder them.
    """
    time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    self.config_files = [path]
    # --- parse the flat config into raw object definitions ---
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()
    # If we got one arbiter defined here (before default) we should be in a case where
    # the tester want to load/test a module, so we simulate an arbiter daemon
    # and the modules loading phase. As it has its own modulesmanager, should
    # not impact scheduler modules ones, especially we are asking for arbiter type :)
    if len(self.conf.arbiters) == 1:
        arbdaemon = Arbiter([''], [''], False, False, None, None)
        # only load if the module_dir is reallyexisting, so was set explicitly
        # in the test configuration
        if os.path.exists(getattr(self.conf, 'modules_dir', '')):
            arbdaemon.modules_dir = self.conf.modules_dir
        arbdaemon.load_modules_manager()
        # we request the instances without them being *started*
        # (for those that are concerned ("external" modules):
        # we will *start* these instances after we have been daemonized (if requested)
        me = None
        for arb in self.conf.arbiters:
            me = arb
            arbdaemon.modules_manager.set_modules(arb.modules)
        arbdaemon.do_load_modules()
        arbdaemon.load_modules_configuration_objects(raw_objects)
    # --- build and compile the object graph ---
    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    #import pdb;pdb.set_trace()
    self.conf.linkify_templates()
    #import pdb;pdb.set_trace()
    self.conf.apply_inheritance()
    #import pdb;pdb.set_trace()
    self.conf.explode()
    #print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    self.conf.compute_hash()
    #print "conf.services has %d elements" % len(self.conf.services)
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.set_initial_state()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        self.conf.dump()
        return
    self.conf.clean()
    self.confs = self.conf.cut_into_parts()
    self.conf.prepare_for_sending()
    self.conf.show_errors()
    # --- wire the in-test scheduler daemon ---
    self.dispatcher = Dispatcher(self.conf, self.me)
    scheddaemon = Shinken(None, False, False, False, None, None)
    self.scheddaemon = scheddaemon
    self.sched = scheddaemon.sched
    scheddaemon.modules_dir = modules_dir
    scheddaemon.load_modules_manager()
    # Remember to clean the logs we just created before launching tests
    self.clear_logs()
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf, in_test=True)
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2
    self.sched.conf.accept_passive_unknown_check_results = False
    self.sched.schedule()
class ShinkenTest(unittest.TestCase): def setUp(self): self.setup_with_file('etc/nagios_1r_1h_1s.cfg') def setup_with_file(self, path): # i am arbiter-like self.broks = {} self.me = None self.log = logger self.log.load_obj(self) self.config_files = [path] self.conf = Config() self.conf.read_config(self.config_files) buf = self.conf.read_config(self.config_files) raw_objects = self.conf.read_config_buf(buf) self.conf.create_objects_for_type(raw_objects, 'arbiter') self.conf.create_objects_for_type(raw_objects, 'module') self.conf.early_arbiter_linking() self.conf.create_objects(raw_objects) self.conf.old_properties_names_to_new() self.conf.instance_id = 0 self.conf.instance_name = 'test' self.conf.linkify_templates() self.conf.apply_inheritance() self.conf.explode() self.conf.create_reversed_list() self.conf.remove_twins() self.conf.apply_implicit_inheritance() self.conf.fill_default() self.conf.remove_templates() self.conf.create_reversed_list() self.conf.pythonize() self.conf.linkify() self.conf.apply_dependencies() self.conf.explode_global_conf() self.conf.propagate_timezone_option() self.conf.create_business_rules() self.conf.create_business_rules_dependencies() self.conf.is_correct() self.confs = self.conf.cut_into_parts() self.conf.show_errors() self.dispatcher = Dispatcher(self.conf, self.me) scheddaemon = Shinken(None, False, False, False, None) self.sched = Scheduler(scheddaemon) scheddaemon.sched = self.sched m = MacroResolver() m.init(self.conf) self.sched.load_conf(self.conf) e = ExternalCommandManager(self.conf, 'applyer') self.sched.external_command = e e.load_scheduler(self.sched) e2 = ExternalCommandManager(self.conf, 'dispatcher') e2.load_arbiter(self) self.external_command_dispatcher = e2 self.sched.schedule() def add(self, b): if isinstance(b, Brok): self.broks[b.id] = b return if isinstance(b, ExternalCommand): self.sched.run_external_command(b.cmd_line) def fake_check(self, ref, exit_status, output="OK"): #print "fake", ref now = time.time() 
ref.schedule(force=True) #now checks are schedule and we get them in #the action queue check = ref.actions.pop() self.sched.add(check) # check is now in sched.checks[] # fake execution check.check_time = now # and lie about when we will launch it because # if not, the schedule call for ref # will not really reschedule it because there # is a valid value in the future ref.next_chk = now - 0.5 check.get_outputs(output, 9000) check.exit_status = exit_status check.execution_time = 0.001 check.status = 'waitconsume' self.sched.waiting_results.append(check) def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61): for ref in reflist: (obj, exit_status, output) = ref obj.checks_in_progress = [] for loop in range(1, count + 1): print "processing check", loop for ref in reflist: (obj, exit_status, output) = ref obj.update_in_checking() self.fake_check(obj, exit_status, output) self.sched.manage_internal_checks() self.sched.consume_results() self.sched.get_new_actions() self.sched.get_new_broks() self.worker_loop() for ref in reflist: (obj, exit_status, output) = ref obj.checks_in_progress = [] self.sched.update_downtimes_and_comments() #time.sleep(ref.retry_interval * 60 + 1) if do_sleep: time.sleep(sleep_time) def worker_loop(self): self.sched.delete_zombie_checks() self.sched.delete_zombie_actions() checks = self.sched.get_to_run_checks(True, False, worker_name='tester') actions = self.sched.get_to_run_checks(False, True, worker_name='tester') #print "------------ worker loop checks ----------------" #print checks #print "------------ worker loop actions ----------------" self.show_actions() #print "------------ worker loop new ----------------" for a in actions: a.status = 'inpoller' a.check_time = time.time() a.exit_status = 0 self.sched.put_results(a) self.show_actions() #print "------------ worker loop end ----------------" def show_logs(self): print "--- logs <<<----------------------------------" for brok in sorted(self.sched.broks.values(), lambda x, 
y: x.id - y.id): if brok.type == 'log': print "LOG:", brok.data['log'] print "--- logs >>>----------------------------------" def show_actions(self): print "--- actions <<<----------------------------------" for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id): if a.is_a == 'notification': if a.ref.my_type == "host": ref = "host: %s" % a.ref.get_name() else: ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name()) print "NOTIFICATION %d %s %s %s %s" % (a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status) elif a.is_a == 'eventhandler': print "EVENTHANDLER:", a print "--- actions >>>----------------------------------" def show_and_clear_logs(self): self.show_logs() self.clear_logs() def show_and_clear_actions(self): self.show_actions() self.clear_actions() def count_logs(self): return len([b for b in self.sched.broks.values() if b.type == 'log']) def count_actions(self): return len(self.sched.actions.values()) def clear_logs(self): id_to_del = [] for b in self.sched.broks.values(): if b.type == 'log': id_to_del.append(b.id) for id in id_to_del: del self.sched.broks[id] def clear_actions(self): self.sched.actions = {} def log_match(self, index, pattern): # log messages are counted 1...n, so index=1 for the first message if index > self.count_logs(): return False else: regex = re.compile(pattern) lognum = 1 for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if index == lognum: if re.search(regex, brok.data['log']): return True lognum += 1 return False def any_log_match(self, pattern): regex = re.compile(pattern) for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if re.search(regex, brok.data['log']): return True return False def get_log_match(self, pattern): regex = re.compile(pattern) res = [] for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if re.search(regex, brok.data['log']): 
res.append(brok.data['log']) return res def print_header(self): print "#" * 80 + "\n" + "#" + " " * 78 + "#" print "#" + string.center(self.id(), 78) + "#" print "#" + " " * 78 + "#\n" + "#" * 80 + "\n" def xtest_conf_is_correct(self): self.print_header() self.assert_(self.conf.conf_is_correct) def init_livestatus(self): self.livelogs = 'tmp/livelogs.db' + self.testid self.db_archives = os.path.join(os.path.dirname(self.livelogs), 'archives') self.pnp4nagios = 'tmp/pnp4nagios_test' + self.testid self.livestatus_broker = Livestatus_broker(livestatus_modconf, '127.0.0.1', str(50000 + os.getpid()), 'live', [], self.livelogs, self.db_archives, 365, self.pnp4nagios, True) self.livestatus_broker.create_queues() #self.livestatus_broker.properties = { # 'to_queue' : 0, # 'from_queue' : 0 # # } self.livestatus_broker.init() self.livestatus_broker.db = LiveStatusDb(self.livestatus_broker.database_file, self.livestatus_broker.archive_path, self.livestatus_broker.max_logs_age) self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.configs, self.livestatus_broker.hosts, self.livestatus_broker.services, self.livestatus_broker.contacts, self.livestatus_broker.hostgroups, self.livestatus_broker.servicegroups, self.livestatus_broker.contactgroups, self.livestatus_broker.timeperiods, self.livestatus_broker.commands, self.livestatus_broker.schedulers, self.livestatus_broker.pollers, self.livestatus_broker.reactionners, self.livestatus_broker.brokers, self.livestatus_broker.db, self.livestatus_broker.use_aggressive_sql, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q)
class TestEndParsingType(unittest.TestCase):
    """Checks that after a full config parse every object property holds a
    value of the Python type its Prop class declares."""

    def map_type(self, obj):
        """Map a Prop instance to the Python type its values should have."""
        # TODO: Replace all str with unicode when done in property.default attribute
        # TODO: Fix ToGuessProp as it may be a list.
        if isinstance(obj, ListProp):
            return list
        if isinstance(obj, StringProp):
            return str
        if isinstance(obj, UnusedProp):
            return str
        if isinstance(obj, BoolProp):
            return bool
        if isinstance(obj, IntegerProp):
            return int
        if isinstance(obj, FloatProp):
            return float
        if isinstance(obj, CharProp):
            return str
        if isinstance(obj, DictProp):
            return dict
        if isinstance(obj, AddrProp):
            return str
        if isinstance(obj, ToGuessProp):
            return str
        # NOTE(review): falls through to an implicit None for unknown Prop types.

    def print_header(self):
        """Print an 80-column banner containing the current test id."""
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def add(self, b):
        """Arbiter-like sink: store broks, forward external commands."""
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def test_types(self):
        """Parse a reference config and assert property value types."""
        path = 'etc/shinken_1r_1h_1s.cfg'
        time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.setLevel("INFO")
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.compute_hash()
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        # Cannot do it for all obj for now. We have to ensure unicode everywhere fist
        for objs in [self.conf.arbiters]:
            for obj in objs:
                #print "=== obj : %s ===" % obj.__class__
                for prop in obj.properties:
                    if hasattr(obj, prop):
                        value = getattr(obj, prop)
                        # We should get ride of None, maybe use the "neutral" value for type
                        if value is not None:
                            #print("TESTING %s with value %s" % (prop, value))
                            self.assertIsInstance(value, self.map_type(obj.properties[prop]))
                        else:
                            print("Skipping %s " % prop)
                #print "==="
        # Manual check of several attr for self.conf.contacts
        # because contacts contains unicode attr
        for contact in self.conf.contacts:
            for prop in ["notificationways", "host_notification_commands", "service_notification_commands"]:
                if hasattr(contact, prop):
                    value = getattr(contact, prop)
                    # We should get ride of None, maybe use the "neutral" value for type
                    if value is not None:
                        print("TESTING %s with value %s" % (prop, value))
                        self.assertIsInstance(value, self.map_type(contact.properties[prop]))
                    else:
                        print("Skipping %s " % prop)
        # Same here
        for notifway in self.conf.notificationways:
            for prop in ["host_notification_commands", "service_notification_commands"]:
                if hasattr(notifway, prop):
                    value = getattr(notifway, prop)
                    # We should get ride of None, maybe use the "neutral" value for type
                    if value is not None:
                        print("TESTING %s with value %s" % (prop, value))
                        self.assertIsInstance(value, self.map_type(notifway.properties[prop]))
                    else:
                        print("Skipping %s " % prop)
class TestEndParsingType(unittest.TestCase):
    """Duplicate revision of the property-type end-parsing test.
    NOTE(review): this class shadows the identically-named class defined
    earlier in the file; only this later definition is effective."""

    def map_type(self, obj):
        """Map a Prop instance to the Python type its values should have."""
        # TODO: Replace all str with unicode when done in property.default attribute
        # TODO: Fix ToGuessProp as it may be a list.
        if isinstance(obj, ListProp):
            return list
        if isinstance(obj, StringProp):
            return str
        if isinstance(obj, UnusedProp):
            return str
        if isinstance(obj, BoolProp):
            return bool
        if isinstance(obj, IntegerProp):
            return int
        if isinstance(obj, FloatProp):
            return float
        if isinstance(obj, CharProp):
            return str
        if isinstance(obj, DictProp):
            return dict
        if isinstance(obj, AddrProp):
            return str
        if isinstance(obj, ToGuessProp):
            return str
        # NOTE(review): falls through to an implicit None for unknown Prop types.

    def print_header(self):
        """Print an 80-column banner containing the current test id."""
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def add(self, b):
        """Arbiter-like sink: store broks, forward external commands."""
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def test_types(self):
        """Parse a reference config and assert property value types."""
        path = 'etc/shinken_1r_1h_1s.cfg'
        time_hacker.set_my_time()
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.setLevel("INFO")
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.compute_hash()
        self.conf.override_properties()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        # Cannot do it for all obj for now. We have to ensure unicode everywhere fist
        for objs in [self.conf.arbiters]:
            for obj in objs:
                #print "=== obj : %s ===" % obj.__class__
                for prop in obj.properties:
                    if hasattr(obj, prop):
                        value = getattr(obj, prop)
                        # We should get ride of None, maybe use the "neutral" value for type
                        if value is not None:
                            #print("TESTING %s with value %s" % (prop, value))
                            self.assertIsInstance(
                                value, self.map_type(obj.properties[prop]))
                        else:
                            print("Skipping %s " % prop)
                #print "==="
        # Manual check of several attr for self.conf.contacts
        # because contacts contains unicode attr
        for contact in self.conf.contacts:
            for prop in [
                    "notificationways", "host_notification_commands",
                    "service_notification_commands"
            ]:
                if hasattr(contact, prop):
                    value = getattr(contact, prop)
                    # We should get ride of None, maybe use the "neutral" value for type
                    if value is not None:
                        print("TESTING %s with value %s" % (prop, value))
                        self.assertIsInstance(
                            value, self.map_type(contact.properties[prop]))
                    else:
                        print("Skipping %s " % prop)
        # Same here
        for notifway in self.conf.notificationways:
            for prop in [
                    "host_notification_commands",
                    "service_notification_commands"
            ]:
                if hasattr(notifway, prop):
                    value = getattr(notifway, prop)
                    # We should get ride of None, maybe use the "neutral" value for type
                    if value is not None:
                        print("TESTING %s with value %s" % (prop, value))
                        self.assertIsInstance(
                            value, self.map_type(notifway.properties[prop]))
                    else:
                        print("Skipping %s " % prop)
class ShinkenTest(unittest.TestCase): def setUp(self): self.setup_with_file('etc/nagios_1r_1h_1s.cfg') def setup_with_file(self, path): # i am arbiter-like self.broks = {} self.me = None self.log = logger self.log.load_obj(self) self.config_files = [path] self.conf = Config() buf = self.conf.read_config(self.config_files) raw_objects = self.conf.read_config_buf(buf) self.conf.create_objects_for_type(raw_objects, 'arbiter') self.conf.create_objects_for_type(raw_objects, 'module') self.conf.early_arbiter_linking() self.conf.create_objects(raw_objects) self.conf.old_properties_names_to_new() self.conf.instance_id = 0 self.conf.instance_name = 'test' # Hack push_flavor, that is set by the dispatcher self.conf.push_flavor = 0 self.conf.load_triggers() self.conf.linkify_templates() self.conf.apply_inheritance() self.conf.explode() #print "Aconf.services has %d elements" % len(self.conf.services) self.conf.create_reversed_list() self.conf.remove_twins() self.conf.apply_implicit_inheritance() self.conf.fill_default() self.conf.remove_templates() self.conf.compute_hash() #print "conf.services has %d elements" % len(self.conf.services) self.conf.create_reversed_list() self.conf.pythonize() self.conf.linkify() self.conf.apply_dependencies() self.conf.explode_global_conf() self.conf.propagate_timezone_option() self.conf.create_business_rules() self.conf.create_business_rules_dependencies() self.conf.is_correct() if not self.conf.conf_is_correct: print "The conf is not correct, I stop here" return self.confs = self.conf.cut_into_parts() self.conf.prepare_for_sending() self.conf.show_errors() self.dispatcher = Dispatcher(self.conf, self.me) scheddaemon = Shinken(None, False, False, False, None) self.sched = Scheduler(scheddaemon) scheddaemon.sched = self.sched m = MacroResolver() m.init(self.conf) self.sched.load_conf(self.conf, in_test=True) e = ExternalCommandManager(self.conf, 'applyer') self.sched.external_command = e e.load_scheduler(self.sched) e2 = 
ExternalCommandManager(self.conf, 'dispatcher') e2.load_arbiter(self) self.external_command_dispatcher = e2 self.sched.schedule() def add(self, b): if isinstance(b, Brok): self.broks[b.id] = b return if isinstance(b, ExternalCommand): self.sched.run_external_command(b.cmd_line) def fake_check(self, ref, exit_status, output="OK"): #print "fake", ref now = time.time() ref.schedule(force=True) # now checks are schedule and we get them in # the action queue check = ref.actions.pop() self.sched.add(check) # check is now in sched.checks[] # fake execution check.check_time = now # and lie about when we will launch it because # if not, the schedule call for ref # will not really reschedule it because there # is a valid value in the future ref.next_chk = now - 0.5 check.get_outputs(output, 9000) check.exit_status = exit_status check.execution_time = 0.001 check.status = 'waitconsume' self.sched.waiting_results.append(check) def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61): for ref in reflist: (obj, exit_status, output) = ref obj.checks_in_progress = [] for loop in range(1, count + 1): print "processing check", loop for ref in reflist: (obj, exit_status, output) = ref obj.update_in_checking() self.fake_check(obj, exit_status, output) self.sched.manage_internal_checks() self.sched.consume_results() self.sched.get_new_actions() self.sched.get_new_broks() self.worker_loop() for ref in reflist: (obj, exit_status, output) = ref obj.checks_in_progress = [] self.sched.update_downtimes_and_comments() #time.sleep(ref.retry_interval * 60 + 1) if do_sleep: time.sleep(sleep_time) def worker_loop(self): self.sched.delete_zombie_checks() self.sched.delete_zombie_actions() checks = self.sched.get_to_run_checks(True, False, worker_name='tester') actions = self.sched.get_to_run_checks(False, True, worker_name='tester') #print "------------ worker loop checks ----------------" #print checks #print "------------ worker loop actions ----------------" self.show_actions() 
#print "------------ worker loop new ----------------" for a in actions: a.status = 'inpoller' a.check_time = time.time() a.exit_status = 0 self.sched.put_results(a) self.show_actions() #print "------------ worker loop end ----------------" def show_logs(self): print "--- logs <<<----------------------------------" for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': brok.prepare() print "LOG:", brok.data['log'] print "--- logs >>>----------------------------------" def show_actions(self): print "--- actions <<<----------------------------------" for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id): if a.is_a == 'notification': if a.ref.my_type == "host": ref = "host: %s" % a.ref.get_name() else: ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name()) print "NOTIFICATION %d %s %s %s %s" % (a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status) elif a.is_a == 'eventhandler': print "EVENTHANDLER:", a print "--- actions >>>----------------------------------" def show_and_clear_logs(self): self.show_logs() self.clear_logs() def show_and_clear_actions(self): self.show_actions() self.clear_actions() def count_logs(self): return len([b for b in self.sched.broks.values() if b.type == 'log']) def count_actions(self): return len(self.sched.actions.values()) def clear_logs(self): id_to_del = [] for b in self.sched.broks.values(): if b.type == 'log': id_to_del.append(b.id) for id in id_to_del: del self.sched.broks[id] def clear_actions(self): self.sched.actions = {} def log_match(self, index, pattern): # log messages are counted 1...n, so index=1 for the first message if index > self.count_logs(): return False else: regex = re.compile(pattern) lognum = 1 for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': brok.prepare() if index == lognum: if re.search(regex, brok.data['log']): return True lognum += 1 return False def any_log_match(self, 
pattern): regex = re.compile(pattern) for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': brok.prepare() if re.search(regex, brok.data['log']): return True return False def get_log_match(self, pattern): regex = re.compile(pattern) res = [] for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if re.search(regex, brok.data['log']): res.append(brok.data['log']) return res def print_header(self): print "#" * 80 + "\n" + "#" + " " * 78 + "#" print "#" + string.center(self.id(), 78) + "#" print "#" + " " * 78 + "#\n" + "#" * 80 + "\n" def xtest_conf_is_correct(self): self.print_header() self.assert_(self.conf.conf_is_correct) def find_modules_path(self): """ Find the absolute path of the shinken module directory and returns it. """ import shinken # BEWARE: this way of finding path is good if we still # DO NOT HAVE CHANGE PWD!!! # Now get the module path. It's in fact the directory modules # inside the shinken directory. So let's find it. print "modulemanager file", shinken.modulesmanager.__file__ modulespath = os.path.abspath(shinken.modulesmanager.__file__) print "modulemanager absolute file", modulespath # We got one of the files of parent_path = os.path.dirname(os.path.dirname(modulespath)) modulespath = os.path.join(parent_path, 'shinken', 'modules') print("Using modules path: %s" % (modulespath)) return modulespath def do_load_modules(self): self.modules_manager.load_and_init() self.log.log("I correctly loaded the modules: [%s]" % (','.join([inst.get_name() for inst in self.modules_manager.instances]))) def init_livestatus(self, modconf=None): self.livelogs = 'tmp/livelogs.db' + self.testid if modconf is None: modconf = Module({'module_name': 'LiveStatus', 'module_type': 'livestatus', 'port': str(50000 + os.getpid()), 'pnp_path': 'tmp/pnp4nagios_test' + self.testid, 'host': '127.0.0.1', 'socket': 'live', 'name': 'test', #? 
}) dbmodconf = Module({'module_name': 'LogStore', 'module_type': 'logstore_sqlite', 'use_aggressive_sql': "0", 'database_file': self.livelogs, 'archive_path': os.path.join(os.path.dirname(self.livelogs), 'archives'), }) modconf.modules = [dbmodconf] self.livestatus_broker = LiveStatus_broker(modconf) self.livestatus_broker.create_queues() #--- livestatus_broker.main self.livestatus_broker.log = logger # this seems to damage the logger so that the scheduler can't use it #self.livestatus_broker.log.load_obj(self.livestatus_broker) self.livestatus_broker.debug_output = [] self.livestatus_broker.modules_manager = ModulesManager('livestatus', self.livestatus_broker.find_modules_path(), []) self.livestatus_broker.modules_manager.set_modules(self.livestatus_broker.modules) # We can now output some previouly silented debug ouput self.livestatus_broker.do_load_modules() for inst in self.livestatus_broker.modules_manager.instances: if inst.properties["type"].startswith('logstore'): f = getattr(inst, 'load', None) if f and callable(f): f(self.livestatus_broker) # !!! NOT self here !!!! break for s in self.livestatus_broker.debug_output: print "errors during load", s del self.livestatus_broker.debug_output self.livestatus_broker.rg = LiveStatusRegenerator() self.livestatus_broker.datamgr = datamgr datamgr.load(self.livestatus_broker.rg) self.livestatus_broker.query_cache = LiveStatusQueryCache() self.livestatus_broker.query_cache.disable() self.livestatus_broker.rg.register_cache(self.livestatus_broker.query_cache) #--- livestatus_broker.main self.livestatus_broker.init() self.livestatus_broker.db = self.livestatus_broker.modules_manager.instances[0] self.livestatus_broker.livestatus = LiveStatus(self.livestatus_broker.datamgr, self.livestatus_broker.query_cache, self.livestatus_broker.db, self.livestatus_broker.pnp_path, self.livestatus_broker.from_q) #--- livestatus_broker.do_main self.livestatus_broker.db.open()
class ShinkenTest(unittest.TestCase): def setUp(self): self.setup_with_file('etc/nagios_1r_1h_1s.cfg') def setup_with_file(self, path): # i am arbiter-like self.broks = {} self.me = None self.log = logger self.log.load_obj(self) self.config_files = [path] self.conf = Config() self.conf.read_config(self.config_files) buf = self.conf.read_config(self.config_files) raw_objects = self.conf.read_config_buf(buf) self.conf.create_objects_for_type(raw_objects, 'arbiter') self.conf.create_objects_for_type(raw_objects, 'module') self.conf.early_arbiter_linking() self.conf.create_objects(raw_objects) self.conf.instance_id = 0 self.conf.instance_name = 'test' self.conf.linkify_templates() self.conf.apply_inheritance() self.conf.explode() self.conf.create_reversed_list() self.conf.remove_twins() self.conf.apply_implicit_inheritance() self.conf.fill_default() self.conf.clean_useless() self.conf.pythonize() self.conf.linkify() self.conf.apply_dependancies() self.conf.explode_global_conf() self.conf.propagate_timezone_option() self.conf.create_business_rules() self.conf.create_business_rules_dependencies() self.conf.is_correct() self.confs = self.conf.cut_into_parts() self.dispatcher = Dispatcher(self.conf, self.me) scheddaemon = Shinken(None, False, False, False, None) self.sched = Scheduler(scheddaemon) scheddaemon.sched = self.sched m = MacroResolver() m.init(self.conf) self.sched.load_conf(self.conf) e = ExternalCommandManager(self.conf, 'applyer') self.sched.external_command = e e.load_scheduler(self.sched) e2 = ExternalCommandManager(self.conf, 'dispatcher') e2.load_arbiter(self) self.external_command_dispatcher = e2 self.sched.schedule() def add(self, b): if isinstance(b, Brok): self.broks[b.id] = b return if isinstance(b, ExternalCommand): self.sched.run_external_command(b.cmd_line) def fake_check(self, ref, exit_status, output="OK"): #print "fake", ref now = time.time() ref.schedule(force=True) #now checks are schedule and we get them in #the action queue check = 
ref.actions.pop() self.sched.add(check) # check is now in sched.checks[] # fake execution check.check_time = now elts_line1 = output.split('|') #First line before | is output check.output = elts_line1[0] #After | is perfdata if len(elts_line1) > 1: check.perf_data = elts_line1[1] else: check.perf_data = '' check.exit_status = exit_status check.execution_time = 0.001 check.status = 'waitconsume' self.sched.waiting_results.append(check) def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61): for ref in reflist: (obj, exit_status, output) = ref obj.checks_in_progress = [] for loop in range(1, count + 1): print "processing check", loop for ref in reflist: (obj, exit_status, output) = ref obj.update_in_checking() self.fake_check(obj, exit_status, output) self.sched.manage_internal_checks() self.sched.consume_results() self.sched.get_new_actions() self.sched.get_new_broks() self.worker_loop() for ref in reflist: (obj, exit_status, output) = ref obj.checks_in_progress = [] self.sched.update_downtimes_and_comments() #time.sleep(ref.retry_interval * 60 + 1) if do_sleep: time.sleep(sleep_time) def worker_loop(self): self.sched.delete_zombie_checks() self.sched.delete_zombie_actions() checks = self.sched.get_to_run_checks(True, False, worker_name='tester') actions = self.sched.get_to_run_checks(False, True, worker_name='tester') #print "------------ worker loop checks ----------------" #print checks #print "------------ worker loop actions ----------------" self.show_actions() #print "------------ worker loop new ----------------" for a in actions: a.status = 'inpoller' a.check_time = time.time() a.exit_status = 0 self.sched.put_results(a) self.show_actions() #print "------------ worker loop end ----------------" def show_logs(self): print "--- logs <<<----------------------------------" for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': print "LOG:", brok.data['log'] print "--- logs 
>>>----------------------------------" def show_actions(self): print "--- actions <<<----------------------------------" for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id): if a.is_a == 'notification': if a.ref.my_type == "host": ref = "host: %s" % a.ref.get_name() else: ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name()) print "NOTIFICATION %d %s %s %s %s" % ( a.id, ref, a.type, time.asctime(time.localtime( a.t_to_go)), a.status) elif a.is_a == 'eventhandler': print "EVENTHANDLER:", a print "--- actions >>>----------------------------------" def show_and_clear_logs(self): self.show_logs() self.clear_logs() def show_and_clear_actions(self): self.show_actions() self.clear_actions() def count_logs(self): return len([b for b in self.sched.broks.values() if b.type == 'log']) def count_actions(self): return len(self.sched.actions.values()) def clear_logs(self): id_to_del = [] for b in self.sched.broks.values(): if b.type == 'log': id_to_del.append(b.id) for id in id_to_del: del self.sched.broks[id] def clear_actions(self): self.sched.actions = {} def log_match(self, index, pattern): # log messages are counted 1...n, so index=1 for the first message if index > self.count_logs(): return False else: regex = re.compile(pattern) lognum = 1 for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if index == lognum: if re.search(regex, brok.data['log']): return True lognum += 1 return False def any_log_match(self, pattern): regex = re.compile(pattern) for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if re.search(regex, brok.data['log']): return True return False def get_log_match(self, pattern): regex = re.compile(pattern) res = [] for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id): if brok.type == 'log': if re.search(regex, brok.data['log']): res.append(brok.data['log']) return res def print_header(self): print "#" * 80 + "\n" + "#" 
+ " " * 78 + "#" print "#" + string.center(self.id(), 78) + "#" print "#" + " " * 78 + "#\n" + "#" * 80 + "\n" def xtest_conf_is_correct(self): self.print_header() self.assert_(self.conf.conf_is_correct)
class ShinkenTest(unittest.TestCase, _Unittest2CompatMixIn):
    """Base test case that stands up a full in-process Shinken scheduler.

    setUp() loads a canned configuration and wires a Scheduler, a
    MacroResolver and two ExternalCommandManagers together; the test
    instance itself plays the arbiter role (it receives broks via add()).
    """

    def setUp(self):
        self.setup_with_file('etc/shinken_1r_1h_1s.cfg')

    def setup_with_file(self, path):
        """Load *path* and run the whole config pipeline, then build a live scheduler.

        NOTE(review): the call order below mirrors the arbiter's real
        configuration pipeline — do not reorder these steps.
        """
        self.print_header()
        # i am arbiter-like
        self.broks = {}
        self.me = None
        self.log = logger
        self.log.load_obj(self)
        self.config_files = [path]
        self.conf = Config()
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')
        self.conf.early_arbiter_linking()
        self.conf.create_objects(raw_objects)
        self.conf.old_properties_names_to_new()
        self.conf.instance_id = 0
        self.conf.instance_name = 'test'
        # Hack push_flavor, that is set by the dispatcher
        self.conf.push_flavor = 0
        self.conf.load_triggers()
        self.conf.linkify_templates()
        self.conf.apply_inheritance()
        self.conf.explode()
        #print "Aconf.services has %d elements" % len(self.conf.services)
        self.conf.create_reversed_list()
        self.conf.remove_twins()
        self.conf.apply_implicit_inheritance()
        self.conf.fill_default()
        self.conf.remove_templates()
        self.conf.compute_hash()
        #print "conf.services has %d elements" % len(self.conf.services)
        # rebuild the reversed list: remove_templates() changed the object set
        self.conf.create_reversed_list()
        self.conf.override_properties()
        self.conf.pythonize()
        count = self.conf.remove_exclusions()
        # only rebuild again if exclusions actually removed something
        if count > 0:
            self.conf.create_reversed_list()
        self.conf.linkify()
        self.conf.apply_dependencies()
        self.conf.explode_global_conf()
        self.conf.propagate_timezone_option()
        self.conf.create_business_rules()
        self.conf.create_business_rules_dependencies()
        self.conf.is_correct()
        if not self.conf.conf_is_correct:
            print "The conf is not correct, I stop here"
            return
        self.conf.clean()
        self.confs = self.conf.cut_into_parts()
        self.conf.prepare_for_sending()
        self.conf.show_errors()
        self.dispatcher = Dispatcher(self.conf, self.me)
        scheddaemon = Shinken(None, False, False, False, None, None)
        self.sched = Scheduler(scheddaemon)
        scheddaemon.sched = self.sched
        # presumably modules_dir is a module-level constant defined earlier in this file — verify
        scheddaemon.modules_dir = modules_dir
        scheddaemon.load_modules_manager()
        # Remember to clean the logs we just created before launching tests
        self.clear_logs()
        m = MacroResolver()
        m.init(self.conf)
        self.sched.load_conf(self.conf, in_test=True)
        e = ExternalCommandManager(self.conf, 'applyer')
        self.sched.external_command = e
        e.load_scheduler(self.sched)
        e2 = ExternalCommandManager(self.conf, 'dispatcher')
        e2.load_arbiter(self)
        self.external_command_dispatcher = e2
        self.sched.schedule()

    def add(self, b):
        """Arbiter-style sink: store Broks locally, forward ExternalCommands to the scheduler."""
        if isinstance(b, Brok):
            self.broks[b.id] = b
            return
        if isinstance(b, ExternalCommand):
            self.sched.run_external_command(b.cmd_line)

    def fake_check(self, ref, exit_status, output="OK"):
        """Schedule a check on *ref* and fake its execution result.

        Passing exit_status=None only schedules the check, without faking
        any result (useful for manual business rules rescheduling).
        """
        #print "fake", ref
        now = time.time()
        ref.schedule(force=True)
        # now checks are schedule and we get them in
        # the action queue
        #check = ref.actions.pop()
        check = ref.checks_in_progress[0]
        self.sched.add(check)  # check is now in sched.checks[]
        # Allows to force check scheduling without setting its status nor
        # output. Useful for manual business rules rescheduling, for instance.
        if exit_status is None:
            return
        # fake execution
        check.check_time = now
        # and lie about when we will launch it because
        # if not, the schedule call for ref
        # will not really reschedule it because there
        # is a valid value in the future
        ref.next_chk = now - 0.5
        check.get_outputs(output, 9000)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'
        self.sched.waiting_results.append(check)

    def scheduler_loop(self, count, reflist, do_sleep=False, sleep_time=61, verbose=True):
        """Run *count* full scheduler iterations, faking check results.

        reflist is a list of (object, exit_status, output) tuples; each
        iteration fakes one check per object then consumes the results.
        """
        # start from a clean in-progress state for every monitored object
        for ref in reflist:
            (obj, exit_status, output) = ref
            obj.checks_in_progress = []
        for loop in range(1, count + 1):
            if verbose is True:
                print "processing check", loop
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.update_in_checking()
                self.fake_check(obj, exit_status, output)
            self.sched.manage_internal_checks()
            self.sched.consume_results()
            self.sched.get_new_actions()
            self.sched.get_new_broks()
            self.sched.scatter_master_notifications()
            self.worker_loop(verbose)
            for ref in reflist:
                (obj, exit_status, output) = ref
                obj.checks_in_progress = []
            self.sched.update_downtimes_and_comments()
            #time.sleep(ref.retry_interval * 60 + 1)
            if do_sleep:
                time.sleep(sleep_time)

    def worker_loop(self, verbose=True):
        """Play the poller/reactionner role: take runnable actions and feed back fake OK results."""
        self.sched.delete_zombie_checks()
        self.sched.delete_zombie_actions()
        checks = self.sched.get_to_run_checks(True, False, worker_name='tester')
        actions = self.sched.get_to_run_checks(False, True, worker_name='tester')
        #print "------------ worker loop checks ----------------"
        #print checks
        #print "------------ worker loop actions ----------------"
        if verbose is True:
            self.show_actions()
        #print "------------ worker loop new ----------------"
        for a in actions:
            # pretend a poller executed the action successfully
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.sched.put_results(a)
        if verbose is True:
            self.show_actions()
        #print "------------ worker loop end ----------------"

    def show_logs(self):
        """Print every buffered 'log' brok, in id order."""
        print "--- logs <<<----------------------------------"
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                # prepare() unserializes the brok data before access
                brok.prepare()
                print "LOG:", brok.data['log']
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        """Print every pending scheduler action (notification or eventhandler), sorted by id."""
        print "--- actions <<<----------------------------------"
        for a in sorted(self.sched.actions.values(), lambda x, y: x.id - y.id):
            if a.is_a == 'notification':
                if a.ref.my_type == "host":
                    ref = "host: %s" % a.ref.get_name()
                else:
                    # service notification: show both the host and the service name
                    ref = "host: %s svc: %s" % (a.ref.host.get_name(), a.ref.get_name())
                print "NOTIFICATION %d %s %s %s %s" % (a.id, ref, a.type, time.asctime(time.localtime(a.t_to_go)), a.status)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_and_clear_logs(self):
        """Dump the buffered log broks, then discard them."""
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        """Dump the pending actions, then discard them."""
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        """Return how many 'log'-type broks are buffered in the scheduler."""
        return len([b for b in self.sched.broks.values() if b.type == 'log'])

    def count_actions(self):
        """Return how many actions the scheduler currently holds."""
        # NOTE(review): len(dict.values()) builds a throwaway list in
        # Python 2 — len(self.sched.actions) would suffice
        return len(self.sched.actions.values())

    def clear_logs(self):
        """Remove every 'log'-type brok from the scheduler's brok buffer."""
        # Collect ids first: deleting while iterating the dict would break iteration
        id_to_del = []
        for b in self.sched.broks.values():
            if b.type == 'log':
                id_to_del.append(b.id)
        for id in id_to_del:
            del self.sched.broks[id]

    def clear_actions(self):
        """Drop all pending scheduler actions."""
        self.sched.actions = {}

    def log_match(self, index, pattern):
        """Return True if the index-th buffered log message matches *pattern* (regex search)."""
        # log messages are counted 1...n, so index=1 for the first message
        if index > self.count_logs():
            return False
        else:
            regex = re.compile(pattern)
            lognum = 1
            # iterate logs in id order so 'index' is deterministic
            for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
                if brok.type == 'log':
                    brok.prepare()
                    if index == lognum:
                        if re.search(regex, brok.data['log']):
                            return True
                    lognum += 1
            return False

    def any_log_match(self, pattern):
        """Return True if any buffered log message matches *pattern* (regex search)."""
        regex = re.compile(pattern)
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                brok.prepare()
                if re.search(regex, brok.data['log']):
                    return True
        return False

    def get_log_match(self, pattern):
        """Return the list of buffered log messages matching *pattern* (regex search), in id order."""
        regex = re.compile(pattern)
        res = []
        for brok in sorted(self.sched.broks.values(), lambda x, y: x.id - y.id):
            if brok.type == 'log':
                if re.search(regex, brok.data['log']):
                    res.append(brok.data['log'])
        return res

    def print_header(self):
        """Print the current test id inside an 80-column '#' banner."""
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        # 'xtest_' prefix keeps this out of the default test discovery
        self.print_header()
        self.assert_(self.conf.conf_is_correct)