def __init__(self, config_file, is_daemon, do_replace, debug, debug_file):
    super(Receiver, self).__init__(
        'receiver', config_file, is_daemon, do_replace, debug, debug_file)

    # Our arbiters
    self.arbiters = {}

    # Our pollers and reactionners
    self.pollers = {}
    self.reactionners = {}

    # Modules are loaded only once
    self.have_modules = False

    # Can have a queue of external_commands given by modules
    # that will be taken by the arbiter to process
    self.external_commands = []
    # and the unprocessed ones, a buffer
    self.unprocessed_external_commands = []

    self.host_assoc = {}
    self.direct_routing = False
    self.accept_passive_unknown_check_results = False

    self.http_interface = ReceiverInterface(self)

    # Now create the external commander. It's just here to dispatch
    # the commands to the schedulers
    ecm = ExternalCommandManager(None, 'receiver')
    ecm.load_receiver(self)
    self.external_command = ecm
def test_unknown_check_result_brok(self):
    # unknown_host_check_result_brok
    excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Bob is not happy'
    expected = {'time_stamp': 1234567890, 'return_code': '2', 'host_name': 'test_host_0',
                'output': 'Bob is not happy', 'perf_data': None}
    result = ujson.loads(ExternalCommandManager.get_unknown_check_result_brok(excmd).data)
    self.assertEqual(expected, result)

    # unknown_host_check_result_brok with perfdata
    excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;test_host_0;2;Bob is not happy|rtt=9999'
    expected = {'time_stamp': 1234567890, 'return_code': '2', 'host_name': 'test_host_0',
                'output': 'Bob is not happy', 'perf_data': 'rtt=9999'}
    result = ujson.loads(ExternalCommandManager.get_unknown_check_result_brok(excmd).data)
    self.assertEqual(expected, result)

    # unknown_host_check_result_brok for another host
    excmd = '[1234567890] PROCESS_HOST_CHECK_RESULT;host-checked;0;Everything OK'
    expected = {'time_stamp': 1234567890, 'return_code': '0', 'host_name': 'host-checked',
                'output': 'Everything OK', 'perf_data': None}
    result = ujson.loads(ExternalCommandManager.get_unknown_check_result_brok(excmd).data)
    self.assertEqual(expected, result)

    # unknown_service_check_result_brok with perfdata
    excmd = '[1234567890] PROCESS_SERVICE_CHECK_RESULT;test_host_0;test_ok_0;1;' \
            'Bobby is not happy|rtt=9999;5;10;0;10000'
    expected = {'host_name': 'test_host_0', 'time_stamp': 1234567890,
                'service_description': 'test_ok_0', 'return_code': '1',
                'output': 'Bobby is not happy', 'perf_data': 'rtt=9999;5;10;0;10000'}
    result = ujson.loads(ExternalCommandManager.get_unknown_check_result_brok(excmd).data)
    self.assertEqual(expected, result)
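# For orientation, a hedged sketch of how the command lines exercised above decompose
# into the expected dictionaries. Illustration only: the real parsing lives in
# ExternalCommandManager.get_unknown_check_result_brok(), not in this helper.
def parse_check_result(excmd):
    stamp, _, command = excmd.partition('] ')
    time_stamp = int(stamp.lstrip('['))
    if command.startswith('PROCESS_HOST_CHECK_RESULT;'):
        _, host_name, return_code, output = command.split(';', 3)
        result = {'time_stamp': time_stamp, 'host_name': host_name,
                  'return_code': return_code}
    else:  # PROCESS_SERVICE_CHECK_RESULT
        _, host_name, service_description, return_code, output = command.split(';', 4)
        result = {'time_stamp': time_stamp, 'host_name': host_name,
                  'service_description': service_description, 'return_code': return_code}
    # Performance data, if any, follows the first '|' of the plugin output
    output, sep, perf_data = output.partition('|')
    result['output'] = output
    result['perf_data'] = perf_data if sep else None
    return result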
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file,
             port=None, local_log=None, daemon_name=None):
    self.daemon_name = 'receiver'
    if daemon_name:
        self.daemon_name = daemon_name

    super(Receiver, self).__init__(self.daemon_name, config_file, is_daemon, do_replace,
                                   debug, debug_file, port, local_log)

    # Our arbiters
    self.arbiters = {}

    # Our pollers and reactionners
    self.pollers = {}
    self.reactionners = {}

    # Modules are loaded only once
    self.have_modules = False

    # Now an external commands manager and a list for the external commands
    self.external_commands_manager = None
    self.external_commands = []
    # and the unprocessed ones, a buffer
    self.unprocessed_external_commands = []

    self.host_assoc = {}
    self.accept_passive_unknown_check_results = False

    self.http_interface = ReceiverInterface(self)

    # Now create the external commands manager
    # We are a receiver: our role is to get and dispatch commands to the schedulers
    self.external_commands_manager = ExternalCommandManager(None, 'receiver', self)
def setup_new_conf(self):
    # pylint: disable=too-many-statements, too-many-branches, too-many-locals
    """Setup new conf received for scheduler

    :return: None
    """
    # Execute the base class treatment...
    super(Alignak, self).setup_new_conf()

    # ...then our own specific treatment!
    with self.conf_lock:
        # self_conf is our own configuration from the alignak environment
        # self_conf = self.cur_conf['self_conf']
        logger.debug("Got config: %s", self.cur_conf)

        if 'conf_part' not in self.cur_conf:
            self.cur_conf['conf_part'] = None
        conf_part = self.cur_conf['conf_part']

        # Ok now we can save the retention data
        if self.sched.pushed_conf is not None:
            self.sched.update_retention()

        # Get the monitored objects configuration
        t00 = time.time()
        received_conf_part = None
        try:
            received_conf_part = unserialize(conf_part)
            assert received_conf_part is not None
        except AssertionError as exp:
            # This to indicate that no configuration is managed by this scheduler...
            logger.warning("No managed configuration received from arbiter")
        except AlignakClassLookupException as exp:  # pragma: no cover
            # This to indicate that the new configuration is not managed...
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter",
                "_error": str(exp)
            }
            logger.error(self.new_conf)
            logger.error("Back trace of the error:\n%s", traceback.format_exc())
            return
        except Exception as exp:  # pylint: disable=broad-except
            # This to indicate that the new configuration is not managed...
            self.new_conf = {
                "_status": "Cannot un-serialize configuration received from arbiter",
                "_error": str(exp)
            }
            logger.error(self.new_conf)
            self.exit_on_exception(exp, str(self.new_conf))

        # if not received_conf_part:
        #     return

        logger.info("Monitored configuration %s received at %d. Un-serialized in %d secs",
                    received_conf_part, t00, time.time() - t00)
        logger.info("Scheduler received configuration : %s", received_conf_part)

        # Now we create our pollers, reactionners and brokers
        for link_type in ['pollers', 'reactionners', 'brokers']:
            if link_type not in self.cur_conf['satellites']:
                logger.error("Missing %s in the configuration!", link_type)
                continue

            my_satellites = getattr(self, link_type, {})
            received_satellites = self.cur_conf['satellites'][link_type]
            for link_uuid in received_satellites:
                rs_conf = received_satellites[link_uuid]
                logger.debug("- received %s - %s: %s", rs_conf['instance_id'],
                             rs_conf['type'], rs_conf['name'])

                # Must look if we already had a configuration and save our broks
                already_got = rs_conf['instance_id'] in my_satellites
                broks = []
                actions = {}
                wait_homerun = {}
                external_commands = {}
                running_id = 0
                if already_got:
                    logger.warning("I already got: %s", rs_conf['instance_id'])
                    # Save some information
                    running_id = my_satellites[link_uuid].running_id
                    (broks, actions, wait_homerun, external_commands) = \
                        my_satellites[link_uuid].get_and_clear_context()
                    # Delete the former link
                    del my_satellites[link_uuid]

                # My new satellite link...
                new_link = SatelliteLink.get_a_satellite_link(link_type[:-1], rs_conf)
                my_satellites[new_link.uuid] = new_link
                logger.info("I got a new %s satellite: %s", link_type[:-1], new_link)

                new_link.running_id = running_id
                new_link.external_commands = external_commands
                new_link.broks = broks
                new_link.wait_homerun = wait_homerun
                new_link.actions = actions

                # Replacing the satellite address and port by those defined in satellite_map
                if new_link.name in self.cur_conf['override_conf'].get('satellite_map', {}):
                    override_conf = self.cur_conf['override_conf']
                    overriding = override_conf.get('satellite_map')[new_link.name]
                    logger.warning("Do not override the configuration for: %s, with: %s. "
                                   "Please check whether this is necessary!",
                                   new_link.name, overriding)

        # First mix conf and override_conf to have our definitive conf
        for prop in getattr(self.cur_conf, 'override_conf', []):
            logger.debug("Overriden: %s / %s ", prop, getattr(received_conf_part, prop, None))
            logger.debug("Overriding: %s / %s ", prop, self.cur_conf['override_conf'])
            setattr(received_conf_part, prop, self.cur_conf['override_conf'].get(prop, None))

        # Scheduler modules
        if not self.have_modules:
            try:
                logger.debug("Modules configuration: %s", self.cur_conf['modules'])
                self.modules = unserialize(self.cur_conf['modules'], no_load=True)
            except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
                logger.error('Cannot un-serialize modules configuration '
                             'received from arbiter: %s', exp)
            if self.modules:
                logger.debug("I received some modules configuration: %s", self.modules)
                self.have_modules = True

                self.do_load_modules(self.modules)
                # and start external modules too
                self.modules_manager.start_external_instances()
            else:
                logger.info("I do not have modules")

        if received_conf_part:
            logger.info("Loading configuration...")

            # Propagate the global parameters to the configuration items
            received_conf_part.explode_global_conf()

            # We give the configuration to our scheduler
            self.sched.reset()
            self.sched.load_conf(self.cur_conf['instance_id'],
                                 self.cur_conf['instance_name'],
                                 received_conf_part)

            # Once loaded, the scheduler has an inner pushed_conf object
            logger.info("Loaded: %s", self.sched.pushed_conf)

            # Update the scheduler ticks according to the daemon configuration
            self.sched.update_recurrent_works_tick(self)

            # We must update our pushed configuration macros with correct values
            # from the configuration parameters
            # self.sched.pushed_conf.fill_resource_macros_names_macros()

            # Creating the Macroresolver Class & unique instance
            m_solver = MacroResolver()
            m_solver.init(received_conf_part)

            # Now create the external commands manager
            # We are an applyer: our role is not to dispatch commands, but to apply them
            ecm = ExternalCommandManager(
                received_conf_part, 'applyer', self.sched,
                received_conf_part.accept_passive_unknown_check_results,
                received_conf_part.log_external_commands)

            # Scheduler needs to know about this external command manager to use it if necessary
            self.sched.external_commands_manager = ecm

            # Ok now we can load the retention data
            self.sched.retention_load()

            # Log hosts/services initial states
            self.sched.log_initial_states()

        # Create brok new conf
        brok = Brok({'type': 'new_conf', 'data': {}})
        self.sched.add_brok(brok)

        # Initialize connection with all our satellites
        logger.info("Initializing connection with my satellites:")
        my_satellites = self.get_links_of_type(s_type='')
        for satellite in list(my_satellites.values()):
            logger.info("- : %s/%s", satellite.type, satellite.name)
            if not self.daemon_connection_init(satellite):
                logger.error("Satellite connection failed: %s", satellite)

        if received_conf_part:
            # Enable the scheduling process
            logger.info("Loaded: %s", self.sched.pushed_conf)
            self.sched.start_scheduling()

    # Now I have a configuration!
    self.have_conf = True
def setup_with_file(self, paths, add_default=True):
    self.time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    if not isinstance(paths, list):
        paths = [paths]  # Fix for modules tests
        add_default = False  # Don't mix config
    if add_default:
        paths.insert(0, 'etc/alignak_1r_1h_1s.cfg')
    self.config_files = paths
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()

    # If we got one arbiter defined here (before default) we should be in a case where
    # the tester wants to load/test a module, so we simulate an arbiter daemon
    # and the modules loading phase. As it has its own modulesmanager, it should
    # not impact the scheduler modules, especially as we are asking for arbiter type :)
    if len(self.conf.arbiters) == 1:
        arbdaemon = Arbiter([''], [''], False, False, None, None)
        # only load if the module_dir is really existing, so it was set explicitly
        # in the test configuration
        if os.path.exists(getattr(self.conf, 'modules_dir', '')):
            arbdaemon.modules_dir = self.conf.modules_dir
        arbdaemon.load_modules_manager()

        # we request the instances without them being *started*
        # (for those that are concerned ("external" modules):
        # we will *start* these instances after we have been daemonized (if requested)
        me = None
        for arb in self.conf.arbiters:
            me = arb
            arbdaemon.modules_manager.set_modules(arb.modules)
        arbdaemon.do_load_modules()
        arbdaemon.load_modules_configuration_objects(raw_objects)

    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    #import pdb;pdb.set_trace()
    self.conf.linkify_templates()
    #import pdb;pdb.set_trace()
    self.conf.apply_inheritance()
    #import pdb;pdb.set_trace()
    self.conf.explode()
    #print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    #print "conf.services has %d elements" % len(self.conf.services)
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        self.conf.dump()
        return
    self.conf.clean()

    self.confs = self.conf.cut_into_parts()
    self.conf.prepare_for_sending()
    self.conf.show_errors()
    self.dispatcher = Dispatcher(self.conf, self.me)

    scheddaemon = Alignak(None, False, False, False, None, None)
    self.scheddaemon = scheddaemon
    self.sched = scheddaemon.sched
    scheddaemon.modules_dir = modules_dir
    scheddaemon.load_modules_manager()
    # Remember to clean the logs we just created before launching tests
    self.clear_logs()
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf)
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2

    self.sched.conf.accept_passive_unknown_check_results = False

    self.sched.schedule()
class AlignakTest(unittest.TestCase):

    time_hacker = TimeHacker()
    maxDiff = None

    if sys.version_info < (2, 7):
        def assertRegex(self, *args, **kwargs):
            return self.assertRegexpMatches(*args, **kwargs)

    def setup_logger(self):
        """ Setup a log collector

        :return:
        """
        self.logger = logging.getLogger("alignak")

        # Add collector for test purpose.
        collector_h = CollectorHandler()
        collector_h.setFormatter(DEFAULT_FORMATTER_NAMED)
        self.logger.addHandler(collector_h)

    def files_update(self, files, replacements):
        """Update files content with the defined replacements

        :param files: list of files to parse and replace
        :param replacements: list of values to replace
        :return:
        """
        for filename in files:
            lines = []
            with open(filename) as infile:
                for line in infile:
                    for src, target in replacements.iteritems():
                        line = line.replace(src, target)
                    lines.append(line)
            with open(filename, 'w') as outfile:
                for line in lines:
                    outfile.write(line)

    def setup_with_file(self, configuration_file):
        """ Load alignak with the defined configuration file

        If the configuration loading fails, a SystemExit exception is raised to the caller.

        The conf_is_correct property indicates if the configuration loading succeeded or failed.

        The configuration errors property contains a list of the error messages that are
        normally logged as ERROR by the arbiter.

        @verified

        :param configuration_file: path + file name of the main configuration file
        :type configuration_file: str
        :return: None
        """
        self.broks = {}
        self.schedulers = {}
        self.brokers = {}
        self.pollers = {}
        self.receivers = {}
        self.reactionners = {}
        self.arbiter = None
        self.conf_is_correct = False
        self.configuration_warnings = []
        self.configuration_errors = []

        # Add collector for test purpose.
        self.setup_logger()

        # Initialize the Arbiter with no daemon configuration file
        self.arbiter = Arbiter(None, [configuration_file], False, False, False, False,
                               '/tmp/arbiter.log', 'arbiter-master')

        try:
            # The following is copy pasted from setup_alignak_logger
            # The only difference is that we keep the logger at INFO level to gather messages
            # This is needed to assert later on the logs we received.
            self.logger.setLevel(logging.INFO)
            # Force the debug level if the daemon is said to start with such level
            if self.arbiter.debug:
                self.logger.setLevel(logging.DEBUG)

            # Log will be broks
            for line in self.arbiter.get_header():
                self.logger.info(line)

            self.arbiter.load_monitoring_config_file()

            # If this assertion does not match, then there is a bug in the arbiter :)
            self.assertTrue(self.arbiter.conf.conf_is_correct)
            self.conf_is_correct = True
            self.configuration_warnings = self.arbiter.conf.configuration_warnings
            self.configuration_errors = self.arbiter.conf.configuration_errors
        except SystemExit:
            self.configuration_warnings = self.arbiter.conf.configuration_warnings
            print("Configuration warnings:")
            for msg in self.configuration_warnings:
                print(" - %s" % msg)
            self.configuration_errors = self.arbiter.conf.configuration_errors
            print("Configuration errors:")
            for msg in self.configuration_errors:
                print(" - %s" % msg)
            raise

        for arb in self.arbiter.conf.arbiters:
            if arb.get_name() == self.arbiter.arbiter_name:
                self.arbiter.myself = arb
        self.arbiter.dispatcher = Dispatcher(self.arbiter.conf, self.arbiter.myself)
        self.arbiter.dispatcher.prepare_dispatch()

        # Build schedulers dictionary with the schedulers involved in the configuration
        for scheduler in self.arbiter.dispatcher.schedulers:
            sched = Alignak([], False, False, True, '/tmp/scheduler.log')
            sched.load_modules_manager(scheduler.name)
            sched.new_conf = scheduler.conf_package
            if sched.new_conf:
                sched.setup_new_conf()
            self.schedulers[scheduler.scheduler_name] = sched

        # Build pollers dictionary with the pollers involved in the configuration
        for poller in self.arbiter.dispatcher.pollers:
            self.pollers[poller.poller_name] = poller

        # Build receivers dictionary with the receivers involved in the configuration
        for receiver in self.arbiter.dispatcher.receivers:
            self.receivers[receiver.receiver_name] = receiver

        # Build reactionners dictionary with the reactionners involved in the configuration
        for reactionner in self.arbiter.dispatcher.reactionners:
            self.reactionners[reactionner.reactionner_name] = reactionner

        # Build brokers dictionary with the brokers involved in the configuration
        for broker in self.arbiter.dispatcher.brokers:
            self.brokers[broker.broker_name] = broker

        # Initialize the Receiver with no daemon configuration file
        self.receiver = Receiver(None, False, False, False, False)

        # Initialize the Broker with no daemon configuration file
        self.broker = Broker(None, False, False, False, False)

        # External commands manager default mode; default is the applyer (scheduler) mode
        self.ecm_mode = 'applyer'

        # Now we create an external commands manager in dispatcher mode
        self.arbiter.external_commands_manager = ExternalCommandManager(self.arbiter.conf,
                                                                        'dispatcher',
                                                                        self.arbiter,
                                                                        accept_unknown=True)

        # Now we get the external commands manager of our scheduler
        self.eca = None
        if 'scheduler-master' in self.schedulers:
            self._sched = self.schedulers['scheduler-master'].sched
            self.eca = self.schedulers['scheduler-master'].sched.external_commands_manager

        # Now we create an external commands manager in receiver mode
        self.ecr = ExternalCommandManager(self.receiver.cur_conf, 'receiver', self.receiver,
                                          accept_unknown=True)

        # and an external commands manager in dispatcher mode
        self.ecd = ExternalCommandManager(self.arbiter.conf, 'dispatcher', self.arbiter,
                                          accept_unknown=True)

    def fake_check(self, ref, exit_status, output="OK"):
        """ Simulate a check execution and result

        :param ref: host/service concerned by the check
        :param exit_status: check exit status code (0, 1, ...).
               If set to None, the check is simply scheduled but not "executed"
        :param output: check output (output + perf data)
        :return:
        """
        now = time.time()
        check = ref.schedule(self.schedulers['scheduler-master'].sched.hosts,
                             self.schedulers['scheduler-master'].sched.services,
                             self.schedulers['scheduler-master'].sched.timeperiods,
                             self.schedulers['scheduler-master'].sched.macromodulations,
                             self.schedulers['scheduler-master'].sched.checkmodulations,
                             self.schedulers['scheduler-master'].sched.checks,
                             force=True, force_time=None)
        # now the check is scheduled and we get it in the action queue
        self.schedulers['scheduler-master'].sched.add(check)  # check is now in sched.checks[]

        # Allows to force check scheduling without setting its status nor output.
        # Useful for manual business rules rescheduling, for instance.
        if exit_status is None:
            return

        # fake execution
        check.check_time = now

        # and lie about when we will launch it because
        # if not, the schedule call for ref
        # will not really reschedule it because there
        # is a valid value in the future
        ref.next_chk = now - 0.5

        # Max plugin output is default to 8192
        check.get_outputs(output, 8192)
        check.exit_status = exit_status
        check.execution_time = 0.001
        check.status = 'waitconsume'

        # Put the check result in the waiting results for the scheduler ...
        self.schedulers['scheduler-master'].sched.waiting_results.put(check)

    def scheduler_loop(self, count, items, mysched=None):
        """ Manage scheduler checks

        @verified

        :param count: number of checks to pass
        :type count: int
        :param items: list of list [[object, exit_status, output]]
        :type items: list
        :param mysched: The scheduler
        :type mysched: None | object
        :return: None
        """
        if mysched is None:
            mysched = self.schedulers['scheduler-master']

        macroresolver = MacroResolver()
        macroresolver.init(mysched.conf)

        for num in range(count):
            for item in items:
                (obj, exit_status, output) = item
                if len(obj.checks_in_progress) == 0:
                    for i in mysched.sched.recurrent_works:
                        (name, fun, nb_ticks) = mysched.sched.recurrent_works[i]
                        if nb_ticks == 1:
                            fun()
                self.assertGreater(len(obj.checks_in_progress), 0)
                chk = mysched.sched.checks[obj.checks_in_progress[0]]
                chk.set_type_active()
                chk.check_time = time.time()
                chk.wait_time = 0.0001
                chk.last_poll = chk.check_time
                chk.output = output
                chk.exit_status = exit_status
                mysched.sched.waiting_results.put(chk)

            for i in mysched.sched.recurrent_works:
                (name, fun, nb_ticks) = mysched.sched.recurrent_works[i]
                if nb_ticks == 1:
                    fun()

    def manage_external_command(self, external_command, run=True):
        """Manage an external command.

        :return: result of external command resolution
        """
        ext_cmd = ExternalCommand(external_command)
        if self.ecm_mode == 'applyer':
            res = None
            self._scheduler.run_external_command(external_command)
            self.external_command_loop()
        if self.ecm_mode == 'dispatcher':
            res = self.ecd.resolve_command(ext_cmd)
            if res and run:
                self.arbiter.broks = {}
                self.arbiter.add(ext_cmd)
                self.arbiter.push_external_commands_to_schedulers()
                # Our scheduler
                self._scheduler = self.schedulers['scheduler-master'].sched
                # Our broker
                self._broker = self._scheduler.brokers['broker-master']
                for brok in self.arbiter.broks:
                    print("Brok: %s : %s" % (brok, self.arbiter.broks[brok]))
                    self._broker['broks'][brok] = self.arbiter.broks[brok]
        if self.ecm_mode == 'receiver':
            res = self.ecr.resolve_command(ext_cmd)
            if res and run:
                self.receiver.broks = {}
                self.receiver.add(ext_cmd)
                self.receiver.push_external_commands_to_schedulers()
                # Our scheduler
                self._scheduler = self.schedulers['scheduler-master'].sched
                # Our broker
                self._broker = self._scheduler.brokers['broker-master']
                for brok in self.receiver.broks:
                    print("Brok: %s : %s" % (brok, self.receiver.broks[brok]))
                    self._broker['broks'][brok] = self.receiver.broks[brok]
        return res

    def external_command_loop(self):
        """Execute the scheduler actions for external commands.

        The scheduler is not an ECM 'dispatcher' but an 'applyer' ... so this function is on
        the external command execution side of the problem.

        @verified
        :return:
        """
        for i in self.schedulers['scheduler-master'].sched.recurrent_works:
            (name, fun, nb_ticks) = self.schedulers['scheduler-master'].sched.recurrent_works[i]
            if nb_ticks == 1:
                fun()
        self.assert_no_log_match("External command Brok could not be sent to any daemon!")

    def worker_loop(self, verbose=True):
        self.schedulers['scheduler-master'].sched.delete_zombie_checks()
        self.schedulers['scheduler-master'].sched.delete_zombie_actions()
        checks = self.schedulers['scheduler-master'].sched.get_to_run_checks(
            True, False, worker_name='tester')
        actions = self.schedulers['scheduler-master'].sched.get_to_run_checks(
            False, True, worker_name='tester')
        if verbose is True:
            self.show_actions()
        for a in actions:
            a.status = 'inpoller'
            a.check_time = time.time()
            a.exit_status = 0
            self.schedulers['scheduler-master'].sched.put_results(a)
        if verbose is True:
            self.show_actions()

    def launch_internal_check(self, svc_br):
        """ Launch an internal check for the business rule service provided """
        # Launch an internal check
        now = time.time()
        self._sched.add(svc_br.launch_check(now - 1, self._sched.hosts, self._sched.services,
                                            self._sched.timeperiods,
                                            self._sched.macromodulations,
                                            self._sched.checkmodulations, self._sched.checks))
        c = svc_br.actions[0]
        self.assertEqual(True, c.internal)
        self.assertTrue(c.is_launchable(now))

        # ask the scheduler to launch this check
        # and ask 2 loops: one to launch the check
        # and another to get the result
        self.scheduler_loop(2, [])

        # We should not have the check anymore
        self.assertEqual(0, len(svc_br.actions))

    def show_logs(self, scheduler=False):
        """ Show logs. Get logs collected by the collector handler and print them

        @verified
        :param scheduler:
        :return:
        """
        print "--- logs <<<----------------------------------"
        collector_h = [hand for hand in self.logger.handlers
                       if isinstance(hand, CollectorHandler)][0]
        for log in collector_h.collector:
            safe_print(log)
        print "--- logs >>>----------------------------------"

    def show_actions(self):
        print "--- actions <<<----------------------------------"
        actions = sorted(self.schedulers['scheduler-master'].sched.actions.values(),
                         key=lambda x: x.creation_time)
        for a in actions:
            if a.is_a == 'notification':
                item = self.schedulers['scheduler-master'].sched.find_item_by_id(a.ref)
                if item.my_type == "host":
                    ref = "host: %s" % item.get_name()
                else:
                    hst = self.schedulers['scheduler-master'].sched.find_item_by_id(item.host)
                    ref = "host: %s svc: %s" % (hst.get_name(), item.get_name())
                print "NOTIFICATION %s %s %s %s %s %s" % (
                    a.uuid, ref, a.type, time.asctime(time.localtime(a.t_to_go)),
                    a.status, a.contact_name)
            elif a.is_a == 'eventhandler':
                print "EVENTHANDLER:", a
        print "--- actions >>>----------------------------------"

    def show_checks(self):
        """ Show checks from the scheduler

        :return:
        """
        print "--- checks <<<--------------------------------"
        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(),
                        key=lambda x: x.creation_time)
        for check in checks:
            print("- %s" % check)
        print "--- checks >>>--------------------------------"

    def show_and_clear_logs(self):
        """ Prints and then deletes the current logs stored in the log collector

        @verified
        :return:
        """
        self.show_logs()
        self.clear_logs()

    def show_and_clear_actions(self):
        self.show_actions()
        self.clear_actions()

    def count_logs(self):
        """ Count the log lines collected by the log collector

        @verified
        :return:
        """
        collector_h = [hand for hand in self.logger.handlers
                       if isinstance(hand, CollectorHandler)][0]
        return len(collector_h.collector)

    def count_actions(self):
        """ Count the actions in the scheduler's actions.

        @verified
        :return:
        """
        return len(self.schedulers['scheduler-master'].sched.actions.values())

    def clear_logs(self):
        """ Remove all the logs stored in the logs collector

        @verified
        :return:
        """
        collector_h = [hand for hand in self.logger.handlers
                       if isinstance(hand, CollectorHandler)][0]
        collector_h.collector = []

    def clear_actions(self):
        """ Clear the actions in the scheduler's actions.

        @verified
        :return:
        """
        self.schedulers['scheduler-master'].sched.actions = {}

    def assert_actions_count(self, number):
        """ Check the number of actions

        @verified

        :param number: number of actions we must have
        :type number: int
        :return: None
        """
        actions = sorted(self.schedulers['scheduler-master'].sched.actions.values(),
                         key=lambda x: x.creation_time)
        self.assertEqual(number, len(self.schedulers['scheduler-master'].sched.actions),
                         "Not found expected number of actions:\nactions_logs=[[[\n%s\n]]]" %
                         ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, '
                                    'planned: %s, command: %s' %
                                    (idx, b.creation_time, b.is_a, b.type, b.status,
                                     b.t_to_go, b.command)
                                    for idx, b in enumerate(actions))))

    def assert_actions_match(self, index, pattern, field):
        """ Check if pattern verified in field(property) name of the action with index in
        action list

        @verified

        :param index: index in the actions list. If index is -1, all the actions in the list
        are searched for a matching pattern
        :type index: int
        :param pattern: pattern to verify is in the action
        :type pattern: str
        :param field: name of the field (property) of the action
        :type field: str
        :return: None
        """
        regex = re.compile(pattern)
        actions = sorted(self.schedulers['scheduler-master'].sched.actions.values(),
                         key=lambda x: x.creation_time)
        if index != -1:
            myaction = actions[index]
            self.assertTrue(regex.search(getattr(myaction, field)),
                            "Not found a matching pattern in actions:\n"
                            "index=%s field=%s pattern=%r\n"
                            "action_line=creation: %s, is_a: %s, type: %s, "
                            "status: %s, planned: %s, command: %s" % (
                                index, field, pattern, myaction.creation_time, myaction.is_a,
                                myaction.type, myaction.status, myaction.t_to_go,
                                myaction.command))
            return

        for myaction in actions:
            if regex.search(getattr(myaction, field)):
                return

        self.assertTrue(False,
                        "Not found a matching pattern in actions:\nfield=%s pattern=%r\n" %
                        (field, pattern))

    def assert_log_match(self, pattern, index=None):
        """ Search if the log with the index number has the pattern in the Arbiter logs.

        If index is None, then all the collected logs are searched for the pattern

        Logs numbering starts from 0 (the oldest stored log line)

        This function asserts on the search result. Hence, if no log is found with the
        search criteria, an assertion is raised and the test stops on error.

        :param pattern: string to search in log
        :type pattern: str
        :param index: index number
        :type index: int
        :return: None
        """
        self.assertIsNotNone(pattern, "Searched pattern can not be None!")

        collector_h = [hand for hand in self.logger.handlers
                       if isinstance(hand, CollectorHandler)][0]

        regex = re.compile(pattern)
        log_num = 0

        found = False
        for log in collector_h.collector:
            if index is None:
                if regex.search(log):
                    found = True
                    break
            elif index == log_num:
                if regex.search(log):
                    found = True
                    break
            log_num += 1

        self.assertTrue(found,
                        "Not found a matching log line in logs:\nindex=%s pattern=%r\n"
                        "logs=[[[\n%s\n]]]" % (
                            index, pattern,
                            '\n'.join('\t%s=%s' % (idx, b.strip())
                                      for idx, b in enumerate(collector_h.collector))))

    def assert_checks_count(self, number):
        """ Check the number of checks

        @verified

        :param number: number of checks we must have
        :type number: int
        :return: None
        """
        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(),
                        key=lambda x: x.creation_time)
        self.assertEqual(number, len(checks),
                         "Not found expected number of checks:\nchecks_logs=[[[\n%s\n]]]" %
                         ('\n'.join('\t%s = creation: %s, is_a: %s, type: %s, status: %s, '
                                    'planned: %s, command: %s' %
                                    (idx, b.creation_time, b.is_a, b.type, b.status,
                                     b.t_to_go, b.command)
                                    for idx, b in enumerate(checks))))

    def assert_checks_match(self, index, pattern, field):
        """ Check if pattern verified in field(property) name of the check with index in
        check list

        @verified

        :param index: index number of checks list
        :type index: int
        :param pattern: pattern to verify is in the check
        :type pattern: str
        :param field: name of the field (property) of the check
        :type field: str
        :return: None
        """
        regex = re.compile(pattern)
        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(),
                        key=lambda x: x.creation_time)
        mycheck = checks[index]
        self.assertTrue(regex.search(getattr(mycheck, field)),
                        "Not found a matching pattern in checks:\n"
                        "index=%s field=%s pattern=%r\n"
                        "check_line=creation: %s, is_a: %s, type: %s, status: %s, "
                        "planned: %s, command: %s" % (
                            index, field, pattern, mycheck.creation_time, mycheck.is_a,
                            mycheck.type, mycheck.status, mycheck.t_to_go, mycheck.command))

    def _any_check_match(self, pattern, field, assert_not):
        """ Search if any check matches the requested pattern

        @verified
        :param pattern:
        :param field to search with pattern:
        :param assert_not:
        :return:
        """
        regex = re.compile(pattern)
        checks = sorted(self.schedulers['scheduler-master'].sched.checks.values(),
                        key=lambda x: x.creation_time)
        for check in checks:
            if re.search(regex, getattr(check, field)):
                self.assertTrue(not assert_not,
                                "Found check:\nfield=%s pattern=%r\n"
                                "check_line=creation: %s, is_a: %s, type: %s, status: %s, "
                                "planned: %s, command: %s" % (
                                    field, pattern, check.creation_time, check.is_a,
                                    check.type, check.status, check.t_to_go, check.command))
                return

        self.assertTrue(assert_not,
                        "No matching check found:\n"
                        "pattern = %r\n"
                        "checks = %r" % (pattern, checks))

    def assert_any_check_match(self, pattern, field):
        """ Assert if any check matches the pattern

        @verified
        :param pattern:
        :param field to search with pattern:
        :return:
        """
        self._any_check_match(pattern, field, assert_not=False)

    def assert_no_check_match(self, pattern, field):
        """ Assert if no check matches the pattern

        @verified
        :param pattern:
        :param field to search with pattern:
        :return:
        """
        self._any_check_match(pattern, field, assert_not=True)

    def _any_log_match(self, pattern, assert_not):
        """ Search if any collected log matches the requested pattern

        @verified
        :param pattern:
        :param assert_not:
        :return:
        """
        regex = re.compile(pattern)
        collector_h = [hand for hand in self.logger.handlers
                       if isinstance(hand, CollectorHandler)][0]
        for log in collector_h.collector:
            if re.search(regex, log):
                self.assertTrue(not assert_not,
                                "Found matching log line:\n"
                                "pattern = %r\nbrok log = %r" % (pattern, log))
                return

        self.assertTrue(assert_not,
                        "No matching log line found:\n"
                        "pattern = %r\n"
                        "logs broks = %r" % (pattern, collector_h.collector))

    def assert_any_log_match(self, pattern):
        """ Assert if any log matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_log_match(pattern, assert_not=False)

    def assert_no_log_match(self, pattern):
        """ Assert if no log matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_log_match(pattern, assert_not=True)

    def _any_brok_match(self, pattern, level, assert_not):
        """ Search if any brok message in the Scheduler broks matches the requested pattern
        and requested level

        @verified
        :param pattern:
        :param assert_not:
        :return:
        """
        regex = re.compile(pattern)

        monitoring_logs = []
        for brok in self._sched.brokers['broker-master']['broks'].itervalues():
            if brok.type == 'monitoring_log':
                data = unserialize(brok.data)
                monitoring_logs.append((data['level'], data['message']))
                if re.search(regex, data['message']) and (level is None
                                                          or data['level'] == level):
                    self.assertTrue(not assert_not,
                                    "Found matching brok:\n"
                                    "pattern = %r\nbrok message = %r" % (pattern,
                                                                         data['message']))
                    return

        self.assertTrue(assert_not,
                        "No matching brok found:\n"
                        "pattern = %r\n"
                        "brok message = %r" % (pattern, monitoring_logs))

    def assert_any_brok_match(self, pattern, level=None):
        """ Search if any brok message in the Scheduler broks matches the requested pattern
        and requested level

        @verified
        :param pattern:
        :return:
        """
        self._any_brok_match(pattern, level, assert_not=False)

    def assert_no_brok_match(self, pattern, level=None):
        """ Search if no brok message in the Scheduler broks matches the requested pattern
        and requested level

        @verified
        :param pattern:
        :return:
        """
        self._any_brok_match(pattern, level, assert_not=True)

    def get_log_match(self, pattern):
        regex = re.compile(pattern)
        res = []
        collector_h = [hand for hand in self.logger.handlers
                       if isinstance(hand, CollectorHandler)][0]
        for log in collector_h.collector:
            if re.search(regex, log):
                res.append(log)
        return res

    def print_header(self):
        print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
        print "#" + string.center(self.id(), 78) + "#"
        print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"

    def xtest_conf_is_correct(self):
        self.print_header()
        self.assertTrue(self.conf.conf_is_correct)

    def show_configuration_logs(self):
        """ Prints the configuration logs

        @verified
        :return:
        """
        print("Configuration warnings:")
        for msg in self.configuration_warnings:
            print(" - %s" % msg)
        print("Configuration errors:")
        for msg in self.configuration_errors:
            print(" - %s" % msg)

    def _any_cfg_log_match(self, pattern, assert_not):
        """ Search a pattern in the configuration log (warning and error)

        @verified
        :param pattern:
        :return:
        """
        regex = re.compile(pattern)

        cfg_logs = self.configuration_warnings + self.configuration_errors

        for log in cfg_logs:
            if re.search(regex, log):
                self.assertTrue(not assert_not,
                                "Found matching log line:\n"
                                "pattern = %r\nlog = %r" % (pattern, log))
                return

        self.assertTrue(assert_not,
                        "No matching log line found:\n"
                        "pattern = %r\n"
                        "logs = %r" % (pattern, cfg_logs))

    def assert_any_cfg_log_match(self, pattern):
        """ Assert if any configuration log matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_cfg_log_match(pattern, assert_not=False)

    def assert_no_cfg_log_match(self, pattern):
        """ Assert if no configuration log matches the pattern

        @verified
        :param pattern:
        :return:
        """
        self._any_cfg_log_match(pattern, assert_not=True)
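# A hedged usage sketch of the harness above: the configuration path, host name and the
# 'scheduler-master' instance are assumptions borrowed from typical test fixtures, not
# guaranteed by the class itself.
class TestPassiveHost(AlignakTest):
    def test_host_goes_down(self):
        self.setup_with_file('cfg/cfg_default.cfg')  # hypothetical fixture path
        self.assertTrue(self.conf_is_correct)

        host = self._sched.hosts.find_by_name('test_host_0')
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore dependencies for this sketch

        # Drive two DOWN check results through the scheduler recurrent works
        self.scheduler_loop(2, [[host, 2, 'DOWN']])
        self.assert_any_log_match('HOST ALERT.*DOWN')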
def setup_with_file(self, paths, add_default=True):
    self.time_hacker.set_my_time()
    self.print_header()
    # i am arbiter-like
    self.broks = {}
    self.me = None
    self.log = logger
    self.log.load_obj(self)
    if not isinstance(paths, list):
        paths = [paths]  # Fix for modules tests
        add_default = False  # Don't mix config
    if add_default:
        paths.insert(0, 'etc/alignak_1r_1h_1s.cfg')
    self.config_files = paths
    self.conf = Config()
    buf = self.conf.read_config(self.config_files)
    raw_objects = self.conf.read_config_buf(buf)
    self.conf.create_objects_for_type(raw_objects, 'arbiter')
    self.conf.create_objects_for_type(raw_objects, 'module')
    self.conf.early_arbiter_linking()

    # If we got one arbiter defined here (before default) we should be in a case where
    # the tester wants to load/test a module, so we simulate an arbiter daemon
    # and the modules loading phase. As it has its own modulesmanager, it should
    # not impact the scheduler modules, especially as we are asking for arbiter type :)
    if len(self.conf.arbiters) == 1:
        arbdaemon = Arbiter([''], [''], False, False, None, None)
        arbdaemon.load_modules_manager()

        # we request the instances without them being *started*
        # (for those that are concerned ("external" modules):
        # we will *start* these instances after we have been daemonized (if requested)
        me = None
        for arb in self.conf.arbiters:
            me = arb
            arbdaemon.do_load_modules(arb.modules)
        arbdaemon.load_modules_configuration_objects(raw_objects)

    self.conf.create_objects(raw_objects)
    self.conf.instance_id = 0
    self.conf.instance_name = 'test'
    # Hack push_flavor, that is set by the dispatcher
    self.conf.push_flavor = 0
    self.conf.load_triggers()
    #import pdb;pdb.set_trace()
    self.conf.linkify_templates()
    #import pdb;pdb.set_trace()
    self.conf.apply_inheritance()
    #import pdb;pdb.set_trace()
    self.conf.explode()
    #print "Aconf.services has %d elements" % len(self.conf.services)
    self.conf.apply_implicit_inheritance()
    self.conf.fill_default()
    self.conf.remove_templates()
    #print "conf.services has %d elements" % len(self.conf.services)
    self.conf.override_properties()
    self.conf.linkify()
    self.conf.apply_dependencies()
    self.conf.explode_global_conf()
    self.conf.propagate_timezone_option()
    self.conf.create_business_rules()
    self.conf.create_business_rules_dependencies()
    self.conf.is_correct()
    if not self.conf.conf_is_correct:
        print "The conf is not correct, I stop here"
        self.conf.dump()
        return
    self.conf.clean()

    self.confs = self.conf.cut_into_parts()
    self.conf.prepare_for_sending()
    self.conf.show_errors()
    self.dispatcher = Dispatcher(self.conf, self.me)

    scheddaemon = Alignak(None, False, False, False, None, None)
    self.scheddaemon = scheddaemon
    self.sched = scheddaemon.sched
    scheddaemon.load_modules_manager()
    # Remember to clean the logs we just created before launching tests
    self.clear_logs()
    m = MacroResolver()
    m.init(self.conf)
    self.sched.load_conf(self.conf)
    e = ExternalCommandManager(self.conf, 'applyer')
    self.sched.external_command = e
    e.load_scheduler(self.sched)
    e2 = ExternalCommandManager(self.conf, 'dispatcher')
    e2.load_arbiter(self)
    self.external_command_dispatcher = e2

    self.sched.conf.accept_passive_unknown_check_results = False

    self.sched.schedule()
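# For reference, a minimal Python 2 era test built on the harness above; the test class
# name is hypothetical, the configuration path is the harness' own default. Passing a
# string (not a list) disables the default config insertion, so the file loads only once.
class TestSimpleConf(AlignakTest):
    def setUp(self):
        self.setup_with_file('etc/alignak_1r_1h_1s.cfg')

    def test_conf_is_correct(self):
        self.assertTrue(self.conf.conf_is_correct)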
def setup_new_conf(self):  # pylint: disable=too-many-statements
    """Setup new conf received for scheduler

    :return: None
    """
    with self.conf_lock:
        self.clean_previous_run()
        new_conf = self.new_conf
        logger.info("[%s] Sending us a configuration", self.name)
        conf_raw = new_conf['conf']
        override_conf = new_conf['override_conf']
        modules = new_conf['modules']
        satellites = new_conf['satellites']
        instance_name = new_conf['instance_name']

        # Ok now we can save the retention data
        if hasattr(self.sched, 'conf'):
            self.sched.update_retention_file(forced=True)

        # hooray, we got a name, we can set it in our stats objects
        statsmgr.register(instance_name, 'scheduler',
                          statsd_host=new_conf['statsd_host'],
                          statsd_port=new_conf['statsd_port'],
                          statsd_prefix=new_conf['statsd_prefix'],
                          statsd_enabled=new_conf['statsd_enabled'])

        t00 = time.time()
        try:
            conf = unserialize(conf_raw)
        except AlignakClassLookupException as exp:  # pragma: no cover, simple protection
            logger.error('Cannot un-serialize configuration received from arbiter: %s', exp)
        logger.debug("Conf received at %d. Un-serialized in %d secs", t00, time.time() - t00)
        self.new_conf = None

        if 'scheduler_name' in new_conf:
            name = new_conf['scheduler_name']
        else:
            name = instance_name
        self.name = name

        # Set my own process title
        self.set_proctitle(self.name)

        logger.info("[%s] Received a new configuration, containing: ", self.name)
        for key in new_conf:
            logger.info("[%s] - %s", self.name, key)
        logger.info("[%s] configuration identifiers: %s (%s)",
                    self.name, new_conf['conf_uuid'], new_conf['push_flavor'])

        # Tag the conf with our data
        self.conf = conf
        self.conf.push_flavor = new_conf['push_flavor']
        self.conf.alignak_name = new_conf['alignak_name']
        self.conf.instance_name = instance_name
        self.conf.skip_initial_broks = new_conf['skip_initial_broks']
        self.conf.accept_passive_unknown_check_results = \
            new_conf['accept_passive_unknown_check_results']

        self.cur_conf = conf
        self.override_conf = override_conf
        self.modules = unserialize(modules, True)
        self.satellites = satellites

        # Now we create our pollers, reactionners and brokers
        for sat_type in ['pollers', 'reactionners', 'brokers']:
            if sat_type not in satellites:
                continue
            for sat_id in satellites[sat_type]:
                # Must look if we already have it
                sats = getattr(self, sat_type)
                sat = satellites[sat_type][sat_id]

                sats[sat_id] = sat

                if sat['name'] in override_conf['satellitemap']:
                    sat = dict(sat)  # make a copy
                    sat.update(override_conf['satellitemap'][sat['name']])

                proto = 'http'
                if sat['use_ssl']:
                    proto = 'https'
                uri = '%s://%s:%s/' % (proto, sat['address'], sat['port'])

                sats[sat_id]['uri'] = uri
                sats[sat_id]['con'] = None
                sats[sat_id]['running_id'] = 0
                sats[sat_id]['last_connection'] = 0
                sats[sat_id]['connection_attempt'] = 0
                sats[sat_id]['max_failed_connections'] = 3
                setattr(self, sat_type, sats)
            logger.debug("We have our %s: %s ", sat_type, satellites[sat_type])
            logger.info("We have our %s:", sat_type)
            for daemon in satellites[sat_type].values():
                logger.info(" - %s ", daemon['name'])

        # First mix conf and override_conf to have our definitive conf
        for prop in self.override_conf:
            val = self.override_conf[prop]
            setattr(self.conf, prop, val)

        if self.conf.use_timezone != '':
            logger.info("Setting our timezone to %s", str(self.conf.use_timezone))
            os.environ['TZ'] = self.conf.use_timezone
            time.tzset()

        self.do_load_modules(self.modules)

        logger.info("Loading configuration.")
        self.conf.explode_global_conf()  # pylint: disable=E1101

        # we give sched its conf
        self.sched.reset()
        self.sched.load_conf(self.conf)
        self.sched.load_satellites(self.pollers, self.reactionners, self.brokers)

        # We must update our Config dict macro with good value
        # from the config parameters
        self.sched.conf.fill_resource_macros_names_macros()

        # Creating the Macroresolver Class & unique instance
        m_solver = MacroResolver()
        m_solver.init(self.conf)

        # self.conf.dump()
        # self.conf.quick_debug()

        # Now create the external commands manager
        # We are an applyer: our role is not to dispatch commands, but to apply them
        ecm = ExternalCommandManager(self.conf, 'applyer', self.sched)

        # Scheduler needs to know about this external command manager to use it if necessary
        self.sched.set_external_commands_manager(ecm)
        # Update External Commands Manager
        self.sched.external_commands_manager.accept_passive_unknown_check_results = \
            self.sched.conf.accept_passive_unknown_check_results

        # We clear our schedulers managed (it's us :) )
        # and set ourselves in it
        self.schedulers = {self.conf.uuid: self.sched}  # pylint: disable=E1101

        # Ok now we can load the retention data
        self.sched.retention_load()

        # Create brok new conf
        brok = Brok({'type': 'new_conf', 'data': {}})
        self.sched.add_brok(brok)
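# Illustration of the satellitemap override applied above: a minimal, self-contained
# rehearsal of the URI computation (all values are made up).
sat = {'name': 'poller-1', 'address': '10.0.0.5', 'port': 7771, 'use_ssl': False}
override_conf = {'satellitemap': {'poller-1': {'address': '192.168.0.5'}}}

if sat['name'] in override_conf['satellitemap']:
    sat = dict(sat)  # make a copy, as setup_new_conf() does
    sat.update(override_conf['satellitemap'][sat['name']])

proto = 'https' if sat['use_ssl'] else 'http'
uri = '%s://%s:%s/' % (proto, sat['address'], sat['port'])
assert uri == 'http://192.168.0.5:7771/'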
class Receiver(Satellite): """Receiver class. Referenced as "app" in most Interface """ my_type = 'receiver' properties = Satellite.properties.copy() properties.update({ 'type': StringProp(default='receiver'), 'port': IntegerProp(default=7773) }) def __init__(self, **kwargs): """Receiver daemon initialisation :param kwargs: command line arguments """ super(Receiver, self).__init__(kwargs.get('daemon_name', 'Default-receiver'), **kwargs) # Our schedulers and arbiters are initialized in the base class # Our related daemons # self.pollers = {} # self.reactionners = {} # Modules are load one time self.have_modules = False # Now an external commands manager and a list for the external_commands self.external_commands_manager = None # and the unprocessed one, a buffer self.unprocessed_external_commands = [] self.accept_passive_unknown_check_results = False self.http_interface = GenericInterface(self) def add(self, elt): """Generic function to add objects to the daemon internal lists. Manage Broks, External commands :param elt: object to add :type elt: alignak.AlignakObject :return: None """ # external commands may be received as a dictionary when pushed from the WebUI if isinstance(elt, dict) and 'my_type' in elt and elt['my_type'] == "externalcommand": if 'cmd_line' not in elt: logger.debug("Received a bad formated external command: %s. " "No cmd_line!", elt) return logger.debug("Received a dictionary external command: %s", elt) if 'creation_timestamp' not in elt: elt['creation_timestamp'] = None elt = ExternalCommand(elt['cmd_line'], elt['creation_timestamp']) if isinstance(elt, Brok): # For brok, we tag the brok with our instance_id elt.instance_id = self.instance_id if elt.type == 'monitoring_log': # The brok is a monitoring event with self.events_lock: self.events.append(elt) statsmgr.counter('events', 1) else: with self.broks_lock: self.broks.append(elt) statsmgr.counter('broks.added', 1) elif isinstance(elt, ExternalCommand): logger.debug("Queuing an external command: %s", str(ExternalCommand.__dict__)) self.unprocessed_external_commands.append(elt) statsmgr.counter('external-commands.added', 1) def setup_new_conf(self): """Receiver custom setup_new_conf method This function calls the base satellite treatment and manages the configuration needed for a receiver daemon: - get and configure its satellites - configure the modules :return: None """ # Execute the base class treatment... super(Receiver, self).setup_new_conf() # ...then our own specific treatment! with self.conf_lock: # self_conf is our own configuration from the alignak environment # self_conf = self.cur_conf['self_conf'] logger.debug("Got config: %s", self.cur_conf) # Configure and start our modules if not self.have_modules: try: self.modules = unserialize(self.cur_conf['modules'], no_load=True) except AlignakClassLookupException as exp: # pragma: no cover, simple protection logger.error('Cannot un-serialize modules configuration ' 'received from arbiter: %s', exp) if self.modules: logger.info("I received some modules configuration: %s", self.modules) self.have_modules = True self.do_load_modules(self.modules) # and start external modules too self.modules_manager.start_external_instances() else: logger.info("I do not have modules") # Now create the external commands manager # We are a receiver: our role is to get and dispatch commands to the schedulers global_conf = self.cur_conf.get('global_conf', None) if not global_conf: logger.error("Received a configuration without any global_conf! 
" "This may hide a configuration problem with the " "realms and the manage_sub_realms of the satellites!") global_conf = { 'accept_passive_unknown_check_results': False, 'log_external_commands': True } self.external_commands_manager = \ ExternalCommandManager(None, 'receiver', self, global_conf.get( 'accept_passive_unknown_check_results', False), global_conf.get( 'log_external_commands', False)) # Initialize connection with all our satellites logger.info("Initializing connection with my satellites:") my_satellites = self.get_links_of_type(s_type='') for satellite in list(my_satellites.values()): logger.info("- : %s/%s", satellite.type, satellite.name) if not self.daemon_connection_init(satellite): logger.error("Satellite connection failed: %s", satellite) # Now I have a configuration! self.have_conf = True def get_external_commands_from_arbiters(self): """Get external commands from our arbiters As of now, only the arbiter are requested to provide their external commands that the receiver will push to all the known schedulers to make them being executed. :return: None """ for arbiter_link_uuid in self.arbiters: link = self.arbiters[arbiter_link_uuid] if not link.active: logger.debug("The arbiter '%s' is not active, it is not possible to get " "its external commands!", link.name) continue try: logger.debug("Getting external commands from: %s", link.name) external_commands = link.get_external_commands() if external_commands: logger.debug("Got %d commands from: %s", len(external_commands), link.name) else: # Simple protection against None value external_commands = [] for external_command in external_commands: self.add(external_command) except LinkError: logger.warning("Arbiter connection failed, I could not get external commands!") except Exception as exp: # pylint: disable=broad-except logger.error("Arbiter connection failed, I could not get external commands!") logger.exception("Exception: %s", exp) def push_external_commands_to_schedulers(self): """Push received external commands to the schedulers :return: None """ if not self.unprocessed_external_commands: return # Those are the global external commands commands_to_process = self.unprocessed_external_commands self.unprocessed_external_commands = [] logger.debug("Commands: %s", commands_to_process) # Now get all external commands and put them into the good schedulers logger.debug("Commands to process: %d commands", len(commands_to_process)) for ext_cmd in commands_to_process: cmd = self.external_commands_manager.resolve_command(ext_cmd) logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd) if cmd and cmd['global']: # Send global command to all our schedulers for scheduler_link_uuid in self.schedulers: self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd) # Now for all active schedulers, send the commands count_pushed_commands = 0 count_failed_commands = 0 for scheduler_link_uuid in self.schedulers: link = self.schedulers[scheduler_link_uuid] if not link.active: logger.debug("The scheduler '%s' is not active, it is not possible to push " "external commands to its connection!", link.name) continue # If there are some commands for this scheduler... 
    def push_external_commands_to_schedulers(self):
        """Push the received external commands to the schedulers

        :return: None
        """
        if not self.unprocessed_external_commands:
            return

        # Take the whole buffer of pending commands
        commands_to_process = self.unprocessed_external_commands
        self.unprocessed_external_commands = []
        logger.debug("Commands: %s", commands_to_process)

        # Now resolve all the external commands and route them to the good schedulers
        logger.debug("Commands to process: %d commands", len(commands_to_process))
        for ext_cmd in commands_to_process:
            cmd = self.external_commands_manager.resolve_command(ext_cmd)
            logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
            if cmd and cmd['global']:
                # Send a global command to all our schedulers
                for scheduler_link_uuid in self.schedulers:
                    self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)

        # Now, for all the active schedulers, send the commands
        count_pushed_commands = 0
        count_failed_commands = 0
        for scheduler_link_uuid in self.schedulers:
            link = self.schedulers[scheduler_link_uuid]

            if not link.active:
                logger.debug("The scheduler '%s' is not active, it is not possible to push "
                             "external commands to its connection!", link.name)
                continue

            # If there are some commands for this scheduler...
            commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
            if not commands:
                logger.debug("The scheduler '%s' has no commands.", link.name)
                continue

            logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
            sent = []
            try:
                sent = link.push_external_commands(commands)
            except LinkError:
                logger.warning("Scheduler connection failed, I could not push external commands!")

            # Whether we sent the commands or not, clean the scheduler list
            link.pushed_commands = []

            # If we could not send them, keep the commands for a next try
            if sent:
                statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
                count_pushed_commands += len(commands)
            else:
                count_failed_commands += len(commands)
                statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
                # Keep the unsent commands... for a next try
                self.external_commands.extend(commands)

        statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
        statsmgr.gauge('external-commands.failed.all', count_failed_commands)

    def do_loop_turn(self):
        """Receiver daemon main loop

        :return: None
        """
        # Begin to clean modules
        self.check_and_del_zombie_modules()

        # Maybe the arbiter pushed a new configuration...
        if self.watch_for_new_conf(timeout=0.05):
            logger.info("I got a new configuration...")
            # Manage the new configuration
            self.setup_new_conf()

        # Maybe external modules raised 'objects', we should get them
        _t0 = time.time()
        self.get_objects_from_from_queues()
        statsmgr.timer('core.get-objects-from-queues', time.time() - _t0)

        # Get external commands from the arbiters...
        _t0 = time.time()
        self.get_external_commands_from_arbiters()
        statsmgr.timer('external-commands.got.time', time.time() - _t0)
        statsmgr.gauge('external-commands.got.count', len(self.unprocessed_external_commands))

        _t0 = time.time()
        self.push_external_commands_to_schedulers()
        statsmgr.timer('external-commands.pushed.time', time.time() - _t0)

        # Say to the modules it's a new tick :)
        _t0 = time.time()
        self.hook_point('tick')
        statsmgr.timer('hook.tick', time.time() - _t0)

    def get_daemon_stats(self, details=False):
        """Augment the stats provided by the Daemon base class

        :return: stats dictionary
        :rtype: dict
        """
        # Call the base Daemon one
        res = super(Receiver, self).get_daemon_stats(details=details)

        res.update({'name': self.name, 'type': self.type})

        counters = res['counters']
        counters['external-commands'] = len(self.external_commands)
        counters['external-commands-unprocessed'] = len(self.unprocessed_external_commands)

        return res

    def main(self):
        """Main receiver function
        Init the daemon and loop forever

        :return: None
        """
        try:
            # Start the daemon mode
            if not self.do_daemon_init_and_start():
                self.exit_on_error(message="Daemon initialization error", exit_code=3)

            # We wait for the initial conf
            self.wait_for_initial_conf()
            if self.new_conf:
                # Setup the received configuration
                self.setup_new_conf()

                # Now the main loop
                self.do_main_loop()
                logger.info("Exited from the main loop.")

            self.request_stop()
        except Exception:  # pragma: no cover, this should never happen indeed ;)
            self.exit_on_exception(traceback.format_exc())
            raise
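# A minimal launch sketch (hypothetical: the real Alignak entry point parses the
# command line and environment before instantiating the daemon, so the exact
# keyword arguments may differ):
if __name__ == '__main__':
    daemon = Receiver(daemon_name='receiver-master')
    daemon.main()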
def setup_new_conf(self):
    """Setup the new conf received for the scheduler

    :return: None
    """
    with self.conf_lock:
        new_c = self.new_conf
        conf_raw = new_c['conf']
        override_conf = new_c['override_conf']
        modules = new_c['modules']
        satellites = new_c['satellites']
        instance_name = new_c['instance_name']
        push_flavor = new_c['push_flavor']
        skip_initial_broks = new_c['skip_initial_broks']
        accept_passive_unknown_chk_res = new_c['accept_passive_unknown_check_results']
        api_key = new_c['api_key']
        secret = new_c['secret']
        http_proxy = new_c['http_proxy']
        statsd_host = new_c['statsd_host']
        statsd_port = new_c['statsd_port']
        statsd_prefix = new_c['statsd_prefix']
        statsd_enabled = new_c['statsd_enabled']

        # Hooray, we got a name, we can set it in our stats objects
        statsmgr.register(self.sched, instance_name, 'scheduler',
                          api_key=api_key, secret=secret, http_proxy=http_proxy,
                          statsd_host=statsd_host, statsd_port=statsd_port,
                          statsd_prefix=statsd_prefix, statsd_enabled=statsd_enabled)

        t00 = time.time()
        conf = cPickle.loads(conf_raw)
        logger.debug("Conf received at %d. Unserialized in %d secs", t00, time.time() - t00)
        self.new_conf = None

        # Tag the conf with our data
        self.conf = conf
        self.conf.push_flavor = push_flavor
        self.conf.instance_name = instance_name
        self.conf.skip_initial_broks = skip_initial_broks
        self.conf.accept_passive_unknown_check_results = accept_passive_unknown_chk_res

        self.cur_conf = conf
        self.override_conf = override_conf
        self.modules = modules
        self.satellites = satellites
        # self.pollers = self.app.pollers

        if self.conf.human_timestamp_log:  # pylint: disable=E1101
            logger.set_human_format()

        # Now we create our pollers
        for pol_id in satellites['pollers']:
            # Must look if we already have it
            already_got = pol_id in self.pollers
            poll = satellites['pollers'][pol_id]
            self.pollers[pol_id] = poll

            if poll['name'] in override_conf['satellitemap']:
                poll = dict(poll)  # make a copy
                poll.update(override_conf['satellitemap'][poll['name']])

            proto = 'http'
            if poll['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, poll['address'], poll['port'])
            self.pollers[pol_id]['uri'] = uri
            self.pollers[pol_id]['last_connection'] = 0

        # Now we create our reactionners
        for reac_id in satellites['reactionners']:
            # Must look if we already have it
            already_got = reac_id in self.reactionners
            reac = satellites['reactionners'][reac_id]
            self.reactionners[reac_id] = reac

            if reac['name'] in override_conf['satellitemap']:
                reac = dict(reac)  # make a copy
                reac.update(override_conf['satellitemap'][reac['name']])

            proto = 'http'
            # Bug fix: test the reactionner's own use_ssl flag, not the last poller's
            if reac['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, reac['address'], reac['port'])
            self.reactionners[reac_id]['uri'] = uri
            self.reactionners[reac_id]['last_connection'] = 0

        # First mix conf and override_conf to have our definitive conf
        for prop in self.override_conf:
            val = self.override_conf[prop]
            setattr(self.conf, prop, val)

        if self.conf.use_timezone != '':
            logger.debug("Setting our timezone to %s", str(self.conf.use_timezone))
            os.environ['TZ'] = self.conf.use_timezone
            time.tzset()

        if len(self.modules) != 0:
            logger.debug("I've got %s modules", str(self.modules))

        # TODO: if the scheduler had previous modules instantiated, it must clean them!
        self.do_load_modules(self.modules)

        logger.info("Loading configuration.")
        self.conf.explode_global_conf()

        # We give the scheduler its conf
        self.sched.reset()
        self.sched.load_conf(self.conf)
        self.sched.load_satellites(self.pollers, self.reactionners)

        # We must update our Config dict macros with good values
        # from the config parameters
        self.sched.conf.fill_resource_macros_names_macros()
        # print "DBG: got macros", self.sched.conf.macros

        # Creating the MacroResolver class & unique instance
        m_solver = MacroResolver()
        m_solver.init(self.conf)

        # self.conf.dump()
        # self.conf.quick_debug()

        # Now create the external commands manager.
        # It's an applyer: its role is not to dispatch commands, but to apply them
        ecm = ExternalCommandManager(self.conf, 'applyer')

        # The scheduler needs to know about external commands to activate them
        # if necessary
        self.sched.load_external_command(ecm)

        # The external command manager needs the scheduler because it can raise checks
        ecm.load_scheduler(self.sched)

        # We clear the schedulers we manage (it's us :))
        # and set ourself in it
        self.schedulers = {self.conf.instance_id: self.sched}
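# Illustration only (not part of the daemon): the satellite URI computed in the
# loops above reduces to this small helper; the name and dict shape are assumptions:
def satellite_uri(sat):
    """Build the http(s) URI for a satellite connection dict."""
    proto = 'https' if sat.get('use_ssl') else 'http'
    return '%s://%s:%s/' % (proto, sat['address'], sat['port'])

# satellite_uri({'use_ssl': False, 'address': '127.0.0.1', 'port': 7771})
# -> 'http://127.0.0.1:7771/'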
def setup_new_conf(self):
    """Setup the new conf received for the scheduler

    :return: None
    """
    with self.conf_lock:
        new_c = self.new_conf
        conf_raw = new_c['conf']
        override_conf = new_c['override_conf']
        modules = new_c['modules']
        satellites = new_c['satellites']
        instance_name = new_c['instance_name']
        push_flavor = new_c['push_flavor']
        skip_initial_broks = new_c['skip_initial_broks']
        accept_passive_unknown_chk_res = new_c['accept_passive_unknown_check_results']
        api_key = new_c['api_key']
        secret = new_c['secret']
        http_proxy = new_c['http_proxy']
        statsd_host = new_c['statsd_host']
        statsd_port = new_c['statsd_port']
        statsd_prefix = new_c['statsd_prefix']
        statsd_enabled = new_c['statsd_enabled']

        # Hooray, we got a name, we can set it in our stats objects
        statsmgr.register(self.sched, instance_name, 'scheduler',
                          api_key=api_key, secret=secret, http_proxy=http_proxy,
                          statsd_host=statsd_host, statsd_port=statsd_port,
                          statsd_prefix=statsd_prefix, statsd_enabled=statsd_enabled)

        t00 = time.time()
        conf = cPickle.loads(conf_raw)
        logger.debug("Conf received at %d. Unserialized in %d secs", t00, time.time() - t00)
        self.new_conf = None

        # Tag the conf with our data
        self.conf = conf
        self.conf.push_flavor = push_flavor
        self.conf.instance_name = instance_name
        self.conf.skip_initial_broks = skip_initial_broks
        self.conf.accept_passive_unknown_check_results = accept_passive_unknown_chk_res

        self.cur_conf = conf
        self.override_conf = override_conf
        self.modules = modules
        self.satellites = satellites
        # self.pollers = self.app.pollers

        if self.conf.human_timestamp_log:
            logger.set_human_format()

        # Now we create our pollers
        for pol_id in satellites['pollers']:
            # Must look if we already have it
            already_got = pol_id in self.pollers
            poll = satellites['pollers'][pol_id]
            self.pollers[pol_id] = poll

            if poll['name'] in override_conf['satellitemap']:
                poll = dict(poll)  # make a copy
                poll.update(override_conf['satellitemap'][poll['name']])

            proto = 'http'
            if poll['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, poll['address'], poll['port'])
            self.pollers[pol_id]['uri'] = uri
            self.pollers[pol_id]['last_connection'] = 0

        # Now we create our reactionners
        for reac_id in satellites['reactionners']:
            # Must look if we already have it
            already_got = reac_id in self.reactionners
            reac = satellites['reactionners'][reac_id]
            self.reactionners[reac_id] = reac

            if reac['name'] in override_conf['satellitemap']:
                reac = dict(reac)  # make a copy
                reac.update(override_conf['satellitemap'][reac['name']])

            proto = 'http'
            # Bug fix: test the reactionner's own use_ssl flag, not the last poller's
            if reac['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, reac['address'], reac['port'])
            self.reactionners[reac_id]['uri'] = uri
            self.reactionners[reac_id]['last_connection'] = 0

        # First mix conf and override_conf to have our definitive conf
        for prop in self.override_conf:
            val = self.override_conf[prop]
            setattr(self.conf, prop, val)

        if self.conf.use_timezone != '':
            logger.debug("Setting our timezone to %s", str(self.conf.use_timezone))
            os.environ['TZ'] = self.conf.use_timezone
            time.tzset()

        if len(self.modules) != 0:
            logger.debug("I've got %s modules", str(self.modules))

        # TODO: if the scheduler had previous modules instantiated, it must clean them!
        self.modules_manager.set_modules(self.modules)
        self.do_load_modules()

        logger.info("Loading configuration.")
        self.conf.explode_global_conf()

        # We give the scheduler its conf
        self.sched.reset()
        self.sched.load_conf(self.conf)
        self.sched.load_satellites(self.pollers, self.reactionners)

        # We must update our Config dict macros with good values
        # from the config parameters
        self.sched.conf.fill_resource_macros_names_macros()
        # print "DBG: got macros", self.sched.conf.macros

        # Creating the MacroResolver class & unique instance
        m_solver = MacroResolver()
        m_solver.init(self.conf)

        # self.conf.dump()
        # self.conf.quick_debug()

        # Now create the external commands manager.
        # It's an applyer: its role is not to dispatch commands, but to apply them
        ecm = ExternalCommandManager(self.conf, 'applyer')

        # The scheduler needs to know about external commands to activate them
        # if necessary
        self.sched.load_external_command(ecm)

        # The external command manager needs the scheduler because it can raise checks
        ecm.load_scheduler(self.sched)

        # We clear the schedulers we manage (it's us :))
        # and set ourself in it
        self.schedulers = {self.conf.instance_id: self.sched}
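# Illustration (assumed data shapes, matching the poller loop above): how a
# satellitemap override changes the computed URI, e.g. for a poller behind NAT:
poll = {'name': 'poller-1', 'use_ssl': False, 'address': '192.168.0.2', 'port': 7771}
satellitemap = {'poller-1': {'address': '10.0.0.5', 'port': 17771}}
if poll['name'] in satellitemap:
    poll = dict(poll)  # make a copy, as in setup_new_conf above
    poll.update(satellitemap[poll['name']])
uri = '%s://%s:%s/' % ('https' if poll['use_ssl'] else 'http', poll['address'], poll['port'])
# uri == 'http://10.0.0.5:17771/'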