def handle_snmp_initial_scan(self, errors, found, res_dict):
    """Group the OIDs found by an initial SNMP scan by their matching scheme
    and hand every group to the SNMP sink, then report the merged result.

    Args:
        errors: list of error strings collected during the scan.
        found: iterable of raw OIDs discovered on the device.
        res_dict: raw scan results, filtered per scheme before dispatch.
    """
    scheme_db = Schemes(self.log)
    grouped = {}
    # bucket OIDs per scheme, keyed by (priority, pk) so higher-priority
    # schemes can be processed first below
    for cur_oid in found:
        scheme = scheme_db.get_scheme_by_oid(cur_oid)
        if not scheme:
            continue
        bucket = grouped.setdefault(
            (scheme.priority, scheme.pk),
            {
                "scheme": scheme,
                "oids": set(),
                "full_name": scheme.full_name,
            }
        )
        bucket["oids"].add(oid_to_str(cur_oid))
    sink = SNMPSink(self.log)
    result = ResultNode(error=errors)
    # highest (priority, pk) first
    for group_key in sorted(grouped, reverse=True):
        entry = grouped[group_key]
        update_result = sink.update(
            self.device,
            entry["scheme"],
            scheme_db.filter_results(res_dict, entry["oids"]),
            entry["oids"],
            self.flags,
        )
        result.merge(update_result)
    self.srv_com.set_result(*result.get_srv_com_result())
    self.finish()
def _check_schemes(self):
    """Collect the locally imported SNMP schemes plus the generated sink
    schemes (handlers with monitoring capability), logging what was found
    and any import errors.
    """
    self.__local_schemes = {}
    if not import_errors:
        self.log("no scheme import errors")
    else:
        self.log(
            "{}:".format(logging_tools.get_plural("import error", len(import_errors))),
            logging_tools.LOG_LEVEL_ERROR
        )
        for err_mod, err_where, err_line in import_errors:
            self.log(
                " {} ({}): {}".format(err_mod, err_where, err_line),
                logging_tools.LOG_LEVEL_ERROR
            )
    self.log("found {}".format(logging_tools.get_plural("scheme", len(snmp_schemes))))
    for scheme_name, scheme_obj in snmp_schemes:
        self.__local_schemes[scheme_name] = scheme_obj
    # schemes contributed by the SNMP sink handlers
    self.snmp_sink = SNMPSink(self.log)
    mon_handlers = [cur_h for cur_h in self.snmp_sink.handlers if cur_h.Meta.mon_check]
    self.log(
        "found {} with monitoring capability".format(
            logging_tools.get_plural("SNMP handler", len(mon_handlers))
        )
    )
    self.__gen_schemes = {}
    for cur_h in mon_handlers:
        for gen_check in cur_h.config_mon_check():
            # generated schemes are namespaced with an "SS:" prefix
            self.__gen_schemes["SS:{}".format(gen_check.Meta.name)] = gen_check
            self.log("found gen scheme '{}' in handler {}".format(gen_check.Meta.name, unicode(cur_h)))
def process_init(self):
    """Per-process initialisation hook.

    Sets up logging, drops the inherited DB connection, wires up command
    handlers / timers and registers the return channel for the SNMP
    process.  Statement order matters: logging is created first so later
    steps can log, the registrations follow.
    """
    global_config.enable_pm(self)
    self.__log_template = logging_tools.get_logger(
        global_config["LOG_NAME"],
        global_config["LOG_DESTINATION"],
        context=self.zmq_context,
    )
    # close database connection
    # NOTE(review): presumably the connection was inherited from the parent
    # process and must not be shared — confirm against db_tools docs
    db_tools.close_connection()
    MotherSimpleCommand.setup(self)
    self.router_obj = config_tools.RouterObject(self.log)
    self.snmp_sink = SNMPSink(self.log)
    self.sc = config_tools.icswServerCheck(
        service_type_enum=icswServiceEnum.mother_server)
    # internal message handlers
    self.register_func("delay_command", self._delay_command)
    self.register_func("hard_control", self._hard_control)
    self.register_func("snmp_finished", self._snmp_finished)
    # poll pending commands every 10 (seconds, presumably — verify the
    # register_timer unit against its implementation)
    self.register_timer(self._check_commands, 10)
    HardControlCommand.setup(self)
    # route "command" return messages back here from the snmp_process
    self.send_pool_message("register_return", "command", target="snmp_process")
def setup(bg_proc):
    """Initialise the class-level state of SNMPJob.

    Args:
        bg_proc: background process object; stored on the class and its
            ``log`` is used to create the shared SNMPSink.
    """
    # bind the background process and its sink first ...
    SNMPJob.bg_proc = bg_proc
    SNMPJob.snmp_sink = SNMPSink(bg_proc.log)
    # ... then reset the job bookkeeping
    SNMPJob.ref_dict = {}
    SNMPJob.run_idx = 0
def __init__(self, log_com, full_build, routing_fingerprint=None, router_obj=None):
    """Build cache to speed up monitoring config generation.

    Prefetches database state (check commands, hosts, traces, host/service
    clusters and dependencies) into in-memory lookup tables.

    Args:
        log_com: logging callable used for all output.
        full_build: when truthy the variable cache is prefilled.
        routing_fingerprint: fingerprint to use on a slave
            (when router_obj is None); the matching trace generation
            must already exist.
        router_obj: router object on the master; supplies the fingerprint
            and creates a trace generation on demand.
    """
    tm = logging_tools.MeasureTime(log_com=self.log)
    self.log_com = log_com
    self.router = routing.SrvTypeRouting(log_com=self.log_com)
    self.instance_xml = InstanceXML(log_com=self.log, quiet=True)
    # build cache to speed up config generation
    # stores various cached objects
    # routing handling
    if router_obj is None:
        # slave, no consumer
        self.consumer = None
        self.routing_fingerprint = routing_fingerprint
        # must exist
        self.__trace_gen = MonHostTraceGeneration.objects.get(
            Q(fingerprint=self.routing_fingerprint))
    else:
        # master, install the egg consumer
        self.consumer = server_mixins.EggConsumeObject(self)
        self.consumer.init(
            {"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
        self.routing_fingerprint = router_obj.fingerprint
        # get generation, creating it on first use
        try:
            self.__trace_gen = MonHostTraceGeneration.objects.get(
                Q(fingerprint=self.routing_fingerprint))
        except MonHostTraceGeneration.DoesNotExist:
            self.log("creating new tracegeneration")
            self.__trace_gen = router_obj.create_trace_generation()
        # delete old ones (traces from other generations)
        MonHostTrace.objects.exclude(
            Q(generation=self.__trace_gen)).delete()
    # global luts
    # print("i0")
    # pk -> mon_check_command instance
    self.mcc_lut_3 = {
        _check.pk: _check for _check in mon_check_command.objects.all()
    }
    # add dummy entries
    for _value in self.mcc_lut_3.values():
        # why ?
        # FIXME
        # _value.mccs_id = None
        # _value.check_command_pk = _value.pk
        pass
    # pk -> (name, description, config name)
    self.mcc_lut = {
        key: (v0, v1, v2) for key, v0, v1, v2 in mon_check_command.objects.all().values_list(
            "pk", "name", "description", "config_rel__name")
    }
    # lookup table for config -> mon_check_commands
    self.mcc_lut_2 = {}
    for v_list in mon_check_command.objects.all().values_list(
            "name", "config_rel__name"):
        self.mcc_lut_2.setdefault(v_list[1], []).append(v_list[0])
    # print("i1")
    # import pprint
    # pprint.pprint(self.mcc_lut)
    # host list, set from caller
    self.host_list = []
    self.dev_templates = None
    self.serv_templates = None
    self.single_build = False
    self.debug = False
    # variable cache with SNMP defaults; prefilled only for full builds
    self.__var_cache = VarCache(prefill=full_build, def_dict={
        "SNMP_VERSION": 2,
        "SNMP_READ_COMMUNITY": "public",
        "SNMP_WRITE_COMMUNITY": "private",
    })
    self.join_char = "_" if global_config["SAFE_NAMES"] else " "
    # device_group user access: device_group pk -> users with mon contact
    self.dg_user_access = {}
    mon_user_pks = list(
        user.objects.filter(Q(mon_contact__pk__gt=0)).values_list(
            "pk", flat=True))
    for _dg in device_group.objects.all().prefetch_related("user_set"):
        self.dg_user_access[_dg.pk] = list([
            _user for _user in _dg.user_set.all() if _user.pk in mon_user_pks
        ])
    # all hosts dict: pk -> device (only enabled devices in enabled groups)
    self.all_hosts_dict = {
        cur_dev.pk: cur_dev for cur_dev in device.objects.filter(
            Q(device_group__enabled=True) & Q(enabled=True)).select_related(
                "domain_tree_node", "device_group").prefetch_related(
                    "monhosttrace_set")
    }
    # mark every host reachable by default; refined later by callers
    for _host in self.all_hosts_dict.values():
        _host.reachable = True
    # print(_res)
    # traces in database
    self.log("traces found in database: {:d}".format(
        MonHostTrace.objects.all().count()))
    # read traces of the current generation, grouped by device
    self.__host_traces = {}
    for _trace in MonHostTrace.objects.filter(
            Q(generation=self.__trace_gen)):
        self.__host_traces.setdefault(_trace.device_id, []).append(_trace)
    # import pprint
    # pprint.pprint(self.__host_traces)
    # host / service clusters:
    # type -> main_device_id -> [cluster objects with .devices_list filled]
    clusters = {}
    for _obj, _name in [
        (mon_host_cluster, SpecialTypesEnum.mon_host_cluster),
        (mon_service_cluster,
         SpecialTypesEnum.mon_service_cluster)
    ]:
        # _lut: cluster pk -> main_device_id, to locate the cluster object
        # when iterating the m2m through table below
        _lut = {}
        _query = _obj.objects.all()
        if _name == SpecialTypesEnum.mon_service_cluster:
            _query = _query.select_related("mon_check_command")
        for _co in _query:
            _lut[_co.pk] = _co.main_device_id
            _co.devices_list = []
            clusters.setdefault(_name, {}).setdefault(_co.main_device_id, []).append(_co)
        # resolve the m2m device links onto each cluster's devices_list
        for _entry in _obj.devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_cluster:
                _pk = _entry.mon_host_cluster_id
            else:
                _pk = _entry.mon_service_cluster_id
            _tco = [
                _co for _co in clusters[_name][_lut[_pk]] if _co.pk == _pk
            ][0]
            _tco.devices_list.append(_entry.device_id)
            # clusters[_name][_entry.]
    self.__clusters = clusters
    # host / service dependencies:
    # type -> dependent device pk -> [dependency objects]
    deps = {}
    for _obj, _name in [
        (mon_host_dependency, SpecialTypesEnum.mon_host_dependency),
        (mon_service_dependency, SpecialTypesEnum.mon_service_dependency)
    ]:
        # _lut: dependency pk -> list of dependent device pks
        _lut = {}
        _query = _obj.objects.all().prefetch_related(
            "devices", "dependent_devices")
        if _name == SpecialTypesEnum.mon_host_dependency:
            _query = _query.select_related(
                "mon_host_dependency_templ",
                "mon_host_dependency_templ__dependency_period",
            )
        else:
            _query = _query.select_related(
                "mon_service_cluster",
                "mon_check_command",
                "dependent_mon_check_command",
                "mon_service_dependency_templ",
                "mon_service_dependency_templ__dependency_period",
            )
        for _do in _query:
            # == slaves
            _do.devices_list = []
            # == dependent devices
            _do.master_list = []
            _lut[_do.pk] = []
            for _dd in _do.dependent_devices.all():
                _lut[_do.pk].append(_dd.pk)
                deps.setdefault(_name, {}).setdefault(_dd.pk, []).append(_do)
        # fill devices_list (slave side) from the m2m through table
        for _entry in _obj.devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_dependency:
                _pk = _entry.mon_host_dependency_id
            else:
                _pk = _entry.mon_service_dependency_id
            for _devpk in _lut[_pk]:
                _tdo = [
                    _do for _do in deps[_name][_devpk] if _do.pk == _pk
                ][0]
                _tdo.devices_list.append(_entry.device_id)
        # fill master_list (dependent side) from the m2m through table
        for _entry in _obj.dependent_devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_dependency:
                _pk = _entry.mon_host_dependency_id
            else:
                _pk = _entry.mon_service_dependency_id
            for _devpk in _lut[_pk]:
                _tdo = [
                    _do for _do in deps[_name][_devpk] if _do.pk == _pk
                ][0]
                _tdo.master_list.append(_entry.device_id)
    self.__dependencies = deps
    # init snmp sink
    self.snmp_sink = SNMPSink(log_com)
    tm.step("init build_cache")
def get_commands(self):
    """Return all monitoring check commands of the SNMP sink.

    Collects ``config_mon_check()`` results from every handler that
    declares monitoring capability (``Meta.mon_check``) into one flat list.
    """
    snmp_sink = SNMPSink(self.log)
    # flatten per-handler command lists with a single comprehension;
    # avoids the quadratic sum(list_of_lists, []) concatenation pattern
    return [
        _mc
        for _handler in snmp_sink.handlers
        if _handler.Meta.mon_check
        for _mc in _handler.config_mon_check()
    ]