def post(self, request):
    _post = json.loads(request.POST["blob"])
    login_name = _post.get('username')
    login_password = _post.get('password')
    real_user_name = self.__class__.get_real_user_name(login_name)
    try:
        db_user = self.__class__._check_login_data(request, real_user_name, login_password)
    except ValidationError as e:
        for err_msg in e:
            request.xml_response.error(unicode(err_msg))
        _failed_login(request, real_user_name)
    else:
        # check eggs for allegro
        _eco = server_mixins.EggConsumeObject(DummyLogger())
        _eco.init({"SERVICE_ENUM_NAME": icswServiceEnum.cluster_server.name})
        if _eco.consume("allegro", db_user):
            login_credentials = (real_user_name, login_password, login_name)
            _num_dup_sessions = _login(request, db_user, login_credentials)
            request.xml_response["duplicate_sessions"] = "{:d}".format(_num_dup_sessions)
            if _post.get("next_url", "").strip():
                request.xml_response["redirect"] = _post["next_url"]
            else:
                request.xml_response["redirect"] = "main.dashboard"
        else:
            request.xml_response.error("Ova resource problem")
            _failed_login(request, real_user_name)
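# The EggConsumeObject dance above (instantiate with a DummyLogger, init with a
# service enum, consume a named resource) repeats in nearly every view below.
# A minimal sketch of how it could be factored into one helper -- the name
# consume_or_error, its signature and the error text are assumptions for
# illustration, not part of the existing code base:
def consume_or_error(request, resource, key, service_enum=None):
    # build a consumer bound to the given (default: monitor) service enum
    service_enum = service_enum or icswServiceEnum.monitor_server
    _eco = server_mixins.EggConsumeObject(DummyLogger())
    _eco.init({"SERVICE_ENUM_NAME": service_enum.name})
    if _eco.consume(resource, key):
        return True
    # report the failed ova consumption on the xml response
    request.xml_response.error("Ova resource problem")
    return False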
def list(self, request, *args, **kwargs):
    device_ids = json.loads(request.GET.get("device_ids"))
    timespans_db = _device_status_history_util.get_timespans_db_from_request(request)
    data = []
    if len(timespans_db):
        data = mon_icinga_log_aggregated_host_data.objects.filter(
            device_id__in=device_ids,
            timespan__in=timespans_db,
        ).values('device_id', 'state', 'state_type', 'value')
    data_per_device = {device_id: [] for device_id in device_ids}
    for d in data:
        d['state'] = mon_icinga_log_aggregated_host_data.STATE_CHOICES_READABLE[d['state']].capitalize()
        data_per_device[d['device_id']].append(d)
    data_merged_state_types = {}
    _eco = server_mixins.EggConsumeObject(DummyLogger())
    _eco.init({"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
    for device_id, device_data in data_per_device.iteritems():
        if _eco.consume("dashboard", device_id):
            data_merged_state_types[device_id] = mon_icinga_log_aggregated_service_data.objects.merge_state_types(
                device_data,
                mon_icinga_log_aggregated_host_data.STATE_CHOICES_READABLE[
                    mon_icinga_log_raw_base.STATE_UNDETERMINED
                ]
            )
    return Response([data_merged_state_types])  # fake a list, see coffeescript
def get_hist_device_data_from_request(request):
    device_ids = json.loads(request.GET.get("device_ids"))
    timespans_db = _device_status_history_util.get_timespans_db_from_request(request)
    data = []
    if len(timespans_db):
        data = mon_icinga_log_aggregated_host_data.objects.filter(
            device_id__in=device_ids,
            timespan__in=timespans_db,
        ).values('device_id', 'state', 'state_type', 'value')
    data_per_device = {device_id: [] for device_id in device_ids}
    for d in data:
        data_per_device[d['device_id']].append(d)
    data_merged_state_types = {}
    _eco = server_mixins.EggConsumeObject(DummyLogger())
    _eco.init({"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
    for device_id, device_data in data_per_device.items():
        if _eco.consume("dashboard", device_id):
            data_merged_state_types[device_id] = mon_icinga_log_aggregated_service_data.objects.merge_state_types(
                device_data,
                mon_icinga_log_raw_base.STATE_UNDETERMINED,
                normalize=True,
            )
    return data_merged_state_types
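# Both history views above delegate the actual merging to
# mon_icinga_log_aggregated_service_data.objects.merge_state_types(). A rough
# standalone illustration of the idea; the real manager method also handles
# hard/soft state types and readable state names, so this simplified
# _merge_state_types and its normalize behaviour are assumptions:
from collections import defaultdict

def _merge_state_types(rows, undetermined_state, normalize=False):
    # collapse the per-state_type rows into one aggregate value per state
    merged = defaultdict(float)
    for row in rows:
        merged[row['state']] += row['value']
    if not merged:
        # no data at all: report the whole span as undetermined
        merged[undetermined_state] = 1.0
    if normalize:
        # scale values so they sum to 1.0 (fractions of the timespan)
        total = sum(merged.values()) or 1.0
        merged = {state: value / total for state, value in merged.items()}
    return [{'state': state, 'value': value} for state, value in merged.items()]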
def __init__(self, log_com):
    self.__log_com = log_com
    self.log("init host_matcher")
    self.EC = server_mixins.EggConsumeObject(self)
    self.EC.init(global_config)
    # dict, from {uuid, fqdn} to _dev
    self.__match = {}
    # dict: pk -> HostActiveRRD struct
    self.__active_dict = {}
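# The class passes itself as the log target for EggConsumeObject, so it must
# expose a log() method that forwards to the stored log_com callable. That
# method is not shown above; a minimal sketch of the usual icsw delegation
# pattern (it would live on the same class, and the exact prefix and signature
# are assumptions):
def log(self, what, log_level=logging_tools.LOG_LEVEL_OK):
    # prefix messages so they can be traced back to the host matcher
    self.__log_com("[hm] {}".format(what), log_level)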
def post(self, request):
    def _to_fqdn(_vals):
        if _vals[2]:
            return u"{}.{}".format(_vals[1], _vals[2])
        else:
            return _vals[1]

    _post = request.POST
    pk_list = json.loads(_post["pk_list"])
    srv_com = server_command.srv_command(command="get_node_status")
    # noinspection PyUnresolvedReferences
    srv_com["device_list"] = E.device_list(
        *[
            E.device(
                pk="{:d}".format(cur_dev[0]),
                full_name=_to_fqdn(cur_dev),
            ) for cur_dev in device.objects.filter(
                Q(pk__in=pk_list)
            ).values_list("pk", "name", "domain_tree_node__full_name")
        ]
    )
    result = contact_server(
        request,
        icswServiceEnum.monitor_server,
        srv_com,
        timeout=30,
        connect_port_enum=icswServiceEnum.monitor_slave,
    )
    _eco = server_mixins.EggConsumeObject(DummyLogger())
    _eco.init({"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
    if result:
        host_results = result.xpath(".//ns:host_result/text()", smart_strings=False)
        service_results = result.xpath(".//ns:service_result/text()", smart_strings=False)
        # if not len(host_results) and not len(service_results) and result.get_log_tuple()[1] >= logging_tools.LOG_LEVEL_ERROR:
        #     # handle unreachable or not responding md-config-server, clear all logs to reduce error level
        #     request.xml_response.clear_log_buffer()
        # log and lock access
        any_locked = False
        host_results_filtered = []
        if len(host_results):
            for dev_res in json.loads(host_results[0]):
                dev_pk = dev_res["custom_variables"]["device_pk"]
                if _eco.consume("dashboard", dev_pk):
                    host_results_filtered.append(dev_res)
                else:
                    any_locked = True
        service_results_filtered = []
        if len(service_results):
            for serv_res in json.loads(service_results[0]):
                host_pk, service_pk, _ = host_service_id_util.parse_host_service_description(
                    serv_res['description'], log=logger.error
                )
                if host_pk is not None and service_pk is not None:
                    if _eco.consume("dashboard", host_pk):
                        service_results_filtered.append(serv_res)
                    else:
                        any_locked = True
        if any_locked:
            request.xml_response.info("Some entries got no ova and are therefore not displayed.")
        # simply copy the json dumps
        request.xml_response["host_result"] = json.dumps(host_results_filtered)
        request.xml_response["service_result"] = json.dumps(service_results_filtered)
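# The device_list payload above is built with the lxml E-factory, where keyword
# arguments become XML attributes. A self-contained sketch of the same
# construction; the device pks and names are made up for illustration:
from lxml.builder import ElementMaker

E = ElementMaker()
device_list = E.device_list(
    *[
        E.device(pk="{:d}".format(pk), full_name=fqdn)
        for pk, fqdn in [(42, "node01.example.com"), (43, "node02")]
    ]
)
# lxml.etree.tostring(device_list) then yields
# <device_list><device pk="42" full_name="node01.example.com"/>...</device_list>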
def __init__(self, log_com, full_build, routing_fingerprint=None, router_obj=None):
    # build cache to speed up config generation, stores various cached objects
    tm = logging_tools.MeasureTime(log_com=self.log)
    self.log_com = log_com
    self.router = routing.SrvTypeRouting(log_com=self.log_com)
    self.instance_xml = InstanceXML(log_com=self.log, quiet=True)
    # routing handling
    if router_obj is None:
        # slave, no consumer
        self.consumer = None
        self.routing_fingerprint = routing_fingerprint
        # must exist
        self.__trace_gen = MonHostTraceGeneration.objects.get(
            Q(fingerprint=self.routing_fingerprint)
        )
    else:
        # master, install the egg consumer
        self.consumer = server_mixins.EggConsumeObject(self)
        self.consumer.init({"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
        self.routing_fingerprint = router_obj.fingerprint
        # get generation
        try:
            self.__trace_gen = MonHostTraceGeneration.objects.get(
                Q(fingerprint=self.routing_fingerprint)
            )
        except MonHostTraceGeneration.DoesNotExist:
            self.log("creating new trace generation")
            self.__trace_gen = router_obj.create_trace_generation()
        # delete old ones
        MonHostTrace.objects.exclude(Q(generation=self.__trace_gen)).delete()
    # global luts
    self.mcc_lut_3 = {
        _check.pk: _check for _check in mon_check_command.objects.all()
    }
    self.mcc_lut = {
        key: (v0, v1, v2) for key, v0, v1, v2 in mon_check_command.objects.all().values_list(
            "pk", "name", "description", "config_rel__name"
        )
    }
    # lookup table for config -> mon_check_commands
    self.mcc_lut_2 = {}
    for v_list in mon_check_command.objects.all().values_list("name", "config_rel__name"):
        self.mcc_lut_2.setdefault(v_list[1], []).append(v_list[0])
    # host list, set from caller
    self.host_list = []
    self.dev_templates = None
    self.serv_templates = None
    self.single_build = False
    self.debug = False
    self.__var_cache = VarCache(
        prefill=full_build,
        def_dict={
            "SNMP_VERSION": 2,
            "SNMP_READ_COMMUNITY": "public",
            "SNMP_WRITE_COMMUNITY": "private",
        }
    )
    self.join_char = "_" if global_config["SAFE_NAMES"] else " "
    # device_group user access
    self.dg_user_access = {}
    mon_user_pks = list(
        user.objects.filter(Q(mon_contact__pk__gt=0)).values_list("pk", flat=True)
    )
    for _dg in device_group.objects.all().prefetch_related("user_set"):
        self.dg_user_access[_dg.pk] = [
            _user for _user in _dg.user_set.all() if _user.pk in mon_user_pks
        ]
    # all hosts dict
    self.all_hosts_dict = {
        cur_dev.pk: cur_dev for cur_dev in device.objects.filter(
            Q(device_group__enabled=True) & Q(enabled=True)
        ).select_related(
            "domain_tree_node", "device_group"
        ).prefetch_related("monhosttrace_set")
    }
    for _host in self.all_hosts_dict.values():
        _host.reachable = True
    # traces in database
    self.log("traces found in database: {:d}".format(MonHostTrace.objects.all().count()))
    # read traces
    self.__host_traces = {}
    for _trace in MonHostTrace.objects.filter(Q(generation=self.__trace_gen)):
        self.__host_traces.setdefault(_trace.device_id, []).append(_trace)
    # host / service clusters
    clusters = {}
    for _obj, _name in [
        (mon_host_cluster, SpecialTypesEnum.mon_host_cluster),
        (mon_service_cluster, SpecialTypesEnum.mon_service_cluster),
    ]:
        _lut = {}
        _query = _obj.objects.all()
        if _name == SpecialTypesEnum.mon_service_cluster:
            _query = _query.select_related("mon_check_command")
        for _co in _query:
            _lut[_co.pk] = _co.main_device_id
            _co.devices_list = []
            clusters.setdefault(_name, {}).setdefault(_co.main_device_id, []).append(_co)
        for _entry in _obj.devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_cluster:
                _pk = _entry.mon_host_cluster_id
            else:
                _pk = _entry.mon_service_cluster_id
            _tco = [_co for _co in clusters[_name][_lut[_pk]] if _co.pk == _pk][0]
            _tco.devices_list.append(_entry.device_id)
    self.__clusters = clusters
    # host / service dependencies
    deps = {}
    for _obj, _name in [
        (mon_host_dependency, SpecialTypesEnum.mon_host_dependency),
        (mon_service_dependency, SpecialTypesEnum.mon_service_dependency),
    ]:
        _lut = {}
        _query = _obj.objects.all().prefetch_related("devices", "dependent_devices")
        if _name == SpecialTypesEnum.mon_host_dependency:
            _query = _query.select_related(
                "mon_host_dependency_templ",
                "mon_host_dependency_templ__dependency_period",
            )
        else:
            _query = _query.select_related(
                "mon_service_cluster",
                "mon_check_command",
                "dependent_mon_check_command",
                "mon_service_dependency_templ",
                "mon_service_dependency_templ__dependency_period",
            )
        for _do in _query:
            # == slaves
            _do.devices_list = []
            # == dependent devices
            _do.master_list = []
            _lut[_do.pk] = []
            for _dd in _do.dependent_devices.all():
                _lut[_do.pk].append(_dd.pk)
                deps.setdefault(_name, {}).setdefault(_dd.pk, []).append(_do)
        for _entry in _obj.devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_dependency:
                _pk = _entry.mon_host_dependency_id
            else:
                _pk = _entry.mon_service_dependency_id
            for _devpk in _lut[_pk]:
                _tdo = [_do for _do in deps[_name][_devpk] if _do.pk == _pk][0]
                _tdo.devices_list.append(_entry.device_id)
        for _entry in _obj.dependent_devices.through.objects.all():
            if _name == SpecialTypesEnum.mon_host_dependency:
                _pk = _entry.mon_host_dependency_id
            else:
                _pk = _entry.mon_service_dependency_id
            for _devpk in _lut[_pk]:
                _tdo = [_do for _do in deps[_name][_devpk] if _do.pk == _pk][0]
                _tdo.master_list.append(_entry.device_id)
    self.__dependencies = deps
    # init snmp sink
    self.snmp_sink = SNMPSink(log_com)
    tm.step("init build_cache")
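# Both the cluster and the dependency loops above avoid one query per m2m row
# by scanning the auto-created through table once and resolving each row via a
# pk lookup table. A condensed sketch of that pattern with a hypothetical
# Cluster model carrying an m2m field "devices":
_lut = {}  # cluster pk -> cluster object
for _co in Cluster.objects.all():
    _co.devices_list = []
    _lut[_co.pk] = _co
# single scan over the through table instead of N queries
for _entry in Cluster.devices.through.objects.all():
    _lut[_entry.cluster_id].devices_list.append(_entry.device_id)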
def list(self, request, *args, **kwargs):
    model_name = request.GET['model']
    # try currently registered models first
    try:
        model = [
            i for i in icsw_register.REGISTERED_MODELS if i.__name__ == model_name
        ][0]
    except IndexError:
        model = getattr(initat.cluster.backbone.models, model_name)
    object_id = request.GET.get("object_id", None)

    def format_version(version):
        serialized_data = json.loads(version.serialized_data)[0]
        return_data = serialized_data['fields']
        if version.revision.comment == "Initial version.":
            change_type = "initial"
        else:
            change_type = None
        meta = {
            'date': version.revision.date_created,
            'user': version.revision.user_id,
            'type': change_type,
            'object_repr': version.object_repr,
            'object_id': serialized_data['pk'],
        }
        return {'meta': meta, 'data': return_data}

    def format_deletion(deletion):
        serialized_data = json.loads(deletion.serialized_data)[0]
        return_data = serialized_data['fields']
        meta = {
            'date': deletion.date,
            'user': deletion.user_id,
            'type': 'deleted',
            'object_repr': deletion.object_repr,
            'object_id': serialized_data['pk'],
        }
        return {'meta': meta, 'data': return_data}

    content_type = ContentType.objects.get_for_model(model)
    filter_dict = {'content_type': content_type}
    filter_dict_del = {"content_type": content_type}
    if object_id is not None:
        filter_dict['object_id'] = object_id
        filter_dict_del['object_id_int'] = object_id
    # get data for deletion and version (they mostly have the same fields)
    deletion_queryset = icsw_deletion_record.objects.filter(**filter_dict_del)
    version_queryset = Version.objects.filter(**filter_dict).select_related('revision')
    formatted = itertools.chain(
        (format_version(ver) for ver in version_queryset),
        (format_deletion(dele) for dele in deletion_queryset),
    )
    _eco = server_mixins.EggConsumeObject(DummyLogger())
    _eco.init({"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
    if model == device:
        allowed_by_lic = (
            elem for elem in formatted
            if _eco.consume("history", elem["meta"]["object_id"])
        )
    else:
        allowed_by_lic = formatted
    sorted_data = sorted(allowed_by_lic, key=lambda elem: elem['meta']['date'])
    foreign_keys = {
        foreign_key.name: foreign_key
        for foreign_key in model._meta.concrete_model._meta.local_fields
        if isinstance(foreign_key, ForeignKey)
    }
    # only the serialized m2ms, which by django's logic are the ones
    # with an auto-created through table
    m2ms = {
        m2m.name: m2m
        for m2m in model._meta.concrete_model._meta.local_many_to_many
        if m2m.rel.through._meta.auto_created
    }
    deleted_objects_cache = DeletedObjectsCache()

    def resolve_reference(target_model, foreign_key_val):
        try:
            return unicode(target_model.objects.get(pk=foreign_key_val))
        except target_model.DoesNotExist:
            try:
                # unicode on a Version object gives the saved object repr, which we use here
                return unicode(deleted_objects_cache[target_model][foreign_key_val])
            except KeyError:
                return u"untracked object"

    def get_human_readable_value(key, value):
        if value is None:
            return value
        elif key in foreign_keys:
            return resolve_reference(foreign_keys[key].rel.to, value)
        elif key in m2ms:
            return list(
                resolve_reference(m2ms[key].rel.to, m2m_val) for m2m_val in value
            )
        else:
            return value

    used_device_ids = set()
    # calc change and type info
    last_entry_by_pk = {}
    for entry in sorted_data:
        pk = entry['meta']['object_id']
        if model == device:
            used_device_ids.add(pk)
        # set missing type info
        if not entry['meta']['type']:
            if pk in last_entry_by_pk:
                entry['meta']['type'] = 'modified'
            else:
                entry['meta']['type'] = 'created'
        # extract change info and only transmit that
        if pk in last_entry_by_pk:
            entry['changes'] = {}
            for k in set(itertools.chain(entry['data'].iterkeys(), last_entry_by_pk[pk].iterkeys())):
                old = last_entry_by_pk[pk].get(k, None)
                new = entry['data'].get(k, None)
                if old != new:
                    patch = None
                    if isinstance(old, basestring) and isinstance(new, basestring):
                        dmp = diff_match_patch()
                        diffs = dmp.diff_main(old, new)
                        dmp.diff_cleanupSemantic(diffs)
                        patch = dmp.diff_prettyHtml(diffs)
                        # don't show para signs
                        patch = patch.replace('&para;', "")
                    entry['changes'][k] = {
                        'new_data_human': get_human_readable_value(k, new),
                        'old_data_human': get_human_readable_value(k, old),
                        'new_data': new,
                        'old_data': old,
                        'patch': patch,
                    }
        else:
            entry['changes'] = {
                'full_dump': entry['data'],
                'full_dump_human': {
                    k: get_human_readable_value(k, v) for k, v in entry['data'].iteritems()
                },
            }
        last_entry_by_pk[pk] = entry['data']
        del entry['data']
    # NOTE: entries must be in chronological order, earliest first
    return Response(sorted_data)
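# The change records above use google's diff-match-patch to render an inline
# HTML diff for changed string fields. A minimal standalone example of the
# calls used (diff_main, diff_cleanupSemantic and diff_prettyHtml are the real
# library API; the sample strings are made up):
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = dmp.diff_main("monitoring master", "monitoring slave")
dmp.diff_cleanupSemantic(diffs)    # merge trivial edits into readable chunks
html = dmp.diff_prettyHtml(diffs)  # <span>/<ins>/<del> markup, &para; for newlines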
def get_data_per_device(devices, timespans):
    if isinstance(devices, dict):
        _queries = []
        for dev_id, service_list in devices.items():
            # query: device_pk matches as well as one service_pk/service_info combination
            service_qs = (
                (Q(service_id=serv_pk) & Q(service_info=service_info))
                for serv_pk, service_info in service_list
            )
            _queries.append(Q(device_id=dev_id) & reduce(lambda x, y: x | y, service_qs))
        # or around all queries
        query_filter = reduce(lambda x, y: x | y, _queries)
        device_ids = list(devices.keys())
    else:
        query_filter = Q(device_id__in=devices)
        device_ids = devices
    queryset = mon_icinga_log_aggregated_service_data.objects.filter(
        query_filter & Q(timespan__in=timespans)
    )
    data_per_device = {
        device_id: defaultdict(lambda: []) for device_id in device_ids
    }
    used_device_services = {device_id: set() for device_id in device_ids}
    # can't do regular prefetch_related for queryset, this seems to work
    device_service_timespans = collections.defaultdict(
        lambda: collections.defaultdict(lambda: set())
    )
    _eco = server_mixins.EggConsumeObject(DummyLogger())
    _eco.init({"SERVICE_ENUM_NAME": icswServiceEnum.monitor_server.name})
    for entry in queryset.prefetch_related(Prefetch("service"), Prefetch("timespan")):
        locked = not _eco.consume("history", entry.device_id)
        if not locked:
            if entry.service is not None:
                used_device_services[entry.device_id].add(entry.service.pk)
            relevant_data_from_entry = {
                'state': entry.state,
                'state_type': entry.state_type,
                'value': entry.value,
            }
            if use_client_name:
                service_key = mon_icinga_log_raw_service_alert_data.objects.calculate_service_name_for_client(entry)
            else:
                service_key = (entry.service_id, entry.service_info)
            device_service_timespans[entry.device_id][service_key].add(entry.timespan)
            # there can be more than one entry for each state and state type
            # per service if there are multiple timespans
            data_per_device[entry.device_id][service_key].append(relevant_data_from_entry)
    if len(timespans) > 1:
        # now for each service we should have len(timespans) entries;
        # if not, we have no data for those timespans, so fill up
        for device_id, service_name_timespans in device_service_timespans.items():
            for service_key, timespans_present in service_name_timespans.items():
                num_missing = len(timespans) - len(timespans_present)
                if num_missing > 0:
                    data_per_device[device_id][service_key].append({
                        'state': mon_icinga_log_raw_service_alert_data.STATE_UNDETERMINED,
                        'state_type': mon_icinga_log_raw_service_alert_data.STATE_UNDETERMINED,
                        'value': num_missing,  # this works since we normalize afterwards
                    })
    return data_per_device, used_device_services
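# The fill-up branch above relies on values being normalized later: one
# synthetic UNDETERMINED entry with value == num_missing weighs exactly as much
# as the missing timespans would. A toy illustration of why that works (state
# names and the normalization step are simplified assumptions):
timespans = ["day1", "day2", "day3", "day4"]
present = [{'state': 'OK', 'value': 1.0}, {'state': 'OK', 'value': 1.0}]  # two timespans seen
num_missing = len(timespans) - len(present)
present.append({'state': 'UNDETERMINED', 'value': float(num_missing)})
total = sum(e['value'] for e in present)
normalized = [{'state': e['state'], 'value': e['value'] / total} for e in present]
# -> OK accounts for 0.5, UNDETERMINED for 0.5: 2 of 4 timespans had no data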