Example #1
 def feed(self, *res_list):
     self.waiting_for = None
     self.running = False
     error_list, _ok_list, res_dict = res_list[0:3]
     if error_list:
         self.log(
             "error fetching SNMP data from {}".format(self.device_name),
             logging_tools.LOG_LEVEL_ERROR)
     else:
         headers = {
             "name": self.device_name,
             "uuid": self.uuid,
             "time": "{:d}".format(int(self.last_start))
         }
         mv_tree = E.machine_vector(simple="0", **headers)
         mon_info = E.monitor_info(**headers)
         for _handler in self.snmp_handlers:
             try:
                 _handler.collect_feed(res_dict,
                                       mv_tree=mv_tree,
                                       mon_info=mon_info,
                                       vc=self.__vcache)
             except:
                 self.log(
                     "error feeding for handler {} (IP {}): {}".format(
                         str(_handler),
                         self.ip,
                         process_tools.get_except_info(),
                     ), logging_tools.LOG_LEVEL_CRITICAL)
                 for _log_line in process_tools.icswExceptionInfo().log_lines:
                     self.log(_log_line, logging_tools.LOG_LEVEL_CRITICAL)
         # graphing
         self.bg_proc.process_data_xml(mv_tree,
                                       len(etree.tostring(mv_tree)))
Example #2
 def init_build(self):
     do_exit = True
     try:
         cur_img = self.check_build_lock()
         if not cur_img.builds:
             cur_img.builds = 0
         cur_img.builds += 1
         cur_img.save()
         self.log("setting build number to {:d}".format(cur_img.builds))
         self._check_dirs(cur_img)
         self._check_packages(cur_img)
         self._umount_dirs(cur_img)
         if global_config["CHECK_SIZE"]:
             self._check_size(cur_img)
         else:
             self.log("size checking disabled", logging_tools.LOG_LEVEL_WARN)
         # get image from database (in case something has changed)
         cur_img = self._get_image()
         self.log("building image from {}".format(cur_img.source))
         self._generate_dir_list(cur_img)
         self._copy_image(cur_img)
         if not global_config["SKIPCLEANUP"]:
             self._clean_image(cur_img)
         self._init_compress_image()
         do_exit = False
     except:
         self._int_error("build failed: {}".format(process_tools.get_except_info()))
         for line in process_tools.icswExceptionInfo().log_lines:
             self.log("    {}".format(line), logging_tools.LOG_LEVEL_ERROR)
     else:
         if do_exit:
             self._int_error("done")
Example #3
 def handle_nodeinfo(self, src_id, node_text):
     s_time = time.time()
     s_req = simple_request(self, src_id, node_text)
     com_call = self.__com_dict.get(s_req.command, None)
     if com_call:
         ConfigControl.update_router()
         try:
             ret_str = com_call(s_req)
         except:
             exc_info = process_tools.icswExceptionInfo()
             ret_str = "error interpreting command {}: {}".format(
                 node_text,
                 process_tools.get_except_info(),
             )
             for _line in exc_info.log_lines:
                 self.log("  {}".format(_line), logging_tools.LOG_LEVEL_ERROR)
     else:
         ret_str = "error unknown command '{}'".format(node_text)
     if ret_str is None:
         self.log("waiting for answer")
     else:
         e_time = time.time()
         self.log("handled nodeinfo '%s' (src_ip %s) in %s, returning %s" % (
             s_req.node_text,
             s_req.src_ip,
             logging_tools.get_diff_time_str(e_time - s_time),
             ret_str))
         ConfigControl.srv_process._send_simple_return(s_req.zmq_id, ret_str)
         del s_req
Example #4
 def init_commands(self, server_proc: object, verbose: bool) -> bool:
     _init_ok = True
     for call_name, add_server_proc in MODULE_STATE_INIT_LIST:
         for cur_mod in self.module_list:
             if verbose:
                 self.log(
                     "calling {} for module '{}'".format(
                         call_name,
                         cur_mod.name,
                     )
                 )
             try:
                 if add_server_proc:
                     getattr(cur_mod, call_name)(server_proc)
                 else:
                     getattr(cur_mod, call_name)()
             except:
                 exc_info = process_tools.icswExceptionInfo()
                 for log_line in exc_info.log_lines:
                     self.log(log_line, logging_tools.LOG_LEVEL_CRITICAL)
                 _init_ok = False
                 break
             else:
                 cur_mod.module_state[call_name] = True
         if not _init_ok:
             break
     return _init_ok
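init_commands() assumes MODULE_STATE_INIT_LIST is an iterable of (call_name, add_server_proc) pairs. A hypothetical definition, purely for illustration (the method names below are not taken from the original source):

 MODULE_STATE_INIT_LIST = [
     # (method name to call on every module, pass server_proc?)
     ("register_server", True),   # called as getattr(cur_mod, "register_server")(server_proc)
     ("init_module", False),      # called as getattr(cur_mod, "init_module")()
 ]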
Example #5
 def loop(self):
     try:
         while self["run_flag"]:
             self.__disp.runDispatcher()
             self.step(blocking=self["run_flag"])
     except ValueConstraintError:
         self.log("caught ValueConstraintError, terminating process",
                  logging_tools.LOG_LEVEL_CRITICAL)
         _term_cause = "ValueConstraintError"
     except:
         exc_info = process_tools.icswExceptionInfo()
         self.log("exception in dispatcher, terminating process",
                  logging_tools.LOG_LEVEL_CRITICAL)
         for log_line in exc_info.log_lines:
             self.log(" - {}".format(log_line),
                      logging_tools.LOG_LEVEL_CRITICAL)
         _term_cause = "internal error"
     else:
         self.log("no more jobs running")
         _term_cause = ""
     self.log("jobs pending: {:d}".format(len(self.__job_dict)))
     # close all jobs
     if _term_cause:
         self._terminate_jobs(error="{}, check logs".format(_term_cause))
     else:
         self._terminate_jobs()
     self.__disp.closeDispatcher()
Example #6
 def _handle_module_command(self, srv_com):
     try:
         self.local_mc[srv_com["command"].text](srv_com)
     except:
         for log_line in process_tools.icswExceptionInfo().log_lines:
             self.log(log_line, logging_tools.LOG_LEVEL_ERROR)
         srv_com.set_result(
             "caught server exception '{}'".format(process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL,
         )
Example #7
 def handle_result(self, mes_id, result):
     cur_mes = self.messages[mes_id]
     # default: no reuse (detection not possible or not important)
     _reuse = False
     if self.zmq_id != DUMMY_0MQ_ID:
         if "machine_uuid" in result:
             mach_uuid, dyn_uuid = (
                 result["*machine_uuid"],
                 result["*dynamic_uuid"],
             )
         else:
             mach_uuid, dyn_uuid = (
                 self.zmq_id,
                 ""
             )
         # reuse detected ?
         _reuse = HostConnection.zmq_discovery.update_mapping(
             self.__conn_str,
             self.zmq_id,
             mach_uuid,
             dyn_uuid
         )
     if cur_mes.sent:
         # ???
         cur_mes.sent = False
     if len(result.xpath(".//ns:raw", smart_strings=False)):
         # raw response, no interpret
         cur_mes.srv_com = result
         self.send_result(cur_mes, None)
         # self.send_result(cur_mes, None)
     else:
         try:
             if _reuse:
                 _map = HostConnection.zmq_discovery.get_mapping(self.__conn_str)
                 print(id(_map))
                 ret = ExtReturn(
                     limits.mon_STATE_CRITICAL,
                     "0MQ-ID reuse detected ({})".format(
                         _map.reuse_info,
                     )
                 )
                 # _map.clear_reuse()
             else:
                 ret = ExtReturn.get_ext_return(cur_mes.interpret(result))
         except:
             ret = ExtReturn(
                 limits.mon_STATE_CRITICAL,
                 "error interpreting result: {}".format(
                     process_tools.get_except_info()
                 )
             )
             exc_info = process_tools.icswExceptionInfo()
             for line in exc_info.log_lines:
                 HostConnection.relayer_process.log(line, logging_tools.LOG_LEVEL_CRITICAL)
         self.send_result(cur_mes, ret)
Example #8
 def compound_struct(in_list):
     try:
         _comps = DataStore.compound_tree.append_compounds(in_list)
     except:
         for _line in process_tools.icswExceptionInfo().log_lines:
             DataStore.g_log(_line, logging_tools.LOG_LEVEL_ERROR)
         _comps = []
     else:
         # pprint.pprint(_comps)
         pass
     return _comps
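Because the except branch falls back to an empty list, callers can iterate over the return value unconditionally; a short usage sketch (entry_list is illustrative):

 entry_list = []  # illustrative input
 for _comp in compound_struct(entry_list):
     print(_comp)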
Example #9
 def process(self):
     try:
         self.__ct_struct.process(self)
     except:
         exc_info = process_tools.icswExceptionInfo()
         for _line in exc_info.log_lines:
             self.log(_line, logging_tools.LOG_LEVEL_ERROR)
         self.srv_com.set_result(
             "error in process() call: {}".format(
                 process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL)
Example #10
 def check_rrd_file(self, f_name):
     _changed = False
     s_time = time.time()
     try:
         _old_size = os.stat(f_name)[stat.ST_SIZE]
     except:
         self.log(
             "cannot get size of {}: {}".format(
                 f_name, process_tools.get_except_info()),
             logging_tools.LOG_LEVEL_ERROR)
     else:
         if _old_size:
             try:
                 _rrd = rrd_tools.RRD(f_name,
                                      log_com=self.log,
                                      build_rras=False,
                                      verbose=self.__verbose)
             except:
                 # check if file is not an rrd file
                 _content = open(f_name, "rb").read()
                 if f_name.endswith(".rrd") and _content[:3] != b"RRD":
                     self.log(
                         "file {} has no RRD header, trying to remove it".format(f_name),
                         logging_tools.LOG_LEVEL_ERROR)
                     try:
                         os.unlink(f_name)
                     except:
                         pass
                 else:
                     self.log(
                         "cannot get info about {}: {}".format(
                             f_name, process_tools.get_except_info()),
                         logging_tools.LOG_LEVEL_ERROR)
                     for _line in process_tools.icswExceptionInfo().log_lines:
                         self.log(_line, logging_tools.LOG_LEVEL_ERROR)
             else:
                 _changed = self.check_rrd_file_2(f_name, _rrd)
                 if _changed:
                     _new_size = os.stat(f_name)[stat.ST_SIZE]
                     e_time = time.time()
                     self.log("modification of {} took {} ({} -> {} Bytes)".
                              format(
                                  f_name,
                                  logging_tools.get_diff_time_str(e_time -
                                                                  s_time),
                                  _old_size,
                                  _new_size,
                              ))
         else:
             self.log("file {} is empty".format(f_name),
                      logging_tools.LOG_LEVEL_WARN)
     return _changed
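The header test above relies on the fact that rrdtool files start with the ASCII magic bytes "RRD"; the same check in isolation (path handling is illustrative):

 def looks_like_rrd(f_name):
     # rrdtool writes the cookie "RRD" at the start of every file
     with open(f_name, "rb") as fh:
         return fh.read(3) == b"RRD"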
Example #11
 def close_modules(self):
     for cur_mod in self.module_list:
         if hasattr(cur_mod, "stop_module"):
             self.log("calling stop_module() for {}".format(cur_mod.name))
             try:
                 cur_mod.stop_module()
             except:
                 exc_info = process_tools.icswExceptionInfo()
                 for log_line in exc_info.log_lines:
                     self.log(log_line, logging_tools.LOG_LEVEL_CRITICAL)
         if cur_mod.module_state["init_module"]:
             cur_mod.close_module()
Example #12
 def feed_result(self, id_str, srv_reply):
     if id_str in self.__pending_messages:
         dc_action = self.__pending_messages[id_str]
         dc_action.end_time = time.time()
         run_str = logging_tools.get_diff_time_str(dc_action.end_time -
                                                   dc_action.start_time)
         # print("----", id_str, id(dc_action), dc_action.special_instance.Meta.name)
         try:
             _str, _state = srv_reply.get_log_tuple()
             dc_action.log("feed_result() got {}".format(_str), _state)
             DeviceLogEntry.new(
                 device=dc_action.hbc.device,
                 source=global_config["LOG_SOURCE_IDX"],
                 level=_state,
                 text="result was '{}' after {}".format(
                     _str,
                     run_str,
                 ),
             )
             # print("*")
             # print(srv_reply.get_result())
             for _action in dc_action.special_instance.feed_result(
                     dc_action, srv_reply):
                 # print("g", _action)
                 if _action:
                     self.call(
                         _action.salt(dc_action.hbc,
                                      dc_action.special_instance))
         except:
             _info = process_tools.get_except_info()
             DeviceLogEntry.new(
                 device=dc_action.hbc.device,
                 source=global_config["LOG_SOURCE_IDX"],
                 level=logging_tools.LOG_LEVEL_CRITICAL,
                 text="an error occured in feed_result: {} (after {})".
                 format(
                     _info,
                     run_str,
                 ),
             )
             exc_info = process_tools.icswExceptionInfo()
             self.log(
                 "an error occured (device={}): {}".format(
                     str(dc_action.hbc.device),
                     _info,
                 ), logging_tools.LOG_LEVEL_ERROR)
             for line in exc_info.log_lines:
                 self.log("  - {}".format(line),
                          logging_tools.LOG_LEVEL_ERROR)
         del self.__pending_messages[id_str]
     else:
         self.log("Got unknown id_str {}".format(id_str))
Example #13
    def dispatch_call(self):
        _now = timezone.now().replace(microsecond=0)

        for asset_batch in AssetBatch.objects.filter(run_status__in=[
                BatchStatus.PLANNED, BatchStatus.RUNNING,
                BatchStatus.FINISHED_RUNS, BatchStatus.GENERATING_ASSETS
        ]):
            if asset_batch.run_start_time:
                diff_time = (_now - asset_batch.run_start_time).total_seconds()
                if diff_time > 86400:
                    self.log(
                        "Closing pending/processing AssetBatch [now={}, run_start_time={}, diff_time={}]"
                        .format(_now, asset_batch.run_start_time,
                                diff_time), logging_tools.LOG_LEVEL_ERROR)

                    asset_batch.run_end_time = _now
                    asset_batch.run_status = BatchStatus.FINISHED
                    asset_batch.save()

        for schedule_item in ScheduleItem.objects.all():
            if schedule_item.run_now or schedule_item.planned_date < _now:
                schedule_handler_function = getattr(
                    self, schedule_item.schedule_handler)
                try:
                    schedule_handler_function(schedule_item)
                except Exception as e:
                    _ = e
                    _exc = process_tools.icswExceptionInfo()
                    self.log(
                        "error in schedule_handler: {}".format(
                            schedule_item.schedule_handler),
                        logging_tools.LOG_LEVEL_ERROR)
                    for line in _exc.log_lines:
                        self.log(line, logging_tools.LOG_LEVEL_ERROR)
                schedule_item.delete()

        # timeout handling
        _now = timezone.now()
        for run_index, host_monitoring_command in list(
                HostMonitoringCommand.host_monitoring_commands.items()):
            if _now > host_monitoring_command.timeout_date:
                self.log(
                    "HostMonitoring command [run_index:{} | command:{}] timed out"
                    .format(host_monitoring_command.run_index,
                            host_monitoring_command.command_string))
                host_monitoring_command.handle()
                self.discovery_process.send_pool_message(
                    "host_monitoring_command_timeout_handler", run_index)
Example #14
 def _handle_module_command(self, srv_com, cur_ns, rest_str):
     cur_com = self.local_mc[srv_com["command"].text]
     sp_struct = None
     try:
         if cur_ns is None:
             cur_ns, _rest = cur_com.handle_commandline(
                 rest_str.strip().split())
         sp_struct = cur_com(srv_com, cur_ns)
     except:
         exc_info = process_tools.icswExceptionInfo()
         for log_line in exc_info.log_lines:
             self.log(log_line, logging_tools.LOG_LEVEL_ERROR)
         srv_com.set_result(
             "caught server exception '{}'".format(
                 process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL)
     return sp_struct
Example #15
 def get_partition(self, *args):
     part_name = args[0]
     loc_tree = GeneratedTree()
     loc_dev = device.objects.get(Q(pk=self.pk))
     self.log("set act_partition_table and partdev to %s" % (part_name))
     loc_dev.act_partition_table = loc_dev.partition_table
     loc_dev.partdev = part_name
     loc_dev.save()
     success = False
     dummy_cont = BuildContainer(self, {}, {"device": loc_dev}, loc_tree,
                                 None)
     try:
         loc_ps = icswPartitionSetup(dummy_cont, self.log)
     except:
         self.log(
             "cannot generate partition info: {}".format(
                 process_tools.get_except_info()),
             logging_tools.LOG_LEVEL_ERROR)
         for _line in process_tools.icswExceptionInfo().log_lines:
             self.log("    {}".format(_line), logging_tools.LOG_LEVEL_ERROR)
     else:
         base_dir = os.path.join(global_config["CONFIG_DIR"], loc_dev.name)
         pinfo_dir = os.path.join(base_dir, "pinfo")
         if not os.path.isdir(pinfo_dir):
             try:
                 os.mkdir(pinfo_dir)
             except OSError:
                 self.log(
                     "cannot create pinfo_directory %s: %s" %
                     (pinfo_dir, process_tools.get_except_info()),
                     logging_tools.LOG_LEVEL_ERROR)
             else:
                 self.log("created pinfo directory %s" % (pinfo_dir))
         if os.path.isdir(pinfo_dir):
             for file_name in os.listdir(pinfo_dir):
                 try:
                     os.unlink("%s/%s" % (pinfo_dir, file_name))
                 except:
                     self.log(
                         "error removing %s in %s: %s" %
                         (file_name, pinfo_dir,
                          process_tools.get_except_info()),
                         logging_tools.LOG_LEVEL_ERROR)
             loc_ps.create_part_files(pinfo_dir)
             success = True
     return success
Example #16
 def handle(self, srv_com):
     try:
         p_args = self.parse(srv_com)
         self.run(srv_com, p_args)
     except DeviceNotFoundException:
         srv_com.set_result(
             "device not found",
             server_command.SRV_REPLY_STATE_CRITICAL,
         )
     except:
         exc_com = process_tools.icswExceptionInfo()
         for _line in exc_com.log_lines:
             self.log(_line, logging_tools.LOG_LEVEL_ERROR)
         srv_com.set_result(
             "an exception occured: {}".format(
                 process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL,
         )
Example #17
 def read(self):
     _all_files = [
         cur_entry for cur_entry in [
             entry.split(".")[0] for entry in os.listdir(
                 self.__root_dir
             ) if entry.endswith(".py")
         ] if cur_entry and not cur_entry.startswith("_")
     ]
     self.log(
         "{} found: {}".format(
             logging_tools.get_plural("file", len(_all_files)),
             ", ".join(sorted(_all_files)),
         )
     )
     import_errors = []
     _mod_list = []
     for mod_name in _all_files:
         try:
             new_mod = importlib.import_module(
                 "{}.{}".format(
                     self.__parent_module_name,
                     mod_name
                 )
             )
             if hasattr(new_mod, "ModuleDefinition"):
                 _mod_list.append(new_mod)
             else:
                 self.log(
                     "module {} is missing the 'ModuleDefinition' object".format(
                         mod_name
                     ),
                     logging_tools.LOG_LEVEL_WARN
                 )
         except:
             exc_info = process_tools.icswExceptionInfo()
             for log_line in exc_info.log_lines:
                 import_errors.append((mod_name, "import", log_line))
     # list of modules
     self.__pure_module_list = _mod_list
     self.reload_module_checksum()
     self._log_import_errors(import_errors)
Example #18
 def update(self, dev, scheme, result_dict, oid_list, flags):
     # update dev with the results from the given snmp_scheme;
     # the validated OIDs are in oid_list, the fetched values in result_dict
     _handler = self.get_handler(scheme)
     if _handler:
         try:
             return _handler.update(dev, scheme, result_dict, oid_list,
                                    flags)
         except:
             exc_info = process_tools.icswExceptionInfo()
             _err_str = "unable to process results: {}".format(
                 process_tools.get_except_info())
             self.log(_err_str, logging_tools.LOG_LEVEL_ERROR)
             for _line in exc_info.log_lines:
                 self.log("  {}".format(_line),
                          logging_tools.LOG_LEVEL_ERROR)
             return ResultNode(error=_err_str)
     else:
         return ResultNode(error="no handler found for {}".format(
             scheme.full_name_version))
Example #19
    def periodic_update(self):
        """Recalculate all kpis and save result to database"""
        enabled_kpis = Kpi.objects.filter(enabled=True)
        if License.objects.has_valid_license(LicenseEnum.kpi) and enabled_kpis.exists():
            KpiGlobals.set_context()
            try:
                kpi_data = KpiData(self.log, global_config)
            except Exception as e:
                _exc = process_tools.icswExceptionInfo()
                self.log(
                    "Exception when gathering kpi data: {}".format(process_tools.get_except_info()),
                    logging_tools.LOG_LEVEL_ERROR,
                )
                for _line in _exc.log_lines:
                    self.log("    {}".format(_line), logging_tools.LOG_LEVEL_ERROR)
            else:
                # recalculate kpis
                for kpi_db in enabled_kpis:
                    self._update_single_kpi_result(kpi_data, kpi_db)

                """
Example #20
 def _check_for_pending_jobs(self):
     for c_name, _dev, scan_type_enum, _new_lock, _dev_xml in self.__job_list:
         # todo: make calls parallel
         s_time = time.time()
         try:
             getattr(self, c_name)(_dev_xml, _dev)
         except:
             _exc_info = process_tools.icswExceptionInfo()
             for _line in _exc_info.log_lines:
                 self.log("   {}".format(_line),
                          logging_tools.LOG_LEVEL_ERROR)
         finally:
             for _what, _level in _new_lock.close():
                 self.log(_what, _level)
         e_time = time.time()
         self.log("calling {} for device {} took {}".format(
             c_name, str(_dev),
             logging_tools.get_diff_time_str(e_time - s_time)))
     self.__job_list = []
Example #21
    def __call__(self, *args, **kwargs):
        s_time = time.time()

        display_name = getattr(args[0], "display_name", None)
        # get: head.im_class.__name__ (contains class name for django class views)
        view_class_name = getattr(getattr(getattr(args[0], 'head', None), 'im_class', None), '__name__', None)

        if hasattr(args[0], "model") and args[0].model is not None:
            self.__obj_name = args[0].model._meta.object_name
        elif display_name is not None:
            self.__obj_name = display_name
        elif view_class_name is not None:
            self.__obj_name = view_class_name
        else:
            self.__obj_name = "unknown"

        try:
            result = self._func(*args, **kwargs)
        except:
            exc_info = process_tools.icswExceptionInfo()
            _err_str = process_tools.get_except_info()
            self.log(
                "exception: {}".format(_err_str),
                logging_tools.LOG_LEVEL_ERROR
            )
            for line in exc_info.log_lines:
                self.log("  {}".format(line))
            result = Response(_err_str, status=status.HTTP_406_NOT_ACCEPTABLE)
            # raise
        e_time = time.time()
        self.log(
            "call took {}".format(
                logging_tools.get_diff_time_str(e_time - s_time)
            )
        )
        return result
Example #22
 def _fw_handle(self, *args, **kwargs):
     src_id, data = args
     srv_com = server_command.srv_command(source=data)
     in_com = srv_com["command"].text
     args = {}
     if "arguments" in srv_com:
         for entry in srv_com["arguments"]:
             _key = entry.tag.split("}")[-1]
             _val = entry.text
             if _val is None:
                 self.log("key {} has empty value ({})".format(_key, in_com), logging_tools.LOG_LEVEL_ERROR)
             else:
                 if _val.lower() in ["true", "false"]:
                     _val = bool(_val)
                 elif _val.isdigit():
                     _val = int(_val)
                 # if
                 args[_key] = _val
     self.log(
         "got '{}', {}: {}".format(
             in_com,
             logging_tools.get_plural("argument", len(args)),
             ", ".join(
                 [
                     "{}='{}' ({})".format(
                         key,
                         value,
                         type(value)
                     ) for key, value in args.items()
                 ]
             )
         )
     )
     args = {key.replace("-", "_"): value for key, value in args.items()}
     found_keys = set(args.keys())
     needed_keys = {
         "register_file_watch": {
             "send_id", "mode", "target_server", "target_port", "dir", "match"
         },
         "unregister_file_watch": {
             "id",
         },
     }.get(in_com, set())
     if needed_keys & found_keys == needed_keys:
         # set default return value
         srv_com.set_result(
             "got command {}".format(in_com)
         )
         try:
             getattr(self, "_{}".format(in_com))(srv_com, args)
         except:
             exc_info = process_tools.icswExceptionInfo()
             for line in exc_info.log_lines:
                 self.log("  {}".format(line), logging_tools.LOG_LEVEL_ERROR)
             srv_com.set_result(
                 "error processing '{}': {}".format(in_com, exc_info.except_info),
                 server_command.SRV_REPLY_STATE_CRITICAL
             )
         log_str, log_level = srv_com.get_log_tuple()
         self.log("result: {}".format(log_str), log_level)
     else:
         srv_com.set_result(
             "command {}, keys missing: {}".format(in_com, ", ".join(needed_keys - found_keys)),
             server_command.SRV_REPLY_STATE_ERROR
         )
     self.send_pool_message("callback_result", src_id, str(srv_com))
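The validation expression needed_keys & found_keys == needed_keys is a plain subset test; with Python sets it can equivalently be written with the <= operator:

 needed = {"send_id", "mode", "target_server", "target_port", "dir", "match"}
 found = needed | {"extra_arg"}
 # intersection form and subset form agree for any pair of sets
 assert (needed & found == needed) == (needed <= found)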
Example #23
    def _get_node_status(self, srv_com_str, **kwargs):
        def _salt_line(line, key, in_dict):
            line["_comments"] = in_dict.get(line[key], [])
            return line

        srv_com = server_command.srv_command(source=srv_com_str)
        # overview mode if overview is a top-level element
        _host_overview = "host_overview" in srv_com
        _service_overview = "service_overview" in srv_com
        if not _host_overview:
            # ToDo, FIXME: receive full names in srv_command
            dev_names = srv_com.xpath(".//device_list/device/@full_name",
                                      smart_strings=False)
            # dev_names = sorted([cur_dev.full_name for cur_dev in device.objects.filter(Q(pk__in=pk_list))])
        try:
            cur_sock = self._open()
            if cur_sock:
                fetch_dict = LivstatusFetch(self.log, cur_sock)
                if _host_overview:
                    fetch_dict["host"] = cur_sock.hosts.columns(
                        "name",
                        "address",
                        "state",
                        "plugin_output",
                        "custom_variables",
                    )
                    if _service_overview:
                        fetch_dict["service"] = cur_sock.services.columns(
                            "description",
                            "state",
                            "plugin_output",
                            "custom_variables",
                        )
                else:
                    if dev_names:
                        fetch_dict["service"] = cur_sock.services.columns(
                            "host_name",
                            "description",
                            "state",
                            "plugin_output",
                            "last_check",
                            "check_type",
                            "state_type",
                            "last_state_change",
                            "max_check_attempts",
                            "display_name",
                            "current_attempt",
                            "custom_variables",
                            "acknowledged",
                            "acknowledgement_type",
                            "active_checks_enabled",
                        ).filter("host_name", "=", dev_names)
                        fetch_dict["host"] = cur_sock.hosts.columns(
                            "name",
                            "address",
                            "state",
                            "plugin_output",
                            "last_check",
                            "check_type",
                            "state_type",
                            "last_state_change",
                            "max_check_attempts",
                            "current_attempt",
                            "custom_variables",
                            "acknowledged",
                            "acknowledgement_type",
                            "active_checks_enabled",
                        ).filter("name", "=", dev_names)
                        fetch_dict["host_comment"] = cur_sock.comments.columns(
                            "host_name",
                            "author",
                            "comment",
                            "entry_type",
                            "entry_time",
                        ).filter("host_name", "=", dev_names).filter(
                            "is_service",
                            "=",
                            "0",
                            method="and",
                            count=2,
                        )
                        fetch_dict[
                            "service_comment"] = cur_sock.comments.columns(
                                "host_name",
                                "service_description",
                                "author",
                                "comment",
                                "entry_type",
                                "entry_time",
                            ).filter("host_name", "=", dev_names).filter(
                                "is_service",
                                "=",
                                "1",
                                method="and",
                                count=2,
                            )
                        # print str(fetch_dict["service_comment"])
                fetch_dict.fetch()
                _host_comments = {}
                for entry in fetch_dict["host_comment_result"]:
                    _host_comments.setdefault(entry["host_name"],
                                              []).append(entry)
                _service_comments = {}
                for entry in fetch_dict["service_comment_result"]:
                    _service_comments.setdefault(entry["service_description"],
                                                 []).append(entry)
                # import pprint
                # pprint.pprint(_host_comments)
                # pprint.pprint(_service_comments)
                srv_com["service_result"] = json.dumps([
                    _salt_line(_line, "description", _service_comments)
                    for _line in fetch_dict["service_result"]
                    if _line.get("host_name", "")
                ])
                # import pprint
                # pprint.pprint(fetch_dict["host_result"])
                srv_com["host_result"] = json.dumps([
                    _salt_line(_line, "name", _host_comments)
                    for _line in fetch_dict["host_result"]
                ])
                srv_com.set_result(fetch_dict.info_str)
                # print(srv_com.pretty_print())
            else:
                srv_com.set_result("cannot connect to socket",
                                   server_command.SRV_REPLY_STATE_CRITICAL)
        except:
            self.log(
                "fetch exception: {}".format(process_tools.get_except_info()),
                logging_tools.LOG_LEVEL_ERROR)
            exc_info = process_tools.icswExceptionInfo()
            for line in exc_info.log_lines:
                self.log(" - {}".format(line), logging_tools.LOG_LEVEL_ERROR)
            self._close()
            srv_com.set_result(
                "exception during fetch: {}".format(
                    process_tools.get_except_info()),
                server_command.SRV_REPLY_STATE_CRITICAL)
        self.send_pool_message("remote_call_async_result", str(srv_com))
Example #24
 def graph(self, dev_pks, graph_keys):
     # end time with forecast
     local_ds = DataSource(self.log_com, dev_pks, graph_keys,
                           self.colorizer)
     self.para_dict["end_time_fc"] = self.para_dict["end_time"]
     if self.para_dict["graph_setting"].graph_setting_forecast:
         _fc = self.para_dict["graph_setting"].graph_setting_forecast
         if _fc.seconds:
             # add seconds
             self.para_dict["end_time_fc"] += datetime.timedelta(
                 seconds=_fc.seconds)
         else:
             # add timeframe
             self.para_dict["end_time_fc"] += self.para_dict[
                 "end_time"] - self.para_dict["start_time"]
     timeframe = abs((self.para_dict["end_time_fc"] -
                      self.para_dict["start_time"]).total_seconds())
     graph_width, graph_height = (
         self.para_dict["graph_setting"].graph_setting_size.width,
         self.para_dict["graph_setting"].graph_setting_size.height,
     )
     self.log("width / height : {:d} x {:d}, timeframe {}".format(
         graph_width,
         graph_height,
         logging_tools.get_diff_time_str(timeframe),
     ))
     # store for DEF generation
     self.width = graph_width
     self.height = graph_height
     dev_dict = {
         cur_dev.pk: str(cur_dev.display_name)
         for cur_dev in device.objects.filter(Q(pk__in=dev_pks))
     }
     s_graph_key_dict = self._create_graph_keys(graph_keys)
     self.log("found {}: {}".format(
         logging_tools.get_plural("device", len(dev_pks)), ", ".join([
             "{:d} ({})".format(pk, dev_dict.get(pk, "unknown"))
             for pk in dev_pks
         ])))
     if self.para_dict["graph_setting"].merge_graphs:
         # reorder all graph_keys into one graph_key_dict
         s_graph_key_dict = {
             "all": sum(list(s_graph_key_dict.values()), [])
         }
     self.log("{}: {}".format(
         logging_tools.get_plural("graph key", len(graph_keys)),
         ", ".join([full_graph_key(_v) for _v in graph_keys])))
     self.log("{}: {}".format(
         logging_tools.get_plural("top level key", len(s_graph_key_dict)),
         ", ".join(sorted(s_graph_key_dict)),
     ))
     enumerated_dev_pks = [("{:d}.{:d}".format(_idx, _pk), _pk)
                           for _idx, _pk in enumerate(dev_pks)]
     if self.para_dict["graph_setting"].merge_devices:
         # one device per graph
         graph_key_list = [[GraphTarget(g_key, enumerated_dev_pks, v_list)]
                           for g_key, v_list in s_graph_key_dict.items()]
     else:
         graph_key_list = []
         for g_key, v_list in sorted(s_graph_key_dict.items()):
             graph_key_list.append([
                 GraphTarget(g_key, [(dev_id, dev_pk)], v_list)
                 for dev_id, dev_pk in enumerated_dev_pks
             ])
     if self.para_dict["graph_setting"].merge_graphs:
         # set header
         [_gt.set_header("all") for _gt in sum(graph_key_list, [])]
     _num_g = sum([len(_graph_line) for _graph_line in graph_key_list])
     if self.para_dict["ordering"]:
         ORDER_MODE = self.para_dict["ordering"].upper()
         if ORDER_MODE.startswith("-"):
             _order_reverse = True
             ORDER_MODE = ORDER_MODE[1:]
         else:
             _order_reverse = False
     else:
         ORDER_MODE = "none"
     self.log("number of graphs to create: {:d}, ORDER_MODE is '{}'".format(
         _num_g, ORDER_MODE))
     graph_list = E.graph_list()
     for _graph_line in graph_key_list:
         graph_list.extend([
             _graph_target.graph_xml(dev_dict)
             for _graph_target in _graph_line
         ])
     self.early_return_call(graph_list)
     # print(etree.tostring(graph_list, pretty_print=True))
     graph_list = E.graph_list()
     _job_add_dict = self._get_jobs(dev_dict)
     for _graph_line in graph_key_list:
         self.log("starting graph_line")
         # iterate in case scale_mode is not None
         _iterate_line, _line_iteration = (True, 0)
         while _iterate_line:
             for _graph_target in _graph_line:
                 _graph_target.abs_file_loc = str(
                     os.path.join(self.para_dict["graph_root"],
                                  _graph_target.graph_name))
                 # clear list of defs, reset result
                 _graph_target.reset()
                 # reset colorizer for current graph
                 self.colorizer.reset()
                 self.abs_start_time = int((self.para_dict["start_time"] -
                                            self.dt_1970).total_seconds())
                 self.abs_end_time = int((self.para_dict["end_time_fc"] -
                                          self.dt_1970).total_seconds())
                 rrd_pre_args = [
                     _graph_target.abs_file_loc,
                     "-E",  # slope mode
                     "-Rlight",  # font render mode, slight hint
                     "-Gnormal",  # render mode
                     "-P",  # use pango markup
                     # "-nDEFAULT:8:",
                     "-w {:d}".format(graph_width),
                     "-h {:d}".format(graph_height),
                     "-aPNG",  # image format
                     # "--daemon", "unix:{}".format(global_config["RRD_CACHED_SOCKET"]),  # rrd caching daemon address
                     "-W {} by init.at".format(
                         License.objects.get_init_product().name),  # title
                     "--slope-mode",  # slope mode
                     "-cBACK#ffffff",
                     "--end",
                     "{:d}".format(self.abs_end_time),  # end
                     "--start",
                     "{:d}".format(self.abs_start_time),  # start
                     GraphVar(self, _graph_target, None, None,
                              "").header_line,
                 ]
                 # outer loop: iterate over all keys for the graph
                 for graph_key in sorted(_graph_target.graph_keys):
                     # inner loop: iterate over all dev ids for the graph
                     for _cur_id, cur_pk in _graph_target.dev_list:
                         # print "***", _cur_id, cur_pk
                         if (cur_pk, graph_key) in local_ds:
                             # resolve
                             for _mvs, _mvv in local_ds[(cur_pk,
                                                         graph_key)]:
                                 _take = True
                                 try:
                                     if os.stat(_mvs.file_name)[
                                             stat.ST_SIZE] < 100:
                                         self.log(
                                             "skipping {} (file is too small)"
                                             .format(_mvs.file_name, ),
                                             logging_tools.LOG_LEVEL_ERROR)
                                         _take = False
                                 except:
                                     self.log(
                                         "RRD file {} not accessible: {}".
                                         format(
                                             _mvs.file_name,
                                             process_tools.get_except_info(
                                             ),
                                         ), logging_tools.LOG_LEVEL_ERROR)
                                     _take = False
                                 if _take and _line_iteration == 0:
                                     # add GraphVars only on the first iteration
                                     # print "**", graph_key, _mvs.key, _mvv.key
                                     # store def
                                     _graph_target.add_def(
                                         (_mvs.key, _mvv.key),
                                         GraphVar(self, _graph_target, _mvs,
                                                  _mvv, graph_key,
                                                  dev_dict[cur_pk]),
                                         "header_str",
                                     )
                 if _graph_target.draw_keys:
                     draw_it = True
                     removed_keys = set()
                     while draw_it:
                         if self.para_dict[
                                 "graph_setting"].graph_setting_timeshift:
                             timeshift = self.para_dict[
                                 "graph_setting"].graph_setting_timeshift.seconds
                             if timeshift == 0:
                                 timeshift = self.abs_end_time - self.abs_start_time
                         else:
                             timeshift = 0
                         rrd_args = rrd_pre_args + sum([
                             _graph_target.graph_var_def(
                                 _key,
                                 timeshift=timeshift,
                             ) for _key in _graph_target.draw_keys
                         ], [])
                         rrd_args.extend(_graph_target.rrd_post_args)
                         rrd_args.extend([
                             "--title", "{} on {} (tf: {}{})".format(
                                 _graph_target.header,
                                 dev_dict.get(_graph_target.dev_list[0][1],
                                              "unknown")
                                 if len(_graph_target.dev_list) == 1 else
                                 logging_tools.get_plural(
                                     "device", len(_graph_target.dev_list)),
                                 logging_tools.get_diff_time_str(timeframe),
                                 ", with forecast"
                                 if self.para_dict["end_time"] !=
                                 self.para_dict["end_time_fc"] else "",
                             )
                         ])
                         rrd_args.extend(
                             self._create_job_args(_graph_target.dev_list,
                                                   _job_add_dict))
                         self.proc.flush_rrdcached(_graph_target.file_names)
                         try:
                             draw_result = rrdtool.graphv(
                                 *[str(_val) for _val in rrd_args])
                         except:
                             # in case of strange 'argument 0 has to be a string or a list of strings'
                             self.log(
                                 "error creating graph: {}".format(
                                     process_tools.get_except_info()),
                                 logging_tools.LOG_LEVEL_ERROR)
                              for _line in process_tools.icswExceptionInfo().log_lines:
                                 self.log("    {}".format(_line),
                                          logging_tools.LOG_LEVEL_ERROR)
                             if global_config["DEBUG"]:
                                 for _idx, _entry in enumerate(rrd_args, 1):
                                     self.log("  {:4d} {}".format(
                                         _idx, _entry))
                             draw_result = None
                             draw_it = False
                         else:
                             # compare draw results, add -l / -u when scale_mode is not None
                             val_dict = {}
                             # new code
                             for key, value in draw_result.items():
                                 if not key.startswith("print["):
                                     continue
                                 _xml = etree.fromstring(value)
                                 _unique_id = int(_xml.get("unique_id"))
                                 # print etree.tostring(_xml, pretty_print=True)
                                 try:
                                     value = float(_xml.text)
                                 except:
                                     value = None
                                 else:
                                     pass  # value = None if value == 0.0 else value
                                 _s_key, _v_key = (_xml.get("mvs_key"),
                                                   _xml.get("mvv_key"))
                                 if value is not None:
                                     _key = (_unique_id, (_s_key, _v_key))
                                     val_dict.setdefault(
                                         _key,
                                         {})[_xml.get("cf")] = (value, _xml)
                             # list of empty (all none or 0.0 values) keys
                             _zero_keys = [
                                 key for key, value in val_dict.items()
                                 if all([
                                     _v[0] in [0.0, None]
                                     or math.isnan(_v[0])
                                     for _k, _v in value.items()
                                 ])
                             ]
                             if _zero_keys and self.para_dict[
                                     "graph_setting"].hide_empty:
                                 # remove all-zero structs
                                 val_dict = {
                                     key: value
                                     for key, value in val_dict.items()
                                     if key not in _zero_keys
                                 }
                             for key, value in val_dict.items():
                                 _graph_target.feed_draw_result(key, value)
                             # check if the graphs shall always include y=0
                             draw_it = False
                             if self.para_dict[
                                     "graph_setting"].include_zero:
                                 if "value_min" in draw_result and "value_max" in draw_result:
                                     if draw_result["value_min"] > 0.0:
                                         _graph_target.set_post_arg(
                                             "-l", "0")
                                         draw_it = True
                                     if draw_result["value_max"] < 0.0:
                                         _graph_target.set_post_arg(
                                             "-u", "0")
                                         draw_it = True
                             # check for empty graphs
                             empty_keys = set(
                                 _graph_target.draw_keys) - set(
                                     val_dict.keys())
                             if empty_keys and self.para_dict[
                                     "graph_setting"].hide_empty:
                                 self.log("{}: {}".format(
                                     logging_tools.get_plural(
                                         "empty key", len(empty_keys)),
                                     ", ".join(
                                         sorted([
                                             "{} (dev {:d})".format(
                                                 _key, _pk)
                                             for _pk, _key in empty_keys
                                         ])),
                                 ))
                                 removed_keys |= empty_keys
                                 _graph_target.remove_keys(empty_keys)
                                 # draw_keys = [_key for _key in draw_keys if _key not in empty_keys]
                                 if not _graph_target.draw_keys:
                                     draw_result = None
                                 else:
                                     draw_it = True
                     _graph_target.draw_result = draw_result
                     _graph_target.removed_keys = removed_keys
                 else:
                     self.log(
                         "no DEFs for graph_key_dict {}".format(
                             _graph_target.graph_key),
                         logging_tools.LOG_LEVEL_ERROR)
             # check if we should rerun the graphing process
             _iterate_line = False
             _valid_graphs = [
                 _entry for _entry in _graph_line if _entry.valid
             ]
             if _line_iteration == 0:
                 _iterate_scale = self.para_dict[
                     "graph_setting"].scale_mode in [
                          GraphScaleModeEnum.level,
                         GraphScaleModeEnum.to100
                     ] and (len(_valid_graphs) > 1
                            or self.para_dict["graph_setting"].scale_mode
                            == GraphScaleModeEnum.to100)
                 _iterate_order = True if self.para_dict[
                     "ordering"] else False
                 if _iterate_order or _iterate_scale:
                     _line_iteration += 1
                     if _iterate_scale:
                         if self.para_dict[
                                 "graph_setting"].scale_mode == GraphScaleModeEnum.level:
                             _vmin_v, _vmax_v = (
                                 [
                                     _entry.draw_result["value_min"]
                                     for _entry in _valid_graphs
                                 ],
                                 [
                                     _entry.draw_result["value_max"]
                                     for _entry in _valid_graphs
                                 ],
                             )
                              if len(set(_vmin_v)) > 1 or len(set(_vmax_v)) > 1:
                                 _vmin, _vmax = (
                                     FLOAT_FMT.format(min(_vmin_v)),
                                     FLOAT_FMT.format(max(_vmax_v)),
                                 )
                                 self.log(
                                     "setting y_min / y_max for {} to {} / {}"
                                     .format(
                                         _valid_graphs[0].graph_key,
                                         _vmin,
                                         _vmax,
                                     ))
                                 [
                                     _entry.set_y_mm(_vmin, _vmax)
                                     for _entry in _valid_graphs
                                 ]
                                 _iterate_line = True
                         else:
                             [
                                 _entry.adjust_max_y(100)
                                 for _entry in _valid_graphs
                             ]
                             self.log("set max y_val to 100 for all graphs")
                             _iterate_line = True
                     if _iterate_order:
                         _order_any = False
                         for _graph in _valid_graphs:
                             if len(_graph.draw_keys):
                                 _draw_res = _graph.get_draw_result(
                                     only_valid=True)
                                 new_draw_keys = sorted(
                                     _graph.draw_keys,
                                     key=lambda entry: _draw_res[entry][
                                         ORDER_MODE][0],
                                     reverse=_order_reverse)
                                 if _graph.draw_keys != new_draw_keys:
                                     _graph.draw_keys = new_draw_keys
                                     _order_any = True
                         if _order_any:
                             _line_iteration += 1
                             _iterate_line = True
             if not _iterate_line:
                 # graph list is no longer needed because we transfer the results via WebSocket(s)
                 # to the requiring frontend
                 graph_list.extend([
                     _graph_target.graph_xml(dev_dict)
                     for _graph_target in _graph_line
                 ])
                 propagate_channel_object(
                     WSStreamEnum.rrd_graph, {
                         "list": [
                             _graph_target.graph_json(dev_dict)
                             for _graph_target in _graph_line
                         ]
                     })
     # print(etree.tostring(graph_list, pretty_print=True))
     return graph_list
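The drawing step above reduces to a single rrdtool.graphv() call; graphv() works like graph() but returns a dict of metadata, including the value_min / value_max entries that the include_zero logic inspects. A minimal sketch, assuming the Python rrdtool bindings and an existing RRD file at the illustrative path:

 import rrdtool

 info = rrdtool.graphv(
     "/tmp/load.png",  # output image
     "--start", "-1h",
     "--end", "now",
     "-w", "400",
     "-h", "200",
     "DEF:l1=/var/lib/rrd/load.rrd:load1:AVERAGE",  # illustrative RRD path
     "LINE1:l1#0000ff:load1",
 )
 # graphv() returns graph metadata as a dict
 print(info.get("value_min"), info.get("value_max"))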
Example #25
 def _update(self, key, from_reply=False):
     cur_time = time.time()
     # print cur_time
     # pprint.pprint(self.__work_dict)
     if key in self and key not in self.__handled:
         value = self[key]
         if value["sent"] < value["num"]:
             # send if last send was at least slide_time ago
             if value["next_send"] <= cur_time:  # or value["recv_ok"] == value["sent"]:
                 # print key, value["recv_ok"], value["sent"], value["next_send"] <= cur_time
                 value["sent"] += 1
                 try:
                     self.send_echo(value["host"])
                 except:
                     value["error_list"].append(process_tools.get_except_info())
                     for l in process_tools.icswExceptionInfo().log_lines:
                         print("l", l)
                     self.log(
                         "error sending to {}: {}".format(
                             value["host"],
                             ", ".join(value["error_list"])
                         ),
                         logging_tools.LOG_LEVEL_ERROR
                     )
                 else:
                     value["sent_list"][self.echo_seqno] = time.time()
                     value["next_send"] = cur_time + value["slide_time"]
                     self.__seqno_dict[self.echo_seqno] = key
                     if value["sent"] < value["num"]:
                         self.__process.register_timer(
                             self._update,
                             value["slide_time"] + 0.001,
                             oneshot=True,
                             data=key
                         )
                     if value["sent"] == 1:
                         # register final timeout
                         # print "reg_to", key, value["timeout"]
                         self.__process.register_timer(
                             self._update,
                             value["timeout"],
                             oneshot=True,
                             data=key
                         )
         # check for timeout
         # print value["sent_list"]
         if not from_reply:
             # only check timeouts when called from reactor via callLater
             for seq_to in [
                 s_key for s_key in value["sent_list"]
                 if cur_time >= value["end"] and s_key not in value["recv_list"]
             ]:
                 value["recv_fail"] += 1
                 value["recv_list"][seq_to] = None
         # check for ping finish
         if value["error_list"] or (
             value["sent"] == value["num"] and value["recv_ok"] + value["recv_fail"] == value["num"] or abs(cur_time - value["start"]) > value["timeout"]
         ):
             all_times = [
                 value["recv_list"][s_key] - value["sent_list"][s_key]
                 for s_key in value["sent_list"]
                 if value["recv_list"].get(s_key) is not None
             ]
             if key in self.__group_dict:
                 t_seq_str = self.__group_dict[key]
                 self.__group_dict[t_seq_str][key] = (
                     value["host"],
                     value["sent"],
                     value["recv_ok"],
                     all_times,
                     ", ".join(value["error_list"])
                 )
                 if all(
                     _value is not None
                     for _value in self.__group_dict[t_seq_str].values()
                 ):
                     # group done
                     self.__process.send_ping_result(
                         t_seq_str,
                         list(self.__group_dict[t_seq_str].values())
                     )
                     del self.__group_dict[t_seq_str]
                 del self.__group_dict[key]
             else:
                 self.__process.send_ping_result(
                     key,
                     value["sent"],
                     value["recv_ok"],
                     all_times,
                     ", ".join(value["error_list"])
                 )
             self.__handled.add(key)  # del self[key]
             self.__pings_in_flight -= 1
     else:
         if from_reply:
             # should only happen for delayed pings or pings with error
             self.log(
                 "got delayed ping reply ({})".format(
                     key
                 ),
                 logging_tools.LOG_LEVEL_WARN
             )
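
For reference, here is a sketch of the per-key bookkeeping entry that _update() above appears to operate on, reconstructed from the keys it reads and writes; the real initialisation lives elsewhere in the class and may differ:

import time

# assumed structure of one self[key] entry (reconstructed, not verbatim)
ping_entry = {
    "host": "192.168.1.1",      # target address
    "num": 3,                   # echo requests to send in total
    "sent": 0,                  # echo requests sent so far
    "recv_ok": 0,               # replies received
    "recv_fail": 0,             # requests counted as timed out
    "slide_time": 0.1,          # minimum delay between two sends
    "timeout": 5.0,             # overall timeout for this ping
    "start": time.time(),       # start timestamp
    "end": time.time() + 5.0,   # absolute deadline (assumption)
    "next_send": time.time(),   # earliest time for the next send
    "sent_list": {},            # seqno -> send timestamp
    "recv_list": {},            # seqno -> receive timestamp, None on timeout
    "error_list": [],           # collected send errors
}
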
Example #26
    def _graph_rrd(self, *args, **kwargs):
        srv_com = server_command.srv_command(source=args[0])
        orig_dev_pks = srv_com.xpath(".//device_list/device/@pk",
                                     smart_strings=False)
        orig_dev_pks = device.objects.filter(
            Q(pk__in=orig_dev_pks) & Q(machinevector__pk__gt=0)).values_list(
                "pk", flat=True)
        dev_pks = [
            dev_pk for dev_pk in orig_dev_pks
            if self.EC.consume("graph", dev_pk)
        ]
        if len(orig_dev_pks) != len(dev_pks):
            self.log(
                "Access to device rrds denied due to ova limits: {}".format(
                    set(orig_dev_pks).difference(dev_pks)),
                logging_tools.LOG_LEVEL_ERROR,
            )

        graph_keys = json.loads(srv_com["*graph_key_list"])
        para_dict = {
            para.tag: para.text
            for para in srv_com.xpath(".//parameters", smart_strings=False)[0]
        }
        for key in ["start_time", "end_time"]:
            # cast to datetime
            para_dict[key] = dateutil.parser.parse(para_dict[key])
        _raw = json.loads(para_dict["graph_setting"])
        # fake name
        _raw["name"] = uuid.uuid4().urn
        _setting = GraphSettingSerializerCustom(data=_raw)

        self._early_return_sent = False

        def early_return_call(xml_el):
            if not self._early_return_sent:
                srv_com["graphs"] = xml_el
                self._early_return_sent = True
                srv_com.set_result(
                    "generated {}".format(
                        logging_tools.get_plural("graph", len(xml_el))),
                    server_command.SRV_REPLY_STATE_OK)
                self.send_pool_message("remote_call_async_result",
                                       str(srv_com))
            else:
                self.log("return already sent", logging_tools.LOG_LEVEL_ERROR)

        if _setting.is_valid():
            para_dict["graph_setting"] = _setting.save()
            for key, _default in [
                ("debug_mode", "0"),
            ]:
                para_dict[key] = bool(int(para_dict.get(key, _default)))
            self._open_rrdcached_socket()
            try:
                graph_list = RRDGraph(
                    self.graph_root_debug if para_dict.get(
                        "debug_mode", False) else self.graph_root,
                    self.log,
                    self.colorizer,
                    para_dict,
                    self,
                    early_return_call,
                ).graph(dev_pks, graph_keys)
            except:
                for _line in process_tools.icswExceptionInfo().log_lines:
                    self.log(_line, logging_tools.LOG_LEVEL_ERROR)
                srv_com["graphs"] = []
                srv_com.set_result(
                    "error generating graphs: {}".format(
                        process_tools.get_except_info()),
                    server_command.SRV_REPLY_STATE_CRITICAL)
            else:
                if not self._early_return_sent:
                    early_return_call(graph_list)
                # srv_com["graphs"] = graph_list
                # srv_com.set_result(
                #    "generated {}".format(logging_tools.get_plural("graph", len(graph_list))),
                #    server_command.SRV_REPLY_STATE_OK
                # )
        else:
            srv_com["graphs"] = []
            srv_com.set_result(
                "graphsettings are not valid: {}".format(str(_setting.errors)),
                server_command.SRV_REPLY_STATE_CRITICAL)

        self._close_rrdcached_socket()
        if not self._early_return_sent:
            self.send_pool_message("remote_call_async_result", str(srv_com))
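
The early_return_call closure above implements a one-shot reply guard: the first invocation ships the result, every later one only logs. A minimal sketch of the pattern follows; class and callable names are illustrative, not the actual server classes:

class OneShotReply:
    def __init__(self, send):
        self._send = send    # callable that actually ships the reply
        self._sent = False

    def __call__(self, payload):
        # only the first call wins, later calls are logged and dropped
        if not self._sent:
            self._sent = True
            self._send(payload)
        else:
            print("return already sent")

reply = OneShotReply(lambda p: print("sending", p))
reply("graphs-xml")  # sending graphs-xml
reply("graphs-xml")  # return already sent
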
Example #27
    def __call__(self):
        if self.Meta.background:
            if self.Meta.cur_running < self.Meta.max_instances:
                self.Meta.cur_running += 1
                icswCSComInstance.bg_idx += 1
                new_bg_name = "bg_{}_{:d}".format(self.sc_obj.name,
                                                  icswCSComInstance.bg_idx)

                self.sc_obj.main_proc.add_process(
                    BackgroundProcess(new_bg_name), start=True)

                self.sc_obj.main_proc.send_to_process(new_bg_name,
                                                      "set_option_dict",
                                                      self.option_dict)
                self.sc_obj.main_proc.send_to_process(
                    new_bg_name,
                    "set_srv_com",
                    str(self.srv_com),
                )
                self.sc_obj.main_proc.send_to_process(
                    new_bg_name,
                    "start_command",
                    self.sc_obj.name,
                )
                db_tools.close_connection()
                self.srv_com.set_result("sent to background")
            else:
                self.srv_com.set_result(
                    "too many instances running ({:d} of {:d})".format(
                        self.Meta.cur_running, self.Meta.max_instances),
                    server_command.SRV_REPLY_STATE_ERROR)
        else:
            self.start_time = time.time()
            try:
                result = self.sc_obj._call(self)
            except:
                exc_info = process_tools.icswExceptionInfo()
                for line in exc_info.log_lines:
                    self.log(line, logging_tools.LOG_LEVEL_CRITICAL)
                self.srv_com.set_result(
                    process_tools.get_except_info(exc_info.except_info),
                    server_command.SRV_REPLY_STATE_CRITICAL)
                # write to logging-server
                err_h = io_stream_helper.icswIOStream(
                    icswLogHandleTypes.err_py, zmq_context=self.zmq_context)
                err_h.write("\n".join(exc_info.log_lines))
                err_h.close()
            else:
                if result is not None:
                    self.log(
                        "command got an (unexpected) result: '{}'".format(
                            str(result)), logging_tools.LOG_LEVEL_ERROR)
            self.end_time = time.time()
            if int(self.srv_com["result"].attrib["state"]):
                self.log(
                    "result is ({:d}) {}".format(
                        int(self.srv_com["result"].attrib["state"]),
                        self.srv_com["result"].attrib["reply"]),
                    logging_tools.LOG_LEVEL_ERROR)
            if self.Meta.show_execution_time:
                self.log("run took {}".format(
                    logging_tools.get_diff_time_str(self.end_time -
                                                    self.start_time)))
                self.srv_com["result"].attrib["reply"] = "{} in {}".format(
                    self.srv_com["result"].attrib["reply"],
                    logging_tools.get_diff_time_str(self.end_time -
                                                    self.start_time))
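
The background branch above throttles on class-level Meta counters. A sketch of that bookkeeping; the attribute names mirror the snippet, everything else is invented for illustration:

class Meta:
    background = True
    max_instances = 2
    cur_running = 0

def try_start_background():
    # cap the number of concurrently running background workers
    if Meta.cur_running < Meta.max_instances:
        Meta.cur_running += 1  # decremented when the worker exits (not shown)
        return True            # caller would now spawn a BackgroundProcess
    return False               # caller replies "too many instances running"

print(try_start_background())  # True
print(try_start_background())  # True
print(try_start_background())  # False
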
Example #28
_mods = [
    cur_entry for cur_entry in [
        entry.split(".")[0] for entry in os.listdir(os.path.dirname(__file__))
        if entry.endswith(".py")
    ] if cur_entry and not cur_entry.startswith("_")
]

ALL_PERFDATA = {}
IMPORT_ERRORS = []

for mod_name in _mods:
    # no circular import
    if mod_name in ["base"]:
        continue
    try:
        new_mod = importlib.import_module(
            "initat.collectd.collectd_types.{}".format(mod_name))
    except:
        exc_info = process_tools.icswExceptionInfo()
        IMPORT_ERRORS.extend(exc_info.log_lines)
    else:
        for _name in dir(new_mod):
            _entry = getattr(new_mod, _name)
            if inspect.isclass(_entry):
                if _entry is not PerfdataObject and issubclass(_entry, PerfdataObject):
                    _inst = _entry()
                    ALL_PERFDATA["{}_{}".format(mod_name, _name)] = (_inst.PD_RE, _inst)
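
The scan above is a light plugin-discovery pattern: list the package directory, import every module, and register each subclass of a base class. A generic, self-contained sketch of the same pattern under assumed arguments (package layout and base class are placeholders):

import importlib
import inspect
import os

def discover_plugins(pkg_dir, pkg_name, base_class, skip=("base",)):
    registry, errors = {}, []
    mod_names = {
        entry.split(".")[0]
        for entry in os.listdir(pkg_dir)
        if entry.endswith(".py") and not entry.startswith("_")
    }
    for mod_name in sorted(mod_names - set(skip)):
        try:
            mod = importlib.import_module("{}.{}".format(pkg_name, mod_name))
        except Exception as exc:
            # collect import errors instead of aborting the scan
            errors.append("{}: {}".format(mod_name, exc))
            continue
        for name, obj in inspect.getmembers(mod, inspect.isclass):
            if issubclass(obj, base_class) and obj is not base_class:
                registry["{}_{}".format(mod_name, name)] = obj()
    return registry, errors
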
Example #29
 def _take_config(self, request, conf, ccat):
     _sets = {}
     # for key in conf.iterkeys():
     #    # remove all subsets, needed because of limitations in DRF
     #    if key.endswith("_set") and conf[key]:
     #        _sets[key] = conf[key]
     #        conf[key] = []
     if not conf.get("description", None):
         # fix missing or None description
         conf["description"] = ""
     _ent = config_dump_serializer(data=conf)
     added = 0
     sub_added = 0
     try:
         _exists = config.objects.get(
             Q(name=conf["name"]) & Q(config_catalog=ccat))
     except config.DoesNotExist:
         _take = True
     else:
         request.xml_response.error(
             "config {} already exists in config catalog {}".format(
                 conf["name"], str(ccat)),
             logger=logger)
         _take = False
     # we create the config with a dummy name to simplify matching of vars / scripts /
     # monccs against configs with same name but different catalogs
     dummy_name = "_ul_config_{:d}".format(int(time.time()))
     taken = False
     if _take:
         if _ent.is_valid():
             _ent.create_default_entries = False
             try:
                 # store config with config catalog
                 _ent.save(name=dummy_name, config_catalog=ccat)
             except:
                 for entry in process_tools.icswExceptionInfo().log_lines:
                     logger.log(logging_tools.LOG_LEVEL_ERROR, entry)
                 request.xml_response.error(
                     "error saving entry '{}': {}".format(
                         str(_ent), process_tools.get_except_info()),
                     logger=logger)
             else:
                 taken = True
                 # add sub-sets
                 for key in _sets.keys():
                     for entry in _sets[key]:
                         entry["config"] = dummy_name
                         if not entry.get("description", None):
                             # fix simple structure errors
                             entry["description"] = "dummy description"
                         _sub_ent = getattr(
                             serializers, "{}_nat_serializer".format(
                                 key[:-4]))(data=entry)
                         if _sub_ent.is_valid():
                             try:
                                 _sub_ent.object.save()
                             except:
                                 request.xml_response.error(
                                     "error saving subentry '{}': {}".
                                     format(
                                         str(_sub_ent),
                                         process_tools.get_except_info()),
                                     logger=logger)
                             else:
                                 sub_added += 1
                         else:
                             request.xml_response.error(
                                 "cannot create {} object: {}".format(
                                     key, str(_sub_ent.errors)),
                                 logger=logger)
                 added += 1
                 _ent.object.name = conf["name"]
                 _ent.object.save()
                 request.xml_response["new_pk"] = "{:d}".format(
                     _ent.object.pk)
                 request.xml_response.info(
                     "create new config {} ({:d}) in config catalog {}".
                     format(str(_ent.object), sub_added, str(ccat)))
         else:
             request.xml_response.error(
                 "cannot create config object: {}".format(str(_ent.errors)),
                 logger=logger)
     return taken
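
The dummy-name dance in _take_config() (save under _ul_config_<epoch>, attach sub-entries, then rename) exists so sub-entries can be matched unambiguously even when configs in different catalogs share a name. A toy sketch of the two-phase save; the store and helper are invented for illustration:

import time

def dummy_name():
    # unique throwaway name, as in the snippet above
    return "_ul_config_{:d}".format(int(time.time()))

def two_phase_save(store, real_name, sub_entries):
    tmp = dummy_name()
    store[tmp] = {"subs": list(sub_entries)}  # sub-entries bind to the unique name
    store[real_name] = store.pop(tmp)         # final rename to the real name
    return real_name

store = {}
print(two_phase_save(store, "my_config", ["var_a", "script_b"]))  # my_config
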
Example #30
    def handle(self, gbc, hbc, cur_gc, s_check, mode):
        # import reverse lut for meta subcommands
        # init Dynamic Check Result
        rv = DynamicCheckResult()
        # mccs .... mon_check_command_special
        # mccs = gbc.mccs_dict[s_check.mccs_id]
        # mccs = s_check.mon_check_command_special
        # mccs to be called
        call_check = s_check
        # store name of mccs (for parenting)
        check_name = s_check.name
        if s_check.special_parent_id:
            print("handle special meta")
            # to get the correct command_line
            # link to parent
            # mccs = mccs.parent
            check_has_parent = True
        else:
            check_has_parent = False
        # create lut entry to rewrite command name to mccs
        rv.rewrite_lut["check_command"] = s_check.unique_name
        # print("***", _rewrite_lut)
        try:
            # create special check instance
            cur_special = self[s_check.name](
                hbc.log,
                self,
                # get mon_check_command (we need arg_ll)
                s_check=cur_gc["command"][s_check.unique_name],
                # monitoring check command
                parent_check=s_check,
                host=hbc.device,
                build_cache=gbc,
            )
        except:
            rv.feed_error("unable to initialize special '{}': {}".format(
                s_check.name, process_tools.get_except_info()))
        else:
            # calling handle to return a list of checks with format
            # [(description, [ARG1, ARG2, ARG3, ...]), (...)]
            try:
                if mode == DynamicCheckMode.create:
                    if check_has_parent:
                        # for meta specials
                        sc_array = cur_special(mode, instance=check_name)
                    else:
                        # sc array is the list of instances to be called
                        sc_array = cur_special(mode)
                else:
                    # fetch mode, currently not supported for meta checks
                    if cur_special.Meta.meta:
                        self.log(
                            "mode {} not supported for meta checks".format(
                                mode), logging_tools.LOG_LEVEL_CRITICAL)
                    else:
                        if cur_special.Meta.server_contact:
                            if hasattr(cur_special, "dynamic_update_calls") and hasattr(cur_special, "feed_result"):
                                rv.set_server_contact(cur_special)
                            else:
                                self.log(
                                    "special check {} has no dynamic_update_calls() or feed_result() function".format(
                                        check_name),
                                    logging_tools.LOG_LEVEL_ERROR)
            except:
                exc_info = process_tools.icswExceptionInfo()
                rv.feed_error("error calling special {}:".format(mccs.name), )
                for line in exc_info.log_lines:
                    rv.feed_error(" - {}".format(line))
                sc_array = []
            finally:
                cur_special.cleanup()
            if mode == DynamicCheckMode.create:
                if cur_special.Meta.meta and sc_array and not check_has_parent:
                    # dive in subcommands, for instance 'all SNMP checks'
                    # check for configs not really configured
                    # print("-" * 50)
                    # print("*", sc_array)
                    # this has to be fixed, check lines 329 ff. from build_cache.py
                    _dead_coms = [
                        # _entry for _entry in sc_array if not hasattr(gbc.mccs_dict[_entry], "check_command_name")
                    ]
                    if _dead_coms:
                        rv.feed_error(
                            "unconfigured checks: {}".format(", ".join(
                                sorted(_dead_coms))), )
                    # we return a list of config names (to be iterated over)

                    rv.set_configs([
                        META_SUB_REVERSE_LUT[_entry] for _entry in sc_array
                        if _entry not in _dead_coms
                    ])
                else:
                    # we return a list of checks
                    rv.set_checks(sc_array)
        return rv
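
handle() above only exercises a handful of methods on DynamicCheckResult. Here is a stub reconstructed purely from those call sites (rewrite_lut, feed_error, set_checks, set_configs, set_server_contact) to document the implied interface; the real class lives elsewhere in the codebase:

class DynamicCheckResult:
    def __init__(self):
        self.rewrite_lut = {}      # e.g. rewrite "check_command" to a unique name
        self.errors = []           # error lines fed via feed_error()
        self.checks = []           # check list for the create mode
        self.configs = []          # config names for meta specials
        self.server_contact = None

    def feed_error(self, line):
        self.errors.append(line)

    def set_checks(self, sc_array):
        self.checks = sc_array

    def set_configs(self, config_names):
        self.configs = config_names

    def set_server_contact(self, special):
        self.server_contact = special
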