Example no. 1
    def hostmonitor_full_update_handler_callback(self, callback_dict, result):
        callback_dict["command"] = "full_update_status"
        callback_dict["result"] = -1
        callback_dict["error_string"] = None

        if result:
            if "update_status" in result:
                callback_dict["result"] = 1

                progress = float(result["update_status"].text)
                if progress < 100.00:
                    # update not finished yet: send another full_update
                    # command to the host monitor and re-register this
                    # callback so the next progress value is reported as well
                    _device = callback_dict["device"]
                    conn_str = "tcp://{}:{:d}".format(_device.target_ip,
                                                      self.__hm_port)
                    new_srv_com = server_command.srv_command(
                        command="full_update")
                    hm_command = HostMonitoringCommand(
                        self.hostmonitor_full_update_handler_callback,
                        callback_dict,
                        timeout=5)

                    self.discovery_process.send_pool_message(
                        "send_host_monitor_command", hm_command.run_index,
                        conn_str, str(new_srv_com))
                callback_dict["progress"] = progress
            else:
                callback_dict["error_string"], _ = result.get_result()

        # publish a copy of the status dict without the device object itself
        callback_dict_copy = {
            key: value
            for key, value in callback_dict.items() if key != "device"
        }
        propagate_channel_object(WSStreamEnum.hm_status, callback_dict_copy)
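All of these handlers publish to a WebSocket stream identified by a WSStreamEnum member. Judging only from the members referenced in the examples in this section, the constant is presumably a plain Enum along the following lines; the concrete values and any additional members are assumptions.

# Hypothetical reconstruction of WSStreamEnum, inferred from the members
# referenced in these examples; the real definition in
# initat/cluster/backbone/websockets/constants.py may differ.
from enum import Enum


class WSStreamEnum(Enum):
    hm_status = "hm_status"
    background_jobs = "background_jobs"
    device_log_entries = "device_log_entries"
    ova_counter = "ova_counter"
    device_scan_lock = "device_scan_lock"
    nmap_scans = "nmap_scans"
    asset_batch = "asset_batch"
    rrd_graph = "rrd_graph"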
Example no. 2
def background_job_post_save(sender, **kwargs):
    if "instance" in kwargs:
        from ..websockets.constants import WSStreamEnum
        propagate_channel_object(WSStreamEnum.background_jobs, {
            "background_jobs":
            background_job.objects.get_number_of_pending_jobs()
        })
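background_job_post_save and the similar handlers in the following examples are plain functions; how they are registered is not shown here. A minimal sketch of the usual wiring via Django's post_save signal follows; the import path of the background_job model is an assumption, while post_save.connect itself is standard Django.

# Sketch only: one possible registration of the handler above.
# The actual project may do this in a signals module or in AppConfig.ready().
from django.db.models.signals import post_save
from initat.cluster.backbone.models import background_job  # assumed path

post_save.connect(background_job_post_save, sender=background_job)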
Example no. 3
def device_log_entry_post_save(sender, **kwargs):
    from initat.cluster.backbone.serializers import DeviceLogEntrySerializer
    from initat.tools.bgnotify.create import propagate_channel_object
    from ..websockets.constants import WSStreamEnum
    if "instance" in kwargs:
        cur_inst = kwargs["instance"]
        info_obj = DeviceLogEntrySerializer(cur_inst).data
        propagate_channel_object(WSStreamEnum.device_log_entries, info_obj)
Example no. 4
def icsw_egg_cradle_post_save(sender, **kwargs):
    if "instance" in kwargs:
        from initat.cluster.backbone.serializers import icswEggCradleSerializer
        from ..websockets.constants import WSStreamEnum
        _inst = kwargs["instance"]
        propagate_channel_object(
            WSStreamEnum.ova_counter,
            icswEggCradleSerializer(_inst).data,
        )
Example no. 5
def device_scan_lock_post_save(sender, **kwargs):
    if "instance" in kwargs:
        from initat.tools.bgnotify.create import propagate_channel_object
        from ..serializers import DeviceScanLockSerializer
        from ..websockets.constants import WSStreamEnum
        _inst = kwargs["instance"]
        # print("create", _inst)
        propagate_channel_object(
            WSStreamEnum.device_scan_lock,
            DeviceScanLockSerializer(_inst).data
        )
Example no. 6
def nmap_scan_post_save(sender, **kwargs):
    _ = sender
    if "instance" in kwargs:
        from initat.cluster.backbone.serializers import NmapScanSerializerSimple
        from initat.cluster.backbone.websockets.constants import WSStreamEnum

        cur_inst = kwargs["instance"]

        serializer = NmapScanSerializerSimple(cur_inst)

        propagate_channel_object(WSStreamEnum.nmap_scans, serializer.data)
Example no. 7
def asset_batch_post_save(sender, **kwargs):
    _ = sender
    if "instance" in kwargs:
        from initat.cluster.backbone.serializers import SimpleAssetBatchSerializer
        from initat.cluster.backbone.websockets.constants import WSStreamEnum

        cur_inst = kwargs["instance"]

        serializer = SimpleAssetBatchSerializer(cur_inst)

        propagate_channel_object(WSStreamEnum.asset_batch, serializer.data)
Example no. 8
    def hostmonitor_update_modules_handler_callback(callback_dict, result):
        callback_dict["command"] = "update_modules"

        if "new_modules_fingerprint" in result:
            callback_dict["result"] = 1
            callback_dict["new_fingerprint"] = result[
                "new_modules_fingerprint"].text
        else:
            callback_dict["result"] = -1
            callback_dict["error_string"], _ = result.get_result()

        propagate_channel_object(WSStreamEnum.hm_status, callback_dict)
Example no. 9
    def hostmonitor_status_schedule_handler_callback(callback_dict, result):
        callback_dict["result"] = None
        if callback_dict["command"] == "platform":
            try:
                callback_dict["result"] = PlatformSystemTypeEnum(
                    int(result["platform"].text)).name
                callback_dict["platform_bits"] = "N/A"
                if "platform_bits" in result:
                    callback_dict["platform_bits"] = result[
                        "platform_bits"].text
            except Exception as e:
                _ = e
        elif callback_dict["command"] == "version":
            try:
                callback_dict["result"] = result["version"].text
            except Exception as e:
                _ = e
        elif callback_dict["command"] == "modules_fingerprint":
            try:
                callback_dict["result"] = result["checksum"].text
            except Exception as e:
                _ = e

        propagate_channel_object(WSStreamEnum.hm_status, callback_dict)
Example no. 10
def propagate_channel_message(request, group):
    from initat.tools.bgnotify.create import propagate_channel_object
    from initat.cluster.backbone.websockets.constants import WSStreamEnum
    # forward the raw JSON request body to the stream named by 'group'
    propagate_channel_object(getattr(WSStreamEnum, group), json.loads(request.body))
    return HttpResponse("ok")
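propagate_channel_object itself comes from initat.tools.bgnotify.create and is not part of these examples. Purely as an assumed sketch, a helper with this call signature could be built on Django Channels' group_send, broadcasting the payload to a group named after the stream; the real implementation may differ substantially.

# Assumed sketch of a propagate_channel_object-style helper; not the actual
# implementation from initat.tools.bgnotify.create.
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer


def propagate_channel_object_sketch(stream_enum, payload):
    # broadcast 'payload' to every consumer subscribed to the stream's group;
    # 'send.object' would map to a send_object() consumer method (assumption)
    channel_layer = get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        stream_enum.name,
        {"type": "send.object", "payload": payload},
    )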
Example no. 11
 def graph(self, dev_pks, graph_keys):
     # end time with forecast
     local_ds = DataSource(self.log_com, dev_pks, graph_keys,
                           self.colorizer)
     self.para_dict["end_time_fc"] = self.para_dict["end_time"]
     if self.para_dict["graph_setting"].graph_setting_forecast:
         _fc = self.para_dict["graph_setting"].graph_setting_forecast
         if _fc.seconds:
             # add seconds
             self.para_dict["end_time_fc"] += datetime.timedelta(
                 seconds=_fc.seconds)
         else:
             # add timeframe
             self.para_dict["end_time_fc"] += self.para_dict[
                 "end_time"] - self.para_dict["start_time"]
     timeframe = abs((self.para_dict["end_time_fc"] -
                      self.para_dict["start_time"]).total_seconds())
     graph_width, graph_height = (
         self.para_dict["graph_setting"].graph_setting_size.width,
         self.para_dict["graph_setting"].graph_setting_size.height,
     )
     self.log("width / height : {:d} x {:d}, timeframe {}".format(
         graph_width,
         graph_height,
         logging_tools.get_diff_time_str(timeframe),
     ))
     # store for DEF generation
     self.width = graph_width
     self.height = graph_height
     dev_dict = {
         cur_dev.pk: str(cur_dev.display_name)
         for cur_dev in device.objects.filter(Q(pk__in=dev_pks))
     }
     s_graph_key_dict = self._create_graph_keys(graph_keys)
     self.log("found {}: {}".format(
         logging_tools.get_plural("device", len(dev_pks)), ", ".join([
             "{:d} ({})".format(pk, dev_dict.get(pk, "unknown"))
             for pk in dev_pks
         ])))
     if self.para_dict["graph_setting"].merge_graphs:
         # reorder all graph_keys into one graph_key_dict
         s_graph_key_dict = {
             "all": sum(list(s_graph_key_dict.values()), [])
         }
     self.log("{}: {}".format(
         logging_tools.get_plural("graph key", len(graph_keys)),
         ", ".join([full_graph_key(_v) for _v in graph_keys])))
     self.log("{}: {}".format(
         logging_tools.get_plural("top level key", len(s_graph_key_dict)),
         ", ".join(sorted(s_graph_key_dict)),
     ))
     enumerated_dev_pks = [("{:d}.{:d}".format(_idx, _pk), _pk)
                           for _idx, _pk in enumerate(dev_pks)]
     if self.para_dict["graph_setting"].merge_devices:
         # one device per graph
         graph_key_list = [[GraphTarget(g_key, enumerated_dev_pks, v_list)]
                           for g_key, v_list in s_graph_key_dict.items()]
     else:
         graph_key_list = []
         for g_key, v_list in sorted(s_graph_key_dict.items()):
             graph_key_list.append([
                 GraphTarget(g_key, [(dev_id, dev_pk)], v_list)
                 for dev_id, dev_pk in enumerated_dev_pks
             ])
     if self.para_dict["graph_setting"].merge_graphs:
         # set header
         for _gt in sum(graph_key_list, []):
             _gt.set_header("all")
     _num_g = sum([len(_graph_line) for _graph_line in graph_key_list])
     if self.para_dict["ordering"]:
         ORDER_MODE = self.para_dict["ordering"].upper()
         if ORDER_MODE.startswith("-"):
             _order_reverse = True
             ORDER_MODE = ORDER_MODE[1:]
         else:
             _order_reverse = False
     else:
         ORDER_MODE = "none"
     self.log("number of graphs to create: {:d}, ORDER_MODE is '{}'".format(
         _num_g, ORDER_MODE))
     graph_list = E.graph_list()
     for _graph_line in graph_key_list:
         graph_list.extend([
             _graph_target.graph_xml(dev_dict)
             for _graph_target in _graph_line
         ])
     self.early_return_call(graph_list)
     # print(etree.tostring(graph_list, pretty_print=True))
     graph_list = E.graph_list()
     _job_add_dict = self._get_jobs(dev_dict)
     for _graph_line in graph_key_list:
         self.log("starting graph_line")
         # iterate in case scale_mode is not None
         _iterate_line, _line_iteration = (True, 0)
         while _iterate_line:
             for _graph_target in _graph_line:
                 _graph_target.abs_file_loc = str(
                     os.path.join(self.para_dict["graph_root"],
                                  _graph_target.graph_name))
                 # clear list of defs, reset result
                 _graph_target.reset()
                 # reset colorizer for current graph
                 self.colorizer.reset()
                 self.abs_start_time = int((self.para_dict["start_time"] -
                                            self.dt_1970).total_seconds())
                 self.abs_end_time = int((self.para_dict["end_time_fc"] -
                                          self.dt_1970).total_seconds())
                 rrd_pre_args = [
                     _graph_target.abs_file_loc,
                     "-E",  # slope mode
                     "-Rlight",  # font render mode, slight hint
                     "-Gnormal",  # render mode
                     "-P",  # use pango markup
                     # "-nDEFAULT:8:",
                     "-w {:d}".format(graph_width),
                     "-h {:d}".format(graph_height),
                     "-aPNG",  # image format
                     # "--daemon", "unix:{}".format(global_config["RRD_CACHED_SOCKET"]),  # rrd caching daemon address
                     "-W {} by init.at".format(
                         License.objects.get_init_product().name),  # title
                     "--slope-mode",  # slope mode
                     "-cBACK#ffffff",
                     "--end",
                     "{:d}".format(self.abs_end_time),  # end
                     "--start",
                     "{:d}".format(self.abs_start_time),  # start
                     GraphVar(self, _graph_target, None, None,
                              "").header_line,
                 ]
                 # outer loop: iterate over all keys for the graph
                 for graph_key in sorted(_graph_target.graph_keys):
                     # inner loop: iterate over all dev ids for the graph
                     for _cur_id, cur_pk in _graph_target.dev_list:
                         # print "***", _cur_id, cur_pk
                         if (cur_pk, graph_key) in local_ds:
                             # resolve
                             for _mvs, _mvv in local_ds[(cur_pk,
                                                         graph_key)]:
                                 _take = True
                                 try:
                                     if os.stat(_mvs.file_name)[
                                             stat.ST_SIZE] < 100:
                                         self.log(
                                             "skipping {} (file is too small)"
                                             .format(_mvs.file_name, ),
                                             logging_tools.LOG_LEVEL_ERROR)
                                         _take = False
                                 except Exception:
                                     self.log(
                                         "RRD file {} not accessible: {}".
                                         format(
                                             _mvs.file_name,
                                             process_tools.get_except_info(
                                             ),
                                         ), logging_tools.LOG_LEVEL_ERROR)
                                     _take = False
                                 if _take and _line_iteration == 0:
                                     # add GraphVars only on the first iteration
                                     # print "**", graph_key, _mvs.key, _mvv.key
                                     # store def
                                     _graph_target.add_def(
                                         (_mvs.key, _mvv.key),
                                         GraphVar(self, _graph_target, _mvs,
                                                  _mvv, graph_key,
                                                  dev_dict[cur_pk]),
                                         "header_str",
                                     )
                 if _graph_target.draw_keys:
                     draw_it = True
                     removed_keys = set()
                     while draw_it:
                         if self.para_dict[
                                 "graph_setting"].graph_setting_timeshift:
                             timeshift = self.para_dict[
                                 "graph_setting"].graph_setting_timeshift.seconds
                             if timeshift == 0:
                                 timeshift = self.abs_end_time - self.abs_start_time
                         else:
                             timeshift = 0
                         rrd_args = rrd_pre_args + sum([
                             _graph_target.graph_var_def(
                                 _key,
                                 timeshift=timeshift,
                             ) for _key in _graph_target.draw_keys
                         ], [])
                         rrd_args.extend(_graph_target.rrd_post_args)
                         rrd_args.extend([
                             "--title", "{} on {} (tf: {}{})".format(
                                 _graph_target.header,
                                 dev_dict.get(_graph_target.dev_list[0][1],
                                              "unknown")
                                 if len(_graph_target.dev_list) == 1 else
                                 logging_tools.get_plural(
                                     "device", len(_graph_target.dev_list)),
                                 logging_tools.get_diff_time_str(timeframe),
                                 ", with forecast"
                                 if self.para_dict["end_time"] !=
                                 self.para_dict["end_time_fc"] else "",
                             )
                         ])
                         rrd_args.extend(
                             self._create_job_args(_graph_target.dev_list,
                                                   _job_add_dict))
                         self.proc.flush_rrdcached(_graph_target.file_names)
                         try:
                             draw_result = rrdtool.graphv(
                                 *[str(_val) for _val in rrd_args])
                         except Exception:
                             # in case of strange 'argument 0 has to be a string or a list of strings'
                             self.log(
                                 "error creating graph: {}".format(
                                     process_tools.get_except_info()),
                                 logging_tools.LOG_LEVEL_ERROR)
                             for _line in process_tools.icswExceptionInfo(
                             ).log_lines:
                                 self.log("    {}".format(_line),
                                          logging_tools.LOG_LEVEL_ERROR)
                             if global_config["DEBUG"]:
                                 for _idx, _entry in enumerate(rrd_args, 1):
                                     self.log("  {:4d} {}".format(
                                         _idx, _entry))
                             draw_result = None
                             draw_it = False
                         else:
                             # compare draw results, add -l / -u when scale_mode is not None
                             val_dict = {}
                             # new code
                             for key, value in draw_result.items():
                                 if not key.startswith("print["):
                                     continue
                                 _xml = etree.fromstring(value)
                                 _unique_id = int(_xml.get("unique_id"))
                                 # print etree.tostring(_xml, pretty_print=True)
                                 try:
                                     value = float(_xml.text)
                                 except (TypeError, ValueError):
                                     value = None
                                 else:
                                     pass  # value = None if value == 0.0 else value
                                 _s_key, _v_key = (_xml.get("mvs_key"),
                                                   _xml.get("mvv_key"))
                                 if value is not None:
                                     _key = (_unique_id, (_s_key, _v_key))
                                     val_dict.setdefault(
                                         _key,
                                         {})[_xml.get("cf")] = (value, _xml)
                             # list of empty (all none or 0.0 values) keys
                             _zero_keys = [
                                 key for key, value in val_dict.items()
                                 if all([
                                     _v[0] in [0.0, None]
                                     or math.isnan(_v[0])
                                     for _k, _v in value.items()
                                 ])
                             ]
                             if _zero_keys and self.para_dict[
                                     "graph_setting"].hide_empty:
                                 # remove all-zero structs
                                 val_dict = {
                                     key: value
                                     for key, value in val_dict.items()
                                     if key not in _zero_keys
                                 }
                             for key, value in val_dict.items():
                                 _graph_target.feed_draw_result(key, value)
                             # check if the graphs shall always include y=0
                             draw_it = False
                             if self.para_dict[
                                     "graph_setting"].include_zero:
                                 if "value_min" in draw_result and "value_max" in draw_result:
                                     if draw_result["value_min"] > 0.0:
                                         _graph_target.set_post_arg(
                                             "-l", "0")
                                         draw_it = True
                                     if draw_result["value_max"] < 0.0:
                                         _graph_target.set_post_arg(
                                             "-u", "0")
                                         draw_it = True
                             # check for empty graphs
                             empty_keys = set(
                                 _graph_target.draw_keys) - set(
                                     val_dict.keys())
                             if empty_keys and self.para_dict[
                                     "graph_setting"].hide_empty:
                                 self.log("{}: {}".format(
                                     logging_tools.get_plural(
                                         "empty key", len(empty_keys)),
                                     ", ".join(
                                         sorted([
                                             "{} (dev {:d})".format(
                                                 _key, _pk)
                                             for _pk, _key in empty_keys
                                         ])),
                                 ))
                                 removed_keys |= empty_keys
                                 _graph_target.remove_keys(empty_keys)
                                 # draw_keys = [_key for _key in draw_keys if _key not in empty_keys]
                                 if not _graph_target.draw_keys:
                                     draw_result = None
                                 else:
                                     draw_it = True
                     _graph_target.draw_result = draw_result
                     _graph_target.removed_keys = removed_keys
                 else:
                     self.log(
                         "no DEFs for graph_key_dict {}".format(
                             _graph_target.graph_key),
                         logging_tools.LOG_LEVEL_ERROR)
             # check if we should rerun the graphing process
             _iterate_line = False
             _valid_graphs = [
                 _entry for _entry in _graph_line if _entry.valid
             ]
             if _line_iteration == 0:
                 _iterate_scale = self.para_dict[
                     "graph_setting"].scale_mode in [
                         GraphScaleModeEnum.level,
                         GraphScaleModeEnum.to100
                     ] and (len(_valid_graphs) > 1
                            or self.para_dict["graph_setting"].scale_mode
                            == GraphScaleModeEnum.to100)
                 _iterate_order = bool(self.para_dict["ordering"])
                 if _iterate_order or _iterate_scale:
                     _line_iteration += 1
                     if _iterate_scale:
                         if self.para_dict[
                                 "graph_setting"].scale_mode == GraphScaleModeEnum.level:
                             _vmin_v, _vmax_v = (
                                 [
                                     _entry.draw_result["value_min"]
                                     for _entry in _valid_graphs
                                 ],
                                 [
                                     _entry.draw_result["value_max"]
                                     for _entry in _valid_graphs
                                 ],
                             )
                             if len(set(_vmin_v)) > 1 or len(set(_vmax_v)) > 1:
                                 _vmin, _vmax = (
                                     FLOAT_FMT.format(min(_vmin_v)),
                                     FLOAT_FMT.format(max(_vmax_v)),
                                 )
                                 self.log(
                                     "setting y_min / y_max for {} to {} / {}"
                                     .format(
                                         _valid_graphs[0].graph_key,
                                         _vmin,
                                         _vmax,
                                     ))
                                 for _entry in _valid_graphs:
                                     _entry.set_y_mm(_vmin, _vmax)
                                 _iterate_line = True
                         else:
                             for _entry in _valid_graphs:
                                 _entry.adjust_max_y(100)
                             self.log("set max y_val to 100 for all graphs")
                             _iterate_line = True
                     if _iterate_order:
                         _order_any = False
                         for _graph in _valid_graphs:
                             if len(_graph.draw_keys):
                                 _draw_res = _graph.get_draw_result(
                                     only_valid=True)
                                 new_draw_keys = sorted(
                                     _graph.draw_keys,
                                     key=lambda entry: _draw_res[entry][
                                         ORDER_MODE][0],
                                     reverse=_order_reverse)
                                 if _graph.draw_keys != new_draw_keys:
                                     _graph.draw_keys = new_draw_keys
                                     _order_any = True
                         if _order_any:
                             _line_iteration += 1
                             _iterate_line = True
             if not _iterate_line:
                 # graph list is no longer needed because we transfer the results via WebSocket(s)
                 # to the requiring frontend
                 graph_list.extend([
                     _graph_target.graph_xml(dev_dict)
                     for _graph_target in _graph_line
                 ])
                 propagate_channel_object(
                     WSStreamEnum.rrd_graph, {
                         "list": [
                             _graph_target.graph_json(dev_dict)
                             for _graph_target in _graph_line
                         ]
                     })
     # print(etree.tostring(graph_list, pretty_print=True))
     return graph_list
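The rrd_pre_args / rrd_args lists above follow rrdtool's normal command-line option style and are ultimately passed to rrdtool.graphv, which returns a dict of graph metadata (including the value_min / value_max and print[...] entries inspected above). A minimal standalone call, with placeholder paths and data-source names rather than anything taken from the code above, looks roughly like this:

# Minimal illustration of the argument style assembled in rrd_pre_args;
# file names and the DEF data source are placeholders.
import rrdtool

result = rrdtool.graphv(
    "/tmp/example.png",
    "-w", "800",
    "-h", "300",
    "-aPNG",
    "--start", "end-1h",
    "--end", "now",
    "DEF:v=/var/lib/rrd/example.rrd:value:AVERAGE",
    "LINE1:v#0000ff:value",
)
print(result.get("value_min"), result.get("value_max"))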