Example #1
 def _init_commands(self):
     self.log("init commands")
     self.__delayed = []
     self.module_list = self.modules.module_list
     self.commands = self.modules.command_dict
     if self.modules.IMPORT_ERRORS:
         self.log("modules import errors:", logging_tools.LOG_LEVEL_ERROR)
         for mod_name, com_name, error_str in self.modules.IMPORT_ERRORS:
             self.log(
                 "{:<24s} {:<32s} {}".format(
                     mod_name.split(".")[-1], com_name, error_str),
                 logging_tools.LOG_LEVEL_ERROR)
     _init_ok = True
     for call_name, add_self in [("register_server", True),
                                 ("init_module", False)]:
         for cur_mod in self.modules.module_list:
             if global_config["VERBOSE"]:
                 self.log("calling {} for module '{}'".format(
                     call_name, cur_mod.name))
             try:
                 if add_self:
                     getattr(cur_mod, call_name)(self)
                 else:
                     getattr(cur_mod, call_name)()
             except:
                 exc_info = process_tools.exception_info()
                 for log_line in exc_info.log_lines:
                     self.log(log_line, logging_tools.LOG_LEVEL_CRITICAL)
                 _init_ok = False
                 break
         if not _init_ok:
             break
     return _init_ok
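
The loop above dispatches optional lifecycle hooks by name and logs full tracebacks on failure. A minimal standard-library sketch of the same pattern, assuming plain module objects and a logging.Logger in place of the logging_tools/process_tools helpers used above:

import logging
import traceback

def run_hooks(modules, hook_names, log=None):
    # call each named hook on each module, abort on the first failure
    log = log or logging.getLogger(__name__)
    for hook_name in hook_names:
        for mod in modules:
            try:
                getattr(mod, hook_name)()
            except Exception:
                # mirror exc_info.log_lines: log the traceback line by line
                for line in traceback.format_exc().splitlines():
                    log.critical(line)
                return False
    return True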
Example #2
    def __call__(self, *args, **kwargs):
        s_time = time.time()

        display_name = getattr(args[0], "display_name", None)
        # get: head.im_class.__name__ (contains class name for django class views)
        view_class_name = getattr(
            getattr(getattr(args[0], 'head', None), 'im_class', None),
            '__name__', None)

        if hasattr(args[0], "model") and args[0].model is not None:
            self.__obj_name = args[0].model._meta.object_name
        elif display_name is not None:
            self.__obj_name = display_name
        elif view_class_name is not None:
            self.__obj_name = view_class_name
        else:
            self.__obj_name = "unknown"

        try:
            result = self._func(*args, **kwargs)
        except:
            exc_info = process_tools.exception_info()
            _err_str = process_tools.get_except_info()
            self.log(u"exception: {}".format(_err_str),
                     logging_tools.LOG_LEVEL_ERROR)
            for line in exc_info.log_lines:
                self.log(u"  {}".format(line))
            result = Response(_err_str, status=status.HTTP_406_NOT_ACCEPTABLE)
            # raise
        e_time = time.time()
        self.log("call took {}".format(
            logging_tools.get_diff_time_str(e_time - s_time)))
        return result
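
Because the class implements __call__, an instance of it can stand in for the wrapped view function. The same timing idea as a plain decorator, sketched with only the standard library (Response and status above belong to Django REST framework and are not used here):

import functools
import logging
import time

def timed(func):
    # log the wall-clock duration of every call, as __call__ above does
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        s_time = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            logging.getLogger(__name__).info(
                "call to %s took %.3fs", func.__name__, time.time() - s_time)
    return wrapper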
Example #3
 def handle_nodeinfo(self, src_id, node_text):
     s_time = time.time()
     s_req = simple_request(self, src_id, node_text)
     com_call = self.__com_dict.get(s_req.command, None)
     if com_call:
         config_control.update_router()
         try:
             ret_str = com_call(s_req)
         except:
             exc_info = process_tools.exception_info()
             ret_str = "error interpreting command {}: {}".format(
                 node_text,
                 process_tools.get_except_info(),
             )
             for _line in exc_info.log_lines:
                 self.log("  {}".format(_line),
                          logging_tools.LOG_LEVEL_ERROR)
     else:
         ret_str = "error unknown command '{}'".format(node_text)
     if ret_str is None:
         self.log("waiting for answer")
     else:
         e_time = time.time()
         self.log(
             "handled nodeinfo '%s' (src_ip %s) in %s, returning %s" %
             (s_req.node_text, s_req.src_ip,
              logging_tools.get_diff_time_str(e_time - s_time), ret_str))
         config_control.srv_process._send_simple_return(
             s_req.zmq_id, ret_str)
         del s_req
Example #4
 def feed(self, *res_list):
     self.waiting_for = None
     self.running = False
     error_list, _ok_list, res_dict = res_list[0:3]
     if error_list:
         self.log("error fetching SNMP data from {}".format(self.device_name), logging_tools.LOG_LEVEL_ERROR)
     else:
         headers = {
             "name": self.device_name,
             "uuid": self.uuid,
             "time": "{:d}".format(int(self.last_start))
         }
         mv_tree = E.machine_vector(
             simple="0",
             **headers
         )
         mon_info = E.monitor_info(
             **headers
         )
         for _handler in self.snmp_handlers:
             try:
                 _handler.collect_feed(res_dict, mv_tree=mv_tree, mon_info=mon_info, vc=self.__vcache)
             except:
                 self.log(
                     "error feeding for handler {} (IP {}): {}".format(
                         unicode(_handler),
                         self.ip,
                         process_tools.get_except_info(),
                     ),
                     logging_tools.LOG_LEVEL_CRITICAL
                 )
                 for _log_line in process_tools.exception_info().log_lines:
                     self.log(_log_line, logging_tools.LOG_LEVEL_CRITICAL)
         # graphing
         self.bg_proc.process_data_xml(mv_tree, len(etree.tostring(mv_tree)))  # @UndefinedVariable
Example #5
 def loop(self):
     try:
         while self["run_flag"]:
             self.__disp.runDispatcher()
             self.step(blocking=self["run_flag"])
     except ValueConstraintError:
         self.log("caught ValueConstraintError, terminating process",
                  logging_tools.LOG_LEVEL_CRITICAL)
         _term_cause = "ValueConstraintError"
     except:
         exc_info = process_tools.exception_info()
         self.log("exception in dispatcher, terminating process",
                  logging_tools.LOG_LEVEL_CRITICAL)
         for log_line in exc_info.log_lines:
             self.log(" - {}".format(log_line),
                      logging_tools.LOG_LEVEL_CRITICAL)
         _term_cause = "internal error"
     else:
         self.log("no more jobs running")
         _term_cause = ""
     self.log("jobs pending: {:d}".format(len(self.__job_dict)))
     # close all jobs
     if _term_cause:
         self._terminate_jobs(error="{}, check logs".format(_term_cause))
     else:
         self._terminate_jobs()
     self.__disp.closeDispatcher()
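
The try/except/else layout above is what keeps _term_cause bound on every path: the else branch runs only when the loop exits cleanly. A reduced sketch of that control flow, with a hypothetical step() callable and ValueError standing in for the library-specific ValueConstraintError:

def classify_exit(step):
    # exactly one branch runs, so term_cause is always bound afterwards
    try:
        step()
    except ValueError:
        term_cause = "ValueError"
    except Exception:
        term_cause = "internal error"
    else:
        term_cause = ""
    return term_cause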
Example #6
 def _fw_handle(self, *args, **kwargs):
     src_id, data = args
     srv_com = server_command.srv_command(source=data)
     in_com = srv_com["command"].text
     args = {}
     if "arguments" in srv_com:
         for entry in srv_com["arguments"]:
             _key = entry.tag.split("}")[-1]
             _val = entry.text
             if _val is None:
                 self.log("key {} has empty value ({})".format(_key, in_com), logging_tools.LOG_LEVEL_ERROR)
             else:
                 if _val.lower() in ["true", "false"]:
                     _val = bool(_val)
                 elif _val.isdigit():
                     _val = int(_val)
                 # if
                 args[_key] = _val
     self.log(
         "got '{}', {}: {}".format(
             in_com,
             logging_tools.get_plural("argument", len(args)),
             ", ".join(["{}='{}' ({})".format(key, value, type(value)) for key, value in args.iteritems()])
         )
     )
     args = {key.replace("-", "_"): value for key, value in args.iteritems()}
     found_keys = set(args.keys())
     needed_keys = {
         "register_file_watch": {
             "send_id", "mode", "target_server", "target_port", "dir", "match"
         },
         "unregister_file_watch": {
             "id",
         },
     }.get(in_com, set())
     if needed_keys & found_keys == needed_keys:
         # set default return value
         srv_com.set_result(
             "got command {}".format(in_com)
         )
         try:
             getattr(self, "_{}".format(in_com))(srv_com, args)
         except:
             exc_info = process_tools.exception_info()
             for line in exc_info.log_lines:
                 self.log("  {}".format(line), logging_tools.LOG_LEVEL_ERROR)
             srv_com.set_result(
                 "error processing '{}': {}".format(in_com, exc_info.except_info),
                 server_command.SRV_REPLY_STATE_CRITICAL
             )
         log_str, log_level = srv_com.get_log_tuple()
         self.log("result: {}".format(log_str), log_level)
     else:
         srv_com.set_result(
             "command {}, keys missing: {}".format(in_com, ", ".join(needed_keys - found_keys)),
             server_command.SRV_REPLY_STATE_ERROR
         )
     self.send_pool_message("callback_result", src_id, unicode(srv_com))
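
The argument coercion above is easy to get wrong, since bool() on any non-empty string (including "false") is True; the value has to be compared against the literal. The coercion rules as a standalone sketch:

def coerce(text):
    # "true"/"false" become booleans, digit strings become ints,
    # everything else stays a string
    lowered = text.lower()
    if lowered in ("true", "false"):
        return lowered == "true"
    if text.isdigit():
        return int(text)
    return text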
Example #7
 def compound_struct(in_list):
     try:
         _comps = DataStore.compound_tree.append_compounds(in_list)
     except:
         for _line in process_tools.exception_info().log_lines:
             DataStore.g_log(_line, logging_tools.LOG_LEVEL_ERROR)
         _comps = []
     else:
         # pprint.pprint(_comps)
         pass
     return _comps
Example #8
 def _close_modules(self):
     for cur_mod in self.module_list:
         if hasattr(cur_mod, "stop_module"):
             self.log("calling stop_module() for {}".format(cur_mod.name))
             try:
                 cur_mod.stop_module()
             except:
                 exc_info = process_tools.exception_info()
                 for log_line in exc_info.log_lines:
                     self.log(log_line, logging_tools.LOG_LEVEL_CRITICAL)
                 _init_ok = False
Example #9
 def process(self):
     try:
         self.__ct_struct.process(self)
     except:
         exc_info = process_tools.exception_info()
         for _line in exc_info.log_lines:
             self.log(_line, logging_tools.LOG_LEVEL_ERROR)
         self.srv_com.set_result(
             "error in process() call: {}".format(
                 process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL)
Example #10
 def _handle_module_command(self, srv_com):
     try:
         self.commands[srv_com["command"].text](srv_com)
     except:
          for log_line in process_tools.exception_info().log_lines:
              self.log(log_line, logging_tools.LOG_LEVEL_ERROR)
          srv_com.set_result(
              "caught server exception '{}'".format(
                  process_tools.get_except_info()),
              server_command.SRV_REPLY_STATE_CRITICAL,
          )
Example #11
def main():
    my_disc = disclaimer_handler()
    my_disc.recv_mail()
    try:
        my_disc.process()
    except:
        _exc_info = process_tools.exception_info()
        for _line in _exc_info.log_lines:
            my_disc.log("error processing: {}".format(_line), logging_tools.LOG_LEVEL_CRITICAL)
    my_disc.send_via_spamc()
    my_disc.close()
    return my_disc._result
Example #12
 def check_rrd_file(self, f_name):
     _changed = False
     s_time = time.time()
     try:
         _old_size = os.stat(f_name)[stat.ST_SIZE]
     except:
         self.log(
             "cannot get size of {}: {}".format(
                 f_name, process_tools.get_except_info()),
             logging_tools.LOG_LEVEL_ERROR)
         _old_size = 0
     else:
         if _old_size:
             try:
                 _rrd = rrd_tools.RRD(f_name,
                                      log_com=self.log,
                                      build_rras=False,
                                      verbose=self.__verbose)
             except:
                 # check if file is not an rrd file
                  with open(f_name, "rb") as _rrd_file:
                      _content = _rrd_file.read()
                 if f_name.endswith(".rrd") and _content[:3] != "RRD":
                      self.log(
                          "file {} has no RRD header, trying to remove it".format(f_name),
                          logging_tools.LOG_LEVEL_ERROR)
                     try:
                         os.unlink(f_name)
                     except:
                         pass
                 else:
                     self.log(
                         "cannot get info about {}: {}".format(
                             f_name, process_tools.get_except_info()),
                         logging_tools.LOG_LEVEL_ERROR)
                     for _line in process_tools.exception_info().log_lines:
                         self.log(_line, logging_tools.LOG_LEVEL_ERROR)
             else:
                 _changed = self.check_rrd_file_2(f_name, _rrd)
                 if _changed:
                     _new_size = os.stat(f_name)[stat.ST_SIZE]
                     e_time = time.time()
                     self.log("modification of {} took {} ({} -> {} Bytes)".
                              format(
                                  f_name,
                                  logging_tools.get_diff_time_str(e_time -
                                                                  s_time),
                                  _old_size,
                                  _new_size,
                              ))
         else:
             self.log("file {} is empty".format(f_name),
                      logging_tools.LOG_LEVEL_WARN)
     return _changed
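
The stat-then-validate structure above can be condensed with os.path.getsize; a sketch that keeps the same "RRD" magic-bytes check:

import os

def rrd_header_ok(f_name):
    # unreadable or empty files count as bad, matching the logic above
    try:
        if os.path.getsize(f_name) == 0:
            return False
        with open(f_name, "rb") as handle:
            return handle.read(3) == b"RRD"
    except (OSError, IOError):
        return False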
Example #13
 def _handle_module_command(self, srv_com, cur_ns, rest_str):
     cur_com = self.commands[srv_com["command"].text]
     sp_struct = None
     try:
         if cur_ns is None:
             cur_ns, _rest = cur_com.handle_commandline(
                 rest_str.strip().split())
         sp_struct = cur_com(srv_com, cur_ns)
     except:
         exc_info = process_tools.exception_info()
         for log_line in exc_info.log_lines:
             self.log(log_line, logging_tools.LOG_LEVEL_ERROR)
         srv_com.set_result(
             "caught server exception '{}'".format(
                 process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL)
     return sp_struct
Example #14
 def handle(self, srv_com):
     try:
         p_args = self.parse(srv_com)
         self.run(srv_com, p_args)
     except DeviceNotFoundException:
         srv_com.set_result(
             "device not found",
             server_command.SRV_REPLY_STATE_CRITICAL,
         )
     except:
         exc_com = process_tools.exception_info()
         for _line in exc_com.log_lines:
             self.log(_line, logging_tools.LOG_LEVEL_ERROR)
         srv_com.set_result(
             "an exception occured: {}".format(
                 process_tools.get_except_info()),
             server_command.SRV_REPLY_STATE_CRITICAL,
         )
Example #15
 def _check_for_pending_jobs(self):
     for c_name, _dev, scan_type_enum, _new_lock, _dev_xml in self.__job_list:
         # todo: make calls parallel
         s_time = time.time()
         try:
             getattr(self, c_name)(_dev_xml, _dev)
         except:
             _exc_info = process_tools.exception_info()
             for _line in _exc_info.log_lines:
                 self.log("   {}".format(_line),
                          logging_tools.LOG_LEVEL_ERROR)
         finally:
             [
                 self.log(_what, _level)
                 for _what, _level in _new_lock.close()
             ]
         e_time = time.time()
         self.log(u"calling {} for device {} took {}".format(
             c_name, unicode(_dev),
             logging_tools.get_diff_time_str(e_time - s_time)))
     self.__job_list = []
Example #16
 def update(self, dev, scheme, result_dict, oid_list, flags):
     # update dev with results from given snmp_scheme
     # valid oid_list is oid_list
     # results are in result_dict
     _handler = self.get_handler(scheme)
     if _handler:
         try:
             return _handler.update(dev, scheme, result_dict, oid_list,
                                    flags)
         except:
             exc_info = process_tools.exception_info()
             _err_str = "unable to process results: {}".format(
                 process_tools.get_except_info())
             self.log(_err_str, logging_tools.LOG_LEVEL_ERROR)
             for _line in exc_info.log_lines:
                 self.log("  {}".format(_line),
                          logging_tools.LOG_LEVEL_ERROR)
             return ResultNode(error=_err_str)
     else:
         return ResultNode(error="no handler found for {}".format(
             scheme.full_name_version))
Example #17
 def handle_result(self, mes_id, result):
     cur_mes = self.messages[mes_id]
     if cur_mes.sent:
         cur_mes.sent = False
     if len(result.xpath(".//ns:raw", smart_strings=False)):
         # raw response, no interpret
         cur_mes.srv_com = result
         self.send_result(cur_mes, None)
         # self.send_result(cur_mes, None)
     else:
         try:
             ret = ExtReturn.get_ext_return(cur_mes.interpret(result))
         except:
             ret = ExtReturn(
                 limits.mon_STATE_CRITICAL,
                 "error interpreting result: {}".format(
                     process_tools.get_except_info()
                 )
             )
             exc_info = process_tools.exception_info()
             for line in exc_info.log_lines:
                 HostConnection.relayer_process.log(line, logging_tools.LOG_LEVEL_CRITICAL)
         self.send_result(cur_mes, ret)
Example #18
 def _check_kernel_dir(self, srv_com):
     self.log("checking kernel dir")
     # build option dict
     opt_dict = {}
     for key, def_value in {
             "ignore_kernel_build_machine": False,
             "kernels_to_insert": [],
             "check_list": [],
             "insert_all_found": False,
             "kernels_to_sync": {}
     }.iteritems():
         if key in srv_com:
             cur_val = srv_com[key].text
             if type(def_value) == bool:
                 cur_val = True if int(cur_val) else False
         else:
             cur_val = def_value
         opt_dict[key] = cur_val
     # self.__ks_check._check(dc)
     self.log("option_dict has {}: {}".format(
         logging_tools.get_plural("key", len(opt_dict.keys())), ", ".join([
             "{} ({}, {})".format(key, str(type(value)), str(value))
             for key, value in opt_dict.iteritems()
         ])))
     srv_com.update_source()
     # problems are global problems, not kernel local
     kernels_found, problems = ([], [])
     if not self.kernel_dev.effective_device:
         self.log("no kernel_server, skipping check ...",
                  logging_tools.LOG_LEVEL_ERROR)
         srv_com.set_result("no kernel server",
                            server_command.SRV_REPLY_STATE_ERROR)
     else:
         all_k_servers = config_tools.device_with_config("kernel_server")
         def_k_servers = all_k_servers.get("kernel_server", [])
         self.log("found {}: {}".format(
             logging_tools.get_plural("kernel_server", len(def_k_servers)),
             ", ".join(
                 sorted([
                     unicode(s_struct.effective_device)
                     for s_struct in def_k_servers
                 ]))))
         all_kernels = {
             cur_kern.name: cur_kern
             for cur_kern in kernel.objects.all()
         }
         any_found_in_database = len(all_kernels) > 0
         if any_found_in_database:
             self.log(
                 "some kernels already present in database, not inserting all found",
                 logging_tools.LOG_LEVEL_WARN)
             opt_dict["insert_all_found"] = False
         kct_start = time.time()
         self.log(
             "Checking for kernels ({:d} already in database) ...".format(
                 len(all_kernels.keys())))
         if opt_dict["kernels_to_insert"]:
             self.log(" - only {} to insert: {}".format(
                 logging_tools.get_plural(
                     "kernels", len(opt_dict["kernels_to_insert"])),
                 ", ".join(opt_dict["kernels_to_insert"])))
         if "TFTP_DIR" in global_config:
             if not os.path.isdir(global_config["TFTP_DIR"]):
                 self.log(
                     "TFTP_DIR '{}' is not a directory".format(
                         global_config["TFTP_DIR"]),
                     logging_tools.LOG_LEVEL_ERROR)
                 problems.append("TFTP_DIR '{}' is not a directory".format(
                     global_config["TFTP_DIR"]))
         kern_dir = global_config["KERNEL_DIR"]
         if not os.path.isdir(kern_dir):
             self.log("kernel_dir '{}' is not a directory".format(kern_dir),
                      logging_tools.LOG_LEVEL_ERROR)
             problems.append("kernel_dir '%s' is not a directory" %
                             (kern_dir))
         else:
             for entry in os.listdir(kern_dir):
                  if not opt_dict["check_list"] or entry in opt_dict["check_list"]:
                     try:
                         act_kernel = KernelHelper(
                             entry,
                             kern_dir,
                             self.log,
                             global_config,
                             master_server=self.kernel_dev.effective_device)
                     except:
                         self.log(
                             "error in kernel handling ({}): {}".format(
                                 entry,
                                 process_tools.get_except_info(),
                             ), logging_tools.LOG_LEVEL_ERROR)
                         problems.append(
                             unicode(process_tools.get_except_info()))
                          for _log_line in process_tools.exception_info().log_lines:
                             self.log("    {}".format(_log_line),
                                      logging_tools.LOG_LEVEL_ERROR)
                     else:
                         # handle initrd generated by old populate_ramdisk.py
                         act_kernel.move_old_initrd()
                         act_kernel.check_md5_sums()
                         act_kernel.check_kernel_dir()
                         act_kernel.set_option_dict_values()
                         # determine if we should insert the kernel into the database
                         if act_kernel.check_for_db_insert(opt_dict):
                             act_kernel.insert_into_database()
                             act_kernel.check_initrd()
                         kernels_found.append(act_kernel.name)
                         act_kernel.log_statistics()
                         del act_kernel
         kct_end = time.time()
         _ret_str = "checking of kernel_dir took {}{}".format(
             logging_tools.get_diff_time_str(kct_end - kct_start),
             ", problems: {}".format(", ".join(problems))
             if problems else "",
         )
          self.log(
              _ret_str,
              logging_tools.LOG_LEVEL_ERROR if problems else logging_tools.LOG_LEVEL_OK
          )
          srv_com.set_result(
              _ret_str,
              server_command.SRV_REPLY_STATE_ERROR if problems else server_command.SRV_REPLY_STATE_OK
          )
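
The option-dict loop above derives each value's type from its default. The same idea as a reusable helper, sketched with srv_com reduced to a plain mapping:

def build_options(source, defaults):
    # use the incoming value when present (coercing bools via int()),
    # otherwise fall back to the typed default
    opt_dict = {}
    for key, def_value in defaults.items():
        if key in source:
            cur_val = source[key]
            if isinstance(def_value, bool):
                cur_val = bool(int(cur_val))
        else:
            cur_val = def_value
        opt_dict[key] = cur_val
    return opt_dict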
Example #19
    def scan_network_info(self, dev_com, scan_dev):
        hm_port = InstanceXML(quiet=True).get_port_dict("host-monitoring",
                                                        command=True)
        res_node = ResultNode()
        strict_mode = True if int(dev_com.get("strict_mode")) else False
        modify_peering = True if int(dev_com.get("modify_peering")) else False
        scan_address = dev_com.get("scan_address")
        self.log(
            "scanning network for device '{}' ({:d}), scan_address is '{}', strict_mode is {}"
            .format(
                unicode(scan_dev),
                scan_dev.pk,
                scan_address,
                "on" if strict_mode else "off",
            ))
        zmq_con = net_tools.ZMQConnection(
            "server:{}".format(process_tools.get_machine_name()),
            context=self.zmq_context
        )
        conn_str = "tcp://{}:{:d}".format(
            scan_address,
            hm_port,
        )
        self.log(u"connection_str for {} is {}".format(unicode(scan_dev),
                                                       conn_str))
        zmq_con.add_connection(
            conn_str,
            server_command.srv_command(command="network_info"),
            multi=True)
        res_list = zmq_con.loop()
        self.log("length of result list: {:d}".format(len(res_list)))
        num_taken, num_ignored = (0, 0)
        nds_list = netdevice_speed.objects.filter(
            Q(speed_bps__in=[1000000000, 100000000])).order_by(
                "-speed_bps", "-full_duplex", "-check_via_ethtool")
        default_nds = nds_list[0]
        self.log("default nds is {}".format(unicode(default_nds)))

        for _idx, (result, target_dev) in enumerate(zip(res_list, [scan_dev])):
            self.log("device {} ...".format(unicode(target_dev)))
            res_state = -1 if result is None else int(
                result["result"].attrib["state"])
            if res_state:
                # num_errors += 1
                if res_state == -1:
                    res_node.error(u"{}: no result".format(
                        unicode(target_dev)))
                else:
                    res_node.error(u"{}: error {:d}: {}".format(
                        unicode(target_dev),
                        int(result["result"].attrib["state"]),
                        result["result"].attrib["reply"]))
            else:
                try:
                    bridges = result["bridges"]
                    networks = result["networks"]
                except:
                    res_node.error(
                        u"{}: error missing keys in dict".format(target_dev))
                else:
                    # clear current network
                    with transaction.atomic():
                        sid = transaction.savepoint()
                        # store current peers
                        _peers = [
                            _obj.store_before_delete(target_dev)
                            for _obj in peer_information.objects.filter(
                                Q(s_netdevice__in=target_dev.netdevice_set.all()) |
                                Q(d_netdevice__in=target_dev.netdevice_set.all())
                            )
                        ]
                        _old_peer_dict = {}
                        for _old_peer in _peers:
                            _old_peer_dict.setdefault(_old_peer["my_name"],
                                                      []).append(_old_peer)
                        self.log("removing current network devices")
                        target_dev.netdevice_set.all().delete()
                        all_ok = True
                        exc_dict = {}
                        _all_devs = set(networks)
                        _br_devs = set(bridges)
                        # build bond dict
                        bond_dict = {}
                        for dev_name in _all_devs:
                            _struct = networks[dev_name]
                            if "MASTER" in _struct["flags"]:
                                bond_dict[dev_name] = {"slaves": []}
                        for dev_name in _all_devs:
                            _struct = networks[dev_name]
                            if "SLAVE" in _struct["flags"]:
                                master_name = _struct["features"]["master"]
                                bond_dict[master_name]["slaves"].append(
                                    dev_name)
                        NDStruct.setup(self, target_dev, default_nds,
                                       bond_dict)
                        for dev_name in sorted(_all_devs & _br_devs) + sorted(_all_devs - _br_devs):
                            if any(dev_name.startswith(_ignore_pf) for _ignore_pf in IGNORE_LIST):
                                self.log("ignoring device {}".format(dev_name))
                                num_ignored += 1
                                continue
                            _struct = networks[dev_name]
                            cur_nd = NDStruct(dev_name, _struct,
                                              bridges.get(dev_name, None))
                            try:
                                cur_nd.create()
                            except (NoMatchingNetworkDeviceTypeFoundError,
                                    NoMatchingNetworkFoundError) as exc:
                                _name = exc.__class__.__name__
                                self.log(
                                    "caught {} for {}".format(_name, dev_name),
                                    logging_tools.LOG_LEVEL_ERROR)
                                exc_dict.setdefault(_name, []).append(dev_name)
                                all_ok = False
                            except:
                                err_str = "error creating netdevice {}: {}".format(
                                    dev_name, process_tools.get_except_info())
                                if strict_mode:
                                    res_node.error(err_str)
                                for _log in process_tools.exception_info().log_lines:
                                    self.log("  {}".format(_log),
                                             logging_tools.LOG_LEVEL_CRITICAL)
                                all_ok = False
                            else:
                                num_taken += 1
                            if cur_nd.nd is not None and cur_nd.nd.devname in _old_peer_dict:
                                #  relink peers
                                for _peer in _old_peer_dict[cur_nd.nd.devname]:
                                    _new_peer = peer_information.create_from_store(
                                        _peer, cur_nd.nd)
                                del _old_peer_dict[cur_nd.nd.devname]
                        if all_ok:
                            NDStruct.handle_bonds()
                        if exc_dict:
                            for key in sorted(exc_dict.keys()):
                                res_node.error("{} for {}: {}".format(
                                    key,
                                    logging_tools.get_plural(
                                        "netdevice", len(exc_dict[key])),
                                    ", ".join(sorted(exc_dict[key]))))
                        if _old_peer_dict.keys():
                            _err_str = "not all peers migrated: {}".format(
                                ", ".join(_old_peer_dict.keys()))
                            if strict_mode:
                                res_node.error(_err_str)
                                all_ok = False
                            else:
                                res_node.warn(_err_str)
                        [
                            NDStruct.dict[_bridge_name].link_bridge_slaves()
                            for _bridge_name in _br_devs
                            & set(NDStruct.dict.keys())
                        ]
                        if not all_ok and strict_mode:
                            self.log(
                                "rolling back to savepoint because strict_mode is enabled",
                                logging_tools.LOG_LEVEL_WARN)
                            num_taken -= target_dev.netdevice_set.all().count()
                            transaction.savepoint_rollback(sid)
                        else:
                            transaction.savepoint_commit(sid)
        if num_taken:
            res_node.ok("{} taken".format(
                logging_tools.get_plural("netdevice", num_taken)))
        if num_ignored:
            res_node.ok("{} ignored".format(
                logging_tools.get_plural("netdevice", num_ignored)))
        return res_node
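
The savepoint handling above is stock Django: inside transaction.atomic(), a savepoint taken before the destructive delete can be rolled back when strict_mode finds problems. The skeleton in isolation, with a hypothetical rebuild() callable standing in for the netdevice rebuild:

from django.db import transaction

def rebuild_with_rollback(rebuild, strict_mode=True):
    # keep the old rows if the rebuild failed and strict_mode is on
    with transaction.atomic():
        sid = transaction.savepoint()
        all_ok = rebuild()
        if not all_ok and strict_mode:
            transaction.savepoint_rollback(sid)
        else:
            transaction.savepoint_commit(sid)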
Example #20
 def _get_node_status(self, srv_com_str, **kwargs):
     srv_com = server_command.srv_command(source=srv_com_str)
     # overview mode if overview is a top-level element
     _host_overview = True if "host_overview" in srv_com else False
     _service_overview = True if "service_overview" in srv_com else False
     if not _host_overview:
         # ToDo, FIXME: receive full names in srv_command
         dev_names = srv_com.xpath(".//device_list/device/@full_name",
                                   smart_strings=False)
         # dev_names = sorted([cur_dev.full_name for cur_dev in device.objects.filter(Q(pk__in=pk_list))])
     try:
         cur_sock = self._open()
         if cur_sock:
             fetch_dict = LivstatusFetch(self.log, cur_sock)
             if _host_overview:
                 fetch_dict["host"] = cur_sock.hosts.columns(
                     "name",
                     "address",
                     "state",
                     "plugin_output",
                     "custom_variables",
                 )
                 if _service_overview:
                     fetch_dict["service"] = cur_sock.services.columns(
                         "description",
                         "state",
                         "plugin_output",
                         "custom_variables",
                     )
             else:
                 if dev_names:
                     fetch_dict["service"] = cur_sock.services.columns(
                         "host_name",
                         "description",
                         "state",
                         "plugin_output",
                         "last_check",
                         "check_type",
                         "state_type",
                         "last_state_change",
                         "max_check_attempts",
                         "display_name",
                         "current_attempt",
                         "custom_variables",
                         "acknowledged",
                         "acknowledgement_type",
                     ).filter("host_name", "=", dev_names)
                     fetch_dict["host"] = cur_sock.hosts.columns(
                         "name",
                         "address",
                         "state",
                         "plugin_output",
                         "last_check",
                         "check_type",
                         "state_type",
                         "last_state_change",
                         "max_check_attempts",
                         "current_attempt",
                         "custom_variables",
                         "acknowledged",
                         "acknowledgement_type",
                     ).filter("name", "=", dev_names)
                     fetch_dict["host_comment"] = cur_sock.comments.columns(
                         "host_name",
                         "author",
                         "comment",
                         "entry_type",
                         "entry_time",
                     ).filter("host_name", "=", dev_names).filter(
                         "is_service",
                         "=",
                         "0",
                         method="and",
                         count=2,
                     )
                      fetch_dict["service_comment"] = cur_sock.comments.columns(
                          "host_name",
                          "author",
                          "comment",
                          "entry_type",
                          "entry_time",
                      ).filter("host_name", "=", dev_names).filter(
                          "is_service",
                          "=",
                          "1",
                          method="and",
                          count=2,
                      )
                     # print str(fetch_dict["service_comment"])
             fetch_dict.fetch()
             srv_com["service_result"] = json.dumps([
                 _line for _line in fetch_dict["service_result"]
                 if _line.get("host_name", "")
             ])
             srv_com["host_result"] = json.dumps(fetch_dict["host_result"])
             srv_com.set_result(fetch_dict.info_str)
         else:
             srv_com.set_result("cannot connect to socket",
                                server_command.SRV_REPLY_STATE_CRITICAL)
     except:
         self.log(
             u"fetch exception: {}".format(process_tools.get_except_info()),
             logging_tools.LOG_LEVEL_ERROR)
         exc_info = process_tools.exception_info()
         for line in exc_info.log_lines:
             self.log(u" - {}".format(line), logging_tools.LOG_LEVEL_ERROR)
         self._close()
         srv_com.set_result("exception during fetch",
                            server_command.SRV_REPLY_STATE_CRITICAL)
     self.send_pool_message("remote_call_async_result", unicode(srv_com))
Example #21
import inspect
import os
from initat.tools import process_tools
from ..base import SNMPRelayScheme

_path = os.path.dirname(__file__)

snmp_schemes = []
import_errors = []

for mod_name in [
    _entry.split(".")[0] for _entry in os.listdir(_path) if _entry.endswith(".py") and _entry not in ["__init__.py"]
]:
    try:
        new_mod = __import__(mod_name, globals(), locals())
    except:
        exc_info = process_tools.exception_info()
        import_errors.extend(
            [
                (mod_name, "import", _line) for _line in exc_info.log_lines
            ]
        )
    else:
        for _key in dir(new_mod):
            _obj = getattr(new_mod, _key)
            if inspect.isclass(_obj) and issubclass(_obj, SNMPRelayScheme) and _obj != SNMPRelayScheme:
                if _key.endswith("_scheme"):
                    snmp_schemes.append((_key[:-7], _obj))
                else:
                    import_errors.append(
                        (
                            mod_name,
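
The discovery loop above imports every sibling module, collects SNMPRelayScheme subclasses and records import errors. A variant of the same idea sketched with pkgutil and importlib, assuming a regular package object:

import importlib
import inspect
import pkgutil

def discover_schemes(package, base_class):
    # import each submodule, collect subclasses of base_class,
    # and record (module, phase, error) tuples like import_errors above
    schemes, errors = [], []
    for _imp, mod_name, _is_pkg in pkgutil.iter_modules(package.__path__):
        try:
            module = importlib.import_module("{}.{}".format(package.__name__, mod_name))
        except Exception as exc:
            errors.append((mod_name, "import", str(exc)))
            continue
        for key, obj in inspect.getmembers(module, inspect.isclass):
            if issubclass(obj, base_class) and obj is not base_class:
                schemes.append((key, obj))
    return schemes, errors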
Example #22
 def _take_config(self, request, conf, ccat):
     _sets = {}
     # for key in conf.iterkeys():
     #    # remove all subsets, needed because of limitations in DRF
     #    if key.endswith("_set") and conf[key]:
     #        _sets[key] = conf[key]
     #        conf[key] = []
     if not conf.get("description", None):
         # fix missing or None description
         conf["description"] = ""
     import pprint
     pprint.pprint(conf)
     _ent = config_dump_serializer(data=conf)
     added = 0
     sub_added = 0
     try:
         _exists = config.objects.get(
             Q(name=conf["name"])
             & Q(config_catalog=ccat))  # @UndefinedVariable
     except config.DoesNotExist:
         _take = True
     else:
         request.xml_response.error(
             "config {} already exists in config catalog {}".format(
                 conf["name"], unicode(ccat)),
             logger=logger)
         _take = False
     # we create the config with a dummy name to simplify matching of vars / scripts / monccs against configs with same name but different catalogs
     dummy_name = "_ul_config_{:d}".format(int(time.time()))
     taken = False
     if _take:
         if _ent.is_valid():
             print dir(_ent)
             print "*", dummy_name
             _ent.create_default_entries = False
             try:
                 # store config with config catalog
                 print "a"
                 _ent.save(name=dummy_name, config_catalog=ccat)
                 print "b"
                 # pass
             except:
                 for entry in process_tools.exception_info().log_lines:
                     logger.log(logging_tools.LOG_LEVEL_ERROR, entry)
                 request.xml_response.error(
                     "error saving entry '{}': {}".format(
                         unicode(_ent), process_tools.get_except_info()),
                     logger=logger)
             else:
                 taken = True
                 # add sub-sets
                 for key in _sets.iterkeys():
                     for entry in _sets[key]:
                         entry["config"] = dummy_name
                         if not entry.get("description", None):
                             # fix simple structure errors
                             entry["description"] = "dummy description"
                         _sub_ent = getattr(
                             serializers, "{}_nat_serializer".format(
                                 key[:-4]))(data=entry)
                         if _sub_ent.is_valid():
                             try:
                                 _sub_ent.object.save()
                             except:
                                  request.xml_response.error(
                                      "error saving subentry '{}': {}".format(
                                          unicode(_sub_ent),
                                          process_tools.get_except_info()),
                                      logger=logger)
                             else:
                                 sub_added += 1
                         else:
                             request.xml_response.error(
                                 "cannot create {} object: {}".format(
                                     key, unicode(_sub_ent.errors)),
                                 logger=logger)
                 added += 1
                 _ent.object.name = conf["name"]
                 _ent.object.save()
                 request.xml_response["new_pk"] = "{:d}".format(
                     _ent.object.pk)
                  request.xml_response.info(
                      "created new config {} ({:d}) in config catalog {}".format(
                          unicode(_ent.object), sub_added, unicode(ccat)))
         else:
             request.xml_response.error(
                 "cannot create config object: {}".format(
                     unicode(_ent.errors)),
                 logger=logger)
     return taken
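
Worth noting as a design choice: the config is first saved under a throwaway _ul_config_<timestamp> name and only renamed once its sub-entries are attached, so a half-imported config never collides with an existing one. The two-phase skeleton as a sketch, with save_config as a hypothetical persistence helper:

import time

def two_phase_create(conf, save_config):
    # phase 1: persist under a unique placeholder name
    obj = save_config(conf, name="_ul_config_{:d}".format(int(time.time())))
    # phase 2: attach sub-entries here, then claim the real name
    obj.name = conf["name"]
    obj.save()
    return obj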
Example #23
 def graph(self, dev_pks, graph_keys):
     # end time with forecast
     local_ds = DataSource(self.log_com, dev_pks, graph_keys,
                           self.colorizer)
     self.para_dict["end_time_fc"] = self.para_dict["end_time"]
     if self.para_dict["graph_setting"].graph_setting_forecast:
         _fc = self.para_dict["graph_setting"].graph_setting_forecast
         if _fc.seconds:
             # add seconds
             self.para_dict["end_time_fc"] += datetime.timedelta(
                 seconds=_fc.seconds)
         else:
             # add timeframe
             self.para_dict["end_time_fc"] += self.para_dict[
                 "end_time"] - self.para_dict["start_time"]
     timeframe = abs((self.para_dict["end_time_fc"] -
                      self.para_dict["start_time"]).total_seconds())
     graph_width, graph_height = (
         self.para_dict["graph_setting"].graph_setting_size.width,
         self.para_dict["graph_setting"].graph_setting_size.height,
     )
     self.log("width / height : {:d} x {:d}, timeframe {}".format(
         graph_width,
         graph_height,
         logging_tools.get_diff_time_str(timeframe),
     ))
     # store for DEF generation
     self.width = graph_width
     self.height = graph_height
     dev_dict = {
         cur_dev.pk: unicode(cur_dev.display_name)
         for cur_dev in device.objects.filter(Q(pk__in=dev_pks))
     }
     s_graph_key_dict = self._create_graph_keys(graph_keys)
     self.log("found {}: {}".format(
         logging_tools.get_plural("device", len(dev_pks)), ", ".join([
             "{:d} ({})".format(pk, dev_dict.get(pk, "unknown"))
             for pk in dev_pks
         ])))
     if self.para_dict["graph_setting"].merge_graphs:
         # reorder all graph_keys into one graph_key_dict
         s_graph_key_dict = {"all": sum(s_graph_key_dict.values(), [])}
     self.log("{}: {}".format(
         logging_tools.get_plural("graph key", len(graph_keys)),
         ", ".join([full_graph_key(_v) for _v in graph_keys])))
     self.log("{}: {}".format(
         logging_tools.get_plural("top level key", len(s_graph_key_dict)),
         ", ".join(sorted(s_graph_key_dict)),
     ))
     enumerated_dev_pks = [("{:d}.{:d}".format(_idx, _pk), _pk)
                           for _idx, _pk in enumerate(dev_pks)]
     if self.para_dict["graph_setting"].merge_devices:
         # one device per graph
         graph_key_list = [[GraphTarget(
             g_key, enumerated_dev_pks,
             v_list)] for g_key, v_list in s_graph_key_dict.iteritems()]
     else:
         graph_key_list = []
         for g_key, v_list in sorted(s_graph_key_dict.iteritems()):
             graph_key_list.append([
                 GraphTarget(g_key, [(dev_id, dev_pk)], v_list)
                 for dev_id, dev_pk in enumerated_dev_pks
             ])
     if self.para_dict["graph_setting"].merge_graphs:
         # set header
         [_gt.set_header("all") for _gt in sum(graph_key_list, [])]
     _num_g = sum([len(_graph_line) for _graph_line in graph_key_list])
     self.log("number of graphs to create: {:d}".format(_num_g))
     graph_list = E.graph_list()
     _job_add_dict = self._get_jobs(dev_dict)
     for _graph_line in graph_key_list:
         self.log("starting graph_line")
         # iterate in case scale_mode is not None
         _iterate_line, _line_iteration = (True, 0)
         while _iterate_line:
             for _graph_target in _graph_line:
                 _graph_target.abs_file_loc = str(
                     os.path.join(self.para_dict["graph_root"],
                                  _graph_target.graph_name))
                 # clear list of defs, reset result
                 _graph_target.reset()
                 # reset colorizer for current graph
                 self.colorizer.reset()
                 self.abs_start_time = int((self.para_dict["start_time"] -
                                            self.dt_1970).total_seconds())
                 self.abs_end_time = int((self.para_dict["end_time_fc"] -
                                          self.dt_1970).total_seconds())
                 rrd_pre_args = [
                     _graph_target.abs_file_loc,
                     "-E",  # slope mode
                     "-Rlight",  # font render mode, slight hint
                     "-Gnormal",  # render mode
                     "-P",  # use pango markup
                     # "-nDEFAULT:8:",
                     "-w {:d}".format(graph_width),
                     "-h {:d}".format(graph_height),
                     "-aPNG",  # image format
                     # "--daemon", "unix:{}".format(global_config["RRD_CACHED_SOCKET"]),  # rrd caching daemon address
                     "-W {} by init.at".format(
                         License.objects.get_init_product().name),  # title
                     "--slope-mode",  # slope mode
                     "-cBACK#ffffff",
                     "--end",
                     "{:d}".format(self.abs_end_time),  # end
                     "--start",
                     "{:d}".format(self.abs_start_time),  # start
                     GraphVar(self, _graph_target, None, None,
                              "").header_line,
                 ]
                 # outer loop: iterate over all keys for the graph
                 for graph_key in sorted(_graph_target.graph_keys):
                     # inner loop: iterate over all dev ids for the graph
                     for _cur_id, cur_pk in _graph_target.dev_list:
                         # print "***", _cur_id, cur_pk
                         if (cur_pk, graph_key) in local_ds:
                             # resolve
                              for _mvs, _mvv in local_ds[(cur_pk, graph_key)]:
                                 _take = True
                                 try:
                                      if os.stat(_mvs.file_name)[stat.ST_SIZE] < 100:
                                          self.log(
                                              "skipping {} (file is too small)".format(_mvs.file_name),
                                              logging_tools.LOG_LEVEL_ERROR)
                                          _take = False
                                  except:
                                      self.log(
                                          "RRD file {} not accessible: {}".format(
                                              _mvs.file_name,
                                              process_tools.get_except_info(),
                                          ), logging_tools.LOG_LEVEL_ERROR)
                                      _take = False
                                 if _take and _line_iteration == 0:
                                     # add GraphVars only on the first iteration
                                     # print "**", graph_key, _mvs.key, _mvv.key
                                     # store def
                                     _graph_target.add_def(
                                         (_mvs.key, _mvv.key),
                                         GraphVar(self, _graph_target, _mvs,
                                                  _mvv, graph_key,
                                                  dev_dict[cur_pk]),
                                         "header_str",
                                     )
                 if _graph_target.draw_keys:
                     draw_it = True
                     removed_keys = set()
                     while draw_it:
                          _ts_setting = self.para_dict["graph_setting"].graph_setting_timeshift
                          if _ts_setting:
                              timeshift = _ts_setting.seconds
                              if timeshift == 0:
                                  timeshift = self.abs_end_time - self.abs_start_time
                          else:
                              timeshift = 0
                         rrd_args = rrd_pre_args + sum([
                             _graph_target.graph_var_def(
                                 _key,
                                 timeshift=timeshift,
                             ) for _key in _graph_target.draw_keys
                         ], [])
                         rrd_args.extend(_graph_target.rrd_post_args)
                         rrd_args.extend([
                             "--title", "{} on {} (tf: {}{})".format(
                                 _graph_target.header,
                                 dev_dict.get(_graph_target.dev_list[0][1],
                                              "unknown")
                                 if len(_graph_target.dev_list) == 1 else
                                 logging_tools.get_plural(
                                     "device", len(_graph_target.dev_list)),
                                 logging_tools.get_diff_time_str(timeframe),
                                 ", with forecast"
                                 if self.para_dict["end_time"] !=
                                 self.para_dict["end_time_fc"] else "",
                             )
                         ])
                         rrd_args.extend(
                             self._create_job_args(_graph_target.dev_list,
                                                   _job_add_dict))
                         self.proc.flush_rrdcached(_graph_target.file_names)
                         try:
                             draw_result = rrdtool.graphv(*rrd_args)
                         except:
                             # in case of strange 'argument 0 has to be a string or a list of strings'
                             self.log(
                                 "error creating graph: {}".format(
                                     process_tools.get_except_info()),
                                 logging_tools.LOG_LEVEL_ERROR)
                              for _line in process_tools.exception_info().log_lines:
                                 self.log(u"    {}".format(_line),
                                          logging_tools.LOG_LEVEL_ERROR)
                             if global_config["DEBUG"]:
                                 for _idx, _entry in enumerate(rrd_args, 1):
                                     self.log(u"  {:4d} {}".format(
                                         _idx, _entry))
                             draw_result = None
                             draw_it = False
                         else:
                             # compare draw results, add -l / -u when scale_mode is not None
                             val_dict = {}
                             # new code
                             for key, value in draw_result.iteritems():
                                 if not key.startswith("print["):
                                     continue
                                 _xml = etree.fromstring(value)
                                 _unique_id = int(_xml.get("unique_id"))
                                 # print etree.tostring(_xml, pretty_print=True)
                                 try:
                                     value = float(_xml.text)
                                 except:
                                     value = None
                                 else:
                                     pass  # value = None if value == 0.0 else value
                                 _s_key, _v_key = (_xml.get("mvs_key"),
                                                   _xml.get("mvv_key"))
                                 if value is not None:
                                     _key = (_unique_id, (_s_key, _v_key))
                                     val_dict.setdefault(
                                         _key,
                                         {})[_xml.get("cf")] = (value, _xml)
                             # list of empty (all none or 0.0 values) keys
                             _zero_keys = [
                                 key for key, value in val_dict.iteritems()
                                 if all([
                                     _v[0] in [0.0, None]
                                     for _k, _v in value.iteritems()
                                 ])
                             ]
                              if _zero_keys and self.para_dict["graph_setting"].hide_empty:
                                 # remove all-zero structs
                                 val_dict = {
                                     key: value
                                     for key, value in val_dict.iteritems()
                                     if key not in _zero_keys
                                 }
                             for key, value in val_dict.iteritems():
                                 _graph_target.feed_draw_result(key, value)
                             # check if the graphs shall always include y=0
                             draw_it = False
                              if self.para_dict["graph_setting"].include_zero:
                                 if "value_min" in draw_result and "value_max" in draw_result:
                                     if draw_result["value_min"] > 0.0:
                                         _graph_target.set_post_arg(
                                             "-l", "0")
                                         draw_it = True
                                     if draw_result["value_max"] < 0.0:
                                         _graph_target.set_post_arg(
                                             "-u", "0")
                                         draw_it = True
                             # check for empty graphs
                              empty_keys = set(_graph_target.draw_keys) - set(val_dict.keys())
                              if empty_keys and self.para_dict["graph_setting"].hide_empty:
                                 self.log(u"{}: {}".format(
                                     logging_tools.get_plural(
                                         "empty key", len(empty_keys)),
                                     ", ".join(
                                         sorted([
                                             "{} (dev {:d})".format(
                                                 _key, _pk)
                                             for _pk, _key in empty_keys
                                         ])),
                                 ))
                                 removed_keys |= empty_keys
                                 _graph_target.remove_keys(empty_keys)
                                 # draw_keys = [_key for _key in draw_keys if _key not in empty_keys]
                                 if not _graph_target.draw_keys:
                                     draw_result = None
                                 else:
                                     draw_it = True
                     _graph_target.draw_result = draw_result
                     _graph_target.removed_keys = removed_keys
                 else:
                     self.log(
                         "no DEFs for graph_key_dict {}".format(
                             _graph_target.graph_key),
                         logging_tools.LOG_LEVEL_ERROR)
             _iterate_line = False
             _valid_graphs = [
                 _entry for _entry in _graph_line if _entry.valid
             ]
             if _line_iteration == 0 and self.para_dict[
                     "graph_setting"].scale_mode in [
                         GraphScaleModeEnum.level,
                         GraphScaleModeEnum.to100
                     ] and (len(_valid_graphs) > 1
                            or self.para_dict["graph_setting"].scale_mode
                            == GraphScaleModeEnum.to100):
                 _line_iteration += 1
                 if self.para_dict[
                         "graph_setting"].scale_mode == GraphScaleModeEnum.level:
                     _vmin_v, _vmax_v = (
                         [
                             _entry.draw_result["value_min"]
                             for _entry in _valid_graphs
                         ],
                         [
                             _entry.draw_result["value_max"]
                             for _entry in _valid_graphs
                         ],
                     )
                     # only rescale when the graphs actually differ
                     if len(set(_vmin_v)) > 1 or len(set(_vmax_v)) > 1:
                         _vmin, _vmax = (
                             FLOAT_FMT.format(min(_vmin_v)),
                             FLOAT_FMT.format(max(_vmax_v)),
                         )
                         self.log("setting y_min / y_max for {} to {} / {}".
                                  format(
                                      _valid_graphs[0].graph_key,
                                      _vmin,
                                      _vmax,
                                  ))
                         for _entry in _valid_graphs:
                             _entry.set_y_mm(_vmin, _vmax)
                         _iterate_line = True
                 else:
                     for _entry in _valid_graphs:
                         _entry.adjust_max_y(100)
                     self.log("set max y_val to 100 for all graphs")
                     _iterate_line = True
             if not _iterate_line:
                 graph_list.extend([
                     _graph_target.graph_xml(dev_dict)
                     for _graph_target in _graph_line
                 ])
     # print etree.tostring(graph_list, pretty_print=True)
     return graph_list
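
The zero-key filtering above is easier to follow in isolation. Below is a minimal, self-contained sketch of the same idea: parse the PRINT results, group them by (unique_id, (mvs_key, mvv_key)), and drop keys whose values are all 0.0. The XML layout and the hide_empty flag mirror the code above; the sample keys and values are made up for illustration.

from lxml import etree

def filter_empty(print_results, hide_empty=True):
    # group parsed PRINT values by (unique_id, (mvs_key, mvv_key))
    val_dict = {}
    for _text in print_results:
        _xml = etree.fromstring(_text)
        try:
            value = float(_xml.text)
        except (TypeError, ValueError):
            value = None
        if value is not None:
            _key = (int(_xml.get("unique_id")),
                    (_xml.get("mvs_key"), _xml.get("mvv_key")))
            val_dict.setdefault(_key, {})[_xml.get("cf")] = (value, _xml)
    if hide_empty:
        # None values never enter val_dict, so checking for 0.0 is enough
        val_dict = {
            key: value
            for key, value in val_dict.items()
            if not all(_v[0] == 0.0 for _v in value.values())
        }
    return val_dict

# the first key is all-zero and gets dropped, the second survives
print(filter_empty([
    '<print unique_id="1" mvs_key="icsw.load" mvv_key="load1" cf="AVERAGE">0.0</print>',
    '<print unique_id="2" mvs_key="icsw.mem" mvv_key="used" cf="AVERAGE">1024.0</print>',
]))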
Example #24
    def __call__(self):
        if self.Meta.background:
            if self.Meta.cur_running < self.Meta.max_instances:
                self.Meta.cur_running += 1
                com_instance.bg_idx += 1
                new_bg_name = "bg_{}_{:d}".format(self.sc_obj.name, com_instance.bg_idx)

                self.sc_obj.main_proc.add_process(BackgroundProcess(new_bg_name), start=True)

                self.sc_obj.main_proc.send_to_process(
                    new_bg_name,
                    "set_option_dict",
                    self.option_dict)
                self.sc_obj.main_proc.send_to_process(
                    new_bg_name,
                    "set_srv_com",
                    unicode(self.srv_com),
                )
                self.sc_obj.main_proc.send_to_process(
                    new_bg_name,
                    "start_command",
                    self.sc_obj.name,
                )
                db_tools.close_connection()
                self.srv_com.set_result(
                    "sent to background"
                )
            else:
                self.srv_com.set_result(
                    "too many instances running ({:d} of {:d})".format(self.Meta.cur_running, self.Meta.max_instances),
                    server_command.SRV_REPLY_STATE_ERROR
                )
        else:
            db_debug = global_config["DATABASE_DEBUG"]
            if db_debug:
                pre_queries = len(connection.queries)
            self.start_time = time.time()
            try:
                result = self.sc_obj._call(self)
            except:
                exc_info = process_tools.exception_info()
                for line in exc_info.log_lines:
                    self.log(line, logging_tools.LOG_LEVEL_CRITICAL)
                self.srv_com.set_result(
                    process_tools.get_except_info(exc_info.except_info),
                    server_command.SRV_REPLY_STATE_CRITICAL
                )
                # write to logging-server
                err_h = io_stream_helper.io_stream(
                    "/var/lib/logging-server/py_err_zmq",
                    zmq_context=self.zmq_context
                )
                err_h.write("\n".join(exc_info.log_lines))
                err_h.close()
            else:
                if result is not None:
                    self.log(
                        "command got an (unexpected) result: '{}'".format(
                            str(result)
                        ),
                        logging_tools.LOG_LEVEL_ERROR
                    )
            self.end_time = time.time()
            if int(self.srv_com["result"].attrib["state"]):
                self.log(
                    u"result is ({:d}) {}".format(
                        int(self.srv_com["result"].attrib["state"]),
                        self.srv_com["result"].attrib["reply"]
                    ),
                    logging_tools.LOG_LEVEL_ERROR
                )
            if self.Meta.show_execution_time:
                self.log("run took {}".format(logging_tools.get_diff_time_str(self.end_time - self.start_time)))
                self.srv_com["result"].attrib["reply"] = u"{} in {}".format(
                    self.srv_com["result"].attrib["reply"],
                    logging_tools.get_diff_time_str(self.end_time - self.start_time)
                )
            if db_debug:
                self.log("queries executed : {:d}".format(len(connection.queries) - pre_queries))
Example #25
    def _graph_rrd(self, *args, **kwargs):
        srv_com = server_command.srv_command(source=args[0])
        orig_dev_pks = srv_com.xpath(".//device_list/device/@pk",
                                     smart_strings=False)
        orig_dev_pks = device.objects.filter(
            Q(pk__in=orig_dev_pks) & Q(machinevector__pk__gt=0)).values_list(
                "pk", flat=True)
        dev_pks = [
            dev_pk for dev_pk in orig_dev_pks
            if self.EC.consume("graph", dev_pk)
        ]
        if len(orig_dev_pks) != len(dev_pks):
            self.log(
                "Access to device rrds denied due to ova limits: {}".format(
                    set(orig_dev_pks).difference(dev_pks)),
                logging_tools.LOG_LEVEL_ERROR,
            )

        graph_keys = json.loads(srv_com["*graph_key_list"])
        para_dict = {
            para.tag: para.text
            for para in srv_com.xpath(".//parameters", smart_strings=False)[0]
        }
        for key in ["start_time", "end_time"]:
            # cast to datetime
            para_dict[key] = dateutil.parser.parse(para_dict[key])
        _raw = json.loads(para_dict["graph_setting"])
        # fake name
        _raw["name"] = uuid.uuid4().get_urn()
        _setting = GraphSettingSerializerCustom(data=_raw)
        if _setting.is_valid():
            para_dict["graph_setting"] = _setting.save()
            for key, _default in [
                ("debug_mode", "0"),
            ]:
                para_dict[key] = bool(int(para_dict.get(key, _default)))
            self._open_rrdcached_socket()
            try:
                graph_list = RRDGraph(
                    self.graph_root_debug if para_dict.get(
                        "debug_mode", False) else self.graph_root, self.log,
                    self.colorizer, para_dict,
                    self).graph(dev_pks, graph_keys)
            except:
                for _line in process_tools.exception_info().log_lines:
                    self.log(_line, logging_tools.LOG_LEVEL_ERROR)
                srv_com["graphs"] = []
                srv_com.set_result(
                    "error generating graphs: {}".format(
                        process_tools.get_except_info()),
                    server_command.SRV_REPLY_STATE_CRITICAL)
            else:
                srv_com["graphs"] = graph_list
                srv_com.set_result(
                    "generated {}".format(
                        logging_tools.get_plural("graph", len(graph_list))),
                    server_command.SRV_REPLY_STATE_OK)
        else:
            srv_com["graphs"] = []
            srv_com.set_result(
                "graphsettings are not valid: {}".format(str(_setting.errors)),
                server_command.SRV_REPLY_STATE_CRITICAL)

        self._close_rrdcached_socket()
        self.send_pool_message("remote_call_async_result", unicode(srv_com))
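
The parameter handling in _graph_rrd (flatten the <parameters> element into a dict, cast the time range, coerce integer flags to booleans) can be exercised on its own. A small sketch with a hand-built XML fragment standing in for the srv_command payload; the tag names follow the code above, the timestamps are arbitrary:

import dateutil.parser
from lxml import etree

_params = etree.fromstring(
    "<parameters>"
    "<start_time>2017-01-01T00:00:00</start_time>"
    "<end_time>2017-01-02T00:00:00</end_time>"
    "<debug_mode>1</debug_mode>"
    "</parameters>"
)
para_dict = {para.tag: para.text for para in _params}
for key in ["start_time", "end_time"]:
    # cast to datetime
    para_dict[key] = dateutil.parser.parse(para_dict[key])
para_dict["debug_mode"] = bool(int(para_dict.get("debug_mode", "0")))
print(para_dict)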
Example #26
 def process_scripts(self, conf_pk):
     cur_conf = self.config_dict[conf_pk]
     self.cur_conf = cur_conf
     # build local variables
     local_vars = dict(
         sum([[(cur_var.name, cur_var.value) for cur_var in getattr(
             cur_conf, "config_{}_set".format(var_type)).all()]
              for var_type in ["str", "int", "bool", "blob"]], []))
     # copy local vars
     conf_dict = self.conf_dict
     for key, value in local_vars.iteritems():
         conf_dict[key] = value
     self.log("config {}: {} defined, {} enabled, {}".format(
         cur_conf.name,
         logging_tools.get_plural("script",
                                  len(cur_conf.config_script_set.all())),
         logging_tools.get_plural(
             "script",
             len([
                 cur_scr for cur_scr in cur_conf.config_script_set.all()
                 if cur_scr.enabled
             ])),
         logging_tools.get_plural("local variable",
                                  len(local_vars.keys()))))
     for cur_script in [
             cur_scr for cur_scr in cur_conf.config_script_set.all()
             if cur_scr.enabled
     ]:
         self.init_uid_gid()
         lines = cur_script.value.split("\n")
         self.log(" - scriptname '{}' (pri {:d}) has {}".format(
             cur_script.name, cur_script.priority,
             logging_tools.get_plural("line", len(lines))))
         start_c_time = time.time()
         try:
             code_obj = compile(
                 cur_script.value.replace("\r\n", "\n") + "\n",
                 "<script {}>".format(cur_script.name), "exec")
         except:
             exc_info = process_tools.exception_info()
             self.log("error during compile of {} ({})".format(
                 cur_script.name,
                 logging_tools.get_diff_time_str(time.time() -
                                                 start_c_time)),
                      logging_tools.LOG_LEVEL_ERROR,
                      register=True)
             for line in exc_info.log_lines:
                 self.log("   *** {}".format(line),
                          logging_tools.LOG_LEVEL_ERROR)
             conf_dict["called"].setdefault(False, []).append(
                 (cur_conf.pk, [line for line in exc_info.log_lines]))
         else:
             compile_time = time.time() - start_c_time
             # prepare stdout / stderr
             start_time = time.time()
             stdout_c, stderr_c = (logging_tools.dummy_ios(),
                                   logging_tools.dummy_ios())
             old_stdout, old_stderr = (sys.stdout, sys.stderr)
             sys.stdout, sys.stderr = (stdout_c, stderr_c)
             self.__touched_objects, self.__touched_links, self.__deleted_files = (
                 [], [], [])
             try:
                 ret_code = eval(
                     code_obj,
                     {},
                     {
                         # old version
                         "dev_dict": conf_dict,
                         # new version
                         "conf_dict": conf_dict,
                         "router_obj": self.router_obj,
                         "config": self,
                         "dir_object": dir_object,
                         "delete_object": delete_object,
                         "copy_object": copy_object,
                         "link_object": link_object,
                         "file_object": file_object,
                         "do_ssh": do_ssh,
                         "do_etc_hosts": do_etc_hosts,
                         "do_hosts_equiv": do_hosts_equiv,
                         "do_nets": do_nets,
                         "do_routes": do_routes,
                         "do_fstab": do_fstab,
                         "do_uuid": do_uuid,
                         "do_uuid_old": do_uuid_old,
                         "partition_setup": partition_setup,
                     })
             except:
                 exc_info = process_tools.exception_info()
                 conf_dict["called"].setdefault(False, []).append(
                     (cur_conf.pk, [line for line in exc_info.log_lines]))
                 self.log("An Error occured during eval() after {}:".format(
                     logging_tools.get_diff_time_str(time.time() -
                                                     start_time)),
                          logging_tools.LOG_LEVEL_ERROR,
                          register=True)
                 for line in exc_info.log_lines:
                     self.log(" *** {}".format(line),
                              logging_tools.LOG_LEVEL_ERROR)
                 # log stdout / stderr
                 self._show_logs(stdout_c, stderr_c)
                 # create error entry, preferably not directly in config :-)
                 # FIXME
                 # remove objects
                 if self.__touched_objects:
                     self.log("{} touched : {}".format(
                         logging_tools.get_plural(
                             "object", len(self.__touched_objects)),
                         ", ".join([
                             cur_obj.get_path()
                             for cur_obj in self.__touched_objects
                         ])))
                     for to in self.__touched_objects:
                         to.error_flag = True
                 else:
                     self.log("no objects touched")
                 if self.__touched_links:
                     self.log("{} touched: {}".format(
                         logging_tools.get_plural(
                             "link", len(self.__touched_links)), ", ".join([
                                 cur_link.get_path()
                                 for cur_link in self.__touched_links
                             ])))
                     for tl in self.__touched_links:
                         tl.error_flag = True
                 else:
                     self.log("no links touched")
                 if self.__deleted_files:
                     self.log("{} deleted: {}".format(
                         logging_tools.get_plural(
                             "delete", len(self.__deleted_files)),
                         ", ".join([
                             cur_dl.get_path()
                             for cur_dl in self.__deleted_files
                         ])))
                     for d_file in self.__deleted_files:
                         d_file.error_flag = True
                 else:
                     self.log("no objects deleted")
             else:
                 conf_dict["called"].setdefault(True,
                                                []).append(cur_conf.pk)
                 if ret_code is None:
                     ret_code = 0
                 self.log(
                     "  exited after {} ({} compile time) with return code {:d}"
                     .format(
                         logging_tools.get_diff_time_str(time.time() -
                                                         start_time),
                         logging_tools.get_diff_time_str(compile_time),
                         ret_code))
                 self._show_logs(
                     stdout_c,
                     stderr_c,
                     register_error=True,
                     pre_str="{} wrote something to stderr".format(
                         cur_conf.name))
             finally:
                 sys.stdout, sys.stderr = (old_stdout, old_stderr)
                 code_obj = None
     # print unicode(self.g_tree)
     # remove local vars
     for key in local_vars.iterkeys():
         del conf_dict[key]
     del self.cur_conf
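
At its core process_scripts uses a capture-and-exec pattern: compile the script source, swap sys.stdout / sys.stderr for dummy streams, run the code object against a restricted namespace, and always restore the real streams. A minimal sketch of just that mechanism; DummyIO is a stand-in for logging_tools.dummy_ios, and the real namespace also injects the config helpers listed above:

import sys

class DummyIO(object):
    # records writes instead of printing them
    def __init__(self):
        self._buf = []

    def write(self, what):
        self._buf.append(what)

    def getvalue(self):
        return "".join(self._buf)

def run_script(source, local_ns):
    code_obj = compile(source.replace("\r\n", "\n") + "\n", "<script>", "exec")
    old_stdout, old_stderr = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = DummyIO(), DummyIO()
    try:
        # a code object compiled in "exec" mode runs fine under eval() and returns None
        eval(code_obj, {}, local_ns)
    finally:
        out, err = sys.stdout.getvalue(), sys.stderr.getvalue()
        sys.stdout, sys.stderr = old_stdout, old_stderr
    return out, err

out, err = run_script("print('hello from a config script')", {"conf_dict": {}})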