Example #1
 def address(self):
     if not self.__read:
         from initat.icsw.service.instance import InstanceXML
         from initat.cluster.backbone.server_enums import icswServiceEnum
         _xml = InstanceXML(quiet=True)
         _port = _xml.get_port_dict(icswServiceEnum.memcached, command=True)
         self.__address = ["127.0.0.1:{:d}".format(_port)]
         self.__read = True
     return self.__address
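The method above lazily resolves the memcached command port once and caches it as a single-entry host:port list. A minimal, hypothetical sketch of how such a list is usually consumed (the AddressCache holder name and the python-memcached client are assumptions, not part of the source):

    import memcache  # python-memcached; assumed client, not shown in the source

    cache = AddressCache()                       # hypothetical class exposing address()
    client = memcache.Client(cache.address())    # e.g. ["127.0.0.1:8001"]
    client.set("icsw_example_key", "value")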
Example #2
def _get_parser():
    _xml = InstanceXML(quiet=True)
    parser = argparse.ArgumentParser("set a passive check command")
    parser.add_argument("-p", type=int, default=_xml.get_port_dict(icswServiceEnum.monitor_slave, command=True), dest="port", help="target port [%(default)d]")
    parser.add_argument("-H", type=str, default="localhost", dest="host", help="target host [%(default)s]")
    parser.add_argument("--device", type=str, default="", help="device [%(default)s]", required=True)
    parser.add_argument("--check", type=str, default="", help="name of check [%(default)s]", required=True)
    parser.add_argument("--state", type=str, default="OK", choices=["OK", "WARN", "CRITICAL"], help="check state [%(default)s]")
    parser.add_argument("--output", type=str, default="", help="check output [%(default)s]", required=True)
    parser.add_argument("-v", help="verbose mode [%(default)s]", default=False, dest="verbose", action="store_true")
    parser.add_argument("-t", type=int, default=10, dest="timeout", help="set timeout [%(default)d]")
    return parser
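A hedged sketch of a main() driving this parser; the import line, the command name and the payload keys handed to srv_command are assumptions, while the ZMQConnection call mirrors the pattern from Example #5 below:

    import os
    from initat.tools import net_tools, server_command  # import path assumed

    def main():
        opts = _get_parser().parse_args()
        conn_str = "tcp://{}:{:d}".format(opts.host, opts.port)
        # command name and payload keys are illustrative assumptions
        _sc = server_command.srv_command(command="passive_check_result")
        for _key in ["device", "check", "state", "output"]:
            _sc[_key] = getattr(opts, _key)
        _result = net_tools.ZMQConnection(
            "passive_check_{:d}".format(os.getpid())
        ).add_connection(conn_str, _sc)
        print(_result)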
Example #3
File: relay.py Project: bopopescu/icsw
 def _register_local_syncer(self):
     _inst_xml = InstanceXML(log_com=self.log)
     self.__local_syncer_uuid = "urn:uuid:{}:{}:".format(
         uuid_tools.get_uuid(),
         _inst_xml.get_uuid_postfix(icswServiceEnum.monitor_slave))
     self.__local_syncer_addr = "tcp://127.0.0.1:{:d}".format(
         _inst_xml.get_port_dict(icswServiceEnum.monitor_slave,
                                 command=True))
     self.log("connecting to local syncer {} (uuid={})".format(
         self.__local_syncer_addr,
         self.__local_syncer_uuid,
     ))
     self.main_socket.connect(self.__local_syncer_addr)
Example #4
class LocalParser(argparse.ArgumentParser):
    def __init__(self):
        argparse.ArgumentParser.__init__(
            self, "send command to servers of the init.at Clustersoftware")
        self.inst_xml = InstanceXML(quiet=True)
        inst_list = []
        for inst in self.inst_xml.get_all_instances():
            if len(inst.xpath(".//network/ports/port[@type='command']")):
                inst_list.append(inst.get("name"))
        self.add_argument("arguments",
                          nargs="+",
                          help="additional arguments, first one is command")
        self.add_argument("-t",
                          help="set timeout [%(default)d]",
                          default=10,
                          type=int,
                          dest="timeout")
        self.add_argument("-p",
                          help="port or instance/service [%(default)s]",
                          default="{:d}".format(
                              self.inst_xml.get_port_dict(
                                  icswServiceEnum.host_monitoring,
                                  command=True)),
                          dest="port",
                          type=str)
        self.add_argument("-P",
                          help="protocoll [%(default)s]",
                          type=str,
                          default="tcp",
                          choices=["tcp", "ipc"],
                          dest="protocoll")
        self.add_argument("-S",
                          help="servername [%(default)s]",
                          type=str,
                          default="collrelay",
                          dest="server_name")
        self.add_argument("-H",
                          "--host",
                          help="host [%(default)s] or server",
                          default="localhost",
                          dest="host")
        self.add_argument("-v",
                          help="verbose mode [%(default)s]",
                          default=False,
                          dest="verbose",
                          action="store_true")
        self.add_argument("-i",
                          help="set identity substring [%(default)s]",
                          type=str,
                          default="sc",
                          dest="identity_substring")
        self.add_argument(
            "-I",
            help="set identity string [%(default)s], has precedence over -i",
            type=str,
            default="",
            dest="identity_string")
        self.add_argument("-n",
                          help="set number of iterations [%(default)d]",
                          type=int,
                          default=1,
                          dest="iterations")
        self.add_argument("-q",
                          help="be quiet [%(default)s], overrides verbose",
                          default=False,
                          action="store_true",
                          dest="quiet")
        self.add_argument("--raw",
                          help="do not convert to server_command",
                          default=False,
                          action="store_true")
        self.add_argument("--root",
                          help="connect to root-socket [%(default)s]",
                          default=False,
                          action="store_true")
        self.add_argument("--kv",
                          help="key-value pair, colon-separated [key:value]",
                          action="append")
        self.add_argument(
            "--kva",
            help="key-attribute pair, colon-separated [key:attribute:value]",
            action="append")
        self.add_argument("--kv-path",
                          help="path to store key-value pairs under",
                          type=str,
                          default="")
        self.add_argument(
            "--split",
            help="set read socket (for split-socket command), [%(default)s]",
            type=str,
            default="")
        self.add_argument("--only-send",
                          help="only send command, [%(default)s]",
                          default=False,
                          action="store_true")

    def parse(self):
        opts = self.parse_args()
        if isinstance(opts.port, str) and opts.port.isdigit():
            opts.port = int(opts.port)
        else:
            if opts.port in self.inst_xml:
                opts.port = self.inst_xml.get_port_dict(opts.port,
                                                        command=True)
            else:
                print("Invalid service / instance name '{}'".format(opts.port))
                sys.exit(-1)
        return opts
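A short usage sketch built only from the option names defined above; treating the host option as the socket path in the ipc case is an assumption:

    opts = LocalParser().parse()
    if opts.protocoll == "tcp":
        conn_str = "tcp://{}:{:d}".format(opts.host, opts.port)
    else:
        # assumption: for ipc the host argument carries the socket path
        conn_str = "ipc://{}".format(opts.host)
    command = opts.arguments[0]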
Example #5
def device_syslog(opt_ns, cur_dev, j_logs):
    print(
        u"Information about device '{}' (full name {}, devicegroup {})".format(
            unicode(cur_dev), unicode(cur_dev.full_name),
            unicode(cur_dev.device_group)))
    print("UUID is '{}', database-ID is {:d}".format(cur_dev.uuid, cur_dev.pk))
    _cr = routing.SrvTypeRouting(force=True, ignore_errors=True)
    _ST = "logcheck-server"
    if _ST in _cr.service_types:
        _inst_xml = InstanceXML(quiet=True)
        # get logcheck-server IP
        _ls_ip = _cr[_ST][0][1]
        # get logcheck-server Port
        _ls_port = _inst_xml.get_port_dict(_ST, ptype="command")
        _sc = server_command.srv_command(command="get_syslog", )
        _sc["devices"] = _sc.builder(
            "devices", *[
                _sc.builder(
                    "device",
                    pk="{:d}".format(cur_dev.pk),
                    lines="{:d}".format(opt_ns.loglines),
                    minutes="{:d}".format(opt_ns.minutes),
                )
            ])
        _conn_str = "tcp://{}:{:d}".format(_ls_ip, _ls_port)
        _result = net_tools.ZMQConnection("icsw_state_{:d}".format(
            os.getpid())).add_connection(
                _conn_str,
                _sc,
            )
        if _result is not None:
            _dev = _result.xpath(".//ns:devices/ns:device[@pk]")[0]
            _lines = _result.xpath("ns:lines", start_el=_dev)[0]
            _rates = _result.xpath("ns:rates", start_el=_dev)
            if _rates:
                _rates = {
                    int(_el.get("timeframe")): float(_el.get("rate"))
                    for _el in _rates[0]
                }
                print("rate info: {}".format(", ".join([
                    "{:.2f} lines/sec in {}".format(
                        _rates[_seconds],
                        logging_tools.get_diff_time_str(_seconds))
                    for _seconds in sorted(_rates)
                ])))
            else:
                print("no rate info found")
                print(_rates)
            _out_lines = logging_tools.new_form_list()
            for _entry in process_tools.decompress_struct(_lines.text):
                _out_lines.append([
                    logging_tools.form_entry(_entry["line_id"], header="idx"),
                    logging_tools.form_entry(
                        "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}".format(
                            *_entry["line_datetime_parsed"]),
                        header="Timestamp",
                    ),
                ] + [
                    logging_tools.form_entry(_entry[_key], header=_key)
                    for _key in ["hostname", "priority", "facility", "tag"]
                ] + [
                    logging_tools.form_entry(_entry["text"], header="text"),
                ])
            print(unicode(_out_lines))
        else:
            print("got no result from {} ({})".format(_conn_str, _ST))
    else:
        print("No logcheck-server found, skipping syslog display")
Example #6
class CapabilityProcess(threading_tools.process_obj):
    def process_init(self):
        global_config.close()
        self.__log_template = logging_tools.get_logger(
            global_config["LOG_NAME"],
            global_config["LOG_DESTINATION"],
            zmq=True,
            context=self.zmq_context)
        db_tools.close_connection()
        self._instance = InstanceXML(log_com=self.log)
        self._init_network()
        self._init_capabilities()
        self.__last_user_scan = None
        self.__scan_running = False
        self.register_timer(self._update,
                            2 if global_config["DEBUG"] else 30,
                            instant=True)

    def log(self, what, log_level=logging_tools.LOG_LEVEL_OK):
        self.__log_template.log(log_level, what)

    def loop_post(self):
        self.collectd_socket.close()
        self.vector_socket.close()
        self.__log_template.close()

    def _init_network(self):
        # connection to local collserver socket
        conn_str = process_tools.get_zmq_ipc_name(
            "vector", s_name="collserver", connect_to_root_instance=True)
        vector_socket = self.zmq_context.socket(zmq.PUSH)  # @UndefinedVariable
        vector_socket.setsockopt(zmq.LINGER, 0)  # @UndefinedVariable
        vector_socket.connect(conn_str)
        self.vector_socket = vector_socket
        self.log("connected vector_socket to {}".format(conn_str))
        # connection to local collectd server
        _cc_str = "tcp://localhost:{:d}".format(
            # get receive port for collectd-server drop
            self._instance.get_port_dict(icswServiceEnum.collectd_server,
                                         ptype="receive"))
        collectd_socket = self.zmq_context.socket(zmq.PUSH)
        collectd_socket.setsockopt(zmq.LINGER, 0)
        collectd_socket.connect(_cc_str)
        self.log("connected collectd_socket to {}".format(_cc_str))
        self.collectd_socket = collectd_socket

    def _init_capabilities(self):
        self.__cap_list = []
        if global_config["BACKUP_DATABASE"]:
            self.log("doing database backup, ignoring capabilities",
                     logging_tools.LOG_LEVEL_WARN)
        else:
            # read caps
            _dir = os.path.dirname(__file__)
            self.log("init server capabilities from directory {}".format(_dir))
            SRV_CAPS = []
            for entry in os.listdir(_dir):
                if entry.endswith(".py") and entry not in ["__init__.py"]:
                    _imp_name = "initat.cluster_server.capabilities.{}".format(
                        entry.split(".")[0])
                    _mod = importlib.import_module(_imp_name)
                    for _key in dir(_mod):
                        _value = getattr(_mod, _key)
                        if inspect.isclass(_value) and issubclass(
                                _value, base.BackgroundBase
                        ) and _value != base.BackgroundBase:
                            SRV_CAPS.append(_value)
            self.log("checking {}".format(
                logging_tools.get_plural("capability", len(SRV_CAPS))))
            self.__server_cap_dict = {}
            self.__cap_list = []
            try:
                sys_cc = config_catalog.objects.get(Q(system_catalog=True))
            except config_catalog.DoesNotExist:
                sys_cc = factories.ConfigCatalog(name="local",
                                                 system_catalog=True)
            for _srv_cap in SRV_CAPS:
                cap_name = _srv_cap.Meta.name
                try:
                    cap_descr = _srv_cap.Meta.description
                except:
                    self.log(
                        "capability {} has no description set, ignoring...".
                        format(cap_name), logging_tools.LOG_LEVEL_ERROR)
                else:
                    _new_c = factories.Config(
                        name=cap_name,
                        description=cap_descr,
                        config_catalog=sys_cc,
                        server_config=True,
                        # system_config=True,
                    )
                    _sql_info = config_tools.server_check(server_type=cap_name)
                    if _sql_info.effective_device:
                        self.__cap_list.append(cap_name)
                        self.__server_cap_dict[cap_name] = _srv_cap(
                            self, _sql_info)
                        self.log("capability {} is enabled on {}".format(
                            cap_name,
                            unicode(_sql_info.effective_device),
                        ))
                    else:
                        self.log("capability {} is disabled".format(cap_name))

    def add_ova_statistics(self, cur_time, drop_com):
        _bldr = drop_com.builder
        # print "*", cur_time, drop_com, _bldr
        my_vector = _bldr("values")
        for _csr in icswEggConsumer.objects.all():
            my_vector.append(
                hm_classes.mvect_entry(
                    "icsw.ova.{}.{}".format(_csr.content_type.model,
                                            _csr.action),
                    info="Ova consumed by {} on {}".format(
                        _csr.action, _csr.content_type.model),
                    default=0,
                    value=_csr.consumed,
                    factor=1,
                    base=1,
                    valid_until=cur_time + 3600,
                ).build_xml(_bldr))
        drop_com["vector_ova"] = my_vector
        drop_com["vector_ova"].attrib["type"] = "vector"

    def _update(self):
        cur_time = time.time()
        drop_com = server_command.srv_command(command="set_vector")

        mach_vectors = []
        for cap_name in self.__cap_list:
            self.__server_cap_dict[cap_name](cur_time, drop_com, mach_vectors)
        self.add_ova_statistics(cur_time, drop_com)
        self.vector_socket.send_unicode(unicode(drop_com))
        # print drop_com.pretty_print()
        for _mv in mach_vectors:
            try:
                self.collectd_socket.send_unicode(etree.tostring(_mv))
            except:
                self.log(
                    "unable to send machvector to collectd: {}".format(
                        process_tools.get_except_info(), ),
                    logging_tools.LOG_LEVEL_ERROR,
                )
Example #7
 def network_bind(self, **kwargs):
     _need_all_binds = kwargs.get("need_all_binds", False)
     pollin = kwargs.get("pollin", None)
     ext_call = kwargs.get("ext_call", False)
     immediate = kwargs.get("immediate", True)
     if "server_type" in kwargs:
         _inst = InstanceXML(log_com=self.log)
         _srv_type = kwargs["server_type"]
         bind_port = _inst.get_port_dict(_srv_type, ptype="command")
     elif "service_type_enum" in kwargs:
         _inst = InstanceXML(log_com=self.log)
         _srv_type = kwargs["service_type_enum"]
         bind_port = _inst.get_port_dict(_srv_type, ptype="command")
     elif "bind_port" in kwargs:
         bind_port = kwargs["bind_port"]
     else:
         raise KeyError("neither bind_port, service_type_enum nor server_type defined in kwargs")
     main_socket_name = kwargs.get("main_socket_name", "main_socket")
     virtual_sockets_name = kwargs.get("virtual_sockets_name", "virtual_sockets")
     bind_to_localhost = kwargs.get("bind_to_localhost", False)
     _sock_type = kwargs.get("socket_type", "ROUTER")
     if "client_type" in kwargs:
         uuid = uuid_tools.get_uuid().get_urn()
         if not uuid.startswith("urn"):
             uuid = "urn:uuid:{}".format(uuid)
         self.bind_id = "{}:{}:".format(
             uuid,
             InstanceXML(quiet=True).get_uuid_postfix(kwargs["client_type"]),
         )
         dev_r = None
     else:
         from initat.tools import cluster_location
         from initat.cluster.backbone.routing import get_server_uuid
         self.bind_id = get_server_uuid(_srv_type)
         if kwargs.get("simple_server_bind", False):
             dev_r = None
         else:
             # device recognition
             dev_r = cluster_location.DeviceRecognition()
     # virtual sockets
     if hasattr(self, virtual_sockets_name):
         _virtual_sockets = getattr(self, virtual_sockets_name)
     else:
         _virtual_sockets = []
     # main socket
     _main_socket = None
     # create bind list
     if dev_r and dev_r.device_dict:
         _bind_ips = set(
             list(dev_r.local_ips) + sum(
                 [
                     _list for _dev, _list in dev_r.ip_r_lut.iteritems()
                 ],
                 []
             )
         )
         # complex bind
         master_bind_list = [
             (
                 True,
                 [
                     "tcp://{}:{:d}".format(_local_ip, bind_port) for _local_ip in dev_r.local_ips
                 ],
                 self.bind_id,
                 None,
             )
         ]
         _virt_list = []
         for _dev, _ip_list in dev_r.ip_r_lut.iteritems():
             if _dev.pk != dev_r.device.pk:
                 _virt_list.append(
                     (
                         False,
                         [
                             "tcp://{}:{:d}".format(_virtual_ip, bind_port) for _virtual_ip in _ip_list
                         ],
                         # ignore local device
                         get_server_uuid(_srv_type, _dev.uuid),
                         _dev,
                     )
                 )
             else:
                 self.log(
                     "ignoring virtual IP list ({}) (same device)".format(
                         ", ".join(sorted(_ip_list)),
                     )
                 )
         master_bind_list.extend(_virt_list)
         # we have to bind to localhost but localhost is not present in bind_list, add master_bind
         if bind_to_localhost and not any([_ip.startswith("127.") for _ip in _bind_ips]):
             self.log(
                 "bind_to_localhost is set but not IP in range 127.0.0.0/8 found in list, adding virtual_bind",
                 logging_tools.LOG_LEVEL_WARN
             )
             master_bind_list.append(
                 (
                     False,
                     [
                         "tcp://127.0.0.1:{:d}".format(bind_port)
                     ],
                     self.bind_id,
                     None,
                 )
             )
     else:
         # simple bind
         master_bind_list = [
             (
                 True,
                 [
                     "tcp://*:{:d}".format(bind_port)
                 ],
                 self.bind_id,
                 None,
             )
         ]
     _errors = []
     # pprint.pprint(master_bind_list)
     bound_list = set()
     for master_bind, bind_list, bind_id, bind_dev in master_bind_list:
         client = process_tools.get_socket(
             self.zmq_context,
             _sock_type,
             identity=bind_id,
             immediate=immediate
         )
         for _bind_str in bind_list:
             if _bind_str in bound_list:
                 self.log(
                     "bind_str '{}' (for {}) already used, skipping ...".format(
                         _bind_str,
                         " device '{}'".format(bind_dev) if bind_dev is not None else " master device",
                     ),
                     logging_tools.LOG_LEVEL_ERROR
                 )
             else:
                 bound_list.add(_bind_str)
                 try:
                     client.bind(_bind_str)
                 except zmq.ZMQError:
                     self.log(
                         "error binding to {}: {}".format(
                             _bind_str,
                             process_tools.get_except_info(),
                         ),
                         logging_tools.LOG_LEVEL_CRITICAL
                     )
                     _errors.append(_bind_str)
                 else:
                     self.log("bound {} to {} with id {}".format(_sock_type, _bind_str, bind_id))
                     if pollin:
                         self.register_poller(client, zmq.POLLIN, pollin, ext_call=ext_call, bind_id=bind_id)
         if master_bind:
             _main_socket = client
         else:
             _virtual_sockets.append(client)
     setattr(self, main_socket_name, _main_socket)
     setattr(self, virtual_sockets_name, _virtual_sockets)
     if _errors and _need_all_binds:
         raise ValueError("{} went wrong: {}".format(logging_tools.get_plural("bind", len(_errors)), ", ".join(_errors)))
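A hedged call sketch using only keyword arguments that network_bind inspects above; the enclosing server class and the _recv_command callback are assumptions:

    self.network_bind(
        service_type_enum=icswServiceEnum.monitor_slave,  # any enum with a command port
        need_all_binds=False,
        bind_to_localhost=True,
        pollin=self._recv_command,
    )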
Example #8
class ConfigCheckObject(object):
    def __init__(self, proc):
        self.__process = proc
        # self.log = self.__process.log

    def log(self, what, log_level=logging_tools.LOG_LEVEL_OK):
        self.__process.log("[CC] {}".format(what), log_level)

    def init(self, srv_type_enum, global_config, add_config_store=True, init_logging=True, native_logging=False, init_msi_block=True, log_name_postfix=None):
        if srv_type_enum is None:
            # srv_type_enum is None, use value stored in global config
            from initat.cluster.backbone.server_enums import icswServiceEnum
            # force reload of global-config
            global_config.close()
            srv_type_enum = getattr(icswServiceEnum, global_config["SERVICE_ENUM_NAME"])
        self.srv_type_enum = srv_type_enum
        self.global_config = global_config
        self.__native_logging = native_logging
        self.__init_msi_block = init_msi_block
        self._inst_xml = InstanceXML(self.log)
        if self.__init_msi_block:
            # init MSI block
            self.__msi_block = None
        if add_config_store:
            self.__cs = config_store.ConfigStore("client", self.log)
        else:
            self.__cs = None
        global_config.add_config_entries(
            [
                ("SERVICE_ENUM_NAME", configfile.str_c_var(self.srv_type_enum.name))
            ]
        )
        if init_logging:
            if "LOG_DESTINATION" not in global_config:
                global_config.add_config_entries(
                    [
                        ("LOG_DESTINATION", configfile.str_c_var("uds:/var/lib/logging-server/py_log_zmq")),
                    ]
                )

            if "LOG_NAME" not in global_config:
                _log_name = self._inst_xml[self.srv_type_enum.value.instance_name].attrib["name"]
                if log_name_postfix:
                    _log_name = "{}-{}".format(
                        _log_name,
                        log_name_postfix,
                    )
                global_config.add_config_entries(
                    [
                        (
                            "LOG_NAME",
                            configfile.str_c_var(
                                _log_name,
                                source="instance"
                            )
                        )
                    ]
                )
            if self.__native_logging:
                # build logger name
                logger_name = "{}.{}".format(
                    process_tools.get_machine_name(),
                    global_config["LOG_NAME"]
                )
                # get logger
                self.__process.log_template = logging_functions.get_logger(
                    self.__cs,
                    logger_name.replace(".", "/"),
                    logger_name,
                )
            else:
                self.__process.log_template = logging_tools.get_logger(
                    global_config["LOG_NAME"],
                    global_config["LOG_DESTINATION"],
                    context=self.__process.zmq_context,
                )

    def create_hfp(self):
        from initat.tools import hfp_tools
        _cur_hfp = hfp_tools.create_db_entry(
            self.__sql_info.effective_device,
            hfp_tools.get_local_hfp()
        )

    @property
    def CS(self):
        return self.__cs

    @property
    def Instance(self):
        return self._inst_xml

    def check_config(self):
        # late import (for clients without django)
        if self.srv_type_enum.value.server_service:
            from initat.tools import config_tools
            from django.db.models import Q
            from initat.cluster.backbone.models import LogSource
        if self.srv_type_enum.value.instance_name is None:
            raise KeyError("No instance_name set for srv_type_enum '{}'".format(self.srv_type_enum.name))
        self._instance = self._inst_xml[self.srv_type_enum.value.instance_name]
        # conf_names = self._inst_xml.get_config_names(self._instance)
        self.log(
            "check for service_type {} (==enum {})".format(
                self.srv_type_enum.value.name,
                self.srv_type_enum.name,
            )
        )
        _opts = [
            (
                "PID_NAME",
                configfile.str_c_var(self._inst_xml.get_pid_file_name(self._instance), source="instance", database=False)
            ),
        ]
        for _name, _value in self._inst_xml.get_port_dict(self._instance).iteritems():
            _opts.append(
                (
                    "{}_PORT".format(_name.upper()),
                    configfile.int_c_var(_value, source="instance", database=False)
                ),
            )
        if self.srv_type_enum.value.server_service:
            self.__sql_info = config_tools.server_check(service_type_enum=self.srv_type_enum)
            if self.__sql_info is None or not self.__sql_info.effective_device:
                # this can normally not happen due to start / stop via meta-server
                self.log("Not a valid {}".format(self.srv_type_enum.name), logging_tools.LOG_LEVEL_ERROR)
                sys.exit(5)
            else:
                # check eggConsumers
                # set values
                _opts.extend(
                    [
                        (
                            "SERVICE_ENUM_NAME",
                            configfile.str_c_var(self.srv_type_enum.name),
                        ),
                        (
                            "SERVER_SHORT_NAME",
                            configfile.str_c_var(process_tools.get_machine_name(True)),
                        ),
                        (
                            "SERVER_IDX",
                            configfile.int_c_var(self.__sql_info.device.pk, database=False, source="instance")
                        ),
                        (
                            "CONFIG_IDX",
                            configfile.int_c_var(self.__sql_info.config.pk, database=False, source="instance")
                        ),
                        (
                            "EFFECTIVE_DEVICE_IDX",
                            configfile.int_c_var(self.__sql_info.effective_device.pk, database=False, source="instance")
                        ),
                        (
                            "LOG_SOURCE_IDX",
                            configfile.int_c_var(
                                LogSource.new(self.srv_type_enum.name, device=self.__sql_info.effective_device).pk,
                                source="instance",
                            )
                        ),
                        (
                            "MEMCACHE_PORT",
                            configfile.int_c_var(self._inst_xml.get_port_dict("memcached", command=True), source="instance")
                        ),
                    ]
                )
        self.global_config.add_config_entries(_opts)

        if self.__init_msi_block:
            self.__pid_name = self.global_config["PID_NAME"]
            process_tools.save_pid(self.__pid_name)
            self.log("init MSI Block")
            self.__msi_block = process_tools.MSIBlock(self.srv_type_enum.value.msi_block_name)
            self.__msi_block.add_actual_pid(process_name="main")
            self.__msi_block.save()

    def process_added(self, src_process, src_pid):
        if self.__init_msi_block:
            process_tools.append_pids(self.__pid_name, src_pid)
            self.__msi_block.add_actual_pid(src_pid, process_name=src_process)
            self.__msi_block.save()

    def process_removed(self, src_pid):
        if self.__init_msi_block:
            process_tools.remove_pids(self.__pid_name, src_pid)
            self.__msi_block.remove_actual_pid(src_pid)
            self.__msi_block.save()

    # property functions to access device and config
    @property
    def server(self):
        return self.__sql_info.device

    @property
    def config(self):
        return self.__sql_info.config

    @property
    def msi_block(self):
        if self.__init_msi_block:
            return self.__msi_block
        else:
            raise AttributeError("No MSI Block defined")

    def close(self):
        if self.__init_msi_block and self.__msi_block:
            process_tools.delete_pid(self.__pid_name)
            self.__msi_block.remove()
            self.__msi_block = None
        if not self.__native_logging:
            self.__process.log_template.close()
        if isinstance(self.__process, threading_tools.process_pool):
            # remove global config if we were called from the process poll
            self.global_config.delete()

    def log_config(self):
        _log = self.global_config.get_log(clear=True)
        if len(_log):
            self.log(
                "Config log ({}):".format(
                    logging_tools.get_plural("line", len(_log)),
                )
            )
            for line, log_level in _log:
                self.log(" - clf: [{:d}] {}".format(log_level, line))
        else:
            self.log("no Config log")
        conf_info = self.global_config.get_config_info()
        self.log(
            "Found {}:".format(
                logging_tools.get_plural("valid config-line", len(conf_info))
            )
        )
        for conf in conf_info:
            self.log("Config : {}".format(conf))

    def read_config_from_db(self, default_list=[]):
        from initat.tools import cluster_location
        cluster_location.read_config_from_db(
            self.global_config,
            self.__sql_info,
            default_list,
        )

    def re_insert_config(self):
        if self.__sql_info:
            from initat.tools import cluster_location
            self.log(
                "re-inserting config for srv_type {} (config_name is {})".format(
                    self.srv_type_enum.name,
                    self.__sql_info.config_name,
                )
            )
            cluster_location.write_config_to_db(
                self.global_config,
                self.__sql_info,
            )
        else:
            self.log(
                "refuse to re-insert config because sql_info is None (srv_type={})".format(
                    self.srv_type,
                ),
                logging_tools.LOG_LEVEL_ERROR
            )
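A hedged sketch of the typical call sequence, assembled from the methods defined above; the surrounding process class and the service enum chosen here are assumptions:

    # e.g. inside process_init() of a threading_tools based process
    self.CC = ConfigCheckObject(self)
    self.CC.init(icswServiceEnum.collectd_server, global_config)  # enum chosen for illustration
    self.CC.check_config()
    self.CC.log_config()
    self.CC.re_insert_config()
    # on shutdown
    self.CC.close()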
Example #9
class CapabilityProcess(threading_tools.icswProcessObj):
    def process_init(self):
        global_config.enable_pm(self)
        self.__log_template = logging_tools.get_logger(
            global_config["LOG_NAME"],
            global_config["LOG_DESTINATION"],
            context=self.zmq_context)
        # db_tools.close_connection()
        self._instance = InstanceXML(log_com=self.log)
        self._init_network()
        self._init_capabilities()
        self.__last_user_scan = None
        self.__scan_running = False
        self.register_timer(self._update,
                            2 if global_config["DEBUG"] else 30,
                            instant=True)

    def log(self, what, log_level=logging_tools.LOG_LEVEL_OK):
        self.__log_template.log(log_level, what)

    def loop_post(self):
        self.collectd_socket.close()
        self.vector_socket.close()
        self.__log_template.close()

    def _init_network(self):
        # connection to local collserver socket
        conn_str = process_tools.get_zmq_ipc_name(
            "vector", s_name="collserver", connect_to_root_instance=True)
        vector_socket = self.zmq_context.socket(zmq.PUSH)
        vector_socket.setsockopt(zmq.LINGER, 0)
        vector_socket.connect(conn_str)
        self.vector_socket = vector_socket
        self.log("connected vector_socket to {}".format(conn_str))
        # connection to local collectd server
        _cc_str = "tcp://*****:*****@{} on {} ({})".format(
                    csr.action,
                    service.name,
                    csr.content_type.model,
                    v_type,
                ),
                default=0,
                value=csr.consumed,
                factor=1,
                base=1,
                valid_until=cur_time + 3600,
            ).build_xml(_bldr)

        _bldr = drop_com.builder
        # print "*", cur_time, drop_com, _bldr
        my_vector = _bldr("values")
        _total = 0
        _total_ghost = 0
        for _csr in icswEggConsumer.objects.all().select_related(
                "config_service_enum"):
            my_vector.append(_vector_entry("ghost", _csr))
            _total_ghost += _csr.consumed
            if not _csr.ghost:
                my_vector.append(_vector_entry("consume", _csr))
                _total += _csr.consumed
        my_vector.append(
            hm_classes.MachineVectorEntry(
                "icsw.ova.overall.total",
                info="Ova consumed by all actions on all models",
                default=0,
                value=_total,
                factor=1,
                base=1,
                valid_until=cur_time + 3600,
            ).build_xml(_bldr))
        my_vector.append(
            hm_classes.MachineVectorEntry(
                "icsw.ova.overall.ghost",
                info="Ova consumed by all actions on all models (ghost)",
                default=0,
                value=_total_ghost,
                factor=1,
                base=1,
                valid_until=cur_time + 3600,
            ).build_xml(_bldr))
        # add ova per license
        ova_per_lic = icswEggBasket.objects.get_values_per_license_name()
        for lic_id_name, values in ova_per_lic.items():
            lic_id_name = lic_id_name or "global"
            for v_name, v_value in values.items():
                my_vector.append(
                    hm_classes.MachineVectorEntry(
                        "icsw.ova.license.{}.{}".format(lic_id_name, v_name),
                        info="Ova {} for license {}".format(
                            v_name, lic_id_name),
                        default=0,
                        value=v_value,
                        factor=1,
                        base=1,
                        valid_until=cur_time + 3600,
                    ).build_xml(_bldr))
        drop_com["vector_ova"] = my_vector
        drop_com["vector_ova"].attrib["type"] = "vector"

    def _update(self):
        cur_time = time.time()
        drop_com = server_command.srv_command(command="set_vector")

        mach_vectors = []
        for cap_name in self.__cap_list:
            self.__server_cap_dict[cap_name](cur_time, drop_com, mach_vectors)
        self.add_ova_statistics(cur_time, drop_com)
        self.vector_socket.send_unicode(str(drop_com))
        # print drop_com.pretty_print()
        for _mv in mach_vectors:
            try:
                self.collectd_socket.send_unicode(etree.tostring(_mv))
            except:
                self.log(
                    "unable to send machvector to collectd: {}".format(
                        process_tools.get_except_info(), ),
                    logging_tools.LOG_LEVEL_ERROR,
                )