示例#1
0
 def init(r_process, backlog_size, timeout, verbose):
     """Set up the shared HostConnection state and the 0MQ ROUTER socket.

     Stores the relayer process and the tuning parameters on the
     HostConnection class itself, then creates the ROUTER socket and
     registers it with the relayer's poller.
     """
     HostConnection.relayer_process = r_process
     # connection dict; keys are (True, conn_str) for 0MQ, (False, conn_str) for TCP
     HostConnection.hc_dict = {}
     # lookup table mapping message_ids to host_connections
     HostConnection.message_lut = {}
     HostConnection.backlog_size = backlog_size
     HostConnection.timeout = timeout
     HostConnection.verbose = verbose
     _settings_info = "backlog size is {:d}, timeout is {:d}, verbose is {}".format(
         HostConnection.backlog_size,
         HostConnection.timeout,
         str(HostConnection.verbose),
     )
     HostConnection.g_log(_settings_info)
     # ROUTER socket with a machine-unique identity
     router_sock = process_tools.get_socket(
         HostConnection.relayer_process.zmq_context,
         "ROUTER",
         identity="relayer_rtr_{}".format(process_tools.get_machine_name()),
         linger=0,
         sndhwm=HostConnection.backlog_size,
         rcvhwm=HostConnection.backlog_size,
         backlog=HostConnection.backlog_size,
         immediate=True,
     )
     HostConnection.zmq_socket = router_sock
     # incoming replies are dispatched via HostConnection.get_result
     HostConnection.relayer_process.register_poller(router_sock, zmq.POLLIN, HostConnection.get_result)  # @UndefinedVariable
示例#2
0
 def process_init(self):
     """Process setup for the ping/resolve process.

     Creates the logger, optionally attaches the ICMP protocol handler
     (controlled by start_kwargs["icmp"], default True) and registers the
     resolve/timeout plumbing.
     """
     self.__log_template = logging_functions.get_logger(
         config_store.ConfigStore("client", quiet=True),
         "{}/{}".format(
             process_tools.get_machine_name(),
             self.global_config["LOG_NAME"],
         ),
         process_name=self.name,
     )
     self.register_func("connection", self._connection)
     # clear flag for extra twisted thread
     self.__extra_twisted_threads = 0
     # print self.start_kwargs
     if self.start_kwargs.get("icmp", True):
         self.icmp_protocol = HMIcmpProtocol(
             self,
             self.__log_template,
             debug=self.global_config["DEBUG"]
         )
         # reactor.listenWith(icmp_twisted.icmp_port, self.icmp_protocol)
         # reactor.listen_ICMP(self.icmp_protocol)
         self.register_func("ping", self._ping)
     else:
         self.icmp_protocol = None
     self.register_func("resolved", self._resolved)
     self.register_timer(self._check_timeout, 5)
     # raw string: "\d" in a plain literal is an invalid escape sequence
     # (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12+)
     self.__ip_re = re.compile(r"^\d+\.\d+\.\d+\.\d+$")
     self.__pending_id, self.__pending_dict = (0, {})
示例#3
0
 def process_init(self):
     """Process setup for the inotify file-watch process.

     Creates the logger, the InotifyWatcher and the file-watch bookkeeping,
     announces the register/unregister callbacks to the pool and hooks the
     inotify file descriptor into the 0MQ poller.
     """
     self.__log_template = logging_functions.get_logger(
         config_store.ConfigStore("client", quiet=True),
         "{}/{}".format(
             process_tools.get_machine_name(),
             self.global_config["LOG_NAME"],
         ),
         process_name=self.name,
     )
     self.__watcher = inotify_tools.InotifyWatcher()
     # was INOTIFY_IDLE_TIMEOUT in self.global_config, now static
     self.__idle_timeout = 5
     # self.__watcher.add_watcher("internal", "/etc/sysconfig/host-monitoring.d", inotify_tools.IN_CREATE | inotify_tools.IN_MODIFY, self._trigger)
     # bookkeeping dicts for active file watches / targets
     self.__file_watcher_dict = {}
     self.__target_dict = {}
     # self.register_func("connection", self._connection)
     # announce both file-watch callbacks; requests arrive via "fw_handle"
     self.send_pool_message("register_callback", "register_file_watch", "fw_handle")
     self.send_pool_message("register_callback", "unregister_file_watch", "fw_handle")
     self.register_exception("term_error", self._sigint)
     # allow SIGTERM (15) to reach this process
     self.allow_signal(15)
     self.register_func("fw_handle", self._fw_handle)
     # register watcher fd with 0MQ poller
     self.register_poller(self.__watcher._fd, zmq.POLLIN, self._inotify_check)
     self.log("idle_timeout is {:d}".format(self.__idle_timeout))
     self.register_timer(self._fw_timeout, 1000)
示例#4
0
 def init(cls, r_process, backlog_size, timeout, zmq_discovery):
     """Class-level setup: store the relayer process, tuning parameters and
     the ZMQDiscovery instance, then create and register the ROUTER socket.
     """
     cls.relayer_process = r_process
     # connection dict; keys are (True, conn_str) for 0MQ, (False, conn_str) for TCP
     cls.hc_dict = {}
     # lookup table mapping message_ids to host_connections
     cls.message_lut = {}
     cls.backlog_size = backlog_size
     cls.timeout = timeout
     cls.g_log(
         "backlog size is {:d}, timeout is {:d}".format(cls.backlog_size, cls.timeout)
     )
     # ROUTER socket with a machine-unique identity
     _identity = "relayer_rtr_{}".format(process_tools.get_machine_name())
     _router = process_tools.get_socket(
         cls.relayer_process.zmq_context,
         "ROUTER",
         identity=_identity,
         linger=0,
         sndhwm=cls.backlog_size,
         rcvhwm=cls.backlog_size,
         backlog=cls.backlog_size,
         immediate=True,
     )
     cls.zmq_socket = _router
     # replies are dispatched via cls.get_result
     cls.relayer_process.register_poller(_router, zmq.POLLIN, cls.get_result)
     # ZMQDiscovery instance
     cls.zmq_discovery = zmq_discovery
示例#5
0
def _insert_bg_job(cmd, cause, obj):
    """Create a background-job entry to be handled by the cluster-server.

    Resolves the local device pk (routing cache first, database as a
    fallback) and, when called from the webfrontend with a valid user,
    creates the job and bumps the thread-local job counter.
    """
    if getattr(obj, "_no_bg_job", False):
        # used in boot_views
        return
    # create entry to be handled by the cluster-server
    # get local device, key is defined in routing.py
    # see routing.py, SrvTypeRouting definition
    _routing_key = "_WF_ROUTING_{}".format(settings.ICSW_CACHE_KEY)
    _resolv_dict = cache.get(_routing_key)
    if _resolv_dict:
        _r_dict = json.loads(_resolv_dict)
        if "_local_device" in _r_dict:
            _local_pk = _r_dict["_local_device"][0]
        else:
            # routing dict present but no local device entry
            _local_pk = 0
    else:
        # cache miss: resolve via database by machine name
        try:
            _local_pk = device.objects.get(
                Q(name=process_tools.get_machine_name())).pk
        except device.DoesNotExist:
            _local_pk = 0
    # we need local_pk and a valid user (so we have to be called via webfrontend)
    if _local_pk and thread_local_middleware().user and isinstance(
            thread_local_middleware().user, user):
        create_bg_job(_local_pk,
                      thread_local_middleware().user, cmd, cause, obj)
        # init if not already done
        if not hasattr(thread_local_obj, "num_bg_jobs"):
            thread_local_obj.num_bg_jobs = 1
        else:
            thread_local_obj.num_bg_jobs += 1
示例#6
0
文件: server.py 项目: bopopescu/icsw
 def _resolve_address(self, target):
     """Resolve *target* to an IP address, with caching.

     localhost-style names are mapped to the real machine name first (to
     avoid loops in the 0MQ connection scheme, which would result in nasty
     asserts).  Results are cached in self.__forward_lut; self.__ip_lut
     remembers the first name seen per IP.  A failing forward lookup in
     step 1 propagates socket.gaierror to the caller (unchanged behavior).
     """
     if target in self.__forward_lut:
         ip_addr = self.__forward_lut[target]
     else:
         orig_target = target
         if target.lower() in ["localhost", "127.0.0.1", "localhost.localdomain"]:
             target = process_tools.get_machine_name()
         # step 1: resolve to ip
         ip_addr = socket.gethostbyname(target)
         try:
             # step 2: try to get full name
             full_name, _aliases, _ip_addrs = socket.gethostbyaddr(ip_addr)
         except Exception:
             # reverse lookup failed, keep the address from step 1
             # (narrowed from a bare except, which also swallowed
             # KeyboardInterrupt/SystemExit)
             pass
         else:
             # resolve full name
             try:
                 ip_addr = socket.gethostbyname(full_name)
             except Exception:
                 self.log("error looking up {}: {}".format(full_name, process_tools.get_except_info()), logging_tools.LOG_LEVEL_ERROR)
         if ip_addr not in self.__ip_lut:
             self.log("resolved {} to {}".format(target, ip_addr))
             self.__ip_lut[ip_addr] = target
         self.__forward_lut[target] = ip_addr
         self.log("ip resolving: {} -> {}".format(target, ip_addr))
         if orig_target != target:
             self.__forward_lut[orig_target] = ip_addr
             self.log("ip resolving: {} -> {}".format(orig_target, ip_addr))
     return ip_addr
示例#7
0
文件: server.py 项目: bopopescu/icsw
 def _check_error_dict(self, force=False):
     """Flush collected python error groups to mail.

     Error groups older than 60 seconds (or all of them when *force* is
     set) are formatted into a mail subject/body; handled groups are
     removed from self.__eg_dict afterwards.
     """
     c_name = process_tools.get_cluster_name()
     mails_sent = 0
     s_time = time.time()
     # pids whose error groups were handled and can be dropped
     ep_dels = []
     for ep, es in self.__eg_dict.items():
         t_diff = s_time - es["last_update"]
         # t_diff < 0 presumably guards against clock jumps — confirm
         if force or (t_diff < 0 or t_diff > 60):
             subject = "Python error for pid {:d} on {}@{} ({}, {})".format(
                 ep,
                 process_tools.get_fqdn()[0],
                 c_name,
                 process_tools.get_machine_name(),
                 clusterid.get_cluster_id() or "N/A",
             )
             err_lines = "".join(es["error_str"]).split("\n")
             # body: process info header followed by numbered error lines
             msg_body = "\n".join(
                 ["Processinfo {}".format(self._get_process_info(es))] + [
                     "{:3d} {}".format(line_num + 1, line)
                     for line_num, line in enumerate(err_lines)
                 ])
             if self.CC.CS["log.send.errormails"]:
                 self._send_mail(subject, msg_body)
                 mails_sent += 1
             ep_dels.append(ep)
     # delete outside the loop to avoid mutating the dict while iterating
     for epd in ep_dels:
         del self.__eg_dict[epd]
     e_time = time.time()
     if mails_sent:
         self.log("Sent {} in {}".format(
             logging_tools.get_plural("mail", mails_sent),
             logging_tools.get_diff_time_str(e_time - s_time)))
示例#8
0
    def _add_info_parser(self, sub_parser, server_mode):
        _mach_name = process_tools.get_machine_name(short=True)
        parser = sub_parser.add_parser("info", help="show command help")
        parser.set_defaults(subcom="info", execute=self._execute)
        _choices = ["host-monitoring"]
        if server_mode:
            _choices.extend(["cluster-server"])
        parser.add_argument(
            "--subsys",
            type=str,
            default=_choices[0],
            choices=_choices,
            help="show command info for given subsystem [%(default)s]")
        parser.add_argument("--detail",
                            default=False,
                            action="store_true",
                            help="show detailed help [%(default)s]")
        parser.add_argument("--no-overview",
                            default=True,
                            dest="overview",
                            action="store_false",
                            help="show overview [%(default)s]")
        parser.add_argument(
            "--update-json",
            default=False,
            action="store_true",
            help="update json file for all defined commands [%(default)s]")
        parser.add_argument("args", nargs="*")

        return parser
示例#9
0
 def __init__(self, **kwargs):
     """Build IP <-> device lookup tables around the local host.

     Keyword Args:
         short_host_name: host name to use; defaults to the local
             machine name.

     Populates self.device, self.local_ips, self.device_dict and the
     bidirectional luts self.ip_lut (ip -> device) and self.ip_r_lut
     (device -> list of ips).
     """
     self.short_host_name = kwargs.get("short_host_name",
                                       process_tools.get_machine_name())
     try:
         self.device = device.all_enabled.get(Q(name=self.short_host_name))
     except device.DoesNotExist:
         self.device = None
     # get IP-adresses (from IP)
     self.local_ips = list(
         net_ip.objects.filter(
             Q(netdevice__device__name=self.short_host_name)
             & Q(netdevice__device__enabled=True)
             & Q(netdevice__device__device_group__enabled=True)).
         values_list("ip", flat=True))
     # get configured IP-Adresses
     # address family key 2 == AF_INET (IPv4) in the netifaces dict
     ipv4_dict = {
         cur_if_name: [ip_tuple["addr"] for ip_tuple in value[2]]
         for cur_if_name, value in [(if_name,
                                     netifaces.ifaddresses(if_name))
                                    for if_name in netifaces.interfaces()]
         if 2 in value
     }
     # remove loopback addresses
     self_ips = [
         _ip for _ip in sum(list(ipv4_dict.values()), [])
         if not _ip.startswith("127.")
     ]
     self.ip_lut = {}
     self.ip_r_lut = {}
     if self_ips:
         _do = device.all_enabled
         # get IPs
         self.device_dict = {
             cur_dev.pk: cur_dev
             for cur_dev in _do.filter(Q(
                 netdevice__net_ip__ip__in=self_ips)).prefetch_related(
                     "netdevice_set__net_ip_set__network__network_type")
         }
         for _ip in self.local_ips:
             self.ip_lut[_ip] = self.device
         self.ip_r_lut[self.device] = self.local_ips
         # build lut
         for _dev in self.device_dict.values():
             # gather all ips
             _dev_ips = sum([
                 list(_ndev.net_ip_set.all())
                 for _ndev in _dev.netdevice_set.all()
             ], [])
             # filter for valid ips (no loopback addresses)
             _dev_ips = [
                 _ip.ip for _ip in _dev_ips
                 if _ip.network.network_type.identifier != "l"
                 and not _ip.ip.startswith("127.")
             ]
             for _dev_ip in _dev_ips:
                 self.ip_lut[_dev_ip] = _dev
             self.ip_r_lut[_dev] = _dev_ips
     else:
         # no non-loopback local addresses found
         self.device_dict = {}
示例#10
0
 def _add_lw_parser(self, sub_parser):
     """Attach the "logwatch" sub-command parser to *sub_parser* and return it."""
     _client_store = config_store.ConfigStore("client", quiet=True)
     _machine_name = process_tools.get_machine_name(short=True)
     parser = sub_parser.add_parser("logwatch", help="watch icsw logs")
     parser.set_defaults(subcom="status", execute=self._execute)
     # get logroot from config_store
     parser.add_argument(
         "--root",
         type=str,
         default=_client_store["log.logdir"],
         help="root directory [%(default)s]",
     )
     parser.add_argument(
         "--machine",
         type=str,
         default=_machine_name,
         help="machine to use [%(default)s]",
     )
     parser.add_argument(
         "-n",
         type=int,
         default=400,
         help="show latest [%(default)d] lines",
     )
     parser.add_argument(
         "--format",
         type=str,
         default="%a %b %d %H:%M:%S %Y",
         help="argument for parsing loglines [%(default)s]",
     )
     # -f / -F both target "follow"; default is follow enabled
     parser.add_argument(
         "-f",
         dest="follow",
         default=True,
         action="store_true",
         help="enable follow mode, always enabled [%(default)s]",
     )
     parser.add_argument(
         "-F",
         dest="follow",
         default=True,
         action="store_false",
         help="disable follow mode",
     )
     parser.add_argument(
         "--system-filter",
         type=str,
         default=".*",
         help="regexp filter for system [%(default)s]",
     )
     parser.add_argument(
         "--with-nodes",
         default=False,
         action="store_true",
         help="add node logs [%(default)s]",
     )
     parser.add_argument(
         "--node-filter",
         type=str,
         default=".*",
         help="regexp filter for nodes [%(default)s]",
     )
     parser.add_argument(
         "--verbose",
         default=False,
         action="store_true",
         help="enable verbose mode [%(default)s]",
     )
     parser.add_argument(
         "--show-unparseable",
         default=False,
         action="store_true",
         help="show unparseable lines [%(default)s]",
     )
     parser.add_argument(
         "filter",
         nargs="*",
         type=str,
         help="list of regexp filter for system [%(default)s]",
     )
     return parser
示例#11
0
def open_socket(zmq_context, conn_str="tcp://localhost:8000"):
    """Create a DEALER socket for syslog scanning and connect it.

    Args:
        zmq_context: the 0MQ context to create the socket in.
        conn_str: endpoint to connect to; the default preserves the
            previously hard-coded "tcp://localhost:8000".

    Returns:
        The connected DEALER socket (identity "<machine>:syslog_scan",
        linger 0).
    """
    send_sock = zmq_context.socket(zmq.DEALER)
    send_sock.setsockopt_string(
        zmq.IDENTITY,
        "{}:syslog_scan".format(process_tools.get_machine_name()))
    send_sock.setsockopt(zmq.LINGER, 0)
    send_sock.connect(conn_str)
    return send_sock
示例#12
0
文件: relay.py 项目: bopopescu/icsw
 def _resolve_address_resolve(self, target):
     """Resolve *target* to an IP address, with caching and verbose logging.

     localhost-style names are mapped to the real machine name first (to
     avoid loops in the 0MQ connection scheme, which would result in nasty
     asserts).  Forward-lookup failures are logged as CRITICAL and
     re-raised; reverse-lookup failures are ignored.  Results are cached in
     self.__forward_lut.
     """
     if target in self.__forward_lut:
         ip_addr = self.__forward_lut[target]
     else:
         orig_target = target
         if target.lower().startswith(
                 "localhost") or target.lower().startswith("127.0.0."):
             # map localhost to something 0MQ can handle
             target = process_tools.get_machine_name()
         # step 1: resolve to ip
         try:
             ip_addr = socket.gethostbyname(target)
         except Exception:
             # narrowed from a bare except; log and re-raise unchanged
             self.log(
                 "cannot resolve target '{}': {}".format(
                     target, process_tools.get_except_info()),
                 logging_tools.LOG_LEVEL_CRITICAL)
             raise
         try:
             # step 2: try to get full name
             full_name, aliases, ip_addrs = socket.gethostbyaddr(ip_addr)
         except Exception:
             # reverse lookup failed, keep the address from step 1
             pass
         else:
             # resolve full name
             self.log("ip_addr {} resolved to '{}' ({}), {}".format(
                 ip_addr, full_name, ", ".join(aliases) or "N/A",
                 ", ".join(ip_addrs) or "N/A"))
             try:
                 new_ip_addr = socket.gethostbyname(full_name)
             except Exception:
                 self.log(
                     "cannot resolve full_name '{}': {}".format(
                         full_name, process_tools.get_except_info()),
                     logging_tools.LOG_LEVEL_CRITICAL)
                 raise
             else:
                 self.log(
                     "full_name {} resolves back to {} (was: {})".format(
                         full_name, new_ip_addr,
                         ip_addr), logging_tools.LOG_LEVEL_OK if new_ip_addr
                     == ip_addr else logging_tools.LOG_LEVEL_ERROR)
                 # should we use the new ip_addr ? dangerous, FIXME
                 # ip_addr = new_ip_addr
         if ip_addr not in self.__ip_lut:
             self.log("resolved {} to {}".format(target, ip_addr))
             self.__ip_lut[ip_addr] = target
         self.__forward_lut[target] = ip_addr
         self.log("ip resolving: {} -> {}".format(target, ip_addr))
         if orig_target != target:
             self.__forward_lut[orig_target] = ip_addr
             self.log("ip resolving: {} -> {}".format(orig_target, ip_addr))
     return ip_addr
示例#13
0
文件: server.py 项目: bopopescu/icsw
 def __init__(self):
     """Main process pool of the discovery server.

     Sets up config handling, the child processes (discovery, eventlog
     poller, asset generation), network sockets and the message handler
     table; finally clears pending scans and initializes run bookkeeping.
     """
     threading_tools.process_pool.__init__(self, "main", zmq=True)
     self.register_exception("int_error", self._int_error)
     self.register_exception("term_error", self._int_error)
     self.CC.init(icswServiceEnum.discovery_server, global_config)
     self.CC.check_config()
     # close connection (daemonize)
     db_tools.close_connection()
     # pull tunables from the database config
     self.CC.read_config_from_db([
         ("SNMP_PROCESSES",
          configfile.int_c_var(
              4,
              help_string="number of SNMP processes [%(default)d]",
              short_options="n")),
         ("MAX_CALLS",
          configfile.int_c_var(
              100,
              help_string="number of calls per helper process [%(default)d]"
          )),
     ])
     self.CC.re_insert_config()
     self.CC.log_config()
     # spawn worker processes
     self.add_process(DiscoveryProcess("discovery"), start=True)
     self.add_process(EventLogPollerProcess(
         EventLogPollerProcess.PROCESS_NAME),
                      start=True)
     self.add_process(GenerateAssetsProcess("generate_assets"), start=True)
     self._init_network_sockets()
     # message handler table
     self.register_func("snmp_run", self._snmp_run)
     self.register_func("generate_assets", self._generate_assets)
     self.register_func(
         "process_assets_finished",
         self._process_assets_finished,
     )
     self.register_func(
         "process_batch_assets_finished",
         self._process_batch_assets_finished,
     )
     self.register_func("send_msg", self.send_msg)
     db_tools.close_connection()
     # reduced call budget in debug mode
     self.__max_calls = global_config[
         "MAX_CALLS"] if not global_config["DEBUG"] else 5
     self.__snmp_running = True
     self._init_processes()
     # not really necessary
     self.install_remote_call_handlers()
     # clear pending scans
     self.clear_pending_scans()
     self.__run_idx = 0
     self.__pending_commands = {}
     # debug hook on a specific developer machine
     if process_tools.get_machine_name(
     ) == "eddiex" and global_config["DEBUG"]:
         self._test()
示例#14
0
 def __init__(self, srv_com, src_id, xml_input):
     """Start a 0MQ id discovery for the host/port given in *srv_com*.

     The connection target is taken either from srv_com["conn_str"] or
     from the separate host/port entries.  Discovery is rate-limited to
     one attempt per connection string every 60 seconds; otherwise a
     DEALER socket is created, registered with the relayer poller and the
     get_0mq_id request is sent (raw or wrapped in a srv_command,
     depending on raw_connect).
     """
     if "conn_str" in srv_com:
         _conn_str = srv_com["*conn_str"]
         _parts = _conn_str.split(":")
         # three parts means a transport prefix (e.g. "tcp") is present
         if len(_parts) == 3:
             _parts.pop(0)
         self.host = _parts.pop(0).split("/")[-1]
         self.port = int(_parts.pop(0))
     else:
         self.port = int(srv_com["port"].text)
         self.host = srv_com["host"].text
     self.raw_connect = True if int(srv_com.get("raw_connect",
                                                "0")) else False
     self.conn_str = "tcp://{}:{:d}".format(self.host, self.port)
     self.init_time = time.time()
     self.srv_com = srv_com
     self.src_id = src_id
     self.xml_input = xml_input
     cur_time = time.time()
     if self.conn_str in ZMQDiscovery.last_try and abs(
             ZMQDiscovery.last_try[self.conn_str] - cur_time) < 60:
         # need 60 seconds between tries
         self.socket = None
         self.send_return("last 0MQ discovery less than 60 seconds ago")
     else:
         ZMQDiscovery._pending[self.conn_str] = self
         new_sock = ZMQDiscovery.relayer_process.zmq_context.socket(
             zmq.DEALER)
         id_str = "relayer_dlr_{}_{}".format(
             process_tools.get_machine_name(), self.src_id)
         new_sock.setsockopt_string(zmq.IDENTITY, id_str)
         new_sock.setsockopt(zmq.LINGER, 0)
         new_sock.setsockopt(zmq.SNDHWM, ZMQDiscovery.backlog_size)
         new_sock.setsockopt(zmq.RCVHWM, ZMQDiscovery.backlog_size)
         new_sock.setsockopt(zmq.BACKLOG, ZMQDiscovery.backlog_size)
         # TCP keepalive so dead peers are noticed
         new_sock.setsockopt(zmq.TCP_KEEPALIVE, 1)
         new_sock.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 300)
         self.socket = new_sock
         ZMQDiscovery.relayer_process.register_poller(
             new_sock, zmq.POLLIN, self.get_result)
         # ZMQDiscovery.relayer_process.register_poller(new_sock, zmq.POLLIN, self.error)
         self.socket.connect(self.conn_str)
         if self.raw_connect:
             self.log("send raw discovery message")
             self.socket.send_unicode("get_0mq_id")
         else:
             self.log("send discovery message")
             dealer_message = server_command.srv_command(
                 command="get_0mq_id")
             dealer_message["target_ip"] = self.host
             self.socket.send_unicode(str(dealer_message))
示例#15
0
 def process_init(self):
     """Process setup for the resolver: logging, debug flag and the
     greedy "resolve" handler with its cache."""
     _store = config_store.ConfigStore("client", quiet=True)
     self.__log_template = logging_functions.get_logger(
         _store,
         "{}/{}".format(
             process_tools.get_machine_name(),
             self.global_config["LOG_NAME"],
         ),
         process_name=self.name,
     )
     # log.startLoggingWithObserver(my_observer, setStdout=False)
     self.__debug = self.global_config["DEBUG"]
     self.register_func("resolve", self._resolve, greedy=True)
     # per-process resolver cache
     self.__cache = {}
示例#16
0
文件: build.py 项目: bopopescu/icsw
 def loop_post(self):
     """Final cleanup after the build loop: release the build lock
     (updating the image bookkeeping) and log the total build time."""
     if self.__build_lock:
         self.log("removing buildlock")
         cur_img = self._get_image()
         cur_img.build_lock = False
         cur_img.release += 1
         # first successful build starts the counter at one
         cur_img.builds = (cur_img.builds or 0) + 1
         cur_img.build_machine = process_tools.get_machine_name(short=False)
         cur_img.save()
     e_time = time.time()
     _took = logging_tools.get_diff_time_str(e_time - self.__start_time)
     self.log("build took {}".format(_took))
     self.__log_template.close()
示例#17
0
    def _add_info_parser(self, sub_parser, server_mode):
        _mach_name = process_tools.get_machine_name(short=True)
        parser = sub_parser.add_parser("call", help="call subsystems")
        parser.set_defaults(subcom="call", execute=self._execute)
        _choices = ["host-monitoring"]
        if server_mode:
            _choices.extend(["cluster-server"])
        parser.add_argument(
            "--subsys",
            type=str,
            default=_choices[0],
            choices=_choices,
            help="show command info for given subsystem [%(default)s]")
        parser.add_argument("args", nargs="+")

        return parser
示例#18
0
 def process_init(self):
     """Process setup for command handling: logging, config store and
     the command queues with their periodic check timer."""
     self.__log_template = logging_functions.get_logger(
         config_store.ConfigStore("client", quiet=True),
         "{}/{}".format(process_tools.get_machine_name(), self.global_config["LOG_NAME"]),
         process_name=self.name,
     )
     # config store with logging attached
     self.CS = config_store.ConfigStore("client", self.log)
     self.commands = []
     self.register_func("command_batch", self._command_batch)
     # commands pending because of a missing package list
     self.pending_commands = []
     # list of pending package commands
     self.package_commands = []
     # re-check queued commands every 10 seconds
     self.register_timer(self._check_commands, 10)
示例#19
0
 def _add_job_parser(self, sub_parser, server_mode):
     _mach_name = process_tools.get_machine_name(short=True)
     parser = sub_parser.add_parser("job", help="job helper commands")
     if server_mode:
         _choices = ["info", "setvar", "listvars"]
     else:
         _choices = ["info", "setvar"]
     parser.set_defaults(subcom="job", execute=self._execute)
     parser.add_argument("--job-id", default="", type=str, help="job ID (gets evaluated automatically via environ) [%(default)s]")
     parser.add_argument("--task-id", default=0, type=int, help="task ID (gets evaluated automatically via environ) [%(default)d]")
     parser.add_argument("--server-address", default="", type=str, help="RMS server address [%(default)s]")
     parser.add_argument("--server-port", default=8009, type=int, help="RMS server address [%(default)d]")
     parser.add_argument("--mode", default="info", type=str, choices=_choices, help="job subcommand [%(default)s]")
     parser.add_argument("--name", default="", type=str, help="variable name [%(default)s]")
     parser.add_argument("--value", default="", type=str, help="variable value [%(default)s]")
     parser.add_argument("--unit", default="", type=str, help="variable unit [%(default)s]")
     return parser
示例#20
0
 def _enable_syslog_config(self):
     """Detect the active syslog daemon and enable the rsyslog config
     when the daemon is supported."""
     syslog_srvcs = self.srv_helper.find_services(".*syslog", active=True)
     self.__syslog_type = None
     if not syslog_srvcs:
         self.log("found no valid syslog service",
                  logging_tools.LOG_LEVEL_ERROR)
         return
     self.__syslog_type = syslog_srvcs[0]
     self.log("syslog type found: {}".format(self.__syslog_type))
     # hack for old sles11sp3 (liebherr) and Centos6 (Ac2T)
     _is_rsyslog = self.__syslog_type.count("rsys") > 0
     if _is_rsyslog or (
         self.__syslog_type in ["syslog"]
         and process_tools.get_machine_name() in ["lwnsu62020", "admin"]
     ):
         self._enable_rsyslog()
     else:
         self.log(
             "syslog-type {} not supported".format(self.__syslog_type),
             logging_tools.LOG_LEVEL_ERROR)
示例#21
0
 def _send_to_rms_server(self, srv_com):
     """Send *srv_com* to the RMS (SGE) server, creating the socket lazily.

     The DEALER socket is connected once (identity "afm_<machine>_<pid>")
     and reused; send errors are logged but not raised (best-effort
     affinity reporting).
     """
     if not self.__server_socket:
         self.__server_socket = process_tools.get_socket(
             self.module_info.main_proc.zmq_context,
             "DEALER",
             linger=10,
             identity="afm_{}_{:d}".format(process_tools.get_machine_name(),
                                           os.getpid()),
             immediate=False,
         )
         _srv_address = "tcp://{}:{:d}".format(
             self._config_dict["SGE_SERVER"],
             self._config_dict["SGE_SERVER_PORT"],
         )
         self.__server_socket.connect(_srv_address)
         self.log("connected to {}".format(_srv_address))
     try:
         self.__server_socket.send_unicode(str(srv_com))
     except Exception:
         # narrowed from a bare except (which also swallowed
         # KeyboardInterrupt/SystemExit); still best-effort: log and go on
         self.log(
             "error sending affinity info: {}".format(
                 process_tools.get_except_info()),
             logging_tools.LOG_LEVEL_ERROR)
示例#22
0
# database config
_cs = config_store.ConfigStore(GEN_CS_NAME,
                               quiet=True,
                               access_mode=config_store.AccessModeEnum.GLOBAL)

# version config
# TODO: check for local config when running in debug (development) mode
_vers = config_store.ConfigStore(VERSION_CS_NAME, quiet=True)
# version keys to export; "???" marks a missing entry in the store
_DEF_NAMES = ["database", "software", "models"]
ICSW_VERSION_DICT = {_name: _vers.get(_name, "???") for _name in _DEF_NAMES}

ICSW_DATABASE_VERSION = ICSW_VERSION_DICT["database"]
ICSW_SOFTWARE_VERSION = ICSW_VERSION_DICT["software"]
ICSW_MODELS_VERSION = ICSW_VERSION_DICT["models"]

# debug mode is keyed to known developer machine names
ICSW_DEBUG = process_tools.get_machine_name() in ["eddie", "lemmy"]

# validate settings
if _cs["password.hash.function"] not in ["SHA1", "CRYPT"]:
    raise ImproperlyConfigured("password hash function '{}' not known".format(
        _cs["password.hash.function"]))

ICSW_ALLOWED_OVERALL_STYLES = {"normal", "condensed"}
ICSW_ALLOWED_MENU_LAYOUTS = {"newstyle"}

# overall style falls back to "normal" when unset in the config store
if "overall.style" in _cs:
    ICSW_OVERALL_STYLE = _cs["overall.style"]
else:
    ICSW_OVERALL_STYLE = "normal"
if _cs.get("missing.timezone.is.critical", True):
示例#23
0
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
""" tools for icsw commands """

from __future__ import unicode_literals, print_function

import os

from initat.tools import process_tools

__all__ = [
    "ICSW_DEBUG_MODE",
]

# truthy when running on a known developer machine AND one of the debug
# environment variables is set.  NOTE(review): the expression evaluates to
# the env-var string (or False/None), not a strict bool — confirm that
# consumers only test truthiness before tightening to bool(...)
ICSW_DEBUG_MODE = process_tools.get_machine_name() in [
    "eddie", "lemmy"
] and (os.environ.get("DEBUG_ICSW_SOFTWARE")
       or os.environ.get("ICSW_DEBUG_SOFTWARE"))
示例#24
0
 def __call__(self, srv_com, cur_ns):
     """Store the heartbeat info (host name and crm_mon output) in *srv_com*."""
     _info = {
         "host": process_tools.get_machine_name(),
         "output": self.module._exec_command("/usr/sbin/crm_mon -1")
     }
     srv_com["heartbeat_info"] = _info
示例#25
0
""" classes for building rpm-packages """

import argparse
import subprocess
import os
import pwd
import stat
import sys
import codecs

from initat.tools import logging_tools, process_tools

SCRIPT_TYPES = ["post", "pre", "postun", "preun"]

default_ns = argparse.Namespace(packager="{}@{}".format(
    pwd.getpwuid(os.getuid())[0], process_tools.get_machine_name()),
                                user="******",
                                group="root",
                                provides="",
                                arch={"i686":
                                      "i586"}.get(os.uname()[4],
                                                  os.uname()[4]))


class package_parser(argparse.ArgumentParser):
    def __init__(self):
        argparse.ArgumentParser.__init__(self)
        self.add_argument("-s",
                          "--summary",
                          dest="summary",
                          help="Summary string [%(default)s]",
示例#26
0
文件: server.py 项目: walong365/icsw
 def get_python_handle(self, record):
     """Return (and create on demand) the logging handle for *record*.

     *record* is either an ``icswLogHandleTypes`` enum member (direct
     handles such as log / log_py / err_py) or a log record carrying
     ``name`` / ``process`` / optional ``host`` / ``ppid`` attributes.
     The record name is mapped to a file path below the configured log
     directory (dots become path separators, each intermediate part gets
     a ``.d`` suffix); missing subdirectories are created.  Handles are
     cached in ``self.__handles``; a cached handle whose process ids no
     longer match the record is dropped and re-created.
     """
     if isinstance(record, icswLogHandleTypes):
         # special type for direct handles (log, log_py, err_py)
         sub_dirs = []
         record_host = "localhost"
         record_name, record_process, record_parent_process = (record.value,
                                                               os.getpid(),
                                                               os.getppid())
     else:
         if not hasattr(record, "host"):
             # no host set: use local machine name
             record.host = process_tools.get_machine_name()
         sub_dirs = [record.host]
         record_host = record.host
         record_name, record_process, record_parent_process = (
             record.name, record.process, getattr(record, "ppid", 0))
     # init.at logger, create subdirectories
     # generate list of dirs and file_name
     # strip init.at. prefix (no longer in use)
     if record_name.startswith("init.at."):
         record_name = record_name[8:]
     # protect literal "\."-sequences from the dot-to-slash mapping:
     # "\." -> "#" -> "/"-mapping of plain dots -> "#" back to "."
     scr1_name = record_name.replace("\.",
                                     "#").replace(".",
                                                  "/").replace("#", ".")
     # each intermediate directory part gets a ".d" suffix; sub_dirs
     # accumulates the growing relative paths (a/a.d/b.d, ...)
     for path_part in os.path.dirname(scr1_name).split(os.path.sep):
         if path_part:
             path_part = "{}.d".format(path_part)
             if sub_dirs:
                 sub_dirs.append(os.path.join(sub_dirs[-1], path_part))
             else:
                 sub_dirs.append(path_part)
     if sub_dirs:
         h_name = os.path.join(sub_dirs[-1], os.path.basename(scr1_name))
     else:
         h_name = os.path.basename(scr1_name)
     # create logger_name
     logger_name = "{}.{}".format(record_host, record_name)
     if h_name in self.__handles:
         # drop a cached handle when neither the record's process nor its
         # parent process matches the handle (unless the handle ignores
         # process ids, e.g. for threadpool setups)
         if not ({record_process, record_parent_process} & {
                 self.__handles[h_name].process_id,
                 self.__handles[h_name].parent_process_id
         }) and not self.__handles[h_name].ignore_process_id:
             self.log("access mismatch detected for {}".format(h_name, ),
                      logging_tools.LOG_LEVEL_ERROR)
             self.remove_handle(h_name)
     if h_name not in self.__handles:
         # TODO, FIXME, unify with native_logging code in server_mixins.py
         self.log("logger '{}' requested".format(logger_name, ))
         full_name = os.path.join(self.CC.CS["log.logdir"], h_name)
         base_dir, base_name = (os.path.dirname(full_name),
                                os.path.basename(full_name))
         self.log("attempting to create log_file '{}' in dir '{}'".format(
             base_name, base_dir))
         # add new sub_dirs
         sub_dirs = []
         for new_sub_dir in os.path.dirname(h_name).split("/"):
             if not sub_dirs:
                 sub_dirs.append(new_sub_dir)
             else:
                 sub_dirs.append(os.path.join(sub_dirs[-1], new_sub_dir))
         # create sub_dirs
         for sub_dir in sub_dirs:
             act_dir = os.path.join(self.CC.CS["log.logdir"], sub_dir)
             if not os.path.isdir(act_dir):
                 try:
                     os.makedirs(act_dir)
                 except OSError:
                     # creation failure is logged but not fatal here; the
                     # logger below will surface any real problem
                     self.log(
                         "cannot create directory {}: {}".format(
                             act_dir,
                             process_tools.get_except_info(),
                         ), logging_tools.LOG_LEVEL_ERROR)
                 else:
                     self.log("created directory {}".format(act_dir))
         # get logger
         logger = logging_functions.get_logger(self.CC.CS, h_name,
                                               logger_name)
         self.__num_open += 1
         # save process_id to handle open / close
         logger.process_id = record_process
         logger.parent_process_id = record_parent_process
         # set ignore_process_id flag, usefull for apache process / threadpools
         logger.ignore_process_id = False
         logger.handle_name = h_name
         self.__handles[h_name] = logger
         self.__handle_usage[h_name] = set()
         self.__handle_usecount[h_name] = 0
         self.log(
             "added handle {} (file {} in dir {}), total open: {}".format(
                 h_name, base_name, base_dir,
                 logging_tools.get_plural("handle",
                                          len(list(
                                              self.__handles.keys())))))
     return self.__handles[h_name]
示例#27
0
文件: main.py 项目: walong365/icsw
def show_command(options):
    """Print each file in ``options.files`` as a ``config.add_file_object``
    python snippet.

    Binary files (``options.binary``) are emitted bz2-compressed and
    base64-encoded; text files are emitted line by line, optionally
    whitespace-stripped (``options.full_strip``) and with comment lines
    removed (``options.remove_hashes``).  Unreadable files are reported
    and skipped; a text file that fails to decode aborts with exit code 3.
    """
    for f_name in options.files:
        _obj_name = f_name if not options.short_path else os.path.basename(
            f_name)
        # derive a valid python identifier from the file name
        for _rc in ["/", ".", "-"]:
            _obj_name = _obj_name.replace(_rc, "_")
        while _obj_name.startswith("_"):
            _obj_name = _obj_name[1:]
        obj_name = "{}_object".format(_obj_name)
        try:
            f_stat = os.stat(f_name)
            content = open(f_name).read()
        except Exception:
            # best-effort: report and continue with the next file
            # (was a bare except, which also swallowed SystemExit /
            # KeyboardInterrupt)
            print("error reading file '{}': {}".format(
                f_name, process_tools.get_except_info()))
        else:
            if not options.binary:
                f_lines = content.split("\n")
                _f_info = logging_tools.get_plural("line", f_lines)
            else:
                _f_info = "binary"
            out_lines = [
                "",
                "# from {} ({}, host {}, size was {}, {})".format(
                    f_name,
                    datetime.datetime.now(),
                    process_tools.get_machine_name(short=False),
                    logging_tools.get_size_str(f_stat[stat.ST_SIZE]),
                    _f_info,
                ),
                "",
                "{} = config.add_file_object('{}')".format(obj_name, f_name),
            ]
            if options.binary:
                out_lines.extend([
                    "import bz2", "import base64", "",
                    # the generated snippet must call base64.b64decode;
                    # the previous "base.b64decode" raised a NameError when
                    # the emitted code was executed
                    "{} += bz2.decompress(base64.b64decode('{}'))".format(
                        obj_name,
                        base64.b64encode(bz2.compress(content)),
                    )
                ])
            else:
                if options.full_strip:
                    f_lines = [
                        _line.strip() for _line in f_lines if _line.strip()
                    ]
                if options.remove_hashes:
                    # keep shebang lines, drop all other comment lines
                    f_lines = [
                        _line for _line in f_lines
                        if (not _line.startswith("#") or _line.startswith("#!")
                            )
                    ]
                p_line = " " * 4
                try:
                    out_lines.append("{} += [\n{}]\n".format(
                        obj_name, "".join([
                            "{}'{}',\n".format(
                                p_line,
                                _line.replace("'", '"').replace("\\", "\\\\"))
                            for _line in f_lines
                        ])))
                except UnicodeDecodeError:
                    print()
                    print(
                        "'{}' seems to be a binary file, please use -b switch".
                        format(f_name))
                    print()
                    sys.exit(3)
            # preserve the original file mode in the generated snippet
            out_lines.append("{}.mode = 0o{:o}".format(
                obj_name, stat.S_IMODE(f_stat[stat.ST_MODE])))
            print("\n".join(out_lines))
示例#28
0
def create_noctua_fixtures():
    """Create the initial database fixtures for a Noctua installation.

    Populates monitoring configs and check commands, the local server
    device with its device configurations, monitoring periods and
    templates, a default admin/user account pair (only on fresh installs)
    and the network objects derived from the local network setup (via the
    UCS ``ucr`` tool when present, python-netifaces helpers otherwise).
    """
    print("Creating Noctua fixtures...")
    # first config catalog
    first_cc = config_catalog.objects.all()[0]

    # category tree
    ct = category_tree()
    cat_serv = ct.add_category("/mon/services")
    cat_web = ct.add_category("/mon/services/web")
    cat_mail = ct.add_category("/mon/services/mail")

    # config
    # (message typo fixed: was "Creeating configurations.")
    print("Creating configurations.")
    ping_config = factories.Config(
        name="check_ping",
        config_catalog=first_cc,
    )
    snmp_config = factories.Config(
        name="check_snmp_info",
        config_catalog=first_cc,
    )
    ssh_config = factories.Config(
        name="check_ssh",
        config_catalog=first_cc,
    )
    http_config = factories.Config(
        name="check_http",
        config_catalog=first_cc,
    )
    https_config = factories.Config(
        name="check_https",
        config_catalog=first_cc,
    )
    ldap_config = factories.Config(
        name="check_ldap",
        config_catalog=first_cc,
    )
    imap_config = factories.Config(
        name="check_imap",
        config_catalog=first_cc,
    )
    imaps_config = factories.Config(
        name="check_imaps",
        config_catalog=first_cc,
    )
    pop3s_config = factories.Config(
        name="check_pop3s",
        config_catalog=first_cc,
    )
    smtps_config = factories.Config(
        name="check_smtps",
        config_catalog=first_cc,
    )
    print("Creating monitoring checks.")
    factories.MonCheckCommand(
        name="snmp_info",
        command_line="$USER3$ -m $HOSTADDRESS$ -C $ARG1$ -V $ARG2$ snmp_info",
        config=snmp_config).categories.add(cat_serv)
    factories.MonCheckCommand(
        name="check_ping",
        command_line="$USER2$ -m localhost ping $HOSTADDRESS$ 5 5.0",
        config=ping_config).categories.add(cat_serv)
    factories.MonCheckCommand(name="check_ssh",
                              command_line="$USER1$/check_ssh $HOSTADDRESS$",
                              config=ssh_config).categories.add(cat_serv)
    factories.MonCheckCommand(
        name="check_http",
        command_line="$USER1$/check_http -H $HOSTADDRESS$",
        config=http_config).categories.add(cat_web)
    factories.MonCheckCommand(
        name="check_imaps",
        command_line="$USER1$/check_imap -H $HOSTADDRESS$ -p 993 -S",
        config=imaps_config).categories.add(cat_mail)
    factories.MonCheckCommand(
        name="check_ldap",
        command_line="$USER1$/check_ldap -H $HOSTADDRESS$ -b dc=init,dc=at -3",
        config=ldap_config).categories.add(cat_serv)
    factories.MonCheckCommand(
        name="check_https",
        command_line="$USER1$/check_http -S -H $HOSTADDRESS$ -C 30",
        config=https_config).categories.add(cat_web)
    factories.MonCheckCommand(
        name="check_imap",
        command_line="$USER1$/check_imap -H $HOSTADDRESS$ -p 143",
        config=imap_config).categories.add(cat_mail)
    factories.MonCheckCommand(
        name="check_pop3s",
        command_line="$USER1$/check_pop3 -H $HOSTADDRESS$ -p 995 -S",
        config=pop3s_config).categories.add(cat_mail)
    factories.MonCheckCommand(
        name="check_smtps",
        command_line="$USER1$/check_smtps -H $HOSTADDRESS$ -p 465 -S",
        config=smtps_config).categories.add(cat_mail)

    # domain name tree
    dnt = domain_name_tree()
    _top_level_dtn = dnt.get_domain_tree_node("")
    # device_group
    print("Creating device and device group.")
    first_devg = factories.DeviceGroup(name="server_group")
    first_dev = factories.Device(
        name=process_tools.get_machine_name(),
        device_group=first_devg,
        domain_tree_node=_top_level_dtn,
    )

    print("Creating device configurations.")
    factories.DeviceConfig(
        device=first_dev,
        config=factories.Config(name="monitor_server",
                                config_catalog=first_cc),
    )
    factories.DeviceConfig(
        device=first_dev,
        config=factories.Config(name="rrd_server", config_catalog=first_cc),
    )
    factories.DeviceConfig(
        device=first_dev,
        config=factories.Config(name="server", config_catalog=first_cc),
    )
    factories.DeviceConfig(
        device=first_dev,
        config=factories.Config(name="rrd_collector", config_catalog=first_cc),
    )
    factories.DeviceConfig(
        device=first_dev,
        config=factories.Config(name="discovery_server",
                                config_catalog=first_cc),
    )
    factories.DeviceConfig(
        device=first_dev,
        config=ssh_config,
    )

    print("Creating monitoring periods.")
    initial_mon_period = factories.MonPeriod(name="always",
                                             sun_range="00:00-24:00",
                                             mon_range="00:00-24:00",
                                             tue_range="00:00-24:00",
                                             wed_range="00:00-24:00",
                                             thu_range="00:00-24:00",
                                             fri_range="00:00-24:00",
                                             sat_range="00:00-24:00")
    first_st = factories.MonServiceTempl(
        name="dummy_service_template",
        nsc_period=initial_mon_period,
        nsn_period=initial_mon_period,
    )
    _first_dt = factories.MonDeviceTempl(
        name="dummy_device_template",
        mon_service_templ=first_st,
        mon_period=initial_mon_period,
        not_period=initial_mon_period,
        host_check_command=host_check_command.objects.get(
            Q(name="check-host-alive")),
    )
    is_ucs = os.path.isfile("/usr/sbin/ucr")

    # the create_cluster script adds an admin user
    # if there are no users, or in case of an ucs system, if only this one new admin exists,
    # then we want an admin and a user user
    users = user.objects.all()
    empty_install = users.count() == 0
    new_install = (users.count() == 1 and users[0].login == 'admin'
                   and users[0].login_count == 0)
    if empty_install or (is_ucs and new_install):
        print('Creating user and groups.')
        user.objects.all().delete()
        group.objects.all().delete()

        # group / users
        _group = factories.Group(
            groupname="group",
            homestart="/",
            gid=100,
        )
        _group.allowed_device_groups.add(first_devg)
        _user = factories.User(
            login="******",
            uid=400,
            group=_group,
            password="******",
        )
        _user.allowed_device_groups.add(first_devg)
        _first_mc = factories.MonContact(
            user=_user,
            snperiod=initial_mon_period,
            hnperiod=initial_mon_period,
        )
        _admin = user.objects.create_superuser(
            "admin",
            "*****@*****.**",
            "admin",
        )
        # we need contacts for all initial users so that they can access icinga
        factories.MonContact(
            user=_admin,
            snperiod=initial_mon_period,
            hnperiod=initial_mon_period,
        )
        _admin.allowed_device_groups.add(first_devg)
        # network
    if is_ucs:
        if_address = get_local_ip_address("62.99.204.238")
        # print if_address

        if_name = get_interface_by_ip(if_address)
        # print if_name

        # query the UCS registry for the interface parameters
        # NOTE(review): p.stdout.read() returns bytes under python 3; this
        # path looks python-2-only — confirm before porting
        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/address' % (if_name)],
            stdout=subprocess.PIPE)
        if_address = p.stdout.read().strip().split("\n")[0]

        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/network' % (if_name)],
            stdout=subprocess.PIPE)
        if_network = p.stdout.read().strip().split("\n")[0]

        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/broadcast' % (if_name)],
            stdout=subprocess.PIPE)
        if_broadcast = p.stdout.read().strip().split("\n")[0]

        p = subprocess.Popen(
            ['ucr', 'get', 'interfaces/%s/netmask' % (if_name)],
            stdout=subprocess.PIPE)
        if_netmask = p.stdout.read().strip().split("\n")[0]

        p = subprocess.Popen(['ucr', 'get', 'gateway'], stdout=subprocess.PIPE)
        out = p.stdout.read().strip().split("\n")[0]
        if_gateway = out
    else:
        print(
            "Not installed on UCS, /usr/sbin/ucr not found. Using python-netifaces."
        )

        if_address = get_local_ip_address("62.99.204.238")
        if_name = get_interface_by_ip(if_address)
        if_netmask = get_netmask_by_interface(if_name)
        if_broadcast = get_broadcast_by_interface(if_name)
        # derive the network address from netmask and broadcast
        if_network = str(
            ipvx_tools.ipv4(if_netmask) & ipvx_tools.ipv4(if_broadcast))
        if_gateway = get_default_gateway_linux()

    print('Creating network objects.')
    _network = factories.Network(
        identifier="lan",
        network_type=network_type.objects.get(Q(identifier="o")),
        name="lan",
        network=if_network,
        broadcast=if_broadcast,
        netmask=if_netmask,
        gateway=if_gateway,
    )
    _netdevice = factories.NetDevice(
        device=first_dev,
        devname=if_name,
        routing=True,
        netdevice_speed=netdevice_speed.objects.get(
            Q(speed_bps=1000000000) & Q(full_duplex=True)
            & Q(check_via_ethtool=True)),
        network_device_type=network_device_type.objects.get(
            Q(identifier="eth")),
    )
    _net_ip = factories.NetIp(
        ip=if_address,
        network=_network,
        netdevice=_netdevice,
        domain_tree_node=_top_level_dtn,
    )
示例#29
0
def add_fixtures(**kwargs):
    """Create the device-variable scopes and their allowed variable names.

    Builds the three scopes (normal / inventory / comm), marks "normal"
    as the default scope and registers the allowed variable names per
    scope; names prefixed with "*" are created as non-editable.
    """
    scope_specs = [
        # (name, prefix, description, priority, fixed, system_scope)
        ("normal", "", "default Scope", 100, False, True),
        ("inventory", "__$$ICSW_INV$$__", "Scope for device inventory", 50,
         True, False),
        ("comm", "__$$ICSW_COM$$__", "Scope for device communication", 20,
         True, True),
    ]
    _fact_dict = {
        s_name: factories.device_variable_scope_factory(
            name=s_name,
            prefix=s_prefix,
            description=s_descr,
            priority=s_prio,
            fixed=s_fixed,
            system_scope=s_system,
        )
        for s_name, s_prefix, s_descr, s_prio, s_fixed, s_system in scope_specs
    }
    # set default scope
    _fact_dict["normal"].default_scope = True
    _fact_dict["normal"].save()
    # collect communication variables; the first occurrence of a name wins
    comm_list = []
    _used_vars = set()
    for dci in DeviceConnectionEnum:
        # dci.name is the group name
        for cur_var in dci.value.var_list + dci.value.opt_list:
            if cur_var.name in _used_vars:
                continue
            _used_vars.add(cur_var.name)
            comm_list.append((
                "*{}".format(cur_var.name),
                cur_var.info,
                dci.name,
                cur_var.var_type,
                cur_var.is_password,
            ))
    _defaults = {
        "inventory": [
            ("serial", "Serial number", "admin", DeviceVarTypeEnum.string,
             False),
            ("id", "Numeric ID", "admin", DeviceVarTypeEnum.integer, False),
            ("time_of_purchase", "Date of purchase", "admin",
             DeviceVarTypeEnum.date, False),
        ],
        "comm":
        comm_list,
    }
    for _scope_name, _var_list in _defaults.items():
        for _name, _descr, _group, _forced_type, _passwd_field in _var_list:
            # a leading "*" marks the variable as non-editable
            editable = not _name.startswith("*")
            if not editable:
                _name = _name[1:]
            factories.DVSAllowedNameFactory(
                name=_name,
                description=_descr,
                device_variable_scope=_fact_dict[_scope_name],
                forced_type=_forced_type.value,
                group=_group,
                editable=editable,
                password_field=_passwd_field,
            )

    if process_tools.get_machine_name() in ["eddie"]:
        # debug output on the development machine
        for _e in device_variable_scope.objects.all():
            print(str(_e))

        for _e in dvs_allowed_name.objects.all():
            print(str(_e))
示例#30
0
 def _send_vector(self, *args, **kwargs):
     """Send the machine vector identified by args[0] to its target server.

     Honours a per-target "pause_until" timestamp (set after a send
     failure), emits a full vector every "full_info_every"-th call and a
     simple one otherwise, and serializes as XML or JSON depending on
     the per-target "format" setting.  The updated per-target structure
     is written back to ``self.cs`` at the end.
     """
     send_id = args[0]
     _struct = self.cs[send_id]
     _p_until = _struct.get("pause_until", 0)
     cur_time = int(time.time())
     # print "_", _p_until, cur_time
     if _p_until:
         if _p_until > cur_time:
             # still paused after an earlier send failure
             return
         else:
             self.log("clearing pause_until")
             del _struct["pause_until"]
     # every full_info_every-th send (default 10) transmits the full vector
     cur_id = _struct["sent"]
     full = cur_id % _struct.get("full_info_every", 10) == 0
     cur_id += 1
     _struct["sent"] = cur_id
     try:
         fqdn, _short_name = process_tools.get_fqdn()
     except:
         # fall back to the plain machine name if the fqdn lookup fails
         fqdn = process_tools.get_machine_name()
     send_format = _struct.get("format", "xml")
     if send_format == "xml":
         send_vector = self.build_xml(E, simple=not full)
         send_vector.attrib["name"] = _struct.get("send_name", fqdn) or fqdn
         send_vector.attrib["interval"] = "{:d}".format(
             _struct.get("send_every"))
         send_vector.attrib["uuid"] = self.module.main_proc.zeromq_id
     else:
         # JSON: build_json apparently returns a (tag, dict) pair whose
         # second element carries the attributes — confirm against build_json
         send_vector = self.build_json(simple=not full)
         send_vector[1]["name"] = _struct.get("send_name", fqdn) or fqdn
         send_vector[1]["interval"] = _struct.get("send_every")
         send_vector[1]["uuid"] = self.module.main_proc.zeromq_id
     # send to server
     t_host, t_port = (
         _struct.get("target", "127.0.0.1"),
         _struct.get("port", 8002),
     )
     try:
         if send_format == "xml":
             self.__socket_dict[send_id].send_unicode(
                 unicode(etree.tostring(send_vector)))  # @UndefinedVariable
         else:
             # print json.dumps(send_vector)
             self.__socket_dict[send_id].send_unicode(
                 json.dumps(send_vector))
     except:
         exc_info = process_tools.get_except_info()
         # ignore errors
         self.log(
             "error sending to ({}, {:d}): {}".format(
                 t_host, t_port, exc_info), logging_tools.LOG_LEVEL_ERROR)
         if exc_info.count("int_error"):
             # internal errors (e.g. shutdown) are re-raised
             raise
         else:
             # problem sending, wait 2 minutes
             _diff_t = 120
             _w_time = cur_time + _diff_t
             self.log(
                 "setting pause_until to {:d} (+{:d} seconds)".format(
                     _w_time, _diff_t), logging_tools.LOG_LEVEL_WARN)
             _struct["pause_until"] = _w_time
     self.cs[send_id] = _struct